/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

static const u32 hpd_ibx[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_mask_i915[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_g4x[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

/* For display hotplug interrupt */
static void
ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->pc8.irqs_disabled) {
		WARN(1, "IRQs disabled\n");
		dev_priv->pc8.regsave.deimr &= ~mask;
		return;
	}

	if ((dev_priv->irq_mask & mask) != 0) {
		dev_priv->irq_mask &= ~mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

static void
ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->pc8.irqs_disabled) {
		WARN(1, "IRQs disabled\n");
		dev_priv->pc8.regsave.deimr |= mask;
		return;
	}

	if ((dev_priv->irq_mask & mask) != mask) {
		dev_priv->irq_mask |= mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->pc8.irqs_disabled) {
		WARN(1, "IRQs disabled\n");
		dev_priv->pc8.regsave.gtimr &= ~interrupt_mask;
		dev_priv->pc8.regsave.gtimr |= (~enabled_irq_mask &
						interrupt_mask);
		return;
	}

	dev_priv->gt_irq_mask &= ~interrupt_mask;
	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	POSTING_READ(GTIMR);
}

void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, mask);
}

void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, 0);
}

/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->pc8.irqs_disabled) {
		WARN(1, "IRQs disabled\n");
		dev_priv->pc8.regsave.gen6_pmimr &= ~interrupt_mask;
		dev_priv->pc8.regsave.gen6_pmimr |= (~enabled_irq_mask &
						     interrupt_mask);
		return;
	}

	new_val = dev_priv->pm_irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_irq_mask) {
		dev_priv->pm_irq_mask = new_val;
		I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask);
		POSTING_READ(GEN6_PMIMR);
	}
}

void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, mask);
}

void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, 0);
}

static bool ivb_can_enable_err_int(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc;
	enum pipe pipe;

	assert_spin_locked(&dev_priv->irq_lock);

	for_each_pipe(pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		if (crtc->cpu_fifo_underrun_disabled)
			return false;
	}

	return true;
}

static bool cpt_can_enable_serr_int(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;
	struct intel_crtc *crtc;

	assert_spin_locked(&dev_priv->irq_lock);

	for_each_pipe(pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		if (crtc->pch_fifo_underrun_disabled)
			return false;
	}

	return true;
}

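/*
 * The per-platform *_set_fifo_underrun_reporting() helpers below only toggle
 * the relevant interrupt mask bits for their display generation; the per-crtc
 * bookkeeping and dev_priv->irq_lock locking are handled by the
 * intel_set_{cpu,pch}_fifo_underrun_reporting() entry points further down.
 */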
static void
ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
				     enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN :
					  DE_PIPEB_FIFO_UNDERRUN;

	if (enable)
		ironlake_enable_display_irq(dev_priv, bit);
	else
		ironlake_disable_display_irq(dev_priv, bit);
}

static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
						  enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	if (enable) {
		I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));

		if (!ivb_can_enable_err_int(dev))
			return;

		ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
	} else {
		bool was_enabled = !(I915_READ(DEIMR) & DE_ERR_INT_IVB);

		/* Change the state _after_ we've read out the current one. */
		ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);

		if (!was_enabled &&
		    (I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe))) {
			DRM_DEBUG_KMS("uncleared fifo underrun on pipe %c\n",
				      pipe_name(pipe));
		}
	}
}

static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev,
						  enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	assert_spin_locked(&dev_priv->irq_lock);

	if (enable)
		dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_FIFO_UNDERRUN;
	else
		dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_FIFO_UNDERRUN;
	I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
	POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
					 uint32_t interrupt_mask,
					 uint32_t enabled_irq_mask)
{
	uint32_t sdeimr = I915_READ(SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->pc8.irqs_disabled &&
	    (interrupt_mask & SDE_HOTPLUG_MASK_CPT)) {
		WARN(1, "IRQs disabled\n");
		dev_priv->pc8.regsave.sdeimr &= ~interrupt_mask;
		dev_priv->pc8.regsave.sdeimr |= (~enabled_irq_mask &
						 interrupt_mask);
		return;
	}

	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}
#define ibx_enable_display_interrupt(dev_priv, bits) \
	ibx_display_interrupt_update((dev_priv), (bits), (bits))
#define ibx_disable_display_interrupt(dev_priv, bits) \
	ibx_display_interrupt_update((dev_priv), (bits), 0)

static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
					    enum transcoder pch_transcoder,
					    bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t bit = (pch_transcoder == TRANSCODER_A) ?
		       SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;

	if (enable)
		ibx_enable_display_interrupt(dev_priv, bit);
	else
		ibx_disable_display_interrupt(dev_priv, bit);
}

static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
					    enum transcoder pch_transcoder,
					    bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (enable) {
		I915_WRITE(SERR_INT,
			   SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));

		if (!cpt_can_enable_serr_int(dev))
			return;

		ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT);
	} else {
		uint32_t tmp = I915_READ(SERR_INT);
		bool was_enabled = !(I915_READ(SDEIMR) & SDE_ERROR_CPT);

		/* Change the state _after_ we've read out the current one. */
		ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT);

		if (!was_enabled &&
		    (tmp & SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder))) {
			DRM_DEBUG_KMS("uncleared pch fifo underrun on pch transcoder %c\n",
				      transcoder_name(pch_transcoder));
		}
	}
}

/**
 * intel_set_cpu_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pipe: pipe
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable CPU fifo underruns for a specific
 * pipe. Notice that on some Gens (e.g. IVB, HSW), disabling FIFO underrun
 * reporting for one pipe may also disable all the other CPU error interrupts
 * for the other pipes, due to the fact that there's just one interrupt
 * mask/enable bit for all the pipes.
 *
 * Returns the previous state of underrun reporting.
 */
bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
					   enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);

	ret = !intel_crtc->cpu_fifo_underrun_disabled;

	if (enable == ret)
		goto done;

	intel_crtc->cpu_fifo_underrun_disabled = !enable;

	if (IS_GEN5(dev) || IS_GEN6(dev))
		ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
	else if (IS_GEN7(dev))
		ivybridge_set_fifo_underrun_reporting(dev, pipe, enable);
	else if (IS_GEN8(dev))
		broadwell_set_fifo_underrun_reporting(dev, pipe, enable);

done:
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
	return ret;
}

/**
 * intel_set_pch_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable PCH fifo underruns for a specific
 * PCH transcoder. Notice that on some PCHs (e.g. CPT/PPT), disabling FIFO
 * underrun reporting for one transcoder may also disable all the other PCH
 * error interrupts for the other transcoders, due to the fact that there's
 * just one interrupt mask/enable bit for all the transcoders.
 *
 * Returns the previous state of underrun reporting.
 */
bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
					   enum transcoder pch_transcoder,
					   bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	unsigned long flags;
	bool ret;

	/*
	 * NOTE: Pre-LPT has a fixed cpu pipe -> pch transcoder mapping, but LPT
	 * has only one pch transcoder A that all pipes can use. To avoid racy
	 * pch transcoder -> pipe lookups from interrupt code simply store the
	 * underrun statistics in crtc A. Since we never expose this anywhere
	 * nor use it outside of the fifo underrun code here using the "wrong"
	 * crtc on LPT won't cause issues.
	 */

	spin_lock_irqsave(&dev_priv->irq_lock, flags);

	ret = !intel_crtc->pch_fifo_underrun_disabled;

	if (enable == ret)
		goto done;

	intel_crtc->pch_fifo_underrun_disabled = !enable;

	if (HAS_PCH_IBX(dev))
		ibx_set_fifo_underrun_reporting(dev, pch_transcoder, enable);
	else
		cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable);

done:
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
	return ret;
}


void
i915_enable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & 0x7fff0000;

	assert_spin_locked(&dev_priv->irq_lock);

	if ((pipestat & mask) == mask)
		return;

	/* Enable the interrupt, clear any pending status */
	pipestat |= mask | (mask >> 16);
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

void
i915_disable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & 0x7fff0000;

	assert_spin_locked(&dev_priv->irq_lock);

	if ((pipestat & mask) == 0)
		return;

	pipestat &= ~mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 */
static void i915_enable_asle_pipestat(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long irqflags;

	if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_ENABLE);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_ENABLE);

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		/* Locking is horribly broken here, but whatever. */
		struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

		return intel_crtc->active;
	} else {
		return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
	}
}

static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe)
{
	/* Gen2 doesn't have a hardware frame counter */
	return 0;
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low, pixel, vbl_start;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		struct intel_crtc *intel_crtc =
			to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
		const struct drm_display_mode *mode =
			&intel_crtc->config.adjusted_mode;

		vbl_start = mode->crtc_vblank_start * mode->crtc_htotal;
	} else {
		enum transcoder cpu_transcoder =
			intel_pipe_to_cpu_transcoder(dev_priv, pipe);
		u32 htotal;

		htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1;
		vbl_start = (I915_READ(VBLANK(cpu_transcoder)) & 0x1fff) + 1;

		vbl_start *= htotal;
	}

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low = I915_READ(low_frame);
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * The frame counter increments at beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}

static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int reg = PIPE_FRMCOUNT_GM45(pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	return I915_READ(reg);
}

/* raw reads, only for fast reads of display block, no need for forcewake etc. */
#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
#define __raw_i915_read16(dev_priv__, reg__) readw((dev_priv__)->regs + (reg__))

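/*
 * Sample the DEISR vblank status bit for @pipe with a raw read; used by the
 * scanout position code below to fix up the scanline counter around the
 * vblank edges. Caller already holds uncore.lock.
 */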
static bool ilk_pipe_in_vblank_locked(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t status;

	if (INTEL_INFO(dev)->gen < 7) {
		status = pipe == PIPE_A ?
			DE_PIPEA_VBLANK :
			DE_PIPEB_VBLANK;
	} else {
		switch (pipe) {
		default:
		case PIPE_A:
			status = DE_PIPEA_VBLANK_IVB;
			break;
		case PIPE_B:
			status = DE_PIPEB_VBLANK_IVB;
			break;
		case PIPE_C:
			status = DE_PIPEC_VBLANK_IVB;
			break;
		}
	}

	return __raw_i915_read32(dev_priv, DEISR) & status;
}

static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
				    unsigned int flags, int *vpos, int *hpos,
				    ktime_t *stime, ktime_t *etime)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	const struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
	int position;
	int vbl_start, vbl_end, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;
	unsigned long irqflags;

	if (!intel_crtc->active) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	htotal = mode->crtc_htotal;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

	ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		if (IS_GEN2(dev))
			position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
		else
			position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

		if (HAS_PCH_SPLIT(dev)) {
			/*
			 * The scanline counter increments at the leading edge
			 * of hsync, ie. it completely misses the active portion
			 * of the line. Fix up the counter at both edges of vblank
			 * to get a more accurate picture whether we're in vblank
			 * or not.
			 */
			in_vbl = ilk_pipe_in_vblank_locked(dev, pipe);
			if ((in_vbl && position == vbl_start - 1) ||
			    (!in_vbl && position == vbl_end - 1))
				position = (position + 1) % vtotal;
		} else {
			/*
			 * ISR vblank status bits don't work the way we'd want
			 * them to work on non-PCH platforms (for
			 * ilk_pipe_in_vblank_locked()), and there doesn't
			 * appear any other way to determine if we're currently
			 * in vblank.
			 *
			 * Instead let's assume that we're already in vblank if
			 * we got called from the vblank interrupt and the
			 * scanline counter value indicates that we're on the
			 * line just prior to vblank start. This should result
			 * in the correct answer, unless the vblank interrupt
			 * delivery really got delayed for almost exactly one
			 * full frame/field.
			 */
			if (flags & DRM_CALLED_FROM_VBLIRQ &&
			    position == vbl_start - 1) {
				position = (position + 1) % vtotal;

				/* Signal this correction as "applied". */
				ret |= 0x8;
			}
		}
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	in_vbl = position >= vbl_start && position < vbl_end;

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		*vpos = position;
		*hpos = 0;
	} else {
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_INVBL;

	return ret;
}

static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
				     int *max_error,
				     struct timeval *vblank_time,
				     unsigned flags)
{
	struct drm_crtc *crtc;

	if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	if (!crtc->enabled) {
		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags,
						     crtc,
						     &to_intel_crtc(crtc)->config.adjusted_mode);
}

static bool intel_hpd_irq_event(struct drm_device *dev,
				struct drm_connector *connector)
{
	enum drm_connector_status old_status;

	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
	old_status = connector->status;

	connector->status = connector->funcs->detect(connector, false);
	if (old_status == connector->status)
		return false;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
		      connector->base.id,
		      drm_get_connector_name(connector),
		      drm_get_connector_status_name(old_status),
		      drm_get_connector_status_name(connector->status));

	return true;
}

/*
 * Handle hotplug events outside the interrupt handler proper.
 */
#define I915_REENABLE_HOTPLUG_DELAY (2*60*1000)

static void i915_hotplug_work_func(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    hotplug_work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_connector *intel_connector;
	struct intel_encoder *intel_encoder;
	struct drm_connector *connector;
	unsigned long irqflags;
	bool hpd_disabled = false;
	bool changed = false;
	u32 hpd_event_bits;

	/* HPD irq before everything is fully set up. */
	if (!dev_priv->enable_hotplug_processing)
		return;

	mutex_lock(&mode_config->mutex);
	DRM_DEBUG_KMS("running encoder hotplug functions\n");

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	hpd_event_bits = dev_priv->hpd_event_bits;
	dev_priv->hpd_event_bits = 0;
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		intel_encoder = intel_connector->encoder;
		if (intel_encoder->hpd_pin > HPD_NONE &&
		    dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
		    connector->polled == DRM_CONNECTOR_POLL_HPD) {
			DRM_INFO("HPD interrupt storm detected on connector %s: "
				 "switching from hotplug detection to polling\n",
				 drm_get_connector_name(connector));
			dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
			connector->polled = DRM_CONNECTOR_POLL_CONNECT
				| DRM_CONNECTOR_POLL_DISCONNECT;
			hpd_disabled = true;
		}
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
				      drm_get_connector_name(connector), intel_encoder->hpd_pin);
		}
	}
	/* if there were no outputs to poll, poll was disabled,
	 * therefore make sure it's enabled when disabling HPD on
	 * some connectors */
	if (hpd_disabled) {
		drm_kms_helper_poll_enable(dev);
		mod_timer(&dev_priv->hotplug_reenable_timer,
			  jiffies + msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
	}

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		intel_encoder = intel_connector->encoder;
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			if (intel_encoder->hot_plug)
				intel_encoder->hot_plug(intel_encoder);
			if (intel_hpd_irq_event(dev, connector))
				changed = true;
		}
	}
	mutex_unlock(&mode_config->mutex);

	if (changed)
		drm_kms_helper_hotplug_event(dev);
}

static void ironlake_rps_change_irq_handler(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;

	spin_lock(&mchdev_lock);

	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	spin_unlock(&mchdev_lock);

	return;
}

static void notify_ring(struct drm_device *dev,
			struct intel_ring_buffer *ring)
{
	if (ring->obj == NULL)
		return;

	trace_i915_gem_request_complete(ring);

	wake_up_all(&ring->irq_queue);
	i915_queue_hangcheck(dev);
}

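/*
 * Deferred work for GEN6+ RPS (render P-state) interrupts: picks up the
 * pm_iir bits stashed by gen6_rps_irq_handler() and steps the GPU frequency
 * up or down accordingly under rps.hw_lock.
 */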
static void gen6_pm_rps_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    rps.work);
	u32 pm_iir;
	int new_delay, adj;

	spin_lock_irq(&dev_priv->irq_lock);
	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	/* Make sure not to corrupt PMIMR state used by ringbuffer code */
	snb_enable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS);
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Make sure we didn't queue anything we're not going to process. */
	WARN_ON(pm_iir & ~GEN6_PM_RPS_EVENTS);

	if ((pm_iir & GEN6_PM_RPS_EVENTS) == 0)
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	adj = dev_priv->rps.last_adj;
	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else
			adj = 1;
		new_delay = dev_priv->rps.cur_delay + adj;

		/*
		 * For better performance, jump directly
		 * to RPe if we're below it.
		 */
		if (new_delay < dev_priv->rps.rpe_delay)
			new_delay = dev_priv->rps.rpe_delay;
	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
		if (dev_priv->rps.cur_delay > dev_priv->rps.rpe_delay)
			new_delay = dev_priv->rps.rpe_delay;
		else
			new_delay = dev_priv->rps.min_delay;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
		if (adj < 0)
			adj *= 2;
		else
			adj = -1;
		new_delay = dev_priv->rps.cur_delay + adj;
	} else { /* unknown event */
		new_delay = dev_priv->rps.cur_delay;
	}

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	new_delay = clamp_t(int, new_delay,
			    dev_priv->rps.min_delay, dev_priv->rps.max_delay);
	dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_delay;

	if (IS_VALLEYVIEW(dev_priv->dev))
		valleyview_set_rps(dev_priv->dev, new_delay);
	else
		gen6_set_rps(dev_priv->dev, new_delay);

	mutex_unlock(&dev_priv->rps.hw_lock);
}


/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * the same row is likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	uint32_t misccpctl;
	unsigned long flags;
	uint8_t slice = 0;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->dev->struct_mutex);

	/* If we've screwed up tracking, just let the interrupt fire again */
	if (WARN_ON(!dev_priv->l3_parity.which_slice))
		goto out;

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		u32 reg;

		slice--;
		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1<<slice);

		reg = GEN7_L3CDERRST1 + (slice * 0x200);

		error_status = I915_READ(reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		POSTING_READ(reg);

		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

		kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj,
				   KOBJ_CHANGE, parity_event);

		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			  slice, row, bank, subbank);

		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
	}

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

out:
	WARN_ON(dev_priv->l3_parity.which_slice);
	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	ilk_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	mutex_unlock(&dev_priv->dev->struct_mutex);
}

static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (!HAS_L3_DPF(dev))
		return;

	spin_lock(&dev_priv->irq_lock);
	ilk_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
	spin_unlock(&dev_priv->irq_lock);

	iir &= GT_PARITY_ERROR(dev);
	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
		dev_priv->l3_parity.which_slice |= 1 << 1;

	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
		dev_priv->l3_parity.which_slice |= 1 << 0;

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}

static void ilk_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & ILK_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
}

static void snb_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{

	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
	if (gt_iir & GT_BLT_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[BCS]);

	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
		      GT_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) {
		DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
		i915_handle_error(dev, false);
	}

	if (gt_iir & GT_PARITY_ERROR(dev))
		ivybridge_parity_error_irq_handler(dev, gt_iir);
}

static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
				       struct drm_i915_private *dev_priv,
				       u32 master_ctl)
{
	u32 rcs, bcs, vcs;
	uint32_t tmp = 0;
	irqreturn_t ret = IRQ_NONE;

	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
		tmp = I915_READ(GEN8_GT_IIR(0));
		if (tmp) {
			ret = IRQ_HANDLED;
			rcs = tmp >> GEN8_RCS_IRQ_SHIFT;
			bcs = tmp >> GEN8_BCS_IRQ_SHIFT;
			if (rcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, &dev_priv->ring[RCS]);
			if (bcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, &dev_priv->ring[BCS]);
			I915_WRITE(GEN8_GT_IIR(0), tmp);
		} else
			DRM_ERROR("The master control interrupt lied (GT0)!\n");
	}

	if (master_ctl & GEN8_GT_VCS1_IRQ) {
		tmp = I915_READ(GEN8_GT_IIR(1));
		if (tmp) {
			ret = IRQ_HANDLED;
			vcs = tmp >> GEN8_VCS1_IRQ_SHIFT;
			if (vcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, &dev_priv->ring[VCS]);
			I915_WRITE(GEN8_GT_IIR(1), tmp);
		} else
			DRM_ERROR("The master control interrupt lied (GT1)!\n");
	}

	if (master_ctl & GEN8_GT_VECS_IRQ) {
		tmp = I915_READ(GEN8_GT_IIR(3));
		if (tmp) {
			ret = IRQ_HANDLED;
			vcs = tmp >> GEN8_VECS_IRQ_SHIFT;
			if (vcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, &dev_priv->ring[VECS]);
			I915_WRITE(GEN8_GT_IIR(3), tmp);
		} else
			DRM_ERROR("The master control interrupt lied (GT3)!\n");
	}

	return ret;
}

#define HPD_STORM_DETECT_PERIOD 1000
#define HPD_STORM_THRESHOLD 5

static inline void intel_hpd_irq_handler(struct drm_device *dev,
					 u32 hotplug_trigger,
					 const u32 *hpd)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i;
	bool storm_detected = false;

	if (!hotplug_trigger)
		return;

	spin_lock(&dev_priv->irq_lock);
	for (i = 1; i < HPD_NUM_PINS; i++) {

		WARN_ONCE(hpd[i] & hotplug_trigger &&
			  dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED,
			  "Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n",
			  hotplug_trigger, i, hpd[i]);

		if (!(hpd[i] & hotplug_trigger) ||
		    dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
			continue;

		dev_priv->hpd_event_bits |= (1 << i);
		if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
				   dev_priv->hpd_stats[i].hpd_last_jiffies
				   + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
			dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
			dev_priv->hpd_stats[i].hpd_cnt = 0;
			DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", i);
		} else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
			dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
			dev_priv->hpd_event_bits &= ~(1 << i);
			DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
			storm_detected = true;
		} else {
			dev_priv->hpd_stats[i].hpd_cnt++;
			DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", i,
				      dev_priv->hpd_stats[i].hpd_cnt);
		}
	}

	if (storm_detected)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock(&dev_priv->irq_lock);

	/*
	 * Our hotplug handler can grab modeset locks (by calling down into the fb
	 * helpers). Hence it must not be run on our own dev-priv->wq work
	 * queue for otherwise the flush_work in the pageflip code will
	 * deadlock.
	 */
	schedule_work(&dev_priv->hotplug_work);
}

static void gmbus_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

static void dp_aux_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

#if defined(CONFIG_DEBUG_FS)
static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
					 uint32_t crc0, uint32_t crc1,
					 uint32_t crc2, uint32_t crc3,
					 uint32_t crc4)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
	struct intel_pipe_crc_entry *entry;
	int head, tail;

	spin_lock(&pipe_crc->lock);

	if (!pipe_crc->entries) {
		spin_unlock(&pipe_crc->lock);
		DRM_ERROR("spurious interrupt\n");
		return;
	}

	head = pipe_crc->head;
	tail = pipe_crc->tail;

	if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
		spin_unlock(&pipe_crc->lock);
		DRM_ERROR("CRC buffer overflowing\n");
		return;
	}

	entry = &pipe_crc->entries[head];

	entry->frame = dev->driver->get_vblank_counter(dev, pipe);
	entry->crc[0] = crc0;
	entry->crc[1] = crc1;
	entry->crc[2] = crc2;
	entry->crc[3] = crc3;
	entry->crc[4] = crc4;

	head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
	pipe_crc->head = head;

	spin_unlock(&pipe_crc->lock);

	wake_up_interruptible(&pipe_crc->wq);
}
#else
static inline void
display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
			     uint32_t crc0, uint32_t crc1,
			     uint32_t crc2, uint32_t crc3,
			     uint32_t crc4) {}
#endif


static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     0, 0, 0, 0);
}

static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
}

static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t res1, res2;

	if (INTEL_INFO(dev)->gen >= 3)
		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
	else
		res1 = 0;

	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
	else
		res2 = 0;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_RED(pipe)),
				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
				     res1, res2);
}

/* The RPS events need forcewake, so we add them to a work queue and mask their
 * IMR bits until the work is done. Other interrupts can be processed without
 * the work queue. */
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	if (pm_iir & GEN6_PM_RPS_EVENTS) {
		spin_lock(&dev_priv->irq_lock);
		dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_RPS_EVENTS;
		snb_disable_pm_irq(dev_priv, pm_iir & GEN6_PM_RPS_EVENTS);
		spin_unlock(&dev_priv->irq_lock);

		queue_work(dev_priv->wq, &dev_priv->rps.work);
	}

	if (HAS_VEBOX(dev_priv->dev)) {
		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
			notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);

		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) {
			DRM_ERROR("VEBOX CS error interrupt 0x%08x\n", pm_iir);
			i915_handle_error(dev_priv->dev, false);
		}
	}
}

static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, gt_iir, pm_iir;
	irqreturn_t ret = IRQ_NONE;
	unsigned long irqflags;
	int pipe;
	u32 pipe_stats[I915_MAX_PIPES];

	atomic_inc(&dev_priv->irq_received);

	while (true) {
		iir = I915_READ(VLV_IIR);
		gt_iir = I915_READ(GTIIR);
		pm_iir = I915_READ(GEN6_PMIIR);

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			goto out;

		ret = IRQ_HANDLED;

		snb_gt_irq_handler(dev, dev_priv, gt_iir);

		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		for_each_pipe(pipe) {
			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
				drm_handle_vblank(dev, pipe);

			if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) {
				intel_prepare_page_flip(dev, pipe);
				intel_finish_page_flip(dev, pipe);
			}

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev, pipe);
		}

		/* Consume port. Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
			u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);

			intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);

			if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
				dp_aux_irq_handler(dev);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			I915_READ(PORT_HOTPLUG_STAT);
		}

		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
			gmbus_irq_handler(dev);

		if (pm_iir)
			gen6_rps_irq_handler(dev_priv, pm_iir);

		I915_WRITE(GTIIR, gt_iir);
		I915_WRITE(GEN6_PMIIR, pm_iir);
		I915_WRITE(VLV_IIR, iir);
	}

out:
	return ret;
}

static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;

	intel_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx);

	if (pch_iir & SDE_AUDIO_POWER_MASK) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
			       SDE_AUDIO_POWER_SHIFT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
				 port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_HDCP_MASK)
		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");

	if (pch_iir & SDE_AUDIO_TRANS_MASK)
		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");

	if (pch_iir & SDE_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (pch_iir & SDE_FDI_MASK)
		for_each_pipe(pipe)
			DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");

	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");

	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n");

	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n");
}

static void ivb_err_int_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 err_int = I915_READ(GEN7_ERR_INT);
	enum pipe pipe;

	if (err_int & ERR_INT_POISON)
		DRM_ERROR("Poison interrupt\n");

	for_each_pipe(pipe) {
		if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) {
			if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
								  false))
				DRM_DEBUG_DRIVER("Pipe %c FIFO underrun\n",
						 pipe_name(pipe));
		}

		if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
			if (IS_IVYBRIDGE(dev))
				ivb_pipe_crc_irq_handler(dev, pipe);
			else
				hsw_pipe_crc_irq_handler(dev, pipe);
		}
	}

	I915_WRITE(GEN7_ERR_INT, err_int);
}

static void cpt_serr_int_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 serr_int = I915_READ(SERR_INT);

	if (serr_int & SERR_INT_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n");

	if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n");

	if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_C,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder C FIFO underrun\n");

	I915_WRITE(SERR_INT, serr_int);
}

static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;

	intel_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt);

	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
			       SDE_AUDIO_POWER_SHIFT_CPT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
				 port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK_CPT)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS_CPT)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");

	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");

	if (pch_iir & SDE_FDI_MASK_CPT)
		for_each_pipe(pipe)
			DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & SDE_ERROR_CPT)
		cpt_serr_int_handler(dev);
}

static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;

	if (de_iir & DE_AUX_CHANNEL_A)
		dp_aux_irq_handler(dev);

	if (de_iir & DE_GSE)
		intel_opregion_asle_intr(dev);

	if (de_iir & DE_POISON)
		DRM_ERROR("Poison interrupt\n");

	for_each_pipe(pipe) {
		if (de_iir & DE_PIPE_VBLANK(pipe))
			drm_handle_vblank(dev, pipe);

		if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
			if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
				DRM_DEBUG_DRIVER("Pipe %c FIFO underrun\n",
						 pipe_name(pipe));

		if (de_iir & DE_PIPE_CRC_DONE(pipe))
			i9xx_pipe_crc_irq_handler(dev, pipe);

		/* plane/pipes map 1:1 on ilk+ */
		if (de_iir & DE_PLANE_FLIP_DONE(pipe)) {
			intel_prepare_page_flip(dev, pipe);
			intel_finish_page_flip_plane(dev, pipe);
		}
	}

	/* check event from PCH */
	if (de_iir & DE_PCH_EVENT) {
		u32 pch_iir = I915_READ(SDEIIR);

		if (HAS_PCH_CPT(dev))
			cpt_irq_handler(dev, pch_iir);
		else
			ibx_irq_handler(dev, pch_iir);

		/* should clear PCH hotplug event before clear CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}

	if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
		ironlake_rps_change_irq_handler(dev);
}

static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe i;

	if (de_iir & DE_ERR_INT_IVB)
		ivb_err_int_handler(dev);

	if (de_iir & DE_AUX_CHANNEL_A_IVB)
		dp_aux_irq_handler(dev);

	if (de_iir & DE_GSE_IVB)
		intel_opregion_asle_intr(dev);

	for_each_pipe(i) {
		if (de_iir & (DE_PIPE_VBLANK_IVB(i)))
			drm_handle_vblank(dev, i);

		/* plane/pipes map 1:1 on ilk+ */
		if (de_iir & DE_PLANE_FLIP_DONE_IVB(i)) {
			intel_prepare_page_flip(dev, i);
			intel_finish_page_flip_plane(dev, i);
		}
	}

	/* check event from PCH */
	if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
		u32 pch_iir = I915_READ(SDEIIR);

		cpt_irq_handler(dev, pch_iir);

		/* clear PCH hotplug event before clear CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}
}

static irqreturn_t ironlake_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
	irqreturn_t ret = IRQ_NONE;

	atomic_inc(&dev_priv->irq_received);

	/* We get interrupts on unclaimed registers, so check for this before we
	 * do any I915_{READ,WRITE}. */
	intel_uncore_check_errors(dev);

	/* disable master interrupt before clearing iir */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
	POSTING_READ(DEIER);

	/* Disable south interrupts. We'll only write to SDEIIR once, so further
	 * interrupts will be stored on its back queue, and then we'll be
	 * able to process them after we restore SDEIER (as soon as we restore
	 * it, we'll get an interrupt if SDEIIR still has something to process
	 * due to its back queue). */
	if (!HAS_PCH_NOP(dev)) {
		sde_ier = I915_READ(SDEIER);
		I915_WRITE(SDEIER, 0);
		POSTING_READ(SDEIER);
	}

	gt_iir = I915_READ(GTIIR);
	if (gt_iir) {
		if (INTEL_INFO(dev)->gen >= 6)
			snb_gt_irq_handler(dev, dev_priv, gt_iir);
		else
			ilk_gt_irq_handler(dev, dev_priv, gt_iir);
		I915_WRITE(GTIIR, gt_iir);
		ret = IRQ_HANDLED;
	}

	de_iir = I915_READ(DEIIR);
	if (de_iir) {
		if (INTEL_INFO(dev)->gen >= 7)
			ivb_display_irq_handler(dev, de_iir);
		else
			ilk_display_irq_handler(dev, de_iir);
		I915_WRITE(DEIIR, de_iir);
		ret = IRQ_HANDLED;
	}

	if (INTEL_INFO(dev)->gen >= 6) {
		u32 pm_iir = I915_READ(GEN6_PMIIR);
		if (pm_iir) {
			gen6_rps_irq_handler(dev_priv, pm_iir);
			I915_WRITE(GEN6_PMIIR, pm_iir);
			ret = IRQ_HANDLED;
		}
	}

	I915_WRITE(DEIER, de_ier);
	POSTING_READ(DEIER);
	if (!HAS_PCH_NOP(dev)) {
		I915_WRITE(SDEIER, sde_ier);
		POSTING_READ(SDEIER);
	}

	return ret;
}

static irqreturn_t gen8_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 master_ctl;
	irqreturn_t ret = IRQ_NONE;
	uint32_t tmp = 0;
	enum pipe pipe;

	atomic_inc(&dev_priv->irq_received);

	master_ctl = I915_READ(GEN8_MASTER_IRQ);
	master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
	if (!master_ctl)
		return IRQ_NONE;

	I915_WRITE(GEN8_MASTER_IRQ, 0);
	POSTING_READ(GEN8_MASTER_IRQ);

	ret = gen8_gt_irq_handler(dev, dev_priv, master_ctl);

	if (master_ctl & GEN8_DE_MISC_IRQ) {
		tmp = I915_READ(GEN8_DE_MISC_IIR);
		if (tmp & GEN8_DE_MISC_GSE)
			intel_opregion_asle_intr(dev);
		else if (tmp)
			DRM_ERROR("Unexpected DE Misc interrupt\n");
		else
			DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
control interrupt lied (DE MISC)!\n"); 1832 1833 if (tmp) { 1834 I915_WRITE(GEN8_DE_MISC_IIR, tmp); 1835 ret = IRQ_HANDLED; 1836 } 1837 } 1838 1839 if (master_ctl & GEN8_DE_PORT_IRQ) { 1840 tmp = I915_READ(GEN8_DE_PORT_IIR); 1841 if (tmp & GEN8_AUX_CHANNEL_A) 1842 dp_aux_irq_handler(dev); 1843 else if (tmp) 1844 DRM_ERROR("Unexpected DE Port interrupt\n"); 1845 else 1846 DRM_ERROR("The master control interrupt lied (DE PORT)!\n"); 1847 1848 if (tmp) { 1849 I915_WRITE(GEN8_DE_PORT_IIR, tmp); 1850 ret = IRQ_HANDLED; 1851 } 1852 } 1853 1854 for_each_pipe(pipe) { 1855 uint32_t pipe_iir; 1856 1857 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe))) 1858 continue; 1859 1860 pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe)); 1861 if (pipe_iir & GEN8_PIPE_VBLANK) 1862 drm_handle_vblank(dev, pipe); 1863 1864 if (pipe_iir & GEN8_PIPE_FLIP_DONE) { 1865 intel_prepare_page_flip(dev, pipe); 1866 intel_finish_page_flip_plane(dev, pipe); 1867 } 1868 1869 if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE) 1870 hsw_pipe_crc_irq_handler(dev, pipe); 1871 1872 if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN) { 1873 if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, 1874 false)) 1875 DRM_DEBUG_DRIVER("Pipe %c FIFO underrun\n", 1876 pipe_name(pipe)); 1877 } 1878 1879 if (pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS) { 1880 DRM_ERROR("Fault errors on pipe %c\n: 0x%08x", 1881 pipe_name(pipe), 1882 pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS); 1883 } 1884 1885 if (pipe_iir) { 1886 ret = IRQ_HANDLED; 1887 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir); 1888 } else 1889 DRM_ERROR("The master control interrupt lied (DE PIPE)!\n"); 1890 } 1891 1892 if (!HAS_PCH_NOP(dev) && master_ctl & GEN8_DE_PCH_IRQ) { 1893 /* 1894 * FIXME(BDW): Assume for now that the new interrupt handling 1895 * scheme also closed the SDE interrupt handling race we've seen 1896 * on older pch-split platforms. But this needs testing. 1897 */ 1898 u32 pch_iir = I915_READ(SDEIIR); 1899 1900 cpt_irq_handler(dev, pch_iir); 1901 1902 if (pch_iir) { 1903 I915_WRITE(SDEIIR, pch_iir); 1904 ret = IRQ_HANDLED; 1905 } 1906 } 1907 1908 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 1909 POSTING_READ(GEN8_MASTER_IRQ); 1910 1911 return ret; 1912 } 1913 1914 static void i915_error_wake_up(struct drm_i915_private *dev_priv, 1915 bool reset_completed) 1916 { 1917 struct intel_ring_buffer *ring; 1918 int i; 1919 1920 /* 1921 * Notify all waiters for GPU completion events that reset state has 1922 * been changed, and that they need to restart their wait after 1923 * checking for potential errors (and bail out to drop locks if there is 1924 * a gpu reset pending so that i915_error_work_func can acquire them). 1925 */ 1926 1927 /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */ 1928 for_each_ring(ring, dev_priv, i) 1929 wake_up_all(&ring->irq_queue); 1930 1931 /* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */ 1932 wake_up_all(&dev_priv->pending_flip_queue); 1933 1934 /* 1935 * Signal tasks blocked in i915_gem_wait_for_error that the pending 1936 * reset state is cleared. 1937 */ 1938 if (reset_completed) 1939 wake_up_all(&dev_priv->gpu_error.reset_queue); 1940 } 1941 1942 /** 1943 * i915_error_work_func - do process context error handling work 1944 * @work: work struct 1945 * 1946 * Fire an error uevent so userspace can see that a hang or error 1947 * was detected. 
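 *
 * If a reset is pending and the GPU is not already terminally wedged, this
 * work item also performs the actual recovery: it sends I915_RESET_UEVENT,
 * calls i915_reset(), then either bumps gpu_error.reset_counter and sends
 * I915_ERROR_UEVENT "=0" on success or marks the GPU as I915_WEDGED on
 * failure, and finally wakes all waiters via i915_error_wake_up().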
1948 */ 1949 static void i915_error_work_func(struct work_struct *work) 1950 { 1951 struct i915_gpu_error *error = container_of(work, struct i915_gpu_error, 1952 work); 1953 drm_i915_private_t *dev_priv = container_of(error, drm_i915_private_t, 1954 gpu_error); 1955 struct drm_device *dev = dev_priv->dev; 1956 char *error_event[] = { I915_ERROR_UEVENT "=1", NULL }; 1957 char *reset_event[] = { I915_RESET_UEVENT "=1", NULL }; 1958 char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL }; 1959 int ret; 1960 1961 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event); 1962 1963 /* 1964 * Note that there's only one work item which does gpu resets, so we 1965 * need not worry about concurrent gpu resets potentially incrementing 1966 * error->reset_counter twice. We only need to take care of another 1967 * racing irq/hangcheck declaring the gpu dead for a second time. A 1968 * quick check for that is good enough: schedule_work ensures the 1969 * correct ordering between hang detection and this work item, and since 1970 * the reset in-progress bit is only ever set by code outside of this 1971 * work we don't need to worry about any other races. 1972 */ 1973 if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) { 1974 DRM_DEBUG_DRIVER("resetting chip\n"); 1975 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, 1976 reset_event); 1977 1978 /* 1979 * All state reset _must_ be completed before we update the 1980 * reset counter, for otherwise waiters might miss the reset 1981 * pending state and not properly drop locks, resulting in 1982 * deadlocks with the reset work. 1983 */ 1984 ret = i915_reset(dev); 1985 1986 intel_display_handle_reset(dev); 1987 1988 if (ret == 0) { 1989 /* 1990 * After all the gem state is reset, increment the reset 1991 * counter and wake up everyone waiting for the reset to 1992 * complete. 1993 * 1994 * Since unlock operations are a one-sided barrier only, 1995 * we need to insert a barrier here to order any seqno 1996 * updates before 1997 * the counter increment. 1998 */ 1999 smp_mb__before_atomic_inc(); 2000 atomic_inc(&dev_priv->gpu_error.reset_counter); 2001 2002 kobject_uevent_env(&dev->primary->kdev->kobj, 2003 KOBJ_CHANGE, reset_done_event); 2004 } else { 2005 atomic_set_mask(I915_WEDGED, &error->reset_counter); 2006 } 2007 2008 /* 2009 * Note: The wake_up also serves as a memory barrier so that 2010 * waiters see the update value of the reset counter atomic_t. 
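 *
 * (This pairs with the smp_mb__before_atomic_inc() above: the increment of
 * the reset counter is ordered after all the reset work, and the wake_up
 * publishes it, so woken waiters re-checking the reset state should not see
 * a stale counter value.)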
2011 */ 2012 i915_error_wake_up(dev_priv, true); 2013 } 2014 } 2015 2016 static void i915_report_and_clear_eir(struct drm_device *dev) 2017 { 2018 struct drm_i915_private *dev_priv = dev->dev_private; 2019 uint32_t instdone[I915_NUM_INSTDONE_REG]; 2020 u32 eir = I915_READ(EIR); 2021 int pipe, i; 2022 2023 if (!eir) 2024 return; 2025 2026 pr_err("render error detected, EIR: 0x%08x\n", eir); 2027 2028 i915_get_extra_instdone(dev, instdone); 2029 2030 if (IS_G4X(dev)) { 2031 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) { 2032 u32 ipeir = I915_READ(IPEIR_I965); 2033 2034 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); 2035 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); 2036 for (i = 0; i < ARRAY_SIZE(instdone); i++) 2037 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); 2038 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); 2039 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); 2040 I915_WRITE(IPEIR_I965, ipeir); 2041 POSTING_READ(IPEIR_I965); 2042 } 2043 if (eir & GM45_ERROR_PAGE_TABLE) { 2044 u32 pgtbl_err = I915_READ(PGTBL_ER); 2045 pr_err("page table error\n"); 2046 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err); 2047 I915_WRITE(PGTBL_ER, pgtbl_err); 2048 POSTING_READ(PGTBL_ER); 2049 } 2050 } 2051 2052 if (!IS_GEN2(dev)) { 2053 if (eir & I915_ERROR_PAGE_TABLE) { 2054 u32 pgtbl_err = I915_READ(PGTBL_ER); 2055 pr_err("page table error\n"); 2056 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err); 2057 I915_WRITE(PGTBL_ER, pgtbl_err); 2058 POSTING_READ(PGTBL_ER); 2059 } 2060 } 2061 2062 if (eir & I915_ERROR_MEMORY_REFRESH) { 2063 pr_err("memory refresh error:\n"); 2064 for_each_pipe(pipe) 2065 pr_err("pipe %c stat: 0x%08x\n", 2066 pipe_name(pipe), I915_READ(PIPESTAT(pipe))); 2067 /* pipestat has already been acked */ 2068 } 2069 if (eir & I915_ERROR_INSTRUCTION) { 2070 pr_err("instruction error\n"); 2071 pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM)); 2072 for (i = 0; i < ARRAY_SIZE(instdone); i++) 2073 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); 2074 if (INTEL_INFO(dev)->gen < 4) { 2075 u32 ipeir = I915_READ(IPEIR); 2076 2077 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR)); 2078 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR)); 2079 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD)); 2080 I915_WRITE(IPEIR, ipeir); 2081 POSTING_READ(IPEIR); 2082 } else { 2083 u32 ipeir = I915_READ(IPEIR_I965); 2084 2085 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); 2086 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); 2087 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); 2088 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); 2089 I915_WRITE(IPEIR_I965, ipeir); 2090 POSTING_READ(IPEIR_I965); 2091 } 2092 } 2093 2094 I915_WRITE(EIR, eir); 2095 POSTING_READ(EIR); 2096 eir = I915_READ(EIR); 2097 if (eir) { 2098 /* 2099 * some errors might have become stuck, 2100 * mask them. 2101 */ 2102 DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir); 2103 I915_WRITE(EMR, I915_READ(EMR) | eir); 2104 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 2105 } 2106 } 2107 2108 /** 2109 * i915_handle_error - handle an error interrupt 2110 * @dev: drm device 2111 * 2112 * Do some basic checking of regsiter state at error interrupt time and 2113 * dump it to the syslog. Also call i915_capture_error_state() to make 2114 * sure we get a record and make it available in debugfs. Fire a uevent 2115 * so userspace knows something bad happened (should trigger collection 2116 * of a ring dump etc.). 
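 *
 * @wedged selects how severe the response is: with false the error is only
 * captured and logged (this is what the command parser error paths in the
 * IRQ handlers below use), while true additionally marks a reset as pending
 * and wakes all GPU waiters before scheduling the reset work, e.g.:
 *
 *   i915_handle_error(dev, false);   <- dump state, keep going
 *   i915_handle_error(dev, true);    <- also kick off the GPU reset
 *
 * (illustrative calls only; see i915_hangcheck_elapsed() for the real
 * wedged = true user)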
2117 */ 2118 void i915_handle_error(struct drm_device *dev, bool wedged) 2119 { 2120 struct drm_i915_private *dev_priv = dev->dev_private; 2121 2122 i915_capture_error_state(dev); 2123 i915_report_and_clear_eir(dev); 2124 2125 if (wedged) { 2126 atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG, 2127 &dev_priv->gpu_error.reset_counter); 2128 2129 /* 2130 * Wake up waiting processes so that the reset work function 2131 * i915_error_work_func doesn't deadlock trying to grab various 2132 * locks. By bumping the reset counter first, the woken 2133 * processes will see a reset in progress and back off, 2134 * releasing their locks and then waiting for the reset completion. 2135 * We must do this for _all_ gpu waiters that might hold locks 2136 * that the reset work needs to acquire. 2137 * 2138 * Note: The wake_up serves as the required memory barrier to 2139 * ensure that the waiters see the updated value of the reset 2140 * counter atomic_t. 2141 */ 2142 i915_error_wake_up(dev_priv, false); 2143 } 2144 2145 /* 2146 * Our reset work can grab modeset locks (since it needs to reset the 2147 * state of outstanding pageflips). Hence it must not be run on our own 2148 * dev_priv->wq work queue because otherwise the flush_work in the pageflip 2149 * code will deadlock. 2150 */ 2151 schedule_work(&dev_priv->gpu_error.work); 2152 } 2153 2154 static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe) 2155 { 2156 drm_i915_private_t *dev_priv = dev->dev_private; 2157 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 2158 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2159 struct drm_i915_gem_object *obj; 2160 struct intel_unpin_work *work; 2161 unsigned long flags; 2162 bool stall_detected; 2163 2164 /* Ignore early vblank irqs */ 2165 if (intel_crtc == NULL) 2166 return; 2167 2168 spin_lock_irqsave(&dev->event_lock, flags); 2169 work = intel_crtc->unpin_work; 2170 2171 if (work == NULL || 2172 atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE || 2173 !work->enable_stall_check) { 2174 /* Either the pending flip IRQ arrived, or we're too early.
Don't check */ 2175 spin_unlock_irqrestore(&dev->event_lock, flags); 2176 return; 2177 } 2178 2179 /* Potential stall - if we see that the flip has happened, assume a missed interrupt */ 2180 obj = work->pending_flip_obj; 2181 if (INTEL_INFO(dev)->gen >= 4) { 2182 int dspsurf = DSPSURF(intel_crtc->plane); 2183 stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) == 2184 i915_gem_obj_ggtt_offset(obj); 2185 } else { 2186 int dspaddr = DSPADDR(intel_crtc->plane); 2187 stall_detected = I915_READ(dspaddr) == (i915_gem_obj_ggtt_offset(obj) + 2188 crtc->y * crtc->fb->pitches[0] + 2189 crtc->x * crtc->fb->bits_per_pixel/8); 2190 } 2191 2192 spin_unlock_irqrestore(&dev->event_lock, flags); 2193 2194 if (stall_detected) { 2195 DRM_DEBUG_DRIVER("Pageflip stall detected\n"); 2196 intel_prepare_page_flip(dev, intel_crtc->plane); 2197 } 2198 } 2199 2200 /* Called from drm generic code, passed 'crtc' which 2201 * we use as a pipe index 2202 */ 2203 static int i915_enable_vblank(struct drm_device *dev, int pipe) 2204 { 2205 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2206 unsigned long irqflags; 2207 2208 if (!i915_pipe_enabled(dev, pipe)) 2209 return -EINVAL; 2210 2211 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2212 if (INTEL_INFO(dev)->gen >= 4) 2213 i915_enable_pipestat(dev_priv, pipe, 2214 PIPE_START_VBLANK_INTERRUPT_ENABLE); 2215 else 2216 i915_enable_pipestat(dev_priv, pipe, 2217 PIPE_VBLANK_INTERRUPT_ENABLE); 2218 2219 /* maintain vblank delivery even in deep C-states */ 2220 if (dev_priv->info->gen == 3) 2221 I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS)); 2222 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2223 2224 return 0; 2225 } 2226 2227 static int ironlake_enable_vblank(struct drm_device *dev, int pipe) 2228 { 2229 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2230 unsigned long irqflags; 2231 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? 
DE_PIPE_VBLANK_IVB(pipe) : 2232 DE_PIPE_VBLANK(pipe); 2233 2234 if (!i915_pipe_enabled(dev, pipe)) 2235 return -EINVAL; 2236 2237 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2238 ironlake_enable_display_irq(dev_priv, bit); 2239 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2240 2241 return 0; 2242 } 2243 2244 static int valleyview_enable_vblank(struct drm_device *dev, int pipe) 2245 { 2246 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2247 unsigned long irqflags; 2248 u32 imr; 2249 2250 if (!i915_pipe_enabled(dev, pipe)) 2251 return -EINVAL; 2252 2253 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2254 imr = I915_READ(VLV_IMR); 2255 if (pipe == PIPE_A) 2256 imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT; 2257 else 2258 imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; 2259 I915_WRITE(VLV_IMR, imr); 2260 i915_enable_pipestat(dev_priv, pipe, 2261 PIPE_START_VBLANK_INTERRUPT_ENABLE); 2262 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2263 2264 return 0; 2265 } 2266 2267 static int gen8_enable_vblank(struct drm_device *dev, int pipe) 2268 { 2269 struct drm_i915_private *dev_priv = dev->dev_private; 2270 unsigned long irqflags; 2271 2272 if (!i915_pipe_enabled(dev, pipe)) 2273 return -EINVAL; 2274 2275 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2276 dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK; 2277 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); 2278 POSTING_READ(GEN8_DE_PIPE_IMR(pipe)); 2279 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2280 return 0; 2281 } 2282 2283 /* Called from drm generic code, passed 'crtc' which 2284 * we use as a pipe index 2285 */ 2286 static void i915_disable_vblank(struct drm_device *dev, int pipe) 2287 { 2288 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2289 unsigned long irqflags; 2290 2291 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2292 if (dev_priv->info->gen == 3) 2293 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS)); 2294 2295 i915_disable_pipestat(dev_priv, pipe, 2296 PIPE_VBLANK_INTERRUPT_ENABLE | 2297 PIPE_START_VBLANK_INTERRUPT_ENABLE); 2298 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2299 } 2300 2301 static void ironlake_disable_vblank(struct drm_device *dev, int pipe) 2302 { 2303 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2304 unsigned long irqflags; 2305 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? 
DE_PIPE_VBLANK_IVB(pipe) : 2306 DE_PIPE_VBLANK(pipe); 2307 2308 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2309 ironlake_disable_display_irq(dev_priv, bit); 2310 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2311 } 2312 2313 static void valleyview_disable_vblank(struct drm_device *dev, int pipe) 2314 { 2315 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2316 unsigned long irqflags; 2317 u32 imr; 2318 2319 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2320 i915_disable_pipestat(dev_priv, pipe, 2321 PIPE_START_VBLANK_INTERRUPT_ENABLE); 2322 imr = I915_READ(VLV_IMR); 2323 if (pipe == PIPE_A) 2324 imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT; 2325 else 2326 imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; 2327 I915_WRITE(VLV_IMR, imr); 2328 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2329 } 2330 2331 static void gen8_disable_vblank(struct drm_device *dev, int pipe) 2332 { 2333 struct drm_i915_private *dev_priv = dev->dev_private; 2334 unsigned long irqflags; 2335 2336 if (!i915_pipe_enabled(dev, pipe)) 2337 return; 2338 2339 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2340 dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK; 2341 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); 2342 POSTING_READ(GEN8_DE_PIPE_IMR(pipe)); 2343 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2344 } 2345 2346 static u32 2347 ring_last_seqno(struct intel_ring_buffer *ring) 2348 { 2349 return list_entry(ring->request_list.prev, 2350 struct drm_i915_gem_request, list)->seqno; 2351 } 2352 2353 static bool 2354 ring_idle(struct intel_ring_buffer *ring, u32 seqno) 2355 { 2356 return (list_empty(&ring->request_list) || 2357 i915_seqno_passed(seqno, ring_last_seqno(ring))); 2358 } 2359 2360 static struct intel_ring_buffer * 2361 semaphore_waits_for(struct intel_ring_buffer *ring, u32 *seqno) 2362 { 2363 struct drm_i915_private *dev_priv = ring->dev->dev_private; 2364 u32 cmd, ipehr, acthd, acthd_min; 2365 2366 ipehr = I915_READ(RING_IPEHR(ring->mmio_base)); 2367 if ((ipehr & ~(0x3 << 16)) != 2368 (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | MI_SEMAPHORE_REGISTER)) 2369 return NULL; 2370 2371 /* ACTHD is likely pointing to the dword after the actual command, 2372 * so scan backwards until we find the MBOX. 
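 *
 * The scan is bounded to the three dwords before ACTHD (acthd_min below);
 * once the MI_SEMAPHORE_MBOX command is found, the dword that follows it is
 * the mbox value being waited for (converted back to a seqno by adding one)
 * and the register-select bits in IPEHR pick out which of the other rings
 * is expected to signal it.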
2373 */ 2374 acthd = intel_ring_get_active_head(ring) & HEAD_ADDR; 2375 acthd_min = max((int)acthd - 3 * 4, 0); 2376 do { 2377 cmd = ioread32(ring->virtual_start + acthd); 2378 if (cmd == ipehr) 2379 break; 2380 2381 acthd -= 4; 2382 if (acthd < acthd_min) 2383 return NULL; 2384 } while (1); 2385 2386 *seqno = ioread32(ring->virtual_start+acthd+4)+1; 2387 return &dev_priv->ring[(ring->id + (((ipehr >> 17) & 1) + 1)) % 3]; 2388 } 2389 2390 static int semaphore_passed(struct intel_ring_buffer *ring) 2391 { 2392 struct drm_i915_private *dev_priv = ring->dev->dev_private; 2393 struct intel_ring_buffer *signaller; 2394 u32 seqno, ctl; 2395 2396 ring->hangcheck.deadlock = true; 2397 2398 signaller = semaphore_waits_for(ring, &seqno); 2399 if (signaller == NULL || signaller->hangcheck.deadlock) 2400 return -1; 2401 2402 /* cursory check for an unkickable deadlock */ 2403 ctl = I915_READ_CTL(signaller); 2404 if (ctl & RING_WAIT_SEMAPHORE && semaphore_passed(signaller) < 0) 2405 return -1; 2406 2407 return i915_seqno_passed(signaller->get_seqno(signaller, false), seqno); 2408 } 2409 2410 static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv) 2411 { 2412 struct intel_ring_buffer *ring; 2413 int i; 2414 2415 for_each_ring(ring, dev_priv, i) 2416 ring->hangcheck.deadlock = false; 2417 } 2418 2419 static enum intel_ring_hangcheck_action 2420 ring_stuck(struct intel_ring_buffer *ring, u32 acthd) 2421 { 2422 struct drm_device *dev = ring->dev; 2423 struct drm_i915_private *dev_priv = dev->dev_private; 2424 u32 tmp; 2425 2426 if (ring->hangcheck.acthd != acthd) 2427 return HANGCHECK_ACTIVE; 2428 2429 if (IS_GEN2(dev)) 2430 return HANGCHECK_HUNG; 2431 2432 /* Is the chip hanging on a WAIT_FOR_EVENT? 2433 * If so we can simply poke the RB_WAIT bit 2434 * and break the hang. This should work on 2435 * all but the second generation chipsets. 2436 */ 2437 tmp = I915_READ_CTL(ring); 2438 if (tmp & RING_WAIT) { 2439 DRM_ERROR("Kicking stuck wait on %s\n", 2440 ring->name); 2441 i915_handle_error(dev, false); 2442 I915_WRITE_CTL(ring, tmp); 2443 return HANGCHECK_KICK; 2444 } 2445 2446 if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) { 2447 switch (semaphore_passed(ring)) { 2448 default: 2449 return HANGCHECK_HUNG; 2450 case 1: 2451 DRM_ERROR("Kicking stuck semaphore on %s\n", 2452 ring->name); 2453 i915_handle_error(dev, false); 2454 I915_WRITE_CTL(ring, tmp); 2455 return HANGCHECK_KICK; 2456 case 0: 2457 return HANGCHECK_WAIT; 2458 } 2459 } 2460 2461 return HANGCHECK_HUNG; 2462 } 2463 2464 /** 2465 * This is called when the chip hasn't reported back with completed 2466 * batchbuffers in a long time. We keep track per ring seqno progress and 2467 * if there are no progress, hangcheck score for that ring is increased. 2468 * Further, acthd is inspected to see if the ring is stuck. On stuck case 2469 * we kick the ring. If we see no progress on three subsequent calls 2470 * we assume chip is wedged and try to fix it by resetting the chip. 
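 *
 * The per-ring score uses the BUSY/KICK/HUNG increments defined below and is
 * compared against FIRE: a ring stuck on the same request gains HUNG (20)
 * each time the timer fires and is declared hung after two periods (40 > 30),
 * a ring that merely needs its WAIT_FOR_EVENT kicked gains KICK (5) and needs
 * seven periods, and a ring that makes seqno progress has its score decayed
 * by one per period so transient stalls do not accumulate.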
2471 */ 2472 static void i915_hangcheck_elapsed(unsigned long data) 2473 { 2474 struct drm_device *dev = (struct drm_device *)data; 2475 drm_i915_private_t *dev_priv = dev->dev_private; 2476 struct intel_ring_buffer *ring; 2477 int i; 2478 int busy_count = 0, rings_hung = 0; 2479 bool stuck[I915_NUM_RINGS] = { 0 }; 2480 #define BUSY 1 2481 #define KICK 5 2482 #define HUNG 20 2483 #define FIRE 30 2484 2485 if (!i915_enable_hangcheck) 2486 return; 2487 2488 for_each_ring(ring, dev_priv, i) { 2489 u32 seqno, acthd; 2490 bool busy = true; 2491 2492 semaphore_clear_deadlocks(dev_priv); 2493 2494 seqno = ring->get_seqno(ring, false); 2495 acthd = intel_ring_get_active_head(ring); 2496 2497 if (ring->hangcheck.seqno == seqno) { 2498 if (ring_idle(ring, seqno)) { 2499 ring->hangcheck.action = HANGCHECK_IDLE; 2500 2501 if (waitqueue_active(&ring->irq_queue)) { 2502 /* Issue a wake-up to catch stuck h/w. */ 2503 if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) { 2504 if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring))) 2505 DRM_ERROR("Hangcheck timer elapsed... %s idle\n", 2506 ring->name); 2507 else 2508 DRM_INFO("Fake missed irq on %s\n", 2509 ring->name); 2510 wake_up_all(&ring->irq_queue); 2511 } 2512 /* Safeguard against driver failure */ 2513 ring->hangcheck.score += BUSY; 2514 } else 2515 busy = false; 2516 } else { 2517 /* We always increment the hangcheck score 2518 * if the ring is busy and still processing 2519 * the same request, so that no single request 2520 * can run indefinitely (such as a chain of 2521 * batches). The only time we do not increment 2522 * the hangcheck score on this ring, if this 2523 * ring is in a legitimate wait for another 2524 * ring. In that case the waiting ring is a 2525 * victim and we want to be sure we catch the 2526 * right culprit. Then every time we do kick 2527 * the ring, add a small increment to the 2528 * score so that we can catch a batch that is 2529 * being repeatedly kicked and so responsible 2530 * for stalling the machine. 2531 */ 2532 ring->hangcheck.action = ring_stuck(ring, 2533 acthd); 2534 2535 switch (ring->hangcheck.action) { 2536 case HANGCHECK_IDLE: 2537 case HANGCHECK_WAIT: 2538 break; 2539 case HANGCHECK_ACTIVE: 2540 ring->hangcheck.score += BUSY; 2541 break; 2542 case HANGCHECK_KICK: 2543 ring->hangcheck.score += KICK; 2544 break; 2545 case HANGCHECK_HUNG: 2546 ring->hangcheck.score += HUNG; 2547 stuck[i] = true; 2548 break; 2549 } 2550 } 2551 } else { 2552 ring->hangcheck.action = HANGCHECK_ACTIVE; 2553 2554 /* Gradually reduce the count so that we catch DoS 2555 * attempts across multiple batches. 2556 */ 2557 if (ring->hangcheck.score > 0) 2558 ring->hangcheck.score--; 2559 } 2560 2561 ring->hangcheck.seqno = seqno; 2562 ring->hangcheck.acthd = acthd; 2563 busy_count += busy; 2564 } 2565 2566 for_each_ring(ring, dev_priv, i) { 2567 if (ring->hangcheck.score > FIRE) { 2568 DRM_INFO("%s on %s\n", 2569 stuck[i] ? 
"stuck" : "no progress", 2570 ring->name); 2571 rings_hung++; 2572 } 2573 } 2574 2575 if (rings_hung) 2576 return i915_handle_error(dev, true); 2577 2578 if (busy_count) 2579 /* Reset timer case chip hangs without another request 2580 * being added */ 2581 i915_queue_hangcheck(dev); 2582 } 2583 2584 void i915_queue_hangcheck(struct drm_device *dev) 2585 { 2586 struct drm_i915_private *dev_priv = dev->dev_private; 2587 if (!i915_enable_hangcheck) 2588 return; 2589 2590 mod_timer(&dev_priv->gpu_error.hangcheck_timer, 2591 round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES)); 2592 } 2593 2594 static void ibx_irq_preinstall(struct drm_device *dev) 2595 { 2596 struct drm_i915_private *dev_priv = dev->dev_private; 2597 2598 if (HAS_PCH_NOP(dev)) 2599 return; 2600 2601 /* south display irq */ 2602 I915_WRITE(SDEIMR, 0xffffffff); 2603 /* 2604 * SDEIER is also touched by the interrupt handler to work around missed 2605 * PCH interrupts. Hence we can't update it after the interrupt handler 2606 * is enabled - instead we unconditionally enable all PCH interrupt 2607 * sources here, but then only unmask them as needed with SDEIMR. 2608 */ 2609 I915_WRITE(SDEIER, 0xffffffff); 2610 POSTING_READ(SDEIER); 2611 } 2612 2613 static void gen5_gt_irq_preinstall(struct drm_device *dev) 2614 { 2615 struct drm_i915_private *dev_priv = dev->dev_private; 2616 2617 /* and GT */ 2618 I915_WRITE(GTIMR, 0xffffffff); 2619 I915_WRITE(GTIER, 0x0); 2620 POSTING_READ(GTIER); 2621 2622 if (INTEL_INFO(dev)->gen >= 6) { 2623 /* and PM */ 2624 I915_WRITE(GEN6_PMIMR, 0xffffffff); 2625 I915_WRITE(GEN6_PMIER, 0x0); 2626 POSTING_READ(GEN6_PMIER); 2627 } 2628 } 2629 2630 /* drm_dma.h hooks 2631 */ 2632 static void ironlake_irq_preinstall(struct drm_device *dev) 2633 { 2634 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2635 2636 atomic_set(&dev_priv->irq_received, 0); 2637 2638 I915_WRITE(HWSTAM, 0xeffe); 2639 2640 I915_WRITE(DEIMR, 0xffffffff); 2641 I915_WRITE(DEIER, 0x0); 2642 POSTING_READ(DEIER); 2643 2644 gen5_gt_irq_preinstall(dev); 2645 2646 ibx_irq_preinstall(dev); 2647 } 2648 2649 static void valleyview_irq_preinstall(struct drm_device *dev) 2650 { 2651 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2652 int pipe; 2653 2654 atomic_set(&dev_priv->irq_received, 0); 2655 2656 /* VLV magic */ 2657 I915_WRITE(VLV_IMR, 0); 2658 I915_WRITE(RING_IMR(RENDER_RING_BASE), 0); 2659 I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0); 2660 I915_WRITE(RING_IMR(BLT_RING_BASE), 0); 2661 2662 /* and GT */ 2663 I915_WRITE(GTIIR, I915_READ(GTIIR)); 2664 I915_WRITE(GTIIR, I915_READ(GTIIR)); 2665 2666 gen5_gt_irq_preinstall(dev); 2667 2668 I915_WRITE(DPINVGTT, 0xff); 2669 2670 I915_WRITE(PORT_HOTPLUG_EN, 0); 2671 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 2672 for_each_pipe(pipe) 2673 I915_WRITE(PIPESTAT(pipe), 0xffff); 2674 I915_WRITE(VLV_IIR, 0xffffffff); 2675 I915_WRITE(VLV_IMR, 0xffffffff); 2676 I915_WRITE(VLV_IER, 0x0); 2677 POSTING_READ(VLV_IER); 2678 } 2679 2680 static void gen8_irq_preinstall(struct drm_device *dev) 2681 { 2682 struct drm_i915_private *dev_priv = dev->dev_private; 2683 int pipe; 2684 2685 atomic_set(&dev_priv->irq_received, 0); 2686 2687 I915_WRITE(GEN8_MASTER_IRQ, 0); 2688 POSTING_READ(GEN8_MASTER_IRQ); 2689 2690 /* IIR can theoretically queue up two events. 
Be paranoid */ 2691 #define GEN8_IRQ_INIT_NDX(type, which) do { \ 2692 I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \ 2693 POSTING_READ(GEN8_##type##_IMR(which)); \ 2694 I915_WRITE(GEN8_##type##_IER(which), 0); \ 2695 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \ 2696 POSTING_READ(GEN8_##type##_IIR(which)); \ 2697 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \ 2698 } while (0) 2699 2700 #define GEN8_IRQ_INIT(type) do { \ 2701 I915_WRITE(GEN8_##type##_IMR, 0xffffffff); \ 2702 POSTING_READ(GEN8_##type##_IMR); \ 2703 I915_WRITE(GEN8_##type##_IER, 0); \ 2704 I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \ 2705 POSTING_READ(GEN8_##type##_IIR); \ 2706 I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \ 2707 } while (0) 2708 2709 GEN8_IRQ_INIT_NDX(GT, 0); 2710 GEN8_IRQ_INIT_NDX(GT, 1); 2711 GEN8_IRQ_INIT_NDX(GT, 2); 2712 GEN8_IRQ_INIT_NDX(GT, 3); 2713 2714 for_each_pipe(pipe) { 2715 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe); 2716 } 2717 2718 GEN8_IRQ_INIT(DE_PORT); 2719 GEN8_IRQ_INIT(DE_MISC); 2720 GEN8_IRQ_INIT(PCU); 2721 #undef GEN8_IRQ_INIT 2722 #undef GEN8_IRQ_INIT_NDX 2723 2724 POSTING_READ(GEN8_PCU_IIR); 2725 2726 ibx_irq_preinstall(dev); 2727 } 2728 2729 static void ibx_hpd_irq_setup(struct drm_device *dev) 2730 { 2731 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2732 struct drm_mode_config *mode_config = &dev->mode_config; 2733 struct intel_encoder *intel_encoder; 2734 u32 hotplug_irqs, hotplug, enabled_irqs = 0; 2735 2736 if (HAS_PCH_IBX(dev)) { 2737 hotplug_irqs = SDE_HOTPLUG_MASK; 2738 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head) 2739 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) 2740 enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin]; 2741 } else { 2742 hotplug_irqs = SDE_HOTPLUG_MASK_CPT; 2743 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head) 2744 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) 2745 enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin]; 2746 } 2747 2748 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 2749 2750 /* 2751 * Enable digital hotplug on the PCH, and configure the DP short pulse 2752 * duration to 2ms (which is the minimum in the Display Port spec) 2753 * 2754 * This register is the same on all known PCH chips. 
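 *
 * The write below is a read-modify-write: the old pulse-duration fields for
 * ports B/C/D are cleared first and then the hotplug-enable bits plus the
 * 2ms duration encoding are ORed back in.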
2755 */ 2756 hotplug = I915_READ(PCH_PORT_HOTPLUG); 2757 hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK); 2758 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms; 2759 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms; 2760 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms; 2761 I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 2762 } 2763 2764 static void ibx_irq_postinstall(struct drm_device *dev) 2765 { 2766 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2767 u32 mask; 2768 2769 if (HAS_PCH_NOP(dev)) 2770 return; 2771 2772 if (HAS_PCH_IBX(dev)) { 2773 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_TRANSB_FIFO_UNDER | 2774 SDE_TRANSA_FIFO_UNDER | SDE_POISON; 2775 } else { 2776 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT | SDE_ERROR_CPT; 2777 2778 I915_WRITE(SERR_INT, I915_READ(SERR_INT)); 2779 } 2780 2781 I915_WRITE(SDEIIR, I915_READ(SDEIIR)); 2782 I915_WRITE(SDEIMR, ~mask); 2783 } 2784 2785 static void gen5_gt_irq_postinstall(struct drm_device *dev) 2786 { 2787 struct drm_i915_private *dev_priv = dev->dev_private; 2788 u32 pm_irqs, gt_irqs; 2789 2790 pm_irqs = gt_irqs = 0; 2791 2792 dev_priv->gt_irq_mask = ~0; 2793 if (HAS_L3_DPF(dev)) { 2794 /* L3 parity interrupt is always unmasked. */ 2795 dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev); 2796 gt_irqs |= GT_PARITY_ERROR(dev); 2797 } 2798 2799 gt_irqs |= GT_RENDER_USER_INTERRUPT; 2800 if (IS_GEN5(dev)) { 2801 gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT | 2802 ILK_BSD_USER_INTERRUPT; 2803 } else { 2804 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT; 2805 } 2806 2807 I915_WRITE(GTIIR, I915_READ(GTIIR)); 2808 I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 2809 I915_WRITE(GTIER, gt_irqs); 2810 POSTING_READ(GTIER); 2811 2812 if (INTEL_INFO(dev)->gen >= 6) { 2813 pm_irqs |= GEN6_PM_RPS_EVENTS; 2814 2815 if (HAS_VEBOX(dev)) 2816 pm_irqs |= PM_VEBOX_USER_INTERRUPT; 2817 2818 dev_priv->pm_irq_mask = 0xffffffff; 2819 I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR)); 2820 I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask); 2821 I915_WRITE(GEN6_PMIER, pm_irqs); 2822 POSTING_READ(GEN6_PMIER); 2823 } 2824 } 2825 2826 static int ironlake_irq_postinstall(struct drm_device *dev) 2827 { 2828 unsigned long irqflags; 2829 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2830 u32 display_mask, extra_mask; 2831 2832 if (INTEL_INFO(dev)->gen >= 7) { 2833 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | 2834 DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB | 2835 DE_PLANEB_FLIP_DONE_IVB | 2836 DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB | 2837 DE_ERR_INT_IVB); 2838 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB | 2839 DE_PIPEA_VBLANK_IVB); 2840 2841 I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT)); 2842 } else { 2843 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | 2844 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE | 2845 DE_AUX_CHANNEL_A | 2846 DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN | 2847 DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE | 2848 DE_POISON); 2849 extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT; 2850 } 2851 2852 dev_priv->irq_mask = ~display_mask; 2853 2854 /* should always can generate irq */ 2855 I915_WRITE(DEIIR, I915_READ(DEIIR)); 2856 I915_WRITE(DEIMR, dev_priv->irq_mask); 2857 I915_WRITE(DEIER, display_mask | extra_mask); 2858 POSTING_READ(DEIER); 2859 2860 gen5_gt_irq_postinstall(dev); 2861 2862 ibx_irq_postinstall(dev); 2863 2864 if (IS_IRONLAKE_M(dev)) { 2865 /* Enable PCU event interrupts 2866 * 2867 * 
spinlocking not required here for correctness since interrupt 2868 * setup is guaranteed to run in single-threaded context. But we 2869 * need it to make the assert_spin_locked happy. */ 2870 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2871 ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT); 2872 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2873 } 2874 2875 return 0; 2876 } 2877 2878 static int valleyview_irq_postinstall(struct drm_device *dev) 2879 { 2880 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2881 u32 enable_mask; 2882 u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV | 2883 PIPE_CRC_DONE_ENABLE; 2884 unsigned long irqflags; 2885 2886 enable_mask = I915_DISPLAY_PORT_INTERRUPT; 2887 enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 2888 I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT | 2889 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 2890 I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; 2891 2892 /* 2893 *Leave vblank interrupts masked initially. enable/disable will 2894 * toggle them based on usage. 2895 */ 2896 dev_priv->irq_mask = (~enable_mask) | 2897 I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT | 2898 I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; 2899 2900 I915_WRITE(PORT_HOTPLUG_EN, 0); 2901 POSTING_READ(PORT_HOTPLUG_EN); 2902 2903 I915_WRITE(VLV_IMR, dev_priv->irq_mask); 2904 I915_WRITE(VLV_IER, enable_mask); 2905 I915_WRITE(VLV_IIR, 0xffffffff); 2906 I915_WRITE(PIPESTAT(0), 0xffff); 2907 I915_WRITE(PIPESTAT(1), 0xffff); 2908 POSTING_READ(VLV_IER); 2909 2910 /* Interrupt setup is already guaranteed to be single-threaded, this is 2911 * just to make the assert_spin_locked check happy. */ 2912 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2913 i915_enable_pipestat(dev_priv, PIPE_A, pipestat_enable); 2914 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_EVENT_ENABLE); 2915 i915_enable_pipestat(dev_priv, PIPE_B, pipestat_enable); 2916 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2917 2918 I915_WRITE(VLV_IIR, 0xffffffff); 2919 I915_WRITE(VLV_IIR, 0xffffffff); 2920 2921 gen5_gt_irq_postinstall(dev); 2922 2923 /* ack & enable invalid PTE error interrupts */ 2924 #if 0 /* FIXME: add support to irq handler for checking these bits */ 2925 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK); 2926 I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK); 2927 #endif 2928 2929 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); 2930 2931 return 0; 2932 } 2933 2934 static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv) 2935 { 2936 int i; 2937 2938 /* These are interrupts we'll toggle with the ring mask register */ 2939 uint32_t gt_interrupts[] = { 2940 GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT | 2941 GT_RENDER_L3_PARITY_ERROR_INTERRUPT | 2942 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT, 2943 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT | 2944 GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT, 2945 0, 2946 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT 2947 }; 2948 2949 for (i = 0; i < ARRAY_SIZE(gt_interrupts); i++) { 2950 u32 tmp = I915_READ(GEN8_GT_IIR(i)); 2951 if (tmp) 2952 DRM_ERROR("Interrupt (%d) should have been masked in pre-install 0x%08x\n", 2953 i, tmp); 2954 I915_WRITE(GEN8_GT_IMR(i), ~gt_interrupts[i]); 2955 I915_WRITE(GEN8_GT_IER(i), gt_interrupts[i]); 2956 } 2957 POSTING_READ(GEN8_GT_IER(0)); 2958 } 2959 2960 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) 2961 { 2962 struct drm_device *dev = dev_priv->dev; 2963 uint32_t de_pipe_masked = GEN8_PIPE_FLIP_DONE | 2964 GEN8_PIPE_CDCLK_CRC_DONE | 2965 GEN8_PIPE_FIFO_UNDERRUN | 2966 
GEN8_DE_PIPE_IRQ_FAULT_ERRORS; 2967 uint32_t de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK; 2968 int pipe; 2969 dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked; 2970 dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked; 2971 dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked; 2972 2973 for_each_pipe(pipe) { 2974 u32 tmp = I915_READ(GEN8_DE_PIPE_IIR(pipe)); 2975 if (tmp) 2976 DRM_ERROR("Interrupt (%d) should have been masked in pre-install 0x%08x\n", 2977 pipe, tmp); 2978 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); 2979 I915_WRITE(GEN8_DE_PIPE_IER(pipe), de_pipe_enables); 2980 } 2981 POSTING_READ(GEN8_DE_PIPE_ISR(0)); 2982 2983 I915_WRITE(GEN8_DE_PORT_IMR, ~GEN8_AUX_CHANNEL_A); 2984 I915_WRITE(GEN8_DE_PORT_IER, GEN8_AUX_CHANNEL_A); 2985 POSTING_READ(GEN8_DE_PORT_IER); 2986 } 2987 2988 static int gen8_irq_postinstall(struct drm_device *dev) 2989 { 2990 struct drm_i915_private *dev_priv = dev->dev_private; 2991 2992 gen8_gt_irq_postinstall(dev_priv); 2993 gen8_de_irq_postinstall(dev_priv); 2994 2995 ibx_irq_postinstall(dev); 2996 2997 I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL); 2998 POSTING_READ(GEN8_MASTER_IRQ); 2999 3000 return 0; 3001 } 3002 3003 static void gen8_irq_uninstall(struct drm_device *dev) 3004 { 3005 struct drm_i915_private *dev_priv = dev->dev_private; 3006 int pipe; 3007 3008 if (!dev_priv) 3009 return; 3010 3011 atomic_set(&dev_priv->irq_received, 0); 3012 3013 I915_WRITE(GEN8_MASTER_IRQ, 0); 3014 3015 #define GEN8_IRQ_FINI_NDX(type, which) do { \ 3016 I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \ 3017 I915_WRITE(GEN8_##type##_IER(which), 0); \ 3018 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \ 3019 } while (0) 3020 3021 #define GEN8_IRQ_FINI(type) do { \ 3022 I915_WRITE(GEN8_##type##_IMR, 0xffffffff); \ 3023 I915_WRITE(GEN8_##type##_IER, 0); \ 3024 I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \ 3025 } while (0) 3026 3027 GEN8_IRQ_FINI_NDX(GT, 0); 3028 GEN8_IRQ_FINI_NDX(GT, 1); 3029 GEN8_IRQ_FINI_NDX(GT, 2); 3030 GEN8_IRQ_FINI_NDX(GT, 3); 3031 3032 for_each_pipe(pipe) { 3033 GEN8_IRQ_FINI_NDX(DE_PIPE, pipe); 3034 } 3035 3036 GEN8_IRQ_FINI(DE_PORT); 3037 GEN8_IRQ_FINI(DE_MISC); 3038 GEN8_IRQ_FINI(PCU); 3039 #undef GEN8_IRQ_FINI 3040 #undef GEN8_IRQ_FINI_NDX 3041 3042 POSTING_READ(GEN8_PCU_IIR); 3043 } 3044 3045 static void valleyview_irq_uninstall(struct drm_device *dev) 3046 { 3047 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3048 int pipe; 3049 3050 if (!dev_priv) 3051 return; 3052 3053 del_timer_sync(&dev_priv->hotplug_reenable_timer); 3054 3055 for_each_pipe(pipe) 3056 I915_WRITE(PIPESTAT(pipe), 0xffff); 3057 3058 I915_WRITE(HWSTAM, 0xffffffff); 3059 I915_WRITE(PORT_HOTPLUG_EN, 0); 3060 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3061 for_each_pipe(pipe) 3062 I915_WRITE(PIPESTAT(pipe), 0xffff); 3063 I915_WRITE(VLV_IIR, 0xffffffff); 3064 I915_WRITE(VLV_IMR, 0xffffffff); 3065 I915_WRITE(VLV_IER, 0x0); 3066 POSTING_READ(VLV_IER); 3067 } 3068 3069 static void ironlake_irq_uninstall(struct drm_device *dev) 3070 { 3071 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3072 3073 if (!dev_priv) 3074 return; 3075 3076 del_timer_sync(&dev_priv->hotplug_reenable_timer); 3077 3078 I915_WRITE(HWSTAM, 0xffffffff); 3079 3080 I915_WRITE(DEIMR, 0xffffffff); 3081 I915_WRITE(DEIER, 0x0); 3082 I915_WRITE(DEIIR, I915_READ(DEIIR)); 3083 if (IS_GEN7(dev)) 3084 I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT)); 3085 3086 I915_WRITE(GTIMR, 0xffffffff); 3087 I915_WRITE(GTIER, 0x0); 3088 
I915_WRITE(GTIIR, I915_READ(GTIIR)); 3089 3090 if (HAS_PCH_NOP(dev)) 3091 return; 3092 3093 I915_WRITE(SDEIMR, 0xffffffff); 3094 I915_WRITE(SDEIER, 0x0); 3095 I915_WRITE(SDEIIR, I915_READ(SDEIIR)); 3096 if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev)) 3097 I915_WRITE(SERR_INT, I915_READ(SERR_INT)); 3098 } 3099 3100 static void i8xx_irq_preinstall(struct drm_device * dev) 3101 { 3102 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3103 int pipe; 3104 3105 atomic_set(&dev_priv->irq_received, 0); 3106 3107 for_each_pipe(pipe) 3108 I915_WRITE(PIPESTAT(pipe), 0); 3109 I915_WRITE16(IMR, 0xffff); 3110 I915_WRITE16(IER, 0x0); 3111 POSTING_READ16(IER); 3112 } 3113 3114 static int i8xx_irq_postinstall(struct drm_device *dev) 3115 { 3116 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3117 unsigned long irqflags; 3118 3119 I915_WRITE16(EMR, 3120 ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); 3121 3122 /* Unmask the interrupts that we always want on. */ 3123 dev_priv->irq_mask = 3124 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3125 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3126 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3127 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | 3128 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 3129 I915_WRITE16(IMR, dev_priv->irq_mask); 3130 3131 I915_WRITE16(IER, 3132 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3133 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3134 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT | 3135 I915_USER_INTERRUPT); 3136 POSTING_READ16(IER); 3137 3138 /* Interrupt setup is already guaranteed to be single-threaded, this is 3139 * just to make the assert_spin_locked check happy. */ 3140 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3141 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_ENABLE); 3142 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_ENABLE); 3143 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3144 3145 return 0; 3146 } 3147 3148 /* 3149 * Returns true when a page flip has completed. 3150 */ 3151 static bool i8xx_handle_vblank(struct drm_device *dev, 3152 int plane, int pipe, u32 iir) 3153 { 3154 drm_i915_private_t *dev_priv = dev->dev_private; 3155 u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane); 3156 3157 if (!drm_handle_vblank(dev, pipe)) 3158 return false; 3159 3160 if ((iir & flip_pending) == 0) 3161 return false; 3162 3163 intel_prepare_page_flip(dev, plane); 3164 3165 /* We detect FlipDone by looking for the change in PendingFlip from '1' 3166 * to '0' on the following vblank, i.e. IIR has the Pendingflip 3167 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence 3168 * the flip is completed (no longer pending). Since this doesn't raise 3169 * an interrupt per se, we watch for the change at vblank. 
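 *
 * So if ISR still has the PendingFlip bit set here, the flip has not
 * completed yet and we return false to check again on the next vblank;
 * once ISR reads back clear we know the new surface was latched and we
 * call intel_finish_page_flip().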
3170 */ 3171 if (I915_READ16(ISR) & flip_pending) 3172 return false; 3173 3174 intel_finish_page_flip(dev, pipe); 3175 3176 return true; 3177 } 3178 3179 static irqreturn_t i8xx_irq_handler(int irq, void *arg) 3180 { 3181 struct drm_device *dev = (struct drm_device *) arg; 3182 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3183 u16 iir, new_iir; 3184 u32 pipe_stats[2]; 3185 unsigned long irqflags; 3186 int pipe; 3187 u16 flip_mask = 3188 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3189 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 3190 3191 atomic_inc(&dev_priv->irq_received); 3192 3193 iir = I915_READ16(IIR); 3194 if (iir == 0) 3195 return IRQ_NONE; 3196 3197 while (iir & ~flip_mask) { 3198 /* Can't rely on pipestat interrupt bit in iir as it might 3199 * have been cleared after the pipestat interrupt was received. 3200 * It doesn't set the bit in iir again, but it still produces 3201 * interrupts (for non-MSI). 3202 */ 3203 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3204 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 3205 i915_handle_error(dev, false); 3206 3207 for_each_pipe(pipe) { 3208 int reg = PIPESTAT(pipe); 3209 pipe_stats[pipe] = I915_READ(reg); 3210 3211 /* 3212 * Clear the PIPE*STAT regs before the IIR 3213 */ 3214 if (pipe_stats[pipe] & 0x8000ffff) { 3215 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 3216 DRM_DEBUG_DRIVER("pipe %c underrun\n", 3217 pipe_name(pipe)); 3218 I915_WRITE(reg, pipe_stats[pipe]); 3219 } 3220 } 3221 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3222 3223 I915_WRITE16(IIR, iir & ~flip_mask); 3224 new_iir = I915_READ16(IIR); /* Flush posted writes */ 3225 3226 i915_update_dri1_breadcrumb(dev); 3227 3228 if (iir & I915_USER_INTERRUPT) 3229 notify_ring(dev, &dev_priv->ring[RCS]); 3230 3231 for_each_pipe(pipe) { 3232 int plane = pipe; 3233 if (HAS_FBC(dev)) 3234 plane = !plane; 3235 3236 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS && 3237 i8xx_handle_vblank(dev, plane, pipe, iir)) 3238 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane); 3239 3240 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 3241 i9xx_pipe_crc_irq_handler(dev, pipe); 3242 } 3243 3244 iir = new_iir; 3245 } 3246 3247 return IRQ_HANDLED; 3248 } 3249 3250 static void i8xx_irq_uninstall(struct drm_device * dev) 3251 { 3252 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3253 int pipe; 3254 3255 for_each_pipe(pipe) { 3256 /* Clear enable bits; then clear status bits */ 3257 I915_WRITE(PIPESTAT(pipe), 0); 3258 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); 3259 } 3260 I915_WRITE16(IMR, 0xffff); 3261 I915_WRITE16(IER, 0x0); 3262 I915_WRITE16(IIR, I915_READ16(IIR)); 3263 } 3264 3265 static void i915_irq_preinstall(struct drm_device * dev) 3266 { 3267 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3268 int pipe; 3269 3270 atomic_set(&dev_priv->irq_received, 0); 3271 3272 if (I915_HAS_HOTPLUG(dev)) { 3273 I915_WRITE(PORT_HOTPLUG_EN, 0); 3274 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3275 } 3276 3277 I915_WRITE16(HWSTAM, 0xeffe); 3278 for_each_pipe(pipe) 3279 I915_WRITE(PIPESTAT(pipe), 0); 3280 I915_WRITE(IMR, 0xffffffff); 3281 I915_WRITE(IER, 0x0); 3282 POSTING_READ(IER); 3283 } 3284 3285 static int i915_irq_postinstall(struct drm_device *dev) 3286 { 3287 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3288 u32 enable_mask; 3289 unsigned long irqflags; 3290 3291 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); 3292 
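	/*
	 * Note the EMR write above leaves only page-table and memory-refresh
	 * errors unmasked; everything else is prevented from setting EIR and
	 * raising the error interrupt that feeds i915_handle_error().
	 */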
3293 /* Unmask the interrupts that we always want on. */ 3294 dev_priv->irq_mask = 3295 ~(I915_ASLE_INTERRUPT | 3296 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3297 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3298 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3299 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | 3300 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 3301 3302 enable_mask = 3303 I915_ASLE_INTERRUPT | 3304 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3305 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3306 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT | 3307 I915_USER_INTERRUPT; 3308 3309 if (I915_HAS_HOTPLUG(dev)) { 3310 I915_WRITE(PORT_HOTPLUG_EN, 0); 3311 POSTING_READ(PORT_HOTPLUG_EN); 3312 3313 /* Enable in IER... */ 3314 enable_mask |= I915_DISPLAY_PORT_INTERRUPT; 3315 /* and unmask in IMR */ 3316 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT; 3317 } 3318 3319 I915_WRITE(IMR, dev_priv->irq_mask); 3320 I915_WRITE(IER, enable_mask); 3321 POSTING_READ(IER); 3322 3323 i915_enable_asle_pipestat(dev); 3324 3325 /* Interrupt setup is already guaranteed to be single-threaded, this is 3326 * just to make the assert_spin_locked check happy. */ 3327 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3328 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_ENABLE); 3329 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_ENABLE); 3330 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3331 3332 return 0; 3333 } 3334 3335 /* 3336 * Returns true when a page flip has completed. 3337 */ 3338 static bool i915_handle_vblank(struct drm_device *dev, 3339 int plane, int pipe, u32 iir) 3340 { 3341 drm_i915_private_t *dev_priv = dev->dev_private; 3342 u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane); 3343 3344 if (!drm_handle_vblank(dev, pipe)) 3345 return false; 3346 3347 if ((iir & flip_pending) == 0) 3348 return false; 3349 3350 intel_prepare_page_flip(dev, plane); 3351 3352 /* We detect FlipDone by looking for the change in PendingFlip from '1' 3353 * to '0' on the following vblank, i.e. IIR has the Pendingflip 3354 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence 3355 * the flip is completed (no longer pending). Since this doesn't raise 3356 * an interrupt per se, we watch for the change at vblank. 3357 */ 3358 if (I915_READ(ISR) & flip_pending) 3359 return false; 3360 3361 intel_finish_page_flip(dev, pipe); 3362 3363 return true; 3364 } 3365 3366 static irqreturn_t i915_irq_handler(int irq, void *arg) 3367 { 3368 struct drm_device *dev = (struct drm_device *) arg; 3369 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3370 u32 iir, new_iir, pipe_stats[I915_MAX_PIPES]; 3371 unsigned long irqflags; 3372 u32 flip_mask = 3373 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3374 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 3375 int pipe, ret = IRQ_NONE; 3376 3377 atomic_inc(&dev_priv->irq_received); 3378 3379 iir = I915_READ(IIR); 3380 do { 3381 bool irq_received = (iir & ~flip_mask) != 0; 3382 bool blc_event = false; 3383 3384 /* Can't rely on pipestat interrupt bit in iir as it might 3385 * have been cleared after the pipestat interrupt was received. 3386 * It doesn't set the bit in iir again, but it still produces 3387 * interrupts (for non-MSI). 
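 *
 * That is why pipe_stats[] is snapshotted and the PIPE*STAT registers are
 * cleared under irq_lock before IIR itself is acked, and why irq_received
 * is forced to true whenever a pipestat bit was found even if iir carried
 * no event bit for it.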
3388 */ 3389 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3390 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 3391 i915_handle_error(dev, false); 3392 3393 for_each_pipe(pipe) { 3394 int reg = PIPESTAT(pipe); 3395 pipe_stats[pipe] = I915_READ(reg); 3396 3397 /* Clear the PIPE*STAT regs before the IIR */ 3398 if (pipe_stats[pipe] & 0x8000ffff) { 3399 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 3400 DRM_DEBUG_DRIVER("pipe %c underrun\n", 3401 pipe_name(pipe)); 3402 I915_WRITE(reg, pipe_stats[pipe]); 3403 irq_received = true; 3404 } 3405 } 3406 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3407 3408 if (!irq_received) 3409 break; 3410 3411 /* Consume port. Then clear IIR or we'll miss events */ 3412 if ((I915_HAS_HOTPLUG(dev)) && 3413 (iir & I915_DISPLAY_PORT_INTERRUPT)) { 3414 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); 3415 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; 3416 3417 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", 3418 hotplug_status); 3419 3420 intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915); 3421 3422 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); 3423 POSTING_READ(PORT_HOTPLUG_STAT); 3424 } 3425 3426 I915_WRITE(IIR, iir & ~flip_mask); 3427 new_iir = I915_READ(IIR); /* Flush posted writes */ 3428 3429 if (iir & I915_USER_INTERRUPT) 3430 notify_ring(dev, &dev_priv->ring[RCS]); 3431 3432 for_each_pipe(pipe) { 3433 int plane = pipe; 3434 if (HAS_FBC(dev)) 3435 plane = !plane; 3436 3437 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS && 3438 i915_handle_vblank(dev, plane, pipe, iir)) 3439 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane); 3440 3441 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 3442 blc_event = true; 3443 3444 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 3445 i9xx_pipe_crc_irq_handler(dev, pipe); 3446 } 3447 3448 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 3449 intel_opregion_asle_intr(dev); 3450 3451 /* With MSI, interrupts are only generated when iir 3452 * transitions from zero to nonzero. If another bit got 3453 * set while we were handling the existing iir bits, then 3454 * we would never get another interrupt. 3455 * 3456 * This is fine on non-MSI as well, as if we hit this path 3457 * we avoid exiting the interrupt handler only to generate 3458 * another one. 3459 * 3460 * Note that for MSI this could cause a stray interrupt report 3461 * if an interrupt landed in the time between writing IIR and 3462 * the posting read. This should be rare enough to never 3463 * trigger the 99% of 100,000 interrupts test for disabling 3464 * stray interrupts. 
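 *
 * Concretely: we ack everything except the flip-pending bits we are still
 * watching, re-read IIR into new_iir before acting on the old value, and
 * loop again while any non-flip bit remains set, so the handler only
 * returns once IIR has really gone idle.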
3465 */ 3466 ret = IRQ_HANDLED; 3467 iir = new_iir; 3468 } while (iir & ~flip_mask); 3469 3470 i915_update_dri1_breadcrumb(dev); 3471 3472 return ret; 3473 } 3474 3475 static void i915_irq_uninstall(struct drm_device * dev) 3476 { 3477 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3478 int pipe; 3479 3480 del_timer_sync(&dev_priv->hotplug_reenable_timer); 3481 3482 if (I915_HAS_HOTPLUG(dev)) { 3483 I915_WRITE(PORT_HOTPLUG_EN, 0); 3484 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3485 } 3486 3487 I915_WRITE16(HWSTAM, 0xffff); 3488 for_each_pipe(pipe) { 3489 /* Clear enable bits; then clear status bits */ 3490 I915_WRITE(PIPESTAT(pipe), 0); 3491 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); 3492 } 3493 I915_WRITE(IMR, 0xffffffff); 3494 I915_WRITE(IER, 0x0); 3495 3496 I915_WRITE(IIR, I915_READ(IIR)); 3497 } 3498 3499 static void i965_irq_preinstall(struct drm_device * dev) 3500 { 3501 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3502 int pipe; 3503 3504 atomic_set(&dev_priv->irq_received, 0); 3505 3506 I915_WRITE(PORT_HOTPLUG_EN, 0); 3507 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3508 3509 I915_WRITE(HWSTAM, 0xeffe); 3510 for_each_pipe(pipe) 3511 I915_WRITE(PIPESTAT(pipe), 0); 3512 I915_WRITE(IMR, 0xffffffff); 3513 I915_WRITE(IER, 0x0); 3514 POSTING_READ(IER); 3515 } 3516 3517 static int i965_irq_postinstall(struct drm_device *dev) 3518 { 3519 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3520 u32 enable_mask; 3521 u32 error_mask; 3522 unsigned long irqflags; 3523 3524 /* Unmask the interrupts that we always want on. */ 3525 dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT | 3526 I915_DISPLAY_PORT_INTERRUPT | 3527 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3528 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3529 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3530 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | 3531 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 3532 3533 enable_mask = ~dev_priv->irq_mask; 3534 enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3535 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT); 3536 enable_mask |= I915_USER_INTERRUPT; 3537 3538 if (IS_G4X(dev)) 3539 enable_mask |= I915_BSD_USER_INTERRUPT; 3540 3541 /* Interrupt setup is already guaranteed to be single-threaded, this is 3542 * just to make the assert_spin_locked check happy. */ 3543 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3544 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_EVENT_ENABLE); 3545 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_ENABLE); 3546 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_ENABLE); 3547 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3548 3549 /* 3550 * Enable some error detection, note the instruction error mask 3551 * bit is reserved, so we leave it masked. 
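 *
 * Concretely, on G4X the unmasked sources are GM45_ERROR_PAGE_TABLE,
 * GM45_ERROR_MEM_PRIV, GM45_ERROR_CP_PRIV and I915_ERROR_MEMORY_REFRESH;
 * on older parts only I915_ERROR_PAGE_TABLE and I915_ERROR_MEMORY_REFRESH
 * are reported. Everything else stays masked in EMR.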
3552 */ 3553 if (IS_G4X(dev)) { 3554 error_mask = ~(GM45_ERROR_PAGE_TABLE | 3555 GM45_ERROR_MEM_PRIV | 3556 GM45_ERROR_CP_PRIV | 3557 I915_ERROR_MEMORY_REFRESH); 3558 } else { 3559 error_mask = ~(I915_ERROR_PAGE_TABLE | 3560 I915_ERROR_MEMORY_REFRESH); 3561 } 3562 I915_WRITE(EMR, error_mask); 3563 3564 I915_WRITE(IMR, dev_priv->irq_mask); 3565 I915_WRITE(IER, enable_mask); 3566 POSTING_READ(IER); 3567 3568 I915_WRITE(PORT_HOTPLUG_EN, 0); 3569 POSTING_READ(PORT_HOTPLUG_EN); 3570 3571 i915_enable_asle_pipestat(dev); 3572 3573 return 0; 3574 } 3575 3576 static void i915_hpd_irq_setup(struct drm_device *dev) 3577 { 3578 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3579 struct drm_mode_config *mode_config = &dev->mode_config; 3580 struct intel_encoder *intel_encoder; 3581 u32 hotplug_en; 3582 3583 assert_spin_locked(&dev_priv->irq_lock); 3584 3585 if (I915_HAS_HOTPLUG(dev)) { 3586 hotplug_en = I915_READ(PORT_HOTPLUG_EN); 3587 hotplug_en &= ~HOTPLUG_INT_EN_MASK; 3588 /* Note HDMI and DP share hotplug bits */ 3589 /* enable bits are the same for all generations */ 3590 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head) 3591 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) 3592 hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin]; 3593 /* Programming the CRT detection parameters tends 3594 to generate a spurious hotplug event about three 3595 seconds later. So just do it once. 3596 */ 3597 if (IS_G4X(dev)) 3598 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64; 3599 hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK; 3600 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; 3601 3602 /* Ignore TV since it's buggy */ 3603 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); 3604 } 3605 } 3606 3607 static irqreturn_t i965_irq_handler(int irq, void *arg) 3608 { 3609 struct drm_device *dev = (struct drm_device *) arg; 3610 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3611 u32 iir, new_iir; 3612 u32 pipe_stats[I915_MAX_PIPES]; 3613 unsigned long irqflags; 3614 int irq_received; 3615 int ret = IRQ_NONE, pipe; 3616 u32 flip_mask = 3617 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3618 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 3619 3620 atomic_inc(&dev_priv->irq_received); 3621 3622 iir = I915_READ(IIR); 3623 3624 for (;;) { 3625 bool blc_event = false; 3626 3627 irq_received = (iir & ~flip_mask) != 0; 3628 3629 /* Can't rely on pipestat interrupt bit in iir as it might 3630 * have been cleared after the pipestat interrupt was received. 3631 * It doesn't set the bit in iir again, but it still produces 3632 * interrupts (for non-MSI). 3633 */ 3634 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3635 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 3636 i915_handle_error(dev, false); 3637 3638 for_each_pipe(pipe) { 3639 int reg = PIPESTAT(pipe); 3640 pipe_stats[pipe] = I915_READ(reg); 3641 3642 /* 3643 * Clear the PIPE*STAT regs before the IIR 3644 */ 3645 if (pipe_stats[pipe] & 0x8000ffff) { 3646 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 3647 DRM_DEBUG_DRIVER("pipe %c underrun\n", 3648 pipe_name(pipe)); 3649 I915_WRITE(reg, pipe_stats[pipe]); 3650 irq_received = 1; 3651 } 3652 } 3653 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3654 3655 if (!irq_received) 3656 break; 3657 3658 ret = IRQ_HANDLED; 3659 3660 /* Consume port. 
		/* Consume port.  Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
			u32 hotplug_trigger = hotplug_status & (IS_G4X(dev) ?
								HOTPLUG_INT_STATUS_G4X :
								HOTPLUG_INT_STATUS_I915);

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);

			intel_hpd_irq_handler(dev, hotplug_trigger,
					      IS_G4X(dev) ? hpd_status_g4x : hpd_status_i915);

			if (IS_G4X(dev) &&
			    (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X))
				dp_aux_irq_handler(dev);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			I915_READ(PORT_HOTPLUG_STAT);
		}

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);
		if (iir & I915_BSD_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[VCS]);

		for_each_pipe(pipe) {
			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev, pipe, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev, pipe);
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
			gmbus_irq_handler(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero.  If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read.  This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
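		 *
		 * Looping with the IIR value latched after the write above
		 * means any such newly set bit is picked up on the next
		 * pass; the loop only exits once nothing outside flip_mask
		 * (and no pipe status bits) is left pending.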
		 */
		iir = new_iir;
	}

	i915_update_dri1_breadcrumb(dev);

	return ret;
}

static void i965_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	del_timer_sync(&dev_priv->hotplug_reenable_timer);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xffffffff);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe),
			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
	I915_WRITE(IIR, I915_READ(IIR));
}

static void i915_reenable_hotplug_timer_func(unsigned long data)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *)data;
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	unsigned long irqflags;
	int i;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
		struct drm_connector *connector;

		if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED)
			continue;

		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;

		list_for_each_entry(connector, &mode_config->connector_list, head) {
			struct intel_connector *intel_connector = to_intel_connector(connector);

			if (intel_connector->encoder->hpd_pin == i) {
				if (connector->polled != intel_connector->polled)
					DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
							 drm_get_connector_name(connector));
				connector->polled = intel_connector->polled;
				if (!connector->polled)
					connector->polled = DRM_CONNECTOR_POLL_HPD;
			}
		}
	}
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

void intel_irq_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
	INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);

	setup_timer(&dev_priv->gpu_error.hangcheck_timer,
		    i915_hangcheck_elapsed,
		    (unsigned long) dev);
	setup_timer(&dev_priv->hotplug_reenable_timer, i915_reenable_hotplug_timer_func,
		    (unsigned long) dev_priv);

	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);

	if (IS_GEN2(dev)) {
		dev->max_vblank_count = 0;
		dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
	} else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
	} else {
		dev->driver->get_vblank_counter = i915_get_vblank_counter;
		dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
		dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
	}

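	/*
	 * Hook up the platform specific IRQ entry points.  Each platform
	 * family gets its own preinstall/postinstall/handler/uninstall set
	 * and, where hotplug is supported, an hpd_irq_setup callback.
	 */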
	if (IS_VALLEYVIEW(dev)) {
		dev->driver->irq_handler = valleyview_irq_handler;
		dev->driver->irq_preinstall = valleyview_irq_preinstall;
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
		dev->driver->irq_uninstall = valleyview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (IS_GEN8(dev)) {
		dev->driver->irq_handler = gen8_irq_handler;
		dev->driver->irq_preinstall = gen8_irq_preinstall;
		dev->driver->irq_postinstall = gen8_irq_postinstall;
		dev->driver->irq_uninstall = gen8_irq_uninstall;
		dev->driver->enable_vblank = gen8_enable_vblank;
		dev->driver->disable_vblank = gen8_disable_vblank;
		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
	} else if (HAS_PCH_SPLIT(dev)) {
		dev->driver->irq_handler = ironlake_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_preinstall;
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ironlake_enable_vblank;
		dev->driver->disable_vblank = ironlake_disable_vblank;
		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
	} else {
		if (INTEL_INFO(dev)->gen == 2) {
			dev->driver->irq_preinstall = i8xx_irq_preinstall;
			dev->driver->irq_postinstall = i8xx_irq_postinstall;
			dev->driver->irq_handler = i8xx_irq_handler;
			dev->driver->irq_uninstall = i8xx_irq_uninstall;
		} else if (INTEL_INFO(dev)->gen == 3) {
			dev->driver->irq_preinstall = i915_irq_preinstall;
			dev->driver->irq_postinstall = i915_irq_postinstall;
			dev->driver->irq_uninstall = i915_irq_uninstall;
			dev->driver->irq_handler = i915_irq_handler;
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		} else {
			dev->driver->irq_preinstall = i965_irq_preinstall;
			dev->driver->irq_postinstall = i965_irq_postinstall;
			dev->driver->irq_uninstall = i965_irq_uninstall;
			dev->driver->irq_handler = i965_irq_handler;
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		}
		dev->driver->enable_vblank = i915_enable_vblank;
		dev->driver->disable_vblank = i915_disable_vblank;
	}
}

void intel_hpd_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_connector *connector;
	unsigned long irqflags;
	int i;

	for (i = 1; i < HPD_NUM_PINS; i++) {
		dev_priv->hpd_stats[i].hpd_cnt = 0;
		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
	}
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		struct intel_connector *intel_connector = to_intel_connector(connector);
		connector->polled = intel_connector->polled;
		if (!connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
			connector->polled = DRM_CONNECTOR_POLL_HPD;
	}

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked checks happy. */
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

/* Disable interrupts so we can allow Package C8+.
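 *
 * The relevant interrupt mask/enable registers are saved into
 * dev_priv->pc8.regsave and everything is then masked off;
 * hsw_pc8_restore_interrupts() below reverses this when leaving PC8.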
 */
void hsw_pc8_disable_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	dev_priv->pc8.regsave.deimr = I915_READ(DEIMR);
	dev_priv->pc8.regsave.sdeimr = I915_READ(SDEIMR);
	dev_priv->pc8.regsave.gtimr = I915_READ(GTIMR);
	dev_priv->pc8.regsave.gtier = I915_READ(GTIER);
	dev_priv->pc8.regsave.gen6_pmimr = I915_READ(GEN6_PMIMR);

	ironlake_disable_display_irq(dev_priv, 0xffffffff);
	ibx_disable_display_interrupt(dev_priv, 0xffffffff);
	ilk_disable_gt_irq(dev_priv, 0xffffffff);
	snb_disable_pm_irq(dev_priv, 0xffffffff);

	dev_priv->pc8.irqs_disabled = true;

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

/* Restore interrupts so we can recover from Package C8+. */
void hsw_pc8_restore_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;
	uint32_t val;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	val = I915_READ(DEIMR);
	WARN(val != 0xffffffff, "DEIMR is 0x%08x\n", val);

	val = I915_READ(SDEIMR);
	WARN(val != 0xffffffff, "SDEIMR is 0x%08x\n", val);

	val = I915_READ(GTIMR);
	WARN(val != 0xffffffff, "GTIMR is 0x%08x\n", val);

	val = I915_READ(GEN6_PMIMR);
	WARN(val != 0xffffffff, "GEN6_PMIMR is 0x%08x\n", val);

	dev_priv->pc8.irqs_disabled = false;

	ironlake_enable_display_irq(dev_priv, ~dev_priv->pc8.regsave.deimr);
	ibx_enable_display_interrupt(dev_priv, ~dev_priv->pc8.regsave.sdeimr);
	ilk_enable_gt_irq(dev_priv, ~dev_priv->pc8.regsave.gtimr);
	snb_enable_pm_irq(dev_priv, ~dev_priv->pc8.regsave.gen6_pmimr);
	I915_WRITE(GTIER, dev_priv->pc8.regsave.gtier);

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}