/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

static const u32 hpd_ibx[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_mask_i915[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_g4x[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

/* For display hotplug interrupt */
static void
ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->pc8.irqs_disabled) {
		WARN(1, "IRQs disabled\n");
		dev_priv->pc8.regsave.deimr &= ~mask;
		return;
	}

	if ((dev_priv->irq_mask & mask) != 0) {
		dev_priv->irq_mask &= ~mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

static void
ironlake_disable_display_irq(drm_i915_private_t
*dev_priv, u32 mask) 104 { 105 assert_spin_locked(&dev_priv->irq_lock); 106 107 if (dev_priv->pc8.irqs_disabled) { 108 WARN(1, "IRQs disabled\n"); 109 dev_priv->pc8.regsave.deimr |= mask; 110 return; 111 } 112 113 if ((dev_priv->irq_mask & mask) != mask) { 114 dev_priv->irq_mask |= mask; 115 I915_WRITE(DEIMR, dev_priv->irq_mask); 116 POSTING_READ(DEIMR); 117 } 118 } 119 120 /** 121 * ilk_update_gt_irq - update GTIMR 122 * @dev_priv: driver private 123 * @interrupt_mask: mask of interrupt bits to update 124 * @enabled_irq_mask: mask of interrupt bits to enable 125 */ 126 static void ilk_update_gt_irq(struct drm_i915_private *dev_priv, 127 uint32_t interrupt_mask, 128 uint32_t enabled_irq_mask) 129 { 130 assert_spin_locked(&dev_priv->irq_lock); 131 132 if (dev_priv->pc8.irqs_disabled) { 133 WARN(1, "IRQs disabled\n"); 134 dev_priv->pc8.regsave.gtimr &= ~interrupt_mask; 135 dev_priv->pc8.regsave.gtimr |= (~enabled_irq_mask & 136 interrupt_mask); 137 return; 138 } 139 140 dev_priv->gt_irq_mask &= ~interrupt_mask; 141 dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask); 142 I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 143 POSTING_READ(GTIMR); 144 } 145 146 void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask) 147 { 148 ilk_update_gt_irq(dev_priv, mask, mask); 149 } 150 151 void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask) 152 { 153 ilk_update_gt_irq(dev_priv, mask, 0); 154 } 155 156 /** 157 * snb_update_pm_irq - update GEN6_PMIMR 158 * @dev_priv: driver private 159 * @interrupt_mask: mask of interrupt bits to update 160 * @enabled_irq_mask: mask of interrupt bits to enable 161 */ 162 static void snb_update_pm_irq(struct drm_i915_private *dev_priv, 163 uint32_t interrupt_mask, 164 uint32_t enabled_irq_mask) 165 { 166 uint32_t new_val; 167 168 assert_spin_locked(&dev_priv->irq_lock); 169 170 if (dev_priv->pc8.irqs_disabled) { 171 WARN(1, "IRQs disabled\n"); 172 dev_priv->pc8.regsave.gen6_pmimr &= ~interrupt_mask; 173 dev_priv->pc8.regsave.gen6_pmimr |= (~enabled_irq_mask & 174 interrupt_mask); 175 return; 176 } 177 178 new_val = dev_priv->pm_irq_mask; 179 new_val &= ~interrupt_mask; 180 new_val |= (~enabled_irq_mask & interrupt_mask); 181 182 if (new_val != dev_priv->pm_irq_mask) { 183 dev_priv->pm_irq_mask = new_val; 184 I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask); 185 POSTING_READ(GEN6_PMIMR); 186 } 187 } 188 189 void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask) 190 { 191 snb_update_pm_irq(dev_priv, mask, mask); 192 } 193 194 void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask) 195 { 196 snb_update_pm_irq(dev_priv, mask, 0); 197 } 198 199 static bool ivb_can_enable_err_int(struct drm_device *dev) 200 { 201 struct drm_i915_private *dev_priv = dev->dev_private; 202 struct intel_crtc *crtc; 203 enum pipe pipe; 204 205 assert_spin_locked(&dev_priv->irq_lock); 206 207 for_each_pipe(pipe) { 208 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); 209 210 if (crtc->cpu_fifo_underrun_disabled) 211 return false; 212 } 213 214 return true; 215 } 216 217 static bool cpt_can_enable_serr_int(struct drm_device *dev) 218 { 219 struct drm_i915_private *dev_priv = dev->dev_private; 220 enum pipe pipe; 221 struct intel_crtc *crtc; 222 223 assert_spin_locked(&dev_priv->irq_lock); 224 225 for_each_pipe(pipe) { 226 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); 227 228 if (crtc->pch_fifo_underrun_disabled) 229 return false; 230 } 231 232 return true; 233 } 234 235 static void 
ironlake_set_fifo_underrun_reporting(struct drm_device *dev, 236 enum pipe pipe, bool enable) 237 { 238 struct drm_i915_private *dev_priv = dev->dev_private; 239 uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN : 240 DE_PIPEB_FIFO_UNDERRUN; 241 242 if (enable) 243 ironlake_enable_display_irq(dev_priv, bit); 244 else 245 ironlake_disable_display_irq(dev_priv, bit); 246 } 247 248 static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev, 249 enum pipe pipe, bool enable) 250 { 251 struct drm_i915_private *dev_priv = dev->dev_private; 252 if (enable) { 253 I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe)); 254 255 if (!ivb_can_enable_err_int(dev)) 256 return; 257 258 ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB); 259 } else { 260 bool was_enabled = !(I915_READ(DEIMR) & DE_ERR_INT_IVB); 261 262 /* Change the state _after_ we've read out the current one. */ 263 ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB); 264 265 if (!was_enabled && 266 (I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe))) { 267 DRM_DEBUG_KMS("uncleared fifo underrun on pipe %c\n", 268 pipe_name(pipe)); 269 } 270 } 271 } 272 273 static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev, 274 enum pipe pipe, bool enable) 275 { 276 struct drm_i915_private *dev_priv = dev->dev_private; 277 278 assert_spin_locked(&dev_priv->irq_lock); 279 280 if (enable) 281 dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_FIFO_UNDERRUN; 282 else 283 dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_FIFO_UNDERRUN; 284 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); 285 POSTING_READ(GEN8_DE_PIPE_IMR(pipe)); 286 } 287 288 /** 289 * ibx_display_interrupt_update - update SDEIMR 290 * @dev_priv: driver private 291 * @interrupt_mask: mask of interrupt bits to update 292 * @enabled_irq_mask: mask of interrupt bits to enable 293 */ 294 static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv, 295 uint32_t interrupt_mask, 296 uint32_t enabled_irq_mask) 297 { 298 uint32_t sdeimr = I915_READ(SDEIMR); 299 sdeimr &= ~interrupt_mask; 300 sdeimr |= (~enabled_irq_mask & interrupt_mask); 301 302 assert_spin_locked(&dev_priv->irq_lock); 303 304 if (dev_priv->pc8.irqs_disabled && 305 (interrupt_mask & SDE_HOTPLUG_MASK_CPT)) { 306 WARN(1, "IRQs disabled\n"); 307 dev_priv->pc8.regsave.sdeimr &= ~interrupt_mask; 308 dev_priv->pc8.regsave.sdeimr |= (~enabled_irq_mask & 309 interrupt_mask); 310 return; 311 } 312 313 I915_WRITE(SDEIMR, sdeimr); 314 POSTING_READ(SDEIMR); 315 } 316 #define ibx_enable_display_interrupt(dev_priv, bits) \ 317 ibx_display_interrupt_update((dev_priv), (bits), (bits)) 318 #define ibx_disable_display_interrupt(dev_priv, bits) \ 319 ibx_display_interrupt_update((dev_priv), (bits), 0) 320 321 static void ibx_set_fifo_underrun_reporting(struct drm_device *dev, 322 enum transcoder pch_transcoder, 323 bool enable) 324 { 325 struct drm_i915_private *dev_priv = dev->dev_private; 326 uint32_t bit = (pch_transcoder == TRANSCODER_A) ? 
					SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;

	if (enable)
		ibx_enable_display_interrupt(dev_priv, bit);
	else
		ibx_disable_display_interrupt(dev_priv, bit);
}

static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
					    enum transcoder pch_transcoder,
					    bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (enable) {
		I915_WRITE(SERR_INT,
			   SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));

		if (!cpt_can_enable_serr_int(dev))
			return;

		ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT);
	} else {
		uint32_t tmp = I915_READ(SERR_INT);
		bool was_enabled = !(I915_READ(SDEIMR) & SDE_ERROR_CPT);

		/* Change the state _after_ we've read out the current one. */
		ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT);

		if (!was_enabled &&
		    (tmp & SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder))) {
			DRM_DEBUG_KMS("uncleared pch fifo underrun on pch transcoder %c\n",
				      transcoder_name(pch_transcoder));
		}
	}
}

/**
 * intel_set_cpu_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pipe: pipe
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable CPU fifo underruns for a specific
 * pipe. Notice that on some Gens (e.g. IVB, HSW), disabling FIFO underrun
 * reporting for one pipe may also disable all the other CPU error interrupts
 * for the other pipes, due to the fact that there's just one interrupt
 * mask/enable bit for all the pipes.
 *
 * Returns the previous state of underrun reporting.
 */
bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
					   enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);

	ret = !intel_crtc->cpu_fifo_underrun_disabled;

	if (enable == ret)
		goto done;

	intel_crtc->cpu_fifo_underrun_disabled = !enable;

	if (IS_GEN5(dev) || IS_GEN6(dev))
		ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
	else if (IS_GEN7(dev))
		ivybridge_set_fifo_underrun_reporting(dev, pipe, enable);
	else if (IS_GEN8(dev))
		broadwell_set_fifo_underrun_reporting(dev, pipe, enable);

done:
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
	return ret;
}

/**
 * intel_set_pch_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable PCH fifo underruns for a specific
 * PCH transcoder. Notice that on some PCHs (e.g. CPT/PPT), disabling FIFO
 * underrun reporting for one transcoder may also disable all the other PCH
 * error interrupts for the other transcoders, due to the fact that there's
 * just one interrupt mask/enable bit for all the transcoders.
 *
 * Returns the previous state of underrun reporting.
421 */ 422 bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev, 423 enum transcoder pch_transcoder, 424 bool enable) 425 { 426 struct drm_i915_private *dev_priv = dev->dev_private; 427 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder]; 428 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 429 unsigned long flags; 430 bool ret; 431 432 /* 433 * NOTE: Pre-LPT has a fixed cpu pipe -> pch transcoder mapping, but LPT 434 * has only one pch transcoder A that all pipes can use. To avoid racy 435 * pch transcoder -> pipe lookups from interrupt code simply store the 436 * underrun statistics in crtc A. Since we never expose this anywhere 437 * nor use it outside of the fifo underrun code here using the "wrong" 438 * crtc on LPT won't cause issues. 439 */ 440 441 spin_lock_irqsave(&dev_priv->irq_lock, flags); 442 443 ret = !intel_crtc->pch_fifo_underrun_disabled; 444 445 if (enable == ret) 446 goto done; 447 448 intel_crtc->pch_fifo_underrun_disabled = !enable; 449 450 if (HAS_PCH_IBX(dev)) 451 ibx_set_fifo_underrun_reporting(dev, pch_transcoder, enable); 452 else 453 cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable); 454 455 done: 456 spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 457 return ret; 458 } 459 460 461 void 462 i915_enable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask) 463 { 464 u32 reg = PIPESTAT(pipe); 465 u32 pipestat = I915_READ(reg) & 0x7fff0000; 466 467 assert_spin_locked(&dev_priv->irq_lock); 468 469 if ((pipestat & mask) == mask) 470 return; 471 472 /* Enable the interrupt, clear any pending status */ 473 pipestat |= mask | (mask >> 16); 474 I915_WRITE(reg, pipestat); 475 POSTING_READ(reg); 476 } 477 478 void 479 i915_disable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask) 480 { 481 u32 reg = PIPESTAT(pipe); 482 u32 pipestat = I915_READ(reg) & 0x7fff0000; 483 484 assert_spin_locked(&dev_priv->irq_lock); 485 486 if ((pipestat & mask) == 0) 487 return; 488 489 pipestat &= ~mask; 490 I915_WRITE(reg, pipestat); 491 POSTING_READ(reg); 492 } 493 494 /** 495 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion 496 */ 497 static void i915_enable_asle_pipestat(struct drm_device *dev) 498 { 499 drm_i915_private_t *dev_priv = dev->dev_private; 500 unsigned long irqflags; 501 502 if (!dev_priv->opregion.asle || !IS_MOBILE(dev)) 503 return; 504 505 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 506 507 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_ENABLE); 508 if (INTEL_INFO(dev)->gen >= 4) 509 i915_enable_pipestat(dev_priv, PIPE_A, 510 PIPE_LEGACY_BLC_EVENT_ENABLE); 511 512 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 513 } 514 515 /** 516 * i915_pipe_enabled - check if a pipe is enabled 517 * @dev: DRM device 518 * @pipe: pipe to check 519 * 520 * Reading certain registers when the pipe is disabled can hang the chip. 521 * Use this routine to make sure the PLL is running and the pipe is active 522 * before reading such registers if unsure. 523 */ 524 static int 525 i915_pipe_enabled(struct drm_device *dev, int pipe) 526 { 527 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 528 529 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 530 /* Locking is horribly broken here, but whatever. 
*/ 531 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 532 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 533 534 return intel_crtc->active; 535 } else { 536 return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE; 537 } 538 } 539 540 static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe) 541 { 542 /* Gen2 doesn't have a hardware frame counter */ 543 return 0; 544 } 545 546 /* Called from drm generic code, passed a 'crtc', which 547 * we use as a pipe index 548 */ 549 static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe) 550 { 551 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 552 unsigned long high_frame; 553 unsigned long low_frame; 554 u32 high1, high2, low, pixel, vbl_start; 555 556 if (!i915_pipe_enabled(dev, pipe)) { 557 DRM_DEBUG_DRIVER("trying to get vblank count for disabled " 558 "pipe %c\n", pipe_name(pipe)); 559 return 0; 560 } 561 562 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 563 struct intel_crtc *intel_crtc = 564 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); 565 const struct drm_display_mode *mode = 566 &intel_crtc->config.adjusted_mode; 567 568 vbl_start = mode->crtc_vblank_start * mode->crtc_htotal; 569 } else { 570 enum transcoder cpu_transcoder = (enum transcoder) pipe; 571 u32 htotal; 572 573 htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1; 574 vbl_start = (I915_READ(VBLANK(cpu_transcoder)) & 0x1fff) + 1; 575 576 vbl_start *= htotal; 577 } 578 579 high_frame = PIPEFRAME(pipe); 580 low_frame = PIPEFRAMEPIXEL(pipe); 581 582 /* 583 * High & low register fields aren't synchronized, so make sure 584 * we get a low value that's stable across two reads of the high 585 * register. 586 */ 587 do { 588 high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK; 589 low = I915_READ(low_frame); 590 high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK; 591 } while (high1 != high2); 592 593 high1 >>= PIPE_FRAME_HIGH_SHIFT; 594 pixel = low & PIPE_PIXEL_MASK; 595 low >>= PIPE_FRAME_LOW_SHIFT; 596 597 /* 598 * The frame counter increments at beginning of active. 599 * Cook up a vblank counter by also checking the pixel 600 * counter against vblank start. 601 */ 602 return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff; 603 } 604 605 static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe) 606 { 607 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 608 int reg = PIPE_FRMCOUNT_GM45(pipe); 609 610 if (!i915_pipe_enabled(dev, pipe)) { 611 DRM_DEBUG_DRIVER("trying to get vblank count for disabled " 612 "pipe %c\n", pipe_name(pipe)); 613 return 0; 614 } 615 616 return I915_READ(reg); 617 } 618 619 /* raw reads, only for fast reads of display block, no need for forcewake etc. 
*/ 620 #define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__)) 621 622 static bool ilk_pipe_in_vblank_locked(struct drm_device *dev, enum pipe pipe) 623 { 624 struct drm_i915_private *dev_priv = dev->dev_private; 625 uint32_t status; 626 int reg; 627 628 if (INTEL_INFO(dev)->gen >= 8) { 629 status = GEN8_PIPE_VBLANK; 630 reg = GEN8_DE_PIPE_ISR(pipe); 631 } else if (INTEL_INFO(dev)->gen >= 7) { 632 status = DE_PIPE_VBLANK_IVB(pipe); 633 reg = DEISR; 634 } else { 635 status = DE_PIPE_VBLANK(pipe); 636 reg = DEISR; 637 } 638 639 return __raw_i915_read32(dev_priv, reg) & status; 640 } 641 642 static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe, 643 unsigned int flags, int *vpos, int *hpos, 644 ktime_t *stime, ktime_t *etime) 645 { 646 struct drm_i915_private *dev_priv = dev->dev_private; 647 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 648 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 649 const struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode; 650 int position; 651 int vbl_start, vbl_end, htotal, vtotal; 652 bool in_vbl = true; 653 int ret = 0; 654 unsigned long irqflags; 655 656 if (!intel_crtc->active) { 657 DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled " 658 "pipe %c\n", pipe_name(pipe)); 659 return 0; 660 } 661 662 htotal = mode->crtc_htotal; 663 vtotal = mode->crtc_vtotal; 664 vbl_start = mode->crtc_vblank_start; 665 vbl_end = mode->crtc_vblank_end; 666 667 if (mode->flags & DRM_MODE_FLAG_INTERLACE) { 668 vbl_start = DIV_ROUND_UP(vbl_start, 2); 669 vbl_end /= 2; 670 vtotal /= 2; 671 } 672 673 ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE; 674 675 /* 676 * Lock uncore.lock, as we will do multiple timing critical raw 677 * register reads, potentially with preemption disabled, so the 678 * following code must not block on uncore.lock. 679 */ 680 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 681 682 /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */ 683 684 /* Get optional system timestamp before query. */ 685 if (stime) 686 *stime = ktime_get(); 687 688 if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) { 689 /* No obvious pixelcount register. Only query vertical 690 * scanout position from Display scan line register. 691 */ 692 if (IS_GEN2(dev)) 693 position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2; 694 else 695 position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3; 696 697 if (HAS_DDI(dev)) { 698 /* 699 * On HSW HDMI outputs there seems to be a 2 line 700 * difference, whereas eDP has the normal 1 line 701 * difference that earlier platforms have. External 702 * DP is unknown. For now just check for the 2 line 703 * difference case on all output types on HSW+. 704 * 705 * This might misinterpret the scanline counter being 706 * one line too far along on eDP, but that's less 707 * dangerous than the alternative since that would lead 708 * the vblank timestamp code astray when it sees a 709 * scanline count before vblank_start during a vblank 710 * interrupt. 711 */ 712 in_vbl = ilk_pipe_in_vblank_locked(dev, pipe); 713 if ((in_vbl && (position == vbl_start - 2 || 714 position == vbl_start - 1)) || 715 (!in_vbl && (position == vbl_end - 2 || 716 position == vbl_end - 1))) 717 position = (position + 2) % vtotal; 718 } else if (HAS_PCH_SPLIT(dev)) { 719 /* 720 * The scanline counter increments at the leading edge 721 * of hsync, ie. it completely misses the active portion 722 * of the line. 
Fix up the counter at both edges of vblank 723 * to get a more accurate picture whether we're in vblank 724 * or not. 725 */ 726 in_vbl = ilk_pipe_in_vblank_locked(dev, pipe); 727 if ((in_vbl && position == vbl_start - 1) || 728 (!in_vbl && position == vbl_end - 1)) 729 position = (position + 1) % vtotal; 730 } else { 731 /* 732 * ISR vblank status bits don't work the way we'd want 733 * them to work on non-PCH platforms (for 734 * ilk_pipe_in_vblank_locked()), and there doesn't 735 * appear any other way to determine if we're currently 736 * in vblank. 737 * 738 * Instead let's assume that we're already in vblank if 739 * we got called from the vblank interrupt and the 740 * scanline counter value indicates that we're on the 741 * line just prior to vblank start. This should result 742 * in the correct answer, unless the vblank interrupt 743 * delivery really got delayed for almost exactly one 744 * full frame/field. 745 */ 746 if (flags & DRM_CALLED_FROM_VBLIRQ && 747 position == vbl_start - 1) { 748 position = (position + 1) % vtotal; 749 750 /* Signal this correction as "applied". */ 751 ret |= 0x8; 752 } 753 } 754 } else { 755 /* Have access to pixelcount since start of frame. 756 * We can split this into vertical and horizontal 757 * scanout position. 758 */ 759 position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT; 760 761 /* convert to pixel counts */ 762 vbl_start *= htotal; 763 vbl_end *= htotal; 764 vtotal *= htotal; 765 } 766 767 /* Get optional system timestamp after query. */ 768 if (etime) 769 *etime = ktime_get(); 770 771 /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */ 772 773 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 774 775 in_vbl = position >= vbl_start && position < vbl_end; 776 777 /* 778 * While in vblank, position will be negative 779 * counting up towards 0 at vbl_end. And outside 780 * vblank, position will be positive counting 781 * up since vbl_end. 782 */ 783 if (position >= vbl_start) 784 position -= vbl_end; 785 else 786 position += vtotal - vbl_end; 787 788 if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) { 789 *vpos = position; 790 *hpos = 0; 791 } else { 792 *vpos = position / htotal; 793 *hpos = position - (*vpos * htotal); 794 } 795 796 /* In vblank? 
*/ 797 if (in_vbl) 798 ret |= DRM_SCANOUTPOS_INVBL; 799 800 return ret; 801 } 802 803 static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe, 804 int *max_error, 805 struct timeval *vblank_time, 806 unsigned flags) 807 { 808 struct drm_crtc *crtc; 809 810 if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) { 811 DRM_ERROR("Invalid crtc %d\n", pipe); 812 return -EINVAL; 813 } 814 815 /* Get drm_crtc to timestamp: */ 816 crtc = intel_get_crtc_for_pipe(dev, pipe); 817 if (crtc == NULL) { 818 DRM_ERROR("Invalid crtc %d\n", pipe); 819 return -EINVAL; 820 } 821 822 if (!crtc->enabled) { 823 DRM_DEBUG_KMS("crtc %d is disabled\n", pipe); 824 return -EBUSY; 825 } 826 827 /* Helper routine in DRM core does all the work: */ 828 return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error, 829 vblank_time, flags, 830 crtc, 831 &to_intel_crtc(crtc)->config.adjusted_mode); 832 } 833 834 static bool intel_hpd_irq_event(struct drm_device *dev, 835 struct drm_connector *connector) 836 { 837 enum drm_connector_status old_status; 838 839 WARN_ON(!mutex_is_locked(&dev->mode_config.mutex)); 840 old_status = connector->status; 841 842 connector->status = connector->funcs->detect(connector, false); 843 if (old_status == connector->status) 844 return false; 845 846 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n", 847 connector->base.id, 848 drm_get_connector_name(connector), 849 drm_get_connector_status_name(old_status), 850 drm_get_connector_status_name(connector->status)); 851 852 return true; 853 } 854 855 /* 856 * Handle hotplug events outside the interrupt handler proper. 857 */ 858 #define I915_REENABLE_HOTPLUG_DELAY (2*60*1000) 859 860 static void i915_hotplug_work_func(struct work_struct *work) 861 { 862 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, 863 hotplug_work); 864 struct drm_device *dev = dev_priv->dev; 865 struct drm_mode_config *mode_config = &dev->mode_config; 866 struct intel_connector *intel_connector; 867 struct intel_encoder *intel_encoder; 868 struct drm_connector *connector; 869 unsigned long irqflags; 870 bool hpd_disabled = false; 871 bool changed = false; 872 u32 hpd_event_bits; 873 874 /* HPD irq before everything is fully set up. 
*/ 875 if (!dev_priv->enable_hotplug_processing) 876 return; 877 878 mutex_lock(&mode_config->mutex); 879 DRM_DEBUG_KMS("running encoder hotplug functions\n"); 880 881 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 882 883 hpd_event_bits = dev_priv->hpd_event_bits; 884 dev_priv->hpd_event_bits = 0; 885 list_for_each_entry(connector, &mode_config->connector_list, head) { 886 intel_connector = to_intel_connector(connector); 887 intel_encoder = intel_connector->encoder; 888 if (intel_encoder->hpd_pin > HPD_NONE && 889 dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED && 890 connector->polled == DRM_CONNECTOR_POLL_HPD) { 891 DRM_INFO("HPD interrupt storm detected on connector %s: " 892 "switching from hotplug detection to polling\n", 893 drm_get_connector_name(connector)); 894 dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED; 895 connector->polled = DRM_CONNECTOR_POLL_CONNECT 896 | DRM_CONNECTOR_POLL_DISCONNECT; 897 hpd_disabled = true; 898 } 899 if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) { 900 DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n", 901 drm_get_connector_name(connector), intel_encoder->hpd_pin); 902 } 903 } 904 /* if there were no outputs to poll, poll was disabled, 905 * therefore make sure it's enabled when disabling HPD on 906 * some connectors */ 907 if (hpd_disabled) { 908 drm_kms_helper_poll_enable(dev); 909 mod_timer(&dev_priv->hotplug_reenable_timer, 910 jiffies + msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY)); 911 } 912 913 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 914 915 list_for_each_entry(connector, &mode_config->connector_list, head) { 916 intel_connector = to_intel_connector(connector); 917 intel_encoder = intel_connector->encoder; 918 if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) { 919 if (intel_encoder->hot_plug) 920 intel_encoder->hot_plug(intel_encoder); 921 if (intel_hpd_irq_event(dev, connector)) 922 changed = true; 923 } 924 } 925 mutex_unlock(&mode_config->mutex); 926 927 if (changed) 928 drm_kms_helper_hotplug_event(dev); 929 } 930 931 static void ironlake_rps_change_irq_handler(struct drm_device *dev) 932 { 933 drm_i915_private_t *dev_priv = dev->dev_private; 934 u32 busy_up, busy_down, max_avg, min_avg; 935 u8 new_delay; 936 937 spin_lock(&mchdev_lock); 938 939 I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS)); 940 941 new_delay = dev_priv->ips.cur_delay; 942 943 I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG); 944 busy_up = I915_READ(RCPREVBSYTUPAVG); 945 busy_down = I915_READ(RCPREVBSYTDNAVG); 946 max_avg = I915_READ(RCBMAXAVG); 947 min_avg = I915_READ(RCBMINAVG); 948 949 /* Handle RCS change request from hw */ 950 if (busy_up > max_avg) { 951 if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay) 952 new_delay = dev_priv->ips.cur_delay - 1; 953 if (new_delay < dev_priv->ips.max_delay) 954 new_delay = dev_priv->ips.max_delay; 955 } else if (busy_down < min_avg) { 956 if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay) 957 new_delay = dev_priv->ips.cur_delay + 1; 958 if (new_delay > dev_priv->ips.min_delay) 959 new_delay = dev_priv->ips.min_delay; 960 } 961 962 if (ironlake_set_drps(dev, new_delay)) 963 dev_priv->ips.cur_delay = new_delay; 964 965 spin_unlock(&mchdev_lock); 966 967 return; 968 } 969 970 static void notify_ring(struct drm_device *dev, 971 struct intel_ring_buffer *ring) 972 { 973 if (ring->obj == NULL) 974 return; 975 976 trace_i915_gem_request_complete(ring); 977 978 wake_up_all(&ring->irq_queue); 979 i915_queue_hangcheck(dev); 980 } 981 982 static void 
gen6_pm_rps_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    rps.work);
	u32 pm_iir;
	int new_delay, adj;

	spin_lock_irq(&dev_priv->irq_lock);
	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	/* Make sure not to corrupt PMIMR state used by ringbuffer code */
	snb_enable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS);
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Make sure we didn't queue anything we're not going to process. */
	WARN_ON(pm_iir & ~GEN6_PM_RPS_EVENTS);

	if ((pm_iir & GEN6_PM_RPS_EVENTS) == 0)
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	adj = dev_priv->rps.last_adj;
	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else
			adj = 1;
		new_delay = dev_priv->rps.cur_delay + adj;

		/*
		 * For better performance, jump directly
		 * to RPe if we're below it.
		 */
		if (new_delay < dev_priv->rps.rpe_delay)
			new_delay = dev_priv->rps.rpe_delay;
	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
		if (dev_priv->rps.cur_delay > dev_priv->rps.rpe_delay)
			new_delay = dev_priv->rps.rpe_delay;
		else
			new_delay = dev_priv->rps.min_delay;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
		if (adj < 0)
			adj *= 2;
		else
			adj = -1;
		new_delay = dev_priv->rps.cur_delay + adj;
	} else { /* unknown event */
		new_delay = dev_priv->rps.cur_delay;
	}

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	new_delay = clamp_t(int, new_delay,
			    dev_priv->rps.min_delay, dev_priv->rps.max_delay);
	dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_delay;

	if (IS_VALLEYVIEW(dev_priv->dev))
		valleyview_set_rps(dev_priv->dev, new_delay);
	else
		gen6_set_rps(dev_priv->dev, new_delay);

	mutex_unlock(&dev_priv->rps.hw_lock);
}


/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * the same row is likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	uint32_t misccpctl;
	unsigned long flags;
	uint8_t slice = 0;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
1072 */ 1073 mutex_lock(&dev_priv->dev->struct_mutex); 1074 1075 /* If we've screwed up tracking, just let the interrupt fire again */ 1076 if (WARN_ON(!dev_priv->l3_parity.which_slice)) 1077 goto out; 1078 1079 misccpctl = I915_READ(GEN7_MISCCPCTL); 1080 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE); 1081 POSTING_READ(GEN7_MISCCPCTL); 1082 1083 while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) { 1084 u32 reg; 1085 1086 slice--; 1087 if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev))) 1088 break; 1089 1090 dev_priv->l3_parity.which_slice &= ~(1<<slice); 1091 1092 reg = GEN7_L3CDERRST1 + (slice * 0x200); 1093 1094 error_status = I915_READ(reg); 1095 row = GEN7_PARITY_ERROR_ROW(error_status); 1096 bank = GEN7_PARITY_ERROR_BANK(error_status); 1097 subbank = GEN7_PARITY_ERROR_SUBBANK(error_status); 1098 1099 I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE); 1100 POSTING_READ(reg); 1101 1102 parity_event[0] = I915_L3_PARITY_UEVENT "=1"; 1103 parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row); 1104 parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank); 1105 parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank); 1106 parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice); 1107 parity_event[5] = NULL; 1108 1109 kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj, 1110 KOBJ_CHANGE, parity_event); 1111 1112 DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n", 1113 slice, row, bank, subbank); 1114 1115 kfree(parity_event[4]); 1116 kfree(parity_event[3]); 1117 kfree(parity_event[2]); 1118 kfree(parity_event[1]); 1119 } 1120 1121 I915_WRITE(GEN7_MISCCPCTL, misccpctl); 1122 1123 out: 1124 WARN_ON(dev_priv->l3_parity.which_slice); 1125 spin_lock_irqsave(&dev_priv->irq_lock, flags); 1126 ilk_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev)); 1127 spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 1128 1129 mutex_unlock(&dev_priv->dev->struct_mutex); 1130 } 1131 1132 static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir) 1133 { 1134 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1135 1136 if (!HAS_L3_DPF(dev)) 1137 return; 1138 1139 spin_lock(&dev_priv->irq_lock); 1140 ilk_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev)); 1141 spin_unlock(&dev_priv->irq_lock); 1142 1143 iir &= GT_PARITY_ERROR(dev); 1144 if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1) 1145 dev_priv->l3_parity.which_slice |= 1 << 1; 1146 1147 if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT) 1148 dev_priv->l3_parity.which_slice |= 1 << 0; 1149 1150 queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work); 1151 } 1152 1153 static void ilk_gt_irq_handler(struct drm_device *dev, 1154 struct drm_i915_private *dev_priv, 1155 u32 gt_iir) 1156 { 1157 if (gt_iir & 1158 (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT)) 1159 notify_ring(dev, &dev_priv->ring[RCS]); 1160 if (gt_iir & ILK_BSD_USER_INTERRUPT) 1161 notify_ring(dev, &dev_priv->ring[VCS]); 1162 } 1163 1164 static void snb_gt_irq_handler(struct drm_device *dev, 1165 struct drm_i915_private *dev_priv, 1166 u32 gt_iir) 1167 { 1168 1169 if (gt_iir & 1170 (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT)) 1171 notify_ring(dev, &dev_priv->ring[RCS]); 1172 if (gt_iir & GT_BSD_USER_INTERRUPT) 1173 notify_ring(dev, &dev_priv->ring[VCS]); 1174 if (gt_iir & GT_BLT_USER_INTERRUPT) 1175 notify_ring(dev, &dev_priv->ring[BCS]); 1176 1177 if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT | 1178 GT_BSD_CS_ERROR_INTERRUPT | 1179 
GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) { 1180 DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir); 1181 i915_handle_error(dev, false); 1182 } 1183 1184 if (gt_iir & GT_PARITY_ERROR(dev)) 1185 ivybridge_parity_error_irq_handler(dev, gt_iir); 1186 } 1187 1188 static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev, 1189 struct drm_i915_private *dev_priv, 1190 u32 master_ctl) 1191 { 1192 u32 rcs, bcs, vcs; 1193 uint32_t tmp = 0; 1194 irqreturn_t ret = IRQ_NONE; 1195 1196 if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) { 1197 tmp = I915_READ(GEN8_GT_IIR(0)); 1198 if (tmp) { 1199 ret = IRQ_HANDLED; 1200 rcs = tmp >> GEN8_RCS_IRQ_SHIFT; 1201 bcs = tmp >> GEN8_BCS_IRQ_SHIFT; 1202 if (rcs & GT_RENDER_USER_INTERRUPT) 1203 notify_ring(dev, &dev_priv->ring[RCS]); 1204 if (bcs & GT_RENDER_USER_INTERRUPT) 1205 notify_ring(dev, &dev_priv->ring[BCS]); 1206 I915_WRITE(GEN8_GT_IIR(0), tmp); 1207 } else 1208 DRM_ERROR("The master control interrupt lied (GT0)!\n"); 1209 } 1210 1211 if (master_ctl & GEN8_GT_VCS1_IRQ) { 1212 tmp = I915_READ(GEN8_GT_IIR(1)); 1213 if (tmp) { 1214 ret = IRQ_HANDLED; 1215 vcs = tmp >> GEN8_VCS1_IRQ_SHIFT; 1216 if (vcs & GT_RENDER_USER_INTERRUPT) 1217 notify_ring(dev, &dev_priv->ring[VCS]); 1218 I915_WRITE(GEN8_GT_IIR(1), tmp); 1219 } else 1220 DRM_ERROR("The master control interrupt lied (GT1)!\n"); 1221 } 1222 1223 if (master_ctl & GEN8_GT_VECS_IRQ) { 1224 tmp = I915_READ(GEN8_GT_IIR(3)); 1225 if (tmp) { 1226 ret = IRQ_HANDLED; 1227 vcs = tmp >> GEN8_VECS_IRQ_SHIFT; 1228 if (vcs & GT_RENDER_USER_INTERRUPT) 1229 notify_ring(dev, &dev_priv->ring[VECS]); 1230 I915_WRITE(GEN8_GT_IIR(3), tmp); 1231 } else 1232 DRM_ERROR("The master control interrupt lied (GT3)!\n"); 1233 } 1234 1235 return ret; 1236 } 1237 1238 #define HPD_STORM_DETECT_PERIOD 1000 1239 #define HPD_STORM_THRESHOLD 5 1240 1241 static inline void intel_hpd_irq_handler(struct drm_device *dev, 1242 u32 hotplug_trigger, 1243 const u32 *hpd) 1244 { 1245 drm_i915_private_t *dev_priv = dev->dev_private; 1246 int i; 1247 bool storm_detected = false; 1248 1249 if (!hotplug_trigger) 1250 return; 1251 1252 spin_lock(&dev_priv->irq_lock); 1253 for (i = 1; i < HPD_NUM_PINS; i++) { 1254 1255 WARN_ONCE(hpd[i] & hotplug_trigger && 1256 dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED, 1257 "Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n", 1258 hotplug_trigger, i, hpd[i]); 1259 1260 if (!(hpd[i] & hotplug_trigger) || 1261 dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED) 1262 continue; 1263 1264 dev_priv->hpd_event_bits |= (1 << i); 1265 if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies, 1266 dev_priv->hpd_stats[i].hpd_last_jiffies 1267 + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) { 1268 dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies; 1269 dev_priv->hpd_stats[i].hpd_cnt = 0; 1270 DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", i); 1271 } else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) { 1272 dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED; 1273 dev_priv->hpd_event_bits &= ~(1 << i); 1274 DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i); 1275 storm_detected = true; 1276 } else { 1277 dev_priv->hpd_stats[i].hpd_cnt++; 1278 DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", i, 1279 dev_priv->hpd_stats[i].hpd_cnt); 1280 } 1281 } 1282 1283 if (storm_detected) 1284 dev_priv->display.hpd_irq_setup(dev); 1285 spin_unlock(&dev_priv->irq_lock); 1286 1287 /* 1288 * Our hotplug handler can grab modeset locks (by calling down into the 1289 * fb 
helpers). Hence it must not be run on our own dev-priv->wq work 1290 * queue for otherwise the flush_work in the pageflip code will 1291 * deadlock. 1292 */ 1293 schedule_work(&dev_priv->hotplug_work); 1294 } 1295 1296 static void gmbus_irq_handler(struct drm_device *dev) 1297 { 1298 struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private; 1299 1300 wake_up_all(&dev_priv->gmbus_wait_queue); 1301 } 1302 1303 static void dp_aux_irq_handler(struct drm_device *dev) 1304 { 1305 struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private; 1306 1307 wake_up_all(&dev_priv->gmbus_wait_queue); 1308 } 1309 1310 #if defined(CONFIG_DEBUG_FS) 1311 static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe, 1312 uint32_t crc0, uint32_t crc1, 1313 uint32_t crc2, uint32_t crc3, 1314 uint32_t crc4) 1315 { 1316 struct drm_i915_private *dev_priv = dev->dev_private; 1317 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe]; 1318 struct intel_pipe_crc_entry *entry; 1319 int head, tail; 1320 1321 spin_lock(&pipe_crc->lock); 1322 1323 if (!pipe_crc->entries) { 1324 spin_unlock(&pipe_crc->lock); 1325 DRM_ERROR("spurious interrupt\n"); 1326 return; 1327 } 1328 1329 head = pipe_crc->head; 1330 tail = pipe_crc->tail; 1331 1332 if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) { 1333 spin_unlock(&pipe_crc->lock); 1334 DRM_ERROR("CRC buffer overflowing\n"); 1335 return; 1336 } 1337 1338 entry = &pipe_crc->entries[head]; 1339 1340 entry->frame = dev->driver->get_vblank_counter(dev, pipe); 1341 entry->crc[0] = crc0; 1342 entry->crc[1] = crc1; 1343 entry->crc[2] = crc2; 1344 entry->crc[3] = crc3; 1345 entry->crc[4] = crc4; 1346 1347 head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1); 1348 pipe_crc->head = head; 1349 1350 spin_unlock(&pipe_crc->lock); 1351 1352 wake_up_interruptible(&pipe_crc->wq); 1353 } 1354 #else 1355 static inline void 1356 display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe, 1357 uint32_t crc0, uint32_t crc1, 1358 uint32_t crc2, uint32_t crc3, 1359 uint32_t crc4) {} 1360 #endif 1361 1362 1363 static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe) 1364 { 1365 struct drm_i915_private *dev_priv = dev->dev_private; 1366 1367 display_pipe_crc_irq_handler(dev, pipe, 1368 I915_READ(PIPE_CRC_RES_1_IVB(pipe)), 1369 0, 0, 0, 0); 1370 } 1371 1372 static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe) 1373 { 1374 struct drm_i915_private *dev_priv = dev->dev_private; 1375 1376 display_pipe_crc_irq_handler(dev, pipe, 1377 I915_READ(PIPE_CRC_RES_1_IVB(pipe)), 1378 I915_READ(PIPE_CRC_RES_2_IVB(pipe)), 1379 I915_READ(PIPE_CRC_RES_3_IVB(pipe)), 1380 I915_READ(PIPE_CRC_RES_4_IVB(pipe)), 1381 I915_READ(PIPE_CRC_RES_5_IVB(pipe))); 1382 } 1383 1384 static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe) 1385 { 1386 struct drm_i915_private *dev_priv = dev->dev_private; 1387 uint32_t res1, res2; 1388 1389 if (INTEL_INFO(dev)->gen >= 3) 1390 res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe)); 1391 else 1392 res1 = 0; 1393 1394 if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) 1395 res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe)); 1396 else 1397 res2 = 0; 1398 1399 display_pipe_crc_irq_handler(dev, pipe, 1400 I915_READ(PIPE_CRC_RES_RED(pipe)), 1401 I915_READ(PIPE_CRC_RES_GREEN(pipe)), 1402 I915_READ(PIPE_CRC_RES_BLUE(pipe)), 1403 res1, res2); 1404 } 1405 1406 /* The RPS events need forcewake, so we add them to a work queue and mask their 1407 * IMR bits until the work is done. 
Other interrupts can be processed without 1408 * the work queue. */ 1409 static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir) 1410 { 1411 if (pm_iir & GEN6_PM_RPS_EVENTS) { 1412 spin_lock(&dev_priv->irq_lock); 1413 dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_RPS_EVENTS; 1414 snb_disable_pm_irq(dev_priv, pm_iir & GEN6_PM_RPS_EVENTS); 1415 spin_unlock(&dev_priv->irq_lock); 1416 1417 queue_work(dev_priv->wq, &dev_priv->rps.work); 1418 } 1419 1420 if (HAS_VEBOX(dev_priv->dev)) { 1421 if (pm_iir & PM_VEBOX_USER_INTERRUPT) 1422 notify_ring(dev_priv->dev, &dev_priv->ring[VECS]); 1423 1424 if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) { 1425 DRM_ERROR("VEBOX CS error interrupt 0x%08x\n", pm_iir); 1426 i915_handle_error(dev_priv->dev, false); 1427 } 1428 } 1429 } 1430 1431 static irqreturn_t valleyview_irq_handler(int irq, void *arg) 1432 { 1433 struct drm_device *dev = (struct drm_device *) arg; 1434 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1435 u32 iir, gt_iir, pm_iir; 1436 irqreturn_t ret = IRQ_NONE; 1437 unsigned long irqflags; 1438 int pipe; 1439 u32 pipe_stats[I915_MAX_PIPES]; 1440 1441 atomic_inc(&dev_priv->irq_received); 1442 1443 while (true) { 1444 iir = I915_READ(VLV_IIR); 1445 gt_iir = I915_READ(GTIIR); 1446 pm_iir = I915_READ(GEN6_PMIIR); 1447 1448 if (gt_iir == 0 && pm_iir == 0 && iir == 0) 1449 goto out; 1450 1451 ret = IRQ_HANDLED; 1452 1453 snb_gt_irq_handler(dev, dev_priv, gt_iir); 1454 1455 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1456 for_each_pipe(pipe) { 1457 int reg = PIPESTAT(pipe); 1458 pipe_stats[pipe] = I915_READ(reg); 1459 1460 /* 1461 * Clear the PIPE*STAT regs before the IIR 1462 */ 1463 if (pipe_stats[pipe] & 0x8000ffff) { 1464 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 1465 DRM_DEBUG_DRIVER("pipe %c underrun\n", 1466 pipe_name(pipe)); 1467 I915_WRITE(reg, pipe_stats[pipe]); 1468 } 1469 } 1470 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1471 1472 for_each_pipe(pipe) { 1473 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS) 1474 drm_handle_vblank(dev, pipe); 1475 1476 if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) { 1477 intel_prepare_page_flip(dev, pipe); 1478 intel_finish_page_flip(dev, pipe); 1479 } 1480 1481 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 1482 i9xx_pipe_crc_irq_handler(dev, pipe); 1483 } 1484 1485 /* Consume port. 
Then clear IIR or we'll miss events */ 1486 if (iir & I915_DISPLAY_PORT_INTERRUPT) { 1487 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); 1488 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; 1489 1490 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", 1491 hotplug_status); 1492 1493 intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915); 1494 1495 if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X) 1496 dp_aux_irq_handler(dev); 1497 1498 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); 1499 I915_READ(PORT_HOTPLUG_STAT); 1500 } 1501 1502 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 1503 gmbus_irq_handler(dev); 1504 1505 if (pm_iir) 1506 gen6_rps_irq_handler(dev_priv, pm_iir); 1507 1508 I915_WRITE(GTIIR, gt_iir); 1509 I915_WRITE(GEN6_PMIIR, pm_iir); 1510 I915_WRITE(VLV_IIR, iir); 1511 } 1512 1513 out: 1514 return ret; 1515 } 1516 1517 static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir) 1518 { 1519 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1520 int pipe; 1521 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK; 1522 1523 intel_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx); 1524 1525 if (pch_iir & SDE_AUDIO_POWER_MASK) { 1526 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >> 1527 SDE_AUDIO_POWER_SHIFT); 1528 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", 1529 port_name(port)); 1530 } 1531 1532 if (pch_iir & SDE_AUX_MASK) 1533 dp_aux_irq_handler(dev); 1534 1535 if (pch_iir & SDE_GMBUS) 1536 gmbus_irq_handler(dev); 1537 1538 if (pch_iir & SDE_AUDIO_HDCP_MASK) 1539 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n"); 1540 1541 if (pch_iir & SDE_AUDIO_TRANS_MASK) 1542 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n"); 1543 1544 if (pch_iir & SDE_POISON) 1545 DRM_ERROR("PCH poison interrupt\n"); 1546 1547 if (pch_iir & SDE_FDI_MASK) 1548 for_each_pipe(pipe) 1549 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 1550 pipe_name(pipe), 1551 I915_READ(FDI_RX_IIR(pipe))); 1552 1553 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE)) 1554 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n"); 1555 1556 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR)) 1557 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n"); 1558 1559 if (pch_iir & SDE_TRANSA_FIFO_UNDER) 1560 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, 1561 false)) 1562 DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n"); 1563 1564 if (pch_iir & SDE_TRANSB_FIFO_UNDER) 1565 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B, 1566 false)) 1567 DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n"); 1568 } 1569 1570 static void ivb_err_int_handler(struct drm_device *dev) 1571 { 1572 struct drm_i915_private *dev_priv = dev->dev_private; 1573 u32 err_int = I915_READ(GEN7_ERR_INT); 1574 enum pipe pipe; 1575 1576 if (err_int & ERR_INT_POISON) 1577 DRM_ERROR("Poison interrupt\n"); 1578 1579 for_each_pipe(pipe) { 1580 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) { 1581 if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, 1582 false)) 1583 DRM_DEBUG_DRIVER("Pipe %c FIFO underrun\n", 1584 pipe_name(pipe)); 1585 } 1586 1587 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) { 1588 if (IS_IVYBRIDGE(dev)) 1589 ivb_pipe_crc_irq_handler(dev, pipe); 1590 else 1591 hsw_pipe_crc_irq_handler(dev, pipe); 1592 } 1593 } 1594 1595 I915_WRITE(GEN7_ERR_INT, err_int); 1596 } 1597 1598 static void cpt_serr_int_handler(struct drm_device *dev) 1599 { 1600 struct drm_i915_private *dev_priv = dev->dev_private; 1601 u32 serr_int = I915_READ(SERR_INT); 1602 1603 if 
(serr_int & SERR_INT_POISON) 1604 DRM_ERROR("PCH poison interrupt\n"); 1605 1606 if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN) 1607 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, 1608 false)) 1609 DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n"); 1610 1611 if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN) 1612 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B, 1613 false)) 1614 DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n"); 1615 1616 if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN) 1617 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_C, 1618 false)) 1619 DRM_DEBUG_DRIVER("PCH transcoder C FIFO underrun\n"); 1620 1621 I915_WRITE(SERR_INT, serr_int); 1622 } 1623 1624 static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir) 1625 { 1626 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1627 int pipe; 1628 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; 1629 1630 intel_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt); 1631 1632 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) { 1633 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> 1634 SDE_AUDIO_POWER_SHIFT_CPT); 1635 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n", 1636 port_name(port)); 1637 } 1638 1639 if (pch_iir & SDE_AUX_MASK_CPT) 1640 dp_aux_irq_handler(dev); 1641 1642 if (pch_iir & SDE_GMBUS_CPT) 1643 gmbus_irq_handler(dev); 1644 1645 if (pch_iir & SDE_AUDIO_CP_REQ_CPT) 1646 DRM_DEBUG_DRIVER("Audio CP request interrupt\n"); 1647 1648 if (pch_iir & SDE_AUDIO_CP_CHG_CPT) 1649 DRM_DEBUG_DRIVER("Audio CP change interrupt\n"); 1650 1651 if (pch_iir & SDE_FDI_MASK_CPT) 1652 for_each_pipe(pipe) 1653 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 1654 pipe_name(pipe), 1655 I915_READ(FDI_RX_IIR(pipe))); 1656 1657 if (pch_iir & SDE_ERROR_CPT) 1658 cpt_serr_int_handler(dev); 1659 } 1660 1661 static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir) 1662 { 1663 struct drm_i915_private *dev_priv = dev->dev_private; 1664 enum pipe pipe; 1665 1666 if (de_iir & DE_AUX_CHANNEL_A) 1667 dp_aux_irq_handler(dev); 1668 1669 if (de_iir & DE_GSE) 1670 intel_opregion_asle_intr(dev); 1671 1672 if (de_iir & DE_POISON) 1673 DRM_ERROR("Poison interrupt\n"); 1674 1675 for_each_pipe(pipe) { 1676 if (de_iir & DE_PIPE_VBLANK(pipe)) 1677 drm_handle_vblank(dev, pipe); 1678 1679 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe)) 1680 if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, false)) 1681 DRM_DEBUG_DRIVER("Pipe %c FIFO underrun\n", 1682 pipe_name(pipe)); 1683 1684 if (de_iir & DE_PIPE_CRC_DONE(pipe)) 1685 i9xx_pipe_crc_irq_handler(dev, pipe); 1686 1687 /* plane/pipes map 1:1 on ilk+ */ 1688 if (de_iir & DE_PLANE_FLIP_DONE(pipe)) { 1689 intel_prepare_page_flip(dev, pipe); 1690 intel_finish_page_flip_plane(dev, pipe); 1691 } 1692 } 1693 1694 /* check event from PCH */ 1695 if (de_iir & DE_PCH_EVENT) { 1696 u32 pch_iir = I915_READ(SDEIIR); 1697 1698 if (HAS_PCH_CPT(dev)) 1699 cpt_irq_handler(dev, pch_iir); 1700 else 1701 ibx_irq_handler(dev, pch_iir); 1702 1703 /* should clear PCH hotplug event before clear CPU irq */ 1704 I915_WRITE(SDEIIR, pch_iir); 1705 } 1706 1707 if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT) 1708 ironlake_rps_change_irq_handler(dev); 1709 } 1710 1711 static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir) 1712 { 1713 struct drm_i915_private *dev_priv = dev->dev_private; 1714 enum pipe i; 1715 1716 if (de_iir & DE_ERR_INT_IVB) 1717 ivb_err_int_handler(dev); 1718 1719 if (de_iir & DE_AUX_CHANNEL_A_IVB) 1720 dp_aux_irq_handler(dev); 1721 1722 if 
(de_iir & DE_GSE_IVB)
		intel_opregion_asle_intr(dev);

	for_each_pipe(i) {
		if (de_iir & (DE_PIPE_VBLANK_IVB(i)))
			drm_handle_vblank(dev, i);

		/* plane/pipes map 1:1 on ilk+ */
		if (de_iir & DE_PLANE_FLIP_DONE_IVB(i)) {
			intel_prepare_page_flip(dev, i);
			intel_finish_page_flip_plane(dev, i);
		}
	}

	/* check event from PCH */
	if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
		u32 pch_iir = I915_READ(SDEIIR);

		cpt_irq_handler(dev, pch_iir);

		/* clear PCH hotplug event before clear CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}
}

static irqreturn_t ironlake_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
	irqreturn_t ret = IRQ_NONE;

	atomic_inc(&dev_priv->irq_received);

	/* We get interrupts on unclaimed registers, so check for this before we
	 * do any I915_{READ,WRITE}. */
	intel_uncore_check_errors(dev);

	/* disable master interrupt before clearing iir */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
	POSTING_READ(DEIER);

	/* Disable south interrupts. We'll only write to SDEIIR once, so further
	 * interrupts will be stored on its back queue, and then we'll be
	 * able to process them after we restore SDEIER (as soon as we restore
	 * it, we'll get an interrupt if SDEIIR still has something to process
	 * due to its back queue). */
	if (!HAS_PCH_NOP(dev)) {
		sde_ier = I915_READ(SDEIER);
		I915_WRITE(SDEIER, 0);
		POSTING_READ(SDEIER);
	}

	gt_iir = I915_READ(GTIIR);
	if (gt_iir) {
		if (INTEL_INFO(dev)->gen >= 6)
			snb_gt_irq_handler(dev, dev_priv, gt_iir);
		else
			ilk_gt_irq_handler(dev, dev_priv, gt_iir);
		I915_WRITE(GTIIR, gt_iir);
		ret = IRQ_HANDLED;
	}

	de_iir = I915_READ(DEIIR);
	if (de_iir) {
		if (INTEL_INFO(dev)->gen >= 7)
			ivb_display_irq_handler(dev, de_iir);
		else
			ilk_display_irq_handler(dev, de_iir);
		I915_WRITE(DEIIR, de_iir);
		ret = IRQ_HANDLED;
	}

	if (INTEL_INFO(dev)->gen >= 6) {
		u32 pm_iir = I915_READ(GEN6_PMIIR);
		if (pm_iir) {
			gen6_rps_irq_handler(dev_priv, pm_iir);
			I915_WRITE(GEN6_PMIIR, pm_iir);
			ret = IRQ_HANDLED;
		}
	}

	I915_WRITE(DEIER, de_ier);
	POSTING_READ(DEIER);
	if (!HAS_PCH_NOP(dev)) {
		I915_WRITE(SDEIER, sde_ier);
		POSTING_READ(SDEIER);
	}

	return ret;
}

static irqreturn_t gen8_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 master_ctl;
	irqreturn_t ret = IRQ_NONE;
	uint32_t tmp = 0;
	enum pipe pipe;

	atomic_inc(&dev_priv->irq_received);

	master_ctl = I915_READ(GEN8_MASTER_IRQ);
	master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
	if (!master_ctl)
		return IRQ_NONE;

	I915_WRITE(GEN8_MASTER_IRQ, 0);
	POSTING_READ(GEN8_MASTER_IRQ);

	ret = gen8_gt_irq_handler(dev, dev_priv, master_ctl);

	if (master_ctl & GEN8_DE_MISC_IRQ) {
		tmp = I915_READ(GEN8_DE_MISC_IIR);
		if (tmp & GEN8_DE_MISC_GSE)
			intel_opregion_asle_intr(dev);
		else if (tmp)
			DRM_ERROR("Unexpected DE Misc interrupt\n");
		else
			DRM_ERROR("The master 
control interrupt lied (DE MISC)!\n"); 1844 1845 if (tmp) { 1846 I915_WRITE(GEN8_DE_MISC_IIR, tmp); 1847 ret = IRQ_HANDLED; 1848 } 1849 } 1850 1851 if (master_ctl & GEN8_DE_PORT_IRQ) { 1852 tmp = I915_READ(GEN8_DE_PORT_IIR); 1853 if (tmp & GEN8_AUX_CHANNEL_A) 1854 dp_aux_irq_handler(dev); 1855 else if (tmp) 1856 DRM_ERROR("Unexpected DE Port interrupt\n"); 1857 else 1858 DRM_ERROR("The master control interrupt lied (DE PORT)!\n"); 1859 1860 if (tmp) { 1861 I915_WRITE(GEN8_DE_PORT_IIR, tmp); 1862 ret = IRQ_HANDLED; 1863 } 1864 } 1865 1866 for_each_pipe(pipe) { 1867 uint32_t pipe_iir; 1868 1869 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe))) 1870 continue; 1871 1872 pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe)); 1873 if (pipe_iir & GEN8_PIPE_VBLANK) 1874 drm_handle_vblank(dev, pipe); 1875 1876 if (pipe_iir & GEN8_PIPE_FLIP_DONE) { 1877 intel_prepare_page_flip(dev, pipe); 1878 intel_finish_page_flip_plane(dev, pipe); 1879 } 1880 1881 if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE) 1882 hsw_pipe_crc_irq_handler(dev, pipe); 1883 1884 if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN) { 1885 if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, 1886 false)) 1887 DRM_DEBUG_DRIVER("Pipe %c FIFO underrun\n", 1888 pipe_name(pipe)); 1889 } 1890 1891 if (pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS) { 1892 DRM_ERROR("Fault errors on pipe %c\n: 0x%08x", 1893 pipe_name(pipe), 1894 pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS); 1895 } 1896 1897 if (pipe_iir) { 1898 ret = IRQ_HANDLED; 1899 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir); 1900 } else 1901 DRM_ERROR("The master control interrupt lied (DE PIPE)!\n"); 1902 } 1903 1904 if (!HAS_PCH_NOP(dev) && master_ctl & GEN8_DE_PCH_IRQ) { 1905 /* 1906 * FIXME(BDW): Assume for now that the new interrupt handling 1907 * scheme also closed the SDE interrupt handling race we've seen 1908 * on older pch-split platforms. But this needs testing. 1909 */ 1910 u32 pch_iir = I915_READ(SDEIIR); 1911 1912 cpt_irq_handler(dev, pch_iir); 1913 1914 if (pch_iir) { 1915 I915_WRITE(SDEIIR, pch_iir); 1916 ret = IRQ_HANDLED; 1917 } 1918 } 1919 1920 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 1921 POSTING_READ(GEN8_MASTER_IRQ); 1922 1923 return ret; 1924 } 1925 1926 static void i915_error_wake_up(struct drm_i915_private *dev_priv, 1927 bool reset_completed) 1928 { 1929 struct intel_ring_buffer *ring; 1930 int i; 1931 1932 /* 1933 * Notify all waiters for GPU completion events that reset state has 1934 * been changed, and that they need to restart their wait after 1935 * checking for potential errors (and bail out to drop locks if there is 1936 * a gpu reset pending so that i915_error_work_func can acquire them). 1937 */ 1938 1939 /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */ 1940 for_each_ring(ring, dev_priv, i) 1941 wake_up_all(&ring->irq_queue); 1942 1943 /* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */ 1944 wake_up_all(&dev_priv->pending_flip_queue); 1945 1946 /* 1947 * Signal tasks blocked in i915_gem_wait_for_error that the pending 1948 * reset state is cleared. 1949 */ 1950 if (reset_completed) 1951 wake_up_all(&dev_priv->gpu_error.reset_queue); 1952 } 1953 1954 /** 1955 * i915_error_work_func - do process context error handling work 1956 * @work: work struct 1957 * 1958 * Fire an error uevent so userspace can see that a hang or error 1959 * was detected. 
 */
static void i915_error_work_func(struct work_struct *work)
{
	struct i915_gpu_error *error = container_of(work, struct i915_gpu_error,
						    work);
	drm_i915_private_t *dev_priv = container_of(error, drm_i915_private_t,
						    gpu_error);
	struct drm_device *dev = dev_priv->dev;
	char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
	char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
	char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
	int ret;

	kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event);

	/*
	 * Note that there's only one work item which does gpu resets, so we
	 * need not worry about concurrent gpu resets potentially incrementing
	 * error->reset_counter twice. We only need to take care of another
	 * racing irq/hangcheck declaring the gpu dead for a second time. A
	 * quick check for that is good enough: schedule_work ensures the
	 * correct ordering between hang detection and this work item, and since
	 * the reset in-progress bit is only ever set by code outside of this
	 * work we don't need to worry about any other races.
	 */
	if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
		DRM_DEBUG_DRIVER("resetting chip\n");
		kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
				   reset_event);

		/*
		 * All state reset _must_ be completed before we update the
		 * reset counter, for otherwise waiters might miss the reset
		 * pending state and not properly drop locks, resulting in
		 * deadlocks with the reset work.
		 */
		ret = i915_reset(dev);

		intel_display_handle_reset(dev);

		if (ret == 0) {
			/*
			 * After all the gem state is reset, increment the reset
			 * counter and wake up everyone waiting for the reset to
			 * complete.
			 *
			 * Since unlock operations are a one-sided barrier only,
			 * we need to insert a barrier here to order any seqno
			 * updates before the counter increment.
			 */
			smp_mb__before_atomic_inc();
			atomic_inc(&dev_priv->gpu_error.reset_counter);

			kobject_uevent_env(&dev->primary->kdev->kobj,
					   KOBJ_CHANGE, reset_done_event);
		} else {
			atomic_set_mask(I915_WEDGED, &error->reset_counter);
		}

		/*
		 * Note: The wake_up also serves as a memory barrier so that
		 * waiters see the updated value of the reset counter atomic_t.
		 */
		i915_error_wake_up(dev_priv, true);
	}
}

static void i915_report_and_clear_eir(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t instdone[I915_NUM_INSTDONE_REG];
	u32 eir = I915_READ(EIR);
	int pipe, i;

	if (!eir)
		return;

	pr_err("render error detected, EIR: 0x%08x\n", eir);

	i915_get_extra_instdone(dev, instdone);

	if (IS_G4X(dev)) {
		if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
			u32 ipeir = I915_READ(IPEIR_I965);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			for (i = 0; i < ARRAY_SIZE(instdone); i++)
				pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
		if (eir & GM45_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			pr_err("page table error\n");
			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (!IS_GEN2(dev)) {
		if (eir & I915_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			pr_err("page table error\n");
			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (eir & I915_ERROR_MEMORY_REFRESH) {
		pr_err("memory refresh error:\n");
		for_each_pipe(pipe)
			pr_err("pipe %c stat: 0x%08x\n",
			       pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
		/* pipestat has already been acked */
	}
	if (eir & I915_ERROR_INSTRUCTION) {
		pr_err("instruction error\n");
		pr_err("  INSTPM: 0x%08x\n", I915_READ(INSTPM));
		for (i = 0; i < ARRAY_SIZE(instdone); i++)
			pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
		if (INTEL_INFO(dev)->gen < 4) {
			u32 ipeir = I915_READ(IPEIR);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD));
			I915_WRITE(IPEIR, ipeir);
			POSTING_READ(IPEIR);
		} else {
			u32 ipeir = I915_READ(IPEIR_I965);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
	}

	I915_WRITE(EIR, eir);
	POSTING_READ(EIR);
	eir = I915_READ(EIR);
	if (eir) {
		/*
		 * some errors might have become stuck,
		 * mask them.
		 */
		DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
		I915_WRITE(EMR, I915_READ(EMR) | eir);
		I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	}
}

/**
 * i915_handle_error - handle an error interrupt
 * @dev: drm device
 *
 * Do some basic checking of register state at error interrupt time and
 * dump it to the syslog. Also call i915_capture_error_state() to make
 * sure we get a record and make it available in debugfs. Fire a uevent
 * so userspace knows something bad happened (should trigger collection
 * of a ring dump etc.).
 */
void i915_handle_error(struct drm_device *dev, bool wedged)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	i915_capture_error_state(dev);
	i915_report_and_clear_eir(dev);

	if (wedged) {
		atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
				&dev_priv->gpu_error.reset_counter);

		/*
		 * Wakeup waiting processes so that the reset work function
		 * i915_error_work_func doesn't deadlock trying to grab various
		 * locks. By bumping the reset counter first, the woken
		 * processes will see a reset in progress and back off,
		 * releasing their locks and then wait for the reset completion.
		 * We must do this for _all_ gpu waiters that might hold locks
		 * that the reset work needs to acquire.
		 *
		 * Note: The wake_up serves as the required memory barrier to
		 * ensure that the waiters see the updated value of the reset
		 * counter atomic_t.
		 */
		i915_error_wake_up(dev_priv, false);
	}

	/*
	 * Our reset work can grab modeset locks (since it needs to reset the
	 * state of outstanding pageflips). Hence it must not be run on our own
	 * dev_priv->wq work queue for otherwise the flush_work in the pageflip
	 * code will deadlock.
	 */
	schedule_work(&dev_priv->gpu_error.work);
}

static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_i915_gem_object *obj;
	struct intel_unpin_work *work;
	unsigned long flags;
	bool stall_detected;

	/* Ignore early vblank irqs */
	if (intel_crtc == NULL)
		return;

	spin_lock_irqsave(&dev->event_lock, flags);
	work = intel_crtc->unpin_work;

	if (work == NULL ||
	    atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE ||
	    !work->enable_stall_check) {
		/* Either the pending flip IRQ arrived, or we're too early.
Don't check */ 2187 spin_unlock_irqrestore(&dev->event_lock, flags); 2188 return; 2189 } 2190 2191 /* Potential stall - if we see that the flip has happened, assume a missed interrupt */ 2192 obj = work->pending_flip_obj; 2193 if (INTEL_INFO(dev)->gen >= 4) { 2194 int dspsurf = DSPSURF(intel_crtc->plane); 2195 stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) == 2196 i915_gem_obj_ggtt_offset(obj); 2197 } else { 2198 int dspaddr = DSPADDR(intel_crtc->plane); 2199 stall_detected = I915_READ(dspaddr) == (i915_gem_obj_ggtt_offset(obj) + 2200 crtc->y * crtc->fb->pitches[0] + 2201 crtc->x * crtc->fb->bits_per_pixel/8); 2202 } 2203 2204 spin_unlock_irqrestore(&dev->event_lock, flags); 2205 2206 if (stall_detected) { 2207 DRM_DEBUG_DRIVER("Pageflip stall detected\n"); 2208 intel_prepare_page_flip(dev, intel_crtc->plane); 2209 } 2210 } 2211 2212 /* Called from drm generic code, passed 'crtc' which 2213 * we use as a pipe index 2214 */ 2215 static int i915_enable_vblank(struct drm_device *dev, int pipe) 2216 { 2217 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2218 unsigned long irqflags; 2219 2220 if (!i915_pipe_enabled(dev, pipe)) 2221 return -EINVAL; 2222 2223 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2224 if (INTEL_INFO(dev)->gen >= 4) 2225 i915_enable_pipestat(dev_priv, pipe, 2226 PIPE_START_VBLANK_INTERRUPT_ENABLE); 2227 else 2228 i915_enable_pipestat(dev_priv, pipe, 2229 PIPE_VBLANK_INTERRUPT_ENABLE); 2230 2231 /* maintain vblank delivery even in deep C-states */ 2232 if (dev_priv->info->gen == 3) 2233 I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS)); 2234 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2235 2236 return 0; 2237 } 2238 2239 static int ironlake_enable_vblank(struct drm_device *dev, int pipe) 2240 { 2241 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2242 unsigned long irqflags; 2243 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? 
DE_PIPE_VBLANK_IVB(pipe) : 2244 DE_PIPE_VBLANK(pipe); 2245 2246 if (!i915_pipe_enabled(dev, pipe)) 2247 return -EINVAL; 2248 2249 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2250 ironlake_enable_display_irq(dev_priv, bit); 2251 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2252 2253 return 0; 2254 } 2255 2256 static int valleyview_enable_vblank(struct drm_device *dev, int pipe) 2257 { 2258 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2259 unsigned long irqflags; 2260 u32 imr; 2261 2262 if (!i915_pipe_enabled(dev, pipe)) 2263 return -EINVAL; 2264 2265 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2266 imr = I915_READ(VLV_IMR); 2267 if (pipe == PIPE_A) 2268 imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT; 2269 else 2270 imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; 2271 I915_WRITE(VLV_IMR, imr); 2272 i915_enable_pipestat(dev_priv, pipe, 2273 PIPE_START_VBLANK_INTERRUPT_ENABLE); 2274 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2275 2276 return 0; 2277 } 2278 2279 static int gen8_enable_vblank(struct drm_device *dev, int pipe) 2280 { 2281 struct drm_i915_private *dev_priv = dev->dev_private; 2282 unsigned long irqflags; 2283 2284 if (!i915_pipe_enabled(dev, pipe)) 2285 return -EINVAL; 2286 2287 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2288 dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK; 2289 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); 2290 POSTING_READ(GEN8_DE_PIPE_IMR(pipe)); 2291 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2292 return 0; 2293 } 2294 2295 /* Called from drm generic code, passed 'crtc' which 2296 * we use as a pipe index 2297 */ 2298 static void i915_disable_vblank(struct drm_device *dev, int pipe) 2299 { 2300 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2301 unsigned long irqflags; 2302 2303 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2304 if (dev_priv->info->gen == 3) 2305 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS)); 2306 2307 i915_disable_pipestat(dev_priv, pipe, 2308 PIPE_VBLANK_INTERRUPT_ENABLE | 2309 PIPE_START_VBLANK_INTERRUPT_ENABLE); 2310 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2311 } 2312 2313 static void ironlake_disable_vblank(struct drm_device *dev, int pipe) 2314 { 2315 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2316 unsigned long irqflags; 2317 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? 
DE_PIPE_VBLANK_IVB(pipe) : 2318 DE_PIPE_VBLANK(pipe); 2319 2320 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2321 ironlake_disable_display_irq(dev_priv, bit); 2322 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2323 } 2324 2325 static void valleyview_disable_vblank(struct drm_device *dev, int pipe) 2326 { 2327 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2328 unsigned long irqflags; 2329 u32 imr; 2330 2331 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2332 i915_disable_pipestat(dev_priv, pipe, 2333 PIPE_START_VBLANK_INTERRUPT_ENABLE); 2334 imr = I915_READ(VLV_IMR); 2335 if (pipe == PIPE_A) 2336 imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT; 2337 else 2338 imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; 2339 I915_WRITE(VLV_IMR, imr); 2340 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2341 } 2342 2343 static void gen8_disable_vblank(struct drm_device *dev, int pipe) 2344 { 2345 struct drm_i915_private *dev_priv = dev->dev_private; 2346 unsigned long irqflags; 2347 2348 if (!i915_pipe_enabled(dev, pipe)) 2349 return; 2350 2351 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2352 dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK; 2353 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); 2354 POSTING_READ(GEN8_DE_PIPE_IMR(pipe)); 2355 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2356 } 2357 2358 static u32 2359 ring_last_seqno(struct intel_ring_buffer *ring) 2360 { 2361 return list_entry(ring->request_list.prev, 2362 struct drm_i915_gem_request, list)->seqno; 2363 } 2364 2365 static bool 2366 ring_idle(struct intel_ring_buffer *ring, u32 seqno) 2367 { 2368 return (list_empty(&ring->request_list) || 2369 i915_seqno_passed(seqno, ring_last_seqno(ring))); 2370 } 2371 2372 static struct intel_ring_buffer * 2373 semaphore_waits_for(struct intel_ring_buffer *ring, u32 *seqno) 2374 { 2375 struct drm_i915_private *dev_priv = ring->dev->dev_private; 2376 u32 cmd, ipehr, acthd, acthd_min; 2377 2378 ipehr = I915_READ(RING_IPEHR(ring->mmio_base)); 2379 if ((ipehr & ~(0x3 << 16)) != 2380 (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | MI_SEMAPHORE_REGISTER)) 2381 return NULL; 2382 2383 /* ACTHD is likely pointing to the dword after the actual command, 2384 * so scan backwards until we find the MBOX. 
	 */
	acthd = intel_ring_get_active_head(ring) & HEAD_ADDR;
	acthd_min = max((int)acthd - 3 * 4, 0);
	do {
		cmd = ioread32(ring->virtual_start + acthd);
		if (cmd == ipehr)
			break;

		acthd -= 4;
		if (acthd < acthd_min)
			return NULL;
	} while (1);

	*seqno = ioread32(ring->virtual_start+acthd+4)+1;
	return &dev_priv->ring[(ring->id + (((ipehr >> 17) & 1) + 1)) % 3];
}

static int semaphore_passed(struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct intel_ring_buffer *signaller;
	u32 seqno, ctl;

	ring->hangcheck.deadlock = true;

	signaller = semaphore_waits_for(ring, &seqno);
	if (signaller == NULL || signaller->hangcheck.deadlock)
		return -1;

	/* cursory check for an unkickable deadlock */
	ctl = I915_READ_CTL(signaller);
	if (ctl & RING_WAIT_SEMAPHORE && semaphore_passed(signaller) < 0)
		return -1;

	return i915_seqno_passed(signaller->get_seqno(signaller, false), seqno);
}

static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
{
	struct intel_ring_buffer *ring;
	int i;

	for_each_ring(ring, dev_priv, i)
		ring->hangcheck.deadlock = false;
}

static enum intel_ring_hangcheck_action
ring_stuck(struct intel_ring_buffer *ring, u32 acthd)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 tmp;

	if (ring->hangcheck.acthd != acthd)
		return HANGCHECK_ACTIVE;

	if (IS_GEN2(dev))
		return HANGCHECK_HUNG;

	/* Is the chip hanging on a WAIT_FOR_EVENT?
	 * If so we can simply poke the RB_WAIT bit
	 * and break the hang. This should work on
	 * all but the second generation chipsets.
	 */
	tmp = I915_READ_CTL(ring);
	if (tmp & RING_WAIT) {
		DRM_ERROR("Kicking stuck wait on %s\n",
			  ring->name);
		i915_handle_error(dev, false);
		I915_WRITE_CTL(ring, tmp);
		return HANGCHECK_KICK;
	}

	if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
		switch (semaphore_passed(ring)) {
		default:
			return HANGCHECK_HUNG;
		case 1:
			DRM_ERROR("Kicking stuck semaphore on %s\n",
				  ring->name);
			i915_handle_error(dev, false);
			I915_WRITE_CTL(ring, tmp);
			return HANGCHECK_KICK;
		case 0:
			return HANGCHECK_WAIT;
		}
	}

	return HANGCHECK_HUNG;
}

/**
 * This is called when the chip hasn't reported back with completed
 * batchbuffers in a long time. We keep track of per-ring seqno progress and,
 * if there is no progress, the hangcheck score for that ring is increased.
 * Further, acthd is inspected to see if the ring is stuck. If it is stuck,
 * we kick the ring. If we see no progress on three subsequent calls
 * we assume the chip is wedged and try to fix it by resetting the chip.
 */
static void i915_hangcheck_elapsed(unsigned long data)
{
	struct drm_device *dev = (struct drm_device *)data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int i;
	int busy_count = 0, rings_hung = 0;
	bool stuck[I915_NUM_RINGS] = { 0 };
#define BUSY 1
#define KICK 5
#define HUNG 20
#define FIRE 30

	if (!i915_enable_hangcheck)
		return;

	for_each_ring(ring, dev_priv, i) {
		u32 seqno, acthd;
		bool busy = true;

		semaphore_clear_deadlocks(dev_priv);

		seqno = ring->get_seqno(ring, false);
		acthd = intel_ring_get_active_head(ring);

		if (ring->hangcheck.seqno == seqno) {
			if (ring_idle(ring, seqno)) {
				ring->hangcheck.action = HANGCHECK_IDLE;

				if (waitqueue_active(&ring->irq_queue)) {
					/* Issue a wake-up to catch stuck h/w. */
					if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) {
						if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)))
							DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
								  ring->name);
						else
							DRM_INFO("Fake missed irq on %s\n",
								 ring->name);
						wake_up_all(&ring->irq_queue);
					}
					/* Safeguard against driver failure */
					ring->hangcheck.score += BUSY;
				} else
					busy = false;
			} else {
				/* We always increment the hangcheck score
				 * if the ring is busy and still processing
				 * the same request, so that no single request
				 * can run indefinitely (such as a chain of
				 * batches). The only time we do not increment
				 * the hangcheck score on this ring is when
				 * this ring is in a legitimate wait for
				 * another ring. In that case the waiting ring is a
				 * victim and we want to be sure we catch the
				 * right culprit. Then every time we do kick
				 * the ring, add a small increment to the
				 * score so that we can catch a batch that is
				 * being repeatedly kicked and so responsible
				 * for stalling the machine.
				 */
				ring->hangcheck.action = ring_stuck(ring,
								    acthd);

				switch (ring->hangcheck.action) {
				case HANGCHECK_IDLE:
				case HANGCHECK_WAIT:
					break;
				case HANGCHECK_ACTIVE:
					ring->hangcheck.score += BUSY;
					break;
				case HANGCHECK_KICK:
					ring->hangcheck.score += KICK;
					break;
				case HANGCHECK_HUNG:
					ring->hangcheck.score += HUNG;
					stuck[i] = true;
					break;
				}
			}
		} else {
			ring->hangcheck.action = HANGCHECK_ACTIVE;

			/* Gradually reduce the count so that we catch DoS
			 * attempts across multiple batches.
			 */
			if (ring->hangcheck.score > 0)
				ring->hangcheck.score--;
		}

		ring->hangcheck.seqno = seqno;
		ring->hangcheck.acthd = acthd;
		busy_count += busy;
	}

	for_each_ring(ring, dev_priv, i) {
		if (ring->hangcheck.score > FIRE) {
			DRM_INFO("%s on %s\n",
				 stuck[i] ?
"stuck" : "no progress", 2582 ring->name); 2583 rings_hung++; 2584 } 2585 } 2586 2587 if (rings_hung) 2588 return i915_handle_error(dev, true); 2589 2590 if (busy_count) 2591 /* Reset timer case chip hangs without another request 2592 * being added */ 2593 i915_queue_hangcheck(dev); 2594 } 2595 2596 void i915_queue_hangcheck(struct drm_device *dev) 2597 { 2598 struct drm_i915_private *dev_priv = dev->dev_private; 2599 if (!i915_enable_hangcheck) 2600 return; 2601 2602 mod_timer(&dev_priv->gpu_error.hangcheck_timer, 2603 round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES)); 2604 } 2605 2606 static void ibx_irq_preinstall(struct drm_device *dev) 2607 { 2608 struct drm_i915_private *dev_priv = dev->dev_private; 2609 2610 if (HAS_PCH_NOP(dev)) 2611 return; 2612 2613 /* south display irq */ 2614 I915_WRITE(SDEIMR, 0xffffffff); 2615 /* 2616 * SDEIER is also touched by the interrupt handler to work around missed 2617 * PCH interrupts. Hence we can't update it after the interrupt handler 2618 * is enabled - instead we unconditionally enable all PCH interrupt 2619 * sources here, but then only unmask them as needed with SDEIMR. 2620 */ 2621 I915_WRITE(SDEIER, 0xffffffff); 2622 POSTING_READ(SDEIER); 2623 } 2624 2625 static void gen5_gt_irq_preinstall(struct drm_device *dev) 2626 { 2627 struct drm_i915_private *dev_priv = dev->dev_private; 2628 2629 /* and GT */ 2630 I915_WRITE(GTIMR, 0xffffffff); 2631 I915_WRITE(GTIER, 0x0); 2632 POSTING_READ(GTIER); 2633 2634 if (INTEL_INFO(dev)->gen >= 6) { 2635 /* and PM */ 2636 I915_WRITE(GEN6_PMIMR, 0xffffffff); 2637 I915_WRITE(GEN6_PMIER, 0x0); 2638 POSTING_READ(GEN6_PMIER); 2639 } 2640 } 2641 2642 /* drm_dma.h hooks 2643 */ 2644 static void ironlake_irq_preinstall(struct drm_device *dev) 2645 { 2646 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2647 2648 atomic_set(&dev_priv->irq_received, 0); 2649 2650 I915_WRITE(HWSTAM, 0xeffe); 2651 2652 I915_WRITE(DEIMR, 0xffffffff); 2653 I915_WRITE(DEIER, 0x0); 2654 POSTING_READ(DEIER); 2655 2656 gen5_gt_irq_preinstall(dev); 2657 2658 ibx_irq_preinstall(dev); 2659 } 2660 2661 static void valleyview_irq_preinstall(struct drm_device *dev) 2662 { 2663 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2664 int pipe; 2665 2666 atomic_set(&dev_priv->irq_received, 0); 2667 2668 /* VLV magic */ 2669 I915_WRITE(VLV_IMR, 0); 2670 I915_WRITE(RING_IMR(RENDER_RING_BASE), 0); 2671 I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0); 2672 I915_WRITE(RING_IMR(BLT_RING_BASE), 0); 2673 2674 /* and GT */ 2675 I915_WRITE(GTIIR, I915_READ(GTIIR)); 2676 I915_WRITE(GTIIR, I915_READ(GTIIR)); 2677 2678 gen5_gt_irq_preinstall(dev); 2679 2680 I915_WRITE(DPINVGTT, 0xff); 2681 2682 I915_WRITE(PORT_HOTPLUG_EN, 0); 2683 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 2684 for_each_pipe(pipe) 2685 I915_WRITE(PIPESTAT(pipe), 0xffff); 2686 I915_WRITE(VLV_IIR, 0xffffffff); 2687 I915_WRITE(VLV_IMR, 0xffffffff); 2688 I915_WRITE(VLV_IER, 0x0); 2689 POSTING_READ(VLV_IER); 2690 } 2691 2692 static void gen8_irq_preinstall(struct drm_device *dev) 2693 { 2694 struct drm_i915_private *dev_priv = dev->dev_private; 2695 int pipe; 2696 2697 atomic_set(&dev_priv->irq_received, 0); 2698 2699 I915_WRITE(GEN8_MASTER_IRQ, 0); 2700 POSTING_READ(GEN8_MASTER_IRQ); 2701 2702 /* IIR can theoretically queue up two events. 
Be paranoid */ 2703 #define GEN8_IRQ_INIT_NDX(type, which) do { \ 2704 I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \ 2705 POSTING_READ(GEN8_##type##_IMR(which)); \ 2706 I915_WRITE(GEN8_##type##_IER(which), 0); \ 2707 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \ 2708 POSTING_READ(GEN8_##type##_IIR(which)); \ 2709 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \ 2710 } while (0) 2711 2712 #define GEN8_IRQ_INIT(type) do { \ 2713 I915_WRITE(GEN8_##type##_IMR, 0xffffffff); \ 2714 POSTING_READ(GEN8_##type##_IMR); \ 2715 I915_WRITE(GEN8_##type##_IER, 0); \ 2716 I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \ 2717 POSTING_READ(GEN8_##type##_IIR); \ 2718 I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \ 2719 } while (0) 2720 2721 GEN8_IRQ_INIT_NDX(GT, 0); 2722 GEN8_IRQ_INIT_NDX(GT, 1); 2723 GEN8_IRQ_INIT_NDX(GT, 2); 2724 GEN8_IRQ_INIT_NDX(GT, 3); 2725 2726 for_each_pipe(pipe) { 2727 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe); 2728 } 2729 2730 GEN8_IRQ_INIT(DE_PORT); 2731 GEN8_IRQ_INIT(DE_MISC); 2732 GEN8_IRQ_INIT(PCU); 2733 #undef GEN8_IRQ_INIT 2734 #undef GEN8_IRQ_INIT_NDX 2735 2736 POSTING_READ(GEN8_PCU_IIR); 2737 2738 ibx_irq_preinstall(dev); 2739 } 2740 2741 static void ibx_hpd_irq_setup(struct drm_device *dev) 2742 { 2743 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2744 struct drm_mode_config *mode_config = &dev->mode_config; 2745 struct intel_encoder *intel_encoder; 2746 u32 hotplug_irqs, hotplug, enabled_irqs = 0; 2747 2748 if (HAS_PCH_IBX(dev)) { 2749 hotplug_irqs = SDE_HOTPLUG_MASK; 2750 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head) 2751 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) 2752 enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin]; 2753 } else { 2754 hotplug_irqs = SDE_HOTPLUG_MASK_CPT; 2755 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head) 2756 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) 2757 enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin]; 2758 } 2759 2760 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 2761 2762 /* 2763 * Enable digital hotplug on the PCH, and configure the DP short pulse 2764 * duration to 2ms (which is the minimum in the Display Port spec) 2765 * 2766 * This register is the same on all known PCH chips. 2767 */ 2768 hotplug = I915_READ(PCH_PORT_HOTPLUG); 2769 hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK); 2770 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms; 2771 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms; 2772 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms; 2773 I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 2774 } 2775 2776 static void ibx_irq_postinstall(struct drm_device *dev) 2777 { 2778 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2779 u32 mask; 2780 2781 if (HAS_PCH_NOP(dev)) 2782 return; 2783 2784 if (HAS_PCH_IBX(dev)) { 2785 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON; 2786 } else { 2787 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT; 2788 2789 I915_WRITE(SERR_INT, I915_READ(SERR_INT)); 2790 } 2791 2792 I915_WRITE(SDEIIR, I915_READ(SDEIIR)); 2793 I915_WRITE(SDEIMR, ~mask); 2794 } 2795 2796 static void gen5_gt_irq_postinstall(struct drm_device *dev) 2797 { 2798 struct drm_i915_private *dev_priv = dev->dev_private; 2799 u32 pm_irqs, gt_irqs; 2800 2801 pm_irqs = gt_irqs = 0; 2802 2803 dev_priv->gt_irq_mask = ~0; 2804 if (HAS_L3_DPF(dev)) { 2805 /* L3 parity interrupt is always unmasked. 
		 */
		dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
		gt_irqs |= GT_PARITY_ERROR(dev);
	}

	gt_irqs |= GT_RENDER_USER_INTERRUPT;
	if (IS_GEN5(dev)) {
		gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
			   ILK_BSD_USER_INTERRUPT;
	} else {
		gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
	}

	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	I915_WRITE(GTIER, gt_irqs);
	POSTING_READ(GTIER);

	if (INTEL_INFO(dev)->gen >= 6) {
		pm_irqs |= GEN6_PM_RPS_EVENTS;

		if (HAS_VEBOX(dev))
			pm_irqs |= PM_VEBOX_USER_INTERRUPT;

		dev_priv->pm_irq_mask = 0xffffffff;
		I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
		I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask);
		I915_WRITE(GEN6_PMIER, pm_irqs);
		POSTING_READ(GEN6_PMIER);
	}
}

static int ironlake_irq_postinstall(struct drm_device *dev)
{
	unsigned long irqflags;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 display_mask, extra_mask;

	if (INTEL_INFO(dev)->gen >= 7) {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
				DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
				DE_PLANEB_FLIP_DONE_IVB |
				DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
			      DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB);

		I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));
	} else {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
				DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
				DE_AUX_CHANNEL_A |
				DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
				DE_POISON);
		extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
			     DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN;
	}

	dev_priv->irq_mask = ~display_mask;

	/* should always be able to generate irqs */
	I915_WRITE(DEIIR, I915_READ(DEIIR));
	I915_WRITE(DEIMR, dev_priv->irq_mask);
	I915_WRITE(DEIER, display_mask | extra_mask);
	POSTING_READ(DEIER);

	gen5_gt_irq_postinstall(dev);

	ibx_irq_postinstall(dev);

	if (IS_IRONLAKE_M(dev)) {
		/* Enable PCU event interrupts
		 *
		 * spinlocking not required here for correctness since interrupt
		 * setup is guaranteed to run in single-threaded context. But we
		 * need it to make the assert_spin_locked happy. */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
	}

	return 0;
}

static int valleyview_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 enable_mask;
	u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV |
		PIPE_CRC_DONE_ENABLE;
	unsigned long irqflags;

	enable_mask = I915_DISPLAY_PORT_INTERRUPT;
	enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;

	/*
	 * Leave vblank interrupts masked initially. enable/disable will
	 * toggle them based on usage.
2905 */ 2906 dev_priv->irq_mask = (~enable_mask) | 2907 I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT | 2908 I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; 2909 2910 I915_WRITE(PORT_HOTPLUG_EN, 0); 2911 POSTING_READ(PORT_HOTPLUG_EN); 2912 2913 I915_WRITE(VLV_IMR, dev_priv->irq_mask); 2914 I915_WRITE(VLV_IER, enable_mask); 2915 I915_WRITE(VLV_IIR, 0xffffffff); 2916 I915_WRITE(PIPESTAT(0), 0xffff); 2917 I915_WRITE(PIPESTAT(1), 0xffff); 2918 POSTING_READ(VLV_IER); 2919 2920 /* Interrupt setup is already guaranteed to be single-threaded, this is 2921 * just to make the assert_spin_locked check happy. */ 2922 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2923 i915_enable_pipestat(dev_priv, PIPE_A, pipestat_enable); 2924 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_EVENT_ENABLE); 2925 i915_enable_pipestat(dev_priv, PIPE_B, pipestat_enable); 2926 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2927 2928 I915_WRITE(VLV_IIR, 0xffffffff); 2929 I915_WRITE(VLV_IIR, 0xffffffff); 2930 2931 gen5_gt_irq_postinstall(dev); 2932 2933 /* ack & enable invalid PTE error interrupts */ 2934 #if 0 /* FIXME: add support to irq handler for checking these bits */ 2935 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK); 2936 I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK); 2937 #endif 2938 2939 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); 2940 2941 return 0; 2942 } 2943 2944 static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv) 2945 { 2946 int i; 2947 2948 /* These are interrupts we'll toggle with the ring mask register */ 2949 uint32_t gt_interrupts[] = { 2950 GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT | 2951 GT_RENDER_L3_PARITY_ERROR_INTERRUPT | 2952 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT, 2953 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT | 2954 GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT, 2955 0, 2956 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT 2957 }; 2958 2959 for (i = 0; i < ARRAY_SIZE(gt_interrupts); i++) { 2960 u32 tmp = I915_READ(GEN8_GT_IIR(i)); 2961 if (tmp) 2962 DRM_ERROR("Interrupt (%d) should have been masked in pre-install 0x%08x\n", 2963 i, tmp); 2964 I915_WRITE(GEN8_GT_IMR(i), ~gt_interrupts[i]); 2965 I915_WRITE(GEN8_GT_IER(i), gt_interrupts[i]); 2966 } 2967 POSTING_READ(GEN8_GT_IER(0)); 2968 } 2969 2970 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) 2971 { 2972 struct drm_device *dev = dev_priv->dev; 2973 uint32_t de_pipe_masked = GEN8_PIPE_FLIP_DONE | 2974 GEN8_PIPE_CDCLK_CRC_DONE | 2975 GEN8_DE_PIPE_IRQ_FAULT_ERRORS; 2976 uint32_t de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK | 2977 GEN8_PIPE_FIFO_UNDERRUN; 2978 int pipe; 2979 dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked; 2980 dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked; 2981 dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked; 2982 2983 for_each_pipe(pipe) { 2984 u32 tmp = I915_READ(GEN8_DE_PIPE_IIR(pipe)); 2985 if (tmp) 2986 DRM_ERROR("Interrupt (%d) should have been masked in pre-install 0x%08x\n", 2987 pipe, tmp); 2988 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); 2989 I915_WRITE(GEN8_DE_PIPE_IER(pipe), de_pipe_enables); 2990 } 2991 POSTING_READ(GEN8_DE_PIPE_ISR(0)); 2992 2993 I915_WRITE(GEN8_DE_PORT_IMR, ~GEN8_AUX_CHANNEL_A); 2994 I915_WRITE(GEN8_DE_PORT_IER, GEN8_AUX_CHANNEL_A); 2995 POSTING_READ(GEN8_DE_PORT_IER); 2996 } 2997 2998 static int gen8_irq_postinstall(struct drm_device *dev) 2999 { 3000 struct drm_i915_private *dev_priv = dev->dev_private; 3001 3002 gen8_gt_irq_postinstall(dev_priv); 3003 gen8_de_irq_postinstall(dev_priv); 3004 3005 
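	/*
	 * Note: the gen8 (BDW) path reuses the CPT/PPT south display code
	 * (gen8_irq_handler() calls cpt_irq_handler() for GEN8_DE_PCH_IRQ),
	 * so the shared ibx_irq_postinstall() below programs SDEIIR/SDEIMR
	 * for this platform as well.
	 */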
ibx_irq_postinstall(dev); 3006 3007 I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL); 3008 POSTING_READ(GEN8_MASTER_IRQ); 3009 3010 return 0; 3011 } 3012 3013 static void gen8_irq_uninstall(struct drm_device *dev) 3014 { 3015 struct drm_i915_private *dev_priv = dev->dev_private; 3016 int pipe; 3017 3018 if (!dev_priv) 3019 return; 3020 3021 atomic_set(&dev_priv->irq_received, 0); 3022 3023 I915_WRITE(GEN8_MASTER_IRQ, 0); 3024 3025 #define GEN8_IRQ_FINI_NDX(type, which) do { \ 3026 I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \ 3027 I915_WRITE(GEN8_##type##_IER(which), 0); \ 3028 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \ 3029 } while (0) 3030 3031 #define GEN8_IRQ_FINI(type) do { \ 3032 I915_WRITE(GEN8_##type##_IMR, 0xffffffff); \ 3033 I915_WRITE(GEN8_##type##_IER, 0); \ 3034 I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \ 3035 } while (0) 3036 3037 GEN8_IRQ_FINI_NDX(GT, 0); 3038 GEN8_IRQ_FINI_NDX(GT, 1); 3039 GEN8_IRQ_FINI_NDX(GT, 2); 3040 GEN8_IRQ_FINI_NDX(GT, 3); 3041 3042 for_each_pipe(pipe) { 3043 GEN8_IRQ_FINI_NDX(DE_PIPE, pipe); 3044 } 3045 3046 GEN8_IRQ_FINI(DE_PORT); 3047 GEN8_IRQ_FINI(DE_MISC); 3048 GEN8_IRQ_FINI(PCU); 3049 #undef GEN8_IRQ_FINI 3050 #undef GEN8_IRQ_FINI_NDX 3051 3052 POSTING_READ(GEN8_PCU_IIR); 3053 } 3054 3055 static void valleyview_irq_uninstall(struct drm_device *dev) 3056 { 3057 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3058 int pipe; 3059 3060 if (!dev_priv) 3061 return; 3062 3063 del_timer_sync(&dev_priv->hotplug_reenable_timer); 3064 3065 for_each_pipe(pipe) 3066 I915_WRITE(PIPESTAT(pipe), 0xffff); 3067 3068 I915_WRITE(HWSTAM, 0xffffffff); 3069 I915_WRITE(PORT_HOTPLUG_EN, 0); 3070 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3071 for_each_pipe(pipe) 3072 I915_WRITE(PIPESTAT(pipe), 0xffff); 3073 I915_WRITE(VLV_IIR, 0xffffffff); 3074 I915_WRITE(VLV_IMR, 0xffffffff); 3075 I915_WRITE(VLV_IER, 0x0); 3076 POSTING_READ(VLV_IER); 3077 } 3078 3079 static void ironlake_irq_uninstall(struct drm_device *dev) 3080 { 3081 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3082 3083 if (!dev_priv) 3084 return; 3085 3086 del_timer_sync(&dev_priv->hotplug_reenable_timer); 3087 3088 I915_WRITE(HWSTAM, 0xffffffff); 3089 3090 I915_WRITE(DEIMR, 0xffffffff); 3091 I915_WRITE(DEIER, 0x0); 3092 I915_WRITE(DEIIR, I915_READ(DEIIR)); 3093 if (IS_GEN7(dev)) 3094 I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT)); 3095 3096 I915_WRITE(GTIMR, 0xffffffff); 3097 I915_WRITE(GTIER, 0x0); 3098 I915_WRITE(GTIIR, I915_READ(GTIIR)); 3099 3100 if (HAS_PCH_NOP(dev)) 3101 return; 3102 3103 I915_WRITE(SDEIMR, 0xffffffff); 3104 I915_WRITE(SDEIER, 0x0); 3105 I915_WRITE(SDEIIR, I915_READ(SDEIIR)); 3106 if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev)) 3107 I915_WRITE(SERR_INT, I915_READ(SERR_INT)); 3108 } 3109 3110 static void i8xx_irq_preinstall(struct drm_device * dev) 3111 { 3112 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3113 int pipe; 3114 3115 atomic_set(&dev_priv->irq_received, 0); 3116 3117 for_each_pipe(pipe) 3118 I915_WRITE(PIPESTAT(pipe), 0); 3119 I915_WRITE16(IMR, 0xffff); 3120 I915_WRITE16(IER, 0x0); 3121 POSTING_READ16(IER); 3122 } 3123 3124 static int i8xx_irq_postinstall(struct drm_device *dev) 3125 { 3126 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3127 unsigned long irqflags; 3128 3129 I915_WRITE16(EMR, 3130 ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); 3131 3132 /* Unmask the interrupts that we always want on. 
*/ 3133 dev_priv->irq_mask = 3134 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3135 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3136 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3137 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | 3138 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 3139 I915_WRITE16(IMR, dev_priv->irq_mask); 3140 3141 I915_WRITE16(IER, 3142 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3143 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3144 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT | 3145 I915_USER_INTERRUPT); 3146 POSTING_READ16(IER); 3147 3148 /* Interrupt setup is already guaranteed to be single-threaded, this is 3149 * just to make the assert_spin_locked check happy. */ 3150 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3151 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_ENABLE); 3152 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_ENABLE); 3153 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3154 3155 return 0; 3156 } 3157 3158 /* 3159 * Returns true when a page flip has completed. 3160 */ 3161 static bool i8xx_handle_vblank(struct drm_device *dev, 3162 int plane, int pipe, u32 iir) 3163 { 3164 drm_i915_private_t *dev_priv = dev->dev_private; 3165 u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane); 3166 3167 if (!drm_handle_vblank(dev, pipe)) 3168 return false; 3169 3170 if ((iir & flip_pending) == 0) 3171 return false; 3172 3173 intel_prepare_page_flip(dev, plane); 3174 3175 /* We detect FlipDone by looking for the change in PendingFlip from '1' 3176 * to '0' on the following vblank, i.e. IIR has the Pendingflip 3177 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence 3178 * the flip is completed (no longer pending). Since this doesn't raise 3179 * an interrupt per se, we watch for the change at vblank. 3180 */ 3181 if (I915_READ16(ISR) & flip_pending) 3182 return false; 3183 3184 intel_finish_page_flip(dev, pipe); 3185 3186 return true; 3187 } 3188 3189 static irqreturn_t i8xx_irq_handler(int irq, void *arg) 3190 { 3191 struct drm_device *dev = (struct drm_device *) arg; 3192 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3193 u16 iir, new_iir; 3194 u32 pipe_stats[2]; 3195 unsigned long irqflags; 3196 int pipe; 3197 u16 flip_mask = 3198 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3199 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 3200 3201 atomic_inc(&dev_priv->irq_received); 3202 3203 iir = I915_READ16(IIR); 3204 if (iir == 0) 3205 return IRQ_NONE; 3206 3207 while (iir & ~flip_mask) { 3208 /* Can't rely on pipestat interrupt bit in iir as it might 3209 * have been cleared after the pipestat interrupt was received. 3210 * It doesn't set the bit in iir again, but it still produces 3211 * interrupts (for non-MSI). 
3212 */ 3213 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3214 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 3215 i915_handle_error(dev, false); 3216 3217 for_each_pipe(pipe) { 3218 int reg = PIPESTAT(pipe); 3219 pipe_stats[pipe] = I915_READ(reg); 3220 3221 /* 3222 * Clear the PIPE*STAT regs before the IIR 3223 */ 3224 if (pipe_stats[pipe] & 0x8000ffff) { 3225 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 3226 DRM_DEBUG_DRIVER("pipe %c underrun\n", 3227 pipe_name(pipe)); 3228 I915_WRITE(reg, pipe_stats[pipe]); 3229 } 3230 } 3231 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3232 3233 I915_WRITE16(IIR, iir & ~flip_mask); 3234 new_iir = I915_READ16(IIR); /* Flush posted writes */ 3235 3236 i915_update_dri1_breadcrumb(dev); 3237 3238 if (iir & I915_USER_INTERRUPT) 3239 notify_ring(dev, &dev_priv->ring[RCS]); 3240 3241 for_each_pipe(pipe) { 3242 int plane = pipe; 3243 if (HAS_FBC(dev)) 3244 plane = !plane; 3245 3246 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS && 3247 i8xx_handle_vblank(dev, plane, pipe, iir)) 3248 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane); 3249 3250 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 3251 i9xx_pipe_crc_irq_handler(dev, pipe); 3252 } 3253 3254 iir = new_iir; 3255 } 3256 3257 return IRQ_HANDLED; 3258 } 3259 3260 static void i8xx_irq_uninstall(struct drm_device * dev) 3261 { 3262 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3263 int pipe; 3264 3265 for_each_pipe(pipe) { 3266 /* Clear enable bits; then clear status bits */ 3267 I915_WRITE(PIPESTAT(pipe), 0); 3268 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); 3269 } 3270 I915_WRITE16(IMR, 0xffff); 3271 I915_WRITE16(IER, 0x0); 3272 I915_WRITE16(IIR, I915_READ16(IIR)); 3273 } 3274 3275 static void i915_irq_preinstall(struct drm_device * dev) 3276 { 3277 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3278 int pipe; 3279 3280 atomic_set(&dev_priv->irq_received, 0); 3281 3282 if (I915_HAS_HOTPLUG(dev)) { 3283 I915_WRITE(PORT_HOTPLUG_EN, 0); 3284 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3285 } 3286 3287 I915_WRITE16(HWSTAM, 0xeffe); 3288 for_each_pipe(pipe) 3289 I915_WRITE(PIPESTAT(pipe), 0); 3290 I915_WRITE(IMR, 0xffffffff); 3291 I915_WRITE(IER, 0x0); 3292 POSTING_READ(IER); 3293 } 3294 3295 static int i915_irq_postinstall(struct drm_device *dev) 3296 { 3297 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3298 u32 enable_mask; 3299 unsigned long irqflags; 3300 3301 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); 3302 3303 /* Unmask the interrupts that we always want on. */ 3304 dev_priv->irq_mask = 3305 ~(I915_ASLE_INTERRUPT | 3306 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3307 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3308 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3309 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | 3310 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 3311 3312 enable_mask = 3313 I915_ASLE_INTERRUPT | 3314 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3315 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3316 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT | 3317 I915_USER_INTERRUPT; 3318 3319 if (I915_HAS_HOTPLUG(dev)) { 3320 I915_WRITE(PORT_HOTPLUG_EN, 0); 3321 POSTING_READ(PORT_HOTPLUG_EN); 3322 3323 /* Enable in IER... 
*/ 3324 enable_mask |= I915_DISPLAY_PORT_INTERRUPT; 3325 /* and unmask in IMR */ 3326 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT; 3327 } 3328 3329 I915_WRITE(IMR, dev_priv->irq_mask); 3330 I915_WRITE(IER, enable_mask); 3331 POSTING_READ(IER); 3332 3333 i915_enable_asle_pipestat(dev); 3334 3335 /* Interrupt setup is already guaranteed to be single-threaded, this is 3336 * just to make the assert_spin_locked check happy. */ 3337 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3338 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_ENABLE); 3339 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_ENABLE); 3340 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3341 3342 return 0; 3343 } 3344 3345 /* 3346 * Returns true when a page flip has completed. 3347 */ 3348 static bool i915_handle_vblank(struct drm_device *dev, 3349 int plane, int pipe, u32 iir) 3350 { 3351 drm_i915_private_t *dev_priv = dev->dev_private; 3352 u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane); 3353 3354 if (!drm_handle_vblank(dev, pipe)) 3355 return false; 3356 3357 if ((iir & flip_pending) == 0) 3358 return false; 3359 3360 intel_prepare_page_flip(dev, plane); 3361 3362 /* We detect FlipDone by looking for the change in PendingFlip from '1' 3363 * to '0' on the following vblank, i.e. IIR has the Pendingflip 3364 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence 3365 * the flip is completed (no longer pending). Since this doesn't raise 3366 * an interrupt per se, we watch for the change at vblank. 3367 */ 3368 if (I915_READ(ISR) & flip_pending) 3369 return false; 3370 3371 intel_finish_page_flip(dev, pipe); 3372 3373 return true; 3374 } 3375 3376 static irqreturn_t i915_irq_handler(int irq, void *arg) 3377 { 3378 struct drm_device *dev = (struct drm_device *) arg; 3379 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3380 u32 iir, new_iir, pipe_stats[I915_MAX_PIPES]; 3381 unsigned long irqflags; 3382 u32 flip_mask = 3383 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3384 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 3385 int pipe, ret = IRQ_NONE; 3386 3387 atomic_inc(&dev_priv->irq_received); 3388 3389 iir = I915_READ(IIR); 3390 do { 3391 bool irq_received = (iir & ~flip_mask) != 0; 3392 bool blc_event = false; 3393 3394 /* Can't rely on pipestat interrupt bit in iir as it might 3395 * have been cleared after the pipestat interrupt was received. 3396 * It doesn't set the bit in iir again, but it still produces 3397 * interrupts (for non-MSI). 3398 */ 3399 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3400 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 3401 i915_handle_error(dev, false); 3402 3403 for_each_pipe(pipe) { 3404 int reg = PIPESTAT(pipe); 3405 pipe_stats[pipe] = I915_READ(reg); 3406 3407 /* Clear the PIPE*STAT regs before the IIR */ 3408 if (pipe_stats[pipe] & 0x8000ffff) { 3409 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 3410 DRM_DEBUG_DRIVER("pipe %c underrun\n", 3411 pipe_name(pipe)); 3412 I915_WRITE(reg, pipe_stats[pipe]); 3413 irq_received = true; 3414 } 3415 } 3416 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3417 3418 if (!irq_received) 3419 break; 3420 3421 /* Consume port. 
Then clear IIR or we'll miss events */ 3422 if ((I915_HAS_HOTPLUG(dev)) && 3423 (iir & I915_DISPLAY_PORT_INTERRUPT)) { 3424 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); 3425 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; 3426 3427 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", 3428 hotplug_status); 3429 3430 intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915); 3431 3432 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); 3433 POSTING_READ(PORT_HOTPLUG_STAT); 3434 } 3435 3436 I915_WRITE(IIR, iir & ~flip_mask); 3437 new_iir = I915_READ(IIR); /* Flush posted writes */ 3438 3439 if (iir & I915_USER_INTERRUPT) 3440 notify_ring(dev, &dev_priv->ring[RCS]); 3441 3442 for_each_pipe(pipe) { 3443 int plane = pipe; 3444 if (HAS_FBC(dev)) 3445 plane = !plane; 3446 3447 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS && 3448 i915_handle_vblank(dev, plane, pipe, iir)) 3449 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane); 3450 3451 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 3452 blc_event = true; 3453 3454 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 3455 i9xx_pipe_crc_irq_handler(dev, pipe); 3456 } 3457 3458 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 3459 intel_opregion_asle_intr(dev); 3460 3461 /* With MSI, interrupts are only generated when iir 3462 * transitions from zero to nonzero. If another bit got 3463 * set while we were handling the existing iir bits, then 3464 * we would never get another interrupt. 3465 * 3466 * This is fine on non-MSI as well, as if we hit this path 3467 * we avoid exiting the interrupt handler only to generate 3468 * another one. 3469 * 3470 * Note that for MSI this could cause a stray interrupt report 3471 * if an interrupt landed in the time between writing IIR and 3472 * the posting read. This should be rare enough to never 3473 * trigger the 99% of 100,000 interrupts test for disabling 3474 * stray interrupts. 
3475 */ 3476 ret = IRQ_HANDLED; 3477 iir = new_iir; 3478 } while (iir & ~flip_mask); 3479 3480 i915_update_dri1_breadcrumb(dev); 3481 3482 return ret; 3483 } 3484 3485 static void i915_irq_uninstall(struct drm_device * dev) 3486 { 3487 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3488 int pipe; 3489 3490 del_timer_sync(&dev_priv->hotplug_reenable_timer); 3491 3492 if (I915_HAS_HOTPLUG(dev)) { 3493 I915_WRITE(PORT_HOTPLUG_EN, 0); 3494 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3495 } 3496 3497 I915_WRITE16(HWSTAM, 0xffff); 3498 for_each_pipe(pipe) { 3499 /* Clear enable bits; then clear status bits */ 3500 I915_WRITE(PIPESTAT(pipe), 0); 3501 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); 3502 } 3503 I915_WRITE(IMR, 0xffffffff); 3504 I915_WRITE(IER, 0x0); 3505 3506 I915_WRITE(IIR, I915_READ(IIR)); 3507 } 3508 3509 static void i965_irq_preinstall(struct drm_device * dev) 3510 { 3511 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3512 int pipe; 3513 3514 atomic_set(&dev_priv->irq_received, 0); 3515 3516 I915_WRITE(PORT_HOTPLUG_EN, 0); 3517 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3518 3519 I915_WRITE(HWSTAM, 0xeffe); 3520 for_each_pipe(pipe) 3521 I915_WRITE(PIPESTAT(pipe), 0); 3522 I915_WRITE(IMR, 0xffffffff); 3523 I915_WRITE(IER, 0x0); 3524 POSTING_READ(IER); 3525 } 3526 3527 static int i965_irq_postinstall(struct drm_device *dev) 3528 { 3529 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3530 u32 enable_mask; 3531 u32 error_mask; 3532 unsigned long irqflags; 3533 3534 /* Unmask the interrupts that we always want on. */ 3535 dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT | 3536 I915_DISPLAY_PORT_INTERRUPT | 3537 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3538 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3539 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3540 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | 3541 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 3542 3543 enable_mask = ~dev_priv->irq_mask; 3544 enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3545 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT); 3546 enable_mask |= I915_USER_INTERRUPT; 3547 3548 if (IS_G4X(dev)) 3549 enable_mask |= I915_BSD_USER_INTERRUPT; 3550 3551 /* Interrupt setup is already guaranteed to be single-threaded, this is 3552 * just to make the assert_spin_locked check happy. */ 3553 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3554 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_EVENT_ENABLE); 3555 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_ENABLE); 3556 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_ENABLE); 3557 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3558 3559 /* 3560 * Enable some error detection, note the instruction error mask 3561 * bit is reserved, so we leave it masked. 
3562 */ 3563 if (IS_G4X(dev)) { 3564 error_mask = ~(GM45_ERROR_PAGE_TABLE | 3565 GM45_ERROR_MEM_PRIV | 3566 GM45_ERROR_CP_PRIV | 3567 I915_ERROR_MEMORY_REFRESH); 3568 } else { 3569 error_mask = ~(I915_ERROR_PAGE_TABLE | 3570 I915_ERROR_MEMORY_REFRESH); 3571 } 3572 I915_WRITE(EMR, error_mask); 3573 3574 I915_WRITE(IMR, dev_priv->irq_mask); 3575 I915_WRITE(IER, enable_mask); 3576 POSTING_READ(IER); 3577 3578 I915_WRITE(PORT_HOTPLUG_EN, 0); 3579 POSTING_READ(PORT_HOTPLUG_EN); 3580 3581 i915_enable_asle_pipestat(dev); 3582 3583 return 0; 3584 } 3585 3586 static void i915_hpd_irq_setup(struct drm_device *dev) 3587 { 3588 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3589 struct drm_mode_config *mode_config = &dev->mode_config; 3590 struct intel_encoder *intel_encoder; 3591 u32 hotplug_en; 3592 3593 assert_spin_locked(&dev_priv->irq_lock); 3594 3595 if (I915_HAS_HOTPLUG(dev)) { 3596 hotplug_en = I915_READ(PORT_HOTPLUG_EN); 3597 hotplug_en &= ~HOTPLUG_INT_EN_MASK; 3598 /* Note HDMI and DP share hotplug bits */ 3599 /* enable bits are the same for all generations */ 3600 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head) 3601 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) 3602 hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin]; 3603 /* Programming the CRT detection parameters tends 3604 to generate a spurious hotplug event about three 3605 seconds later. So just do it once. 3606 */ 3607 if (IS_G4X(dev)) 3608 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64; 3609 hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK; 3610 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; 3611 3612 /* Ignore TV since it's buggy */ 3613 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); 3614 } 3615 } 3616 3617 static irqreturn_t i965_irq_handler(int irq, void *arg) 3618 { 3619 struct drm_device *dev = (struct drm_device *) arg; 3620 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3621 u32 iir, new_iir; 3622 u32 pipe_stats[I915_MAX_PIPES]; 3623 unsigned long irqflags; 3624 int irq_received; 3625 int ret = IRQ_NONE, pipe; 3626 u32 flip_mask = 3627 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3628 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 3629 3630 atomic_inc(&dev_priv->irq_received); 3631 3632 iir = I915_READ(IIR); 3633 3634 for (;;) { 3635 bool blc_event = false; 3636 3637 irq_received = (iir & ~flip_mask) != 0; 3638 3639 /* Can't rely on pipestat interrupt bit in iir as it might 3640 * have been cleared after the pipestat interrupt was received. 3641 * It doesn't set the bit in iir again, but it still produces 3642 * interrupts (for non-MSI). 3643 */ 3644 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3645 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 3646 i915_handle_error(dev, false); 3647 3648 for_each_pipe(pipe) { 3649 int reg = PIPESTAT(pipe); 3650 pipe_stats[pipe] = I915_READ(reg); 3651 3652 /* 3653 * Clear the PIPE*STAT regs before the IIR 3654 */ 3655 if (pipe_stats[pipe] & 0x8000ffff) { 3656 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 3657 DRM_DEBUG_DRIVER("pipe %c underrun\n", 3658 pipe_name(pipe)); 3659 I915_WRITE(reg, pipe_stats[pipe]); 3660 irq_received = 1; 3661 } 3662 } 3663 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3664 3665 if (!irq_received) 3666 break; 3667 3668 ret = IRQ_HANDLED; 3669 3670 /* Consume port. 

static irqreturn_t i965_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, new_iir;
	u32 pipe_stats[I915_MAX_PIPES];
	unsigned long irqflags;
	int irq_received;
	int ret = IRQ_NONE, pipe;
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

	atomic_inc(&dev_priv->irq_received);

	iir = I915_READ(IIR);

	for (;;) {
		bool blc_event = false;

		irq_received = (iir & ~flip_mask) != 0;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = 1;
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		if (!irq_received)
			break;

		ret = IRQ_HANDLED;

		/* Consume port.  Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
			u32 hotplug_trigger = hotplug_status & (IS_G4X(dev) ?
								HOTPLUG_INT_STATUS_G4X :
								HOTPLUG_INT_STATUS_I915);

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);

			intel_hpd_irq_handler(dev, hotplug_trigger,
					      IS_G4X(dev) ? hpd_status_g4x : hpd_status_i915);

			if (IS_G4X(dev) &&
			    (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X))
				dp_aux_irq_handler(dev);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			I915_READ(PORT_HOTPLUG_STAT);
		}

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);
		if (iir & I915_BSD_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[VCS]);

		for_each_pipe(pipe) {
			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev, pipe, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev, pipe);
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
			gmbus_irq_handler(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero.  If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read.  This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		iir = new_iir;
	}

	i915_update_dri1_breadcrumb(dev);

	return ret;
}
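
/*
 * Illustrative sketch only (not called by the driver): the IIR handling
 * pattern used in i965_irq_handler() above, stripped of the actual event
 * servicing.  IIR is acked before the sources are serviced and then re-read,
 * so that with MSI a bit that becomes set while we are still in the handler
 * produces a fresh zero -> nonzero transition.
 */
static inline void example_iir_ack_loop(drm_i915_private_t *dev_priv)
{
	u32 iir, new_iir;

	iir = I915_READ(IIR);
	while (iir) {
		I915_WRITE(IIR, iir);		/* ack what we are about to service */
		new_iir = I915_READ(IIR);	/* flush posted write, catch new bits */
		/* ... service the sources recorded in iir here ... */
		iir = new_iir;
	}
}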

static void i965_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	del_timer_sync(&dev_priv->hotplug_reenable_timer);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xffffffff);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe),
			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
	I915_WRITE(IIR, I915_READ(IIR));
}

static void i915_reenable_hotplug_timer_func(unsigned long data)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *)data;
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	unsigned long irqflags;
	int i;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
		struct drm_connector *connector;

		if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED)
			continue;

		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;

		list_for_each_entry(connector, &mode_config->connector_list, head) {
			struct intel_connector *intel_connector = to_intel_connector(connector);

			if (intel_connector->encoder->hpd_pin == i) {
				if (connector->polled != intel_connector->polled)
					DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
							 drm_get_connector_name(connector));
				connector->polled = intel_connector->polled;
				if (!connector->polled)
					connector->polled = DRM_CONNECTOR_POLL_HPD;
			}
		}
	}
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
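
/*
 * Illustrative sketch (assumption -- the storm-detection path that arms this
 * timer is not part of this excerpt): when a hotplug pin is marked
 * HPD_DISABLED after an interrupt storm, the re-enable timer above would be
 * armed roughly like this.  EXAMPLE_REENABLE_DELAY_MS is a hypothetical name.
 */
#define EXAMPLE_REENABLE_DELAY_MS	(2 * 60 * 1000)

static inline void example_schedule_hpd_reenable(drm_i915_private_t *dev_priv)
{
	mod_timer(&dev_priv->hotplug_reenable_timer,
		  jiffies + msecs_to_jiffies(EXAMPLE_REENABLE_DELAY_MS));
}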

void intel_irq_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
	INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);

	setup_timer(&dev_priv->gpu_error.hangcheck_timer,
		    i915_hangcheck_elapsed,
		    (unsigned long) dev);
	setup_timer(&dev_priv->hotplug_reenable_timer, i915_reenable_hotplug_timer_func,
		    (unsigned long) dev_priv);

	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);

	if (IS_GEN2(dev)) {
		dev->max_vblank_count = 0;
		dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
	} else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
	} else {
		dev->driver->get_vblank_counter = i915_get_vblank_counter;
		dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
		dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
	}

	if (IS_VALLEYVIEW(dev)) {
		dev->driver->irq_handler = valleyview_irq_handler;
		dev->driver->irq_preinstall = valleyview_irq_preinstall;
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
		dev->driver->irq_uninstall = valleyview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (IS_GEN8(dev)) {
		dev->driver->irq_handler = gen8_irq_handler;
		dev->driver->irq_preinstall = gen8_irq_preinstall;
		dev->driver->irq_postinstall = gen8_irq_postinstall;
		dev->driver->irq_uninstall = gen8_irq_uninstall;
		dev->driver->enable_vblank = gen8_enable_vblank;
		dev->driver->disable_vblank = gen8_disable_vblank;
		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
	} else if (HAS_PCH_SPLIT(dev)) {
		dev->driver->irq_handler = ironlake_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_preinstall;
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ironlake_enable_vblank;
		dev->driver->disable_vblank = ironlake_disable_vblank;
		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
	} else {
		if (INTEL_INFO(dev)->gen == 2) {
			dev->driver->irq_preinstall = i8xx_irq_preinstall;
			dev->driver->irq_postinstall = i8xx_irq_postinstall;
			dev->driver->irq_handler = i8xx_irq_handler;
			dev->driver->irq_uninstall = i8xx_irq_uninstall;
		} else if (INTEL_INFO(dev)->gen == 3) {
			dev->driver->irq_preinstall = i915_irq_preinstall;
			dev->driver->irq_postinstall = i915_irq_postinstall;
			dev->driver->irq_uninstall = i915_irq_uninstall;
			dev->driver->irq_handler = i915_irq_handler;
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		} else {
			dev->driver->irq_preinstall = i965_irq_preinstall;
			dev->driver->irq_postinstall = i965_irq_postinstall;
			dev->driver->irq_uninstall = i965_irq_uninstall;
			dev->driver->irq_handler = i965_irq_handler;
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		}
		dev->driver->enable_vblank = i915_enable_vblank;
		dev->driver->disable_vblank = i915_disable_vblank;
	}
}
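
/*
 * Illustrative sketch only (not part of the driver): intel_irq_init() above
 * reports a 24-bit max_vblank_count on older parts because their frame
 * counter is only 24 bits wide.  A consumer of such a counter has to diff it
 * modulo its width, roughly like this:
 */
static inline u32 example_vblank_diff(u32 cur, u32 last, u32 max_vblank_count)
{
	/* max_vblank_count is 2^n - 1, so the AND handles wraparound. */
	return (cur - last) & max_vblank_count;
}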

void intel_hpd_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_connector *connector;
	unsigned long irqflags;
	int i;

	for (i = 1; i < HPD_NUM_PINS; i++) {
		dev_priv->hpd_stats[i].hpd_cnt = 0;
		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
	}
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		struct intel_connector *intel_connector = to_intel_connector(connector);
		connector->polled = intel_connector->polled;
		if (!connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
			connector->polled = DRM_CONNECTOR_POLL_HPD;
	}

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked checks happy. */
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

/* Disable interrupts so we can allow Package C8+. */
void hsw_pc8_disable_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	dev_priv->pc8.regsave.deimr = I915_READ(DEIMR);
	dev_priv->pc8.regsave.sdeimr = I915_READ(SDEIMR);
	dev_priv->pc8.regsave.gtimr = I915_READ(GTIMR);
	dev_priv->pc8.regsave.gtier = I915_READ(GTIER);
	dev_priv->pc8.regsave.gen6_pmimr = I915_READ(GEN6_PMIMR);

	ironlake_disable_display_irq(dev_priv, 0xffffffff);
	ibx_disable_display_interrupt(dev_priv, 0xffffffff);
	ilk_disable_gt_irq(dev_priv, 0xffffffff);
	snb_disable_pm_irq(dev_priv, 0xffffffff);

	dev_priv->pc8.irqs_disabled = true;

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

/* Restore interrupts so we can recover from Package C8+. */
void hsw_pc8_restore_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;
	uint32_t val;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	val = I915_READ(DEIMR);
	WARN(val != 0xffffffff, "DEIMR is 0x%08x\n", val);

	val = I915_READ(SDEIMR);
	WARN(val != 0xffffffff, "SDEIMR is 0x%08x\n", val);

	val = I915_READ(GTIMR);
	WARN(val != 0xffffffff, "GTIMR is 0x%08x\n", val);

	val = I915_READ(GEN6_PMIMR);
	WARN(val != 0xffffffff, "GEN6_PMIMR is 0x%08x\n", val);

	dev_priv->pc8.irqs_disabled = false;

	ironlake_enable_display_irq(dev_priv, ~dev_priv->pc8.regsave.deimr);
	ibx_enable_display_interrupt(dev_priv, ~dev_priv->pc8.regsave.sdeimr);
	ilk_enable_gt_irq(dev_priv, ~dev_priv->pc8.regsave.gtimr);
	snb_enable_pm_irq(dev_priv, ~dev_priv->pc8.regsave.gen6_pmimr);
	I915_WRITE(GTIER, dev_priv->pc8.regsave.gtier);

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
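
/*
 * Illustrative sketch only (not part of the driver): IMR-style registers
 * store *masked* bits, which is why hsw_pc8_restore_interrupts() above passes
 * the complement of each saved mask to the enable helpers.
 */
static inline u32 example_enabled_bits_from_saved_imr(u32 saved_imr)
{
	return ~saved_imr;	/* bits that were unmasked before PC8 entry */
}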