/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

static const u32 hpd_ibx[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_mask_i915[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_gen4[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

/* For display hotplug interrupt */
static void
ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->pc8.irqs_disabled) {
		WARN(1, "IRQs disabled\n");
		dev_priv->pc8.regsave.deimr &= ~mask;
		return;
	}

	if ((dev_priv->irq_mask & mask) != 0) {
		dev_priv->irq_mask &= ~mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

static void
ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->pc8.irqs_disabled) {
		WARN(1, "IRQs disabled\n");
		dev_priv->pc8.regsave.deimr |= mask;
		return;
	}

	if ((dev_priv->irq_mask & mask) != mask) {
		dev_priv->irq_mask |= mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->pc8.irqs_disabled) {
		WARN(1, "IRQs disabled\n");
		dev_priv->pc8.regsave.gtimr &= ~interrupt_mask;
		dev_priv->pc8.regsave.gtimr |= (~enabled_irq_mask &
						interrupt_mask);
		return;
	}

	dev_priv->gt_irq_mask &= ~interrupt_mask;
	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	POSTING_READ(GTIMR);
}

void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, mask);
}

void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, 0);
}

/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->pc8.irqs_disabled) {
		WARN(1, "IRQs disabled\n");
		dev_priv->pc8.regsave.gen6_pmimr &= ~interrupt_mask;
		dev_priv->pc8.regsave.gen6_pmimr |= (~enabled_irq_mask &
						     interrupt_mask);
		return;
	}

	new_val = dev_priv->pm_irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_irq_mask) {
		dev_priv->pm_irq_mask = new_val;
		I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask);
		POSTING_READ(GEN6_PMIMR);
	}
}

void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, mask);
}

void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, 0);
}

static bool ivb_can_enable_err_int(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc;
	enum pipe pipe;

	assert_spin_locked(&dev_priv->irq_lock);

	for_each_pipe(pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		if (crtc->cpu_fifo_underrun_disabled)
			return false;
	}

	return true;
}

static bool cpt_can_enable_serr_int(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;
	struct intel_crtc *crtc;

	assert_spin_locked(&dev_priv->irq_lock);

	for_each_pipe(pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		if (crtc->pch_fifo_underrun_disabled)
			return false;
	}

	return true;
}

static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
						 enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN :
					  DE_PIPEB_FIFO_UNDERRUN;

	if (enable)
		ironlake_enable_display_irq(dev_priv, bit);
	else
		ironlake_disable_display_irq(dev_priv, bit);
}

static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
						  enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	if (enable) {
		I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));

		if (!ivb_can_enable_err_int(dev))
			return;

		ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
	} else {
		bool was_enabled = !(I915_READ(DEIMR) & DE_ERR_INT_IVB);

		/* Change the state _after_ we've read out the current one. */
		ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);

		if (!was_enabled &&
		    (I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe))) {
			DRM_DEBUG_KMS("uncleared fifo underrun on pipe %c\n",
				      pipe_name(pipe));
		}
	}
}

static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev,
						  enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	assert_spin_locked(&dev_priv->irq_lock);

	if (enable)
		dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_FIFO_UNDERRUN;
	else
		dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_FIFO_UNDERRUN;
	I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
	POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
					 uint32_t interrupt_mask,
					 uint32_t enabled_irq_mask)
{
	uint32_t sdeimr = I915_READ(SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->pc8.irqs_disabled &&
	    (interrupt_mask & SDE_HOTPLUG_MASK_CPT)) {
		WARN(1, "IRQs disabled\n");
		dev_priv->pc8.regsave.sdeimr &= ~interrupt_mask;
		dev_priv->pc8.regsave.sdeimr |= (~enabled_irq_mask &
						 interrupt_mask);
		return;
	}

	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}
#define ibx_enable_display_interrupt(dev_priv, bits) \
	ibx_display_interrupt_update((dev_priv), (bits), (bits))
#define ibx_disable_display_interrupt(dev_priv, bits) \
	ibx_display_interrupt_update((dev_priv), (bits), 0)

static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
					    enum transcoder pch_transcoder,
					    bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t bit = (pch_transcoder == TRANSCODER_A) ?
		       SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;

	if (enable)
		ibx_enable_display_interrupt(dev_priv, bit);
	else
		ibx_disable_display_interrupt(dev_priv, bit);
}

static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
					    enum transcoder pch_transcoder,
					    bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (enable) {
		I915_WRITE(SERR_INT,
			   SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));

		if (!cpt_can_enable_serr_int(dev))
			return;

		ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT);
	} else {
		uint32_t tmp = I915_READ(SERR_INT);
		bool was_enabled = !(I915_READ(SDEIMR) & SDE_ERROR_CPT);

		/* Change the state _after_ we've read out the current one. */
		ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT);

		if (!was_enabled &&
		    (tmp & SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder))) {
			DRM_DEBUG_KMS("uncleared pch fifo underrun on pch transcoder %c\n",
				      transcoder_name(pch_transcoder));
		}
	}
}

/**
 * intel_set_cpu_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pipe: pipe
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable CPU fifo underruns for a specific
 * pipe. Notice that on some Gens (e.g. IVB, HSW), disabling FIFO underrun
 * reporting for one pipe may also disable all the other CPU error interrupts
 * for the other pipes, due to the fact that there's just one interrupt
 * mask/enable bit for all the pipes.
 *
 * Returns the previous state of underrun reporting.
 */
bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
					   enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);

	ret = !intel_crtc->cpu_fifo_underrun_disabled;

	if (enable == ret)
		goto done;

	intel_crtc->cpu_fifo_underrun_disabled = !enable;

	if (IS_GEN5(dev) || IS_GEN6(dev))
		ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
	else if (IS_GEN7(dev))
		ivybridge_set_fifo_underrun_reporting(dev, pipe, enable);
	else if (IS_GEN8(dev))
		broadwell_set_fifo_underrun_reporting(dev, pipe, enable);

done:
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
	return ret;
}

/**
 * intel_set_pch_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable PCH fifo underruns for a specific
 * PCH transcoder. Notice that on some PCHs (e.g. CPT/PPT), disabling FIFO
 * underrun reporting for one transcoder may also disable all the other PCH
 * error interrupts for the other transcoders, due to the fact that there's
 * just one interrupt mask/enable bit for all the transcoders.
 *
 * Returns the previous state of underrun reporting.
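 *
 * Note: this function acquires dev_priv->irq_lock internally, so callers
 * must not already hold that lock.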
 */
bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
					   enum transcoder pch_transcoder,
					   bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	unsigned long flags;
	bool ret;

	/*
	 * NOTE: Pre-LPT has a fixed cpu pipe -> pch transcoder mapping, but LPT
	 * has only one pch transcoder A that all pipes can use. To avoid racy
	 * pch transcoder -> pipe lookups from interrupt code simply store the
	 * underrun statistics in crtc A. Since we never expose this anywhere
	 * nor use it outside of the fifo underrun code here using the "wrong"
	 * crtc on LPT won't cause issues.
	 */

	spin_lock_irqsave(&dev_priv->irq_lock, flags);

	ret = !intel_crtc->pch_fifo_underrun_disabled;

	if (enable == ret)
		goto done;

	intel_crtc->pch_fifo_underrun_disabled = !enable;

	if (HAS_PCH_IBX(dev))
		ibx_set_fifo_underrun_reporting(dev, pch_transcoder, enable);
	else
		cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable);

done:
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
	return ret;
}


void
i915_enable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & 0x7fff0000;

	assert_spin_locked(&dev_priv->irq_lock);

	if ((pipestat & mask) == mask)
		return;

	/* Enable the interrupt, clear any pending status */
	pipestat |= mask | (mask >> 16);
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

void
i915_disable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & 0x7fff0000;

	assert_spin_locked(&dev_priv->irq_lock);

	if ((pipestat & mask) == 0)
		return;

	pipestat &= ~mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 */
static void i915_enable_asle_pipestat(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long irqflags;

	if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_ENABLE);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_ENABLE);

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		/* Locking is horribly broken here, but whatever. */
		struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

		return intel_crtc->active;
	} else {
		return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
	}
}

static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe)
{
	/* Gen2 doesn't have a hardware frame counter */
	return 0;
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low, pixel, vbl_start;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		struct intel_crtc *intel_crtc =
			to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
		const struct drm_display_mode *mode =
			&intel_crtc->config.adjusted_mode;

		vbl_start = mode->crtc_vblank_start * mode->crtc_htotal;
	} else {
		enum transcoder cpu_transcoder =
			intel_pipe_to_cpu_transcoder(dev_priv, pipe);
		u32 htotal;

		htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1;
		vbl_start = (I915_READ(VBLANK(cpu_transcoder)) & 0x1fff) + 1;

		vbl_start *= htotal;
	}

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low = I915_READ(low_frame);
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * The frame counter increments at beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */
	return ((high1 << 8) | low) + (pixel >= vbl_start);
}

static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int reg = PIPE_FRMCOUNT_GM45(pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	return I915_READ(reg);
}

/* raw reads, only for fast reads of display block, no need for forcewake etc. */
#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
#define __raw_i915_read16(dev_priv__, reg__) readw((dev_priv__)->regs + (reg__))

static bool intel_pipe_in_vblank_locked(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t status;
	int reg;

	if (IS_VALLEYVIEW(dev)) {
		status = pipe == PIPE_A ?
			I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT :
			I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;

		reg = VLV_ISR;
	} else if (IS_GEN2(dev)) {
		status = pipe == PIPE_A ?
			I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT :
			I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;

		reg = ISR;
	} else if (INTEL_INFO(dev)->gen < 5) {
		status = pipe == PIPE_A ?
			I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT :
			I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;

		reg = ISR;
	} else if (INTEL_INFO(dev)->gen < 7) {
		status = pipe == PIPE_A ?
			DE_PIPEA_VBLANK :
			DE_PIPEB_VBLANK;

		reg = DEISR;
	} else {
		switch (pipe) {
		default:
		case PIPE_A:
			status = DE_PIPEA_VBLANK_IVB;
			break;
		case PIPE_B:
			status = DE_PIPEB_VBLANK_IVB;
			break;
		case PIPE_C:
			status = DE_PIPEC_VBLANK_IVB;
			break;
		}

		reg = DEISR;
	}

	if (IS_GEN2(dev))
		return __raw_i915_read16(dev_priv, reg) & status;
	else
		return __raw_i915_read32(dev_priv, reg) & status;
}

static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
				    int *vpos, int *hpos,
				    ktime_t *stime, ktime_t *etime)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	const struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
	int position;
	int vbl_start, vbl_end, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;
	unsigned long irqflags;

	if (!intel_crtc->active) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	htotal = mode->crtc_htotal;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		if (IS_GEN2(dev))
			position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
		else
			position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

		/*
		 * The scanline counter increments at the leading edge
		 * of hsync, ie. it completely misses the active portion
		 * of the line. Fix up the counter at both edges of vblank
		 * to get a more accurate picture of whether we're in vblank
		 * or not.
		 */
		in_vbl = intel_pipe_in_vblank_locked(dev, pipe);
		if ((in_vbl && position == vbl_start - 1) ||
		    (!in_vbl && position == vbl_end - 1))
			position = (position + 1) % vtotal;
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	in_vbl = position >= vbl_start && position < vbl_end;

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		*vpos = position;
		*hpos = 0;
	} else {
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_INVBL;

	return ret;
}

static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
				     int *max_error,
				     struct timeval *vblank_time,
				     unsigned flags)
{
	struct drm_crtc *crtc;

	if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	if (!crtc->enabled) {
		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags,
						     crtc);
}

static bool intel_hpd_irq_event(struct drm_device *dev,
				struct drm_connector *connector)
{
	enum drm_connector_status old_status;

	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
	old_status = connector->status;

	connector->status = connector->funcs->detect(connector, false);
	if (old_status == connector->status)
		return false;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
		      connector->base.id,
		      drm_get_connector_name(connector),
		      drm_get_connector_status_name(old_status),
		      drm_get_connector_status_name(connector->status));

	return true;
}

/*
 * Handle hotplug events outside the interrupt handler proper.
 */
#define I915_REENABLE_HOTPLUG_DELAY (2*60*1000)

static void i915_hotplug_work_func(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    hotplug_work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_connector *intel_connector;
	struct intel_encoder *intel_encoder;
	struct drm_connector *connector;
	unsigned long irqflags;
	bool hpd_disabled = false;
	bool changed = false;
	u32 hpd_event_bits;

	/* HPD irq before everything is fully set up. */
	if (!dev_priv->enable_hotplug_processing)
		return;

	mutex_lock(&mode_config->mutex);
	DRM_DEBUG_KMS("running encoder hotplug functions\n");

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	hpd_event_bits = dev_priv->hpd_event_bits;
	dev_priv->hpd_event_bits = 0;
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		intel_encoder = intel_connector->encoder;
		if (intel_encoder->hpd_pin > HPD_NONE &&
		    dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
		    connector->polled == DRM_CONNECTOR_POLL_HPD) {
			DRM_INFO("HPD interrupt storm detected on connector %s: "
				 "switching from hotplug detection to polling\n",
				 drm_get_connector_name(connector));
			dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
			connector->polled = DRM_CONNECTOR_POLL_CONNECT
				| DRM_CONNECTOR_POLL_DISCONNECT;
			hpd_disabled = true;
		}
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
				      drm_get_connector_name(connector),
				      intel_encoder->hpd_pin);
		}
	}
	/* if there were no outputs to poll, poll was disabled,
	 * therefore make sure it's enabled when disabling HPD on
	 * some connectors */
	if (hpd_disabled) {
		drm_kms_helper_poll_enable(dev);
		mod_timer(&dev_priv->hotplug_reenable_timer,
			  jiffies + msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
	}

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		intel_encoder = intel_connector->encoder;
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			if (intel_encoder->hot_plug)
				intel_encoder->hot_plug(intel_encoder);
			if (intel_hpd_irq_event(dev, connector))
				changed = true;
		}
	}
	mutex_unlock(&mode_config->mutex);

	if (changed)
		drm_kms_helper_hotplug_event(dev);
}

static void ironlake_rps_change_irq_handler(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;

	spin_lock(&mchdev_lock);

	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	spin_unlock(&mchdev_lock);

	return;
}

static void notify_ring(struct drm_device *dev,
			struct intel_ring_buffer *ring)
{
	if (ring->obj == NULL)
		return;

	trace_i915_gem_request_complete(ring);

	wake_up_all(&ring->irq_queue);
	i915_queue_hangcheck(dev);
}

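/**
 * gen6_pm_rps_work - deferred handler for RPS interrupts
 * @work: workqueue struct
 *
 * Picks up the RPS events latched into rps.pm_iir by gen6_rps_irq_handler()
 * and adjusts the GPU frequency up or down in response to the up/down
 * threshold and down-timeout events.
 */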
static void gen6_pm_rps_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    rps.work);
	u32 pm_iir;
	int new_delay, adj;

	spin_lock_irq(&dev_priv->irq_lock);
	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	/* Make sure not to corrupt PMIMR state used by ringbuffer code */
	snb_enable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS);
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Make sure we didn't queue anything we're not going to process. */
	WARN_ON(pm_iir & ~GEN6_PM_RPS_EVENTS);

	if ((pm_iir & GEN6_PM_RPS_EVENTS) == 0)
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	adj = dev_priv->rps.last_adj;
	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else
			adj = 1;
		new_delay = dev_priv->rps.cur_delay + adj;

		/*
		 * For better performance, jump directly
		 * to RPe if we're below it.
		 */
		if (new_delay < dev_priv->rps.rpe_delay)
			new_delay = dev_priv->rps.rpe_delay;
	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
		if (dev_priv->rps.cur_delay > dev_priv->rps.rpe_delay)
			new_delay = dev_priv->rps.rpe_delay;
		else
			new_delay = dev_priv->rps.min_delay;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
		if (adj < 0)
			adj *= 2;
		else
			adj = -1;
		new_delay = dev_priv->rps.cur_delay + adj;
	} else { /* unknown event */
		new_delay = dev_priv->rps.cur_delay;
	}

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	if (new_delay < (int)dev_priv->rps.min_delay)
		new_delay = dev_priv->rps.min_delay;
	if (new_delay > (int)dev_priv->rps.max_delay)
		new_delay = dev_priv->rps.max_delay;
	dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_delay;

	if (IS_VALLEYVIEW(dev_priv->dev))
		valleyview_set_rps(dev_priv->dev, new_delay);
	else
		gen6_set_rps(dev_priv->dev, new_delay);

	mutex_unlock(&dev_priv->rps.hw_lock);
}


/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * the same row is likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	uint32_t misccpctl;
	unsigned long flags;
	uint8_t slice = 0;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
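	 *
	 * l3_parity.which_slice is a bitmask of L3 slices with a pending
	 * parity error; it is filled in by the parity error interrupt handler
	 * and cleared here as each slice is processed.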
	 */
	mutex_lock(&dev_priv->dev->struct_mutex);

	/* If we've screwed up tracking, just let the interrupt fire again */
	if (WARN_ON(!dev_priv->l3_parity.which_slice))
		goto out;

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		u32 reg;

		slice--;
		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1<<slice);

		reg = GEN7_L3CDERRST1 + (slice * 0x200);

		error_status = I915_READ(reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		POSTING_READ(reg);

		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

		kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj,
				   KOBJ_CHANGE, parity_event);

		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			  slice, row, bank, subbank);

		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
	}

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

out:
	WARN_ON(dev_priv->l3_parity.which_slice);
	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	ilk_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	mutex_unlock(&dev_priv->dev->struct_mutex);
}

static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (!HAS_L3_DPF(dev))
		return;

	spin_lock(&dev_priv->irq_lock);
	ilk_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
	spin_unlock(&dev_priv->irq_lock);

	iir &= GT_PARITY_ERROR(dev);
	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
		dev_priv->l3_parity.which_slice |= 1 << 1;

	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
		dev_priv->l3_parity.which_slice |= 1 << 0;

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}

static void ilk_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & ILK_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
}

static void snb_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{

	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
	if (gt_iir & GT_BLT_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[BCS]);

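	/* Command streamer errors: dump register state and notify userspace
	 * via i915_handle_error(). */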
	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
		      GT_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) {
		DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
		i915_handle_error(dev, false);
	}

	if (gt_iir & GT_PARITY_ERROR(dev))
		ivybridge_parity_error_irq_handler(dev, gt_iir);
}

static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
				       struct drm_i915_private *dev_priv,
				       u32 master_ctl)
{
	u32 rcs, bcs, vcs;
	uint32_t tmp = 0;
	irqreturn_t ret = IRQ_NONE;

	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
		tmp = I915_READ(GEN8_GT_IIR(0));
		if (tmp) {
			ret = IRQ_HANDLED;
			rcs = tmp >> GEN8_RCS_IRQ_SHIFT;
			bcs = tmp >> GEN8_BCS_IRQ_SHIFT;
			if (rcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, &dev_priv->ring[RCS]);
			if (bcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, &dev_priv->ring[BCS]);
			I915_WRITE(GEN8_GT_IIR(0), tmp);
		} else
			DRM_ERROR("The master control interrupt lied (GT0)!\n");
	}

	if (master_ctl & GEN8_GT_VCS1_IRQ) {
		tmp = I915_READ(GEN8_GT_IIR(1));
		if (tmp) {
			ret = IRQ_HANDLED;
			vcs = tmp >> GEN8_VCS1_IRQ_SHIFT;
			if (vcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, &dev_priv->ring[VCS]);
			I915_WRITE(GEN8_GT_IIR(1), tmp);
		} else
			DRM_ERROR("The master control interrupt lied (GT1)!\n");
	}

	if (master_ctl & GEN8_GT_VECS_IRQ) {
		tmp = I915_READ(GEN8_GT_IIR(3));
		if (tmp) {
			ret = IRQ_HANDLED;
			vcs = tmp >> GEN8_VECS_IRQ_SHIFT;
			if (vcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, &dev_priv->ring[VECS]);
			I915_WRITE(GEN8_GT_IIR(3), tmp);
		} else
			DRM_ERROR("The master control interrupt lied (GT3)!\n");
	}

	return ret;
}

#define HPD_STORM_DETECT_PERIOD 1000
#define HPD_STORM_THRESHOLD 5

static inline void intel_hpd_irq_handler(struct drm_device *dev,
					 u32 hotplug_trigger,
					 const u32 *hpd)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i;
	bool storm_detected = false;

	if (!hotplug_trigger)
		return;

	spin_lock(&dev_priv->irq_lock);
	for (i = 1; i < HPD_NUM_PINS; i++) {

		WARN(((hpd[i] & hotplug_trigger) &&
		      dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED),
		     "Received HPD interrupt although disabled\n");

		if (!(hpd[i] & hotplug_trigger) ||
		    dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
			continue;

		dev_priv->hpd_event_bits |= (1 << i);
		if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
				   dev_priv->hpd_stats[i].hpd_last_jiffies
				   + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
			dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
			dev_priv->hpd_stats[i].hpd_cnt = 0;
			DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", i);
		} else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
			dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
			dev_priv->hpd_event_bits &= ~(1 << i);
			DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
			storm_detected = true;
		} else {
			dev_priv->hpd_stats[i].hpd_cnt++;
			DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", i,
				      dev_priv->hpd_stats[i].hpd_cnt);
		}
	}

	if (storm_detected)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock(&dev_priv->irq_lock);

	/*
	 * Our hotplug handler can grab modeset locks (by calling down into the
	 * fb helpers). Hence it must not be run on our own dev-priv->wq work
	 * queue for otherwise the flush_work in the pageflip code will
	 * deadlock.
	 */
	schedule_work(&dev_priv->hotplug_work);
}

static void gmbus_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

static void dp_aux_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

#if defined(CONFIG_DEBUG_FS)
static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
					 uint32_t crc0, uint32_t crc1,
					 uint32_t crc2, uint32_t crc3,
					 uint32_t crc4)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
	struct intel_pipe_crc_entry *entry;
	int head, tail;

	spin_lock(&pipe_crc->lock);

	if (!pipe_crc->entries) {
		spin_unlock(&pipe_crc->lock);
		DRM_ERROR("spurious interrupt\n");
		return;
	}

	head = pipe_crc->head;
	tail = pipe_crc->tail;

	if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
		spin_unlock(&pipe_crc->lock);
		DRM_ERROR("CRC buffer overflowing\n");
		return;
	}

	entry = &pipe_crc->entries[head];

	entry->frame = dev->driver->get_vblank_counter(dev, pipe);
	entry->crc[0] = crc0;
	entry->crc[1] = crc1;
	entry->crc[2] = crc2;
	entry->crc[3] = crc3;
	entry->crc[4] = crc4;

	head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
	pipe_crc->head = head;

	spin_unlock(&pipe_crc->lock);

	wake_up_interruptible(&pipe_crc->wq);
}
#else
static inline void
display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
			     uint32_t crc0, uint32_t crc1,
			     uint32_t crc2, uint32_t crc3,
			     uint32_t crc4) {}
#endif


static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     0, 0, 0, 0);
}

static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
}

static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t res1, res2;

	if (INTEL_INFO(dev)->gen >= 3)
		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
	else
		res1 = 0;

	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
	else
		res2 = 0;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_RED(pipe)),
				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
				     res1, res2);
}

/* The RPS events need forcewake, so we add them to a work queue and mask their
 * IMR bits until the work is done. Other interrupts can be processed without
 * the work queue. */
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	if (pm_iir & GEN6_PM_RPS_EVENTS) {
		spin_lock(&dev_priv->irq_lock);
		dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_RPS_EVENTS;
		snb_disable_pm_irq(dev_priv, pm_iir & GEN6_PM_RPS_EVENTS);
		spin_unlock(&dev_priv->irq_lock);

		queue_work(dev_priv->wq, &dev_priv->rps.work);
	}

	if (HAS_VEBOX(dev_priv->dev)) {
		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
			notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);

		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) {
			DRM_ERROR("VEBOX CS error interrupt 0x%08x\n", pm_iir);
			i915_handle_error(dev_priv->dev, false);
		}
	}
}

static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, gt_iir, pm_iir;
	irqreturn_t ret = IRQ_NONE;
	unsigned long irqflags;
	int pipe;
	u32 pipe_stats[I915_MAX_PIPES];

	atomic_inc(&dev_priv->irq_received);

	while (true) {
		iir = I915_READ(VLV_IIR);
		gt_iir = I915_READ(GTIIR);
		pm_iir = I915_READ(GEN6_PMIIR);

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			goto out;

		ret = IRQ_HANDLED;

		snb_gt_irq_handler(dev, dev_priv, gt_iir);

		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		for_each_pipe(pipe) {
			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
				drm_handle_vblank(dev, pipe);

			if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) {
				intel_prepare_page_flip(dev, pipe);
				intel_finish_page_flip(dev, pipe);
			}

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev, pipe);
		}

		/* Consume port. Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
			u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);

			intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			I915_READ(PORT_HOTPLUG_STAT);
		}

		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
			gmbus_irq_handler(dev);

		if (pm_iir)
			gen6_rps_irq_handler(dev_priv, pm_iir);

		I915_WRITE(GTIIR, gt_iir);
		I915_WRITE(GEN6_PMIIR, pm_iir);
		I915_WRITE(VLV_IIR, iir);
	}

out:
	return ret;
}

static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;

	intel_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx);

	if (pch_iir & SDE_AUDIO_POWER_MASK) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
			       SDE_AUDIO_POWER_SHIFT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
				 port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_HDCP_MASK)
		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");

	if (pch_iir & SDE_AUDIO_TRANS_MASK)
		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");

	if (pch_iir & SDE_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (pch_iir & SDE_FDI_MASK)
		for_each_pipe(pipe)
			DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");

	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");

	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n");

	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n");
}

static void ivb_err_int_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 err_int = I915_READ(GEN7_ERR_INT);
	enum pipe pipe;

	if (err_int & ERR_INT_POISON)
		DRM_ERROR("Poison interrupt\n");

	for_each_pipe(pipe) {
		if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) {
			if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
								  false))
				DRM_DEBUG_DRIVER("Pipe %c FIFO underrun\n",
						 pipe_name(pipe));
		}

		if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
			if (IS_IVYBRIDGE(dev))
				ivb_pipe_crc_irq_handler(dev, pipe);
			else
				hsw_pipe_crc_irq_handler(dev, pipe);
		}
	}

	I915_WRITE(GEN7_ERR_INT, err_int);
}

static void cpt_serr_int_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 serr_int = I915_READ(SERR_INT);

	if (serr_int & SERR_INT_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n");

	if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n");

	if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_C,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder C FIFO underrun\n");

	I915_WRITE(SERR_INT, serr_int);
}

static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;

	intel_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt);

	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
			       SDE_AUDIO_POWER_SHIFT_CPT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
				 port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK_CPT)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS_CPT)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");

	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");

	if (pch_iir & SDE_FDI_MASK_CPT)
		for_each_pipe(pipe)
			DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & SDE_ERROR_CPT)
		cpt_serr_int_handler(dev);
}

static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;

	if (de_iir & DE_AUX_CHANNEL_A)
		dp_aux_irq_handler(dev);

	if (de_iir & DE_GSE)
		intel_opregion_asle_intr(dev);

	if (de_iir & DE_POISON)
		DRM_ERROR("Poison interrupt\n");

	for_each_pipe(pipe) {
		if (de_iir & DE_PIPE_VBLANK(pipe))
			drm_handle_vblank(dev, pipe);

		if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
			if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
				DRM_DEBUG_DRIVER("Pipe %c FIFO underrun\n",
						 pipe_name(pipe));

		if (de_iir & DE_PIPE_CRC_DONE(pipe))
			i9xx_pipe_crc_irq_handler(dev, pipe);

		/* plane/pipes map 1:1 on ilk+ */
		if (de_iir & DE_PLANE_FLIP_DONE(pipe)) {
			intel_prepare_page_flip(dev, pipe);
			intel_finish_page_flip_plane(dev, pipe);
		}
	}

	/* check event from PCH */
	if (de_iir & DE_PCH_EVENT) {
		u32 pch_iir = I915_READ(SDEIIR);

		if (HAS_PCH_CPT(dev))
			cpt_irq_handler(dev, pch_iir);
		else
			ibx_irq_handler(dev, pch_iir);

		/* should clear PCH hotplug event before clear CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}

	if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
		ironlake_rps_change_irq_handler(dev);
}

static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe i;

	if (de_iir & DE_ERR_INT_IVB)
		ivb_err_int_handler(dev);

	if (de_iir & DE_AUX_CHANNEL_A_IVB)
		dp_aux_irq_handler(dev);

	if (de_iir & DE_GSE_IVB)
		intel_opregion_asle_intr(dev);

	for_each_pipe(i) {
		if (de_iir & (DE_PIPE_VBLANK_IVB(i)))
			drm_handle_vblank(dev, i);

		/* plane/pipes map 1:1 on ilk+ */
		if (de_iir & DE_PLANE_FLIP_DONE_IVB(i)) {
			intel_prepare_page_flip(dev, i);
			intel_finish_page_flip_plane(dev, i);
		}
	}

	/* check event from PCH */
	if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
		u32 pch_iir = I915_READ(SDEIIR);

		cpt_irq_handler(dev, pch_iir);

		/* clear PCH hotplug event before clear CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}
}

static irqreturn_t ironlake_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
	irqreturn_t ret = IRQ_NONE;

	atomic_inc(&dev_priv->irq_received);

	/* We get interrupts on unclaimed registers, so check for this before we
	 * do any I915_{READ,WRITE}. */
	intel_uncore_check_errors(dev);

	/* disable master interrupt before clearing iir */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
	POSTING_READ(DEIER);

	/* Disable south interrupts. We'll only write to SDEIIR once, so further
	 * interrupts will be stored on its back queue, and then we'll be
	 * able to process them after we restore SDEIER (as soon as we restore
	 * it, we'll get an interrupt if SDEIIR still has something to process
	 * due to its back queue). */
	if (!HAS_PCH_NOP(dev)) {
		sde_ier = I915_READ(SDEIER);
		I915_WRITE(SDEIER, 0);
		POSTING_READ(SDEIER);
	}

	gt_iir = I915_READ(GTIIR);
	if (gt_iir) {
		if (INTEL_INFO(dev)->gen >= 6)
			snb_gt_irq_handler(dev, dev_priv, gt_iir);
		else
			ilk_gt_irq_handler(dev, dev_priv, gt_iir);
		I915_WRITE(GTIIR, gt_iir);
		ret = IRQ_HANDLED;
	}

	de_iir = I915_READ(DEIIR);
	if (de_iir) {
		if (INTEL_INFO(dev)->gen >= 7)
			ivb_display_irq_handler(dev, de_iir);
		else
			ilk_display_irq_handler(dev, de_iir);
		I915_WRITE(DEIIR, de_iir);
		ret = IRQ_HANDLED;
	}

	if (INTEL_INFO(dev)->gen >= 6) {
		u32 pm_iir = I915_READ(GEN6_PMIIR);
		if (pm_iir) {
			gen6_rps_irq_handler(dev_priv, pm_iir);
			I915_WRITE(GEN6_PMIIR, pm_iir);
			ret = IRQ_HANDLED;
		}
	}

	I915_WRITE(DEIER, de_ier);
	POSTING_READ(DEIER);
	if (!HAS_PCH_NOP(dev)) {
		I915_WRITE(SDEIER, sde_ier);
		POSTING_READ(SDEIER);
	}

	return ret;
}

static irqreturn_t gen8_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 master_ctl;
	irqreturn_t ret = IRQ_NONE;
	uint32_t tmp = 0;
	enum pipe pipe;

	atomic_inc(&dev_priv->irq_received);

	master_ctl = I915_READ(GEN8_MASTER_IRQ);
	master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
	if (!master_ctl)
		return IRQ_NONE;

	I915_WRITE(GEN8_MASTER_IRQ, 0);
	POSTING_READ(GEN8_MASTER_IRQ);

	ret = gen8_gt_irq_handler(dev, dev_priv, master_ctl);

	if (master_ctl & GEN8_DE_MISC_IRQ) {
		tmp = I915_READ(GEN8_DE_MISC_IIR);
		if (tmp & GEN8_DE_MISC_GSE)
			intel_opregion_asle_intr(dev);
		else if (tmp)
			DRM_ERROR("Unexpected DE Misc interrupt\n");
		else
			DRM_ERROR("The master control interrupt lied (DE MISC)!\n");

		if (tmp) {
			I915_WRITE(GEN8_DE_MISC_IIR, tmp);
			ret = IRQ_HANDLED;
		}
	}

	if (master_ctl & GEN8_DE_PORT_IRQ) {
		tmp = I915_READ(GEN8_DE_PORT_IIR);
		if (tmp & GEN8_AUX_CHANNEL_A)
			dp_aux_irq_handler(dev);
		else if (tmp)
			DRM_ERROR("Unexpected DE Port interrupt\n");
		else
			DRM_ERROR("The master control interrupt lied (DE PORT)!\n");

		if (tmp) {
			I915_WRITE(GEN8_DE_PORT_IIR, tmp);
			ret = IRQ_HANDLED;
		}
	}

	for_each_pipe(pipe) {
		uint32_t pipe_iir;

		if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
			continue;

		pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
		if (pipe_iir & GEN8_PIPE_VBLANK)
			drm_handle_vblank(dev, pipe);

		if (pipe_iir & GEN8_PIPE_FLIP_DONE) {
			intel_prepare_page_flip(dev, pipe);
			intel_finish_page_flip_plane(dev, pipe);
		}

		if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
			hsw_pipe_crc_irq_handler(dev, pipe);

		if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN) {
			if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
								  false))
				DRM_DEBUG_DRIVER("Pipe %c FIFO underrun\n",
						 pipe_name(pipe));
		}

		if (pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS) {
			DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
				  pipe_name(pipe),
				  pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS);
		}

		if (pipe_iir) {
			ret = IRQ_HANDLED;
			I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
		} else
			DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
	}

	if (!HAS_PCH_NOP(dev) && master_ctl & GEN8_DE_PCH_IRQ) {
		/*
		 * FIXME(BDW): Assume for now that the new interrupt handling
		 * scheme also closed the SDE interrupt handling race we've seen
		 * on older pch-split platforms. But this needs testing.
		 */
		u32 pch_iir = I915_READ(SDEIIR);

		cpt_irq_handler(dev, pch_iir);

		if (pch_iir) {
			I915_WRITE(SDEIIR, pch_iir);
			ret = IRQ_HANDLED;
		}
	}

	I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
	POSTING_READ(GEN8_MASTER_IRQ);

	return ret;
}

static void i915_error_wake_up(struct drm_i915_private *dev_priv,
			       bool reset_completed)
{
	struct intel_ring_buffer *ring;
	int i;

	/*
	 * Notify all waiters for GPU completion events that reset state has
	 * been changed, and that they need to restart their wait after
	 * checking for potential errors (and bail out to drop locks if there is
	 * a gpu reset pending so that i915_error_work_func can acquire them).
	 */

	/* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
	for_each_ring(ring, dev_priv, i)
		wake_up_all(&ring->irq_queue);

	/* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
	wake_up_all(&dev_priv->pending_flip_queue);

	/*
	 * Signal tasks blocked in i915_gem_wait_for_error that the pending
	 * reset state is cleared.
	 */
	if (reset_completed)
		wake_up_all(&dev_priv->gpu_error.reset_queue);
}

/**
 * i915_error_work_func - do process context error handling work
 * @work: work struct
 *
 * Fire an error uevent so userspace can see that a hang or error
 * was detected.
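 *
 * If a GPU reset is pending, this also performs the reset via i915_reset()
 * and then wakes up everyone waiting on gpu_error.reset_queue.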
1939 */ 1940 static void i915_error_work_func(struct work_struct *work) 1941 { 1942 struct i915_gpu_error *error = container_of(work, struct i915_gpu_error, 1943 work); 1944 drm_i915_private_t *dev_priv = container_of(error, drm_i915_private_t, 1945 gpu_error); 1946 struct drm_device *dev = dev_priv->dev; 1947 char *error_event[] = { I915_ERROR_UEVENT "=1", NULL }; 1948 char *reset_event[] = { I915_RESET_UEVENT "=1", NULL }; 1949 char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL }; 1950 int ret; 1951 1952 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event); 1953 1954 /* 1955 * Note that there's only one work item which does gpu resets, so we 1956 * need not worry about concurrent gpu resets potentially incrementing 1957 * error->reset_counter twice. We only need to take care of another 1958 * racing irq/hangcheck declaring the gpu dead for a second time. A 1959 * quick check for that is good enough: schedule_work ensures the 1960 * correct ordering between hang detection and this work item, and since 1961 * the reset in-progress bit is only ever set by code outside of this 1962 * work we don't need to worry about any other races. 1963 */ 1964 if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) { 1965 DRM_DEBUG_DRIVER("resetting chip\n"); 1966 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, 1967 reset_event); 1968 1969 /* 1970 * All state reset _must_ be completed before we update the 1971 * reset counter, for otherwise waiters might miss the reset 1972 * pending state and not properly drop locks, resulting in 1973 * deadlocks with the reset work. 1974 */ 1975 ret = i915_reset(dev); 1976 1977 intel_display_handle_reset(dev); 1978 1979 if (ret == 0) { 1980 /* 1981 * After all the gem state is reset, increment the reset 1982 * counter and wake up everyone waiting for the reset to 1983 * complete. 1984 * 1985 * Since unlock operations are a one-sided barrier only, 1986 * we need to insert a barrier here to order any seqno 1987 * updates before 1988 * the counter increment. 1989 */ 1990 smp_mb__before_atomic_inc(); 1991 atomic_inc(&dev_priv->gpu_error.reset_counter); 1992 1993 kobject_uevent_env(&dev->primary->kdev->kobj, 1994 KOBJ_CHANGE, reset_done_event); 1995 } else { 1996 atomic_set(&error->reset_counter, I915_WEDGED); 1997 } 1998 1999 /* 2000 * Note: The wake_up also serves as a memory barrier so that 2001 * waiters see the update value of the reset counter atomic_t. 
2002 */ 2003 i915_error_wake_up(dev_priv, true); 2004 } 2005 } 2006 2007 static void i915_report_and_clear_eir(struct drm_device *dev) 2008 { 2009 struct drm_i915_private *dev_priv = dev->dev_private; 2010 uint32_t instdone[I915_NUM_INSTDONE_REG]; 2011 u32 eir = I915_READ(EIR); 2012 int pipe, i; 2013 2014 if (!eir) 2015 return; 2016 2017 pr_err("render error detected, EIR: 0x%08x\n", eir); 2018 2019 i915_get_extra_instdone(dev, instdone); 2020 2021 if (IS_G4X(dev)) { 2022 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) { 2023 u32 ipeir = I915_READ(IPEIR_I965); 2024 2025 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); 2026 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); 2027 for (i = 0; i < ARRAY_SIZE(instdone); i++) 2028 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); 2029 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); 2030 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); 2031 I915_WRITE(IPEIR_I965, ipeir); 2032 POSTING_READ(IPEIR_I965); 2033 } 2034 if (eir & GM45_ERROR_PAGE_TABLE) { 2035 u32 pgtbl_err = I915_READ(PGTBL_ER); 2036 pr_err("page table error\n"); 2037 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err); 2038 I915_WRITE(PGTBL_ER, pgtbl_err); 2039 POSTING_READ(PGTBL_ER); 2040 } 2041 } 2042 2043 if (!IS_GEN2(dev)) { 2044 if (eir & I915_ERROR_PAGE_TABLE) { 2045 u32 pgtbl_err = I915_READ(PGTBL_ER); 2046 pr_err("page table error\n"); 2047 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err); 2048 I915_WRITE(PGTBL_ER, pgtbl_err); 2049 POSTING_READ(PGTBL_ER); 2050 } 2051 } 2052 2053 if (eir & I915_ERROR_MEMORY_REFRESH) { 2054 pr_err("memory refresh error:\n"); 2055 for_each_pipe(pipe) 2056 pr_err("pipe %c stat: 0x%08x\n", 2057 pipe_name(pipe), I915_READ(PIPESTAT(pipe))); 2058 /* pipestat has already been acked */ 2059 } 2060 if (eir & I915_ERROR_INSTRUCTION) { 2061 pr_err("instruction error\n"); 2062 pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM)); 2063 for (i = 0; i < ARRAY_SIZE(instdone); i++) 2064 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); 2065 if (INTEL_INFO(dev)->gen < 4) { 2066 u32 ipeir = I915_READ(IPEIR); 2067 2068 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR)); 2069 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR)); 2070 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD)); 2071 I915_WRITE(IPEIR, ipeir); 2072 POSTING_READ(IPEIR); 2073 } else { 2074 u32 ipeir = I915_READ(IPEIR_I965); 2075 2076 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); 2077 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); 2078 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); 2079 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); 2080 I915_WRITE(IPEIR_I965, ipeir); 2081 POSTING_READ(IPEIR_I965); 2082 } 2083 } 2084 2085 I915_WRITE(EIR, eir); 2086 POSTING_READ(EIR); 2087 eir = I915_READ(EIR); 2088 if (eir) { 2089 /* 2090 * some errors might have become stuck, 2091 * mask them. 2092 */ 2093 DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir); 2094 I915_WRITE(EMR, I915_READ(EMR) | eir); 2095 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 2096 } 2097 } 2098 2099 /** 2100 * i915_handle_error - handle an error interrupt 2101 * @dev: drm device 2102 * 2103 * Do some basic checking of regsiter state at error interrupt time and 2104 * dump it to the syslog. Also call i915_capture_error_state() to make 2105 * sure we get a record and make it available in debugfs. Fire a uevent 2106 * so userspace knows something bad happened (should trigger collection 2107 * of a ring dump etc.). 
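 * If @wedged is true, a reset is marked as in progress and all GPU waiters
 * are woken before the error work is scheduled, so that the work item will
 * actually attempt a GPU reset.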
2108 */ 2109 void i915_handle_error(struct drm_device *dev, bool wedged) 2110 { 2111 struct drm_i915_private *dev_priv = dev->dev_private; 2112 2113 i915_capture_error_state(dev); 2114 i915_report_and_clear_eir(dev); 2115 2116 if (wedged) { 2117 atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG, 2118 &dev_priv->gpu_error.reset_counter); 2119 2120 /* 2121 * Wake up waiting processes so that the reset work function 2122 * i915_error_work_func doesn't deadlock trying to grab various 2123 * locks. By bumping the reset counter first, the woken 2124 * processes will see a reset in progress and back off, 2125 * releasing their locks and then waiting for the reset completion. 2126 * We must do this for _all_ gpu waiters that might hold locks 2127 * that the reset work needs to acquire. 2128 * 2129 * Note: The wake_up serves as the required memory barrier to 2130 * ensure that the waiters see the updated value of the reset 2131 * counter atomic_t. 2132 */ 2133 i915_error_wake_up(dev_priv, false); 2134 } 2135 2136 /* 2137 * Our reset work can grab modeset locks (since it needs to reset the 2138 * state of outstanding pageflips). Hence it must not be run on our own 2139 * dev_priv->wq work queue for otherwise the flush_work in the pageflip 2140 * code will deadlock. 2141 */ 2142 schedule_work(&dev_priv->gpu_error.work); 2143 } 2144 2145 static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe) 2146 { 2147 drm_i915_private_t *dev_priv = dev->dev_private; 2148 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 2149 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2150 struct drm_i915_gem_object *obj; 2151 struct intel_unpin_work *work; 2152 unsigned long flags; 2153 bool stall_detected; 2154 2155 /* Ignore early vblank irqs */ 2156 if (intel_crtc == NULL) 2157 return; 2158 2159 spin_lock_irqsave(&dev->event_lock, flags); 2160 work = intel_crtc->unpin_work; 2161 2162 if (work == NULL || 2163 atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE || 2164 !work->enable_stall_check) { 2165 /* Either the pending flip IRQ arrived, or we're too early.
Don't check */ 2166 spin_unlock_irqrestore(&dev->event_lock, flags); 2167 return; 2168 } 2169 2170 /* Potential stall - if we see that the flip has happened, assume a missed interrupt */ 2171 obj = work->pending_flip_obj; 2172 if (INTEL_INFO(dev)->gen >= 4) { 2173 int dspsurf = DSPSURF(intel_crtc->plane); 2174 stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) == 2175 i915_gem_obj_ggtt_offset(obj); 2176 } else { 2177 int dspaddr = DSPADDR(intel_crtc->plane); 2178 stall_detected = I915_READ(dspaddr) == (i915_gem_obj_ggtt_offset(obj) + 2179 crtc->y * crtc->fb->pitches[0] + 2180 crtc->x * crtc->fb->bits_per_pixel/8); 2181 } 2182 2183 spin_unlock_irqrestore(&dev->event_lock, flags); 2184 2185 if (stall_detected) { 2186 DRM_DEBUG_DRIVER("Pageflip stall detected\n"); 2187 intel_prepare_page_flip(dev, intel_crtc->plane); 2188 } 2189 } 2190 2191 /* Called from drm generic code, passed 'crtc' which 2192 * we use as a pipe index 2193 */ 2194 static int i915_enable_vblank(struct drm_device *dev, int pipe) 2195 { 2196 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2197 unsigned long irqflags; 2198 2199 if (!i915_pipe_enabled(dev, pipe)) 2200 return -EINVAL; 2201 2202 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2203 if (INTEL_INFO(dev)->gen >= 4) 2204 i915_enable_pipestat(dev_priv, pipe, 2205 PIPE_START_VBLANK_INTERRUPT_ENABLE); 2206 else 2207 i915_enable_pipestat(dev_priv, pipe, 2208 PIPE_VBLANK_INTERRUPT_ENABLE); 2209 2210 /* maintain vblank delivery even in deep C-states */ 2211 if (dev_priv->info->gen == 3) 2212 I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS)); 2213 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2214 2215 return 0; 2216 } 2217 2218 static int ironlake_enable_vblank(struct drm_device *dev, int pipe) 2219 { 2220 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2221 unsigned long irqflags; 2222 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? 
DE_PIPE_VBLANK_IVB(pipe) : 2223 DE_PIPE_VBLANK(pipe); 2224 2225 if (!i915_pipe_enabled(dev, pipe)) 2226 return -EINVAL; 2227 2228 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2229 ironlake_enable_display_irq(dev_priv, bit); 2230 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2231 2232 return 0; 2233 } 2234 2235 static int valleyview_enable_vblank(struct drm_device *dev, int pipe) 2236 { 2237 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2238 unsigned long irqflags; 2239 u32 imr; 2240 2241 if (!i915_pipe_enabled(dev, pipe)) 2242 return -EINVAL; 2243 2244 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2245 imr = I915_READ(VLV_IMR); 2246 if (pipe == PIPE_A) 2247 imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT; 2248 else 2249 imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; 2250 I915_WRITE(VLV_IMR, imr); 2251 i915_enable_pipestat(dev_priv, pipe, 2252 PIPE_START_VBLANK_INTERRUPT_ENABLE); 2253 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2254 2255 return 0; 2256 } 2257 2258 static int gen8_enable_vblank(struct drm_device *dev, int pipe) 2259 { 2260 struct drm_i915_private *dev_priv = dev->dev_private; 2261 unsigned long irqflags; 2262 2263 if (!i915_pipe_enabled(dev, pipe)) 2264 return -EINVAL; 2265 2266 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2267 dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK; 2268 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); 2269 POSTING_READ(GEN8_DE_PIPE_IMR(pipe)); 2270 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2271 return 0; 2272 } 2273 2274 /* Called from drm generic code, passed 'crtc' which 2275 * we use as a pipe index 2276 */ 2277 static void i915_disable_vblank(struct drm_device *dev, int pipe) 2278 { 2279 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2280 unsigned long irqflags; 2281 2282 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2283 if (dev_priv->info->gen == 3) 2284 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS)); 2285 2286 i915_disable_pipestat(dev_priv, pipe, 2287 PIPE_VBLANK_INTERRUPT_ENABLE | 2288 PIPE_START_VBLANK_INTERRUPT_ENABLE); 2289 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2290 } 2291 2292 static void ironlake_disable_vblank(struct drm_device *dev, int pipe) 2293 { 2294 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2295 unsigned long irqflags; 2296 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? 
DE_PIPE_VBLANK_IVB(pipe) : 2297 DE_PIPE_VBLANK(pipe); 2298 2299 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2300 ironlake_disable_display_irq(dev_priv, bit); 2301 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2302 } 2303 2304 static void valleyview_disable_vblank(struct drm_device *dev, int pipe) 2305 { 2306 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2307 unsigned long irqflags; 2308 u32 imr; 2309 2310 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2311 i915_disable_pipestat(dev_priv, pipe, 2312 PIPE_START_VBLANK_INTERRUPT_ENABLE); 2313 imr = I915_READ(VLV_IMR); 2314 if (pipe == PIPE_A) 2315 imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT; 2316 else 2317 imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; 2318 I915_WRITE(VLV_IMR, imr); 2319 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2320 } 2321 2322 static void gen8_disable_vblank(struct drm_device *dev, int pipe) 2323 { 2324 struct drm_i915_private *dev_priv = dev->dev_private; 2325 unsigned long irqflags; 2326 2327 if (!i915_pipe_enabled(dev, pipe)) 2328 return; 2329 2330 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2331 dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK; 2332 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); 2333 POSTING_READ(GEN8_DE_PIPE_IMR(pipe)); 2334 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2335 } 2336 2337 static u32 2338 ring_last_seqno(struct intel_ring_buffer *ring) 2339 { 2340 return list_entry(ring->request_list.prev, 2341 struct drm_i915_gem_request, list)->seqno; 2342 } 2343 2344 static bool 2345 ring_idle(struct intel_ring_buffer *ring, u32 seqno) 2346 { 2347 return (list_empty(&ring->request_list) || 2348 i915_seqno_passed(seqno, ring_last_seqno(ring))); 2349 } 2350 2351 static struct intel_ring_buffer * 2352 semaphore_waits_for(struct intel_ring_buffer *ring, u32 *seqno) 2353 { 2354 struct drm_i915_private *dev_priv = ring->dev->dev_private; 2355 u32 cmd, ipehr, acthd, acthd_min; 2356 2357 ipehr = I915_READ(RING_IPEHR(ring->mmio_base)); 2358 if ((ipehr & ~(0x3 << 16)) != 2359 (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | MI_SEMAPHORE_REGISTER)) 2360 return NULL; 2361 2362 /* ACTHD is likely pointing to the dword after the actual command, 2363 * so scan backwards until we find the MBOX. 
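 * We only look back at most four dwords from ACTHD before giving up.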
2364 */ 2365 acthd = intel_ring_get_active_head(ring) & HEAD_ADDR; 2366 acthd_min = max((int)acthd - 3 * 4, 0); 2367 do { 2368 cmd = ioread32(ring->virtual_start + acthd); 2369 if (cmd == ipehr) 2370 break; 2371 2372 acthd -= 4; 2373 if (acthd < acthd_min) 2374 return NULL; 2375 } while (1); 2376 2377 *seqno = ioread32(ring->virtual_start+acthd+4)+1; 2378 return &dev_priv->ring[(ring->id + (((ipehr >> 17) & 1) + 1)) % 3]; 2379 } 2380 2381 static int semaphore_passed(struct intel_ring_buffer *ring) 2382 { 2383 struct drm_i915_private *dev_priv = ring->dev->dev_private; 2384 struct intel_ring_buffer *signaller; 2385 u32 seqno, ctl; 2386 2387 ring->hangcheck.deadlock = true; 2388 2389 signaller = semaphore_waits_for(ring, &seqno); 2390 if (signaller == NULL || signaller->hangcheck.deadlock) 2391 return -1; 2392 2393 /* cursory check for an unkickable deadlock */ 2394 ctl = I915_READ_CTL(signaller); 2395 if (ctl & RING_WAIT_SEMAPHORE && semaphore_passed(signaller) < 0) 2396 return -1; 2397 2398 return i915_seqno_passed(signaller->get_seqno(signaller, false), seqno); 2399 } 2400 2401 static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv) 2402 { 2403 struct intel_ring_buffer *ring; 2404 int i; 2405 2406 for_each_ring(ring, dev_priv, i) 2407 ring->hangcheck.deadlock = false; 2408 } 2409 2410 static enum intel_ring_hangcheck_action 2411 ring_stuck(struct intel_ring_buffer *ring, u32 acthd) 2412 { 2413 struct drm_device *dev = ring->dev; 2414 struct drm_i915_private *dev_priv = dev->dev_private; 2415 u32 tmp; 2416 2417 if (ring->hangcheck.acthd != acthd) 2418 return HANGCHECK_ACTIVE; 2419 2420 if (IS_GEN2(dev)) 2421 return HANGCHECK_HUNG; 2422 2423 /* Is the chip hanging on a WAIT_FOR_EVENT? 2424 * If so we can simply poke the RB_WAIT bit 2425 * and break the hang. This should work on 2426 * all but the second generation chipsets. 2427 */ 2428 tmp = I915_READ_CTL(ring); 2429 if (tmp & RING_WAIT) { 2430 DRM_ERROR("Kicking stuck wait on %s\n", 2431 ring->name); 2432 i915_handle_error(dev, false); 2433 I915_WRITE_CTL(ring, tmp); 2434 return HANGCHECK_KICK; 2435 } 2436 2437 if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) { 2438 switch (semaphore_passed(ring)) { 2439 default: 2440 return HANGCHECK_HUNG; 2441 case 1: 2442 DRM_ERROR("Kicking stuck semaphore on %s\n", 2443 ring->name); 2444 i915_handle_error(dev, false); 2445 I915_WRITE_CTL(ring, tmp); 2446 return HANGCHECK_KICK; 2447 case 0: 2448 return HANGCHECK_WAIT; 2449 } 2450 } 2451 2452 return HANGCHECK_HUNG; 2453 } 2454 2455 /** 2456 * This is called when the chip hasn't reported back with completed 2457 * batchbuffers in a long time. We keep track of per-ring seqno progress and 2458 * if there is no progress, the hangcheck score for that ring is increased. 2459 * Further, acthd is inspected to see if the ring is stuck. If it is, 2460 * we kick the ring. If we see no progress on three subsequent calls 2461 * we assume the chip is wedged and try to fix it by resetting the chip.
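 * The scoring below uses BUSY (+1), KICK (+5) and HUNG (+20); a ring whose
 * score climbs above FIRE (30) is reported as hung.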
2462 */ 2463 static void i915_hangcheck_elapsed(unsigned long data) 2464 { 2465 struct drm_device *dev = (struct drm_device *)data; 2466 drm_i915_private_t *dev_priv = dev->dev_private; 2467 struct intel_ring_buffer *ring; 2468 int i; 2469 int busy_count = 0, rings_hung = 0; 2470 bool stuck[I915_NUM_RINGS] = { 0 }; 2471 #define BUSY 1 2472 #define KICK 5 2473 #define HUNG 20 2474 #define FIRE 30 2475 2476 if (!i915_enable_hangcheck) 2477 return; 2478 2479 for_each_ring(ring, dev_priv, i) { 2480 u32 seqno, acthd; 2481 bool busy = true; 2482 2483 semaphore_clear_deadlocks(dev_priv); 2484 2485 seqno = ring->get_seqno(ring, false); 2486 acthd = intel_ring_get_active_head(ring); 2487 2488 if (ring->hangcheck.seqno == seqno) { 2489 if (ring_idle(ring, seqno)) { 2490 ring->hangcheck.action = HANGCHECK_IDLE; 2491 2492 if (waitqueue_active(&ring->irq_queue)) { 2493 /* Issue a wake-up to catch stuck h/w. */ 2494 if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) { 2495 if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring))) 2496 DRM_ERROR("Hangcheck timer elapsed... %s idle\n", 2497 ring->name); 2498 else 2499 DRM_INFO("Fake missed irq on %s\n", 2500 ring->name); 2501 wake_up_all(&ring->irq_queue); 2502 } 2503 /* Safeguard against driver failure */ 2504 ring->hangcheck.score += BUSY; 2505 } else 2506 busy = false; 2507 } else { 2508 /* We always increment the hangcheck score 2509 * if the ring is busy and still processing 2510 * the same request, so that no single request 2511 * can run indefinitely (such as a chain of 2512 * batches). The only time we do not increment 2513 * the hangcheck score on this ring, if this 2514 * ring is in a legitimate wait for another 2515 * ring. In that case the waiting ring is a 2516 * victim and we want to be sure we catch the 2517 * right culprit. Then every time we do kick 2518 * the ring, add a small increment to the 2519 * score so that we can catch a batch that is 2520 * being repeatedly kicked and so responsible 2521 * for stalling the machine. 2522 */ 2523 ring->hangcheck.action = ring_stuck(ring, 2524 acthd); 2525 2526 switch (ring->hangcheck.action) { 2527 case HANGCHECK_IDLE: 2528 case HANGCHECK_WAIT: 2529 break; 2530 case HANGCHECK_ACTIVE: 2531 ring->hangcheck.score += BUSY; 2532 break; 2533 case HANGCHECK_KICK: 2534 ring->hangcheck.score += KICK; 2535 break; 2536 case HANGCHECK_HUNG: 2537 ring->hangcheck.score += HUNG; 2538 stuck[i] = true; 2539 break; 2540 } 2541 } 2542 } else { 2543 ring->hangcheck.action = HANGCHECK_ACTIVE; 2544 2545 /* Gradually reduce the count so that we catch DoS 2546 * attempts across multiple batches. 2547 */ 2548 if (ring->hangcheck.score > 0) 2549 ring->hangcheck.score--; 2550 } 2551 2552 ring->hangcheck.seqno = seqno; 2553 ring->hangcheck.acthd = acthd; 2554 busy_count += busy; 2555 } 2556 2557 for_each_ring(ring, dev_priv, i) { 2558 if (ring->hangcheck.score > FIRE) { 2559 DRM_INFO("%s on %s\n", 2560 stuck[i] ? 
"stuck" : "no progress", 2561 ring->name); 2562 rings_hung++; 2563 } 2564 } 2565 2566 if (rings_hung) 2567 return i915_handle_error(dev, true); 2568 2569 if (busy_count) 2570 /* Reset timer case chip hangs without another request 2571 * being added */ 2572 i915_queue_hangcheck(dev); 2573 } 2574 2575 void i915_queue_hangcheck(struct drm_device *dev) 2576 { 2577 struct drm_i915_private *dev_priv = dev->dev_private; 2578 if (!i915_enable_hangcheck) 2579 return; 2580 2581 mod_timer(&dev_priv->gpu_error.hangcheck_timer, 2582 round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES)); 2583 } 2584 2585 static void ibx_irq_preinstall(struct drm_device *dev) 2586 { 2587 struct drm_i915_private *dev_priv = dev->dev_private; 2588 2589 if (HAS_PCH_NOP(dev)) 2590 return; 2591 2592 /* south display irq */ 2593 I915_WRITE(SDEIMR, 0xffffffff); 2594 /* 2595 * SDEIER is also touched by the interrupt handler to work around missed 2596 * PCH interrupts. Hence we can't update it after the interrupt handler 2597 * is enabled - instead we unconditionally enable all PCH interrupt 2598 * sources here, but then only unmask them as needed with SDEIMR. 2599 */ 2600 I915_WRITE(SDEIER, 0xffffffff); 2601 POSTING_READ(SDEIER); 2602 } 2603 2604 static void gen5_gt_irq_preinstall(struct drm_device *dev) 2605 { 2606 struct drm_i915_private *dev_priv = dev->dev_private; 2607 2608 /* and GT */ 2609 I915_WRITE(GTIMR, 0xffffffff); 2610 I915_WRITE(GTIER, 0x0); 2611 POSTING_READ(GTIER); 2612 2613 if (INTEL_INFO(dev)->gen >= 6) { 2614 /* and PM */ 2615 I915_WRITE(GEN6_PMIMR, 0xffffffff); 2616 I915_WRITE(GEN6_PMIER, 0x0); 2617 POSTING_READ(GEN6_PMIER); 2618 } 2619 } 2620 2621 /* drm_dma.h hooks 2622 */ 2623 static void ironlake_irq_preinstall(struct drm_device *dev) 2624 { 2625 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2626 2627 atomic_set(&dev_priv->irq_received, 0); 2628 2629 I915_WRITE(HWSTAM, 0xeffe); 2630 2631 I915_WRITE(DEIMR, 0xffffffff); 2632 I915_WRITE(DEIER, 0x0); 2633 POSTING_READ(DEIER); 2634 2635 gen5_gt_irq_preinstall(dev); 2636 2637 ibx_irq_preinstall(dev); 2638 } 2639 2640 static void valleyview_irq_preinstall(struct drm_device *dev) 2641 { 2642 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2643 int pipe; 2644 2645 atomic_set(&dev_priv->irq_received, 0); 2646 2647 /* VLV magic */ 2648 I915_WRITE(VLV_IMR, 0); 2649 I915_WRITE(RING_IMR(RENDER_RING_BASE), 0); 2650 I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0); 2651 I915_WRITE(RING_IMR(BLT_RING_BASE), 0); 2652 2653 /* and GT */ 2654 I915_WRITE(GTIIR, I915_READ(GTIIR)); 2655 I915_WRITE(GTIIR, I915_READ(GTIIR)); 2656 2657 gen5_gt_irq_preinstall(dev); 2658 2659 I915_WRITE(DPINVGTT, 0xff); 2660 2661 I915_WRITE(PORT_HOTPLUG_EN, 0); 2662 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 2663 for_each_pipe(pipe) 2664 I915_WRITE(PIPESTAT(pipe), 0xffff); 2665 I915_WRITE(VLV_IIR, 0xffffffff); 2666 I915_WRITE(VLV_IMR, 0xffffffff); 2667 I915_WRITE(VLV_IER, 0x0); 2668 POSTING_READ(VLV_IER); 2669 } 2670 2671 static void gen8_irq_preinstall(struct drm_device *dev) 2672 { 2673 struct drm_i915_private *dev_priv = dev->dev_private; 2674 int pipe; 2675 2676 atomic_set(&dev_priv->irq_received, 0); 2677 2678 I915_WRITE(GEN8_MASTER_IRQ, 0); 2679 POSTING_READ(GEN8_MASTER_IRQ); 2680 2681 /* IIR can theoretically queue up two events. 
Be paranoid */ 2682 #define GEN8_IRQ_INIT_NDX(type, which) do { \ 2683 I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \ 2684 POSTING_READ(GEN8_##type##_IMR(which)); \ 2685 I915_WRITE(GEN8_##type##_IER(which), 0); \ 2686 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \ 2687 POSTING_READ(GEN8_##type##_IIR(which)); \ 2688 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \ 2689 } while (0) 2690 2691 #define GEN8_IRQ_INIT(type) do { \ 2692 I915_WRITE(GEN8_##type##_IMR, 0xffffffff); \ 2693 POSTING_READ(GEN8_##type##_IMR); \ 2694 I915_WRITE(GEN8_##type##_IER, 0); \ 2695 I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \ 2696 POSTING_READ(GEN8_##type##_IIR); \ 2697 I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \ 2698 } while (0) 2699 2700 GEN8_IRQ_INIT_NDX(GT, 0); 2701 GEN8_IRQ_INIT_NDX(GT, 1); 2702 GEN8_IRQ_INIT_NDX(GT, 2); 2703 GEN8_IRQ_INIT_NDX(GT, 3); 2704 2705 for_each_pipe(pipe) { 2706 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe); 2707 } 2708 2709 GEN8_IRQ_INIT(DE_PORT); 2710 GEN8_IRQ_INIT(DE_MISC); 2711 GEN8_IRQ_INIT(PCU); 2712 #undef GEN8_IRQ_INIT 2713 #undef GEN8_IRQ_INIT_NDX 2714 2715 POSTING_READ(GEN8_PCU_IIR); 2716 } 2717 2718 static void ibx_hpd_irq_setup(struct drm_device *dev) 2719 { 2720 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2721 struct drm_mode_config *mode_config = &dev->mode_config; 2722 struct intel_encoder *intel_encoder; 2723 u32 hotplug_irqs, hotplug, enabled_irqs = 0; 2724 2725 if (HAS_PCH_IBX(dev)) { 2726 hotplug_irqs = SDE_HOTPLUG_MASK; 2727 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head) 2728 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) 2729 enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin]; 2730 } else { 2731 hotplug_irqs = SDE_HOTPLUG_MASK_CPT; 2732 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head) 2733 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) 2734 enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin]; 2735 } 2736 2737 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 2738 2739 /* 2740 * Enable digital hotplug on the PCH, and configure the DP short pulse 2741 * duration to 2ms (which is the minimum in the Display Port spec) 2742 * 2743 * This register is the same on all known PCH chips. 
2744 */ 2745 hotplug = I915_READ(PCH_PORT_HOTPLUG); 2746 hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK); 2747 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms; 2748 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms; 2749 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms; 2750 I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 2751 } 2752 2753 static void ibx_irq_postinstall(struct drm_device *dev) 2754 { 2755 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2756 u32 mask; 2757 2758 if (HAS_PCH_NOP(dev)) 2759 return; 2760 2761 if (HAS_PCH_IBX(dev)) { 2762 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_TRANSB_FIFO_UNDER | 2763 SDE_TRANSA_FIFO_UNDER | SDE_POISON; 2764 } else { 2765 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT | SDE_ERROR_CPT; 2766 2767 I915_WRITE(SERR_INT, I915_READ(SERR_INT)); 2768 } 2769 2770 I915_WRITE(SDEIIR, I915_READ(SDEIIR)); 2771 I915_WRITE(SDEIMR, ~mask); 2772 } 2773 2774 static void gen5_gt_irq_postinstall(struct drm_device *dev) 2775 { 2776 struct drm_i915_private *dev_priv = dev->dev_private; 2777 u32 pm_irqs, gt_irqs; 2778 2779 pm_irqs = gt_irqs = 0; 2780 2781 dev_priv->gt_irq_mask = ~0; 2782 if (HAS_L3_DPF(dev)) { 2783 /* L3 parity interrupt is always unmasked. */ 2784 dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev); 2785 gt_irqs |= GT_PARITY_ERROR(dev); 2786 } 2787 2788 gt_irqs |= GT_RENDER_USER_INTERRUPT; 2789 if (IS_GEN5(dev)) { 2790 gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT | 2791 ILK_BSD_USER_INTERRUPT; 2792 } else { 2793 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT; 2794 } 2795 2796 I915_WRITE(GTIIR, I915_READ(GTIIR)); 2797 I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 2798 I915_WRITE(GTIER, gt_irqs); 2799 POSTING_READ(GTIER); 2800 2801 if (INTEL_INFO(dev)->gen >= 6) { 2802 pm_irqs |= GEN6_PM_RPS_EVENTS; 2803 2804 if (HAS_VEBOX(dev)) 2805 pm_irqs |= PM_VEBOX_USER_INTERRUPT; 2806 2807 dev_priv->pm_irq_mask = 0xffffffff; 2808 I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR)); 2809 I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask); 2810 I915_WRITE(GEN6_PMIER, pm_irqs); 2811 POSTING_READ(GEN6_PMIER); 2812 } 2813 } 2814 2815 static int ironlake_irq_postinstall(struct drm_device *dev) 2816 { 2817 unsigned long irqflags; 2818 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2819 u32 display_mask, extra_mask; 2820 2821 if (INTEL_INFO(dev)->gen >= 7) { 2822 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | 2823 DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB | 2824 DE_PLANEB_FLIP_DONE_IVB | 2825 DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB | 2826 DE_ERR_INT_IVB); 2827 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB | 2828 DE_PIPEA_VBLANK_IVB); 2829 2830 I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT)); 2831 } else { 2832 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | 2833 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE | 2834 DE_AUX_CHANNEL_A | 2835 DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN | 2836 DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE | 2837 DE_POISON); 2838 extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT; 2839 } 2840 2841 dev_priv->irq_mask = ~display_mask; 2842 2843 /* should always can generate irq */ 2844 I915_WRITE(DEIIR, I915_READ(DEIIR)); 2845 I915_WRITE(DEIMR, dev_priv->irq_mask); 2846 I915_WRITE(DEIER, display_mask | extra_mask); 2847 POSTING_READ(DEIER); 2848 2849 gen5_gt_irq_postinstall(dev); 2850 2851 ibx_irq_postinstall(dev); 2852 2853 if (IS_IRONLAKE_M(dev)) { 2854 /* Enable PCU event interrupts 2855 * 2856 * 
spinlocking not required here for correctness since interrupt 2857 * setup is guaranteed to run in single-threaded context. But we 2858 * need it to make the assert_spin_locked happy. */ 2859 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2860 ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT); 2861 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2862 } 2863 2864 return 0; 2865 } 2866 2867 static int valleyview_irq_postinstall(struct drm_device *dev) 2868 { 2869 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2870 u32 enable_mask; 2871 u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV | 2872 PIPE_CRC_DONE_ENABLE; 2873 unsigned long irqflags; 2874 2875 enable_mask = I915_DISPLAY_PORT_INTERRUPT; 2876 enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 2877 I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT | 2878 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 2879 I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; 2880 2881 /* 2882 *Leave vblank interrupts masked initially. enable/disable will 2883 * toggle them based on usage. 2884 */ 2885 dev_priv->irq_mask = (~enable_mask) | 2886 I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT | 2887 I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; 2888 2889 I915_WRITE(PORT_HOTPLUG_EN, 0); 2890 POSTING_READ(PORT_HOTPLUG_EN); 2891 2892 I915_WRITE(VLV_IMR, dev_priv->irq_mask); 2893 I915_WRITE(VLV_IER, enable_mask); 2894 I915_WRITE(VLV_IIR, 0xffffffff); 2895 I915_WRITE(PIPESTAT(0), 0xffff); 2896 I915_WRITE(PIPESTAT(1), 0xffff); 2897 POSTING_READ(VLV_IER); 2898 2899 /* Interrupt setup is already guaranteed to be single-threaded, this is 2900 * just to make the assert_spin_locked check happy. */ 2901 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2902 i915_enable_pipestat(dev_priv, PIPE_A, pipestat_enable); 2903 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_EVENT_ENABLE); 2904 i915_enable_pipestat(dev_priv, PIPE_B, pipestat_enable); 2905 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2906 2907 I915_WRITE(VLV_IIR, 0xffffffff); 2908 I915_WRITE(VLV_IIR, 0xffffffff); 2909 2910 gen5_gt_irq_postinstall(dev); 2911 2912 /* ack & enable invalid PTE error interrupts */ 2913 #if 0 /* FIXME: add support to irq handler for checking these bits */ 2914 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK); 2915 I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK); 2916 #endif 2917 2918 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); 2919 2920 return 0; 2921 } 2922 2923 static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv) 2924 { 2925 int i; 2926 2927 /* These are interrupts we'll toggle with the ring mask register */ 2928 uint32_t gt_interrupts[] = { 2929 GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT | 2930 GT_RENDER_L3_PARITY_ERROR_INTERRUPT | 2931 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT, 2932 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT | 2933 GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT, 2934 0, 2935 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT 2936 }; 2937 2938 for (i = 0; i < ARRAY_SIZE(gt_interrupts); i++) { 2939 u32 tmp = I915_READ(GEN8_GT_IIR(i)); 2940 if (tmp) 2941 DRM_ERROR("Interrupt (%d) should have been masked in pre-install 0x%08x\n", 2942 i, tmp); 2943 I915_WRITE(GEN8_GT_IMR(i), ~gt_interrupts[i]); 2944 I915_WRITE(GEN8_GT_IER(i), gt_interrupts[i]); 2945 } 2946 POSTING_READ(GEN8_GT_IER(0)); 2947 } 2948 2949 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) 2950 { 2951 struct drm_device *dev = dev_priv->dev; 2952 uint32_t de_pipe_masked = GEN8_PIPE_FLIP_DONE | 2953 GEN8_PIPE_CDCLK_CRC_DONE | 2954 GEN8_PIPE_FIFO_UNDERRUN | 2955 
GEN8_DE_PIPE_IRQ_FAULT_ERRORS; 2956 uint32_t de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK; 2957 int pipe; 2958 dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked; 2959 dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked; 2960 dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked; 2961 2962 for_each_pipe(pipe) { 2963 u32 tmp = I915_READ(GEN8_DE_PIPE_IIR(pipe)); 2964 if (tmp) 2965 DRM_ERROR("Interrupt (%d) should have been masked in pre-install 0x%08x\n", 2966 pipe, tmp); 2967 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); 2968 I915_WRITE(GEN8_DE_PIPE_IER(pipe), de_pipe_enables); 2969 } 2970 POSTING_READ(GEN8_DE_PIPE_ISR(0)); 2971 2972 I915_WRITE(GEN8_DE_PORT_IMR, ~GEN8_AUX_CHANNEL_A); 2973 I915_WRITE(GEN8_DE_PORT_IER, GEN8_AUX_CHANNEL_A); 2974 POSTING_READ(GEN8_DE_PORT_IER); 2975 } 2976 2977 static int gen8_irq_postinstall(struct drm_device *dev) 2978 { 2979 struct drm_i915_private *dev_priv = dev->dev_private; 2980 2981 gen8_gt_irq_postinstall(dev_priv); 2982 gen8_de_irq_postinstall(dev_priv); 2983 2984 ibx_irq_postinstall(dev); 2985 2986 I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL); 2987 POSTING_READ(GEN8_MASTER_IRQ); 2988 2989 return 0; 2990 } 2991 2992 static void gen8_irq_uninstall(struct drm_device *dev) 2993 { 2994 struct drm_i915_private *dev_priv = dev->dev_private; 2995 int pipe; 2996 2997 if (!dev_priv) 2998 return; 2999 3000 atomic_set(&dev_priv->irq_received, 0); 3001 3002 I915_WRITE(GEN8_MASTER_IRQ, 0); 3003 3004 #define GEN8_IRQ_FINI_NDX(type, which) do { \ 3005 I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \ 3006 I915_WRITE(GEN8_##type##_IER(which), 0); \ 3007 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \ 3008 } while (0) 3009 3010 #define GEN8_IRQ_FINI(type) do { \ 3011 I915_WRITE(GEN8_##type##_IMR, 0xffffffff); \ 3012 I915_WRITE(GEN8_##type##_IER, 0); \ 3013 I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \ 3014 } while (0) 3015 3016 GEN8_IRQ_FINI_NDX(GT, 0); 3017 GEN8_IRQ_FINI_NDX(GT, 1); 3018 GEN8_IRQ_FINI_NDX(GT, 2); 3019 GEN8_IRQ_FINI_NDX(GT, 3); 3020 3021 for_each_pipe(pipe) { 3022 GEN8_IRQ_FINI_NDX(DE_PIPE, pipe); 3023 } 3024 3025 GEN8_IRQ_FINI(DE_PORT); 3026 GEN8_IRQ_FINI(DE_MISC); 3027 GEN8_IRQ_FINI(PCU); 3028 #undef GEN8_IRQ_FINI 3029 #undef GEN8_IRQ_FINI_NDX 3030 3031 POSTING_READ(GEN8_PCU_IIR); 3032 } 3033 3034 static void valleyview_irq_uninstall(struct drm_device *dev) 3035 { 3036 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3037 int pipe; 3038 3039 if (!dev_priv) 3040 return; 3041 3042 del_timer_sync(&dev_priv->hotplug_reenable_timer); 3043 3044 for_each_pipe(pipe) 3045 I915_WRITE(PIPESTAT(pipe), 0xffff); 3046 3047 I915_WRITE(HWSTAM, 0xffffffff); 3048 I915_WRITE(PORT_HOTPLUG_EN, 0); 3049 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3050 for_each_pipe(pipe) 3051 I915_WRITE(PIPESTAT(pipe), 0xffff); 3052 I915_WRITE(VLV_IIR, 0xffffffff); 3053 I915_WRITE(VLV_IMR, 0xffffffff); 3054 I915_WRITE(VLV_IER, 0x0); 3055 POSTING_READ(VLV_IER); 3056 } 3057 3058 static void ironlake_irq_uninstall(struct drm_device *dev) 3059 { 3060 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3061 3062 if (!dev_priv) 3063 return; 3064 3065 del_timer_sync(&dev_priv->hotplug_reenable_timer); 3066 3067 I915_WRITE(HWSTAM, 0xffffffff); 3068 3069 I915_WRITE(DEIMR, 0xffffffff); 3070 I915_WRITE(DEIER, 0x0); 3071 I915_WRITE(DEIIR, I915_READ(DEIIR)); 3072 if (IS_GEN7(dev)) 3073 I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT)); 3074 3075 I915_WRITE(GTIMR, 0xffffffff); 3076 I915_WRITE(GTIER, 0x0); 3077 
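/* IIR bits are sticky: writing back the value we just read clears any GT interrupts still pending at teardown time. */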
I915_WRITE(GTIIR, I915_READ(GTIIR)); 3078 3079 if (HAS_PCH_NOP(dev)) 3080 return; 3081 3082 I915_WRITE(SDEIMR, 0xffffffff); 3083 I915_WRITE(SDEIER, 0x0); 3084 I915_WRITE(SDEIIR, I915_READ(SDEIIR)); 3085 if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev)) 3086 I915_WRITE(SERR_INT, I915_READ(SERR_INT)); 3087 } 3088 3089 static void i8xx_irq_preinstall(struct drm_device * dev) 3090 { 3091 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3092 int pipe; 3093 3094 atomic_set(&dev_priv->irq_received, 0); 3095 3096 for_each_pipe(pipe) 3097 I915_WRITE(PIPESTAT(pipe), 0); 3098 I915_WRITE16(IMR, 0xffff); 3099 I915_WRITE16(IER, 0x0); 3100 POSTING_READ16(IER); 3101 } 3102 3103 static int i8xx_irq_postinstall(struct drm_device *dev) 3104 { 3105 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3106 unsigned long irqflags; 3107 3108 I915_WRITE16(EMR, 3109 ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); 3110 3111 /* Unmask the interrupts that we always want on. */ 3112 dev_priv->irq_mask = 3113 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3114 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3115 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3116 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | 3117 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 3118 I915_WRITE16(IMR, dev_priv->irq_mask); 3119 3120 I915_WRITE16(IER, 3121 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3122 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3123 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT | 3124 I915_USER_INTERRUPT); 3125 POSTING_READ16(IER); 3126 3127 /* Interrupt setup is already guaranteed to be single-threaded, this is 3128 * just to make the assert_spin_locked check happy. */ 3129 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3130 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_ENABLE); 3131 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_ENABLE); 3132 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3133 3134 return 0; 3135 } 3136 3137 /* 3138 * Returns true when a page flip has completed. 3139 */ 3140 static bool i8xx_handle_vblank(struct drm_device *dev, 3141 int pipe, u16 iir) 3142 { 3143 drm_i915_private_t *dev_priv = dev->dev_private; 3144 u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(pipe); 3145 3146 if (!drm_handle_vblank(dev, pipe)) 3147 return false; 3148 3149 if ((iir & flip_pending) == 0) 3150 return false; 3151 3152 intel_prepare_page_flip(dev, pipe); 3153 3154 /* We detect FlipDone by looking for the change in PendingFlip from '1' 3155 * to '0' on the following vblank, i.e. IIR has the Pendingflip 3156 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence 3157 * the flip is completed (no longer pending). Since this doesn't raise 3158 * an interrupt per se, we watch for the change at vblank. 
3159 */ 3160 if (I915_READ16(ISR) & flip_pending) 3161 return false; 3162 3163 intel_finish_page_flip(dev, pipe); 3164 3165 return true; 3166 } 3167 3168 static irqreturn_t i8xx_irq_handler(int irq, void *arg) 3169 { 3170 struct drm_device *dev = (struct drm_device *) arg; 3171 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3172 u16 iir, new_iir; 3173 u32 pipe_stats[2]; 3174 unsigned long irqflags; 3175 int pipe; 3176 u16 flip_mask = 3177 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3178 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 3179 3180 atomic_inc(&dev_priv->irq_received); 3181 3182 iir = I915_READ16(IIR); 3183 if (iir == 0) 3184 return IRQ_NONE; 3185 3186 while (iir & ~flip_mask) { 3187 /* Can't rely on pipestat interrupt bit in iir as it might 3188 * have been cleared after the pipestat interrupt was received. 3189 * It doesn't set the bit in iir again, but it still produces 3190 * interrupts (for non-MSI). 3191 */ 3192 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3193 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 3194 i915_handle_error(dev, false); 3195 3196 for_each_pipe(pipe) { 3197 int reg = PIPESTAT(pipe); 3198 pipe_stats[pipe] = I915_READ(reg); 3199 3200 /* 3201 * Clear the PIPE*STAT regs before the IIR 3202 */ 3203 if (pipe_stats[pipe] & 0x8000ffff) { 3204 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 3205 DRM_DEBUG_DRIVER("pipe %c underrun\n", 3206 pipe_name(pipe)); 3207 I915_WRITE(reg, pipe_stats[pipe]); 3208 } 3209 } 3210 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3211 3212 I915_WRITE16(IIR, iir & ~flip_mask); 3213 new_iir = I915_READ16(IIR); /* Flush posted writes */ 3214 3215 i915_update_dri1_breadcrumb(dev); 3216 3217 if (iir & I915_USER_INTERRUPT) 3218 notify_ring(dev, &dev_priv->ring[RCS]); 3219 3220 for_each_pipe(pipe) { 3221 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS && 3222 i8xx_handle_vblank(dev, pipe, iir)) 3223 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe); 3224 3225 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 3226 i9xx_pipe_crc_irq_handler(dev, pipe); 3227 } 3228 3229 iir = new_iir; 3230 } 3231 3232 return IRQ_HANDLED; 3233 } 3234 3235 static void i8xx_irq_uninstall(struct drm_device * dev) 3236 { 3237 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3238 int pipe; 3239 3240 for_each_pipe(pipe) { 3241 /* Clear enable bits; then clear status bits */ 3242 I915_WRITE(PIPESTAT(pipe), 0); 3243 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); 3244 } 3245 I915_WRITE16(IMR, 0xffff); 3246 I915_WRITE16(IER, 0x0); 3247 I915_WRITE16(IIR, I915_READ16(IIR)); 3248 } 3249 3250 static void i915_irq_preinstall(struct drm_device * dev) 3251 { 3252 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3253 int pipe; 3254 3255 atomic_set(&dev_priv->irq_received, 0); 3256 3257 if (I915_HAS_HOTPLUG(dev)) { 3258 I915_WRITE(PORT_HOTPLUG_EN, 0); 3259 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3260 } 3261 3262 I915_WRITE16(HWSTAM, 0xeffe); 3263 for_each_pipe(pipe) 3264 I915_WRITE(PIPESTAT(pipe), 0); 3265 I915_WRITE(IMR, 0xffffffff); 3266 I915_WRITE(IER, 0x0); 3267 POSTING_READ(IER); 3268 } 3269 3270 static int i915_irq_postinstall(struct drm_device *dev) 3271 { 3272 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3273 u32 enable_mask; 3274 unsigned long irqflags; 3275 3276 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); 3277 3278 /* Unmask the interrupts that we always want on. 
*/ 3279 dev_priv->irq_mask = 3280 ~(I915_ASLE_INTERRUPT | 3281 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3282 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3283 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3284 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | 3285 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 3286 3287 enable_mask = 3288 I915_ASLE_INTERRUPT | 3289 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3290 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3291 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT | 3292 I915_USER_INTERRUPT; 3293 3294 if (I915_HAS_HOTPLUG(dev)) { 3295 I915_WRITE(PORT_HOTPLUG_EN, 0); 3296 POSTING_READ(PORT_HOTPLUG_EN); 3297 3298 /* Enable in IER... */ 3299 enable_mask |= I915_DISPLAY_PORT_INTERRUPT; 3300 /* and unmask in IMR */ 3301 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT; 3302 } 3303 3304 I915_WRITE(IMR, dev_priv->irq_mask); 3305 I915_WRITE(IER, enable_mask); 3306 POSTING_READ(IER); 3307 3308 i915_enable_asle_pipestat(dev); 3309 3310 /* Interrupt setup is already guaranteed to be single-threaded, this is 3311 * just to make the assert_spin_locked check happy. */ 3312 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3313 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_ENABLE); 3314 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_ENABLE); 3315 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3316 3317 return 0; 3318 } 3319 3320 /* 3321 * Returns true when a page flip has completed. 3322 */ 3323 static bool i915_handle_vblank(struct drm_device *dev, 3324 int plane, int pipe, u32 iir) 3325 { 3326 drm_i915_private_t *dev_priv = dev->dev_private; 3327 u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane); 3328 3329 if (!drm_handle_vblank(dev, pipe)) 3330 return false; 3331 3332 if ((iir & flip_pending) == 0) 3333 return false; 3334 3335 intel_prepare_page_flip(dev, plane); 3336 3337 /* We detect FlipDone by looking for the change in PendingFlip from '1' 3338 * to '0' on the following vblank, i.e. IIR has the Pendingflip 3339 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence 3340 * the flip is completed (no longer pending). Since this doesn't raise 3341 * an interrupt per se, we watch for the change at vblank. 3342 */ 3343 if (I915_READ(ISR) & flip_pending) 3344 return false; 3345 3346 intel_finish_page_flip(dev, pipe); 3347 3348 return true; 3349 } 3350 3351 static irqreturn_t i915_irq_handler(int irq, void *arg) 3352 { 3353 struct drm_device *dev = (struct drm_device *) arg; 3354 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3355 u32 iir, new_iir, pipe_stats[I915_MAX_PIPES]; 3356 unsigned long irqflags; 3357 u32 flip_mask = 3358 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3359 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 3360 int pipe, ret = IRQ_NONE; 3361 3362 atomic_inc(&dev_priv->irq_received); 3363 3364 iir = I915_READ(IIR); 3365 do { 3366 bool irq_received = (iir & ~flip_mask) != 0; 3367 bool blc_event = false; 3368 3369 /* Can't rely on pipestat interrupt bit in iir as it might 3370 * have been cleared after the pipestat interrupt was received. 3371 * It doesn't set the bit in iir again, but it still produces 3372 * interrupts (for non-MSI). 
3373 */ 3374 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3375 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 3376 i915_handle_error(dev, false); 3377 3378 for_each_pipe(pipe) { 3379 int reg = PIPESTAT(pipe); 3380 pipe_stats[pipe] = I915_READ(reg); 3381 3382 /* Clear the PIPE*STAT regs before the IIR */ 3383 if (pipe_stats[pipe] & 0x8000ffff) { 3384 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 3385 DRM_DEBUG_DRIVER("pipe %c underrun\n", 3386 pipe_name(pipe)); 3387 I915_WRITE(reg, pipe_stats[pipe]); 3388 irq_received = true; 3389 } 3390 } 3391 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3392 3393 if (!irq_received) 3394 break; 3395 3396 /* Consume port. Then clear IIR or we'll miss events */ 3397 if ((I915_HAS_HOTPLUG(dev)) && 3398 (iir & I915_DISPLAY_PORT_INTERRUPT)) { 3399 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); 3400 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; 3401 3402 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", 3403 hotplug_status); 3404 3405 intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915); 3406 3407 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); 3408 POSTING_READ(PORT_HOTPLUG_STAT); 3409 } 3410 3411 I915_WRITE(IIR, iir & ~flip_mask); 3412 new_iir = I915_READ(IIR); /* Flush posted writes */ 3413 3414 if (iir & I915_USER_INTERRUPT) 3415 notify_ring(dev, &dev_priv->ring[RCS]); 3416 3417 for_each_pipe(pipe) { 3418 int plane = pipe; 3419 if (IS_MOBILE(dev)) 3420 plane = !plane; 3421 3422 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS && 3423 i915_handle_vblank(dev, plane, pipe, iir)) 3424 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane); 3425 3426 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 3427 blc_event = true; 3428 3429 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 3430 i9xx_pipe_crc_irq_handler(dev, pipe); 3431 } 3432 3433 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 3434 intel_opregion_asle_intr(dev); 3435 3436 /* With MSI, interrupts are only generated when iir 3437 * transitions from zero to nonzero. If another bit got 3438 * set while we were handling the existing iir bits, then 3439 * we would never get another interrupt. 3440 * 3441 * This is fine on non-MSI as well, as if we hit this path 3442 * we avoid exiting the interrupt handler only to generate 3443 * another one. 3444 * 3445 * Note that for MSI this could cause a stray interrupt report 3446 * if an interrupt landed in the time between writing IIR and 3447 * the posting read. This should be rare enough to never 3448 * trigger the 99% of 100,000 interrupts test for disabling 3449 * stray interrupts. 
3450 */ 3451 ret = IRQ_HANDLED; 3452 iir = new_iir; 3453 } while (iir & ~flip_mask); 3454 3455 i915_update_dri1_breadcrumb(dev); 3456 3457 return ret; 3458 } 3459 3460 static void i915_irq_uninstall(struct drm_device * dev) 3461 { 3462 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3463 int pipe; 3464 3465 del_timer_sync(&dev_priv->hotplug_reenable_timer); 3466 3467 if (I915_HAS_HOTPLUG(dev)) { 3468 I915_WRITE(PORT_HOTPLUG_EN, 0); 3469 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3470 } 3471 3472 I915_WRITE16(HWSTAM, 0xffff); 3473 for_each_pipe(pipe) { 3474 /* Clear enable bits; then clear status bits */ 3475 I915_WRITE(PIPESTAT(pipe), 0); 3476 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); 3477 } 3478 I915_WRITE(IMR, 0xffffffff); 3479 I915_WRITE(IER, 0x0); 3480 3481 I915_WRITE(IIR, I915_READ(IIR)); 3482 } 3483 3484 static void i965_irq_preinstall(struct drm_device * dev) 3485 { 3486 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3487 int pipe; 3488 3489 atomic_set(&dev_priv->irq_received, 0); 3490 3491 I915_WRITE(PORT_HOTPLUG_EN, 0); 3492 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3493 3494 I915_WRITE(HWSTAM, 0xeffe); 3495 for_each_pipe(pipe) 3496 I915_WRITE(PIPESTAT(pipe), 0); 3497 I915_WRITE(IMR, 0xffffffff); 3498 I915_WRITE(IER, 0x0); 3499 POSTING_READ(IER); 3500 } 3501 3502 static int i965_irq_postinstall(struct drm_device *dev) 3503 { 3504 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3505 u32 enable_mask; 3506 u32 error_mask; 3507 unsigned long irqflags; 3508 3509 /* Unmask the interrupts that we always want on. */ 3510 dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT | 3511 I915_DISPLAY_PORT_INTERRUPT | 3512 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3513 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3514 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3515 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | 3516 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 3517 3518 enable_mask = ~dev_priv->irq_mask; 3519 enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3520 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT); 3521 enable_mask |= I915_USER_INTERRUPT; 3522 3523 if (IS_G4X(dev)) 3524 enable_mask |= I915_BSD_USER_INTERRUPT; 3525 3526 /* Interrupt setup is already guaranteed to be single-threaded, this is 3527 * just to make the assert_spin_locked check happy. */ 3528 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3529 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_EVENT_ENABLE); 3530 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_ENABLE); 3531 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_ENABLE); 3532 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3533 3534 /* 3535 * Enable some error detection, note the instruction error mask 3536 * bit is reserved, so we leave it masked. 
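 * Bits left set in EMR stay masked and are not reported in EIR.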
3537 */ 3538 if (IS_G4X(dev)) { 3539 error_mask = ~(GM45_ERROR_PAGE_TABLE | 3540 GM45_ERROR_MEM_PRIV | 3541 GM45_ERROR_CP_PRIV | 3542 I915_ERROR_MEMORY_REFRESH); 3543 } else { 3544 error_mask = ~(I915_ERROR_PAGE_TABLE | 3545 I915_ERROR_MEMORY_REFRESH); 3546 } 3547 I915_WRITE(EMR, error_mask); 3548 3549 I915_WRITE(IMR, dev_priv->irq_mask); 3550 I915_WRITE(IER, enable_mask); 3551 POSTING_READ(IER); 3552 3553 I915_WRITE(PORT_HOTPLUG_EN, 0); 3554 POSTING_READ(PORT_HOTPLUG_EN); 3555 3556 i915_enable_asle_pipestat(dev); 3557 3558 return 0; 3559 } 3560 3561 static void i915_hpd_irq_setup(struct drm_device *dev) 3562 { 3563 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3564 struct drm_mode_config *mode_config = &dev->mode_config; 3565 struct intel_encoder *intel_encoder; 3566 u32 hotplug_en; 3567 3568 assert_spin_locked(&dev_priv->irq_lock); 3569 3570 if (I915_HAS_HOTPLUG(dev)) { 3571 hotplug_en = I915_READ(PORT_HOTPLUG_EN); 3572 hotplug_en &= ~HOTPLUG_INT_EN_MASK; 3573 /* Note HDMI and DP share hotplug bits */ 3574 /* enable bits are the same for all generations */ 3575 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head) 3576 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) 3577 hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin]; 3578 /* Programming the CRT detection parameters tends 3579 to generate a spurious hotplug event about three 3580 seconds later. So just do it once. 3581 */ 3582 if (IS_G4X(dev)) 3583 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64; 3584 hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK; 3585 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; 3586 3587 /* Ignore TV since it's buggy */ 3588 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); 3589 } 3590 } 3591 3592 static irqreturn_t i965_irq_handler(int irq, void *arg) 3593 { 3594 struct drm_device *dev = (struct drm_device *) arg; 3595 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3596 u32 iir, new_iir; 3597 u32 pipe_stats[I915_MAX_PIPES]; 3598 unsigned long irqflags; 3599 int irq_received; 3600 int ret = IRQ_NONE, pipe; 3601 u32 flip_mask = 3602 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3603 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 3604 3605 atomic_inc(&dev_priv->irq_received); 3606 3607 iir = I915_READ(IIR); 3608 3609 for (;;) { 3610 bool blc_event = false; 3611 3612 irq_received = (iir & ~flip_mask) != 0; 3613 3614 /* Can't rely on pipestat interrupt bit in iir as it might 3615 * have been cleared after the pipestat interrupt was received. 3616 * It doesn't set the bit in iir again, but it still produces 3617 * interrupts (for non-MSI). 3618 */ 3619 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3620 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 3621 i915_handle_error(dev, false); 3622 3623 for_each_pipe(pipe) { 3624 int reg = PIPESTAT(pipe); 3625 pipe_stats[pipe] = I915_READ(reg); 3626 3627 /* 3628 * Clear the PIPE*STAT regs before the IIR 3629 */ 3630 if (pipe_stats[pipe] & 0x8000ffff) { 3631 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 3632 DRM_DEBUG_DRIVER("pipe %c underrun\n", 3633 pipe_name(pipe)); 3634 I915_WRITE(reg, pipe_stats[pipe]); 3635 irq_received = 1; 3636 } 3637 } 3638 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3639 3640 if (!irq_received) 3641 break; 3642 3643 ret = IRQ_HANDLED; 3644 3645 /* Consume port. 
Then clear IIR or we'll miss events */ 3646 if (iir & I915_DISPLAY_PORT_INTERRUPT) { 3647 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); 3648 u32 hotplug_trigger = hotplug_status & (IS_G4X(dev) ? 3649 HOTPLUG_INT_STATUS_G4X : 3650 HOTPLUG_INT_STATUS_I915); 3651 3652 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", 3653 hotplug_status); 3654 3655 intel_hpd_irq_handler(dev, hotplug_trigger, 3656 IS_G4X(dev) ? hpd_status_gen4 : hpd_status_i915); 3657 3658 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); 3659 I915_READ(PORT_HOTPLUG_STAT); 3660 } 3661 3662 I915_WRITE(IIR, iir & ~flip_mask); 3663 new_iir = I915_READ(IIR); /* Flush posted writes */ 3664 3665 if (iir & I915_USER_INTERRUPT) 3666 notify_ring(dev, &dev_priv->ring[RCS]); 3667 if (iir & I915_BSD_USER_INTERRUPT) 3668 notify_ring(dev, &dev_priv->ring[VCS]); 3669 3670 for_each_pipe(pipe) { 3671 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS && 3672 i915_handle_vblank(dev, pipe, pipe, iir)) 3673 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe); 3674 3675 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 3676 blc_event = true; 3677 3678 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 3679 i9xx_pipe_crc_irq_handler(dev, pipe); 3680 } 3681 3682 3683 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 3684 intel_opregion_asle_intr(dev); 3685 3686 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 3687 gmbus_irq_handler(dev); 3688 3689 /* With MSI, interrupts are only generated when iir 3690 * transitions from zero to nonzero. If another bit got 3691 * set while we were handling the existing iir bits, then 3692 * we would never get another interrupt. 3693 * 3694 * This is fine on non-MSI as well, as if we hit this path 3695 * we avoid exiting the interrupt handler only to generate 3696 * another one. 3697 * 3698 * Note that for MSI this could cause a stray interrupt report 3699 * if an interrupt landed in the time between writing IIR and 3700 * the posting read. This should be rare enough to never 3701 * trigger the 99% of 100,000 interrupts test for disabling 3702 * stray interrupts. 
static void i965_irq_uninstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	del_timer_sync(&dev_priv->hotplug_reenable_timer);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xffffffff);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe),
			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
	I915_WRITE(IIR, I915_READ(IIR));
}

static void i915_reenable_hotplug_timer_func(unsigned long data)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *)data;
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	unsigned long irqflags;
	int i;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
		struct drm_connector *connector;

		if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED)
			continue;

		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;

		list_for_each_entry(connector, &mode_config->connector_list, head) {
			struct intel_connector *intel_connector = to_intel_connector(connector);

			if (intel_connector->encoder->hpd_pin == i) {
				if (connector->polled != intel_connector->polled)
					DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
							 drm_get_connector_name(connector));
				connector->polled = intel_connector->polled;
				if (!connector->polled)
					connector->polled = DRM_CONNECTOR_POLL_HPD;
			}
		}
	}
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
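/**
 * intel_irq_init - initialize IRQ support and pick per-platform hooks
 * @dev: drm device
 *
 * Sets up the deferred work items, the hangcheck and hotplug re-enable
 * timers and the PM QoS request, then fills in the DRM driver interrupt
 * and vblank callbacks for the platform at hand (VLV, gen8, PCH split or
 * the legacy gen2/3/4 paths).
 */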
void intel_irq_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
	INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);

	setup_timer(&dev_priv->gpu_error.hangcheck_timer,
		    i915_hangcheck_elapsed,
		    (unsigned long) dev);
	setup_timer(&dev_priv->hotplug_reenable_timer, i915_reenable_hotplug_timer_func,
		    (unsigned long) dev_priv);

	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);

	if (IS_GEN2(dev)) {
		dev->max_vblank_count = 0;
		dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
	} else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
	} else {
		dev->driver->get_vblank_counter = i915_get_vblank_counter;
		dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
		dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
	}

	if (IS_VALLEYVIEW(dev)) {
		dev->driver->irq_handler = valleyview_irq_handler;
		dev->driver->irq_preinstall = valleyview_irq_preinstall;
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
		dev->driver->irq_uninstall = valleyview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (IS_GEN8(dev)) {
		dev->driver->irq_handler = gen8_irq_handler;
		dev->driver->irq_preinstall = gen8_irq_preinstall;
		dev->driver->irq_postinstall = gen8_irq_postinstall;
		dev->driver->irq_uninstall = gen8_irq_uninstall;
		dev->driver->enable_vblank = gen8_enable_vblank;
		dev->driver->disable_vblank = gen8_disable_vblank;
		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
	} else if (HAS_PCH_SPLIT(dev)) {
		dev->driver->irq_handler = ironlake_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_preinstall;
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ironlake_enable_vblank;
		dev->driver->disable_vblank = ironlake_disable_vblank;
		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
	} else {
		if (INTEL_INFO(dev)->gen == 2) {
			dev->driver->irq_preinstall = i8xx_irq_preinstall;
			dev->driver->irq_postinstall = i8xx_irq_postinstall;
			dev->driver->irq_handler = i8xx_irq_handler;
			dev->driver->irq_uninstall = i8xx_irq_uninstall;
		} else if (INTEL_INFO(dev)->gen == 3) {
			dev->driver->irq_preinstall = i915_irq_preinstall;
			dev->driver->irq_postinstall = i915_irq_postinstall;
			dev->driver->irq_uninstall = i915_irq_uninstall;
			dev->driver->irq_handler = i915_irq_handler;
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		} else {
			dev->driver->irq_preinstall = i965_irq_preinstall;
			dev->driver->irq_postinstall = i965_irq_postinstall;
			dev->driver->irq_uninstall = i965_irq_uninstall;
			dev->driver->irq_handler = i965_irq_handler;
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		}
		dev->driver->enable_vblank = i915_enable_vblank;
		dev->driver->disable_vblank = i915_disable_vblank;
	}
}

void intel_hpd_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_connector *connector;
	unsigned long irqflags;
	int i;

	for (i = 1; i < HPD_NUM_PINS; i++) {
		dev_priv->hpd_stats[i].hpd_cnt = 0;
		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
	}
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		struct intel_connector *intel_connector = to_intel_connector(connector);
		connector->polled = intel_connector->polled;
		if (!connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
			connector->polled = DRM_CONNECTOR_POLL_HPD;
	}

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked checks happy. */
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
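/*
 * Package C8+ support: the two helpers below save the current interrupt
 * mask state into dev_priv->pc8.regsave and mask everything except the PCH
 * hotplug event bits so the hardware may enter PC8, and later restore the
 * saved state (warning if the registers were changed behind our back) when
 * we leave PC8.
 */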
/* Disable interrupts so we can allow Package C8+. */
void hsw_pc8_disable_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	dev_priv->pc8.regsave.deimr = I915_READ(DEIMR);
	dev_priv->pc8.regsave.sdeimr = I915_READ(SDEIMR);
	dev_priv->pc8.regsave.gtimr = I915_READ(GTIMR);
	dev_priv->pc8.regsave.gtier = I915_READ(GTIER);
	dev_priv->pc8.regsave.gen6_pmimr = I915_READ(GEN6_PMIMR);

	ironlake_disable_display_irq(dev_priv, ~DE_PCH_EVENT_IVB);
	ibx_disable_display_interrupt(dev_priv, ~SDE_HOTPLUG_MASK_CPT);
	ilk_disable_gt_irq(dev_priv, 0xffffffff);
	snb_disable_pm_irq(dev_priv, 0xffffffff);

	dev_priv->pc8.irqs_disabled = true;

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

/* Restore interrupts so we can recover from Package C8+. */
void hsw_pc8_restore_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;
	uint32_t val, expected;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	val = I915_READ(DEIMR);
	expected = ~DE_PCH_EVENT_IVB;
	WARN(val != expected, "DEIMR is 0x%08x, not 0x%08x\n", val, expected);

	val = I915_READ(SDEIMR) & ~SDE_HOTPLUG_MASK_CPT;
	expected = ~SDE_HOTPLUG_MASK_CPT;
	WARN(val != expected, "SDEIMR non-HPD bits are 0x%08x, not 0x%08x\n",
	     val, expected);

	val = I915_READ(GTIMR);
	expected = 0xffffffff;
	WARN(val != expected, "GTIMR is 0x%08x, not 0x%08x\n", val, expected);

	val = I915_READ(GEN6_PMIMR);
	expected = 0xffffffff;
	WARN(val != expected, "GEN6_PMIMR is 0x%08x, not 0x%08x\n", val,
	     expected);

	dev_priv->pc8.irqs_disabled = false;

	ironlake_enable_display_irq(dev_priv, ~dev_priv->pc8.regsave.deimr);
	ibx_enable_display_interrupt(dev_priv,
				     ~dev_priv->pc8.regsave.sdeimr &
				     ~SDE_HOTPLUG_MASK_CPT);
	ilk_enable_gt_irq(dev_priv, ~dev_priv->pc8.regsave.gtimr);
	snb_enable_pm_irq(dev_priv, ~dev_priv->pc8.regsave.gen6_pmimr);
	I915_WRITE(GTIER, dev_priv->pc8.regsave.gtier);

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}