/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

static const u32 hpd_ibx[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_mask_i915[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_g4x[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

/* For display hotplug interrupt */
static void
ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->pc8.irqs_disabled) {
		WARN(1, "IRQs disabled\n");
		dev_priv->pc8.regsave.deimr &= ~mask;
		return;
	}

	if ((dev_priv->irq_mask & mask) != 0) {
		dev_priv->irq_mask &= ~mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

static void
ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->pc8.irqs_disabled) {
		WARN(1, "IRQs disabled\n");
		dev_priv->pc8.regsave.deimr |= mask;
		return;
	}

	if ((dev_priv->irq_mask & mask) != mask) {
		dev_priv->irq_mask |= mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->pc8.irqs_disabled) {
		WARN(1, "IRQs disabled\n");
		dev_priv->pc8.regsave.gtimr &= ~interrupt_mask;
		dev_priv->pc8.regsave.gtimr |= (~enabled_irq_mask &
						interrupt_mask);
		return;
	}

	dev_priv->gt_irq_mask &= ~interrupt_mask;
	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	POSTING_READ(GTIMR);
}

void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, mask);
}

void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, 0);
}

/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->pc8.irqs_disabled) {
		WARN(1, "IRQs disabled\n");
		dev_priv->pc8.regsave.gen6_pmimr &= ~interrupt_mask;
		dev_priv->pc8.regsave.gen6_pmimr |= (~enabled_irq_mask &
						     interrupt_mask);
		return;
	}

	new_val = dev_priv->pm_irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_irq_mask) {
		dev_priv->pm_irq_mask = new_val;
		I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask);
		POSTING_READ(GEN6_PMIMR);
	}
}

void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, mask);
}

void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, 0);
}

static bool ivb_can_enable_err_int(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc;
	enum pipe pipe;

	assert_spin_locked(&dev_priv->irq_lock);

	for_each_pipe(pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		if (crtc->cpu_fifo_underrun_disabled)
			return false;
	}

	return true;
}

static bool cpt_can_enable_serr_int(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;
	struct intel_crtc *crtc;

	assert_spin_locked(&dev_priv->irq_lock);

	for_each_pipe(pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		if (crtc->pch_fifo_underrun_disabled)
			return false;
	}

	return true;
}
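
/*
 * The *_set_fifo_underrun_reporting() helpers below are the per-platform
 * backends for intel_set_cpu_fifo_underrun_reporting() (and, for the ibx/cpt
 * variants further down, intel_set_pch_fifo_underrun_reporting()); the
 * callers select one based on GEN/PCH type and already hold
 * dev_priv->irq_lock.
 */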
static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
						 enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN :
					  DE_PIPEB_FIFO_UNDERRUN;

	if (enable)
		ironlake_enable_display_irq(dev_priv, bit);
	else
		ironlake_disable_display_irq(dev_priv, bit);
}

static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
						  enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	if (enable) {
		I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));

		if (!ivb_can_enable_err_int(dev))
			return;

		ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
	} else {
		bool was_enabled = !(I915_READ(DEIMR) & DE_ERR_INT_IVB);

		/* Change the state _after_ we've read out the current one. */
		ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);

		if (!was_enabled &&
		    (I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe))) {
			DRM_DEBUG_KMS("uncleared fifo underrun on pipe %c\n",
				      pipe_name(pipe));
		}
	}
}

static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev,
						  enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	assert_spin_locked(&dev_priv->irq_lock);

	if (enable)
		dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_FIFO_UNDERRUN;
	else
		dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_FIFO_UNDERRUN;
	I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
	POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
					 uint32_t interrupt_mask,
					 uint32_t enabled_irq_mask)
{
	uint32_t sdeimr = I915_READ(SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->pc8.irqs_disabled &&
	    (interrupt_mask & SDE_HOTPLUG_MASK_CPT)) {
		WARN(1, "IRQs disabled\n");
		dev_priv->pc8.regsave.sdeimr &= ~interrupt_mask;
		dev_priv->pc8.regsave.sdeimr |= (~enabled_irq_mask &
						 interrupt_mask);
		return;
	}

	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}
#define ibx_enable_display_interrupt(dev_priv, bits) \
	ibx_display_interrupt_update((dev_priv), (bits), (bits))
#define ibx_disable_display_interrupt(dev_priv, bits) \
	ibx_display_interrupt_update((dev_priv), (bits), 0)

static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
					    enum transcoder pch_transcoder,
					    bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t bit = (pch_transcoder == TRANSCODER_A) ?
		       SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;

	if (enable)
		ibx_enable_display_interrupt(dev_priv, bit);
	else
		ibx_disable_display_interrupt(dev_priv, bit);
}

static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
					    enum transcoder pch_transcoder,
					    bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (enable) {
		I915_WRITE(SERR_INT,
			   SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));

		if (!cpt_can_enable_serr_int(dev))
			return;

		ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT);
	} else {
		uint32_t tmp = I915_READ(SERR_INT);
		bool was_enabled = !(I915_READ(SDEIMR) & SDE_ERROR_CPT);

		/* Change the state _after_ we've read out the current one. */
		ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT);

		if (!was_enabled &&
		    (tmp & SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder))) {
			DRM_DEBUG_KMS("uncleared pch fifo underrun on pch transcoder %c\n",
				      transcoder_name(pch_transcoder));
		}
	}
}

/**
 * intel_set_cpu_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pipe: pipe
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable CPU fifo underruns for a specific
 * pipe. Notice that on some Gens (e.g. IVB, HSW), disabling FIFO underrun
 * reporting for one pipe may also disable all the other CPU error interrupts
 * for the other pipes, due to the fact that there's just one interrupt
 * mask/enable bit for all the pipes.
 *
 * Returns the previous state of underrun reporting.
 */
bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
					   enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);

	ret = !intel_crtc->cpu_fifo_underrun_disabled;

	if (enable == ret)
		goto done;

	intel_crtc->cpu_fifo_underrun_disabled = !enable;

	if (IS_GEN5(dev) || IS_GEN6(dev))
		ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
	else if (IS_GEN7(dev))
		ivybridge_set_fifo_underrun_reporting(dev, pipe, enable);
	else if (IS_GEN8(dev))
		broadwell_set_fifo_underrun_reporting(dev, pipe, enable);

done:
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
	return ret;
}

/**
 * intel_set_pch_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable PCH fifo underruns for a specific
 * PCH transcoder. Notice that on some PCHs (e.g. CPT/PPT), disabling FIFO
 * underrun reporting for one transcoder may also disable all the other PCH
 * error interrupts for the other transcoders, due to the fact that there's
 * just one interrupt mask/enable bit for all the transcoders.
 *
 * Returns the previous state of underrun reporting.
 */
bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
					   enum transcoder pch_transcoder,
					   bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	unsigned long flags;
	bool ret;

	/*
	 * NOTE: Pre-LPT has a fixed cpu pipe -> pch transcoder mapping, but LPT
	 * has only one pch transcoder A that all pipes can use. To avoid racy
	 * pch transcoder -> pipe lookups from interrupt code simply store the
	 * underrun statistics in crtc A. Since we never expose this anywhere
	 * nor use it outside of the fifo underrun code here using the "wrong"
	 * crtc on LPT won't cause issues.
	 */

	spin_lock_irqsave(&dev_priv->irq_lock, flags);

	ret = !intel_crtc->pch_fifo_underrun_disabled;

	if (enable == ret)
		goto done;

	intel_crtc->pch_fifo_underrun_disabled = !enable;

	if (HAS_PCH_IBX(dev))
		ibx_set_fifo_underrun_reporting(dev, pch_transcoder, enable);
	else
		cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable);

done:
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
	return ret;
}


void
i915_enable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & 0x7fff0000;

	assert_spin_locked(&dev_priv->irq_lock);

	if ((pipestat & mask) == mask)
		return;

	/* Enable the interrupt, clear any pending status */
	pipestat |= mask | (mask >> 16);
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

void
i915_disable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & 0x7fff0000;

	assert_spin_locked(&dev_priv->irq_lock);

	if ((pipestat & mask) == 0)
		return;

	pipestat &= ~mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 */
static void i915_enable_asle_pipestat(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long irqflags;

	if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_ENABLE);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_ENABLE);

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		/* Locking is horribly broken here, but whatever. */
		struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

		return intel_crtc->active;
	} else {
		return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
	}
}

static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe)
{
	/* Gen2 doesn't have a hardware frame counter */
	return 0;
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low, pixel, vbl_start;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		struct intel_crtc *intel_crtc =
			to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
		const struct drm_display_mode *mode =
			&intel_crtc->config.adjusted_mode;

		vbl_start = mode->crtc_vblank_start * mode->crtc_htotal;
	} else {
		enum transcoder cpu_transcoder = (enum transcoder) pipe;
		u32 htotal;

		htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1;
		vbl_start = (I915_READ(VBLANK(cpu_transcoder)) & 0x1fff) + 1;

		vbl_start *= htotal;
	}

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low   = I915_READ(low_frame);
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * The frame counter increments at beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}

static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int reg = PIPE_FRMCOUNT_GM45(pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	return I915_READ(reg);
}

/* raw reads, only for fast reads of display block, no need for forcewake etc. */
#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
#define __raw_i915_read16(dev_priv__, reg__) readw((dev_priv__)->regs + (reg__))

static bool ilk_pipe_in_vblank_locked(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t status;

	if (INTEL_INFO(dev)->gen < 7) {
		status = pipe == PIPE_A ?
			DE_PIPEA_VBLANK :
			DE_PIPEB_VBLANK;
	} else {
		switch (pipe) {
		default:
		case PIPE_A:
			status = DE_PIPEA_VBLANK_IVB;
			break;
		case PIPE_B:
			status = DE_PIPEB_VBLANK_IVB;
			break;
		case PIPE_C:
			status = DE_PIPEC_VBLANK_IVB;
			break;
		}
	}

	return __raw_i915_read32(dev_priv, DEISR) & status;
}

static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
				    unsigned int flags, int *vpos, int *hpos,
				    ktime_t *stime, ktime_t *etime)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	const struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
	int position;
	int vbl_start, vbl_end, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;
	unsigned long irqflags;

	if (!intel_crtc->active) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	htotal = mode->crtc_htotal;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

	ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		if (IS_GEN2(dev))
			position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
		else
			position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

		if (HAS_PCH_SPLIT(dev)) {
			/*
			 * The scanline counter increments at the leading edge
			 * of hsync, ie. it completely misses the active portion
			 * of the line. Fix up the counter at both edges of vblank
			 * to get a more accurate picture whether we're in vblank
			 * or not.
			 */
			in_vbl = ilk_pipe_in_vblank_locked(dev, pipe);
			if ((in_vbl && position == vbl_start - 1) ||
			    (!in_vbl && position == vbl_end - 1))
				position = (position + 1) % vtotal;
		} else {
			/*
			 * ISR vblank status bits don't work the way we'd want
			 * them to work on non-PCH platforms (for
			 * ilk_pipe_in_vblank_locked()), and there doesn't
			 * appear any other way to determine if we're currently
			 * in vblank.
			 *
			 * Instead let's assume that we're already in vblank if
			 * we got called from the vblank interrupt and the
			 * scanline counter value indicates that we're on the
			 * line just prior to vblank start. This should result
			 * in the correct answer, unless the vblank interrupt
			 * delivery really got delayed for almost exactly one
			 * full frame/field.
			 */
			if (flags & DRM_CALLED_FROM_VBLIRQ &&
			    position == vbl_start - 1) {
				position = (position + 1) % vtotal;

				/* Signal this correction as "applied". */
				ret |= 0x8;
			}
		}
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	in_vbl = position >= vbl_start && position < vbl_end;

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		*vpos = position;
		*hpos = 0;
	} else {
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_INVBL;

	return ret;
}

static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
				     int *max_error,
				     struct timeval *vblank_time,
				     unsigned flags)
{
	struct drm_crtc *crtc;

	if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	if (!crtc->enabled) {
		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags,
						     crtc,
						     &to_intel_crtc(crtc)->config.adjusted_mode);
}

static bool intel_hpd_irq_event(struct drm_device *dev,
				struct drm_connector *connector)
{
	enum drm_connector_status old_status;

	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
	old_status = connector->status;

	connector->status = connector->funcs->detect(connector, false);
	if (old_status == connector->status)
		return false;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
		      connector->base.id,
		      drm_get_connector_name(connector),
		      drm_get_connector_status_name(old_status),
		      drm_get_connector_status_name(connector->status));

	return true;
}

/*
 * Handle hotplug events outside the interrupt handler proper.
 */
#define I915_REENABLE_HOTPLUG_DELAY (2*60*1000)

static void i915_hotplug_work_func(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    hotplug_work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_connector *intel_connector;
	struct intel_encoder *intel_encoder;
	struct drm_connector *connector;
	unsigned long irqflags;
	bool hpd_disabled = false;
	bool changed = false;
	u32 hpd_event_bits;

	/* HPD irq before everything is fully set up. */
	if (!dev_priv->enable_hotplug_processing)
		return;

	mutex_lock(&mode_config->mutex);
	DRM_DEBUG_KMS("running encoder hotplug functions\n");

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	hpd_event_bits = dev_priv->hpd_event_bits;
	dev_priv->hpd_event_bits = 0;
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		intel_encoder = intel_connector->encoder;
		if (intel_encoder->hpd_pin > HPD_NONE &&
		    dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
		    connector->polled == DRM_CONNECTOR_POLL_HPD) {
			DRM_INFO("HPD interrupt storm detected on connector %s: "
				 "switching from hotplug detection to polling\n",
				 drm_get_connector_name(connector));
			dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
			connector->polled = DRM_CONNECTOR_POLL_CONNECT
				| DRM_CONNECTOR_POLL_DISCONNECT;
			hpd_disabled = true;
		}
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
				      drm_get_connector_name(connector), intel_encoder->hpd_pin);
		}
	}
	/* if there were no outputs to poll, poll was disabled,
	 * therefore make sure it's enabled when disabling HPD on
	 * some connectors */
	if (hpd_disabled) {
		drm_kms_helper_poll_enable(dev);
		mod_timer(&dev_priv->hotplug_reenable_timer,
			  jiffies + msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
	}

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		intel_encoder = intel_connector->encoder;
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			if (intel_encoder->hot_plug)
				intel_encoder->hot_plug(intel_encoder);
			if (intel_hpd_irq_event(dev, connector))
				changed = true;
		}
	}
	mutex_unlock(&mode_config->mutex);

	if (changed)
		drm_kms_helper_hotplug_event(dev);
}

static void ironlake_rps_change_irq_handler(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;

	spin_lock(&mchdev_lock);

	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	spin_unlock(&mchdev_lock);

	return;
}

static void notify_ring(struct drm_device *dev,
			struct intel_ring_buffer *ring)
{
	if (ring->obj == NULL)
		return;

	trace_i915_gem_request_complete(ring);

	wake_up_all(&ring->irq_queue);
	i915_queue_hangcheck(dev);
}
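
/*
 * gen6_pm_rps_work() runs from the driver workqueue: it picks up the RPS
 * event bits that gen6_rps_irq_handler() stashed in dev_priv->rps.pm_iir,
 * unmasks those PM interrupts again, and then adjusts the GPU frequency
 * under rps.hw_lock.
 */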
static void gen6_pm_rps_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    rps.work);
	u32 pm_iir;
	int new_delay, adj;

	spin_lock_irq(&dev_priv->irq_lock);
	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	/* Make sure not to corrupt PMIMR state used by ringbuffer code */
	snb_enable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS);
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Make sure we didn't queue anything we're not going to process. */
	WARN_ON(pm_iir & ~GEN6_PM_RPS_EVENTS);

	if ((pm_iir & GEN6_PM_RPS_EVENTS) == 0)
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	adj = dev_priv->rps.last_adj;
	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else
			adj = 1;
		new_delay = dev_priv->rps.cur_delay + adj;

		/*
		 * For better performance, jump directly
		 * to RPe if we're below it.
		 */
		if (new_delay < dev_priv->rps.rpe_delay)
			new_delay = dev_priv->rps.rpe_delay;
	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
		if (dev_priv->rps.cur_delay > dev_priv->rps.rpe_delay)
			new_delay = dev_priv->rps.rpe_delay;
		else
			new_delay = dev_priv->rps.min_delay;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
		if (adj < 0)
			adj *= 2;
		else
			adj = -1;
		new_delay = dev_priv->rps.cur_delay + adj;
	} else { /* unknown event */
		new_delay = dev_priv->rps.cur_delay;
	}

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	new_delay = clamp_t(int, new_delay,
			    dev_priv->rps.min_delay, dev_priv->rps.max_delay);
	dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_delay;

	if (IS_VALLEYVIEW(dev_priv->dev))
		valleyview_set_rps(dev_priv->dev, new_delay);
	else
		gen6_set_rps(dev_priv->dev, new_delay);

	mutex_unlock(&dev_priv->rps.hw_lock);
}


/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * the same row is likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	uint32_t misccpctl;
	unsigned long flags;
	uint8_t slice = 0;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->dev->struct_mutex);

	/* If we've screwed up tracking, just let the interrupt fire again */
	if (WARN_ON(!dev_priv->l3_parity.which_slice))
		goto out;

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		u32 reg;

		slice--;
		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1<<slice);

		reg = GEN7_L3CDERRST1 + (slice * 0x200);

		error_status = I915_READ(reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		POSTING_READ(reg);

		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

		kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj,
				   KOBJ_CHANGE, parity_event);

		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			  slice, row, bank, subbank);

		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
	}

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

out:
	WARN_ON(dev_priv->l3_parity.which_slice);
	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	ilk_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	mutex_unlock(&dev_priv->dev->struct_mutex);
}

static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (!HAS_L3_DPF(dev))
		return;

	spin_lock(&dev_priv->irq_lock);
	ilk_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
	spin_unlock(&dev_priv->irq_lock);

	iir &= GT_PARITY_ERROR(dev);
	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
		dev_priv->l3_parity.which_slice |= 1 << 1;

	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
		dev_priv->l3_parity.which_slice |= 1 << 0;

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}

static void ilk_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & ILK_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
}

static void snb_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{

	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
	if (gt_iir & GT_BLT_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[BCS]);

	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
		      GT_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) {
		DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
		i915_handle_error(dev, false);
	}

	if (gt_iir & GT_PARITY_ERROR(dev))
		ivybridge_parity_error_irq_handler(dev, gt_iir);
}

static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
				       struct drm_i915_private *dev_priv,
				       u32 master_ctl)
{
	u32 rcs, bcs, vcs;
	uint32_t tmp = 0;
	irqreturn_t ret = IRQ_NONE;

	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
		tmp = I915_READ(GEN8_GT_IIR(0));
		if (tmp) {
			ret = IRQ_HANDLED;
			rcs = tmp >> GEN8_RCS_IRQ_SHIFT;
			bcs = tmp >> GEN8_BCS_IRQ_SHIFT;
			if (rcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, &dev_priv->ring[RCS]);
			if (bcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, &dev_priv->ring[BCS]);
			I915_WRITE(GEN8_GT_IIR(0), tmp);
		} else
			DRM_ERROR("The master control interrupt lied (GT0)!\n");
	}

	if (master_ctl & GEN8_GT_VCS1_IRQ) {
		tmp = I915_READ(GEN8_GT_IIR(1));
		if (tmp) {
			ret = IRQ_HANDLED;
			vcs = tmp >> GEN8_VCS1_IRQ_SHIFT;
			if (vcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, &dev_priv->ring[VCS]);
			I915_WRITE(GEN8_GT_IIR(1), tmp);
		} else
			DRM_ERROR("The master control interrupt lied (GT1)!\n");
	}

	if (master_ctl & GEN8_GT_VECS_IRQ) {
		tmp = I915_READ(GEN8_GT_IIR(3));
		if (tmp) {
			ret = IRQ_HANDLED;
			vcs = tmp >> GEN8_VECS_IRQ_SHIFT;
			if (vcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, &dev_priv->ring[VECS]);
			I915_WRITE(GEN8_GT_IIR(3), tmp);
		} else
			DRM_ERROR("The master control interrupt lied (GT3)!\n");
	}

	return ret;
}

#define HPD_STORM_DETECT_PERIOD 1000
#define HPD_STORM_THRESHOLD 5

static inline void intel_hpd_irq_handler(struct drm_device *dev,
					 u32 hotplug_trigger,
					 const u32 *hpd)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i;
	bool storm_detected = false;

	if (!hotplug_trigger)
		return;

	spin_lock(&dev_priv->irq_lock);
	for (i = 1; i < HPD_NUM_PINS; i++) {

		WARN_ONCE(hpd[i] & hotplug_trigger &&
			  dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED,
			  "Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n",
			  hotplug_trigger, i, hpd[i]);

		if (!(hpd[i] & hotplug_trigger) ||
		    dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
			continue;

		dev_priv->hpd_event_bits |= (1 << i);
		if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
				   dev_priv->hpd_stats[i].hpd_last_jiffies
				   + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
			dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
			dev_priv->hpd_stats[i].hpd_cnt = 0;
			DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", i);
		} else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
			dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
			dev_priv->hpd_event_bits &= ~(1 << i);
			DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
			storm_detected = true;
		} else {
			dev_priv->hpd_stats[i].hpd_cnt++;
			DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", i,
				      dev_priv->hpd_stats[i].hpd_cnt);
		}
	}

	if (storm_detected)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock(&dev_priv->irq_lock);

	/*
	 * Our hotplug handler can grab modeset locks (by calling down into the
	 * fb helpers). Hence it must not be run on our own dev_priv->wq work
	 * queue for otherwise the flush_work in the pageflip code will
	 * deadlock.
	 */
	schedule_work(&dev_priv->hotplug_work);
}

static void gmbus_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

static void dp_aux_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

#if defined(CONFIG_DEBUG_FS)
static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
					 uint32_t crc0, uint32_t crc1,
					 uint32_t crc2, uint32_t crc3,
					 uint32_t crc4)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
	struct intel_pipe_crc_entry *entry;
	int head, tail;

	spin_lock(&pipe_crc->lock);

	if (!pipe_crc->entries) {
		spin_unlock(&pipe_crc->lock);
		DRM_ERROR("spurious interrupt\n");
		return;
	}

	head = pipe_crc->head;
	tail = pipe_crc->tail;

	if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
		spin_unlock(&pipe_crc->lock);
		DRM_ERROR("CRC buffer overflowing\n");
		return;
	}

	entry = &pipe_crc->entries[head];

	entry->frame = dev->driver->get_vblank_counter(dev, pipe);
	entry->crc[0] = crc0;
	entry->crc[1] = crc1;
	entry->crc[2] = crc2;
	entry->crc[3] = crc3;
	entry->crc[4] = crc4;

	head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
	pipe_crc->head = head;

	spin_unlock(&pipe_crc->lock);

	wake_up_interruptible(&pipe_crc->wq);
}
#else
static inline void
display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
			     uint32_t crc0, uint32_t crc1,
			     uint32_t crc2, uint32_t crc3,
			     uint32_t crc4) {}
#endif


static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     0, 0, 0, 0);
}

static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
}

static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t res1, res2;

	if (INTEL_INFO(dev)->gen >= 3)
		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
	else
		res1 = 0;

	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
	else
		res2 = 0;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_RED(pipe)),
				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
				     res1, res2);
}

/* The RPS events need forcewake, so we add them to a work queue and mask their
 * IMR bits until the work is done.
 * Other interrupts can be processed without the work queue. */
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	if (pm_iir & GEN6_PM_RPS_EVENTS) {
		spin_lock(&dev_priv->irq_lock);
		dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_RPS_EVENTS;
		snb_disable_pm_irq(dev_priv, pm_iir & GEN6_PM_RPS_EVENTS);
		spin_unlock(&dev_priv->irq_lock);

		queue_work(dev_priv->wq, &dev_priv->rps.work);
	}

	if (HAS_VEBOX(dev_priv->dev)) {
		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
			notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);

		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) {
			DRM_ERROR("VEBOX CS error interrupt 0x%08x\n", pm_iir);
			i915_handle_error(dev_priv->dev, false);
		}
	}
}

static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, gt_iir, pm_iir;
	irqreturn_t ret = IRQ_NONE;
	unsigned long irqflags;
	int pipe;
	u32 pipe_stats[I915_MAX_PIPES];

	atomic_inc(&dev_priv->irq_received);

	while (true) {
		iir = I915_READ(VLV_IIR);
		gt_iir = I915_READ(GTIIR);
		pm_iir = I915_READ(GEN6_PMIIR);

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			goto out;

		ret = IRQ_HANDLED;

		snb_gt_irq_handler(dev, dev_priv, gt_iir);

		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		for_each_pipe(pipe) {
			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
				drm_handle_vblank(dev, pipe);

			if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) {
				intel_prepare_page_flip(dev, pipe);
				intel_finish_page_flip(dev, pipe);
			}

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev, pipe);
		}

		/* Consume port.  Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
			u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);

			intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);

			if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
				dp_aux_irq_handler(dev);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			I915_READ(PORT_HOTPLUG_STAT);
		}

		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
			gmbus_irq_handler(dev);

		if (pm_iir)
			gen6_rps_irq_handler(dev_priv, pm_iir);

		I915_WRITE(GTIIR, gt_iir);
		I915_WRITE(GEN6_PMIIR, pm_iir);
		I915_WRITE(VLV_IIR, iir);
	}

out:
	return ret;
}

static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;

	intel_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx);

	if (pch_iir & SDE_AUDIO_POWER_MASK) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
			       SDE_AUDIO_POWER_SHIFT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
				 port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_HDCP_MASK)
		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");

	if (pch_iir & SDE_AUDIO_TRANS_MASK)
		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");

	if (pch_iir & SDE_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (pch_iir & SDE_FDI_MASK)
		for_each_pipe(pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");

	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");

	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n");

	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n");
}

static void ivb_err_int_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 err_int = I915_READ(GEN7_ERR_INT);
	enum pipe pipe;

	if (err_int & ERR_INT_POISON)
		DRM_ERROR("Poison interrupt\n");

	for_each_pipe(pipe) {
		if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) {
			if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
								  false))
				DRM_DEBUG_DRIVER("Pipe %c FIFO underrun\n",
						 pipe_name(pipe));
		}

		if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
			if (IS_IVYBRIDGE(dev))
				ivb_pipe_crc_irq_handler(dev, pipe);
			else
				hsw_pipe_crc_irq_handler(dev, pipe);
		}
	}

	I915_WRITE(GEN7_ERR_INT, err_int);
}

static void cpt_serr_int_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 serr_int = I915_READ(SERR_INT);

	if (serr_int & SERR_INT_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n");

	if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n");

	if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_C,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder C FIFO underrun\n");

	I915_WRITE(SERR_INT, serr_int);
}

static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;

	intel_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt);

	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
			       SDE_AUDIO_POWER_SHIFT_CPT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
				 port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK_CPT)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS_CPT)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");

	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");

	if (pch_iir & SDE_FDI_MASK_CPT)
		for_each_pipe(pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & SDE_ERROR_CPT)
		cpt_serr_int_handler(dev);
}

static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;

	if (de_iir & DE_AUX_CHANNEL_A)
		dp_aux_irq_handler(dev);

	if (de_iir & DE_GSE)
		intel_opregion_asle_intr(dev);

	if (de_iir & DE_POISON)
		DRM_ERROR("Poison interrupt\n");

	for_each_pipe(pipe) {
		if (de_iir & DE_PIPE_VBLANK(pipe))
			drm_handle_vblank(dev, pipe);

		if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
			if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
				DRM_DEBUG_DRIVER("Pipe %c FIFO underrun\n",
						 pipe_name(pipe));

		if (de_iir & DE_PIPE_CRC_DONE(pipe))
			i9xx_pipe_crc_irq_handler(dev, pipe);

		/* plane/pipes map 1:1 on ilk+ */
		if (de_iir & DE_PLANE_FLIP_DONE(pipe)) {
			intel_prepare_page_flip(dev, pipe);
			intel_finish_page_flip_plane(dev, pipe);
		}
	}

	/* check event from PCH */
	if (de_iir & DE_PCH_EVENT) {
		u32 pch_iir = I915_READ(SDEIIR);

		if (HAS_PCH_CPT(dev))
			cpt_irq_handler(dev, pch_iir);
		else
			ibx_irq_handler(dev, pch_iir);

		/* should clear PCH hotplug event before clear CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}

	if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
		ironlake_rps_change_irq_handler(dev);
}

static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe i;

	if (de_iir & DE_ERR_INT_IVB)
		ivb_err_int_handler(dev);

	if (de_iir & DE_AUX_CHANNEL_A_IVB)
		dp_aux_irq_handler(dev);

	if (de_iir & DE_GSE_IVB)
		intel_opregion_asle_intr(dev);

	for_each_pipe(i) {
		if (de_iir & (DE_PIPE_VBLANK_IVB(i)))
			drm_handle_vblank(dev, i);

		/* plane/pipes map 1:1 on ilk+ */
		if (de_iir & DE_PLANE_FLIP_DONE_IVB(i)) {
			intel_prepare_page_flip(dev, i);
			intel_finish_page_flip_plane(dev, i);
		}
	}

	/* check event from PCH */
	if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
		u32 pch_iir = I915_READ(SDEIIR);

		cpt_irq_handler(dev, pch_iir);

		/* clear PCH hotplug event before clear CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}
}

static irqreturn_t ironlake_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
	irqreturn_t ret = IRQ_NONE;

	atomic_inc(&dev_priv->irq_received);

	/* We get interrupts on unclaimed registers, so check for this before we
	 * do any I915_{READ,WRITE}. */
	intel_uncore_check_errors(dev);

	/* disable master interrupt before clearing iir  */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
	POSTING_READ(DEIER);

	/* Disable south interrupts. We'll only write to SDEIIR once, so further
	 * interrupts will be stored on its back queue, and then we'll be
	 * able to process them after we restore SDEIER (as soon as we restore
	 * it, we'll get an interrupt if SDEIIR still has something to process
	 * due to its back queue). */
	if (!HAS_PCH_NOP(dev)) {
		sde_ier = I915_READ(SDEIER);
		I915_WRITE(SDEIER, 0);
		POSTING_READ(SDEIER);
	}

	gt_iir = I915_READ(GTIIR);
	if (gt_iir) {
		if (INTEL_INFO(dev)->gen >= 6)
			snb_gt_irq_handler(dev, dev_priv, gt_iir);
		else
			ilk_gt_irq_handler(dev, dev_priv, gt_iir);
		I915_WRITE(GTIIR, gt_iir);
		ret = IRQ_HANDLED;
	}

	de_iir = I915_READ(DEIIR);
	if (de_iir) {
		if (INTEL_INFO(dev)->gen >= 7)
			ivb_display_irq_handler(dev, de_iir);
		else
			ilk_display_irq_handler(dev, de_iir);
		I915_WRITE(DEIIR, de_iir);
		ret = IRQ_HANDLED;
	}

	if (INTEL_INFO(dev)->gen >= 6) {
		u32 pm_iir = I915_READ(GEN6_PMIIR);
		if (pm_iir) {
			gen6_rps_irq_handler(dev_priv, pm_iir);
			I915_WRITE(GEN6_PMIIR, pm_iir);
			ret = IRQ_HANDLED;
		}
	}

	I915_WRITE(DEIER, de_ier);
	POSTING_READ(DEIER);
	if (!HAS_PCH_NOP(dev)) {
		I915_WRITE(SDEIER, sde_ier);
		POSTING_READ(SDEIER);
	}

	return ret;
}

static irqreturn_t gen8_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 master_ctl;
	irqreturn_t ret = IRQ_NONE;
	uint32_t tmp = 0;
	enum pipe pipe;

	atomic_inc(&dev_priv->irq_received);

	master_ctl = I915_READ(GEN8_MASTER_IRQ);
	master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
	if (!master_ctl)
		return IRQ_NONE;

	I915_WRITE(GEN8_MASTER_IRQ, 0);
	POSTING_READ(GEN8_MASTER_IRQ);

	ret = gen8_gt_irq_handler(dev, dev_priv, master_ctl);

	if (master_ctl & GEN8_DE_MISC_IRQ) {
		tmp = I915_READ(GEN8_DE_MISC_IIR);
		if (tmp & GEN8_DE_MISC_GSE)
			intel_opregion_asle_intr(dev);
		else if (tmp)
			DRM_ERROR("Unexpected DE Misc interrupt\n");
		else
			DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
control interrupt lied (DE MISC)!\n"); 1831 1832 if (tmp) { 1833 I915_WRITE(GEN8_DE_MISC_IIR, tmp); 1834 ret = IRQ_HANDLED; 1835 } 1836 } 1837 1838 if (master_ctl & GEN8_DE_PORT_IRQ) { 1839 tmp = I915_READ(GEN8_DE_PORT_IIR); 1840 if (tmp & GEN8_AUX_CHANNEL_A) 1841 dp_aux_irq_handler(dev); 1842 else if (tmp) 1843 DRM_ERROR("Unexpected DE Port interrupt\n"); 1844 else 1845 DRM_ERROR("The master control interrupt lied (DE PORT)!\n"); 1846 1847 if (tmp) { 1848 I915_WRITE(GEN8_DE_PORT_IIR, tmp); 1849 ret = IRQ_HANDLED; 1850 } 1851 } 1852 1853 for_each_pipe(pipe) { 1854 uint32_t pipe_iir; 1855 1856 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe))) 1857 continue; 1858 1859 pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe)); 1860 if (pipe_iir & GEN8_PIPE_VBLANK) 1861 drm_handle_vblank(dev, pipe); 1862 1863 if (pipe_iir & GEN8_PIPE_FLIP_DONE) { 1864 intel_prepare_page_flip(dev, pipe); 1865 intel_finish_page_flip_plane(dev, pipe); 1866 } 1867 1868 if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE) 1869 hsw_pipe_crc_irq_handler(dev, pipe); 1870 1871 if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN) { 1872 if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, 1873 false)) 1874 DRM_DEBUG_DRIVER("Pipe %c FIFO underrun\n", 1875 pipe_name(pipe)); 1876 } 1877 1878 if (pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS) { 1879 DRM_ERROR("Fault errors on pipe %c: 0x%08x\n", 1880 pipe_name(pipe), 1881 pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS); 1882 } 1883 1884 if (pipe_iir) { 1885 ret = IRQ_HANDLED; 1886 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir); 1887 } else 1888 DRM_ERROR("The master control interrupt lied (DE PIPE)!\n"); 1889 } 1890 1891 if (!HAS_PCH_NOP(dev) && master_ctl & GEN8_DE_PCH_IRQ) { 1892 /* 1893 * FIXME(BDW): Assume for now that the new interrupt handling 1894 * scheme also closed the SDE interrupt handling race we've seen 1895 * on older pch-split platforms. But this needs testing. 1896 */ 1897 u32 pch_iir = I915_READ(SDEIIR); 1898 1899 cpt_irq_handler(dev, pch_iir); 1900 1901 if (pch_iir) { 1902 I915_WRITE(SDEIIR, pch_iir); 1903 ret = IRQ_HANDLED; 1904 } 1905 } 1906 1907 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 1908 POSTING_READ(GEN8_MASTER_IRQ); 1909 1910 return ret; 1911 } 1912 1913 static void i915_error_wake_up(struct drm_i915_private *dev_priv, 1914 bool reset_completed) 1915 { 1916 struct intel_ring_buffer *ring; 1917 int i; 1918 1919 /* 1920 * Notify all waiters for GPU completion events that reset state has 1921 * been changed, and that they need to restart their wait after 1922 * checking for potential errors (and bail out to drop locks if there is 1923 * a gpu reset pending so that i915_error_work_func can acquire them). 1924 */ 1925 1926 /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */ 1927 for_each_ring(ring, dev_priv, i) 1928 wake_up_all(&ring->irq_queue); 1929 1930 /* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */ 1931 wake_up_all(&dev_priv->pending_flip_queue); 1932 1933 /* 1934 * Signal tasks blocked in i915_gem_wait_for_error that the pending 1935 * reset state is cleared. 1936 */ 1937 if (reset_completed) 1938 wake_up_all(&dev_priv->gpu_error.reset_queue); 1939 } 1940 1941 /** 1942 * i915_error_work_func - do process context error handling work 1943 * @work: work struct 1944 * 1945 * Fire an error uevent so userspace can see that a hang or error 1946 * was detected.
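 * If a reset is pending, this also performs the actual GPU reset via
 * i915_reset() and, on success, bumps the reset counter and signals
 * completion to userspace with a reset-done uevent.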
1947 */ 1948 static void i915_error_work_func(struct work_struct *work) 1949 { 1950 struct i915_gpu_error *error = container_of(work, struct i915_gpu_error, 1951 work); 1952 drm_i915_private_t *dev_priv = container_of(error, drm_i915_private_t, 1953 gpu_error); 1954 struct drm_device *dev = dev_priv->dev; 1955 char *error_event[] = { I915_ERROR_UEVENT "=1", NULL }; 1956 char *reset_event[] = { I915_RESET_UEVENT "=1", NULL }; 1957 char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL }; 1958 int ret; 1959 1960 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event); 1961 1962 /* 1963 * Note that there's only one work item which does gpu resets, so we 1964 * need not worry about concurrent gpu resets potentially incrementing 1965 * error->reset_counter twice. We only need to take care of another 1966 * racing irq/hangcheck declaring the gpu dead for a second time. A 1967 * quick check for that is good enough: schedule_work ensures the 1968 * correct ordering between hang detection and this work item, and since 1969 * the reset in-progress bit is only ever set by code outside of this 1970 * work we don't need to worry about any other races. 1971 */ 1972 if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) { 1973 DRM_DEBUG_DRIVER("resetting chip\n"); 1974 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, 1975 reset_event); 1976 1977 /* 1978 * All state reset _must_ be completed before we update the 1979 * reset counter, for otherwise waiters might miss the reset 1980 * pending state and not properly drop locks, resulting in 1981 * deadlocks with the reset work. 1982 */ 1983 ret = i915_reset(dev); 1984 1985 intel_display_handle_reset(dev); 1986 1987 if (ret == 0) { 1988 /* 1989 * After all the gem state is reset, increment the reset 1990 * counter and wake up everyone waiting for the reset to 1991 * complete. 1992 * 1993 * Since unlock operations are a one-sided barrier only, 1994 * we need to insert a barrier here to order any seqno 1995 * updates before 1996 * the counter increment. 1997 */ 1998 smp_mb__before_atomic_inc(); 1999 atomic_inc(&dev_priv->gpu_error.reset_counter); 2000 2001 kobject_uevent_env(&dev->primary->kdev->kobj, 2002 KOBJ_CHANGE, reset_done_event); 2003 } else { 2004 atomic_set_mask(I915_WEDGED, &error->reset_counter); 2005 } 2006 2007 /* 2008 * Note: The wake_up also serves as a memory barrier so that 2009 * waiters see the updated value of the reset counter atomic_t.
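 * (The wake-ups are issued by i915_error_wake_up(), which wakes the
 * ring irq_queues, the pending_flip_queue and gpu_error.reset_queue.)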
2010 */ 2011 i915_error_wake_up(dev_priv, true); 2012 } 2013 } 2014 2015 static void i915_report_and_clear_eir(struct drm_device *dev) 2016 { 2017 struct drm_i915_private *dev_priv = dev->dev_private; 2018 uint32_t instdone[I915_NUM_INSTDONE_REG]; 2019 u32 eir = I915_READ(EIR); 2020 int pipe, i; 2021 2022 if (!eir) 2023 return; 2024 2025 pr_err("render error detected, EIR: 0x%08x\n", eir); 2026 2027 i915_get_extra_instdone(dev, instdone); 2028 2029 if (IS_G4X(dev)) { 2030 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) { 2031 u32 ipeir = I915_READ(IPEIR_I965); 2032 2033 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); 2034 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); 2035 for (i = 0; i < ARRAY_SIZE(instdone); i++) 2036 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); 2037 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); 2038 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); 2039 I915_WRITE(IPEIR_I965, ipeir); 2040 POSTING_READ(IPEIR_I965); 2041 } 2042 if (eir & GM45_ERROR_PAGE_TABLE) { 2043 u32 pgtbl_err = I915_READ(PGTBL_ER); 2044 pr_err("page table error\n"); 2045 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err); 2046 I915_WRITE(PGTBL_ER, pgtbl_err); 2047 POSTING_READ(PGTBL_ER); 2048 } 2049 } 2050 2051 if (!IS_GEN2(dev)) { 2052 if (eir & I915_ERROR_PAGE_TABLE) { 2053 u32 pgtbl_err = I915_READ(PGTBL_ER); 2054 pr_err("page table error\n"); 2055 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err); 2056 I915_WRITE(PGTBL_ER, pgtbl_err); 2057 POSTING_READ(PGTBL_ER); 2058 } 2059 } 2060 2061 if (eir & I915_ERROR_MEMORY_REFRESH) { 2062 pr_err("memory refresh error:\n"); 2063 for_each_pipe(pipe) 2064 pr_err("pipe %c stat: 0x%08x\n", 2065 pipe_name(pipe), I915_READ(PIPESTAT(pipe))); 2066 /* pipestat has already been acked */ 2067 } 2068 if (eir & I915_ERROR_INSTRUCTION) { 2069 pr_err("instruction error\n"); 2070 pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM)); 2071 for (i = 0; i < ARRAY_SIZE(instdone); i++) 2072 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); 2073 if (INTEL_INFO(dev)->gen < 4) { 2074 u32 ipeir = I915_READ(IPEIR); 2075 2076 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR)); 2077 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR)); 2078 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD)); 2079 I915_WRITE(IPEIR, ipeir); 2080 POSTING_READ(IPEIR); 2081 } else { 2082 u32 ipeir = I915_READ(IPEIR_I965); 2083 2084 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); 2085 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); 2086 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); 2087 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); 2088 I915_WRITE(IPEIR_I965, ipeir); 2089 POSTING_READ(IPEIR_I965); 2090 } 2091 } 2092 2093 I915_WRITE(EIR, eir); 2094 POSTING_READ(EIR); 2095 eir = I915_READ(EIR); 2096 if (eir) { 2097 /* 2098 * some errors might have become stuck, 2099 * mask them. 2100 */ 2101 DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir); 2102 I915_WRITE(EMR, I915_READ(EMR) | eir); 2103 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 2104 } 2105 } 2106 2107 /** 2108 * i915_handle_error - handle an error interrupt 2109 * @dev: drm device 2110 * 2111 * Do some basic checking of register state at error interrupt time and 2112 * dump it to the syslog. Also call i915_capture_error_state() to make 2113 * sure we get a record and make it available in debugfs. Fire a uevent 2114 * so userspace knows something bad happened (should trigger collection 2115 * of a ring dump etc.).
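 * The heavy lifting (including any actual GPU reset) is deferred to
 * process context via the gpu_error.work item, which runs
 * i915_error_work_func().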
2116 */ 2117 void i915_handle_error(struct drm_device *dev, bool wedged) 2118 { 2119 struct drm_i915_private *dev_priv = dev->dev_private; 2120 2121 i915_capture_error_state(dev); 2122 i915_report_and_clear_eir(dev); 2123 2124 if (wedged) { 2125 atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG, 2126 &dev_priv->gpu_error.reset_counter); 2127 2128 /* 2129 * Wakeup waiting processes so that the reset work function 2130 * i915_error_work_func doesn't deadlock trying to grab various 2131 * locks. By bumping the reset counter first, the woken 2132 * processes will see a reset in progress and back off, 2133 * releasing their locks and then wait for the reset completion. 2134 * We must do this for _all_ gpu waiters that might hold locks 2135 * that the reset work needs to acquire. 2136 * 2137 * Note: The wake_up serves as the required memory barrier to 2138 * ensure that the waiters see the updated value of the reset 2139 * counter atomic_t. 2140 */ 2141 i915_error_wake_up(dev_priv, false); 2142 } 2143 2144 /* 2145 * Our reset work can grab modeset locks (since it needs to reset the 2146 * state of outstanding pageflips). Hence it must not be run on our own 2147 * dev_priv->wq work queue for otherwise the flush_work in the pageflip 2148 * code will deadlock. 2149 */ 2150 schedule_work(&dev_priv->gpu_error.work); 2151 } 2152 2153 static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe) 2154 { 2155 drm_i915_private_t *dev_priv = dev->dev_private; 2156 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 2157 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2158 struct drm_i915_gem_object *obj; 2159 struct intel_unpin_work *work; 2160 unsigned long flags; 2161 bool stall_detected; 2162 2163 /* Ignore early vblank irqs */ 2164 if (intel_crtc == NULL) 2165 return; 2166 2167 spin_lock_irqsave(&dev->event_lock, flags); 2168 work = intel_crtc->unpin_work; 2169 2170 if (work == NULL || 2171 atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE || 2172 !work->enable_stall_check) { 2173 /* Either the pending flip IRQ arrived, or we're too early.
Don't check */ 2174 spin_unlock_irqrestore(&dev->event_lock, flags); 2175 return; 2176 } 2177 2178 /* Potential stall - if we see that the flip has happened, assume a missed interrupt */ 2179 obj = work->pending_flip_obj; 2180 if (INTEL_INFO(dev)->gen >= 4) { 2181 int dspsurf = DSPSURF(intel_crtc->plane); 2182 stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) == 2183 i915_gem_obj_ggtt_offset(obj); 2184 } else { 2185 int dspaddr = DSPADDR(intel_crtc->plane); 2186 stall_detected = I915_READ(dspaddr) == (i915_gem_obj_ggtt_offset(obj) + 2187 crtc->y * crtc->fb->pitches[0] + 2188 crtc->x * crtc->fb->bits_per_pixel/8); 2189 } 2190 2191 spin_unlock_irqrestore(&dev->event_lock, flags); 2192 2193 if (stall_detected) { 2194 DRM_DEBUG_DRIVER("Pageflip stall detected\n"); 2195 intel_prepare_page_flip(dev, intel_crtc->plane); 2196 } 2197 } 2198 2199 /* Called from drm generic code, passed 'crtc' which 2200 * we use as a pipe index 2201 */ 2202 static int i915_enable_vblank(struct drm_device *dev, int pipe) 2203 { 2204 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2205 unsigned long irqflags; 2206 2207 if (!i915_pipe_enabled(dev, pipe)) 2208 return -EINVAL; 2209 2210 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2211 if (INTEL_INFO(dev)->gen >= 4) 2212 i915_enable_pipestat(dev_priv, pipe, 2213 PIPE_START_VBLANK_INTERRUPT_ENABLE); 2214 else 2215 i915_enable_pipestat(dev_priv, pipe, 2216 PIPE_VBLANK_INTERRUPT_ENABLE); 2217 2218 /* maintain vblank delivery even in deep C-states */ 2219 if (dev_priv->info->gen == 3) 2220 I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS)); 2221 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2222 2223 return 0; 2224 } 2225 2226 static int ironlake_enable_vblank(struct drm_device *dev, int pipe) 2227 { 2228 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2229 unsigned long irqflags; 2230 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? 
DE_PIPE_VBLANK_IVB(pipe) : 2231 DE_PIPE_VBLANK(pipe); 2232 2233 if (!i915_pipe_enabled(dev, pipe)) 2234 return -EINVAL; 2235 2236 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2237 ironlake_enable_display_irq(dev_priv, bit); 2238 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2239 2240 return 0; 2241 } 2242 2243 static int valleyview_enable_vblank(struct drm_device *dev, int pipe) 2244 { 2245 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2246 unsigned long irqflags; 2247 u32 imr; 2248 2249 if (!i915_pipe_enabled(dev, pipe)) 2250 return -EINVAL; 2251 2252 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2253 imr = I915_READ(VLV_IMR); 2254 if (pipe == PIPE_A) 2255 imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT; 2256 else 2257 imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; 2258 I915_WRITE(VLV_IMR, imr); 2259 i915_enable_pipestat(dev_priv, pipe, 2260 PIPE_START_VBLANK_INTERRUPT_ENABLE); 2261 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2262 2263 return 0; 2264 } 2265 2266 static int gen8_enable_vblank(struct drm_device *dev, int pipe) 2267 { 2268 struct drm_i915_private *dev_priv = dev->dev_private; 2269 unsigned long irqflags; 2270 2271 if (!i915_pipe_enabled(dev, pipe)) 2272 return -EINVAL; 2273 2274 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2275 dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK; 2276 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); 2277 POSTING_READ(GEN8_DE_PIPE_IMR(pipe)); 2278 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2279 return 0; 2280 } 2281 2282 /* Called from drm generic code, passed 'crtc' which 2283 * we use as a pipe index 2284 */ 2285 static void i915_disable_vblank(struct drm_device *dev, int pipe) 2286 { 2287 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2288 unsigned long irqflags; 2289 2290 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2291 if (dev_priv->info->gen == 3) 2292 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS)); 2293 2294 i915_disable_pipestat(dev_priv, pipe, 2295 PIPE_VBLANK_INTERRUPT_ENABLE | 2296 PIPE_START_VBLANK_INTERRUPT_ENABLE); 2297 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2298 } 2299 2300 static void ironlake_disable_vblank(struct drm_device *dev, int pipe) 2301 { 2302 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2303 unsigned long irqflags; 2304 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? 
DE_PIPE_VBLANK_IVB(pipe) : 2305 DE_PIPE_VBLANK(pipe); 2306 2307 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2308 ironlake_disable_display_irq(dev_priv, bit); 2309 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2310 } 2311 2312 static void valleyview_disable_vblank(struct drm_device *dev, int pipe) 2313 { 2314 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2315 unsigned long irqflags; 2316 u32 imr; 2317 2318 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2319 i915_disable_pipestat(dev_priv, pipe, 2320 PIPE_START_VBLANK_INTERRUPT_ENABLE); 2321 imr = I915_READ(VLV_IMR); 2322 if (pipe == PIPE_A) 2323 imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT; 2324 else 2325 imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; 2326 I915_WRITE(VLV_IMR, imr); 2327 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2328 } 2329 2330 static void gen8_disable_vblank(struct drm_device *dev, int pipe) 2331 { 2332 struct drm_i915_private *dev_priv = dev->dev_private; 2333 unsigned long irqflags; 2334 2335 if (!i915_pipe_enabled(dev, pipe)) 2336 return; 2337 2338 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2339 dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK; 2340 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); 2341 POSTING_READ(GEN8_DE_PIPE_IMR(pipe)); 2342 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2343 } 2344 2345 static u32 2346 ring_last_seqno(struct intel_ring_buffer *ring) 2347 { 2348 return list_entry(ring->request_list.prev, 2349 struct drm_i915_gem_request, list)->seqno; 2350 } 2351 2352 static bool 2353 ring_idle(struct intel_ring_buffer *ring, u32 seqno) 2354 { 2355 return (list_empty(&ring->request_list) || 2356 i915_seqno_passed(seqno, ring_last_seqno(ring))); 2357 } 2358 2359 static struct intel_ring_buffer * 2360 semaphore_waits_for(struct intel_ring_buffer *ring, u32 *seqno) 2361 { 2362 struct drm_i915_private *dev_priv = ring->dev->dev_private; 2363 u32 cmd, ipehr, acthd, acthd_min; 2364 2365 ipehr = I915_READ(RING_IPEHR(ring->mmio_base)); 2366 if ((ipehr & ~(0x3 << 16)) != 2367 (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | MI_SEMAPHORE_REGISTER)) 2368 return NULL; 2369 2370 /* ACTHD is likely pointing to the dword after the actual command, 2371 * so scan backwards until we find the MBOX. 
2372 */ 2373 acthd = intel_ring_get_active_head(ring) & HEAD_ADDR; 2374 acthd_min = max((int)acthd - 3 * 4, 0); 2375 do { 2376 cmd = ioread32(ring->virtual_start + acthd); 2377 if (cmd == ipehr) 2378 break; 2379 2380 acthd -= 4; 2381 if (acthd < acthd_min) 2382 return NULL; 2383 } while (1); 2384 2385 *seqno = ioread32(ring->virtual_start+acthd+4)+1; 2386 return &dev_priv->ring[(ring->id + (((ipehr >> 17) & 1) + 1)) % 3]; 2387 } 2388 2389 static int semaphore_passed(struct intel_ring_buffer *ring) 2390 { 2391 struct drm_i915_private *dev_priv = ring->dev->dev_private; 2392 struct intel_ring_buffer *signaller; 2393 u32 seqno, ctl; 2394 2395 ring->hangcheck.deadlock = true; 2396 2397 signaller = semaphore_waits_for(ring, &seqno); 2398 if (signaller == NULL || signaller->hangcheck.deadlock) 2399 return -1; 2400 2401 /* cursory check for an unkickable deadlock */ 2402 ctl = I915_READ_CTL(signaller); 2403 if (ctl & RING_WAIT_SEMAPHORE && semaphore_passed(signaller) < 0) 2404 return -1; 2405 2406 return i915_seqno_passed(signaller->get_seqno(signaller, false), seqno); 2407 } 2408 2409 static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv) 2410 { 2411 struct intel_ring_buffer *ring; 2412 int i; 2413 2414 for_each_ring(ring, dev_priv, i) 2415 ring->hangcheck.deadlock = false; 2416 } 2417 2418 static enum intel_ring_hangcheck_action 2419 ring_stuck(struct intel_ring_buffer *ring, u32 acthd) 2420 { 2421 struct drm_device *dev = ring->dev; 2422 struct drm_i915_private *dev_priv = dev->dev_private; 2423 u32 tmp; 2424 2425 if (ring->hangcheck.acthd != acthd) 2426 return HANGCHECK_ACTIVE; 2427 2428 if (IS_GEN2(dev)) 2429 return HANGCHECK_HUNG; 2430 2431 /* Is the chip hanging on a WAIT_FOR_EVENT? 2432 * If so we can simply poke the RB_WAIT bit 2433 * and break the hang. This should work on 2434 * all but the second generation chipsets. 2435 */ 2436 tmp = I915_READ_CTL(ring); 2437 if (tmp & RING_WAIT) { 2438 DRM_ERROR("Kicking stuck wait on %s\n", 2439 ring->name); 2440 i915_handle_error(dev, false); 2441 I915_WRITE_CTL(ring, tmp); 2442 return HANGCHECK_KICK; 2443 } 2444 2445 if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) { 2446 switch (semaphore_passed(ring)) { 2447 default: 2448 return HANGCHECK_HUNG; 2449 case 1: 2450 DRM_ERROR("Kicking stuck semaphore on %s\n", 2451 ring->name); 2452 i915_handle_error(dev, false); 2453 I915_WRITE_CTL(ring, tmp); 2454 return HANGCHECK_KICK; 2455 case 0: 2456 return HANGCHECK_WAIT; 2457 } 2458 } 2459 2460 return HANGCHECK_HUNG; 2461 } 2462 2463 /** 2464 * This is called when the chip hasn't reported back with completed 2465 * batchbuffers in a long time. We keep track of per-ring seqno progress and 2466 * if there is no progress, the hangcheck score for that ring is increased. 2467 * Further, acthd is inspected to see if the ring is stuck. If it is stuck, 2468 * we kick the ring. If we see no progress on three subsequent calls 2469 * we assume the chip is wedged and try to fix it by resetting the chip.
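 * (Progress is scored with the BUSY/KICK/HUNG increments defined below;
 * once a ring's score exceeds FIRE, the chip is declared hung and
 * i915_handle_error() is called with wedged set.)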
2470 */ 2471 static void i915_hangcheck_elapsed(unsigned long data) 2472 { 2473 struct drm_device *dev = (struct drm_device *)data; 2474 drm_i915_private_t *dev_priv = dev->dev_private; 2475 struct intel_ring_buffer *ring; 2476 int i; 2477 int busy_count = 0, rings_hung = 0; 2478 bool stuck[I915_NUM_RINGS] = { 0 }; 2479 #define BUSY 1 2480 #define KICK 5 2481 #define HUNG 20 2482 #define FIRE 30 2483 2484 if (!i915_enable_hangcheck) 2485 return; 2486 2487 for_each_ring(ring, dev_priv, i) { 2488 u32 seqno, acthd; 2489 bool busy = true; 2490 2491 semaphore_clear_deadlocks(dev_priv); 2492 2493 seqno = ring->get_seqno(ring, false); 2494 acthd = intel_ring_get_active_head(ring); 2495 2496 if (ring->hangcheck.seqno == seqno) { 2497 if (ring_idle(ring, seqno)) { 2498 ring->hangcheck.action = HANGCHECK_IDLE; 2499 2500 if (waitqueue_active(&ring->irq_queue)) { 2501 /* Issue a wake-up to catch stuck h/w. */ 2502 if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) { 2503 if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring))) 2504 DRM_ERROR("Hangcheck timer elapsed... %s idle\n", 2505 ring->name); 2506 else 2507 DRM_INFO("Fake missed irq on %s\n", 2508 ring->name); 2509 wake_up_all(&ring->irq_queue); 2510 } 2511 /* Safeguard against driver failure */ 2512 ring->hangcheck.score += BUSY; 2513 } else 2514 busy = false; 2515 } else { 2516 /* We always increment the hangcheck score 2517 * if the ring is busy and still processing 2518 * the same request, so that no single request 2519 * can run indefinitely (such as a chain of 2520 * batches). The only time we do not increment 2521 * the hangcheck score on this ring is if this 2522 * ring is in a legitimate wait for another 2523 * ring. In that case the waiting ring is a 2524 * victim and we want to be sure we catch the 2525 * right culprit. Then every time we do kick 2526 * the ring, add a small increment to the 2527 * score so that we can catch a batch that is 2528 * being repeatedly kicked and so responsible 2529 * for stalling the machine. 2530 */ 2531 ring->hangcheck.action = ring_stuck(ring, 2532 acthd); 2533 2534 switch (ring->hangcheck.action) { 2535 case HANGCHECK_IDLE: 2536 case HANGCHECK_WAIT: 2537 break; 2538 case HANGCHECK_ACTIVE: 2539 ring->hangcheck.score += BUSY; 2540 break; 2541 case HANGCHECK_KICK: 2542 ring->hangcheck.score += KICK; 2543 break; 2544 case HANGCHECK_HUNG: 2545 ring->hangcheck.score += HUNG; 2546 stuck[i] = true; 2547 break; 2548 } 2549 } 2550 } else { 2551 ring->hangcheck.action = HANGCHECK_ACTIVE; 2552 2553 /* Gradually reduce the count so that we catch DoS 2554 * attempts across multiple batches. 2555 */ 2556 if (ring->hangcheck.score > 0) 2557 ring->hangcheck.score--; 2558 } 2559 2560 ring->hangcheck.seqno = seqno; 2561 ring->hangcheck.acthd = acthd; 2562 busy_count += busy; 2563 } 2564 2565 for_each_ring(ring, dev_priv, i) { 2566 if (ring->hangcheck.score > FIRE) { 2567 DRM_INFO("%s on %s\n", 2568 stuck[i] ?
"stuck" : "no progress", 2569 ring->name); 2570 rings_hung++; 2571 } 2572 } 2573 2574 if (rings_hung) 2575 return i915_handle_error(dev, true); 2576 2577 if (busy_count) 2578 /* Reset timer case chip hangs without another request 2579 * being added */ 2580 i915_queue_hangcheck(dev); 2581 } 2582 2583 void i915_queue_hangcheck(struct drm_device *dev) 2584 { 2585 struct drm_i915_private *dev_priv = dev->dev_private; 2586 if (!i915_enable_hangcheck) 2587 return; 2588 2589 mod_timer(&dev_priv->gpu_error.hangcheck_timer, 2590 round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES)); 2591 } 2592 2593 static void ibx_irq_preinstall(struct drm_device *dev) 2594 { 2595 struct drm_i915_private *dev_priv = dev->dev_private; 2596 2597 if (HAS_PCH_NOP(dev)) 2598 return; 2599 2600 /* south display irq */ 2601 I915_WRITE(SDEIMR, 0xffffffff); 2602 /* 2603 * SDEIER is also touched by the interrupt handler to work around missed 2604 * PCH interrupts. Hence we can't update it after the interrupt handler 2605 * is enabled - instead we unconditionally enable all PCH interrupt 2606 * sources here, but then only unmask them as needed with SDEIMR. 2607 */ 2608 I915_WRITE(SDEIER, 0xffffffff); 2609 POSTING_READ(SDEIER); 2610 } 2611 2612 static void gen5_gt_irq_preinstall(struct drm_device *dev) 2613 { 2614 struct drm_i915_private *dev_priv = dev->dev_private; 2615 2616 /* and GT */ 2617 I915_WRITE(GTIMR, 0xffffffff); 2618 I915_WRITE(GTIER, 0x0); 2619 POSTING_READ(GTIER); 2620 2621 if (INTEL_INFO(dev)->gen >= 6) { 2622 /* and PM */ 2623 I915_WRITE(GEN6_PMIMR, 0xffffffff); 2624 I915_WRITE(GEN6_PMIER, 0x0); 2625 POSTING_READ(GEN6_PMIER); 2626 } 2627 } 2628 2629 /* drm_dma.h hooks 2630 */ 2631 static void ironlake_irq_preinstall(struct drm_device *dev) 2632 { 2633 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2634 2635 atomic_set(&dev_priv->irq_received, 0); 2636 2637 I915_WRITE(HWSTAM, 0xeffe); 2638 2639 I915_WRITE(DEIMR, 0xffffffff); 2640 I915_WRITE(DEIER, 0x0); 2641 POSTING_READ(DEIER); 2642 2643 gen5_gt_irq_preinstall(dev); 2644 2645 ibx_irq_preinstall(dev); 2646 } 2647 2648 static void valleyview_irq_preinstall(struct drm_device *dev) 2649 { 2650 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2651 int pipe; 2652 2653 atomic_set(&dev_priv->irq_received, 0); 2654 2655 /* VLV magic */ 2656 I915_WRITE(VLV_IMR, 0); 2657 I915_WRITE(RING_IMR(RENDER_RING_BASE), 0); 2658 I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0); 2659 I915_WRITE(RING_IMR(BLT_RING_BASE), 0); 2660 2661 /* and GT */ 2662 I915_WRITE(GTIIR, I915_READ(GTIIR)); 2663 I915_WRITE(GTIIR, I915_READ(GTIIR)); 2664 2665 gen5_gt_irq_preinstall(dev); 2666 2667 I915_WRITE(DPINVGTT, 0xff); 2668 2669 I915_WRITE(PORT_HOTPLUG_EN, 0); 2670 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 2671 for_each_pipe(pipe) 2672 I915_WRITE(PIPESTAT(pipe), 0xffff); 2673 I915_WRITE(VLV_IIR, 0xffffffff); 2674 I915_WRITE(VLV_IMR, 0xffffffff); 2675 I915_WRITE(VLV_IER, 0x0); 2676 POSTING_READ(VLV_IER); 2677 } 2678 2679 static void gen8_irq_preinstall(struct drm_device *dev) 2680 { 2681 struct drm_i915_private *dev_priv = dev->dev_private; 2682 int pipe; 2683 2684 atomic_set(&dev_priv->irq_received, 0); 2685 2686 I915_WRITE(GEN8_MASTER_IRQ, 0); 2687 POSTING_READ(GEN8_MASTER_IRQ); 2688 2689 /* IIR can theoretically queue up two events. 
Be paranoid */ 2690 #define GEN8_IRQ_INIT_NDX(type, which) do { \ 2691 I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \ 2692 POSTING_READ(GEN8_##type##_IMR(which)); \ 2693 I915_WRITE(GEN8_##type##_IER(which), 0); \ 2694 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \ 2695 POSTING_READ(GEN8_##type##_IIR(which)); \ 2696 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \ 2697 } while (0) 2698 2699 #define GEN8_IRQ_INIT(type) do { \ 2700 I915_WRITE(GEN8_##type##_IMR, 0xffffffff); \ 2701 POSTING_READ(GEN8_##type##_IMR); \ 2702 I915_WRITE(GEN8_##type##_IER, 0); \ 2703 I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \ 2704 POSTING_READ(GEN8_##type##_IIR); \ 2705 I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \ 2706 } while (0) 2707 2708 GEN8_IRQ_INIT_NDX(GT, 0); 2709 GEN8_IRQ_INIT_NDX(GT, 1); 2710 GEN8_IRQ_INIT_NDX(GT, 2); 2711 GEN8_IRQ_INIT_NDX(GT, 3); 2712 2713 for_each_pipe(pipe) { 2714 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe); 2715 } 2716 2717 GEN8_IRQ_INIT(DE_PORT); 2718 GEN8_IRQ_INIT(DE_MISC); 2719 GEN8_IRQ_INIT(PCU); 2720 #undef GEN8_IRQ_INIT 2721 #undef GEN8_IRQ_INIT_NDX 2722 2723 POSTING_READ(GEN8_PCU_IIR); 2724 2725 ibx_irq_preinstall(dev); 2726 } 2727 2728 static void ibx_hpd_irq_setup(struct drm_device *dev) 2729 { 2730 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2731 struct drm_mode_config *mode_config = &dev->mode_config; 2732 struct intel_encoder *intel_encoder; 2733 u32 hotplug_irqs, hotplug, enabled_irqs = 0; 2734 2735 if (HAS_PCH_IBX(dev)) { 2736 hotplug_irqs = SDE_HOTPLUG_MASK; 2737 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head) 2738 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) 2739 enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin]; 2740 } else { 2741 hotplug_irqs = SDE_HOTPLUG_MASK_CPT; 2742 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head) 2743 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) 2744 enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin]; 2745 } 2746 2747 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 2748 2749 /* 2750 * Enable digital hotplug on the PCH, and configure the DP short pulse 2751 * duration to 2ms (which is the minimum in the Display Port spec) 2752 * 2753 * This register is the same on all known PCH chips. 
2754 */ 2755 hotplug = I915_READ(PCH_PORT_HOTPLUG); 2756 hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK); 2757 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms; 2758 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms; 2759 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms; 2760 I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 2761 } 2762 2763 static void ibx_irq_postinstall(struct drm_device *dev) 2764 { 2765 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2766 u32 mask; 2767 2768 if (HAS_PCH_NOP(dev)) 2769 return; 2770 2771 if (HAS_PCH_IBX(dev)) { 2772 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_TRANSB_FIFO_UNDER | 2773 SDE_TRANSA_FIFO_UNDER | SDE_POISON; 2774 } else { 2775 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT | SDE_ERROR_CPT; 2776 2777 I915_WRITE(SERR_INT, I915_READ(SERR_INT)); 2778 } 2779 2780 I915_WRITE(SDEIIR, I915_READ(SDEIIR)); 2781 I915_WRITE(SDEIMR, ~mask); 2782 } 2783 2784 static void gen5_gt_irq_postinstall(struct drm_device *dev) 2785 { 2786 struct drm_i915_private *dev_priv = dev->dev_private; 2787 u32 pm_irqs, gt_irqs; 2788 2789 pm_irqs = gt_irqs = 0; 2790 2791 dev_priv->gt_irq_mask = ~0; 2792 if (HAS_L3_DPF(dev)) { 2793 /* L3 parity interrupt is always unmasked. */ 2794 dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev); 2795 gt_irqs |= GT_PARITY_ERROR(dev); 2796 } 2797 2798 gt_irqs |= GT_RENDER_USER_INTERRUPT; 2799 if (IS_GEN5(dev)) { 2800 gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT | 2801 ILK_BSD_USER_INTERRUPT; 2802 } else { 2803 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT; 2804 } 2805 2806 I915_WRITE(GTIIR, I915_READ(GTIIR)); 2807 I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 2808 I915_WRITE(GTIER, gt_irqs); 2809 POSTING_READ(GTIER); 2810 2811 if (INTEL_INFO(dev)->gen >= 6) { 2812 pm_irqs |= GEN6_PM_RPS_EVENTS; 2813 2814 if (HAS_VEBOX(dev)) 2815 pm_irqs |= PM_VEBOX_USER_INTERRUPT; 2816 2817 dev_priv->pm_irq_mask = 0xffffffff; 2818 I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR)); 2819 I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask); 2820 I915_WRITE(GEN6_PMIER, pm_irqs); 2821 POSTING_READ(GEN6_PMIER); 2822 } 2823 } 2824 2825 static int ironlake_irq_postinstall(struct drm_device *dev) 2826 { 2827 unsigned long irqflags; 2828 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2829 u32 display_mask, extra_mask; 2830 2831 if (INTEL_INFO(dev)->gen >= 7) { 2832 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | 2833 DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB | 2834 DE_PLANEB_FLIP_DONE_IVB | 2835 DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB | 2836 DE_ERR_INT_IVB); 2837 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB | 2838 DE_PIPEA_VBLANK_IVB); 2839 2840 I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT)); 2841 } else { 2842 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | 2843 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE | 2844 DE_AUX_CHANNEL_A | 2845 DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN | 2846 DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE | 2847 DE_POISON); 2848 extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT; 2849 } 2850 2851 dev_priv->irq_mask = ~display_mask; 2852 2853 /* should always be able to generate an irq */ 2854 I915_WRITE(DEIIR, I915_READ(DEIIR)); 2855 I915_WRITE(DEIMR, dev_priv->irq_mask); 2856 I915_WRITE(DEIER, display_mask | extra_mask); 2857 POSTING_READ(DEIER); 2858 2859 gen5_gt_irq_postinstall(dev); 2860 2861 ibx_irq_postinstall(dev); 2862 2863 if (IS_IRONLAKE_M(dev)) { 2864 /* Enable PCU event interrupts 2865 * 2866 *
spinlocking not required here for correctness since interrupt 2867 * setup is guaranteed to run in single-threaded context. But we 2868 * need it to make the assert_spin_locked happy. */ 2869 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2870 ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT); 2871 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2872 } 2873 2874 return 0; 2875 } 2876 2877 static int valleyview_irq_postinstall(struct drm_device *dev) 2878 { 2879 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2880 u32 enable_mask; 2881 u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV | 2882 PIPE_CRC_DONE_ENABLE; 2883 unsigned long irqflags; 2884 2885 enable_mask = I915_DISPLAY_PORT_INTERRUPT; 2886 enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 2887 I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT | 2888 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 2889 I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; 2890 2891 /* 2892 *Leave vblank interrupts masked initially. enable/disable will 2893 * toggle them based on usage. 2894 */ 2895 dev_priv->irq_mask = (~enable_mask) | 2896 I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT | 2897 I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; 2898 2899 I915_WRITE(PORT_HOTPLUG_EN, 0); 2900 POSTING_READ(PORT_HOTPLUG_EN); 2901 2902 I915_WRITE(VLV_IMR, dev_priv->irq_mask); 2903 I915_WRITE(VLV_IER, enable_mask); 2904 I915_WRITE(VLV_IIR, 0xffffffff); 2905 I915_WRITE(PIPESTAT(0), 0xffff); 2906 I915_WRITE(PIPESTAT(1), 0xffff); 2907 POSTING_READ(VLV_IER); 2908 2909 /* Interrupt setup is already guaranteed to be single-threaded, this is 2910 * just to make the assert_spin_locked check happy. */ 2911 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2912 i915_enable_pipestat(dev_priv, PIPE_A, pipestat_enable); 2913 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_EVENT_ENABLE); 2914 i915_enable_pipestat(dev_priv, PIPE_B, pipestat_enable); 2915 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2916 2917 I915_WRITE(VLV_IIR, 0xffffffff); 2918 I915_WRITE(VLV_IIR, 0xffffffff); 2919 2920 gen5_gt_irq_postinstall(dev); 2921 2922 /* ack & enable invalid PTE error interrupts */ 2923 #if 0 /* FIXME: add support to irq handler for checking these bits */ 2924 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK); 2925 I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK); 2926 #endif 2927 2928 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); 2929 2930 return 0; 2931 } 2932 2933 static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv) 2934 { 2935 int i; 2936 2937 /* These are interrupts we'll toggle with the ring mask register */ 2938 uint32_t gt_interrupts[] = { 2939 GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT | 2940 GT_RENDER_L3_PARITY_ERROR_INTERRUPT | 2941 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT, 2942 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT | 2943 GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT, 2944 0, 2945 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT 2946 }; 2947 2948 for (i = 0; i < ARRAY_SIZE(gt_interrupts); i++) { 2949 u32 tmp = I915_READ(GEN8_GT_IIR(i)); 2950 if (tmp) 2951 DRM_ERROR("Interrupt (%d) should have been masked in pre-install 0x%08x\n", 2952 i, tmp); 2953 I915_WRITE(GEN8_GT_IMR(i), ~gt_interrupts[i]); 2954 I915_WRITE(GEN8_GT_IER(i), gt_interrupts[i]); 2955 } 2956 POSTING_READ(GEN8_GT_IER(0)); 2957 } 2958 2959 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) 2960 { 2961 struct drm_device *dev = dev_priv->dev; 2962 uint32_t de_pipe_masked = GEN8_PIPE_FLIP_DONE | 2963 GEN8_PIPE_CDCLK_CRC_DONE | 2964 GEN8_PIPE_FIFO_UNDERRUN | 2965 
GEN8_DE_PIPE_IRQ_FAULT_ERRORS; 2966 uint32_t de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK; 2967 int pipe; 2968 dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked; 2969 dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked; 2970 dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked; 2971 2972 for_each_pipe(pipe) { 2973 u32 tmp = I915_READ(GEN8_DE_PIPE_IIR(pipe)); 2974 if (tmp) 2975 DRM_ERROR("Interrupt (%d) should have been masked in pre-install 0x%08x\n", 2976 pipe, tmp); 2977 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); 2978 I915_WRITE(GEN8_DE_PIPE_IER(pipe), de_pipe_enables); 2979 } 2980 POSTING_READ(GEN8_DE_PIPE_ISR(0)); 2981 2982 I915_WRITE(GEN8_DE_PORT_IMR, ~GEN8_AUX_CHANNEL_A); 2983 I915_WRITE(GEN8_DE_PORT_IER, GEN8_AUX_CHANNEL_A); 2984 POSTING_READ(GEN8_DE_PORT_IER); 2985 } 2986 2987 static int gen8_irq_postinstall(struct drm_device *dev) 2988 { 2989 struct drm_i915_private *dev_priv = dev->dev_private; 2990 2991 gen8_gt_irq_postinstall(dev_priv); 2992 gen8_de_irq_postinstall(dev_priv); 2993 2994 ibx_irq_postinstall(dev); 2995 2996 I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL); 2997 POSTING_READ(GEN8_MASTER_IRQ); 2998 2999 return 0; 3000 } 3001 3002 static void gen8_irq_uninstall(struct drm_device *dev) 3003 { 3004 struct drm_i915_private *dev_priv = dev->dev_private; 3005 int pipe; 3006 3007 if (!dev_priv) 3008 return; 3009 3010 atomic_set(&dev_priv->irq_received, 0); 3011 3012 I915_WRITE(GEN8_MASTER_IRQ, 0); 3013 3014 #define GEN8_IRQ_FINI_NDX(type, which) do { \ 3015 I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \ 3016 I915_WRITE(GEN8_##type##_IER(which), 0); \ 3017 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \ 3018 } while (0) 3019 3020 #define GEN8_IRQ_FINI(type) do { \ 3021 I915_WRITE(GEN8_##type##_IMR, 0xffffffff); \ 3022 I915_WRITE(GEN8_##type##_IER, 0); \ 3023 I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \ 3024 } while (0) 3025 3026 GEN8_IRQ_FINI_NDX(GT, 0); 3027 GEN8_IRQ_FINI_NDX(GT, 1); 3028 GEN8_IRQ_FINI_NDX(GT, 2); 3029 GEN8_IRQ_FINI_NDX(GT, 3); 3030 3031 for_each_pipe(pipe) { 3032 GEN8_IRQ_FINI_NDX(DE_PIPE, pipe); 3033 } 3034 3035 GEN8_IRQ_FINI(DE_PORT); 3036 GEN8_IRQ_FINI(DE_MISC); 3037 GEN8_IRQ_FINI(PCU); 3038 #undef GEN8_IRQ_FINI 3039 #undef GEN8_IRQ_FINI_NDX 3040 3041 POSTING_READ(GEN8_PCU_IIR); 3042 } 3043 3044 static void valleyview_irq_uninstall(struct drm_device *dev) 3045 { 3046 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3047 int pipe; 3048 3049 if (!dev_priv) 3050 return; 3051 3052 del_timer_sync(&dev_priv->hotplug_reenable_timer); 3053 3054 for_each_pipe(pipe) 3055 I915_WRITE(PIPESTAT(pipe), 0xffff); 3056 3057 I915_WRITE(HWSTAM, 0xffffffff); 3058 I915_WRITE(PORT_HOTPLUG_EN, 0); 3059 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3060 for_each_pipe(pipe) 3061 I915_WRITE(PIPESTAT(pipe), 0xffff); 3062 I915_WRITE(VLV_IIR, 0xffffffff); 3063 I915_WRITE(VLV_IMR, 0xffffffff); 3064 I915_WRITE(VLV_IER, 0x0); 3065 POSTING_READ(VLV_IER); 3066 } 3067 3068 static void ironlake_irq_uninstall(struct drm_device *dev) 3069 { 3070 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3071 3072 if (!dev_priv) 3073 return; 3074 3075 del_timer_sync(&dev_priv->hotplug_reenable_timer); 3076 3077 I915_WRITE(HWSTAM, 0xffffffff); 3078 3079 I915_WRITE(DEIMR, 0xffffffff); 3080 I915_WRITE(DEIER, 0x0); 3081 I915_WRITE(DEIIR, I915_READ(DEIIR)); 3082 if (IS_GEN7(dev)) 3083 I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT)); 3084 3085 I915_WRITE(GTIMR, 0xffffffff); 3086 I915_WRITE(GTIER, 0x0); 3087 
I915_WRITE(GTIIR, I915_READ(GTIIR)); 3088 3089 if (HAS_PCH_NOP(dev)) 3090 return; 3091 3092 I915_WRITE(SDEIMR, 0xffffffff); 3093 I915_WRITE(SDEIER, 0x0); 3094 I915_WRITE(SDEIIR, I915_READ(SDEIIR)); 3095 if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev)) 3096 I915_WRITE(SERR_INT, I915_READ(SERR_INT)); 3097 } 3098 3099 static void i8xx_irq_preinstall(struct drm_device * dev) 3100 { 3101 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3102 int pipe; 3103 3104 atomic_set(&dev_priv->irq_received, 0); 3105 3106 for_each_pipe(pipe) 3107 I915_WRITE(PIPESTAT(pipe), 0); 3108 I915_WRITE16(IMR, 0xffff); 3109 I915_WRITE16(IER, 0x0); 3110 POSTING_READ16(IER); 3111 } 3112 3113 static int i8xx_irq_postinstall(struct drm_device *dev) 3114 { 3115 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3116 unsigned long irqflags; 3117 3118 I915_WRITE16(EMR, 3119 ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); 3120 3121 /* Unmask the interrupts that we always want on. */ 3122 dev_priv->irq_mask = 3123 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3124 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3125 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3126 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | 3127 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 3128 I915_WRITE16(IMR, dev_priv->irq_mask); 3129 3130 I915_WRITE16(IER, 3131 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3132 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3133 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT | 3134 I915_USER_INTERRUPT); 3135 POSTING_READ16(IER); 3136 3137 /* Interrupt setup is already guaranteed to be single-threaded, this is 3138 * just to make the assert_spin_locked check happy. */ 3139 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3140 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_ENABLE); 3141 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_ENABLE); 3142 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3143 3144 return 0; 3145 } 3146 3147 /* 3148 * Returns true when a page flip has completed. 3149 */ 3150 static bool i8xx_handle_vblank(struct drm_device *dev, 3151 int plane, int pipe, u32 iir) 3152 { 3153 drm_i915_private_t *dev_priv = dev->dev_private; 3154 u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane); 3155 3156 if (!drm_handle_vblank(dev, pipe)) 3157 return false; 3158 3159 if ((iir & flip_pending) == 0) 3160 return false; 3161 3162 intel_prepare_page_flip(dev, plane); 3163 3164 /* We detect FlipDone by looking for the change in PendingFlip from '1' 3165 * to '0' on the following vblank, i.e. IIR has the Pendingflip 3166 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence 3167 * the flip is completed (no longer pending). Since this doesn't raise 3168 * an interrupt per se, we watch for the change at vblank. 
3169 */ 3170 if (I915_READ16(ISR) & flip_pending) 3171 return false; 3172 3173 intel_finish_page_flip(dev, pipe); 3174 3175 return true; 3176 } 3177 3178 static irqreturn_t i8xx_irq_handler(int irq, void *arg) 3179 { 3180 struct drm_device *dev = (struct drm_device *) arg; 3181 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3182 u16 iir, new_iir; 3183 u32 pipe_stats[2]; 3184 unsigned long irqflags; 3185 int pipe; 3186 u16 flip_mask = 3187 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3188 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 3189 3190 atomic_inc(&dev_priv->irq_received); 3191 3192 iir = I915_READ16(IIR); 3193 if (iir == 0) 3194 return IRQ_NONE; 3195 3196 while (iir & ~flip_mask) { 3197 /* Can't rely on pipestat interrupt bit in iir as it might 3198 * have been cleared after the pipestat interrupt was received. 3199 * It doesn't set the bit in iir again, but it still produces 3200 * interrupts (for non-MSI). 3201 */ 3202 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3203 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 3204 i915_handle_error(dev, false); 3205 3206 for_each_pipe(pipe) { 3207 int reg = PIPESTAT(pipe); 3208 pipe_stats[pipe] = I915_READ(reg); 3209 3210 /* 3211 * Clear the PIPE*STAT regs before the IIR 3212 */ 3213 if (pipe_stats[pipe] & 0x8000ffff) { 3214 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 3215 DRM_DEBUG_DRIVER("pipe %c underrun\n", 3216 pipe_name(pipe)); 3217 I915_WRITE(reg, pipe_stats[pipe]); 3218 } 3219 } 3220 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3221 3222 I915_WRITE16(IIR, iir & ~flip_mask); 3223 new_iir = I915_READ16(IIR); /* Flush posted writes */ 3224 3225 i915_update_dri1_breadcrumb(dev); 3226 3227 if (iir & I915_USER_INTERRUPT) 3228 notify_ring(dev, &dev_priv->ring[RCS]); 3229 3230 for_each_pipe(pipe) { 3231 int plane = pipe; 3232 if (HAS_FBC(dev)) 3233 plane = !plane; 3234 3235 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS && 3236 i8xx_handle_vblank(dev, plane, pipe, iir)) 3237 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane); 3238 3239 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 3240 i9xx_pipe_crc_irq_handler(dev, pipe); 3241 } 3242 3243 iir = new_iir; 3244 } 3245 3246 return IRQ_HANDLED; 3247 } 3248 3249 static void i8xx_irq_uninstall(struct drm_device * dev) 3250 { 3251 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3252 int pipe; 3253 3254 for_each_pipe(pipe) { 3255 /* Clear enable bits; then clear status bits */ 3256 I915_WRITE(PIPESTAT(pipe), 0); 3257 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); 3258 } 3259 I915_WRITE16(IMR, 0xffff); 3260 I915_WRITE16(IER, 0x0); 3261 I915_WRITE16(IIR, I915_READ16(IIR)); 3262 } 3263 3264 static void i915_irq_preinstall(struct drm_device * dev) 3265 { 3266 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3267 int pipe; 3268 3269 atomic_set(&dev_priv->irq_received, 0); 3270 3271 if (I915_HAS_HOTPLUG(dev)) { 3272 I915_WRITE(PORT_HOTPLUG_EN, 0); 3273 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3274 } 3275 3276 I915_WRITE16(HWSTAM, 0xeffe); 3277 for_each_pipe(pipe) 3278 I915_WRITE(PIPESTAT(pipe), 0); 3279 I915_WRITE(IMR, 0xffffffff); 3280 I915_WRITE(IER, 0x0); 3281 POSTING_READ(IER); 3282 } 3283 3284 static int i915_irq_postinstall(struct drm_device *dev) 3285 { 3286 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3287 u32 enable_mask; 3288 unsigned long irqflags; 3289 3290 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); 3291 
3292 /* Unmask the interrupts that we always want on. */ 3293 dev_priv->irq_mask = 3294 ~(I915_ASLE_INTERRUPT | 3295 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3296 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3297 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3298 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | 3299 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 3300 3301 enable_mask = 3302 I915_ASLE_INTERRUPT | 3303 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3304 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3305 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT | 3306 I915_USER_INTERRUPT; 3307 3308 if (I915_HAS_HOTPLUG(dev)) { 3309 I915_WRITE(PORT_HOTPLUG_EN, 0); 3310 POSTING_READ(PORT_HOTPLUG_EN); 3311 3312 /* Enable in IER... */ 3313 enable_mask |= I915_DISPLAY_PORT_INTERRUPT; 3314 /* and unmask in IMR */ 3315 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT; 3316 } 3317 3318 I915_WRITE(IMR, dev_priv->irq_mask); 3319 I915_WRITE(IER, enable_mask); 3320 POSTING_READ(IER); 3321 3322 i915_enable_asle_pipestat(dev); 3323 3324 /* Interrupt setup is already guaranteed to be single-threaded, this is 3325 * just to make the assert_spin_locked check happy. */ 3326 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3327 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_ENABLE); 3328 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_ENABLE); 3329 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3330 3331 return 0; 3332 } 3333 3334 /* 3335 * Returns true when a page flip has completed. 3336 */ 3337 static bool i915_handle_vblank(struct drm_device *dev, 3338 int plane, int pipe, u32 iir) 3339 { 3340 drm_i915_private_t *dev_priv = dev->dev_private; 3341 u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane); 3342 3343 if (!drm_handle_vblank(dev, pipe)) 3344 return false; 3345 3346 if ((iir & flip_pending) == 0) 3347 return false; 3348 3349 intel_prepare_page_flip(dev, plane); 3350 3351 /* We detect FlipDone by looking for the change in PendingFlip from '1' 3352 * to '0' on the following vblank, i.e. IIR has the Pendingflip 3353 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence 3354 * the flip is completed (no longer pending). Since this doesn't raise 3355 * an interrupt per se, we watch for the change at vblank. 3356 */ 3357 if (I915_READ(ISR) & flip_pending) 3358 return false; 3359 3360 intel_finish_page_flip(dev, pipe); 3361 3362 return true; 3363 } 3364 3365 static irqreturn_t i915_irq_handler(int irq, void *arg) 3366 { 3367 struct drm_device *dev = (struct drm_device *) arg; 3368 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3369 u32 iir, new_iir, pipe_stats[I915_MAX_PIPES]; 3370 unsigned long irqflags; 3371 u32 flip_mask = 3372 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3373 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 3374 int pipe, ret = IRQ_NONE; 3375 3376 atomic_inc(&dev_priv->irq_received); 3377 3378 iir = I915_READ(IIR); 3379 do { 3380 bool irq_received = (iir & ~flip_mask) != 0; 3381 bool blc_event = false; 3382 3383 /* Can't rely on pipestat interrupt bit in iir as it might 3384 * have been cleared after the pipestat interrupt was received. 3385 * It doesn't set the bit in iir again, but it still produces 3386 * interrupts (for non-MSI). 
3387 */ 3388 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3389 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 3390 i915_handle_error(dev, false); 3391 3392 for_each_pipe(pipe) { 3393 int reg = PIPESTAT(pipe); 3394 pipe_stats[pipe] = I915_READ(reg); 3395 3396 /* Clear the PIPE*STAT regs before the IIR */ 3397 if (pipe_stats[pipe] & 0x8000ffff) { 3398 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 3399 DRM_DEBUG_DRIVER("pipe %c underrun\n", 3400 pipe_name(pipe)); 3401 I915_WRITE(reg, pipe_stats[pipe]); 3402 irq_received = true; 3403 } 3404 } 3405 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3406 3407 if (!irq_received) 3408 break; 3409 3410 /* Consume port. Then clear IIR or we'll miss events */ 3411 if ((I915_HAS_HOTPLUG(dev)) && 3412 (iir & I915_DISPLAY_PORT_INTERRUPT)) { 3413 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); 3414 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; 3415 3416 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", 3417 hotplug_status); 3418 3419 intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915); 3420 3421 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); 3422 POSTING_READ(PORT_HOTPLUG_STAT); 3423 } 3424 3425 I915_WRITE(IIR, iir & ~flip_mask); 3426 new_iir = I915_READ(IIR); /* Flush posted writes */ 3427 3428 if (iir & I915_USER_INTERRUPT) 3429 notify_ring(dev, &dev_priv->ring[RCS]); 3430 3431 for_each_pipe(pipe) { 3432 int plane = pipe; 3433 if (HAS_FBC(dev)) 3434 plane = !plane; 3435 3436 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS && 3437 i915_handle_vblank(dev, plane, pipe, iir)) 3438 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane); 3439 3440 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 3441 blc_event = true; 3442 3443 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 3444 i9xx_pipe_crc_irq_handler(dev, pipe); 3445 } 3446 3447 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 3448 intel_opregion_asle_intr(dev); 3449 3450 /* With MSI, interrupts are only generated when iir 3451 * transitions from zero to nonzero. If another bit got 3452 * set while we were handling the existing iir bits, then 3453 * we would never get another interrupt. 3454 * 3455 * This is fine on non-MSI as well, as if we hit this path 3456 * we avoid exiting the interrupt handler only to generate 3457 * another one. 3458 * 3459 * Note that for MSI this could cause a stray interrupt report 3460 * if an interrupt landed in the time between writing IIR and 3461 * the posting read. This should be rare enough to never 3462 * trigger the 99% of 100,000 interrupts test for disabling 3463 * stray interrupts. 
3464 */ 3465 ret = IRQ_HANDLED; 3466 iir = new_iir; 3467 } while (iir & ~flip_mask); 3468 3469 i915_update_dri1_breadcrumb(dev); 3470 3471 return ret; 3472 } 3473 3474 static void i915_irq_uninstall(struct drm_device * dev) 3475 { 3476 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3477 int pipe; 3478 3479 del_timer_sync(&dev_priv->hotplug_reenable_timer); 3480 3481 if (I915_HAS_HOTPLUG(dev)) { 3482 I915_WRITE(PORT_HOTPLUG_EN, 0); 3483 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3484 } 3485 3486 I915_WRITE16(HWSTAM, 0xffff); 3487 for_each_pipe(pipe) { 3488 /* Clear enable bits; then clear status bits */ 3489 I915_WRITE(PIPESTAT(pipe), 0); 3490 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); 3491 } 3492 I915_WRITE(IMR, 0xffffffff); 3493 I915_WRITE(IER, 0x0); 3494 3495 I915_WRITE(IIR, I915_READ(IIR)); 3496 } 3497 3498 static void i965_irq_preinstall(struct drm_device * dev) 3499 { 3500 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3501 int pipe; 3502 3503 atomic_set(&dev_priv->irq_received, 0); 3504 3505 I915_WRITE(PORT_HOTPLUG_EN, 0); 3506 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3507 3508 I915_WRITE(HWSTAM, 0xeffe); 3509 for_each_pipe(pipe) 3510 I915_WRITE(PIPESTAT(pipe), 0); 3511 I915_WRITE(IMR, 0xffffffff); 3512 I915_WRITE(IER, 0x0); 3513 POSTING_READ(IER); 3514 } 3515 3516 static int i965_irq_postinstall(struct drm_device *dev) 3517 { 3518 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3519 u32 enable_mask; 3520 u32 error_mask; 3521 unsigned long irqflags; 3522 3523 /* Unmask the interrupts that we always want on. */ 3524 dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT | 3525 I915_DISPLAY_PORT_INTERRUPT | 3526 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3527 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3528 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3529 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | 3530 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 3531 3532 enable_mask = ~dev_priv->irq_mask; 3533 enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3534 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT); 3535 enable_mask |= I915_USER_INTERRUPT; 3536 3537 if (IS_G4X(dev)) 3538 enable_mask |= I915_BSD_USER_INTERRUPT; 3539 3540 /* Interrupt setup is already guaranteed to be single-threaded, this is 3541 * just to make the assert_spin_locked check happy. */ 3542 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3543 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_EVENT_ENABLE); 3544 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_ENABLE); 3545 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_ENABLE); 3546 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3547 3548 /* 3549 * Enable some error detection, note the instruction error mask 3550 * bit is reserved, so we leave it masked. 
3551 */ 3552 if (IS_G4X(dev)) { 3553 error_mask = ~(GM45_ERROR_PAGE_TABLE | 3554 GM45_ERROR_MEM_PRIV | 3555 GM45_ERROR_CP_PRIV | 3556 I915_ERROR_MEMORY_REFRESH); 3557 } else { 3558 error_mask = ~(I915_ERROR_PAGE_TABLE | 3559 I915_ERROR_MEMORY_REFRESH); 3560 } 3561 I915_WRITE(EMR, error_mask); 3562 3563 I915_WRITE(IMR, dev_priv->irq_mask); 3564 I915_WRITE(IER, enable_mask); 3565 POSTING_READ(IER); 3566 3567 I915_WRITE(PORT_HOTPLUG_EN, 0); 3568 POSTING_READ(PORT_HOTPLUG_EN); 3569 3570 i915_enable_asle_pipestat(dev); 3571 3572 return 0; 3573 } 3574 3575 static void i915_hpd_irq_setup(struct drm_device *dev) 3576 { 3577 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3578 struct drm_mode_config *mode_config = &dev->mode_config; 3579 struct intel_encoder *intel_encoder; 3580 u32 hotplug_en; 3581 3582 assert_spin_locked(&dev_priv->irq_lock); 3583 3584 if (I915_HAS_HOTPLUG(dev)) { 3585 hotplug_en = I915_READ(PORT_HOTPLUG_EN); 3586 hotplug_en &= ~HOTPLUG_INT_EN_MASK; 3587 /* Note HDMI and DP share hotplug bits */ 3588 /* enable bits are the same for all generations */ 3589 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head) 3590 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) 3591 hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin]; 3592 /* Programming the CRT detection parameters tends 3593 to generate a spurious hotplug event about three 3594 seconds later. So just do it once. 3595 */ 3596 if (IS_G4X(dev)) 3597 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64; 3598 hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK; 3599 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; 3600 3601 /* Ignore TV since it's buggy */ 3602 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); 3603 } 3604 } 3605 3606 static irqreturn_t i965_irq_handler(int irq, void *arg) 3607 { 3608 struct drm_device *dev = (struct drm_device *) arg; 3609 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3610 u32 iir, new_iir; 3611 u32 pipe_stats[I915_MAX_PIPES]; 3612 unsigned long irqflags; 3613 int irq_received; 3614 int ret = IRQ_NONE, pipe; 3615 u32 flip_mask = 3616 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3617 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 3618 3619 atomic_inc(&dev_priv->irq_received); 3620 3621 iir = I915_READ(IIR); 3622 3623 for (;;) { 3624 bool blc_event = false; 3625 3626 irq_received = (iir & ~flip_mask) != 0; 3627 3628 /* Can't rely on pipestat interrupt bit in iir as it might 3629 * have been cleared after the pipestat interrupt was received. 3630 * It doesn't set the bit in iir again, but it still produces 3631 * interrupts (for non-MSI). 3632 */ 3633 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3634 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 3635 i915_handle_error(dev, false); 3636 3637 for_each_pipe(pipe) { 3638 int reg = PIPESTAT(pipe); 3639 pipe_stats[pipe] = I915_READ(reg); 3640 3641 /* 3642 * Clear the PIPE*STAT regs before the IIR 3643 */ 3644 if (pipe_stats[pipe] & 0x8000ffff) { 3645 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 3646 DRM_DEBUG_DRIVER("pipe %c underrun\n", 3647 pipe_name(pipe)); 3648 I915_WRITE(reg, pipe_stats[pipe]); 3649 irq_received = 1; 3650 } 3651 } 3652 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3653 3654 if (!irq_received) 3655 break; 3656 3657 ret = IRQ_HANDLED; 3658 3659 /* Consume port. 
static irqreturn_t i965_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, new_iir;
	u32 pipe_stats[I915_MAX_PIPES];
	unsigned long irqflags;
	int irq_received;
	int ret = IRQ_NONE, pipe;
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

	atomic_inc(&dev_priv->irq_received);

	iir = I915_READ(IIR);

	for (;;) {
		bool blc_event = false;

		irq_received = (iir & ~flip_mask) != 0;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = 1;
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		if (!irq_received)
			break;

		ret = IRQ_HANDLED;

		/* Consume port.  Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
			u32 hotplug_trigger = hotplug_status & (IS_G4X(dev) ?
								HOTPLUG_INT_STATUS_G4X :
								HOTPLUG_INT_STATUS_I915);

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);

			intel_hpd_irq_handler(dev, hotplug_trigger,
					      IS_G4X(dev) ? hpd_status_g4x : hpd_status_i915);

			if (IS_G4X(dev) &&
			    (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X))
				dp_aux_irq_handler(dev);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			I915_READ(PORT_HOTPLUG_STAT);
		}

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);
		if (iir & I915_BSD_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[VCS]);

		for_each_pipe(pipe) {
			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev, pipe, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev, pipe);
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
			gmbus_irq_handler(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero.  If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read.  This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		iir = new_iir;
	}

	i915_update_dri1_breadcrumb(dev);

	return ret;
}

static void i965_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	del_timer_sync(&dev_priv->hotplug_reenable_timer);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xffffffff);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe),
			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
	I915_WRITE(IIR, I915_READ(IIR));
}
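
/**
 * i915_reenable_hotplug_timer_func - re-enable disabled HPD pins
 * @data: the drm_i915_private pointer passed via setup_timer()
 *
 * Timer callback that flips every pin still marked HPD_DISABLED back to
 * HPD_ENABLED, restores the polling mode of the affected connectors and
 * reprograms the hotplug hardware through the platform hpd_irq_setup
 * hook, all under irq_lock.
 */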
static void i915_reenable_hotplug_timer_func(unsigned long data)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *)data;
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	unsigned long irqflags;
	int i;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
		struct drm_connector *connector;

		if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED)
			continue;

		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;

		list_for_each_entry(connector, &mode_config->connector_list, head) {
			struct intel_connector *intel_connector = to_intel_connector(connector);

			if (intel_connector->encoder->hpd_pin == i) {
				if (connector->polled != intel_connector->polled)
					DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
							 drm_get_connector_name(connector));
				connector->polled = intel_connector->polled;
				if (!connector->polled)
					connector->polled = DRM_CONNECTOR_POLL_HPD;
			}
		}
	}
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
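
/**
 * intel_irq_init - initialize interrupt support for a device
 * @dev: drm device
 *
 * Sets up the work items, timers and PM QoS request used by the
 * interrupt code, then fills in the drm_driver IRQ and vblank hooks and
 * the hpd_irq_setup callback with the variants that match the hardware
 * generation.
 */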
void intel_irq_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
	INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);

	setup_timer(&dev_priv->gpu_error.hangcheck_timer,
		    i915_hangcheck_elapsed,
		    (unsigned long) dev);
	setup_timer(&dev_priv->hotplug_reenable_timer, i915_reenable_hotplug_timer_func,
		    (unsigned long) dev_priv);

	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);

	if (IS_GEN2(dev)) {
		dev->max_vblank_count = 0;
		dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
	} else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
	} else {
		dev->driver->get_vblank_counter = i915_get_vblank_counter;
		dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
		dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
	}

	if (IS_VALLEYVIEW(dev)) {
		dev->driver->irq_handler = valleyview_irq_handler;
		dev->driver->irq_preinstall = valleyview_irq_preinstall;
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
		dev->driver->irq_uninstall = valleyview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (IS_GEN8(dev)) {
		dev->driver->irq_handler = gen8_irq_handler;
		dev->driver->irq_preinstall = gen8_irq_preinstall;
		dev->driver->irq_postinstall = gen8_irq_postinstall;
		dev->driver->irq_uninstall = gen8_irq_uninstall;
		dev->driver->enable_vblank = gen8_enable_vblank;
		dev->driver->disable_vblank = gen8_disable_vblank;
		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
	} else if (HAS_PCH_SPLIT(dev)) {
		dev->driver->irq_handler = ironlake_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_preinstall;
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ironlake_enable_vblank;
		dev->driver->disable_vblank = ironlake_disable_vblank;
		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
	} else {
		if (INTEL_INFO(dev)->gen == 2) {
			dev->driver->irq_preinstall = i8xx_irq_preinstall;
			dev->driver->irq_postinstall = i8xx_irq_postinstall;
			dev->driver->irq_handler = i8xx_irq_handler;
			dev->driver->irq_uninstall = i8xx_irq_uninstall;
		} else if (INTEL_INFO(dev)->gen == 3) {
			dev->driver->irq_preinstall = i915_irq_preinstall;
			dev->driver->irq_postinstall = i915_irq_postinstall;
			dev->driver->irq_uninstall = i915_irq_uninstall;
			dev->driver->irq_handler = i915_irq_handler;
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		} else {
			dev->driver->irq_preinstall = i965_irq_preinstall;
			dev->driver->irq_postinstall = i965_irq_postinstall;
			dev->driver->irq_uninstall = i965_irq_uninstall;
			dev->driver->irq_handler = i965_irq_handler;
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		}
		dev->driver->enable_vblank = i915_enable_vblank;
		dev->driver->disable_vblank = i915_disable_vblank;
	}
}
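
/**
 * intel_hpd_init - initialize and (re-)enable hotplug support
 * @dev: drm device
 *
 * Resets the per-pin hotplug statistics, marks every pin HPD_ENABLED,
 * restores each connector's polled mode (falling back to
 * DRM_CONNECTOR_POLL_HPD where the hardware supports hotplug) and then
 * calls into the platform hpd_irq_setup hook under irq_lock.
 */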
void intel_hpd_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_connector *connector;
	unsigned long irqflags;
	int i;

	for (i = 1; i < HPD_NUM_PINS; i++) {
		dev_priv->hpd_stats[i].hpd_cnt = 0;
		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
	}
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		struct intel_connector *intel_connector = to_intel_connector(connector);
		connector->polled = intel_connector->polled;
		if (!connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
			connector->polled = DRM_CONNECTOR_POLL_HPD;
	}

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked checks happy. */
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

/* Disable interrupts so we can allow Package C8+. */
void hsw_pc8_disable_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	dev_priv->pc8.regsave.deimr = I915_READ(DEIMR);
	dev_priv->pc8.regsave.sdeimr = I915_READ(SDEIMR);
	dev_priv->pc8.regsave.gtimr = I915_READ(GTIMR);
	dev_priv->pc8.regsave.gtier = I915_READ(GTIER);
	dev_priv->pc8.regsave.gen6_pmimr = I915_READ(GEN6_PMIMR);

	ironlake_disable_display_irq(dev_priv, 0xffffffff);
	ibx_disable_display_interrupt(dev_priv, 0xffffffff);
	ilk_disable_gt_irq(dev_priv, 0xffffffff);
	snb_disable_pm_irq(dev_priv, 0xffffffff);

	dev_priv->pc8.irqs_disabled = true;

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

/* Restore interrupts so we can recover from Package C8+. */
void hsw_pc8_restore_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;
	uint32_t val;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	val = I915_READ(DEIMR);
	WARN(val != 0xffffffff, "DEIMR is 0x%08x\n", val);

	val = I915_READ(SDEIMR);
	WARN(val != 0xffffffff, "SDEIMR is 0x%08x\n", val);

	val = I915_READ(GTIMR);
	WARN(val != 0xffffffff, "GTIMR is 0x%08x\n", val);

	val = I915_READ(GEN6_PMIMR);
	WARN(val != 0xffffffff, "GEN6_PMIMR is 0x%08x\n", val);

	dev_priv->pc8.irqs_disabled = false;

	ironlake_enable_display_irq(dev_priv, ~dev_priv->pc8.regsave.deimr);
	ibx_enable_display_interrupt(dev_priv, ~dev_priv->pc8.regsave.sdeimr);
	ilk_enable_gt_irq(dev_priv, ~dev_priv->pc8.regsave.gtimr);
	snb_enable_pm_irq(dev_priv, ~dev_priv->pc8.regsave.gen6_pmimr);
	I915_WRITE(GTIER, dev_priv->pc8.regsave.gtier);

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}