1 /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*- 2 */ 3 /* 4 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. 5 * All Rights Reserved. 6 * 7 * Permission is hereby granted, free of charge, to any person obtaining a 8 * copy of this software and associated documentation files (the 9 * "Software"), to deal in the Software without restriction, including 10 * without limitation the rights to use, copy, modify, merge, publish, 11 * distribute, sub license, and/or sell copies of the Software, and to 12 * permit persons to whom the Software is furnished to do so, subject to 13 * the following conditions: 14 * 15 * The above copyright notice and this permission notice (including the 16 * next paragraph) shall be included in all copies or substantial portions 17 * of the Software. 18 * 19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 22 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR 23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, 24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 26 * 27 */ 28 29 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 30 31 #include <linux/sysrq.h> 32 #include <linux/slab.h> 33 #include <drm/drmP.h> 34 #include <drm/i915_drm.h> 35 #include "i915_drv.h" 36 #include "i915_trace.h" 37 #include "intel_drv.h" 38 39 static const u32 hpd_ibx[] = { 40 [HPD_CRT] = SDE_CRT_HOTPLUG, 41 [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG, 42 [HPD_PORT_B] = SDE_PORTB_HOTPLUG, 43 [HPD_PORT_C] = SDE_PORTC_HOTPLUG, 44 [HPD_PORT_D] = SDE_PORTD_HOTPLUG 45 }; 46 47 static const u32 hpd_cpt[] = { 48 [HPD_CRT] = SDE_CRT_HOTPLUG_CPT, 49 [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT, 50 [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT, 51 [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT, 52 [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT 53 }; 54 55 static const u32 hpd_mask_i915[] = { 56 [HPD_CRT] = CRT_HOTPLUG_INT_EN, 57 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN, 58 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN, 59 [HPD_PORT_B] = PORTB_HOTPLUG_INT_EN, 60 [HPD_PORT_C] = PORTC_HOTPLUG_INT_EN, 61 [HPD_PORT_D] = PORTD_HOTPLUG_INT_EN 62 }; 63 64 static const u32 hpd_status_gen4[] = { 65 [HPD_CRT] = CRT_HOTPLUG_INT_STATUS, 66 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X, 67 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X, 68 [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS, 69 [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS, 70 [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS 71 }; 72 73 static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */ 74 [HPD_CRT] = CRT_HOTPLUG_INT_STATUS, 75 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915, 76 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915, 77 [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS, 78 [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS, 79 [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS 80 }; 81 82 /* For display hotplug interrupt */ 83 static void 84 ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask) 85 { 86 assert_spin_locked(&dev_priv->irq_lock); 87 88 if (dev_priv->pc8.irqs_disabled) { 89 WARN(1, "IRQs disabled\n"); 90 dev_priv->pc8.regsave.deimr &= ~mask; 91 return; 92 } 93 94 if ((dev_priv->irq_mask & mask) != 0) { 95 dev_priv->irq_mask &= ~mask; 96 I915_WRITE(DEIMR, dev_priv->irq_mask); 97 POSTING_READ(DEIMR); 98 } 99 } 100 101 static void 102 ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask) 103 { 104 
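	/* DEIMR is a mask register: a set bit masks (disables) the
	 * corresponding display interrupt, so this disable path sets bits
	 * while the enable path above clears them. */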
assert_spin_locked(&dev_priv->irq_lock); 105 106 if (dev_priv->pc8.irqs_disabled) { 107 WARN(1, "IRQs disabled\n"); 108 dev_priv->pc8.regsave.deimr |= mask; 109 return; 110 } 111 112 if ((dev_priv->irq_mask & mask) != mask) { 113 dev_priv->irq_mask |= mask; 114 I915_WRITE(DEIMR, dev_priv->irq_mask); 115 POSTING_READ(DEIMR); 116 } 117 } 118 119 /** 120 * ilk_update_gt_irq - update GTIMR 121 * @dev_priv: driver private 122 * @interrupt_mask: mask of interrupt bits to update 123 * @enabled_irq_mask: mask of interrupt bits to enable 124 */ 125 static void ilk_update_gt_irq(struct drm_i915_private *dev_priv, 126 uint32_t interrupt_mask, 127 uint32_t enabled_irq_mask) 128 { 129 assert_spin_locked(&dev_priv->irq_lock); 130 131 if (dev_priv->pc8.irqs_disabled) { 132 WARN(1, "IRQs disabled\n"); 133 dev_priv->pc8.regsave.gtimr &= ~interrupt_mask; 134 dev_priv->pc8.regsave.gtimr |= (~enabled_irq_mask & 135 interrupt_mask); 136 return; 137 } 138 139 dev_priv->gt_irq_mask &= ~interrupt_mask; 140 dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask); 141 I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 142 POSTING_READ(GTIMR); 143 } 144 145 void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask) 146 { 147 ilk_update_gt_irq(dev_priv, mask, mask); 148 } 149 150 void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask) 151 { 152 ilk_update_gt_irq(dev_priv, mask, 0); 153 } 154 155 /** 156 * snb_update_pm_irq - update GEN6_PMIMR 157 * @dev_priv: driver private 158 * @interrupt_mask: mask of interrupt bits to update 159 * @enabled_irq_mask: mask of interrupt bits to enable 160 */ 161 static void snb_update_pm_irq(struct drm_i915_private *dev_priv, 162 uint32_t interrupt_mask, 163 uint32_t enabled_irq_mask) 164 { 165 uint32_t new_val; 166 167 assert_spin_locked(&dev_priv->irq_lock); 168 169 if (dev_priv->pc8.irqs_disabled) { 170 WARN(1, "IRQs disabled\n"); 171 dev_priv->pc8.regsave.gen6_pmimr &= ~interrupt_mask; 172 dev_priv->pc8.regsave.gen6_pmimr |= (~enabled_irq_mask & 173 interrupt_mask); 174 return; 175 } 176 177 new_val = dev_priv->pm_irq_mask; 178 new_val &= ~interrupt_mask; 179 new_val |= (~enabled_irq_mask & interrupt_mask); 180 181 if (new_val != dev_priv->pm_irq_mask) { 182 dev_priv->pm_irq_mask = new_val; 183 I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask); 184 POSTING_READ(GEN6_PMIMR); 185 } 186 } 187 188 void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask) 189 { 190 snb_update_pm_irq(dev_priv, mask, mask); 191 } 192 193 void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask) 194 { 195 snb_update_pm_irq(dev_priv, mask, 0); 196 } 197 198 static bool ivb_can_enable_err_int(struct drm_device *dev) 199 { 200 struct drm_i915_private *dev_priv = dev->dev_private; 201 struct intel_crtc *crtc; 202 enum pipe pipe; 203 204 assert_spin_locked(&dev_priv->irq_lock); 205 206 for_each_pipe(pipe) { 207 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); 208 209 if (crtc->cpu_fifo_underrun_disabled) 210 return false; 211 } 212 213 return true; 214 } 215 216 static bool cpt_can_enable_serr_int(struct drm_device *dev) 217 { 218 struct drm_i915_private *dev_priv = dev->dev_private; 219 enum pipe pipe; 220 struct intel_crtc *crtc; 221 222 assert_spin_locked(&dev_priv->irq_lock); 223 224 for_each_pipe(pipe) { 225 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); 226 227 if (crtc->pch_fifo_underrun_disabled) 228 return false; 229 } 230 231 return true; 232 } 233 234 static void ironlake_set_fifo_underrun_reporting(struct drm_device 
*dev, 235 enum pipe pipe, bool enable) 236 { 237 struct drm_i915_private *dev_priv = dev->dev_private; 238 uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN : 239 DE_PIPEB_FIFO_UNDERRUN; 240 241 if (enable) 242 ironlake_enable_display_irq(dev_priv, bit); 243 else 244 ironlake_disable_display_irq(dev_priv, bit); 245 } 246 247 static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev, 248 enum pipe pipe, bool enable) 249 { 250 struct drm_i915_private *dev_priv = dev->dev_private; 251 if (enable) { 252 I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe)); 253 254 if (!ivb_can_enable_err_int(dev)) 255 return; 256 257 ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB); 258 } else { 259 bool was_enabled = !(I915_READ(DEIMR) & DE_ERR_INT_IVB); 260 261 /* Change the state _after_ we've read out the current one. */ 262 ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB); 263 264 if (!was_enabled && 265 (I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe))) { 266 DRM_DEBUG_KMS("uncleared fifo underrun on pipe %c\n", 267 pipe_name(pipe)); 268 } 269 } 270 } 271 272 /** 273 * ibx_display_interrupt_update - update SDEIMR 274 * @dev_priv: driver private 275 * @interrupt_mask: mask of interrupt bits to update 276 * @enabled_irq_mask: mask of interrupt bits to enable 277 */ 278 static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv, 279 uint32_t interrupt_mask, 280 uint32_t enabled_irq_mask) 281 { 282 uint32_t sdeimr = I915_READ(SDEIMR); 283 sdeimr &= ~interrupt_mask; 284 sdeimr |= (~enabled_irq_mask & interrupt_mask); 285 286 assert_spin_locked(&dev_priv->irq_lock); 287 288 if (dev_priv->pc8.irqs_disabled && 289 (interrupt_mask & SDE_HOTPLUG_MASK_CPT)) { 290 WARN(1, "IRQs disabled\n"); 291 dev_priv->pc8.regsave.sdeimr &= ~interrupt_mask; 292 dev_priv->pc8.regsave.sdeimr |= (~enabled_irq_mask & 293 interrupt_mask); 294 return; 295 } 296 297 I915_WRITE(SDEIMR, sdeimr); 298 POSTING_READ(SDEIMR); 299 } 300 #define ibx_enable_display_interrupt(dev_priv, bits) \ 301 ibx_display_interrupt_update((dev_priv), (bits), (bits)) 302 #define ibx_disable_display_interrupt(dev_priv, bits) \ 303 ibx_display_interrupt_update((dev_priv), (bits), 0) 304 305 static void ibx_set_fifo_underrun_reporting(struct drm_device *dev, 306 enum transcoder pch_transcoder, 307 bool enable) 308 { 309 struct drm_i915_private *dev_priv = dev->dev_private; 310 uint32_t bit = (pch_transcoder == TRANSCODER_A) ? 311 SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER; 312 313 if (enable) 314 ibx_enable_display_interrupt(dev_priv, bit); 315 else 316 ibx_disable_display_interrupt(dev_priv, bit); 317 } 318 319 static void cpt_set_fifo_underrun_reporting(struct drm_device *dev, 320 enum transcoder pch_transcoder, 321 bool enable) 322 { 323 struct drm_i915_private *dev_priv = dev->dev_private; 324 325 if (enable) { 326 I915_WRITE(SERR_INT, 327 SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)); 328 329 if (!cpt_can_enable_serr_int(dev)) 330 return; 331 332 ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT); 333 } else { 334 uint32_t tmp = I915_READ(SERR_INT); 335 bool was_enabled = !(I915_READ(SDEIMR) & SDE_ERROR_CPT); 336 337 /* Change the state _after_ we've read out the current one. 
		 */
		ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT);

		if (!was_enabled &&
		    (tmp & SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder))) {
			DRM_DEBUG_KMS("uncleared pch fifo underrun on pch transcoder %c\n",
				      transcoder_name(pch_transcoder));
		}
	}
}

/**
 * intel_set_cpu_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pipe: pipe
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable CPU fifo underruns for a specific
 * pipe. Notice that on some Gens (e.g. IVB, HSW), disabling FIFO underrun
 * reporting for one pipe may also disable all the other CPU error interrupts for
 * the other pipes, due to the fact that there's just one interrupt mask/enable
 * bit for all the pipes.
 *
 * Returns the previous state of underrun reporting.
 */
bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
					   enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);

	ret = !intel_crtc->cpu_fifo_underrun_disabled;

	if (enable == ret)
		goto done;

	intel_crtc->cpu_fifo_underrun_disabled = !enable;

	if (IS_GEN5(dev) || IS_GEN6(dev))
		ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
	else if (IS_GEN7(dev))
		ivybridge_set_fifo_underrun_reporting(dev, pipe, enable);

done:
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
	return ret;
}

/**
 * intel_set_pch_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable PCH fifo underruns for a specific
 * PCH transcoder. Notice that on some PCHs (e.g. CPT/PPT), disabling FIFO
 * underrun reporting for one transcoder may also disable all the other PCH
 * error interrupts for the other transcoders, due to the fact that there's just
 * one interrupt mask/enable bit for all the transcoders.
 *
 * Returns the previous state of underrun reporting.
 */
bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
					   enum transcoder pch_transcoder,
					   bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	unsigned long flags;
	bool ret;

	/*
	 * NOTE: Pre-LPT has a fixed cpu pipe -> pch transcoder mapping, but LPT
	 * has only one pch transcoder A that all pipes can use. To avoid racy
	 * pch transcoder -> pipe lookups from interrupt code simply store the
	 * underrun statistics in crtc A. Since we never expose this anywhere
	 * nor use it outside of the fifo underrun code here using the "wrong"
	 * crtc on LPT won't cause issues.
421 */ 422 423 spin_lock_irqsave(&dev_priv->irq_lock, flags); 424 425 ret = !intel_crtc->pch_fifo_underrun_disabled; 426 427 if (enable == ret) 428 goto done; 429 430 intel_crtc->pch_fifo_underrun_disabled = !enable; 431 432 if (HAS_PCH_IBX(dev)) 433 ibx_set_fifo_underrun_reporting(dev, pch_transcoder, enable); 434 else 435 cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable); 436 437 done: 438 spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 439 return ret; 440 } 441 442 443 void 444 i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask) 445 { 446 u32 reg = PIPESTAT(pipe); 447 u32 pipestat = I915_READ(reg) & 0x7fff0000; 448 449 assert_spin_locked(&dev_priv->irq_lock); 450 451 if ((pipestat & mask) == mask) 452 return; 453 454 /* Enable the interrupt, clear any pending status */ 455 pipestat |= mask | (mask >> 16); 456 I915_WRITE(reg, pipestat); 457 POSTING_READ(reg); 458 } 459 460 void 461 i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask) 462 { 463 u32 reg = PIPESTAT(pipe); 464 u32 pipestat = I915_READ(reg) & 0x7fff0000; 465 466 assert_spin_locked(&dev_priv->irq_lock); 467 468 if ((pipestat & mask) == 0) 469 return; 470 471 pipestat &= ~mask; 472 I915_WRITE(reg, pipestat); 473 POSTING_READ(reg); 474 } 475 476 /** 477 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion 478 */ 479 static void i915_enable_asle_pipestat(struct drm_device *dev) 480 { 481 drm_i915_private_t *dev_priv = dev->dev_private; 482 unsigned long irqflags; 483 484 if (!dev_priv->opregion.asle || !IS_MOBILE(dev)) 485 return; 486 487 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 488 489 i915_enable_pipestat(dev_priv, 1, PIPE_LEGACY_BLC_EVENT_ENABLE); 490 if (INTEL_INFO(dev)->gen >= 4) 491 i915_enable_pipestat(dev_priv, 0, PIPE_LEGACY_BLC_EVENT_ENABLE); 492 493 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 494 } 495 496 /** 497 * i915_pipe_enabled - check if a pipe is enabled 498 * @dev: DRM device 499 * @pipe: pipe to check 500 * 501 * Reading certain registers when the pipe is disabled can hang the chip. 502 * Use this routine to make sure the PLL is running and the pipe is active 503 * before reading such registers if unsure. 504 */ 505 static int 506 i915_pipe_enabled(struct drm_device *dev, int pipe) 507 { 508 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 509 510 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 511 /* Locking is horribly broken here, but whatever. */ 512 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 513 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 514 515 return intel_crtc->active; 516 } else { 517 return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE; 518 } 519 } 520 521 /* Called from drm generic code, passed a 'crtc', which 522 * we use as a pipe index 523 */ 524 static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe) 525 { 526 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 527 unsigned long high_frame; 528 unsigned long low_frame; 529 u32 high1, high2, low; 530 531 if (!i915_pipe_enabled(dev, pipe)) { 532 DRM_DEBUG_DRIVER("trying to get vblank count for disabled " 533 "pipe %c\n", pipe_name(pipe)); 534 return 0; 535 } 536 537 high_frame = PIPEFRAME(pipe); 538 low_frame = PIPEFRAMEPIXEL(pipe); 539 540 /* 541 * High & low register fields aren't synchronized, so make sure 542 * we get a low value that's stable across two reads of the high 543 * register. 
544 */ 545 do { 546 high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK; 547 low = I915_READ(low_frame) & PIPE_FRAME_LOW_MASK; 548 high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK; 549 } while (high1 != high2); 550 551 high1 >>= PIPE_FRAME_HIGH_SHIFT; 552 low >>= PIPE_FRAME_LOW_SHIFT; 553 return (high1 << 8) | low; 554 } 555 556 static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe) 557 { 558 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 559 int reg = PIPE_FRMCOUNT_GM45(pipe); 560 561 if (!i915_pipe_enabled(dev, pipe)) { 562 DRM_DEBUG_DRIVER("trying to get vblank count for disabled " 563 "pipe %c\n", pipe_name(pipe)); 564 return 0; 565 } 566 567 return I915_READ(reg); 568 } 569 570 static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe, 571 int *vpos, int *hpos) 572 { 573 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 574 u32 vbl = 0, position = 0; 575 int vbl_start, vbl_end, htotal, vtotal; 576 bool in_vbl = true; 577 int ret = 0; 578 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, 579 pipe); 580 581 if (!i915_pipe_enabled(dev, pipe)) { 582 DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled " 583 "pipe %c\n", pipe_name(pipe)); 584 return 0; 585 } 586 587 /* Get vtotal. */ 588 vtotal = 1 + ((I915_READ(VTOTAL(cpu_transcoder)) >> 16) & 0x1fff); 589 590 if (INTEL_INFO(dev)->gen >= 4) { 591 /* No obvious pixelcount register. Only query vertical 592 * scanout position from Display scan line register. 593 */ 594 position = I915_READ(PIPEDSL(pipe)); 595 596 /* Decode into vertical scanout position. Don't have 597 * horizontal scanout position. 598 */ 599 *vpos = position & 0x1fff; 600 *hpos = 0; 601 } else { 602 /* Have access to pixelcount since start of frame. 603 * We can split this into vertical and horizontal 604 * scanout position. 605 */ 606 position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT; 607 608 htotal = 1 + ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff); 609 *vpos = position / htotal; 610 *hpos = position - (*vpos * htotal); 611 } 612 613 /* Query vblank area. */ 614 vbl = I915_READ(VBLANK(cpu_transcoder)); 615 616 /* Test position against vblank region. */ 617 vbl_start = vbl & 0x1fff; 618 vbl_end = (vbl >> 16) & 0x1fff; 619 620 if ((*vpos < vbl_start) || (*vpos > vbl_end)) 621 in_vbl = false; 622 623 /* Inside "upper part" of vblank area? Apply corrective offset: */ 624 if (in_vbl && (*vpos >= vbl_start)) 625 *vpos = *vpos - vtotal; 626 627 /* Readouts valid? */ 628 if (vbl > 0) 629 ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE; 630 631 /* In vblank? 
*/ 632 if (in_vbl) 633 ret |= DRM_SCANOUTPOS_INVBL; 634 635 return ret; 636 } 637 638 static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe, 639 int *max_error, 640 struct timeval *vblank_time, 641 unsigned flags) 642 { 643 struct drm_crtc *crtc; 644 645 if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) { 646 DRM_ERROR("Invalid crtc %d\n", pipe); 647 return -EINVAL; 648 } 649 650 /* Get drm_crtc to timestamp: */ 651 crtc = intel_get_crtc_for_pipe(dev, pipe); 652 if (crtc == NULL) { 653 DRM_ERROR("Invalid crtc %d\n", pipe); 654 return -EINVAL; 655 } 656 657 if (!crtc->enabled) { 658 DRM_DEBUG_KMS("crtc %d is disabled\n", pipe); 659 return -EBUSY; 660 } 661 662 /* Helper routine in DRM core does all the work: */ 663 return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error, 664 vblank_time, flags, 665 crtc); 666 } 667 668 static int intel_hpd_irq_event(struct drm_device *dev, struct drm_connector *connector) 669 { 670 enum drm_connector_status old_status; 671 672 WARN_ON(!mutex_is_locked(&dev->mode_config.mutex)); 673 old_status = connector->status; 674 675 connector->status = connector->funcs->detect(connector, false); 676 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n", 677 connector->base.id, 678 drm_get_connector_name(connector), 679 old_status, connector->status); 680 return (old_status != connector->status); 681 } 682 683 /* 684 * Handle hotplug events outside the interrupt handler proper. 685 */ 686 #define I915_REENABLE_HOTPLUG_DELAY (2*60*1000) 687 688 static void i915_hotplug_work_func(struct work_struct *work) 689 { 690 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, 691 hotplug_work); 692 struct drm_device *dev = dev_priv->dev; 693 struct drm_mode_config *mode_config = &dev->mode_config; 694 struct intel_connector *intel_connector; 695 struct intel_encoder *intel_encoder; 696 struct drm_connector *connector; 697 unsigned long irqflags; 698 bool hpd_disabled = false; 699 bool changed = false; 700 u32 hpd_event_bits; 701 702 /* HPD irq before everything is fully set up. 
*/ 703 if (!dev_priv->enable_hotplug_processing) 704 return; 705 706 mutex_lock(&mode_config->mutex); 707 DRM_DEBUG_KMS("running encoder hotplug functions\n"); 708 709 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 710 711 hpd_event_bits = dev_priv->hpd_event_bits; 712 dev_priv->hpd_event_bits = 0; 713 list_for_each_entry(connector, &mode_config->connector_list, head) { 714 intel_connector = to_intel_connector(connector); 715 intel_encoder = intel_connector->encoder; 716 if (intel_encoder->hpd_pin > HPD_NONE && 717 dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED && 718 connector->polled == DRM_CONNECTOR_POLL_HPD) { 719 DRM_INFO("HPD interrupt storm detected on connector %s: " 720 "switching from hotplug detection to polling\n", 721 drm_get_connector_name(connector)); 722 dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED; 723 connector->polled = DRM_CONNECTOR_POLL_CONNECT 724 | DRM_CONNECTOR_POLL_DISCONNECT; 725 hpd_disabled = true; 726 } 727 if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) { 728 DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n", 729 drm_get_connector_name(connector), intel_encoder->hpd_pin); 730 } 731 } 732 /* if there were no outputs to poll, poll was disabled, 733 * therefore make sure it's enabled when disabling HPD on 734 * some connectors */ 735 if (hpd_disabled) { 736 drm_kms_helper_poll_enable(dev); 737 mod_timer(&dev_priv->hotplug_reenable_timer, 738 jiffies + msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY)); 739 } 740 741 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 742 743 list_for_each_entry(connector, &mode_config->connector_list, head) { 744 intel_connector = to_intel_connector(connector); 745 intel_encoder = intel_connector->encoder; 746 if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) { 747 if (intel_encoder->hot_plug) 748 intel_encoder->hot_plug(intel_encoder); 749 if (intel_hpd_irq_event(dev, connector)) 750 changed = true; 751 } 752 } 753 mutex_unlock(&mode_config->mutex); 754 755 if (changed) 756 drm_kms_helper_hotplug_event(dev); 757 } 758 759 static void ironlake_rps_change_irq_handler(struct drm_device *dev) 760 { 761 drm_i915_private_t *dev_priv = dev->dev_private; 762 u32 busy_up, busy_down, max_avg, min_avg; 763 u8 new_delay; 764 765 spin_lock(&mchdev_lock); 766 767 I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS)); 768 769 new_delay = dev_priv->ips.cur_delay; 770 771 I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG); 772 busy_up = I915_READ(RCPREVBSYTUPAVG); 773 busy_down = I915_READ(RCPREVBSYTDNAVG); 774 max_avg = I915_READ(RCBMAXAVG); 775 min_avg = I915_READ(RCBMINAVG); 776 777 /* Handle RCS change request from hw */ 778 if (busy_up > max_avg) { 779 if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay) 780 new_delay = dev_priv->ips.cur_delay - 1; 781 if (new_delay < dev_priv->ips.max_delay) 782 new_delay = dev_priv->ips.max_delay; 783 } else if (busy_down < min_avg) { 784 if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay) 785 new_delay = dev_priv->ips.cur_delay + 1; 786 if (new_delay > dev_priv->ips.min_delay) 787 new_delay = dev_priv->ips.min_delay; 788 } 789 790 if (ironlake_set_drps(dev, new_delay)) 791 dev_priv->ips.cur_delay = new_delay; 792 793 spin_unlock(&mchdev_lock); 794 795 return; 796 } 797 798 static void notify_ring(struct drm_device *dev, 799 struct intel_ring_buffer *ring) 800 { 801 if (ring->obj == NULL) 802 return; 803 804 trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false)); 805 806 wake_up_all(&ring->irq_queue); 807 
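	/* Completing a request is a sign of life; re-arm the hangcheck
	 * timer below so we keep watching for a subsequent hang. */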
	i915_queue_hangcheck(dev);
}

static void gen6_pm_rps_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    rps.work);
	u32 pm_iir;
	u8 new_delay;

	spin_lock_irq(&dev_priv->irq_lock);
	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	/* Make sure not to corrupt PMIMR state used by ringbuffer code */
	snb_enable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS);
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Make sure we didn't queue anything we're not going to process. */
	WARN_ON(pm_iir & ~GEN6_PM_RPS_EVENTS);

	if ((pm_iir & GEN6_PM_RPS_EVENTS) == 0)
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		new_delay = dev_priv->rps.cur_delay + 1;

		/*
		 * For better performance, jump directly
		 * to RPe if we're below it.
		 */
		if (IS_VALLEYVIEW(dev_priv->dev) &&
		    dev_priv->rps.cur_delay < dev_priv->rps.rpe_delay)
			new_delay = dev_priv->rps.rpe_delay;
	} else
		new_delay = dev_priv->rps.cur_delay - 1;

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	if (new_delay >= dev_priv->rps.min_delay &&
	    new_delay <= dev_priv->rps.max_delay) {
		if (IS_VALLEYVIEW(dev_priv->dev))
			valleyview_set_rps(dev_priv->dev, new_delay);
		else
			gen6_set_rps(dev_priv->dev, new_delay);
	}

	if (IS_VALLEYVIEW(dev_priv->dev)) {
		/*
		 * On VLV, when we enter RC6 we may not be at the minimum
		 * voltage level, so arm a timer to check. It should only
		 * fire when there's activity or once after we've entered
		 * RC6, and then won't be re-armed until the next RPS interrupt.
		 */
		mod_delayed_work(dev_priv->wq, &dev_priv->rps.vlv_work,
				 msecs_to_jiffies(100));
	}

	mutex_unlock(&dev_priv->rps.hw_lock);
}


/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * the same row is likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[5];
	uint32_t misccpctl;
	unsigned long flags;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
892 */ 893 mutex_lock(&dev_priv->dev->struct_mutex); 894 895 misccpctl = I915_READ(GEN7_MISCCPCTL); 896 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE); 897 POSTING_READ(GEN7_MISCCPCTL); 898 899 error_status = I915_READ(GEN7_L3CDERRST1); 900 row = GEN7_PARITY_ERROR_ROW(error_status); 901 bank = GEN7_PARITY_ERROR_BANK(error_status); 902 subbank = GEN7_PARITY_ERROR_SUBBANK(error_status); 903 904 I915_WRITE(GEN7_L3CDERRST1, GEN7_PARITY_ERROR_VALID | 905 GEN7_L3CDERRST1_ENABLE); 906 POSTING_READ(GEN7_L3CDERRST1); 907 908 I915_WRITE(GEN7_MISCCPCTL, misccpctl); 909 910 spin_lock_irqsave(&dev_priv->irq_lock, flags); 911 ilk_enable_gt_irq(dev_priv, GT_RENDER_L3_PARITY_ERROR_INTERRUPT); 912 spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 913 914 mutex_unlock(&dev_priv->dev->struct_mutex); 915 916 parity_event[0] = I915_L3_PARITY_UEVENT "=1"; 917 parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row); 918 parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank); 919 parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank); 920 parity_event[4] = NULL; 921 922 kobject_uevent_env(&dev_priv->dev->primary->kdev.kobj, 923 KOBJ_CHANGE, parity_event); 924 925 DRM_DEBUG("Parity error: Row = %d, Bank = %d, Sub bank = %d.\n", 926 row, bank, subbank); 927 928 kfree(parity_event[3]); 929 kfree(parity_event[2]); 930 kfree(parity_event[1]); 931 } 932 933 static void ivybridge_parity_error_irq_handler(struct drm_device *dev) 934 { 935 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 936 937 if (!HAS_L3_GPU_CACHE(dev)) 938 return; 939 940 spin_lock(&dev_priv->irq_lock); 941 ilk_disable_gt_irq(dev_priv, GT_RENDER_L3_PARITY_ERROR_INTERRUPT); 942 spin_unlock(&dev_priv->irq_lock); 943 944 queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work); 945 } 946 947 static void ilk_gt_irq_handler(struct drm_device *dev, 948 struct drm_i915_private *dev_priv, 949 u32 gt_iir) 950 { 951 if (gt_iir & 952 (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT)) 953 notify_ring(dev, &dev_priv->ring[RCS]); 954 if (gt_iir & ILK_BSD_USER_INTERRUPT) 955 notify_ring(dev, &dev_priv->ring[VCS]); 956 } 957 958 static void snb_gt_irq_handler(struct drm_device *dev, 959 struct drm_i915_private *dev_priv, 960 u32 gt_iir) 961 { 962 963 if (gt_iir & 964 (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT)) 965 notify_ring(dev, &dev_priv->ring[RCS]); 966 if (gt_iir & GT_BSD_USER_INTERRUPT) 967 notify_ring(dev, &dev_priv->ring[VCS]); 968 if (gt_iir & GT_BLT_USER_INTERRUPT) 969 notify_ring(dev, &dev_priv->ring[BCS]); 970 971 if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT | 972 GT_BSD_CS_ERROR_INTERRUPT | 973 GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) { 974 DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir); 975 i915_handle_error(dev, false); 976 } 977 978 if (gt_iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT) 979 ivybridge_parity_error_irq_handler(dev); 980 } 981 982 #define HPD_STORM_DETECT_PERIOD 1000 983 #define HPD_STORM_THRESHOLD 5 984 985 static inline void intel_hpd_irq_handler(struct drm_device *dev, 986 u32 hotplug_trigger, 987 const u32 *hpd) 988 { 989 drm_i915_private_t *dev_priv = dev->dev_private; 990 int i; 991 bool storm_detected = false; 992 993 if (!hotplug_trigger) 994 return; 995 996 spin_lock(&dev_priv->irq_lock); 997 for (i = 1; i < HPD_NUM_PINS; i++) { 998 999 WARN(((hpd[i] & hotplug_trigger) && 1000 dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED), 1001 "Received HPD interrupt although disabled\n"); 1002 1003 if (!(hpd[i] & hotplug_trigger) || 1004 
dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED) 1005 continue; 1006 1007 dev_priv->hpd_event_bits |= (1 << i); 1008 if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies, 1009 dev_priv->hpd_stats[i].hpd_last_jiffies 1010 + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) { 1011 dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies; 1012 dev_priv->hpd_stats[i].hpd_cnt = 0; 1013 DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", i); 1014 } else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) { 1015 dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED; 1016 dev_priv->hpd_event_bits &= ~(1 << i); 1017 DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i); 1018 storm_detected = true; 1019 } else { 1020 dev_priv->hpd_stats[i].hpd_cnt++; 1021 DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", i, 1022 dev_priv->hpd_stats[i].hpd_cnt); 1023 } 1024 } 1025 1026 if (storm_detected) 1027 dev_priv->display.hpd_irq_setup(dev); 1028 spin_unlock(&dev_priv->irq_lock); 1029 1030 /* 1031 * Our hotplug handler can grab modeset locks (by calling down into the 1032 * fb helpers). Hence it must not be run on our own dev-priv->wq work 1033 * queue for otherwise the flush_work in the pageflip code will 1034 * deadlock. 1035 */ 1036 schedule_work(&dev_priv->hotplug_work); 1037 } 1038 1039 static void gmbus_irq_handler(struct drm_device *dev) 1040 { 1041 struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private; 1042 1043 wake_up_all(&dev_priv->gmbus_wait_queue); 1044 } 1045 1046 static void dp_aux_irq_handler(struct drm_device *dev) 1047 { 1048 struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private; 1049 1050 wake_up_all(&dev_priv->gmbus_wait_queue); 1051 } 1052 1053 /* The RPS events need forcewake, so we add them to a work queue and mask their 1054 * IMR bits until the work is done. Other interrupts can be processed without 1055 * the work queue. 
*/ 1056 static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir) 1057 { 1058 if (pm_iir & GEN6_PM_RPS_EVENTS) { 1059 spin_lock(&dev_priv->irq_lock); 1060 dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_RPS_EVENTS; 1061 snb_disable_pm_irq(dev_priv, pm_iir & GEN6_PM_RPS_EVENTS); 1062 spin_unlock(&dev_priv->irq_lock); 1063 1064 queue_work(dev_priv->wq, &dev_priv->rps.work); 1065 } 1066 1067 if (HAS_VEBOX(dev_priv->dev)) { 1068 if (pm_iir & PM_VEBOX_USER_INTERRUPT) 1069 notify_ring(dev_priv->dev, &dev_priv->ring[VECS]); 1070 1071 if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) { 1072 DRM_ERROR("VEBOX CS error interrupt 0x%08x\n", pm_iir); 1073 i915_handle_error(dev_priv->dev, false); 1074 } 1075 } 1076 } 1077 1078 static irqreturn_t valleyview_irq_handler(int irq, void *arg) 1079 { 1080 struct drm_device *dev = (struct drm_device *) arg; 1081 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1082 u32 iir, gt_iir, pm_iir; 1083 irqreturn_t ret = IRQ_NONE; 1084 unsigned long irqflags; 1085 int pipe; 1086 u32 pipe_stats[I915_MAX_PIPES]; 1087 1088 atomic_inc(&dev_priv->irq_received); 1089 1090 while (true) { 1091 iir = I915_READ(VLV_IIR); 1092 gt_iir = I915_READ(GTIIR); 1093 pm_iir = I915_READ(GEN6_PMIIR); 1094 1095 if (gt_iir == 0 && pm_iir == 0 && iir == 0) 1096 goto out; 1097 1098 ret = IRQ_HANDLED; 1099 1100 snb_gt_irq_handler(dev, dev_priv, gt_iir); 1101 1102 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1103 for_each_pipe(pipe) { 1104 int reg = PIPESTAT(pipe); 1105 pipe_stats[pipe] = I915_READ(reg); 1106 1107 /* 1108 * Clear the PIPE*STAT regs before the IIR 1109 */ 1110 if (pipe_stats[pipe] & 0x8000ffff) { 1111 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 1112 DRM_DEBUG_DRIVER("pipe %c underrun\n", 1113 pipe_name(pipe)); 1114 I915_WRITE(reg, pipe_stats[pipe]); 1115 } 1116 } 1117 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1118 1119 for_each_pipe(pipe) { 1120 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS) 1121 drm_handle_vblank(dev, pipe); 1122 1123 if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) { 1124 intel_prepare_page_flip(dev, pipe); 1125 intel_finish_page_flip(dev, pipe); 1126 } 1127 } 1128 1129 /* Consume port. 
Then clear IIR or we'll miss events */ 1130 if (iir & I915_DISPLAY_PORT_INTERRUPT) { 1131 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); 1132 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; 1133 1134 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", 1135 hotplug_status); 1136 1137 intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915); 1138 1139 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); 1140 I915_READ(PORT_HOTPLUG_STAT); 1141 } 1142 1143 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 1144 gmbus_irq_handler(dev); 1145 1146 if (pm_iir) 1147 gen6_rps_irq_handler(dev_priv, pm_iir); 1148 1149 I915_WRITE(GTIIR, gt_iir); 1150 I915_WRITE(GEN6_PMIIR, pm_iir); 1151 I915_WRITE(VLV_IIR, iir); 1152 } 1153 1154 out: 1155 return ret; 1156 } 1157 1158 static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir) 1159 { 1160 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1161 int pipe; 1162 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK; 1163 1164 intel_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx); 1165 1166 if (pch_iir & SDE_AUDIO_POWER_MASK) { 1167 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >> 1168 SDE_AUDIO_POWER_SHIFT); 1169 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", 1170 port_name(port)); 1171 } 1172 1173 if (pch_iir & SDE_AUX_MASK) 1174 dp_aux_irq_handler(dev); 1175 1176 if (pch_iir & SDE_GMBUS) 1177 gmbus_irq_handler(dev); 1178 1179 if (pch_iir & SDE_AUDIO_HDCP_MASK) 1180 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n"); 1181 1182 if (pch_iir & SDE_AUDIO_TRANS_MASK) 1183 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n"); 1184 1185 if (pch_iir & SDE_POISON) 1186 DRM_ERROR("PCH poison interrupt\n"); 1187 1188 if (pch_iir & SDE_FDI_MASK) 1189 for_each_pipe(pipe) 1190 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 1191 pipe_name(pipe), 1192 I915_READ(FDI_RX_IIR(pipe))); 1193 1194 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE)) 1195 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n"); 1196 1197 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR)) 1198 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n"); 1199 1200 if (pch_iir & SDE_TRANSA_FIFO_UNDER) 1201 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, 1202 false)) 1203 DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n"); 1204 1205 if (pch_iir & SDE_TRANSB_FIFO_UNDER) 1206 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B, 1207 false)) 1208 DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n"); 1209 } 1210 1211 static void ivb_err_int_handler(struct drm_device *dev) 1212 { 1213 struct drm_i915_private *dev_priv = dev->dev_private; 1214 u32 err_int = I915_READ(GEN7_ERR_INT); 1215 1216 if (err_int & ERR_INT_POISON) 1217 DRM_ERROR("Poison interrupt\n"); 1218 1219 if (err_int & ERR_INT_FIFO_UNDERRUN_A) 1220 if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false)) 1221 DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n"); 1222 1223 if (err_int & ERR_INT_FIFO_UNDERRUN_B) 1224 if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false)) 1225 DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n"); 1226 1227 if (err_int & ERR_INT_FIFO_UNDERRUN_C) 1228 if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_C, false)) 1229 DRM_DEBUG_DRIVER("Pipe C FIFO underrun\n"); 1230 1231 I915_WRITE(GEN7_ERR_INT, err_int); 1232 } 1233 1234 static void cpt_serr_int_handler(struct drm_device *dev) 1235 { 1236 struct drm_i915_private *dev_priv = dev->dev_private; 1237 u32 serr_int = I915_READ(SERR_INT); 1238 1239 if (serr_int & SERR_INT_POISON) 1240 
DRM_ERROR("PCH poison interrupt\n"); 1241 1242 if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN) 1243 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, 1244 false)) 1245 DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n"); 1246 1247 if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN) 1248 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B, 1249 false)) 1250 DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n"); 1251 1252 if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN) 1253 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_C, 1254 false)) 1255 DRM_DEBUG_DRIVER("PCH transcoder C FIFO underrun\n"); 1256 1257 I915_WRITE(SERR_INT, serr_int); 1258 } 1259 1260 static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir) 1261 { 1262 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1263 int pipe; 1264 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; 1265 1266 intel_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt); 1267 1268 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) { 1269 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> 1270 SDE_AUDIO_POWER_SHIFT_CPT); 1271 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n", 1272 port_name(port)); 1273 } 1274 1275 if (pch_iir & SDE_AUX_MASK_CPT) 1276 dp_aux_irq_handler(dev); 1277 1278 if (pch_iir & SDE_GMBUS_CPT) 1279 gmbus_irq_handler(dev); 1280 1281 if (pch_iir & SDE_AUDIO_CP_REQ_CPT) 1282 DRM_DEBUG_DRIVER("Audio CP request interrupt\n"); 1283 1284 if (pch_iir & SDE_AUDIO_CP_CHG_CPT) 1285 DRM_DEBUG_DRIVER("Audio CP change interrupt\n"); 1286 1287 if (pch_iir & SDE_FDI_MASK_CPT) 1288 for_each_pipe(pipe) 1289 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 1290 pipe_name(pipe), 1291 I915_READ(FDI_RX_IIR(pipe))); 1292 1293 if (pch_iir & SDE_ERROR_CPT) 1294 cpt_serr_int_handler(dev); 1295 } 1296 1297 static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir) 1298 { 1299 struct drm_i915_private *dev_priv = dev->dev_private; 1300 1301 if (de_iir & DE_AUX_CHANNEL_A) 1302 dp_aux_irq_handler(dev); 1303 1304 if (de_iir & DE_GSE) 1305 intel_opregion_asle_intr(dev); 1306 1307 if (de_iir & DE_PIPEA_VBLANK) 1308 drm_handle_vblank(dev, 0); 1309 1310 if (de_iir & DE_PIPEB_VBLANK) 1311 drm_handle_vblank(dev, 1); 1312 1313 if (de_iir & DE_POISON) 1314 DRM_ERROR("Poison interrupt\n"); 1315 1316 if (de_iir & DE_PIPEA_FIFO_UNDERRUN) 1317 if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false)) 1318 DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n"); 1319 1320 if (de_iir & DE_PIPEB_FIFO_UNDERRUN) 1321 if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false)) 1322 DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n"); 1323 1324 if (de_iir & DE_PLANEA_FLIP_DONE) { 1325 intel_prepare_page_flip(dev, 0); 1326 intel_finish_page_flip_plane(dev, 0); 1327 } 1328 1329 if (de_iir & DE_PLANEB_FLIP_DONE) { 1330 intel_prepare_page_flip(dev, 1); 1331 intel_finish_page_flip_plane(dev, 1); 1332 } 1333 1334 /* check event from PCH */ 1335 if (de_iir & DE_PCH_EVENT) { 1336 u32 pch_iir = I915_READ(SDEIIR); 1337 1338 if (HAS_PCH_CPT(dev)) 1339 cpt_irq_handler(dev, pch_iir); 1340 else 1341 ibx_irq_handler(dev, pch_iir); 1342 1343 /* should clear PCH hotplug event before clear CPU irq */ 1344 I915_WRITE(SDEIIR, pch_iir); 1345 } 1346 1347 if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT) 1348 ironlake_rps_change_irq_handler(dev); 1349 } 1350 1351 static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir) 1352 { 1353 struct drm_i915_private *dev_priv = dev->dev_private; 1354 int i; 1355 1356 if (de_iir & DE_ERR_INT_IVB) 1357 
		ivb_err_int_handler(dev);

	if (de_iir & DE_AUX_CHANNEL_A_IVB)
		dp_aux_irq_handler(dev);

	if (de_iir & DE_GSE_IVB)
		intel_opregion_asle_intr(dev);

	for (i = 0; i < 3; i++) {
		if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
			drm_handle_vblank(dev, i);
		if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) {
			intel_prepare_page_flip(dev, i);
			intel_finish_page_flip_plane(dev, i);
		}
	}

	/* check event from PCH */
	if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
		u32 pch_iir = I915_READ(SDEIIR);

		cpt_irq_handler(dev, pch_iir);

		/* clear PCH hotplug event before clear CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}
}

static irqreturn_t ironlake_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
	irqreturn_t ret = IRQ_NONE;
	bool err_int_reenable = false;

	atomic_inc(&dev_priv->irq_received);

	/* We get interrupts on unclaimed registers, so check for this before we
	 * do any I915_{READ,WRITE}. */
	intel_uncore_check_errors(dev);

	/* disable master interrupt before clearing iir */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
	POSTING_READ(DEIER);

	/* Disable south interrupts. We'll only write to SDEIIR once, so further
	 * interrupts will be stored on its back queue, and then we'll be
	 * able to process them after we restore SDEIER (as soon as we restore
	 * it, we'll get an interrupt if SDEIIR still has something to process
	 * due to its back queue). */
	if (!HAS_PCH_NOP(dev)) {
		sde_ier = I915_READ(SDEIER);
		I915_WRITE(SDEIER, 0);
		POSTING_READ(SDEIER);
	}

	/* On Haswell, also mask ERR_INT because we don't want to risk
	 * generating "unclaimed register" interrupts from inside the interrupt
	 * handler.
*/ 1418 if (IS_HASWELL(dev)) { 1419 spin_lock(&dev_priv->irq_lock); 1420 err_int_reenable = ~dev_priv->irq_mask & DE_ERR_INT_IVB; 1421 if (err_int_reenable) 1422 ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB); 1423 spin_unlock(&dev_priv->irq_lock); 1424 } 1425 1426 gt_iir = I915_READ(GTIIR); 1427 if (gt_iir) { 1428 if (INTEL_INFO(dev)->gen >= 6) 1429 snb_gt_irq_handler(dev, dev_priv, gt_iir); 1430 else 1431 ilk_gt_irq_handler(dev, dev_priv, gt_iir); 1432 I915_WRITE(GTIIR, gt_iir); 1433 ret = IRQ_HANDLED; 1434 } 1435 1436 de_iir = I915_READ(DEIIR); 1437 if (de_iir) { 1438 if (INTEL_INFO(dev)->gen >= 7) 1439 ivb_display_irq_handler(dev, de_iir); 1440 else 1441 ilk_display_irq_handler(dev, de_iir); 1442 I915_WRITE(DEIIR, de_iir); 1443 ret = IRQ_HANDLED; 1444 } 1445 1446 if (INTEL_INFO(dev)->gen >= 6) { 1447 u32 pm_iir = I915_READ(GEN6_PMIIR); 1448 if (pm_iir) { 1449 gen6_rps_irq_handler(dev_priv, pm_iir); 1450 I915_WRITE(GEN6_PMIIR, pm_iir); 1451 ret = IRQ_HANDLED; 1452 } 1453 } 1454 1455 if (err_int_reenable) { 1456 spin_lock(&dev_priv->irq_lock); 1457 if (ivb_can_enable_err_int(dev)) 1458 ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB); 1459 spin_unlock(&dev_priv->irq_lock); 1460 } 1461 1462 I915_WRITE(DEIER, de_ier); 1463 POSTING_READ(DEIER); 1464 if (!HAS_PCH_NOP(dev)) { 1465 I915_WRITE(SDEIER, sde_ier); 1466 POSTING_READ(SDEIER); 1467 } 1468 1469 return ret; 1470 } 1471 1472 static void i915_error_wake_up(struct drm_i915_private *dev_priv, 1473 bool reset_completed) 1474 { 1475 struct intel_ring_buffer *ring; 1476 int i; 1477 1478 /* 1479 * Notify all waiters for GPU completion events that reset state has 1480 * been changed, and that they need to restart their wait after 1481 * checking for potential errors (and bail out to drop locks if there is 1482 * a gpu reset pending so that i915_error_work_func can acquire them). 1483 */ 1484 1485 /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */ 1486 for_each_ring(ring, dev_priv, i) 1487 wake_up_all(&ring->irq_queue); 1488 1489 /* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */ 1490 wake_up_all(&dev_priv->pending_flip_queue); 1491 1492 /* 1493 * Signal tasks blocked in i915_gem_wait_for_error that the pending 1494 * reset state is cleared. 1495 */ 1496 if (reset_completed) 1497 wake_up_all(&dev_priv->gpu_error.reset_queue); 1498 } 1499 1500 /** 1501 * i915_error_work_func - do process context error handling work 1502 * @work: work struct 1503 * 1504 * Fire an error uevent so userspace can see that a hang or error 1505 * was detected. 1506 */ 1507 static void i915_error_work_func(struct work_struct *work) 1508 { 1509 struct i915_gpu_error *error = container_of(work, struct i915_gpu_error, 1510 work); 1511 drm_i915_private_t *dev_priv = container_of(error, drm_i915_private_t, 1512 gpu_error); 1513 struct drm_device *dev = dev_priv->dev; 1514 char *error_event[] = { I915_ERROR_UEVENT "=1", NULL }; 1515 char *reset_event[] = { I915_RESET_UEVENT "=1", NULL }; 1516 char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL }; 1517 int ret; 1518 1519 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event); 1520 1521 /* 1522 * Note that there's only one work item which does gpu resets, so we 1523 * need not worry about concurrent gpu resets potentially incrementing 1524 * error->reset_counter twice. We only need to take care of another 1525 * racing irq/hangcheck declaring the gpu dead for a second time. 
	 * A quick check for that is good enough: schedule_work ensures the
	 * correct ordering between hang detection and this work item, and since
	 * the reset in-progress bit is only ever set by code outside of this
	 * work we don't need to worry about any other races.
	 */
	if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
		DRM_DEBUG_DRIVER("resetting chip\n");
		kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE,
				   reset_event);

		/*
		 * All state reset _must_ be completed before we update the
		 * reset counter, for otherwise waiters might miss the reset
		 * pending state and not properly drop locks, resulting in
		 * deadlocks with the reset work.
		 */
		ret = i915_reset(dev);

		intel_display_handle_reset(dev);

		if (ret == 0) {
			/*
			 * After all the gem state is reset, increment the reset
			 * counter and wake up everyone waiting for the reset to
			 * complete.
			 *
			 * Since unlock operations are a one-sided barrier only,
			 * we need to insert a barrier here to order any seqno
			 * updates before the counter increment.
			 */
			smp_mb__before_atomic_inc();
			atomic_inc(&dev_priv->gpu_error.reset_counter);

			kobject_uevent_env(&dev->primary->kdev.kobj,
					   KOBJ_CHANGE, reset_done_event);
		} else {
			atomic_set(&error->reset_counter, I915_WEDGED);
		}

		/*
		 * Note: The wake_up also serves as a memory barrier so that
		 * waiters see the updated value of the reset counter atomic_t.
		 */
		i915_error_wake_up(dev_priv, true);
	}
}

static void i915_report_and_clear_eir(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t instdone[I915_NUM_INSTDONE_REG];
	u32 eir = I915_READ(EIR);
	int pipe, i;

	if (!eir)
		return;

	pr_err("render error detected, EIR: 0x%08x\n", eir);

	i915_get_extra_instdone(dev, instdone);

	if (IS_G4X(dev)) {
		if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
			u32 ipeir = I915_READ(IPEIR_I965);

			pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			for (i = 0; i < ARRAY_SIZE(instdone); i++)
				pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
			pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
		if (eir & GM45_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			pr_err("page table error\n");
			pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (!IS_GEN2(dev)) {
		if (eir & I915_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			pr_err("page table error\n");
			pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (eir & I915_ERROR_MEMORY_REFRESH) {
		pr_err("memory refresh error:\n");
		for_each_pipe(pipe)
			pr_err("pipe %c stat: 0x%08x\n",
			       pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
		/* pipestat has already been acked */
	}
	if (eir & I915_ERROR_INSTRUCTION) {
		pr_err("instruction error\n");
		pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM));
		for (i = 0; i < ARRAY_SIZE(instdone); i++)
			pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
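		/* The instruction-pointer error registers (IPEIR/IPEHR) moved
		 * on gen4+, so read whichever variant this chip has. */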
		if (INTEL_INFO(dev)->gen < 4) {
			u32 ipeir = I915_READ(IPEIR);

			pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR));
			pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR));
			pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD));
			I915_WRITE(IPEIR, ipeir);
			POSTING_READ(IPEIR);
		} else {
			u32 ipeir = I915_READ(IPEIR_I965);

			pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
	}

	I915_WRITE(EIR, eir);
	POSTING_READ(EIR);
	eir = I915_READ(EIR);
	if (eir) {
		/*
		 * some errors might have become stuck,
		 * mask them.
		 */
		DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
		I915_WRITE(EMR, I915_READ(EMR) | eir);
		I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	}
}

/**
 * i915_handle_error - handle an error interrupt
 * @dev: drm device
 *
 * Do some basic checking of register state at error interrupt time and
 * dump it to the syslog. Also call i915_capture_error_state() to make
 * sure we get a record and make it available in debugfs. Fire a uevent
 * so userspace knows something bad happened (should trigger collection
 * of a ring dump etc.).
 */
void i915_handle_error(struct drm_device *dev, bool wedged)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	i915_capture_error_state(dev);
	i915_report_and_clear_eir(dev);

	if (wedged) {
		atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
				&dev_priv->gpu_error.reset_counter);

		/*
		 * Wakeup waiting processes so that the reset work function
		 * i915_error_work_func doesn't deadlock trying to grab various
		 * locks. By bumping the reset counter first, the woken
		 * processes will see a reset in progress and back off,
		 * releasing their locks and then wait for the reset completion.
		 * We must do this for _all_ gpu waiters that might hold locks
		 * that the reset work needs to acquire.
		 *
		 * Note: The wake_up serves as the required memory barrier to
		 * ensure that the waiters see the updated value of the reset
		 * counter atomic_t.
		 */
		i915_error_wake_up(dev_priv, false);
	}

	/*
	 * Our reset work can grab modeset locks (since it needs to reset the
	 * state of outstanding pageflips). Hence it must not be run on our own
	 * dev_priv->wq work queue for otherwise the flush_work in the pageflip
	 * code will deadlock.
	 */
	schedule_work(&dev_priv->gpu_error.work);
}

static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_i915_gem_object *obj;
	struct intel_unpin_work *work;
	unsigned long flags;
	bool stall_detected;

	/* Ignore early vblank irqs */
	if (intel_crtc == NULL)
		return;

	spin_lock_irqsave(&dev->event_lock, flags);
	work = intel_crtc->unpin_work;

	if (work == NULL ||
	    atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE ||
	    !work->enable_stall_check) {
		/* Either the pending flip IRQ arrived, or we're too early.
Don't check */ 1733 spin_unlock_irqrestore(&dev->event_lock, flags); 1734 return; 1735 } 1736 1737 /* Potential stall - if we see that the flip has happened, assume a missed interrupt */ 1738 obj = work->pending_flip_obj; 1739 if (INTEL_INFO(dev)->gen >= 4) { 1740 int dspsurf = DSPSURF(intel_crtc->plane); 1741 stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) == 1742 i915_gem_obj_ggtt_offset(obj); 1743 } else { 1744 int dspaddr = DSPADDR(intel_crtc->plane); 1745 stall_detected = I915_READ(dspaddr) == (i915_gem_obj_ggtt_offset(obj) + 1746 crtc->y * crtc->fb->pitches[0] + 1747 crtc->x * crtc->fb->bits_per_pixel/8); 1748 } 1749 1750 spin_unlock_irqrestore(&dev->event_lock, flags); 1751 1752 if (stall_detected) { 1753 DRM_DEBUG_DRIVER("Pageflip stall detected\n"); 1754 intel_prepare_page_flip(dev, intel_crtc->plane); 1755 } 1756 } 1757 1758 /* Called from drm generic code, passed 'crtc' which 1759 * we use as a pipe index 1760 */ 1761 static int i915_enable_vblank(struct drm_device *dev, int pipe) 1762 { 1763 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1764 unsigned long irqflags; 1765 1766 if (!i915_pipe_enabled(dev, pipe)) 1767 return -EINVAL; 1768 1769 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1770 if (INTEL_INFO(dev)->gen >= 4) 1771 i915_enable_pipestat(dev_priv, pipe, 1772 PIPE_START_VBLANK_INTERRUPT_ENABLE); 1773 else 1774 i915_enable_pipestat(dev_priv, pipe, 1775 PIPE_VBLANK_INTERRUPT_ENABLE); 1776 1777 /* maintain vblank delivery even in deep C-states */ 1778 if (dev_priv->info->gen == 3) 1779 I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS)); 1780 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1781 1782 return 0; 1783 } 1784 1785 static int ironlake_enable_vblank(struct drm_device *dev, int pipe) 1786 { 1787 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1788 unsigned long irqflags; 1789 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? 
DE_PIPE_VBLANK_IVB(pipe) : 1790 DE_PIPE_VBLANK_ILK(pipe); 1791 1792 if (!i915_pipe_enabled(dev, pipe)) 1793 return -EINVAL; 1794 1795 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1796 ironlake_enable_display_irq(dev_priv, bit); 1797 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1798 1799 return 0; 1800 } 1801 1802 static int valleyview_enable_vblank(struct drm_device *dev, int pipe) 1803 { 1804 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1805 unsigned long irqflags; 1806 u32 imr; 1807 1808 if (!i915_pipe_enabled(dev, pipe)) 1809 return -EINVAL; 1810 1811 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1812 imr = I915_READ(VLV_IMR); 1813 if (pipe == 0) 1814 imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT; 1815 else 1816 imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; 1817 I915_WRITE(VLV_IMR, imr); 1818 i915_enable_pipestat(dev_priv, pipe, 1819 PIPE_START_VBLANK_INTERRUPT_ENABLE); 1820 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1821 1822 return 0; 1823 } 1824 1825 /* Called from drm generic code, passed 'crtc' which 1826 * we use as a pipe index 1827 */ 1828 static void i915_disable_vblank(struct drm_device *dev, int pipe) 1829 { 1830 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1831 unsigned long irqflags; 1832 1833 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1834 if (dev_priv->info->gen == 3) 1835 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS)); 1836 1837 i915_disable_pipestat(dev_priv, pipe, 1838 PIPE_VBLANK_INTERRUPT_ENABLE | 1839 PIPE_START_VBLANK_INTERRUPT_ENABLE); 1840 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1841 } 1842 1843 static void ironlake_disable_vblank(struct drm_device *dev, int pipe) 1844 { 1845 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1846 unsigned long irqflags; 1847 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? 
DE_PIPE_VBLANK_IVB(pipe) : 1848 DE_PIPE_VBLANK_ILK(pipe); 1849 1850 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1851 ironlake_disable_display_irq(dev_priv, bit); 1852 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1853 } 1854 1855 static void valleyview_disable_vblank(struct drm_device *dev, int pipe) 1856 { 1857 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1858 unsigned long irqflags; 1859 u32 imr; 1860 1861 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1862 i915_disable_pipestat(dev_priv, pipe, 1863 PIPE_START_VBLANK_INTERRUPT_ENABLE); 1864 imr = I915_READ(VLV_IMR); 1865 if (pipe == 0) 1866 imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT; 1867 else 1868 imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; 1869 I915_WRITE(VLV_IMR, imr); 1870 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1871 } 1872 1873 static u32 1874 ring_last_seqno(struct intel_ring_buffer *ring) 1875 { 1876 return list_entry(ring->request_list.prev, 1877 struct drm_i915_gem_request, list)->seqno; 1878 } 1879 1880 static bool 1881 ring_idle(struct intel_ring_buffer *ring, u32 seqno) 1882 { 1883 return (list_empty(&ring->request_list) || 1884 i915_seqno_passed(seqno, ring_last_seqno(ring))); 1885 } 1886 1887 static struct intel_ring_buffer * 1888 semaphore_waits_for(struct intel_ring_buffer *ring, u32 *seqno) 1889 { 1890 struct drm_i915_private *dev_priv = ring->dev->dev_private; 1891 u32 cmd, ipehr, acthd, acthd_min; 1892 1893 ipehr = I915_READ(RING_IPEHR(ring->mmio_base)); 1894 if ((ipehr & ~(0x3 << 16)) != 1895 (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | MI_SEMAPHORE_REGISTER)) 1896 return NULL; 1897 1898 /* ACTHD is likely pointing to the dword after the actual command, 1899 * so scan backwards until we find the MBOX. 1900 */ 1901 acthd = intel_ring_get_active_head(ring) & HEAD_ADDR; 1902 acthd_min = max((int)acthd - 3 * 4, 0); 1903 do { 1904 cmd = ioread32(ring->virtual_start + acthd); 1905 if (cmd == ipehr) 1906 break; 1907 1908 acthd -= 4; 1909 if (acthd < acthd_min) 1910 return NULL; 1911 } while (1); 1912 1913 *seqno = ioread32(ring->virtual_start+acthd+4)+1; 1914 return &dev_priv->ring[(ring->id + (((ipehr >> 17) & 1) + 1)) % 3]; 1915 } 1916 1917 static int semaphore_passed(struct intel_ring_buffer *ring) 1918 { 1919 struct drm_i915_private *dev_priv = ring->dev->dev_private; 1920 struct intel_ring_buffer *signaller; 1921 u32 seqno, ctl; 1922 1923 ring->hangcheck.deadlock = true; 1924 1925 signaller = semaphore_waits_for(ring, &seqno); 1926 if (signaller == NULL || signaller->hangcheck.deadlock) 1927 return -1; 1928 1929 /* cursory check for an unkickable deadlock */ 1930 ctl = I915_READ_CTL(signaller); 1931 if (ctl & RING_WAIT_SEMAPHORE && semaphore_passed(signaller) < 0) 1932 return -1; 1933 1934 return i915_seqno_passed(signaller->get_seqno(signaller, false), seqno); 1935 } 1936 1937 static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv) 1938 { 1939 struct intel_ring_buffer *ring; 1940 int i; 1941 1942 for_each_ring(ring, dev_priv, i) 1943 ring->hangcheck.deadlock = false; 1944 } 1945 1946 static enum intel_ring_hangcheck_action 1947 ring_stuck(struct intel_ring_buffer *ring, u32 acthd) 1948 { 1949 struct drm_device *dev = ring->dev; 1950 struct drm_i915_private *dev_priv = dev->dev_private; 1951 u32 tmp; 1952 1953 if (ring->hangcheck.acthd != acthd) 1954 return HANGCHECK_ACTIVE; 1955 1956 if (IS_GEN2(dev)) 1957 return HANGCHECK_HUNG; 1958 1959 /* Is the chip hanging on a WAIT_FOR_EVENT? 
1960 * If so we can simply poke the RB_WAIT bit 1961 * and break the hang. This should work on 1962 * all but the second generation chipsets. 1963 */ 1964 tmp = I915_READ_CTL(ring); 1965 if (tmp & RING_WAIT) { 1966 DRM_ERROR("Kicking stuck wait on %s\n", 1967 ring->name); 1968 I915_WRITE_CTL(ring, tmp); 1969 return HANGCHECK_KICK; 1970 } 1971 1972 if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) { 1973 switch (semaphore_passed(ring)) { 1974 default: 1975 return HANGCHECK_HUNG; 1976 case 1: 1977 DRM_ERROR("Kicking stuck semaphore on %s\n", 1978 ring->name); 1979 I915_WRITE_CTL(ring, tmp); 1980 return HANGCHECK_KICK; 1981 case 0: 1982 return HANGCHECK_WAIT; 1983 } 1984 } 1985 1986 return HANGCHECK_HUNG; 1987 } 1988 1989 /** 1990 * This is called when the chip hasn't reported back with completed 1991 * batchbuffers in a long time. We keep track of per-ring seqno progress and 1992 * if there is no progress, the hangcheck score for that ring is increased. 1993 * Further, acthd is inspected to see if the ring is stuck; if it is, 1994 * we kick the ring. If we see no progress on three subsequent calls 1995 * we assume the chip is wedged and try to fix it by resetting the chip. 1996 */ 1997 static void i915_hangcheck_elapsed(unsigned long data) 1998 { 1999 struct drm_device *dev = (struct drm_device *)data; 2000 drm_i915_private_t *dev_priv = dev->dev_private; 2001 struct intel_ring_buffer *ring; 2002 int i; 2003 int busy_count = 0, rings_hung = 0; 2004 bool stuck[I915_NUM_RINGS] = { 0 }; 2005 #define BUSY 1 2006 #define KICK 5 2007 #define HUNG 20 2008 #define FIRE 30 2009 2010 if (!i915_enable_hangcheck) 2011 return; 2012 2013 for_each_ring(ring, dev_priv, i) { 2014 u32 seqno, acthd; 2015 bool busy = true; 2016 2017 semaphore_clear_deadlocks(dev_priv); 2018 2019 seqno = ring->get_seqno(ring, false); 2020 acthd = intel_ring_get_active_head(ring); 2021 2022 if (ring->hangcheck.seqno == seqno) { 2023 if (ring_idle(ring, seqno)) { 2024 if (waitqueue_active(&ring->irq_queue)) { 2025 /* Issue a wake-up to catch stuck h/w. */ 2026 DRM_ERROR("Hangcheck timer elapsed... %s idle\n", 2027 ring->name); 2028 wake_up_all(&ring->irq_queue); 2029 ring->hangcheck.score += HUNG; 2030 } else 2031 busy = false; 2032 } else { 2033 /* We always increment the hangcheck score 2034 * if the ring is busy and still processing 2035 * the same request, so that no single request 2036 * can run indefinitely (such as a chain of 2037 * batches). The only time we do not increment 2038 * the hangcheck score on this ring is if this 2039 * ring is in a legitimate wait for another 2040 * ring. In that case the waiting ring is a 2041 * victim and we want to be sure we catch the 2042 * right culprit. Then every time we do kick 2043 * the ring, we add a small increment to the 2044 * score so that we can catch a batch that is 2045 * being repeatedly kicked and so responsible 2046 * for stalling the machine. 2047 */ 2048 ring->hangcheck.action = ring_stuck(ring, 2049 acthd); 2050 2051 switch (ring->hangcheck.action) { 2052 case HANGCHECK_WAIT: 2053 break; 2054 case HANGCHECK_ACTIVE: 2055 ring->hangcheck.score += BUSY; 2056 break; 2057 case HANGCHECK_KICK: 2058 ring->hangcheck.score += KICK; 2059 break; 2060 case HANGCHECK_HUNG: 2061 ring->hangcheck.score += HUNG; 2062 stuck[i] = true; 2063 break; 2064 } 2065 } 2066 } else { 2067 /* Gradually reduce the count so that we catch DoS 2068 * attempts across multiple batches.
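* The score decays by one each time the seqno has advanced, so only a ring that keeps stalling will accumulate enough to cross the FIRE threshold below.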
2069 */ 2070 if (ring->hangcheck.score > 0) 2071 ring->hangcheck.score--; 2072 } 2073 2074 ring->hangcheck.seqno = seqno; 2075 ring->hangcheck.acthd = acthd; 2076 busy_count += busy; 2077 } 2078 2079 for_each_ring(ring, dev_priv, i) { 2080 if (ring->hangcheck.score > FIRE) { 2081 DRM_INFO("%s on %s\n", 2082 stuck[i] ? "stuck" : "no progress", 2083 ring->name); 2084 rings_hung++; 2085 } 2086 } 2087 2088 if (rings_hung) 2089 return i915_handle_error(dev, true); 2090 2091 if (busy_count) 2092 /* Reset timer in case the chip hangs without another 2093 * request being added */ 2094 i915_queue_hangcheck(dev); 2095 } 2096 2097 void i915_queue_hangcheck(struct drm_device *dev) 2098 { 2099 struct drm_i915_private *dev_priv = dev->dev_private; 2100 if (!i915_enable_hangcheck) 2101 return; 2102 2103 mod_timer(&dev_priv->gpu_error.hangcheck_timer, 2104 round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES)); 2105 } 2106 2107 static void ibx_irq_preinstall(struct drm_device *dev) 2108 { 2109 struct drm_i915_private *dev_priv = dev->dev_private; 2110 2111 if (HAS_PCH_NOP(dev)) 2112 return; 2113 2114 /* south display irq */ 2115 I915_WRITE(SDEIMR, 0xffffffff); 2116 /* 2117 * SDEIER is also touched by the interrupt handler to work around missed 2118 * PCH interrupts. Hence we can't update it after the interrupt handler 2119 * is enabled - instead we unconditionally enable all PCH interrupt 2120 * sources here, but then only unmask them as needed with SDEIMR. 2121 */ 2122 I915_WRITE(SDEIER, 0xffffffff); 2123 POSTING_READ(SDEIER); 2124 } 2125 2126 static void gen5_gt_irq_preinstall(struct drm_device *dev) 2127 { 2128 struct drm_i915_private *dev_priv = dev->dev_private; 2129 2130 /* and GT */ 2131 I915_WRITE(GTIMR, 0xffffffff); 2132 I915_WRITE(GTIER, 0x0); 2133 POSTING_READ(GTIER); 2134 2135 if (INTEL_INFO(dev)->gen >= 6) { 2136 /* and PM */ 2137 I915_WRITE(GEN6_PMIMR, 0xffffffff); 2138 I915_WRITE(GEN6_PMIER, 0x0); 2139 POSTING_READ(GEN6_PMIER); 2140 } 2141 } 2142 2143 /* drm_dma.h hooks 2144 */ 2145 static void ironlake_irq_preinstall(struct drm_device *dev) 2146 { 2147 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2148 2149 atomic_set(&dev_priv->irq_received, 0); 2150 2151 I915_WRITE(HWSTAM, 0xeffe); 2152 2153 I915_WRITE(DEIMR, 0xffffffff); 2154 I915_WRITE(DEIER, 0x0); 2155 POSTING_READ(DEIER); 2156 2157 gen5_gt_irq_preinstall(dev); 2158 2159 ibx_irq_preinstall(dev); 2160 } 2161 2162 static void valleyview_irq_preinstall(struct drm_device *dev) 2163 { 2164 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2165 int pipe; 2166 2167 atomic_set(&dev_priv->irq_received, 0); 2168 2169 /* VLV magic */ 2170 I915_WRITE(VLV_IMR, 0); 2171 I915_WRITE(RING_IMR(RENDER_RING_BASE), 0); 2172 I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0); 2173 I915_WRITE(RING_IMR(BLT_RING_BASE), 0); 2174 2175 /* and GT */ 2176 I915_WRITE(GTIIR, I915_READ(GTIIR)); 2177 I915_WRITE(GTIIR, I915_READ(GTIIR)); 2178 2179 gen5_gt_irq_preinstall(dev); 2180 2181 I915_WRITE(DPINVGTT, 0xff); 2182 2183 I915_WRITE(PORT_HOTPLUG_EN, 0); 2184 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 2185 for_each_pipe(pipe) 2186 I915_WRITE(PIPESTAT(pipe), 0xffff); 2187 I915_WRITE(VLV_IIR, 0xffffffff); 2188 I915_WRITE(VLV_IMR, 0xffffffff); 2189 I915_WRITE(VLV_IER, 0x0); 2190 POSTING_READ(VLV_IER); 2191 } 2192 2193 static void ibx_hpd_irq_setup(struct drm_device *dev) 2194 { 2195 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2196 struct drm_mode_config *mode_config = &dev->mode_config; 2197
struct intel_encoder *intel_encoder; 2198 u32 hotplug_irqs, hotplug, enabled_irqs = 0; 2199 2200 if (HAS_PCH_IBX(dev)) { 2201 hotplug_irqs = SDE_HOTPLUG_MASK; 2202 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head) 2203 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) 2204 enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin]; 2205 } else { 2206 hotplug_irqs = SDE_HOTPLUG_MASK_CPT; 2207 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head) 2208 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) 2209 enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin]; 2210 } 2211 2212 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 2213 2214 /* 2215 * Enable digital hotplug on the PCH, and configure the DP short pulse 2216 * duration to 2ms (which is the minimum in the Display Port spec) 2217 * 2218 * This register is the same on all known PCH chips. 2219 */ 2220 hotplug = I915_READ(PCH_PORT_HOTPLUG); 2221 hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK); 2222 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms; 2223 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms; 2224 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms; 2225 I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 2226 } 2227 2228 static void ibx_irq_postinstall(struct drm_device *dev) 2229 { 2230 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2231 u32 mask; 2232 2233 if (HAS_PCH_NOP(dev)) 2234 return; 2235 2236 if (HAS_PCH_IBX(dev)) { 2237 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_TRANSB_FIFO_UNDER | 2238 SDE_TRANSA_FIFO_UNDER | SDE_POISON; 2239 } else { 2240 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT | SDE_ERROR_CPT; 2241 2242 I915_WRITE(SERR_INT, I915_READ(SERR_INT)); 2243 } 2244 2245 I915_WRITE(SDEIIR, I915_READ(SDEIIR)); 2246 I915_WRITE(SDEIMR, ~mask); 2247 } 2248 2249 static void gen5_gt_irq_postinstall(struct drm_device *dev) 2250 { 2251 struct drm_i915_private *dev_priv = dev->dev_private; 2252 u32 pm_irqs, gt_irqs; 2253 2254 pm_irqs = gt_irqs = 0; 2255 2256 dev_priv->gt_irq_mask = ~0; 2257 if (HAS_L3_GPU_CACHE(dev)) { 2258 /* L3 parity interrupt is always unmasked. 
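It is left unmasked in GTIMR here and enabled in GTIER below.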
*/ 2259 dev_priv->gt_irq_mask = ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT; 2260 gt_irqs |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT; 2261 } 2262 2263 gt_irqs |= GT_RENDER_USER_INTERRUPT; 2264 if (IS_GEN5(dev)) { 2265 gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT | 2266 ILK_BSD_USER_INTERRUPT; 2267 } else { 2268 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT; 2269 } 2270 2271 I915_WRITE(GTIIR, I915_READ(GTIIR)); 2272 I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 2273 I915_WRITE(GTIER, gt_irqs); 2274 POSTING_READ(GTIER); 2275 2276 if (INTEL_INFO(dev)->gen >= 6) { 2277 pm_irqs |= GEN6_PM_RPS_EVENTS; 2278 2279 if (HAS_VEBOX(dev)) 2280 pm_irqs |= PM_VEBOX_USER_INTERRUPT; 2281 2282 dev_priv->pm_irq_mask = 0xffffffff; 2283 I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR)); 2284 I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask); 2285 I915_WRITE(GEN6_PMIER, pm_irqs); 2286 POSTING_READ(GEN6_PMIER); 2287 } 2288 } 2289 2290 static int ironlake_irq_postinstall(struct drm_device *dev) 2291 { 2292 unsigned long irqflags; 2293 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2294 u32 display_mask, extra_mask; 2295 2296 if (INTEL_INFO(dev)->gen >= 7) { 2297 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | 2298 DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB | 2299 DE_PLANEB_FLIP_DONE_IVB | 2300 DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB | 2301 DE_ERR_INT_IVB); 2302 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB | 2303 DE_PIPEA_VBLANK_IVB); 2304 2305 I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT)); 2306 } else { 2307 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | 2308 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE | 2309 DE_AUX_CHANNEL_A | DE_PIPEB_FIFO_UNDERRUN | 2310 DE_PIPEA_FIFO_UNDERRUN | DE_POISON); 2311 extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT; 2312 } 2313 2314 dev_priv->irq_mask = ~display_mask; 2315 2316 /* should always be able to generate irqs */ 2317 I915_WRITE(DEIIR, I915_READ(DEIIR)); 2318 I915_WRITE(DEIMR, dev_priv->irq_mask); 2319 I915_WRITE(DEIER, display_mask | extra_mask); 2320 POSTING_READ(DEIER); 2321 2322 gen5_gt_irq_postinstall(dev); 2323 2324 ibx_irq_postinstall(dev); 2325 2326 if (IS_IRONLAKE_M(dev)) { 2327 /* Enable PCU event interrupts 2328 * 2329 * spinlocking not required here for correctness since interrupt 2330 * setup is guaranteed to run in single-threaded context. But we 2331 * need it to make the assert_spin_locked happy. */ 2332 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2333 ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT); 2334 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2335 } 2336 2337 return 0; 2338 } 2339 2340 static int valleyview_irq_postinstall(struct drm_device *dev) 2341 { 2342 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2343 u32 enable_mask; 2344 u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV; 2345 unsigned long irqflags; 2346 2347 enable_mask = I915_DISPLAY_PORT_INTERRUPT; 2348 enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 2349 I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT | 2350 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 2351 I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; 2352 2353 /* 2354 * Leave vblank interrupts masked initially; enable/disable will 2355 * toggle them based on usage.
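* valleyview_enable_vblank() and valleyview_disable_vblank() then clear and set the corresponding VLV_IMR bits at runtime.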
2356 */ 2357 dev_priv->irq_mask = (~enable_mask) | 2358 I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT | 2359 I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; 2360 2361 I915_WRITE(PORT_HOTPLUG_EN, 0); 2362 POSTING_READ(PORT_HOTPLUG_EN); 2363 2364 I915_WRITE(VLV_IMR, dev_priv->irq_mask); 2365 I915_WRITE(VLV_IER, enable_mask); 2366 I915_WRITE(VLV_IIR, 0xffffffff); 2367 I915_WRITE(PIPESTAT(0), 0xffff); 2368 I915_WRITE(PIPESTAT(1), 0xffff); 2369 POSTING_READ(VLV_IER); 2370 2371 /* Interrupt setup is already guaranteed to be single-threaded, this is 2372 * just to make the assert_spin_locked check happy. */ 2373 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2374 i915_enable_pipestat(dev_priv, 0, pipestat_enable); 2375 i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE); 2376 i915_enable_pipestat(dev_priv, 1, pipestat_enable); 2377 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2378 2379 I915_WRITE(VLV_IIR, 0xffffffff); 2380 I915_WRITE(VLV_IIR, 0xffffffff); 2381 2382 gen5_gt_irq_postinstall(dev); 2383 2384 /* ack & enable invalid PTE error interrupts */ 2385 #if 0 /* FIXME: add support to irq handler for checking these bits */ 2386 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK); 2387 I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK); 2388 #endif 2389 2390 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); 2391 2392 return 0; 2393 } 2394 2395 static void valleyview_irq_uninstall(struct drm_device *dev) 2396 { 2397 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2398 int pipe; 2399 2400 if (!dev_priv) 2401 return; 2402 2403 del_timer_sync(&dev_priv->hotplug_reenable_timer); 2404 2405 for_each_pipe(pipe) 2406 I915_WRITE(PIPESTAT(pipe), 0xffff); 2407 2408 I915_WRITE(HWSTAM, 0xffffffff); 2409 I915_WRITE(PORT_HOTPLUG_EN, 0); 2410 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 2411 for_each_pipe(pipe) 2412 I915_WRITE(PIPESTAT(pipe), 0xffff); 2413 I915_WRITE(VLV_IIR, 0xffffffff); 2414 I915_WRITE(VLV_IMR, 0xffffffff); 2415 I915_WRITE(VLV_IER, 0x0); 2416 POSTING_READ(VLV_IER); 2417 } 2418 2419 static void ironlake_irq_uninstall(struct drm_device *dev) 2420 { 2421 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2422 2423 if (!dev_priv) 2424 return; 2425 2426 del_timer_sync(&dev_priv->hotplug_reenable_timer); 2427 2428 I915_WRITE(HWSTAM, 0xffffffff); 2429 2430 I915_WRITE(DEIMR, 0xffffffff); 2431 I915_WRITE(DEIER, 0x0); 2432 I915_WRITE(DEIIR, I915_READ(DEIIR)); 2433 if (IS_GEN7(dev)) 2434 I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT)); 2435 2436 I915_WRITE(GTIMR, 0xffffffff); 2437 I915_WRITE(GTIER, 0x0); 2438 I915_WRITE(GTIIR, I915_READ(GTIIR)); 2439 2440 if (HAS_PCH_NOP(dev)) 2441 return; 2442 2443 I915_WRITE(SDEIMR, 0xffffffff); 2444 I915_WRITE(SDEIER, 0x0); 2445 I915_WRITE(SDEIIR, I915_READ(SDEIIR)); 2446 if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev)) 2447 I915_WRITE(SERR_INT, I915_READ(SERR_INT)); 2448 } 2449 2450 static void i8xx_irq_preinstall(struct drm_device * dev) 2451 { 2452 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2453 int pipe; 2454 2455 atomic_set(&dev_priv->irq_received, 0); 2456 2457 for_each_pipe(pipe) 2458 I915_WRITE(PIPESTAT(pipe), 0); 2459 I915_WRITE16(IMR, 0xffff); 2460 I915_WRITE16(IER, 0x0); 2461 POSTING_READ16(IER); 2462 } 2463 2464 static int i8xx_irq_postinstall(struct drm_device *dev) 2465 { 2466 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2467 2468 I915_WRITE16(EMR, 2469 ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); 2470 2471 /* Unmask the interrupts that we always want 
on. */ 2472 dev_priv->irq_mask = 2473 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 2474 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 2475 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 2476 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | 2477 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 2478 I915_WRITE16(IMR, dev_priv->irq_mask); 2479 2480 I915_WRITE16(IER, 2481 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 2482 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 2483 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT | 2484 I915_USER_INTERRUPT); 2485 POSTING_READ16(IER); 2486 2487 return 0; 2488 } 2489 2490 /* 2491 * Returns true when a page flip has completed. 2492 */ 2493 static bool i8xx_handle_vblank(struct drm_device *dev, 2494 int pipe, u16 iir) 2495 { 2496 drm_i915_private_t *dev_priv = dev->dev_private; 2497 u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(pipe); 2498 2499 if (!drm_handle_vblank(dev, pipe)) 2500 return false; 2501 2502 if ((iir & flip_pending) == 0) 2503 return false; 2504 2505 intel_prepare_page_flip(dev, pipe); 2506 2507 /* We detect FlipDone by looking for the change in PendingFlip from '1' 2508 * to '0' on the following vblank, i.e. IIR has the Pendingflip 2509 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence 2510 * the flip is completed (no longer pending). Since this doesn't raise 2511 * an interrupt per se, we watch for the change at vblank. 2512 */ 2513 if (I915_READ16(ISR) & flip_pending) 2514 return false; 2515 2516 intel_finish_page_flip(dev, pipe); 2517 2518 return true; 2519 } 2520 2521 static irqreturn_t i8xx_irq_handler(int irq, void *arg) 2522 { 2523 struct drm_device *dev = (struct drm_device *) arg; 2524 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2525 u16 iir, new_iir; 2526 u32 pipe_stats[2]; 2527 unsigned long irqflags; 2528 int pipe; 2529 u16 flip_mask = 2530 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 2531 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 2532 2533 atomic_inc(&dev_priv->irq_received); 2534 2535 iir = I915_READ16(IIR); 2536 if (iir == 0) 2537 return IRQ_NONE; 2538 2539 while (iir & ~flip_mask) { 2540 /* Can't rely on pipestat interrupt bit in iir as it might 2541 * have been cleared after the pipestat interrupt was received. 2542 * It doesn't set the bit in iir again, but it still produces 2543 * interrupts (for non-MSI). 
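* So read and clear the PIPESTAT registers under irq_lock before acking IIR.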
2544 */ 2545 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2546 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 2547 i915_handle_error(dev, false); 2548 2549 for_each_pipe(pipe) { 2550 int reg = PIPESTAT(pipe); 2551 pipe_stats[pipe] = I915_READ(reg); 2552 2553 /* 2554 * Clear the PIPE*STAT regs before the IIR 2555 */ 2556 if (pipe_stats[pipe] & 0x8000ffff) { 2557 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 2558 DRM_DEBUG_DRIVER("pipe %c underrun\n", 2559 pipe_name(pipe)); 2560 I915_WRITE(reg, pipe_stats[pipe]); 2561 } 2562 } 2563 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2564 2565 I915_WRITE16(IIR, iir & ~flip_mask); 2566 new_iir = I915_READ16(IIR); /* Flush posted writes */ 2567 2568 i915_update_dri1_breadcrumb(dev); 2569 2570 if (iir & I915_USER_INTERRUPT) 2571 notify_ring(dev, &dev_priv->ring[RCS]); 2572 2573 if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS && 2574 i8xx_handle_vblank(dev, 0, iir)) 2575 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(0); 2576 2577 if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS && 2578 i8xx_handle_vblank(dev, 1, iir)) 2579 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(1); 2580 2581 iir = new_iir; 2582 } 2583 2584 return IRQ_HANDLED; 2585 } 2586 2587 static void i8xx_irq_uninstall(struct drm_device * dev) 2588 { 2589 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2590 int pipe; 2591 2592 for_each_pipe(pipe) { 2593 /* Clear enable bits; then clear status bits */ 2594 I915_WRITE(PIPESTAT(pipe), 0); 2595 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); 2596 } 2597 I915_WRITE16(IMR, 0xffff); 2598 I915_WRITE16(IER, 0x0); 2599 I915_WRITE16(IIR, I915_READ16(IIR)); 2600 } 2601 2602 static void i915_irq_preinstall(struct drm_device * dev) 2603 { 2604 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2605 int pipe; 2606 2607 atomic_set(&dev_priv->irq_received, 0); 2608 2609 if (I915_HAS_HOTPLUG(dev)) { 2610 I915_WRITE(PORT_HOTPLUG_EN, 0); 2611 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 2612 } 2613 2614 I915_WRITE16(HWSTAM, 0xeffe); 2615 for_each_pipe(pipe) 2616 I915_WRITE(PIPESTAT(pipe), 0); 2617 I915_WRITE(IMR, 0xffffffff); 2618 I915_WRITE(IER, 0x0); 2619 POSTING_READ(IER); 2620 } 2621 2622 static int i915_irq_postinstall(struct drm_device *dev) 2623 { 2624 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2625 u32 enable_mask; 2626 2627 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); 2628 2629 /* Unmask the interrupts that we always want on. */ 2630 dev_priv->irq_mask = 2631 ~(I915_ASLE_INTERRUPT | 2632 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 2633 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 2634 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 2635 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | 2636 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 2637 2638 enable_mask = 2639 I915_ASLE_INTERRUPT | 2640 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 2641 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 2642 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT | 2643 I915_USER_INTERRUPT; 2644 2645 if (I915_HAS_HOTPLUG(dev)) { 2646 I915_WRITE(PORT_HOTPLUG_EN, 0); 2647 POSTING_READ(PORT_HOTPLUG_EN); 2648 2649 /* Enable in IER... 
*/ 2650 enable_mask |= I915_DISPLAY_PORT_INTERRUPT; 2651 /* and unmask in IMR */ 2652 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT; 2653 } 2654 2655 I915_WRITE(IMR, dev_priv->irq_mask); 2656 I915_WRITE(IER, enable_mask); 2657 POSTING_READ(IER); 2658 2659 i915_enable_asle_pipestat(dev); 2660 2661 return 0; 2662 } 2663 2664 /* 2665 * Returns true when a page flip has completed. 2666 */ 2667 static bool i915_handle_vblank(struct drm_device *dev, 2668 int plane, int pipe, u32 iir) 2669 { 2670 drm_i915_private_t *dev_priv = dev->dev_private; 2671 u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane); 2672 2673 if (!drm_handle_vblank(dev, pipe)) 2674 return false; 2675 2676 if ((iir & flip_pending) == 0) 2677 return false; 2678 2679 intel_prepare_page_flip(dev, plane); 2680 2681 /* We detect FlipDone by looking for the change in PendingFlip from '1' 2682 * to '0' on the following vblank, i.e. IIR has the Pendingflip 2683 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence 2684 * the flip is completed (no longer pending). Since this doesn't raise 2685 * an interrupt per se, we watch for the change at vblank. 2686 */ 2687 if (I915_READ(ISR) & flip_pending) 2688 return false; 2689 2690 intel_finish_page_flip(dev, pipe); 2691 2692 return true; 2693 } 2694 2695 static irqreturn_t i915_irq_handler(int irq, void *arg) 2696 { 2697 struct drm_device *dev = (struct drm_device *) arg; 2698 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2699 u32 iir, new_iir, pipe_stats[I915_MAX_PIPES]; 2700 unsigned long irqflags; 2701 u32 flip_mask = 2702 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 2703 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 2704 int pipe, ret = IRQ_NONE; 2705 2706 atomic_inc(&dev_priv->irq_received); 2707 2708 iir = I915_READ(IIR); 2709 do { 2710 bool irq_received = (iir & ~flip_mask) != 0; 2711 bool blc_event = false; 2712 2713 /* Can't rely on pipestat interrupt bit in iir as it might 2714 * have been cleared after the pipestat interrupt was received. 2715 * It doesn't set the bit in iir again, but it still produces 2716 * interrupts (for non-MSI). 2717 */ 2718 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2719 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 2720 i915_handle_error(dev, false); 2721 2722 for_each_pipe(pipe) { 2723 int reg = PIPESTAT(pipe); 2724 pipe_stats[pipe] = I915_READ(reg); 2725 2726 /* Clear the PIPE*STAT regs before the IIR */ 2727 if (pipe_stats[pipe] & 0x8000ffff) { 2728 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 2729 DRM_DEBUG_DRIVER("pipe %c underrun\n", 2730 pipe_name(pipe)); 2731 I915_WRITE(reg, pipe_stats[pipe]); 2732 irq_received = true; 2733 } 2734 } 2735 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2736 2737 if (!irq_received) 2738 break; 2739 2740 /* Consume port. 
Then clear IIR or we'll miss events */ 2741 if ((I915_HAS_HOTPLUG(dev)) && 2742 (iir & I915_DISPLAY_PORT_INTERRUPT)) { 2743 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); 2744 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; 2745 2746 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", 2747 hotplug_status); 2748 2749 intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915); 2750 2751 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); 2752 POSTING_READ(PORT_HOTPLUG_STAT); 2753 } 2754 2755 I915_WRITE(IIR, iir & ~flip_mask); 2756 new_iir = I915_READ(IIR); /* Flush posted writes */ 2757 2758 if (iir & I915_USER_INTERRUPT) 2759 notify_ring(dev, &dev_priv->ring[RCS]); 2760 2761 for_each_pipe(pipe) { 2762 int plane = pipe; 2763 if (IS_MOBILE(dev)) 2764 plane = !plane; 2765 2766 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS && 2767 i915_handle_vblank(dev, plane, pipe, iir)) 2768 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane); 2769 2770 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 2771 blc_event = true; 2772 } 2773 2774 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 2775 intel_opregion_asle_intr(dev); 2776 2777 /* With MSI, interrupts are only generated when iir 2778 * transitions from zero to nonzero. If another bit got 2779 * set while we were handling the existing iir bits, then 2780 * we would never get another interrupt. 2781 * 2782 * This is fine on non-MSI as well, as if we hit this path 2783 * we avoid exiting the interrupt handler only to generate 2784 * another one. 2785 * 2786 * Note that for MSI this could cause a stray interrupt report 2787 * if an interrupt landed in the time between writing IIR and 2788 * the posting read. This should be rare enough to never 2789 * trigger the 99% of 100,000 interrupts test for disabling 2790 * stray interrupts. 2791 */ 2792 ret = IRQ_HANDLED; 2793 iir = new_iir; 2794 } while (iir & ~flip_mask); 2795 2796 i915_update_dri1_breadcrumb(dev); 2797 2798 return ret; 2799 } 2800 2801 static void i915_irq_uninstall(struct drm_device * dev) 2802 { 2803 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2804 int pipe; 2805 2806 del_timer_sync(&dev_priv->hotplug_reenable_timer); 2807 2808 if (I915_HAS_HOTPLUG(dev)) { 2809 I915_WRITE(PORT_HOTPLUG_EN, 0); 2810 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 2811 } 2812 2813 I915_WRITE16(HWSTAM, 0xffff); 2814 for_each_pipe(pipe) { 2815 /* Clear enable bits; then clear status bits */ 2816 I915_WRITE(PIPESTAT(pipe), 0); 2817 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); 2818 } 2819 I915_WRITE(IMR, 0xffffffff); 2820 I915_WRITE(IER, 0x0); 2821 2822 I915_WRITE(IIR, I915_READ(IIR)); 2823 } 2824 2825 static void i965_irq_preinstall(struct drm_device * dev) 2826 { 2827 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2828 int pipe; 2829 2830 atomic_set(&dev_priv->irq_received, 0); 2831 2832 I915_WRITE(PORT_HOTPLUG_EN, 0); 2833 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 2834 2835 I915_WRITE(HWSTAM, 0xeffe); 2836 for_each_pipe(pipe) 2837 I915_WRITE(PIPESTAT(pipe), 0); 2838 I915_WRITE(IMR, 0xffffffff); 2839 I915_WRITE(IER, 0x0); 2840 POSTING_READ(IER); 2841 } 2842 2843 static int i965_irq_postinstall(struct drm_device *dev) 2844 { 2845 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2846 u32 enable_mask; 2847 u32 error_mask; 2848 unsigned long irqflags; 2849 2850 /* Unmask the interrupts that we always want on. 
*/ 2851 dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT | 2852 I915_DISPLAY_PORT_INTERRUPT | 2853 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 2854 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 2855 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 2856 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | 2857 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 2858 2859 enable_mask = ~dev_priv->irq_mask; 2860 enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 2861 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT); 2862 enable_mask |= I915_USER_INTERRUPT; 2863 2864 if (IS_G4X(dev)) 2865 enable_mask |= I915_BSD_USER_INTERRUPT; 2866 2867 /* Interrupt setup is already guaranteed to be single-threaded, this is 2868 * just to make the assert_spin_locked check happy. */ 2869 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2870 i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE); 2871 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2872 2873 /* 2874 * Enable some error detection, note the instruction error mask 2875 * bit is reserved, so we leave it masked. 2876 */ 2877 if (IS_G4X(dev)) { 2878 error_mask = ~(GM45_ERROR_PAGE_TABLE | 2879 GM45_ERROR_MEM_PRIV | 2880 GM45_ERROR_CP_PRIV | 2881 I915_ERROR_MEMORY_REFRESH); 2882 } else { 2883 error_mask = ~(I915_ERROR_PAGE_TABLE | 2884 I915_ERROR_MEMORY_REFRESH); 2885 } 2886 I915_WRITE(EMR, error_mask); 2887 2888 I915_WRITE(IMR, dev_priv->irq_mask); 2889 I915_WRITE(IER, enable_mask); 2890 POSTING_READ(IER); 2891 2892 I915_WRITE(PORT_HOTPLUG_EN, 0); 2893 POSTING_READ(PORT_HOTPLUG_EN); 2894 2895 i915_enable_asle_pipestat(dev); 2896 2897 return 0; 2898 } 2899 2900 static void i915_hpd_irq_setup(struct drm_device *dev) 2901 { 2902 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2903 struct drm_mode_config *mode_config = &dev->mode_config; 2904 struct intel_encoder *intel_encoder; 2905 u32 hotplug_en; 2906 2907 assert_spin_locked(&dev_priv->irq_lock); 2908 2909 if (I915_HAS_HOTPLUG(dev)) { 2910 hotplug_en = I915_READ(PORT_HOTPLUG_EN); 2911 hotplug_en &= ~HOTPLUG_INT_EN_MASK; 2912 /* Note HDMI and DP share hotplug bits */ 2913 /* enable bits are the same for all generations */ 2914 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head) 2915 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) 2916 hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin]; 2917 /* Programming the CRT detection parameters tends 2918 to generate a spurious hotplug event about three 2919 seconds later. So just do it once. 
2920 */ 2921 if (IS_G4X(dev)) 2922 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64; 2923 hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK; 2924 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; 2925 2926 /* Ignore TV since it's buggy */ 2927 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); 2928 } 2929 } 2930 2931 static irqreturn_t i965_irq_handler(int irq, void *arg) 2932 { 2933 struct drm_device *dev = (struct drm_device *) arg; 2934 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2935 u32 iir, new_iir; 2936 u32 pipe_stats[I915_MAX_PIPES]; 2937 unsigned long irqflags; 2938 int irq_received; 2939 int ret = IRQ_NONE, pipe; 2940 u32 flip_mask = 2941 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 2942 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 2943 2944 atomic_inc(&dev_priv->irq_received); 2945 2946 iir = I915_READ(IIR); 2947 2948 for (;;) { 2949 bool blc_event = false; 2950 2951 irq_received = (iir & ~flip_mask) != 0; 2952 2953 /* Can't rely on pipestat interrupt bit in iir as it might 2954 * have been cleared after the pipestat interrupt was received. 2955 * It doesn't set the bit in iir again, but it still produces 2956 * interrupts (for non-MSI). 2957 */ 2958 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2959 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 2960 i915_handle_error(dev, false); 2961 2962 for_each_pipe(pipe) { 2963 int reg = PIPESTAT(pipe); 2964 pipe_stats[pipe] = I915_READ(reg); 2965 2966 /* 2967 * Clear the PIPE*STAT regs before the IIR 2968 */ 2969 if (pipe_stats[pipe] & 0x8000ffff) { 2970 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 2971 DRM_DEBUG_DRIVER("pipe %c underrun\n", 2972 pipe_name(pipe)); 2973 I915_WRITE(reg, pipe_stats[pipe]); 2974 irq_received = 1; 2975 } 2976 } 2977 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2978 2979 if (!irq_received) 2980 break; 2981 2982 ret = IRQ_HANDLED; 2983 2984 /* Consume port. Then clear IIR or we'll miss events */ 2985 if (iir & I915_DISPLAY_PORT_INTERRUPT) { 2986 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); 2987 u32 hotplug_trigger = hotplug_status & (IS_G4X(dev) ? 2988 HOTPLUG_INT_STATUS_G4X : 2989 HOTPLUG_INT_STATUS_I915); 2990 2991 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", 2992 hotplug_status); 2993 2994 intel_hpd_irq_handler(dev, hotplug_trigger, 2995 IS_G4X(dev) ? hpd_status_gen4 : hpd_status_i915); 2996 2997 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); 2998 I915_READ(PORT_HOTPLUG_STAT); 2999 } 3000 3001 I915_WRITE(IIR, iir & ~flip_mask); 3002 new_iir = I915_READ(IIR); /* Flush posted writes */ 3003 3004 if (iir & I915_USER_INTERRUPT) 3005 notify_ring(dev, &dev_priv->ring[RCS]); 3006 if (iir & I915_BSD_USER_INTERRUPT) 3007 notify_ring(dev, &dev_priv->ring[VCS]); 3008 3009 for_each_pipe(pipe) { 3010 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS && 3011 i915_handle_vblank(dev, pipe, pipe, iir)) 3012 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe); 3013 3014 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 3015 blc_event = true; 3016 } 3017 3018 3019 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 3020 intel_opregion_asle_intr(dev); 3021 3022 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 3023 gmbus_irq_handler(dev); 3024 3025 /* With MSI, interrupts are only generated when iir 3026 * transitions from zero to nonzero. If another bit got 3027 * set while we were handling the existing iir bits, then 3028 * we would never get another interrupt. 
3029 * 3030 * This is fine on non-MSI as well, as if we hit this path 3031 * we avoid exiting the interrupt handler only to generate 3032 * another one. 3033 * 3034 * Note that for MSI this could cause a stray interrupt report 3035 * if an interrupt landed in the time between writing IIR and 3036 * the posting read. This should be rare enough to never 3037 * trigger the 99% of 100,000 interrupts test for disabling 3038 * stray interrupts. 3039 */ 3040 iir = new_iir; 3041 } 3042 3043 i915_update_dri1_breadcrumb(dev); 3044 3045 return ret; 3046 } 3047 3048 static void i965_irq_uninstall(struct drm_device * dev) 3049 { 3050 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3051 int pipe; 3052 3053 if (!dev_priv) 3054 return; 3055 3056 del_timer_sync(&dev_priv->hotplug_reenable_timer); 3057 3058 I915_WRITE(PORT_HOTPLUG_EN, 0); 3059 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3060 3061 I915_WRITE(HWSTAM, 0xffffffff); 3062 for_each_pipe(pipe) 3063 I915_WRITE(PIPESTAT(pipe), 0); 3064 I915_WRITE(IMR, 0xffffffff); 3065 I915_WRITE(IER, 0x0); 3066 3067 for_each_pipe(pipe) 3068 I915_WRITE(PIPESTAT(pipe), 3069 I915_READ(PIPESTAT(pipe)) & 0x8000ffff); 3070 I915_WRITE(IIR, I915_READ(IIR)); 3071 } 3072 3073 static void i915_reenable_hotplug_timer_func(unsigned long data) 3074 { 3075 drm_i915_private_t *dev_priv = (drm_i915_private_t *)data; 3076 struct drm_device *dev = dev_priv->dev; 3077 struct drm_mode_config *mode_config = &dev->mode_config; 3078 unsigned long irqflags; 3079 int i; 3080 3081 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3082 for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) { 3083 struct drm_connector *connector; 3084 3085 if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED) 3086 continue; 3087 3088 dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED; 3089 3090 list_for_each_entry(connector, &mode_config->connector_list, head) { 3091 struct intel_connector *intel_connector = to_intel_connector(connector); 3092 3093 if (intel_connector->encoder->hpd_pin == i) { 3094 if (connector->polled != intel_connector->polled) 3095 DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n", 3096 drm_get_connector_name(connector)); 3097 connector->polled = intel_connector->polled; 3098 if (!connector->polled) 3099 connector->polled = DRM_CONNECTOR_POLL_HPD; 3100 } 3101 } 3102 } 3103 if (dev_priv->display.hpd_irq_setup) 3104 dev_priv->display.hpd_irq_setup(dev); 3105 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3106 } 3107 3108 void intel_irq_init(struct drm_device *dev) 3109 { 3110 struct drm_i915_private *dev_priv = dev->dev_private; 3111 3112 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func); 3113 INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func); 3114 INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work); 3115 INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work); 3116 3117 setup_timer(&dev_priv->gpu_error.hangcheck_timer, 3118 i915_hangcheck_elapsed, 3119 (unsigned long) dev); 3120 setup_timer(&dev_priv->hotplug_reenable_timer, i915_reenable_hotplug_timer_func, 3121 (unsigned long) dev_priv); 3122 3123 pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE); 3124 3125 dev->driver->get_vblank_counter = i915_get_vblank_counter; 3126 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ 3127 if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) { 3128 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */ 3129 dev->driver->get_vblank_counter = gm45_get_vblank_counter; 3130 } 3131 3132 if 
(drm_core_check_feature(dev, DRIVER_MODESET)) 3133 dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp; 3134 else 3135 dev->driver->get_vblank_timestamp = NULL; 3136 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos; 3137 3138 if (IS_VALLEYVIEW(dev)) { 3139 dev->driver->irq_handler = valleyview_irq_handler; 3140 dev->driver->irq_preinstall = valleyview_irq_preinstall; 3141 dev->driver->irq_postinstall = valleyview_irq_postinstall; 3142 dev->driver->irq_uninstall = valleyview_irq_uninstall; 3143 dev->driver->enable_vblank = valleyview_enable_vblank; 3144 dev->driver->disable_vblank = valleyview_disable_vblank; 3145 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 3146 } else if (HAS_PCH_SPLIT(dev)) { 3147 dev->driver->irq_handler = ironlake_irq_handler; 3148 dev->driver->irq_preinstall = ironlake_irq_preinstall; 3149 dev->driver->irq_postinstall = ironlake_irq_postinstall; 3150 dev->driver->irq_uninstall = ironlake_irq_uninstall; 3151 dev->driver->enable_vblank = ironlake_enable_vblank; 3152 dev->driver->disable_vblank = ironlake_disable_vblank; 3153 dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup; 3154 } else { 3155 if (INTEL_INFO(dev)->gen == 2) { 3156 dev->driver->irq_preinstall = i8xx_irq_preinstall; 3157 dev->driver->irq_postinstall = i8xx_irq_postinstall; 3158 dev->driver->irq_handler = i8xx_irq_handler; 3159 dev->driver->irq_uninstall = i8xx_irq_uninstall; 3160 } else if (INTEL_INFO(dev)->gen == 3) { 3161 dev->driver->irq_preinstall = i915_irq_preinstall; 3162 dev->driver->irq_postinstall = i915_irq_postinstall; 3163 dev->driver->irq_uninstall = i915_irq_uninstall; 3164 dev->driver->irq_handler = i915_irq_handler; 3165 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 3166 } else { 3167 dev->driver->irq_preinstall = i965_irq_preinstall; 3168 dev->driver->irq_postinstall = i965_irq_postinstall; 3169 dev->driver->irq_uninstall = i965_irq_uninstall; 3170 dev->driver->irq_handler = i965_irq_handler; 3171 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 3172 } 3173 dev->driver->enable_vblank = i915_enable_vblank; 3174 dev->driver->disable_vblank = i915_disable_vblank; 3175 } 3176 } 3177 3178 void intel_hpd_init(struct drm_device *dev) 3179 { 3180 struct drm_i915_private *dev_priv = dev->dev_private; 3181 struct drm_mode_config *mode_config = &dev->mode_config; 3182 struct drm_connector *connector; 3183 unsigned long irqflags; 3184 int i; 3185 3186 for (i = 1; i < HPD_NUM_PINS; i++) { 3187 dev_priv->hpd_stats[i].hpd_cnt = 0; 3188 dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED; 3189 } 3190 list_for_each_entry(connector, &mode_config->connector_list, head) { 3191 struct intel_connector *intel_connector = to_intel_connector(connector); 3192 connector->polled = intel_connector->polled; 3193 if (!connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE) 3194 connector->polled = DRM_CONNECTOR_POLL_HPD; 3195 } 3196 3197 /* Interrupt setup is already guaranteed to be single-threaded, this is 3198 * just to make the assert_spin_locked checks happy. */ 3199 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3200 if (dev_priv->display.hpd_irq_setup) 3201 dev_priv->display.hpd_irq_setup(dev); 3202 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3203 } 3204 3205 /* Disable interrupts so we can allow Package C8+. 
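The current interrupt mask registers are saved in dev_priv->pc8.regsave so that hsw_pc8_restore_interrupts() can check and restore them.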
*/ 3206 void hsw_pc8_disable_interrupts(struct drm_device *dev) 3207 { 3208 struct drm_i915_private *dev_priv = dev->dev_private; 3209 unsigned long irqflags; 3210 3211 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3212 3213 dev_priv->pc8.regsave.deimr = I915_READ(DEIMR); 3214 dev_priv->pc8.regsave.sdeimr = I915_READ(SDEIMR); 3215 dev_priv->pc8.regsave.gtimr = I915_READ(GTIMR); 3216 dev_priv->pc8.regsave.gtier = I915_READ(GTIER); 3217 dev_priv->pc8.regsave.gen6_pmimr = I915_READ(GEN6_PMIMR); 3218 3219 ironlake_disable_display_irq(dev_priv, ~DE_PCH_EVENT_IVB); 3220 ibx_disable_display_interrupt(dev_priv, ~SDE_HOTPLUG_MASK_CPT); 3221 ilk_disable_gt_irq(dev_priv, 0xffffffff); 3222 snb_disable_pm_irq(dev_priv, 0xffffffff); 3223 3224 dev_priv->pc8.irqs_disabled = true; 3225 3226 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3227 } 3228 3229 /* Restore interrupts so we can recover from Package C8+. */ 3230 void hsw_pc8_restore_interrupts(struct drm_device *dev) 3231 { 3232 struct drm_i915_private *dev_priv = dev->dev_private; 3233 unsigned long irqflags; 3234 uint32_t val, expected; 3235 3236 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3237 3238 val = I915_READ(DEIMR); 3239 expected = ~DE_PCH_EVENT_IVB; 3240 WARN(val != expected, "DEIMR is 0x%08x, not 0x%08x\n", val, expected); 3241 3242 val = I915_READ(SDEIMR) & ~SDE_HOTPLUG_MASK_CPT; 3243 expected = ~SDE_HOTPLUG_MASK_CPT; 3244 WARN(val != expected, "SDEIMR non-HPD bits are 0x%08x, not 0x%08x\n", 3245 val, expected); 3246 3247 val = I915_READ(GTIMR); 3248 expected = 0xffffffff; 3249 WARN(val != expected, "GTIMR is 0x%08x, not 0x%08x\n", val, expected); 3250 3251 val = I915_READ(GEN6_PMIMR); 3252 expected = 0xffffffff; 3253 WARN(val != expected, "GEN6_PMIMR is 0x%08x, not 0x%08x\n", val, 3254 expected); 3255 3256 dev_priv->pc8.irqs_disabled = false; 3257 3258 ironlake_enable_display_irq(dev_priv, ~dev_priv->pc8.regsave.deimr); 3259 ibx_enable_display_interrupt(dev_priv, 3260 ~dev_priv->pc8.regsave.sdeimr & 3261 ~SDE_HOTPLUG_MASK_CPT); 3262 ilk_enable_gt_irq(dev_priv, ~dev_priv->pc8.regsave.gtimr); 3263 snb_enable_pm_irq(dev_priv, ~dev_priv->pc8.regsave.gen6_pmimr); 3264 I915_WRITE(GTIER, dev_priv->pc8.regsave.gtier); 3265 3266 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3267 } 3268