/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

static const u32 hpd_ibx[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_mask_i915[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_g4x[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

/* For display hotplug interrupt */
static void
ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->pm.irqs_disabled) {
		WARN(1, "IRQs disabled\n");
		dev_priv->pm.regsave.deimr &= ~mask;
		return;
	}

	if ((dev_priv->irq_mask & mask) != 0) {
		dev_priv->irq_mask &= ~mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

static void
ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->pm.irqs_disabled) {
		WARN(1, "IRQs disabled\n");
		dev_priv->pm.regsave.deimr |= mask;
		return;
	}

	if ((dev_priv->irq_mask & mask) != mask) {
		dev_priv->irq_mask |= mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->pm.irqs_disabled) {
		WARN(1, "IRQs disabled\n");
		dev_priv->pm.regsave.gtimr &= ~interrupt_mask;
		dev_priv->pm.regsave.gtimr |= (~enabled_irq_mask &
					       interrupt_mask);
		return;
	}

	dev_priv->gt_irq_mask &= ~interrupt_mask;
	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	POSTING_READ(GTIMR);
}

void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, mask);
}

void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, 0);
}

/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->pm.irqs_disabled) {
		WARN(1, "IRQs disabled\n");
		dev_priv->pm.regsave.gen6_pmimr &= ~interrupt_mask;
		dev_priv->pm.regsave.gen6_pmimr |= (~enabled_irq_mask &
						    interrupt_mask);
		return;
	}

	new_val = dev_priv->pm_irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_irq_mask) {
		dev_priv->pm_irq_mask = new_val;
		I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask);
		POSTING_READ(GEN6_PMIMR);
	}
}

void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, mask);
}

void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, 0);
}

static bool ivb_can_enable_err_int(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc;
	enum pipe pipe;

	assert_spin_locked(&dev_priv->irq_lock);

	for_each_pipe(pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		if (crtc->cpu_fifo_underrun_disabled)
			return false;
	}

	return true;
}

static bool cpt_can_enable_serr_int(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;
	struct intel_crtc *crtc;

	assert_spin_locked(&dev_priv->irq_lock);

	for_each_pipe(pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		if (crtc->pch_fifo_underrun_disabled)
			return false;
	}

	return true;
}

static void
i9xx_clear_fifo_underrun(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & 0x7fff0000;

	assert_spin_locked(&dev_priv->irq_lock);

	I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
	POSTING_READ(reg);
}

static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
						 enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN :
					  DE_PIPEB_FIFO_UNDERRUN;

	if (enable)
		ironlake_enable_display_irq(dev_priv, bit);
	else
		ironlake_disable_display_irq(dev_priv, bit);
}

static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
						  enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	if (enable) {
		I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));

		if (!ivb_can_enable_err_int(dev))
			return;

		ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
	} else {
		bool was_enabled = !(I915_READ(DEIMR) & DE_ERR_INT_IVB);

		/* Change the state _after_ we've read out the current one. */
		ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);

		if (!was_enabled &&
		    (I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe))) {
			DRM_DEBUG_KMS("uncleared fifo underrun on pipe %c\n",
				      pipe_name(pipe));
		}
	}
}

static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev,
						  enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	assert_spin_locked(&dev_priv->irq_lock);

	if (enable)
		dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_FIFO_UNDERRUN;
	else
		dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_FIFO_UNDERRUN;
	I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
	POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
					 uint32_t interrupt_mask,
					 uint32_t enabled_irq_mask)
{
	uint32_t sdeimr = I915_READ(SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->pm.irqs_disabled &&
	    (interrupt_mask & SDE_HOTPLUG_MASK_CPT)) {
		WARN(1, "IRQs disabled\n");
		dev_priv->pm.regsave.sdeimr &= ~interrupt_mask;
		dev_priv->pm.regsave.sdeimr |= (~enabled_irq_mask &
						interrupt_mask);
		return;
	}

	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}
#define ibx_enable_display_interrupt(dev_priv, bits) \
	ibx_display_interrupt_update((dev_priv), (bits), (bits))
#define ibx_disable_display_interrupt(dev_priv, bits) \
	ibx_display_interrupt_update((dev_priv), (bits), 0)

static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
					    enum transcoder pch_transcoder,
					    bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t bit = (pch_transcoder == TRANSCODER_A) ?
		       SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;

	if (enable)
		ibx_enable_display_interrupt(dev_priv, bit);
	else
		ibx_disable_display_interrupt(dev_priv, bit);
}

static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
					    enum transcoder pch_transcoder,
					    bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (enable) {
		I915_WRITE(SERR_INT,
			   SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));

		if (!cpt_can_enable_serr_int(dev))
			return;

		ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT);
	} else {
		uint32_t tmp = I915_READ(SERR_INT);
		bool was_enabled = !(I915_READ(SDEIMR) & SDE_ERROR_CPT);

		/* Change the state _after_ we've read out the current one. */
		ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT);

		if (!was_enabled &&
		    (tmp & SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder))) {
			DRM_DEBUG_KMS("uncleared pch fifo underrun on pch transcoder %c\n",
				      transcoder_name(pch_transcoder));
		}
	}
}

/**
 * intel_set_cpu_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pipe: pipe
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable CPU fifo underruns for a specific
 * pipe. Notice that on some Gens (e.g. IVB, HSW), disabling FIFO underrun
 * reporting for one pipe may also disable all the other CPU error interrupts
 * for the other pipes, due to the fact that there's just one interrupt
 * mask/enable bit for all the pipes.
 *
 * Returns the previous state of underrun reporting.
 */
bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
					     enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	bool ret;

	assert_spin_locked(&dev_priv->irq_lock);

	ret = !intel_crtc->cpu_fifo_underrun_disabled;

	if (enable == ret)
		goto done;

	intel_crtc->cpu_fifo_underrun_disabled = !enable;

	if (enable && (INTEL_INFO(dev)->gen < 5 || IS_VALLEYVIEW(dev)))
		i9xx_clear_fifo_underrun(dev, pipe);
	else if (IS_GEN5(dev) || IS_GEN6(dev))
		ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
	else if (IS_GEN7(dev))
		ivybridge_set_fifo_underrun_reporting(dev, pipe, enable);
	else if (IS_GEN8(dev))
		broadwell_set_fifo_underrun_reporting(dev, pipe, enable);

done:
	return ret;
}

bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
					   enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	ret = __intel_set_cpu_fifo_underrun_reporting(dev, pipe, enable);
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	return ret;
}

static bool __cpu_fifo_underrun_reporting_enabled(struct drm_device *dev,
						  enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	return !intel_crtc->cpu_fifo_underrun_disabled;
}

/**
 * intel_set_pch_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable PCH fifo underruns for a specific
 * PCH transcoder. Notice that on some PCHs (e.g. CPT/PPT), disabling FIFO
 * underrun reporting for one transcoder may also disable all the other PCH
 * error interrupts for the other transcoders, due to the fact that there's
 * just one interrupt mask/enable bit for all the transcoders.
 *
 * Returns the previous state of underrun reporting.
 */
bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
					   enum transcoder pch_transcoder,
					   bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	unsigned long flags;
	bool ret;

	/*
	 * NOTE: Pre-LPT has a fixed cpu pipe -> pch transcoder mapping, but LPT
	 * has only one pch transcoder A that all pipes can use. To avoid racy
	 * pch transcoder -> pipe lookups from interrupt code simply store the
	 * underrun statistics in crtc A. Since we never expose this anywhere
	 * nor use it outside of the fifo underrun code here using the "wrong"
	 * crtc on LPT won't cause issues.
	 */

	spin_lock_irqsave(&dev_priv->irq_lock, flags);

	ret = !intel_crtc->pch_fifo_underrun_disabled;

	if (enable == ret)
		goto done;

	intel_crtc->pch_fifo_underrun_disabled = !enable;

	if (HAS_PCH_IBX(dev))
		ibx_set_fifo_underrun_reporting(dev, pch_transcoder, enable);
	else
		cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable);

done:
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
	return ret;
}


static void
__i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		       u32 enable_mask, u32 status_mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
			 status_mask & ~PIPESTAT_INT_STATUS_MASK))
		return;

	if ((pipestat & enable_mask) == enable_mask)
		return;

	dev_priv->pipestat_irq_mask[pipe] |= status_mask;

	/* Enable the interrupt, clear any pending status */
	pipestat |= enable_mask | status_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

static void
__i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
			u32 enable_mask, u32 status_mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
			 status_mask & ~PIPESTAT_INT_STATUS_MASK))
		return;

	if ((pipestat & enable_mask) == 0)
		return;

	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;

	pipestat &= ~enable_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
{
	u32 enable_mask = status_mask << 16;

	/*
	 * On pipe A we don't support the PSR interrupt yet, on pipe B the
	 * same bit MBZ.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
		return 0;

	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

	return enable_mask;
}

void
i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		     u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv->dev))
		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

void
i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		      u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv->dev))
		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 */
static void i915_enable_asle_pipestat(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_STATUS);

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		/* Locking is horribly broken here, but whatever. */
		struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

		return intel_crtc->active;
	} else {
		return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
	}
}

static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe)
{
	/* Gen2 doesn't have a hardware frame counter */
	return 0;
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low, pixel, vbl_start;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		struct intel_crtc *intel_crtc =
			to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
		const struct drm_display_mode *mode =
			&intel_crtc->config.adjusted_mode;

		vbl_start = mode->crtc_vblank_start * mode->crtc_htotal;
	} else {
		enum transcoder cpu_transcoder = (enum transcoder) pipe;
		u32 htotal;

		htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1;
		vbl_start = (I915_READ(VBLANK(cpu_transcoder)) & 0x1fff) + 1;

		vbl_start *= htotal;
	}

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low = I915_READ(low_frame);
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * The frame counter increments at beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}

static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int reg = PIPE_FRMCOUNT_GM45(pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	return I915_READ(reg);
}

/* raw reads, only for fast reads of display block, no need for forcewake etc. */
#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))

static bool ilk_pipe_in_vblank_locked(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t status;
	int reg;

	if (INTEL_INFO(dev)->gen >= 8) {
		status = GEN8_PIPE_VBLANK;
		reg = GEN8_DE_PIPE_ISR(pipe);
	} else if (INTEL_INFO(dev)->gen >= 7) {
		status = DE_PIPE_VBLANK_IVB(pipe);
		reg = DEISR;
	} else {
		status = DE_PIPE_VBLANK(pipe);
		reg = DEISR;
	}

	return __raw_i915_read32(dev_priv, reg) & status;
}

static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
				    unsigned int flags, int *vpos, int *hpos,
				    ktime_t *stime, ktime_t *etime)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	const struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
	int position;
	int vbl_start, vbl_end, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;
	unsigned long irqflags;

	if (!intel_crtc->active) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	htotal = mode->crtc_htotal;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

	ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		if (IS_GEN2(dev))
			position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
		else
			position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

		if (HAS_DDI(dev)) {
			/*
			 * On HSW HDMI outputs there seems to be a 2 line
			 * difference, whereas eDP has the normal 1 line
			 * difference that earlier platforms have. External
			 * DP is unknown. For now just check for the 2 line
			 * difference case on all output types on HSW+.
			 *
			 * This might misinterpret the scanline counter being
			 * one line too far along on eDP, but that's less
			 * dangerous than the alternative since that would lead
			 * the vblank timestamp code astray when it sees a
			 * scanline count before vblank_start during a vblank
			 * interrupt.
			 */
			in_vbl = ilk_pipe_in_vblank_locked(dev, pipe);
			if ((in_vbl && (position == vbl_start - 2 ||
					position == vbl_start - 1)) ||
			    (!in_vbl && (position == vbl_end - 2 ||
					 position == vbl_end - 1)))
				position = (position + 2) % vtotal;
		} else if (HAS_PCH_SPLIT(dev)) {
			/*
			 * The scanline counter increments at the leading edge
			 * of hsync, ie. it completely misses the active portion
			 * of the line. Fix up the counter at both edges of vblank
			 * to get a more accurate picture whether we're in vblank
			 * or not.
			 */
			in_vbl = ilk_pipe_in_vblank_locked(dev, pipe);
			if ((in_vbl && position == vbl_start - 1) ||
			    (!in_vbl && position == vbl_end - 1))
				position = (position + 1) % vtotal;
		} else {
			/*
			 * ISR vblank status bits don't work the way we'd want
			 * them to work on non-PCH platforms (for
			 * ilk_pipe_in_vblank_locked()), and there doesn't
			 * appear any other way to determine if we're currently
			 * in vblank.
			 *
			 * Instead let's assume that we're already in vblank if
			 * we got called from the vblank interrupt and the
			 * scanline counter value indicates that we're on the
			 * line just prior to vblank start. This should result
			 * in the correct answer, unless the vblank interrupt
			 * delivery really got delayed for almost exactly one
			 * full frame/field.
			 */
			if (flags & DRM_CALLED_FROM_VBLIRQ &&
			    position == vbl_start - 1) {
				position = (position + 1) % vtotal;

				/* Signal this correction as "applied". */
				ret |= 0x8;
			}
		}
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	in_vbl = position >= vbl_start && position < vbl_end;

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		*vpos = position;
		*hpos = 0;
	} else {
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_INVBL;

	return ret;
}

static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
				     int *max_error,
				     struct timeval *vblank_time,
				     unsigned flags)
{
	struct drm_crtc *crtc;

	if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	if (!crtc->enabled) {
		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags,
						     crtc,
						     &to_intel_crtc(crtc)->config.adjusted_mode);
}

static bool intel_hpd_irq_event(struct drm_device *dev,
				struct drm_connector *connector)
{
	enum drm_connector_status old_status;

	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
	old_status = connector->status;

	connector->status = connector->funcs->detect(connector, false);
	if (old_status == connector->status)
		return false;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
		      connector->base.id,
		      drm_get_connector_name(connector),
		      drm_get_connector_status_name(old_status),
		      drm_get_connector_status_name(connector->status));

	return true;
}

/*
 * Handle hotplug events outside the interrupt handler proper.
 */
#define I915_REENABLE_HOTPLUG_DELAY (2*60*1000)

static void i915_hotplug_work_func(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, hotplug_work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_connector *intel_connector;
	struct intel_encoder *intel_encoder;
	struct drm_connector *connector;
	unsigned long irqflags;
	bool hpd_disabled = false;
	bool changed = false;
	u32 hpd_event_bits;

	/* HPD irq before everything is fully set up. */
	if (!dev_priv->enable_hotplug_processing)
		return;

	mutex_lock(&mode_config->mutex);
	DRM_DEBUG_KMS("running encoder hotplug functions\n");

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	hpd_event_bits = dev_priv->hpd_event_bits;
	dev_priv->hpd_event_bits = 0;
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		intel_encoder = intel_connector->encoder;
		if (intel_encoder->hpd_pin > HPD_NONE &&
		    dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
		    connector->polled == DRM_CONNECTOR_POLL_HPD) {
			DRM_INFO("HPD interrupt storm detected on connector %s: "
				 "switching from hotplug detection to polling\n",
				 drm_get_connector_name(connector));
			dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
			connector->polled = DRM_CONNECTOR_POLL_CONNECT
				| DRM_CONNECTOR_POLL_DISCONNECT;
			hpd_disabled = true;
		}
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
				      drm_get_connector_name(connector), intel_encoder->hpd_pin);
		}
	}
	/* if there were no outputs to poll, poll was disabled,
	 * therefore make sure it's enabled when disabling HPD on
	 * some connectors */
	if (hpd_disabled) {
		drm_kms_helper_poll_enable(dev);
		mod_timer(&dev_priv->hotplug_reenable_timer,
			  jiffies + msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
	}

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		intel_encoder = intel_connector->encoder;
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			if (intel_encoder->hot_plug)
				intel_encoder->hot_plug(intel_encoder);
			if (intel_hpd_irq_event(dev, connector))
				changed = true;
		}
	}
	mutex_unlock(&mode_config->mutex);

	if (changed)
		drm_kms_helper_hotplug_event(dev);
}

static void intel_hpd_irq_uninstall(struct drm_i915_private *dev_priv)
{
	del_timer_sync(&dev_priv->hotplug_reenable_timer);
}

static void ironlake_rps_change_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;

	spin_lock(&mchdev_lock);

	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	spin_unlock(&mchdev_lock);

	return;
}

static void notify_ring(struct drm_device *dev,
			struct intel_ring_buffer *ring)
{
	if (ring->obj == NULL)
		return;

	trace_i915_gem_request_complete(ring);

	wake_up_all(&ring->irq_queue);
	i915_queue_hangcheck(dev);
}

static void gen6_pm_rps_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, rps.work);
	u32 pm_iir;
	int new_delay, adj;

	spin_lock_irq(&dev_priv->irq_lock);
	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	/* Make sure not to corrupt PMIMR state used by ringbuffer code */
	snb_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Make sure we didn't queue anything we're not going to process. */
	WARN_ON(pm_iir & ~dev_priv->pm_rps_events);

	if ((pm_iir & dev_priv->pm_rps_events) == 0)
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	adj = dev_priv->rps.last_adj;
	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else
			adj = 1;
		new_delay = dev_priv->rps.cur_freq + adj;

		/*
		 * For better performance, jump directly
		 * to RPe if we're below it.
		 */
		if (new_delay < dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;
	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
		if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;
		else
			new_delay = dev_priv->rps.min_freq_softlimit;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
		if (adj < 0)
			adj *= 2;
		else
			adj = -1;
		new_delay = dev_priv->rps.cur_freq + adj;
	} else { /* unknown event */
		new_delay = dev_priv->rps.cur_freq;
	}

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	new_delay = clamp_t(int, new_delay,
			    dev_priv->rps.min_freq_softlimit,
			    dev_priv->rps.max_freq_softlimit);

	dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_freq;

	if (IS_VALLEYVIEW(dev_priv->dev))
		valleyview_set_rps(dev_priv->dev, new_delay);
	else
		gen6_set_rps(dev_priv->dev, new_delay);

	mutex_unlock(&dev_priv->rps.hw_lock);
}


/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * the same row is more likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	uint32_t misccpctl;
	unsigned long flags;
	uint8_t slice = 0;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->dev->struct_mutex);

	/* If we've screwed up tracking, just let the interrupt fire again */
	if (WARN_ON(!dev_priv->l3_parity.which_slice))
		goto out;

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		u32 reg;

		slice--;
		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1<<slice);

		reg = GEN7_L3CDERRST1 + (slice * 0x200);

		error_status = I915_READ(reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		POSTING_READ(reg);

		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

		kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj,
				   KOBJ_CHANGE, parity_event);

		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			  slice, row, bank, subbank);

		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
	}

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

out:
	WARN_ON(dev_priv->l3_parity.which_slice);
	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	ilk_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	mutex_unlock(&dev_priv->dev->struct_mutex);
}

static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!HAS_L3_DPF(dev))
		return;

	spin_lock(&dev_priv->irq_lock);
	ilk_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
	spin_unlock(&dev_priv->irq_lock);

	iir &= GT_PARITY_ERROR(dev);
	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
		dev_priv->l3_parity.which_slice |= 1 << 1;

	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
		dev_priv->l3_parity.which_slice |= 1 << 0;

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}

static void ilk_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & ILK_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
}

static void snb_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{

	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
	if (gt_iir & GT_BLT_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[BCS]);

	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
		      GT_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) {
		i915_handle_error(dev, false, "GT error interrupt 0x%08x",
				  gt_iir);
	}

	if (gt_iir & GT_PARITY_ERROR(dev))
		ivybridge_parity_error_irq_handler(dev, gt_iir);
}

static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
				       struct drm_i915_private *dev_priv,
				       u32 master_ctl)
{
	u32 rcs, bcs, vcs;
	uint32_t tmp = 0;
	irqreturn_t ret = IRQ_NONE;

	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
		tmp = I915_READ(GEN8_GT_IIR(0));
		if (tmp) {
			ret = IRQ_HANDLED;
			rcs = tmp >> GEN8_RCS_IRQ_SHIFT;
			bcs = tmp >> GEN8_BCS_IRQ_SHIFT;
			if (rcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, &dev_priv->ring[RCS]);
			if (bcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, &dev_priv->ring[BCS]);
			I915_WRITE(GEN8_GT_IIR(0), tmp);
		} else
			DRM_ERROR("The master control interrupt lied (GT0)!\n");
	}

	if (master_ctl & GEN8_GT_VCS1_IRQ) {
		tmp = I915_READ(GEN8_GT_IIR(1));
		if (tmp) {
			ret = IRQ_HANDLED;
			vcs = tmp >> GEN8_VCS1_IRQ_SHIFT;
			if (vcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, &dev_priv->ring[VCS]);
			I915_WRITE(GEN8_GT_IIR(1), tmp);
		} else
			DRM_ERROR("The master control interrupt lied (GT1)!\n");
	}

	if (master_ctl & GEN8_GT_VECS_IRQ) {
		tmp = I915_READ(GEN8_GT_IIR(3));
		if (tmp) {
			ret = IRQ_HANDLED;
			vcs = tmp >> GEN8_VECS_IRQ_SHIFT;
			if (vcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, &dev_priv->ring[VECS]);
			I915_WRITE(GEN8_GT_IIR(3), tmp);
		} else
			DRM_ERROR("The master control interrupt lied (GT3)!\n");
	}

	return ret;
}

#define HPD_STORM_DETECT_PERIOD 1000
#define HPD_STORM_THRESHOLD 5

static inline void intel_hpd_irq_handler(struct drm_device *dev,
					 u32 hotplug_trigger,
					 const u32 *hpd)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;
	bool storm_detected = false;

	if (!hotplug_trigger)
		return;

	DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
			 hotplug_trigger);

	spin_lock(&dev_priv->irq_lock);
	for (i = 1; i < HPD_NUM_PINS; i++) {

		WARN_ONCE(hpd[i] & hotplug_trigger &&
			  dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED,
			  "Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n",
			  hotplug_trigger, i, hpd[i]);

		if (!(hpd[i] & hotplug_trigger) ||
		    dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
			continue;

		dev_priv->hpd_event_bits |= (1 << i);
		if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
				   dev_priv->hpd_stats[i].hpd_last_jiffies
				   + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
			dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
			dev_priv->hpd_stats[i].hpd_cnt = 0;
			DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", i);
		} else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
			dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
			dev_priv->hpd_event_bits &= ~(1 << i);
			DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
			storm_detected = true;
		} else {
			dev_priv->hpd_stats[i].hpd_cnt++;
			DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", i,
				      dev_priv->hpd_stats[i].hpd_cnt);
		}
	}

	if (storm_detected)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock(&dev_priv->irq_lock);

	/*
	 * Our hotplug handler can grab modeset locks (by calling down into the
	 * fb helpers). Hence it must not be run on our own dev-priv->wq work
	 * queue for otherwise the flush_work in the pageflip code will
	 * deadlock.
	 */
	schedule_work(&dev_priv->hotplug_work);
}

static void gmbus_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

static void dp_aux_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

#if defined(CONFIG_DEBUG_FS)
static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
					 uint32_t crc0, uint32_t crc1,
					 uint32_t crc2, uint32_t crc3,
					 uint32_t crc4)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
	struct intel_pipe_crc_entry *entry;
	int head, tail;

	spin_lock(&pipe_crc->lock);

	if (!pipe_crc->entries) {
		spin_unlock(&pipe_crc->lock);
		DRM_ERROR("spurious interrupt\n");
		return;
	}

	head = pipe_crc->head;
	tail = pipe_crc->tail;

	if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
		spin_unlock(&pipe_crc->lock);
		DRM_ERROR("CRC buffer overflowing\n");
		return;
	}

	entry = &pipe_crc->entries[head];

	entry->frame = dev->driver->get_vblank_counter(dev, pipe);
	entry->crc[0] = crc0;
	entry->crc[1] = crc1;
	entry->crc[2] = crc2;
	entry->crc[3] = crc3;
	entry->crc[4] = crc4;

	head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
	pipe_crc->head = head;

	spin_unlock(&pipe_crc->lock);

	wake_up_interruptible(&pipe_crc->wq);
}
#else
static inline void
display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
			     uint32_t crc0, uint32_t crc1,
			     uint32_t crc2, uint32_t crc3,
			     uint32_t crc4) {}
#endif


static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     0, 0, 0, 0);
}

static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
}

static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t res1, res2;

	if (INTEL_INFO(dev)->gen >= 3)
		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
	else
		res1 = 0;

	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
	else
		res2 = 0;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_RED(pipe)),
				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
				     res1, res2);
}

/* The RPS events need forcewake, so we add them to a work queue and mask their
 * IMR bits until the work is done. Other interrupts can be processed without
 * the work queue. */
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	if (pm_iir & dev_priv->pm_rps_events) {
		spin_lock(&dev_priv->irq_lock);
		dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
		snb_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
		spin_unlock(&dev_priv->irq_lock);

		queue_work(dev_priv->wq, &dev_priv->rps.work);
	}

	if (HAS_VEBOX(dev_priv->dev)) {
		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
			notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);

		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) {
			i915_handle_error(dev_priv->dev, false,
					  "VEBOX CS error interrupt 0x%08x",
					  pm_iir);
		}
	}
}

static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pipe_stats[I915_MAX_PIPES] = { };
	int pipe;

	spin_lock(&dev_priv->irq_lock);
	for_each_pipe(pipe) {
		int reg;
		u32 mask, iir_bit = 0;

		/*
		 * PIPESTAT bits get signalled even when the interrupt is
		 * disabled with the mask bits, and some of the status bits do
		 * not generate interrupts at all (like the underrun bit). Hence
		 * we need to be careful that we only handle what we want to
		 * handle.
		 */
		mask = 0;
		if (__cpu_fifo_underrun_reporting_enabled(dev, pipe))
			mask |= PIPE_FIFO_UNDERRUN_STATUS;

		switch (pipe) {
		case PIPE_A:
			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
			break;
		case PIPE_B:
			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
			break;
		}
		if (iir & iir_bit)
			mask |= dev_priv->pipestat_irq_mask[pipe];

		if (!mask)
			continue;

		reg = PIPESTAT(pipe);
		mask |= PIPESTAT_INT_ENABLE_MASK;
		pipe_stats[pipe] = I915_READ(reg) & mask;

		/*
		 * Clear the PIPE*STAT regs before the IIR
		 */
		if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS |
					PIPESTAT_INT_STATUS_MASK))
			I915_WRITE(reg, pipe_stats[pipe]);
	}
	spin_unlock(&dev_priv->irq_lock);

	for_each_pipe(pipe) {
		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
			drm_handle_vblank(dev, pipe);

		if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) {
			intel_prepare_page_flip(dev, pipe);
			intel_finish_page_flip(dev, pipe);
		}

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
		    intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
			DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
	}

	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		gmbus_irq_handler(dev);
}

static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 iir, gt_iir, pm_iir;
	irqreturn_t ret = IRQ_NONE;

	while (true) {
		iir = I915_READ(VLV_IIR);
		gt_iir = I915_READ(GTIIR);
		pm_iir = I915_READ(GEN6_PMIIR);

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			goto out;

		ret = IRQ_HANDLED;

		snb_gt_irq_handler(dev, dev_priv, gt_iir);

		valleyview_pipestat_irq_handler(dev, iir);

		/* Consume port.  Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
			u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

			intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);

			if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
				dp_aux_irq_handler(dev);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			I915_READ(PORT_HOTPLUG_STAT);
		}


		if (pm_iir)
			gen6_rps_irq_handler(dev_priv, pm_iir);

		I915_WRITE(GTIIR, gt_iir);
		I915_WRITE(GEN6_PMIIR, pm_iir);
		I915_WRITE(VLV_IIR, iir);
	}

out:
	return ret;
}

static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;

	intel_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx);

	if (pch_iir & SDE_AUDIO_POWER_MASK) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
			       SDE_AUDIO_POWER_SHIFT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
				 port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_HDCP_MASK)
		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");

	if (pch_iir & SDE_AUDIO_TRANS_MASK)
		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");

	if (pch_iir & SDE_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (pch_iir & SDE_FDI_MASK)
		for_each_pipe(pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");

	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");

	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
							  false))
			DRM_ERROR("PCH transcoder A FIFO underrun\n");

	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
							  false))
			DRM_ERROR("PCH transcoder B FIFO underrun\n");
}

static void ivb_err_int_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 err_int = I915_READ(GEN7_ERR_INT);
	enum pipe pipe;

	if (err_int & ERR_INT_POISON)
		DRM_ERROR("Poison interrupt\n");

	for_each_pipe(pipe) {
		if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) {
			if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
								  false))
				DRM_ERROR("Pipe %c FIFO underrun\n",
					  pipe_name(pipe));
		}

		if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
			if (IS_IVYBRIDGE(dev))
				ivb_pipe_crc_irq_handler(dev, pipe);
			else
				hsw_pipe_crc_irq_handler(dev, pipe);
		}
	}

	I915_WRITE(GEN7_ERR_INT, err_int);
}

static void cpt_serr_int_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 serr_int = I915_READ(SERR_INT);

	if (serr_int & SERR_INT_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
							  false))
DRM_ERROR("PCH transcoder A FIFO underrun\n"); 1753 1754 if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN) 1755 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B, 1756 false)) 1757 DRM_ERROR("PCH transcoder B FIFO underrun\n"); 1758 1759 if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN) 1760 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_C, 1761 false)) 1762 DRM_ERROR("PCH transcoder C FIFO underrun\n"); 1763 1764 I915_WRITE(SERR_INT, serr_int); 1765 } 1766 1767 static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir) 1768 { 1769 struct drm_i915_private *dev_priv = dev->dev_private; 1770 int pipe; 1771 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; 1772 1773 intel_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt); 1774 1775 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) { 1776 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> 1777 SDE_AUDIO_POWER_SHIFT_CPT); 1778 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n", 1779 port_name(port)); 1780 } 1781 1782 if (pch_iir & SDE_AUX_MASK_CPT) 1783 dp_aux_irq_handler(dev); 1784 1785 if (pch_iir & SDE_GMBUS_CPT) 1786 gmbus_irq_handler(dev); 1787 1788 if (pch_iir & SDE_AUDIO_CP_REQ_CPT) 1789 DRM_DEBUG_DRIVER("Audio CP request interrupt\n"); 1790 1791 if (pch_iir & SDE_AUDIO_CP_CHG_CPT) 1792 DRM_DEBUG_DRIVER("Audio CP change interrupt\n"); 1793 1794 if (pch_iir & SDE_FDI_MASK_CPT) 1795 for_each_pipe(pipe) 1796 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 1797 pipe_name(pipe), 1798 I915_READ(FDI_RX_IIR(pipe))); 1799 1800 if (pch_iir & SDE_ERROR_CPT) 1801 cpt_serr_int_handler(dev); 1802 } 1803 1804 static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir) 1805 { 1806 struct drm_i915_private *dev_priv = dev->dev_private; 1807 enum pipe pipe; 1808 1809 if (de_iir & DE_AUX_CHANNEL_A) 1810 dp_aux_irq_handler(dev); 1811 1812 if (de_iir & DE_GSE) 1813 intel_opregion_asle_intr(dev); 1814 1815 if (de_iir & DE_POISON) 1816 DRM_ERROR("Poison interrupt\n"); 1817 1818 for_each_pipe(pipe) { 1819 if (de_iir & DE_PIPE_VBLANK(pipe)) 1820 drm_handle_vblank(dev, pipe); 1821 1822 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe)) 1823 if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, false)) 1824 DRM_ERROR("Pipe %c FIFO underrun\n", 1825 pipe_name(pipe)); 1826 1827 if (de_iir & DE_PIPE_CRC_DONE(pipe)) 1828 i9xx_pipe_crc_irq_handler(dev, pipe); 1829 1830 /* plane/pipes map 1:1 on ilk+ */ 1831 if (de_iir & DE_PLANE_FLIP_DONE(pipe)) { 1832 intel_prepare_page_flip(dev, pipe); 1833 intel_finish_page_flip_plane(dev, pipe); 1834 } 1835 } 1836 1837 /* check event from PCH */ 1838 if (de_iir & DE_PCH_EVENT) { 1839 u32 pch_iir = I915_READ(SDEIIR); 1840 1841 if (HAS_PCH_CPT(dev)) 1842 cpt_irq_handler(dev, pch_iir); 1843 else 1844 ibx_irq_handler(dev, pch_iir); 1845 1846 /* should clear PCH hotplug event before clear CPU irq */ 1847 I915_WRITE(SDEIIR, pch_iir); 1848 } 1849 1850 if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT) 1851 ironlake_rps_change_irq_handler(dev); 1852 } 1853 1854 static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir) 1855 { 1856 struct drm_i915_private *dev_priv = dev->dev_private; 1857 enum pipe pipe; 1858 1859 if (de_iir & DE_ERR_INT_IVB) 1860 ivb_err_int_handler(dev); 1861 1862 if (de_iir & DE_AUX_CHANNEL_A_IVB) 1863 dp_aux_irq_handler(dev); 1864 1865 if (de_iir & DE_GSE_IVB) 1866 intel_opregion_asle_intr(dev); 1867 1868 for_each_pipe(pipe) { 1869 if (de_iir & (DE_PIPE_VBLANK_IVB(pipe))) 1870 drm_handle_vblank(dev, pipe); 1871 1872 /* plane/pipes map 1:1 on ilk+ */ 1873 if (de_iir & 
DE_PLANE_FLIP_DONE_IVB(pipe)) { 1874 intel_prepare_page_flip(dev, pipe); 1875 intel_finish_page_flip_plane(dev, pipe); 1876 } 1877 } 1878 1879 /* check event from PCH */ 1880 if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) { 1881 u32 pch_iir = I915_READ(SDEIIR); 1882 1883 cpt_irq_handler(dev, pch_iir); 1884 1885 /* clear PCH hotplug event before clear CPU irq */ 1886 I915_WRITE(SDEIIR, pch_iir); 1887 } 1888 } 1889 1890 static irqreturn_t ironlake_irq_handler(int irq, void *arg) 1891 { 1892 struct drm_device *dev = (struct drm_device *) arg; 1893 struct drm_i915_private *dev_priv = dev->dev_private; 1894 u32 de_iir, gt_iir, de_ier, sde_ier = 0; 1895 irqreturn_t ret = IRQ_NONE; 1896 1897 /* We get interrupts on unclaimed registers, so check for this before we 1898 * do any I915_{READ,WRITE}. */ 1899 intel_uncore_check_errors(dev); 1900 1901 /* disable master interrupt before clearing iir */ 1902 de_ier = I915_READ(DEIER); 1903 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); 1904 POSTING_READ(DEIER); 1905 1906 /* Disable south interrupts. We'll only write to SDEIIR once, so further 1907 * interrupts will will be stored on its back queue, and then we'll be 1908 * able to process them after we restore SDEIER (as soon as we restore 1909 * it, we'll get an interrupt if SDEIIR still has something to process 1910 * due to its back queue). */ 1911 if (!HAS_PCH_NOP(dev)) { 1912 sde_ier = I915_READ(SDEIER); 1913 I915_WRITE(SDEIER, 0); 1914 POSTING_READ(SDEIER); 1915 } 1916 1917 gt_iir = I915_READ(GTIIR); 1918 if (gt_iir) { 1919 if (INTEL_INFO(dev)->gen >= 6) 1920 snb_gt_irq_handler(dev, dev_priv, gt_iir); 1921 else 1922 ilk_gt_irq_handler(dev, dev_priv, gt_iir); 1923 I915_WRITE(GTIIR, gt_iir); 1924 ret = IRQ_HANDLED; 1925 } 1926 1927 de_iir = I915_READ(DEIIR); 1928 if (de_iir) { 1929 if (INTEL_INFO(dev)->gen >= 7) 1930 ivb_display_irq_handler(dev, de_iir); 1931 else 1932 ilk_display_irq_handler(dev, de_iir); 1933 I915_WRITE(DEIIR, de_iir); 1934 ret = IRQ_HANDLED; 1935 } 1936 1937 if (INTEL_INFO(dev)->gen >= 6) { 1938 u32 pm_iir = I915_READ(GEN6_PMIIR); 1939 if (pm_iir) { 1940 gen6_rps_irq_handler(dev_priv, pm_iir); 1941 I915_WRITE(GEN6_PMIIR, pm_iir); 1942 ret = IRQ_HANDLED; 1943 } 1944 } 1945 1946 I915_WRITE(DEIER, de_ier); 1947 POSTING_READ(DEIER); 1948 if (!HAS_PCH_NOP(dev)) { 1949 I915_WRITE(SDEIER, sde_ier); 1950 POSTING_READ(SDEIER); 1951 } 1952 1953 return ret; 1954 } 1955 1956 static irqreturn_t gen8_irq_handler(int irq, void *arg) 1957 { 1958 struct drm_device *dev = arg; 1959 struct drm_i915_private *dev_priv = dev->dev_private; 1960 u32 master_ctl; 1961 irqreturn_t ret = IRQ_NONE; 1962 uint32_t tmp = 0; 1963 enum pipe pipe; 1964 1965 master_ctl = I915_READ(GEN8_MASTER_IRQ); 1966 master_ctl &= ~GEN8_MASTER_IRQ_CONTROL; 1967 if (!master_ctl) 1968 return IRQ_NONE; 1969 1970 I915_WRITE(GEN8_MASTER_IRQ, 0); 1971 POSTING_READ(GEN8_MASTER_IRQ); 1972 1973 ret = gen8_gt_irq_handler(dev, dev_priv, master_ctl); 1974 1975 if (master_ctl & GEN8_DE_MISC_IRQ) { 1976 tmp = I915_READ(GEN8_DE_MISC_IIR); 1977 if (tmp & GEN8_DE_MISC_GSE) 1978 intel_opregion_asle_intr(dev); 1979 else if (tmp) 1980 DRM_ERROR("Unexpected DE Misc interrupt\n"); 1981 else 1982 DRM_ERROR("The master control interrupt lied (DE MISC)!\n"); 1983 1984 if (tmp) { 1985 I915_WRITE(GEN8_DE_MISC_IIR, tmp); 1986 ret = IRQ_HANDLED; 1987 } 1988 } 1989 1990 if (master_ctl & GEN8_DE_PORT_IRQ) { 1991 tmp = I915_READ(GEN8_DE_PORT_IIR); 1992 if (tmp & GEN8_AUX_CHANNEL_A) 1993 dp_aux_irq_handler(dev); 1994 else if (tmp) 1995 
DRM_ERROR("Unexpected DE Port interrupt\n"); 1996 else 1997 DRM_ERROR("The master control interrupt lied (DE PORT)!\n"); 1998 1999 if (tmp) { 2000 I915_WRITE(GEN8_DE_PORT_IIR, tmp); 2001 ret = IRQ_HANDLED; 2002 } 2003 } 2004 2005 for_each_pipe(pipe) { 2006 uint32_t pipe_iir; 2007 2008 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe))) 2009 continue; 2010 2011 pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe)); 2012 if (pipe_iir & GEN8_PIPE_VBLANK) 2013 drm_handle_vblank(dev, pipe); 2014 2015 if (pipe_iir & GEN8_PIPE_FLIP_DONE) { 2016 intel_prepare_page_flip(dev, pipe); 2017 intel_finish_page_flip_plane(dev, pipe); 2018 } 2019 2020 if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE) 2021 hsw_pipe_crc_irq_handler(dev, pipe); 2022 2023 if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN) { 2024 if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, 2025 false)) 2026 DRM_ERROR("Pipe %c FIFO underrun\n", 2027 pipe_name(pipe)); 2028 } 2029 2030 if (pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS) { 2031 DRM_ERROR("Fault errors on pipe %c\n: 0x%08x", 2032 pipe_name(pipe), 2033 pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS); 2034 } 2035 2036 if (pipe_iir) { 2037 ret = IRQ_HANDLED; 2038 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir); 2039 } else 2040 DRM_ERROR("The master control interrupt lied (DE PIPE)!\n"); 2041 } 2042 2043 if (!HAS_PCH_NOP(dev) && master_ctl & GEN8_DE_PCH_IRQ) { 2044 /* 2045 * FIXME(BDW): Assume for now that the new interrupt handling 2046 * scheme also closed the SDE interrupt handling race we've seen 2047 * on older pch-split platforms. But this needs testing. 2048 */ 2049 u32 pch_iir = I915_READ(SDEIIR); 2050 2051 cpt_irq_handler(dev, pch_iir); 2052 2053 if (pch_iir) { 2054 I915_WRITE(SDEIIR, pch_iir); 2055 ret = IRQ_HANDLED; 2056 } 2057 } 2058 2059 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 2060 POSTING_READ(GEN8_MASTER_IRQ); 2061 2062 return ret; 2063 } 2064 2065 static void i915_error_wake_up(struct drm_i915_private *dev_priv, 2066 bool reset_completed) 2067 { 2068 struct intel_ring_buffer *ring; 2069 int i; 2070 2071 /* 2072 * Notify all waiters for GPU completion events that reset state has 2073 * been changed, and that they need to restart their wait after 2074 * checking for potential errors (and bail out to drop locks if there is 2075 * a gpu reset pending so that i915_error_work_func can acquire them). 2076 */ 2077 2078 /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */ 2079 for_each_ring(ring, dev_priv, i) 2080 wake_up_all(&ring->irq_queue); 2081 2082 /* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */ 2083 wake_up_all(&dev_priv->pending_flip_queue); 2084 2085 /* 2086 * Signal tasks blocked in i915_gem_wait_for_error that the pending 2087 * reset state is cleared. 2088 */ 2089 if (reset_completed) 2090 wake_up_all(&dev_priv->gpu_error.reset_queue); 2091 } 2092 2093 /** 2094 * i915_error_work_func - do process context error handling work 2095 * @work: work struct 2096 * 2097 * Fire an error uevent so userspace can see that a hang or error 2098 * was detected. 
2099 */ 2100 static void i915_error_work_func(struct work_struct *work) 2101 { 2102 struct i915_gpu_error *error = container_of(work, struct i915_gpu_error, 2103 work); 2104 struct drm_i915_private *dev_priv = 2105 container_of(error, struct drm_i915_private, gpu_error); 2106 struct drm_device *dev = dev_priv->dev; 2107 char *error_event[] = { I915_ERROR_UEVENT "=1", NULL }; 2108 char *reset_event[] = { I915_RESET_UEVENT "=1", NULL }; 2109 char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL }; 2110 int ret; 2111 2112 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event); 2113 2114 /* 2115 * Note that there's only one work item which does gpu resets, so we 2116 * need not worry about concurrent gpu resets potentially incrementing 2117 * error->reset_counter twice. We only need to take care of another 2118 * racing irq/hangcheck declaring the gpu dead for a second time. A 2119 * quick check for that is good enough: schedule_work ensures the 2120 * correct ordering between hang detection and this work item, and since 2121 * the reset in-progress bit is only ever set by code outside of this 2122 * work we don't need to worry about any other races. 2123 */ 2124 if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) { 2125 DRM_DEBUG_DRIVER("resetting chip\n"); 2126 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, 2127 reset_event); 2128 2129 /* 2130 * All state reset _must_ be completed before we update the 2131 * reset counter, for otherwise waiters might miss the reset 2132 * pending state and not properly drop locks, resulting in 2133 * deadlocks with the reset work. 2134 */ 2135 ret = i915_reset(dev); 2136 2137 intel_display_handle_reset(dev); 2138 2139 if (ret == 0) { 2140 /* 2141 * After all the gem state is reset, increment the reset 2142 * counter and wake up everyone waiting for the reset to 2143 * complete. 2144 * 2145 * Since unlock operations are a one-sided barrier only, 2146 * we need to insert a barrier here to order any seqno 2147 * updates before 2148 * the counter increment. 2149 */ 2150 smp_mb__before_atomic_inc(); 2151 atomic_inc(&dev_priv->gpu_error.reset_counter); 2152 2153 kobject_uevent_env(&dev->primary->kdev->kobj, 2154 KOBJ_CHANGE, reset_done_event); 2155 } else { 2156 atomic_set_mask(I915_WEDGED, &error->reset_counter); 2157 } 2158 2159 /* 2160 * Note: The wake_up also serves as a memory barrier so that 2161 * waiters see the update value of the reset counter atomic_t. 
2162 */ 2163 i915_error_wake_up(dev_priv, true); 2164 } 2165 } 2166 2167 static void i915_report_and_clear_eir(struct drm_device *dev) 2168 { 2169 struct drm_i915_private *dev_priv = dev->dev_private; 2170 uint32_t instdone[I915_NUM_INSTDONE_REG]; 2171 u32 eir = I915_READ(EIR); 2172 int pipe, i; 2173 2174 if (!eir) 2175 return; 2176 2177 pr_err("render error detected, EIR: 0x%08x\n", eir); 2178 2179 i915_get_extra_instdone(dev, instdone); 2180 2181 if (IS_G4X(dev)) { 2182 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) { 2183 u32 ipeir = I915_READ(IPEIR_I965); 2184 2185 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); 2186 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); 2187 for (i = 0; i < ARRAY_SIZE(instdone); i++) 2188 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); 2189 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); 2190 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); 2191 I915_WRITE(IPEIR_I965, ipeir); 2192 POSTING_READ(IPEIR_I965); 2193 } 2194 if (eir & GM45_ERROR_PAGE_TABLE) { 2195 u32 pgtbl_err = I915_READ(PGTBL_ER); 2196 pr_err("page table error\n"); 2197 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err); 2198 I915_WRITE(PGTBL_ER, pgtbl_err); 2199 POSTING_READ(PGTBL_ER); 2200 } 2201 } 2202 2203 if (!IS_GEN2(dev)) { 2204 if (eir & I915_ERROR_PAGE_TABLE) { 2205 u32 pgtbl_err = I915_READ(PGTBL_ER); 2206 pr_err("page table error\n"); 2207 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err); 2208 I915_WRITE(PGTBL_ER, pgtbl_err); 2209 POSTING_READ(PGTBL_ER); 2210 } 2211 } 2212 2213 if (eir & I915_ERROR_MEMORY_REFRESH) { 2214 pr_err("memory refresh error:\n"); 2215 for_each_pipe(pipe) 2216 pr_err("pipe %c stat: 0x%08x\n", 2217 pipe_name(pipe), I915_READ(PIPESTAT(pipe))); 2218 /* pipestat has already been acked */ 2219 } 2220 if (eir & I915_ERROR_INSTRUCTION) { 2221 pr_err("instruction error\n"); 2222 pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM)); 2223 for (i = 0; i < ARRAY_SIZE(instdone); i++) 2224 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); 2225 if (INTEL_INFO(dev)->gen < 4) { 2226 u32 ipeir = I915_READ(IPEIR); 2227 2228 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR)); 2229 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR)); 2230 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD)); 2231 I915_WRITE(IPEIR, ipeir); 2232 POSTING_READ(IPEIR); 2233 } else { 2234 u32 ipeir = I915_READ(IPEIR_I965); 2235 2236 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); 2237 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); 2238 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); 2239 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); 2240 I915_WRITE(IPEIR_I965, ipeir); 2241 POSTING_READ(IPEIR_I965); 2242 } 2243 } 2244 2245 I915_WRITE(EIR, eir); 2246 POSTING_READ(EIR); 2247 eir = I915_READ(EIR); 2248 if (eir) { 2249 /* 2250 * some errors might have become stuck, 2251 * mask them. 2252 */ 2253 DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir); 2254 I915_WRITE(EMR, I915_READ(EMR) | eir); 2255 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 2256 } 2257 } 2258 2259 /** 2260 * i915_handle_error - handle an error interrupt 2261 * @dev: drm device 2262 * 2263 * Do some basic checking of regsiter state at error interrupt time and 2264 * dump it to the syslog. Also call i915_capture_error_state() to make 2265 * sure we get a record and make it available in debugfs. Fire a uevent 2266 * so userspace knows something bad happened (should trigger collection 2267 * of a ring dump etc.). 2268 */ 2269 void i915_handle_error(struct drm_device *dev, bool wedged, 2270 const char *fmt, ...) 
2271 { 2272 struct drm_i915_private *dev_priv = dev->dev_private; 2273 va_list args; 2274 char error_msg[80]; 2275 2276 va_start(args, fmt); 2277 vscnprintf(error_msg, sizeof(error_msg), fmt, args); 2278 va_end(args); 2279 2280 i915_capture_error_state(dev, wedged, error_msg); 2281 i915_report_and_clear_eir(dev); 2282 2283 if (wedged) { 2284 atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG, 2285 &dev_priv->gpu_error.reset_counter); 2286 2287 /* 2288 * Wakeup waiting processes so that the reset work function 2289 * i915_error_work_func doesn't deadlock trying to grab various 2290 * locks. By bumping the reset counter first, the woken 2291 * processes will see a reset in progress and back off, 2292 * releasing their locks and then wait for the reset completion. 2293 * We must do this for _all_ gpu waiters that might hold locks 2294 * that the reset work needs to acquire. 2295 * 2296 * Note: The wake_up serves as the required memory barrier to 2297 * ensure that the waiters see the updated value of the reset 2298 * counter atomic_t. 2299 */ 2300 i915_error_wake_up(dev_priv, false); 2301 } 2302 2303 /* 2304 * Our reset work can grab modeset locks (since it needs to reset the 2305 * state of outstanding pagelips). Hence it must not be run on our own 2306 * dev-priv->wq work queue for otherwise the flush_work in the pageflip 2307 * code will deadlock. 2308 */ 2309 schedule_work(&dev_priv->gpu_error.work); 2310 } 2311 2312 static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe) 2313 { 2314 struct drm_i915_private *dev_priv = dev->dev_private; 2315 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 2316 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2317 struct drm_i915_gem_object *obj; 2318 struct intel_unpin_work *work; 2319 unsigned long flags; 2320 bool stall_detected; 2321 2322 /* Ignore early vblank irqs */ 2323 if (intel_crtc == NULL) 2324 return; 2325 2326 spin_lock_irqsave(&dev->event_lock, flags); 2327 work = intel_crtc->unpin_work; 2328 2329 if (work == NULL || 2330 atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE || 2331 !work->enable_stall_check) { 2332 /* Either the pending flip IRQ arrived, or we're too early. 
Don't check */ 2333 spin_unlock_irqrestore(&dev->event_lock, flags); 2334 return; 2335 } 2336 2337 /* Potential stall - if we see that the flip has happened, assume a missed interrupt */ 2338 obj = work->pending_flip_obj; 2339 if (INTEL_INFO(dev)->gen >= 4) { 2340 int dspsurf = DSPSURF(intel_crtc->plane); 2341 stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) == 2342 i915_gem_obj_ggtt_offset(obj); 2343 } else { 2344 int dspaddr = DSPADDR(intel_crtc->plane); 2345 stall_detected = I915_READ(dspaddr) == (i915_gem_obj_ggtt_offset(obj) + 2346 crtc->y * crtc->primary->fb->pitches[0] + 2347 crtc->x * crtc->primary->fb->bits_per_pixel/8); 2348 } 2349 2350 spin_unlock_irqrestore(&dev->event_lock, flags); 2351 2352 if (stall_detected) { 2353 DRM_DEBUG_DRIVER("Pageflip stall detected\n"); 2354 intel_prepare_page_flip(dev, intel_crtc->plane); 2355 } 2356 } 2357 2358 /* Called from drm generic code, passed 'crtc' which 2359 * we use as a pipe index 2360 */ 2361 static int i915_enable_vblank(struct drm_device *dev, int pipe) 2362 { 2363 struct drm_i915_private *dev_priv = dev->dev_private; 2364 unsigned long irqflags; 2365 2366 if (!i915_pipe_enabled(dev, pipe)) 2367 return -EINVAL; 2368 2369 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2370 if (INTEL_INFO(dev)->gen >= 4) 2371 i915_enable_pipestat(dev_priv, pipe, 2372 PIPE_START_VBLANK_INTERRUPT_STATUS); 2373 else 2374 i915_enable_pipestat(dev_priv, pipe, 2375 PIPE_VBLANK_INTERRUPT_STATUS); 2376 2377 /* maintain vblank delivery even in deep C-states */ 2378 if (INTEL_INFO(dev)->gen == 3) 2379 I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS)); 2380 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2381 2382 return 0; 2383 } 2384 2385 static int ironlake_enable_vblank(struct drm_device *dev, int pipe) 2386 { 2387 struct drm_i915_private *dev_priv = dev->dev_private; 2388 unsigned long irqflags; 2389 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? 
DE_PIPE_VBLANK_IVB(pipe) : 2390 DE_PIPE_VBLANK(pipe); 2391 2392 if (!i915_pipe_enabled(dev, pipe)) 2393 return -EINVAL; 2394 2395 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2396 ironlake_enable_display_irq(dev_priv, bit); 2397 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2398 2399 return 0; 2400 } 2401 2402 static int valleyview_enable_vblank(struct drm_device *dev, int pipe) 2403 { 2404 struct drm_i915_private *dev_priv = dev->dev_private; 2405 unsigned long irqflags; 2406 2407 if (!i915_pipe_enabled(dev, pipe)) 2408 return -EINVAL; 2409 2410 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2411 i915_enable_pipestat(dev_priv, pipe, 2412 PIPE_START_VBLANK_INTERRUPT_STATUS); 2413 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2414 2415 return 0; 2416 } 2417 2418 static int gen8_enable_vblank(struct drm_device *dev, int pipe) 2419 { 2420 struct drm_i915_private *dev_priv = dev->dev_private; 2421 unsigned long irqflags; 2422 2423 if (!i915_pipe_enabled(dev, pipe)) 2424 return -EINVAL; 2425 2426 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2427 dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK; 2428 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); 2429 POSTING_READ(GEN8_DE_PIPE_IMR(pipe)); 2430 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2431 return 0; 2432 } 2433 2434 /* Called from drm generic code, passed 'crtc' which 2435 * we use as a pipe index 2436 */ 2437 static void i915_disable_vblank(struct drm_device *dev, int pipe) 2438 { 2439 struct drm_i915_private *dev_priv = dev->dev_private; 2440 unsigned long irqflags; 2441 2442 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2443 if (INTEL_INFO(dev)->gen == 3) 2444 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS)); 2445 2446 i915_disable_pipestat(dev_priv, pipe, 2447 PIPE_VBLANK_INTERRUPT_STATUS | 2448 PIPE_START_VBLANK_INTERRUPT_STATUS); 2449 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2450 } 2451 2452 static void ironlake_disable_vblank(struct drm_device *dev, int pipe) 2453 { 2454 struct drm_i915_private *dev_priv = dev->dev_private; 2455 unsigned long irqflags; 2456 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? 
DE_PIPE_VBLANK_IVB(pipe) : 2457 DE_PIPE_VBLANK(pipe); 2458 2459 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2460 ironlake_disable_display_irq(dev_priv, bit); 2461 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2462 } 2463 2464 static void valleyview_disable_vblank(struct drm_device *dev, int pipe) 2465 { 2466 struct drm_i915_private *dev_priv = dev->dev_private; 2467 unsigned long irqflags; 2468 2469 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2470 i915_disable_pipestat(dev_priv, pipe, 2471 PIPE_START_VBLANK_INTERRUPT_STATUS); 2472 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2473 } 2474 2475 static void gen8_disable_vblank(struct drm_device *dev, int pipe) 2476 { 2477 struct drm_i915_private *dev_priv = dev->dev_private; 2478 unsigned long irqflags; 2479 2480 if (!i915_pipe_enabled(dev, pipe)) 2481 return; 2482 2483 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2484 dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK; 2485 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); 2486 POSTING_READ(GEN8_DE_PIPE_IMR(pipe)); 2487 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2488 } 2489 2490 static u32 2491 ring_last_seqno(struct intel_ring_buffer *ring) 2492 { 2493 return list_entry(ring->request_list.prev, 2494 struct drm_i915_gem_request, list)->seqno; 2495 } 2496 2497 static bool 2498 ring_idle(struct intel_ring_buffer *ring, u32 seqno) 2499 { 2500 return (list_empty(&ring->request_list) || 2501 i915_seqno_passed(seqno, ring_last_seqno(ring))); 2502 } 2503 2504 static struct intel_ring_buffer * 2505 semaphore_waits_for(struct intel_ring_buffer *ring, u32 *seqno) 2506 { 2507 struct drm_i915_private *dev_priv = ring->dev->dev_private; 2508 u32 cmd, ipehr, head; 2509 int i; 2510 2511 ipehr = I915_READ(RING_IPEHR(ring->mmio_base)); 2512 if ((ipehr & ~(0x3 << 16)) != 2513 (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | MI_SEMAPHORE_REGISTER)) 2514 return NULL; 2515 2516 /* 2517 * HEAD is likely pointing to the dword after the actual command, 2518 * so scan backwards until we find the MBOX. But limit it to just 3 2519 * dwords. Note that we don't care about ACTHD here since that might 2520 * point at at batch, and semaphores are always emitted into the 2521 * ringbuffer itself. 2522 */ 2523 head = I915_READ_HEAD(ring) & HEAD_ADDR; 2524 2525 for (i = 4; i; --i) { 2526 /* 2527 * Be paranoid and presume the hw has gone off into the wild - 2528 * our ring is smaller than what the hardware (and hence 2529 * HEAD_ADDR) allows. Also handles wrap-around. 
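 * (Masking the offset with ring->size - 1 below keeps every probe inside
 * our own ring buffer, which is what makes the backwards scan and the
 * wrap-around handling safe.)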
2530 */ 2531 head &= ring->size - 1; 2532 2533 /* This here seems to blow up */ 2534 cmd = ioread32(ring->virtual_start + head); 2535 if (cmd == ipehr) 2536 break; 2537 2538 head -= 4; 2539 } 2540 2541 if (!i) 2542 return NULL; 2543 2544 *seqno = ioread32(ring->virtual_start + head + 4) + 1; 2545 return &dev_priv->ring[(ring->id + (((ipehr >> 17) & 1) + 1)) % 3]; 2546 } 2547 2548 static int semaphore_passed(struct intel_ring_buffer *ring) 2549 { 2550 struct drm_i915_private *dev_priv = ring->dev->dev_private; 2551 struct intel_ring_buffer *signaller; 2552 u32 seqno, ctl; 2553 2554 ring->hangcheck.deadlock = true; 2555 2556 signaller = semaphore_waits_for(ring, &seqno); 2557 if (signaller == NULL || signaller->hangcheck.deadlock) 2558 return -1; 2559 2560 /* cursory check for an unkickable deadlock */ 2561 ctl = I915_READ_CTL(signaller); 2562 if (ctl & RING_WAIT_SEMAPHORE && semaphore_passed(signaller) < 0) 2563 return -1; 2564 2565 return i915_seqno_passed(signaller->get_seqno(signaller, false), seqno); 2566 } 2567 2568 static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv) 2569 { 2570 struct intel_ring_buffer *ring; 2571 int i; 2572 2573 for_each_ring(ring, dev_priv, i) 2574 ring->hangcheck.deadlock = false; 2575 } 2576 2577 static enum intel_ring_hangcheck_action 2578 ring_stuck(struct intel_ring_buffer *ring, u64 acthd) 2579 { 2580 struct drm_device *dev = ring->dev; 2581 struct drm_i915_private *dev_priv = dev->dev_private; 2582 u32 tmp; 2583 2584 if (ring->hangcheck.acthd != acthd) 2585 return HANGCHECK_ACTIVE; 2586 2587 if (IS_GEN2(dev)) 2588 return HANGCHECK_HUNG; 2589 2590 /* Is the chip hanging on a WAIT_FOR_EVENT? 2591 * If so we can simply poke the RB_WAIT bit 2592 * and break the hang. This should work on 2593 * all but the second generation chipsets. 2594 */ 2595 tmp = I915_READ_CTL(ring); 2596 if (tmp & RING_WAIT) { 2597 i915_handle_error(dev, false, 2598 "Kicking stuck wait on %s", 2599 ring->name); 2600 I915_WRITE_CTL(ring, tmp); 2601 return HANGCHECK_KICK; 2602 } 2603 2604 if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) { 2605 switch (semaphore_passed(ring)) { 2606 default: 2607 return HANGCHECK_HUNG; 2608 case 1: 2609 i915_handle_error(dev, false, 2610 "Kicking stuck semaphore on %s", 2611 ring->name); 2612 I915_WRITE_CTL(ring, tmp); 2613 return HANGCHECK_KICK; 2614 case 0: 2615 return HANGCHECK_WAIT; 2616 } 2617 } 2618 2619 return HANGCHECK_HUNG; 2620 } 2621 2622 /** 2623 * This is called when the chip hasn't reported back with completed 2624 * batchbuffers in a long time. We keep track per ring seqno progress and 2625 * if there are no progress, hangcheck score for that ring is increased. 2626 * Further, acthd is inspected to see if the ring is stuck. On stuck case 2627 * we kick the ring. If we see no progress on three subsequent calls 2628 * we assume chip is wedged and try to fix it by resetting the chip. 
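 * Roughly: a busy ring that makes no progress earns BUSY points per
 * check, a ring we managed to kick earns KICK points, and a ring we could
 * not unblock earns HUNG points; once a ring's score reaches
 * HANGCHECK_SCORE_RING_HUNG we declare it hung and hand the problem to
 * i915_handle_error() for a reset.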
2629 */ 2630 static void i915_hangcheck_elapsed(unsigned long data) 2631 { 2632 struct drm_device *dev = (struct drm_device *)data; 2633 struct drm_i915_private *dev_priv = dev->dev_private; 2634 struct intel_ring_buffer *ring; 2635 int i; 2636 int busy_count = 0, rings_hung = 0; 2637 bool stuck[I915_NUM_RINGS] = { 0 }; 2638 #define BUSY 1 2639 #define KICK 5 2640 #define HUNG 20 2641 2642 if (!i915.enable_hangcheck) 2643 return; 2644 2645 for_each_ring(ring, dev_priv, i) { 2646 u64 acthd; 2647 u32 seqno; 2648 bool busy = true; 2649 2650 semaphore_clear_deadlocks(dev_priv); 2651 2652 seqno = ring->get_seqno(ring, false); 2653 acthd = intel_ring_get_active_head(ring); 2654 2655 if (ring->hangcheck.seqno == seqno) { 2656 if (ring_idle(ring, seqno)) { 2657 ring->hangcheck.action = HANGCHECK_IDLE; 2658 2659 if (waitqueue_active(&ring->irq_queue)) { 2660 /* Issue a wake-up to catch stuck h/w. */ 2661 if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) { 2662 if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring))) 2663 DRM_ERROR("Hangcheck timer elapsed... %s idle\n", 2664 ring->name); 2665 else 2666 DRM_INFO("Fake missed irq on %s\n", 2667 ring->name); 2668 wake_up_all(&ring->irq_queue); 2669 } 2670 /* Safeguard against driver failure */ 2671 ring->hangcheck.score += BUSY; 2672 } else 2673 busy = false; 2674 } else { 2675 /* We always increment the hangcheck score 2676 * if the ring is busy and still processing 2677 * the same request, so that no single request 2678 * can run indefinitely (such as a chain of 2679 * batches). The only time we do not increment 2680 * the hangcheck score on this ring, if this 2681 * ring is in a legitimate wait for another 2682 * ring. In that case the waiting ring is a 2683 * victim and we want to be sure we catch the 2684 * right culprit. Then every time we do kick 2685 * the ring, add a small increment to the 2686 * score so that we can catch a batch that is 2687 * being repeatedly kicked and so responsible 2688 * for stalling the machine. 2689 */ 2690 ring->hangcheck.action = ring_stuck(ring, 2691 acthd); 2692 2693 switch (ring->hangcheck.action) { 2694 case HANGCHECK_IDLE: 2695 case HANGCHECK_WAIT: 2696 break; 2697 case HANGCHECK_ACTIVE: 2698 ring->hangcheck.score += BUSY; 2699 break; 2700 case HANGCHECK_KICK: 2701 ring->hangcheck.score += KICK; 2702 break; 2703 case HANGCHECK_HUNG: 2704 ring->hangcheck.score += HUNG; 2705 stuck[i] = true; 2706 break; 2707 } 2708 } 2709 } else { 2710 ring->hangcheck.action = HANGCHECK_ACTIVE; 2711 2712 /* Gradually reduce the count so that we catch DoS 2713 * attempts across multiple batches. 2714 */ 2715 if (ring->hangcheck.score > 0) 2716 ring->hangcheck.score--; 2717 } 2718 2719 ring->hangcheck.seqno = seqno; 2720 ring->hangcheck.acthd = acthd; 2721 busy_count += busy; 2722 } 2723 2724 for_each_ring(ring, dev_priv, i) { 2725 if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) { 2726 DRM_INFO("%s on %s\n", 2727 stuck[i] ? 
"stuck" : "no progress", 2728 ring->name); 2729 rings_hung++; 2730 } 2731 } 2732 2733 if (rings_hung) 2734 return i915_handle_error(dev, true, "Ring hung"); 2735 2736 if (busy_count) 2737 /* Reset timer case chip hangs without another request 2738 * being added */ 2739 i915_queue_hangcheck(dev); 2740 } 2741 2742 void i915_queue_hangcheck(struct drm_device *dev) 2743 { 2744 struct drm_i915_private *dev_priv = dev->dev_private; 2745 if (!i915.enable_hangcheck) 2746 return; 2747 2748 mod_timer(&dev_priv->gpu_error.hangcheck_timer, 2749 round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES)); 2750 } 2751 2752 static void ibx_irq_preinstall(struct drm_device *dev) 2753 { 2754 struct drm_i915_private *dev_priv = dev->dev_private; 2755 2756 if (HAS_PCH_NOP(dev)) 2757 return; 2758 2759 /* south display irq */ 2760 I915_WRITE(SDEIMR, 0xffffffff); 2761 /* 2762 * SDEIER is also touched by the interrupt handler to work around missed 2763 * PCH interrupts. Hence we can't update it after the interrupt handler 2764 * is enabled - instead we unconditionally enable all PCH interrupt 2765 * sources here, but then only unmask them as needed with SDEIMR. 2766 */ 2767 I915_WRITE(SDEIER, 0xffffffff); 2768 POSTING_READ(SDEIER); 2769 } 2770 2771 static void gen5_gt_irq_preinstall(struct drm_device *dev) 2772 { 2773 struct drm_i915_private *dev_priv = dev->dev_private; 2774 2775 /* and GT */ 2776 I915_WRITE(GTIMR, 0xffffffff); 2777 I915_WRITE(GTIER, 0x0); 2778 POSTING_READ(GTIER); 2779 2780 if (INTEL_INFO(dev)->gen >= 6) { 2781 /* and PM */ 2782 I915_WRITE(GEN6_PMIMR, 0xffffffff); 2783 I915_WRITE(GEN6_PMIER, 0x0); 2784 POSTING_READ(GEN6_PMIER); 2785 } 2786 } 2787 2788 /* drm_dma.h hooks 2789 */ 2790 static void ironlake_irq_preinstall(struct drm_device *dev) 2791 { 2792 struct drm_i915_private *dev_priv = dev->dev_private; 2793 2794 I915_WRITE(HWSTAM, 0xeffe); 2795 2796 I915_WRITE(DEIMR, 0xffffffff); 2797 I915_WRITE(DEIER, 0x0); 2798 POSTING_READ(DEIER); 2799 2800 gen5_gt_irq_preinstall(dev); 2801 2802 ibx_irq_preinstall(dev); 2803 } 2804 2805 static void valleyview_irq_preinstall(struct drm_device *dev) 2806 { 2807 struct drm_i915_private *dev_priv = dev->dev_private; 2808 int pipe; 2809 2810 /* VLV magic */ 2811 I915_WRITE(VLV_IMR, 0); 2812 I915_WRITE(RING_IMR(RENDER_RING_BASE), 0); 2813 I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0); 2814 I915_WRITE(RING_IMR(BLT_RING_BASE), 0); 2815 2816 /* and GT */ 2817 I915_WRITE(GTIIR, I915_READ(GTIIR)); 2818 I915_WRITE(GTIIR, I915_READ(GTIIR)); 2819 2820 gen5_gt_irq_preinstall(dev); 2821 2822 I915_WRITE(DPINVGTT, 0xff); 2823 2824 I915_WRITE(PORT_HOTPLUG_EN, 0); 2825 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 2826 for_each_pipe(pipe) 2827 I915_WRITE(PIPESTAT(pipe), 0xffff); 2828 I915_WRITE(VLV_IIR, 0xffffffff); 2829 I915_WRITE(VLV_IMR, 0xffffffff); 2830 I915_WRITE(VLV_IER, 0x0); 2831 POSTING_READ(VLV_IER); 2832 } 2833 2834 static void gen8_irq_preinstall(struct drm_device *dev) 2835 { 2836 struct drm_i915_private *dev_priv = dev->dev_private; 2837 int pipe; 2838 2839 I915_WRITE(GEN8_MASTER_IRQ, 0); 2840 POSTING_READ(GEN8_MASTER_IRQ); 2841 2842 /* IIR can theoretically queue up two events. 
Be paranoid */ 2843 #define GEN8_IRQ_INIT_NDX(type, which) do { \ 2844 I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \ 2845 POSTING_READ(GEN8_##type##_IMR(which)); \ 2846 I915_WRITE(GEN8_##type##_IER(which), 0); \ 2847 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \ 2848 POSTING_READ(GEN8_##type##_IIR(which)); \ 2849 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \ 2850 } while (0) 2851 2852 #define GEN8_IRQ_INIT(type) do { \ 2853 I915_WRITE(GEN8_##type##_IMR, 0xffffffff); \ 2854 POSTING_READ(GEN8_##type##_IMR); \ 2855 I915_WRITE(GEN8_##type##_IER, 0); \ 2856 I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \ 2857 POSTING_READ(GEN8_##type##_IIR); \ 2858 I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \ 2859 } while (0) 2860 2861 GEN8_IRQ_INIT_NDX(GT, 0); 2862 GEN8_IRQ_INIT_NDX(GT, 1); 2863 GEN8_IRQ_INIT_NDX(GT, 2); 2864 GEN8_IRQ_INIT_NDX(GT, 3); 2865 2866 for_each_pipe(pipe) { 2867 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe); 2868 } 2869 2870 GEN8_IRQ_INIT(DE_PORT); 2871 GEN8_IRQ_INIT(DE_MISC); 2872 GEN8_IRQ_INIT(PCU); 2873 #undef GEN8_IRQ_INIT 2874 #undef GEN8_IRQ_INIT_NDX 2875 2876 POSTING_READ(GEN8_PCU_IIR); 2877 2878 ibx_irq_preinstall(dev); 2879 } 2880 2881 static void ibx_hpd_irq_setup(struct drm_device *dev) 2882 { 2883 struct drm_i915_private *dev_priv = dev->dev_private; 2884 struct drm_mode_config *mode_config = &dev->mode_config; 2885 struct intel_encoder *intel_encoder; 2886 u32 hotplug_irqs, hotplug, enabled_irqs = 0; 2887 2888 if (HAS_PCH_IBX(dev)) { 2889 hotplug_irqs = SDE_HOTPLUG_MASK; 2890 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head) 2891 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) 2892 enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin]; 2893 } else { 2894 hotplug_irqs = SDE_HOTPLUG_MASK_CPT; 2895 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head) 2896 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) 2897 enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin]; 2898 } 2899 2900 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 2901 2902 /* 2903 * Enable digital hotplug on the PCH, and configure the DP short pulse 2904 * duration to 2ms (which is the minimum in the Display Port spec) 2905 * 2906 * This register is the same on all known PCH chips. 2907 */ 2908 hotplug = I915_READ(PCH_PORT_HOTPLUG); 2909 hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK); 2910 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms; 2911 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms; 2912 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms; 2913 I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 2914 } 2915 2916 static void ibx_irq_postinstall(struct drm_device *dev) 2917 { 2918 struct drm_i915_private *dev_priv = dev->dev_private; 2919 u32 mask; 2920 2921 if (HAS_PCH_NOP(dev)) 2922 return; 2923 2924 if (HAS_PCH_IBX(dev)) { 2925 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON; 2926 } else { 2927 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT; 2928 2929 I915_WRITE(SERR_INT, I915_READ(SERR_INT)); 2930 } 2931 2932 I915_WRITE(SDEIIR, I915_READ(SDEIIR)); 2933 I915_WRITE(SDEIMR, ~mask); 2934 } 2935 2936 static void gen5_gt_irq_postinstall(struct drm_device *dev) 2937 { 2938 struct drm_i915_private *dev_priv = dev->dev_private; 2939 u32 pm_irqs, gt_irqs; 2940 2941 pm_irqs = gt_irqs = 0; 2942 2943 dev_priv->gt_irq_mask = ~0; 2944 if (HAS_L3_DPF(dev)) { 2945 /* L3 parity interrupt is always unmasked. 
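 * so it is left out of gt_irq_mask (i.e. unmasked in GTIMR) and enabled
 * in GTIER together with the user interrupt bits below.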
*/ 2946 dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev); 2947 gt_irqs |= GT_PARITY_ERROR(dev); 2948 } 2949 2950 gt_irqs |= GT_RENDER_USER_INTERRUPT; 2951 if (IS_GEN5(dev)) { 2952 gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT | 2953 ILK_BSD_USER_INTERRUPT; 2954 } else { 2955 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT; 2956 } 2957 2958 I915_WRITE(GTIIR, I915_READ(GTIIR)); 2959 I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 2960 I915_WRITE(GTIER, gt_irqs); 2961 POSTING_READ(GTIER); 2962 2963 if (INTEL_INFO(dev)->gen >= 6) { 2964 pm_irqs |= dev_priv->pm_rps_events; 2965 2966 if (HAS_VEBOX(dev)) 2967 pm_irqs |= PM_VEBOX_USER_INTERRUPT; 2968 2969 dev_priv->pm_irq_mask = 0xffffffff; 2970 I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR)); 2971 I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask); 2972 I915_WRITE(GEN6_PMIER, pm_irqs); 2973 POSTING_READ(GEN6_PMIER); 2974 } 2975 } 2976 2977 static int ironlake_irq_postinstall(struct drm_device *dev) 2978 { 2979 unsigned long irqflags; 2980 struct drm_i915_private *dev_priv = dev->dev_private; 2981 u32 display_mask, extra_mask; 2982 2983 if (INTEL_INFO(dev)->gen >= 7) { 2984 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | 2985 DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB | 2986 DE_PLANEB_FLIP_DONE_IVB | 2987 DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB); 2988 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB | 2989 DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB); 2990 2991 I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT)); 2992 } else { 2993 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | 2994 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE | 2995 DE_AUX_CHANNEL_A | 2996 DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE | 2997 DE_POISON); 2998 extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT | 2999 DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN; 3000 } 3001 3002 dev_priv->irq_mask = ~display_mask; 3003 3004 /* should always can generate irq */ 3005 I915_WRITE(DEIIR, I915_READ(DEIIR)); 3006 I915_WRITE(DEIMR, dev_priv->irq_mask); 3007 I915_WRITE(DEIER, display_mask | extra_mask); 3008 POSTING_READ(DEIER); 3009 3010 gen5_gt_irq_postinstall(dev); 3011 3012 ibx_irq_postinstall(dev); 3013 3014 if (IS_IRONLAKE_M(dev)) { 3015 /* Enable PCU event interrupts 3016 * 3017 * spinlocking not required here for correctness since interrupt 3018 * setup is guaranteed to run in single-threaded context. But we 3019 * need it to make the assert_spin_locked happy. 
*/ 3020 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3021 ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT); 3022 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3023 } 3024 3025 return 0; 3026 } 3027 3028 static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv) 3029 { 3030 u32 pipestat_mask; 3031 u32 iir_mask; 3032 3033 pipestat_mask = PIPESTAT_INT_STATUS_MASK | 3034 PIPE_FIFO_UNDERRUN_STATUS; 3035 3036 I915_WRITE(PIPESTAT(PIPE_A), pipestat_mask); 3037 I915_WRITE(PIPESTAT(PIPE_B), pipestat_mask); 3038 POSTING_READ(PIPESTAT(PIPE_A)); 3039 3040 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV | 3041 PIPE_CRC_DONE_INTERRUPT_STATUS; 3042 3043 i915_enable_pipestat(dev_priv, PIPE_A, pipestat_mask | 3044 PIPE_GMBUS_INTERRUPT_STATUS); 3045 i915_enable_pipestat(dev_priv, PIPE_B, pipestat_mask); 3046 3047 iir_mask = I915_DISPLAY_PORT_INTERRUPT | 3048 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3049 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; 3050 dev_priv->irq_mask &= ~iir_mask; 3051 3052 I915_WRITE(VLV_IIR, iir_mask); 3053 I915_WRITE(VLV_IIR, iir_mask); 3054 I915_WRITE(VLV_IMR, dev_priv->irq_mask); 3055 I915_WRITE(VLV_IER, ~dev_priv->irq_mask); 3056 POSTING_READ(VLV_IER); 3057 } 3058 3059 static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv) 3060 { 3061 u32 pipestat_mask; 3062 u32 iir_mask; 3063 3064 iir_mask = I915_DISPLAY_PORT_INTERRUPT | 3065 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3066 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; 3067 3068 dev_priv->irq_mask |= iir_mask; 3069 I915_WRITE(VLV_IER, ~dev_priv->irq_mask); 3070 I915_WRITE(VLV_IMR, dev_priv->irq_mask); 3071 I915_WRITE(VLV_IIR, iir_mask); 3072 I915_WRITE(VLV_IIR, iir_mask); 3073 POSTING_READ(VLV_IIR); 3074 3075 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV | 3076 PIPE_CRC_DONE_INTERRUPT_STATUS; 3077 3078 i915_disable_pipestat(dev_priv, PIPE_A, pipestat_mask | 3079 PIPE_GMBUS_INTERRUPT_STATUS); 3080 i915_disable_pipestat(dev_priv, PIPE_B, pipestat_mask); 3081 3082 pipestat_mask = PIPESTAT_INT_STATUS_MASK | 3083 PIPE_FIFO_UNDERRUN_STATUS; 3084 I915_WRITE(PIPESTAT(PIPE_A), pipestat_mask); 3085 I915_WRITE(PIPESTAT(PIPE_B), pipestat_mask); 3086 POSTING_READ(PIPESTAT(PIPE_A)); 3087 } 3088 3089 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv) 3090 { 3091 assert_spin_locked(&dev_priv->irq_lock); 3092 3093 if (dev_priv->display_irqs_enabled) 3094 return; 3095 3096 dev_priv->display_irqs_enabled = true; 3097 3098 if (dev_priv->dev->irq_enabled) 3099 valleyview_display_irqs_install(dev_priv); 3100 } 3101 3102 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv) 3103 { 3104 assert_spin_locked(&dev_priv->irq_lock); 3105 3106 if (!dev_priv->display_irqs_enabled) 3107 return; 3108 3109 dev_priv->display_irqs_enabled = false; 3110 3111 if (dev_priv->dev->irq_enabled) 3112 valleyview_display_irqs_uninstall(dev_priv); 3113 } 3114 3115 static int valleyview_irq_postinstall(struct drm_device *dev) 3116 { 3117 struct drm_i915_private *dev_priv = dev->dev_private; 3118 unsigned long irqflags; 3119 3120 dev_priv->irq_mask = ~0; 3121 3122 I915_WRITE(PORT_HOTPLUG_EN, 0); 3123 POSTING_READ(PORT_HOTPLUG_EN); 3124 3125 I915_WRITE(VLV_IMR, dev_priv->irq_mask); 3126 I915_WRITE(VLV_IER, ~dev_priv->irq_mask); 3127 I915_WRITE(VLV_IIR, 0xffffffff); 3128 POSTING_READ(VLV_IER); 3129 3130 /* Interrupt setup is already guaranteed to be single-threaded, this is 3131 * just to make the assert_spin_locked check happy. 
*/ 3132 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3133 if (dev_priv->display_irqs_enabled) 3134 valleyview_display_irqs_install(dev_priv); 3135 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3136 3137 I915_WRITE(VLV_IIR, 0xffffffff); 3138 I915_WRITE(VLV_IIR, 0xffffffff); 3139 3140 gen5_gt_irq_postinstall(dev); 3141 3142 /* ack & enable invalid PTE error interrupts */ 3143 #if 0 /* FIXME: add support to irq handler for checking these bits */ 3144 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK); 3145 I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK); 3146 #endif 3147 3148 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); 3149 3150 return 0; 3151 } 3152 3153 static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv) 3154 { 3155 int i; 3156 3157 /* These are interrupts we'll toggle with the ring mask register */ 3158 uint32_t gt_interrupts[] = { 3159 GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT | 3160 GT_RENDER_L3_PARITY_ERROR_INTERRUPT | 3161 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT, 3162 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT | 3163 GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT, 3164 0, 3165 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT 3166 }; 3167 3168 for (i = 0; i < ARRAY_SIZE(gt_interrupts); i++) { 3169 u32 tmp = I915_READ(GEN8_GT_IIR(i)); 3170 if (tmp) 3171 DRM_ERROR("Interrupt (%d) should have been masked in pre-install 0x%08x\n", 3172 i, tmp); 3173 I915_WRITE(GEN8_GT_IMR(i), ~gt_interrupts[i]); 3174 I915_WRITE(GEN8_GT_IER(i), gt_interrupts[i]); 3175 } 3176 POSTING_READ(GEN8_GT_IER(0)); 3177 } 3178 3179 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) 3180 { 3181 struct drm_device *dev = dev_priv->dev; 3182 uint32_t de_pipe_masked = GEN8_PIPE_FLIP_DONE | 3183 GEN8_PIPE_CDCLK_CRC_DONE | 3184 GEN8_DE_PIPE_IRQ_FAULT_ERRORS; 3185 uint32_t de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK | 3186 GEN8_PIPE_FIFO_UNDERRUN; 3187 int pipe; 3188 dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked; 3189 dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked; 3190 dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked; 3191 3192 for_each_pipe(pipe) { 3193 u32 tmp = I915_READ(GEN8_DE_PIPE_IIR(pipe)); 3194 if (tmp) 3195 DRM_ERROR("Interrupt (%d) should have been masked in pre-install 0x%08x\n", 3196 pipe, tmp); 3197 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); 3198 I915_WRITE(GEN8_DE_PIPE_IER(pipe), de_pipe_enables); 3199 } 3200 POSTING_READ(GEN8_DE_PIPE_ISR(0)); 3201 3202 I915_WRITE(GEN8_DE_PORT_IMR, ~GEN8_AUX_CHANNEL_A); 3203 I915_WRITE(GEN8_DE_PORT_IER, GEN8_AUX_CHANNEL_A); 3204 POSTING_READ(GEN8_DE_PORT_IER); 3205 } 3206 3207 static int gen8_irq_postinstall(struct drm_device *dev) 3208 { 3209 struct drm_i915_private *dev_priv = dev->dev_private; 3210 3211 gen8_gt_irq_postinstall(dev_priv); 3212 gen8_de_irq_postinstall(dev_priv); 3213 3214 ibx_irq_postinstall(dev); 3215 3216 I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL); 3217 POSTING_READ(GEN8_MASTER_IRQ); 3218 3219 return 0; 3220 } 3221 3222 static void gen8_irq_uninstall(struct drm_device *dev) 3223 { 3224 struct drm_i915_private *dev_priv = dev->dev_private; 3225 int pipe; 3226 3227 if (!dev_priv) 3228 return; 3229 3230 I915_WRITE(GEN8_MASTER_IRQ, 0); 3231 3232 #define GEN8_IRQ_FINI_NDX(type, which) do { \ 3233 I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \ 3234 I915_WRITE(GEN8_##type##_IER(which), 0); \ 3235 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \ 3236 } while (0) 3237 3238 #define GEN8_IRQ_FINI(type) do { \ 3239 I915_WRITE(GEN8_##type##_IMR, 0xffffffff); \ 
3240 I915_WRITE(GEN8_##type##_IER, 0); \ 3241 I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \ 3242 } while (0) 3243 3244 GEN8_IRQ_FINI_NDX(GT, 0); 3245 GEN8_IRQ_FINI_NDX(GT, 1); 3246 GEN8_IRQ_FINI_NDX(GT, 2); 3247 GEN8_IRQ_FINI_NDX(GT, 3); 3248 3249 for_each_pipe(pipe) { 3250 GEN8_IRQ_FINI_NDX(DE_PIPE, pipe); 3251 } 3252 3253 GEN8_IRQ_FINI(DE_PORT); 3254 GEN8_IRQ_FINI(DE_MISC); 3255 GEN8_IRQ_FINI(PCU); 3256 #undef GEN8_IRQ_FINI 3257 #undef GEN8_IRQ_FINI_NDX 3258 3259 POSTING_READ(GEN8_PCU_IIR); 3260 } 3261 3262 static void valleyview_irq_uninstall(struct drm_device *dev) 3263 { 3264 struct drm_i915_private *dev_priv = dev->dev_private; 3265 unsigned long irqflags; 3266 int pipe; 3267 3268 if (!dev_priv) 3269 return; 3270 3271 intel_hpd_irq_uninstall(dev_priv); 3272 3273 for_each_pipe(pipe) 3274 I915_WRITE(PIPESTAT(pipe), 0xffff); 3275 3276 I915_WRITE(HWSTAM, 0xffffffff); 3277 I915_WRITE(PORT_HOTPLUG_EN, 0); 3278 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3279 3280 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3281 if (dev_priv->display_irqs_enabled) 3282 valleyview_display_irqs_uninstall(dev_priv); 3283 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3284 3285 dev_priv->irq_mask = 0; 3286 3287 I915_WRITE(VLV_IIR, 0xffffffff); 3288 I915_WRITE(VLV_IMR, 0xffffffff); 3289 I915_WRITE(VLV_IER, 0x0); 3290 POSTING_READ(VLV_IER); 3291 } 3292 3293 static void ironlake_irq_uninstall(struct drm_device *dev) 3294 { 3295 struct drm_i915_private *dev_priv = dev->dev_private; 3296 3297 if (!dev_priv) 3298 return; 3299 3300 intel_hpd_irq_uninstall(dev_priv); 3301 3302 I915_WRITE(HWSTAM, 0xffffffff); 3303 3304 I915_WRITE(DEIMR, 0xffffffff); 3305 I915_WRITE(DEIER, 0x0); 3306 I915_WRITE(DEIIR, I915_READ(DEIIR)); 3307 if (IS_GEN7(dev)) 3308 I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT)); 3309 3310 I915_WRITE(GTIMR, 0xffffffff); 3311 I915_WRITE(GTIER, 0x0); 3312 I915_WRITE(GTIIR, I915_READ(GTIIR)); 3313 3314 if (HAS_PCH_NOP(dev)) 3315 return; 3316 3317 I915_WRITE(SDEIMR, 0xffffffff); 3318 I915_WRITE(SDEIER, 0x0); 3319 I915_WRITE(SDEIIR, I915_READ(SDEIIR)); 3320 if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev)) 3321 I915_WRITE(SERR_INT, I915_READ(SERR_INT)); 3322 } 3323 3324 static void i8xx_irq_preinstall(struct drm_device * dev) 3325 { 3326 struct drm_i915_private *dev_priv = dev->dev_private; 3327 int pipe; 3328 3329 for_each_pipe(pipe) 3330 I915_WRITE(PIPESTAT(pipe), 0); 3331 I915_WRITE16(IMR, 0xffff); 3332 I915_WRITE16(IER, 0x0); 3333 POSTING_READ16(IER); 3334 } 3335 3336 static int i8xx_irq_postinstall(struct drm_device *dev) 3337 { 3338 struct drm_i915_private *dev_priv = dev->dev_private; 3339 unsigned long irqflags; 3340 3341 I915_WRITE16(EMR, 3342 ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); 3343 3344 /* Unmask the interrupts that we always want on. */ 3345 dev_priv->irq_mask = 3346 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3347 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3348 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3349 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | 3350 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 3351 I915_WRITE16(IMR, dev_priv->irq_mask); 3352 3353 I915_WRITE16(IER, 3354 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3355 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3356 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT | 3357 I915_USER_INTERRUPT); 3358 POSTING_READ16(IER); 3359 3360 /* Interrupt setup is already guaranteed to be single-threaded, this is 3361 * just to make the assert_spin_locked check happy. 
*/ 3362 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3363 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 3364 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 3365 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3366 3367 return 0; 3368 } 3369 3370 /* 3371 * Returns true when a page flip has completed. 3372 */ 3373 static bool i8xx_handle_vblank(struct drm_device *dev, 3374 int plane, int pipe, u32 iir) 3375 { 3376 struct drm_i915_private *dev_priv = dev->dev_private; 3377 u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane); 3378 3379 if (!drm_handle_vblank(dev, pipe)) 3380 return false; 3381 3382 if ((iir & flip_pending) == 0) 3383 return false; 3384 3385 intel_prepare_page_flip(dev, plane); 3386 3387 /* We detect FlipDone by looking for the change in PendingFlip from '1' 3388 * to '0' on the following vblank, i.e. IIR has the Pendingflip 3389 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence 3390 * the flip is completed (no longer pending). Since this doesn't raise 3391 * an interrupt per se, we watch for the change at vblank. 3392 */ 3393 if (I915_READ16(ISR) & flip_pending) 3394 return false; 3395 3396 intel_finish_page_flip(dev, pipe); 3397 3398 return true; 3399 } 3400 3401 static irqreturn_t i8xx_irq_handler(int irq, void *arg) 3402 { 3403 struct drm_device *dev = (struct drm_device *) arg; 3404 struct drm_i915_private *dev_priv = dev->dev_private; 3405 u16 iir, new_iir; 3406 u32 pipe_stats[2]; 3407 unsigned long irqflags; 3408 int pipe; 3409 u16 flip_mask = 3410 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3411 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 3412 3413 iir = I915_READ16(IIR); 3414 if (iir == 0) 3415 return IRQ_NONE; 3416 3417 while (iir & ~flip_mask) { 3418 /* Can't rely on pipestat interrupt bit in iir as it might 3419 * have been cleared after the pipestat interrupt was received. 3420 * It doesn't set the bit in iir again, but it still produces 3421 * interrupts (for non-MSI). 
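 * So instead we snapshot and ack the PIPESTAT registers under irq_lock
 * here, and only afterwards clear the handled bits in IIR.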
3422 */ 3423 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3424 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 3425 i915_handle_error(dev, false, 3426 "Command parser error, iir 0x%08x", 3427 iir); 3428 3429 for_each_pipe(pipe) { 3430 int reg = PIPESTAT(pipe); 3431 pipe_stats[pipe] = I915_READ(reg); 3432 3433 /* 3434 * Clear the PIPE*STAT regs before the IIR 3435 */ 3436 if (pipe_stats[pipe] & 0x8000ffff) 3437 I915_WRITE(reg, pipe_stats[pipe]); 3438 } 3439 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3440 3441 I915_WRITE16(IIR, iir & ~flip_mask); 3442 new_iir = I915_READ16(IIR); /* Flush posted writes */ 3443 3444 i915_update_dri1_breadcrumb(dev); 3445 3446 if (iir & I915_USER_INTERRUPT) 3447 notify_ring(dev, &dev_priv->ring[RCS]); 3448 3449 for_each_pipe(pipe) { 3450 int plane = pipe; 3451 if (HAS_FBC(dev)) 3452 plane = !plane; 3453 3454 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS && 3455 i8xx_handle_vblank(dev, plane, pipe, iir)) 3456 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane); 3457 3458 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 3459 i9xx_pipe_crc_irq_handler(dev, pipe); 3460 3461 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS && 3462 intel_set_cpu_fifo_underrun_reporting(dev, pipe, false)) 3463 DRM_ERROR("pipe %c underrun\n", pipe_name(pipe)); 3464 } 3465 3466 iir = new_iir; 3467 } 3468 3469 return IRQ_HANDLED; 3470 } 3471 3472 static void i8xx_irq_uninstall(struct drm_device * dev) 3473 { 3474 struct drm_i915_private *dev_priv = dev->dev_private; 3475 int pipe; 3476 3477 for_each_pipe(pipe) { 3478 /* Clear enable bits; then clear status bits */ 3479 I915_WRITE(PIPESTAT(pipe), 0); 3480 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); 3481 } 3482 I915_WRITE16(IMR, 0xffff); 3483 I915_WRITE16(IER, 0x0); 3484 I915_WRITE16(IIR, I915_READ16(IIR)); 3485 } 3486 3487 static void i915_irq_preinstall(struct drm_device * dev) 3488 { 3489 struct drm_i915_private *dev_priv = dev->dev_private; 3490 int pipe; 3491 3492 if (I915_HAS_HOTPLUG(dev)) { 3493 I915_WRITE(PORT_HOTPLUG_EN, 0); 3494 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3495 } 3496 3497 I915_WRITE16(HWSTAM, 0xeffe); 3498 for_each_pipe(pipe) 3499 I915_WRITE(PIPESTAT(pipe), 0); 3500 I915_WRITE(IMR, 0xffffffff); 3501 I915_WRITE(IER, 0x0); 3502 POSTING_READ(IER); 3503 } 3504 3505 static int i915_irq_postinstall(struct drm_device *dev) 3506 { 3507 struct drm_i915_private *dev_priv = dev->dev_private; 3508 u32 enable_mask; 3509 unsigned long irqflags; 3510 3511 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); 3512 3513 /* Unmask the interrupts that we always want on. */ 3514 dev_priv->irq_mask = 3515 ~(I915_ASLE_INTERRUPT | 3516 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3517 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3518 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3519 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | 3520 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 3521 3522 enable_mask = 3523 I915_ASLE_INTERRUPT | 3524 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3525 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3526 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT | 3527 I915_USER_INTERRUPT; 3528 3529 if (I915_HAS_HOTPLUG(dev)) { 3530 I915_WRITE(PORT_HOTPLUG_EN, 0); 3531 POSTING_READ(PORT_HOTPLUG_EN); 3532 3533 /* Enable in IER... 
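 * i.e. let the display port (hotplug) interrupt through at the top level;
 * the per-port detect enables in PORT_HOTPLUG_EN are left cleared here
 * and are programmed later from the hotplug setup path
 * (see i915_hpd_irq_setup()).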
*/ 3534 enable_mask |= I915_DISPLAY_PORT_INTERRUPT; 3535 /* and unmask in IMR */ 3536 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT; 3537 } 3538 3539 I915_WRITE(IMR, dev_priv->irq_mask); 3540 I915_WRITE(IER, enable_mask); 3541 POSTING_READ(IER); 3542 3543 i915_enable_asle_pipestat(dev); 3544 3545 /* Interrupt setup is already guaranteed to be single-threaded, this is 3546 * just to make the assert_spin_locked check happy. */ 3547 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3548 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 3549 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 3550 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3551 3552 return 0; 3553 } 3554 3555 /* 3556 * Returns true when a page flip has completed. 3557 */ 3558 static bool i915_handle_vblank(struct drm_device *dev, 3559 int plane, int pipe, u32 iir) 3560 { 3561 struct drm_i915_private *dev_priv = dev->dev_private; 3562 u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane); 3563 3564 if (!drm_handle_vblank(dev, pipe)) 3565 return false; 3566 3567 if ((iir & flip_pending) == 0) 3568 return false; 3569 3570 intel_prepare_page_flip(dev, plane); 3571 3572 /* We detect FlipDone by looking for the change in PendingFlip from '1' 3573 * to '0' on the following vblank, i.e. IIR has the Pendingflip 3574 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence 3575 * the flip is completed (no longer pending). Since this doesn't raise 3576 * an interrupt per se, we watch for the change at vblank. 3577 */ 3578 if (I915_READ(ISR) & flip_pending) 3579 return false; 3580 3581 intel_finish_page_flip(dev, pipe); 3582 3583 return true; 3584 } 3585 3586 static irqreturn_t i915_irq_handler(int irq, void *arg) 3587 { 3588 struct drm_device *dev = (struct drm_device *) arg; 3589 struct drm_i915_private *dev_priv = dev->dev_private; 3590 u32 iir, new_iir, pipe_stats[I915_MAX_PIPES]; 3591 unsigned long irqflags; 3592 u32 flip_mask = 3593 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3594 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 3595 int pipe, ret = IRQ_NONE; 3596 3597 iir = I915_READ(IIR); 3598 do { 3599 bool irq_received = (iir & ~flip_mask) != 0; 3600 bool blc_event = false; 3601 3602 /* Can't rely on pipestat interrupt bit in iir as it might 3603 * have been cleared after the pipestat interrupt was received. 3604 * It doesn't set the bit in iir again, but it still produces 3605 * interrupts (for non-MSI). 3606 */ 3607 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3608 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 3609 i915_handle_error(dev, false, 3610 "Command parser error, iir 0x%08x", 3611 iir); 3612 3613 for_each_pipe(pipe) { 3614 int reg = PIPESTAT(pipe); 3615 pipe_stats[pipe] = I915_READ(reg); 3616 3617 /* Clear the PIPE*STAT regs before the IIR */ 3618 if (pipe_stats[pipe] & 0x8000ffff) { 3619 I915_WRITE(reg, pipe_stats[pipe]); 3620 irq_received = true; 3621 } 3622 } 3623 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3624 3625 if (!irq_received) 3626 break; 3627 3628 /* Consume port. 
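 * (i.e. read PORT_HOTPLUG_STAT and write the handled status bits back to
 * ack them.)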
Then clear IIR or we'll miss events */ 3629 if ((I915_HAS_HOTPLUG(dev)) && 3630 (iir & I915_DISPLAY_PORT_INTERRUPT)) { 3631 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); 3632 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; 3633 3634 intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915); 3635 3636 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); 3637 POSTING_READ(PORT_HOTPLUG_STAT); 3638 } 3639 3640 I915_WRITE(IIR, iir & ~flip_mask); 3641 new_iir = I915_READ(IIR); /* Flush posted writes */ 3642 3643 if (iir & I915_USER_INTERRUPT) 3644 notify_ring(dev, &dev_priv->ring[RCS]); 3645 3646 for_each_pipe(pipe) { 3647 int plane = pipe; 3648 if (HAS_FBC(dev)) 3649 plane = !plane; 3650 3651 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS && 3652 i915_handle_vblank(dev, plane, pipe, iir)) 3653 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane); 3654 3655 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 3656 blc_event = true; 3657 3658 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 3659 i9xx_pipe_crc_irq_handler(dev, pipe); 3660 3661 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS && 3662 intel_set_cpu_fifo_underrun_reporting(dev, pipe, false)) 3663 DRM_ERROR("pipe %c underrun\n", pipe_name(pipe)); 3664 } 3665 3666 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 3667 intel_opregion_asle_intr(dev); 3668 3669 /* With MSI, interrupts are only generated when iir 3670 * transitions from zero to nonzero. If another bit got 3671 * set while we were handling the existing iir bits, then 3672 * we would never get another interrupt. 3673 * 3674 * This is fine on non-MSI as well, as if we hit this path 3675 * we avoid exiting the interrupt handler only to generate 3676 * another one. 3677 * 3678 * Note that for MSI this could cause a stray interrupt report 3679 * if an interrupt landed in the time between writing IIR and 3680 * the posting read. This should be rare enough to never 3681 * trigger the 99% of 100,000 interrupts test for disabling 3682 * stray interrupts. 
		 */
		ret = IRQ_HANDLED;
		iir = new_iir;
	} while (iir & ~flip_mask);

	i915_update_dri1_breadcrumb(dev);

	return ret;
}

static void i915_irq_uninstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	intel_hpd_irq_uninstall(dev_priv);

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xffff);
	for_each_pipe(pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	I915_WRITE(IIR, I915_READ(IIR));
}

static void i965_irq_preinstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xeffe);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}

static int i965_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 enable_mask;
	u32 error_mask;
	unsigned long irqflags;

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
			       I915_DISPLAY_PORT_INTERRUPT |
			       I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
			       I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
			       I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			       I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
			       I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask = ~dev_priv->irq_mask;
	enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
	enable_mask |= I915_USER_INTERRUPT;

	if (IS_G4X(dev))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	/*
	 * Enable some error detection, note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	I915_WRITE(EMR, error_mask);

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	POSTING_READ(PORT_HOTPLUG_EN);

	i915_enable_asle_pipestat(dev);

	return 0;
}

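/*
 * Program PORT_HOTPLUG_EN from the per-pin hpd_stats[] state.  Callers must
 * hold dev_priv->irq_lock; this runs via the dev_priv->display.hpd_irq_setup
 * hook that intel_irq_init() installs for these platforms.
 */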
3768 */ 3769 if (IS_G4X(dev)) { 3770 error_mask = ~(GM45_ERROR_PAGE_TABLE | 3771 GM45_ERROR_MEM_PRIV | 3772 GM45_ERROR_CP_PRIV | 3773 I915_ERROR_MEMORY_REFRESH); 3774 } else { 3775 error_mask = ~(I915_ERROR_PAGE_TABLE | 3776 I915_ERROR_MEMORY_REFRESH); 3777 } 3778 I915_WRITE(EMR, error_mask); 3779 3780 I915_WRITE(IMR, dev_priv->irq_mask); 3781 I915_WRITE(IER, enable_mask); 3782 POSTING_READ(IER); 3783 3784 I915_WRITE(PORT_HOTPLUG_EN, 0); 3785 POSTING_READ(PORT_HOTPLUG_EN); 3786 3787 i915_enable_asle_pipestat(dev); 3788 3789 return 0; 3790 } 3791 3792 static void i915_hpd_irq_setup(struct drm_device *dev) 3793 { 3794 struct drm_i915_private *dev_priv = dev->dev_private; 3795 struct drm_mode_config *mode_config = &dev->mode_config; 3796 struct intel_encoder *intel_encoder; 3797 u32 hotplug_en; 3798 3799 assert_spin_locked(&dev_priv->irq_lock); 3800 3801 if (I915_HAS_HOTPLUG(dev)) { 3802 hotplug_en = I915_READ(PORT_HOTPLUG_EN); 3803 hotplug_en &= ~HOTPLUG_INT_EN_MASK; 3804 /* Note HDMI and DP share hotplug bits */ 3805 /* enable bits are the same for all generations */ 3806 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head) 3807 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) 3808 hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin]; 3809 /* Programming the CRT detection parameters tends 3810 to generate a spurious hotplug event about three 3811 seconds later. So just do it once. 3812 */ 3813 if (IS_G4X(dev)) 3814 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64; 3815 hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK; 3816 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; 3817 3818 /* Ignore TV since it's buggy */ 3819 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); 3820 } 3821 } 3822 3823 static irqreturn_t i965_irq_handler(int irq, void *arg) 3824 { 3825 struct drm_device *dev = (struct drm_device *) arg; 3826 struct drm_i915_private *dev_priv = dev->dev_private; 3827 u32 iir, new_iir; 3828 u32 pipe_stats[I915_MAX_PIPES]; 3829 unsigned long irqflags; 3830 int ret = IRQ_NONE, pipe; 3831 u32 flip_mask = 3832 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3833 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 3834 3835 iir = I915_READ(IIR); 3836 3837 for (;;) { 3838 bool irq_received = (iir & ~flip_mask) != 0; 3839 bool blc_event = false; 3840 3841 /* Can't rely on pipestat interrupt bit in iir as it might 3842 * have been cleared after the pipestat interrupt was received. 3843 * It doesn't set the bit in iir again, but it still produces 3844 * interrupts (for non-MSI). 3845 */ 3846 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3847 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 3848 i915_handle_error(dev, false, 3849 "Command parser error, iir 0x%08x", 3850 iir); 3851 3852 for_each_pipe(pipe) { 3853 int reg = PIPESTAT(pipe); 3854 pipe_stats[pipe] = I915_READ(reg); 3855 3856 /* 3857 * Clear the PIPE*STAT regs before the IIR 3858 */ 3859 if (pipe_stats[pipe] & 0x8000ffff) { 3860 I915_WRITE(reg, pipe_stats[pipe]); 3861 irq_received = true; 3862 } 3863 } 3864 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3865 3866 if (!irq_received) 3867 break; 3868 3869 ret = IRQ_HANDLED; 3870 3871 /* Consume port. Then clear IIR or we'll miss events */ 3872 if (iir & I915_DISPLAY_PORT_INTERRUPT) { 3873 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); 3874 u32 hotplug_trigger = hotplug_status & (IS_G4X(dev) ? 3875 HOTPLUG_INT_STATUS_G4X : 3876 HOTPLUG_INT_STATUS_I915); 3877 3878 intel_hpd_irq_handler(dev, hotplug_trigger, 3879 IS_G4X(dev) ? 
static irqreturn_t i965_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 iir, new_iir;
	u32 pipe_stats[I915_MAX_PIPES];
	unsigned long irqflags;
	int ret = IRQ_NONE, pipe;
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

	iir = I915_READ(IIR);

	for (;;) {
		bool irq_received = (iir & ~flip_mask) != 0;
		bool blc_event = false;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false,
					  "Command parser error, iir 0x%08x",
					  iir);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		if (!irq_received)
			break;

		ret = IRQ_HANDLED;

		/* Consume port. Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
			u32 hotplug_trigger = hotplug_status & (IS_G4X(dev) ?
								HOTPLUG_INT_STATUS_G4X :
								HOTPLUG_INT_STATUS_I915);

			intel_hpd_irq_handler(dev, hotplug_trigger,
					      IS_G4X(dev) ? hpd_status_g4x : hpd_status_i915);

			if (IS_G4X(dev) &&
			    (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X))
				dp_aux_irq_handler(dev);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			I915_READ(PORT_HOTPLUG_STAT);
		}

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);
		if (iir & I915_BSD_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[VCS]);

		for_each_pipe(pipe) {
			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev, pipe, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
			    intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
				DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
			gmbus_irq_handler(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero. If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read. This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		iir = new_iir;
	}

	i915_update_dri1_breadcrumb(dev);

	return ret;
}

static void i965_irq_uninstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	intel_hpd_irq_uninstall(dev_priv);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xffffffff);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe),
			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
	I915_WRITE(IIR, I915_READ(IIR));
}

static void intel_hpd_irq_reenable(unsigned long data)
{
	struct drm_i915_private *dev_priv = (struct drm_i915_private *)data;
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	unsigned long irqflags;
	int i;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
		struct drm_connector *connector;

		if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED)
			continue;

		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;

		list_for_each_entry(connector, &mode_config->connector_list, head) {
			struct intel_connector *intel_connector = to_intel_connector(connector);

			if (intel_connector->encoder->hpd_pin == i) {
				if (connector->polled != intel_connector->polled)
					DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
							 drm_get_connector_name(connector));
				connector->polled = intel_connector->polled;
				if (!connector->polled)
					connector->polled = DRM_CONNECTOR_POLL_HPD;
			}
		}
	}
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

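/*
 * Pick the irq handler, pre/post-install, uninstall and vblank callbacks
 * that match the running platform, plug them into the DRM driver structure,
 * and arm the hangcheck and hotplug-reenable timers (intel_hpd_irq_reenable
 * above).
 */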
void intel_irq_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
	INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);

	/* Let's track the enabled rps events */
	dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;

	setup_timer(&dev_priv->gpu_error.hangcheck_timer,
		    i915_hangcheck_elapsed,
		    (unsigned long) dev);
	setup_timer(&dev_priv->hotplug_reenable_timer, intel_hpd_irq_reenable,
		    (unsigned long) dev_priv);

	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);

	if (IS_GEN2(dev)) {
		dev->max_vblank_count = 0;
		dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
	} else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
	} else {
		dev->driver->get_vblank_counter = i915_get_vblank_counter;
		dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
		dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
	}

	if (IS_VALLEYVIEW(dev)) {
		dev->driver->irq_handler = valleyview_irq_handler;
		dev->driver->irq_preinstall = valleyview_irq_preinstall;
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
		dev->driver->irq_uninstall = valleyview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (IS_GEN8(dev)) {
		dev->driver->irq_handler = gen8_irq_handler;
		dev->driver->irq_preinstall = gen8_irq_preinstall;
		dev->driver->irq_postinstall = gen8_irq_postinstall;
		dev->driver->irq_uninstall = gen8_irq_uninstall;
		dev->driver->enable_vblank = gen8_enable_vblank;
		dev->driver->disable_vblank = gen8_disable_vblank;
		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
	} else if (HAS_PCH_SPLIT(dev)) {
		dev->driver->irq_handler = ironlake_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_preinstall;
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ironlake_enable_vblank;
		dev->driver->disable_vblank = ironlake_disable_vblank;
		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
	} else {
		if (INTEL_INFO(dev)->gen == 2) {
			dev->driver->irq_preinstall = i8xx_irq_preinstall;
			dev->driver->irq_postinstall = i8xx_irq_postinstall;
			dev->driver->irq_handler = i8xx_irq_handler;
			dev->driver->irq_uninstall = i8xx_irq_uninstall;
		} else if (INTEL_INFO(dev)->gen == 3) {
			dev->driver->irq_preinstall = i915_irq_preinstall;
			dev->driver->irq_postinstall = i915_irq_postinstall;
			dev->driver->irq_uninstall = i915_irq_uninstall;
			dev->driver->irq_handler = i915_irq_handler;
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		} else {
			dev->driver->irq_preinstall = i965_irq_preinstall;
			dev->driver->irq_postinstall = i965_irq_postinstall;
			dev->driver->irq_uninstall = i965_irq_uninstall;
			dev->driver->irq_handler = i965_irq_handler;
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		}
		dev->driver->enable_vblank = i915_enable_vblank;
		dev->driver->disable_vblank = i915_disable_vblank;
	}
}

void intel_hpd_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_connector *connector;
	unsigned long irqflags;
	int i;

	for (i = 1; i < HPD_NUM_PINS; i++) {
		dev_priv->hpd_stats[i].hpd_cnt = 0;
		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
	}
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		struct intel_connector *intel_connector = to_intel_connector(connector);
		connector->polled = intel_connector->polled;
		if (!connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
			connector->polled = DRM_CONNECTOR_POLL_HPD;
	}

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked checks happy. */
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

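/*
 * The two helpers below bracket runtime PM on HSW: the first saves the
 * current DEIMR/SDEIMR/GTIMR/GTIER/GEN6_PMIMR values into
 * dev_priv->pm.regsave, masks everything and sets pm.irqs_disabled; the
 * second warns if anything got unmasked in the meantime and then restores
 * the saved values.
 */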
/* Disable interrupts so we can allow runtime PM. */
void hsw_runtime_pm_disable_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	dev_priv->pm.regsave.deimr = I915_READ(DEIMR);
	dev_priv->pm.regsave.sdeimr = I915_READ(SDEIMR);
	dev_priv->pm.regsave.gtimr = I915_READ(GTIMR);
	dev_priv->pm.regsave.gtier = I915_READ(GTIER);
	dev_priv->pm.regsave.gen6_pmimr = I915_READ(GEN6_PMIMR);

	ironlake_disable_display_irq(dev_priv, 0xffffffff);
	ibx_disable_display_interrupt(dev_priv, 0xffffffff);
	ilk_disable_gt_irq(dev_priv, 0xffffffff);
	snb_disable_pm_irq(dev_priv, 0xffffffff);

	dev_priv->pm.irqs_disabled = true;

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

/* Restore interrupts so we can recover from runtime PM. */
void hsw_runtime_pm_restore_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;
	uint32_t val;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	val = I915_READ(DEIMR);
	WARN(val != 0xffffffff, "DEIMR is 0x%08x\n", val);

	val = I915_READ(SDEIMR);
	WARN(val != 0xffffffff, "SDEIMR is 0x%08x\n", val);

	val = I915_READ(GTIMR);
	WARN(val != 0xffffffff, "GTIMR is 0x%08x\n", val);

	val = I915_READ(GEN6_PMIMR);
	WARN(val != 0xffffffff, "GEN6_PMIMR is 0x%08x\n", val);

	dev_priv->pm.irqs_disabled = false;

	ironlake_enable_display_irq(dev_priv, ~dev_priv->pm.regsave.deimr);
	ibx_enable_display_interrupt(dev_priv, ~dev_priv->pm.regsave.sdeimr);
	ilk_enable_gt_irq(dev_priv, ~dev_priv->pm.regsave.gtimr);
	snb_enable_pm_irq(dev_priv, ~dev_priv->pm.regsave.gen6_pmimr);
	I915_WRITE(GTIER, dev_priv->pm.regsave.gtier);

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}