/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

static const u32 hpd_ibx[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_mask_i915[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_g4x[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

/* IIR can theoretically queue up two events. Be paranoid.
 */
#define GEN8_IRQ_RESET_NDX(type, which) do { \
	I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), 0); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
} while (0)

#define GEN5_IRQ_RESET(type) do { \
	I915_WRITE(type##IMR, 0xffffffff); \
	POSTING_READ(type##IMR); \
	I915_WRITE(type##IER, 0); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
} while (0)

/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
#define GEN5_ASSERT_IIR_IS_ZERO(reg) do { \
	u32 val = I915_READ(reg); \
	if (val) { \
		WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n", \
		     (reg), val); \
		I915_WRITE((reg), 0xffffffff); \
		POSTING_READ(reg); \
		I915_WRITE((reg), 0xffffffff); \
		POSTING_READ(reg); \
	} \
} while (0)

#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
	GEN5_ASSERT_IIR_IS_ZERO(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
	I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
	POSTING_READ(GEN8_##type##_IER(which)); \
} while (0)

#define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
	GEN5_ASSERT_IIR_IS_ZERO(type##IIR); \
	I915_WRITE(type##IMR, (imr_val)); \
	I915_WRITE(type##IER, (ier_val)); \
	POSTING_READ(type##IER); \
} while (0)
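/*
 * Illustrative note (a sketch, not part of the original source): the
 * token-pasting macros above expand once per interrupt block. Assuming the
 * GT register naming (GTIMR/GTIER/GTIIR) used elsewhere in the driver,
 * GEN5_IRQ_RESET(GT) expands to roughly:
 *
 *	I915_WRITE(GTIMR, 0xffffffff);
 *	POSTING_READ(GTIMR);
 *	I915_WRITE(GTIER, 0);
 *	I915_WRITE(GTIIR, 0xffffffff);
 *	POSTING_READ(GTIIR);
 *	I915_WRITE(GTIIR, 0xffffffff);
 *	POSTING_READ(GTIIR);
 *
 * i.e. mask everything, clear the enables, and clear IIR twice because IIR
 * can queue up two events.
 */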
/* For display hotplug interrupt */
static void
ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	if ((dev_priv->irq_mask & mask) != 0) {
		dev_priv->irq_mask &= ~mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

static void
ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (!intel_irqs_enabled(dev_priv))
		return;

	if ((dev_priv->irq_mask & mask) != mask) {
		dev_priv->irq_mask |= mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	dev_priv->gt_irq_mask &= ~interrupt_mask;
	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	POSTING_READ(GTIMR);
}

void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, mask);
}

void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, 0);
}

/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of
interrupt bits to enable 200 */ 201 static void snb_update_pm_irq(struct drm_i915_private *dev_priv, 202 uint32_t interrupt_mask, 203 uint32_t enabled_irq_mask) 204 { 205 uint32_t new_val; 206 207 assert_spin_locked(&dev_priv->irq_lock); 208 209 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 210 return; 211 212 new_val = dev_priv->pm_irq_mask; 213 new_val &= ~interrupt_mask; 214 new_val |= (~enabled_irq_mask & interrupt_mask); 215 216 if (new_val != dev_priv->pm_irq_mask) { 217 dev_priv->pm_irq_mask = new_val; 218 I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask); 219 POSTING_READ(GEN6_PMIMR); 220 } 221 } 222 223 void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask) 224 { 225 snb_update_pm_irq(dev_priv, mask, mask); 226 } 227 228 void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask) 229 { 230 snb_update_pm_irq(dev_priv, mask, 0); 231 } 232 233 static bool ivb_can_enable_err_int(struct drm_device *dev) 234 { 235 struct drm_i915_private *dev_priv = dev->dev_private; 236 struct intel_crtc *crtc; 237 enum pipe pipe; 238 239 assert_spin_locked(&dev_priv->irq_lock); 240 241 for_each_pipe(pipe) { 242 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); 243 244 if (crtc->cpu_fifo_underrun_disabled) 245 return false; 246 } 247 248 return true; 249 } 250 251 /** 252 * bdw_update_pm_irq - update GT interrupt 2 253 * @dev_priv: driver private 254 * @interrupt_mask: mask of interrupt bits to update 255 * @enabled_irq_mask: mask of interrupt bits to enable 256 * 257 * Copied from the snb function, updated with relevant register offsets 258 */ 259 static void bdw_update_pm_irq(struct drm_i915_private *dev_priv, 260 uint32_t interrupt_mask, 261 uint32_t enabled_irq_mask) 262 { 263 uint32_t new_val; 264 265 assert_spin_locked(&dev_priv->irq_lock); 266 267 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 268 return; 269 270 new_val = dev_priv->pm_irq_mask; 271 new_val &= ~interrupt_mask; 272 new_val |= (~enabled_irq_mask & interrupt_mask); 273 274 if (new_val != dev_priv->pm_irq_mask) { 275 dev_priv->pm_irq_mask = new_val; 276 I915_WRITE(GEN8_GT_IMR(2), dev_priv->pm_irq_mask); 277 POSTING_READ(GEN8_GT_IMR(2)); 278 } 279 } 280 281 void gen8_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask) 282 { 283 bdw_update_pm_irq(dev_priv, mask, mask); 284 } 285 286 void gen8_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask) 287 { 288 bdw_update_pm_irq(dev_priv, mask, 0); 289 } 290 291 static bool cpt_can_enable_serr_int(struct drm_device *dev) 292 { 293 struct drm_i915_private *dev_priv = dev->dev_private; 294 enum pipe pipe; 295 struct intel_crtc *crtc; 296 297 assert_spin_locked(&dev_priv->irq_lock); 298 299 for_each_pipe(pipe) { 300 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); 301 302 if (crtc->pch_fifo_underrun_disabled) 303 return false; 304 } 305 306 return true; 307 } 308 309 void i9xx_check_fifo_underruns(struct drm_device *dev) 310 { 311 struct drm_i915_private *dev_priv = dev->dev_private; 312 struct intel_crtc *crtc; 313 unsigned long flags; 314 315 spin_lock_irqsave(&dev_priv->irq_lock, flags); 316 317 for_each_intel_crtc(dev, crtc) { 318 u32 reg = PIPESTAT(crtc->pipe); 319 u32 pipestat; 320 321 if (crtc->cpu_fifo_underrun_disabled) 322 continue; 323 324 pipestat = I915_READ(reg) & 0xffff0000; 325 if ((pipestat & PIPE_FIFO_UNDERRUN_STATUS) == 0) 326 continue; 327 328 I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS); 329 POSTING_READ(reg); 330 331 DRM_ERROR("pipe %c underrun\n", pipe_name(crtc->pipe)); 332 } 333 334 
spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 335 } 336 337 static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev, 338 enum pipe pipe, 339 bool enable, bool old) 340 { 341 struct drm_i915_private *dev_priv = dev->dev_private; 342 u32 reg = PIPESTAT(pipe); 343 u32 pipestat = I915_READ(reg) & 0xffff0000; 344 345 assert_spin_locked(&dev_priv->irq_lock); 346 347 if (enable) { 348 I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS); 349 POSTING_READ(reg); 350 } else { 351 if (old && pipestat & PIPE_FIFO_UNDERRUN_STATUS) 352 DRM_ERROR("pipe %c underrun\n", pipe_name(pipe)); 353 } 354 } 355 356 static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev, 357 enum pipe pipe, bool enable) 358 { 359 struct drm_i915_private *dev_priv = dev->dev_private; 360 uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN : 361 DE_PIPEB_FIFO_UNDERRUN; 362 363 if (enable) 364 ironlake_enable_display_irq(dev_priv, bit); 365 else 366 ironlake_disable_display_irq(dev_priv, bit); 367 } 368 369 static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev, 370 enum pipe pipe, 371 bool enable, bool old) 372 { 373 struct drm_i915_private *dev_priv = dev->dev_private; 374 if (enable) { 375 I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe)); 376 377 if (!ivb_can_enable_err_int(dev)) 378 return; 379 380 ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB); 381 } else { 382 ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB); 383 384 if (old && 385 I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe)) { 386 DRM_ERROR("uncleared fifo underrun on pipe %c\n", 387 pipe_name(pipe)); 388 } 389 } 390 } 391 392 static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev, 393 enum pipe pipe, bool enable) 394 { 395 struct drm_i915_private *dev_priv = dev->dev_private; 396 397 assert_spin_locked(&dev_priv->irq_lock); 398 399 if (enable) 400 dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_FIFO_UNDERRUN; 401 else 402 dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_FIFO_UNDERRUN; 403 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); 404 POSTING_READ(GEN8_DE_PIPE_IMR(pipe)); 405 } 406 407 /** 408 * ibx_display_interrupt_update - update SDEIMR 409 * @dev_priv: driver private 410 * @interrupt_mask: mask of interrupt bits to update 411 * @enabled_irq_mask: mask of interrupt bits to enable 412 */ 413 static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv, 414 uint32_t interrupt_mask, 415 uint32_t enabled_irq_mask) 416 { 417 uint32_t sdeimr = I915_READ(SDEIMR); 418 sdeimr &= ~interrupt_mask; 419 sdeimr |= (~enabled_irq_mask & interrupt_mask); 420 421 assert_spin_locked(&dev_priv->irq_lock); 422 423 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 424 return; 425 426 I915_WRITE(SDEIMR, sdeimr); 427 POSTING_READ(SDEIMR); 428 } 429 #define ibx_enable_display_interrupt(dev_priv, bits) \ 430 ibx_display_interrupt_update((dev_priv), (bits), (bits)) 431 #define ibx_disable_display_interrupt(dev_priv, bits) \ 432 ibx_display_interrupt_update((dev_priv), (bits), 0) 433 434 static void ibx_set_fifo_underrun_reporting(struct drm_device *dev, 435 enum transcoder pch_transcoder, 436 bool enable) 437 { 438 struct drm_i915_private *dev_priv = dev->dev_private; 439 uint32_t bit = (pch_transcoder == TRANSCODER_A) ? 
		SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;

	if (enable)
		ibx_enable_display_interrupt(dev_priv, bit);
	else
		ibx_disable_display_interrupt(dev_priv, bit);
}

static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
					    enum transcoder pch_transcoder,
					    bool enable, bool old)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (enable) {
		I915_WRITE(SERR_INT,
			   SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));

		if (!cpt_can_enable_serr_int(dev))
			return;

		ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT);
	} else {
		ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT);

		if (old && I915_READ(SERR_INT) &
		    SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)) {
			DRM_ERROR("uncleared pch fifo underrun on pch transcoder %c\n",
				  transcoder_name(pch_transcoder));
		}
	}
}

/**
 * intel_set_cpu_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pipe: pipe
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable CPU fifo underruns for a specific
 * pipe. Notice that on some Gens (e.g. IVB, HSW), disabling FIFO underrun
 * reporting for one pipe may also disable all the other CPU error interrupts
 * for the other pipes, because there is just one interrupt mask/enable bit
 * for all the pipes.
 *
 * Returns the previous state of underrun reporting.
 */
static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
						    enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	bool old;

	assert_spin_locked(&dev_priv->irq_lock);

	old = !intel_crtc->cpu_fifo_underrun_disabled;
	intel_crtc->cpu_fifo_underrun_disabled = !enable;

	if (INTEL_INFO(dev)->gen < 5 || IS_VALLEYVIEW(dev))
		i9xx_set_fifo_underrun_reporting(dev, pipe, enable, old);
	else if (IS_GEN5(dev) || IS_GEN6(dev))
		ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
	else if (IS_GEN7(dev))
		ivybridge_set_fifo_underrun_reporting(dev, pipe, enable, old);
	else if (IS_GEN8(dev))
		broadwell_set_fifo_underrun_reporting(dev, pipe, enable);

	return old;
}

bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
					   enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	ret = __intel_set_cpu_fifo_underrun_reporting(dev, pipe, enable);
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	return ret;
}

static bool __cpu_fifo_underrun_reporting_enabled(struct drm_device *dev,
						  enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	return !intel_crtc->cpu_fifo_underrun_disabled;
}
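/*
 * Typical usage sketch (illustrative only; the caller and the surrounding
 * code are hypothetical): a path that expects a transient underrun can
 * suppress reporting around the critical section and then restore the
 * previous state, which is why the function returns the old value:
 *
 *	bool old = intel_set_cpu_fifo_underrun_reporting(dev, pipe, false);
 *	... reconfigure the pipe ...
 *	intel_set_cpu_fifo_underrun_reporting(dev, pipe, old);
 */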
/**
 * intel_set_pch_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable PCH fifo underruns for a specific
 * PCH transcoder. Notice that on some PCHs (e.g. CPT/PPT), disabling FIFO
 * underrun reporting for one transcoder may also disable all the other PCH
 * error interrupts for the other transcoders, because there is just one
 * interrupt mask/enable bit for all the transcoders.
 *
 * Returns the previous state of underrun reporting.
 */
bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
					   enum transcoder pch_transcoder,
					   bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	unsigned long flags;
	bool old;

	/*
	 * NOTE: Pre-LPT has a fixed cpu pipe -> pch transcoder mapping, but LPT
	 * has only one pch transcoder A that all pipes can use. To avoid racy
	 * pch transcoder -> pipe lookups from interrupt code simply store the
	 * underrun statistics in crtc A. Since we never expose this anywhere
	 * nor use it outside of the fifo underrun code here using the "wrong"
	 * crtc on LPT won't cause issues.
	 */

	spin_lock_irqsave(&dev_priv->irq_lock, flags);

	old = !intel_crtc->pch_fifo_underrun_disabled;
	intel_crtc->pch_fifo_underrun_disabled = !enable;

	if (HAS_PCH_IBX(dev))
		ibx_set_fifo_underrun_reporting(dev, pch_transcoder, enable);
	else
		cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable, old);

	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
	return old;
}


static void
__i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		       u32 enable_mask, u32 status_mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == enable_mask)
		return;

	dev_priv->pipestat_irq_mask[pipe] |= status_mask;

	/* Enable the interrupt, clear any pending status */
	pipestat |= enable_mask | status_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

static void
__i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
			u32 enable_mask, u32 status_mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == 0)
		return;

	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;

	pipestat &= ~enable_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
{
	u32 enable_mask = status_mask << 16;

	/*
	 * On pipe A we don't support the PSR interrupt yet,
	 * on pipe B and C the same bit MBZ.
642 */ 643 if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV)) 644 return 0; 645 /* 646 * On pipe B and C we don't support the PSR interrupt yet, on pipe 647 * A the same bit is for perf counters which we don't use either. 648 */ 649 if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV)) 650 return 0; 651 652 enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS | 653 SPRITE0_FLIP_DONE_INT_EN_VLV | 654 SPRITE1_FLIP_DONE_INT_EN_VLV); 655 if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV) 656 enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV; 657 if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV) 658 enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV; 659 660 return enable_mask; 661 } 662 663 void 664 i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, 665 u32 status_mask) 666 { 667 u32 enable_mask; 668 669 if (IS_VALLEYVIEW(dev_priv->dev)) 670 enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev, 671 status_mask); 672 else 673 enable_mask = status_mask << 16; 674 __i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask); 675 } 676 677 void 678 i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, 679 u32 status_mask) 680 { 681 u32 enable_mask; 682 683 if (IS_VALLEYVIEW(dev_priv->dev)) 684 enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev, 685 status_mask); 686 else 687 enable_mask = status_mask << 16; 688 __i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask); 689 } 690 691 /** 692 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion 693 */ 694 static void i915_enable_asle_pipestat(struct drm_device *dev) 695 { 696 struct drm_i915_private *dev_priv = dev->dev_private; 697 unsigned long irqflags; 698 699 if (!dev_priv->opregion.asle || !IS_MOBILE(dev)) 700 return; 701 702 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 703 704 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS); 705 if (INTEL_INFO(dev)->gen >= 4) 706 i915_enable_pipestat(dev_priv, PIPE_A, 707 PIPE_LEGACY_BLC_EVENT_STATUS); 708 709 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 710 } 711 712 /** 713 * i915_pipe_enabled - check if a pipe is enabled 714 * @dev: DRM device 715 * @pipe: pipe to check 716 * 717 * Reading certain registers when the pipe is disabled can hang the chip. 718 * Use this routine to make sure the PLL is running and the pipe is active 719 * before reading such registers if unsure. 720 */ 721 static int 722 i915_pipe_enabled(struct drm_device *dev, int pipe) 723 { 724 struct drm_i915_private *dev_priv = dev->dev_private; 725 726 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 727 /* Locking is horribly broken here, but whatever. */ 728 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 729 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 730 731 return intel_crtc->active; 732 } else { 733 return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE; 734 } 735 } 736 737 /* 738 * This timing diagram depicts the video signal in and 739 * around the vertical blanking period. 740 * 741 * Assumptions about the fictitious mode used in this example: 742 * vblank_start >= 3 743 * vsync_start = vblank_start + 1 744 * vsync_end = vblank_start + 2 745 * vtotal = vblank_start + 3 746 * 747 * start of vblank: 748 * latch double buffered registers 749 * increment frame counter (ctg+) 750 * generate start of vblank interrupt (gen4+) 751 * | 752 * | frame start: 753 * | generate frame start interrupt (aka. 
vblank interrupt) (gmch) 754 * | may be shifted forward 1-3 extra lines via PIPECONF 755 * | | 756 * | | start of vsync: 757 * | | generate vsync interrupt 758 * | | | 759 * ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx 760 * . \hs/ . \hs/ \hs/ \hs/ . \hs/ 761 * ----va---> <-----------------vb--------------------> <--------va------------- 762 * | | <----vs-----> | 763 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2) 764 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+) 765 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi) 766 * | | | 767 * last visible pixel first visible pixel 768 * | increment frame counter (gen3/4) 769 * pixel counter = vblank_start * htotal pixel counter = 0 (gen3/4) 770 * 771 * x = horizontal active 772 * _ = horizontal blanking 773 * hs = horizontal sync 774 * va = vertical active 775 * vb = vertical blanking 776 * vs = vertical sync 777 * vbs = vblank_start (number) 778 * 779 * Summary: 780 * - most events happen at the start of horizontal sync 781 * - frame start happens at the start of horizontal blank, 1-4 lines 782 * (depending on PIPECONF settings) after the start of vblank 783 * - gen3/4 pixel and frame counter are synchronized with the start 784 * of horizontal active on the first line of vertical active 785 */ 786 787 static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe) 788 { 789 /* Gen2 doesn't have a hardware frame counter */ 790 return 0; 791 } 792 793 /* Called from drm generic code, passed a 'crtc', which 794 * we use as a pipe index 795 */ 796 static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe) 797 { 798 struct drm_i915_private *dev_priv = dev->dev_private; 799 unsigned long high_frame; 800 unsigned long low_frame; 801 u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal; 802 803 if (!i915_pipe_enabled(dev, pipe)) { 804 DRM_DEBUG_DRIVER("trying to get vblank count for disabled " 805 "pipe %c\n", pipe_name(pipe)); 806 return 0; 807 } 808 809 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 810 struct intel_crtc *intel_crtc = 811 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); 812 const struct drm_display_mode *mode = 813 &intel_crtc->config.adjusted_mode; 814 815 htotal = mode->crtc_htotal; 816 hsync_start = mode->crtc_hsync_start; 817 vbl_start = mode->crtc_vblank_start; 818 if (mode->flags & DRM_MODE_FLAG_INTERLACE) 819 vbl_start = DIV_ROUND_UP(vbl_start, 2); 820 } else { 821 enum transcoder cpu_transcoder = (enum transcoder) pipe; 822 823 htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1; 824 hsync_start = (I915_READ(HSYNC(cpu_transcoder)) & 0x1fff) + 1; 825 vbl_start = (I915_READ(VBLANK(cpu_transcoder)) & 0x1fff) + 1; 826 if ((I915_READ(PIPECONF(cpu_transcoder)) & 827 PIPECONF_INTERLACE_MASK) != PIPECONF_PROGRESSIVE) 828 vbl_start = DIV_ROUND_UP(vbl_start, 2); 829 } 830 831 /* Convert to pixel count */ 832 vbl_start *= htotal; 833 834 /* Start of vblank event occurs at start of hsync */ 835 vbl_start -= htotal - hsync_start; 836 837 high_frame = PIPEFRAME(pipe); 838 low_frame = PIPEFRAMEPIXEL(pipe); 839 840 /* 841 * High & low register fields aren't synchronized, so make sure 842 * we get a low value that's stable across two reads of the high 843 * register. 
844 */ 845 do { 846 high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK; 847 low = I915_READ(low_frame); 848 high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK; 849 } while (high1 != high2); 850 851 high1 >>= PIPE_FRAME_HIGH_SHIFT; 852 pixel = low & PIPE_PIXEL_MASK; 853 low >>= PIPE_FRAME_LOW_SHIFT; 854 855 /* 856 * The frame counter increments at beginning of active. 857 * Cook up a vblank counter by also checking the pixel 858 * counter against vblank start. 859 */ 860 return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff; 861 } 862 863 static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe) 864 { 865 struct drm_i915_private *dev_priv = dev->dev_private; 866 int reg = PIPE_FRMCOUNT_GM45(pipe); 867 868 if (!i915_pipe_enabled(dev, pipe)) { 869 DRM_DEBUG_DRIVER("trying to get vblank count for disabled " 870 "pipe %c\n", pipe_name(pipe)); 871 return 0; 872 } 873 874 return I915_READ(reg); 875 } 876 877 /* raw reads, only for fast reads of display block, no need for forcewake etc. */ 878 #define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__)) 879 880 static int __intel_get_crtc_scanline(struct intel_crtc *crtc) 881 { 882 struct drm_device *dev = crtc->base.dev; 883 struct drm_i915_private *dev_priv = dev->dev_private; 884 const struct drm_display_mode *mode = &crtc->config.adjusted_mode; 885 enum pipe pipe = crtc->pipe; 886 int position, vtotal; 887 888 vtotal = mode->crtc_vtotal; 889 if (mode->flags & DRM_MODE_FLAG_INTERLACE) 890 vtotal /= 2; 891 892 if (IS_GEN2(dev)) 893 position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2; 894 else 895 position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3; 896 897 /* 898 * See update_scanline_offset() for the details on the 899 * scanline_offset adjustment. 900 */ 901 return (position + crtc->scanline_offset) % vtotal; 902 } 903 904 static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe, 905 unsigned int flags, int *vpos, int *hpos, 906 ktime_t *stime, ktime_t *etime) 907 { 908 struct drm_i915_private *dev_priv = dev->dev_private; 909 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 910 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 911 const struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode; 912 int position; 913 int vbl_start, vbl_end, hsync_start, htotal, vtotal; 914 bool in_vbl = true; 915 int ret = 0; 916 unsigned long irqflags; 917 918 if (!intel_crtc->active) { 919 DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled " 920 "pipe %c\n", pipe_name(pipe)); 921 return 0; 922 } 923 924 htotal = mode->crtc_htotal; 925 hsync_start = mode->crtc_hsync_start; 926 vtotal = mode->crtc_vtotal; 927 vbl_start = mode->crtc_vblank_start; 928 vbl_end = mode->crtc_vblank_end; 929 930 if (mode->flags & DRM_MODE_FLAG_INTERLACE) { 931 vbl_start = DIV_ROUND_UP(vbl_start, 2); 932 vbl_end /= 2; 933 vtotal /= 2; 934 } 935 936 ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE; 937 938 /* 939 * Lock uncore.lock, as we will do multiple timing critical raw 940 * register reads, potentially with preemption disabled, so the 941 * following code must not block on uncore.lock. 942 */ 943 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 944 945 /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */ 946 947 /* Get optional system timestamp before query. */ 948 if (stime) 949 *stime = ktime_get(); 950 951 if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) { 952 /* No obvious pixelcount register. 
		 * Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = __intel_get_crtc_scanline(intel_crtc);
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;

		/*
		 * In interlaced modes, the pixel counter counts all pixels,
		 * so one field will have htotal more pixels. In order to avoid
		 * the reported position from jumping backwards when the pixel
		 * counter is beyond the length of the shorter field, just
		 * clamp the position to the length of the shorter field. This
		 * matches how the scanline counter based position works since
		 * the scanline counter doesn't count the two half lines.
		 */
		if (position >= vtotal)
			position = vtotal - 1;

		/*
		 * Start of vblank interrupt is triggered at start of hsync,
		 * just prior to the first active line of vblank. However we
		 * consider lines to start at the leading edge of horizontal
		 * active. So, should we get here before we've crossed into
		 * the horizontal active of the first line in vblank, we would
		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
		 * always add htotal-hsync_start to the current pixel position.
		 */
		position = (position + htotal - hsync_start) % vtotal;
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	in_vbl = position >= vbl_start && position < vbl_end;

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		*vpos = position;
		*hpos = 0;
	} else {
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* In vblank?
*/ 1022 if (in_vbl) 1023 ret |= DRM_SCANOUTPOS_INVBL; 1024 1025 return ret; 1026 } 1027 1028 int intel_get_crtc_scanline(struct intel_crtc *crtc) 1029 { 1030 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; 1031 unsigned long irqflags; 1032 int position; 1033 1034 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 1035 position = __intel_get_crtc_scanline(crtc); 1036 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 1037 1038 return position; 1039 } 1040 1041 static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe, 1042 int *max_error, 1043 struct timeval *vblank_time, 1044 unsigned flags) 1045 { 1046 struct drm_crtc *crtc; 1047 1048 if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) { 1049 DRM_ERROR("Invalid crtc %d\n", pipe); 1050 return -EINVAL; 1051 } 1052 1053 /* Get drm_crtc to timestamp: */ 1054 crtc = intel_get_crtc_for_pipe(dev, pipe); 1055 if (crtc == NULL) { 1056 DRM_ERROR("Invalid crtc %d\n", pipe); 1057 return -EINVAL; 1058 } 1059 1060 if (!crtc->enabled) { 1061 DRM_DEBUG_KMS("crtc %d is disabled\n", pipe); 1062 return -EBUSY; 1063 } 1064 1065 /* Helper routine in DRM core does all the work: */ 1066 return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error, 1067 vblank_time, flags, 1068 crtc, 1069 &to_intel_crtc(crtc)->config.adjusted_mode); 1070 } 1071 1072 static bool intel_hpd_irq_event(struct drm_device *dev, 1073 struct drm_connector *connector) 1074 { 1075 enum drm_connector_status old_status; 1076 1077 WARN_ON(!mutex_is_locked(&dev->mode_config.mutex)); 1078 old_status = connector->status; 1079 1080 connector->status = connector->funcs->detect(connector, false); 1081 if (old_status == connector->status) 1082 return false; 1083 1084 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n", 1085 connector->base.id, 1086 connector->name, 1087 drm_get_connector_status_name(old_status), 1088 drm_get_connector_status_name(connector->status)); 1089 1090 return true; 1091 } 1092 1093 static void i915_digport_work_func(struct work_struct *work) 1094 { 1095 struct drm_i915_private *dev_priv = 1096 container_of(work, struct drm_i915_private, dig_port_work); 1097 unsigned long irqflags; 1098 u32 long_port_mask, short_port_mask; 1099 struct intel_digital_port *intel_dig_port; 1100 int i, ret; 1101 u32 old_bits = 0; 1102 1103 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1104 long_port_mask = dev_priv->long_hpd_port_mask; 1105 dev_priv->long_hpd_port_mask = 0; 1106 short_port_mask = dev_priv->short_hpd_port_mask; 1107 dev_priv->short_hpd_port_mask = 0; 1108 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1109 1110 for (i = 0; i < I915_MAX_PORTS; i++) { 1111 bool valid = false; 1112 bool long_hpd = false; 1113 intel_dig_port = dev_priv->hpd_irq_port[i]; 1114 if (!intel_dig_port || !intel_dig_port->hpd_pulse) 1115 continue; 1116 1117 if (long_port_mask & (1 << i)) { 1118 valid = true; 1119 long_hpd = true; 1120 } else if (short_port_mask & (1 << i)) 1121 valid = true; 1122 1123 if (valid) { 1124 ret = intel_dig_port->hpd_pulse(intel_dig_port, long_hpd); 1125 if (ret == true) { 1126 /* if we get true fallback to old school hpd */ 1127 old_bits |= (1 << intel_dig_port->base.hpd_pin); 1128 } 1129 } 1130 } 1131 1132 if (old_bits) { 1133 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1134 dev_priv->hpd_event_bits |= old_bits; 1135 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1136 schedule_work(&dev_priv->hotplug_work); 1137 } 1138 } 1139 1140 /* 1141 * Handle hotplug events outside the interrupt handler proper. 
1142 */ 1143 #define I915_REENABLE_HOTPLUG_DELAY (2*60*1000) 1144 1145 static void i915_hotplug_work_func(struct work_struct *work) 1146 { 1147 struct drm_i915_private *dev_priv = 1148 container_of(work, struct drm_i915_private, hotplug_work); 1149 struct drm_device *dev = dev_priv->dev; 1150 struct drm_mode_config *mode_config = &dev->mode_config; 1151 struct intel_connector *intel_connector; 1152 struct intel_encoder *intel_encoder; 1153 struct drm_connector *connector; 1154 unsigned long irqflags; 1155 bool hpd_disabled = false; 1156 bool changed = false; 1157 u32 hpd_event_bits; 1158 1159 mutex_lock(&mode_config->mutex); 1160 DRM_DEBUG_KMS("running encoder hotplug functions\n"); 1161 1162 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1163 1164 hpd_event_bits = dev_priv->hpd_event_bits; 1165 dev_priv->hpd_event_bits = 0; 1166 list_for_each_entry(connector, &mode_config->connector_list, head) { 1167 intel_connector = to_intel_connector(connector); 1168 if (!intel_connector->encoder) 1169 continue; 1170 intel_encoder = intel_connector->encoder; 1171 if (intel_encoder->hpd_pin > HPD_NONE && 1172 dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED && 1173 connector->polled == DRM_CONNECTOR_POLL_HPD) { 1174 DRM_INFO("HPD interrupt storm detected on connector %s: " 1175 "switching from hotplug detection to polling\n", 1176 connector->name); 1177 dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED; 1178 connector->polled = DRM_CONNECTOR_POLL_CONNECT 1179 | DRM_CONNECTOR_POLL_DISCONNECT; 1180 hpd_disabled = true; 1181 } 1182 if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) { 1183 DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n", 1184 connector->name, intel_encoder->hpd_pin); 1185 } 1186 } 1187 /* if there were no outputs to poll, poll was disabled, 1188 * therefore make sure it's enabled when disabling HPD on 1189 * some connectors */ 1190 if (hpd_disabled) { 1191 drm_kms_helper_poll_enable(dev); 1192 mod_timer(&dev_priv->hotplug_reenable_timer, 1193 jiffies + msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY)); 1194 } 1195 1196 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1197 1198 list_for_each_entry(connector, &mode_config->connector_list, head) { 1199 intel_connector = to_intel_connector(connector); 1200 if (!intel_connector->encoder) 1201 continue; 1202 intel_encoder = intel_connector->encoder; 1203 if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) { 1204 if (intel_encoder->hot_plug) 1205 intel_encoder->hot_plug(intel_encoder); 1206 if (intel_hpd_irq_event(dev, connector)) 1207 changed = true; 1208 } 1209 } 1210 mutex_unlock(&mode_config->mutex); 1211 1212 if (changed) 1213 drm_kms_helper_hotplug_event(dev); 1214 } 1215 1216 static void intel_hpd_irq_uninstall(struct drm_i915_private *dev_priv) 1217 { 1218 del_timer_sync(&dev_priv->hotplug_reenable_timer); 1219 } 1220 1221 static void ironlake_rps_change_irq_handler(struct drm_device *dev) 1222 { 1223 struct drm_i915_private *dev_priv = dev->dev_private; 1224 u32 busy_up, busy_down, max_avg, min_avg; 1225 u8 new_delay; 1226 1227 spin_lock(&mchdev_lock); 1228 1229 I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS)); 1230 1231 new_delay = dev_priv->ips.cur_delay; 1232 1233 I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG); 1234 busy_up = I915_READ(RCPREVBSYTUPAVG); 1235 busy_down = I915_READ(RCPREVBSYTDNAVG); 1236 max_avg = I915_READ(RCBMAXAVG); 1237 min_avg = I915_READ(RCBMINAVG); 1238 1239 /* Handle RCS change request from hw */ 1240 if (busy_up > max_avg) { 1241 if 
	    (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	spin_unlock(&mchdev_lock);

	return;
}

static void notify_ring(struct drm_device *dev,
			struct intel_engine_cs *ring)
{
	if (!intel_ring_initialized(ring))
		return;

	trace_i915_gem_request_complete(ring);

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		intel_notify_mmio_flip(ring);

	wake_up_all(&ring->irq_queue);
	i915_queue_hangcheck(dev);
}

static u32 vlv_c0_residency(struct drm_i915_private *dev_priv,
			    struct intel_rps_ei *rps_ei)
{
	u32 cz_ts, cz_freq_khz;
	u32 render_count, media_count;
	u32 elapsed_render, elapsed_media, elapsed_time;
	u32 residency = 0;

	cz_ts = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
	cz_freq_khz = DIV_ROUND_CLOSEST(dev_priv->mem_freq * 1000, 4);

	render_count = I915_READ(VLV_RENDER_C0_COUNT_REG);
	media_count = I915_READ(VLV_MEDIA_C0_COUNT_REG);

	if (rps_ei->cz_clock == 0) {
		rps_ei->cz_clock = cz_ts;
		rps_ei->render_c0 = render_count;
		rps_ei->media_c0 = media_count;

		return dev_priv->rps.cur_freq;
	}

	elapsed_time = cz_ts - rps_ei->cz_clock;
	rps_ei->cz_clock = cz_ts;

	elapsed_render = render_count - rps_ei->render_c0;
	rps_ei->render_c0 = render_count;

	elapsed_media = media_count - rps_ei->media_c0;
	rps_ei->media_c0 = media_count;

	/* Convert all the counters into common unit of milli sec */
	elapsed_time /= VLV_CZ_CLOCK_TO_MILLI_SEC;
	elapsed_render /= cz_freq_khz;
	elapsed_media /= cz_freq_khz;

	/*
	 * Calculate overall C0 residency percentage
	 * only if elapsed time is non zero
	 */
	if (elapsed_time) {
		residency =
			((max(elapsed_render, elapsed_media) * 100)
				/ elapsed_time);
	}

	return residency;
}

/**
 * vlv_calc_delay_from_C0_counters - Increase/Decrease freq based on GPU
 * busy-ness calculated from C0 counters of render & media power wells
 * @dev_priv: DRM device private
 *
 */
static u32 vlv_calc_delay_from_C0_counters(struct drm_i915_private *dev_priv)
{
	u32 residency_C0_up = 0, residency_C0_down = 0;
	u8 new_delay, adj;

	dev_priv->rps.ei_interrupt_count++;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));


	if (dev_priv->rps.up_ei.cz_clock == 0) {
		vlv_c0_residency(dev_priv, &dev_priv->rps.up_ei);
		vlv_c0_residency(dev_priv, &dev_priv->rps.down_ei);
		return dev_priv->rps.cur_freq;
	}


	/*
	 * To down throttle, C0 residency should be less than down threshold
	 * for continuous EI intervals.
So calculate down EI counters 1350 * once in VLV_INT_COUNT_FOR_DOWN_EI 1351 */ 1352 if (dev_priv->rps.ei_interrupt_count == VLV_INT_COUNT_FOR_DOWN_EI) { 1353 1354 dev_priv->rps.ei_interrupt_count = 0; 1355 1356 residency_C0_down = vlv_c0_residency(dev_priv, 1357 &dev_priv->rps.down_ei); 1358 } else { 1359 residency_C0_up = vlv_c0_residency(dev_priv, 1360 &dev_priv->rps.up_ei); 1361 } 1362 1363 new_delay = dev_priv->rps.cur_freq; 1364 1365 adj = dev_priv->rps.last_adj; 1366 /* C0 residency is greater than UP threshold. Increase Frequency */ 1367 if (residency_C0_up >= VLV_RP_UP_EI_THRESHOLD) { 1368 if (adj > 0) 1369 adj *= 2; 1370 else 1371 adj = 1; 1372 1373 if (dev_priv->rps.cur_freq < dev_priv->rps.max_freq_softlimit) 1374 new_delay = dev_priv->rps.cur_freq + adj; 1375 1376 /* 1377 * For better performance, jump directly 1378 * to RPe if we're below it. 1379 */ 1380 if (new_delay < dev_priv->rps.efficient_freq) 1381 new_delay = dev_priv->rps.efficient_freq; 1382 1383 } else if (!dev_priv->rps.ei_interrupt_count && 1384 (residency_C0_down < VLV_RP_DOWN_EI_THRESHOLD)) { 1385 if (adj < 0) 1386 adj *= 2; 1387 else 1388 adj = -1; 1389 /* 1390 * This means, C0 residency is less than down threshold over 1391 * a period of VLV_INT_COUNT_FOR_DOWN_EI. So, reduce the freq 1392 */ 1393 if (dev_priv->rps.cur_freq > dev_priv->rps.min_freq_softlimit) 1394 new_delay = dev_priv->rps.cur_freq + adj; 1395 } 1396 1397 return new_delay; 1398 } 1399 1400 static void gen6_pm_rps_work(struct work_struct *work) 1401 { 1402 struct drm_i915_private *dev_priv = 1403 container_of(work, struct drm_i915_private, rps.work); 1404 u32 pm_iir; 1405 int new_delay, adj; 1406 1407 spin_lock_irq(&dev_priv->irq_lock); 1408 pm_iir = dev_priv->rps.pm_iir; 1409 dev_priv->rps.pm_iir = 0; 1410 if (INTEL_INFO(dev_priv->dev)->gen >= 8) 1411 gen8_enable_pm_irq(dev_priv, dev_priv->pm_rps_events); 1412 else { 1413 /* Make sure not to corrupt PMIMR state used by ringbuffer */ 1414 gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events); 1415 } 1416 spin_unlock_irq(&dev_priv->irq_lock); 1417 1418 /* Make sure we didn't queue anything we're not going to process. */ 1419 WARN_ON(pm_iir & ~dev_priv->pm_rps_events); 1420 1421 if ((pm_iir & dev_priv->pm_rps_events) == 0) 1422 return; 1423 1424 mutex_lock(&dev_priv->rps.hw_lock); 1425 1426 adj = dev_priv->rps.last_adj; 1427 if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) { 1428 if (adj > 0) 1429 adj *= 2; 1430 else { 1431 /* CHV needs even encode values */ 1432 adj = IS_CHERRYVIEW(dev_priv->dev) ? 2 : 1; 1433 } 1434 new_delay = dev_priv->rps.cur_freq + adj; 1435 1436 /* 1437 * For better performance, jump directly 1438 * to RPe if we're below it. 1439 */ 1440 if (new_delay < dev_priv->rps.efficient_freq) 1441 new_delay = dev_priv->rps.efficient_freq; 1442 } else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) { 1443 if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq) 1444 new_delay = dev_priv->rps.efficient_freq; 1445 else 1446 new_delay = dev_priv->rps.min_freq_softlimit; 1447 adj = 0; 1448 } else if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) { 1449 new_delay = vlv_calc_delay_from_C0_counters(dev_priv); 1450 } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) { 1451 if (adj < 0) 1452 adj *= 2; 1453 else { 1454 /* CHV needs even encode values */ 1455 adj = IS_CHERRYVIEW(dev_priv->dev) ? 
			-2 : -1;
		}
		new_delay = dev_priv->rps.cur_freq + adj;
	} else { /* unknown event */
		new_delay = dev_priv->rps.cur_freq;
	}

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	new_delay = clamp_t(int, new_delay,
			    dev_priv->rps.min_freq_softlimit,
			    dev_priv->rps.max_freq_softlimit);

	dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_freq;

	if (IS_VALLEYVIEW(dev_priv->dev))
		valleyview_set_rps(dev_priv->dev, new_delay);
	else
		gen6_set_rps(dev_priv->dev, new_delay);

	mutex_unlock(&dev_priv->rps.hw_lock);
}


/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * the same row is more likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	uint32_t misccpctl;
	unsigned long flags;
	uint8_t slice = 0;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->dev->struct_mutex);

	/* If we've screwed up tracking, just let the interrupt fire again */
	if (WARN_ON(!dev_priv->l3_parity.which_slice))
		goto out;

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		u32 reg;

		slice--;
		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1<<slice);

		reg = GEN7_L3CDERRST1 + (slice * 0x200);

		error_status = I915_READ(reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		POSTING_READ(reg);

		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

		kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj,
				   KOBJ_CHANGE, parity_event);

		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			  slice, row, bank, subbank);

		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
	}

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

out:
	WARN_ON(dev_priv->l3_parity.which_slice);
	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	mutex_unlock(&dev_priv->dev->struct_mutex);
}

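/*
 * For reference (a sketch, assuming I915_L3_PARITY_UEVENT expands to
 * "L3_PARITY_ERROR"): the uevent emitted above carries the faulting location
 * as environment variables, roughly:
 *
 *	L3_PARITY_ERROR=1
 *	ROW=42
 *	BANK=1
 *	SUBBANK=0
 *	SLICE=0
 *
 * Userspace listening for uevents on this device is expected to use
 * ROW/BANK/SUBBANK/SLICE to remap the bad row.
 */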
static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir) 1563 { 1564 struct drm_i915_private *dev_priv = dev->dev_private; 1565 1566 if (!HAS_L3_DPF(dev)) 1567 return; 1568 1569 spin_lock(&dev_priv->irq_lock); 1570 gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev)); 1571 spin_unlock(&dev_priv->irq_lock); 1572 1573 iir &= GT_PARITY_ERROR(dev); 1574 if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1) 1575 dev_priv->l3_parity.which_slice |= 1 << 1; 1576 1577 if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT) 1578 dev_priv->l3_parity.which_slice |= 1 << 0; 1579 1580 queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work); 1581 } 1582 1583 static void ilk_gt_irq_handler(struct drm_device *dev, 1584 struct drm_i915_private *dev_priv, 1585 u32 gt_iir) 1586 { 1587 if (gt_iir & 1588 (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT)) 1589 notify_ring(dev, &dev_priv->ring[RCS]); 1590 if (gt_iir & ILK_BSD_USER_INTERRUPT) 1591 notify_ring(dev, &dev_priv->ring[VCS]); 1592 } 1593 1594 static void snb_gt_irq_handler(struct drm_device *dev, 1595 struct drm_i915_private *dev_priv, 1596 u32 gt_iir) 1597 { 1598 1599 if (gt_iir & 1600 (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT)) 1601 notify_ring(dev, &dev_priv->ring[RCS]); 1602 if (gt_iir & GT_BSD_USER_INTERRUPT) 1603 notify_ring(dev, &dev_priv->ring[VCS]); 1604 if (gt_iir & GT_BLT_USER_INTERRUPT) 1605 notify_ring(dev, &dev_priv->ring[BCS]); 1606 1607 if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT | 1608 GT_BSD_CS_ERROR_INTERRUPT | 1609 GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) { 1610 i915_handle_error(dev, false, "GT error interrupt 0x%08x", 1611 gt_iir); 1612 } 1613 1614 if (gt_iir & GT_PARITY_ERROR(dev)) 1615 ivybridge_parity_error_irq_handler(dev, gt_iir); 1616 } 1617 1618 static void gen8_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir) 1619 { 1620 if ((pm_iir & dev_priv->pm_rps_events) == 0) 1621 return; 1622 1623 spin_lock(&dev_priv->irq_lock); 1624 dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events; 1625 gen8_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events); 1626 spin_unlock(&dev_priv->irq_lock); 1627 1628 queue_work(dev_priv->wq, &dev_priv->rps.work); 1629 } 1630 1631 static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev, 1632 struct drm_i915_private *dev_priv, 1633 u32 master_ctl) 1634 { 1635 u32 rcs, bcs, vcs; 1636 uint32_t tmp = 0; 1637 irqreturn_t ret = IRQ_NONE; 1638 1639 if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) { 1640 tmp = I915_READ(GEN8_GT_IIR(0)); 1641 if (tmp) { 1642 I915_WRITE(GEN8_GT_IIR(0), tmp); 1643 ret = IRQ_HANDLED; 1644 rcs = tmp >> GEN8_RCS_IRQ_SHIFT; 1645 bcs = tmp >> GEN8_BCS_IRQ_SHIFT; 1646 if (rcs & GT_RENDER_USER_INTERRUPT) 1647 notify_ring(dev, &dev_priv->ring[RCS]); 1648 if (bcs & GT_RENDER_USER_INTERRUPT) 1649 notify_ring(dev, &dev_priv->ring[BCS]); 1650 } else 1651 DRM_ERROR("The master control interrupt lied (GT0)!\n"); 1652 } 1653 1654 if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) { 1655 tmp = I915_READ(GEN8_GT_IIR(1)); 1656 if (tmp) { 1657 I915_WRITE(GEN8_GT_IIR(1), tmp); 1658 ret = IRQ_HANDLED; 1659 vcs = tmp >> GEN8_VCS1_IRQ_SHIFT; 1660 if (vcs & GT_RENDER_USER_INTERRUPT) 1661 notify_ring(dev, &dev_priv->ring[VCS]); 1662 vcs = tmp >> GEN8_VCS2_IRQ_SHIFT; 1663 if (vcs & GT_RENDER_USER_INTERRUPT) 1664 notify_ring(dev, &dev_priv->ring[VCS2]); 1665 } else 1666 DRM_ERROR("The master control interrupt lied (GT1)!\n"); 1667 } 1668 1669 if (master_ctl & GEN8_GT_PM_IRQ) { 1670 tmp = I915_READ(GEN8_GT_IIR(2)); 1671 if (tmp & 
dev_priv->pm_rps_events) { 1672 I915_WRITE(GEN8_GT_IIR(2), 1673 tmp & dev_priv->pm_rps_events); 1674 ret = IRQ_HANDLED; 1675 gen8_rps_irq_handler(dev_priv, tmp); 1676 } else 1677 DRM_ERROR("The master control interrupt lied (PM)!\n"); 1678 } 1679 1680 if (master_ctl & GEN8_GT_VECS_IRQ) { 1681 tmp = I915_READ(GEN8_GT_IIR(3)); 1682 if (tmp) { 1683 I915_WRITE(GEN8_GT_IIR(3), tmp); 1684 ret = IRQ_HANDLED; 1685 vcs = tmp >> GEN8_VECS_IRQ_SHIFT; 1686 if (vcs & GT_RENDER_USER_INTERRUPT) 1687 notify_ring(dev, &dev_priv->ring[VECS]); 1688 } else 1689 DRM_ERROR("The master control interrupt lied (GT3)!\n"); 1690 } 1691 1692 return ret; 1693 } 1694 1695 #define HPD_STORM_DETECT_PERIOD 1000 1696 #define HPD_STORM_THRESHOLD 5 1697 1698 static int ilk_port_to_hotplug_shift(enum port port) 1699 { 1700 switch (port) { 1701 case PORT_A: 1702 case PORT_E: 1703 default: 1704 return -1; 1705 case PORT_B: 1706 return 0; 1707 case PORT_C: 1708 return 8; 1709 case PORT_D: 1710 return 16; 1711 } 1712 } 1713 1714 static int g4x_port_to_hotplug_shift(enum port port) 1715 { 1716 switch (port) { 1717 case PORT_A: 1718 case PORT_E: 1719 default: 1720 return -1; 1721 case PORT_B: 1722 return 17; 1723 case PORT_C: 1724 return 19; 1725 case PORT_D: 1726 return 21; 1727 } 1728 } 1729 1730 static inline enum port get_port_from_pin(enum hpd_pin pin) 1731 { 1732 switch (pin) { 1733 case HPD_PORT_B: 1734 return PORT_B; 1735 case HPD_PORT_C: 1736 return PORT_C; 1737 case HPD_PORT_D: 1738 return PORT_D; 1739 default: 1740 return PORT_A; /* no hpd */ 1741 } 1742 } 1743 1744 static inline void intel_hpd_irq_handler(struct drm_device *dev, 1745 u32 hotplug_trigger, 1746 u32 dig_hotplug_reg, 1747 const u32 *hpd) 1748 { 1749 struct drm_i915_private *dev_priv = dev->dev_private; 1750 int i; 1751 enum port port; 1752 bool storm_detected = false; 1753 bool queue_dig = false, queue_hp = false; 1754 u32 dig_shift; 1755 u32 dig_port_mask = 0; 1756 1757 if (!hotplug_trigger) 1758 return; 1759 1760 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x\n", 1761 hotplug_trigger, dig_hotplug_reg); 1762 1763 spin_lock(&dev_priv->irq_lock); 1764 for (i = 1; i < HPD_NUM_PINS; i++) { 1765 if (!(hpd[i] & hotplug_trigger)) 1766 continue; 1767 1768 port = get_port_from_pin(i); 1769 if (port && dev_priv->hpd_irq_port[port]) { 1770 bool long_hpd; 1771 1772 if (IS_G4X(dev)) { 1773 dig_shift = g4x_port_to_hotplug_shift(port); 1774 long_hpd = (hotplug_trigger >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT; 1775 } else { 1776 dig_shift = ilk_port_to_hotplug_shift(port); 1777 long_hpd = (dig_hotplug_reg >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT; 1778 } 1779 1780 DRM_DEBUG_DRIVER("digital hpd port %d %d\n", port, long_hpd); 1781 /* for long HPD pulses we want to have the digital queue happen, 1782 but we still want HPD storm detection to function. */ 1783 if (long_hpd) { 1784 dev_priv->long_hpd_port_mask |= (1 << port); 1785 dig_port_mask |= hpd[i]; 1786 } else { 1787 /* for short HPD just trigger the digital queue */ 1788 dev_priv->short_hpd_port_mask |= (1 << port); 1789 hotplug_trigger &= ~hpd[i]; 1790 } 1791 queue_dig = true; 1792 } 1793 } 1794 1795 for (i = 1; i < HPD_NUM_PINS; i++) { 1796 if (hpd[i] & hotplug_trigger && 1797 dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED) { 1798 /* 1799 * On GMCH platforms the interrupt mask bits only 1800 * prevent irq generation, not the setting of the 1801 * hotplug bits itself. So only WARN about unexpected 1802 * interrupts on saner platforms. 
1803 */ 1804 WARN_ONCE(INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev), 1805 "Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n", 1806 hotplug_trigger, i, hpd[i]); 1807 1808 continue; 1809 } 1810 1811 if (!(hpd[i] & hotplug_trigger) || 1812 dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED) 1813 continue; 1814 1815 if (!(dig_port_mask & hpd[i])) { 1816 dev_priv->hpd_event_bits |= (1 << i); 1817 queue_hp = true; 1818 } 1819 1820 if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies, 1821 dev_priv->hpd_stats[i].hpd_last_jiffies 1822 + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) { 1823 dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies; 1824 dev_priv->hpd_stats[i].hpd_cnt = 0; 1825 DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", i); 1826 } else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) { 1827 dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED; 1828 dev_priv->hpd_event_bits &= ~(1 << i); 1829 DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i); 1830 storm_detected = true; 1831 } else { 1832 dev_priv->hpd_stats[i].hpd_cnt++; 1833 DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", i, 1834 dev_priv->hpd_stats[i].hpd_cnt); 1835 } 1836 } 1837 1838 if (storm_detected) 1839 dev_priv->display.hpd_irq_setup(dev); 1840 spin_unlock(&dev_priv->irq_lock); 1841 1842 /* 1843 * Our hotplug handler can grab modeset locks (by calling down into the 1844 * fb helpers). Hence it must not be run on our own dev-priv->wq work 1845 * queue for otherwise the flush_work in the pageflip code will 1846 * deadlock. 1847 */ 1848 if (queue_dig) 1849 queue_work(dev_priv->dp_wq, &dev_priv->dig_port_work); 1850 if (queue_hp) 1851 schedule_work(&dev_priv->hotplug_work); 1852 } 1853 1854 static void gmbus_irq_handler(struct drm_device *dev) 1855 { 1856 struct drm_i915_private *dev_priv = dev->dev_private; 1857 1858 wake_up_all(&dev_priv->gmbus_wait_queue); 1859 } 1860 1861 static void dp_aux_irq_handler(struct drm_device *dev) 1862 { 1863 struct drm_i915_private *dev_priv = dev->dev_private; 1864 1865 wake_up_all(&dev_priv->gmbus_wait_queue); 1866 } 1867 1868 #if defined(CONFIG_DEBUG_FS) 1869 static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe, 1870 uint32_t crc0, uint32_t crc1, 1871 uint32_t crc2, uint32_t crc3, 1872 uint32_t crc4) 1873 { 1874 struct drm_i915_private *dev_priv = dev->dev_private; 1875 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe]; 1876 struct intel_pipe_crc_entry *entry; 1877 int head, tail; 1878 1879 spin_lock(&pipe_crc->lock); 1880 1881 if (!pipe_crc->entries) { 1882 spin_unlock(&pipe_crc->lock); 1883 DRM_ERROR("spurious interrupt\n"); 1884 return; 1885 } 1886 1887 head = pipe_crc->head; 1888 tail = pipe_crc->tail; 1889 1890 if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) { 1891 spin_unlock(&pipe_crc->lock); 1892 DRM_ERROR("CRC buffer overflowing\n"); 1893 return; 1894 } 1895 1896 entry = &pipe_crc->entries[head]; 1897 1898 entry->frame = dev->driver->get_vblank_counter(dev, pipe); 1899 entry->crc[0] = crc0; 1900 entry->crc[1] = crc1; 1901 entry->crc[2] = crc2; 1902 entry->crc[3] = crc3; 1903 entry->crc[4] = crc4; 1904 1905 head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1); 1906 pipe_crc->head = head; 1907 1908 spin_unlock(&pipe_crc->lock); 1909 1910 wake_up_interruptible(&pipe_crc->wq); 1911 } 1912 #else 1913 static inline void 1914 display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe, 1915 uint32_t crc0, uint32_t crc1, 1916 uint32_t crc2, uint32_t crc3, 1917 
uint32_t crc4) {} 1918 #endif 1919 1920 1921 static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe) 1922 { 1923 struct drm_i915_private *dev_priv = dev->dev_private; 1924 1925 display_pipe_crc_irq_handler(dev, pipe, 1926 I915_READ(PIPE_CRC_RES_1_IVB(pipe)), 1927 0, 0, 0, 0); 1928 } 1929 1930 static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe) 1931 { 1932 struct drm_i915_private *dev_priv = dev->dev_private; 1933 1934 display_pipe_crc_irq_handler(dev, pipe, 1935 I915_READ(PIPE_CRC_RES_1_IVB(pipe)), 1936 I915_READ(PIPE_CRC_RES_2_IVB(pipe)), 1937 I915_READ(PIPE_CRC_RES_3_IVB(pipe)), 1938 I915_READ(PIPE_CRC_RES_4_IVB(pipe)), 1939 I915_READ(PIPE_CRC_RES_5_IVB(pipe))); 1940 } 1941 1942 static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe) 1943 { 1944 struct drm_i915_private *dev_priv = dev->dev_private; 1945 uint32_t res1, res2; 1946 1947 if (INTEL_INFO(dev)->gen >= 3) 1948 res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe)); 1949 else 1950 res1 = 0; 1951 1952 if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) 1953 res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe)); 1954 else 1955 res2 = 0; 1956 1957 display_pipe_crc_irq_handler(dev, pipe, 1958 I915_READ(PIPE_CRC_RES_RED(pipe)), 1959 I915_READ(PIPE_CRC_RES_GREEN(pipe)), 1960 I915_READ(PIPE_CRC_RES_BLUE(pipe)), 1961 res1, res2); 1962 } 1963 1964 /* The RPS events need forcewake, so we add them to a work queue and mask their 1965 * IMR bits until the work is done. Other interrupts can be processed without 1966 * the work queue. */ 1967 static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir) 1968 { 1969 if (pm_iir & dev_priv->pm_rps_events) { 1970 spin_lock(&dev_priv->irq_lock); 1971 dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events; 1972 gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events); 1973 spin_unlock(&dev_priv->irq_lock); 1974 1975 queue_work(dev_priv->wq, &dev_priv->rps.work); 1976 } 1977 1978 if (HAS_VEBOX(dev_priv->dev)) { 1979 if (pm_iir & PM_VEBOX_USER_INTERRUPT) 1980 notify_ring(dev_priv->dev, &dev_priv->ring[VECS]); 1981 1982 if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) { 1983 i915_handle_error(dev_priv->dev, false, 1984 "VEBOX CS error interrupt 0x%08x", 1985 pm_iir); 1986 } 1987 } 1988 } 1989 1990 static bool intel_pipe_handle_vblank(struct drm_device *dev, enum pipe pipe) 1991 { 1992 struct intel_crtc *crtc; 1993 1994 if (!drm_handle_vblank(dev, pipe)) 1995 return false; 1996 1997 crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe)); 1998 wake_up(&crtc->vbl_wait); 1999 2000 return true; 2001 } 2002 2003 static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir) 2004 { 2005 struct drm_i915_private *dev_priv = dev->dev_private; 2006 u32 pipe_stats[I915_MAX_PIPES] = { }; 2007 int pipe; 2008 2009 spin_lock(&dev_priv->irq_lock); 2010 for_each_pipe(pipe) { 2011 int reg; 2012 u32 mask, iir_bit = 0; 2013 2014 /* 2015 * PIPESTAT bits get signalled even when the interrupt is 2016 * disabled with the mask bits, and some of the status bits do 2017 * not generate interrupts at all (like the underrun bit). Hence 2018 * we need to be careful that we only handle what we want to 2019 * handle. 
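	 * Note that the write-back below includes the PIPESTAT enable bits we
	 * read, so those stay set, while writing 1s to the latched status bits
	 * clears them.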
2020 */ 2021 mask = 0; 2022 if (__cpu_fifo_underrun_reporting_enabled(dev, pipe)) 2023 mask |= PIPE_FIFO_UNDERRUN_STATUS; 2024 2025 switch (pipe) { 2026 case PIPE_A: 2027 iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT; 2028 break; 2029 case PIPE_B: 2030 iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; 2031 break; 2032 case PIPE_C: 2033 iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT; 2034 break; 2035 } 2036 if (iir & iir_bit) 2037 mask |= dev_priv->pipestat_irq_mask[pipe]; 2038 2039 if (!mask) 2040 continue; 2041 2042 reg = PIPESTAT(pipe); 2043 mask |= PIPESTAT_INT_ENABLE_MASK; 2044 pipe_stats[pipe] = I915_READ(reg) & mask; 2045 2046 /* 2047 * Clear the PIPE*STAT regs before the IIR 2048 */ 2049 if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS | 2050 PIPESTAT_INT_STATUS_MASK)) 2051 I915_WRITE(reg, pipe_stats[pipe]); 2052 } 2053 spin_unlock(&dev_priv->irq_lock); 2054 2055 for_each_pipe(pipe) { 2056 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS) 2057 intel_pipe_handle_vblank(dev, pipe); 2058 2059 if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) { 2060 intel_prepare_page_flip(dev, pipe); 2061 intel_finish_page_flip(dev, pipe); 2062 } 2063 2064 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 2065 i9xx_pipe_crc_irq_handler(dev, pipe); 2066 2067 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS && 2068 intel_set_cpu_fifo_underrun_reporting(dev, pipe, false)) 2069 DRM_ERROR("pipe %c underrun\n", pipe_name(pipe)); 2070 } 2071 2072 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 2073 gmbus_irq_handler(dev); 2074 } 2075 2076 static void i9xx_hpd_irq_handler(struct drm_device *dev) 2077 { 2078 struct drm_i915_private *dev_priv = dev->dev_private; 2079 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); 2080 2081 if (hotplug_status) { 2082 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); 2083 /* 2084 * Make sure hotplug status is cleared before we clear IIR, or else we 2085 * may miss hotplug events. 
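	 * The posting read that follows flushes this write to the hardware
	 * before the caller goes on to clear IIR.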
2086 */ 2087 POSTING_READ(PORT_HOTPLUG_STAT); 2088 2089 if (IS_G4X(dev)) { 2090 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X; 2091 2092 intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_g4x); 2093 } else { 2094 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; 2095 2096 intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_i915); 2097 } 2098 2099 if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) && 2100 hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X) 2101 dp_aux_irq_handler(dev); 2102 } 2103 } 2104 2105 static irqreturn_t valleyview_irq_handler(int irq, void *arg) 2106 { 2107 struct drm_device *dev = arg; 2108 struct drm_i915_private *dev_priv = dev->dev_private; 2109 u32 iir, gt_iir, pm_iir; 2110 irqreturn_t ret = IRQ_NONE; 2111 2112 while (true) { 2113 /* Find, clear, then process each source of interrupt */ 2114 2115 gt_iir = I915_READ(GTIIR); 2116 if (gt_iir) 2117 I915_WRITE(GTIIR, gt_iir); 2118 2119 pm_iir = I915_READ(GEN6_PMIIR); 2120 if (pm_iir) 2121 I915_WRITE(GEN6_PMIIR, pm_iir); 2122 2123 iir = I915_READ(VLV_IIR); 2124 if (iir) { 2125 /* Consume port before clearing IIR or we'll miss events */ 2126 if (iir & I915_DISPLAY_PORT_INTERRUPT) 2127 i9xx_hpd_irq_handler(dev); 2128 I915_WRITE(VLV_IIR, iir); 2129 } 2130 2131 if (gt_iir == 0 && pm_iir == 0 && iir == 0) 2132 goto out; 2133 2134 ret = IRQ_HANDLED; 2135 2136 if (gt_iir) 2137 snb_gt_irq_handler(dev, dev_priv, gt_iir); 2138 if (pm_iir) 2139 gen6_rps_irq_handler(dev_priv, pm_iir); 2140 /* Call regardless, as some status bits might not be 2141 * signalled in iir */ 2142 valleyview_pipestat_irq_handler(dev, iir); 2143 } 2144 2145 out: 2146 return ret; 2147 } 2148 2149 static irqreturn_t cherryview_irq_handler(int irq, void *arg) 2150 { 2151 struct drm_device *dev = arg; 2152 struct drm_i915_private *dev_priv = dev->dev_private; 2153 u32 master_ctl, iir; 2154 irqreturn_t ret = IRQ_NONE; 2155 2156 for (;;) { 2157 master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL; 2158 iir = I915_READ(VLV_IIR); 2159 2160 if (master_ctl == 0 && iir == 0) 2161 break; 2162 2163 ret = IRQ_HANDLED; 2164 2165 I915_WRITE(GEN8_MASTER_IRQ, 0); 2166 2167 /* Find, clear, then process each source of interrupt */ 2168 2169 if (iir) { 2170 /* Consume port before clearing IIR or we'll miss events */ 2171 if (iir & I915_DISPLAY_PORT_INTERRUPT) 2172 i9xx_hpd_irq_handler(dev); 2173 I915_WRITE(VLV_IIR, iir); 2174 } 2175 2176 gen8_gt_irq_handler(dev, dev_priv, master_ctl); 2177 2178 /* Call regardless, as some status bits might not be 2179 * signalled in iir */ 2180 valleyview_pipestat_irq_handler(dev, iir); 2181 2182 I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL); 2183 POSTING_READ(GEN8_MASTER_IRQ); 2184 } 2185 2186 return ret; 2187 } 2188 2189 static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir) 2190 { 2191 struct drm_i915_private *dev_priv = dev->dev_private; 2192 int pipe; 2193 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK; 2194 u32 dig_hotplug_reg; 2195 2196 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); 2197 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); 2198 2199 intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_ibx); 2200 2201 if (pch_iir & SDE_AUDIO_POWER_MASK) { 2202 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >> 2203 SDE_AUDIO_POWER_SHIFT); 2204 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", 2205 port_name(port)); 2206 } 2207 2208 if (pch_iir & SDE_AUX_MASK) 2209 dp_aux_irq_handler(dev); 2210 2211 if (pch_iir & SDE_GMBUS) 2212 gmbus_irq_handler(dev); 2213 2214 
if (pch_iir & SDE_AUDIO_HDCP_MASK) 2215 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n"); 2216 2217 if (pch_iir & SDE_AUDIO_TRANS_MASK) 2218 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n"); 2219 2220 if (pch_iir & SDE_POISON) 2221 DRM_ERROR("PCH poison interrupt\n"); 2222 2223 if (pch_iir & SDE_FDI_MASK) 2224 for_each_pipe(pipe) 2225 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 2226 pipe_name(pipe), 2227 I915_READ(FDI_RX_IIR(pipe))); 2228 2229 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE)) 2230 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n"); 2231 2232 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR)) 2233 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n"); 2234 2235 if (pch_iir & SDE_TRANSA_FIFO_UNDER) 2236 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, 2237 false)) 2238 DRM_ERROR("PCH transcoder A FIFO underrun\n"); 2239 2240 if (pch_iir & SDE_TRANSB_FIFO_UNDER) 2241 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B, 2242 false)) 2243 DRM_ERROR("PCH transcoder B FIFO underrun\n"); 2244 } 2245 2246 static void ivb_err_int_handler(struct drm_device *dev) 2247 { 2248 struct drm_i915_private *dev_priv = dev->dev_private; 2249 u32 err_int = I915_READ(GEN7_ERR_INT); 2250 enum pipe pipe; 2251 2252 if (err_int & ERR_INT_POISON) 2253 DRM_ERROR("Poison interrupt\n"); 2254 2255 for_each_pipe(pipe) { 2256 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) { 2257 if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, 2258 false)) 2259 DRM_ERROR("Pipe %c FIFO underrun\n", 2260 pipe_name(pipe)); 2261 } 2262 2263 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) { 2264 if (IS_IVYBRIDGE(dev)) 2265 ivb_pipe_crc_irq_handler(dev, pipe); 2266 else 2267 hsw_pipe_crc_irq_handler(dev, pipe); 2268 } 2269 } 2270 2271 I915_WRITE(GEN7_ERR_INT, err_int); 2272 } 2273 2274 static void cpt_serr_int_handler(struct drm_device *dev) 2275 { 2276 struct drm_i915_private *dev_priv = dev->dev_private; 2277 u32 serr_int = I915_READ(SERR_INT); 2278 2279 if (serr_int & SERR_INT_POISON) 2280 DRM_ERROR("PCH poison interrupt\n"); 2281 2282 if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN) 2283 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, 2284 false)) 2285 DRM_ERROR("PCH transcoder A FIFO underrun\n"); 2286 2287 if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN) 2288 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B, 2289 false)) 2290 DRM_ERROR("PCH transcoder B FIFO underrun\n"); 2291 2292 if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN) 2293 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_C, 2294 false)) 2295 DRM_ERROR("PCH transcoder C FIFO underrun\n"); 2296 2297 I915_WRITE(SERR_INT, serr_int); 2298 } 2299 2300 static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir) 2301 { 2302 struct drm_i915_private *dev_priv = dev->dev_private; 2303 int pipe; 2304 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; 2305 u32 dig_hotplug_reg; 2306 2307 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); 2308 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); 2309 2310 intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_cpt); 2311 2312 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) { 2313 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> 2314 SDE_AUDIO_POWER_SHIFT_CPT); 2315 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n", 2316 port_name(port)); 2317 } 2318 2319 if (pch_iir & SDE_AUX_MASK_CPT) 2320 dp_aux_irq_handler(dev); 2321 2322 if (pch_iir & SDE_GMBUS_CPT) 2323 gmbus_irq_handler(dev); 2324 2325 if (pch_iir & SDE_AUDIO_CP_REQ_CPT) 2326 
DRM_DEBUG_DRIVER("Audio CP request interrupt\n"); 2327 2328 if (pch_iir & SDE_AUDIO_CP_CHG_CPT) 2329 DRM_DEBUG_DRIVER("Audio CP change interrupt\n"); 2330 2331 if (pch_iir & SDE_FDI_MASK_CPT) 2332 for_each_pipe(pipe) 2333 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 2334 pipe_name(pipe), 2335 I915_READ(FDI_RX_IIR(pipe))); 2336 2337 if (pch_iir & SDE_ERROR_CPT) 2338 cpt_serr_int_handler(dev); 2339 } 2340 2341 static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir) 2342 { 2343 struct drm_i915_private *dev_priv = dev->dev_private; 2344 enum pipe pipe; 2345 2346 if (de_iir & DE_AUX_CHANNEL_A) 2347 dp_aux_irq_handler(dev); 2348 2349 if (de_iir & DE_GSE) 2350 intel_opregion_asle_intr(dev); 2351 2352 if (de_iir & DE_POISON) 2353 DRM_ERROR("Poison interrupt\n"); 2354 2355 for_each_pipe(pipe) { 2356 if (de_iir & DE_PIPE_VBLANK(pipe)) 2357 intel_pipe_handle_vblank(dev, pipe); 2358 2359 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe)) 2360 if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, false)) 2361 DRM_ERROR("Pipe %c FIFO underrun\n", 2362 pipe_name(pipe)); 2363 2364 if (de_iir & DE_PIPE_CRC_DONE(pipe)) 2365 i9xx_pipe_crc_irq_handler(dev, pipe); 2366 2367 /* plane/pipes map 1:1 on ilk+ */ 2368 if (de_iir & DE_PLANE_FLIP_DONE(pipe)) { 2369 intel_prepare_page_flip(dev, pipe); 2370 intel_finish_page_flip_plane(dev, pipe); 2371 } 2372 } 2373 2374 /* check event from PCH */ 2375 if (de_iir & DE_PCH_EVENT) { 2376 u32 pch_iir = I915_READ(SDEIIR); 2377 2378 if (HAS_PCH_CPT(dev)) 2379 cpt_irq_handler(dev, pch_iir); 2380 else 2381 ibx_irq_handler(dev, pch_iir); 2382 2383 /* should clear PCH hotplug event before clear CPU irq */ 2384 I915_WRITE(SDEIIR, pch_iir); 2385 } 2386 2387 if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT) 2388 ironlake_rps_change_irq_handler(dev); 2389 } 2390 2391 static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir) 2392 { 2393 struct drm_i915_private *dev_priv = dev->dev_private; 2394 enum pipe pipe; 2395 2396 if (de_iir & DE_ERR_INT_IVB) 2397 ivb_err_int_handler(dev); 2398 2399 if (de_iir & DE_AUX_CHANNEL_A_IVB) 2400 dp_aux_irq_handler(dev); 2401 2402 if (de_iir & DE_GSE_IVB) 2403 intel_opregion_asle_intr(dev); 2404 2405 for_each_pipe(pipe) { 2406 if (de_iir & (DE_PIPE_VBLANK_IVB(pipe))) 2407 intel_pipe_handle_vblank(dev, pipe); 2408 2409 /* plane/pipes map 1:1 on ilk+ */ 2410 if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) { 2411 intel_prepare_page_flip(dev, pipe); 2412 intel_finish_page_flip_plane(dev, pipe); 2413 } 2414 } 2415 2416 /* check event from PCH */ 2417 if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) { 2418 u32 pch_iir = I915_READ(SDEIIR); 2419 2420 cpt_irq_handler(dev, pch_iir); 2421 2422 /* clear PCH hotplug event before clear CPU irq */ 2423 I915_WRITE(SDEIIR, pch_iir); 2424 } 2425 } 2426 2427 /* 2428 * To handle irqs with the minimum potential races with fresh interrupts, we: 2429 * 1 - Disable Master Interrupt Control. 2430 * 2 - Find the source(s) of the interrupt. 2431 * 3 - Clear the Interrupt Identity bits (IIR). 2432 * 4 - Process the interrupt(s) that had bits set in the IIRs. 2433 * 5 - Re-enable Master Interrupt Control. 2434 */ 2435 static irqreturn_t ironlake_irq_handler(int irq, void *arg) 2436 { 2437 struct drm_device *dev = arg; 2438 struct drm_i915_private *dev_priv = dev->dev_private; 2439 u32 de_iir, gt_iir, de_ier, sde_ier = 0; 2440 irqreturn_t ret = IRQ_NONE; 2441 2442 /* We get interrupts on unclaimed registers, so check for this before we 2443 * do any I915_{READ,WRITE}. 
*/ 2444 intel_uncore_check_errors(dev); 2445 2446 /* disable master interrupt before clearing iir */ 2447 de_ier = I915_READ(DEIER); 2448 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); 2449 POSTING_READ(DEIER); 2450 2451 /* Disable south interrupts. We'll only write to SDEIIR once, so further 2452 * interrupts will will be stored on its back queue, and then we'll be 2453 * able to process them after we restore SDEIER (as soon as we restore 2454 * it, we'll get an interrupt if SDEIIR still has something to process 2455 * due to its back queue). */ 2456 if (!HAS_PCH_NOP(dev)) { 2457 sde_ier = I915_READ(SDEIER); 2458 I915_WRITE(SDEIER, 0); 2459 POSTING_READ(SDEIER); 2460 } 2461 2462 /* Find, clear, then process each source of interrupt */ 2463 2464 gt_iir = I915_READ(GTIIR); 2465 if (gt_iir) { 2466 I915_WRITE(GTIIR, gt_iir); 2467 ret = IRQ_HANDLED; 2468 if (INTEL_INFO(dev)->gen >= 6) 2469 snb_gt_irq_handler(dev, dev_priv, gt_iir); 2470 else 2471 ilk_gt_irq_handler(dev, dev_priv, gt_iir); 2472 } 2473 2474 de_iir = I915_READ(DEIIR); 2475 if (de_iir) { 2476 I915_WRITE(DEIIR, de_iir); 2477 ret = IRQ_HANDLED; 2478 if (INTEL_INFO(dev)->gen >= 7) 2479 ivb_display_irq_handler(dev, de_iir); 2480 else 2481 ilk_display_irq_handler(dev, de_iir); 2482 } 2483 2484 if (INTEL_INFO(dev)->gen >= 6) { 2485 u32 pm_iir = I915_READ(GEN6_PMIIR); 2486 if (pm_iir) { 2487 I915_WRITE(GEN6_PMIIR, pm_iir); 2488 ret = IRQ_HANDLED; 2489 gen6_rps_irq_handler(dev_priv, pm_iir); 2490 } 2491 } 2492 2493 I915_WRITE(DEIER, de_ier); 2494 POSTING_READ(DEIER); 2495 if (!HAS_PCH_NOP(dev)) { 2496 I915_WRITE(SDEIER, sde_ier); 2497 POSTING_READ(SDEIER); 2498 } 2499 2500 return ret; 2501 } 2502 2503 static irqreturn_t gen8_irq_handler(int irq, void *arg) 2504 { 2505 struct drm_device *dev = arg; 2506 struct drm_i915_private *dev_priv = dev->dev_private; 2507 u32 master_ctl; 2508 irqreturn_t ret = IRQ_NONE; 2509 uint32_t tmp = 0; 2510 enum pipe pipe; 2511 2512 master_ctl = I915_READ(GEN8_MASTER_IRQ); 2513 master_ctl &= ~GEN8_MASTER_IRQ_CONTROL; 2514 if (!master_ctl) 2515 return IRQ_NONE; 2516 2517 I915_WRITE(GEN8_MASTER_IRQ, 0); 2518 POSTING_READ(GEN8_MASTER_IRQ); 2519 2520 /* Find, clear, then process each source of interrupt */ 2521 2522 ret = gen8_gt_irq_handler(dev, dev_priv, master_ctl); 2523 2524 if (master_ctl & GEN8_DE_MISC_IRQ) { 2525 tmp = I915_READ(GEN8_DE_MISC_IIR); 2526 if (tmp) { 2527 I915_WRITE(GEN8_DE_MISC_IIR, tmp); 2528 ret = IRQ_HANDLED; 2529 if (tmp & GEN8_DE_MISC_GSE) 2530 intel_opregion_asle_intr(dev); 2531 else 2532 DRM_ERROR("Unexpected DE Misc interrupt\n"); 2533 } 2534 else 2535 DRM_ERROR("The master control interrupt lied (DE MISC)!\n"); 2536 } 2537 2538 if (master_ctl & GEN8_DE_PORT_IRQ) { 2539 tmp = I915_READ(GEN8_DE_PORT_IIR); 2540 if (tmp) { 2541 I915_WRITE(GEN8_DE_PORT_IIR, tmp); 2542 ret = IRQ_HANDLED; 2543 if (tmp & GEN8_AUX_CHANNEL_A) 2544 dp_aux_irq_handler(dev); 2545 else 2546 DRM_ERROR("Unexpected DE Port interrupt\n"); 2547 } 2548 else 2549 DRM_ERROR("The master control interrupt lied (DE PORT)!\n"); 2550 } 2551 2552 for_each_pipe(pipe) { 2553 uint32_t pipe_iir; 2554 2555 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe))) 2556 continue; 2557 2558 pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe)); 2559 if (pipe_iir) { 2560 ret = IRQ_HANDLED; 2561 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir); 2562 if (pipe_iir & GEN8_PIPE_VBLANK) 2563 intel_pipe_handle_vblank(dev, pipe); 2564 2565 if (pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE) { 2566 intel_prepare_page_flip(dev, pipe); 2567 intel_finish_page_flip_plane(dev, 
pipe); 2568 } 2569 2570 if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE) 2571 hsw_pipe_crc_irq_handler(dev, pipe); 2572 2573 if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN) { 2574 if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, 2575 false)) 2576 DRM_ERROR("Pipe %c FIFO underrun\n", 2577 pipe_name(pipe)); 2578 } 2579 2580 if (pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS) { 2581 DRM_ERROR("Fault errors on pipe %c\n: 0x%08x", 2582 pipe_name(pipe), 2583 pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS); 2584 } 2585 } else 2586 DRM_ERROR("The master control interrupt lied (DE PIPE)!\n"); 2587 } 2588 2589 if (!HAS_PCH_NOP(dev) && master_ctl & GEN8_DE_PCH_IRQ) { 2590 /* 2591 * FIXME(BDW): Assume for now that the new interrupt handling 2592 * scheme also closed the SDE interrupt handling race we've seen 2593 * on older pch-split platforms. But this needs testing. 2594 */ 2595 u32 pch_iir = I915_READ(SDEIIR); 2596 if (pch_iir) { 2597 I915_WRITE(SDEIIR, pch_iir); 2598 ret = IRQ_HANDLED; 2599 cpt_irq_handler(dev, pch_iir); 2600 } else 2601 DRM_ERROR("The master control interrupt lied (SDE)!\n"); 2602 2603 } 2604 2605 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 2606 POSTING_READ(GEN8_MASTER_IRQ); 2607 2608 return ret; 2609 } 2610 2611 static void i915_error_wake_up(struct drm_i915_private *dev_priv, 2612 bool reset_completed) 2613 { 2614 struct intel_engine_cs *ring; 2615 int i; 2616 2617 /* 2618 * Notify all waiters for GPU completion events that reset state has 2619 * been changed, and that they need to restart their wait after 2620 * checking for potential errors (and bail out to drop locks if there is 2621 * a gpu reset pending so that i915_error_work_func can acquire them). 2622 */ 2623 2624 /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */ 2625 for_each_ring(ring, dev_priv, i) 2626 wake_up_all(&ring->irq_queue); 2627 2628 /* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */ 2629 wake_up_all(&dev_priv->pending_flip_queue); 2630 2631 /* 2632 * Signal tasks blocked in i915_gem_wait_for_error that the pending 2633 * reset state is cleared. 2634 */ 2635 if (reset_completed) 2636 wake_up_all(&dev_priv->gpu_error.reset_queue); 2637 } 2638 2639 /** 2640 * i915_error_work_func - do process context error handling work 2641 * @work: work struct 2642 * 2643 * Fire an error uevent so userspace can see that a hang or error 2644 * was detected. 2645 */ 2646 static void i915_error_work_func(struct work_struct *work) 2647 { 2648 struct i915_gpu_error *error = container_of(work, struct i915_gpu_error, 2649 work); 2650 struct drm_i915_private *dev_priv = 2651 container_of(error, struct drm_i915_private, gpu_error); 2652 struct drm_device *dev = dev_priv->dev; 2653 char *error_event[] = { I915_ERROR_UEVENT "=1", NULL }; 2654 char *reset_event[] = { I915_RESET_UEVENT "=1", NULL }; 2655 char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL }; 2656 int ret; 2657 2658 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event); 2659 2660 /* 2661 * Note that there's only one work item which does gpu resets, so we 2662 * need not worry about concurrent gpu resets potentially incrementing 2663 * error->reset_counter twice. We only need to take care of another 2664 * racing irq/hangcheck declaring the gpu dead for a second time. 
A quick check for that is good enough: schedule_work ensures the
	 * correct ordering between hang detection and this work item, and since
	 * the reset in-progress bit is only ever set by code outside of this
	 * work we don't need to worry about any other races.
	 */
	if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
		DRM_DEBUG_DRIVER("resetting chip\n");
		kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
				   reset_event);

		/*
		 * In most cases it's guaranteed that we get here with an RPM
		 * reference held, for example because there is a pending GPU
		 * request that won't finish until the reset is done. This
		 * isn't the case at least when we get here by doing a
		 * simulated reset via debugfs, so get an RPM reference.
		 */
		intel_runtime_pm_get(dev_priv);
		/*
		 * All state reset _must_ be completed before we update the
		 * reset counter, for otherwise waiters might miss the reset
		 * pending state and not properly drop locks, resulting in
		 * deadlocks with the reset work.
		 */
		ret = i915_reset(dev);

		intel_display_handle_reset(dev);

		intel_runtime_pm_put(dev_priv);

		if (ret == 0) {
			/*
			 * After all the gem state is reset, increment the reset
			 * counter and wake up everyone waiting for the reset to
			 * complete.
			 *
			 * Since unlock operations are a one-sided barrier only,
			 * we need to insert a barrier here to order any seqno
			 * updates before the counter increment.
			 */
			smp_mb__before_atomic();
			atomic_inc(&dev_priv->gpu_error.reset_counter);

			kobject_uevent_env(&dev->primary->kdev->kobj,
					   KOBJ_CHANGE, reset_done_event);
		} else {
			atomic_set_mask(I915_WEDGED, &error->reset_counter);
		}

		/*
		 * Note: The wake_up also serves as a memory barrier so that
		 * waiters see the updated value of the reset counter atomic_t.
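		 * i915_handle_error() documents the matching wake-up/barrier
		 * pairing on the reset-pending side.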
2718 */ 2719 i915_error_wake_up(dev_priv, true); 2720 } 2721 } 2722 2723 static void i915_report_and_clear_eir(struct drm_device *dev) 2724 { 2725 struct drm_i915_private *dev_priv = dev->dev_private; 2726 uint32_t instdone[I915_NUM_INSTDONE_REG]; 2727 u32 eir = I915_READ(EIR); 2728 int pipe, i; 2729 2730 if (!eir) 2731 return; 2732 2733 pr_err("render error detected, EIR: 0x%08x\n", eir); 2734 2735 i915_get_extra_instdone(dev, instdone); 2736 2737 if (IS_G4X(dev)) { 2738 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) { 2739 u32 ipeir = I915_READ(IPEIR_I965); 2740 2741 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); 2742 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); 2743 for (i = 0; i < ARRAY_SIZE(instdone); i++) 2744 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); 2745 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); 2746 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); 2747 I915_WRITE(IPEIR_I965, ipeir); 2748 POSTING_READ(IPEIR_I965); 2749 } 2750 if (eir & GM45_ERROR_PAGE_TABLE) { 2751 u32 pgtbl_err = I915_READ(PGTBL_ER); 2752 pr_err("page table error\n"); 2753 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err); 2754 I915_WRITE(PGTBL_ER, pgtbl_err); 2755 POSTING_READ(PGTBL_ER); 2756 } 2757 } 2758 2759 if (!IS_GEN2(dev)) { 2760 if (eir & I915_ERROR_PAGE_TABLE) { 2761 u32 pgtbl_err = I915_READ(PGTBL_ER); 2762 pr_err("page table error\n"); 2763 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err); 2764 I915_WRITE(PGTBL_ER, pgtbl_err); 2765 POSTING_READ(PGTBL_ER); 2766 } 2767 } 2768 2769 if (eir & I915_ERROR_MEMORY_REFRESH) { 2770 pr_err("memory refresh error:\n"); 2771 for_each_pipe(pipe) 2772 pr_err("pipe %c stat: 0x%08x\n", 2773 pipe_name(pipe), I915_READ(PIPESTAT(pipe))); 2774 /* pipestat has already been acked */ 2775 } 2776 if (eir & I915_ERROR_INSTRUCTION) { 2777 pr_err("instruction error\n"); 2778 pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM)); 2779 for (i = 0; i < ARRAY_SIZE(instdone); i++) 2780 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); 2781 if (INTEL_INFO(dev)->gen < 4) { 2782 u32 ipeir = I915_READ(IPEIR); 2783 2784 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR)); 2785 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR)); 2786 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD)); 2787 I915_WRITE(IPEIR, ipeir); 2788 POSTING_READ(IPEIR); 2789 } else { 2790 u32 ipeir = I915_READ(IPEIR_I965); 2791 2792 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); 2793 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); 2794 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); 2795 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); 2796 I915_WRITE(IPEIR_I965, ipeir); 2797 POSTING_READ(IPEIR_I965); 2798 } 2799 } 2800 2801 I915_WRITE(EIR, eir); 2802 POSTING_READ(EIR); 2803 eir = I915_READ(EIR); 2804 if (eir) { 2805 /* 2806 * some errors might have become stuck, 2807 * mask them. 2808 */ 2809 DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir); 2810 I915_WRITE(EMR, I915_READ(EMR) | eir); 2811 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 2812 } 2813 } 2814 2815 /** 2816 * i915_handle_error - handle an error interrupt 2817 * @dev: drm device 2818 * 2819 * Do some basic checking of regsiter state at error interrupt time and 2820 * dump it to the syslog. Also call i915_capture_error_state() to make 2821 * sure we get a record and make it available in debugfs. Fire a uevent 2822 * so userspace knows something bad happened (should trigger collection 2823 * of a ring dump etc.). 2824 */ 2825 void i915_handle_error(struct drm_device *dev, bool wedged, 2826 const char *fmt, ...) 
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	va_list args;
	char error_msg[80];

	va_start(args, fmt);
	vscnprintf(error_msg, sizeof(error_msg), fmt, args);
	va_end(args);

	i915_capture_error_state(dev, wedged, error_msg);
	i915_report_and_clear_eir(dev);

	if (wedged) {
		atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
				&dev_priv->gpu_error.reset_counter);

		/*
		 * Wakeup waiting processes so that the reset work function
		 * i915_error_work_func doesn't deadlock trying to grab various
		 * locks. By bumping the reset counter first, the woken
		 * processes will see a reset in progress and back off,
		 * releasing their locks and then wait for the reset completion.
		 * We must do this for _all_ gpu waiters that might hold locks
		 * that the reset work needs to acquire.
		 *
		 * Note: The wake_up serves as the required memory barrier to
		 * ensure that the waiters see the updated value of the reset
		 * counter atomic_t.
		 */
		i915_error_wake_up(dev_priv, false);
	}

	/*
	 * Our reset work can grab modeset locks (since it needs to reset the
	 * state of outstanding pageflips). Hence it must not be run on our own
	 * dev_priv->wq work queue for otherwise the flush_work in the pageflip
	 * code will deadlock.
	 */
	schedule_work(&dev_priv->gpu_error.work);
}

static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_i915_gem_object *obj;
	struct intel_unpin_work *work;
	unsigned long flags;
	bool stall_detected;

	/* Ignore early vblank irqs */
	if (intel_crtc == NULL)
		return;

	spin_lock_irqsave(&dev->event_lock, flags);
	work = intel_crtc->unpin_work;

	if (work == NULL ||
	    atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE ||
	    !work->enable_stall_check) {
		/* Either the pending flip IRQ arrived, or we're too early.
Don't check */ 2889 spin_unlock_irqrestore(&dev->event_lock, flags); 2890 return; 2891 } 2892 2893 /* Potential stall - if we see that the flip has happened, assume a missed interrupt */ 2894 obj = work->pending_flip_obj; 2895 if (INTEL_INFO(dev)->gen >= 4) { 2896 int dspsurf = DSPSURF(intel_crtc->plane); 2897 stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) == 2898 i915_gem_obj_ggtt_offset(obj); 2899 } else { 2900 int dspaddr = DSPADDR(intel_crtc->plane); 2901 stall_detected = I915_READ(dspaddr) == (i915_gem_obj_ggtt_offset(obj) + 2902 crtc->y * crtc->primary->fb->pitches[0] + 2903 crtc->x * crtc->primary->fb->bits_per_pixel/8); 2904 } 2905 2906 spin_unlock_irqrestore(&dev->event_lock, flags); 2907 2908 if (stall_detected) { 2909 DRM_DEBUG_DRIVER("Pageflip stall detected\n"); 2910 intel_prepare_page_flip(dev, intel_crtc->plane); 2911 } 2912 } 2913 2914 /* Called from drm generic code, passed 'crtc' which 2915 * we use as a pipe index 2916 */ 2917 static int i915_enable_vblank(struct drm_device *dev, int pipe) 2918 { 2919 struct drm_i915_private *dev_priv = dev->dev_private; 2920 unsigned long irqflags; 2921 2922 if (!i915_pipe_enabled(dev, pipe)) 2923 return -EINVAL; 2924 2925 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2926 if (INTEL_INFO(dev)->gen >= 4) 2927 i915_enable_pipestat(dev_priv, pipe, 2928 PIPE_START_VBLANK_INTERRUPT_STATUS); 2929 else 2930 i915_enable_pipestat(dev_priv, pipe, 2931 PIPE_VBLANK_INTERRUPT_STATUS); 2932 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2933 2934 return 0; 2935 } 2936 2937 static int ironlake_enable_vblank(struct drm_device *dev, int pipe) 2938 { 2939 struct drm_i915_private *dev_priv = dev->dev_private; 2940 unsigned long irqflags; 2941 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) : 2942 DE_PIPE_VBLANK(pipe); 2943 2944 if (!i915_pipe_enabled(dev, pipe)) 2945 return -EINVAL; 2946 2947 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2948 ironlake_enable_display_irq(dev_priv, bit); 2949 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2950 2951 return 0; 2952 } 2953 2954 static int valleyview_enable_vblank(struct drm_device *dev, int pipe) 2955 { 2956 struct drm_i915_private *dev_priv = dev->dev_private; 2957 unsigned long irqflags; 2958 2959 if (!i915_pipe_enabled(dev, pipe)) 2960 return -EINVAL; 2961 2962 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2963 i915_enable_pipestat(dev_priv, pipe, 2964 PIPE_START_VBLANK_INTERRUPT_STATUS); 2965 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2966 2967 return 0; 2968 } 2969 2970 static int gen8_enable_vblank(struct drm_device *dev, int pipe) 2971 { 2972 struct drm_i915_private *dev_priv = dev->dev_private; 2973 unsigned long irqflags; 2974 2975 if (!i915_pipe_enabled(dev, pipe)) 2976 return -EINVAL; 2977 2978 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2979 dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK; 2980 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); 2981 POSTING_READ(GEN8_DE_PIPE_IMR(pipe)); 2982 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2983 return 0; 2984 } 2985 2986 /* Called from drm generic code, passed 'crtc' which 2987 * we use as a pipe index 2988 */ 2989 static void i915_disable_vblank(struct drm_device *dev, int pipe) 2990 { 2991 struct drm_i915_private *dev_priv = dev->dev_private; 2992 unsigned long irqflags; 2993 2994 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2995 i915_disable_pipestat(dev_priv, pipe, 2996 PIPE_VBLANK_INTERRUPT_STATUS | 2997 PIPE_START_VBLANK_INTERRUPT_STATUS); 
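	/* Clear both the gen2/3 and the gen4+ vblank status bits here;
	 * i915_enable_vblank() enabled only one of them depending on gen. */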
2998 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2999 } 3000 3001 static void ironlake_disable_vblank(struct drm_device *dev, int pipe) 3002 { 3003 struct drm_i915_private *dev_priv = dev->dev_private; 3004 unsigned long irqflags; 3005 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) : 3006 DE_PIPE_VBLANK(pipe); 3007 3008 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3009 ironlake_disable_display_irq(dev_priv, bit); 3010 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3011 } 3012 3013 static void valleyview_disable_vblank(struct drm_device *dev, int pipe) 3014 { 3015 struct drm_i915_private *dev_priv = dev->dev_private; 3016 unsigned long irqflags; 3017 3018 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3019 i915_disable_pipestat(dev_priv, pipe, 3020 PIPE_START_VBLANK_INTERRUPT_STATUS); 3021 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3022 } 3023 3024 static void gen8_disable_vblank(struct drm_device *dev, int pipe) 3025 { 3026 struct drm_i915_private *dev_priv = dev->dev_private; 3027 unsigned long irqflags; 3028 3029 if (!i915_pipe_enabled(dev, pipe)) 3030 return; 3031 3032 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3033 dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK; 3034 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); 3035 POSTING_READ(GEN8_DE_PIPE_IMR(pipe)); 3036 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3037 } 3038 3039 static u32 3040 ring_last_seqno(struct intel_engine_cs *ring) 3041 { 3042 return list_entry(ring->request_list.prev, 3043 struct drm_i915_gem_request, list)->seqno; 3044 } 3045 3046 static bool 3047 ring_idle(struct intel_engine_cs *ring, u32 seqno) 3048 { 3049 return (list_empty(&ring->request_list) || 3050 i915_seqno_passed(seqno, ring_last_seqno(ring))); 3051 } 3052 3053 static bool 3054 ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr) 3055 { 3056 if (INTEL_INFO(dev)->gen >= 8) { 3057 return (ipehr >> 23) == 0x1c; 3058 } else { 3059 ipehr &= ~MI_SEMAPHORE_SYNC_MASK; 3060 return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | 3061 MI_SEMAPHORE_REGISTER); 3062 } 3063 } 3064 3065 static struct intel_engine_cs * 3066 semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr, u64 offset) 3067 { 3068 struct drm_i915_private *dev_priv = ring->dev->dev_private; 3069 struct intel_engine_cs *signaller; 3070 int i; 3071 3072 if (INTEL_INFO(dev_priv->dev)->gen >= 8) { 3073 for_each_ring(signaller, dev_priv, i) { 3074 if (ring == signaller) 3075 continue; 3076 3077 if (offset == signaller->semaphore.signal_ggtt[ring->id]) 3078 return signaller; 3079 } 3080 } else { 3081 u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK; 3082 3083 for_each_ring(signaller, dev_priv, i) { 3084 if(ring == signaller) 3085 continue; 3086 3087 if (sync_bits == signaller->semaphore.mbox.wait[ring->id]) 3088 return signaller; 3089 } 3090 } 3091 3092 DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n", 3093 ring->id, ipehr, offset); 3094 3095 return NULL; 3096 } 3097 3098 static struct intel_engine_cs * 3099 semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno) 3100 { 3101 struct drm_i915_private *dev_priv = ring->dev->dev_private; 3102 u32 cmd, ipehr, head; 3103 u64 offset = 0; 3104 int i, backwards; 3105 3106 ipehr = I915_READ(RING_IPEHR(ring->mmio_base)); 3107 if (!ipehr_is_semaphore_wait(ring->dev, ipehr)) 3108 return NULL; 3109 3110 /* 3111 * HEAD is likely pointing to the dword after the actual command, 3112 * so scan backwards until we 
find the MBOX. But limit it to just 3
	 * or 4 dwords depending on the semaphore wait command size.
	 * Note that we don't care about ACTHD here since that might
	 * point at a batch, and semaphores are always emitted into the
	 * ringbuffer itself.
	 */
	head = I915_READ_HEAD(ring) & HEAD_ADDR;
	backwards = (INTEL_INFO(ring->dev)->gen >= 8) ? 5 : 4;

	for (i = backwards; i; --i) {
		/*
		 * Be paranoid and presume the hw has gone off into the wild -
		 * our ring is smaller than what the hardware (and hence
		 * HEAD_ADDR) allows. Also handles wrap-around.
		 */
		head &= ring->buffer->size - 1;

		/* This here seems to blow up */
		cmd = ioread32(ring->buffer->virtual_start + head);
		if (cmd == ipehr)
			break;

		head -= 4;
	}

	if (!i)
		return NULL;

	*seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1;
	if (INTEL_INFO(ring->dev)->gen >= 8) {
		offset = ioread32(ring->buffer->virtual_start + head + 12);
		offset <<= 32;
		offset |= ioread32(ring->buffer->virtual_start + head + 8);
	}
	return semaphore_wait_to_signaller_ring(ring, ipehr, offset);
}

static int semaphore_passed(struct intel_engine_cs *ring)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct intel_engine_cs *signaller;
	u32 seqno;

	ring->hangcheck.deadlock++;

	signaller = semaphore_waits_for(ring, &seqno);
	if (signaller == NULL)
		return -1;

	/* Prevent pathological recursion due to driver bugs */
	if (signaller->hangcheck.deadlock >= I915_NUM_RINGS)
		return -1;

	if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno))
		return 1;

	/* cursory check for an unkickable deadlock */
	if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
	    semaphore_passed(signaller) < 0)
		return -1;

	return 0;
}

static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *ring;
	int i;

	for_each_ring(ring, dev_priv, i)
		ring->hangcheck.deadlock = 0;
}

static enum intel_ring_hangcheck_action
ring_stuck(struct intel_engine_cs *ring, u64 acthd)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 tmp;

	if (acthd != ring->hangcheck.acthd) {
		if (acthd > ring->hangcheck.max_acthd) {
			ring->hangcheck.max_acthd = acthd;
			return HANGCHECK_ACTIVE;
		}

		return HANGCHECK_ACTIVE_LOOP;
	}

	if (IS_GEN2(dev))
		return HANGCHECK_HUNG;

	/* Is the chip hanging on a WAIT_FOR_EVENT?
	 * If so we can simply poke the RB_WAIT bit
	 * and break the hang. This should work on
	 * all but the second generation chipsets.
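	 * (Gen2 already returned HANGCHECK_HUNG above, so the RB_WAIT kick
	 * below never runs on second generation chipsets.)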
3208 */ 3209 tmp = I915_READ_CTL(ring); 3210 if (tmp & RING_WAIT) { 3211 i915_handle_error(dev, false, 3212 "Kicking stuck wait on %s", 3213 ring->name); 3214 I915_WRITE_CTL(ring, tmp); 3215 return HANGCHECK_KICK; 3216 } 3217 3218 if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) { 3219 switch (semaphore_passed(ring)) { 3220 default: 3221 return HANGCHECK_HUNG; 3222 case 1: 3223 i915_handle_error(dev, false, 3224 "Kicking stuck semaphore on %s", 3225 ring->name); 3226 I915_WRITE_CTL(ring, tmp); 3227 return HANGCHECK_KICK; 3228 case 0: 3229 return HANGCHECK_WAIT; 3230 } 3231 } 3232 3233 return HANGCHECK_HUNG; 3234 } 3235 3236 /** 3237 * This is called when the chip hasn't reported back with completed 3238 * batchbuffers in a long time. We keep track per ring seqno progress and 3239 * if there are no progress, hangcheck score for that ring is increased. 3240 * Further, acthd is inspected to see if the ring is stuck. On stuck case 3241 * we kick the ring. If we see no progress on three subsequent calls 3242 * we assume chip is wedged and try to fix it by resetting the chip. 3243 */ 3244 static void i915_hangcheck_elapsed(unsigned long data) 3245 { 3246 struct drm_device *dev = (struct drm_device *)data; 3247 struct drm_i915_private *dev_priv = dev->dev_private; 3248 struct intel_engine_cs *ring; 3249 int i; 3250 int busy_count = 0, rings_hung = 0; 3251 bool stuck[I915_NUM_RINGS] = { 0 }; 3252 #define BUSY 1 3253 #define KICK 5 3254 #define HUNG 20 3255 3256 if (!i915.enable_hangcheck) 3257 return; 3258 3259 for_each_ring(ring, dev_priv, i) { 3260 u64 acthd; 3261 u32 seqno; 3262 bool busy = true; 3263 3264 semaphore_clear_deadlocks(dev_priv); 3265 3266 seqno = ring->get_seqno(ring, false); 3267 acthd = intel_ring_get_active_head(ring); 3268 3269 if (ring->hangcheck.seqno == seqno) { 3270 if (ring_idle(ring, seqno)) { 3271 ring->hangcheck.action = HANGCHECK_IDLE; 3272 3273 if (waitqueue_active(&ring->irq_queue)) { 3274 /* Issue a wake-up to catch stuck h/w. */ 3275 if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) { 3276 if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring))) 3277 DRM_ERROR("Hangcheck timer elapsed... %s idle\n", 3278 ring->name); 3279 else 3280 DRM_INFO("Fake missed irq on %s\n", 3281 ring->name); 3282 wake_up_all(&ring->irq_queue); 3283 } 3284 /* Safeguard against driver failure */ 3285 ring->hangcheck.score += BUSY; 3286 } else 3287 busy = false; 3288 } else { 3289 /* We always increment the hangcheck score 3290 * if the ring is busy and still processing 3291 * the same request, so that no single request 3292 * can run indefinitely (such as a chain of 3293 * batches). The only time we do not increment 3294 * the hangcheck score on this ring, if this 3295 * ring is in a legitimate wait for another 3296 * ring. In that case the waiting ring is a 3297 * victim and we want to be sure we catch the 3298 * right culprit. Then every time we do kick 3299 * the ring, add a small increment to the 3300 * score so that we can catch a batch that is 3301 * being repeatedly kicked and so responsible 3302 * for stalling the machine. 
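				 * The BUSY, KICK and HUNG weights applied in
				 * the switch below implement that policy.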
3303 */ 3304 ring->hangcheck.action = ring_stuck(ring, 3305 acthd); 3306 3307 switch (ring->hangcheck.action) { 3308 case HANGCHECK_IDLE: 3309 case HANGCHECK_WAIT: 3310 case HANGCHECK_ACTIVE: 3311 break; 3312 case HANGCHECK_ACTIVE_LOOP: 3313 ring->hangcheck.score += BUSY; 3314 break; 3315 case HANGCHECK_KICK: 3316 ring->hangcheck.score += KICK; 3317 break; 3318 case HANGCHECK_HUNG: 3319 ring->hangcheck.score += HUNG; 3320 stuck[i] = true; 3321 break; 3322 } 3323 } 3324 } else { 3325 ring->hangcheck.action = HANGCHECK_ACTIVE; 3326 3327 /* Gradually reduce the count so that we catch DoS 3328 * attempts across multiple batches. 3329 */ 3330 if (ring->hangcheck.score > 0) 3331 ring->hangcheck.score--; 3332 3333 ring->hangcheck.acthd = ring->hangcheck.max_acthd = 0; 3334 } 3335 3336 ring->hangcheck.seqno = seqno; 3337 ring->hangcheck.acthd = acthd; 3338 busy_count += busy; 3339 } 3340 3341 for_each_ring(ring, dev_priv, i) { 3342 if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) { 3343 DRM_INFO("%s on %s\n", 3344 stuck[i] ? "stuck" : "no progress", 3345 ring->name); 3346 rings_hung++; 3347 } 3348 } 3349 3350 if (rings_hung) 3351 return i915_handle_error(dev, true, "Ring hung"); 3352 3353 if (busy_count) 3354 /* Reset timer case chip hangs without another request 3355 * being added */ 3356 i915_queue_hangcheck(dev); 3357 } 3358 3359 void i915_queue_hangcheck(struct drm_device *dev) 3360 { 3361 struct drm_i915_private *dev_priv = dev->dev_private; 3362 if (!i915.enable_hangcheck) 3363 return; 3364 3365 mod_timer(&dev_priv->gpu_error.hangcheck_timer, 3366 round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES)); 3367 } 3368 3369 static void ibx_irq_reset(struct drm_device *dev) 3370 { 3371 struct drm_i915_private *dev_priv = dev->dev_private; 3372 3373 if (HAS_PCH_NOP(dev)) 3374 return; 3375 3376 GEN5_IRQ_RESET(SDE); 3377 3378 if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev)) 3379 I915_WRITE(SERR_INT, 0xffffffff); 3380 } 3381 3382 /* 3383 * SDEIER is also touched by the interrupt handler to work around missed PCH 3384 * interrupts. Hence we can't update it after the interrupt handler is enabled - 3385 * instead we unconditionally enable all PCH interrupt sources here, but then 3386 * only unmask them as needed with SDEIMR. 3387 * 3388 * This function needs to be called before interrupts are enabled. 
3389 */ 3390 static void ibx_irq_pre_postinstall(struct drm_device *dev) 3391 { 3392 struct drm_i915_private *dev_priv = dev->dev_private; 3393 3394 if (HAS_PCH_NOP(dev)) 3395 return; 3396 3397 WARN_ON(I915_READ(SDEIER) != 0); 3398 I915_WRITE(SDEIER, 0xffffffff); 3399 POSTING_READ(SDEIER); 3400 } 3401 3402 static void gen5_gt_irq_reset(struct drm_device *dev) 3403 { 3404 struct drm_i915_private *dev_priv = dev->dev_private; 3405 3406 GEN5_IRQ_RESET(GT); 3407 if (INTEL_INFO(dev)->gen >= 6) 3408 GEN5_IRQ_RESET(GEN6_PM); 3409 } 3410 3411 /* drm_dma.h hooks 3412 */ 3413 static void ironlake_irq_reset(struct drm_device *dev) 3414 { 3415 struct drm_i915_private *dev_priv = dev->dev_private; 3416 3417 I915_WRITE(HWSTAM, 0xffffffff); 3418 3419 GEN5_IRQ_RESET(DE); 3420 if (IS_GEN7(dev)) 3421 I915_WRITE(GEN7_ERR_INT, 0xffffffff); 3422 3423 gen5_gt_irq_reset(dev); 3424 3425 ibx_irq_reset(dev); 3426 } 3427 3428 static void valleyview_irq_preinstall(struct drm_device *dev) 3429 { 3430 struct drm_i915_private *dev_priv = dev->dev_private; 3431 int pipe; 3432 3433 /* VLV magic */ 3434 I915_WRITE(VLV_IMR, 0); 3435 I915_WRITE(RING_IMR(RENDER_RING_BASE), 0); 3436 I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0); 3437 I915_WRITE(RING_IMR(BLT_RING_BASE), 0); 3438 3439 /* and GT */ 3440 I915_WRITE(GTIIR, I915_READ(GTIIR)); 3441 I915_WRITE(GTIIR, I915_READ(GTIIR)); 3442 3443 gen5_gt_irq_reset(dev); 3444 3445 I915_WRITE(DPINVGTT, 0xff); 3446 3447 I915_WRITE(PORT_HOTPLUG_EN, 0); 3448 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3449 for_each_pipe(pipe) 3450 I915_WRITE(PIPESTAT(pipe), 0xffff); 3451 I915_WRITE(VLV_IIR, 0xffffffff); 3452 I915_WRITE(VLV_IMR, 0xffffffff); 3453 I915_WRITE(VLV_IER, 0x0); 3454 POSTING_READ(VLV_IER); 3455 } 3456 3457 static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv) 3458 { 3459 GEN8_IRQ_RESET_NDX(GT, 0); 3460 GEN8_IRQ_RESET_NDX(GT, 1); 3461 GEN8_IRQ_RESET_NDX(GT, 2); 3462 GEN8_IRQ_RESET_NDX(GT, 3); 3463 } 3464 3465 static void gen8_irq_reset(struct drm_device *dev) 3466 { 3467 struct drm_i915_private *dev_priv = dev->dev_private; 3468 int pipe; 3469 3470 I915_WRITE(GEN8_MASTER_IRQ, 0); 3471 POSTING_READ(GEN8_MASTER_IRQ); 3472 3473 gen8_gt_irq_reset(dev_priv); 3474 3475 for_each_pipe(pipe) 3476 if (intel_display_power_enabled(dev_priv, 3477 POWER_DOMAIN_PIPE(pipe))) 3478 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe); 3479 3480 GEN5_IRQ_RESET(GEN8_DE_PORT_); 3481 GEN5_IRQ_RESET(GEN8_DE_MISC_); 3482 GEN5_IRQ_RESET(GEN8_PCU_); 3483 3484 ibx_irq_reset(dev); 3485 } 3486 3487 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv) 3488 { 3489 unsigned long irqflags; 3490 3491 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3492 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B, dev_priv->de_irq_mask[PIPE_B], 3493 ~dev_priv->de_irq_mask[PIPE_B]); 3494 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C, dev_priv->de_irq_mask[PIPE_C], 3495 ~dev_priv->de_irq_mask[PIPE_C]); 3496 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3497 } 3498 3499 static void cherryview_irq_preinstall(struct drm_device *dev) 3500 { 3501 struct drm_i915_private *dev_priv = dev->dev_private; 3502 int pipe; 3503 3504 I915_WRITE(GEN8_MASTER_IRQ, 0); 3505 POSTING_READ(GEN8_MASTER_IRQ); 3506 3507 gen8_gt_irq_reset(dev_priv); 3508 3509 GEN5_IRQ_RESET(GEN8_PCU_); 3510 3511 POSTING_READ(GEN8_PCU_IIR); 3512 3513 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV); 3514 3515 I915_WRITE(PORT_HOTPLUG_EN, 0); 3516 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3517 3518 for_each_pipe(pipe) 3519 I915_WRITE(PIPESTAT(pipe), 
0xffff); 3520 3521 I915_WRITE(VLV_IMR, 0xffffffff); 3522 I915_WRITE(VLV_IER, 0x0); 3523 I915_WRITE(VLV_IIR, 0xffffffff); 3524 POSTING_READ(VLV_IIR); 3525 } 3526 3527 static void ibx_hpd_irq_setup(struct drm_device *dev) 3528 { 3529 struct drm_i915_private *dev_priv = dev->dev_private; 3530 struct drm_mode_config *mode_config = &dev->mode_config; 3531 struct intel_encoder *intel_encoder; 3532 u32 hotplug_irqs, hotplug, enabled_irqs = 0; 3533 3534 if (HAS_PCH_IBX(dev)) { 3535 hotplug_irqs = SDE_HOTPLUG_MASK; 3536 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head) 3537 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) 3538 enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin]; 3539 } else { 3540 hotplug_irqs = SDE_HOTPLUG_MASK_CPT; 3541 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head) 3542 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) 3543 enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin]; 3544 } 3545 3546 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 3547 3548 /* 3549 * Enable digital hotplug on the PCH, and configure the DP short pulse 3550 * duration to 2ms (which is the minimum in the Display Port spec) 3551 * 3552 * This register is the same on all known PCH chips. 3553 */ 3554 hotplug = I915_READ(PCH_PORT_HOTPLUG); 3555 hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK); 3556 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms; 3557 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms; 3558 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms; 3559 I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 3560 } 3561 3562 static void ibx_irq_postinstall(struct drm_device *dev) 3563 { 3564 struct drm_i915_private *dev_priv = dev->dev_private; 3565 u32 mask; 3566 3567 if (HAS_PCH_NOP(dev)) 3568 return; 3569 3570 if (HAS_PCH_IBX(dev)) 3571 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON; 3572 else 3573 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT; 3574 3575 GEN5_ASSERT_IIR_IS_ZERO(SDEIIR); 3576 I915_WRITE(SDEIMR, ~mask); 3577 } 3578 3579 static void gen5_gt_irq_postinstall(struct drm_device *dev) 3580 { 3581 struct drm_i915_private *dev_priv = dev->dev_private; 3582 u32 pm_irqs, gt_irqs; 3583 3584 pm_irqs = gt_irqs = 0; 3585 3586 dev_priv->gt_irq_mask = ~0; 3587 if (HAS_L3_DPF(dev)) { 3588 /* L3 parity interrupt is always unmasked. 
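		 * Clear it out of gt_irq_mask and add it to the IER enable
		 * bits set up below.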
*/ 3589 dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev); 3590 gt_irqs |= GT_PARITY_ERROR(dev); 3591 } 3592 3593 gt_irqs |= GT_RENDER_USER_INTERRUPT; 3594 if (IS_GEN5(dev)) { 3595 gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT | 3596 ILK_BSD_USER_INTERRUPT; 3597 } else { 3598 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT; 3599 } 3600 3601 GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs); 3602 3603 if (INTEL_INFO(dev)->gen >= 6) { 3604 pm_irqs |= dev_priv->pm_rps_events; 3605 3606 if (HAS_VEBOX(dev)) 3607 pm_irqs |= PM_VEBOX_USER_INTERRUPT; 3608 3609 dev_priv->pm_irq_mask = 0xffffffff; 3610 GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs); 3611 } 3612 } 3613 3614 static int ironlake_irq_postinstall(struct drm_device *dev) 3615 { 3616 unsigned long irqflags; 3617 struct drm_i915_private *dev_priv = dev->dev_private; 3618 u32 display_mask, extra_mask; 3619 3620 if (INTEL_INFO(dev)->gen >= 7) { 3621 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | 3622 DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB | 3623 DE_PLANEB_FLIP_DONE_IVB | 3624 DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB); 3625 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB | 3626 DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB); 3627 } else { 3628 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | 3629 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE | 3630 DE_AUX_CHANNEL_A | 3631 DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE | 3632 DE_POISON); 3633 extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT | 3634 DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN; 3635 } 3636 3637 dev_priv->irq_mask = ~display_mask; 3638 3639 I915_WRITE(HWSTAM, 0xeffe); 3640 3641 ibx_irq_pre_postinstall(dev); 3642 3643 GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask); 3644 3645 gen5_gt_irq_postinstall(dev); 3646 3647 ibx_irq_postinstall(dev); 3648 3649 if (IS_IRONLAKE_M(dev)) { 3650 /* Enable PCU event interrupts 3651 * 3652 * spinlocking not required here for correctness since interrupt 3653 * setup is guaranteed to run in single-threaded context. But we 3654 * need it to make the assert_spin_locked happy. 
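		 * (ironlake_enable_display_irq() asserts that irq_lock is held.)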
*/ 3655 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3656 ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT); 3657 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3658 } 3659 3660 return 0; 3661 } 3662 3663 static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv) 3664 { 3665 u32 pipestat_mask; 3666 u32 iir_mask; 3667 3668 pipestat_mask = PIPESTAT_INT_STATUS_MASK | 3669 PIPE_FIFO_UNDERRUN_STATUS; 3670 3671 I915_WRITE(PIPESTAT(PIPE_A), pipestat_mask); 3672 I915_WRITE(PIPESTAT(PIPE_B), pipestat_mask); 3673 POSTING_READ(PIPESTAT(PIPE_A)); 3674 3675 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV | 3676 PIPE_CRC_DONE_INTERRUPT_STATUS; 3677 3678 i915_enable_pipestat(dev_priv, PIPE_A, pipestat_mask | 3679 PIPE_GMBUS_INTERRUPT_STATUS); 3680 i915_enable_pipestat(dev_priv, PIPE_B, pipestat_mask); 3681 3682 iir_mask = I915_DISPLAY_PORT_INTERRUPT | 3683 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3684 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; 3685 dev_priv->irq_mask &= ~iir_mask; 3686 3687 I915_WRITE(VLV_IIR, iir_mask); 3688 I915_WRITE(VLV_IIR, iir_mask); 3689 I915_WRITE(VLV_IMR, dev_priv->irq_mask); 3690 I915_WRITE(VLV_IER, ~dev_priv->irq_mask); 3691 POSTING_READ(VLV_IER); 3692 } 3693 3694 static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv) 3695 { 3696 u32 pipestat_mask; 3697 u32 iir_mask; 3698 3699 iir_mask = I915_DISPLAY_PORT_INTERRUPT | 3700 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3701 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; 3702 3703 dev_priv->irq_mask |= iir_mask; 3704 I915_WRITE(VLV_IER, ~dev_priv->irq_mask); 3705 I915_WRITE(VLV_IMR, dev_priv->irq_mask); 3706 I915_WRITE(VLV_IIR, iir_mask); 3707 I915_WRITE(VLV_IIR, iir_mask); 3708 POSTING_READ(VLV_IIR); 3709 3710 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV | 3711 PIPE_CRC_DONE_INTERRUPT_STATUS; 3712 3713 i915_disable_pipestat(dev_priv, PIPE_A, pipestat_mask | 3714 PIPE_GMBUS_INTERRUPT_STATUS); 3715 i915_disable_pipestat(dev_priv, PIPE_B, pipestat_mask); 3716 3717 pipestat_mask = PIPESTAT_INT_STATUS_MASK | 3718 PIPE_FIFO_UNDERRUN_STATUS; 3719 I915_WRITE(PIPESTAT(PIPE_A), pipestat_mask); 3720 I915_WRITE(PIPESTAT(PIPE_B), pipestat_mask); 3721 POSTING_READ(PIPESTAT(PIPE_A)); 3722 } 3723 3724 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv) 3725 { 3726 assert_spin_locked(&dev_priv->irq_lock); 3727 3728 if (dev_priv->display_irqs_enabled) 3729 return; 3730 3731 dev_priv->display_irqs_enabled = true; 3732 3733 if (dev_priv->dev->irq_enabled) 3734 valleyview_display_irqs_install(dev_priv); 3735 } 3736 3737 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv) 3738 { 3739 assert_spin_locked(&dev_priv->irq_lock); 3740 3741 if (!dev_priv->display_irqs_enabled) 3742 return; 3743 3744 dev_priv->display_irqs_enabled = false; 3745 3746 if (dev_priv->dev->irq_enabled) 3747 valleyview_display_irqs_uninstall(dev_priv); 3748 } 3749 3750 static int valleyview_irq_postinstall(struct drm_device *dev) 3751 { 3752 struct drm_i915_private *dev_priv = dev->dev_private; 3753 unsigned long irqflags; 3754 3755 dev_priv->irq_mask = ~0; 3756 3757 I915_WRITE(PORT_HOTPLUG_EN, 0); 3758 POSTING_READ(PORT_HOTPLUG_EN); 3759 3760 I915_WRITE(VLV_IMR, dev_priv->irq_mask); 3761 I915_WRITE(VLV_IER, ~dev_priv->irq_mask); 3762 I915_WRITE(VLV_IIR, 0xffffffff); 3763 POSTING_READ(VLV_IER); 3764 3765 /* Interrupt setup is already guaranteed to be single-threaded, this is 3766 * just to make the assert_spin_locked check happy. 
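 *
 * Note that display_irqs_enabled may already have been set through
 * valleyview_enable_display_irqs() before the irq handler was
 * installed; in that case the display interrupt bits only get
 * programmed into the hardware here.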
*/ 3767 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3768 if (dev_priv->display_irqs_enabled) 3769 valleyview_display_irqs_install(dev_priv); 3770 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3771 3772 I915_WRITE(VLV_IIR, 0xffffffff); 3773 I915_WRITE(VLV_IIR, 0xffffffff); 3774 3775 gen5_gt_irq_postinstall(dev); 3776 3777 /* ack & enable invalid PTE error interrupts */ 3778 #if 0 /* FIXME: add support to irq handler for checking these bits */ 3779 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK); 3780 I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK); 3781 #endif 3782 3783 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); 3784 3785 return 0; 3786 } 3787 3788 static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv) 3789 { 3790 int i; 3791 3792 /* These are interrupts we'll toggle with the ring mask register */ 3793 uint32_t gt_interrupts[] = { 3794 GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT | 3795 GT_RENDER_L3_PARITY_ERROR_INTERRUPT | 3796 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT, 3797 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT | 3798 GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT, 3799 0, 3800 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT 3801 }; 3802 3803 for (i = 0; i < ARRAY_SIZE(gt_interrupts); i++) 3804 GEN8_IRQ_INIT_NDX(GT, i, ~gt_interrupts[i], gt_interrupts[i]); 3805 3806 dev_priv->pm_irq_mask = 0xffffffff; 3807 } 3808 3809 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) 3810 { 3811 struct drm_device *dev = dev_priv->dev; 3812 uint32_t de_pipe_masked = GEN8_PIPE_PRIMARY_FLIP_DONE | 3813 GEN8_PIPE_CDCLK_CRC_DONE | 3814 GEN8_DE_PIPE_IRQ_FAULT_ERRORS; 3815 uint32_t de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK | 3816 GEN8_PIPE_FIFO_UNDERRUN; 3817 int pipe; 3818 dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked; 3819 dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked; 3820 dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked; 3821 3822 for_each_pipe(pipe) 3823 if (intel_display_power_enabled(dev_priv, 3824 POWER_DOMAIN_PIPE(pipe))) 3825 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe, 3826 dev_priv->de_irq_mask[pipe], 3827 de_pipe_enables); 3828 3829 GEN5_IRQ_INIT(GEN8_DE_PORT_, ~GEN8_AUX_CHANNEL_A, GEN8_AUX_CHANNEL_A); 3830 } 3831 3832 static int gen8_irq_postinstall(struct drm_device *dev) 3833 { 3834 struct drm_i915_private *dev_priv = dev->dev_private; 3835 3836 ibx_irq_pre_postinstall(dev); 3837 3838 gen8_gt_irq_postinstall(dev_priv); 3839 gen8_de_irq_postinstall(dev_priv); 3840 3841 ibx_irq_postinstall(dev); 3842 3843 I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL); 3844 POSTING_READ(GEN8_MASTER_IRQ); 3845 3846 return 0; 3847 } 3848 3849 static int cherryview_irq_postinstall(struct drm_device *dev) 3850 { 3851 struct drm_i915_private *dev_priv = dev->dev_private; 3852 u32 enable_mask = I915_DISPLAY_PORT_INTERRUPT | 3853 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3854 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3855 I915_DISPLAY_PIPE_C_EVENT_INTERRUPT; 3856 u32 pipestat_enable = PLANE_FLIP_DONE_INT_STATUS_VLV | 3857 PIPE_CRC_DONE_INTERRUPT_STATUS; 3858 unsigned long irqflags; 3859 int pipe; 3860 3861 /* 3862 * Leave vblank interrupts masked initially. enable/disable will 3863 * toggle them based on usage. 
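 *
 * Only flip-done, CRC and (on pipe A) GMBUS pipestat events are
 * enabled below; the vblank pipestat bits are turned on and off from
 * the enable_vblank/disable_vblank hooks.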
3864 */ 3865 dev_priv->irq_mask = ~enable_mask; 3866 3867 for_each_pipe(pipe) 3868 I915_WRITE(PIPESTAT(pipe), 0xffff); 3869 3870 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3871 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); 3872 for_each_pipe(pipe) 3873 i915_enable_pipestat(dev_priv, pipe, pipestat_enable); 3874 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3875 3876 I915_WRITE(VLV_IIR, 0xffffffff); 3877 I915_WRITE(VLV_IMR, dev_priv->irq_mask); 3878 I915_WRITE(VLV_IER, enable_mask); 3879 3880 gen8_gt_irq_postinstall(dev_priv); 3881 3882 I915_WRITE(GEN8_MASTER_IRQ, MASTER_INTERRUPT_ENABLE); 3883 POSTING_READ(GEN8_MASTER_IRQ); 3884 3885 return 0; 3886 } 3887 3888 static void gen8_irq_uninstall(struct drm_device *dev) 3889 { 3890 struct drm_i915_private *dev_priv = dev->dev_private; 3891 3892 if (!dev_priv) 3893 return; 3894 3895 intel_hpd_irq_uninstall(dev_priv); 3896 3897 gen8_irq_reset(dev); 3898 } 3899 3900 static void valleyview_irq_uninstall(struct drm_device *dev) 3901 { 3902 struct drm_i915_private *dev_priv = dev->dev_private; 3903 unsigned long irqflags; 3904 int pipe; 3905 3906 if (!dev_priv) 3907 return; 3908 3909 I915_WRITE(VLV_MASTER_IER, 0); 3910 3911 intel_hpd_irq_uninstall(dev_priv); 3912 3913 for_each_pipe(pipe) 3914 I915_WRITE(PIPESTAT(pipe), 0xffff); 3915 3916 I915_WRITE(HWSTAM, 0xffffffff); 3917 I915_WRITE(PORT_HOTPLUG_EN, 0); 3918 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3919 3920 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3921 if (dev_priv->display_irqs_enabled) 3922 valleyview_display_irqs_uninstall(dev_priv); 3923 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3924 3925 dev_priv->irq_mask = 0; 3926 3927 I915_WRITE(VLV_IIR, 0xffffffff); 3928 I915_WRITE(VLV_IMR, 0xffffffff); 3929 I915_WRITE(VLV_IER, 0x0); 3930 POSTING_READ(VLV_IER); 3931 } 3932 3933 static void cherryview_irq_uninstall(struct drm_device *dev) 3934 { 3935 struct drm_i915_private *dev_priv = dev->dev_private; 3936 int pipe; 3937 3938 if (!dev_priv) 3939 return; 3940 3941 I915_WRITE(GEN8_MASTER_IRQ, 0); 3942 POSTING_READ(GEN8_MASTER_IRQ); 3943 3944 #define GEN8_IRQ_FINI_NDX(type, which) \ 3945 do { \ 3946 I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \ 3947 I915_WRITE(GEN8_##type##_IER(which), 0); \ 3948 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \ 3949 POSTING_READ(GEN8_##type##_IIR(which)); \ 3950 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \ 3951 } while (0) 3952 3953 #define GEN8_IRQ_FINI(type) \ 3954 do { \ 3955 I915_WRITE(GEN8_##type##_IMR, 0xffffffff); \ 3956 I915_WRITE(GEN8_##type##_IER, 0); \ 3957 I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \ 3958 POSTING_READ(GEN8_##type##_IIR); \ 3959 I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \ 3960 } while (0) 3961 3962 GEN8_IRQ_FINI_NDX(GT, 0); 3963 GEN8_IRQ_FINI_NDX(GT, 1); 3964 GEN8_IRQ_FINI_NDX(GT, 2); 3965 GEN8_IRQ_FINI_NDX(GT, 3); 3966 3967 GEN8_IRQ_FINI(PCU); 3968 3969 #undef GEN8_IRQ_FINI 3970 #undef GEN8_IRQ_FINI_NDX 3971 3972 I915_WRITE(PORT_HOTPLUG_EN, 0); 3973 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3974 3975 for_each_pipe(pipe) 3976 I915_WRITE(PIPESTAT(pipe), 0xffff); 3977 3978 I915_WRITE(VLV_IMR, 0xffffffff); 3979 I915_WRITE(VLV_IER, 0x0); 3980 I915_WRITE(VLV_IIR, 0xffffffff); 3981 POSTING_READ(VLV_IIR); 3982 } 3983 3984 static void ironlake_irq_uninstall(struct drm_device *dev) 3985 { 3986 struct drm_i915_private *dev_priv = dev->dev_private; 3987 3988 if (!dev_priv) 3989 return; 3990 3991 intel_hpd_irq_uninstall(dev_priv); 3992 3993 
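/* Hotplug state is torn down first so the deferred re-enable timer
 * can't poke the hardware we are about to reset. */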
ironlake_irq_reset(dev); 3994 } 3995 3996 static void i8xx_irq_preinstall(struct drm_device * dev) 3997 { 3998 struct drm_i915_private *dev_priv = dev->dev_private; 3999 int pipe; 4000 4001 for_each_pipe(pipe) 4002 I915_WRITE(PIPESTAT(pipe), 0); 4003 I915_WRITE16(IMR, 0xffff); 4004 I915_WRITE16(IER, 0x0); 4005 POSTING_READ16(IER); 4006 } 4007 4008 static int i8xx_irq_postinstall(struct drm_device *dev) 4009 { 4010 struct drm_i915_private *dev_priv = dev->dev_private; 4011 unsigned long irqflags; 4012 4013 I915_WRITE16(EMR, 4014 ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); 4015 4016 /* Unmask the interrupts that we always want on. */ 4017 dev_priv->irq_mask = 4018 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 4019 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 4020 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 4021 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | 4022 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 4023 I915_WRITE16(IMR, dev_priv->irq_mask); 4024 4025 I915_WRITE16(IER, 4026 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 4027 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 4028 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT | 4029 I915_USER_INTERRUPT); 4030 POSTING_READ16(IER); 4031 4032 /* Interrupt setup is already guaranteed to be single-threaded, this is 4033 * just to make the assert_spin_locked check happy. */ 4034 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 4035 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 4036 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 4037 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 4038 4039 return 0; 4040 } 4041 4042 /* 4043 * Returns true when a page flip has completed. 4044 */ 4045 static bool i8xx_handle_vblank(struct drm_device *dev, 4046 int plane, int pipe, u32 iir) 4047 { 4048 struct drm_i915_private *dev_priv = dev->dev_private; 4049 u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane); 4050 4051 if (!intel_pipe_handle_vblank(dev, pipe)) 4052 return false; 4053 4054 if ((iir & flip_pending) == 0) 4055 return false; 4056 4057 intel_prepare_page_flip(dev, plane); 4058 4059 /* We detect FlipDone by looking for the change in PendingFlip from '1' 4060 * to '0' on the following vblank, i.e. IIR has the Pendingflip 4061 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence 4062 * the flip is completed (no longer pending). Since this doesn't raise 4063 * an interrupt per se, we watch for the change at vblank. 4064 */ 4065 if (I915_READ16(ISR) & flip_pending) 4066 return false; 4067 4068 intel_finish_page_flip(dev, pipe); 4069 4070 return true; 4071 } 4072 4073 static irqreturn_t i8xx_irq_handler(int irq, void *arg) 4074 { 4075 struct drm_device *dev = arg; 4076 struct drm_i915_private *dev_priv = dev->dev_private; 4077 u16 iir, new_iir; 4078 u32 pipe_stats[2]; 4079 unsigned long irqflags; 4080 int pipe; 4081 u16 flip_mask = 4082 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 4083 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 4084 4085 iir = I915_READ16(IIR); 4086 if (iir == 0) 4087 return IRQ_NONE; 4088 4089 while (iir & ~flip_mask) { 4090 /* Can't rely on pipestat interrupt bit in iir as it might 4091 * have been cleared after the pipestat interrupt was received. 4092 * It doesn't set the bit in iir again, but it still produces 4093 * interrupts (for non-MSI). 
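 *
 * Hence the ordering below: latch and ack PIPESTAT under irq_lock
 * first, then ack IIR (leaving the flip-pending bits alone), then
 * re-read IIR so that any event that raced in gets another pass
 * through the loop.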
4094 */ 4095 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 4096 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 4097 i915_handle_error(dev, false, 4098 "Command parser error, iir 0x%08x", 4099 iir); 4100 4101 for_each_pipe(pipe) { 4102 int reg = PIPESTAT(pipe); 4103 pipe_stats[pipe] = I915_READ(reg); 4104 4105 /* 4106 * Clear the PIPE*STAT regs before the IIR 4107 */ 4108 if (pipe_stats[pipe] & 0x8000ffff) 4109 I915_WRITE(reg, pipe_stats[pipe]); 4110 } 4111 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 4112 4113 I915_WRITE16(IIR, iir & ~flip_mask); 4114 new_iir = I915_READ16(IIR); /* Flush posted writes */ 4115 4116 i915_update_dri1_breadcrumb(dev); 4117 4118 if (iir & I915_USER_INTERRUPT) 4119 notify_ring(dev, &dev_priv->ring[RCS]); 4120 4121 for_each_pipe(pipe) { 4122 int plane = pipe; 4123 if (HAS_FBC(dev)) 4124 plane = !plane; 4125 4126 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS && 4127 i8xx_handle_vblank(dev, plane, pipe, iir)) 4128 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane); 4129 4130 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 4131 i9xx_pipe_crc_irq_handler(dev, pipe); 4132 4133 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS && 4134 intel_set_cpu_fifo_underrun_reporting(dev, pipe, false)) 4135 DRM_ERROR("pipe %c underrun\n", pipe_name(pipe)); 4136 } 4137 4138 iir = new_iir; 4139 } 4140 4141 return IRQ_HANDLED; 4142 } 4143 4144 static void i8xx_irq_uninstall(struct drm_device * dev) 4145 { 4146 struct drm_i915_private *dev_priv = dev->dev_private; 4147 int pipe; 4148 4149 for_each_pipe(pipe) { 4150 /* Clear enable bits; then clear status bits */ 4151 I915_WRITE(PIPESTAT(pipe), 0); 4152 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); 4153 } 4154 I915_WRITE16(IMR, 0xffff); 4155 I915_WRITE16(IER, 0x0); 4156 I915_WRITE16(IIR, I915_READ16(IIR)); 4157 } 4158 4159 static void i915_irq_preinstall(struct drm_device * dev) 4160 { 4161 struct drm_i915_private *dev_priv = dev->dev_private; 4162 int pipe; 4163 4164 if (I915_HAS_HOTPLUG(dev)) { 4165 I915_WRITE(PORT_HOTPLUG_EN, 0); 4166 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 4167 } 4168 4169 I915_WRITE16(HWSTAM, 0xeffe); 4170 for_each_pipe(pipe) 4171 I915_WRITE(PIPESTAT(pipe), 0); 4172 I915_WRITE(IMR, 0xffffffff); 4173 I915_WRITE(IER, 0x0); 4174 POSTING_READ(IER); 4175 } 4176 4177 static int i915_irq_postinstall(struct drm_device *dev) 4178 { 4179 struct drm_i915_private *dev_priv = dev->dev_private; 4180 u32 enable_mask; 4181 unsigned long irqflags; 4182 4183 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); 4184 4185 /* Unmask the interrupts that we always want on. */ 4186 dev_priv->irq_mask = 4187 ~(I915_ASLE_INTERRUPT | 4188 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 4189 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 4190 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 4191 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | 4192 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 4193 4194 enable_mask = 4195 I915_ASLE_INTERRUPT | 4196 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 4197 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 4198 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT | 4199 I915_USER_INTERRUPT; 4200 4201 if (I915_HAS_HOTPLUG(dev)) { 4202 I915_WRITE(PORT_HOTPLUG_EN, 0); 4203 POSTING_READ(PORT_HOTPLUG_EN); 4204 4205 /* Enable in IER... 
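 * (I915_DISPLAY_PORT_INTERRUPT is only the summary bit; the per-pin
 * PORT_HOTPLUG_EN enables are programmed later by i915_hpd_irq_setup()
 * once intel_hpd_init() runs)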
*/ 4206 enable_mask |= I915_DISPLAY_PORT_INTERRUPT; 4207 /* and unmask in IMR */ 4208 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT; 4209 } 4210 4211 I915_WRITE(IMR, dev_priv->irq_mask); 4212 I915_WRITE(IER, enable_mask); 4213 POSTING_READ(IER); 4214 4215 i915_enable_asle_pipestat(dev); 4216 4217 /* Interrupt setup is already guaranteed to be single-threaded, this is 4218 * just to make the assert_spin_locked check happy. */ 4219 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 4220 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 4221 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 4222 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 4223 4224 return 0; 4225 } 4226 4227 /* 4228 * Returns true when a page flip has completed. 4229 */ 4230 static bool i915_handle_vblank(struct drm_device *dev, 4231 int plane, int pipe, u32 iir) 4232 { 4233 struct drm_i915_private *dev_priv = dev->dev_private; 4234 u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane); 4235 4236 if (!intel_pipe_handle_vblank(dev, pipe)) 4237 return false; 4238 4239 if ((iir & flip_pending) == 0) 4240 return false; 4241 4242 intel_prepare_page_flip(dev, plane); 4243 4244 /* We detect FlipDone by looking for the change in PendingFlip from '1' 4245 * to '0' on the following vblank, i.e. IIR has the Pendingflip 4246 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence 4247 * the flip is completed (no longer pending). Since this doesn't raise 4248 * an interrupt per se, we watch for the change at vblank. 4249 */ 4250 if (I915_READ(ISR) & flip_pending) 4251 return false; 4252 4253 intel_finish_page_flip(dev, pipe); 4254 4255 return true; 4256 } 4257 4258 static irqreturn_t i915_irq_handler(int irq, void *arg) 4259 { 4260 struct drm_device *dev = arg; 4261 struct drm_i915_private *dev_priv = dev->dev_private; 4262 u32 iir, new_iir, pipe_stats[I915_MAX_PIPES]; 4263 unsigned long irqflags; 4264 u32 flip_mask = 4265 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 4266 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 4267 int pipe, ret = IRQ_NONE; 4268 4269 iir = I915_READ(IIR); 4270 do { 4271 bool irq_received = (iir & ~flip_mask) != 0; 4272 bool blc_event = false; 4273 4274 /* Can't rely on pipestat interrupt bit in iir as it might 4275 * have been cleared after the pipestat interrupt was received. 4276 * It doesn't set the bit in iir again, but it still produces 4277 * interrupts (for non-MSI). 4278 */ 4279 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 4280 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 4281 i915_handle_error(dev, false, 4282 "Command parser error, iir 0x%08x", 4283 iir); 4284 4285 for_each_pipe(pipe) { 4286 int reg = PIPESTAT(pipe); 4287 pipe_stats[pipe] = I915_READ(reg); 4288 4289 /* Clear the PIPE*STAT regs before the IIR */ 4290 if (pipe_stats[pipe] & 0x8000ffff) { 4291 I915_WRITE(reg, pipe_stats[pipe]); 4292 irq_received = true; 4293 } 4294 } 4295 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 4296 4297 if (!irq_received) 4298 break; 4299 4300 /* Consume port. 
Then clear IIR or we'll miss events */ 4301 if (I915_HAS_HOTPLUG(dev) && 4302 iir & I915_DISPLAY_PORT_INTERRUPT) 4303 i9xx_hpd_irq_handler(dev); 4304 4305 I915_WRITE(IIR, iir & ~flip_mask); 4306 new_iir = I915_READ(IIR); /* Flush posted writes */ 4307 4308 if (iir & I915_USER_INTERRUPT) 4309 notify_ring(dev, &dev_priv->ring[RCS]); 4310 4311 for_each_pipe(pipe) { 4312 int plane = pipe; 4313 if (HAS_FBC(dev)) 4314 plane = !plane; 4315 4316 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS && 4317 i915_handle_vblank(dev, plane, pipe, iir)) 4318 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane); 4319 4320 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 4321 blc_event = true; 4322 4323 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 4324 i9xx_pipe_crc_irq_handler(dev, pipe); 4325 4326 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS && 4327 intel_set_cpu_fifo_underrun_reporting(dev, pipe, false)) 4328 DRM_ERROR("pipe %c underrun\n", pipe_name(pipe)); 4329 } 4330 4331 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 4332 intel_opregion_asle_intr(dev); 4333 4334 /* With MSI, interrupts are only generated when iir 4335 * transitions from zero to nonzero. If another bit got 4336 * set while we were handling the existing iir bits, then 4337 * we would never get another interrupt. 4338 * 4339 * This is fine on non-MSI as well, as if we hit this path 4340 * we avoid exiting the interrupt handler only to generate 4341 * another one. 4342 * 4343 * Note that for MSI this could cause a stray interrupt report 4344 * if an interrupt landed in the time between writing IIR and 4345 * the posting read. This should be rare enough to never 4346 * trigger the 99% of 100,000 interrupts test for disabling 4347 * stray interrupts. 4348 */ 4349 ret = IRQ_HANDLED; 4350 iir = new_iir; 4351 } while (iir & ~flip_mask); 4352 4353 i915_update_dri1_breadcrumb(dev); 4354 4355 return ret; 4356 } 4357 4358 static void i915_irq_uninstall(struct drm_device * dev) 4359 { 4360 struct drm_i915_private *dev_priv = dev->dev_private; 4361 int pipe; 4362 4363 intel_hpd_irq_uninstall(dev_priv); 4364 4365 if (I915_HAS_HOTPLUG(dev)) { 4366 I915_WRITE(PORT_HOTPLUG_EN, 0); 4367 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 4368 } 4369 4370 I915_WRITE16(HWSTAM, 0xffff); 4371 for_each_pipe(pipe) { 4372 /* Clear enable bits; then clear status bits */ 4373 I915_WRITE(PIPESTAT(pipe), 0); 4374 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); 4375 } 4376 I915_WRITE(IMR, 0xffffffff); 4377 I915_WRITE(IER, 0x0); 4378 4379 I915_WRITE(IIR, I915_READ(IIR)); 4380 } 4381 4382 static void i965_irq_preinstall(struct drm_device * dev) 4383 { 4384 struct drm_i915_private *dev_priv = dev->dev_private; 4385 int pipe; 4386 4387 I915_WRITE(PORT_HOTPLUG_EN, 0); 4388 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 4389 4390 I915_WRITE(HWSTAM, 0xeffe); 4391 for_each_pipe(pipe) 4392 I915_WRITE(PIPESTAT(pipe), 0); 4393 I915_WRITE(IMR, 0xffffffff); 4394 I915_WRITE(IER, 0x0); 4395 POSTING_READ(IER); 4396 } 4397 4398 static int i965_irq_postinstall(struct drm_device *dev) 4399 { 4400 struct drm_i915_private *dev_priv = dev->dev_private; 4401 u32 enable_mask; 4402 u32 error_mask; 4403 unsigned long irqflags; 4404 4405 /* Unmask the interrupts that we always want on. 
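 *
 * Note the asymmetry for the flip-pending bits: they stay unmasked in
 * IMR so that they show up in IIR/ISR for i915_handle_vblank(), but
 * they are stripped from enable_mask below and so never raise an
 * interrupt by themselves.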
*/ 4406 dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT | 4407 I915_DISPLAY_PORT_INTERRUPT | 4408 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 4409 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 4410 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 4411 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | 4412 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 4413 4414 enable_mask = ~dev_priv->irq_mask; 4415 enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 4416 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT); 4417 enable_mask |= I915_USER_INTERRUPT; 4418 4419 if (IS_G4X(dev)) 4420 enable_mask |= I915_BSD_USER_INTERRUPT; 4421 4422 /* Interrupt setup is already guaranteed to be single-threaded, this is 4423 * just to make the assert_spin_locked check happy. */ 4424 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 4425 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); 4426 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 4427 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 4428 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 4429 4430 /* 4431 * Enable some error detection, note the instruction error mask 4432 * bit is reserved, so we leave it masked. 4433 */ 4434 if (IS_G4X(dev)) { 4435 error_mask = ~(GM45_ERROR_PAGE_TABLE | 4436 GM45_ERROR_MEM_PRIV | 4437 GM45_ERROR_CP_PRIV | 4438 I915_ERROR_MEMORY_REFRESH); 4439 } else { 4440 error_mask = ~(I915_ERROR_PAGE_TABLE | 4441 I915_ERROR_MEMORY_REFRESH); 4442 } 4443 I915_WRITE(EMR, error_mask); 4444 4445 I915_WRITE(IMR, dev_priv->irq_mask); 4446 I915_WRITE(IER, enable_mask); 4447 POSTING_READ(IER); 4448 4449 I915_WRITE(PORT_HOTPLUG_EN, 0); 4450 POSTING_READ(PORT_HOTPLUG_EN); 4451 4452 i915_enable_asle_pipestat(dev); 4453 4454 return 0; 4455 } 4456 4457 static void i915_hpd_irq_setup(struct drm_device *dev) 4458 { 4459 struct drm_i915_private *dev_priv = dev->dev_private; 4460 struct drm_mode_config *mode_config = &dev->mode_config; 4461 struct intel_encoder *intel_encoder; 4462 u32 hotplug_en; 4463 4464 assert_spin_locked(&dev_priv->irq_lock); 4465 4466 if (I915_HAS_HOTPLUG(dev)) { 4467 hotplug_en = I915_READ(PORT_HOTPLUG_EN); 4468 hotplug_en &= ~HOTPLUG_INT_EN_MASK; 4469 /* Note HDMI and DP share hotplug bits */ 4470 /* enable bits are the same for all generations */ 4471 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head) 4472 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) 4473 hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin]; 4474 /* Programming the CRT detection parameters tends 4475 to generate a spurious hotplug event about three 4476 seconds later. So just do it once. 
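 * ("Parameters" here means the activation period and voltage-compare
 * fields written just below, not the per-port HOTPLUG_INT_EN bits.)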
4477 */ 4478 if (IS_G4X(dev)) 4479 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64; 4480 hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK; 4481 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; 4482 4483 /* Ignore TV since it's buggy */ 4484 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); 4485 } 4486 } 4487 4488 static irqreturn_t i965_irq_handler(int irq, void *arg) 4489 { 4490 struct drm_device *dev = arg; 4491 struct drm_i915_private *dev_priv = dev->dev_private; 4492 u32 iir, new_iir; 4493 u32 pipe_stats[I915_MAX_PIPES]; 4494 unsigned long irqflags; 4495 int ret = IRQ_NONE, pipe; 4496 u32 flip_mask = 4497 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 4498 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 4499 4500 iir = I915_READ(IIR); 4501 4502 for (;;) { 4503 bool irq_received = (iir & ~flip_mask) != 0; 4504 bool blc_event = false; 4505 4506 /* Can't rely on pipestat interrupt bit in iir as it might 4507 * have been cleared after the pipestat interrupt was received. 4508 * It doesn't set the bit in iir again, but it still produces 4509 * interrupts (for non-MSI). 4510 */ 4511 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 4512 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 4513 i915_handle_error(dev, false, 4514 "Command parser error, iir 0x%08x", 4515 iir); 4516 4517 for_each_pipe(pipe) { 4518 int reg = PIPESTAT(pipe); 4519 pipe_stats[pipe] = I915_READ(reg); 4520 4521 /* 4522 * Clear the PIPE*STAT regs before the IIR 4523 */ 4524 if (pipe_stats[pipe] & 0x8000ffff) { 4525 I915_WRITE(reg, pipe_stats[pipe]); 4526 irq_received = true; 4527 } 4528 } 4529 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 4530 4531 if (!irq_received) 4532 break; 4533 4534 ret = IRQ_HANDLED; 4535 4536 /* Consume port. Then clear IIR or we'll miss events */ 4537 if (iir & I915_DISPLAY_PORT_INTERRUPT) 4538 i9xx_hpd_irq_handler(dev); 4539 4540 I915_WRITE(IIR, iir & ~flip_mask); 4541 new_iir = I915_READ(IIR); /* Flush posted writes */ 4542 4543 if (iir & I915_USER_INTERRUPT) 4544 notify_ring(dev, &dev_priv->ring[RCS]); 4545 if (iir & I915_BSD_USER_INTERRUPT) 4546 notify_ring(dev, &dev_priv->ring[VCS]); 4547 4548 for_each_pipe(pipe) { 4549 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS && 4550 i915_handle_vblank(dev, pipe, pipe, iir)) 4551 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe); 4552 4553 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 4554 blc_event = true; 4555 4556 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 4557 i9xx_pipe_crc_irq_handler(dev, pipe); 4558 4559 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS && 4560 intel_set_cpu_fifo_underrun_reporting(dev, pipe, false)) 4561 DRM_ERROR("pipe %c underrun\n", pipe_name(pipe)); 4562 } 4563 4564 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 4565 intel_opregion_asle_intr(dev); 4566 4567 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 4568 gmbus_irq_handler(dev); 4569 4570 /* With MSI, interrupts are only generated when iir 4571 * transitions from zero to nonzero. If another bit got 4572 * set while we were handling the existing iir bits, then 4573 * we would never get another interrupt. 4574 * 4575 * This is fine on non-MSI as well, as if we hit this path 4576 * we avoid exiting the interrupt handler only to generate 4577 * another one. 4578 * 4579 * Note that for MSI this could cause a stray interrupt report 4580 * if an interrupt landed in the time between writing IIR and 4581 * the posting read. This should be rare enough to never 4582 * trigger the 99% of 100,000 interrupts test for disabling 4583 * stray interrupts. 
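 *
 * (The "test" referred to is the core kernel's spurious-irq detection
 * in note_interrupt(), which shuts an irq line down once roughly
 * 99,900 out of 100,000 interrupts come back unhandled.)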
4584 */ 4585 iir = new_iir; 4586 } 4587 4588 i915_update_dri1_breadcrumb(dev); 4589 4590 return ret; 4591 } 4592 4593 static void i965_irq_uninstall(struct drm_device * dev) 4594 { 4595 struct drm_i915_private *dev_priv = dev->dev_private; 4596 int pipe; 4597 4598 if (!dev_priv) 4599 return; 4600 4601 intel_hpd_irq_uninstall(dev_priv); 4602 4603 I915_WRITE(PORT_HOTPLUG_EN, 0); 4604 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 4605 4606 I915_WRITE(HWSTAM, 0xffffffff); 4607 for_each_pipe(pipe) 4608 I915_WRITE(PIPESTAT(pipe), 0); 4609 I915_WRITE(IMR, 0xffffffff); 4610 I915_WRITE(IER, 0x0); 4611 4612 for_each_pipe(pipe) 4613 I915_WRITE(PIPESTAT(pipe), 4614 I915_READ(PIPESTAT(pipe)) & 0x8000ffff); 4615 I915_WRITE(IIR, I915_READ(IIR)); 4616 } 4617 4618 static void intel_hpd_irq_reenable(unsigned long data) 4619 { 4620 struct drm_i915_private *dev_priv = (struct drm_i915_private *)data; 4621 struct drm_device *dev = dev_priv->dev; 4622 struct drm_mode_config *mode_config = &dev->mode_config; 4623 unsigned long irqflags; 4624 int i; 4625 4626 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 4627 for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) { 4628 struct drm_connector *connector; 4629 4630 if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED) 4631 continue; 4632 4633 dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED; 4634 4635 list_for_each_entry(connector, &mode_config->connector_list, head) { 4636 struct intel_connector *intel_connector = to_intel_connector(connector); 4637 4638 if (intel_connector->encoder->hpd_pin == i) { 4639 if (connector->polled != intel_connector->polled) 4640 DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n", 4641 connector->name); 4642 connector->polled = intel_connector->polled; 4643 if (!connector->polled) 4644 connector->polled = DRM_CONNECTOR_POLL_HPD; 4645 } 4646 } 4647 } 4648 if (dev_priv->display.hpd_irq_setup) 4649 dev_priv->display.hpd_irq_setup(dev); 4650 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 4651 } 4652 4653 void intel_irq_init(struct drm_device *dev) 4654 { 4655 struct drm_i915_private *dev_priv = dev->dev_private; 4656 4657 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func); 4658 INIT_WORK(&dev_priv->dig_port_work, i915_digport_work_func); 4659 INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func); 4660 INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work); 4661 INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work); 4662 4663 /* Let's track the enabled rps events */ 4664 if (IS_VALLEYVIEW(dev)) 4665 /* WaGsvRC0ResidenncyMethod:VLV */ 4666 dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED; 4667 else 4668 dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS; 4669 4670 setup_timer(&dev_priv->gpu_error.hangcheck_timer, 4671 i915_hangcheck_elapsed, 4672 (unsigned long) dev); 4673 setup_timer(&dev_priv->hotplug_reenable_timer, intel_hpd_irq_reenable, 4674 (unsigned long) dev_priv); 4675 4676 pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE); 4677 4678 /* Haven't installed the IRQ handler yet */ 4679 dev_priv->pm._irqs_disabled = true; 4680 4681 if (IS_GEN2(dev)) { 4682 dev->max_vblank_count = 0; 4683 dev->driver->get_vblank_counter = i8xx_get_vblank_counter; 4684 } else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) { 4685 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */ 4686 dev->driver->get_vblank_counter = gm45_get_vblank_counter; 4687 } else { 4688 dev->driver->get_vblank_counter = i915_get_vblank_counter; 4689 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame 
count */ 4690 } 4691 4692 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 4693 dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp; 4694 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos; 4695 } 4696 4697 if (IS_CHERRYVIEW(dev)) { 4698 dev->driver->irq_handler = cherryview_irq_handler; 4699 dev->driver->irq_preinstall = cherryview_irq_preinstall; 4700 dev->driver->irq_postinstall = cherryview_irq_postinstall; 4701 dev->driver->irq_uninstall = cherryview_irq_uninstall; 4702 dev->driver->enable_vblank = valleyview_enable_vblank; 4703 dev->driver->disable_vblank = valleyview_disable_vblank; 4704 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 4705 } else if (IS_VALLEYVIEW(dev)) { 4706 dev->driver->irq_handler = valleyview_irq_handler; 4707 dev->driver->irq_preinstall = valleyview_irq_preinstall; 4708 dev->driver->irq_postinstall = valleyview_irq_postinstall; 4709 dev->driver->irq_uninstall = valleyview_irq_uninstall; 4710 dev->driver->enable_vblank = valleyview_enable_vblank; 4711 dev->driver->disable_vblank = valleyview_disable_vblank; 4712 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 4713 } else if (IS_GEN8(dev)) { 4714 dev->driver->irq_handler = gen8_irq_handler; 4715 dev->driver->irq_preinstall = gen8_irq_reset; 4716 dev->driver->irq_postinstall = gen8_irq_postinstall; 4717 dev->driver->irq_uninstall = gen8_irq_uninstall; 4718 dev->driver->enable_vblank = gen8_enable_vblank; 4719 dev->driver->disable_vblank = gen8_disable_vblank; 4720 dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup; 4721 } else if (HAS_PCH_SPLIT(dev)) { 4722 dev->driver->irq_handler = ironlake_irq_handler; 4723 dev->driver->irq_preinstall = ironlake_irq_reset; 4724 dev->driver->irq_postinstall = ironlake_irq_postinstall; 4725 dev->driver->irq_uninstall = ironlake_irq_uninstall; 4726 dev->driver->enable_vblank = ironlake_enable_vblank; 4727 dev->driver->disable_vblank = ironlake_disable_vblank; 4728 dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup; 4729 } else { 4730 if (INTEL_INFO(dev)->gen == 2) { 4731 dev->driver->irq_preinstall = i8xx_irq_preinstall; 4732 dev->driver->irq_postinstall = i8xx_irq_postinstall; 4733 dev->driver->irq_handler = i8xx_irq_handler; 4734 dev->driver->irq_uninstall = i8xx_irq_uninstall; 4735 } else if (INTEL_INFO(dev)->gen == 3) { 4736 dev->driver->irq_preinstall = i915_irq_preinstall; 4737 dev->driver->irq_postinstall = i915_irq_postinstall; 4738 dev->driver->irq_uninstall = i915_irq_uninstall; 4739 dev->driver->irq_handler = i915_irq_handler; 4740 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 4741 } else { 4742 dev->driver->irq_preinstall = i965_irq_preinstall; 4743 dev->driver->irq_postinstall = i965_irq_postinstall; 4744 dev->driver->irq_uninstall = i965_irq_uninstall; 4745 dev->driver->irq_handler = i965_irq_handler; 4746 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 4747 } 4748 dev->driver->enable_vblank = i915_enable_vblank; 4749 dev->driver->disable_vblank = i915_disable_vblank; 4750 } 4751 } 4752 4753 void intel_hpd_init(struct drm_device *dev) 4754 { 4755 struct drm_i915_private *dev_priv = dev->dev_private; 4756 struct drm_mode_config *mode_config = &dev->mode_config; 4757 struct drm_connector *connector; 4758 unsigned long irqflags; 4759 int i; 4760 4761 for (i = 1; i < HPD_NUM_PINS; i++) { 4762 dev_priv->hpd_stats[i].hpd_cnt = 0; 4763 dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED; 4764 } 4765 list_for_each_entry(connector, &mode_config->connector_list, head) { 4766 struct intel_connector *intel_connector = 
to_intel_connector(connector); 4767 connector->polled = intel_connector->polled; 4768 if (connector->encoder && !connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE) 4769 connector->polled = DRM_CONNECTOR_POLL_HPD; 4770 if (intel_connector->mst_port) 4771 connector->polled = DRM_CONNECTOR_POLL_HPD; 4772 } 4773 4774 /* Interrupt setup is already guaranteed to be single-threaded, this is 4775 * just to make the assert_spin_locked checks happy. */ 4776 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 4777 if (dev_priv->display.hpd_irq_setup) 4778 dev_priv->display.hpd_irq_setup(dev); 4779 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 4780 } 4781 4782 /* Disable interrupts so we can allow runtime PM. */ 4783 void intel_runtime_pm_disable_interrupts(struct drm_device *dev) 4784 { 4785 struct drm_i915_private *dev_priv = dev->dev_private; 4786 4787 dev->driver->irq_uninstall(dev); 4788 dev_priv->pm._irqs_disabled = true; 4789 } 4790 4791 /* Restore interrupts so we can recover from runtime PM. */ 4792 void intel_runtime_pm_restore_interrupts(struct drm_device *dev) 4793 { 4794 struct drm_i915_private *dev_priv = dev->dev_private; 4795 4796 dev_priv->pm._irqs_disabled = false; 4797 dev->driver->irq_preinstall(dev); 4798 dev->driver->irq_postinstall(dev); 4799 } 4800
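
/*
 * Rough bring-up order (a sketch of how the hooks above fit together,
 * not a new code path): intel_irq_init() picks the per-platform vtable,
 * drm_irq_install() then runs the selected irq_preinstall/
 * irq_postinstall pair around request_irq(), and intel_hpd_init() arms
 * the hotplug pins once the handler is live.  The runtime PM helpers
 * above simply replay irq_uninstall and then irq_preinstall +
 * irq_postinstall around a suspend/resume cycle.
 */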