/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

static const u32 hpd_ibx[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_mask_i915[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_g4x[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

/* IIR can theoretically queue up two events. Be paranoid. */
#define GEN8_IRQ_RESET_NDX(type, which) do { \
	I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), 0); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
} while (0)

#define GEN5_IRQ_RESET(type) do { \
	I915_WRITE(type##IMR, 0xffffffff); \
	POSTING_READ(type##IMR); \
	I915_WRITE(type##IER, 0); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
} while (0)

/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
#define GEN5_ASSERT_IIR_IS_ZERO(reg) do { \
	u32 val = I915_READ(reg); \
	if (val) { \
		WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n", \
		     (reg), val); \
		I915_WRITE((reg), 0xffffffff); \
		POSTING_READ(reg); \
		I915_WRITE((reg), 0xffffffff); \
		POSTING_READ(reg); \
	} \
} while (0)

#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
	GEN5_ASSERT_IIR_IS_ZERO(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
	I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
	POSTING_READ(GEN8_##type##_IER(which)); \
} while (0)

#define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
	GEN5_ASSERT_IIR_IS_ZERO(type##IIR); \
	I915_WRITE(type##IMR, (imr_val)); \
	I915_WRITE(type##IER, (ier_val)); \
	POSTING_READ(type##IER); \
} while (0)

/* For display hotplug interrupt */
static void
ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(dev_priv->pm.irqs_disabled))
		return;

	if ((dev_priv->irq_mask & mask) != 0) {
		dev_priv->irq_mask &= ~mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

static void
ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(dev_priv->pm.irqs_disabled))
		return;

	if ((dev_priv->irq_mask & mask) != mask) {
		dev_priv->irq_mask |= mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(dev_priv->pm.irqs_disabled))
		return;

	dev_priv->gt_irq_mask &= ~interrupt_mask;
	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	POSTING_READ(GTIMR);
}

void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, mask);
}

void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, 0);
}
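/*
 * All of the *_update_*_irq() helpers in this file follow the same two-mask
 * convention: @interrupt_mask selects which IMR bits are touched and
 * @enabled_irq_mask says which of those end up unmasked, i.e.
 *
 *	new_imr = (old_imr & ~interrupt_mask) | (~enabled_irq_mask & interrupt_mask);
 *
 * so, for example, ilk_enable_gt_irq(dev_priv, GT_RENDER_USER_INTERRUPT)
 * clears that bit in GTIMR (unmasking the interrupt), while
 * ilk_disable_gt_irq() sets it again.
 */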
/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(dev_priv->pm.irqs_disabled))
		return;

	new_val = dev_priv->pm_irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_irq_mask) {
		dev_priv->pm_irq_mask = new_val;
		I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask);
		POSTING_READ(GEN6_PMIMR);
	}
}

void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, mask);
}

void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, 0);
}

static bool ivb_can_enable_err_int(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc;
	enum pipe pipe;

	assert_spin_locked(&dev_priv->irq_lock);

	for_each_pipe(pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		if (crtc->cpu_fifo_underrun_disabled)
			return false;
	}

	return true;
}

/**
 * bdw_update_pm_irq - update GT interrupt 2
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 *
 * Copied from the snb function, updated with relevant register offsets
 */
static void bdw_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(dev_priv->pm.irqs_disabled))
		return;

	new_val = dev_priv->pm_irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_irq_mask) {
		dev_priv->pm_irq_mask = new_val;
		I915_WRITE(GEN8_GT_IMR(2), dev_priv->pm_irq_mask);
		POSTING_READ(GEN8_GT_IMR(2));
	}
}

void bdw_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	bdw_update_pm_irq(dev_priv, mask, mask);
}

void bdw_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	bdw_update_pm_irq(dev_priv, mask, 0);
}

static bool cpt_can_enable_serr_int(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;
	struct intel_crtc *crtc;

	assert_spin_locked(&dev_priv->irq_lock);

	for_each_pipe(pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		if (crtc->pch_fifo_underrun_disabled)
			return false;
	}

	return true;
}

void i9xx_check_fifo_underruns(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);

	for_each_intel_crtc(dev, crtc) {
		u32 reg = PIPESTAT(crtc->pipe);
		u32 pipestat;

		if (crtc->cpu_fifo_underrun_disabled)
			continue;

		pipestat = I915_READ(reg) & 0xffff0000;
		if ((pipestat & PIPE_FIFO_UNDERRUN_STATUS) == 0)
			continue;

		I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
		POSTING_READ(reg);

		DRM_ERROR("pipe %c underrun\n", pipe_name(crtc->pipe));
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}

static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev,
					     enum pipe pipe,
					     bool enable, bool old)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & 0xffff0000;

	assert_spin_locked(&dev_priv->irq_lock);

	if (enable) {
		I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
		POSTING_READ(reg);
	} else {
		if (old && pipestat & PIPE_FIFO_UNDERRUN_STATUS)
			DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
	}
}

static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
						 enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN :
					  DE_PIPEB_FIFO_UNDERRUN;

	if (enable)
		ironlake_enable_display_irq(dev_priv, bit);
	else
		ironlake_disable_display_irq(dev_priv, bit);
}

static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
						  enum pipe pipe,
						  bool enable, bool old)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	if (enable) {
		I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));

		if (!ivb_can_enable_err_int(dev))
			return;

		ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
	} else {
		ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);

		if (old &&
		    I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe)) {
			DRM_ERROR("uncleared fifo underrun on pipe %c\n",
				  pipe_name(pipe));
		}
	}
}

static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev,
						  enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	assert_spin_locked(&dev_priv->irq_lock);

	if (enable)
		dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_FIFO_UNDERRUN;
	else
		dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_FIFO_UNDERRUN;
	I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
	POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
					 uint32_t interrupt_mask,
					 uint32_t enabled_irq_mask)
{
	uint32_t sdeimr = I915_READ(SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(dev_priv->pm.irqs_disabled))
		return;

	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}
#define ibx_enable_display_interrupt(dev_priv, bits) \
	ibx_display_interrupt_update((dev_priv), (bits), (bits))
#define ibx_disable_display_interrupt(dev_priv, bits) \
	ibx_display_interrupt_update((dev_priv), (bits), 0)
static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
					    enum transcoder pch_transcoder,
					    bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t bit = (pch_transcoder == TRANSCODER_A) ?
		       SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;

	if (enable)
		ibx_enable_display_interrupt(dev_priv, bit);
	else
		ibx_disable_display_interrupt(dev_priv, bit);
}

static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
					    enum transcoder pch_transcoder,
					    bool enable, bool old)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (enable) {
		I915_WRITE(SERR_INT,
			   SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));

		if (!cpt_can_enable_serr_int(dev))
			return;

		ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT);
	} else {
		ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT);

		if (old && I915_READ(SERR_INT) &
		    SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)) {
			DRM_ERROR("uncleared pch fifo underrun on pch transcoder %c\n",
				  transcoder_name(pch_transcoder));
		}
	}
}

/**
 * intel_set_cpu_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pipe: pipe
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function lets us disable or enable CPU fifo underruns for a specific
 * pipe. Notice that on some Gens (e.g. IVB, HSW), disabling FIFO underrun
 * reporting for one pipe may also disable all the other CPU error interrupts
 * for the other pipes, due to the fact that there's just one interrupt
 * mask/enable bit for all the pipes.
 *
 * Returns the previous state of underrun reporting.
 */
static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
						    enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	bool old;

	assert_spin_locked(&dev_priv->irq_lock);

	old = !intel_crtc->cpu_fifo_underrun_disabled;
	intel_crtc->cpu_fifo_underrun_disabled = !enable;

	if (INTEL_INFO(dev)->gen < 5 || IS_VALLEYVIEW(dev))
		i9xx_set_fifo_underrun_reporting(dev, pipe, enable, old);
	else if (IS_GEN5(dev) || IS_GEN6(dev))
		ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
	else if (IS_GEN7(dev))
		ivybridge_set_fifo_underrun_reporting(dev, pipe, enable, old);
	else if (IS_GEN8(dev))
		broadwell_set_fifo_underrun_reporting(dev, pipe, enable);

	return old;
}

bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
					   enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	ret = __intel_set_cpu_fifo_underrun_reporting(dev, pipe, enable);
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	return ret;
}

static bool __cpu_fifo_underrun_reporting_enabled(struct drm_device *dev,
						  enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	return !intel_crtc->cpu_fifo_underrun_disabled;
}
/**
 * intel_set_pch_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function lets us disable or enable PCH fifo underruns for a specific
 * PCH transcoder. Notice that on some PCHs (e.g. CPT/PPT), disabling FIFO
 * underrun reporting for one transcoder may also disable all the other PCH
 * error interrupts for the other transcoders, due to the fact that there's
 * just one interrupt mask/enable bit for all the transcoders.
 *
 * Returns the previous state of underrun reporting.
 */
bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
					   enum transcoder pch_transcoder,
					   bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	unsigned long flags;
	bool old;

	/*
	 * NOTE: Pre-LPT has a fixed cpu pipe -> pch transcoder mapping, but LPT
	 * has only one pch transcoder A that all pipes can use. To avoid racy
	 * pch transcoder -> pipe lookups from interrupt code simply store the
	 * underrun statistics in crtc A. Since we never expose this anywhere
	 * nor use it outside of the fifo underrun code here using the "wrong"
	 * crtc on LPT won't cause issues.
	 */

	spin_lock_irqsave(&dev_priv->irq_lock, flags);

	old = !intel_crtc->pch_fifo_underrun_disabled;
	intel_crtc->pch_fifo_underrun_disabled = !enable;

	if (HAS_PCH_IBX(dev))
		ibx_set_fifo_underrun_reporting(dev, pch_transcoder, enable);
	else
		cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable, old);

	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
	return old;
}


static void
__i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		       u32 enable_mask, u32 status_mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == enable_mask)
		return;

	dev_priv->pipestat_irq_mask[pipe] |= status_mask;

	/* Enable the interrupt, clear any pending status */
	pipestat |= enable_mask | status_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

static void
__i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
			u32 enable_mask, u32 status_mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == 0)
		return;

	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;

	pipestat &= ~enable_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

/*
 * PIPESTAT packs the interrupt enable bits in its high 16 bits and the
 * corresponding status bits in the low 16 bits, which is why the callers
 * below can normally derive the enable mask as status_mask << 16. VLV breaks
 * that symmetry for the sprite flip-done and PSR bits, hence the dedicated
 * helper.
 */
static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
{
	u32 enable_mask = status_mask << 16;

	/*
	 * On pipe A we don't support the PSR interrupt yet,
	 * on pipe B and C the same bit MBZ.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
		return 0;
	/*
	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
	 * A the same bit is for perf counters which we don't use either.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
		return 0;

	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

	return enable_mask;
}

void
i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		     u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv->dev))
		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

void
i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		      u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv->dev))
		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 */
static void i915_enable_asle_pipestat(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_STATUS);

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		/* Locking is horribly broken here, but whatever. */
		struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

		return intel_crtc->active;
	} else {
		return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
	}
}
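/*
 * The vblank/scanout helpers below back the DRM vblank infrastructure: the
 * get_vblank_counter() implementations provide the frame counter, and
 * i915_get_crtc_scanoutpos() is handed to
 * drm_calc_vbltimestamp_from_scanoutpos() (via i915_get_vblank_timestamp())
 * to turn a scanout position into a precise vblank timestamp.
 */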
/*
 * This timing diagram depicts the video signal in and
 * around the vertical blanking period.
 *
 * Assumptions about the fictitious mode used in this example:
 *  vblank_start >= 3
 *  vsync_start = vblank_start + 1
 *  vsync_end = vblank_start + 2
 *  vtotal = vblank_start + 3
 *
 *           start of vblank:
 *           latch double buffered registers
 *           increment frame counter (ctg+)
 *           generate start of vblank interrupt (gen4+)
 *           |
 *           |          frame start:
 *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
 *           |          may be shifted forward 1-3 extra lines via PIPECONF
 *           |          |
 *           |          |  start of vsync:
 *           |          |  generate vsync interrupt
 *           |          |  |
 * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
 *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
 * ----va---> <-----------------vb--------------------> <--------va-------------
 *       |          |       <----vs----->                     |
 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
 *       |          |                                         |
 *       last visible pixel                                   first visible pixel
 *                  |                                         increment frame counter (gen3/4)
 *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
 *
 * x  = horizontal active
 * _  = horizontal blanking
 * hs = horizontal sync
 * va = vertical active
 * vb = vertical blanking
 * vs = vertical sync
 * vbs = vblank_start (number)
 *
 * Summary:
 * - most events happen at the start of horizontal sync
 * - frame start happens at the start of horizontal blank, 1-4 lines
 *   (depending on PIPECONF settings) after the start of vblank
 * - gen3/4 pixel and frame counter are synchronized with the start
 *   of horizontal active on the first line of vertical active
 */

static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe)
{
	/* Gen2 doesn't have a hardware frame counter */
	return 0;
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		struct intel_crtc *intel_crtc =
			to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
		const struct drm_display_mode *mode =
			&intel_crtc->config.adjusted_mode;

		htotal = mode->crtc_htotal;
		hsync_start = mode->crtc_hsync_start;
		vbl_start = mode->crtc_vblank_start;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			vbl_start = DIV_ROUND_UP(vbl_start, 2);
	} else {
		enum transcoder cpu_transcoder = (enum transcoder) pipe;

		htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1;
		hsync_start = (I915_READ(HSYNC(cpu_transcoder)) & 0x1fff) + 1;
		vbl_start = (I915_READ(VBLANK(cpu_transcoder)) & 0x1fff) + 1;
		if ((I915_READ(PIPECONF(cpu_transcoder)) &
		     PIPECONF_INTERLACE_MASK) != PIPECONF_PROGRESSIVE)
			vbl_start = DIV_ROUND_UP(vbl_start, 2);
	}

	/* Convert to pixel count */
	vbl_start *= htotal;

	/* Start of vblank event occurs at start of hsync */
	vbl_start -= htotal - hsync_start;

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low = I915_READ(low_frame);
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * The frame counter increments at beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}

static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int reg = PIPE_FRMCOUNT_GM45(pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	return I915_READ(reg);
}

/* raw reads, only for fast reads of display block, no need for forcewake etc. */
#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))

static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct drm_display_mode *mode = &crtc->config.adjusted_mode;
	enum pipe pipe = crtc->pipe;
	int position, vtotal;

	vtotal = mode->crtc_vtotal;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vtotal /= 2;

	if (IS_GEN2(dev))
		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
	else
		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

	/*
	 * See update_scanline_offset() for the details on the
	 * scanline_offset adjustment.
	 */
	return (position + crtc->scanline_offset) % vtotal;
}
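/*
 * i915_get_crtc_scanoutpos() optionally samples a system timestamp right
 * before (stime) and right after (etime) the raw position read; the DRM
 * helper presumably uses that pair to bound how much the register query
 * itself could have skewed the reported position.
 */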
static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
				    unsigned int flags, int *vpos, int *hpos,
				    ktime_t *stime, ktime_t *etime)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	const struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
	int position;
	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;
	unsigned long irqflags;

	if (!intel_crtc->active) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

	ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = __intel_get_crtc_scanline(intel_crtc);
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;

		/*
		 * In interlaced modes, the pixel counter counts all pixels,
		 * so one field will have htotal more pixels. In order to avoid
		 * the reported position from jumping backwards when the pixel
		 * counter is beyond the length of the shorter field, just
		 * clamp the position to the length of the shorter field. This
		 * matches how the scanline counter based position works since
		 * the scanline counter doesn't count the two half lines.
		 */
		if (position >= vtotal)
			position = vtotal - 1;

		/*
		 * Start of vblank interrupt is triggered at start of hsync,
		 * just prior to the first active line of vblank. However we
		 * consider lines to start at the leading edge of horizontal
		 * active. So, should we get here before we've crossed into
		 * the horizontal active of the first line in vblank, we would
		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
		 * always add htotal-hsync_start to the current pixel position.
		 */
		position = (position + htotal - hsync_start) % vtotal;
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	in_vbl = position >= vbl_start && position < vbl_end;

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		*vpos = position;
		*hpos = 0;
	} else {
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}
	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_INVBL;

	return ret;
}

int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	unsigned long irqflags;
	int position;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	position = __intel_get_crtc_scanline(crtc);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	return position;
}

static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
				     int *max_error,
				     struct timeval *vblank_time,
				     unsigned flags)
{
	struct drm_crtc *crtc;

	if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	if (!crtc->enabled) {
		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags,
						     crtc,
						     &to_intel_crtc(crtc)->config.adjusted_mode);
}

static bool intel_hpd_irq_event(struct drm_device *dev,
				struct drm_connector *connector)
{
	enum drm_connector_status old_status;

	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
	old_status = connector->status;

	connector->status = connector->funcs->detect(connector, false);
	if (old_status == connector->status)
		return false;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
		      connector->base.id,
		      connector->name,
		      drm_get_connector_status_name(old_status),
		      drm_get_connector_status_name(connector->status));

	return true;
}

/*
 * Handle hotplug events outside the interrupt handler proper.
 */
#define I915_REENABLE_HOTPLUG_DELAY (2*60*1000)
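/*
 * If an interrupt storm forces a hotplug pin over to polling (see
 * intel_hpd_irq_handler() and i915_hotplug_work_func()), the
 * hotplug_reenable_timer is armed with this delay, i.e. hotplug interrupt
 * processing for the pin is meant to be restored after two minutes (the
 * value is in milliseconds).
 */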
static void i915_hotplug_work_func(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, hotplug_work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_connector *intel_connector;
	struct intel_encoder *intel_encoder;
	struct drm_connector *connector;
	unsigned long irqflags;
	bool hpd_disabled = false;
	bool changed = false;
	u32 hpd_event_bits;

	/* HPD irq before everything is fully set up. */
	if (!dev_priv->enable_hotplug_processing)
		return;

	mutex_lock(&mode_config->mutex);
	DRM_DEBUG_KMS("running encoder hotplug functions\n");

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	hpd_event_bits = dev_priv->hpd_event_bits;
	dev_priv->hpd_event_bits = 0;
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		intel_encoder = intel_connector->encoder;
		if (intel_encoder->hpd_pin > HPD_NONE &&
		    dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
		    connector->polled == DRM_CONNECTOR_POLL_HPD) {
			DRM_INFO("HPD interrupt storm detected on connector %s: "
				 "switching from hotplug detection to polling\n",
				 connector->name);
			dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
			connector->polled = DRM_CONNECTOR_POLL_CONNECT
				| DRM_CONNECTOR_POLL_DISCONNECT;
			hpd_disabled = true;
		}
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
				      connector->name, intel_encoder->hpd_pin);
		}
	}
	/* if there were no outputs to poll, poll was disabled,
	 * therefore make sure it's enabled when disabling HPD on
	 * some connectors */
	if (hpd_disabled) {
		drm_kms_helper_poll_enable(dev);
		mod_timer(&dev_priv->hotplug_reenable_timer,
			  jiffies + msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
	}

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		intel_encoder = intel_connector->encoder;
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			if (intel_encoder->hot_plug)
				intel_encoder->hot_plug(intel_encoder);
			if (intel_hpd_irq_event(dev, connector))
				changed = true;
		}
	}
	mutex_unlock(&mode_config->mutex);

	if (changed)
		drm_kms_helper_hotplug_event(dev);
}

static void intel_hpd_irq_uninstall(struct drm_i915_private *dev_priv)
{
	del_timer_sync(&dev_priv->hotplug_reenable_timer);
}

static void ironlake_rps_change_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;

	spin_lock(&mchdev_lock);

	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	spin_unlock(&mchdev_lock);

	return;
}

static void notify_ring(struct drm_device *dev,
			struct intel_engine_cs *ring)
{
	if (!intel_ring_initialized(ring))
		return;

	trace_i915_gem_request_complete(ring);

	wake_up_all(&ring->irq_queue);
	i915_queue_hangcheck(dev);
}
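/*
 * gen6_pm_rps_work() implements a simple adaptive step for the RPS (render
 * P-state) frequency: up/down-threshold interrupts move the frequency by
 * last_adj, doubling the step while consecutive requests keep going in the
 * same direction and resetting it to +/-1 on a direction change, while a
 * down-timeout interrupt drops straight to the efficient frequency (RPe).
 * The result is always clamped to the sysfs soft limits.
 */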
static void gen6_pm_rps_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, rps.work);
	u32 pm_iir;
	int new_delay, adj;

	spin_lock_irq(&dev_priv->irq_lock);
	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	if (IS_BROADWELL(dev_priv->dev))
		bdw_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	else {
		/* Make sure not to corrupt PMIMR state used by ringbuffer */
		snb_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	}
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Make sure we didn't queue anything we're not going to process. */
	WARN_ON(pm_iir & ~dev_priv->pm_rps_events);

	if ((pm_iir & dev_priv->pm_rps_events) == 0)
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	adj = dev_priv->rps.last_adj;
	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else
			adj = 1;
		new_delay = dev_priv->rps.cur_freq + adj;

		/*
		 * For better performance, jump directly
		 * to RPe if we're below it.
		 */
		if (new_delay < dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;
	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
		if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;
		else
			new_delay = dev_priv->rps.min_freq_softlimit;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
		if (adj < 0)
			adj *= 2;
		else
			adj = -1;
		new_delay = dev_priv->rps.cur_freq + adj;
	} else { /* unknown event */
		new_delay = dev_priv->rps.cur_freq;
	}

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	new_delay = clamp_t(int, new_delay,
			    dev_priv->rps.min_freq_softlimit,
			    dev_priv->rps.max_freq_softlimit);

	dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_freq;

	if (IS_VALLEYVIEW(dev_priv->dev))
		valleyview_set_rps(dev_priv->dev, new_delay);
	else
		gen6_set_rps(dev_priv->dev, new_delay);

	mutex_unlock(&dev_priv->rps.hw_lock);
}


/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * the same row is likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	uint32_t misccpctl;
	unsigned long flags;
	uint8_t slice = 0;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->dev->struct_mutex);

	/* If we've screwed up tracking, just let the interrupt fire again */
	if (WARN_ON(!dev_priv->l3_parity.which_slice))
		goto out;

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		u32 reg;

		slice--;
		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1<<slice);

		reg = GEN7_L3CDERRST1 + (slice * 0x200);

		error_status = I915_READ(reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		POSTING_READ(reg);

		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

		kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj,
				   KOBJ_CHANGE, parity_event);

		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			  slice, row, bank, subbank);

		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
	}

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

out:
	WARN_ON(dev_priv->l3_parity.which_slice);
	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	ilk_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	mutex_unlock(&dev_priv->dev->struct_mutex);
}

static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!HAS_L3_DPF(dev))
		return;

	spin_lock(&dev_priv->irq_lock);
	ilk_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
	spin_unlock(&dev_priv->irq_lock);

	iir &= GT_PARITY_ERROR(dev);
	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
		dev_priv->l3_parity.which_slice |= 1 << 1;

	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
		dev_priv->l3_parity.which_slice |= 1 << 0;

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}

static void ilk_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & ILK_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
}
static void snb_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{

	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
	if (gt_iir & GT_BLT_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[BCS]);

	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
		      GT_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) {
		i915_handle_error(dev, false, "GT error interrupt 0x%08x",
				  gt_iir);
	}

	if (gt_iir & GT_PARITY_ERROR(dev))
		ivybridge_parity_error_irq_handler(dev, gt_iir);
}

static void gen8_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	if ((pm_iir & dev_priv->pm_rps_events) == 0)
		return;

	spin_lock(&dev_priv->irq_lock);
	dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
	bdw_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
	spin_unlock(&dev_priv->irq_lock);

	queue_work(dev_priv->wq, &dev_priv->rps.work);
}

static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
				       struct drm_i915_private *dev_priv,
				       u32 master_ctl)
{
	u32 rcs, bcs, vcs;
	uint32_t tmp = 0;
	irqreturn_t ret = IRQ_NONE;

	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
		tmp = I915_READ(GEN8_GT_IIR(0));
		if (tmp) {
			ret = IRQ_HANDLED;
			rcs = tmp >> GEN8_RCS_IRQ_SHIFT;
			bcs = tmp >> GEN8_BCS_IRQ_SHIFT;
			if (rcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, &dev_priv->ring[RCS]);
			if (bcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, &dev_priv->ring[BCS]);
			I915_WRITE(GEN8_GT_IIR(0), tmp);
		} else
			DRM_ERROR("The master control interrupt lied (GT0)!\n");
	}

	if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
		tmp = I915_READ(GEN8_GT_IIR(1));
		if (tmp) {
			ret = IRQ_HANDLED;
			vcs = tmp >> GEN8_VCS1_IRQ_SHIFT;
			if (vcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, &dev_priv->ring[VCS]);
			vcs = tmp >> GEN8_VCS2_IRQ_SHIFT;
			if (vcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, &dev_priv->ring[VCS2]);
			I915_WRITE(GEN8_GT_IIR(1), tmp);
		} else
			DRM_ERROR("The master control interrupt lied (GT1)!\n");
	}

	if (master_ctl & GEN8_GT_PM_IRQ) {
		tmp = I915_READ(GEN8_GT_IIR(2));
		if (tmp & dev_priv->pm_rps_events) {
			ret = IRQ_HANDLED;
			gen8_rps_irq_handler(dev_priv, tmp);
			I915_WRITE(GEN8_GT_IIR(2),
				   tmp & dev_priv->pm_rps_events);
		} else
			DRM_ERROR("The master control interrupt lied (PM)!\n");
	}

	if (master_ctl & GEN8_GT_VECS_IRQ) {
		tmp = I915_READ(GEN8_GT_IIR(3));
		if (tmp) {
			ret = IRQ_HANDLED;
			vcs = tmp >> GEN8_VECS_IRQ_SHIFT;
			if (vcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, &dev_priv->ring[VECS]);
			I915_WRITE(GEN8_GT_IIR(3), tmp);
		} else
			DRM_ERROR("The master control interrupt lied (GT3)!\n");
	}

	return ret;
}

/*
 * HPD interrupt storm detection: intel_hpd_irq_handler() counts interrupts
 * per pin within an HPD_STORM_DETECT_PERIOD millisecond window; once a pin
 * exceeds HPD_STORM_THRESHOLD events in that window it is marked
 * HPD_MARK_DISABLED and the hotplug work switches the affected connector
 * over to polling.
 */
#define HPD_STORM_DETECT_PERIOD 1000
#define HPD_STORM_THRESHOLD 5

static inline void intel_hpd_irq_handler(struct drm_device *dev,
					 u32 hotplug_trigger,
					 const u32 *hpd)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;
	bool storm_detected = false;

	if (!hotplug_trigger)
		return;

	DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
			 hotplug_trigger);

	spin_lock(&dev_priv->irq_lock);
	for (i = 1; i < HPD_NUM_PINS; i++) {

		if (hpd[i] & hotplug_trigger &&
		    dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED) {
			/*
			 * On GMCH platforms the interrupt mask bits only
			 * prevent irq generation, not the setting of the
			 * hotplug bits itself. So only WARN about unexpected
			 * interrupts on saner platforms.
			 */
			WARN_ONCE(INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev),
				  "Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n",
				  hotplug_trigger, i, hpd[i]);

			continue;
		}

		if (!(hpd[i] & hotplug_trigger) ||
		    dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
			continue;

		dev_priv->hpd_event_bits |= (1 << i);
		if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
				   dev_priv->hpd_stats[i].hpd_last_jiffies
				   + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
			dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
			dev_priv->hpd_stats[i].hpd_cnt = 0;
			DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", i);
		} else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
			dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
			dev_priv->hpd_event_bits &= ~(1 << i);
			DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
			storm_detected = true;
		} else {
			dev_priv->hpd_stats[i].hpd_cnt++;
			DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", i,
				      dev_priv->hpd_stats[i].hpd_cnt);
		}
	}

	if (storm_detected)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock(&dev_priv->irq_lock);

	/*
	 * Our hotplug handler can grab modeset locks (by calling down into the
	 * fb helpers). Hence it must not be run on our own dev-priv->wq work
	 * queue for otherwise the flush_work in the pageflip code will
	 * deadlock.
	 */
	schedule_work(&dev_priv->hotplug_work);
}

static void gmbus_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

static void dp_aux_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

#if defined(CONFIG_DEBUG_FS)
static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
					 uint32_t crc0, uint32_t crc1,
					 uint32_t crc2, uint32_t crc3,
					 uint32_t crc4)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
	struct intel_pipe_crc_entry *entry;
	int head, tail;

	spin_lock(&pipe_crc->lock);

	if (!pipe_crc->entries) {
		spin_unlock(&pipe_crc->lock);
		DRM_ERROR("spurious interrupt\n");
		return;
	}

	head = pipe_crc->head;
	tail = pipe_crc->tail;

	if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
		spin_unlock(&pipe_crc->lock);
		DRM_ERROR("CRC buffer overflowing\n");
		return;
	}

	entry = &pipe_crc->entries[head];

	entry->frame = dev->driver->get_vblank_counter(dev, pipe);
	entry->crc[0] = crc0;
	entry->crc[1] = crc1;
	entry->crc[2] = crc2;
	entry->crc[3] = crc3;
	entry->crc[4] = crc4;

	head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
	pipe_crc->head = head;

	spin_unlock(&pipe_crc->lock);

	wake_up_interruptible(&pipe_crc->wq);
}
#else
static inline void
display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
			     uint32_t crc0, uint32_t crc1,
			     uint32_t crc2, uint32_t crc3,
			     uint32_t crc4) {}
#endif

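/*
 * display_pipe_crc_irq_handler() above feeds a small ring buffer: interrupt
 * context writes at head, a reader elsewhere in the driver (the debugfs pipe
 * CRC interface) drains from tail, and waiters are woken through
 * pipe_crc->wq. The wrap uses head & (INTEL_PIPE_CRC_ENTRIES_NR - 1), so the
 * entry count is assumed to be a power of two.
 */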
static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     0, 0, 0, 0);
}

static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
}

static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t res1, res2;

	if (INTEL_INFO(dev)->gen >= 3)
		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
	else
		res1 = 0;

	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
	else
		res2 = 0;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_RED(pipe)),
				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
				     res1, res2);
}

/* The RPS events need forcewake, so we add them to a work queue and mask their
 * IMR bits until the work is done. Other interrupts can be processed without
 * the work queue. */
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	if (pm_iir & dev_priv->pm_rps_events) {
		spin_lock(&dev_priv->irq_lock);
		dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
		snb_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
		spin_unlock(&dev_priv->irq_lock);

		queue_work(dev_priv->wq, &dev_priv->rps.work);
	}

	if (HAS_VEBOX(dev_priv->dev)) {
		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
			notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);

		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) {
			i915_handle_error(dev_priv->dev, false,
					  "VEBOX CS error interrupt 0x%08x",
					  pm_iir);
		}
	}
}

static bool intel_pipe_handle_vblank(struct drm_device *dev, enum pipe pipe)
{
	struct intel_crtc *crtc;

	if (!drm_handle_vblank(dev, pipe))
		return false;

	crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));
	wake_up(&crtc->vbl_wait);

	return true;
}

static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pipe_stats[I915_MAX_PIPES] = { };
	int pipe;

	spin_lock(&dev_priv->irq_lock);
	for_each_pipe(pipe) {
		int reg;
		u32 mask, iir_bit = 0;

		/*
		 * PIPESTAT bits get signalled even when the interrupt is
		 * disabled with the mask bits, and some of the status bits do
		 * not generate interrupts at all (like the underrun bit). Hence
		 * we need to be careful that we only handle what we want to
		 * handle.
		 */
		mask = 0;
		if (__cpu_fifo_underrun_reporting_enabled(dev, pipe))
			mask |= PIPE_FIFO_UNDERRUN_STATUS;

		switch (pipe) {
		case PIPE_A:
			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
			break;
		case PIPE_B:
			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
			break;
		case PIPE_C:
			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
			break;
		}
		if (iir & iir_bit)
			mask |= dev_priv->pipestat_irq_mask[pipe];

		if (!mask)
			continue;

		reg = PIPESTAT(pipe);
		mask |= PIPESTAT_INT_ENABLE_MASK;
		pipe_stats[pipe] = I915_READ(reg) & mask;

		/*
		 * Clear the PIPE*STAT regs before the IIR
		 */
		if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS |
					PIPESTAT_INT_STATUS_MASK))
			I915_WRITE(reg, pipe_stats[pipe]);
	}
	spin_unlock(&dev_priv->irq_lock);

	for_each_pipe(pipe) {
		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
			intel_pipe_handle_vblank(dev, pipe);

		if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) {
			intel_prepare_page_flip(dev, pipe);
			intel_finish_page_flip(dev, pipe);
		}

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
		    intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
			DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
	}

	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		gmbus_irq_handler(dev);
}

static void i9xx_hpd_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

	if (IS_G4X(dev)) {
		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;

		intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_g4x);
	} else {
		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

		intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);
	}

	if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) &&
	    hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
		dp_aux_irq_handler(dev);

	I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
	/*
	 * Make sure hotplug status is cleared before we clear IIR, or else we
	 * may miss hotplug events.
	 */
	POSTING_READ(PORT_HOTPLUG_STAT);
}
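/*
 * Top-level interrupt handlers for VLV/CHV. Both loop until VLV_IIR (plus
 * GTIIR/GEN6_PMIIR, or the GEN8 master control on CHV) reads back as zero,
 * handling pipe, port and GT/PM sources on each pass before clearing the
 * corresponding IIR registers.
 */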
Then clear IIR or we'll miss events */ 1856 if (iir & I915_DISPLAY_PORT_INTERRUPT) 1857 i9xx_hpd_irq_handler(dev); 1858 1859 if (pm_iir) 1860 gen6_rps_irq_handler(dev_priv, pm_iir); 1861 1862 I915_WRITE(GTIIR, gt_iir); 1863 I915_WRITE(GEN6_PMIIR, pm_iir); 1864 I915_WRITE(VLV_IIR, iir); 1865 } 1866 1867 out: 1868 return ret; 1869 } 1870 1871 static irqreturn_t cherryview_irq_handler(int irq, void *arg) 1872 { 1873 struct drm_device *dev = arg; 1874 struct drm_i915_private *dev_priv = dev->dev_private; 1875 u32 master_ctl, iir; 1876 irqreturn_t ret = IRQ_NONE; 1877 1878 for (;;) { 1879 master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL; 1880 iir = I915_READ(VLV_IIR); 1881 1882 if (master_ctl == 0 && iir == 0) 1883 break; 1884 1885 I915_WRITE(GEN8_MASTER_IRQ, 0); 1886 1887 gen8_gt_irq_handler(dev, dev_priv, master_ctl); 1888 1889 valleyview_pipestat_irq_handler(dev, iir); 1890 1891 /* Consume port. Then clear IIR or we'll miss events */ 1892 i9xx_hpd_irq_handler(dev); 1893 1894 I915_WRITE(VLV_IIR, iir); 1895 1896 I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL); 1897 POSTING_READ(GEN8_MASTER_IRQ); 1898 1899 ret = IRQ_HANDLED; 1900 } 1901 1902 return ret; 1903 } 1904 1905 static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir) 1906 { 1907 struct drm_i915_private *dev_priv = dev->dev_private; 1908 int pipe; 1909 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK; 1910 1911 intel_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx); 1912 1913 if (pch_iir & SDE_AUDIO_POWER_MASK) { 1914 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >> 1915 SDE_AUDIO_POWER_SHIFT); 1916 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", 1917 port_name(port)); 1918 } 1919 1920 if (pch_iir & SDE_AUX_MASK) 1921 dp_aux_irq_handler(dev); 1922 1923 if (pch_iir & SDE_GMBUS) 1924 gmbus_irq_handler(dev); 1925 1926 if (pch_iir & SDE_AUDIO_HDCP_MASK) 1927 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n"); 1928 1929 if (pch_iir & SDE_AUDIO_TRANS_MASK) 1930 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n"); 1931 1932 if (pch_iir & SDE_POISON) 1933 DRM_ERROR("PCH poison interrupt\n"); 1934 1935 if (pch_iir & SDE_FDI_MASK) 1936 for_each_pipe(pipe) 1937 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 1938 pipe_name(pipe), 1939 I915_READ(FDI_RX_IIR(pipe))); 1940 1941 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE)) 1942 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n"); 1943 1944 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR)) 1945 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n"); 1946 1947 if (pch_iir & SDE_TRANSA_FIFO_UNDER) 1948 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, 1949 false)) 1950 DRM_ERROR("PCH transcoder A FIFO underrun\n"); 1951 1952 if (pch_iir & SDE_TRANSB_FIFO_UNDER) 1953 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B, 1954 false)) 1955 DRM_ERROR("PCH transcoder B FIFO underrun\n"); 1956 } 1957 1958 static void ivb_err_int_handler(struct drm_device *dev) 1959 { 1960 struct drm_i915_private *dev_priv = dev->dev_private; 1961 u32 err_int = I915_READ(GEN7_ERR_INT); 1962 enum pipe pipe; 1963 1964 if (err_int & ERR_INT_POISON) 1965 DRM_ERROR("Poison interrupt\n"); 1966 1967 for_each_pipe(pipe) { 1968 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) { 1969 if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, 1970 false)) 1971 DRM_ERROR("Pipe %c FIFO underrun\n", 1972 pipe_name(pipe)); 1973 } 1974 1975 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) { 1976 if (IS_IVYBRIDGE(dev)) 1977 ivb_pipe_crc_irq_handler(dev, pipe); 1978 else 1979 
hsw_pipe_crc_irq_handler(dev, pipe); 1980 } 1981 } 1982 1983 I915_WRITE(GEN7_ERR_INT, err_int); 1984 } 1985 1986 static void cpt_serr_int_handler(struct drm_device *dev) 1987 { 1988 struct drm_i915_private *dev_priv = dev->dev_private; 1989 u32 serr_int = I915_READ(SERR_INT); 1990 1991 if (serr_int & SERR_INT_POISON) 1992 DRM_ERROR("PCH poison interrupt\n"); 1993 1994 if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN) 1995 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, 1996 false)) 1997 DRM_ERROR("PCH transcoder A FIFO underrun\n"); 1998 1999 if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN) 2000 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B, 2001 false)) 2002 DRM_ERROR("PCH transcoder B FIFO underrun\n"); 2003 2004 if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN) 2005 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_C, 2006 false)) 2007 DRM_ERROR("PCH transcoder C FIFO underrun\n"); 2008 2009 I915_WRITE(SERR_INT, serr_int); 2010 } 2011 2012 static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir) 2013 { 2014 struct drm_i915_private *dev_priv = dev->dev_private; 2015 int pipe; 2016 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; 2017 2018 intel_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt); 2019 2020 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) { 2021 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> 2022 SDE_AUDIO_POWER_SHIFT_CPT); 2023 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n", 2024 port_name(port)); 2025 } 2026 2027 if (pch_iir & SDE_AUX_MASK_CPT) 2028 dp_aux_irq_handler(dev); 2029 2030 if (pch_iir & SDE_GMBUS_CPT) 2031 gmbus_irq_handler(dev); 2032 2033 if (pch_iir & SDE_AUDIO_CP_REQ_CPT) 2034 DRM_DEBUG_DRIVER("Audio CP request interrupt\n"); 2035 2036 if (pch_iir & SDE_AUDIO_CP_CHG_CPT) 2037 DRM_DEBUG_DRIVER("Audio CP change interrupt\n"); 2038 2039 if (pch_iir & SDE_FDI_MASK_CPT) 2040 for_each_pipe(pipe) 2041 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 2042 pipe_name(pipe), 2043 I915_READ(FDI_RX_IIR(pipe))); 2044 2045 if (pch_iir & SDE_ERROR_CPT) 2046 cpt_serr_int_handler(dev); 2047 } 2048 2049 static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir) 2050 { 2051 struct drm_i915_private *dev_priv = dev->dev_private; 2052 enum pipe pipe; 2053 2054 if (de_iir & DE_AUX_CHANNEL_A) 2055 dp_aux_irq_handler(dev); 2056 2057 if (de_iir & DE_GSE) 2058 intel_opregion_asle_intr(dev); 2059 2060 if (de_iir & DE_POISON) 2061 DRM_ERROR("Poison interrupt\n"); 2062 2063 for_each_pipe(pipe) { 2064 if (de_iir & DE_PIPE_VBLANK(pipe)) 2065 intel_pipe_handle_vblank(dev, pipe); 2066 2067 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe)) 2068 if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, false)) 2069 DRM_ERROR("Pipe %c FIFO underrun\n", 2070 pipe_name(pipe)); 2071 2072 if (de_iir & DE_PIPE_CRC_DONE(pipe)) 2073 i9xx_pipe_crc_irq_handler(dev, pipe); 2074 2075 /* plane/pipes map 1:1 on ilk+ */ 2076 if (de_iir & DE_PLANE_FLIP_DONE(pipe)) { 2077 intel_prepare_page_flip(dev, pipe); 2078 intel_finish_page_flip_plane(dev, pipe); 2079 } 2080 } 2081 2082 /* check event from PCH */ 2083 if (de_iir & DE_PCH_EVENT) { 2084 u32 pch_iir = I915_READ(SDEIIR); 2085 2086 if (HAS_PCH_CPT(dev)) 2087 cpt_irq_handler(dev, pch_iir); 2088 else 2089 ibx_irq_handler(dev, pch_iir); 2090 2091 /* should clear PCH hotplug event before clear CPU irq */ 2092 I915_WRITE(SDEIIR, pch_iir); 2093 } 2094 2095 if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT) 2096 ironlake_rps_change_irq_handler(dev); 2097 } 2098 2099 static void ivb_display_irq_handler(struct drm_device *dev, u32 
de_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;

	if (de_iir & DE_ERR_INT_IVB)
		ivb_err_int_handler(dev);

	if (de_iir & DE_AUX_CHANNEL_A_IVB)
		dp_aux_irq_handler(dev);

	if (de_iir & DE_GSE_IVB)
		intel_opregion_asle_intr(dev);

	for_each_pipe(pipe) {
		if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)))
			intel_pipe_handle_vblank(dev, pipe);

		/* plane/pipes map 1:1 on ilk+ */
		if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) {
			intel_prepare_page_flip(dev, pipe);
			intel_finish_page_flip_plane(dev, pipe);
		}
	}

	/* check event from PCH */
	if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
		u32 pch_iir = I915_READ(SDEIIR);

		cpt_irq_handler(dev, pch_iir);

		/* clear PCH hotplug event before clearing the CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}
}

static irqreturn_t ironlake_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
	irqreturn_t ret = IRQ_NONE;

	/* We get interrupts on unclaimed registers, so check for this before we
	 * do any I915_{READ,WRITE}. */
	intel_uncore_check_errors(dev);

	/* disable master interrupt before clearing iir */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
	POSTING_READ(DEIER);

	/* Disable south interrupts. We'll only write to SDEIIR once, so further
	 * interrupts will be stored on its back queue, and then we'll be
	 * able to process them after we restore SDEIER (as soon as we restore
	 * it, we'll get an interrupt if SDEIIR still has something to process
	 * due to its back queue).
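	 *
	 * Roughly, the resulting ordering below is: mask DE_MASTER_IRQ_CONTROL
	 * in DEIER, zero SDEIER, drain and ack GTIIR, DEIIR and (on gen6+)
	 * GEN6_PMIIR, then restore DEIER and SDEIER so that anything queued
	 * up behind SDEIIR can retrigger the handler.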
*/ 2156 if (!HAS_PCH_NOP(dev)) { 2157 sde_ier = I915_READ(SDEIER); 2158 I915_WRITE(SDEIER, 0); 2159 POSTING_READ(SDEIER); 2160 } 2161 2162 gt_iir = I915_READ(GTIIR); 2163 if (gt_iir) { 2164 if (INTEL_INFO(dev)->gen >= 6) 2165 snb_gt_irq_handler(dev, dev_priv, gt_iir); 2166 else 2167 ilk_gt_irq_handler(dev, dev_priv, gt_iir); 2168 I915_WRITE(GTIIR, gt_iir); 2169 ret = IRQ_HANDLED; 2170 } 2171 2172 de_iir = I915_READ(DEIIR); 2173 if (de_iir) { 2174 if (INTEL_INFO(dev)->gen >= 7) 2175 ivb_display_irq_handler(dev, de_iir); 2176 else 2177 ilk_display_irq_handler(dev, de_iir); 2178 I915_WRITE(DEIIR, de_iir); 2179 ret = IRQ_HANDLED; 2180 } 2181 2182 if (INTEL_INFO(dev)->gen >= 6) { 2183 u32 pm_iir = I915_READ(GEN6_PMIIR); 2184 if (pm_iir) { 2185 gen6_rps_irq_handler(dev_priv, pm_iir); 2186 I915_WRITE(GEN6_PMIIR, pm_iir); 2187 ret = IRQ_HANDLED; 2188 } 2189 } 2190 2191 I915_WRITE(DEIER, de_ier); 2192 POSTING_READ(DEIER); 2193 if (!HAS_PCH_NOP(dev)) { 2194 I915_WRITE(SDEIER, sde_ier); 2195 POSTING_READ(SDEIER); 2196 } 2197 2198 return ret; 2199 } 2200 2201 static irqreturn_t gen8_irq_handler(int irq, void *arg) 2202 { 2203 struct drm_device *dev = arg; 2204 struct drm_i915_private *dev_priv = dev->dev_private; 2205 u32 master_ctl; 2206 irqreturn_t ret = IRQ_NONE; 2207 uint32_t tmp = 0; 2208 enum pipe pipe; 2209 2210 master_ctl = I915_READ(GEN8_MASTER_IRQ); 2211 master_ctl &= ~GEN8_MASTER_IRQ_CONTROL; 2212 if (!master_ctl) 2213 return IRQ_NONE; 2214 2215 I915_WRITE(GEN8_MASTER_IRQ, 0); 2216 POSTING_READ(GEN8_MASTER_IRQ); 2217 2218 ret = gen8_gt_irq_handler(dev, dev_priv, master_ctl); 2219 2220 if (master_ctl & GEN8_DE_MISC_IRQ) { 2221 tmp = I915_READ(GEN8_DE_MISC_IIR); 2222 if (tmp & GEN8_DE_MISC_GSE) 2223 intel_opregion_asle_intr(dev); 2224 else if (tmp) 2225 DRM_ERROR("Unexpected DE Misc interrupt\n"); 2226 else 2227 DRM_ERROR("The master control interrupt lied (DE MISC)!\n"); 2228 2229 if (tmp) { 2230 I915_WRITE(GEN8_DE_MISC_IIR, tmp); 2231 ret = IRQ_HANDLED; 2232 } 2233 } 2234 2235 if (master_ctl & GEN8_DE_PORT_IRQ) { 2236 tmp = I915_READ(GEN8_DE_PORT_IIR); 2237 if (tmp & GEN8_AUX_CHANNEL_A) 2238 dp_aux_irq_handler(dev); 2239 else if (tmp) 2240 DRM_ERROR("Unexpected DE Port interrupt\n"); 2241 else 2242 DRM_ERROR("The master control interrupt lied (DE PORT)!\n"); 2243 2244 if (tmp) { 2245 I915_WRITE(GEN8_DE_PORT_IIR, tmp); 2246 ret = IRQ_HANDLED; 2247 } 2248 } 2249 2250 for_each_pipe(pipe) { 2251 uint32_t pipe_iir; 2252 2253 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe))) 2254 continue; 2255 2256 pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe)); 2257 if (pipe_iir & GEN8_PIPE_VBLANK) 2258 intel_pipe_handle_vblank(dev, pipe); 2259 2260 if (pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE) { 2261 intel_prepare_page_flip(dev, pipe); 2262 intel_finish_page_flip_plane(dev, pipe); 2263 } 2264 2265 if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE) 2266 hsw_pipe_crc_irq_handler(dev, pipe); 2267 2268 if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN) { 2269 if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, 2270 false)) 2271 DRM_ERROR("Pipe %c FIFO underrun\n", 2272 pipe_name(pipe)); 2273 } 2274 2275 if (pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS) { 2276 DRM_ERROR("Fault errors on pipe %c\n: 0x%08x", 2277 pipe_name(pipe), 2278 pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS); 2279 } 2280 2281 if (pipe_iir) { 2282 ret = IRQ_HANDLED; 2283 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir); 2284 } else 2285 DRM_ERROR("The master control interrupt lied (DE PIPE)!\n"); 2286 } 2287 2288 if (!HAS_PCH_NOP(dev) && master_ctl & GEN8_DE_PCH_IRQ) { 2289 /* 2290 * 
FIXME(BDW): Assume for now that the new interrupt handling
		 * scheme also closed the SDE interrupt handling race we've seen
		 * on older pch-split platforms. But this needs testing.
		 */
		u32 pch_iir = I915_READ(SDEIIR);

		cpt_irq_handler(dev, pch_iir);

		if (pch_iir) {
			I915_WRITE(SDEIIR, pch_iir);
			ret = IRQ_HANDLED;
		}
	}

	I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
	POSTING_READ(GEN8_MASTER_IRQ);

	return ret;
}

static void i915_error_wake_up(struct drm_i915_private *dev_priv,
			       bool reset_completed)
{
	struct intel_engine_cs *ring;
	int i;

	/*
	 * Notify all waiters for GPU completion events that reset state has
	 * been changed, and that they need to restart their wait after
	 * checking for potential errors (and bail out to drop locks if there is
	 * a gpu reset pending so that i915_error_work_func can acquire them).
	 */

	/* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
	for_each_ring(ring, dev_priv, i)
		wake_up_all(&ring->irq_queue);

	/* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
	wake_up_all(&dev_priv->pending_flip_queue);

	/*
	 * Signal tasks blocked in i915_gem_wait_for_error that the pending
	 * reset state is cleared.
	 */
	if (reset_completed)
		wake_up_all(&dev_priv->gpu_error.reset_queue);
}

/**
 * i915_error_work_func - do process context error handling work
 * @work: work struct
 *
 * Fire an error uevent so userspace can see that a hang or error
 * was detected.
 */
static void i915_error_work_func(struct work_struct *work)
{
	struct i915_gpu_error *error = container_of(work, struct i915_gpu_error,
						    work);
	struct drm_i915_private *dev_priv =
		container_of(error, struct drm_i915_private, gpu_error);
	struct drm_device *dev = dev_priv->dev;
	char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
	char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
	char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
	int ret;

	kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event);

	/*
	 * Note that there's only one work item which does gpu resets, so we
	 * need not worry about concurrent gpu resets potentially incrementing
	 * error->reset_counter twice. We only need to take care of another
	 * racing irq/hangcheck declaring the gpu dead for a second time. A
	 * quick check for that is good enough: schedule_work ensures the
	 * correct ordering between hang detection and this work item, and since
	 * the reset in-progress bit is only ever set by code outside of this
	 * work we don't need to worry about any other races.
	 */
	if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
		DRM_DEBUG_DRIVER("resetting chip\n");
		kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
				   reset_event);

		/*
		 * In most cases it's guaranteed that we get here with an RPM
		 * reference held, for example because there is a pending GPU
		 * request that won't finish until the reset is done. This
		 * isn't the case at least when we get here by doing a
		 * simulated reset via debugfs, so get an RPM reference.
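		 *
		 * The get/put pair below brackets both i915_reset() and
		 * intel_display_handle_reset(), so the device is kept awake
		 * for the whole reset sequence.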
2380 */ 2381 intel_runtime_pm_get(dev_priv); 2382 /* 2383 * All state reset _must_ be completed before we update the 2384 * reset counter, for otherwise waiters might miss the reset 2385 * pending state and not properly drop locks, resulting in 2386 * deadlocks with the reset work. 2387 */ 2388 ret = i915_reset(dev); 2389 2390 intel_display_handle_reset(dev); 2391 2392 intel_runtime_pm_put(dev_priv); 2393 2394 if (ret == 0) { 2395 /* 2396 * After all the gem state is reset, increment the reset 2397 * counter and wake up everyone waiting for the reset to 2398 * complete. 2399 * 2400 * Since unlock operations are a one-sided barrier only, 2401 * we need to insert a barrier here to order any seqno 2402 * updates before 2403 * the counter increment. 2404 */ 2405 smp_mb__before_atomic(); 2406 atomic_inc(&dev_priv->gpu_error.reset_counter); 2407 2408 kobject_uevent_env(&dev->primary->kdev->kobj, 2409 KOBJ_CHANGE, reset_done_event); 2410 } else { 2411 atomic_set_mask(I915_WEDGED, &error->reset_counter); 2412 } 2413 2414 /* 2415 * Note: The wake_up also serves as a memory barrier so that 2416 * waiters see the update value of the reset counter atomic_t. 2417 */ 2418 i915_error_wake_up(dev_priv, true); 2419 } 2420 } 2421 2422 static void i915_report_and_clear_eir(struct drm_device *dev) 2423 { 2424 struct drm_i915_private *dev_priv = dev->dev_private; 2425 uint32_t instdone[I915_NUM_INSTDONE_REG]; 2426 u32 eir = I915_READ(EIR); 2427 int pipe, i; 2428 2429 if (!eir) 2430 return; 2431 2432 pr_err("render error detected, EIR: 0x%08x\n", eir); 2433 2434 i915_get_extra_instdone(dev, instdone); 2435 2436 if (IS_G4X(dev)) { 2437 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) { 2438 u32 ipeir = I915_READ(IPEIR_I965); 2439 2440 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); 2441 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); 2442 for (i = 0; i < ARRAY_SIZE(instdone); i++) 2443 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); 2444 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); 2445 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); 2446 I915_WRITE(IPEIR_I965, ipeir); 2447 POSTING_READ(IPEIR_I965); 2448 } 2449 if (eir & GM45_ERROR_PAGE_TABLE) { 2450 u32 pgtbl_err = I915_READ(PGTBL_ER); 2451 pr_err("page table error\n"); 2452 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err); 2453 I915_WRITE(PGTBL_ER, pgtbl_err); 2454 POSTING_READ(PGTBL_ER); 2455 } 2456 } 2457 2458 if (!IS_GEN2(dev)) { 2459 if (eir & I915_ERROR_PAGE_TABLE) { 2460 u32 pgtbl_err = I915_READ(PGTBL_ER); 2461 pr_err("page table error\n"); 2462 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err); 2463 I915_WRITE(PGTBL_ER, pgtbl_err); 2464 POSTING_READ(PGTBL_ER); 2465 } 2466 } 2467 2468 if (eir & I915_ERROR_MEMORY_REFRESH) { 2469 pr_err("memory refresh error:\n"); 2470 for_each_pipe(pipe) 2471 pr_err("pipe %c stat: 0x%08x\n", 2472 pipe_name(pipe), I915_READ(PIPESTAT(pipe))); 2473 /* pipestat has already been acked */ 2474 } 2475 if (eir & I915_ERROR_INSTRUCTION) { 2476 pr_err("instruction error\n"); 2477 pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM)); 2478 for (i = 0; i < ARRAY_SIZE(instdone); i++) 2479 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); 2480 if (INTEL_INFO(dev)->gen < 4) { 2481 u32 ipeir = I915_READ(IPEIR); 2482 2483 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR)); 2484 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR)); 2485 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD)); 2486 I915_WRITE(IPEIR, ipeir); 2487 POSTING_READ(IPEIR); 2488 } else { 2489 u32 ipeir = I915_READ(IPEIR_I965); 2490 2491 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); 2492 
pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); 2493 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); 2494 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); 2495 I915_WRITE(IPEIR_I965, ipeir); 2496 POSTING_READ(IPEIR_I965); 2497 } 2498 } 2499 2500 I915_WRITE(EIR, eir); 2501 POSTING_READ(EIR); 2502 eir = I915_READ(EIR); 2503 if (eir) { 2504 /* 2505 * some errors might have become stuck, 2506 * mask them. 2507 */ 2508 DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir); 2509 I915_WRITE(EMR, I915_READ(EMR) | eir); 2510 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 2511 } 2512 } 2513 2514 /** 2515 * i915_handle_error - handle an error interrupt 2516 * @dev: drm device 2517 * 2518 * Do some basic checking of regsiter state at error interrupt time and 2519 * dump it to the syslog. Also call i915_capture_error_state() to make 2520 * sure we get a record and make it available in debugfs. Fire a uevent 2521 * so userspace knows something bad happened (should trigger collection 2522 * of a ring dump etc.). 2523 */ 2524 void i915_handle_error(struct drm_device *dev, bool wedged, 2525 const char *fmt, ...) 2526 { 2527 struct drm_i915_private *dev_priv = dev->dev_private; 2528 va_list args; 2529 char error_msg[80]; 2530 2531 va_start(args, fmt); 2532 vscnprintf(error_msg, sizeof(error_msg), fmt, args); 2533 va_end(args); 2534 2535 i915_capture_error_state(dev, wedged, error_msg); 2536 i915_report_and_clear_eir(dev); 2537 2538 if (wedged) { 2539 atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG, 2540 &dev_priv->gpu_error.reset_counter); 2541 2542 /* 2543 * Wakeup waiting processes so that the reset work function 2544 * i915_error_work_func doesn't deadlock trying to grab various 2545 * locks. By bumping the reset counter first, the woken 2546 * processes will see a reset in progress and back off, 2547 * releasing their locks and then wait for the reset completion. 2548 * We must do this for _all_ gpu waiters that might hold locks 2549 * that the reset work needs to acquire. 2550 * 2551 * Note: The wake_up serves as the required memory barrier to 2552 * ensure that the waiters see the updated value of the reset 2553 * counter atomic_t. 2554 */ 2555 i915_error_wake_up(dev_priv, false); 2556 } 2557 2558 /* 2559 * Our reset work can grab modeset locks (since it needs to reset the 2560 * state of outstanding pagelips). Hence it must not be run on our own 2561 * dev-priv->wq work queue for otherwise the flush_work in the pageflip 2562 * code will deadlock. 2563 */ 2564 schedule_work(&dev_priv->gpu_error.work); 2565 } 2566 2567 static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe) 2568 { 2569 struct drm_i915_private *dev_priv = dev->dev_private; 2570 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 2571 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2572 struct drm_i915_gem_object *obj; 2573 struct intel_unpin_work *work; 2574 unsigned long flags; 2575 bool stall_detected; 2576 2577 /* Ignore early vblank irqs */ 2578 if (intel_crtc == NULL) 2579 return; 2580 2581 spin_lock_irqsave(&dev->event_lock, flags); 2582 work = intel_crtc->unpin_work; 2583 2584 if (work == NULL || 2585 atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE || 2586 !work->enable_stall_check) { 2587 /* Either the pending flip IRQ arrived, or we're too early. 
Don't check */ 2588 spin_unlock_irqrestore(&dev->event_lock, flags); 2589 return; 2590 } 2591 2592 /* Potential stall - if we see that the flip has happened, assume a missed interrupt */ 2593 obj = work->pending_flip_obj; 2594 if (INTEL_INFO(dev)->gen >= 4) { 2595 int dspsurf = DSPSURF(intel_crtc->plane); 2596 stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) == 2597 i915_gem_obj_ggtt_offset(obj); 2598 } else { 2599 int dspaddr = DSPADDR(intel_crtc->plane); 2600 stall_detected = I915_READ(dspaddr) == (i915_gem_obj_ggtt_offset(obj) + 2601 crtc->y * crtc->primary->fb->pitches[0] + 2602 crtc->x * crtc->primary->fb->bits_per_pixel/8); 2603 } 2604 2605 spin_unlock_irqrestore(&dev->event_lock, flags); 2606 2607 if (stall_detected) { 2608 DRM_DEBUG_DRIVER("Pageflip stall detected\n"); 2609 intel_prepare_page_flip(dev, intel_crtc->plane); 2610 } 2611 } 2612 2613 /* Called from drm generic code, passed 'crtc' which 2614 * we use as a pipe index 2615 */ 2616 static int i915_enable_vblank(struct drm_device *dev, int pipe) 2617 { 2618 struct drm_i915_private *dev_priv = dev->dev_private; 2619 unsigned long irqflags; 2620 2621 if (!i915_pipe_enabled(dev, pipe)) 2622 return -EINVAL; 2623 2624 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2625 if (INTEL_INFO(dev)->gen >= 4) 2626 i915_enable_pipestat(dev_priv, pipe, 2627 PIPE_START_VBLANK_INTERRUPT_STATUS); 2628 else 2629 i915_enable_pipestat(dev_priv, pipe, 2630 PIPE_VBLANK_INTERRUPT_STATUS); 2631 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2632 2633 return 0; 2634 } 2635 2636 static int ironlake_enable_vblank(struct drm_device *dev, int pipe) 2637 { 2638 struct drm_i915_private *dev_priv = dev->dev_private; 2639 unsigned long irqflags; 2640 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) : 2641 DE_PIPE_VBLANK(pipe); 2642 2643 if (!i915_pipe_enabled(dev, pipe)) 2644 return -EINVAL; 2645 2646 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2647 ironlake_enable_display_irq(dev_priv, bit); 2648 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2649 2650 return 0; 2651 } 2652 2653 static int valleyview_enable_vblank(struct drm_device *dev, int pipe) 2654 { 2655 struct drm_i915_private *dev_priv = dev->dev_private; 2656 unsigned long irqflags; 2657 2658 if (!i915_pipe_enabled(dev, pipe)) 2659 return -EINVAL; 2660 2661 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2662 i915_enable_pipestat(dev_priv, pipe, 2663 PIPE_START_VBLANK_INTERRUPT_STATUS); 2664 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2665 2666 return 0; 2667 } 2668 2669 static int gen8_enable_vblank(struct drm_device *dev, int pipe) 2670 { 2671 struct drm_i915_private *dev_priv = dev->dev_private; 2672 unsigned long irqflags; 2673 2674 if (!i915_pipe_enabled(dev, pipe)) 2675 return -EINVAL; 2676 2677 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2678 dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK; 2679 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); 2680 POSTING_READ(GEN8_DE_PIPE_IMR(pipe)); 2681 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2682 return 0; 2683 } 2684 2685 /* Called from drm generic code, passed 'crtc' which 2686 * we use as a pipe index 2687 */ 2688 static void i915_disable_vblank(struct drm_device *dev, int pipe) 2689 { 2690 struct drm_i915_private *dev_priv = dev->dev_private; 2691 unsigned long irqflags; 2692 2693 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2694 i915_disable_pipestat(dev_priv, pipe, 2695 PIPE_VBLANK_INTERRUPT_STATUS | 2696 PIPE_START_VBLANK_INTERRUPT_STATUS); 
2697 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2698 } 2699 2700 static void ironlake_disable_vblank(struct drm_device *dev, int pipe) 2701 { 2702 struct drm_i915_private *dev_priv = dev->dev_private; 2703 unsigned long irqflags; 2704 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) : 2705 DE_PIPE_VBLANK(pipe); 2706 2707 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2708 ironlake_disable_display_irq(dev_priv, bit); 2709 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2710 } 2711 2712 static void valleyview_disable_vblank(struct drm_device *dev, int pipe) 2713 { 2714 struct drm_i915_private *dev_priv = dev->dev_private; 2715 unsigned long irqflags; 2716 2717 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2718 i915_disable_pipestat(dev_priv, pipe, 2719 PIPE_START_VBLANK_INTERRUPT_STATUS); 2720 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2721 } 2722 2723 static void gen8_disable_vblank(struct drm_device *dev, int pipe) 2724 { 2725 struct drm_i915_private *dev_priv = dev->dev_private; 2726 unsigned long irqflags; 2727 2728 if (!i915_pipe_enabled(dev, pipe)) 2729 return; 2730 2731 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2732 dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK; 2733 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); 2734 POSTING_READ(GEN8_DE_PIPE_IMR(pipe)); 2735 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2736 } 2737 2738 static u32 2739 ring_last_seqno(struct intel_engine_cs *ring) 2740 { 2741 return list_entry(ring->request_list.prev, 2742 struct drm_i915_gem_request, list)->seqno; 2743 } 2744 2745 static bool 2746 ring_idle(struct intel_engine_cs *ring, u32 seqno) 2747 { 2748 return (list_empty(&ring->request_list) || 2749 i915_seqno_passed(seqno, ring_last_seqno(ring))); 2750 } 2751 2752 static bool 2753 ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr) 2754 { 2755 if (INTEL_INFO(dev)->gen >= 8) { 2756 /* 2757 * FIXME: gen8 semaphore support - currently we don't emit 2758 * semaphores on bdw anyway, but this needs to be addressed when 2759 * we merge that code. 2760 */ 2761 return false; 2762 } else { 2763 ipehr &= ~MI_SEMAPHORE_SYNC_MASK; 2764 return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | 2765 MI_SEMAPHORE_REGISTER); 2766 } 2767 } 2768 2769 static struct intel_engine_cs * 2770 semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr) 2771 { 2772 struct drm_i915_private *dev_priv = ring->dev->dev_private; 2773 struct intel_engine_cs *signaller; 2774 int i; 2775 2776 if (INTEL_INFO(dev_priv->dev)->gen >= 8) { 2777 /* 2778 * FIXME: gen8 semaphore support - currently we don't emit 2779 * semaphores on bdw anyway, but this needs to be addressed when 2780 * we merge that code. 
2781 */ 2782 return NULL; 2783 } else { 2784 u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK; 2785 2786 for_each_ring(signaller, dev_priv, i) { 2787 if(ring == signaller) 2788 continue; 2789 2790 if (sync_bits == signaller->semaphore.mbox.wait[ring->id]) 2791 return signaller; 2792 } 2793 } 2794 2795 DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x\n", 2796 ring->id, ipehr); 2797 2798 return NULL; 2799 } 2800 2801 static struct intel_engine_cs * 2802 semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno) 2803 { 2804 struct drm_i915_private *dev_priv = ring->dev->dev_private; 2805 u32 cmd, ipehr, head; 2806 int i; 2807 2808 ipehr = I915_READ(RING_IPEHR(ring->mmio_base)); 2809 if (!ipehr_is_semaphore_wait(ring->dev, ipehr)) 2810 return NULL; 2811 2812 /* 2813 * HEAD is likely pointing to the dword after the actual command, 2814 * so scan backwards until we find the MBOX. But limit it to just 3 2815 * dwords. Note that we don't care about ACTHD here since that might 2816 * point at at batch, and semaphores are always emitted into the 2817 * ringbuffer itself. 2818 */ 2819 head = I915_READ_HEAD(ring) & HEAD_ADDR; 2820 2821 for (i = 4; i; --i) { 2822 /* 2823 * Be paranoid and presume the hw has gone off into the wild - 2824 * our ring is smaller than what the hardware (and hence 2825 * HEAD_ADDR) allows. Also handles wrap-around. 2826 */ 2827 head &= ring->buffer->size - 1; 2828 2829 /* This here seems to blow up */ 2830 cmd = ioread32(ring->buffer->virtual_start + head); 2831 if (cmd == ipehr) 2832 break; 2833 2834 head -= 4; 2835 } 2836 2837 if (!i) 2838 return NULL; 2839 2840 *seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1; 2841 return semaphore_wait_to_signaller_ring(ring, ipehr); 2842 } 2843 2844 static int semaphore_passed(struct intel_engine_cs *ring) 2845 { 2846 struct drm_i915_private *dev_priv = ring->dev->dev_private; 2847 struct intel_engine_cs *signaller; 2848 u32 seqno, ctl; 2849 2850 ring->hangcheck.deadlock++; 2851 2852 signaller = semaphore_waits_for(ring, &seqno); 2853 if (signaller == NULL) 2854 return -1; 2855 2856 /* Prevent pathological recursion due to driver bugs */ 2857 if (signaller->hangcheck.deadlock >= I915_NUM_RINGS) 2858 return -1; 2859 2860 /* cursory check for an unkickable deadlock */ 2861 ctl = I915_READ_CTL(signaller); 2862 if (ctl & RING_WAIT_SEMAPHORE && semaphore_passed(signaller) < 0) 2863 return -1; 2864 2865 if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno)) 2866 return 1; 2867 2868 if (signaller->hangcheck.deadlock) 2869 return -1; 2870 2871 return 0; 2872 } 2873 2874 static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv) 2875 { 2876 struct intel_engine_cs *ring; 2877 int i; 2878 2879 for_each_ring(ring, dev_priv, i) 2880 ring->hangcheck.deadlock = 0; 2881 } 2882 2883 static enum intel_ring_hangcheck_action 2884 ring_stuck(struct intel_engine_cs *ring, u64 acthd) 2885 { 2886 struct drm_device *dev = ring->dev; 2887 struct drm_i915_private *dev_priv = dev->dev_private; 2888 u32 tmp; 2889 2890 if (ring->hangcheck.acthd != acthd) 2891 return HANGCHECK_ACTIVE; 2892 2893 if (IS_GEN2(dev)) 2894 return HANGCHECK_HUNG; 2895 2896 /* Is the chip hanging on a WAIT_FOR_EVENT? 2897 * If so we can simply poke the RB_WAIT bit 2898 * and break the hang. This should work on 2899 * all but the second generation chipsets. 
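	 *
	 * The "poke" is simply the I915_WRITE_CTL() below: the value read
	 * back is written out again with the wait bit still set, which (as
	 * far as this code assumes) clears the wait condition and lets the
	 * ring continue.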
	 */
	tmp = I915_READ_CTL(ring);
	if (tmp & RING_WAIT) {
		i915_handle_error(dev, false,
				  "Kicking stuck wait on %s",
				  ring->name);
		I915_WRITE_CTL(ring, tmp);
		return HANGCHECK_KICK;
	}

	if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
		switch (semaphore_passed(ring)) {
		default:
			return HANGCHECK_HUNG;
		case 1:
			i915_handle_error(dev, false,
					  "Kicking stuck semaphore on %s",
					  ring->name);
			I915_WRITE_CTL(ring, tmp);
			return HANGCHECK_KICK;
		case 0:
			return HANGCHECK_WAIT;
		}
	}

	return HANGCHECK_HUNG;
}

/**
 * This is called when the chip hasn't reported back with completed
 * batchbuffers in a long time. We keep track of per-ring seqno progress,
 * and if there is no progress the hangcheck score for that ring is
 * increased. Further, acthd is inspected to see if the ring is stuck; if
 * it is, we kick the ring. If we see no progress on three subsequent
 * calls we assume the chip is wedged and try to fix it by resetting it.
 */
static void i915_hangcheck_elapsed(unsigned long data)
{
	struct drm_device *dev = (struct drm_device *)data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	int i;
	int busy_count = 0, rings_hung = 0;
	bool stuck[I915_NUM_RINGS] = { 0 };
#define BUSY 1
#define KICK 5
#define HUNG 20

	if (!i915.enable_hangcheck)
		return;

	for_each_ring(ring, dev_priv, i) {
		u64 acthd;
		u32 seqno;
		bool busy = true;

		semaphore_clear_deadlocks(dev_priv);

		seqno = ring->get_seqno(ring, false);
		acthd = intel_ring_get_active_head(ring);

		if (ring->hangcheck.seqno == seqno) {
			if (ring_idle(ring, seqno)) {
				ring->hangcheck.action = HANGCHECK_IDLE;

				if (waitqueue_active(&ring->irq_queue)) {
					/* Issue a wake-up to catch stuck h/w. */
					if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) {
						if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)))
							DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
								  ring->name);
						else
							DRM_INFO("Fake missed irq on %s\n",
								 ring->name);
						wake_up_all(&ring->irq_queue);
					}
					/* Safeguard against driver failure */
					ring->hangcheck.score += BUSY;
				} else
					busy = false;
			} else {
				/* We always increment the hangcheck score
				 * if the ring is busy and still processing
				 * the same request, so that no single request
				 * can run indefinitely (such as a chain of
				 * batches). The only time we do not increment
				 * the hangcheck score on this ring is when
				 * this ring is in a legitimate wait for
				 * another ring. In that case the waiting ring
				 * is a victim and we want to be sure we catch
				 * the right culprit. Then every time we do
				 * kick the ring, add a small increment to the
				 * score so that we can catch a batch that is
				 * being repeatedly kicked and so responsible
				 * for stalling the machine.
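				 *
				 * As a rough feel for the arithmetic (using
				 * the constants defined above): a ring that
				 * keeps being kicked gains KICK (5) per check
				 * and one judged HUNG gains HUNG (20), so
				 * either crosses HANGCHECK_SCORE_RING_HUNG
				 * fairly quickly, while an idle-but-waited-on
				 * ring only creeps up by BUSY (1) and any
				 * check that sees the seqno advance decays
				 * the score again.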
2995 */ 2996 ring->hangcheck.action = ring_stuck(ring, 2997 acthd); 2998 2999 switch (ring->hangcheck.action) { 3000 case HANGCHECK_IDLE: 3001 case HANGCHECK_WAIT: 3002 break; 3003 case HANGCHECK_ACTIVE: 3004 ring->hangcheck.score += BUSY; 3005 break; 3006 case HANGCHECK_KICK: 3007 ring->hangcheck.score += KICK; 3008 break; 3009 case HANGCHECK_HUNG: 3010 ring->hangcheck.score += HUNG; 3011 stuck[i] = true; 3012 break; 3013 } 3014 } 3015 } else { 3016 ring->hangcheck.action = HANGCHECK_ACTIVE; 3017 3018 /* Gradually reduce the count so that we catch DoS 3019 * attempts across multiple batches. 3020 */ 3021 if (ring->hangcheck.score > 0) 3022 ring->hangcheck.score--; 3023 } 3024 3025 ring->hangcheck.seqno = seqno; 3026 ring->hangcheck.acthd = acthd; 3027 busy_count += busy; 3028 } 3029 3030 for_each_ring(ring, dev_priv, i) { 3031 if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) { 3032 DRM_INFO("%s on %s\n", 3033 stuck[i] ? "stuck" : "no progress", 3034 ring->name); 3035 rings_hung++; 3036 } 3037 } 3038 3039 if (rings_hung) 3040 return i915_handle_error(dev, true, "Ring hung"); 3041 3042 if (busy_count) 3043 /* Reset timer case chip hangs without another request 3044 * being added */ 3045 i915_queue_hangcheck(dev); 3046 } 3047 3048 void i915_queue_hangcheck(struct drm_device *dev) 3049 { 3050 struct drm_i915_private *dev_priv = dev->dev_private; 3051 if (!i915.enable_hangcheck) 3052 return; 3053 3054 mod_timer(&dev_priv->gpu_error.hangcheck_timer, 3055 round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES)); 3056 } 3057 3058 static void ibx_irq_reset(struct drm_device *dev) 3059 { 3060 struct drm_i915_private *dev_priv = dev->dev_private; 3061 3062 if (HAS_PCH_NOP(dev)) 3063 return; 3064 3065 GEN5_IRQ_RESET(SDE); 3066 3067 if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev)) 3068 I915_WRITE(SERR_INT, 0xffffffff); 3069 } 3070 3071 /* 3072 * SDEIER is also touched by the interrupt handler to work around missed PCH 3073 * interrupts. Hence we can't update it after the interrupt handler is enabled - 3074 * instead we unconditionally enable all PCH interrupt sources here, but then 3075 * only unmask them as needed with SDEIMR. 3076 * 3077 * This function needs to be called before interrupts are enabled. 
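 *
 * (The other half of this dance is in ironlake_irq_handler(), which saves
 * SDEIER, writes it to 0 for the duration of the handler and then restores
 * it; enabling everything here and doing the masking in SDEIMR is what lets
 * the two coexist.)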
3078 */ 3079 static void ibx_irq_pre_postinstall(struct drm_device *dev) 3080 { 3081 struct drm_i915_private *dev_priv = dev->dev_private; 3082 3083 if (HAS_PCH_NOP(dev)) 3084 return; 3085 3086 WARN_ON(I915_READ(SDEIER) != 0); 3087 I915_WRITE(SDEIER, 0xffffffff); 3088 POSTING_READ(SDEIER); 3089 } 3090 3091 static void gen5_gt_irq_reset(struct drm_device *dev) 3092 { 3093 struct drm_i915_private *dev_priv = dev->dev_private; 3094 3095 GEN5_IRQ_RESET(GT); 3096 if (INTEL_INFO(dev)->gen >= 6) 3097 GEN5_IRQ_RESET(GEN6_PM); 3098 } 3099 3100 /* drm_dma.h hooks 3101 */ 3102 static void ironlake_irq_reset(struct drm_device *dev) 3103 { 3104 struct drm_i915_private *dev_priv = dev->dev_private; 3105 3106 I915_WRITE(HWSTAM, 0xffffffff); 3107 3108 GEN5_IRQ_RESET(DE); 3109 if (IS_GEN7(dev)) 3110 I915_WRITE(GEN7_ERR_INT, 0xffffffff); 3111 3112 gen5_gt_irq_reset(dev); 3113 3114 ibx_irq_reset(dev); 3115 } 3116 3117 static void valleyview_irq_preinstall(struct drm_device *dev) 3118 { 3119 struct drm_i915_private *dev_priv = dev->dev_private; 3120 int pipe; 3121 3122 /* VLV magic */ 3123 I915_WRITE(VLV_IMR, 0); 3124 I915_WRITE(RING_IMR(RENDER_RING_BASE), 0); 3125 I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0); 3126 I915_WRITE(RING_IMR(BLT_RING_BASE), 0); 3127 3128 /* and GT */ 3129 I915_WRITE(GTIIR, I915_READ(GTIIR)); 3130 I915_WRITE(GTIIR, I915_READ(GTIIR)); 3131 3132 gen5_gt_irq_reset(dev); 3133 3134 I915_WRITE(DPINVGTT, 0xff); 3135 3136 I915_WRITE(PORT_HOTPLUG_EN, 0); 3137 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3138 for_each_pipe(pipe) 3139 I915_WRITE(PIPESTAT(pipe), 0xffff); 3140 I915_WRITE(VLV_IIR, 0xffffffff); 3141 I915_WRITE(VLV_IMR, 0xffffffff); 3142 I915_WRITE(VLV_IER, 0x0); 3143 POSTING_READ(VLV_IER); 3144 } 3145 3146 static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv) 3147 { 3148 GEN8_IRQ_RESET_NDX(GT, 0); 3149 GEN8_IRQ_RESET_NDX(GT, 1); 3150 GEN8_IRQ_RESET_NDX(GT, 2); 3151 GEN8_IRQ_RESET_NDX(GT, 3); 3152 } 3153 3154 static void gen8_irq_reset(struct drm_device *dev) 3155 { 3156 struct drm_i915_private *dev_priv = dev->dev_private; 3157 int pipe; 3158 3159 I915_WRITE(GEN8_MASTER_IRQ, 0); 3160 POSTING_READ(GEN8_MASTER_IRQ); 3161 3162 gen8_gt_irq_reset(dev_priv); 3163 3164 for_each_pipe(pipe) 3165 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe); 3166 3167 GEN5_IRQ_RESET(GEN8_DE_PORT_); 3168 GEN5_IRQ_RESET(GEN8_DE_MISC_); 3169 GEN5_IRQ_RESET(GEN8_PCU_); 3170 3171 ibx_irq_reset(dev); 3172 } 3173 3174 static void cherryview_irq_preinstall(struct drm_device *dev) 3175 { 3176 struct drm_i915_private *dev_priv = dev->dev_private; 3177 int pipe; 3178 3179 I915_WRITE(GEN8_MASTER_IRQ, 0); 3180 POSTING_READ(GEN8_MASTER_IRQ); 3181 3182 gen8_gt_irq_reset(dev_priv); 3183 3184 GEN5_IRQ_RESET(GEN8_PCU_); 3185 3186 POSTING_READ(GEN8_PCU_IIR); 3187 3188 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV); 3189 3190 I915_WRITE(PORT_HOTPLUG_EN, 0); 3191 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3192 3193 for_each_pipe(pipe) 3194 I915_WRITE(PIPESTAT(pipe), 0xffff); 3195 3196 I915_WRITE(VLV_IMR, 0xffffffff); 3197 I915_WRITE(VLV_IER, 0x0); 3198 I915_WRITE(VLV_IIR, 0xffffffff); 3199 POSTING_READ(VLV_IIR); 3200 } 3201 3202 static void ibx_hpd_irq_setup(struct drm_device *dev) 3203 { 3204 struct drm_i915_private *dev_priv = dev->dev_private; 3205 struct drm_mode_config *mode_config = &dev->mode_config; 3206 struct intel_encoder *intel_encoder; 3207 u32 hotplug_irqs, hotplug, enabled_irqs = 0; 3208 3209 if (HAS_PCH_IBX(dev)) { 3210 hotplug_irqs = SDE_HOTPLUG_MASK; 3211 
list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head) 3212 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) 3213 enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin]; 3214 } else { 3215 hotplug_irqs = SDE_HOTPLUG_MASK_CPT; 3216 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head) 3217 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) 3218 enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin]; 3219 } 3220 3221 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 3222 3223 /* 3224 * Enable digital hotplug on the PCH, and configure the DP short pulse 3225 * duration to 2ms (which is the minimum in the Display Port spec) 3226 * 3227 * This register is the same on all known PCH chips. 3228 */ 3229 hotplug = I915_READ(PCH_PORT_HOTPLUG); 3230 hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK); 3231 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms; 3232 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms; 3233 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms; 3234 I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 3235 } 3236 3237 static void ibx_irq_postinstall(struct drm_device *dev) 3238 { 3239 struct drm_i915_private *dev_priv = dev->dev_private; 3240 u32 mask; 3241 3242 if (HAS_PCH_NOP(dev)) 3243 return; 3244 3245 if (HAS_PCH_IBX(dev)) 3246 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON; 3247 else 3248 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT; 3249 3250 GEN5_ASSERT_IIR_IS_ZERO(SDEIIR); 3251 I915_WRITE(SDEIMR, ~mask); 3252 } 3253 3254 static void gen5_gt_irq_postinstall(struct drm_device *dev) 3255 { 3256 struct drm_i915_private *dev_priv = dev->dev_private; 3257 u32 pm_irqs, gt_irqs; 3258 3259 pm_irqs = gt_irqs = 0; 3260 3261 dev_priv->gt_irq_mask = ~0; 3262 if (HAS_L3_DPF(dev)) { 3263 /* L3 parity interrupt is always unmasked. 
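		 *
		 * Concretely: gt_irq_mask below leaves only the
		 * GT_PARITY_ERROR() bits clear, and the same bits are added
		 * to gt_irqs, so parity errors end up both unmasked and
		 * enabled when GEN5_IRQ_INIT(GT, ...) programs them.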
*/ 3264 dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev); 3265 gt_irqs |= GT_PARITY_ERROR(dev); 3266 } 3267 3268 gt_irqs |= GT_RENDER_USER_INTERRUPT; 3269 if (IS_GEN5(dev)) { 3270 gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT | 3271 ILK_BSD_USER_INTERRUPT; 3272 } else { 3273 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT; 3274 } 3275 3276 GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs); 3277 3278 if (INTEL_INFO(dev)->gen >= 6) { 3279 pm_irqs |= dev_priv->pm_rps_events; 3280 3281 if (HAS_VEBOX(dev)) 3282 pm_irqs |= PM_VEBOX_USER_INTERRUPT; 3283 3284 dev_priv->pm_irq_mask = 0xffffffff; 3285 GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs); 3286 } 3287 } 3288 3289 static int ironlake_irq_postinstall(struct drm_device *dev) 3290 { 3291 unsigned long irqflags; 3292 struct drm_i915_private *dev_priv = dev->dev_private; 3293 u32 display_mask, extra_mask; 3294 3295 if (INTEL_INFO(dev)->gen >= 7) { 3296 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | 3297 DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB | 3298 DE_PLANEB_FLIP_DONE_IVB | 3299 DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB); 3300 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB | 3301 DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB); 3302 } else { 3303 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | 3304 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE | 3305 DE_AUX_CHANNEL_A | 3306 DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE | 3307 DE_POISON); 3308 extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT | 3309 DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN; 3310 } 3311 3312 dev_priv->irq_mask = ~display_mask; 3313 3314 I915_WRITE(HWSTAM, 0xeffe); 3315 3316 ibx_irq_pre_postinstall(dev); 3317 3318 GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask); 3319 3320 gen5_gt_irq_postinstall(dev); 3321 3322 ibx_irq_postinstall(dev); 3323 3324 if (IS_IRONLAKE_M(dev)) { 3325 /* Enable PCU event interrupts 3326 * 3327 * spinlocking not required here for correctness since interrupt 3328 * setup is guaranteed to run in single-threaded context. But we 3329 * need it to make the assert_spin_locked happy. 
*/ 3330 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3331 ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT); 3332 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3333 } 3334 3335 return 0; 3336 } 3337 3338 static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv) 3339 { 3340 u32 pipestat_mask; 3341 u32 iir_mask; 3342 3343 pipestat_mask = PIPESTAT_INT_STATUS_MASK | 3344 PIPE_FIFO_UNDERRUN_STATUS; 3345 3346 I915_WRITE(PIPESTAT(PIPE_A), pipestat_mask); 3347 I915_WRITE(PIPESTAT(PIPE_B), pipestat_mask); 3348 POSTING_READ(PIPESTAT(PIPE_A)); 3349 3350 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV | 3351 PIPE_CRC_DONE_INTERRUPT_STATUS; 3352 3353 i915_enable_pipestat(dev_priv, PIPE_A, pipestat_mask | 3354 PIPE_GMBUS_INTERRUPT_STATUS); 3355 i915_enable_pipestat(dev_priv, PIPE_B, pipestat_mask); 3356 3357 iir_mask = I915_DISPLAY_PORT_INTERRUPT | 3358 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3359 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; 3360 dev_priv->irq_mask &= ~iir_mask; 3361 3362 I915_WRITE(VLV_IIR, iir_mask); 3363 I915_WRITE(VLV_IIR, iir_mask); 3364 I915_WRITE(VLV_IMR, dev_priv->irq_mask); 3365 I915_WRITE(VLV_IER, ~dev_priv->irq_mask); 3366 POSTING_READ(VLV_IER); 3367 } 3368 3369 static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv) 3370 { 3371 u32 pipestat_mask; 3372 u32 iir_mask; 3373 3374 iir_mask = I915_DISPLAY_PORT_INTERRUPT | 3375 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3376 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; 3377 3378 dev_priv->irq_mask |= iir_mask; 3379 I915_WRITE(VLV_IER, ~dev_priv->irq_mask); 3380 I915_WRITE(VLV_IMR, dev_priv->irq_mask); 3381 I915_WRITE(VLV_IIR, iir_mask); 3382 I915_WRITE(VLV_IIR, iir_mask); 3383 POSTING_READ(VLV_IIR); 3384 3385 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV | 3386 PIPE_CRC_DONE_INTERRUPT_STATUS; 3387 3388 i915_disable_pipestat(dev_priv, PIPE_A, pipestat_mask | 3389 PIPE_GMBUS_INTERRUPT_STATUS); 3390 i915_disable_pipestat(dev_priv, PIPE_B, pipestat_mask); 3391 3392 pipestat_mask = PIPESTAT_INT_STATUS_MASK | 3393 PIPE_FIFO_UNDERRUN_STATUS; 3394 I915_WRITE(PIPESTAT(PIPE_A), pipestat_mask); 3395 I915_WRITE(PIPESTAT(PIPE_B), pipestat_mask); 3396 POSTING_READ(PIPESTAT(PIPE_A)); 3397 } 3398 3399 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv) 3400 { 3401 assert_spin_locked(&dev_priv->irq_lock); 3402 3403 if (dev_priv->display_irqs_enabled) 3404 return; 3405 3406 dev_priv->display_irqs_enabled = true; 3407 3408 if (dev_priv->dev->irq_enabled) 3409 valleyview_display_irqs_install(dev_priv); 3410 } 3411 3412 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv) 3413 { 3414 assert_spin_locked(&dev_priv->irq_lock); 3415 3416 if (!dev_priv->display_irqs_enabled) 3417 return; 3418 3419 dev_priv->display_irqs_enabled = false; 3420 3421 if (dev_priv->dev->irq_enabled) 3422 valleyview_display_irqs_uninstall(dev_priv); 3423 } 3424 3425 static int valleyview_irq_postinstall(struct drm_device *dev) 3426 { 3427 struct drm_i915_private *dev_priv = dev->dev_private; 3428 unsigned long irqflags; 3429 3430 dev_priv->irq_mask = ~0; 3431 3432 I915_WRITE(PORT_HOTPLUG_EN, 0); 3433 POSTING_READ(PORT_HOTPLUG_EN); 3434 3435 I915_WRITE(VLV_IMR, dev_priv->irq_mask); 3436 I915_WRITE(VLV_IER, ~dev_priv->irq_mask); 3437 I915_WRITE(VLV_IIR, 0xffffffff); 3438 POSTING_READ(VLV_IER); 3439 3440 /* Interrupt setup is already guaranteed to be single-threaded, this is 3441 * just to make the assert_spin_locked check happy. 
*/ 3442 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3443 if (dev_priv->display_irqs_enabled) 3444 valleyview_display_irqs_install(dev_priv); 3445 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3446 3447 I915_WRITE(VLV_IIR, 0xffffffff); 3448 I915_WRITE(VLV_IIR, 0xffffffff); 3449 3450 gen5_gt_irq_postinstall(dev); 3451 3452 /* ack & enable invalid PTE error interrupts */ 3453 #if 0 /* FIXME: add support to irq handler for checking these bits */ 3454 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK); 3455 I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK); 3456 #endif 3457 3458 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); 3459 3460 return 0; 3461 } 3462 3463 static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv) 3464 { 3465 int i; 3466 3467 /* These are interrupts we'll toggle with the ring mask register */ 3468 uint32_t gt_interrupts[] = { 3469 GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT | 3470 GT_RENDER_L3_PARITY_ERROR_INTERRUPT | 3471 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT, 3472 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT | 3473 GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT, 3474 0, 3475 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT 3476 }; 3477 3478 for (i = 0; i < ARRAY_SIZE(gt_interrupts); i++) 3479 GEN8_IRQ_INIT_NDX(GT, i, ~gt_interrupts[i], gt_interrupts[i]); 3480 3481 dev_priv->pm_irq_mask = 0xffffffff; 3482 } 3483 3484 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) 3485 { 3486 struct drm_device *dev = dev_priv->dev; 3487 uint32_t de_pipe_masked = GEN8_PIPE_PRIMARY_FLIP_DONE | 3488 GEN8_PIPE_CDCLK_CRC_DONE | 3489 GEN8_DE_PIPE_IRQ_FAULT_ERRORS; 3490 uint32_t de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK | 3491 GEN8_PIPE_FIFO_UNDERRUN; 3492 int pipe; 3493 dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked; 3494 dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked; 3495 dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked; 3496 3497 for_each_pipe(pipe) 3498 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe, dev_priv->de_irq_mask[pipe], 3499 de_pipe_enables); 3500 3501 GEN5_IRQ_INIT(GEN8_DE_PORT_, ~GEN8_AUX_CHANNEL_A, GEN8_AUX_CHANNEL_A); 3502 } 3503 3504 static int gen8_irq_postinstall(struct drm_device *dev) 3505 { 3506 struct drm_i915_private *dev_priv = dev->dev_private; 3507 3508 ibx_irq_pre_postinstall(dev); 3509 3510 gen8_gt_irq_postinstall(dev_priv); 3511 gen8_de_irq_postinstall(dev_priv); 3512 3513 ibx_irq_postinstall(dev); 3514 3515 I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL); 3516 POSTING_READ(GEN8_MASTER_IRQ); 3517 3518 return 0; 3519 } 3520 3521 static int cherryview_irq_postinstall(struct drm_device *dev) 3522 { 3523 struct drm_i915_private *dev_priv = dev->dev_private; 3524 u32 enable_mask = I915_DISPLAY_PORT_INTERRUPT | 3525 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3526 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3527 I915_DISPLAY_PIPE_C_EVENT_INTERRUPT; 3528 u32 pipestat_enable = PLANE_FLIP_DONE_INT_STATUS_VLV | 3529 PIPE_CRC_DONE_INTERRUPT_STATUS; 3530 unsigned long irqflags; 3531 int pipe; 3532 3533 /* 3534 * Leave vblank interrupts masked initially. enable/disable will 3535 * toggle them based on usage. 
3536 */ 3537 dev_priv->irq_mask = ~enable_mask; 3538 3539 for_each_pipe(pipe) 3540 I915_WRITE(PIPESTAT(pipe), 0xffff); 3541 3542 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3543 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); 3544 for_each_pipe(pipe) 3545 i915_enable_pipestat(dev_priv, pipe, pipestat_enable); 3546 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3547 3548 I915_WRITE(VLV_IIR, 0xffffffff); 3549 I915_WRITE(VLV_IMR, dev_priv->irq_mask); 3550 I915_WRITE(VLV_IER, enable_mask); 3551 3552 gen8_gt_irq_postinstall(dev_priv); 3553 3554 I915_WRITE(GEN8_MASTER_IRQ, MASTER_INTERRUPT_ENABLE); 3555 POSTING_READ(GEN8_MASTER_IRQ); 3556 3557 return 0; 3558 } 3559 3560 static void gen8_irq_uninstall(struct drm_device *dev) 3561 { 3562 struct drm_i915_private *dev_priv = dev->dev_private; 3563 3564 if (!dev_priv) 3565 return; 3566 3567 intel_hpd_irq_uninstall(dev_priv); 3568 3569 gen8_irq_reset(dev); 3570 } 3571 3572 static void valleyview_irq_uninstall(struct drm_device *dev) 3573 { 3574 struct drm_i915_private *dev_priv = dev->dev_private; 3575 unsigned long irqflags; 3576 int pipe; 3577 3578 if (!dev_priv) 3579 return; 3580 3581 I915_WRITE(VLV_MASTER_IER, 0); 3582 3583 intel_hpd_irq_uninstall(dev_priv); 3584 3585 for_each_pipe(pipe) 3586 I915_WRITE(PIPESTAT(pipe), 0xffff); 3587 3588 I915_WRITE(HWSTAM, 0xffffffff); 3589 I915_WRITE(PORT_HOTPLUG_EN, 0); 3590 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3591 3592 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3593 if (dev_priv->display_irqs_enabled) 3594 valleyview_display_irqs_uninstall(dev_priv); 3595 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3596 3597 dev_priv->irq_mask = 0; 3598 3599 I915_WRITE(VLV_IIR, 0xffffffff); 3600 I915_WRITE(VLV_IMR, 0xffffffff); 3601 I915_WRITE(VLV_IER, 0x0); 3602 POSTING_READ(VLV_IER); 3603 } 3604 3605 static void cherryview_irq_uninstall(struct drm_device *dev) 3606 { 3607 struct drm_i915_private *dev_priv = dev->dev_private; 3608 int pipe; 3609 3610 if (!dev_priv) 3611 return; 3612 3613 I915_WRITE(GEN8_MASTER_IRQ, 0); 3614 POSTING_READ(GEN8_MASTER_IRQ); 3615 3616 #define GEN8_IRQ_FINI_NDX(type, which) \ 3617 do { \ 3618 I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \ 3619 I915_WRITE(GEN8_##type##_IER(which), 0); \ 3620 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \ 3621 POSTING_READ(GEN8_##type##_IIR(which)); \ 3622 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \ 3623 } while (0) 3624 3625 #define GEN8_IRQ_FINI(type) \ 3626 do { \ 3627 I915_WRITE(GEN8_##type##_IMR, 0xffffffff); \ 3628 I915_WRITE(GEN8_##type##_IER, 0); \ 3629 I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \ 3630 POSTING_READ(GEN8_##type##_IIR); \ 3631 I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \ 3632 } while (0) 3633 3634 GEN8_IRQ_FINI_NDX(GT, 0); 3635 GEN8_IRQ_FINI_NDX(GT, 1); 3636 GEN8_IRQ_FINI_NDX(GT, 2); 3637 GEN8_IRQ_FINI_NDX(GT, 3); 3638 3639 GEN8_IRQ_FINI(PCU); 3640 3641 #undef GEN8_IRQ_FINI 3642 #undef GEN8_IRQ_FINI_NDX 3643 3644 I915_WRITE(PORT_HOTPLUG_EN, 0); 3645 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3646 3647 for_each_pipe(pipe) 3648 I915_WRITE(PIPESTAT(pipe), 0xffff); 3649 3650 I915_WRITE(VLV_IMR, 0xffffffff); 3651 I915_WRITE(VLV_IER, 0x0); 3652 I915_WRITE(VLV_IIR, 0xffffffff); 3653 POSTING_READ(VLV_IIR); 3654 } 3655 3656 static void ironlake_irq_uninstall(struct drm_device *dev) 3657 { 3658 struct drm_i915_private *dev_priv = dev->dev_private; 3659 3660 if (!dev_priv) 3661 return; 3662 3663 intel_hpd_irq_uninstall(dev_priv); 3664 3665 
ironlake_irq_reset(dev); 3666 } 3667 3668 static void i8xx_irq_preinstall(struct drm_device * dev) 3669 { 3670 struct drm_i915_private *dev_priv = dev->dev_private; 3671 int pipe; 3672 3673 for_each_pipe(pipe) 3674 I915_WRITE(PIPESTAT(pipe), 0); 3675 I915_WRITE16(IMR, 0xffff); 3676 I915_WRITE16(IER, 0x0); 3677 POSTING_READ16(IER); 3678 } 3679 3680 static int i8xx_irq_postinstall(struct drm_device *dev) 3681 { 3682 struct drm_i915_private *dev_priv = dev->dev_private; 3683 unsigned long irqflags; 3684 3685 I915_WRITE16(EMR, 3686 ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); 3687 3688 /* Unmask the interrupts that we always want on. */ 3689 dev_priv->irq_mask = 3690 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3691 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3692 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3693 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | 3694 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 3695 I915_WRITE16(IMR, dev_priv->irq_mask); 3696 3697 I915_WRITE16(IER, 3698 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3699 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3700 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT | 3701 I915_USER_INTERRUPT); 3702 POSTING_READ16(IER); 3703 3704 /* Interrupt setup is already guaranteed to be single-threaded, this is 3705 * just to make the assert_spin_locked check happy. */ 3706 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3707 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 3708 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 3709 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3710 3711 return 0; 3712 } 3713 3714 /* 3715 * Returns true when a page flip has completed. 3716 */ 3717 static bool i8xx_handle_vblank(struct drm_device *dev, 3718 int plane, int pipe, u32 iir) 3719 { 3720 struct drm_i915_private *dev_priv = dev->dev_private; 3721 u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane); 3722 3723 if (!intel_pipe_handle_vblank(dev, pipe)) 3724 return false; 3725 3726 if ((iir & flip_pending) == 0) 3727 return false; 3728 3729 intel_prepare_page_flip(dev, plane); 3730 3731 /* We detect FlipDone by looking for the change in PendingFlip from '1' 3732 * to '0' on the following vblank, i.e. IIR has the Pendingflip 3733 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence 3734 * the flip is completed (no longer pending). Since this doesn't raise 3735 * an interrupt per se, we watch for the change at vblank. 3736 */ 3737 if (I915_READ16(ISR) & flip_pending) 3738 return false; 3739 3740 intel_finish_page_flip(dev, pipe); 3741 3742 return true; 3743 } 3744 3745 static irqreturn_t i8xx_irq_handler(int irq, void *arg) 3746 { 3747 struct drm_device *dev = arg; 3748 struct drm_i915_private *dev_priv = dev->dev_private; 3749 u16 iir, new_iir; 3750 u32 pipe_stats[2]; 3751 unsigned long irqflags; 3752 int pipe; 3753 u16 flip_mask = 3754 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3755 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 3756 3757 iir = I915_READ16(IIR); 3758 if (iir == 0) 3759 return IRQ_NONE; 3760 3761 while (iir & ~flip_mask) { 3762 /* Can't rely on pipestat interrupt bit in iir as it might 3763 * have been cleared after the pipestat interrupt was received. 3764 * It doesn't set the bit in iir again, but it still produces 3765 * interrupts (for non-MSI). 
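		 *
		 * Hence the loop below re-reads and acks PIPESTAT under
		 * irq_lock on every pass, acks IIR minus the still-pending
		 * flip bits, and keeps going until a fresh read of IIR
		 * comes back with no non-flip bits set.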
3766 */ 3767 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3768 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 3769 i915_handle_error(dev, false, 3770 "Command parser error, iir 0x%08x", 3771 iir); 3772 3773 for_each_pipe(pipe) { 3774 int reg = PIPESTAT(pipe); 3775 pipe_stats[pipe] = I915_READ(reg); 3776 3777 /* 3778 * Clear the PIPE*STAT regs before the IIR 3779 */ 3780 if (pipe_stats[pipe] & 0x8000ffff) 3781 I915_WRITE(reg, pipe_stats[pipe]); 3782 } 3783 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3784 3785 I915_WRITE16(IIR, iir & ~flip_mask); 3786 new_iir = I915_READ16(IIR); /* Flush posted writes */ 3787 3788 i915_update_dri1_breadcrumb(dev); 3789 3790 if (iir & I915_USER_INTERRUPT) 3791 notify_ring(dev, &dev_priv->ring[RCS]); 3792 3793 for_each_pipe(pipe) { 3794 int plane = pipe; 3795 if (HAS_FBC(dev)) 3796 plane = !plane; 3797 3798 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS && 3799 i8xx_handle_vblank(dev, plane, pipe, iir)) 3800 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane); 3801 3802 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 3803 i9xx_pipe_crc_irq_handler(dev, pipe); 3804 3805 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS && 3806 intel_set_cpu_fifo_underrun_reporting(dev, pipe, false)) 3807 DRM_ERROR("pipe %c underrun\n", pipe_name(pipe)); 3808 } 3809 3810 iir = new_iir; 3811 } 3812 3813 return IRQ_HANDLED; 3814 } 3815 3816 static void i8xx_irq_uninstall(struct drm_device * dev) 3817 { 3818 struct drm_i915_private *dev_priv = dev->dev_private; 3819 int pipe; 3820 3821 for_each_pipe(pipe) { 3822 /* Clear enable bits; then clear status bits */ 3823 I915_WRITE(PIPESTAT(pipe), 0); 3824 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); 3825 } 3826 I915_WRITE16(IMR, 0xffff); 3827 I915_WRITE16(IER, 0x0); 3828 I915_WRITE16(IIR, I915_READ16(IIR)); 3829 } 3830 3831 static void i915_irq_preinstall(struct drm_device * dev) 3832 { 3833 struct drm_i915_private *dev_priv = dev->dev_private; 3834 int pipe; 3835 3836 if (I915_HAS_HOTPLUG(dev)) { 3837 I915_WRITE(PORT_HOTPLUG_EN, 0); 3838 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3839 } 3840 3841 I915_WRITE16(HWSTAM, 0xeffe); 3842 for_each_pipe(pipe) 3843 I915_WRITE(PIPESTAT(pipe), 0); 3844 I915_WRITE(IMR, 0xffffffff); 3845 I915_WRITE(IER, 0x0); 3846 POSTING_READ(IER); 3847 } 3848 3849 static int i915_irq_postinstall(struct drm_device *dev) 3850 { 3851 struct drm_i915_private *dev_priv = dev->dev_private; 3852 u32 enable_mask; 3853 unsigned long irqflags; 3854 3855 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); 3856 3857 /* Unmask the interrupts that we always want on. */ 3858 dev_priv->irq_mask = 3859 ~(I915_ASLE_INTERRUPT | 3860 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3861 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3862 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3863 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | 3864 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 3865 3866 enable_mask = 3867 I915_ASLE_INTERRUPT | 3868 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3869 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3870 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT | 3871 I915_USER_INTERRUPT; 3872 3873 if (I915_HAS_HOTPLUG(dev)) { 3874 I915_WRITE(PORT_HOTPLUG_EN, 0); 3875 POSTING_READ(PORT_HOTPLUG_EN); 3876 3877 /* Enable in IER... 
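(on these parts a source must be both enabled in IER and unmasked in IMR before it is reported)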
*/ 3878 enable_mask |= I915_DISPLAY_PORT_INTERRUPT; 3879 /* and unmask in IMR */ 3880 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT; 3881 } 3882 3883 I915_WRITE(IMR, dev_priv->irq_mask); 3884 I915_WRITE(IER, enable_mask); 3885 POSTING_READ(IER); 3886 3887 i915_enable_asle_pipestat(dev); 3888 3889 /* Interrupt setup is already guaranteed to be single-threaded, this is 3890 * just to make the assert_spin_locked check happy. */ 3891 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3892 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 3893 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 3894 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3895 3896 return 0; 3897 } 3898 3899 /* 3900 * Returns true when a page flip has completed. 3901 */ 3902 static bool i915_handle_vblank(struct drm_device *dev, 3903 int plane, int pipe, u32 iir) 3904 { 3905 struct drm_i915_private *dev_priv = dev->dev_private; 3906 u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane); 3907 3908 if (!intel_pipe_handle_vblank(dev, pipe)) 3909 return false; 3910 3911 if ((iir & flip_pending) == 0) 3912 return false; 3913 3914 intel_prepare_page_flip(dev, plane); 3915 3916 /* We detect FlipDone by looking for the change in PendingFlip from '1' 3917 * to '0' on the following vblank, i.e. IIR has the Pendingflip 3918 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence 3919 * the flip is completed (no longer pending). Since this doesn't raise 3920 * an interrupt per se, we watch for the change at vblank. 3921 */ 3922 if (I915_READ(ISR) & flip_pending) 3923 return false; 3924 3925 intel_finish_page_flip(dev, pipe); 3926 3927 return true; 3928 } 3929 3930 static irqreturn_t i915_irq_handler(int irq, void *arg) 3931 { 3932 struct drm_device *dev = arg; 3933 struct drm_i915_private *dev_priv = dev->dev_private; 3934 u32 iir, new_iir, pipe_stats[I915_MAX_PIPES]; 3935 unsigned long irqflags; 3936 u32 flip_mask = 3937 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3938 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 3939 int pipe, ret = IRQ_NONE; 3940 3941 iir = I915_READ(IIR); 3942 do { 3943 bool irq_received = (iir & ~flip_mask) != 0; 3944 bool blc_event = false; 3945 3946 /* Can't rely on pipestat interrupt bit in iir as it might 3947 * have been cleared after the pipestat interrupt was received. 3948 * It doesn't set the bit in iir again, but it still produces 3949 * interrupts (for non-MSI). 3950 */ 3951 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3952 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 3953 i915_handle_error(dev, false, 3954 "Command parser error, iir 0x%08x", 3955 iir); 3956 3957 for_each_pipe(pipe) { 3958 int reg = PIPESTAT(pipe); 3959 pipe_stats[pipe] = I915_READ(reg); 3960 3961 /* Clear the PIPE*STAT regs before the IIR */ 3962 if (pipe_stats[pipe] & 0x8000ffff) { 3963 I915_WRITE(reg, pipe_stats[pipe]); 3964 irq_received = true; 3965 } 3966 } 3967 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3968 3969 if (!irq_received) 3970 break; 3971 3972 /* Consume port. 
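(the hotplug handler reads and acks PORT_HOTPLUG_STAT).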
Then clear IIR or we'll miss events */ 3973 if (I915_HAS_HOTPLUG(dev) && 3974 iir & I915_DISPLAY_PORT_INTERRUPT) 3975 i9xx_hpd_irq_handler(dev); 3976 3977 I915_WRITE(IIR, iir & ~flip_mask); 3978 new_iir = I915_READ(IIR); /* Flush posted writes */ 3979 3980 if (iir & I915_USER_INTERRUPT) 3981 notify_ring(dev, &dev_priv->ring[RCS]); 3982 3983 for_each_pipe(pipe) { 3984 int plane = pipe; 3985 if (HAS_FBC(dev)) 3986 plane = !plane; 3987 3988 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS && 3989 i915_handle_vblank(dev, plane, pipe, iir)) 3990 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane); 3991 3992 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 3993 blc_event = true; 3994 3995 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 3996 i9xx_pipe_crc_irq_handler(dev, pipe); 3997 3998 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS && 3999 intel_set_cpu_fifo_underrun_reporting(dev, pipe, false)) 4000 DRM_ERROR("pipe %c underrun\n", pipe_name(pipe)); 4001 } 4002 4003 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 4004 intel_opregion_asle_intr(dev); 4005 4006 /* With MSI, interrupts are only generated when iir 4007 * transitions from zero to nonzero. If another bit got 4008 * set while we were handling the existing iir bits, then 4009 * we would never get another interrupt. 4010 * 4011 * This is fine on non-MSI as well, as if we hit this path 4012 * we avoid exiting the interrupt handler only to generate 4013 * another one. 4014 * 4015 * Note that for MSI this could cause a stray interrupt report 4016 * if an interrupt landed in the time between writing IIR and 4017 * the posting read. This should be rare enough to never 4018 * trigger the 99% of 100,000 interrupts test for disabling 4019 * stray interrupts. 4020 */ 4021 ret = IRQ_HANDLED; 4022 iir = new_iir; 4023 } while (iir & ~flip_mask); 4024 4025 i915_update_dri1_breadcrumb(dev); 4026 4027 return ret; 4028 } 4029 4030 static void i915_irq_uninstall(struct drm_device * dev) 4031 { 4032 struct drm_i915_private *dev_priv = dev->dev_private; 4033 int pipe; 4034 4035 intel_hpd_irq_uninstall(dev_priv); 4036 4037 if (I915_HAS_HOTPLUG(dev)) { 4038 I915_WRITE(PORT_HOTPLUG_EN, 0); 4039 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 4040 } 4041 4042 I915_WRITE16(HWSTAM, 0xffff); 4043 for_each_pipe(pipe) { 4044 /* Clear enable bits; then clear status bits */ 4045 I915_WRITE(PIPESTAT(pipe), 0); 4046 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); 4047 } 4048 I915_WRITE(IMR, 0xffffffff); 4049 I915_WRITE(IER, 0x0); 4050 4051 I915_WRITE(IIR, I915_READ(IIR)); 4052 } 4053 4054 static void i965_irq_preinstall(struct drm_device * dev) 4055 { 4056 struct drm_i915_private *dev_priv = dev->dev_private; 4057 int pipe; 4058 4059 I915_WRITE(PORT_HOTPLUG_EN, 0); 4060 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 4061 4062 I915_WRITE(HWSTAM, 0xeffe); 4063 for_each_pipe(pipe) 4064 I915_WRITE(PIPESTAT(pipe), 0); 4065 I915_WRITE(IMR, 0xffffffff); 4066 I915_WRITE(IER, 0x0); 4067 POSTING_READ(IER); 4068 } 4069 4070 static int i965_irq_postinstall(struct drm_device *dev) 4071 { 4072 struct drm_i915_private *dev_priv = dev->dev_private; 4073 u32 enable_mask; 4074 u32 error_mask; 4075 unsigned long irqflags; 4076 4077 /* Unmask the interrupts that we always want on. 
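The plane flip-pending bits are unmasked in IMR so that pending flips can be
tracked at vblank time, but they are excluded from the IER enable_mask below
and therefore never raise an interrupt on their own.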
*/ 4078 dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT | 4079 I915_DISPLAY_PORT_INTERRUPT | 4080 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 4081 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 4082 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 4083 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | 4084 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 4085 4086 enable_mask = ~dev_priv->irq_mask; 4087 enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 4088 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT); 4089 enable_mask |= I915_USER_INTERRUPT; 4090 4091 if (IS_G4X(dev)) 4092 enable_mask |= I915_BSD_USER_INTERRUPT; 4093 4094 /* Interrupt setup is already guaranteed to be single-threaded, this is 4095 * just to make the assert_spin_locked check happy. */ 4096 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 4097 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); 4098 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 4099 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 4100 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 4101 4102 /* 4103 * Enable some error detection, note the instruction error mask 4104 * bit is reserved, so we leave it masked. 4105 */ 4106 if (IS_G4X(dev)) { 4107 error_mask = ~(GM45_ERROR_PAGE_TABLE | 4108 GM45_ERROR_MEM_PRIV | 4109 GM45_ERROR_CP_PRIV | 4110 I915_ERROR_MEMORY_REFRESH); 4111 } else { 4112 error_mask = ~(I915_ERROR_PAGE_TABLE | 4113 I915_ERROR_MEMORY_REFRESH); 4114 } 4115 I915_WRITE(EMR, error_mask); 4116 4117 I915_WRITE(IMR, dev_priv->irq_mask); 4118 I915_WRITE(IER, enable_mask); 4119 POSTING_READ(IER); 4120 4121 I915_WRITE(PORT_HOTPLUG_EN, 0); 4122 POSTING_READ(PORT_HOTPLUG_EN); 4123 4124 i915_enable_asle_pipestat(dev); 4125 4126 return 0; 4127 } 4128 4129 static void i915_hpd_irq_setup(struct drm_device *dev) 4130 { 4131 struct drm_i915_private *dev_priv = dev->dev_private; 4132 struct drm_mode_config *mode_config = &dev->mode_config; 4133 struct intel_encoder *intel_encoder; 4134 u32 hotplug_en; 4135 4136 assert_spin_locked(&dev_priv->irq_lock); 4137 4138 if (I915_HAS_HOTPLUG(dev)) { 4139 hotplug_en = I915_READ(PORT_HOTPLUG_EN); 4140 hotplug_en &= ~HOTPLUG_INT_EN_MASK; 4141 /* Note HDMI and DP share hotplug bits */ 4142 /* enable bits are the same for all generations */ 4143 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head) 4144 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) 4145 hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin]; 4146 /* Programming the CRT detection parameters tends 4147 to generate a spurious hotplug event about three 4148 seconds later. So just do it once. 
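The activation-period and voltage-compare fields are therefore accumulated in
hotplug_en together with the per-pin enable bits and written out in a single
PORT_HOTPLUG_EN update at the end.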
4149 */ 4150 if (IS_G4X(dev)) 4151 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64; 4152 hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK; 4153 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; 4154 4155 /* Ignore TV since it's buggy */ 4156 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); 4157 } 4158 } 4159 4160 static irqreturn_t i965_irq_handler(int irq, void *arg) 4161 { 4162 struct drm_device *dev = arg; 4163 struct drm_i915_private *dev_priv = dev->dev_private; 4164 u32 iir, new_iir; 4165 u32 pipe_stats[I915_MAX_PIPES]; 4166 unsigned long irqflags; 4167 int ret = IRQ_NONE, pipe; 4168 u32 flip_mask = 4169 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 4170 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 4171 4172 iir = I915_READ(IIR); 4173 4174 for (;;) { 4175 bool irq_received = (iir & ~flip_mask) != 0; 4176 bool blc_event = false; 4177 4178 /* Can't rely on pipestat interrupt bit in iir as it might 4179 * have been cleared after the pipestat interrupt was received. 4180 * It doesn't set the bit in iir again, but it still produces 4181 * interrupts (for non-MSI). 4182 */ 4183 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 4184 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 4185 i915_handle_error(dev, false, 4186 "Command parser error, iir 0x%08x", 4187 iir); 4188 4189 for_each_pipe(pipe) { 4190 int reg = PIPESTAT(pipe); 4191 pipe_stats[pipe] = I915_READ(reg); 4192 4193 /* 4194 * Clear the PIPE*STAT regs before the IIR 4195 */ 4196 if (pipe_stats[pipe] & 0x8000ffff) { 4197 I915_WRITE(reg, pipe_stats[pipe]); 4198 irq_received = true; 4199 } 4200 } 4201 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 4202 4203 if (!irq_received) 4204 break; 4205 4206 ret = IRQ_HANDLED; 4207 4208 /* Consume port. Then clear IIR or we'll miss events */ 4209 if (iir & I915_DISPLAY_PORT_INTERRUPT) 4210 i9xx_hpd_irq_handler(dev); 4211 4212 I915_WRITE(IIR, iir & ~flip_mask); 4213 new_iir = I915_READ(IIR); /* Flush posted writes */ 4214 4215 if (iir & I915_USER_INTERRUPT) 4216 notify_ring(dev, &dev_priv->ring[RCS]); 4217 if (iir & I915_BSD_USER_INTERRUPT) 4218 notify_ring(dev, &dev_priv->ring[VCS]); 4219 4220 for_each_pipe(pipe) { 4221 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS && 4222 i915_handle_vblank(dev, pipe, pipe, iir)) 4223 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe); 4224 4225 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 4226 blc_event = true; 4227 4228 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 4229 i9xx_pipe_crc_irq_handler(dev, pipe); 4230 4231 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS && 4232 intel_set_cpu_fifo_underrun_reporting(dev, pipe, false)) 4233 DRM_ERROR("pipe %c underrun\n", pipe_name(pipe)); 4234 } 4235 4236 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 4237 intel_opregion_asle_intr(dev); 4238 4239 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 4240 gmbus_irq_handler(dev); 4241 4242 /* With MSI, interrupts are only generated when iir 4243 * transitions from zero to nonzero. If another bit got 4244 * set while we were handling the existing iir bits, then 4245 * we would never get another interrupt. 4246 * 4247 * This is fine on non-MSI as well, as if we hit this path 4248 * we avoid exiting the interrupt handler only to generate 4249 * another one. 4250 * 4251 * Note that for MSI this could cause a stray interrupt report 4252 * if an interrupt landed in the time between writing IIR and 4253 * the posting read. This should be rare enough to never 4254 * trigger the 99% of 100,000 interrupts test for disabling 4255 * stray interrupts. 
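* To drain everything we keep looping with iir = new_iir and only break out
* above once neither IIR nor the PIPESTAT registers report anything new.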
4256 */ 4257 iir = new_iir; 4258 } 4259 4260 i915_update_dri1_breadcrumb(dev); 4261 4262 return ret; 4263 } 4264 4265 static void i965_irq_uninstall(struct drm_device * dev) 4266 { 4267 struct drm_i915_private *dev_priv = dev->dev_private; 4268 int pipe; 4269 4270 if (!dev_priv) 4271 return; 4272 4273 intel_hpd_irq_uninstall(dev_priv); 4274 4275 I915_WRITE(PORT_HOTPLUG_EN, 0); 4276 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 4277 4278 I915_WRITE(HWSTAM, 0xffffffff); 4279 for_each_pipe(pipe) 4280 I915_WRITE(PIPESTAT(pipe), 0); 4281 I915_WRITE(IMR, 0xffffffff); 4282 I915_WRITE(IER, 0x0); 4283 4284 for_each_pipe(pipe) 4285 I915_WRITE(PIPESTAT(pipe), 4286 I915_READ(PIPESTAT(pipe)) & 0x8000ffff); 4287 I915_WRITE(IIR, I915_READ(IIR)); 4288 } 4289 4290 static void intel_hpd_irq_reenable(unsigned long data) 4291 { 4292 struct drm_i915_private *dev_priv = (struct drm_i915_private *)data; 4293 struct drm_device *dev = dev_priv->dev; 4294 struct drm_mode_config *mode_config = &dev->mode_config; 4295 unsigned long irqflags; 4296 int i; 4297 4298 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 4299 for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) { 4300 struct drm_connector *connector; 4301 4302 if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED) 4303 continue; 4304 4305 dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED; 4306 4307 list_for_each_entry(connector, &mode_config->connector_list, head) { 4308 struct intel_connector *intel_connector = to_intel_connector(connector); 4309 4310 if (intel_connector->encoder->hpd_pin == i) { 4311 if (connector->polled != intel_connector->polled) 4312 DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n", 4313 connector->name); 4314 connector->polled = intel_connector->polled; 4315 if (!connector->polled) 4316 connector->polled = DRM_CONNECTOR_POLL_HPD; 4317 } 4318 } 4319 } 4320 if (dev_priv->display.hpd_irq_setup) 4321 dev_priv->display.hpd_irq_setup(dev); 4322 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 4323 } 4324 4325 void intel_irq_init(struct drm_device *dev) 4326 { 4327 struct drm_i915_private *dev_priv = dev->dev_private; 4328 4329 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func); 4330 INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func); 4331 INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work); 4332 INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work); 4333 4334 /* Let's track the enabled rps events */ 4335 dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS; 4336 4337 setup_timer(&dev_priv->gpu_error.hangcheck_timer, 4338 i915_hangcheck_elapsed, 4339 (unsigned long) dev); 4340 setup_timer(&dev_priv->hotplug_reenable_timer, intel_hpd_irq_reenable, 4341 (unsigned long) dev_priv); 4342 4343 pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE); 4344 4345 if (IS_GEN2(dev)) { 4346 dev->max_vblank_count = 0; 4347 dev->driver->get_vblank_counter = i8xx_get_vblank_counter; 4348 } else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) { 4349 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */ 4350 dev->driver->get_vblank_counter = gm45_get_vblank_counter; 4351 } else { 4352 dev->driver->get_vblank_counter = i915_get_vblank_counter; 4353 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ 4354 } 4355 4356 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 4357 dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp; 4358 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos; 4359 } 4360 4361 if (IS_CHERRYVIEW(dev)) { 4362 dev->driver->irq_handler = 
cherryview_irq_handler; 4363 dev->driver->irq_preinstall = cherryview_irq_preinstall; 4364 dev->driver->irq_postinstall = cherryview_irq_postinstall; 4365 dev->driver->irq_uninstall = cherryview_irq_uninstall; 4366 dev->driver->enable_vblank = valleyview_enable_vblank; 4367 dev->driver->disable_vblank = valleyview_disable_vblank; 4368 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 4369 } else if (IS_VALLEYVIEW(dev)) { 4370 dev->driver->irq_handler = valleyview_irq_handler; 4371 dev->driver->irq_preinstall = valleyview_irq_preinstall; 4372 dev->driver->irq_postinstall = valleyview_irq_postinstall; 4373 dev->driver->irq_uninstall = valleyview_irq_uninstall; 4374 dev->driver->enable_vblank = valleyview_enable_vblank; 4375 dev->driver->disable_vblank = valleyview_disable_vblank; 4376 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 4377 } else if (IS_GEN8(dev)) { 4378 dev->driver->irq_handler = gen8_irq_handler; 4379 dev->driver->irq_preinstall = gen8_irq_reset; 4380 dev->driver->irq_postinstall = gen8_irq_postinstall; 4381 dev->driver->irq_uninstall = gen8_irq_uninstall; 4382 dev->driver->enable_vblank = gen8_enable_vblank; 4383 dev->driver->disable_vblank = gen8_disable_vblank; 4384 dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup; 4385 } else if (HAS_PCH_SPLIT(dev)) { 4386 dev->driver->irq_handler = ironlake_irq_handler; 4387 dev->driver->irq_preinstall = ironlake_irq_reset; 4388 dev->driver->irq_postinstall = ironlake_irq_postinstall; 4389 dev->driver->irq_uninstall = ironlake_irq_uninstall; 4390 dev->driver->enable_vblank = ironlake_enable_vblank; 4391 dev->driver->disable_vblank = ironlake_disable_vblank; 4392 dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup; 4393 } else { 4394 if (INTEL_INFO(dev)->gen == 2) { 4395 dev->driver->irq_preinstall = i8xx_irq_preinstall; 4396 dev->driver->irq_postinstall = i8xx_irq_postinstall; 4397 dev->driver->irq_handler = i8xx_irq_handler; 4398 dev->driver->irq_uninstall = i8xx_irq_uninstall; 4399 } else if (INTEL_INFO(dev)->gen == 3) { 4400 dev->driver->irq_preinstall = i915_irq_preinstall; 4401 dev->driver->irq_postinstall = i915_irq_postinstall; 4402 dev->driver->irq_uninstall = i915_irq_uninstall; 4403 dev->driver->irq_handler = i915_irq_handler; 4404 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 4405 } else { 4406 dev->driver->irq_preinstall = i965_irq_preinstall; 4407 dev->driver->irq_postinstall = i965_irq_postinstall; 4408 dev->driver->irq_uninstall = i965_irq_uninstall; 4409 dev->driver->irq_handler = i965_irq_handler; 4410 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 4411 } 4412 dev->driver->enable_vblank = i915_enable_vblank; 4413 dev->driver->disable_vblank = i915_disable_vblank; 4414 } 4415 } 4416 4417 void intel_hpd_init(struct drm_device *dev) 4418 { 4419 struct drm_i915_private *dev_priv = dev->dev_private; 4420 struct drm_mode_config *mode_config = &dev->mode_config; 4421 struct drm_connector *connector; 4422 unsigned long irqflags; 4423 int i; 4424 4425 for (i = 1; i < HPD_NUM_PINS; i++) { 4426 dev_priv->hpd_stats[i].hpd_cnt = 0; 4427 dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED; 4428 } 4429 list_for_each_entry(connector, &mode_config->connector_list, head) { 4430 struct intel_connector *intel_connector = to_intel_connector(connector); 4431 connector->polled = intel_connector->polled; 4432 if (!connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE) 4433 connector->polled = DRM_CONNECTOR_POLL_HPD; 4434 } 4435 4436 /* Interrupt setup is already guaranteed to be 
single-threaded, this is 4437 * just to make the assert_spin_locked checks happy. */ 4438 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 4439 if (dev_priv->display.hpd_irq_setup) 4440 dev_priv->display.hpd_irq_setup(dev); 4441 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 4442 } 4443 4444 /* Disable interrupts so we can allow runtime PM. */ 4445 void intel_runtime_pm_disable_interrupts(struct drm_device *dev) 4446 { 4447 struct drm_i915_private *dev_priv = dev->dev_private; 4448 4449 dev->driver->irq_uninstall(dev); 4450 dev_priv->pm.irqs_disabled = true; 4451 } 4452 4453 /* Restore interrupts so we can recover from runtime PM. */ 4454 void intel_runtime_pm_restore_interrupts(struct drm_device *dev) 4455 { 4456 struct drm_i915_private *dev_priv = dev->dev_private; 4457 4458 dev_priv->pm.irqs_disabled = false; 4459 dev->driver->irq_preinstall(dev); 4460 dev->driver->irq_postinstall(dev); 4461 } 4462
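/* Usage sketch (illustration only, not part of this file): callers pair the
 * two helpers above around a runtime power transition, roughly:
 *
 *	intel_runtime_pm_disable_interrupts(dev);
 *	... device is runtime suspended and later resumed ...
 *	intel_runtime_pm_restore_interrupts(dev);
 *
 * restore re-runs the driver's preinstall/postinstall hooks so the interrupt
 * registers are reprogrammed from scratch after the hardware lost its state.
 */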