/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/circ_buf.h>
#include <linux/slab.h>
#include <linux/sysrq.h>

#include <drm/drm_drv.h>
#include <drm/drm_irq.h>
#include <drm/i915_drm.h>

#include "display/intel_display_types.h"
#include "display/intel_fifo_underrun.h"
#include "display/intel_hotplug.h"
#include "display/intel_lpe_audio.h"
#include "display/intel_psr.h"

#include "gt/intel_gt.h"
#include "gt/intel_gt_irq.h"
#include "gt/intel_gt_pm_irq.h"
#include "gt/intel_rps.h"

#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_trace.h"
#include "intel_pm.h"

/**
 * DOC: interrupt handling
 *
 * These functions provide the basic support for enabling and disabling the
 * interrupt handling. There's a lot more functionality in i915_irq.c and
 * related files, but that will be described in separate chapters.
 */

typedef bool (*long_pulse_detect_func)(enum hpd_pin pin, u32 val);

static const u32 hpd_ilk[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG,
};

static const u32 hpd_ivb[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
};

static const u32 hpd_bdw[HPD_NUM_PINS] = {
	[HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG,
};

static const u32 hpd_ibx[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_spt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
	[HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT
};

static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

/* BXT hpd list */
static const u32 hpd_bxt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = BXT_DE_PORT_HP_DDIA,
	[HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
	[HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
};

static const u32 hpd_gen11[HPD_NUM_PINS] = {
	[HPD_PORT_C] = GEN11_TC1_HOTPLUG | GEN11_TBT1_HOTPLUG,
	[HPD_PORT_D] = GEN11_TC2_HOTPLUG | GEN11_TBT2_HOTPLUG,
	[HPD_PORT_E] = GEN11_TC3_HOTPLUG | GEN11_TBT3_HOTPLUG,
	[HPD_PORT_F] = GEN11_TC4_HOTPLUG | GEN11_TBT4_HOTPLUG
};

static const u32 hpd_gen12[HPD_NUM_PINS] = {
	[HPD_PORT_D] = GEN11_TC1_HOTPLUG | GEN11_TBT1_HOTPLUG,
	[HPD_PORT_E] = GEN11_TC2_HOTPLUG | GEN11_TBT2_HOTPLUG,
	[HPD_PORT_F] = GEN11_TC3_HOTPLUG | GEN11_TBT3_HOTPLUG,
	[HPD_PORT_G] = GEN11_TC4_HOTPLUG | GEN11_TBT4_HOTPLUG,
	[HPD_PORT_H] = GEN12_TC5_HOTPLUG | GEN12_TBT5_HOTPLUG,
	[HPD_PORT_I] = GEN12_TC6_HOTPLUG | GEN12_TBT6_HOTPLUG
};

static const u32 hpd_icp[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(PORT_A),
	[HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(PORT_B),
	[HPD_PORT_C] = SDE_TC_HOTPLUG_ICP(PORT_TC1),
	[HPD_PORT_D] = SDE_TC_HOTPLUG_ICP(PORT_TC2),
	[HPD_PORT_E] = SDE_TC_HOTPLUG_ICP(PORT_TC3),
	[HPD_PORT_F] = SDE_TC_HOTPLUG_ICP(PORT_TC4),
};

static const u32 hpd_tgp[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(PORT_A),
	[HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(PORT_B),
	[HPD_PORT_C] = SDE_DDI_HOTPLUG_ICP(PORT_C),
	[HPD_PORT_D] = SDE_TC_HOTPLUG_ICP(PORT_TC1),
	[HPD_PORT_E] = SDE_TC_HOTPLUG_ICP(PORT_TC2),
	[HPD_PORT_F] = SDE_TC_HOTPLUG_ICP(PORT_TC3),
	[HPD_PORT_G] = SDE_TC_HOTPLUG_ICP(PORT_TC4),
	[HPD_PORT_H] = SDE_TC_HOTPLUG_ICP(PORT_TC5),
	[HPD_PORT_I] = SDE_TC_HOTPLUG_ICP(PORT_TC6),
};

void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr,
		    i915_reg_t iir, i915_reg_t ier)
{
	intel_uncore_write(uncore, imr, 0xffffffff);
	intel_uncore_posting_read(uncore, imr);

	intel_uncore_write(uncore, ier, 0);

	/* IIR can theoretically queue up two events. Be paranoid. */
	intel_uncore_write(uncore, iir, 0xffffffff);
	intel_uncore_posting_read(uncore, iir);
	intel_uncore_write(uncore, iir, 0xffffffff);
	intel_uncore_posting_read(uncore, iir);
}

void gen2_irq_reset(struct intel_uncore *uncore)
{
	intel_uncore_write16(uncore, GEN2_IMR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IMR);

	intel_uncore_write16(uncore, GEN2_IER, 0);

	/* IIR can theoretically queue up two events. Be paranoid. */
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
}

/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
static void gen3_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg)
{
	u32 val = intel_uncore_read(uncore, reg);

	if (val == 0)
		return;

	WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
	     i915_mmio_reg_offset(reg), val);
	intel_uncore_write(uncore, reg, 0xffffffff);
	intel_uncore_posting_read(uncore, reg);
	intel_uncore_write(uncore, reg, 0xffffffff);
	intel_uncore_posting_read(uncore, reg);
}

static void gen2_assert_iir_is_zero(struct intel_uncore *uncore)
{
	u16 val = intel_uncore_read16(uncore, GEN2_IIR);

	if (val == 0)
		return;

	WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
	     i915_mmio_reg_offset(GEN2_IIR), val);
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
}

void gen3_irq_init(struct intel_uncore *uncore,
		   i915_reg_t imr, u32 imr_val,
		   i915_reg_t ier, u32 ier_val,
		   i915_reg_t iir)
{
	gen3_assert_iir_is_zero(uncore, iir);

	intel_uncore_write(uncore, ier, ier_val);
	intel_uncore_write(uncore, imr, imr_val);
	intel_uncore_posting_read(uncore, imr);
}

void gen2_irq_init(struct intel_uncore *uncore,
		   u32 imr_val, u32 ier_val)
{
	gen2_assert_iir_is_zero(uncore);

	intel_uncore_write16(uncore, GEN2_IER, ier_val);
	intel_uncore_write16(uncore, GEN2_IMR, imr_val);
	intel_uncore_posting_read16(uncore, GEN2_IMR);
}
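/*
 * Illustrative sketch only, not a call sequence taken from this file:
 * the reset/init helpers above are used in pairs, with the reset masking
 * everything and draining IIR at (pre/un)install time and the init
 * asserting IIR is clean before unmasking at postinstall time. The
 * register triplet follows the DE interrupt registers used elsewhere in
 * the driver; the mask values are hypothetical.
 *
 *	gen3_irq_reset(uncore, DEIMR, DEIIR, DEIER);
 *	...
 *	gen3_irq_init(uncore, DEIMR, dev_priv->irq_mask, DEIER, display_mask, DEIIR);
 */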
/* For display hotplug interrupt */
static inline void
i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
				     u32 mask,
				     u32 bits)
{
	u32 val;

	lockdep_assert_held(&dev_priv->irq_lock);
	WARN_ON(bits & ~mask);

	val = I915_READ(PORT_HOTPLUG_EN);
	val &= ~mask;
	val |= bits;
	I915_WRITE(PORT_HOTPLUG_EN, val);
}

/**
 * i915_hotplug_interrupt_update - update hotplug interrupt enable
 * @dev_priv: driver private
 * @mask: bits to update
 * @bits: bits to enable
 * NOTE: the HPD enable bits are modified both inside and outside
 * of an interrupt context. To avoid interference from concurrent
 * read-modify-write cycles, these bits are protected by a spinlock.
 * Since this function is usually not called from a context where the
 * lock is held already, this function acquires the lock itself. A
 * non-locking version is also available.
 */
void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
				   u32 mask,
				   u32 bits)
{
	spin_lock_irq(&dev_priv->irq_lock);
	i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
	spin_unlock_irq(&dev_priv->irq_lock);
}

/**
 * ilk_update_display_irq - update DEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ilk_update_display_irq(struct drm_i915_private *dev_priv,
			    u32 interrupt_mask,
			    u32 enabled_irq_mask)
{
	u32 new_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->irq_mask) {
		dev_priv->irq_mask = new_val;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

/**
 * bdw_update_port_irq - update DE port interrupt
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
				u32 interrupt_mask,
				u32 enabled_irq_mask)
{
	u32 new_val;
	u32 old_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	old_val = I915_READ(GEN8_DE_PORT_IMR);

	new_val = old_val;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != old_val) {
		I915_WRITE(GEN8_DE_PORT_IMR, new_val);
		POSTING_READ(GEN8_DE_PORT_IMR);
	}
}

/**
 * bdw_update_pipe_irq - update DE pipe interrupt
 * @dev_priv: driver private
 * @pipe: pipe whose interrupt to update
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
			 enum pipe pipe,
			 u32 interrupt_mask,
			 u32 enabled_irq_mask)
{
	u32 new_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->de_irq_mask[pipe];
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->de_irq_mask[pipe]) {
		dev_priv->de_irq_mask[pipe] = new_val;
		I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
		POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
	}
}
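/*
 * Worked example (hypothetical bit values) for the update helpers above:
 * IMR is a *mask* register, so a set bit disables the interrupt. With
 * interrupt_mask = 0b0110 and enabled_irq_mask = 0b0010,
 *
 *	new_val = (old & ~0b0110) | (~0b0010 & 0b0110)
 *	        = (old & ~0b0110) | 0b0100
 *
 * i.e. bit 1 ends up unmasked (enabled) and bit 2 masked (disabled),
 * while all bits outside interrupt_mask keep their previous state. The
 * WARN_ON(enabled_irq_mask & ~interrupt_mask) in each helper enforces
 * that only bits selected for update may be enabled.
 */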
/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
				  u32 interrupt_mask,
				  u32 enabled_irq_mask)
{
	u32 sdeimr = I915_READ(SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	lockdep_assert_held(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}

u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
			      enum pipe pipe)
{
	u32 status_mask = dev_priv->pipestat_irq_mask[pipe];
	u32 enable_mask = status_mask << 16;

	lockdep_assert_held(&dev_priv->irq_lock);

	if (INTEL_GEN(dev_priv) < 5)
		goto out;

	/*
	 * On pipe A we don't support the PSR interrupt yet,
	 * on pipe B and C the same bit MBZ.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
		return 0;
	/*
	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
	 * A the same bit is for perf counters which we don't use either.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
		return 0;

	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

out:
	WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		  status_mask & ~PIPESTAT_INT_STATUS_MASK,
		  "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		  pipe_name(pipe), enable_mask, status_mask);

	return enable_mask;
}

void i915_enable_pipestat(struct drm_i915_private *dev_priv,
			  enum pipe pipe, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 enable_mask;

	WARN_ONCE(status_mask & ~PIPESTAT_INT_STATUS_MASK,
		  "pipe %c: status_mask=0x%x\n",
		  pipe_name(pipe), status_mask);

	lockdep_assert_held(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == status_mask)
		return;

	dev_priv->pipestat_irq_mask[pipe] |= status_mask;
	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

	I915_WRITE(reg, enable_mask | status_mask);
	POSTING_READ(reg);
}

void i915_disable_pipestat(struct drm_i915_private *dev_priv,
			   enum pipe pipe, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 enable_mask;

	WARN_ONCE(status_mask & ~PIPESTAT_INT_STATUS_MASK,
		  "pipe %c: status_mask=0x%x\n",
		  pipe_name(pipe), status_mask);

	lockdep_assert_held(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == 0)
		return;

	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

	I915_WRITE(reg, enable_mask | status_mask);
	POSTING_READ(reg);
}
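/*
 * Worked example (illustrative only) for the PIPESTAT helpers above:
 * each status bit in the low half of PIPESTAT has a matching enable bit
 * 16 positions up, which is why i915_pipestat_enable_mask() starts from
 * "status_mask << 16". E.g. requesting PIPE_CRC_DONE_INTERRUPT_STATUS
 * arms the enable bit 16 above it. The FIFO underrun status bit and the
 * VLV/CHV sprite flip-done bits are then special-cased because their
 * enable/status pairing does not follow that simple shift.
 */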
static bool i915_has_asle(struct drm_i915_private *dev_priv)
{
	if (!dev_priv->opregion.asle)
		return false;

	return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
}

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 * @dev_priv: i915 device private
 */
static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
{
	if (!i915_has_asle(dev_priv))
		return;

	spin_lock_irq(&dev_priv->irq_lock);

	i915_enable_pipestat(dev_priv, PIPE_B,
			     PIPE_LEGACY_BLC_EVENT_STATUS);
	if (INTEL_GEN(dev_priv) >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_STATUS);

	spin_unlock_irq(&dev_priv->irq_lock);
}

/*
 * This timing diagram depicts the video signal in and
 * around the vertical blanking period.
 *
 * Assumptions about the fictitious mode used in this example:
 *  vblank_start >= 3
 *  vsync_start = vblank_start + 1
 *  vsync_end = vblank_start + 2
 *  vtotal = vblank_start + 3
 *
 *           start of vblank:
 *           latch double buffered registers
 *           increment frame counter (ctg+)
 *           generate start of vblank interrupt (gen4+)
 *           |
 *           |          frame start:
 *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
 *           |          may be shifted forward 1-3 extra lines via PIPECONF
 *           |          |
 *           |          |  start of vsync:
 *           |          |  generate vsync interrupt
 *           |          |  |
 * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
 *       .   \hs/   .      \hs/          \hs/          \hs/      .   \hs/
 * ----va---> <-----------------vb--------------------> <--------va-------------
 *       |          |       <----vs----->                     |
 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
 *       |          |                                         |
 *       last visible pixel                                   first visible pixel
 *                  |                                         increment frame counter (gen3/4)
 *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
 *
 * x  = horizontal active
 * _  = horizontal blanking
 * hs = horizontal sync
 * va = vertical active
 * vb = vertical blanking
 * vs = vertical sync
 * vbs = vblank_start (number)
 *
 * Summary:
 * - most events happen at the start of horizontal sync
 * - frame start happens at the start of horizontal blank, 1-4 lines
 *   (depending on PIPECONF settings) after the start of vblank
 * - gen3/4 pixel and frame counter are synchronized with the start
 *   of horizontal active on the first line of vertical active
 */
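/*
 * Worked example (hypothetical timings) for i915_get_vblank_counter()
 * below: the hardware frame counter increments at the start of active,
 * but the cooked-up vblank counter must increment already at the start
 * of vblank. With vbl_start = 1080 lines, htotal = 2200 and
 * hsync_start = 2008, the threshold in pixels is
 *
 *	1080 * 2200 - (2200 - 2008) = 2375808
 *
 * so while the pixel counter reads >= 2375808 (we are past the start of
 * vblank but the frame counter hasn't ticked yet) the code reports
 * "hw frame count + 1"; that is the "+ (pixel >= vbl_start)" term.
 */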
/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
u32 i915_get_vblank_counter(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[drm_crtc_index(crtc)];
	const struct drm_display_mode *mode = &vblank->hwmode;
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	i915_reg_t high_frame, low_frame;
	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
	unsigned long irqflags;

	/*
	 * On i965gm TV output the frame counter only works up to
	 * the point when we enable the TV encoder. After that the
	 * frame counter ceases to work and reads zero. We need a
	 * vblank wait before enabling the TV encoder and so we
	 * have to enable vblank interrupts while the frame counter
	 * is still in a working state. However the core vblank code
	 * does not like us returning non-zero frame counter values
	 * when we've told it that we don't have a working frame
	 * counter. Thus we must stop non-zero values leaking out.
	 */
	if (!vblank->max_vblank_count)
		return 0;

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vbl_start = mode->crtc_vblank_start;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vbl_start = DIV_ROUND_UP(vbl_start, 2);

	/* Convert to pixel count */
	vbl_start *= htotal;

	/* Start of vblank event occurs at start of hsync */
	vbl_start -= htotal - hsync_start;

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK;
		low = I915_READ_FW(low_frame);
		high2 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * The frame counter increments at beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}

u32 g4x_get_vblank_counter(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;

	return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
}

/*
 * On certain encoders and platforms the pipe scanline register will not
 * work to get the scanline, since the timings are driven from the PORT,
 * or there are issues with the scanline register updates.
 * This function will use the Framestamp and current
 * timestamp registers to calculate the scanline.
 */
static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct drm_vblank_crtc *vblank =
		&crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
	const struct drm_display_mode *mode = &vblank->hwmode;
	u32 vblank_start = mode->crtc_vblank_start;
	u32 vtotal = mode->crtc_vtotal;
	u32 htotal = mode->crtc_htotal;
	u32 clock = mode->crtc_clock;
	u32 scanline, scan_prev_time, scan_curr_time, scan_post_time;

	/*
	 * To avoid the race condition where we might cross into the
	 * next vblank just between the PIPE_FRMTMSTMP and TIMESTAMP_CTR
	 * reads, we make sure we read PIPE_FRMTMSTMP and TIMESTAMP_CTR
	 * during the same frame.
	 */
	do {
		/*
		 * This field provides read back of the display
		 * pipe frame time stamp. The time stamp value
		 * is sampled at every start of vertical blank.
		 */
		scan_prev_time = I915_READ_FW(PIPE_FRMTMSTMP(crtc->pipe));

		/*
		 * The TIMESTAMP_CTR register has the current
		 * time stamp value.
		 */
		scan_curr_time = I915_READ_FW(IVB_TIMESTAMP_CTR);

		scan_post_time = I915_READ_FW(PIPE_FRMTMSTMP(crtc->pipe));
	} while (scan_post_time != scan_prev_time);

	scanline = div_u64(mul_u32_u32(scan_curr_time - scan_prev_time,
				       clock), 1000 * htotal);
	scanline = min(scanline, vtotal - 1);
	scanline = (scanline + vblank_start) % vtotal;

	return scanline;
}
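/*
 * Worked example (hypothetical numbers) for the timestamp based scanline
 * above: scan_curr_time - scan_prev_time is the time since the last start
 * of vblank in TIMESTAMP_CTR units; the formula's factor of 1000 against
 * the kHz pixel clock implies those units are microseconds. With
 * clock = 148500 kHz, htotal = 2200 and a delta of 7407 units:
 *
 *	scanline = 7407 * 148500 / (1000 * 2200) ~= 500
 *
 * which is then clamped to vtotal - 1 and rotated by vblank_start, since
 * the frame timestamp is latched at the start of vblank, not at line 0.
 */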
/*
 * I915_READ_FW, only for fast reads of display block, no need for
 * forcewake etc.
 */
static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct drm_display_mode *mode;
	struct drm_vblank_crtc *vblank;
	enum pipe pipe = crtc->pipe;
	int position, vtotal;

	if (!crtc->active)
		return -1;

	vblank = &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
	mode = &vblank->hwmode;

	if (mode->private_flags & I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP)
		return __intel_get_crtc_scanline_from_timestamp(crtc);

	vtotal = mode->crtc_vtotal;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vtotal /= 2;

	if (IS_GEN(dev_priv, 2))
		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
	else
		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

	/*
	 * On HSW, the DSL reg (0x70000) appears to return 0 if we
	 * read it just before the start of vblank. So try it again
	 * so we don't accidentally end up spanning a vblank frame
	 * increment, causing the pipe_update_end() code to squawk at us.
	 *
	 * The nature of this problem means we can't simply check the ISR
	 * bit and return the vblank start value; nor can we use the scanline
	 * debug register in the transcoder as it appears to have the same
	 * problem. We may need to extend this to include other platforms,
	 * but so far testing only shows the problem on HSW.
	 */
	if (HAS_DDI(dev_priv) && !position) {
		int i, temp;

		for (i = 0; i < 100; i++) {
			udelay(1);
			temp = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
			if (temp != position) {
				position = temp;
				break;
			}
		}
	}

	/*
	 * See update_scanline_offset() for the details on the
	 * scanline_offset adjustment.
	 */
	return (position + crtc->scanline_offset) % vtotal;
}
bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int index,
			      bool in_vblank_irq, int *vpos, int *hpos,
			      ktime_t *stime, ktime_t *etime,
			      const struct drm_display_mode *mode)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *crtc = to_intel_crtc(drm_crtc_from_index(dev, index));
	enum pipe pipe = crtc->pipe;
	int position;
	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
	unsigned long irqflags;
	bool use_scanline_counter = INTEL_GEN(dev_priv) >= 5 ||
		IS_G4X(dev_priv) || IS_GEN(dev_priv, 2) ||
		mode->private_flags & I915_MODE_FLAG_USE_SCANLINE_COUNTER;

	if (WARN_ON(!mode->crtc_clock)) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return false;
	}

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (use_scanline_counter) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = __intel_get_crtc_scanline(crtc);
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (I915_READ_FW(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;

		/*
		 * In interlaced modes, the pixel counter counts all pixels,
		 * so one field will have htotal more pixels. In order to avoid
		 * the reported position from jumping backwards when the pixel
		 * counter is beyond the length of the shorter field, just
		 * clamp the position to the length of the shorter field. This
		 * matches how the scanline counter based position works since
		 * the scanline counter doesn't count the two half lines.
		 */
		if (position >= vtotal)
			position = vtotal - 1;

		/*
		 * Start of vblank interrupt is triggered at start of hsync,
		 * just prior to the first active line of vblank. However we
		 * consider lines to start at the leading edge of horizontal
		 * active. So, should we get here before we've crossed into
		 * the horizontal active of the first line in vblank, we would
		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
		 * always add htotal-hsync_start to the current pixel position.
		 */
		position = (position + htotal - hsync_start) % vtotal;
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;

	if (use_scanline_counter) {
		*vpos = position;
		*hpos = 0;
	} else {
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	return true;
}

int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	unsigned long irqflags;
	int position;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	position = __intel_get_crtc_scanline(crtc);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	return position;
}
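/*
 * Worked example (hypothetical timings) of the position convention used
 * by i915_get_crtc_scanoutpos() above: with vtotal = 1125,
 * vbl_start = 1084 and vbl_end = 1125, a raw scanline of 1100 lies
 * inside vblank and is reported as 1100 - 1125 = -25, counting up
 * towards 0 at vbl_end, while a raw scanline of 100 lies outside vblank
 * and is reported as 100 + (1125 - 1125) = 100, counting up from
 * vbl_end.
 */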
/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * the same row is more likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), l3_parity.error_work);
	struct intel_gt *gt = &dev_priv->gt;
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	u32 misccpctl;
	u8 slice = 0;

	/*
	 * We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->drm.struct_mutex);

	/* If we've screwed up tracking, just let the interrupt fire again */
	if (WARN_ON(!dev_priv->l3_parity.which_slice))
		goto out;

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		i915_reg_t reg;

		slice--;
		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1 << slice);

		reg = GEN7_L3CDERRST1(slice);

		error_status = I915_READ(reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		POSTING_READ(reg);

		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

		kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj,
				   KOBJ_CHANGE, parity_event);

		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			  slice, row, bank, subbank);

		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
	}

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

out:
	WARN_ON(dev_priv->l3_parity.which_slice);
	spin_lock_irq(&gt->irq_lock);
	gen5_gt_enable_irq(gt, GT_PARITY_ERROR(dev_priv));
	spin_unlock_irq(&gt->irq_lock);

	mutex_unlock(&dev_priv->drm.struct_mutex);
}

static bool gen11_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_C:
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC1);
	case HPD_PORT_D:
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC2);
	case HPD_PORT_E:
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC3);
	case HPD_PORT_F:
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC4);
	default:
		return false;
	}
}

static bool gen12_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_D:
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC1);
	case HPD_PORT_E:
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC2);
	case HPD_PORT_F:
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC3);
	case HPD_PORT_G:
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC4);
	case HPD_PORT_H:
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC5);
	case HPD_PORT_I:
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC6);
	default:
		return false;
	}
}

static bool bxt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_A:
		return val & PORTA_HOTPLUG_LONG_DETECT;
	case HPD_PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case HPD_PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}
static bool icp_ddi_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_A:
		return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(PORT_A);
	case HPD_PORT_B:
		return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(PORT_B);
	case HPD_PORT_C:
		return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(PORT_C);
	default:
		return false;
	}
}

static bool icp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_C:
		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC1);
	case HPD_PORT_D:
		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC2);
	case HPD_PORT_E:
		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC3);
	case HPD_PORT_F:
		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC4);
	default:
		return false;
	}
}

static bool tgp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_D:
		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC1);
	case HPD_PORT_E:
		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC2);
	case HPD_PORT_F:
		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC3);
	case HPD_PORT_G:
		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC4);
	case HPD_PORT_H:
		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC5);
	case HPD_PORT_I:
		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC6);
	default:
		return false;
	}
}

static bool spt_port_hotplug2_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_E:
		return val & PORTE_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool spt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_A:
		return val & PORTA_HOTPLUG_LONG_DETECT;
	case HPD_PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case HPD_PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	case HPD_PORT_D:
		return val & PORTD_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool ilk_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_A:
		return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool pch_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case HPD_PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	case HPD_PORT_D:
		return val & PORTD_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool i9xx_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_B:
		return val & PORTB_HOTPLUG_INT_LONG_PULSE;
	case HPD_PORT_C:
		return val & PORTC_HOTPLUG_INT_LONG_PULSE;
	case HPD_PORT_D:
		return val & PORTD_HOTPLUG_INT_LONG_PULSE;
	default:
		return false;
	}
}
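/*
 * Illustrative sketch only (mirroring what spt_irq_handler() does later
 * in this file): the detectors above are meant to be paired with
 * intel_get_hpd_pins() below, called once per trigger register while
 * accumulating into the same pin/long masks; the variable names here
 * are hypothetical.
 *
 *	u32 pin_mask = 0, long_mask = 0;
 *
 *	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
 *			   hotplug_trigger, dig_hotplug_reg, hpd_spt,
 *			   spt_port_hotplug_long_detect);
 *	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
 *			   hotplug2_trigger, dig_hotplug_reg2, hpd_spt,
 *			   spt_port_hotplug2_long_detect);
 *	if (pin_mask)
 *		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
 */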
/*
 * Get a bit mask of pins that have triggered, and which ones may be long.
 * This can be called multiple times with the same masks to accumulate
 * hotplug detection results from several registers.
 *
 * Note that the caller is expected to zero out the masks initially.
 */
static void intel_get_hpd_pins(struct drm_i915_private *dev_priv,
			       u32 *pin_mask, u32 *long_mask,
			       u32 hotplug_trigger, u32 dig_hotplug_reg,
			       const u32 hpd[HPD_NUM_PINS],
			       bool long_pulse_detect(enum hpd_pin pin, u32 val))
{
	enum hpd_pin pin;

	BUILD_BUG_ON(BITS_PER_TYPE(*pin_mask) < HPD_NUM_PINS);

	for_each_hpd_pin(pin) {
		if ((hpd[pin] & hotplug_trigger) == 0)
			continue;

		*pin_mask |= BIT(pin);

		if (long_pulse_detect(pin, dig_hotplug_reg))
			*long_mask |= BIT(pin);
	}

	DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x, long 0x%08x\n",
			 hotplug_trigger, dig_hotplug_reg, *pin_mask, *long_mask);
}

static void gmbus_irq_handler(struct drm_i915_private *dev_priv)
{
	wake_up_all(&dev_priv->gmbus_wait_queue);
}

static void dp_aux_irq_handler(struct drm_i915_private *dev_priv)
{
	wake_up_all(&dev_priv->gmbus_wait_queue);
}

#if defined(CONFIG_DEBUG_FS)
static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
					 enum pipe pipe,
					 u32 crc0, u32 crc1,
					 u32 crc2, u32 crc3,
					 u32 crc4)
{
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
	u32 crcs[5] = { crc0, crc1, crc2, crc3, crc4 };

	trace_intel_pipe_crc(crtc, crcs);

	spin_lock(&pipe_crc->lock);
	/*
	 * For some not yet identified reason, the first CRC is
	 * bonkers. So let's just wait for the next vblank and read
	 * out the buggy result.
	 *
	 * On GEN8+ sometimes the second CRC is bonkers as well, so
	 * don't trust that one either.
	 */
	if (pipe_crc->skipped <= 0 ||
	    (INTEL_GEN(dev_priv) >= 8 && pipe_crc->skipped == 1)) {
		pipe_crc->skipped++;
		spin_unlock(&pipe_crc->lock);
		return;
	}
	spin_unlock(&pipe_crc->lock);

	drm_crtc_add_crc_entry(&crtc->base, true,
			       drm_crtc_accurate_vblank_count(&crtc->base),
			       crcs);
}
#else
static inline void
display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
			     enum pipe pipe,
			     u32 crc0, u32 crc1,
			     u32 crc2, u32 crc3,
			     u32 crc4) {}
#endif

static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	display_pipe_crc_irq_handler(dev_priv, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     0, 0, 0, 0);
}

static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	display_pipe_crc_irq_handler(dev_priv, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
}

static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	u32 res1, res2;

	if (INTEL_GEN(dev_priv) >= 3)
		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
	else
		res1 = 0;

	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
	else
		res2 = 0;

	display_pipe_crc_irq_handler(dev_priv, pipe,
				     I915_READ(PIPE_CRC_RES_RED(pipe)),
				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
				     res1, res2);
}
static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		I915_WRITE(PIPESTAT(pipe),
			   PIPESTAT_INT_STATUS_MASK |
			   PIPE_FIFO_UNDERRUN_STATUS);

		dev_priv->pipestat_irq_mask[pipe] = 0;
	}
}

static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
				  u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	enum pipe pipe;

	spin_lock(&dev_priv->irq_lock);

	if (!dev_priv->display_irqs_enabled) {
		spin_unlock(&dev_priv->irq_lock);
		return;
	}

	for_each_pipe(dev_priv, pipe) {
		i915_reg_t reg;
		u32 status_mask, enable_mask, iir_bit = 0;

		/*
		 * PIPESTAT bits get signalled even when the interrupt is
		 * disabled with the mask bits, and some of the status bits do
		 * not generate interrupts at all (like the underrun bit). Hence
		 * we need to be careful that we only handle what we want to
		 * handle.
		 */

		/* fifo underruns are filtered in the underrun handler. */
		status_mask = PIPE_FIFO_UNDERRUN_STATUS;

		switch (pipe) {
		default:
		case PIPE_A:
			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
			break;
		case PIPE_B:
			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
			break;
		case PIPE_C:
			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
			break;
		}
		if (iir & iir_bit)
			status_mask |= dev_priv->pipestat_irq_mask[pipe];

		if (!status_mask)
			continue;

		reg = PIPESTAT(pipe);
		pipe_stats[pipe] = I915_READ(reg) & status_mask;
		enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

		/*
		 * Clear the PIPE*STAT regs before the IIR
		 *
		 * Toggle the enable bits to make sure we get an
		 * edge in the ISR pipe event bit if we don't clear
		 * all the enabled status bits. Otherwise the edge
		 * triggered IIR on i965/g4x wouldn't notice that
		 * an interrupt is still pending.
		 */
		if (pipe_stats[pipe]) {
			I915_WRITE(reg, pipe_stats[pipe]);
			I915_WRITE(reg, enable_mask);
		}
	}
	spin_unlock(&dev_priv->irq_lock);
}

static void i8xx_pipestat_irq_handler(struct drm_i915_private *dev_priv,
				      u16 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
			drm_handle_vblank(&dev_priv->drm, pipe);

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}
}

static void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv,
				      u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	bool blc_event = false;
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
			drm_handle_vblank(&dev_priv->drm, pipe);

		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
			blc_event = true;

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (blc_event || (iir & I915_ASLE_INTERRUPT))
		intel_opregion_asle_intr(dev_priv);
}

static void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv,
				      u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	bool blc_event = false;
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
			drm_handle_vblank(&dev_priv->drm, pipe);

		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
			blc_event = true;

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (blc_event || (iir & I915_ASLE_INTERRUPT))
		intel_opregion_asle_intr(dev_priv);

	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		gmbus_irq_handler(dev_priv);
}

static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
					    u32 pipe_stats[I915_MAX_PIPES])
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
			drm_handle_vblank(&dev_priv->drm, pipe);

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		gmbus_irq_handler(dev_priv);
}

static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
{
	u32 hotplug_status = 0, hotplug_status_mask;
	int i;

	if (IS_G4X(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		hotplug_status_mask = HOTPLUG_INT_STATUS_G4X |
			DP_AUX_CHANNEL_MASK_INT_STATUS_G4X;
	else
		hotplug_status_mask = HOTPLUG_INT_STATUS_I915;

	/*
	 * We absolutely have to clear all the pending interrupt
	 * bits in PORT_HOTPLUG_STAT.
	 * Otherwise the ISR port
	 * interrupt bit won't have an edge, and the i965/g4x
	 * edge triggered IIR will not notice that an interrupt
	 * is still pending. We can't use PORT_HOTPLUG_EN to
	 * guarantee the edge as the act of toggling the enable
	 * bits can itself generate a new hotplug interrupt :(
	 */
	for (i = 0; i < 10; i++) {
		u32 tmp = I915_READ(PORT_HOTPLUG_STAT) & hotplug_status_mask;

		if (tmp == 0)
			return hotplug_status;

		hotplug_status |= tmp;
		I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
	}

	WARN_ONCE(1,
		  "PORT_HOTPLUG_STAT did not clear (0x%08x)\n",
		  I915_READ(PORT_HOTPLUG_STAT));

	return hotplug_status;
}

static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv,
				 u32 hotplug_status)
{
	u32 pin_mask = 0, long_mask = 0;

	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;

		if (hotplug_trigger) {
			intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
					   hotplug_trigger, hotplug_trigger,
					   hpd_status_g4x,
					   i9xx_port_hotplug_long_detect);

			intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
		}

		if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
			dp_aux_irq_handler(dev_priv);
	} else {
		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

		if (hotplug_trigger) {
			intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
					   hotplug_trigger, hotplug_trigger,
					   hpd_status_i915,
					   i9xx_port_hotplug_long_detect);
			intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
		}
	}
}

static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 iir, gt_iir, pm_iir;
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 hotplug_status = 0;
		u32 ier = 0;

		gt_iir = I915_READ(GTIIR);
		pm_iir = I915_READ(GEN6_PMIIR);
		iir = I915_READ(VLV_IIR);

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			break;

		ret = IRQ_HANDLED;

		/*
		 * Theory on interrupt generation, based on empirical evidence:
		 *
		 * x = ((VLV_IIR & VLV_IER) ||
		 *      (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) &&
		 *       (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE)));
		 *
		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
		 * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to
		 * guarantee the CPU interrupt will be raised again even if we
		 * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR
		 * bits this time around.
		 */
		I915_WRITE(VLV_MASTER_IER, 0);
		ier = I915_READ(VLV_IER);
		I915_WRITE(VLV_IER, 0);

		if (gt_iir)
			I915_WRITE(GTIIR, gt_iir);
		if (pm_iir)
			I915_WRITE(GEN6_PMIIR, pm_iir);

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & (I915_LPE_PIPE_A_INTERRUPT |
			   I915_LPE_PIPE_B_INTERRUPT))
			intel_lpe_audio_irq_handler(dev_priv);

		/*
		 * VLV_IIR is single buffered, and reflects the level
		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
		 */
		if (iir)
			I915_WRITE(VLV_IIR, iir);

		I915_WRITE(VLV_IER, ier);
		I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);

		if (gt_iir)
			gen6_gt_irq_handler(&dev_priv->gt, gt_iir);
		if (pm_iir)
			gen6_rps_irq_handler(&dev_priv->gt.rps, pm_iir);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
	} while (0);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}

static irqreturn_t cherryview_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 master_ctl, iir;
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 hotplug_status = 0;
		u32 gt_iir[4];
		u32 ier = 0;

		master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
		iir = I915_READ(VLV_IIR);

		if (master_ctl == 0 && iir == 0)
			break;

		ret = IRQ_HANDLED;

		/*
		 * Theory on interrupt generation, based on empirical evidence:
		 *
		 * x = ((VLV_IIR & VLV_IER) ||
		 *      ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) &&
		 *       (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL)));
		 *
		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
		 * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to
		 * guarantee the CPU interrupt will be raised again even if we
		 * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL
		 * bits this time around.
		 */
		I915_WRITE(GEN8_MASTER_IRQ, 0);
		ier = I915_READ(VLV_IER);
		I915_WRITE(VLV_IER, 0);

		gen8_gt_irq_ack(&dev_priv->gt, master_ctl, gt_iir);

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & (I915_LPE_PIPE_A_INTERRUPT |
			   I915_LPE_PIPE_B_INTERRUPT |
			   I915_LPE_PIPE_C_INTERRUPT))
			intel_lpe_audio_irq_handler(dev_priv);

		/*
		 * VLV_IIR is single buffered, and reflects the level
		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
		 */
		if (iir)
			I915_WRITE(VLV_IIR, iir);

		I915_WRITE(VLV_IER, ier);
		I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);

		gen8_gt_irq_handler(&dev_priv->gt, master_ctl, gt_iir);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
	} while (0);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}

static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
				u32 hotplug_trigger,
				const u32 hpd[HPD_NUM_PINS])
{
	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;

	/*
	 * Somehow the PCH doesn't seem to really ack the interrupt to the CPU
	 * unless we touch the hotplug register, even if hotplug_trigger is
	 * zero. Not acking leads to "The master control interrupt lied (SDE)!"
	 * errors.
	 */
	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
	if (!hotplug_trigger) {
		u32 mask = PORTA_HOTPLUG_STATUS_MASK |
			PORTD_HOTPLUG_STATUS_MASK |
			PORTC_HOTPLUG_STATUS_MASK |
			PORTB_HOTPLUG_STATUS_MASK;
		dig_hotplug_reg &= ~mask;
	}

	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
	if (!hotplug_trigger)
		return;

	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger,
			   dig_hotplug_reg, hpd,
			   pch_port_hotplug_long_detect);

	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
}

static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
	enum pipe pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;

	ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ibx);

	if (pch_iir & SDE_AUDIO_POWER_MASK) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
			       SDE_AUDIO_POWER_SHIFT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
				 port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK)
		dp_aux_irq_handler(dev_priv);

	if (pch_iir & SDE_GMBUS)
		gmbus_irq_handler(dev_priv);

	if (pch_iir & SDE_AUDIO_HDCP_MASK)
		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");

	if (pch_iir & SDE_AUDIO_TRANS_MASK)
		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");

	if (pch_iir & SDE_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (pch_iir & SDE_FDI_MASK)
		for_each_pipe(dev_priv, pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");

	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");

	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
		intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A);

	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
		intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_B);
}

static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
{
	u32 err_int = I915_READ(GEN7_ERR_INT);
	enum pipe pipe;

	if (err_int & ERR_INT_POISON)
		DRM_ERROR("Poison interrupt\n");

	for_each_pipe(dev_priv, pipe) {
		if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
			if (IS_IVYBRIDGE(dev_priv))
				ivb_pipe_crc_irq_handler(dev_priv, pipe);
			else
				hsw_pipe_crc_irq_handler(dev_priv, pipe);
		}
	}

	I915_WRITE(GEN7_ERR_INT, err_int);
}

static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
{
	u32 serr_int = I915_READ(SERR_INT);
	enum pipe pipe;

	if (serr_int & SERR_INT_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	for_each_pipe(dev_priv, pipe)
		if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe))
			intel_pch_fifo_underrun_irq_handler(dev_priv, pipe);

	I915_WRITE(SERR_INT, serr_int);
}

static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
	enum pipe pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;

	ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_cpt);

	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
			       SDE_AUDIO_POWER_SHIFT_CPT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
				 port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK_CPT)
		dp_aux_irq_handler(dev_priv);

	if (pch_iir & SDE_GMBUS_CPT)
		gmbus_irq_handler(dev_priv);

	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");

	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");

	if (pch_iir & SDE_FDI_MASK_CPT)
		for_each_pipe(dev_priv, pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & SDE_ERROR_CPT)
		cpt_serr_int_handler(dev_priv);
}

static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
	u32 ddi_hotplug_trigger, tc_hotplug_trigger;
	u32 pin_mask = 0, long_mask = 0;
	bool (*tc_port_hotplug_long_detect)(enum hpd_pin pin, u32 val);
	const u32 *pins;

	if (HAS_PCH_TGP(dev_priv)) {
		ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_TGP;
		tc_hotplug_trigger = pch_iir & SDE_TC_MASK_TGP;
		tc_port_hotplug_long_detect = tgp_tc_port_hotplug_long_detect;
		pins = hpd_tgp;
	} else if (HAS_PCH_JSP(dev_priv)) {
		ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_TGP;
		tc_hotplug_trigger = 0;
		pins = hpd_tgp;
	} else if (HAS_PCH_MCC(dev_priv)) {
		ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_ICP;
		tc_hotplug_trigger = pch_iir & SDE_TC_HOTPLUG_ICP(PORT_TC1);
		tc_port_hotplug_long_detect = icp_tc_port_hotplug_long_detect;
		pins = hpd_icp;
	} else {
		WARN(!HAS_PCH_ICP(dev_priv),
		     "Unrecognized PCH type 0x%x\n", INTEL_PCH_TYPE(dev_priv));

		ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_ICP;
		tc_hotplug_trigger = pch_iir & SDE_TC_MASK_ICP;
		tc_port_hotplug_long_detect = icp_tc_port_hotplug_long_detect;
		pins = hpd_icp;
	}

	if (ddi_hotplug_trigger) {
		u32 dig_hotplug_reg;

		dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_DDI);
		I915_WRITE(SHOTPLUG_CTL_DDI, dig_hotplug_reg);

		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
				   ddi_hotplug_trigger,
				   dig_hotplug_reg, pins,
				   icp_ddi_port_hotplug_long_detect);
	}

	if (tc_hotplug_trigger) {
		u32 dig_hotplug_reg;

		dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_TC);
		I915_WRITE(SHOTPLUG_CTL_TC, dig_hotplug_reg);

		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
				   tc_hotplug_trigger,
				   dig_hotplug_reg, pins,
				   tc_port_hotplug_long_detect);
	}
1880 if (pin_mask) 1881 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 1882 1883 if (pch_iir & SDE_GMBUS_ICP) 1884 gmbus_irq_handler(dev_priv); 1885 } 1886 1887 static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) 1888 { 1889 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT & 1890 ~SDE_PORTE_HOTPLUG_SPT; 1891 u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT; 1892 u32 pin_mask = 0, long_mask = 0; 1893 1894 if (hotplug_trigger) { 1895 u32 dig_hotplug_reg; 1896 1897 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); 1898 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); 1899 1900 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 1901 hotplug_trigger, dig_hotplug_reg, hpd_spt, 1902 spt_port_hotplug_long_detect); 1903 } 1904 1905 if (hotplug2_trigger) { 1906 u32 dig_hotplug_reg; 1907 1908 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2); 1909 I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg); 1910 1911 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 1912 hotplug2_trigger, dig_hotplug_reg, hpd_spt, 1913 spt_port_hotplug2_long_detect); 1914 } 1915 1916 if (pin_mask) 1917 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 1918 1919 if (pch_iir & SDE_GMBUS_CPT) 1920 gmbus_irq_handler(dev_priv); 1921 } 1922 1923 static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv, 1924 u32 hotplug_trigger, 1925 const u32 hpd[HPD_NUM_PINS]) 1926 { 1927 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; 1928 1929 dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL); 1930 I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg); 1931 1932 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger, 1933 dig_hotplug_reg, hpd, 1934 ilk_port_hotplug_long_detect); 1935 1936 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 1937 } 1938 1939 static void ilk_display_irq_handler(struct drm_i915_private *dev_priv, 1940 u32 de_iir) 1941 { 1942 enum pipe pipe; 1943 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG; 1944 1945 if (hotplug_trigger) 1946 ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ilk); 1947 1948 if (de_iir & DE_AUX_CHANNEL_A) 1949 dp_aux_irq_handler(dev_priv); 1950 1951 if (de_iir & DE_GSE) 1952 intel_opregion_asle_intr(dev_priv); 1953 1954 if (de_iir & DE_POISON) 1955 DRM_ERROR("Poison interrupt\n"); 1956 1957 for_each_pipe(dev_priv, pipe) { 1958 if (de_iir & DE_PIPE_VBLANK(pipe)) 1959 drm_handle_vblank(&dev_priv->drm, pipe); 1960 1961 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe)) 1962 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 1963 1964 if (de_iir & DE_PIPE_CRC_DONE(pipe)) 1965 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 1966 } 1967 1968 /* check event from PCH */ 1969 if (de_iir & DE_PCH_EVENT) { 1970 u32 pch_iir = I915_READ(SDEIIR); 1971 1972 if (HAS_PCH_CPT(dev_priv)) 1973 cpt_irq_handler(dev_priv, pch_iir); 1974 else 1975 ibx_irq_handler(dev_priv, pch_iir); 1976 1977 /* clear the PCH hotplug event before clearing the CPU irq */ 1978 I915_WRITE(SDEIIR, pch_iir); 1979 } 1980 1981 if (IS_GEN(dev_priv, 5) && de_iir & DE_PCU_EVENT) 1982 gen5_rps_irq_handler(&dev_priv->gt.rps); 1983 } 1984 1985 static void ivb_display_irq_handler(struct drm_i915_private *dev_priv, 1986 u32 de_iir) 1987 { 1988 enum pipe pipe; 1989 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB; 1990 1991 if (hotplug_trigger) 1992 ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ivb); 1993 1994 if (de_iir & DE_ERR_INT_IVB) 1995 ivb_err_int_handler(dev_priv); 1996 1997 if (de_iir & DE_EDP_PSR_INT_HSW) { 1998 u32 psr_iir = I915_READ(EDP_PSR_IIR); 1999 2000
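/* Handle the sampled PSR events first; they are acked afterwards by writing the same bits back to EDP_PSR_IIR. */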
intel_psr_irq_handler(dev_priv, psr_iir); 2001 I915_WRITE(EDP_PSR_IIR, psr_iir); 2002 } 2003 2004 if (de_iir & DE_AUX_CHANNEL_A_IVB) 2005 dp_aux_irq_handler(dev_priv); 2006 2007 if (de_iir & DE_GSE_IVB) 2008 intel_opregion_asle_intr(dev_priv); 2009 2010 for_each_pipe(dev_priv, pipe) { 2011 if (de_iir & (DE_PIPE_VBLANK_IVB(pipe))) 2012 drm_handle_vblank(&dev_priv->drm, pipe); 2013 } 2014 2015 /* check event from PCH */ 2016 if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) { 2017 u32 pch_iir = I915_READ(SDEIIR); 2018 2019 cpt_irq_handler(dev_priv, pch_iir); 2020 2021 /* clear the PCH hotplug event before clearing the CPU irq */ 2022 I915_WRITE(SDEIIR, pch_iir); 2023 } 2024 } 2025 2026 /* 2027 * To handle irqs with the minimum potential races with fresh interrupts, we: 2028 * 1 - Disable Master Interrupt Control. 2029 * 2 - Find the source(s) of the interrupt. 2030 * 3 - Clear the Interrupt Identity bits (IIR). 2031 * 4 - Process the interrupt(s) that had bits set in the IIRs. 2032 * 5 - Re-enable Master Interrupt Control. 2033 */ 2034 static irqreturn_t ironlake_irq_handler(int irq, void *arg) 2035 { 2036 struct drm_i915_private *dev_priv = arg; 2037 u32 de_iir, gt_iir, de_ier, sde_ier = 0; 2038 irqreturn_t ret = IRQ_NONE; 2039 2040 if (!intel_irqs_enabled(dev_priv)) 2041 return IRQ_NONE; 2042 2043 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2044 disable_rpm_wakeref_asserts(&dev_priv->runtime_pm); 2045 2046 /* disable master interrupt before clearing iir */ 2047 de_ier = I915_READ(DEIER); 2048 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); 2049 2050 /* Disable south interrupts. We'll only write to SDEIIR once, so further 2051 * interrupts will be stored on its back queue, and then we'll be 2052 * able to process them after we restore SDEIER (as soon as we restore 2053 * it, we'll get an interrupt if SDEIIR still has something to process 2054 * due to its back queue).
*/ 2055 if (!HAS_PCH_NOP(dev_priv)) { 2056 sde_ier = I915_READ(SDEIER); 2057 I915_WRITE(SDEIER, 0); 2058 } 2059 2060 /* Find, clear, then process each source of interrupt */ 2061 2062 gt_iir = I915_READ(GTIIR); 2063 if (gt_iir) { 2064 I915_WRITE(GTIIR, gt_iir); 2065 ret = IRQ_HANDLED; 2066 if (INTEL_GEN(dev_priv) >= 6) 2067 gen6_gt_irq_handler(&dev_priv->gt, gt_iir); 2068 else 2069 gen5_gt_irq_handler(&dev_priv->gt, gt_iir); 2070 } 2071 2072 de_iir = I915_READ(DEIIR); 2073 if (de_iir) { 2074 I915_WRITE(DEIIR, de_iir); 2075 ret = IRQ_HANDLED; 2076 if (INTEL_GEN(dev_priv) >= 7) 2077 ivb_display_irq_handler(dev_priv, de_iir); 2078 else 2079 ilk_display_irq_handler(dev_priv, de_iir); 2080 } 2081 2082 if (INTEL_GEN(dev_priv) >= 6) { 2083 u32 pm_iir = I915_READ(GEN6_PMIIR); 2084 if (pm_iir) { 2085 I915_WRITE(GEN6_PMIIR, pm_iir); 2086 ret = IRQ_HANDLED; 2087 gen6_rps_irq_handler(&dev_priv->gt.rps, pm_iir); 2088 } 2089 } 2090 2091 I915_WRITE(DEIER, de_ier); 2092 if (!HAS_PCH_NOP(dev_priv)) 2093 I915_WRITE(SDEIER, sde_ier); 2094 2095 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2096 enable_rpm_wakeref_asserts(&dev_priv->runtime_pm); 2097 2098 return ret; 2099 } 2100 2101 static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv, 2102 u32 hotplug_trigger, 2103 const u32 hpd[HPD_NUM_PINS]) 2104 { 2105 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; 2106 2107 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); 2108 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); 2109 2110 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger, 2111 dig_hotplug_reg, hpd, 2112 bxt_port_hotplug_long_detect); 2113 2114 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2115 } 2116 2117 static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir) 2118 { 2119 u32 pin_mask = 0, long_mask = 0; 2120 u32 trigger_tc = iir & GEN11_DE_TC_HOTPLUG_MASK; 2121 u32 trigger_tbt = iir & GEN11_DE_TBT_HOTPLUG_MASK; 2122 long_pulse_detect_func long_pulse_detect; 2123 const u32 *hpd; 2124 2125 if (INTEL_GEN(dev_priv) >= 12) { 2126 long_pulse_detect = gen12_port_hotplug_long_detect; 2127 hpd = hpd_gen12; 2128 } else { 2129 long_pulse_detect = gen11_port_hotplug_long_detect; 2130 hpd = hpd_gen11; 2131 } 2132 2133 if (trigger_tc) { 2134 u32 dig_hotplug_reg; 2135 2136 dig_hotplug_reg = I915_READ(GEN11_TC_HOTPLUG_CTL); 2137 I915_WRITE(GEN11_TC_HOTPLUG_CTL, dig_hotplug_reg); 2138 2139 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tc, 2140 dig_hotplug_reg, hpd, long_pulse_detect); 2141 } 2142 2143 if (trigger_tbt) { 2144 u32 dig_hotplug_reg; 2145 2146 dig_hotplug_reg = I915_READ(GEN11_TBT_HOTPLUG_CTL); 2147 I915_WRITE(GEN11_TBT_HOTPLUG_CTL, dig_hotplug_reg); 2148 2149 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tbt, 2150 dig_hotplug_reg, hpd, long_pulse_detect); 2151 } 2152 2153 if (pin_mask) 2154 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2155 else 2156 DRM_ERROR("Unexpected DE HPD interrupt 0x%08x\n", iir); 2157 } 2158 2159 static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv) 2160 { 2161 u32 mask; 2162 2163 if (INTEL_GEN(dev_priv) >= 12) 2164 return TGL_DE_PORT_AUX_DDIA | 2165 TGL_DE_PORT_AUX_DDIB | 2166 TGL_DE_PORT_AUX_DDIC | 2167 TGL_DE_PORT_AUX_USBC1 | 2168 TGL_DE_PORT_AUX_USBC2 | 2169 TGL_DE_PORT_AUX_USBC3 | 2170 TGL_DE_PORT_AUX_USBC4 | 2171 TGL_DE_PORT_AUX_USBC5 | 2172 TGL_DE_PORT_AUX_USBC6; 2173 2174 2175 mask = GEN8_AUX_CHANNEL_A; 2176 if (INTEL_GEN(dev_priv) >= 9) 2177 mask |= GEN9_AUX_CHANNEL_B | 2178 
GEN9_AUX_CHANNEL_C | 2179 GEN9_AUX_CHANNEL_D; 2180 2181 if (IS_CNL_WITH_PORT_F(dev_priv) || IS_GEN(dev_priv, 11)) 2182 mask |= CNL_AUX_CHANNEL_F; 2183 2184 if (IS_GEN(dev_priv, 11)) 2185 mask |= ICL_AUX_CHANNEL_E; 2186 2187 return mask; 2188 } 2189 2190 static u32 gen8_de_pipe_fault_mask(struct drm_i915_private *dev_priv) 2191 { 2192 if (INTEL_GEN(dev_priv) >= 11) 2193 return GEN11_DE_PIPE_IRQ_FAULT_ERRORS; 2194 else if (INTEL_GEN(dev_priv) >= 9) 2195 return GEN9_DE_PIPE_IRQ_FAULT_ERRORS; 2196 else 2197 return GEN8_DE_PIPE_IRQ_FAULT_ERRORS; 2198 } 2199 2200 static void 2201 gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir) 2202 { 2203 bool found = false; 2204 2205 if (iir & GEN8_DE_MISC_GSE) { 2206 intel_opregion_asle_intr(dev_priv); 2207 found = true; 2208 } 2209 2210 if (iir & GEN8_DE_EDP_PSR) { 2211 u32 psr_iir; 2212 i915_reg_t iir_reg; 2213 2214 if (INTEL_GEN(dev_priv) >= 12) 2215 iir_reg = TRANS_PSR_IIR(dev_priv->psr.transcoder); 2216 else 2217 iir_reg = EDP_PSR_IIR; 2218 2219 psr_iir = I915_READ(iir_reg); 2220 I915_WRITE(iir_reg, psr_iir); 2221 2222 if (psr_iir) 2223 found = true; 2224 2225 intel_psr_irq_handler(dev_priv, psr_iir); 2226 } 2227 2228 if (!found) 2229 DRM_ERROR("Unexpected DE Misc interrupt\n"); 2230 } 2231 2232 static irqreturn_t 2233 gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) 2234 { 2235 irqreturn_t ret = IRQ_NONE; 2236 u32 iir; 2237 enum pipe pipe; 2238 2239 if (master_ctl & GEN8_DE_MISC_IRQ) { 2240 iir = I915_READ(GEN8_DE_MISC_IIR); 2241 if (iir) { 2242 I915_WRITE(GEN8_DE_MISC_IIR, iir); 2243 ret = IRQ_HANDLED; 2244 gen8_de_misc_irq_handler(dev_priv, iir); 2245 } else { 2246 DRM_ERROR("The master control interrupt lied (DE MISC)!\n"); 2247 } 2248 } 2249 2250 if (INTEL_GEN(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) { 2251 iir = I915_READ(GEN11_DE_HPD_IIR); 2252 if (iir) { 2253 I915_WRITE(GEN11_DE_HPD_IIR, iir); 2254 ret = IRQ_HANDLED; 2255 gen11_hpd_irq_handler(dev_priv, iir); 2256 } else { 2257 DRM_ERROR("The master control interrupt lied (DE HPD)!\n"); 2258 } 2259 } 2260 2261 if (master_ctl & GEN8_DE_PORT_IRQ) { 2262 iir = I915_READ(GEN8_DE_PORT_IIR); 2263 if (iir) { 2264 u32 tmp_mask; 2265 bool found = false; 2266 2267 I915_WRITE(GEN8_DE_PORT_IIR, iir); 2268 ret = IRQ_HANDLED; 2269 2270 if (iir & gen8_de_port_aux_mask(dev_priv)) { 2271 dp_aux_irq_handler(dev_priv); 2272 found = true; 2273 } 2274 2275 if (IS_GEN9_LP(dev_priv)) { 2276 tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK; 2277 if (tmp_mask) { 2278 bxt_hpd_irq_handler(dev_priv, tmp_mask, 2279 hpd_bxt); 2280 found = true; 2281 } 2282 } else if (IS_BROADWELL(dev_priv)) { 2283 tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG; 2284 if (tmp_mask) { 2285 ilk_hpd_irq_handler(dev_priv, 2286 tmp_mask, hpd_bdw); 2287 found = true; 2288 } 2289 } 2290 2291 if (IS_GEN9_LP(dev_priv) && (iir & BXT_DE_PORT_GMBUS)) { 2292 gmbus_irq_handler(dev_priv); 2293 found = true; 2294 } 2295 2296 if (!found) 2297 DRM_ERROR("Unexpected DE Port interrupt\n"); 2298 } else { 2299 DRM_ERROR("The master control interrupt lied (DE PORT)!\n"); 2300 } 2301 } 2302 2303 for_each_pipe(dev_priv, pipe) { 2304 u32 fault_errors; 2305 2306 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe))) 2307 continue; 2308 2309 iir = I915_READ(GEN8_DE_PIPE_IIR(pipe)); 2310 if (!iir) { 2311 DRM_ERROR("The master control interrupt lied (DE PIPE)!\n"); 2312 continue; 2313 } 2314 2315 ret = IRQ_HANDLED; 2316 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir); 2317 2318 if (iir & GEN8_PIPE_VBLANK) 2319 drm_handle_vblank(&dev_priv->drm, pipe); 2320
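/* The remaining bits of the sampled per-pipe IIR (CRC done, FIFO underrun, fault errors) are decoded below. */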
2321 if (iir & GEN8_PIPE_CDCLK_CRC_DONE) 2322 hsw_pipe_crc_irq_handler(dev_priv, pipe); 2323 2324 if (iir & GEN8_PIPE_FIFO_UNDERRUN) 2325 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2326 2327 fault_errors = iir & gen8_de_pipe_fault_mask(dev_priv); 2328 if (fault_errors) 2329 DRM_ERROR("Fault errors on pipe %c: 0x%08x\n", 2330 pipe_name(pipe), 2331 fault_errors); 2332 } 2333 2334 if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) && 2335 master_ctl & GEN8_DE_PCH_IRQ) { 2336 /* 2337 * FIXME(BDW): Assume for now that the new interrupt handling 2338 * scheme also closed the SDE interrupt handling race we've seen 2339 * on older pch-split platforms. But this needs testing. 2340 */ 2341 iir = I915_READ(SDEIIR); 2342 if (iir) { 2343 I915_WRITE(SDEIIR, iir); 2344 ret = IRQ_HANDLED; 2345 2346 if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) 2347 icp_irq_handler(dev_priv, iir); 2348 else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT) 2349 spt_irq_handler(dev_priv, iir); 2350 else 2351 cpt_irq_handler(dev_priv, iir); 2352 } else { 2353 /* 2354 * Like on previous PCH there seems to be something 2355 * fishy going on with forwarding PCH interrupts. 2356 */ 2357 DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n"); 2358 } 2359 } 2360 2361 return ret; 2362 } 2363 2364 static inline u32 gen8_master_intr_disable(void __iomem * const regs) 2365 { 2366 raw_reg_write(regs, GEN8_MASTER_IRQ, 0); 2367 2368 /* 2369 * Now with master disabled, get a sample of level indications 2370 * for this interrupt. Indications will be cleared on related acks. 2371 * New indications can and will light up during processing, 2372 * and will generate new interrupt after enabling master. 2373 */ 2374 return raw_reg_read(regs, GEN8_MASTER_IRQ); 2375 } 2376 2377 static inline void gen8_master_intr_enable(void __iomem * const regs) 2378 { 2379 raw_reg_write(regs, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 2380 } 2381 2382 static irqreturn_t gen8_irq_handler(int irq, void *arg) 2383 { 2384 struct drm_i915_private *dev_priv = arg; 2385 void __iomem * const regs = dev_priv->uncore.regs; 2386 u32 master_ctl; 2387 u32 gt_iir[4]; 2388 2389 if (!intel_irqs_enabled(dev_priv)) 2390 return IRQ_NONE; 2391 2392 master_ctl = gen8_master_intr_disable(regs); 2393 if (!master_ctl) { 2394 gen8_master_intr_enable(regs); 2395 return IRQ_NONE; 2396 } 2397 2398 /* Find, clear, then process each source of interrupt */ 2399 gen8_gt_irq_ack(&dev_priv->gt, master_ctl, gt_iir); 2400 2401 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2402 if (master_ctl & ~GEN8_GT_IRQS) { 2403 disable_rpm_wakeref_asserts(&dev_priv->runtime_pm); 2404 gen8_de_irq_handler(dev_priv, master_ctl); 2405 enable_rpm_wakeref_asserts(&dev_priv->runtime_pm); 2406 } 2407 2408 gen8_master_intr_enable(regs); 2409 2410 gen8_gt_irq_handler(&dev_priv->gt, master_ctl, gt_iir); 2411 2412 return IRQ_HANDLED; 2413 } 2414 2415 static u32 2416 gen11_gu_misc_irq_ack(struct intel_gt *gt, const u32 master_ctl) 2417 { 2418 void __iomem * const regs = gt->uncore->regs; 2419 u32 iir; 2420 2421 if (!(master_ctl & GEN11_GU_MISC_IRQ)) 2422 return 0; 2423 2424 iir = raw_reg_read(regs, GEN11_GU_MISC_IIR); 2425 if (likely(iir)) 2426 raw_reg_write(regs, GEN11_GU_MISC_IIR, iir); 2427 2428 return iir; 2429 } 2430 2431 static void 2432 gen11_gu_misc_irq_handler(struct intel_gt *gt, const u32 iir) 2433 { 2434 if (iir & GEN11_GU_MISC_GSE) 2435 intel_opregion_asle_intr(gt->i915); 2436 } 2437 2438 static inline u32 gen11_master_intr_disable(void __iomem * const regs) 2439 { 2440 
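/* Gate all graphics interrupts at the top level first, so the read-back below samples a stable snapshot. */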
raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0); 2441 2442 /* 2443 * Now with master disabled, get a sample of level indications 2444 * for this interrupt. Indications will be cleared on related acks. 2445 * New indications can and will light up during processing, 2446 * and will generate new interrupt after enabling master. 2447 */ 2448 return raw_reg_read(regs, GEN11_GFX_MSTR_IRQ); 2449 } 2450 2451 static inline void gen11_master_intr_enable(void __iomem * const regs) 2452 { 2453 raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ); 2454 } 2455 2456 static __always_inline irqreturn_t 2457 __gen11_irq_handler(struct drm_i915_private * const i915, 2458 u32 (*intr_disable)(void __iomem * const regs), 2459 void (*intr_enable)(void __iomem * const regs)) 2460 { 2461 void __iomem * const regs = i915->uncore.regs; 2462 struct intel_gt *gt = &i915->gt; 2463 u32 master_ctl; 2464 u32 gu_misc_iir; 2465 2466 if (!intel_irqs_enabled(i915)) 2467 return IRQ_NONE; 2468 2469 master_ctl = intr_disable(regs); 2470 if (!master_ctl) { 2471 intr_enable(regs); 2472 return IRQ_NONE; 2473 } 2474 2475 /* Find, clear, then process each source of interrupt. */ 2476 gen11_gt_irq_handler(gt, master_ctl); 2477 2478 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2479 if (master_ctl & GEN11_DISPLAY_IRQ) { 2480 const u32 disp_ctl = raw_reg_read(regs, GEN11_DISPLAY_INT_CTL); 2481 2482 disable_rpm_wakeref_asserts(&i915->runtime_pm); 2483 /* 2484 * GEN11_DISPLAY_INT_CTL has same format as GEN8_MASTER_IRQ 2485 * for the display related bits. 2486 */ 2487 gen8_de_irq_handler(i915, disp_ctl); 2488 enable_rpm_wakeref_asserts(&i915->runtime_pm); 2489 } 2490 2491 gu_misc_iir = gen11_gu_misc_irq_ack(gt, master_ctl); 2492 2493 intr_enable(regs); 2494 2495 gen11_gu_misc_irq_handler(gt, gu_misc_iir); 2496 2497 return IRQ_HANDLED; 2498 } 2499 2500 static irqreturn_t gen11_irq_handler(int irq, void *arg) 2501 { 2502 return __gen11_irq_handler(arg, 2503 gen11_master_intr_disable, 2504 gen11_master_intr_enable); 2505 } 2506 2507 /* Called from drm generic code, passed 'crtc' which 2508 * we use as a pipe index 2509 */ 2510 int i8xx_enable_vblank(struct drm_crtc *crtc) 2511 { 2512 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 2513 enum pipe pipe = to_intel_crtc(crtc)->pipe; 2514 unsigned long irqflags; 2515 2516 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2517 i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS); 2518 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2519 2520 return 0; 2521 } 2522 2523 int i915gm_enable_vblank(struct drm_crtc *crtc) 2524 { 2525 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 2526 2527 /* 2528 * Vblank interrupts fail to wake the device up from C2+. 2529 * Disabling render clock gating during C-states avoids 2530 * the problem. There is a small power cost so we do this 2531 * only when vblank interrupts are actually enabled. 
2532 */ 2533 if (dev_priv->vblank_enabled++ == 0) 2534 I915_WRITE(SCPD0, _MASKED_BIT_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE)); 2535 2536 return i8xx_enable_vblank(crtc); 2537 } 2538 2539 int i965_enable_vblank(struct drm_crtc *crtc) 2540 { 2541 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 2542 enum pipe pipe = to_intel_crtc(crtc)->pipe; 2543 unsigned long irqflags; 2544 2545 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2546 i915_enable_pipestat(dev_priv, pipe, 2547 PIPE_START_VBLANK_INTERRUPT_STATUS); 2548 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2549 2550 return 0; 2551 } 2552 2553 int ilk_enable_vblank(struct drm_crtc *crtc) 2554 { 2555 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 2556 enum pipe pipe = to_intel_crtc(crtc)->pipe; 2557 unsigned long irqflags; 2558 u32 bit = INTEL_GEN(dev_priv) >= 7 ? 2559 DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe); 2560 2561 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2562 ilk_enable_display_irq(dev_priv, bit); 2563 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2564 2565 /* Even though there is no DMC, frame counter can get stuck when 2566 * PSR is active as no frames are generated. 2567 */ 2568 if (HAS_PSR(dev_priv)) 2569 drm_crtc_vblank_restore(crtc); 2570 2571 return 0; 2572 } 2573 2574 int bdw_enable_vblank(struct drm_crtc *crtc) 2575 { 2576 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 2577 enum pipe pipe = to_intel_crtc(crtc)->pipe; 2578 unsigned long irqflags; 2579 2580 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2581 bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK); 2582 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2583 2584 /* Even if there is no DMC, frame counter can get stuck when 2585 * PSR is active as no frames are generated, so check only for PSR. 2586 */ 2587 if (HAS_PSR(dev_priv)) 2588 drm_crtc_vblank_restore(crtc); 2589 2590 return 0; 2591 } 2592 2593 /* Called from drm generic code, passed 'crtc' which 2594 * we use as a pipe index 2595 */ 2596 void i8xx_disable_vblank(struct drm_crtc *crtc) 2597 { 2598 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 2599 enum pipe pipe = to_intel_crtc(crtc)->pipe; 2600 unsigned long irqflags; 2601 2602 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2603 i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS); 2604 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2605 } 2606 2607 void i915gm_disable_vblank(struct drm_crtc *crtc) 2608 { 2609 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 2610 2611 i8xx_disable_vblank(crtc); 2612 2613 if (--dev_priv->vblank_enabled == 0) 2614 I915_WRITE(SCPD0, _MASKED_BIT_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE)); 2615 } 2616 2617 void i965_disable_vblank(struct drm_crtc *crtc) 2618 { 2619 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 2620 enum pipe pipe = to_intel_crtc(crtc)->pipe; 2621 unsigned long irqflags; 2622 2623 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2624 i915_disable_pipestat(dev_priv, pipe, 2625 PIPE_START_VBLANK_INTERRUPT_STATUS); 2626 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2627 } 2628 2629 void ilk_disable_vblank(struct drm_crtc *crtc) 2630 { 2631 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 2632 enum pipe pipe = to_intel_crtc(crtc)->pipe; 2633 unsigned long irqflags; 2634 u32 bit = INTEL_GEN(dev_priv) >= 7 ? 
2635 DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe); 2636 2637 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2638 ilk_disable_display_irq(dev_priv, bit); 2639 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2640 } 2641 2642 void bdw_disable_vblank(struct drm_crtc *crtc) 2643 { 2644 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 2645 enum pipe pipe = to_intel_crtc(crtc)->pipe; 2646 unsigned long irqflags; 2647 2648 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2649 bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK); 2650 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2651 } 2652 2653 static void ibx_irq_reset(struct drm_i915_private *dev_priv) 2654 { 2655 struct intel_uncore *uncore = &dev_priv->uncore; 2656 2657 if (HAS_PCH_NOP(dev_priv)) 2658 return; 2659 2660 GEN3_IRQ_RESET(uncore, SDE); 2661 2662 if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv)) 2663 I915_WRITE(SERR_INT, 0xffffffff); 2664 } 2665 2666 /* 2667 * SDEIER is also touched by the interrupt handler to work around missed PCH 2668 * interrupts. Hence we can't update it after the interrupt handler is enabled - 2669 * instead we unconditionally enable all PCH interrupt sources here, but then 2670 * only unmask them as needed with SDEIMR. 2671 * 2672 * This function needs to be called before interrupts are enabled. 2673 */ 2674 static void ibx_irq_pre_postinstall(struct drm_i915_private *dev_priv) 2675 { 2676 if (HAS_PCH_NOP(dev_priv)) 2677 return; 2678 2679 WARN_ON(I915_READ(SDEIER) != 0); 2680 I915_WRITE(SDEIER, 0xffffffff); 2681 POSTING_READ(SDEIER); 2682 } 2683 2684 static void vlv_display_irq_reset(struct drm_i915_private *dev_priv) 2685 { 2686 struct intel_uncore *uncore = &dev_priv->uncore; 2687 2688 if (IS_CHERRYVIEW(dev_priv)) 2689 intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_CHV); 2690 else 2691 intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK); 2692 2693 i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0); 2694 intel_uncore_write(uncore, PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 2695 2696 i9xx_pipestat_irq_reset(dev_priv); 2697 2698 GEN3_IRQ_RESET(uncore, VLV_); 2699 dev_priv->irq_mask = ~0u; 2700 } 2701 2702 static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv) 2703 { 2704 struct intel_uncore *uncore = &dev_priv->uncore; 2705 2706 u32 pipestat_mask; 2707 u32 enable_mask; 2708 enum pipe pipe; 2709 2710 pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS; 2711 2712 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); 2713 for_each_pipe(dev_priv, pipe) 2714 i915_enable_pipestat(dev_priv, pipe, pipestat_mask); 2715 2716 enable_mask = I915_DISPLAY_PORT_INTERRUPT | 2717 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 2718 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 2719 I915_LPE_PIPE_A_INTERRUPT | 2720 I915_LPE_PIPE_B_INTERRUPT; 2721 2722 if (IS_CHERRYVIEW(dev_priv)) 2723 enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT | 2724 I915_LPE_PIPE_C_INTERRUPT; 2725 2726 WARN_ON(dev_priv->irq_mask != ~0u); 2727 2728 dev_priv->irq_mask = ~enable_mask; 2729 2730 GEN3_IRQ_INIT(uncore, VLV_, dev_priv->irq_mask, enable_mask); 2731 } 2732 2733 /* drm_dma.h hooks 2734 */ 2735 static void ironlake_irq_reset(struct drm_i915_private *dev_priv) 2736 { 2737 struct intel_uncore *uncore = &dev_priv->uncore; 2738 2739 GEN3_IRQ_RESET(uncore, DE); 2740 if (IS_GEN(dev_priv, 7)) 2741 intel_uncore_write(uncore, GEN7_ERR_INT, 0xffffffff); 2742 2743 if (IS_HASWELL(dev_priv)) { 2744 intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff); 2745 
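/* Also clear any PSR events already latched in the IIR. */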
intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff); 2746 } 2747 2748 gen5_gt_irq_reset(&dev_priv->gt); 2749 2750 ibx_irq_reset(dev_priv); 2751 } 2752 2753 static void valleyview_irq_reset(struct drm_i915_private *dev_priv) 2754 { 2755 I915_WRITE(VLV_MASTER_IER, 0); 2756 POSTING_READ(VLV_MASTER_IER); 2757 2758 gen5_gt_irq_reset(&dev_priv->gt); 2759 2760 spin_lock_irq(&dev_priv->irq_lock); 2761 if (dev_priv->display_irqs_enabled) 2762 vlv_display_irq_reset(dev_priv); 2763 spin_unlock_irq(&dev_priv->irq_lock); 2764 } 2765 2766 static void gen8_irq_reset(struct drm_i915_private *dev_priv) 2767 { 2768 struct intel_uncore *uncore = &dev_priv->uncore; 2769 enum pipe pipe; 2770 2771 gen8_master_intr_disable(dev_priv->uncore.regs); 2772 2773 gen8_gt_irq_reset(&dev_priv->gt); 2774 2775 intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff); 2776 intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff); 2777 2778 for_each_pipe(dev_priv, pipe) 2779 if (intel_display_power_is_enabled(dev_priv, 2780 POWER_DOMAIN_PIPE(pipe))) 2781 GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe); 2782 2783 GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_); 2784 GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_); 2785 GEN3_IRQ_RESET(uncore, GEN8_PCU_); 2786 2787 if (HAS_PCH_SPLIT(dev_priv)) 2788 ibx_irq_reset(dev_priv); 2789 } 2790 2791 static void gen11_irq_reset(struct drm_i915_private *dev_priv) 2792 { 2793 struct intel_uncore *uncore = &dev_priv->uncore; 2794 enum pipe pipe; 2795 2796 gen11_master_intr_disable(dev_priv->uncore.regs); 2797 2798 gen11_gt_irq_reset(&dev_priv->gt); 2799 2800 intel_uncore_write(uncore, GEN11_DISPLAY_INT_CTL, 0); 2801 2802 if (INTEL_GEN(dev_priv) >= 12) { 2803 enum transcoder trans; 2804 2805 for (trans = TRANSCODER_A; trans <= TRANSCODER_D; trans++) { 2806 enum intel_display_power_domain domain; 2807 2808 domain = POWER_DOMAIN_TRANSCODER(trans); 2809 if (!intel_display_power_is_enabled(dev_priv, domain)) 2810 continue; 2811 2812 intel_uncore_write(uncore, TRANS_PSR_IMR(trans), 0xffffffff); 2813 intel_uncore_write(uncore, TRANS_PSR_IIR(trans), 0xffffffff); 2814 } 2815 } else { 2816 intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff); 2817 intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff); 2818 } 2819 2820 for_each_pipe(dev_priv, pipe) 2821 if (intel_display_power_is_enabled(dev_priv, 2822 POWER_DOMAIN_PIPE(pipe))) 2823 GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe); 2824 2825 GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_); 2826 GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_); 2827 GEN3_IRQ_RESET(uncore, GEN11_DE_HPD_); 2828 GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_); 2829 GEN3_IRQ_RESET(uncore, GEN8_PCU_); 2830 2831 if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) 2832 GEN3_IRQ_RESET(uncore, SDE); 2833 } 2834 2835 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv, 2836 u8 pipe_mask) 2837 { 2838 struct intel_uncore *uncore = &dev_priv->uncore; 2839 2840 u32 extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN; 2841 enum pipe pipe; 2842 2843 spin_lock_irq(&dev_priv->irq_lock); 2844 2845 if (!intel_irqs_enabled(dev_priv)) { 2846 spin_unlock_irq(&dev_priv->irq_lock); 2847 return; 2848 } 2849 2850 for_each_pipe_masked(dev_priv, pipe, pipe_mask) 2851 GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe, 2852 dev_priv->de_irq_mask[pipe], 2853 ~dev_priv->de_irq_mask[pipe] | extra_ier); 2854 2855 spin_unlock_irq(&dev_priv->irq_lock); 2856 } 2857 2858 void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv, 2859 u8 pipe_mask) 2860 { 2861 struct intel_uncore *uncore = &dev_priv->uncore; 2862 enum pipe pipe; 2863 2864 
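/* The IRQ lock keeps this reset from racing with concurrent updates of the pipe IRQ masks while the power well goes down. */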
spin_lock_irq(&dev_priv->irq_lock); 2865 2866 if (!intel_irqs_enabled(dev_priv)) { 2867 spin_unlock_irq(&dev_priv->irq_lock); 2868 return; 2869 } 2870 2871 for_each_pipe_masked(dev_priv, pipe, pipe_mask) 2872 GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe); 2873 2874 spin_unlock_irq(&dev_priv->irq_lock); 2875 2876 /* make sure we're done processing display irqs */ 2877 intel_synchronize_irq(dev_priv); 2878 } 2879 2880 static void cherryview_irq_reset(struct drm_i915_private *dev_priv) 2881 { 2882 struct intel_uncore *uncore = &dev_priv->uncore; 2883 2884 I915_WRITE(GEN8_MASTER_IRQ, 0); 2885 POSTING_READ(GEN8_MASTER_IRQ); 2886 2887 gen8_gt_irq_reset(&dev_priv->gt); 2888 2889 GEN3_IRQ_RESET(uncore, GEN8_PCU_); 2890 2891 spin_lock_irq(&dev_priv->irq_lock); 2892 if (dev_priv->display_irqs_enabled) 2893 vlv_display_irq_reset(dev_priv); 2894 spin_unlock_irq(&dev_priv->irq_lock); 2895 } 2896 2897 static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv, 2898 const u32 hpd[HPD_NUM_PINS]) 2899 { 2900 struct intel_encoder *encoder; 2901 u32 enabled_irqs = 0; 2902 2903 for_each_intel_encoder(&dev_priv->drm, encoder) 2904 if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED) 2905 enabled_irqs |= hpd[encoder->hpd_pin]; 2906 2907 return enabled_irqs; 2908 } 2909 2910 static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv) 2911 { 2912 u32 hotplug; 2913 2914 /* 2915 * Enable digital hotplug on the PCH, and configure the DP short pulse 2916 * duration to 2ms (which is the minimum in the Display Port spec). 2917 * The pulse duration bits are reserved on LPT+. 2918 */ 2919 hotplug = I915_READ(PCH_PORT_HOTPLUG); 2920 hotplug &= ~(PORTB_PULSE_DURATION_MASK | 2921 PORTC_PULSE_DURATION_MASK | 2922 PORTD_PULSE_DURATION_MASK); 2923 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms; 2924 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms; 2925 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms; 2926 /* 2927 * When CPU and PCH are on the same package, port A 2928 * HPD must be enabled in both north and south. 
2929 */ 2930 if (HAS_PCH_LPT_LP(dev_priv)) 2931 hotplug |= PORTA_HOTPLUG_ENABLE; 2932 I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 2933 } 2934 2935 static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv) 2936 { 2937 u32 hotplug_irqs, enabled_irqs; 2938 2939 if (HAS_PCH_IBX(dev_priv)) { 2940 hotplug_irqs = SDE_HOTPLUG_MASK; 2941 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ibx); 2942 } else { 2943 hotplug_irqs = SDE_HOTPLUG_MASK_CPT; 2944 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_cpt); 2945 } 2946 2947 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 2948 2949 ibx_hpd_detection_setup(dev_priv); 2950 } 2951 2952 static void icp_hpd_detection_setup(struct drm_i915_private *dev_priv, 2953 u32 ddi_hotplug_enable_mask, 2954 u32 tc_hotplug_enable_mask) 2955 { 2956 u32 hotplug; 2957 2958 hotplug = I915_READ(SHOTPLUG_CTL_DDI); 2959 hotplug |= ddi_hotplug_enable_mask; 2960 I915_WRITE(SHOTPLUG_CTL_DDI, hotplug); 2961 2962 if (tc_hotplug_enable_mask) { 2963 hotplug = I915_READ(SHOTPLUG_CTL_TC); 2964 hotplug |= tc_hotplug_enable_mask; 2965 I915_WRITE(SHOTPLUG_CTL_TC, hotplug); 2966 } 2967 } 2968 2969 static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv, 2970 u32 sde_ddi_mask, u32 sde_tc_mask, 2971 u32 ddi_enable_mask, u32 tc_enable_mask, 2972 const u32 *pins) 2973 { 2974 u32 hotplug_irqs, enabled_irqs; 2975 2976 hotplug_irqs = sde_ddi_mask | sde_tc_mask; 2977 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, pins); 2978 2979 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 2980 2981 icp_hpd_detection_setup(dev_priv, ddi_enable_mask, tc_enable_mask); 2982 } 2983 2984 /* 2985 * EHL doesn't need most of gen11_hpd_irq_setup, it's handling only the 2986 * equivalent of SDE. 2987 */ 2988 static void mcc_hpd_irq_setup(struct drm_i915_private *dev_priv) 2989 { 2990 icp_hpd_irq_setup(dev_priv, 2991 SDE_DDI_MASK_ICP, SDE_TC_HOTPLUG_ICP(PORT_TC1), 2992 ICP_DDI_HPD_ENABLE_MASK, ICP_TC_HPD_ENABLE(PORT_TC1), 2993 hpd_icp); 2994 } 2995 2996 /* 2997 * JSP behaves exactly the same as MCC above except that port C is mapped to 2998 * the DDI-C pins instead of the TC1 pins. This means we should follow TGP's 2999 * masks & tables rather than ICP's masks & tables. 3000 */ 3001 static void jsp_hpd_irq_setup(struct drm_i915_private *dev_priv) 3002 { 3003 icp_hpd_irq_setup(dev_priv, 3004 SDE_DDI_MASK_TGP, 0, 3005 TGP_DDI_HPD_ENABLE_MASK, 0, 3006 hpd_tgp); 3007 } 3008 3009 static void gen11_hpd_detection_setup(struct drm_i915_private *dev_priv) 3010 { 3011 u32 hotplug; 3012 3013 hotplug = I915_READ(GEN11_TC_HOTPLUG_CTL); 3014 hotplug |= GEN11_HOTPLUG_CTL_ENABLE(PORT_TC1) | 3015 GEN11_HOTPLUG_CTL_ENABLE(PORT_TC2) | 3016 GEN11_HOTPLUG_CTL_ENABLE(PORT_TC3) | 3017 GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4); 3018 I915_WRITE(GEN11_TC_HOTPLUG_CTL, hotplug); 3019 3020 hotplug = I915_READ(GEN11_TBT_HOTPLUG_CTL); 3021 hotplug |= GEN11_HOTPLUG_CTL_ENABLE(PORT_TC1) | 3022 GEN11_HOTPLUG_CTL_ENABLE(PORT_TC2) | 3023 GEN11_HOTPLUG_CTL_ENABLE(PORT_TC3) | 3024 GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4); 3025 I915_WRITE(GEN11_TBT_HOTPLUG_CTL, hotplug); 3026 } 3027 3028 static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv) 3029 { 3030 u32 hotplug_irqs, enabled_irqs; 3031 const u32 *hpd; 3032 u32 val; 3033 3034 hpd = INTEL_GEN(dev_priv) >= 12 ? 
hpd_gen12 : hpd_gen11; 3035 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd); 3036 hotplug_irqs = GEN11_DE_TC_HOTPLUG_MASK | GEN11_DE_TBT_HOTPLUG_MASK; 3037 3038 val = I915_READ(GEN11_DE_HPD_IMR); 3039 val &= ~hotplug_irqs; 3040 I915_WRITE(GEN11_DE_HPD_IMR, val); 3041 POSTING_READ(GEN11_DE_HPD_IMR); 3042 3043 gen11_hpd_detection_setup(dev_priv); 3044 3045 if (INTEL_PCH_TYPE(dev_priv) >= PCH_TGP) 3046 icp_hpd_irq_setup(dev_priv, SDE_DDI_MASK_TGP, SDE_TC_MASK_TGP, 3047 TGP_DDI_HPD_ENABLE_MASK, 3048 TGP_TC_HPD_ENABLE_MASK, hpd_tgp); 3049 else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) 3050 icp_hpd_irq_setup(dev_priv, SDE_DDI_MASK_ICP, SDE_TC_MASK_ICP, 3051 ICP_DDI_HPD_ENABLE_MASK, 3052 ICP_TC_HPD_ENABLE_MASK, hpd_icp); 3053 } 3054 3055 static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv) 3056 { 3057 u32 val, hotplug; 3058 3059 /* Display WA #1179 WaHardHangonHotPlug: cnp */ 3060 if (HAS_PCH_CNP(dev_priv)) { 3061 val = I915_READ(SOUTH_CHICKEN1); 3062 val &= ~CHASSIS_CLK_REQ_DURATION_MASK; 3063 val |= CHASSIS_CLK_REQ_DURATION(0xf); 3064 I915_WRITE(SOUTH_CHICKEN1, val); 3065 } 3066 3067 /* Enable digital hotplug on the PCH */ 3068 hotplug = I915_READ(PCH_PORT_HOTPLUG); 3069 hotplug |= PORTA_HOTPLUG_ENABLE | 3070 PORTB_HOTPLUG_ENABLE | 3071 PORTC_HOTPLUG_ENABLE | 3072 PORTD_HOTPLUG_ENABLE; 3073 I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 3074 3075 hotplug = I915_READ(PCH_PORT_HOTPLUG2); 3076 hotplug |= PORTE_HOTPLUG_ENABLE; 3077 I915_WRITE(PCH_PORT_HOTPLUG2, hotplug); 3078 } 3079 3080 static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv) 3081 { 3082 u32 hotplug_irqs, enabled_irqs; 3083 3084 hotplug_irqs = SDE_HOTPLUG_MASK_SPT; 3085 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_spt); 3086 3087 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 3088 3089 spt_hpd_detection_setup(dev_priv); 3090 } 3091 3092 static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv) 3093 { 3094 u32 hotplug; 3095 3096 /* 3097 * Enable digital hotplug on the CPU, and configure the DP short pulse 3098 * duration to 2ms (which is the minimum in the Display Port spec) 3099 * The pulse duration bits are reserved on HSW+. 
3100 */ 3101 hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL); 3102 hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK; 3103 hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE | 3104 DIGITAL_PORTA_PULSE_DURATION_2ms; 3105 I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug); 3106 } 3107 3108 static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv) 3109 { 3110 u32 hotplug_irqs, enabled_irqs; 3111 3112 if (INTEL_GEN(dev_priv) >= 8) { 3113 hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG; 3114 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bdw); 3115 3116 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs); 3117 } else if (INTEL_GEN(dev_priv) >= 7) { 3118 hotplug_irqs = DE_DP_A_HOTPLUG_IVB; 3119 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ivb); 3120 3121 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs); 3122 } else { 3123 hotplug_irqs = DE_DP_A_HOTPLUG; 3124 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ilk); 3125 3126 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs); 3127 } 3128 3129 ilk_hpd_detection_setup(dev_priv); 3130 3131 ibx_hpd_irq_setup(dev_priv); 3132 } 3133 3134 static void __bxt_hpd_detection_setup(struct drm_i915_private *dev_priv, 3135 u32 enabled_irqs) 3136 { 3137 u32 hotplug; 3138 3139 hotplug = I915_READ(PCH_PORT_HOTPLUG); 3140 hotplug |= PORTA_HOTPLUG_ENABLE | 3141 PORTB_HOTPLUG_ENABLE | 3142 PORTC_HOTPLUG_ENABLE; 3143 3144 DRM_DEBUG_KMS("Invert bit setting: hp_ctl:%x hp_port:%x\n", 3145 hotplug, enabled_irqs); 3146 hotplug &= ~BXT_DDI_HPD_INVERT_MASK; 3147 3148 /* 3149 * For BXT, the invert bit has to be set based on the AOB design 3150 * for the HPD detection logic; update it based on the VBT fields. 3151 */ 3152 if ((enabled_irqs & BXT_DE_PORT_HP_DDIA) && 3153 intel_bios_is_port_hpd_inverted(dev_priv, PORT_A)) 3154 hotplug |= BXT_DDIA_HPD_INVERT; 3155 if ((enabled_irqs & BXT_DE_PORT_HP_DDIB) && 3156 intel_bios_is_port_hpd_inverted(dev_priv, PORT_B)) 3157 hotplug |= BXT_DDIB_HPD_INVERT; 3158 if ((enabled_irqs & BXT_DE_PORT_HP_DDIC) && 3159 intel_bios_is_port_hpd_inverted(dev_priv, PORT_C)) 3160 hotplug |= BXT_DDIC_HPD_INVERT; 3161 3162 I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 3163 } 3164 3165 static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv) 3166 { 3167 __bxt_hpd_detection_setup(dev_priv, BXT_DE_PORT_HOTPLUG_MASK); 3168 } 3169 3170 static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv) 3171 { 3172 u32 hotplug_irqs, enabled_irqs; 3173 3174 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bxt); 3175 hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK; 3176 3177 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs); 3178 3179 __bxt_hpd_detection_setup(dev_priv, enabled_irqs); 3180 } 3181 3182 static void ibx_irq_postinstall(struct drm_i915_private *dev_priv) 3183 { 3184 u32 mask; 3185 3186 if (HAS_PCH_NOP(dev_priv)) 3187 return; 3188 3189 if (HAS_PCH_IBX(dev_priv)) 3190 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON; 3191 else if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv)) 3192 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT; 3193 else 3194 mask = SDE_GMBUS_CPT; 3195 3196 gen3_assert_iir_is_zero(&dev_priv->uncore, SDEIIR); 3197 I915_WRITE(SDEIMR, ~mask); 3198 3199 if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) || 3200 HAS_PCH_LPT(dev_priv)) 3201 ibx_hpd_detection_setup(dev_priv); 3202 else 3203 spt_hpd_detection_setup(dev_priv); 3204 } 3205 3206 static void ironlake_irq_postinstall(struct drm_i915_private *dev_priv) 3207 { 3208 struct intel_uncore *uncore = &dev_priv->uncore; 3209 u32 display_mask, extra_mask; 3210 3211 if (INTEL_GEN(dev_priv)
>= 7) { 3212 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | 3213 DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB); 3214 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB | 3215 DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB | 3216 DE_DP_A_HOTPLUG_IVB); 3217 } else { 3218 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | 3219 DE_AUX_CHANNEL_A | DE_PIPEB_CRC_DONE | 3220 DE_PIPEA_CRC_DONE | DE_POISON); 3221 extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT | 3222 DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN | 3223 DE_DP_A_HOTPLUG); 3224 } 3225 3226 if (IS_HASWELL(dev_priv)) { 3227 gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR); 3228 display_mask |= DE_EDP_PSR_INT_HSW; 3229 } 3230 3231 dev_priv->irq_mask = ~display_mask; 3232 3233 ibx_irq_pre_postinstall(dev_priv); 3234 3235 GEN3_IRQ_INIT(uncore, DE, dev_priv->irq_mask, 3236 display_mask | extra_mask); 3237 3238 gen5_gt_irq_postinstall(&dev_priv->gt); 3239 3240 ilk_hpd_detection_setup(dev_priv); 3241 3242 ibx_irq_postinstall(dev_priv); 3243 3244 if (IS_IRONLAKE_M(dev_priv)) { 3245 /* Enable PCU event interrupts 3246 * 3247 * spinlocking not required here for correctness since interrupt 3248 * setup is guaranteed to run in single-threaded context. But we 3249 * need it to make the assert_spin_locked happy. */ 3250 spin_lock_irq(&dev_priv->irq_lock); 3251 ilk_enable_display_irq(dev_priv, DE_PCU_EVENT); 3252 spin_unlock_irq(&dev_priv->irq_lock); 3253 } 3254 } 3255 3256 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv) 3257 { 3258 lockdep_assert_held(&dev_priv->irq_lock); 3259 3260 if (dev_priv->display_irqs_enabled) 3261 return; 3262 3263 dev_priv->display_irqs_enabled = true; 3264 3265 if (intel_irqs_enabled(dev_priv)) { 3266 vlv_display_irq_reset(dev_priv); 3267 vlv_display_irq_postinstall(dev_priv); 3268 } 3269 } 3270 3271 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv) 3272 { 3273 lockdep_assert_held(&dev_priv->irq_lock); 3274 3275 if (!dev_priv->display_irqs_enabled) 3276 return; 3277 3278 dev_priv->display_irqs_enabled = false; 3279 3280 if (intel_irqs_enabled(dev_priv)) 3281 vlv_display_irq_reset(dev_priv); 3282 } 3283 3284 3285 static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv) 3286 { 3287 gen5_gt_irq_postinstall(&dev_priv->gt); 3288 3289 spin_lock_irq(&dev_priv->irq_lock); 3290 if (dev_priv->display_irqs_enabled) 3291 vlv_display_irq_postinstall(dev_priv); 3292 spin_unlock_irq(&dev_priv->irq_lock); 3293 3294 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); 3295 POSTING_READ(VLV_MASTER_IER); 3296 } 3297 3298 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) 3299 { 3300 struct intel_uncore *uncore = &dev_priv->uncore; 3301 3302 u32 de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE; 3303 u32 de_pipe_enables; 3304 u32 de_port_masked = GEN8_AUX_CHANNEL_A; 3305 u32 de_port_enables; 3306 u32 de_misc_masked = GEN8_DE_EDP_PSR; 3307 enum pipe pipe; 3308 3309 if (INTEL_GEN(dev_priv) <= 10) 3310 de_misc_masked |= GEN8_DE_MISC_GSE; 3311 3312 if (INTEL_GEN(dev_priv) >= 9) { 3313 de_pipe_masked |= GEN9_DE_PIPE_IRQ_FAULT_ERRORS; 3314 de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C | 3315 GEN9_AUX_CHANNEL_D; 3316 if (IS_GEN9_LP(dev_priv)) 3317 de_port_masked |= BXT_DE_PORT_GMBUS; 3318 } else { 3319 de_pipe_masked |= GEN8_DE_PIPE_IRQ_FAULT_ERRORS; 3320 } 3321 3322 if (INTEL_GEN(dev_priv) >= 11) 3323 de_port_masked |= ICL_AUX_CHANNEL_E; 3324 3325 if (IS_CNL_WITH_PORT_F(dev_priv) || INTEL_GEN(dev_priv) >= 11) 3326 de_port_masked |= 
CNL_AUX_CHANNEL_F; 3327 3328 de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK | 3329 GEN8_PIPE_FIFO_UNDERRUN; 3330 3331 de_port_enables = de_port_masked; 3332 if (IS_GEN9_LP(dev_priv)) 3333 de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK; 3334 else if (IS_BROADWELL(dev_priv)) 3335 de_port_enables |= GEN8_PORT_DP_A_HOTPLUG; 3336 3337 if (INTEL_GEN(dev_priv) >= 12) { 3338 enum transcoder trans; 3339 3340 for (trans = TRANSCODER_A; trans <= TRANSCODER_D; trans++) { 3341 enum intel_display_power_domain domain; 3342 3343 domain = POWER_DOMAIN_TRANSCODER(trans); 3344 if (!intel_display_power_is_enabled(dev_priv, domain)) 3345 continue; 3346 3347 gen3_assert_iir_is_zero(uncore, TRANS_PSR_IIR(trans)); 3348 } 3349 } else { 3350 gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR); 3351 } 3352 3353 for_each_pipe(dev_priv, pipe) { 3354 dev_priv->de_irq_mask[pipe] = ~de_pipe_masked; 3355 3356 if (intel_display_power_is_enabled(dev_priv, 3357 POWER_DOMAIN_PIPE(pipe))) 3358 GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe, 3359 dev_priv->de_irq_mask[pipe], 3360 de_pipe_enables); 3361 } 3362 3363 GEN3_IRQ_INIT(uncore, GEN8_DE_PORT_, ~de_port_masked, de_port_enables); 3364 GEN3_IRQ_INIT(uncore, GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked); 3365 3366 if (INTEL_GEN(dev_priv) >= 11) { 3367 u32 de_hpd_masked = 0; 3368 u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK | 3369 GEN11_DE_TBT_HOTPLUG_MASK; 3370 3371 GEN3_IRQ_INIT(uncore, GEN11_DE_HPD_, ~de_hpd_masked, 3372 de_hpd_enables); 3373 gen11_hpd_detection_setup(dev_priv); 3374 } else if (IS_GEN9_LP(dev_priv)) { 3375 bxt_hpd_detection_setup(dev_priv); 3376 } else if (IS_BROADWELL(dev_priv)) { 3377 ilk_hpd_detection_setup(dev_priv); 3378 } 3379 } 3380 3381 static void gen8_irq_postinstall(struct drm_i915_private *dev_priv) 3382 { 3383 if (HAS_PCH_SPLIT(dev_priv)) 3384 ibx_irq_pre_postinstall(dev_priv); 3385 3386 gen8_gt_irq_postinstall(&dev_priv->gt); 3387 gen8_de_irq_postinstall(dev_priv); 3388 3389 if (HAS_PCH_SPLIT(dev_priv)) 3390 ibx_irq_postinstall(dev_priv); 3391 3392 gen8_master_intr_enable(dev_priv->uncore.regs); 3393 } 3394 3395 static void icp_irq_postinstall(struct drm_i915_private *dev_priv) 3396 { 3397 u32 mask = SDE_GMBUS_ICP; 3398 3399 WARN_ON(I915_READ(SDEIER) != 0); 3400 I915_WRITE(SDEIER, 0xffffffff); 3401 POSTING_READ(SDEIER); 3402 3403 gen3_assert_iir_is_zero(&dev_priv->uncore, SDEIIR); 3404 I915_WRITE(SDEIMR, ~mask); 3405 3406 if (HAS_PCH_TGP(dev_priv)) 3407 icp_hpd_detection_setup(dev_priv, TGP_DDI_HPD_ENABLE_MASK, 3408 TGP_TC_HPD_ENABLE_MASK); 3409 else if (HAS_PCH_JSP(dev_priv)) 3410 icp_hpd_detection_setup(dev_priv, TGP_DDI_HPD_ENABLE_MASK, 0); 3411 else if (HAS_PCH_MCC(dev_priv)) 3412 icp_hpd_detection_setup(dev_priv, ICP_DDI_HPD_ENABLE_MASK, 3413 ICP_TC_HPD_ENABLE(PORT_TC1)); 3414 else 3415 icp_hpd_detection_setup(dev_priv, ICP_DDI_HPD_ENABLE_MASK, 3416 ICP_TC_HPD_ENABLE_MASK); 3417 } 3418 3419 static void gen11_irq_postinstall(struct drm_i915_private *dev_priv) 3420 { 3421 struct intel_uncore *uncore = &dev_priv->uncore; 3422 u32 gu_misc_masked = GEN11_GU_MISC_GSE; 3423 3424 if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) 3425 icp_irq_postinstall(dev_priv); 3426 3427 gen11_gt_irq_postinstall(&dev_priv->gt); 3428 gen8_de_irq_postinstall(dev_priv); 3429 3430 GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked); 3431 3432 I915_WRITE(GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE); 3433 3434 gen11_master_intr_enable(uncore->regs); 3435 POSTING_READ(GEN11_GFX_MSTR_IRQ); 3436 } 3437 3438 static void cherryview_irq_postinstall(struct 
drm_i915_private *dev_priv) 3439 { 3440 gen8_gt_irq_postinstall(&dev_priv->gt); 3441 3442 spin_lock_irq(&dev_priv->irq_lock); 3443 if (dev_priv->display_irqs_enabled) 3444 vlv_display_irq_postinstall(dev_priv); 3445 spin_unlock_irq(&dev_priv->irq_lock); 3446 3447 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 3448 POSTING_READ(GEN8_MASTER_IRQ); 3449 } 3450 3451 static void i8xx_irq_reset(struct drm_i915_private *dev_priv) 3452 { 3453 struct intel_uncore *uncore = &dev_priv->uncore; 3454 3455 i9xx_pipestat_irq_reset(dev_priv); 3456 3457 GEN2_IRQ_RESET(uncore); 3458 } 3459 3460 static void i8xx_irq_postinstall(struct drm_i915_private *dev_priv) 3461 { 3462 struct intel_uncore *uncore = &dev_priv->uncore; 3463 u16 enable_mask; 3464 3465 intel_uncore_write16(uncore, 3466 EMR, 3467 ~(I915_ERROR_PAGE_TABLE | 3468 I915_ERROR_MEMORY_REFRESH)); 3469 3470 /* Unmask the interrupts that we always want on. */ 3471 dev_priv->irq_mask = 3472 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3473 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3474 I915_MASTER_ERROR_INTERRUPT); 3475 3476 enable_mask = 3477 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3478 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3479 I915_MASTER_ERROR_INTERRUPT | 3480 I915_USER_INTERRUPT; 3481 3482 GEN2_IRQ_INIT(uncore, dev_priv->irq_mask, enable_mask); 3483 3484 /* Interrupt setup is already guaranteed to be single-threaded, this is 3485 * just to make the assert_spin_locked check happy. */ 3486 spin_lock_irq(&dev_priv->irq_lock); 3487 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 3488 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 3489 spin_unlock_irq(&dev_priv->irq_lock); 3490 } 3491 3492 static void i8xx_error_irq_ack(struct drm_i915_private *i915, 3493 u16 *eir, u16 *eir_stuck) 3494 { 3495 struct intel_uncore *uncore = &i915->uncore; 3496 u16 emr; 3497 3498 *eir = intel_uncore_read16(uncore, EIR); 3499 3500 if (*eir) 3501 intel_uncore_write16(uncore, EIR, *eir); 3502 3503 *eir_stuck = intel_uncore_read16(uncore, EIR); 3504 if (*eir_stuck == 0) 3505 return; 3506 3507 /* 3508 * Toggle all EMR bits to make sure we get an edge 3509 * in the ISR master error bit if we don't clear 3510 * all the EIR bits. Otherwise the edge triggered 3511 * IIR on i965/g4x wouldn't notice that an interrupt 3512 * is still pending. Also some EIR bits can't be 3513 * cleared except by handling the underlying error 3514 * (or by a GPU reset) so we mask any bit that 3515 * remains set. 3516 */ 3517 emr = intel_uncore_read16(uncore, EMR); 3518 intel_uncore_write16(uncore, EMR, 0xffff); 3519 intel_uncore_write16(uncore, EMR, emr | *eir_stuck); 3520 } 3521 3522 static void i8xx_error_irq_handler(struct drm_i915_private *dev_priv, 3523 u16 eir, u16 eir_stuck) 3524 { 3525 DRM_DEBUG("Master Error: EIR 0x%04x\n", eir); 3526 3527 if (eir_stuck) 3528 DRM_DEBUG_DRIVER("EIR stuck: 0x%04x, masked\n", eir_stuck); 3529 } 3530 3531 static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv, 3532 u32 *eir, u32 *eir_stuck) 3533 { 3534 u32 emr; 3535 3536 *eir = I915_READ(EIR); 3537 3538 I915_WRITE(EIR, *eir); 3539 3540 *eir_stuck = I915_READ(EIR); 3541 if (*eir_stuck == 0) 3542 return; 3543 3544 /* 3545 * Toggle all EMR bits to make sure we get an edge 3546 * in the ISR master error bit if we don't clear 3547 * all the EIR bits. Otherwise the edge triggered 3548 * IIR on i965/g4x wouldn't notice that an interrupt 3549 * is still pending. 
Also some EIR bits can't be 3550 * cleared except by handling the underlying error 3551 * (or by a GPU reset) so we mask any bit that 3552 * remains set. 3553 */ 3554 emr = I915_READ(EMR); 3555 I915_WRITE(EMR, 0xffffffff); 3556 I915_WRITE(EMR, emr | *eir_stuck); 3557 } 3558 3559 static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv, 3560 u32 eir, u32 eir_stuck) 3561 { 3562 DRM_DEBUG("Master Error, EIR 0x%08x\n", eir); 3563 3564 if (eir_stuck) 3565 DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masked\n", eir_stuck); 3566 } 3567 3568 static irqreturn_t i8xx_irq_handler(int irq, void *arg) 3569 { 3570 struct drm_i915_private *dev_priv = arg; 3571 irqreturn_t ret = IRQ_NONE; 3572 3573 if (!intel_irqs_enabled(dev_priv)) 3574 return IRQ_NONE; 3575 3576 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 3577 disable_rpm_wakeref_asserts(&dev_priv->runtime_pm); 3578 3579 do { 3580 u32 pipe_stats[I915_MAX_PIPES] = {}; 3581 u16 eir = 0, eir_stuck = 0; 3582 u16 iir; 3583 3584 iir = intel_uncore_read16(&dev_priv->uncore, GEN2_IIR); 3585 if (iir == 0) 3586 break; 3587 3588 ret = IRQ_HANDLED; 3589 3590 /* Call regardless, as some status bits might not be 3591 * signalled in iir */ 3592 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats); 3593 3594 if (iir & I915_MASTER_ERROR_INTERRUPT) 3595 i8xx_error_irq_ack(dev_priv, &eir, &eir_stuck); 3596 3597 intel_uncore_write16(&dev_priv->uncore, GEN2_IIR, iir); 3598 3599 if (iir & I915_USER_INTERRUPT) 3600 intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]); 3601 3602 if (iir & I915_MASTER_ERROR_INTERRUPT) 3603 i8xx_error_irq_handler(dev_priv, eir, eir_stuck); 3604 3605 i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats); 3606 } while (0); 3607 3608 enable_rpm_wakeref_asserts(&dev_priv->runtime_pm); 3609 3610 return ret; 3611 } 3612 3613 static void i915_irq_reset(struct drm_i915_private *dev_priv) 3614 { 3615 struct intel_uncore *uncore = &dev_priv->uncore; 3616 3617 if (I915_HAS_HOTPLUG(dev_priv)) { 3618 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); 3619 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3620 } 3621 3622 i9xx_pipestat_irq_reset(dev_priv); 3623 3624 GEN3_IRQ_RESET(uncore, GEN2_); 3625 } 3626 3627 static void i915_irq_postinstall(struct drm_i915_private *dev_priv) 3628 { 3629 struct intel_uncore *uncore = &dev_priv->uncore; 3630 u32 enable_mask; 3631 3632 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | 3633 I915_ERROR_MEMORY_REFRESH)); 3634 3635 /* Unmask the interrupts that we always want on. */ 3636 dev_priv->irq_mask = 3637 ~(I915_ASLE_INTERRUPT | 3638 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3639 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3640 I915_MASTER_ERROR_INTERRUPT); 3641 3642 enable_mask = 3643 I915_ASLE_INTERRUPT | 3644 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3645 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3646 I915_MASTER_ERROR_INTERRUPT | 3647 I915_USER_INTERRUPT; 3648 3649 if (I915_HAS_HOTPLUG(dev_priv)) { 3650 /* Enable in IER... */ 3651 enable_mask |= I915_DISPLAY_PORT_INTERRUPT; 3652 /* and unmask in IMR */ 3653 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT; 3654 } 3655 3656 GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask); 3657 3658 /* Interrupt setup is already guaranteed to be single-threaded, this is 3659 * just to make the assert_spin_locked check happy. 
*/ 3660 spin_lock_irq(&dev_priv->irq_lock); 3661 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 3662 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 3663 spin_unlock_irq(&dev_priv->irq_lock); 3664 3665 i915_enable_asle_pipestat(dev_priv); 3666 } 3667 3668 static irqreturn_t i915_irq_handler(int irq, void *arg) 3669 { 3670 struct drm_i915_private *dev_priv = arg; 3671 irqreturn_t ret = IRQ_NONE; 3672 3673 if (!intel_irqs_enabled(dev_priv)) 3674 return IRQ_NONE; 3675 3676 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 3677 disable_rpm_wakeref_asserts(&dev_priv->runtime_pm); 3678 3679 do { 3680 u32 pipe_stats[I915_MAX_PIPES] = {}; 3681 u32 eir = 0, eir_stuck = 0; 3682 u32 hotplug_status = 0; 3683 u32 iir; 3684 3685 iir = I915_READ(GEN2_IIR); 3686 if (iir == 0) 3687 break; 3688 3689 ret = IRQ_HANDLED; 3690 3691 if (I915_HAS_HOTPLUG(dev_priv) && 3692 iir & I915_DISPLAY_PORT_INTERRUPT) 3693 hotplug_status = i9xx_hpd_irq_ack(dev_priv); 3694 3695 /* Call regardless, as some status bits might not be 3696 * signalled in iir */ 3697 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats); 3698 3699 if (iir & I915_MASTER_ERROR_INTERRUPT) 3700 i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck); 3701 3702 I915_WRITE(GEN2_IIR, iir); 3703 3704 if (iir & I915_USER_INTERRUPT) 3705 intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]); 3706 3707 if (iir & I915_MASTER_ERROR_INTERRUPT) 3708 i9xx_error_irq_handler(dev_priv, eir, eir_stuck); 3709 3710 if (hotplug_status) 3711 i9xx_hpd_irq_handler(dev_priv, hotplug_status); 3712 3713 i915_pipestat_irq_handler(dev_priv, iir, pipe_stats); 3714 } while (0); 3715 3716 enable_rpm_wakeref_asserts(&dev_priv->runtime_pm); 3717 3718 return ret; 3719 } 3720 3721 static void i965_irq_reset(struct drm_i915_private *dev_priv) 3722 { 3723 struct intel_uncore *uncore = &dev_priv->uncore; 3724 3725 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); 3726 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3727 3728 i9xx_pipestat_irq_reset(dev_priv); 3729 3730 GEN3_IRQ_RESET(uncore, GEN2_); 3731 } 3732 3733 static void i965_irq_postinstall(struct drm_i915_private *dev_priv) 3734 { 3735 struct intel_uncore *uncore = &dev_priv->uncore; 3736 u32 enable_mask; 3737 u32 error_mask; 3738 3739 /* 3740 * Enable some error detection, note the instruction error mask 3741 * bit is reserved, so we leave it masked. 3742 */ 3743 if (IS_G4X(dev_priv)) { 3744 error_mask = ~(GM45_ERROR_PAGE_TABLE | 3745 GM45_ERROR_MEM_PRIV | 3746 GM45_ERROR_CP_PRIV | 3747 I915_ERROR_MEMORY_REFRESH); 3748 } else { 3749 error_mask = ~(I915_ERROR_PAGE_TABLE | 3750 I915_ERROR_MEMORY_REFRESH); 3751 } 3752 I915_WRITE(EMR, error_mask); 3753 3754 /* Unmask the interrupts that we always want on. */ 3755 dev_priv->irq_mask = 3756 ~(I915_ASLE_INTERRUPT | 3757 I915_DISPLAY_PORT_INTERRUPT | 3758 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3759 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3760 I915_MASTER_ERROR_INTERRUPT); 3761 3762 enable_mask = 3763 I915_ASLE_INTERRUPT | 3764 I915_DISPLAY_PORT_INTERRUPT | 3765 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3766 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3767 I915_MASTER_ERROR_INTERRUPT | 3768 I915_USER_INTERRUPT; 3769 3770 if (IS_G4X(dev_priv)) 3771 enable_mask |= I915_BSD_USER_INTERRUPT; 3772 3773 GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask); 3774 3775 /* Interrupt setup is already guaranteed to be single-threaded, this is 3776 * just to make the assert_spin_locked check happy. 
static void i965_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	i9xx_pipestat_irq_reset(dev_priv);

	GEN3_IRQ_RESET(uncore, GEN2_);
}

static void i965_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 enable_mask;
	u32 error_mask;

	/*
	 * Enable some error detection; note that the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev_priv)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	I915_WRITE(EMR, error_mask);

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PORT_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_MASTER_ERROR_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PORT_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_MASTER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	if (IS_G4X(dev_priv))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);

	/* Interrupt setup is already guaranteed to be single-threaded; this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	i915_enable_asle_pipestat(dev_priv);
}

static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_en;

	lockdep_assert_held(&dev_priv->irq_lock);

	/* Note HDMI and DP share hotplug bits */
	/* enable bits are the same for all generations */
	hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915);
	/* Programming the CRT detection parameters tends
	 * to generate a spurious hotplug event about three
	 * seconds later. So just do it once.
	 */
	if (IS_G4X(dev_priv))
		hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
	hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;

	/* Ignore TV since it's buggy */
	i915_hotplug_interrupt_update_locked(dev_priv,
					     HOTPLUG_INT_EN_MASK |
					     CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
					     CRT_HOTPLUG_ACTIVATION_PERIOD_64,
					     hotplug_en);
}

static irqreturn_t i965_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, so we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 eir = 0, eir_stuck = 0;
		u32 hotplug_status = 0;
		u32 iir;

		iir = I915_READ(GEN2_IIR);
		if (iir == 0)
			break;

		ret = IRQ_HANDLED;

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);

		I915_WRITE(GEN2_IIR, iir);

		if (iir & I915_USER_INTERRUPT)
			intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);

		if (iir & I915_BSD_USER_INTERRUPT)
			intel_engine_breadcrumbs_irq(dev_priv->engine[VCS0]);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_handler(dev_priv, eir, eir_stuck);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		i965_pipestat_irq_handler(dev_priv, iir, pipe_stats);
	} while (0);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}
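
/*
 * The three legacy handlers above differ only in small ways: the i8xx
 * variant has no hotplug handling and uses 16bit GEN2_IIR accesses,
 * i915_irq_handler() gates its hotplug ack on I915_HAS_HOTPLUG(), and
 * i965_irq_handler() additionally signals the BSD ring (VCS0) on
 * I915_BSD_USER_INTERRUPT, which only G4X enables in its postinstall.
 */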
/**
 * intel_irq_init - initializes irq support
 * @dev_priv: i915 device instance
 *
 * This function initializes all the irq support including work items, timers
 * and all the vtables. It does not set up the interrupt itself though.
 */
void intel_irq_init(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	int i;

	intel_hpd_init_work(dev_priv);

	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
	for (i = 0; i < MAX_L3_SLICES; ++i)
		dev_priv->l3_parity.remap_info[i] = NULL;

	/* pre-gen11 the GuC irq bits are in the upper 16 bits of the pm reg */
	if (HAS_GT_UC(dev_priv) && INTEL_GEN(dev_priv) < 11)
		dev_priv->gt.pm_guc_events = GUC_INTR_GUC2HOST << 16;

	dev->vblank_disable_immediate = true;

	/* Most platforms treat the display irq block as an always-on
	 * power domain. vlv/chv can disable it at runtime and need
	 * special care to avoid writing any of the display block registers
	 * outside of the power domain. We defer setting up the display irqs
	 * in this case to the runtime pm.
	 */
	dev_priv->display_irqs_enabled = true;
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		dev_priv->display_irqs_enabled = false;

	dev_priv->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD;
	/* If we have MST support, we want to avoid doing short HPD IRQ storm
	 * detection, as short HPD storms will occur as a natural part of
	 * sideband messaging with MST.
	 * On older platforms however, IRQ storms can occur with both long and
	 * short pulses, as seen on some G4x systems.
	 */
	dev_priv->hotplug.hpd_short_storm_enabled = !HAS_DP_MST(dev_priv);

	if (HAS_GMCH(dev_priv)) {
		if (I915_HAS_HOTPLUG(dev_priv))
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else {
		if (HAS_PCH_JSP(dev_priv))
			dev_priv->display.hpd_irq_setup = jsp_hpd_irq_setup;
		else if (HAS_PCH_MCC(dev_priv))
			dev_priv->display.hpd_irq_setup = mcc_hpd_irq_setup;
		else if (INTEL_GEN(dev_priv) >= 11)
			dev_priv->display.hpd_irq_setup = gen11_hpd_irq_setup;
		else if (IS_GEN9_LP(dev_priv))
			dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
		else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
			dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
		else
			dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
	}
}
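
/*
 * The ->hpd_irq_setup() hook chosen above is invoked later by the
 * hotplug code with dev_priv->irq_lock held, which is why
 * i915_hpd_irq_setup() starts with lockdep_assert_held().  A simplified
 * caller sketch (illustrative, not the exact intel_hotplug.c code):
 *
 *	spin_lock_irq(&dev_priv->irq_lock);
 *	if (dev_priv->display_irqs_enabled &&
 *	    dev_priv->display.hpd_irq_setup)
 *		dev_priv->display.hpd_irq_setup(dev_priv);
 *	spin_unlock_irq(&dev_priv->irq_lock);
 */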
/**
 * intel_irq_fini - deinitializes IRQ support
 * @i915: i915 device instance
 *
 * This function deinitializes all the IRQ support.
 */
void intel_irq_fini(struct drm_i915_private *i915)
{
	int i;

	for (i = 0; i < MAX_L3_SLICES; ++i)
		kfree(i915->l3_parity.remap_info[i]);
}

static irq_handler_t intel_irq_handler(struct drm_i915_private *dev_priv)
{
	if (HAS_GMCH(dev_priv)) {
		if (IS_CHERRYVIEW(dev_priv))
			return cherryview_irq_handler;
		else if (IS_VALLEYVIEW(dev_priv))
			return valleyview_irq_handler;
		else if (IS_GEN(dev_priv, 4))
			return i965_irq_handler;
		else if (IS_GEN(dev_priv, 3))
			return i915_irq_handler;
		else
			return i8xx_irq_handler;
	} else {
		if (INTEL_GEN(dev_priv) >= 11)
			return gen11_irq_handler;
		else if (INTEL_GEN(dev_priv) >= 8)
			return gen8_irq_handler;
		else
			return ironlake_irq_handler;
	}
}

static void intel_irq_reset(struct drm_i915_private *dev_priv)
{
	if (HAS_GMCH(dev_priv)) {
		if (IS_CHERRYVIEW(dev_priv))
			cherryview_irq_reset(dev_priv);
		else if (IS_VALLEYVIEW(dev_priv))
			valleyview_irq_reset(dev_priv);
		else if (IS_GEN(dev_priv, 4))
			i965_irq_reset(dev_priv);
		else if (IS_GEN(dev_priv, 3))
			i915_irq_reset(dev_priv);
		else
			i8xx_irq_reset(dev_priv);
	} else {
		if (INTEL_GEN(dev_priv) >= 11)
			gen11_irq_reset(dev_priv);
		else if (INTEL_GEN(dev_priv) >= 8)
			gen8_irq_reset(dev_priv);
		else
			ironlake_irq_reset(dev_priv);
	}
}

static void intel_irq_postinstall(struct drm_i915_private *dev_priv)
{
	if (HAS_GMCH(dev_priv)) {
		if (IS_CHERRYVIEW(dev_priv))
			cherryview_irq_postinstall(dev_priv);
		else if (IS_VALLEYVIEW(dev_priv))
			valleyview_irq_postinstall(dev_priv);
		else if (IS_GEN(dev_priv, 4))
			i965_irq_postinstall(dev_priv);
		else if (IS_GEN(dev_priv, 3))
			i915_irq_postinstall(dev_priv);
		else
			i8xx_irq_postinstall(dev_priv);
	} else {
		if (INTEL_GEN(dev_priv) >= 11)
			gen11_irq_postinstall(dev_priv);
		else if (INTEL_GEN(dev_priv) >= 8)
			gen8_irq_postinstall(dev_priv);
		else
			ironlake_irq_postinstall(dev_priv);
	}
}
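
/*
 * These three platform selectors must stay in agreement: the handler
 * returned by intel_irq_handler() has to match the register state
 * programmed by the corresponding *_irq_reset() / *_irq_postinstall()
 * pair, since each generation acks a different register layout.  They
 * are only used together, by intel_irq_install() / intel_irq_uninstall()
 * and the runtime pm helpers below.
 */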
/**
 * intel_irq_install - enables the hardware interrupt
 * @dev_priv: i915 device instance
 *
 * This function enables the hardware interrupt handling, but leaves the hotplug
 * handling still disabled. It is called after intel_irq_init().
 *
 * In the driver load and resume code we need working interrupts in a few places
 * but don't want to deal with the hassle of concurrent probe and hotplug
 * workers. Hence the split into this two-stage approach.
 */
int intel_irq_install(struct drm_i915_private *dev_priv)
{
	int irq = dev_priv->drm.pdev->irq;
	int ret;

	/*
	 * We enable some interrupt sources in our postinstall hooks, so mark
	 * interrupts as enabled _before_ actually enabling them to avoid
	 * special cases in our ordering checks.
	 */
	dev_priv->runtime_pm.irqs_enabled = true;

	dev_priv->drm.irq_enabled = true;

	intel_irq_reset(dev_priv);

	ret = request_irq(irq, intel_irq_handler(dev_priv),
			  IRQF_SHARED, DRIVER_NAME, dev_priv);
	if (ret < 0) {
		dev_priv->drm.irq_enabled = false;
		return ret;
	}

	intel_irq_postinstall(dev_priv);

	return ret;
}

/**
 * intel_irq_uninstall - finalizes all irq handling
 * @dev_priv: i915 device instance
 *
 * This stops interrupt and hotplug handling and unregisters and frees all
 * resources acquired in the init functions.
 */
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
{
	int irq = dev_priv->drm.pdev->irq;

	/*
	 * FIXME we can get called twice during driver probe
	 * error handling as well as during driver remove due to
	 * intel_modeset_driver_remove() calling us out of sequence.
	 * Would be nice if it didn't do that...
	 */
	if (!dev_priv->drm.irq_enabled)
		return;

	dev_priv->drm.irq_enabled = false;

	intel_irq_reset(dev_priv);

	free_irq(irq, dev_priv);

	intel_hpd_cancel_work(dev_priv);
	dev_priv->runtime_pm.irqs_enabled = false;
}

/**
 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
 * @dev_priv: i915 device instance
 *
 * This function is used to disable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
{
	intel_irq_reset(dev_priv);
	dev_priv->runtime_pm.irqs_enabled = false;
	intel_synchronize_irq(dev_priv);
}

/**
 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
 * @dev_priv: i915 device instance
 *
 * This function is used to enable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->runtime_pm.irqs_enabled = true;
	intel_irq_reset(dev_priv);
	intel_irq_postinstall(dev_priv);
}

bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
{
	/*
	 * We only use drm_irq_uninstall() at unload and VT switch, so
	 * this is the only thing we need to check.
	 */
	return dev_priv->runtime_pm.irqs_enabled;
}

void intel_synchronize_irq(struct drm_i915_private *i915)
{
	synchronize_irq(i915->drm.pdev->irq);
}
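
/*
 * Typical lifecycle of the entry points above, as driven by the rest of
 * the driver (simplified sketch, error paths omitted):
 *
 *	intel_irq_init(i915);		// load: vtables and work items
 *	intel_irq_install(i915);	// load: reset, request_irq, postinstall
 *	...
 *	intel_runtime_pm_disable_interrupts(i915);	// suspend side
 *	intel_runtime_pm_enable_interrupts(i915);	// resume side
 *	...
 *	intel_irq_uninstall(i915);	// unload: reset and free_irq
 *	intel_irq_fini(i915);		// unload: free l3_parity remap_info
 */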