/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/sysrq.h>

#include <drm/drm_drv.h>

#include "display/icl_dsi_regs.h"
#include "display/intel_de.h"
#include "display/intel_display_trace.h"
#include "display/intel_display_types.h"
#include "display/intel_fifo_underrun.h"
#include "display/intel_hotplug.h"
#include "display/intel_lpe_audio.h"
#include "display/intel_psr.h"

#include "gt/intel_breadcrumbs.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_irq.h"
#include "gt/intel_gt_pm_irq.h"
#include "gt/intel_gt_regs.h"
#include "gt/intel_rps.h"

#include "i915_driver.h"
#include "i915_drv.h"
#include "i915_irq.h"

/**
 * DOC: interrupt handling
 *
 * These functions provide the basic support for enabling and disabling the
 * interrupt handling support. There's a lot more functionality in i915_irq.c
 * and related files, but that will be described in separate chapters.
 */

/*
 * Interrupt statistic for PMU. Increments the counter only if the
 * interrupt originated from the GPU so interrupts from a device which
 * shares the interrupt line are not accounted.
 */
static inline void pmu_irq_stats(struct drm_i915_private *i915,
				 irqreturn_t res)
{
	if (unlikely(res != IRQ_HANDLED))
		return;

	/*
	 * A clever compiler translates that into INC. A not so clever one
	 * should at least prevent store tearing.
	 */
	WRITE_ONCE(i915->pmu.irq_count, i915->pmu.irq_count + 1);
}
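
/*
 * Per-platform hooks used by the hotplug code below: a long_pulse_detect_func
 * decides from the digital hotplug register value whether a triggered HPD pin
 * saw a long pulse, and a hotplug_enables_func computes the hotplug detection
 * enable bits for a given encoder.
 */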

typedef bool (*long_pulse_detect_func)(enum hpd_pin pin, u32 val);
typedef u32 (*hotplug_enables_func)(struct intel_encoder *encoder);

static const u32 hpd_ilk[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG,
};

static const u32 hpd_ivb[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
};

static const u32 hpd_bdw[HPD_NUM_PINS] = {
	[HPD_PORT_A] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_A),
};

static const u32 hpd_ibx[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG,
};

static const u32 hpd_cpt[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
};

static const u32 hpd_spt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
	[HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT,
};

static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN,
};

static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS,
};

static const u32 hpd_status_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS,
};

static const u32 hpd_bxt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_A),
	[HPD_PORT_B] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_B),
	[HPD_PORT_C] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_C),
};

static const u32 hpd_gen11[HPD_NUM_PINS] = {
	[HPD_PORT_TC1] = GEN11_TC_HOTPLUG(HPD_PORT_TC1) | GEN11_TBT_HOTPLUG(HPD_PORT_TC1),
	[HPD_PORT_TC2] = GEN11_TC_HOTPLUG(HPD_PORT_TC2) | GEN11_TBT_HOTPLUG(HPD_PORT_TC2),
	[HPD_PORT_TC3] = GEN11_TC_HOTPLUG(HPD_PORT_TC3) | GEN11_TBT_HOTPLUG(HPD_PORT_TC3),
	[HPD_PORT_TC4] = GEN11_TC_HOTPLUG(HPD_PORT_TC4) | GEN11_TBT_HOTPLUG(HPD_PORT_TC4),
	[HPD_PORT_TC5] = GEN11_TC_HOTPLUG(HPD_PORT_TC5) | GEN11_TBT_HOTPLUG(HPD_PORT_TC5),
	[HPD_PORT_TC6] = GEN11_TC_HOTPLUG(HPD_PORT_TC6) | GEN11_TBT_HOTPLUG(HPD_PORT_TC6),
};
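
/*
 * The tables below describe hotplug bits in the south display engine (PCH)
 * interrupt registers; intel_hpd_init_pins() assigns them to hpd->pch_hpd.
 */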

static const u32 hpd_icp[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_A),
	[HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_B),
	[HPD_PORT_C] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_C),
	[HPD_PORT_TC1] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC1),
	[HPD_PORT_TC2] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC2),
	[HPD_PORT_TC3] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC3),
	[HPD_PORT_TC4] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC4),
	[HPD_PORT_TC5] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC5),
	[HPD_PORT_TC6] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC6),
};

static const u32 hpd_sde_dg1[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_A),
	[HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_B),
	[HPD_PORT_C] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_C),
	[HPD_PORT_D] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_D),
	[HPD_PORT_TC1] = SDE_TC_HOTPLUG_DG2(HPD_PORT_TC1),
};

static void intel_hpd_init_pins(struct drm_i915_private *dev_priv)
{
	struct intel_hotplug *hpd = &dev_priv->display.hotplug;

	if (HAS_GMCH(dev_priv)) {
		if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
		    IS_CHERRYVIEW(dev_priv))
			hpd->hpd = hpd_status_g4x;
		else
			hpd->hpd = hpd_status_i915;
		return;
	}

	if (DISPLAY_VER(dev_priv) >= 11)
		hpd->hpd = hpd_gen11;
	else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
		hpd->hpd = hpd_bxt;
	else if (DISPLAY_VER(dev_priv) == 9)
		hpd->hpd = NULL; /* no north HPD on SKL */
	else if (DISPLAY_VER(dev_priv) >= 8)
		hpd->hpd = hpd_bdw;
	else if (DISPLAY_VER(dev_priv) >= 7)
		hpd->hpd = hpd_ivb;
	else
		hpd->hpd = hpd_ilk;

	if ((INTEL_PCH_TYPE(dev_priv) < PCH_DG1) &&
	    (!HAS_PCH_SPLIT(dev_priv) || HAS_PCH_NOP(dev_priv)))
		return;

	if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1)
		hpd->pch_hpd = hpd_sde_dg1;
	else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
		hpd->pch_hpd = hpd_icp;
	else if (HAS_PCH_CNP(dev_priv) || HAS_PCH_SPT(dev_priv))
		hpd->pch_hpd = hpd_spt;
	else if (HAS_PCH_LPT(dev_priv) || HAS_PCH_CPT(dev_priv))
		hpd->pch_hpd = hpd_cpt;
	else if (HAS_PCH_IBX(dev_priv))
		hpd->pch_hpd = hpd_ibx;
	else
		MISSING_CASE(INTEL_PCH_TYPE(dev_priv));
}

static void
intel_handle_vblank(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);

	drm_crtc_handle_vblank(&crtc->base);
}
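
/*
 * Low-level IMR/IER/IIR helpers. Gen2 has 16-bit interrupt registers, hence
 * the separate 16-bit variants; gen3 onwards use the 32-bit registers.
 */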
void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr,
		    i915_reg_t iir, i915_reg_t ier)
{
	intel_uncore_write(uncore, imr, 0xffffffff);
	intel_uncore_posting_read(uncore, imr);

	intel_uncore_write(uncore, ier, 0);

	/* IIR can theoretically queue up two events. Be paranoid. */
	intel_uncore_write(uncore, iir, 0xffffffff);
	intel_uncore_posting_read(uncore, iir);
	intel_uncore_write(uncore, iir, 0xffffffff);
	intel_uncore_posting_read(uncore, iir);
}

static void gen2_irq_reset(struct intel_uncore *uncore)
{
	intel_uncore_write16(uncore, GEN2_IMR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IMR);

	intel_uncore_write16(uncore, GEN2_IER, 0);

	/* IIR can theoretically queue up two events. Be paranoid. */
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
}

/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
static void gen3_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg)
{
	u32 val = intel_uncore_read(uncore, reg);

	if (val == 0)
		return;

	drm_WARN(&uncore->i915->drm, 1,
		 "Interrupt register 0x%x is not zero: 0x%08x\n",
		 i915_mmio_reg_offset(reg), val);
	intel_uncore_write(uncore, reg, 0xffffffff);
	intel_uncore_posting_read(uncore, reg);
	intel_uncore_write(uncore, reg, 0xffffffff);
	intel_uncore_posting_read(uncore, reg);
}

static void gen2_assert_iir_is_zero(struct intel_uncore *uncore)
{
	u16 val = intel_uncore_read16(uncore, GEN2_IIR);

	if (val == 0)
		return;

	drm_WARN(&uncore->i915->drm, 1,
		 "Interrupt register 0x%x is not zero: 0x%08x\n",
		 i915_mmio_reg_offset(GEN2_IIR), val);
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
}

void gen3_irq_init(struct intel_uncore *uncore,
		   i915_reg_t imr, u32 imr_val,
		   i915_reg_t ier, u32 ier_val,
		   i915_reg_t iir)
{
	gen3_assert_iir_is_zero(uncore, iir);

	intel_uncore_write(uncore, ier, ier_val);
	intel_uncore_write(uncore, imr, imr_val);
	intel_uncore_posting_read(uncore, imr);
}

static void gen2_irq_init(struct intel_uncore *uncore,
			  u32 imr_val, u32 ier_val)
{
	gen2_assert_iir_is_zero(uncore);

	intel_uncore_write16(uncore, GEN2_IER, ier_val);
	intel_uncore_write16(uncore, GEN2_IMR, imr_val);
	intel_uncore_posting_read16(uncore, GEN2_IMR);
}
344 */ 345 void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv, 346 u32 mask, 347 u32 bits) 348 { 349 spin_lock_irq(&dev_priv->irq_lock); 350 i915_hotplug_interrupt_update_locked(dev_priv, mask, bits); 351 spin_unlock_irq(&dev_priv->irq_lock); 352 } 353 354 /** 355 * ilk_update_display_irq - update DEIMR 356 * @dev_priv: driver private 357 * @interrupt_mask: mask of interrupt bits to update 358 * @enabled_irq_mask: mask of interrupt bits to enable 359 */ 360 static void ilk_update_display_irq(struct drm_i915_private *dev_priv, 361 u32 interrupt_mask, u32 enabled_irq_mask) 362 { 363 u32 new_val; 364 365 lockdep_assert_held(&dev_priv->irq_lock); 366 drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask); 367 368 new_val = dev_priv->irq_mask; 369 new_val &= ~interrupt_mask; 370 new_val |= (~enabled_irq_mask & interrupt_mask); 371 372 if (new_val != dev_priv->irq_mask && 373 !drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv))) { 374 dev_priv->irq_mask = new_val; 375 intel_uncore_write(&dev_priv->uncore, DEIMR, dev_priv->irq_mask); 376 intel_uncore_posting_read(&dev_priv->uncore, DEIMR); 377 } 378 } 379 380 void ilk_enable_display_irq(struct drm_i915_private *i915, u32 bits) 381 { 382 ilk_update_display_irq(i915, bits, bits); 383 } 384 385 void ilk_disable_display_irq(struct drm_i915_private *i915, u32 bits) 386 { 387 ilk_update_display_irq(i915, bits, 0); 388 } 389 390 /** 391 * bdw_update_port_irq - update DE port interrupt 392 * @dev_priv: driver private 393 * @interrupt_mask: mask of interrupt bits to update 394 * @enabled_irq_mask: mask of interrupt bits to enable 395 */ 396 static void bdw_update_port_irq(struct drm_i915_private *dev_priv, 397 u32 interrupt_mask, 398 u32 enabled_irq_mask) 399 { 400 u32 new_val; 401 u32 old_val; 402 403 lockdep_assert_held(&dev_priv->irq_lock); 404 405 drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask); 406 407 if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv))) 408 return; 409 410 old_val = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PORT_IMR); 411 412 new_val = old_val; 413 new_val &= ~interrupt_mask; 414 new_val |= (~enabled_irq_mask & interrupt_mask); 415 416 if (new_val != old_val) { 417 intel_uncore_write(&dev_priv->uncore, GEN8_DE_PORT_IMR, new_val); 418 intel_uncore_posting_read(&dev_priv->uncore, GEN8_DE_PORT_IMR); 419 } 420 } 421 422 /** 423 * bdw_update_pipe_irq - update DE pipe interrupt 424 * @dev_priv: driver private 425 * @pipe: pipe whose interrupt to update 426 * @interrupt_mask: mask of interrupt bits to update 427 * @enabled_irq_mask: mask of interrupt bits to enable 428 */ 429 static void bdw_update_pipe_irq(struct drm_i915_private *dev_priv, 430 enum pipe pipe, u32 interrupt_mask, 431 u32 enabled_irq_mask) 432 { 433 u32 new_val; 434 435 lockdep_assert_held(&dev_priv->irq_lock); 436 437 drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask); 438 439 if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv))) 440 return; 441 442 new_val = dev_priv->de_irq_mask[pipe]; 443 new_val &= ~interrupt_mask; 444 new_val |= (~enabled_irq_mask & interrupt_mask); 445 446 if (new_val != dev_priv->de_irq_mask[pipe]) { 447 dev_priv->de_irq_mask[pipe] = new_val; 448 intel_uncore_write(&dev_priv->uncore, GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); 449 intel_uncore_posting_read(&dev_priv->uncore, GEN8_DE_PIPE_IMR(pipe)); 450 } 451 } 452 453 void bdw_enable_pipe_irq(struct drm_i915_private *i915, 454 enum pipe pipe, u32 bits) 455 { 456 bdw_update_pipe_irq(i915, pipe, bits, 

/**
 * bdw_update_port_irq - update DE port interrupt
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
				u32 interrupt_mask,
				u32 enabled_irq_mask)
{
	u32 new_val;
	u32 old_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
		return;

	old_val = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PORT_IMR);

	new_val = old_val;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != old_val) {
		intel_uncore_write(&dev_priv->uncore, GEN8_DE_PORT_IMR, new_val);
		intel_uncore_posting_read(&dev_priv->uncore, GEN8_DE_PORT_IMR);
	}
}

/**
 * bdw_update_pipe_irq - update DE pipe interrupt
 * @dev_priv: driver private
 * @pipe: pipe whose interrupt to update
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
				enum pipe pipe, u32 interrupt_mask,
				u32 enabled_irq_mask)
{
	u32 new_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->de_irq_mask[pipe];
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->de_irq_mask[pipe]) {
		dev_priv->de_irq_mask[pipe] = new_val;
		intel_uncore_write(&dev_priv->uncore, GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
		intel_uncore_posting_read(&dev_priv->uncore, GEN8_DE_PIPE_IMR(pipe));
	}
}

void bdw_enable_pipe_irq(struct drm_i915_private *i915,
			 enum pipe pipe, u32 bits)
{
	bdw_update_pipe_irq(i915, pipe, bits, bits);
}

void bdw_disable_pipe_irq(struct drm_i915_private *i915,
			  enum pipe pipe, u32 bits)
{
	bdw_update_pipe_irq(i915, pipe, bits, 0);
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
					 u32 interrupt_mask,
					 u32 enabled_irq_mask)
{
	u32 sdeimr = intel_uncore_read(&dev_priv->uncore, SDEIMR);

	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

	lockdep_assert_held(&dev_priv->irq_lock);

	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
		return;

	intel_uncore_write(&dev_priv->uncore, SDEIMR, sdeimr);
	intel_uncore_posting_read(&dev_priv->uncore, SDEIMR);
}

void ibx_enable_display_interrupt(struct drm_i915_private *i915, u32 bits)
{
	ibx_display_interrupt_update(i915, bits, bits);
}

void ibx_disable_display_interrupt(struct drm_i915_private *i915, u32 bits)
{
	ibx_display_interrupt_update(i915, bits, 0);
}
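
/*
 * PIPESTAT carries the interrupt enable bits in the high 16 bits of the
 * register and the corresponding status bits in the low 16 bits, hence the
 * status_mask << 16 starting point for the enable mask below.
 */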
u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
			      enum pipe pipe)
{
	u32 status_mask = dev_priv->pipestat_irq_mask[pipe];
	u32 enable_mask = status_mask << 16;

	lockdep_assert_held(&dev_priv->irq_lock);

	if (DISPLAY_VER(dev_priv) < 5)
		goto out;

	/*
	 * On pipe A we don't support the PSR interrupt yet,
	 * on pipe B and C the same bit MBZ.
	 */
	if (drm_WARN_ON_ONCE(&dev_priv->drm,
			     status_mask & PIPE_A_PSR_STATUS_VLV))
		return 0;
	/*
	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
	 * A the same bit is for perf counters which we don't use either.
	 */
	if (drm_WARN_ON_ONCE(&dev_priv->drm,
			     status_mask & PIPE_B_PSR_STATUS_VLV))
		return 0;

	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

out:
	drm_WARN_ONCE(&dev_priv->drm,
		      enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask);

	return enable_mask;
}

void i915_enable_pipestat(struct drm_i915_private *dev_priv,
			  enum pipe pipe, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 enable_mask;

	drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: status_mask=0x%x\n",
		      pipe_name(pipe), status_mask);

	lockdep_assert_held(&dev_priv->irq_lock);
	drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));

	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == status_mask)
		return;

	dev_priv->pipestat_irq_mask[pipe] |= status_mask;
	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

	intel_uncore_write(&dev_priv->uncore, reg, enable_mask | status_mask);
	intel_uncore_posting_read(&dev_priv->uncore, reg);
}

void i915_disable_pipestat(struct drm_i915_private *dev_priv,
			   enum pipe pipe, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 enable_mask;

	drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: status_mask=0x%x\n",
		      pipe_name(pipe), status_mask);

	lockdep_assert_held(&dev_priv->irq_lock);
	drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));

	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == 0)
		return;

	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

	intel_uncore_write(&dev_priv->uncore, reg, enable_mask | status_mask);
	intel_uncore_posting_read(&dev_priv->uncore, reg);
}

static bool i915_has_asle(struct drm_i915_private *dev_priv)
{
	if (!dev_priv->display.opregion.asle)
		return false;

	return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
}

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 * @dev_priv: i915 device private
 */
static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
{
	if (!i915_has_asle(dev_priv))
		return;

	spin_lock_irq(&dev_priv->irq_lock);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
	if (DISPLAY_VER(dev_priv) >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_STATUS);

	spin_unlock_irq(&dev_priv->irq_lock);
}

/**
 * ivb_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * the same row is more likely to go bad again.
 */
static void ivb_parity_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), l3_parity.error_work);
	struct intel_gt *gt = to_gt(dev_priv);
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	u32 misccpctl;
	u8 slice = 0;

	/*
	 * We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->drm.struct_mutex);

	/* If we've screwed up tracking, just let the interrupt fire again */
	if (drm_WARN_ON(&dev_priv->drm, !dev_priv->l3_parity.which_slice))
		goto out;

	misccpctl = intel_uncore_rmw(&dev_priv->uncore, GEN7_MISCCPCTL,
				     GEN7_DOP_CLOCK_GATE_ENABLE, 0);
	intel_uncore_posting_read(&dev_priv->uncore, GEN7_MISCCPCTL);

	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		i915_reg_t reg;

		slice--;
		if (drm_WARN_ON_ONCE(&dev_priv->drm,
				     slice >= NUM_L3_SLICES(dev_priv)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1 << slice);

		reg = GEN7_L3CDERRST1(slice);

		error_status = intel_uncore_read(&dev_priv->uncore, reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		intel_uncore_write(&dev_priv->uncore, reg,
				   GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		intel_uncore_posting_read(&dev_priv->uncore, reg);

		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

		kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj,
				   KOBJ_CHANGE, parity_event);

		drm_dbg(&dev_priv->drm,
			"Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			slice, row, bank, subbank);

		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
	}

	intel_uncore_write(&dev_priv->uncore, GEN7_MISCCPCTL, misccpctl);

out:
	drm_WARN_ON(&dev_priv->drm, dev_priv->l3_parity.which_slice);
	spin_lock_irq(gt->irq_lock);
	gen5_gt_enable_irq(gt, GT_PARITY_ERROR(dev_priv));
	spin_unlock_irq(gt->irq_lock);

	mutex_unlock(&dev_priv->drm.struct_mutex);
}

static bool gen11_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_TC1:
	case HPD_PORT_TC2:
	case HPD_PORT_TC3:
	case HPD_PORT_TC4:
	case HPD_PORT_TC5:
	case HPD_PORT_TC6:
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(pin);
	default:
		return false;
	}
}

static bool bxt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_A:
		return val & PORTA_HOTPLUG_LONG_DETECT;
	case HPD_PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case HPD_PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool icp_ddi_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_A:
	case HPD_PORT_B:
	case HPD_PORT_C:
	case HPD_PORT_D:
		return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(pin);
	default:
		return false;
	}
}

static bool icp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_TC1:
	case HPD_PORT_TC2:
	case HPD_PORT_TC3:
	case HPD_PORT_TC4:
	case HPD_PORT_TC5:
	case HPD_PORT_TC6:
		return val & ICP_TC_HPD_LONG_DETECT(pin);
	default:
		return false;
	}
}

static bool spt_port_hotplug2_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_E:
		return val & PORTE_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool spt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_A:
		return val & PORTA_HOTPLUG_LONG_DETECT;
	case HPD_PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case HPD_PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	case HPD_PORT_D:
		return val & PORTD_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool ilk_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_A:
		return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool pch_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case HPD_PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	case HPD_PORT_D:
		return val & PORTD_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool i9xx_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_B:
		return val & PORTB_HOTPLUG_INT_LONG_PULSE;
	case HPD_PORT_C:
		return val & PORTC_HOTPLUG_INT_LONG_PULSE;
	case HPD_PORT_D:
		return val & PORTD_HOTPLUG_INT_LONG_PULSE;
	default:
		return false;
	}
}

/*
 * Get a bit mask of pins that have triggered, and which ones may be long.
 * This can be called multiple times with the same masks to accumulate
 * hotplug detection results from several registers.
 *
 * Note that the caller is expected to zero out the masks initially.
 */
static void intel_get_hpd_pins(struct drm_i915_private *dev_priv,
			       u32 *pin_mask, u32 *long_mask,
			       u32 hotplug_trigger, u32 dig_hotplug_reg,
			       const u32 hpd[HPD_NUM_PINS],
			       bool long_pulse_detect(enum hpd_pin pin, u32 val))
{
	enum hpd_pin pin;

	BUILD_BUG_ON(BITS_PER_TYPE(*pin_mask) < HPD_NUM_PINS);

	for_each_hpd_pin(pin) {
		if ((hpd[pin] & hotplug_trigger) == 0)
			continue;

		*pin_mask |= BIT(pin);

		if (long_pulse_detect(pin, dig_hotplug_reg))
			*long_mask |= BIT(pin);
	}

	drm_dbg(&dev_priv->drm,
		"hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x, long 0x%08x\n",
		hotplug_trigger, dig_hotplug_reg, *pin_mask, *long_mask);
}

static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv,
				  const u32 hpd[HPD_NUM_PINS])
{
	struct intel_encoder *encoder;
	u32 enabled_irqs = 0;

	for_each_intel_encoder(&dev_priv->drm, encoder)
		if (dev_priv->display.hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
			enabled_irqs |= hpd[encoder->hpd_pin];

	return enabled_irqs;
}

static u32 intel_hpd_hotplug_irqs(struct drm_i915_private *dev_priv,
				  const u32 hpd[HPD_NUM_PINS])
{
	struct intel_encoder *encoder;
	u32 hotplug_irqs = 0;

	for_each_intel_encoder(&dev_priv->drm, encoder)
		hotplug_irqs |= hpd[encoder->hpd_pin];

	return hotplug_irqs;
}

static u32 intel_hpd_hotplug_enables(struct drm_i915_private *i915,
				     hotplug_enables_func hotplug_enables)
{
	struct intel_encoder *encoder;
	u32 hotplug = 0;

	for_each_intel_encoder(&i915->drm, encoder)
		hotplug |= hotplug_enables(encoder);

	return hotplug;
}

static void gmbus_irq_handler(struct drm_i915_private *dev_priv)
{
	wake_up_all(&dev_priv->display.gmbus.wait_queue);
}

static void dp_aux_irq_handler(struct drm_i915_private *dev_priv)
{
	wake_up_all(&dev_priv->display.gmbus.wait_queue);
}
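
/*
 * Pipe CRC capture is only wired up when debugfs is available; without
 * CONFIG_DEBUG_FS the handler below compiles to an empty stub.
 */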
#if defined(CONFIG_DEBUG_FS)
static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
					 enum pipe pipe,
					 u32 crc0, u32 crc1,
					 u32 crc2, u32 crc3,
					 u32 crc4)
{
	struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);
	struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc;
	u32 crcs[5] = { crc0, crc1, crc2, crc3, crc4 };

	trace_intel_pipe_crc(crtc, crcs);

	spin_lock(&pipe_crc->lock);
	/*
	 * For some not yet identified reason, the first CRC is
	 * bonkers. So let's just wait for the next vblank and read
	 * out the buggy result.
	 *
	 * On GEN8+ sometimes the second CRC is bonkers as well, so
	 * don't trust that one either.
	 */
	if (pipe_crc->skipped <= 0 ||
	    (DISPLAY_VER(dev_priv) >= 8 && pipe_crc->skipped == 1)) {
		pipe_crc->skipped++;
		spin_unlock(&pipe_crc->lock);
		return;
	}
	spin_unlock(&pipe_crc->lock);

	drm_crtc_add_crc_entry(&crtc->base, true,
			       drm_crtc_accurate_vblank_count(&crtc->base),
			       crcs);
}
#else
static inline void
display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
			     enum pipe pipe,
			     u32 crc0, u32 crc1,
			     u32 crc2, u32 crc3,
			     u32 crc4) {}
#endif

static void flip_done_handler(struct drm_i915_private *i915,
			      enum pipe pipe)
{
	struct intel_crtc *crtc = intel_crtc_for_pipe(i915, pipe);
	struct drm_crtc_state *crtc_state = crtc->base.state;
	struct drm_pending_vblank_event *e = crtc_state->event;
	struct drm_device *dev = &i915->drm;
	unsigned long irqflags;

	spin_lock_irqsave(&dev->event_lock, irqflags);

	crtc_state->event = NULL;

	drm_crtc_send_vblank_event(&crtc->base, e);

	spin_unlock_irqrestore(&dev->event_lock, irqflags);
}

static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	display_pipe_crc_irq_handler(dev_priv, pipe,
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_1_IVB(pipe)),
				     0, 0, 0, 0);
}

static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	display_pipe_crc_irq_handler(dev_priv, pipe,
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_1_IVB(pipe)),
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_2_IVB(pipe)),
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_3_IVB(pipe)),
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_4_IVB(pipe)),
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_5_IVB(pipe)));
}

static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	u32 res1, res2;

	if (DISPLAY_VER(dev_priv) >= 3)
		res1 = intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RES1_I915(pipe));
	else
		res1 = 0;

	if (DISPLAY_VER(dev_priv) >= 5 || IS_G4X(dev_priv))
		res2 = intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RES2_G4X(pipe));
	else
		res2 = 0;

	display_pipe_crc_irq_handler(dev_priv, pipe,
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RED(pipe)),
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_GREEN(pipe)),
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_BLUE(pipe)),
				     res1, res2);
}

static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		intel_uncore_write(&dev_priv->uncore, PIPESTAT(pipe),
				   PIPESTAT_INT_STATUS_MASK |
				   PIPE_FIFO_UNDERRUN_STATUS);

		dev_priv->pipestat_irq_mask[pipe] = 0;
	}
}

static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
				  u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	enum pipe pipe;

	spin_lock(&dev_priv->irq_lock);

	if (!dev_priv->display_irqs_enabled) {
		spin_unlock(&dev_priv->irq_lock);
		return;
	}

	for_each_pipe(dev_priv, pipe) {
		i915_reg_t reg;
		u32 status_mask, enable_mask, iir_bit = 0;

		/*
		 * PIPESTAT bits get signalled even when the interrupt is
		 * disabled with the mask bits, and some of the status bits do
		 * not generate interrupts at all (like the underrun bit).
		 * Hence we need to be careful that we only handle what we
		 * want to handle.
		 */

		/* fifo underruns are filtered in the underrun handler. */
		status_mask = PIPE_FIFO_UNDERRUN_STATUS;

		switch (pipe) {
		default:
		case PIPE_A:
			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
			break;
		case PIPE_B:
			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
			break;
		case PIPE_C:
			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
			break;
		}
		if (iir & iir_bit)
			status_mask |= dev_priv->pipestat_irq_mask[pipe];

		if (!status_mask)
			continue;

		reg = PIPESTAT(pipe);
		pipe_stats[pipe] = intel_uncore_read(&dev_priv->uncore, reg) & status_mask;
		enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

		/*
		 * Clear the PIPE*STAT regs before the IIR
		 *
		 * Toggle the enable bits to make sure we get an
		 * edge in the ISR pipe event bit if we don't clear
		 * all the enabled status bits. Otherwise the edge
		 * triggered IIR on i965/g4x wouldn't notice that
		 * an interrupt is still pending.
		 */
		if (pipe_stats[pipe]) {
			intel_uncore_write(&dev_priv->uncore, reg, pipe_stats[pipe]);
			intel_uncore_write(&dev_priv->uncore, reg, enable_mask);
		}
	}
	spin_unlock(&dev_priv->irq_lock);
}

static void i8xx_pipestat_irq_handler(struct drm_i915_private *dev_priv,
				      u16 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
			intel_handle_vblank(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}
}

static void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv,
				      u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	bool blc_event = false;
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
			intel_handle_vblank(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
			blc_event = true;

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (blc_event || (iir & I915_ASLE_INTERRUPT))
		intel_opregion_asle_intr(dev_priv);
}

static void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv,
				      u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	bool blc_event = false;
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
			intel_handle_vblank(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
			blc_event = true;

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (blc_event || (iir & I915_ASLE_INTERRUPT))
		intel_opregion_asle_intr(dev_priv);

	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		gmbus_irq_handler(dev_priv);
}

static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
					    u32 pipe_stats[I915_MAX_PIPES])
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
			intel_handle_vblank(dev_priv, pipe);

		if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV)
			flip_done_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		gmbus_irq_handler(dev_priv);
}

static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
{
	u32 hotplug_status = 0, hotplug_status_mask;
	int i;

	if (IS_G4X(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		hotplug_status_mask = HOTPLUG_INT_STATUS_G4X |
			DP_AUX_CHANNEL_MASK_INT_STATUS_G4X;
	else
		hotplug_status_mask = HOTPLUG_INT_STATUS_I915;

	/*
	 * We absolutely have to clear all the pending interrupt
	 * bits in PORT_HOTPLUG_STAT. Otherwise the ISR port
	 * interrupt bit won't have an edge, and the i965/g4x
	 * edge triggered IIR will not notice that an interrupt
	 * is still pending. We can't use PORT_HOTPLUG_EN to
	 * guarantee the edge as the act of toggling the enable
	 * bits can itself generate a new hotplug interrupt :(
	 */
	for (i = 0; i < 10; i++) {
		u32 tmp = intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT) & hotplug_status_mask;

		if (tmp == 0)
			return hotplug_status;

		hotplug_status |= tmp;
		intel_uncore_write(&dev_priv->uncore, PORT_HOTPLUG_STAT, hotplug_status);
	}

	drm_WARN_ONCE(&dev_priv->drm, 1,
		      "PORT_HOTPLUG_STAT did not clear (0x%08x)\n",
		      intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT));

	return hotplug_status;
}

static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv,
				 u32 hotplug_status)
{
	u32 pin_mask = 0, long_mask = 0;
	u32 hotplug_trigger;

	if (IS_G4X(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
	else
		hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

	if (hotplug_trigger) {
		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
				   hotplug_trigger, hotplug_trigger,
				   dev_priv->display.hotplug.hpd,
				   i9xx_port_hotplug_long_detect);

		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
	}

	if ((IS_G4X(dev_priv) ||
	     IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
		dp_aux_irq_handler(dev_priv);
}

static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 iir, gt_iir, pm_iir;
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 hotplug_status = 0;
		u32 ier = 0;

		gt_iir = intel_uncore_read(&dev_priv->uncore, GTIIR);
		pm_iir = intel_uncore_read(&dev_priv->uncore, GEN6_PMIIR);
		iir = intel_uncore_read(&dev_priv->uncore, VLV_IIR);

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			break;

		ret = IRQ_HANDLED;

		/*
		 * Theory on interrupt generation, based on empirical evidence:
		 *
		 * x = ((VLV_IIR & VLV_IER) ||
		 *      (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) &&
		 *       (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE)));
		 *
		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
		 * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to
		 * guarantee the CPU interrupt will be raised again even if we
		 * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR
		 * bits this time around.
		 */
		intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, 0);
		ier = intel_uncore_rmw(&dev_priv->uncore, VLV_IER, ~0, 0);

		if (gt_iir)
			intel_uncore_write(&dev_priv->uncore, GTIIR, gt_iir);
		if (pm_iir)
			intel_uncore_write(&dev_priv->uncore, GEN6_PMIIR, pm_iir);

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & (I915_LPE_PIPE_A_INTERRUPT |
			   I915_LPE_PIPE_B_INTERRUPT))
			intel_lpe_audio_irq_handler(dev_priv);

		/*
		 * VLV_IIR is single buffered, and reflects the level
		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
		 */
		if (iir)
			intel_uncore_write(&dev_priv->uncore, VLV_IIR, iir);

		intel_uncore_write(&dev_priv->uncore, VLV_IER, ier);
		intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);

		if (gt_iir)
			gen6_gt_irq_handler(to_gt(dev_priv), gt_iir);
		if (pm_iir)
			gen6_rps_irq_handler(&to_gt(dev_priv)->rps, pm_iir);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
	} while (0);

	pmu_irq_stats(dev_priv, ret);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}
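
/*
 * CHV pairs the gen8-style master interrupt control and GT interrupt
 * registers with VLV-style display interrupt handling.
 */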
static irqreturn_t cherryview_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 master_ctl, iir;
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 hotplug_status = 0;
		u32 ier = 0;

		master_ctl = intel_uncore_read(&dev_priv->uncore, GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
		iir = intel_uncore_read(&dev_priv->uncore, VLV_IIR);

		if (master_ctl == 0 && iir == 0)
			break;

		ret = IRQ_HANDLED;

		/*
		 * Theory on interrupt generation, based on empirical evidence:
		 *
		 * x = ((VLV_IIR & VLV_IER) ||
		 *      ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) &&
		 *       (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL)));
		 *
		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
		 * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to
		 * guarantee the CPU interrupt will be raised again even if we
		 * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL
		 * bits this time around.
		 */
		intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, 0);
		ier = intel_uncore_rmw(&dev_priv->uncore, VLV_IER, ~0, 0);

		gen8_gt_irq_handler(to_gt(dev_priv), master_ctl);

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & (I915_LPE_PIPE_A_INTERRUPT |
			   I915_LPE_PIPE_B_INTERRUPT |
			   I915_LPE_PIPE_C_INTERRUPT))
			intel_lpe_audio_irq_handler(dev_priv);

		/*
		 * VLV_IIR is single buffered, and reflects the level
		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
		 */
		if (iir)
			intel_uncore_write(&dev_priv->uncore, VLV_IIR, iir);

		intel_uncore_write(&dev_priv->uncore, VLV_IER, ier);
		intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
	} while (0);

	pmu_irq_stats(dev_priv, ret);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}

static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
				u32 hotplug_trigger)
{
	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;

	/*
	 * Somehow the PCH doesn't seem to really ack the interrupt to the CPU
	 * unless we touch the hotplug register, even if hotplug_trigger is
	 * zero. Not acking leads to "The master control interrupt lied (SDE)!"
	 * errors.
	 */
	dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
	if (!hotplug_trigger) {
		u32 mask = PORTA_HOTPLUG_STATUS_MASK |
			PORTD_HOTPLUG_STATUS_MASK |
			PORTC_HOTPLUG_STATUS_MASK |
			PORTB_HOTPLUG_STATUS_MASK;
		dig_hotplug_reg &= ~mask;
	}

	intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, dig_hotplug_reg);
	if (!hotplug_trigger)
		return;

	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
			   hotplug_trigger, dig_hotplug_reg,
			   dev_priv->display.hotplug.pch_hpd,
			   pch_port_hotplug_long_detect);

	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
}

static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
	enum pipe pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;

	ibx_hpd_irq_handler(dev_priv, hotplug_trigger);

	if (pch_iir & SDE_AUDIO_POWER_MASK) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
			       SDE_AUDIO_POWER_SHIFT);
		drm_dbg(&dev_priv->drm, "PCH audio power change on port %c\n",
			port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK)
		dp_aux_irq_handler(dev_priv);

	if (pch_iir & SDE_GMBUS)
		gmbus_irq_handler(dev_priv);

	if (pch_iir & SDE_AUDIO_HDCP_MASK)
		drm_dbg(&dev_priv->drm, "PCH HDCP audio interrupt\n");

	if (pch_iir & SDE_AUDIO_TRANS_MASK)
		drm_dbg(&dev_priv->drm, "PCH transcoder audio interrupt\n");

	if (pch_iir & SDE_POISON)
		drm_err(&dev_priv->drm, "PCH poison interrupt\n");

	if (pch_iir & SDE_FDI_MASK) {
		for_each_pipe(dev_priv, pipe)
			drm_dbg(&dev_priv->drm, "  pipe %c FDI IIR: 0x%08x\n",
				pipe_name(pipe),
				intel_uncore_read(&dev_priv->uncore, FDI_RX_IIR(pipe)));
	}

	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
		drm_dbg(&dev_priv->drm, "PCH transcoder CRC done interrupt\n");

	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
		drm_dbg(&dev_priv->drm,
			"PCH transcoder CRC error interrupt\n");

	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
		intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A);

	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
		intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_B);
}

static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
{
	u32 err_int = intel_uncore_read(&dev_priv->uncore, GEN7_ERR_INT);
	enum pipe pipe;

	if (err_int & ERR_INT_POISON)
		drm_err(&dev_priv->drm, "Poison interrupt\n");

	for_each_pipe(dev_priv, pipe) {
		if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
			if (IS_IVYBRIDGE(dev_priv))
				ivb_pipe_crc_irq_handler(dev_priv, pipe);
			else
				hsw_pipe_crc_irq_handler(dev_priv, pipe);
		}
	}

	intel_uncore_write(&dev_priv->uncore, GEN7_ERR_INT, err_int);
}

static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
{
	u32 serr_int = intel_uncore_read(&dev_priv->uncore, SERR_INT);
	enum pipe pipe;

	if (serr_int & SERR_INT_POISON)
		drm_err(&dev_priv->drm, "PCH poison interrupt\n");

	for_each_pipe(dev_priv, pipe)
		if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe))
			intel_pch_fifo_underrun_irq_handler(dev_priv, pipe);

	intel_uncore_write(&dev_priv->uncore, SERR_INT, serr_int);
}

static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
	enum pipe pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;

	ibx_hpd_irq_handler(dev_priv, hotplug_trigger);

	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
			       SDE_AUDIO_POWER_SHIFT_CPT);
		drm_dbg(&dev_priv->drm, "PCH audio power change on port %c\n",
			port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK_CPT)
		dp_aux_irq_handler(dev_priv);

	if (pch_iir & SDE_GMBUS_CPT)
		gmbus_irq_handler(dev_priv);

	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
		drm_dbg(&dev_priv->drm, "Audio CP request interrupt\n");

	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
		drm_dbg(&dev_priv->drm, "Audio CP change interrupt\n");

	if (pch_iir & SDE_FDI_MASK_CPT) {
		for_each_pipe(dev_priv, pipe)
			drm_dbg(&dev_priv->drm, "  pipe %c FDI IIR: 0x%08x\n",
				pipe_name(pipe),
				intel_uncore_read(&dev_priv->uncore, FDI_RX_IIR(pipe)));
	}

	if (pch_iir & SDE_ERROR_CPT)
		cpt_serr_int_handler(dev_priv);
}

static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
	u32 ddi_hotplug_trigger = pch_iir & SDE_DDI_HOTPLUG_MASK_ICP;
	u32 tc_hotplug_trigger = pch_iir & SDE_TC_HOTPLUG_MASK_ICP;
	u32 pin_mask = 0, long_mask = 0;

	if (ddi_hotplug_trigger) {
		u32 dig_hotplug_reg;

		/* Locking due to DSI native GPIO sequences */
		spin_lock(&dev_priv->irq_lock);
		dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, SHOTPLUG_CTL_DDI, 0, 0);
		spin_unlock(&dev_priv->irq_lock);

		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
				   ddi_hotplug_trigger, dig_hotplug_reg,
				   dev_priv->display.hotplug.pch_hpd,
				   icp_ddi_port_hotplug_long_detect);
	}

	if (tc_hotplug_trigger) {
		u32 dig_hotplug_reg;

		dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, SHOTPLUG_CTL_TC, 0, 0);

		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
				   tc_hotplug_trigger, dig_hotplug_reg,
				   dev_priv->display.hotplug.pch_hpd,
				   icp_tc_port_hotplug_long_detect);
	}

	if (pin_mask)
		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);

	if (pch_iir & SDE_GMBUS_ICP)
		gmbus_irq_handler(dev_priv);
}

static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
		~SDE_PORTE_HOTPLUG_SPT;
	u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
	u32 pin_mask = 0, long_mask = 0;

	if (hotplug_trigger) {
		u32 dig_hotplug_reg;

		dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG, 0, 0);

		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
				   hotplug_trigger, dig_hotplug_reg,
				   dev_priv->display.hotplug.pch_hpd,
				   spt_port_hotplug_long_detect);
	}

	if (hotplug2_trigger) {
		u32 dig_hotplug_reg;

		dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG2, 0, 0);

		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
				   hotplug2_trigger, dig_hotplug_reg,
				   dev_priv->display.hotplug.pch_hpd,
				   spt_port_hotplug2_long_detect);
	}

	if (pin_mask)
		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);

	if (pch_iir & SDE_GMBUS_CPT)
		gmbus_irq_handler(dev_priv);
}

static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv,
				u32 hotplug_trigger)
{
	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;

	dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL, 0, 0);

	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
			   hotplug_trigger, dig_hotplug_reg,
			   dev_priv->display.hotplug.hpd,
			   ilk_port_hotplug_long_detect);

	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
}

static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
				    u32 de_iir)
{
	enum pipe pipe;
	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;

	if (hotplug_trigger)
		ilk_hpd_irq_handler(dev_priv, hotplug_trigger);

	if (de_iir & DE_AUX_CHANNEL_A)
		dp_aux_irq_handler(dev_priv);

	if (de_iir & DE_GSE)
		intel_opregion_asle_intr(dev_priv);

	if (de_iir & DE_POISON)
		drm_err(&dev_priv->drm, "Poison interrupt\n");

	for_each_pipe(dev_priv, pipe) {
		if (de_iir & DE_PIPE_VBLANK(pipe))
			intel_handle_vblank(dev_priv, pipe);

		if (de_iir & DE_PLANE_FLIP_DONE(pipe))
			flip_done_handler(dev_priv, pipe);

		if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		if (de_iir & DE_PIPE_CRC_DONE(pipe))
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
	}

	/* check event from PCH */
	if (de_iir & DE_PCH_EVENT) {
		u32 pch_iir = intel_uncore_read(&dev_priv->uncore, SDEIIR);

		if (HAS_PCH_CPT(dev_priv))
			cpt_irq_handler(dev_priv, pch_iir);
		else
			ibx_irq_handler(dev_priv, pch_iir);

		/* clear the PCH hotplug event before clearing the CPU irq */
		intel_uncore_write(&dev_priv->uncore, SDEIIR, pch_iir);
	}

	if (DISPLAY_VER(dev_priv) == 5 && de_iir & DE_PCU_EVENT)
		gen5_rps_irq_handler(&to_gt(dev_priv)->rps);
}

static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
				    u32 de_iir)
{
	enum pipe pipe;
	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;

	if (hotplug_trigger)
		ilk_hpd_irq_handler(dev_priv, hotplug_trigger);

	if (de_iir & DE_ERR_INT_IVB)
		ivb_err_int_handler(dev_priv);

	if (de_iir & DE_AUX_CHANNEL_A_IVB)
		dp_aux_irq_handler(dev_priv);

	if (de_iir & DE_GSE_IVB)
		intel_opregion_asle_intr(dev_priv);

	for_each_pipe(dev_priv, pipe) {
		if (de_iir & DE_PIPE_VBLANK_IVB(pipe))
			intel_handle_vblank(dev_priv, pipe);

		if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe))
			flip_done_handler(dev_priv, pipe);
	}

	/* check event from PCH */
	if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) {
		u32 pch_iir = intel_uncore_read(&dev_priv->uncore, SDEIIR);

		cpt_irq_handler(dev_priv, pch_iir);

		/* clear the PCH hotplug event before clearing the CPU irq */
		intel_uncore_write(&dev_priv->uncore, SDEIIR, pch_iir);
	}
}

/*
 * To handle irqs with the minimum potential races with fresh interrupts, we:
 * 1 - Disable Master Interrupt Control.
 * 2 - Find the source(s) of the interrupt.
 * 3 - Clear the Interrupt Identity bits (IIR).
 * 4 - Process the interrupt(s) that had bits set in the IIRs.
 * 5 - Re-enable Master Interrupt Control.
 */
static irqreturn_t ilk_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *i915 = arg;
	void __iomem * const regs = i915->uncore.regs;
	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
	irqreturn_t ret = IRQ_NONE;

	if (unlikely(!intel_irqs_enabled(i915)))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&i915->runtime_pm);

	/* disable master interrupt before clearing iir  */
	de_ier = raw_reg_read(regs, DEIER);
	raw_reg_write(regs, DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);

	/* Disable south interrupts. We'll only write to SDEIIR once, so further
	 * interrupts will be stored on its back queue, and then we'll be
	 * able to process them after we restore SDEIER (as soon as we restore
	 * it, we'll get an interrupt if SDEIIR still has something to process
	 * due to its back queue).
	 */
	if (!HAS_PCH_NOP(i915)) {
		sde_ier = raw_reg_read(regs, SDEIER);
		raw_reg_write(regs, SDEIER, 0);
	}

	/* Find, clear, then process each source of interrupt */

	gt_iir = raw_reg_read(regs, GTIIR);
	if (gt_iir) {
		raw_reg_write(regs, GTIIR, gt_iir);
		if (GRAPHICS_VER(i915) >= 6)
			gen6_gt_irq_handler(to_gt(i915), gt_iir);
		else
			gen5_gt_irq_handler(to_gt(i915), gt_iir);
		ret = IRQ_HANDLED;
	}

	de_iir = raw_reg_read(regs, DEIIR);
	if (de_iir) {
		raw_reg_write(regs, DEIIR, de_iir);
		if (DISPLAY_VER(i915) >= 7)
			ivb_display_irq_handler(i915, de_iir);
		else
			ilk_display_irq_handler(i915, de_iir);
		ret = IRQ_HANDLED;
	}

	if (GRAPHICS_VER(i915) >= 6) {
		u32 pm_iir = raw_reg_read(regs, GEN6_PMIIR);

		if (pm_iir) {
			raw_reg_write(regs, GEN6_PMIIR, pm_iir);
			gen6_rps_irq_handler(&to_gt(i915)->rps, pm_iir);
			ret = IRQ_HANDLED;
		}
	}

	raw_reg_write(regs, DEIER, de_ier);
	if (sde_ier)
		raw_reg_write(regs, SDEIER, sde_ier);

	pmu_irq_stats(i915, ret);

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	enable_rpm_wakeref_asserts(&i915->runtime_pm);

	return ret;
}

static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv,
				u32 hotplug_trigger)
{
	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;

	dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG, 0, 0);

	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
			   hotplug_trigger, dig_hotplug_reg,
			   dev_priv->display.hotplug.hpd,
			   bxt_port_hotplug_long_detect);

	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
}

static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
{
	u32 pin_mask = 0, long_mask = 0;
	u32 trigger_tc = iir & GEN11_DE_TC_HOTPLUG_MASK;
	u32 trigger_tbt = iir & GEN11_DE_TBT_HOTPLUG_MASK;

	if (trigger_tc) {
		u32 dig_hotplug_reg;

		dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL, 0, 0);

		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
				   trigger_tc, dig_hotplug_reg,
				   dev_priv->display.hotplug.hpd,
				   gen11_port_hotplug_long_detect);
	}

	if (trigger_tbt) {
		u32 dig_hotplug_reg;

		dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL, 0, 0);

		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
				   trigger_tbt, dig_hotplug_reg,
				   dev_priv->display.hotplug.hpd,
				   gen11_port_hotplug_long_detect);
	}

	if (pin_mask)
		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
	else
		drm_err(&dev_priv->drm,
			"Unexpected DE HPD interrupt 0x%08x\n", iir);
}
TGL_DE_PORT_AUX_USBC6; 1887 1888 1889 mask = GEN8_AUX_CHANNEL_A; 1890 if (DISPLAY_VER(dev_priv) >= 9) 1891 mask |= GEN9_AUX_CHANNEL_B | 1892 GEN9_AUX_CHANNEL_C | 1893 GEN9_AUX_CHANNEL_D; 1894 1895 if (DISPLAY_VER(dev_priv) == 11) { 1896 mask |= ICL_AUX_CHANNEL_F; 1897 mask |= ICL_AUX_CHANNEL_E; 1898 } 1899 1900 return mask; 1901 } 1902 1903 static u32 gen8_de_pipe_fault_mask(struct drm_i915_private *dev_priv) 1904 { 1905 if (DISPLAY_VER(dev_priv) >= 13 || HAS_D12_PLANE_MINIMIZATION(dev_priv)) 1906 return RKL_DE_PIPE_IRQ_FAULT_ERRORS; 1907 else if (DISPLAY_VER(dev_priv) >= 11) 1908 return GEN11_DE_PIPE_IRQ_FAULT_ERRORS; 1909 else if (DISPLAY_VER(dev_priv) >= 9) 1910 return GEN9_DE_PIPE_IRQ_FAULT_ERRORS; 1911 else 1912 return GEN8_DE_PIPE_IRQ_FAULT_ERRORS; 1913 } 1914 1915 static void 1916 gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir) 1917 { 1918 bool found = false; 1919 1920 if (iir & GEN8_DE_MISC_GSE) { 1921 intel_opregion_asle_intr(dev_priv); 1922 found = true; 1923 } 1924 1925 if (iir & GEN8_DE_EDP_PSR) { 1926 struct intel_encoder *encoder; 1927 u32 psr_iir; 1928 i915_reg_t iir_reg; 1929 1930 for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) { 1931 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 1932 1933 if (DISPLAY_VER(dev_priv) >= 12) 1934 iir_reg = TRANS_PSR_IIR(intel_dp->psr.transcoder); 1935 else 1936 iir_reg = EDP_PSR_IIR; 1937 1938 psr_iir = intel_uncore_rmw(&dev_priv->uncore, iir_reg, 0, 0); 1939 1940 if (psr_iir) 1941 found = true; 1942 1943 intel_psr_irq_handler(intel_dp, psr_iir); 1944 1945 /* prior to GEN12 there is only one EDP PSR */ 1946 if (DISPLAY_VER(dev_priv) < 12) 1947 break; 1948 } 1949 } 1950 1951 if (!found) 1952 drm_err(&dev_priv->drm, "Unexpected DE Misc interrupt\n"); 1953 } 1954 1955 static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv, 1956 u32 te_trigger) 1957 { 1958 enum pipe pipe = INVALID_PIPE; 1959 enum transcoder dsi_trans; 1960 enum port port; 1961 u32 val, tmp; 1962 1963 /* 1964 * In case of dual link, TE comes from DSI_1; 1965 * this is to check if dual link is enabled 1966 */ 1967 val = intel_uncore_read(&dev_priv->uncore, TRANS_DDI_FUNC_CTL2(TRANSCODER_DSI_0)); 1968 val &= PORT_SYNC_MODE_ENABLE; 1969 1970 /* 1971 * if dual link is enabled, then read DSI_0 1972 * transcoder registers 1973 */ 1974 port = ((te_trigger & DSI1_TE && val) || (te_trigger & DSI0_TE)) ? 1975 PORT_A : PORT_B; 1976 dsi_trans = (port == PORT_A) ? TRANSCODER_DSI_0 : TRANSCODER_DSI_1; 1977 1978 /* Check if DSI is configured in command mode */ 1979 val = intel_uncore_read(&dev_priv->uncore, DSI_TRANS_FUNC_CONF(dsi_trans)); 1980 val = val & OP_MODE_MASK; 1981 1982 if (val != CMD_MODE_NO_GATE && val != CMD_MODE_TE_GATE) { 1983 drm_err(&dev_priv->drm, "DSI transcoder not configured in command mode\n"); 1984 return; 1985 } 1986 1987 /* Get PIPE for handling VBLANK event */ 1988 val = intel_uncore_read(&dev_priv->uncore, TRANS_DDI_FUNC_CTL(dsi_trans)); 1989 switch (val & TRANS_DDI_EDP_INPUT_MASK) { 1990 case TRANS_DDI_EDP_INPUT_A_ON: 1991 pipe = PIPE_A; 1992 break; 1993 case TRANS_DDI_EDP_INPUT_B_ONOFF: 1994 pipe = PIPE_B; 1995 break; 1996 case TRANS_DDI_EDP_INPUT_C_ONOFF: 1997 pipe = PIPE_C; 1998 break; 1999 default: 2000 drm_err(&dev_priv->drm, "Invalid PIPE\n"); 2001 return; 2002 } 2003 2004 intel_handle_vblank(dev_priv, pipe); 2005 2006 /* clear TE in dsi IIR */ 2007 port = (te_trigger & DSI1_TE) ? 
PORT_B : PORT_A; 2008 tmp = intel_uncore_rmw(&dev_priv->uncore, DSI_INTR_IDENT_REG(port), 0, 0); 2009 } 2010 2011 static u32 gen8_de_pipe_flip_done_mask(struct drm_i915_private *i915) 2012 { 2013 if (DISPLAY_VER(i915) >= 9) 2014 return GEN9_PIPE_PLANE1_FLIP_DONE; 2015 else 2016 return GEN8_PIPE_PRIMARY_FLIP_DONE; 2017 } 2018 2019 u32 gen8_de_pipe_underrun_mask(struct drm_i915_private *dev_priv) 2020 { 2021 u32 mask = GEN8_PIPE_FIFO_UNDERRUN; 2022 2023 if (DISPLAY_VER(dev_priv) >= 13) 2024 mask |= XELPD_PIPE_SOFT_UNDERRUN | 2025 XELPD_PIPE_HARD_UNDERRUN; 2026 2027 return mask; 2028 } 2029 2030 static irqreturn_t 2031 gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) 2032 { 2033 irqreturn_t ret = IRQ_NONE; 2034 u32 iir; 2035 enum pipe pipe; 2036 2037 drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_DISPLAY(dev_priv)); 2038 2039 if (master_ctl & GEN8_DE_MISC_IRQ) { 2040 iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_MISC_IIR); 2041 if (iir) { 2042 intel_uncore_write(&dev_priv->uncore, GEN8_DE_MISC_IIR, iir); 2043 ret = IRQ_HANDLED; 2044 gen8_de_misc_irq_handler(dev_priv, iir); 2045 } else { 2046 drm_err_ratelimited(&dev_priv->drm, 2047 "The master control interrupt lied (DE MISC)!\n"); 2048 } 2049 } 2050 2051 if (DISPLAY_VER(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) { 2052 iir = intel_uncore_read(&dev_priv->uncore, GEN11_DE_HPD_IIR); 2053 if (iir) { 2054 intel_uncore_write(&dev_priv->uncore, GEN11_DE_HPD_IIR, iir); 2055 ret = IRQ_HANDLED; 2056 gen11_hpd_irq_handler(dev_priv, iir); 2057 } else { 2058 drm_err_ratelimited(&dev_priv->drm, 2059 "The master control interrupt lied (DE HPD)!\n"); 2060 } 2061 } 2062 2063 if (master_ctl & GEN8_DE_PORT_IRQ) { 2064 iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PORT_IIR); 2065 if (iir) { 2066 bool found = false; 2067 2068 intel_uncore_write(&dev_priv->uncore, GEN8_DE_PORT_IIR, iir); 2069 ret = IRQ_HANDLED; 2070 2071 if (iir & gen8_de_port_aux_mask(dev_priv)) { 2072 dp_aux_irq_handler(dev_priv); 2073 found = true; 2074 } 2075 2076 if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) { 2077 u32 hotplug_trigger = iir & BXT_DE_PORT_HOTPLUG_MASK; 2078 2079 if (hotplug_trigger) { 2080 bxt_hpd_irq_handler(dev_priv, hotplug_trigger); 2081 found = true; 2082 } 2083 } else if (IS_BROADWELL(dev_priv)) { 2084 u32 hotplug_trigger = iir & BDW_DE_PORT_HOTPLUG_MASK; 2085 2086 if (hotplug_trigger) { 2087 ilk_hpd_irq_handler(dev_priv, hotplug_trigger); 2088 found = true; 2089 } 2090 } 2091 2092 if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) && 2093 (iir & BXT_DE_PORT_GMBUS)) { 2094 gmbus_irq_handler(dev_priv); 2095 found = true; 2096 } 2097 2098 if (DISPLAY_VER(dev_priv) >= 11) { 2099 u32 te_trigger = iir & (DSI0_TE | DSI1_TE); 2100 2101 if (te_trigger) { 2102 gen11_dsi_te_interrupt_handler(dev_priv, te_trigger); 2103 found = true; 2104 } 2105 } 2106 2107 if (!found) 2108 drm_err_ratelimited(&dev_priv->drm, 2109 "Unexpected DE Port interrupt\n"); 2110 } 2111 else 2112 drm_err_ratelimited(&dev_priv->drm, 2113 "The master control interrupt lied (DE PORT)!\n"); 2114 } 2115 2116 for_each_pipe(dev_priv, pipe) { 2117 u32 fault_errors; 2118 2119 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe))) 2120 continue; 2121 2122 iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PIPE_IIR(pipe)); 2123 if (!iir) { 2124 drm_err_ratelimited(&dev_priv->drm, 2125 "The master control interrupt lied (DE PIPE)!\n"); 2126 continue; 2127 } 2128 2129 ret = IRQ_HANDLED; 2130 intel_uncore_write(&dev_priv->uncore, GEN8_DE_PIPE_IIR(pipe), iir); 2131 2132 if (iir & 
GEN8_PIPE_VBLANK) 2133 intel_handle_vblank(dev_priv, pipe); 2134 2135 if (iir & gen8_de_pipe_flip_done_mask(dev_priv)) 2136 flip_done_handler(dev_priv, pipe); 2137 2138 if (iir & GEN8_PIPE_CDCLK_CRC_DONE) 2139 hsw_pipe_crc_irq_handler(dev_priv, pipe); 2140 2141 if (iir & gen8_de_pipe_underrun_mask(dev_priv)) 2142 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2143 2144 fault_errors = iir & gen8_de_pipe_fault_mask(dev_priv); 2145 if (fault_errors) 2146 drm_err_ratelimited(&dev_priv->drm, 2147 "Fault errors on pipe %c: 0x%08x\n", 2148 pipe_name(pipe), 2149 fault_errors); 2150 } 2151 2152 if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) && 2153 master_ctl & GEN8_DE_PCH_IRQ) { 2154 /* 2155 * FIXME(BDW): Assume for now that the new interrupt handling 2156 * scheme also closed the SDE interrupt handling race we've seen 2157 * on older pch-split platforms. But this needs testing. 2158 */ 2159 iir = intel_uncore_read(&dev_priv->uncore, SDEIIR); 2160 if (iir) { 2161 intel_uncore_write(&dev_priv->uncore, SDEIIR, iir); 2162 ret = IRQ_HANDLED; 2163 2164 if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) 2165 icp_irq_handler(dev_priv, iir); 2166 else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT) 2167 spt_irq_handler(dev_priv, iir); 2168 else 2169 cpt_irq_handler(dev_priv, iir); 2170 } else { 2171 /* 2172 * Like on previous PCH there seems to be something 2173 * fishy going on with forwarding PCH interrupts. 2174 */ 2175 drm_dbg(&dev_priv->drm, 2176 "The master control interrupt lied (SDE)!\n"); 2177 } 2178 } 2179 2180 return ret; 2181 } 2182 2183 static inline u32 gen8_master_intr_disable(void __iomem * const regs) 2184 { 2185 raw_reg_write(regs, GEN8_MASTER_IRQ, 0); 2186 2187 /* 2188 * Now with master disabled, get a sample of level indications 2189 * for this interrupt. Indications will be cleared on related acks. 2190 * New indications can and will light up during processing, 2191 * and will generate new interrupt after enabling master. 
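* In other words, the read below returns a one-shot snapshot of the pending
* top-level sources; each source is then acked at its own IIR while the
* master stays disabled.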
2192 */ 2193 return raw_reg_read(regs, GEN8_MASTER_IRQ); 2194 } 2195 2196 static inline void gen8_master_intr_enable(void __iomem * const regs) 2197 { 2198 raw_reg_write(regs, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 2199 } 2200 2201 static irqreturn_t gen8_irq_handler(int irq, void *arg) 2202 { 2203 struct drm_i915_private *dev_priv = arg; 2204 void __iomem * const regs = dev_priv->uncore.regs; 2205 u32 master_ctl; 2206 2207 if (!intel_irqs_enabled(dev_priv)) 2208 return IRQ_NONE; 2209 2210 master_ctl = gen8_master_intr_disable(regs); 2211 if (!master_ctl) { 2212 gen8_master_intr_enable(regs); 2213 return IRQ_NONE; 2214 } 2215 2216 /* Find, queue (onto bottom-halves), then clear each source */ 2217 gen8_gt_irq_handler(to_gt(dev_priv), master_ctl); 2218 2219 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2220 if (master_ctl & ~GEN8_GT_IRQS) { 2221 disable_rpm_wakeref_asserts(&dev_priv->runtime_pm); 2222 gen8_de_irq_handler(dev_priv, master_ctl); 2223 enable_rpm_wakeref_asserts(&dev_priv->runtime_pm); 2224 } 2225 2226 gen8_master_intr_enable(regs); 2227 2228 pmu_irq_stats(dev_priv, IRQ_HANDLED); 2229 2230 return IRQ_HANDLED; 2231 } 2232 2233 static u32 2234 gen11_gu_misc_irq_ack(struct drm_i915_private *i915, const u32 master_ctl) 2235 { 2236 void __iomem * const regs = i915->uncore.regs; 2237 u32 iir; 2238 2239 if (!(master_ctl & GEN11_GU_MISC_IRQ)) 2240 return 0; 2241 2242 iir = raw_reg_read(regs, GEN11_GU_MISC_IIR); 2243 if (likely(iir)) 2244 raw_reg_write(regs, GEN11_GU_MISC_IIR, iir); 2245 2246 return iir; 2247 } 2248 2249 static void 2250 gen11_gu_misc_irq_handler(struct drm_i915_private *i915, const u32 iir) 2251 { 2252 if (iir & GEN11_GU_MISC_GSE) 2253 intel_opregion_asle_intr(i915); 2254 } 2255 2256 static inline u32 gen11_master_intr_disable(void __iomem * const regs) 2257 { 2258 raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0); 2259 2260 /* 2261 * Now with master disabled, get a sample of level indications 2262 * for this interrupt. Indications will be cleared on related acks. 2263 * New indications can and will light up during processing, 2264 * and will generate new interrupt after enabling master. 2265 */ 2266 return raw_reg_read(regs, GEN11_GFX_MSTR_IRQ); 2267 } 2268 2269 static inline void gen11_master_intr_enable(void __iomem * const regs) 2270 { 2271 raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ); 2272 } 2273 2274 static void 2275 gen11_display_irq_handler(struct drm_i915_private *i915) 2276 { 2277 void __iomem * const regs = i915->uncore.regs; 2278 const u32 disp_ctl = raw_reg_read(regs, GEN11_DISPLAY_INT_CTL); 2279 2280 disable_rpm_wakeref_asserts(&i915->runtime_pm); 2281 /* 2282 * GEN11_DISPLAY_INT_CTL has same format as GEN8_MASTER_IRQ 2283 * for the display related bits. 
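* That is why the value read above can be handed straight to
* gen8_de_irq_handler() once the display interrupt is disarmed below.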
2284 */ 2285 raw_reg_write(regs, GEN11_DISPLAY_INT_CTL, 0x0); 2286 gen8_de_irq_handler(i915, disp_ctl); 2287 raw_reg_write(regs, GEN11_DISPLAY_INT_CTL, 2288 GEN11_DISPLAY_IRQ_ENABLE); 2289 2290 enable_rpm_wakeref_asserts(&i915->runtime_pm); 2291 } 2292 2293 static irqreturn_t gen11_irq_handler(int irq, void *arg) 2294 { 2295 struct drm_i915_private *i915 = arg; 2296 void __iomem * const regs = i915->uncore.regs; 2297 struct intel_gt *gt = to_gt(i915); 2298 u32 master_ctl; 2299 u32 gu_misc_iir; 2300 2301 if (!intel_irqs_enabled(i915)) 2302 return IRQ_NONE; 2303 2304 master_ctl = gen11_master_intr_disable(regs); 2305 if (!master_ctl) { 2306 gen11_master_intr_enable(regs); 2307 return IRQ_NONE; 2308 } 2309 2310 /* Find, queue (onto bottom-halves), then clear each source */ 2311 gen11_gt_irq_handler(gt, master_ctl); 2312 2313 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2314 if (master_ctl & GEN11_DISPLAY_IRQ) 2315 gen11_display_irq_handler(i915); 2316 2317 gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl); 2318 2319 gen11_master_intr_enable(regs); 2320 2321 gen11_gu_misc_irq_handler(i915, gu_misc_iir); 2322 2323 pmu_irq_stats(i915, IRQ_HANDLED); 2324 2325 return IRQ_HANDLED; 2326 } 2327 2328 static inline u32 dg1_master_intr_disable(void __iomem * const regs) 2329 { 2330 u32 val; 2331 2332 /* First disable interrupts */ 2333 raw_reg_write(regs, DG1_MSTR_TILE_INTR, 0); 2334 2335 /* Get the indication levels and ack the master unit */ 2336 val = raw_reg_read(regs, DG1_MSTR_TILE_INTR); 2337 if (unlikely(!val)) 2338 return 0; 2339 2340 raw_reg_write(regs, DG1_MSTR_TILE_INTR, val); 2341 2342 return val; 2343 } 2344 2345 static inline void dg1_master_intr_enable(void __iomem * const regs) 2346 { 2347 raw_reg_write(regs, DG1_MSTR_TILE_INTR, DG1_MSTR_IRQ); 2348 } 2349 2350 static irqreturn_t dg1_irq_handler(int irq, void *arg) 2351 { 2352 struct drm_i915_private * const i915 = arg; 2353 struct intel_gt *gt = to_gt(i915); 2354 void __iomem * const regs = gt->uncore->regs; 2355 u32 master_tile_ctl, master_ctl; 2356 u32 gu_misc_iir; 2357 2358 if (!intel_irqs_enabled(i915)) 2359 return IRQ_NONE; 2360 2361 master_tile_ctl = dg1_master_intr_disable(regs); 2362 if (!master_tile_ctl) { 2363 dg1_master_intr_enable(regs); 2364 return IRQ_NONE; 2365 } 2366 2367 /* FIXME: we only support tile 0 for now. 
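Anything reported for another tile is logged below and dropped, after re-enabling the master interrupt.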
*/ 2368 if (master_tile_ctl & DG1_MSTR_TILE(0)) { 2369 master_ctl = raw_reg_read(regs, GEN11_GFX_MSTR_IRQ); 2370 raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, master_ctl); 2371 } else { 2372 drm_err(&i915->drm, "Tile not supported: 0x%08x\n", 2373 master_tile_ctl); 2374 dg1_master_intr_enable(regs); 2375 return IRQ_NONE; 2376 } 2377 2378 gen11_gt_irq_handler(gt, master_ctl); 2379 2380 if (master_ctl & GEN11_DISPLAY_IRQ) 2381 gen11_display_irq_handler(i915); 2382 2383 gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl); 2384 2385 dg1_master_intr_enable(regs); 2386 2387 gen11_gu_misc_irq_handler(i915, gu_misc_iir); 2388 2389 pmu_irq_stats(i915, IRQ_HANDLED); 2390 2391 return IRQ_HANDLED; 2392 } 2393 2394 /* Called from drm generic code, passed 'crtc' which 2395 * we use as a pipe index 2396 */ 2397 int i8xx_enable_vblank(struct drm_crtc *crtc) 2398 { 2399 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 2400 enum pipe pipe = to_intel_crtc(crtc)->pipe; 2401 unsigned long irqflags; 2402 2403 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2404 i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS); 2405 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2406 2407 return 0; 2408 } 2409 2410 int i915gm_enable_vblank(struct drm_crtc *crtc) 2411 { 2412 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 2413 2414 /* 2415 * Vblank interrupts fail to wake the device up from C2+. 2416 * Disabling render clock gating during C-states avoids 2417 * the problem. There is a small power cost so we do this 2418 * only when vblank interrupts are actually enabled. 2419 */ 2420 if (dev_priv->vblank_enabled++ == 0) 2421 intel_uncore_write(&dev_priv->uncore, SCPD0, _MASKED_BIT_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE)); 2422 2423 return i8xx_enable_vblank(crtc); 2424 } 2425 2426 int i965_enable_vblank(struct drm_crtc *crtc) 2427 { 2428 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 2429 enum pipe pipe = to_intel_crtc(crtc)->pipe; 2430 unsigned long irqflags; 2431 2432 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2433 i915_enable_pipestat(dev_priv, pipe, 2434 PIPE_START_VBLANK_INTERRUPT_STATUS); 2435 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2436 2437 return 0; 2438 } 2439 2440 int ilk_enable_vblank(struct drm_crtc *crtc) 2441 { 2442 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 2443 enum pipe pipe = to_intel_crtc(crtc)->pipe; 2444 unsigned long irqflags; 2445 u32 bit = DISPLAY_VER(dev_priv) >= 7 ? 2446 DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe); 2447 2448 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2449 ilk_enable_display_irq(dev_priv, bit); 2450 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2451 2452 /* Even though there is no DMC, frame counter can get stuck when 2453 * PSR is active as no frames are generated. 2454 */ 2455 if (HAS_PSR(dev_priv)) 2456 drm_crtc_vblank_restore(crtc); 2457 2458 return 0; 2459 } 2460 2461 static bool gen11_dsi_configure_te(struct intel_crtc *intel_crtc, 2462 bool enable) 2463 { 2464 struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev); 2465 enum port port; 2466 2467 if (!(intel_crtc->mode_flags & 2468 (I915_MODE_FLAG_DSI_USE_TE1 | I915_MODE_FLAG_DSI_USE_TE0))) 2469 return false; 2470 2471 /* for dual link cases we consider TE from slave */ 2472 if (intel_crtc->mode_flags & I915_MODE_FLAG_DSI_USE_TE1) 2473 port = PORT_B; 2474 else 2475 port = PORT_A; 2476 2477 intel_uncore_rmw(&dev_priv->uncore, DSI_INTR_MASK_REG(port), DSI_TE_EVENT, 2478 enable ? 
0 : DSI_TE_EVENT); 2479 2480 intel_uncore_rmw(&dev_priv->uncore, DSI_INTR_IDENT_REG(port), 0, 0); 2481 2482 return true; 2483 } 2484 2485 int bdw_enable_vblank(struct drm_crtc *_crtc) 2486 { 2487 struct intel_crtc *crtc = to_intel_crtc(_crtc); 2488 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 2489 enum pipe pipe = crtc->pipe; 2490 unsigned long irqflags; 2491 2492 if (gen11_dsi_configure_te(crtc, true)) 2493 return 0; 2494 2495 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2496 bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK); 2497 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2498 2499 /* Even if there is no DMC, frame counter can get stuck when 2500 * PSR is active as no frames are generated, so check only for PSR. 2501 */ 2502 if (HAS_PSR(dev_priv)) 2503 drm_crtc_vblank_restore(&crtc->base); 2504 2505 return 0; 2506 } 2507 2508 /* Called from drm generic code, passed 'crtc' which 2509 * we use as a pipe index 2510 */ 2511 void i8xx_disable_vblank(struct drm_crtc *crtc) 2512 { 2513 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 2514 enum pipe pipe = to_intel_crtc(crtc)->pipe; 2515 unsigned long irqflags; 2516 2517 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2518 i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS); 2519 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2520 } 2521 2522 void i915gm_disable_vblank(struct drm_crtc *crtc) 2523 { 2524 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 2525 2526 i8xx_disable_vblank(crtc); 2527 2528 if (--dev_priv->vblank_enabled == 0) 2529 intel_uncore_write(&dev_priv->uncore, SCPD0, _MASKED_BIT_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE)); 2530 } 2531 2532 void i965_disable_vblank(struct drm_crtc *crtc) 2533 { 2534 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 2535 enum pipe pipe = to_intel_crtc(crtc)->pipe; 2536 unsigned long irqflags; 2537 2538 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2539 i915_disable_pipestat(dev_priv, pipe, 2540 PIPE_START_VBLANK_INTERRUPT_STATUS); 2541 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2542 } 2543 2544 void ilk_disable_vblank(struct drm_crtc *crtc) 2545 { 2546 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 2547 enum pipe pipe = to_intel_crtc(crtc)->pipe; 2548 unsigned long irqflags; 2549 u32 bit = DISPLAY_VER(dev_priv) >= 7 ? 
2550 DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe); 2551 2552 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2553 ilk_disable_display_irq(dev_priv, bit); 2554 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2555 } 2556 2557 void bdw_disable_vblank(struct drm_crtc *_crtc) 2558 { 2559 struct intel_crtc *crtc = to_intel_crtc(_crtc); 2560 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 2561 enum pipe pipe = crtc->pipe; 2562 unsigned long irqflags; 2563 2564 if (gen11_dsi_configure_te(crtc, false)) 2565 return; 2566 2567 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2568 bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK); 2569 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2570 } 2571 2572 static void ibx_irq_reset(struct drm_i915_private *dev_priv) 2573 { 2574 struct intel_uncore *uncore = &dev_priv->uncore; 2575 2576 if (HAS_PCH_NOP(dev_priv)) 2577 return; 2578 2579 GEN3_IRQ_RESET(uncore, SDE); 2580 2581 if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv)) 2582 intel_uncore_write(&dev_priv->uncore, SERR_INT, 0xffffffff); 2583 } 2584 2585 static void vlv_display_irq_reset(struct drm_i915_private *dev_priv) 2586 { 2587 struct intel_uncore *uncore = &dev_priv->uncore; 2588 2589 if (IS_CHERRYVIEW(dev_priv)) 2590 intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_CHV); 2591 else 2592 intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_VLV); 2593 2594 i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0); 2595 intel_uncore_rmw(uncore, PORT_HOTPLUG_STAT, 0, 0); 2596 2597 i9xx_pipestat_irq_reset(dev_priv); 2598 2599 GEN3_IRQ_RESET(uncore, VLV_); 2600 dev_priv->irq_mask = ~0u; 2601 } 2602 2603 static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv) 2604 { 2605 struct intel_uncore *uncore = &dev_priv->uncore; 2606 2607 u32 pipestat_mask; 2608 u32 enable_mask; 2609 enum pipe pipe; 2610 2611 pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS; 2612 2613 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); 2614 for_each_pipe(dev_priv, pipe) 2615 i915_enable_pipestat(dev_priv, pipe, pipestat_mask); 2616 2617 enable_mask = I915_DISPLAY_PORT_INTERRUPT | 2618 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 2619 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 2620 I915_LPE_PIPE_A_INTERRUPT | 2621 I915_LPE_PIPE_B_INTERRUPT; 2622 2623 if (IS_CHERRYVIEW(dev_priv)) 2624 enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT | 2625 I915_LPE_PIPE_C_INTERRUPT; 2626 2627 drm_WARN_ON(&dev_priv->drm, dev_priv->irq_mask != ~0u); 2628 2629 dev_priv->irq_mask = ~enable_mask; 2630 2631 GEN3_IRQ_INIT(uncore, VLV_, dev_priv->irq_mask, enable_mask); 2632 } 2633 2634 /* drm_dma.h hooks 2635 */ 2636 static void ilk_irq_reset(struct drm_i915_private *dev_priv) 2637 { 2638 struct intel_uncore *uncore = &dev_priv->uncore; 2639 2640 GEN3_IRQ_RESET(uncore, DE); 2641 dev_priv->irq_mask = ~0u; 2642 2643 if (GRAPHICS_VER(dev_priv) == 7) 2644 intel_uncore_write(uncore, GEN7_ERR_INT, 0xffffffff); 2645 2646 if (IS_HASWELL(dev_priv)) { 2647 intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff); 2648 intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff); 2649 } 2650 2651 gen5_gt_irq_reset(to_gt(dev_priv)); 2652 2653 ibx_irq_reset(dev_priv); 2654 } 2655 2656 static void valleyview_irq_reset(struct drm_i915_private *dev_priv) 2657 { 2658 intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, 0); 2659 intel_uncore_posting_read(&dev_priv->uncore, VLV_MASTER_IER); 2660 2661 gen5_gt_irq_reset(to_gt(dev_priv)); 2662 2663 spin_lock_irq(&dev_priv->irq_lock); 2664 if 
(dev_priv->display_irqs_enabled) 2665 vlv_display_irq_reset(dev_priv); 2666 spin_unlock_irq(&dev_priv->irq_lock); 2667 } 2668 2669 static void gen8_display_irq_reset(struct drm_i915_private *dev_priv) 2670 { 2671 struct intel_uncore *uncore = &dev_priv->uncore; 2672 enum pipe pipe; 2673 2674 if (!HAS_DISPLAY(dev_priv)) 2675 return; 2676 2677 intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff); 2678 intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff); 2679 2680 for_each_pipe(dev_priv, pipe) 2681 if (intel_display_power_is_enabled(dev_priv, 2682 POWER_DOMAIN_PIPE(pipe))) 2683 GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe); 2684 2685 GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_); 2686 GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_); 2687 } 2688 2689 static void gen8_irq_reset(struct drm_i915_private *dev_priv) 2690 { 2691 struct intel_uncore *uncore = &dev_priv->uncore; 2692 2693 gen8_master_intr_disable(uncore->regs); 2694 2695 gen8_gt_irq_reset(to_gt(dev_priv)); 2696 gen8_display_irq_reset(dev_priv); 2697 GEN3_IRQ_RESET(uncore, GEN8_PCU_); 2698 2699 if (HAS_PCH_SPLIT(dev_priv)) 2700 ibx_irq_reset(dev_priv); 2701 2702 } 2703 2704 static void gen11_display_irq_reset(struct drm_i915_private *dev_priv) 2705 { 2706 struct intel_uncore *uncore = &dev_priv->uncore; 2707 enum pipe pipe; 2708 u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | 2709 BIT(TRANSCODER_C) | BIT(TRANSCODER_D); 2710 2711 if (!HAS_DISPLAY(dev_priv)) 2712 return; 2713 2714 intel_uncore_write(uncore, GEN11_DISPLAY_INT_CTL, 0); 2715 2716 if (DISPLAY_VER(dev_priv) >= 12) { 2717 enum transcoder trans; 2718 2719 for_each_cpu_transcoder_masked(dev_priv, trans, trans_mask) { 2720 enum intel_display_power_domain domain; 2721 2722 domain = POWER_DOMAIN_TRANSCODER(trans); 2723 if (!intel_display_power_is_enabled(dev_priv, domain)) 2724 continue; 2725 2726 intel_uncore_write(uncore, TRANS_PSR_IMR(trans), 0xffffffff); 2727 intel_uncore_write(uncore, TRANS_PSR_IIR(trans), 0xffffffff); 2728 } 2729 } else { 2730 intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff); 2731 intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff); 2732 } 2733 2734 for_each_pipe(dev_priv, pipe) 2735 if (intel_display_power_is_enabled(dev_priv, 2736 POWER_DOMAIN_PIPE(pipe))) 2737 GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe); 2738 2739 GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_); 2740 GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_); 2741 GEN3_IRQ_RESET(uncore, GEN11_DE_HPD_); 2742 2743 if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) 2744 GEN3_IRQ_RESET(uncore, SDE); 2745 } 2746 2747 static void gen11_irq_reset(struct drm_i915_private *dev_priv) 2748 { 2749 struct intel_gt *gt = to_gt(dev_priv); 2750 struct intel_uncore *uncore = gt->uncore; 2751 2752 gen11_master_intr_disable(dev_priv->uncore.regs); 2753 2754 gen11_gt_irq_reset(gt); 2755 gen11_display_irq_reset(dev_priv); 2756 2757 GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_); 2758 GEN3_IRQ_RESET(uncore, GEN8_PCU_); 2759 } 2760 2761 static void dg1_irq_reset(struct drm_i915_private *dev_priv) 2762 { 2763 struct intel_gt *gt = to_gt(dev_priv); 2764 struct intel_uncore *uncore = gt->uncore; 2765 2766 dg1_master_intr_disable(dev_priv->uncore.regs); 2767 2768 gen11_gt_irq_reset(gt); 2769 gen11_display_irq_reset(dev_priv); 2770 2771 GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_); 2772 GEN3_IRQ_RESET(uncore, GEN8_PCU_); 2773 } 2774 2775 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv, 2776 u8 pipe_mask) 2777 { 2778 struct intel_uncore *uncore = &dev_priv->uncore; 2779 u32 extra_ier = GEN8_PIPE_VBLANK | 2780 gen8_de_pipe_underrun_mask(dev_priv) | 2781 
gen8_de_pipe_flip_done_mask(dev_priv); 2782 enum pipe pipe; 2783 2784 spin_lock_irq(&dev_priv->irq_lock); 2785 2786 if (!intel_irqs_enabled(dev_priv)) { 2787 spin_unlock_irq(&dev_priv->irq_lock); 2788 return; 2789 } 2790 2791 for_each_pipe_masked(dev_priv, pipe, pipe_mask) 2792 GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe, 2793 dev_priv->de_irq_mask[pipe], 2794 ~dev_priv->de_irq_mask[pipe] | extra_ier); 2795 2796 spin_unlock_irq(&dev_priv->irq_lock); 2797 } 2798 2799 void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv, 2800 u8 pipe_mask) 2801 { 2802 struct intel_uncore *uncore = &dev_priv->uncore; 2803 enum pipe pipe; 2804 2805 spin_lock_irq(&dev_priv->irq_lock); 2806 2807 if (!intel_irqs_enabled(dev_priv)) { 2808 spin_unlock_irq(&dev_priv->irq_lock); 2809 return; 2810 } 2811 2812 for_each_pipe_masked(dev_priv, pipe, pipe_mask) 2813 GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe); 2814 2815 spin_unlock_irq(&dev_priv->irq_lock); 2816 2817 /* make sure we're done processing display irqs */ 2818 intel_synchronize_irq(dev_priv); 2819 } 2820 2821 static void cherryview_irq_reset(struct drm_i915_private *dev_priv) 2822 { 2823 struct intel_uncore *uncore = &dev_priv->uncore; 2824 2825 intel_uncore_write(uncore, GEN8_MASTER_IRQ, 0); 2826 intel_uncore_posting_read(&dev_priv->uncore, GEN8_MASTER_IRQ); 2827 2828 gen8_gt_irq_reset(to_gt(dev_priv)); 2829 2830 GEN3_IRQ_RESET(uncore, GEN8_PCU_); 2831 2832 spin_lock_irq(&dev_priv->irq_lock); 2833 if (dev_priv->display_irqs_enabled) 2834 vlv_display_irq_reset(dev_priv); 2835 spin_unlock_irq(&dev_priv->irq_lock); 2836 } 2837 2838 static u32 ibx_hotplug_enables(struct intel_encoder *encoder) 2839 { 2840 struct drm_i915_private *i915 = to_i915(encoder->base.dev); 2841 2842 switch (encoder->hpd_pin) { 2843 case HPD_PORT_A: 2844 /* 2845 * When CPU and PCH are on the same package, port A 2846 * HPD must be enabled in both north and south. 2847 */ 2848 return HAS_PCH_LPT_LP(i915) ? 2849 PORTA_HOTPLUG_ENABLE : 0; 2850 case HPD_PORT_B: 2851 return PORTB_HOTPLUG_ENABLE | 2852 PORTB_PULSE_DURATION_2ms; 2853 case HPD_PORT_C: 2854 return PORTC_HOTPLUG_ENABLE | 2855 PORTC_PULSE_DURATION_2ms; 2856 case HPD_PORT_D: 2857 return PORTD_HOTPLUG_ENABLE | 2858 PORTD_PULSE_DURATION_2ms; 2859 default: 2860 return 0; 2861 } 2862 } 2863 2864 static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv) 2865 { 2866 /* 2867 * Enable digital hotplug on the PCH, and configure the DP short pulse 2868 * duration to 2ms (which is the minimum in the Display Port spec). 2869 * The pulse duration bits are reserved on LPT+. 
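* The rmw below first clears all the enable and pulse duration fields and
* then applies the per-encoder values computed by ibx_hotplug_enables().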
2870 */ 2871 intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG, 2872 PORTA_HOTPLUG_ENABLE | 2873 PORTB_HOTPLUG_ENABLE | 2874 PORTC_HOTPLUG_ENABLE | 2875 PORTD_HOTPLUG_ENABLE | 2876 PORTB_PULSE_DURATION_MASK | 2877 PORTC_PULSE_DURATION_MASK | 2878 PORTD_PULSE_DURATION_MASK, 2879 intel_hpd_hotplug_enables(dev_priv, ibx_hotplug_enables)); 2880 } 2881 2882 static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv) 2883 { 2884 u32 hotplug_irqs, enabled_irqs; 2885 2886 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd); 2887 hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd); 2888 2889 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 2890 2891 ibx_hpd_detection_setup(dev_priv); 2892 } 2893 2894 static u32 icp_ddi_hotplug_enables(struct intel_encoder *encoder) 2895 { 2896 switch (encoder->hpd_pin) { 2897 case HPD_PORT_A: 2898 case HPD_PORT_B: 2899 case HPD_PORT_C: 2900 case HPD_PORT_D: 2901 return SHOTPLUG_CTL_DDI_HPD_ENABLE(encoder->hpd_pin); 2902 default: 2903 return 0; 2904 } 2905 } 2906 2907 static u32 icp_tc_hotplug_enables(struct intel_encoder *encoder) 2908 { 2909 switch (encoder->hpd_pin) { 2910 case HPD_PORT_TC1: 2911 case HPD_PORT_TC2: 2912 case HPD_PORT_TC3: 2913 case HPD_PORT_TC4: 2914 case HPD_PORT_TC5: 2915 case HPD_PORT_TC6: 2916 return ICP_TC_HPD_ENABLE(encoder->hpd_pin); 2917 default: 2918 return 0; 2919 } 2920 } 2921 2922 static void icp_ddi_hpd_detection_setup(struct drm_i915_private *dev_priv) 2923 { 2924 intel_uncore_rmw(&dev_priv->uncore, SHOTPLUG_CTL_DDI, 2925 SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_A) | 2926 SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_B) | 2927 SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_C) | 2928 SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_D), 2929 intel_hpd_hotplug_enables(dev_priv, icp_ddi_hotplug_enables)); 2930 } 2931 2932 static void icp_tc_hpd_detection_setup(struct drm_i915_private *dev_priv) 2933 { 2934 intel_uncore_rmw(&dev_priv->uncore, SHOTPLUG_CTL_TC, 2935 ICP_TC_HPD_ENABLE(HPD_PORT_TC1) | 2936 ICP_TC_HPD_ENABLE(HPD_PORT_TC2) | 2937 ICP_TC_HPD_ENABLE(HPD_PORT_TC3) | 2938 ICP_TC_HPD_ENABLE(HPD_PORT_TC4) | 2939 ICP_TC_HPD_ENABLE(HPD_PORT_TC5) | 2940 ICP_TC_HPD_ENABLE(HPD_PORT_TC6), 2941 intel_hpd_hotplug_enables(dev_priv, icp_tc_hotplug_enables)); 2942 } 2943 2944 static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv) 2945 { 2946 u32 hotplug_irqs, enabled_irqs; 2947 2948 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd); 2949 hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd); 2950 2951 if (INTEL_PCH_TYPE(dev_priv) <= PCH_TGP) 2952 intel_uncore_write(&dev_priv->uncore, SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ); 2953 2954 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 2955 2956 icp_ddi_hpd_detection_setup(dev_priv); 2957 icp_tc_hpd_detection_setup(dev_priv); 2958 } 2959 2960 static u32 gen11_hotplug_enables(struct intel_encoder *encoder) 2961 { 2962 switch (encoder->hpd_pin) { 2963 case HPD_PORT_TC1: 2964 case HPD_PORT_TC2: 2965 case HPD_PORT_TC3: 2966 case HPD_PORT_TC4: 2967 case HPD_PORT_TC5: 2968 case HPD_PORT_TC6: 2969 return GEN11_HOTPLUG_CTL_ENABLE(encoder->hpd_pin); 2970 default: 2971 return 0; 2972 } 2973 } 2974 2975 static void dg1_hpd_invert(struct drm_i915_private *i915) 2976 { 2977 u32 val = (INVERT_DDIA_HPD | 2978 INVERT_DDIB_HPD | 2979 INVERT_DDIC_HPD | 2980 INVERT_DDID_HPD); 2981 intel_uncore_rmw(&i915->uncore, SOUTH_CHICKEN1, 0, val); 2982 } 2983 2984 static void 
dg1_hpd_irq_setup(struct drm_i915_private *dev_priv) 2985 { 2986 dg1_hpd_invert(dev_priv); 2987 icp_hpd_irq_setup(dev_priv); 2988 } 2989 2990 static void gen11_tc_hpd_detection_setup(struct drm_i915_private *dev_priv) 2991 { 2992 intel_uncore_rmw(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL, 2993 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC1) | 2994 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC2) | 2995 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC3) | 2996 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC4) | 2997 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC5) | 2998 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC6), 2999 intel_hpd_hotplug_enables(dev_priv, gen11_hotplug_enables)); 3000 } 3001 3002 static void gen11_tbt_hpd_detection_setup(struct drm_i915_private *dev_priv) 3003 { 3004 intel_uncore_rmw(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL, 3005 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC1) | 3006 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC2) | 3007 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC3) | 3008 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC4) | 3009 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC5) | 3010 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC6), 3011 intel_hpd_hotplug_enables(dev_priv, gen11_hotplug_enables)); 3012 } 3013 3014 static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv) 3015 { 3016 u32 hotplug_irqs, enabled_irqs; 3017 3018 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.hpd); 3019 hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.hpd); 3020 3021 intel_uncore_rmw(&dev_priv->uncore, GEN11_DE_HPD_IMR, hotplug_irqs, 3022 ~enabled_irqs & hotplug_irqs); 3023 intel_uncore_posting_read(&dev_priv->uncore, GEN11_DE_HPD_IMR); 3024 3025 gen11_tc_hpd_detection_setup(dev_priv); 3026 gen11_tbt_hpd_detection_setup(dev_priv); 3027 3028 if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) 3029 icp_hpd_irq_setup(dev_priv); 3030 } 3031 3032 static u32 spt_hotplug_enables(struct intel_encoder *encoder) 3033 { 3034 switch (encoder->hpd_pin) { 3035 case HPD_PORT_A: 3036 return PORTA_HOTPLUG_ENABLE; 3037 case HPD_PORT_B: 3038 return PORTB_HOTPLUG_ENABLE; 3039 case HPD_PORT_C: 3040 return PORTC_HOTPLUG_ENABLE; 3041 case HPD_PORT_D: 3042 return PORTD_HOTPLUG_ENABLE; 3043 default: 3044 return 0; 3045 } 3046 } 3047 3048 static u32 spt_hotplug2_enables(struct intel_encoder *encoder) 3049 { 3050 switch (encoder->hpd_pin) { 3051 case HPD_PORT_E: 3052 return PORTE_HOTPLUG_ENABLE; 3053 default: 3054 return 0; 3055 } 3056 } 3057 3058 static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv) 3059 { 3060 /* Display WA #1179 WaHardHangonHotPlug: cnp */ 3061 if (HAS_PCH_CNP(dev_priv)) { 3062 intel_uncore_rmw(&dev_priv->uncore, SOUTH_CHICKEN1, CHASSIS_CLK_REQ_DURATION_MASK, 3063 CHASSIS_CLK_REQ_DURATION(0xf)); 3064 } 3065 3066 /* Enable digital hotplug on the PCH */ 3067 intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG, 3068 PORTA_HOTPLUG_ENABLE | 3069 PORTB_HOTPLUG_ENABLE | 3070 PORTC_HOTPLUG_ENABLE | 3071 PORTD_HOTPLUG_ENABLE, 3072 intel_hpd_hotplug_enables(dev_priv, spt_hotplug_enables)); 3073 3074 intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG2, PORTE_HOTPLUG_ENABLE, 3075 intel_hpd_hotplug_enables(dev_priv, spt_hotplug2_enables)); 3076 } 3077 3078 static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv) 3079 { 3080 u32 hotplug_irqs, enabled_irqs; 3081 3082 if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP) 3083 intel_uncore_write(&dev_priv->uncore, SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ); 3084 3085 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd); 3086 hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, 
dev_priv->display.hotplug.pch_hpd); 3087 3088 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 3089 3090 spt_hpd_detection_setup(dev_priv); 3091 } 3092 3093 static u32 ilk_hotplug_enables(struct intel_encoder *encoder) 3094 { 3095 switch (encoder->hpd_pin) { 3096 case HPD_PORT_A: 3097 return DIGITAL_PORTA_HOTPLUG_ENABLE | 3098 DIGITAL_PORTA_PULSE_DURATION_2ms; 3099 default: 3100 return 0; 3101 } 3102 } 3103 3104 static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv) 3105 { 3106 /* 3107 * Enable digital hotplug on the CPU, and configure the DP short pulse 3108 * duration to 2ms (which is the minimum in the Display Port spec) 3109 * The pulse duration bits are reserved on HSW+. 3110 */ 3111 intel_uncore_rmw(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL, 3112 DIGITAL_PORTA_HOTPLUG_ENABLE | DIGITAL_PORTA_PULSE_DURATION_MASK, 3113 intel_hpd_hotplug_enables(dev_priv, ilk_hotplug_enables)); 3114 } 3115 3116 static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv) 3117 { 3118 u32 hotplug_irqs, enabled_irqs; 3119 3120 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.hpd); 3121 hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.hpd); 3122 3123 if (DISPLAY_VER(dev_priv) >= 8) 3124 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs); 3125 else 3126 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs); 3127 3128 ilk_hpd_detection_setup(dev_priv); 3129 3130 ibx_hpd_irq_setup(dev_priv); 3131 } 3132 3133 static u32 bxt_hotplug_enables(struct intel_encoder *encoder) 3134 { 3135 u32 hotplug; 3136 3137 switch (encoder->hpd_pin) { 3138 case HPD_PORT_A: 3139 hotplug = PORTA_HOTPLUG_ENABLE; 3140 if (intel_bios_encoder_hpd_invert(encoder->devdata)) 3141 hotplug |= BXT_DDIA_HPD_INVERT; 3142 return hotplug; 3143 case HPD_PORT_B: 3144 hotplug = PORTB_HOTPLUG_ENABLE; 3145 if (intel_bios_encoder_hpd_invert(encoder->devdata)) 3146 hotplug |= BXT_DDIB_HPD_INVERT; 3147 return hotplug; 3148 case HPD_PORT_C: 3149 hotplug = PORTC_HOTPLUG_ENABLE; 3150 if (intel_bios_encoder_hpd_invert(encoder->devdata)) 3151 hotplug |= BXT_DDIC_HPD_INVERT; 3152 return hotplug; 3153 default: 3154 return 0; 3155 } 3156 } 3157 3158 static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv) 3159 { 3160 intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG, 3161 PORTA_HOTPLUG_ENABLE | 3162 PORTB_HOTPLUG_ENABLE | 3163 PORTC_HOTPLUG_ENABLE | 3164 BXT_DDI_HPD_INVERT_MASK, 3165 intel_hpd_hotplug_enables(dev_priv, bxt_hotplug_enables)); 3166 } 3167 3168 static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv) 3169 { 3170 u32 hotplug_irqs, enabled_irqs; 3171 3172 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.hpd); 3173 hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.hpd); 3174 3175 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs); 3176 3177 bxt_hpd_detection_setup(dev_priv); 3178 } 3179 3180 /* 3181 * SDEIER is also touched by the interrupt handler to work around missed PCH 3182 * interrupts. Hence we can't update it after the interrupt handler is enabled - 3183 * instead we unconditionally enable all PCH interrupt sources here, but then 3184 * only unmask them as needed with SDEIMR. 3185 * 3186 * Note that we currently do this after installing the interrupt handler, 3187 * but before we enable the master interrupt. That should be sufficient 3188 * to avoid races with the irq handler, assuming we have MSI. Shared legacy 3189 * interrupts could still race. 
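* Concretely: SDEIER is written once to 0xffffffff below and never touched
* again; all further masking is done purely through SDEIMR.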
3190 */ 3191 static void ibx_irq_postinstall(struct drm_i915_private *dev_priv) 3192 { 3193 struct intel_uncore *uncore = &dev_priv->uncore; 3194 u32 mask; 3195 3196 if (HAS_PCH_NOP(dev_priv)) 3197 return; 3198 3199 if (HAS_PCH_IBX(dev_priv)) 3200 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON; 3201 else if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv)) 3202 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT; 3203 else 3204 mask = SDE_GMBUS_CPT; 3205 3206 GEN3_IRQ_INIT(uncore, SDE, ~mask, 0xffffffff); 3207 } 3208 3209 static void ilk_irq_postinstall(struct drm_i915_private *dev_priv) 3210 { 3211 struct intel_uncore *uncore = &dev_priv->uncore; 3212 u32 display_mask, extra_mask; 3213 3214 if (GRAPHICS_VER(dev_priv) >= 7) { 3215 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | 3216 DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB); 3217 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB | 3218 DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB | 3219 DE_PLANE_FLIP_DONE_IVB(PLANE_C) | 3220 DE_PLANE_FLIP_DONE_IVB(PLANE_B) | 3221 DE_PLANE_FLIP_DONE_IVB(PLANE_A) | 3222 DE_DP_A_HOTPLUG_IVB); 3223 } else { 3224 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | 3225 DE_AUX_CHANNEL_A | DE_PIPEB_CRC_DONE | 3226 DE_PIPEA_CRC_DONE | DE_POISON); 3227 extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | 3228 DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN | 3229 DE_PLANE_FLIP_DONE(PLANE_A) | 3230 DE_PLANE_FLIP_DONE(PLANE_B) | 3231 DE_DP_A_HOTPLUG); 3232 } 3233 3234 if (IS_HASWELL(dev_priv)) { 3235 gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR); 3236 display_mask |= DE_EDP_PSR_INT_HSW; 3237 } 3238 3239 if (IS_IRONLAKE_M(dev_priv)) 3240 extra_mask |= DE_PCU_EVENT; 3241 3242 dev_priv->irq_mask = ~display_mask; 3243 3244 ibx_irq_postinstall(dev_priv); 3245 3246 gen5_gt_irq_postinstall(to_gt(dev_priv)); 3247 3248 GEN3_IRQ_INIT(uncore, DE, dev_priv->irq_mask, 3249 display_mask | extra_mask); 3250 } 3251 3252 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv) 3253 { 3254 lockdep_assert_held(&dev_priv->irq_lock); 3255 3256 if (dev_priv->display_irqs_enabled) 3257 return; 3258 3259 dev_priv->display_irqs_enabled = true; 3260 3261 if (intel_irqs_enabled(dev_priv)) { 3262 vlv_display_irq_reset(dev_priv); 3263 vlv_display_irq_postinstall(dev_priv); 3264 } 3265 } 3266 3267 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv) 3268 { 3269 lockdep_assert_held(&dev_priv->irq_lock); 3270 3271 if (!dev_priv->display_irqs_enabled) 3272 return; 3273 3274 dev_priv->display_irqs_enabled = false; 3275 3276 if (intel_irqs_enabled(dev_priv)) 3277 vlv_display_irq_reset(dev_priv); 3278 } 3279 3280 3281 static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv) 3282 { 3283 gen5_gt_irq_postinstall(to_gt(dev_priv)); 3284 3285 spin_lock_irq(&dev_priv->irq_lock); 3286 if (dev_priv->display_irqs_enabled) 3287 vlv_display_irq_postinstall(dev_priv); 3288 spin_unlock_irq(&dev_priv->irq_lock); 3289 3290 intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); 3291 intel_uncore_posting_read(&dev_priv->uncore, VLV_MASTER_IER); 3292 } 3293 3294 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) 3295 { 3296 struct intel_uncore *uncore = &dev_priv->uncore; 3297 3298 u32 de_pipe_masked = gen8_de_pipe_fault_mask(dev_priv) | 3299 GEN8_PIPE_CDCLK_CRC_DONE; 3300 u32 de_pipe_enables; 3301 u32 de_port_masked = gen8_de_port_aux_mask(dev_priv); 3302 u32 de_port_enables; 3303 u32 de_misc_masked = GEN8_DE_EDP_PSR; 3304 u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | 
3305 BIT(TRANSCODER_C) | BIT(TRANSCODER_D); 3306 enum pipe pipe; 3307 3308 if (!HAS_DISPLAY(dev_priv)) 3309 return; 3310 3311 if (DISPLAY_VER(dev_priv) <= 10) 3312 de_misc_masked |= GEN8_DE_MISC_GSE; 3313 3314 if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) 3315 de_port_masked |= BXT_DE_PORT_GMBUS; 3316 3317 if (DISPLAY_VER(dev_priv) >= 11) { 3318 enum port port; 3319 3320 if (intel_bios_is_dsi_present(dev_priv, &port)) 3321 de_port_masked |= DSI0_TE | DSI1_TE; 3322 } 3323 3324 de_pipe_enables = de_pipe_masked | 3325 GEN8_PIPE_VBLANK | 3326 gen8_de_pipe_underrun_mask(dev_priv) | 3327 gen8_de_pipe_flip_done_mask(dev_priv); 3328 3329 de_port_enables = de_port_masked; 3330 if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) 3331 de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK; 3332 else if (IS_BROADWELL(dev_priv)) 3333 de_port_enables |= BDW_DE_PORT_HOTPLUG_MASK; 3334 3335 if (DISPLAY_VER(dev_priv) >= 12) { 3336 enum transcoder trans; 3337 3338 for_each_cpu_transcoder_masked(dev_priv, trans, trans_mask) { 3339 enum intel_display_power_domain domain; 3340 3341 domain = POWER_DOMAIN_TRANSCODER(trans); 3342 if (!intel_display_power_is_enabled(dev_priv, domain)) 3343 continue; 3344 3345 gen3_assert_iir_is_zero(uncore, TRANS_PSR_IIR(trans)); 3346 } 3347 } else { 3348 gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR); 3349 } 3350 3351 for_each_pipe(dev_priv, pipe) { 3352 dev_priv->de_irq_mask[pipe] = ~de_pipe_masked; 3353 3354 if (intel_display_power_is_enabled(dev_priv, 3355 POWER_DOMAIN_PIPE(pipe))) 3356 GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe, 3357 dev_priv->de_irq_mask[pipe], 3358 de_pipe_enables); 3359 } 3360 3361 GEN3_IRQ_INIT(uncore, GEN8_DE_PORT_, ~de_port_masked, de_port_enables); 3362 GEN3_IRQ_INIT(uncore, GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked); 3363 3364 if (DISPLAY_VER(dev_priv) >= 11) { 3365 u32 de_hpd_masked = 0; 3366 u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK | 3367 GEN11_DE_TBT_HOTPLUG_MASK; 3368 3369 GEN3_IRQ_INIT(uncore, GEN11_DE_HPD_, ~de_hpd_masked, 3370 de_hpd_enables); 3371 } 3372 } 3373 3374 static void icp_irq_postinstall(struct drm_i915_private *dev_priv) 3375 { 3376 struct intel_uncore *uncore = &dev_priv->uncore; 3377 u32 mask = SDE_GMBUS_ICP; 3378 3379 GEN3_IRQ_INIT(uncore, SDE, ~mask, 0xffffffff); 3380 } 3381 3382 static void gen8_irq_postinstall(struct drm_i915_private *dev_priv) 3383 { 3384 if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) 3385 icp_irq_postinstall(dev_priv); 3386 else if (HAS_PCH_SPLIT(dev_priv)) 3387 ibx_irq_postinstall(dev_priv); 3388 3389 gen8_gt_irq_postinstall(to_gt(dev_priv)); 3390 gen8_de_irq_postinstall(dev_priv); 3391 3392 gen8_master_intr_enable(dev_priv->uncore.regs); 3393 } 3394 3395 static void gen11_de_irq_postinstall(struct drm_i915_private *dev_priv) 3396 { 3397 if (!HAS_DISPLAY(dev_priv)) 3398 return; 3399 3400 gen8_de_irq_postinstall(dev_priv); 3401 3402 intel_uncore_write(&dev_priv->uncore, GEN11_DISPLAY_INT_CTL, 3403 GEN11_DISPLAY_IRQ_ENABLE); 3404 } 3405 3406 static void gen11_irq_postinstall(struct drm_i915_private *dev_priv) 3407 { 3408 struct intel_gt *gt = to_gt(dev_priv); 3409 struct intel_uncore *uncore = gt->uncore; 3410 u32 gu_misc_masked = GEN11_GU_MISC_GSE; 3411 3412 if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) 3413 icp_irq_postinstall(dev_priv); 3414 3415 gen11_gt_irq_postinstall(gt); 3416 gen11_de_irq_postinstall(dev_priv); 3417 3418 GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked); 3419 3420 gen11_master_intr_enable(uncore->regs); 3421 intel_uncore_posting_read(&dev_priv->uncore, GEN11_GFX_MSTR_IRQ); 
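/* the posting read above flushes the master enable to the hardware before we return */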
3422 } 3423 3424 static void dg1_irq_postinstall(struct drm_i915_private *dev_priv) 3425 { 3426 struct intel_gt *gt = to_gt(dev_priv); 3427 struct intel_uncore *uncore = gt->uncore; 3428 u32 gu_misc_masked = GEN11_GU_MISC_GSE; 3429 3430 gen11_gt_irq_postinstall(gt); 3431 3432 GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked); 3433 3434 if (HAS_DISPLAY(dev_priv)) { 3435 icp_irq_postinstall(dev_priv); 3436 gen8_de_irq_postinstall(dev_priv); 3437 intel_uncore_write(&dev_priv->uncore, GEN11_DISPLAY_INT_CTL, 3438 GEN11_DISPLAY_IRQ_ENABLE); 3439 } 3440 3441 dg1_master_intr_enable(uncore->regs); 3442 intel_uncore_posting_read(uncore, DG1_MSTR_TILE_INTR); 3443 } 3444 3445 static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv) 3446 { 3447 gen8_gt_irq_postinstall(to_gt(dev_priv)); 3448 3449 spin_lock_irq(&dev_priv->irq_lock); 3450 if (dev_priv->display_irqs_enabled) 3451 vlv_display_irq_postinstall(dev_priv); 3452 spin_unlock_irq(&dev_priv->irq_lock); 3453 3454 intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 3455 intel_uncore_posting_read(&dev_priv->uncore, GEN8_MASTER_IRQ); 3456 } 3457 3458 static void i8xx_irq_reset(struct drm_i915_private *dev_priv) 3459 { 3460 struct intel_uncore *uncore = &dev_priv->uncore; 3461 3462 i9xx_pipestat_irq_reset(dev_priv); 3463 3464 gen2_irq_reset(uncore); 3465 dev_priv->irq_mask = ~0u; 3466 } 3467 3468 static u32 i9xx_error_mask(struct drm_i915_private *i915) 3469 { 3470 /* 3471 * On gen2/3 FBC generates (seemingly spurious) 3472 * display INVALID_GTT/INVALID_GTT_PTE table errors. 3473 * 3474 * Also gen3 bspec has this to say: 3475 * "DISPA_INVALID_GTT_PTE 3476 * [DevNapa] : Reserved. This bit does not reflect the page 3477 * table error for the display plane A." 3478 * 3479 * Unfortunately we can't mask off individual PGTBL_ER bits, 3480 * so we just have to mask off all page table errors via EMR. 3481 */ 3482 if (HAS_FBC(i915)) 3483 return ~I915_ERROR_MEMORY_REFRESH; 3484 else 3485 return ~(I915_ERROR_PAGE_TABLE | 3486 I915_ERROR_MEMORY_REFRESH); 3487 } 3488 3489 static void i8xx_irq_postinstall(struct drm_i915_private *dev_priv) 3490 { 3491 struct intel_uncore *uncore = &dev_priv->uncore; 3492 u16 enable_mask; 3493 3494 intel_uncore_write16(uncore, EMR, i9xx_error_mask(dev_priv)); 3495 3496 /* Unmask the interrupts that we always want on. */ 3497 dev_priv->irq_mask = 3498 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3499 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3500 I915_MASTER_ERROR_INTERRUPT); 3501 3502 enable_mask = 3503 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3504 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3505 I915_MASTER_ERROR_INTERRUPT | 3506 I915_USER_INTERRUPT; 3507 3508 gen2_irq_init(uncore, dev_priv->irq_mask, enable_mask); 3509 3510 /* Interrupt setup is already guaranteed to be single-threaded, this is 3511 just to make the assert_spin_locked check happy. 
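(i915_enable_pipestat() itself asserts that dev_priv->irq_lock is held)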
*/ 3512 spin_lock_irq(&dev_priv->irq_lock); 3513 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 3514 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 3515 spin_unlock_irq(&dev_priv->irq_lock); 3516 } 3517 3518 static void i8xx_error_irq_ack(struct drm_i915_private *i915, 3519 u16 *eir, u16 *eir_stuck) 3520 { 3521 struct intel_uncore *uncore = &i915->uncore; 3522 u16 emr; 3523 3524 *eir = intel_uncore_read16(uncore, EIR); 3525 intel_uncore_write16(uncore, EIR, *eir); 3526 3527 *eir_stuck = intel_uncore_read16(uncore, EIR); 3528 if (*eir_stuck == 0) 3529 return; 3530 3531 /* 3532 * Toggle all EMR bits to make sure we get an edge 3533 * in the ISR master error bit if we don't clear 3534 * all the EIR bits. Otherwise the edge triggered 3535 * IIR on i965/g4x wouldn't notice that an interrupt 3536 * is still pending. Also some EIR bits can't be 3537 * cleared except by handling the underlying error 3538 * (or by a GPU reset) so we mask any bit that 3539 * remains set. 3540 */ 3541 emr = intel_uncore_read16(uncore, EMR); 3542 intel_uncore_write16(uncore, EMR, 0xffff); 3543 intel_uncore_write16(uncore, EMR, emr | *eir_stuck); 3544 } 3545 3546 static void i8xx_error_irq_handler(struct drm_i915_private *dev_priv, 3547 u16 eir, u16 eir_stuck) 3548 { 3549 drm_dbg(&dev_priv->drm, "Master Error: EIR 0x%04x\n", eir); 3550 3551 if (eir_stuck) 3552 drm_dbg(&dev_priv->drm, "EIR stuck: 0x%04x, masked\n", 3553 eir_stuck); 3554 3555 drm_dbg(&dev_priv->drm, "PGTBL_ER: 0x%08x\n", 3556 intel_uncore_read(&dev_priv->uncore, PGTBL_ER)); 3557 } 3558 3559 static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv, 3560 u32 *eir, u32 *eir_stuck) 3561 { 3562 u32 emr; 3563 3564 *eir = intel_uncore_read(&dev_priv->uncore, EIR); 3565 intel_uncore_write(&dev_priv->uncore, EIR, *eir); 3566 3567 *eir_stuck = intel_uncore_read(&dev_priv->uncore, EIR); 3568 if (*eir_stuck == 0) 3569 return; 3570 3571 /* 3572 * Toggle all EMR bits to make sure we get an edge 3573 * in the ISR master error bit if we don't clear 3574 * all the EIR bits. Otherwise the edge triggered 3575 * IIR on i965/g4x wouldn't notice that an interrupt 3576 * is still pending. Also some EIR bits can't be 3577 * cleared except by handling the underlying error 3578 * (or by a GPU reset) so we mask any bit that 3579 * remains set. 
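* The sequence below: latch the current EMR, write all-ones to mask
* everything, then restore EMR with the stuck EIR bits ORed in so they
* stay masked.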
3580 */ 3581 emr = intel_uncore_read(&dev_priv->uncore, EMR); 3582 intel_uncore_write(&dev_priv->uncore, EMR, 0xffffffff); 3583 intel_uncore_write(&dev_priv->uncore, EMR, emr | *eir_stuck); 3584 } 3585 3586 static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv, 3587 u32 eir, u32 eir_stuck) 3588 { 3589 drm_dbg(&dev_priv->drm, "Master Error, EIR 0x%08x\n", eir); 3590 3591 if (eir_stuck) 3592 drm_dbg(&dev_priv->drm, "EIR stuck: 0x%08x, masked\n", 3593 eir_stuck); 3594 3595 drm_dbg(&dev_priv->drm, "PGTBL_ER: 0x%08x\n", 3596 intel_uncore_read(&dev_priv->uncore, PGTBL_ER)); 3597 } 3598 3599 static irqreturn_t i8xx_irq_handler(int irq, void *arg) 3600 { 3601 struct drm_i915_private *dev_priv = arg; 3602 irqreturn_t ret = IRQ_NONE; 3603 3604 if (!intel_irqs_enabled(dev_priv)) 3605 return IRQ_NONE; 3606 3607 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 3608 disable_rpm_wakeref_asserts(&dev_priv->runtime_pm); 3609 3610 do { 3611 u32 pipe_stats[I915_MAX_PIPES] = {}; 3612 u16 eir = 0, eir_stuck = 0; 3613 u16 iir; 3614 3615 iir = intel_uncore_read16(&dev_priv->uncore, GEN2_IIR); 3616 if (iir == 0) 3617 break; 3618 3619 ret = IRQ_HANDLED; 3620 3621 /* Call regardless, as some status bits might not be 3622 * signalled in iir */ 3623 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats); 3624 3625 if (iir & I915_MASTER_ERROR_INTERRUPT) 3626 i8xx_error_irq_ack(dev_priv, &eir, &eir_stuck); 3627 3628 intel_uncore_write16(&dev_priv->uncore, GEN2_IIR, iir); 3629 3630 if (iir & I915_USER_INTERRUPT) 3631 intel_engine_cs_irq(to_gt(dev_priv)->engine[RCS0], iir); 3632 3633 if (iir & I915_MASTER_ERROR_INTERRUPT) 3634 i8xx_error_irq_handler(dev_priv, eir, eir_stuck); 3635 3636 i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats); 3637 } while (0); 3638 3639 pmu_irq_stats(dev_priv, ret); 3640 3641 enable_rpm_wakeref_asserts(&dev_priv->runtime_pm); 3642 3643 return ret; 3644 } 3645 3646 static void i915_irq_reset(struct drm_i915_private *dev_priv) 3647 { 3648 struct intel_uncore *uncore = &dev_priv->uncore; 3649 3650 if (I915_HAS_HOTPLUG(dev_priv)) { 3651 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); 3652 intel_uncore_rmw(&dev_priv->uncore, PORT_HOTPLUG_STAT, 0, 0); 3653 } 3654 3655 i9xx_pipestat_irq_reset(dev_priv); 3656 3657 GEN3_IRQ_RESET(uncore, GEN2_); 3658 dev_priv->irq_mask = ~0u; 3659 } 3660 3661 static void i915_irq_postinstall(struct drm_i915_private *dev_priv) 3662 { 3663 struct intel_uncore *uncore = &dev_priv->uncore; 3664 u32 enable_mask; 3665 3666 intel_uncore_write(uncore, EMR, i9xx_error_mask(dev_priv)); 3667 3668 /* Unmask the interrupts that we always want on. */ 3669 dev_priv->irq_mask = 3670 ~(I915_ASLE_INTERRUPT | 3671 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3672 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3673 I915_MASTER_ERROR_INTERRUPT); 3674 3675 enable_mask = 3676 I915_ASLE_INTERRUPT | 3677 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3678 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3679 I915_MASTER_ERROR_INTERRUPT | 3680 I915_USER_INTERRUPT; 3681 3682 if (I915_HAS_HOTPLUG(dev_priv)) { 3683 /* Enable in IER... */ 3684 enable_mask |= I915_DISPLAY_PORT_INTERRUPT; 3685 /* and unmask in IMR */ 3686 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT; 3687 } 3688 3689 GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask); 3690 3691 /* Interrupt setup is already guaranteed to be single-threaded, this is 3692 * just to make the assert_spin_locked check happy. 
*/ 3693 spin_lock_irq(&dev_priv->irq_lock); 3694 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 3695 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 3696 spin_unlock_irq(&dev_priv->irq_lock); 3697 3698 i915_enable_asle_pipestat(dev_priv); 3699 } 3700 3701 static irqreturn_t i915_irq_handler(int irq, void *arg) 3702 { 3703 struct drm_i915_private *dev_priv = arg; 3704 irqreturn_t ret = IRQ_NONE; 3705 3706 if (!intel_irqs_enabled(dev_priv)) 3707 return IRQ_NONE; 3708 3709 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 3710 disable_rpm_wakeref_asserts(&dev_priv->runtime_pm); 3711 3712 do { 3713 u32 pipe_stats[I915_MAX_PIPES] = {}; 3714 u32 eir = 0, eir_stuck = 0; 3715 u32 hotplug_status = 0; 3716 u32 iir; 3717 3718 iir = intel_uncore_read(&dev_priv->uncore, GEN2_IIR); 3719 if (iir == 0) 3720 break; 3721 3722 ret = IRQ_HANDLED; 3723 3724 if (I915_HAS_HOTPLUG(dev_priv) && 3725 iir & I915_DISPLAY_PORT_INTERRUPT) 3726 hotplug_status = i9xx_hpd_irq_ack(dev_priv); 3727 3728 /* Call regardless, as some status bits might not be 3729 * signalled in iir */ 3730 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats); 3731 3732 if (iir & I915_MASTER_ERROR_INTERRUPT) 3733 i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck); 3734 3735 intel_uncore_write(&dev_priv->uncore, GEN2_IIR, iir); 3736 3737 if (iir & I915_USER_INTERRUPT) 3738 intel_engine_cs_irq(to_gt(dev_priv)->engine[RCS0], iir); 3739 3740 if (iir & I915_MASTER_ERROR_INTERRUPT) 3741 i9xx_error_irq_handler(dev_priv, eir, eir_stuck); 3742 3743 if (hotplug_status) 3744 i9xx_hpd_irq_handler(dev_priv, hotplug_status); 3745 3746 i915_pipestat_irq_handler(dev_priv, iir, pipe_stats); 3747 } while (0); 3748 3749 pmu_irq_stats(dev_priv, ret); 3750 3751 enable_rpm_wakeref_asserts(&dev_priv->runtime_pm); 3752 3753 return ret; 3754 } 3755 3756 static void i965_irq_reset(struct drm_i915_private *dev_priv) 3757 { 3758 struct intel_uncore *uncore = &dev_priv->uncore; 3759 3760 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); 3761 intel_uncore_rmw(uncore, PORT_HOTPLUG_STAT, 0, 0); 3762 3763 i9xx_pipestat_irq_reset(dev_priv); 3764 3765 GEN3_IRQ_RESET(uncore, GEN2_); 3766 dev_priv->irq_mask = ~0u; 3767 } 3768 3769 static u32 i965_error_mask(struct drm_i915_private *i915) 3770 { 3771 /* 3772 * Enable some error detection, note the instruction error mask 3773 * bit is reserved, so we leave it masked. 3774 * 3775 * i965 FBC no longer generates spurious GTT errors, 3776 * so we can always enable the page table errors. 3777 */ 3778 if (IS_G4X(i915)) 3779 return ~(GM45_ERROR_PAGE_TABLE | 3780 GM45_ERROR_MEM_PRIV | 3781 GM45_ERROR_CP_PRIV | 3782 I915_ERROR_MEMORY_REFRESH); 3783 else 3784 return ~(I915_ERROR_PAGE_TABLE | 3785 I915_ERROR_MEMORY_REFRESH); 3786 } 3787 3788 static void i965_irq_postinstall(struct drm_i915_private *dev_priv) 3789 { 3790 struct intel_uncore *uncore = &dev_priv->uncore; 3791 u32 enable_mask; 3792 3793 intel_uncore_write(uncore, EMR, i965_error_mask(dev_priv)); 3794 3795 /* Unmask the interrupts that we always want on. 
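Everything not listed here stays masked in IMR until it is unmasked explicitly.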

static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_en;

	lockdep_assert_held(&dev_priv->irq_lock);

	/* Note HDMI and DP share hotplug bits */
	/* enable bits are the same for all generations */
	hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915);
	/*
	 * Programming the CRT detection parameters tends to generate a
	 * spurious hotplug event about three seconds later. So just do
	 * it once.
	 */
	if (IS_G4X(dev_priv))
		hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
	hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;

	/* Ignore TV since it's buggy */
	i915_hotplug_interrupt_update_locked(dev_priv,
					     HOTPLUG_INT_EN_MASK |
					     CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
					     CRT_HOTPLUG_ACTIVATION_PERIOD_64,
					     hotplug_en);
}

static irqreturn_t i965_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 eir = 0, eir_stuck = 0;
		u32 hotplug_status = 0;
		u32 iir;

		iir = intel_uncore_read(&dev_priv->uncore, GEN2_IIR);
		if (iir == 0)
			break;

		ret = IRQ_HANDLED;

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);

		intel_uncore_write(&dev_priv->uncore, GEN2_IIR, iir);

		if (iir & I915_USER_INTERRUPT)
			intel_engine_cs_irq(to_gt(dev_priv)->engine[RCS0],
					    iir);

		/*
		 * The legacy ring irq handler only checks for a non-zero
		 * argument, so shifting the BSD bit down is sufficient to
		 * flag a user interrupt on VCS0.
		 */
		if (iir & I915_BSD_USER_INTERRUPT)
			intel_engine_cs_irq(to_gt(dev_priv)->engine[VCS0],
					    iir >> 25);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_handler(dev_priv, eir, eir_stuck);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		i965_pipestat_irq_handler(dev_priv, iir, pipe_stats);
	} while (0);

	pmu_irq_stats(dev_priv, ret);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}
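
/*
 * All three legacy handlers above share one shape: ack everything
 * first (PIPESTAT, hotplug, EIR, and IIR itself, by writing the read
 * value back), then act on the events. Clearing IIR before processing
 * means an event that re-fires mid-handler re-latches and re-raises
 * the interrupt rather than hiding behind a still-set identity bit.
 */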

struct intel_hotplug_funcs {
	void (*hpd_irq_setup)(struct drm_i915_private *i915);
};

/*
 * HPD_FUNCS(foo) defines a const vtable named foo_hpd_funcs whose
 * ->hpd_irq_setup points at foo_hpd_irq_setup.
 */
#define HPD_FUNCS(platform)					     \
static const struct intel_hotplug_funcs platform##_hpd_funcs = {    \
	.hpd_irq_setup = platform##_hpd_irq_setup,		     \
}

HPD_FUNCS(i915);
HPD_FUNCS(dg1);
HPD_FUNCS(gen11);
HPD_FUNCS(bxt);
HPD_FUNCS(icp);
HPD_FUNCS(spt);
HPD_FUNCS(ilk);
#undef HPD_FUNCS

void intel_hpd_irq_setup(struct drm_i915_private *i915)
{
	if (i915->display_irqs_enabled && i915->display.funcs.hotplug)
		i915->display.funcs.hotplug->hpd_irq_setup(i915);
}

/**
 * intel_irq_init - initializes irq support
 * @dev_priv: i915 device instance
 *
 * This function initializes all the irq support including work items, timers
 * and all the vtables. It does not setup the interrupt itself though.
 */
void intel_irq_init(struct drm_i915_private *dev_priv)
{
	int i;

	INIT_WORK(&dev_priv->l3_parity.error_work, ivb_parity_work);
	for (i = 0; i < MAX_L3_SLICES; ++i)
		dev_priv->l3_parity.remap_info[i] = NULL;

	/* pre-gen11 the guc irqs bits are in the upper 16 bits of the pm reg */
	if (HAS_GT_UC(dev_priv) && GRAPHICS_VER(dev_priv) < 11)
		to_gt(dev_priv)->pm_guc_events = GUC_INTR_GUC2HOST << 16;

	if (!HAS_DISPLAY(dev_priv))
		return;

	intel_hpd_init_pins(dev_priv);

	intel_hpd_init_early(dev_priv);

	dev_priv->drm.vblank_disable_immediate = true;

	/* Most platforms treat the display irq block as an always-on
	 * power domain. vlv/chv can disable it at runtime and need
	 * special care to avoid writing any of the display block registers
	 * outside of the power domain. We defer setting up the display irqs
	 * in this case to the runtime pm.
	 */
	dev_priv->display_irqs_enabled = true;
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		dev_priv->display_irqs_enabled = false;

	if (HAS_GMCH(dev_priv)) {
		if (I915_HAS_HOTPLUG(dev_priv))
			dev_priv->display.funcs.hotplug = &i915_hpd_funcs;
	} else {
		if (HAS_PCH_DG2(dev_priv))
			dev_priv->display.funcs.hotplug = &icp_hpd_funcs;
		else if (HAS_PCH_DG1(dev_priv))
			dev_priv->display.funcs.hotplug = &dg1_hpd_funcs;
		else if (DISPLAY_VER(dev_priv) >= 11)
			dev_priv->display.funcs.hotplug = &gen11_hpd_funcs;
		else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
			dev_priv->display.funcs.hotplug = &bxt_hpd_funcs;
		else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
			dev_priv->display.funcs.hotplug = &icp_hpd_funcs;
		else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
			dev_priv->display.funcs.hotplug = &spt_hpd_funcs;
		else
			dev_priv->display.funcs.hotplug = &ilk_hpd_funcs;
	}
}
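
/*
 * Typical probe/remove ordering, following the kernel-doc here and
 * around intel_irq_install()/intel_irq_uninstall() (a usage sketch,
 * not verbatim driver code):
 *
 *	intel_irq_init(i915);		// vtables + work items, no HW access
 *	ret = intel_irq_install(i915);	// reset HW, request_irq(), postinstall
 *	if (ret)
 *		return ret;
 *	...
 *	intel_irq_uninstall(i915);	// on remove: reset HW, free_irq()
 */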

/**
 * intel_irq_fini - deinitializes IRQ support
 * @i915: i915 device instance
 *
 * This function deinitializes all the IRQ support.
 */
void intel_irq_fini(struct drm_i915_private *i915)
{
	int i;

	for (i = 0; i < MAX_L3_SLICES; ++i)
		kfree(i915->l3_parity.remap_info[i]);
}

static irq_handler_t intel_irq_handler(struct drm_i915_private *dev_priv)
{
	if (HAS_GMCH(dev_priv)) {
		if (IS_CHERRYVIEW(dev_priv))
			return cherryview_irq_handler;
		else if (IS_VALLEYVIEW(dev_priv))
			return valleyview_irq_handler;
		else if (GRAPHICS_VER(dev_priv) == 4)
			return i965_irq_handler;
		else if (GRAPHICS_VER(dev_priv) == 3)
			return i915_irq_handler;
		else
			return i8xx_irq_handler;
	} else {
		if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
			return dg1_irq_handler;
		else if (GRAPHICS_VER(dev_priv) >= 11)
			return gen11_irq_handler;
		else if (GRAPHICS_VER(dev_priv) >= 8)
			return gen8_irq_handler;
		else
			return ilk_irq_handler;
	}
}

static void intel_irq_reset(struct drm_i915_private *dev_priv)
{
	if (HAS_GMCH(dev_priv)) {
		if (IS_CHERRYVIEW(dev_priv))
			cherryview_irq_reset(dev_priv);
		else if (IS_VALLEYVIEW(dev_priv))
			valleyview_irq_reset(dev_priv);
		else if (GRAPHICS_VER(dev_priv) == 4)
			i965_irq_reset(dev_priv);
		else if (GRAPHICS_VER(dev_priv) == 3)
			i915_irq_reset(dev_priv);
		else
			i8xx_irq_reset(dev_priv);
	} else {
		if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
			dg1_irq_reset(dev_priv);
		else if (GRAPHICS_VER(dev_priv) >= 11)
			gen11_irq_reset(dev_priv);
		else if (GRAPHICS_VER(dev_priv) >= 8)
			gen8_irq_reset(dev_priv);
		else
			ilk_irq_reset(dev_priv);
	}
}

static void intel_irq_postinstall(struct drm_i915_private *dev_priv)
{
	if (HAS_GMCH(dev_priv)) {
		if (IS_CHERRYVIEW(dev_priv))
			cherryview_irq_postinstall(dev_priv);
		else if (IS_VALLEYVIEW(dev_priv))
			valleyview_irq_postinstall(dev_priv);
		else if (GRAPHICS_VER(dev_priv) == 4)
			i965_irq_postinstall(dev_priv);
		else if (GRAPHICS_VER(dev_priv) == 3)
			i915_irq_postinstall(dev_priv);
		else
			i8xx_irq_postinstall(dev_priv);
	} else {
		if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
			dg1_irq_postinstall(dev_priv);
		else if (GRAPHICS_VER(dev_priv) >= 11)
			gen11_irq_postinstall(dev_priv);
		else if (GRAPHICS_VER(dev_priv) >= 8)
			gen8_irq_postinstall(dev_priv);
		else
			ilk_irq_postinstall(dev_priv);
	}
}

/**
 * intel_irq_install - enables the hardware interrupt
 * @dev_priv: i915 device instance
 *
 * This function enables the hardware interrupt handling, but leaves the hotplug
 * handling still disabled. It is called after intel_irq_init().
 *
 * In the driver load and resume code we need working interrupts in a few places
 * but don't want to deal with the hassle of concurrent probe and hotplug
 * workers. Hence the split into this two-stage approach.
 */
int intel_irq_install(struct drm_i915_private *dev_priv)
{
	int irq = to_pci_dev(dev_priv->drm.dev)->irq;
	int ret;

	/*
	 * We enable some interrupt sources in our postinstall hooks, so mark
	 * interrupts as enabled _before_ actually enabling them to avoid
	 * special cases in our ordering checks.
	 */
	dev_priv->runtime_pm.irqs_enabled = true;

	dev_priv->irq_enabled = true;

	intel_irq_reset(dev_priv);

	ret = request_irq(irq, intel_irq_handler(dev_priv),
			  IRQF_SHARED, DRIVER_NAME, dev_priv);
	if (ret < 0) {
		dev_priv->irq_enabled = false;
		return ret;
	}

	intel_irq_postinstall(dev_priv);

	return ret;
}
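
/*
 * Worked example of the dispatch above: a hypothetical non-GMCH part
 * with GRAPHICS_VER == 9 fails the IP_VER(12, 10) and gen 11 checks
 * and lands on the gen8 entry points:
 *
 *	intel_irq_handler()     -> gen8_irq_handler
 *	intel_irq_reset()       -> gen8_irq_reset
 *	intel_irq_postinstall() -> gen8_irq_postinstall
 */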

/**
 * intel_irq_uninstall - finalizes all irq handling
 * @dev_priv: i915 device instance
 *
 * This stops interrupt and hotplug handling and unregisters and frees all
 * resources acquired in the init functions.
 */
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
{
	int irq = to_pci_dev(dev_priv->drm.dev)->irq;

	/*
	 * FIXME we can get called twice during driver probe
	 * error handling as well as during driver remove due to
	 * intel_modeset_driver_remove() calling us out of sequence.
	 * Would be nice if it didn't do that...
	 */
	if (!dev_priv->irq_enabled)
		return;

	dev_priv->irq_enabled = false;

	intel_irq_reset(dev_priv);

	free_irq(irq, dev_priv);

	intel_hpd_cancel_work(dev_priv);
	dev_priv->runtime_pm.irqs_enabled = false;
}

/**
 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
 * @dev_priv: i915 device instance
 *
 * This function is used to disable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
{
	intel_irq_reset(dev_priv);
	dev_priv->runtime_pm.irqs_enabled = false;
	intel_synchronize_irq(dev_priv);
}

/**
 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
 * @dev_priv: i915 device instance
 *
 * This function is used to enable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->runtime_pm.irqs_enabled = true;
	intel_irq_reset(dev_priv);
	intel_irq_postinstall(dev_priv);
}

bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
{
	return dev_priv->runtime_pm.irqs_enabled;
}

void intel_synchronize_irq(struct drm_i915_private *i915)
{
	synchronize_irq(to_pci_dev(i915->drm.dev)->irq);
}

void intel_synchronize_hardirq(struct drm_i915_private *i915)
{
	synchronize_hardirq(to_pci_dev(i915->drm.dev)->irq);
}
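
/*
 * The two wrappers above wait for different things: synchronize_irq()
 * waits until no handler for the line is running, including any
 * threaded half, while synchronize_hardirq() waits only for in-flight
 * hard IRQ handlers, which makes it usable where blocking on a
 * threaded handler would be unsafe.
 */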