/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/sysrq.h>

#include <drm/drm_drv.h>

#include "display/icl_dsi_regs.h"
#include "display/intel_de.h"
#include "display/intel_display_trace.h"
#include "display/intel_display_types.h"
#include "display/intel_fifo_underrun.h"
#include "display/intel_hotplug.h"
#include "display/intel_lpe_audio.h"
#include "display/intel_psr.h"

#include "gt/intel_breadcrumbs.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_irq.h"
#include "gt/intel_gt_pm_irq.h"
#include "gt/intel_gt_regs.h"
#include "gt/intel_rps.h"

#include "i915_driver.h"
#include "i915_drv.h"
#include "i915_irq.h"
#include "intel_pm.h"

/**
 * DOC: interrupt handling
 *
 * These functions provide the basic support for enabling and disabling the
 * interrupt handling support. There's a lot more functionality in i915_irq.c
 * and related files, but that will be described in separate chapters.
 */

/*
 * Interrupt statistic for PMU. Increments the counter only if the
 * interrupt originated from the GPU so interrupts from a device which
 * shares the interrupt line are not accounted.
 */
static inline void pmu_irq_stats(struct drm_i915_private *i915,
				 irqreturn_t res)
{
	if (unlikely(res != IRQ_HANDLED))
		return;

	/*
	 * A clever compiler translates that into INC. A not so clever one
	 * should at least prevent store tearing.
79 */ 80 WRITE_ONCE(i915->pmu.irq_count, i915->pmu.irq_count + 1); 81 } 82 83 typedef bool (*long_pulse_detect_func)(enum hpd_pin pin, u32 val); 84 typedef u32 (*hotplug_enables_func)(struct drm_i915_private *i915, 85 enum hpd_pin pin); 86 87 static const u32 hpd_ilk[HPD_NUM_PINS] = { 88 [HPD_PORT_A] = DE_DP_A_HOTPLUG, 89 }; 90 91 static const u32 hpd_ivb[HPD_NUM_PINS] = { 92 [HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB, 93 }; 94 95 static const u32 hpd_bdw[HPD_NUM_PINS] = { 96 [HPD_PORT_A] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_A), 97 }; 98 99 static const u32 hpd_ibx[HPD_NUM_PINS] = { 100 [HPD_CRT] = SDE_CRT_HOTPLUG, 101 [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG, 102 [HPD_PORT_B] = SDE_PORTB_HOTPLUG, 103 [HPD_PORT_C] = SDE_PORTC_HOTPLUG, 104 [HPD_PORT_D] = SDE_PORTD_HOTPLUG, 105 }; 106 107 static const u32 hpd_cpt[HPD_NUM_PINS] = { 108 [HPD_CRT] = SDE_CRT_HOTPLUG_CPT, 109 [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT, 110 [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT, 111 [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT, 112 [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT, 113 }; 114 115 static const u32 hpd_spt[HPD_NUM_PINS] = { 116 [HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT, 117 [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT, 118 [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT, 119 [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT, 120 [HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT, 121 }; 122 123 static const u32 hpd_mask_i915[HPD_NUM_PINS] = { 124 [HPD_CRT] = CRT_HOTPLUG_INT_EN, 125 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN, 126 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN, 127 [HPD_PORT_B] = PORTB_HOTPLUG_INT_EN, 128 [HPD_PORT_C] = PORTC_HOTPLUG_INT_EN, 129 [HPD_PORT_D] = PORTD_HOTPLUG_INT_EN, 130 }; 131 132 static const u32 hpd_status_g4x[HPD_NUM_PINS] = { 133 [HPD_CRT] = CRT_HOTPLUG_INT_STATUS, 134 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X, 135 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X, 136 [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS, 137 [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS, 138 [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS, 139 }; 140 141 static const u32 hpd_status_i915[HPD_NUM_PINS] = { 142 [HPD_CRT] = CRT_HOTPLUG_INT_STATUS, 143 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915, 144 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915, 145 [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS, 146 [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS, 147 [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS, 148 }; 149 150 static const u32 hpd_bxt[HPD_NUM_PINS] = { 151 [HPD_PORT_A] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_A), 152 [HPD_PORT_B] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_B), 153 [HPD_PORT_C] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_C), 154 }; 155 156 static const u32 hpd_gen11[HPD_NUM_PINS] = { 157 [HPD_PORT_TC1] = GEN11_TC_HOTPLUG(HPD_PORT_TC1) | GEN11_TBT_HOTPLUG(HPD_PORT_TC1), 158 [HPD_PORT_TC2] = GEN11_TC_HOTPLUG(HPD_PORT_TC2) | GEN11_TBT_HOTPLUG(HPD_PORT_TC2), 159 [HPD_PORT_TC3] = GEN11_TC_HOTPLUG(HPD_PORT_TC3) | GEN11_TBT_HOTPLUG(HPD_PORT_TC3), 160 [HPD_PORT_TC4] = GEN11_TC_HOTPLUG(HPD_PORT_TC4) | GEN11_TBT_HOTPLUG(HPD_PORT_TC4), 161 [HPD_PORT_TC5] = GEN11_TC_HOTPLUG(HPD_PORT_TC5) | GEN11_TBT_HOTPLUG(HPD_PORT_TC5), 162 [HPD_PORT_TC6] = GEN11_TC_HOTPLUG(HPD_PORT_TC6) | GEN11_TBT_HOTPLUG(HPD_PORT_TC6), 163 }; 164 165 static const u32 hpd_icp[HPD_NUM_PINS] = { 166 [HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_A), 167 [HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_B), 168 [HPD_PORT_C] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_C), 169 [HPD_PORT_TC1] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC1), 170 [HPD_PORT_TC2] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC2), 171 [HPD_PORT_TC3] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC3), 172 [HPD_PORT_TC4] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC4), 173 [HPD_PORT_TC5] = 
SDE_TC_HOTPLUG_ICP(HPD_PORT_TC5), 174 [HPD_PORT_TC6] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC6), 175 }; 176 177 static const u32 hpd_sde_dg1[HPD_NUM_PINS] = { 178 [HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_A), 179 [HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_B), 180 [HPD_PORT_C] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_C), 181 [HPD_PORT_D] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_D), 182 [HPD_PORT_TC1] = SDE_TC_HOTPLUG_DG2(HPD_PORT_TC1), 183 }; 184 185 static void intel_hpd_init_pins(struct drm_i915_private *dev_priv) 186 { 187 struct intel_hotplug *hpd = &dev_priv->display.hotplug; 188 189 if (HAS_GMCH(dev_priv)) { 190 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || 191 IS_CHERRYVIEW(dev_priv)) 192 hpd->hpd = hpd_status_g4x; 193 else 194 hpd->hpd = hpd_status_i915; 195 return; 196 } 197 198 if (DISPLAY_VER(dev_priv) >= 11) 199 hpd->hpd = hpd_gen11; 200 else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) 201 hpd->hpd = hpd_bxt; 202 else if (DISPLAY_VER(dev_priv) >= 8) 203 hpd->hpd = hpd_bdw; 204 else if (DISPLAY_VER(dev_priv) >= 7) 205 hpd->hpd = hpd_ivb; 206 else 207 hpd->hpd = hpd_ilk; 208 209 if ((INTEL_PCH_TYPE(dev_priv) < PCH_DG1) && 210 (!HAS_PCH_SPLIT(dev_priv) || HAS_PCH_NOP(dev_priv))) 211 return; 212 213 if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1) 214 hpd->pch_hpd = hpd_sde_dg1; 215 else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) 216 hpd->pch_hpd = hpd_icp; 217 else if (HAS_PCH_CNP(dev_priv) || HAS_PCH_SPT(dev_priv)) 218 hpd->pch_hpd = hpd_spt; 219 else if (HAS_PCH_LPT(dev_priv) || HAS_PCH_CPT(dev_priv)) 220 hpd->pch_hpd = hpd_cpt; 221 else if (HAS_PCH_IBX(dev_priv)) 222 hpd->pch_hpd = hpd_ibx; 223 else 224 MISSING_CASE(INTEL_PCH_TYPE(dev_priv)); 225 } 226 227 static void 228 intel_handle_vblank(struct drm_i915_private *dev_priv, enum pipe pipe) 229 { 230 struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe); 231 232 drm_crtc_handle_vblank(&crtc->base); 233 } 234 235 void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr, 236 i915_reg_t iir, i915_reg_t ier) 237 { 238 intel_uncore_write(uncore, imr, 0xffffffff); 239 intel_uncore_posting_read(uncore, imr); 240 241 intel_uncore_write(uncore, ier, 0); 242 243 /* IIR can theoretically queue up two events. Be paranoid. */ 244 intel_uncore_write(uncore, iir, 0xffffffff); 245 intel_uncore_posting_read(uncore, iir); 246 intel_uncore_write(uncore, iir, 0xffffffff); 247 intel_uncore_posting_read(uncore, iir); 248 } 249 250 static void gen2_irq_reset(struct intel_uncore *uncore) 251 { 252 intel_uncore_write16(uncore, GEN2_IMR, 0xffff); 253 intel_uncore_posting_read16(uncore, GEN2_IMR); 254 255 intel_uncore_write16(uncore, GEN2_IER, 0); 256 257 /* IIR can theoretically queue up two events. Be paranoid. */ 258 intel_uncore_write16(uncore, GEN2_IIR, 0xffff); 259 intel_uncore_posting_read16(uncore, GEN2_IIR); 260 intel_uncore_write16(uncore, GEN2_IIR, 0xffff); 261 intel_uncore_posting_read16(uncore, GEN2_IIR); 262 } 263 264 /* 265 * We should clear IMR at preinstall/uninstall, and just check at postinstall. 
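 * (gen3_assert_iir_is_zero()/gen2_assert_iir_is_zero() below provide that
 * postinstall check: they warn if IIR reads back non-zero and then clear it.)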
 */
static void gen3_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg)
{
	u32 val = intel_uncore_read(uncore, reg);

	if (val == 0)
		return;

	drm_WARN(&uncore->i915->drm, 1,
		 "Interrupt register 0x%x is not zero: 0x%08x\n",
		 i915_mmio_reg_offset(reg), val);
	intel_uncore_write(uncore, reg, 0xffffffff);
	intel_uncore_posting_read(uncore, reg);
	intel_uncore_write(uncore, reg, 0xffffffff);
	intel_uncore_posting_read(uncore, reg);
}

static void gen2_assert_iir_is_zero(struct intel_uncore *uncore)
{
	u16 val = intel_uncore_read16(uncore, GEN2_IIR);

	if (val == 0)
		return;

	drm_WARN(&uncore->i915->drm, 1,
		 "Interrupt register 0x%x is not zero: 0x%08x\n",
		 i915_mmio_reg_offset(GEN2_IIR), val);
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
}

void gen3_irq_init(struct intel_uncore *uncore,
		   i915_reg_t imr, u32 imr_val,
		   i915_reg_t ier, u32 ier_val,
		   i915_reg_t iir)
{
	gen3_assert_iir_is_zero(uncore, iir);

	intel_uncore_write(uncore, ier, ier_val);
	intel_uncore_write(uncore, imr, imr_val);
	intel_uncore_posting_read(uncore, imr);
}

static void gen2_irq_init(struct intel_uncore *uncore,
			  u32 imr_val, u32 ier_val)
{
	gen2_assert_iir_is_zero(uncore);

	intel_uncore_write16(uncore, GEN2_IER, ier_val);
	intel_uncore_write16(uncore, GEN2_IMR, imr_val);
	intel_uncore_posting_read16(uncore, GEN2_IMR);
}

/* For display hotplug interrupt */
static inline void
i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
				     u32 mask,
				     u32 bits)
{
	lockdep_assert_held(&dev_priv->irq_lock);
	drm_WARN_ON(&dev_priv->drm, bits & ~mask);

	intel_uncore_rmw(&dev_priv->uncore, PORT_HOTPLUG_EN, mask, bits);
}

/**
 * i915_hotplug_interrupt_update - update hotplug interrupt enable
 * @dev_priv: driver private
 * @mask: bits to update
 * @bits: bits to enable
 * NOTE: the HPD enable bits are modified both inside and outside
 * of an interrupt context. To avoid that read-modify-write cycles
 * interfere, these bits are protected by a spinlock. Since this
 * function is usually not called from a context where the lock is
 * held already, this function acquires the lock itself. A non-locking
 * version is also available.
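 * (The non-locking version is i915_hotplug_interrupt_update_locked() above,
 * which expects dev_priv->irq_lock to already be held by the caller.)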
344 */ 345 void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv, 346 u32 mask, 347 u32 bits) 348 { 349 spin_lock_irq(&dev_priv->irq_lock); 350 i915_hotplug_interrupt_update_locked(dev_priv, mask, bits); 351 spin_unlock_irq(&dev_priv->irq_lock); 352 } 353 354 /** 355 * ilk_update_display_irq - update DEIMR 356 * @dev_priv: driver private 357 * @interrupt_mask: mask of interrupt bits to update 358 * @enabled_irq_mask: mask of interrupt bits to enable 359 */ 360 static void ilk_update_display_irq(struct drm_i915_private *dev_priv, 361 u32 interrupt_mask, u32 enabled_irq_mask) 362 { 363 u32 new_val; 364 365 lockdep_assert_held(&dev_priv->irq_lock); 366 drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask); 367 368 new_val = dev_priv->irq_mask; 369 new_val &= ~interrupt_mask; 370 new_val |= (~enabled_irq_mask & interrupt_mask); 371 372 if (new_val != dev_priv->irq_mask && 373 !drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv))) { 374 dev_priv->irq_mask = new_val; 375 intel_uncore_write(&dev_priv->uncore, DEIMR, dev_priv->irq_mask); 376 intel_uncore_posting_read(&dev_priv->uncore, DEIMR); 377 } 378 } 379 380 void ilk_enable_display_irq(struct drm_i915_private *i915, u32 bits) 381 { 382 ilk_update_display_irq(i915, bits, bits); 383 } 384 385 void ilk_disable_display_irq(struct drm_i915_private *i915, u32 bits) 386 { 387 ilk_update_display_irq(i915, bits, 0); 388 } 389 390 /** 391 * bdw_update_port_irq - update DE port interrupt 392 * @dev_priv: driver private 393 * @interrupt_mask: mask of interrupt bits to update 394 * @enabled_irq_mask: mask of interrupt bits to enable 395 */ 396 static void bdw_update_port_irq(struct drm_i915_private *dev_priv, 397 u32 interrupt_mask, 398 u32 enabled_irq_mask) 399 { 400 u32 new_val; 401 u32 old_val; 402 403 lockdep_assert_held(&dev_priv->irq_lock); 404 405 drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask); 406 407 if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv))) 408 return; 409 410 old_val = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PORT_IMR); 411 412 new_val = old_val; 413 new_val &= ~interrupt_mask; 414 new_val |= (~enabled_irq_mask & interrupt_mask); 415 416 if (new_val != old_val) { 417 intel_uncore_write(&dev_priv->uncore, GEN8_DE_PORT_IMR, new_val); 418 intel_uncore_posting_read(&dev_priv->uncore, GEN8_DE_PORT_IMR); 419 } 420 } 421 422 /** 423 * bdw_update_pipe_irq - update DE pipe interrupt 424 * @dev_priv: driver private 425 * @pipe: pipe whose interrupt to update 426 * @interrupt_mask: mask of interrupt bits to update 427 * @enabled_irq_mask: mask of interrupt bits to enable 428 */ 429 static void bdw_update_pipe_irq(struct drm_i915_private *dev_priv, 430 enum pipe pipe, u32 interrupt_mask, 431 u32 enabled_irq_mask) 432 { 433 u32 new_val; 434 435 lockdep_assert_held(&dev_priv->irq_lock); 436 437 drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask); 438 439 if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv))) 440 return; 441 442 new_val = dev_priv->de_irq_mask[pipe]; 443 new_val &= ~interrupt_mask; 444 new_val |= (~enabled_irq_mask & interrupt_mask); 445 446 if (new_val != dev_priv->de_irq_mask[pipe]) { 447 dev_priv->de_irq_mask[pipe] = new_val; 448 intel_uncore_write(&dev_priv->uncore, GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); 449 intel_uncore_posting_read(&dev_priv->uncore, GEN8_DE_PIPE_IMR(pipe)); 450 } 451 } 452 453 void bdw_enable_pipe_irq(struct drm_i915_private *i915, 454 enum pipe pipe, u32 bits) 455 { 456 bdw_update_pipe_irq(i915, pipe, bits, 
bits); 457 } 458 459 void bdw_disable_pipe_irq(struct drm_i915_private *i915, 460 enum pipe pipe, u32 bits) 461 { 462 bdw_update_pipe_irq(i915, pipe, bits, 0); 463 } 464 465 /** 466 * ibx_display_interrupt_update - update SDEIMR 467 * @dev_priv: driver private 468 * @interrupt_mask: mask of interrupt bits to update 469 * @enabled_irq_mask: mask of interrupt bits to enable 470 */ 471 static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv, 472 u32 interrupt_mask, 473 u32 enabled_irq_mask) 474 { 475 u32 sdeimr = intel_uncore_read(&dev_priv->uncore, SDEIMR); 476 sdeimr &= ~interrupt_mask; 477 sdeimr |= (~enabled_irq_mask & interrupt_mask); 478 479 drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask); 480 481 lockdep_assert_held(&dev_priv->irq_lock); 482 483 if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv))) 484 return; 485 486 intel_uncore_write(&dev_priv->uncore, SDEIMR, sdeimr); 487 intel_uncore_posting_read(&dev_priv->uncore, SDEIMR); 488 } 489 490 void ibx_enable_display_interrupt(struct drm_i915_private *i915, u32 bits) 491 { 492 ibx_display_interrupt_update(i915, bits, bits); 493 } 494 495 void ibx_disable_display_interrupt(struct drm_i915_private *i915, u32 bits) 496 { 497 ibx_display_interrupt_update(i915, bits, 0); 498 } 499 500 u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv, 501 enum pipe pipe) 502 { 503 u32 status_mask = dev_priv->pipestat_irq_mask[pipe]; 504 u32 enable_mask = status_mask << 16; 505 506 lockdep_assert_held(&dev_priv->irq_lock); 507 508 if (DISPLAY_VER(dev_priv) < 5) 509 goto out; 510 511 /* 512 * On pipe A we don't support the PSR interrupt yet, 513 * on pipe B and C the same bit MBZ. 514 */ 515 if (drm_WARN_ON_ONCE(&dev_priv->drm, 516 status_mask & PIPE_A_PSR_STATUS_VLV)) 517 return 0; 518 /* 519 * On pipe B and C we don't support the PSR interrupt yet, on pipe 520 * A the same bit is for perf counters which we don't use either. 
521 */ 522 if (drm_WARN_ON_ONCE(&dev_priv->drm, 523 status_mask & PIPE_B_PSR_STATUS_VLV)) 524 return 0; 525 526 enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS | 527 SPRITE0_FLIP_DONE_INT_EN_VLV | 528 SPRITE1_FLIP_DONE_INT_EN_VLV); 529 if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV) 530 enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV; 531 if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV) 532 enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV; 533 534 out: 535 drm_WARN_ONCE(&dev_priv->drm, 536 enable_mask & ~PIPESTAT_INT_ENABLE_MASK || 537 status_mask & ~PIPESTAT_INT_STATUS_MASK, 538 "pipe %c: enable_mask=0x%x, status_mask=0x%x\n", 539 pipe_name(pipe), enable_mask, status_mask); 540 541 return enable_mask; 542 } 543 544 void i915_enable_pipestat(struct drm_i915_private *dev_priv, 545 enum pipe pipe, u32 status_mask) 546 { 547 i915_reg_t reg = PIPESTAT(pipe); 548 u32 enable_mask; 549 550 drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK, 551 "pipe %c: status_mask=0x%x\n", 552 pipe_name(pipe), status_mask); 553 554 lockdep_assert_held(&dev_priv->irq_lock); 555 drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)); 556 557 if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == status_mask) 558 return; 559 560 dev_priv->pipestat_irq_mask[pipe] |= status_mask; 561 enable_mask = i915_pipestat_enable_mask(dev_priv, pipe); 562 563 intel_uncore_write(&dev_priv->uncore, reg, enable_mask | status_mask); 564 intel_uncore_posting_read(&dev_priv->uncore, reg); 565 } 566 567 void i915_disable_pipestat(struct drm_i915_private *dev_priv, 568 enum pipe pipe, u32 status_mask) 569 { 570 i915_reg_t reg = PIPESTAT(pipe); 571 u32 enable_mask; 572 573 drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK, 574 "pipe %c: status_mask=0x%x\n", 575 pipe_name(pipe), status_mask); 576 577 lockdep_assert_held(&dev_priv->irq_lock); 578 drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)); 579 580 if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == 0) 581 return; 582 583 dev_priv->pipestat_irq_mask[pipe] &= ~status_mask; 584 enable_mask = i915_pipestat_enable_mask(dev_priv, pipe); 585 586 intel_uncore_write(&dev_priv->uncore, reg, enable_mask | status_mask); 587 intel_uncore_posting_read(&dev_priv->uncore, reg); 588 } 589 590 static bool i915_has_asle(struct drm_i915_private *dev_priv) 591 { 592 if (!dev_priv->display.opregion.asle) 593 return false; 594 595 return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv); 596 } 597 598 /** 599 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion 600 * @dev_priv: i915 device private 601 */ 602 static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv) 603 { 604 if (!i915_has_asle(dev_priv)) 605 return; 606 607 spin_lock_irq(&dev_priv->irq_lock); 608 609 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS); 610 if (DISPLAY_VER(dev_priv) >= 4) 611 i915_enable_pipestat(dev_priv, PIPE_A, 612 PIPE_LEGACY_BLC_EVENT_STATUS); 613 614 spin_unlock_irq(&dev_priv->irq_lock); 615 } 616 617 /* 618 * This timing diagram depicts the video signal in and 619 * around the vertical blanking period. 620 * 621 * Assumptions about the fictitious mode used in this example: 622 * vblank_start >= 3 623 * vsync_start = vblank_start + 1 624 * vsync_end = vblank_start + 2 625 * vtotal = vblank_start + 3 626 * 627 * start of vblank: 628 * latch double buffered registers 629 * increment frame counter (ctg+) 630 * generate start of vblank interrupt (gen4+) 631 * | 632 * | frame start: 633 * | generate frame start interrupt (aka. 
vblank interrupt) (gmch) 634 * | may be shifted forward 1-3 extra lines via PIPECONF 635 * | | 636 * | | start of vsync: 637 * | | generate vsync interrupt 638 * | | | 639 * ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx 640 * . \hs/ . \hs/ \hs/ \hs/ . \hs/ 641 * ----va---> <-----------------vb--------------------> <--------va------------- 642 * | | <----vs-----> | 643 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2) 644 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+) 645 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi) 646 * | | | 647 * last visible pixel first visible pixel 648 * | increment frame counter (gen3/4) 649 * pixel counter = vblank_start * htotal pixel counter = 0 (gen3/4) 650 * 651 * x = horizontal active 652 * _ = horizontal blanking 653 * hs = horizontal sync 654 * va = vertical active 655 * vb = vertical blanking 656 * vs = vertical sync 657 * vbs = vblank_start (number) 658 * 659 * Summary: 660 * - most events happen at the start of horizontal sync 661 * - frame start happens at the start of horizontal blank, 1-4 lines 662 * (depending on PIPECONF settings) after the start of vblank 663 * - gen3/4 pixel and frame counter are synchronized with the start 664 * of horizontal active on the first line of vertical active 665 */ 666 667 /* Called from drm generic code, passed a 'crtc', which 668 * we use as a pipe index 669 */ 670 u32 i915_get_vblank_counter(struct drm_crtc *crtc) 671 { 672 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 673 struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[drm_crtc_index(crtc)]; 674 const struct drm_display_mode *mode = &vblank->hwmode; 675 enum pipe pipe = to_intel_crtc(crtc)->pipe; 676 i915_reg_t high_frame, low_frame; 677 u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal; 678 unsigned long irqflags; 679 680 /* 681 * On i965gm TV output the frame counter only works up to 682 * the point when we enable the TV encoder. After that the 683 * frame counter ceases to work and reads zero. We need a 684 * vblank wait before enabling the TV encoder and so we 685 * have to enable vblank interrupts while the frame counter 686 * is still in a working state. However the core vblank code 687 * does not like us returning non-zero frame counter values 688 * when we've told it that we don't have a working frame 689 * counter. Thus we must stop non-zero values leaking out. 690 */ 691 if (!vblank->max_vblank_count) 692 return 0; 693 694 htotal = mode->crtc_htotal; 695 hsync_start = mode->crtc_hsync_start; 696 vbl_start = mode->crtc_vblank_start; 697 if (mode->flags & DRM_MODE_FLAG_INTERLACE) 698 vbl_start = DIV_ROUND_UP(vbl_start, 2); 699 700 /* Convert to pixel count */ 701 vbl_start *= htotal; 702 703 /* Start of vblank event occurs at start of hsync */ 704 vbl_start -= htotal - hsync_start; 705 706 high_frame = PIPEFRAME(pipe); 707 low_frame = PIPEFRAMEPIXEL(pipe); 708 709 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 710 711 /* 712 * High & low register fields aren't synchronized, so make sure 713 * we get a low value that's stable across two reads of the high 714 * register. 
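 * (The do/while loop below simply re-reads the high register until two
 * consecutive reads of it agree.)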
715 */ 716 do { 717 high1 = intel_de_read_fw(dev_priv, high_frame) & PIPE_FRAME_HIGH_MASK; 718 low = intel_de_read_fw(dev_priv, low_frame); 719 high2 = intel_de_read_fw(dev_priv, high_frame) & PIPE_FRAME_HIGH_MASK; 720 } while (high1 != high2); 721 722 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 723 724 high1 >>= PIPE_FRAME_HIGH_SHIFT; 725 pixel = low & PIPE_PIXEL_MASK; 726 low >>= PIPE_FRAME_LOW_SHIFT; 727 728 /* 729 * The frame counter increments at beginning of active. 730 * Cook up a vblank counter by also checking the pixel 731 * counter against vblank start. 732 */ 733 return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff; 734 } 735 736 u32 g4x_get_vblank_counter(struct drm_crtc *crtc) 737 { 738 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 739 struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[drm_crtc_index(crtc)]; 740 enum pipe pipe = to_intel_crtc(crtc)->pipe; 741 742 if (!vblank->max_vblank_count) 743 return 0; 744 745 return intel_uncore_read(&dev_priv->uncore, PIPE_FRMCOUNT_G4X(pipe)); 746 } 747 748 static u32 intel_crtc_scanlines_since_frame_timestamp(struct intel_crtc *crtc) 749 { 750 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 751 struct drm_vblank_crtc *vblank = 752 &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)]; 753 const struct drm_display_mode *mode = &vblank->hwmode; 754 u32 htotal = mode->crtc_htotal; 755 u32 clock = mode->crtc_clock; 756 u32 scan_prev_time, scan_curr_time, scan_post_time; 757 758 /* 759 * To avoid the race condition where we might cross into the 760 * next vblank just between the PIPE_FRMTMSTMP and TIMESTAMP_CTR 761 * reads. We make sure we read PIPE_FRMTMSTMP and TIMESTAMP_CTR 762 * during the same frame. 763 */ 764 do { 765 /* 766 * This field provides read back of the display 767 * pipe frame time stamp. The time stamp value 768 * is sampled at every start of vertical blank. 769 */ 770 scan_prev_time = intel_de_read_fw(dev_priv, 771 PIPE_FRMTMSTMP(crtc->pipe)); 772 773 /* 774 * The TIMESTAMP_CTR register has the current 775 * time stamp value. 776 */ 777 scan_curr_time = intel_de_read_fw(dev_priv, IVB_TIMESTAMP_CTR); 778 779 scan_post_time = intel_de_read_fw(dev_priv, 780 PIPE_FRMTMSTMP(crtc->pipe)); 781 } while (scan_post_time != scan_prev_time); 782 783 return div_u64(mul_u32_u32(scan_curr_time - scan_prev_time, 784 clock), 1000 * htotal); 785 } 786 787 /* 788 * On certain encoders on certain platforms, pipe 789 * scanline register will not work to get the scanline, 790 * since the timings are driven from the PORT or issues 791 * with scanline register updates. 792 * This function will use Framestamp and current 793 * timestamp registers to calculate the scanline. 794 */ 795 static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc) 796 { 797 struct drm_vblank_crtc *vblank = 798 &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)]; 799 const struct drm_display_mode *mode = &vblank->hwmode; 800 u32 vblank_start = mode->crtc_vblank_start; 801 u32 vtotal = mode->crtc_vtotal; 802 u32 scanline; 803 804 scanline = intel_crtc_scanlines_since_frame_timestamp(crtc); 805 scanline = min(scanline, vtotal - 1); 806 scanline = (scanline + vblank_start) % vtotal; 807 808 return scanline; 809 } 810 811 /* 812 * intel_de_read_fw(), only for fast reads of display block, no need for 813 * forcewake etc. 
814 */ 815 static int __intel_get_crtc_scanline(struct intel_crtc *crtc) 816 { 817 struct drm_device *dev = crtc->base.dev; 818 struct drm_i915_private *dev_priv = to_i915(dev); 819 const struct drm_display_mode *mode; 820 struct drm_vblank_crtc *vblank; 821 enum pipe pipe = crtc->pipe; 822 int position, vtotal; 823 824 if (!crtc->active) 825 return 0; 826 827 vblank = &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)]; 828 mode = &vblank->hwmode; 829 830 if (crtc->mode_flags & I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP) 831 return __intel_get_crtc_scanline_from_timestamp(crtc); 832 833 vtotal = mode->crtc_vtotal; 834 if (mode->flags & DRM_MODE_FLAG_INTERLACE) 835 vtotal /= 2; 836 837 position = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & PIPEDSL_LINE_MASK; 838 839 /* 840 * On HSW, the DSL reg (0x70000) appears to return 0 if we 841 * read it just before the start of vblank. So try it again 842 * so we don't accidentally end up spanning a vblank frame 843 * increment, causing the pipe_update_end() code to squak at us. 844 * 845 * The nature of this problem means we can't simply check the ISR 846 * bit and return the vblank start value; nor can we use the scanline 847 * debug register in the transcoder as it appears to have the same 848 * problem. We may need to extend this to include other platforms, 849 * but so far testing only shows the problem on HSW. 850 */ 851 if (HAS_DDI(dev_priv) && !position) { 852 int i, temp; 853 854 for (i = 0; i < 100; i++) { 855 udelay(1); 856 temp = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & PIPEDSL_LINE_MASK; 857 if (temp != position) { 858 position = temp; 859 break; 860 } 861 } 862 } 863 864 /* 865 * See update_scanline_offset() for the details on the 866 * scanline_offset adjustment. 867 */ 868 return (position + crtc->scanline_offset) % vtotal; 869 } 870 871 static bool i915_get_crtc_scanoutpos(struct drm_crtc *_crtc, 872 bool in_vblank_irq, 873 int *vpos, int *hpos, 874 ktime_t *stime, ktime_t *etime, 875 const struct drm_display_mode *mode) 876 { 877 struct drm_device *dev = _crtc->dev; 878 struct drm_i915_private *dev_priv = to_i915(dev); 879 struct intel_crtc *crtc = to_intel_crtc(_crtc); 880 enum pipe pipe = crtc->pipe; 881 int position; 882 int vbl_start, vbl_end, hsync_start, htotal, vtotal; 883 unsigned long irqflags; 884 bool use_scanline_counter = DISPLAY_VER(dev_priv) >= 5 || 885 IS_G4X(dev_priv) || DISPLAY_VER(dev_priv) == 2 || 886 crtc->mode_flags & I915_MODE_FLAG_USE_SCANLINE_COUNTER; 887 888 if (drm_WARN_ON(&dev_priv->drm, !mode->crtc_clock)) { 889 drm_dbg(&dev_priv->drm, 890 "trying to get scanoutpos for disabled " 891 "pipe %c\n", pipe_name(pipe)); 892 return false; 893 } 894 895 htotal = mode->crtc_htotal; 896 hsync_start = mode->crtc_hsync_start; 897 vtotal = mode->crtc_vtotal; 898 vbl_start = mode->crtc_vblank_start; 899 vbl_end = mode->crtc_vblank_end; 900 901 if (mode->flags & DRM_MODE_FLAG_INTERLACE) { 902 vbl_start = DIV_ROUND_UP(vbl_start, 2); 903 vbl_end /= 2; 904 vtotal /= 2; 905 } 906 907 /* 908 * Lock uncore.lock, as we will do multiple timing critical raw 909 * register reads, potentially with preemption disabled, so the 910 * following code must not block on uncore.lock. 911 */ 912 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 913 914 /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */ 915 916 /* Get optional system timestamp before query. 
 */
	if (stime)
		*stime = ktime_get();

	if (crtc->mode_flags & I915_MODE_FLAG_VRR) {
		int scanlines = intel_crtc_scanlines_since_frame_timestamp(crtc);

		position = __intel_get_crtc_scanline(crtc);

		/*
		 * Already exiting vblank? If so, shift our position
		 * so it looks like we're already approaching the full
		 * vblank end. This should make the generated timestamp
		 * more or less match when the active portion will start.
		 */
		if (position >= vbl_start && scanlines < position)
			position = min(crtc->vmax_vblank_start + scanlines, vtotal - 1);
	} else if (use_scanline_counter) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = __intel_get_crtc_scanline(crtc);
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (intel_de_read_fw(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;

		/*
		 * In interlaced modes, the pixel counter counts all pixels,
		 * so one field will have htotal more pixels. In order to keep
		 * the reported position from jumping backwards when the pixel
		 * counter is beyond the length of the shorter field, just
		 * clamp the position to the length of the shorter field. This
		 * matches how the scanline counter based position works since
		 * the scanline counter doesn't count the two half lines.
		 */
		if (position >= vtotal)
			position = vtotal - 1;

		/*
		 * Start of vblank interrupt is triggered at start of hsync,
		 * just prior to the first active line of vblank. However we
		 * consider lines to start at the leading edge of horizontal
		 * active. So, should we get here before we've crossed into
		 * the horizontal active of the first line in vblank, we would
		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
		 * always add htotal-hsync_start to the current pixel position.
		 */
		position = (position + htotal - hsync_start) % vtotal;
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
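	 *
	 * For example (illustrative numbers only): with vbl_start == 250 and
	 * vbl_end == vtotal == 256, a scanline of 253 yields position -3
	 * (three lines before the end of vblank), while a scanline of 100
	 * yields position 100.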
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;

	if (use_scanline_counter) {
		*vpos = position;
		*hpos = 0;
	} else {
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	return true;
}

bool intel_crtc_get_vblank_timestamp(struct drm_crtc *crtc, int *max_error,
				     ktime_t *vblank_time, bool in_vblank_irq)
{
	return drm_crtc_vblank_helper_get_vblank_timestamp_internal(
		crtc, max_error, vblank_time, in_vblank_irq,
		i915_get_crtc_scanoutpos);
}

int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	unsigned long irqflags;
	int position;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	position = __intel_get_crtc_scanline(crtc);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	return position;
}

/**
 * ivb_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * the same row is likely to go bad again.
 */
static void ivb_parity_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), l3_parity.error_work);
	struct intel_gt *gt = to_gt(dev_priv);
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	u32 misccpctl;
	u8 slice = 0;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
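	 * (DOP clock gating is turned off via GEN7_MISCCPCTL below and the old
	 * value is restored once every pending slice has been processed.)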
1047 */ 1048 mutex_lock(&dev_priv->drm.struct_mutex); 1049 1050 /* If we've screwed up tracking, just let the interrupt fire again */ 1051 if (drm_WARN_ON(&dev_priv->drm, !dev_priv->l3_parity.which_slice)) 1052 goto out; 1053 1054 misccpctl = intel_uncore_rmw(&dev_priv->uncore, GEN7_MISCCPCTL, 1055 GEN7_DOP_CLOCK_GATE_ENABLE, 0); 1056 intel_uncore_posting_read(&dev_priv->uncore, GEN7_MISCCPCTL); 1057 1058 while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) { 1059 i915_reg_t reg; 1060 1061 slice--; 1062 if (drm_WARN_ON_ONCE(&dev_priv->drm, 1063 slice >= NUM_L3_SLICES(dev_priv))) 1064 break; 1065 1066 dev_priv->l3_parity.which_slice &= ~(1<<slice); 1067 1068 reg = GEN7_L3CDERRST1(slice); 1069 1070 error_status = intel_uncore_read(&dev_priv->uncore, reg); 1071 row = GEN7_PARITY_ERROR_ROW(error_status); 1072 bank = GEN7_PARITY_ERROR_BANK(error_status); 1073 subbank = GEN7_PARITY_ERROR_SUBBANK(error_status); 1074 1075 intel_uncore_write(&dev_priv->uncore, reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE); 1076 intel_uncore_posting_read(&dev_priv->uncore, reg); 1077 1078 parity_event[0] = I915_L3_PARITY_UEVENT "=1"; 1079 parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row); 1080 parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank); 1081 parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank); 1082 parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice); 1083 parity_event[5] = NULL; 1084 1085 kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj, 1086 KOBJ_CHANGE, parity_event); 1087 1088 DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n", 1089 slice, row, bank, subbank); 1090 1091 kfree(parity_event[4]); 1092 kfree(parity_event[3]); 1093 kfree(parity_event[2]); 1094 kfree(parity_event[1]); 1095 } 1096 1097 intel_uncore_write(&dev_priv->uncore, GEN7_MISCCPCTL, misccpctl); 1098 1099 out: 1100 drm_WARN_ON(&dev_priv->drm, dev_priv->l3_parity.which_slice); 1101 spin_lock_irq(gt->irq_lock); 1102 gen5_gt_enable_irq(gt, GT_PARITY_ERROR(dev_priv)); 1103 spin_unlock_irq(gt->irq_lock); 1104 1105 mutex_unlock(&dev_priv->drm.struct_mutex); 1106 } 1107 1108 static bool gen11_port_hotplug_long_detect(enum hpd_pin pin, u32 val) 1109 { 1110 switch (pin) { 1111 case HPD_PORT_TC1: 1112 case HPD_PORT_TC2: 1113 case HPD_PORT_TC3: 1114 case HPD_PORT_TC4: 1115 case HPD_PORT_TC5: 1116 case HPD_PORT_TC6: 1117 return val & GEN11_HOTPLUG_CTL_LONG_DETECT(pin); 1118 default: 1119 return false; 1120 } 1121 } 1122 1123 static bool bxt_port_hotplug_long_detect(enum hpd_pin pin, u32 val) 1124 { 1125 switch (pin) { 1126 case HPD_PORT_A: 1127 return val & PORTA_HOTPLUG_LONG_DETECT; 1128 case HPD_PORT_B: 1129 return val & PORTB_HOTPLUG_LONG_DETECT; 1130 case HPD_PORT_C: 1131 return val & PORTC_HOTPLUG_LONG_DETECT; 1132 default: 1133 return false; 1134 } 1135 } 1136 1137 static bool icp_ddi_port_hotplug_long_detect(enum hpd_pin pin, u32 val) 1138 { 1139 switch (pin) { 1140 case HPD_PORT_A: 1141 case HPD_PORT_B: 1142 case HPD_PORT_C: 1143 case HPD_PORT_D: 1144 return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(pin); 1145 default: 1146 return false; 1147 } 1148 } 1149 1150 static bool icp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val) 1151 { 1152 switch (pin) { 1153 case HPD_PORT_TC1: 1154 case HPD_PORT_TC2: 1155 case HPD_PORT_TC3: 1156 case HPD_PORT_TC4: 1157 case HPD_PORT_TC5: 1158 case HPD_PORT_TC6: 1159 return val & ICP_TC_HPD_LONG_DETECT(pin); 1160 default: 1161 return false; 1162 } 1163 } 1164 1165 static bool spt_port_hotplug2_long_detect(enum hpd_pin pin, u32 val) 1166 
{ 1167 switch (pin) { 1168 case HPD_PORT_E: 1169 return val & PORTE_HOTPLUG_LONG_DETECT; 1170 default: 1171 return false; 1172 } 1173 } 1174 1175 static bool spt_port_hotplug_long_detect(enum hpd_pin pin, u32 val) 1176 { 1177 switch (pin) { 1178 case HPD_PORT_A: 1179 return val & PORTA_HOTPLUG_LONG_DETECT; 1180 case HPD_PORT_B: 1181 return val & PORTB_HOTPLUG_LONG_DETECT; 1182 case HPD_PORT_C: 1183 return val & PORTC_HOTPLUG_LONG_DETECT; 1184 case HPD_PORT_D: 1185 return val & PORTD_HOTPLUG_LONG_DETECT; 1186 default: 1187 return false; 1188 } 1189 } 1190 1191 static bool ilk_port_hotplug_long_detect(enum hpd_pin pin, u32 val) 1192 { 1193 switch (pin) { 1194 case HPD_PORT_A: 1195 return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT; 1196 default: 1197 return false; 1198 } 1199 } 1200 1201 static bool pch_port_hotplug_long_detect(enum hpd_pin pin, u32 val) 1202 { 1203 switch (pin) { 1204 case HPD_PORT_B: 1205 return val & PORTB_HOTPLUG_LONG_DETECT; 1206 case HPD_PORT_C: 1207 return val & PORTC_HOTPLUG_LONG_DETECT; 1208 case HPD_PORT_D: 1209 return val & PORTD_HOTPLUG_LONG_DETECT; 1210 default: 1211 return false; 1212 } 1213 } 1214 1215 static bool i9xx_port_hotplug_long_detect(enum hpd_pin pin, u32 val) 1216 { 1217 switch (pin) { 1218 case HPD_PORT_B: 1219 return val & PORTB_HOTPLUG_INT_LONG_PULSE; 1220 case HPD_PORT_C: 1221 return val & PORTC_HOTPLUG_INT_LONG_PULSE; 1222 case HPD_PORT_D: 1223 return val & PORTD_HOTPLUG_INT_LONG_PULSE; 1224 default: 1225 return false; 1226 } 1227 } 1228 1229 /* 1230 * Get a bit mask of pins that have triggered, and which ones may be long. 1231 * This can be called multiple times with the same masks to accumulate 1232 * hotplug detection results from several registers. 1233 * 1234 * Note that the caller is expected to zero out the masks initially. 
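 *
 * Illustrative usage (simplified sketch; see e.g. i9xx_hpd_irq_handler()
 * below, where "trigger" and "dig_reg" stand in for the actual register
 * values read in that handler):
 *
 *	u32 pin_mask = 0, long_mask = 0;
 *
 *	intel_get_hpd_pins(i915, &pin_mask, &long_mask, trigger, dig_reg,
 *			   i915->display.hotplug.hpd,
 *			   i9xx_port_hotplug_long_detect);
 *	intel_hpd_irq_handler(i915, pin_mask, long_mask);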
1235 */ 1236 static void intel_get_hpd_pins(struct drm_i915_private *dev_priv, 1237 u32 *pin_mask, u32 *long_mask, 1238 u32 hotplug_trigger, u32 dig_hotplug_reg, 1239 const u32 hpd[HPD_NUM_PINS], 1240 bool long_pulse_detect(enum hpd_pin pin, u32 val)) 1241 { 1242 enum hpd_pin pin; 1243 1244 BUILD_BUG_ON(BITS_PER_TYPE(*pin_mask) < HPD_NUM_PINS); 1245 1246 for_each_hpd_pin(pin) { 1247 if ((hpd[pin] & hotplug_trigger) == 0) 1248 continue; 1249 1250 *pin_mask |= BIT(pin); 1251 1252 if (long_pulse_detect(pin, dig_hotplug_reg)) 1253 *long_mask |= BIT(pin); 1254 } 1255 1256 drm_dbg(&dev_priv->drm, 1257 "hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x, long 0x%08x\n", 1258 hotplug_trigger, dig_hotplug_reg, *pin_mask, *long_mask); 1259 1260 } 1261 1262 static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv, 1263 const u32 hpd[HPD_NUM_PINS]) 1264 { 1265 struct intel_encoder *encoder; 1266 u32 enabled_irqs = 0; 1267 1268 for_each_intel_encoder(&dev_priv->drm, encoder) 1269 if (dev_priv->display.hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED) 1270 enabled_irqs |= hpd[encoder->hpd_pin]; 1271 1272 return enabled_irqs; 1273 } 1274 1275 static u32 intel_hpd_hotplug_irqs(struct drm_i915_private *dev_priv, 1276 const u32 hpd[HPD_NUM_PINS]) 1277 { 1278 struct intel_encoder *encoder; 1279 u32 hotplug_irqs = 0; 1280 1281 for_each_intel_encoder(&dev_priv->drm, encoder) 1282 hotplug_irqs |= hpd[encoder->hpd_pin]; 1283 1284 return hotplug_irqs; 1285 } 1286 1287 static u32 intel_hpd_hotplug_enables(struct drm_i915_private *i915, 1288 hotplug_enables_func hotplug_enables) 1289 { 1290 struct intel_encoder *encoder; 1291 u32 hotplug = 0; 1292 1293 for_each_intel_encoder(&i915->drm, encoder) 1294 hotplug |= hotplug_enables(i915, encoder->hpd_pin); 1295 1296 return hotplug; 1297 } 1298 1299 static void gmbus_irq_handler(struct drm_i915_private *dev_priv) 1300 { 1301 wake_up_all(&dev_priv->display.gmbus.wait_queue); 1302 } 1303 1304 static void dp_aux_irq_handler(struct drm_i915_private *dev_priv) 1305 { 1306 wake_up_all(&dev_priv->display.gmbus.wait_queue); 1307 } 1308 1309 #if defined(CONFIG_DEBUG_FS) 1310 static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 1311 enum pipe pipe, 1312 u32 crc0, u32 crc1, 1313 u32 crc2, u32 crc3, 1314 u32 crc4) 1315 { 1316 struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe); 1317 struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc; 1318 u32 crcs[5] = { crc0, crc1, crc2, crc3, crc4 }; 1319 1320 trace_intel_pipe_crc(crtc, crcs); 1321 1322 spin_lock(&pipe_crc->lock); 1323 /* 1324 * For some not yet identified reason, the first CRC is 1325 * bonkers. So let's just wait for the next vblank and read 1326 * out the buggy result. 1327 * 1328 * On GEN8+ sometimes the second CRC is bonkers as well, so 1329 * don't trust that one either. 
1330 */ 1331 if (pipe_crc->skipped <= 0 || 1332 (DISPLAY_VER(dev_priv) >= 8 && pipe_crc->skipped == 1)) { 1333 pipe_crc->skipped++; 1334 spin_unlock(&pipe_crc->lock); 1335 return; 1336 } 1337 spin_unlock(&pipe_crc->lock); 1338 1339 drm_crtc_add_crc_entry(&crtc->base, true, 1340 drm_crtc_accurate_vblank_count(&crtc->base), 1341 crcs); 1342 } 1343 #else 1344 static inline void 1345 display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 1346 enum pipe pipe, 1347 u32 crc0, u32 crc1, 1348 u32 crc2, u32 crc3, 1349 u32 crc4) {} 1350 #endif 1351 1352 static void flip_done_handler(struct drm_i915_private *i915, 1353 enum pipe pipe) 1354 { 1355 struct intel_crtc *crtc = intel_crtc_for_pipe(i915, pipe); 1356 struct drm_crtc_state *crtc_state = crtc->base.state; 1357 struct drm_pending_vblank_event *e = crtc_state->event; 1358 struct drm_device *dev = &i915->drm; 1359 unsigned long irqflags; 1360 1361 spin_lock_irqsave(&dev->event_lock, irqflags); 1362 1363 crtc_state->event = NULL; 1364 1365 drm_crtc_send_vblank_event(&crtc->base, e); 1366 1367 spin_unlock_irqrestore(&dev->event_lock, irqflags); 1368 } 1369 1370 static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 1371 enum pipe pipe) 1372 { 1373 display_pipe_crc_irq_handler(dev_priv, pipe, 1374 intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_1_IVB(pipe)), 1375 0, 0, 0, 0); 1376 } 1377 1378 static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 1379 enum pipe pipe) 1380 { 1381 display_pipe_crc_irq_handler(dev_priv, pipe, 1382 intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_1_IVB(pipe)), 1383 intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_2_IVB(pipe)), 1384 intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_3_IVB(pipe)), 1385 intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_4_IVB(pipe)), 1386 intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_5_IVB(pipe))); 1387 } 1388 1389 static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 1390 enum pipe pipe) 1391 { 1392 u32 res1, res2; 1393 1394 if (DISPLAY_VER(dev_priv) >= 3) 1395 res1 = intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RES1_I915(pipe)); 1396 else 1397 res1 = 0; 1398 1399 if (DISPLAY_VER(dev_priv) >= 5 || IS_G4X(dev_priv)) 1400 res2 = intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RES2_G4X(pipe)); 1401 else 1402 res2 = 0; 1403 1404 display_pipe_crc_irq_handler(dev_priv, pipe, 1405 intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RED(pipe)), 1406 intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_GREEN(pipe)), 1407 intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_BLUE(pipe)), 1408 res1, res2); 1409 } 1410 1411 static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv) 1412 { 1413 enum pipe pipe; 1414 1415 for_each_pipe(dev_priv, pipe) { 1416 intel_uncore_write(&dev_priv->uncore, PIPESTAT(pipe), 1417 PIPESTAT_INT_STATUS_MASK | 1418 PIPE_FIFO_UNDERRUN_STATUS); 1419 1420 dev_priv->pipestat_irq_mask[pipe] = 0; 1421 } 1422 } 1423 1424 static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv, 1425 u32 iir, u32 pipe_stats[I915_MAX_PIPES]) 1426 { 1427 enum pipe pipe; 1428 1429 spin_lock(&dev_priv->irq_lock); 1430 1431 if (!dev_priv->display_irqs_enabled) { 1432 spin_unlock(&dev_priv->irq_lock); 1433 return; 1434 } 1435 1436 for_each_pipe(dev_priv, pipe) { 1437 i915_reg_t reg; 1438 u32 status_mask, enable_mask, iir_bit = 0; 1439 1440 /* 1441 * PIPESTAT bits get signalled even when the interrupt is 1442 * disabled with the mask bits, and some of the status bits do 1443 * not generate interrupts at all (like 
the underrun bit). Hence
		 * we need to be careful that we only handle what we want to
		 * handle.
		 */

		/* fifo underruns are filtered in the underrun handler. */
		status_mask = PIPE_FIFO_UNDERRUN_STATUS;

		switch (pipe) {
		default:
		case PIPE_A:
			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
			break;
		case PIPE_B:
			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
			break;
		case PIPE_C:
			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
			break;
		}
		if (iir & iir_bit)
			status_mask |= dev_priv->pipestat_irq_mask[pipe];

		if (!status_mask)
			continue;

		reg = PIPESTAT(pipe);
		pipe_stats[pipe] = intel_uncore_read(&dev_priv->uncore, reg) & status_mask;
		enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

		/*
		 * Clear the PIPE*STAT regs before the IIR
		 *
		 * Toggle the enable bits to make sure we get an
		 * edge in the ISR pipe event bit if we don't clear
		 * all the enabled status bits. Otherwise the edge
		 * triggered IIR on i965/g4x wouldn't notice that
		 * an interrupt is still pending.
		 */
		if (pipe_stats[pipe]) {
			intel_uncore_write(&dev_priv->uncore, reg, pipe_stats[pipe]);
			intel_uncore_write(&dev_priv->uncore, reg, enable_mask);
		}
	}
	spin_unlock(&dev_priv->irq_lock);
}

static void i8xx_pipestat_irq_handler(struct drm_i915_private *dev_priv,
				      u16 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
			intel_handle_vblank(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}
}

static void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv,
				      u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	bool blc_event = false;
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
			intel_handle_vblank(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
			blc_event = true;

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (blc_event || (iir & I915_ASLE_INTERRUPT))
		intel_opregion_asle_intr(dev_priv);
}

static void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv,
				      u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	bool blc_event = false;
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
			intel_handle_vblank(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
			blc_event = true;

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (blc_event || (iir & I915_ASLE_INTERRUPT))
		intel_opregion_asle_intr(dev_priv);

	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		gmbus_irq_handler(dev_priv);
}

static void
valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv, 1559 u32 pipe_stats[I915_MAX_PIPES]) 1560 { 1561 enum pipe pipe; 1562 1563 for_each_pipe(dev_priv, pipe) { 1564 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS) 1565 intel_handle_vblank(dev_priv, pipe); 1566 1567 if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) 1568 flip_done_handler(dev_priv, pipe); 1569 1570 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 1571 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 1572 1573 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 1574 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 1575 } 1576 1577 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 1578 gmbus_irq_handler(dev_priv); 1579 } 1580 1581 static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv) 1582 { 1583 u32 hotplug_status = 0, hotplug_status_mask; 1584 int i; 1585 1586 if (IS_G4X(dev_priv) || 1587 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 1588 hotplug_status_mask = HOTPLUG_INT_STATUS_G4X | 1589 DP_AUX_CHANNEL_MASK_INT_STATUS_G4X; 1590 else 1591 hotplug_status_mask = HOTPLUG_INT_STATUS_I915; 1592 1593 /* 1594 * We absolutely have to clear all the pending interrupt 1595 * bits in PORT_HOTPLUG_STAT. Otherwise the ISR port 1596 * interrupt bit won't have an edge, and the i965/g4x 1597 * edge triggered IIR will not notice that an interrupt 1598 * is still pending. We can't use PORT_HOTPLUG_EN to 1599 * guarantee the edge as the act of toggling the enable 1600 * bits can itself generate a new hotplug interrupt :( 1601 */ 1602 for (i = 0; i < 10; i++) { 1603 u32 tmp = intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT) & hotplug_status_mask; 1604 1605 if (tmp == 0) 1606 return hotplug_status; 1607 1608 hotplug_status |= tmp; 1609 intel_uncore_write(&dev_priv->uncore, PORT_HOTPLUG_STAT, hotplug_status); 1610 } 1611 1612 drm_WARN_ONCE(&dev_priv->drm, 1, 1613 "PORT_HOTPLUG_STAT did not clear (0x%08x)\n", 1614 intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT)); 1615 1616 return hotplug_status; 1617 } 1618 1619 static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv, 1620 u32 hotplug_status) 1621 { 1622 u32 pin_mask = 0, long_mask = 0; 1623 u32 hotplug_trigger; 1624 1625 if (IS_G4X(dev_priv) || 1626 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 1627 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X; 1628 else 1629 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; 1630 1631 if (hotplug_trigger) { 1632 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 1633 hotplug_trigger, hotplug_trigger, 1634 dev_priv->display.hotplug.hpd, 1635 i9xx_port_hotplug_long_detect); 1636 1637 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 1638 } 1639 1640 if ((IS_G4X(dev_priv) || 1641 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && 1642 hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X) 1643 dp_aux_irq_handler(dev_priv); 1644 } 1645 1646 static irqreturn_t valleyview_irq_handler(int irq, void *arg) 1647 { 1648 struct drm_i915_private *dev_priv = arg; 1649 irqreturn_t ret = IRQ_NONE; 1650 1651 if (!intel_irqs_enabled(dev_priv)) 1652 return IRQ_NONE; 1653 1654 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 1655 disable_rpm_wakeref_asserts(&dev_priv->runtime_pm); 1656 1657 do { 1658 u32 iir, gt_iir, pm_iir; 1659 u32 pipe_stats[I915_MAX_PIPES] = {}; 1660 u32 hotplug_status = 0; 1661 u32 ier = 0; 1662 1663 gt_iir = intel_uncore_read(&dev_priv->uncore, GTIIR); 1664 pm_iir = intel_uncore_read(&dev_priv->uncore, GEN6_PMIIR); 1665 iir = 
intel_uncore_read(&dev_priv->uncore, VLV_IIR); 1666 1667 if (gt_iir == 0 && pm_iir == 0 && iir == 0) 1668 break; 1669 1670 ret = IRQ_HANDLED; 1671 1672 /* 1673 * Theory on interrupt generation, based on empirical evidence: 1674 * 1675 * x = ((VLV_IIR & VLV_IER) || 1676 * (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) && 1677 * (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE))); 1678 * 1679 * A CPU interrupt will only be raised when 'x' has a 0->1 edge. 1680 * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to 1681 * guarantee the CPU interrupt will be raised again even if we 1682 * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR 1683 * bits this time around. 1684 */ 1685 intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, 0); 1686 ier = intel_uncore_rmw(&dev_priv->uncore, VLV_IER, ~0, 0); 1687 1688 if (gt_iir) 1689 intel_uncore_write(&dev_priv->uncore, GTIIR, gt_iir); 1690 if (pm_iir) 1691 intel_uncore_write(&dev_priv->uncore, GEN6_PMIIR, pm_iir); 1692 1693 if (iir & I915_DISPLAY_PORT_INTERRUPT) 1694 hotplug_status = i9xx_hpd_irq_ack(dev_priv); 1695 1696 /* Call regardless, as some status bits might not be 1697 * signalled in iir */ 1698 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats); 1699 1700 if (iir & (I915_LPE_PIPE_A_INTERRUPT | 1701 I915_LPE_PIPE_B_INTERRUPT)) 1702 intel_lpe_audio_irq_handler(dev_priv); 1703 1704 /* 1705 * VLV_IIR is single buffered, and reflects the level 1706 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last. 1707 */ 1708 if (iir) 1709 intel_uncore_write(&dev_priv->uncore, VLV_IIR, iir); 1710 1711 intel_uncore_write(&dev_priv->uncore, VLV_IER, ier); 1712 intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); 1713 1714 if (gt_iir) 1715 gen6_gt_irq_handler(to_gt(dev_priv), gt_iir); 1716 if (pm_iir) 1717 gen6_rps_irq_handler(&to_gt(dev_priv)->rps, pm_iir); 1718 1719 if (hotplug_status) 1720 i9xx_hpd_irq_handler(dev_priv, hotplug_status); 1721 1722 valleyview_pipestat_irq_handler(dev_priv, pipe_stats); 1723 } while (0); 1724 1725 pmu_irq_stats(dev_priv, ret); 1726 1727 enable_rpm_wakeref_asserts(&dev_priv->runtime_pm); 1728 1729 return ret; 1730 } 1731 1732 static irqreturn_t cherryview_irq_handler(int irq, void *arg) 1733 { 1734 struct drm_i915_private *dev_priv = arg; 1735 irqreturn_t ret = IRQ_NONE; 1736 1737 if (!intel_irqs_enabled(dev_priv)) 1738 return IRQ_NONE; 1739 1740 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 1741 disable_rpm_wakeref_asserts(&dev_priv->runtime_pm); 1742 1743 do { 1744 u32 master_ctl, iir; 1745 u32 pipe_stats[I915_MAX_PIPES] = {}; 1746 u32 hotplug_status = 0; 1747 u32 ier = 0; 1748 1749 master_ctl = intel_uncore_read(&dev_priv->uncore, GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL; 1750 iir = intel_uncore_read(&dev_priv->uncore, VLV_IIR); 1751 1752 if (master_ctl == 0 && iir == 0) 1753 break; 1754 1755 ret = IRQ_HANDLED; 1756 1757 /* 1758 * Theory on interrupt generation, based on empirical evidence: 1759 * 1760 * x = ((VLV_IIR & VLV_IER) || 1761 * ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) && 1762 * (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL))); 1763 * 1764 * A CPU interrupt will only be raised when 'x' has a 0->1 edge. 1765 * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to 1766 * guarantee the CPU interrupt will be raised again even if we 1767 * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL 1768 * bits this time around. 
1769 */ 1770 intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, 0); 1771 ier = intel_uncore_rmw(&dev_priv->uncore, VLV_IER, ~0, 0); 1772 1773 gen8_gt_irq_handler(to_gt(dev_priv), master_ctl); 1774 1775 if (iir & I915_DISPLAY_PORT_INTERRUPT) 1776 hotplug_status = i9xx_hpd_irq_ack(dev_priv); 1777 1778 /* Call regardless, as some status bits might not be 1779 * signalled in iir */ 1780 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats); 1781 1782 if (iir & (I915_LPE_PIPE_A_INTERRUPT | 1783 I915_LPE_PIPE_B_INTERRUPT | 1784 I915_LPE_PIPE_C_INTERRUPT)) 1785 intel_lpe_audio_irq_handler(dev_priv); 1786 1787 /* 1788 * VLV_IIR is single buffered, and reflects the level 1789 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last. 1790 */ 1791 if (iir) 1792 intel_uncore_write(&dev_priv->uncore, VLV_IIR, iir); 1793 1794 intel_uncore_write(&dev_priv->uncore, VLV_IER, ier); 1795 intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 1796 1797 if (hotplug_status) 1798 i9xx_hpd_irq_handler(dev_priv, hotplug_status); 1799 1800 valleyview_pipestat_irq_handler(dev_priv, pipe_stats); 1801 } while (0); 1802 1803 pmu_irq_stats(dev_priv, ret); 1804 1805 enable_rpm_wakeref_asserts(&dev_priv->runtime_pm); 1806 1807 return ret; 1808 } 1809 1810 static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv, 1811 u32 hotplug_trigger) 1812 { 1813 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; 1814 1815 /* 1816 * Somehow the PCH doesn't seem to really ack the interrupt to the CPU 1817 * unless we touch the hotplug register, even if hotplug_trigger is 1818 * zero. Not acking leads to "The master control interrupt lied (SDE)!" 1819 * errors. 1820 */ 1821 dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG); 1822 if (!hotplug_trigger) { 1823 u32 mask = PORTA_HOTPLUG_STATUS_MASK | 1824 PORTD_HOTPLUG_STATUS_MASK | 1825 PORTC_HOTPLUG_STATUS_MASK | 1826 PORTB_HOTPLUG_STATUS_MASK; 1827 dig_hotplug_reg &= ~mask; 1828 } 1829 1830 intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, dig_hotplug_reg); 1831 if (!hotplug_trigger) 1832 return; 1833 1834 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 1835 hotplug_trigger, dig_hotplug_reg, 1836 dev_priv->display.hotplug.pch_hpd, 1837 pch_port_hotplug_long_detect); 1838 1839 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 1840 } 1841 1842 static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) 1843 { 1844 enum pipe pipe; 1845 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK; 1846 1847 ibx_hpd_irq_handler(dev_priv, hotplug_trigger); 1848 1849 if (pch_iir & SDE_AUDIO_POWER_MASK) { 1850 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >> 1851 SDE_AUDIO_POWER_SHIFT); 1852 drm_dbg(&dev_priv->drm, "PCH audio power change on port %d\n", 1853 port_name(port)); 1854 } 1855 1856 if (pch_iir & SDE_AUX_MASK) 1857 dp_aux_irq_handler(dev_priv); 1858 1859 if (pch_iir & SDE_GMBUS) 1860 gmbus_irq_handler(dev_priv); 1861 1862 if (pch_iir & SDE_AUDIO_HDCP_MASK) 1863 drm_dbg(&dev_priv->drm, "PCH HDCP audio interrupt\n"); 1864 1865 if (pch_iir & SDE_AUDIO_TRANS_MASK) 1866 drm_dbg(&dev_priv->drm, "PCH transcoder audio interrupt\n"); 1867 1868 if (pch_iir & SDE_POISON) 1869 drm_err(&dev_priv->drm, "PCH poison interrupt\n"); 1870 1871 if (pch_iir & SDE_FDI_MASK) { 1872 for_each_pipe(dev_priv, pipe) 1873 drm_dbg(&dev_priv->drm, " pipe %c FDI IIR: 0x%08x\n", 1874 pipe_name(pipe), 1875 intel_uncore_read(&dev_priv->uncore, FDI_RX_IIR(pipe))); 1876 } 1877 1878 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE)) 1879 
drm_dbg(&dev_priv->drm, "PCH transcoder CRC done interrupt\n"); 1880 1881 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR)) 1882 drm_dbg(&dev_priv->drm, 1883 "PCH transcoder CRC error interrupt\n"); 1884 1885 if (pch_iir & SDE_TRANSA_FIFO_UNDER) 1886 intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A); 1887 1888 if (pch_iir & SDE_TRANSB_FIFO_UNDER) 1889 intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_B); 1890 } 1891 1892 static void ivb_err_int_handler(struct drm_i915_private *dev_priv) 1893 { 1894 u32 err_int = intel_uncore_read(&dev_priv->uncore, GEN7_ERR_INT); 1895 enum pipe pipe; 1896 1897 if (err_int & ERR_INT_POISON) 1898 drm_err(&dev_priv->drm, "Poison interrupt\n"); 1899 1900 for_each_pipe(dev_priv, pipe) { 1901 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) 1902 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 1903 1904 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) { 1905 if (IS_IVYBRIDGE(dev_priv)) 1906 ivb_pipe_crc_irq_handler(dev_priv, pipe); 1907 else 1908 hsw_pipe_crc_irq_handler(dev_priv, pipe); 1909 } 1910 } 1911 1912 intel_uncore_write(&dev_priv->uncore, GEN7_ERR_INT, err_int); 1913 } 1914 1915 static void cpt_serr_int_handler(struct drm_i915_private *dev_priv) 1916 { 1917 u32 serr_int = intel_uncore_read(&dev_priv->uncore, SERR_INT); 1918 enum pipe pipe; 1919 1920 if (serr_int & SERR_INT_POISON) 1921 drm_err(&dev_priv->drm, "PCH poison interrupt\n"); 1922 1923 for_each_pipe(dev_priv, pipe) 1924 if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe)) 1925 intel_pch_fifo_underrun_irq_handler(dev_priv, pipe); 1926 1927 intel_uncore_write(&dev_priv->uncore, SERR_INT, serr_int); 1928 } 1929 1930 static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) 1931 { 1932 enum pipe pipe; 1933 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; 1934 1935 ibx_hpd_irq_handler(dev_priv, hotplug_trigger); 1936 1937 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) { 1938 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> 1939 SDE_AUDIO_POWER_SHIFT_CPT); 1940 drm_dbg(&dev_priv->drm, "PCH audio power change on port %c\n", 1941 port_name(port)); 1942 } 1943 1944 if (pch_iir & SDE_AUX_MASK_CPT) 1945 dp_aux_irq_handler(dev_priv); 1946 1947 if (pch_iir & SDE_GMBUS_CPT) 1948 gmbus_irq_handler(dev_priv); 1949 1950 if (pch_iir & SDE_AUDIO_CP_REQ_CPT) 1951 drm_dbg(&dev_priv->drm, "Audio CP request interrupt\n"); 1952 1953 if (pch_iir & SDE_AUDIO_CP_CHG_CPT) 1954 drm_dbg(&dev_priv->drm, "Audio CP change interrupt\n"); 1955 1956 if (pch_iir & SDE_FDI_MASK_CPT) { 1957 for_each_pipe(dev_priv, pipe) 1958 drm_dbg(&dev_priv->drm, " pipe %c FDI IIR: 0x%08x\n", 1959 pipe_name(pipe), 1960 intel_uncore_read(&dev_priv->uncore, FDI_RX_IIR(pipe))); 1961 } 1962 1963 if (pch_iir & SDE_ERROR_CPT) 1964 cpt_serr_int_handler(dev_priv); 1965 } 1966 1967 static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) 1968 { 1969 u32 ddi_hotplug_trigger = pch_iir & SDE_DDI_HOTPLUG_MASK_ICP; 1970 u32 tc_hotplug_trigger = pch_iir & SDE_TC_HOTPLUG_MASK_ICP; 1971 u32 pin_mask = 0, long_mask = 0; 1972 1973 if (ddi_hotplug_trigger) { 1974 u32 dig_hotplug_reg; 1975 1976 dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, SHOTPLUG_CTL_DDI, 0, 0); 1977 1978 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 1979 ddi_hotplug_trigger, dig_hotplug_reg, 1980 dev_priv->display.hotplug.pch_hpd, 1981 icp_ddi_port_hotplug_long_detect); 1982 } 1983 1984 if (tc_hotplug_trigger) { 1985 u32 dig_hotplug_reg; 1986 1987 dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, SHOTPLUG_CTL_TC, 0, 0); 1988 1989 
intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 1990 tc_hotplug_trigger, dig_hotplug_reg, 1991 dev_priv->display.hotplug.pch_hpd, 1992 icp_tc_port_hotplug_long_detect); 1993 } 1994 1995 if (pin_mask) 1996 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 1997 1998 if (pch_iir & SDE_GMBUS_ICP) 1999 gmbus_irq_handler(dev_priv); 2000 } 2001 2002 static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) 2003 { 2004 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT & 2005 ~SDE_PORTE_HOTPLUG_SPT; 2006 u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT; 2007 u32 pin_mask = 0, long_mask = 0; 2008 2009 if (hotplug_trigger) { 2010 u32 dig_hotplug_reg; 2011 2012 dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG, 0, 0); 2013 2014 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 2015 hotplug_trigger, dig_hotplug_reg, 2016 dev_priv->display.hotplug.pch_hpd, 2017 spt_port_hotplug_long_detect); 2018 } 2019 2020 if (hotplug2_trigger) { 2021 u32 dig_hotplug_reg; 2022 2023 dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG2, 0, 0); 2024 2025 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 2026 hotplug2_trigger, dig_hotplug_reg, 2027 dev_priv->display.hotplug.pch_hpd, 2028 spt_port_hotplug2_long_detect); 2029 } 2030 2031 if (pin_mask) 2032 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2033 2034 if (pch_iir & SDE_GMBUS_CPT) 2035 gmbus_irq_handler(dev_priv); 2036 } 2037 2038 static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv, 2039 u32 hotplug_trigger) 2040 { 2041 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; 2042 2043 dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL, 0, 0); 2044 2045 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 2046 hotplug_trigger, dig_hotplug_reg, 2047 dev_priv->display.hotplug.hpd, 2048 ilk_port_hotplug_long_detect); 2049 2050 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2051 } 2052 2053 static void ilk_display_irq_handler(struct drm_i915_private *dev_priv, 2054 u32 de_iir) 2055 { 2056 enum pipe pipe; 2057 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG; 2058 2059 if (hotplug_trigger) 2060 ilk_hpd_irq_handler(dev_priv, hotplug_trigger); 2061 2062 if (de_iir & DE_AUX_CHANNEL_A) 2063 dp_aux_irq_handler(dev_priv); 2064 2065 if (de_iir & DE_GSE) 2066 intel_opregion_asle_intr(dev_priv); 2067 2068 if (de_iir & DE_POISON) 2069 drm_err(&dev_priv->drm, "Poison interrupt\n"); 2070 2071 for_each_pipe(dev_priv, pipe) { 2072 if (de_iir & DE_PIPE_VBLANK(pipe)) 2073 intel_handle_vblank(dev_priv, pipe); 2074 2075 if (de_iir & DE_PLANE_FLIP_DONE(pipe)) 2076 flip_done_handler(dev_priv, pipe); 2077 2078 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe)) 2079 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2080 2081 if (de_iir & DE_PIPE_CRC_DONE(pipe)) 2082 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 2083 } 2084 2085 /* check event from PCH */ 2086 if (de_iir & DE_PCH_EVENT) { 2087 u32 pch_iir = intel_uncore_read(&dev_priv->uncore, SDEIIR); 2088 2089 if (HAS_PCH_CPT(dev_priv)) 2090 cpt_irq_handler(dev_priv, pch_iir); 2091 else 2092 ibx_irq_handler(dev_priv, pch_iir); 2093 2094 /* should clear PCH hotplug event before clear CPU irq */ 2095 intel_uncore_write(&dev_priv->uncore, SDEIIR, pch_iir); 2096 } 2097 2098 if (DISPLAY_VER(dev_priv) == 5 && de_iir & DE_PCU_EVENT) 2099 gen5_rps_irq_handler(&to_gt(dev_priv)->rps); 2100 } 2101 2102 static void ivb_display_irq_handler(struct drm_i915_private *dev_priv, 2103 u32 de_iir) 2104 { 2105 enum pipe pipe; 2106 u32 
hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;

	if (hotplug_trigger)
		ilk_hpd_irq_handler(dev_priv, hotplug_trigger);

	if (de_iir & DE_ERR_INT_IVB)
		ivb_err_int_handler(dev_priv);

	if (de_iir & DE_AUX_CHANNEL_A_IVB)
		dp_aux_irq_handler(dev_priv);

	if (de_iir & DE_GSE_IVB)
		intel_opregion_asle_intr(dev_priv);

	for_each_pipe(dev_priv, pipe) {
		if (de_iir & DE_PIPE_VBLANK_IVB(pipe))
			intel_handle_vblank(dev_priv, pipe);

		if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe))
			flip_done_handler(dev_priv, pipe);
	}

	/* check event from PCH */
	if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) {
		u32 pch_iir = intel_uncore_read(&dev_priv->uncore, SDEIIR);

		cpt_irq_handler(dev_priv, pch_iir);

		/* clear the PCH hotplug event before clearing the CPU irq */
		intel_uncore_write(&dev_priv->uncore, SDEIIR, pch_iir);
	}
}

/*
 * To handle irqs with the minimum potential races with fresh interrupts, we:
 * 1 - Disable Master Interrupt Control.
 * 2 - Find the source(s) of the interrupt.
 * 3 - Clear the Interrupt Identity bits (IIR).
 * 4 - Process the interrupt(s) that had bits set in the IIRs.
 * 5 - Re-enable Master Interrupt Control.
 */
static irqreturn_t ilk_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *i915 = arg;
	void __iomem * const regs = i915->uncore.regs;
	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
	irqreturn_t ret = IRQ_NONE;

	if (unlikely(!intel_irqs_enabled(i915)))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&i915->runtime_pm);

	/* disable master interrupt before clearing iir */
	de_ier = raw_reg_read(regs, DEIER);
	raw_reg_write(regs, DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);

	/* Disable south interrupts. We'll only write to SDEIIR once, so further
	 * interrupts will be stored on its back queue, and then we'll be
	 * able to process them after we restore SDEIER (as soon as we restore
	 * it, we'll get an interrupt if SDEIIR still has something to process
	 * due to its back queue).
*/ 2169 if (!HAS_PCH_NOP(i915)) { 2170 sde_ier = raw_reg_read(regs, SDEIER); 2171 raw_reg_write(regs, SDEIER, 0); 2172 } 2173 2174 /* Find, clear, then process each source of interrupt */ 2175 2176 gt_iir = raw_reg_read(regs, GTIIR); 2177 if (gt_iir) { 2178 raw_reg_write(regs, GTIIR, gt_iir); 2179 if (GRAPHICS_VER(i915) >= 6) 2180 gen6_gt_irq_handler(to_gt(i915), gt_iir); 2181 else 2182 gen5_gt_irq_handler(to_gt(i915), gt_iir); 2183 ret = IRQ_HANDLED; 2184 } 2185 2186 de_iir = raw_reg_read(regs, DEIIR); 2187 if (de_iir) { 2188 raw_reg_write(regs, DEIIR, de_iir); 2189 if (DISPLAY_VER(i915) >= 7) 2190 ivb_display_irq_handler(i915, de_iir); 2191 else 2192 ilk_display_irq_handler(i915, de_iir); 2193 ret = IRQ_HANDLED; 2194 } 2195 2196 if (GRAPHICS_VER(i915) >= 6) { 2197 u32 pm_iir = raw_reg_read(regs, GEN6_PMIIR); 2198 if (pm_iir) { 2199 raw_reg_write(regs, GEN6_PMIIR, pm_iir); 2200 gen6_rps_irq_handler(&to_gt(i915)->rps, pm_iir); 2201 ret = IRQ_HANDLED; 2202 } 2203 } 2204 2205 raw_reg_write(regs, DEIER, de_ier); 2206 if (sde_ier) 2207 raw_reg_write(regs, SDEIER, sde_ier); 2208 2209 pmu_irq_stats(i915, ret); 2210 2211 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2212 enable_rpm_wakeref_asserts(&i915->runtime_pm); 2213 2214 return ret; 2215 } 2216 2217 static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv, 2218 u32 hotplug_trigger) 2219 { 2220 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; 2221 2222 dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG, 0, 0); 2223 2224 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 2225 hotplug_trigger, dig_hotplug_reg, 2226 dev_priv->display.hotplug.hpd, 2227 bxt_port_hotplug_long_detect); 2228 2229 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2230 } 2231 2232 static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir) 2233 { 2234 u32 pin_mask = 0, long_mask = 0; 2235 u32 trigger_tc = iir & GEN11_DE_TC_HOTPLUG_MASK; 2236 u32 trigger_tbt = iir & GEN11_DE_TBT_HOTPLUG_MASK; 2237 2238 if (trigger_tc) { 2239 u32 dig_hotplug_reg; 2240 2241 dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL, 0, 0); 2242 2243 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 2244 trigger_tc, dig_hotplug_reg, 2245 dev_priv->display.hotplug.hpd, 2246 gen11_port_hotplug_long_detect); 2247 } 2248 2249 if (trigger_tbt) { 2250 u32 dig_hotplug_reg; 2251 2252 dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL, 0, 0); 2253 2254 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 2255 trigger_tbt, dig_hotplug_reg, 2256 dev_priv->display.hotplug.hpd, 2257 gen11_port_hotplug_long_detect); 2258 } 2259 2260 if (pin_mask) 2261 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2262 else 2263 drm_err(&dev_priv->drm, 2264 "Unexpected DE HPD interrupt 0x%08x\n", iir); 2265 } 2266 2267 static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv) 2268 { 2269 u32 mask; 2270 2271 if (DISPLAY_VER(dev_priv) >= 13) 2272 return TGL_DE_PORT_AUX_DDIA | 2273 TGL_DE_PORT_AUX_DDIB | 2274 TGL_DE_PORT_AUX_DDIC | 2275 XELPD_DE_PORT_AUX_DDID | 2276 XELPD_DE_PORT_AUX_DDIE | 2277 TGL_DE_PORT_AUX_USBC1 | 2278 TGL_DE_PORT_AUX_USBC2 | 2279 TGL_DE_PORT_AUX_USBC3 | 2280 TGL_DE_PORT_AUX_USBC4; 2281 else if (DISPLAY_VER(dev_priv) >= 12) 2282 return TGL_DE_PORT_AUX_DDIA | 2283 TGL_DE_PORT_AUX_DDIB | 2284 TGL_DE_PORT_AUX_DDIC | 2285 TGL_DE_PORT_AUX_USBC1 | 2286 TGL_DE_PORT_AUX_USBC2 | 2287 TGL_DE_PORT_AUX_USBC3 | 2288 TGL_DE_PORT_AUX_USBC4 | 2289 TGL_DE_PORT_AUX_USBC5 | 2290 
TGL_DE_PORT_AUX_USBC6;

	mask = GEN8_AUX_CHANNEL_A;
	if (DISPLAY_VER(dev_priv) >= 9)
		mask |= GEN9_AUX_CHANNEL_B |
			GEN9_AUX_CHANNEL_C |
			GEN9_AUX_CHANNEL_D;

	if (DISPLAY_VER(dev_priv) == 11) {
		mask |= ICL_AUX_CHANNEL_F;
		mask |= ICL_AUX_CHANNEL_E;
	}

	return mask;
}

static u32 gen8_de_pipe_fault_mask(struct drm_i915_private *dev_priv)
{
	if (DISPLAY_VER(dev_priv) >= 13 || HAS_D12_PLANE_MINIMIZATION(dev_priv))
		return RKL_DE_PIPE_IRQ_FAULT_ERRORS;
	else if (DISPLAY_VER(dev_priv) >= 11)
		return GEN11_DE_PIPE_IRQ_FAULT_ERRORS;
	else if (DISPLAY_VER(dev_priv) >= 9)
		return GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
	else
		return GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
}

static void
gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
{
	bool found = false;

	if (iir & GEN8_DE_MISC_GSE) {
		intel_opregion_asle_intr(dev_priv);
		found = true;
	}

	if (iir & GEN8_DE_EDP_PSR) {
		struct intel_encoder *encoder;
		u32 psr_iir;
		i915_reg_t iir_reg;

		for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
			struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

			if (DISPLAY_VER(dev_priv) >= 12)
				iir_reg = TRANS_PSR_IIR(intel_dp->psr.transcoder);
			else
				iir_reg = EDP_PSR_IIR;

			psr_iir = intel_uncore_rmw(&dev_priv->uncore, iir_reg, 0, 0);

			if (psr_iir)
				found = true;

			intel_psr_irq_handler(intel_dp, psr_iir);

			/* prior to GEN12 there is only one EDP PSR instance */
			if (DISPLAY_VER(dev_priv) < 12)
				break;
		}
	}

	if (!found)
		drm_err(&dev_priv->drm, "Unexpected DE Misc interrupt\n");
}

static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv,
					   u32 te_trigger)
{
	enum pipe pipe = INVALID_PIPE;
	enum transcoder dsi_trans;
	enum port port;
	u32 val, tmp;

	/*
	 * In case of dual link, TE comes from DSI_1;
	 * this is to check whether dual link is enabled.
	 */
	val = intel_uncore_read(&dev_priv->uncore, TRANS_DDI_FUNC_CTL2(TRANSCODER_DSI_0));
	val &= PORT_SYNC_MODE_ENABLE;

	/*
	 * If dual link is enabled, then read the DSI_0
	 * transcoder registers.
	 */
	port = ((te_trigger & DSI1_TE && val) || (te_trigger & DSI0_TE)) ?
		PORT_A : PORT_B;
	dsi_trans = (port == PORT_A) ? TRANSCODER_DSI_0 : TRANSCODER_DSI_1;

	/* Check if DSI is configured in command mode */
	val = intel_uncore_read(&dev_priv->uncore, DSI_TRANS_FUNC_CONF(dsi_trans));
	val = val & OP_MODE_MASK;

	if (val != CMD_MODE_NO_GATE && val != CMD_MODE_TE_GATE) {
		drm_err(&dev_priv->drm, "DSI transcoder not configured in command mode\n");
		return;
	}

	/* Get PIPE for handling VBLANK event */
	val = intel_uncore_read(&dev_priv->uncore, TRANS_DDI_FUNC_CTL(dsi_trans));
	switch (val & TRANS_DDI_EDP_INPUT_MASK) {
	case TRANS_DDI_EDP_INPUT_A_ON:
		pipe = PIPE_A;
		break;
	case TRANS_DDI_EDP_INPUT_B_ONOFF:
		pipe = PIPE_B;
		break;
	case TRANS_DDI_EDP_INPUT_C_ONOFF:
		pipe = PIPE_C;
		break;
	default:
		drm_err(&dev_priv->drm, "Invalid PIPE\n");
		return;
	}

	intel_handle_vblank(dev_priv, pipe);

	/* clear TE in dsi IIR */
	port = (te_trigger & DSI1_TE) ?
PORT_B : PORT_A;
	tmp = intel_uncore_rmw(&dev_priv->uncore, DSI_INTR_IDENT_REG(port), 0, 0);
}

static u32 gen8_de_pipe_flip_done_mask(struct drm_i915_private *i915)
{
	if (DISPLAY_VER(i915) >= 9)
		return GEN9_PIPE_PLANE1_FLIP_DONE;
	else
		return GEN8_PIPE_PRIMARY_FLIP_DONE;
}

u32 gen8_de_pipe_underrun_mask(struct drm_i915_private *dev_priv)
{
	u32 mask = GEN8_PIPE_FIFO_UNDERRUN;

	if (DISPLAY_VER(dev_priv) >= 13)
		mask |= XELPD_PIPE_SOFT_UNDERRUN |
			XELPD_PIPE_HARD_UNDERRUN;

	return mask;
}

static irqreturn_t
gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
{
	irqreturn_t ret = IRQ_NONE;
	u32 iir;
	enum pipe pipe;

	drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_DISPLAY(dev_priv));

	if (master_ctl & GEN8_DE_MISC_IRQ) {
		iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_MISC_IIR);
		if (iir) {
			intel_uncore_write(&dev_priv->uncore, GEN8_DE_MISC_IIR, iir);
			ret = IRQ_HANDLED;
			gen8_de_misc_irq_handler(dev_priv, iir);
		} else {
			drm_err(&dev_priv->drm,
				"The master control interrupt lied (DE MISC)!\n");
		}
	}

	if (DISPLAY_VER(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) {
		iir = intel_uncore_read(&dev_priv->uncore, GEN11_DE_HPD_IIR);
		if (iir) {
			intel_uncore_write(&dev_priv->uncore, GEN11_DE_HPD_IIR, iir);
			ret = IRQ_HANDLED;
			gen11_hpd_irq_handler(dev_priv, iir);
		} else {
			drm_err(&dev_priv->drm,
				"The master control interrupt lied (DE HPD)!\n");
		}
	}

	if (master_ctl & GEN8_DE_PORT_IRQ) {
		iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PORT_IIR);
		if (iir) {
			bool found = false;

			intel_uncore_write(&dev_priv->uncore, GEN8_DE_PORT_IIR, iir);
			ret = IRQ_HANDLED;

			if (iir & gen8_de_port_aux_mask(dev_priv)) {
				dp_aux_irq_handler(dev_priv);
				found = true;
			}

			if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
				u32 hotplug_trigger = iir & BXT_DE_PORT_HOTPLUG_MASK;

				if (hotplug_trigger) {
					bxt_hpd_irq_handler(dev_priv, hotplug_trigger);
					found = true;
				}
			} else if (IS_BROADWELL(dev_priv)) {
				u32 hotplug_trigger = iir & BDW_DE_PORT_HOTPLUG_MASK;

				if (hotplug_trigger) {
					ilk_hpd_irq_handler(dev_priv, hotplug_trigger);
					found = true;
				}
			}

			if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
			    (iir & BXT_DE_PORT_GMBUS)) {
				gmbus_irq_handler(dev_priv);
				found = true;
			}

			if (DISPLAY_VER(dev_priv) >= 11) {
				u32 te_trigger = iir & (DSI0_TE | DSI1_TE);

				if (te_trigger) {
					gen11_dsi_te_interrupt_handler(dev_priv, te_trigger);
					found = true;
				}
			}

			if (!found)
				drm_err(&dev_priv->drm,
					"Unexpected DE Port interrupt\n");
		} else {
			drm_err(&dev_priv->drm,
				"The master control interrupt lied (DE PORT)!\n");
		}
	}

	for_each_pipe(dev_priv, pipe) {
		u32 fault_errors;

		if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
			continue;

		iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PIPE_IIR(pipe));
		if (!iir) {
			drm_err(&dev_priv->drm,
				"The master control interrupt lied (DE PIPE)!\n");
			continue;
		}

		ret = IRQ_HANDLED;
		intel_uncore_write(&dev_priv->uncore, GEN8_DE_PIPE_IIR(pipe), iir);

		if (iir & GEN8_PIPE_VBLANK)
			intel_handle_vblank(dev_priv, pipe);

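		/*
		 * Flip done is reported via a per-platform plane 1 bit; see
		 * gen8_de_pipe_flip_done_mask() above for the gen8 vs gen9+
		 * encodings.
		 */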
if (iir & gen8_de_pipe_flip_done_mask(dev_priv)) 2540 flip_done_handler(dev_priv, pipe); 2541 2542 if (iir & GEN8_PIPE_CDCLK_CRC_DONE) 2543 hsw_pipe_crc_irq_handler(dev_priv, pipe); 2544 2545 if (iir & gen8_de_pipe_underrun_mask(dev_priv)) 2546 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2547 2548 fault_errors = iir & gen8_de_pipe_fault_mask(dev_priv); 2549 if (fault_errors) 2550 drm_err(&dev_priv->drm, 2551 "Fault errors on pipe %c: 0x%08x\n", 2552 pipe_name(pipe), 2553 fault_errors); 2554 } 2555 2556 if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) && 2557 master_ctl & GEN8_DE_PCH_IRQ) { 2558 /* 2559 * FIXME(BDW): Assume for now that the new interrupt handling 2560 * scheme also closed the SDE interrupt handling race we've seen 2561 * on older pch-split platforms. But this needs testing. 2562 */ 2563 iir = intel_uncore_read(&dev_priv->uncore, SDEIIR); 2564 if (iir) { 2565 intel_uncore_write(&dev_priv->uncore, SDEIIR, iir); 2566 ret = IRQ_HANDLED; 2567 2568 if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) 2569 icp_irq_handler(dev_priv, iir); 2570 else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT) 2571 spt_irq_handler(dev_priv, iir); 2572 else 2573 cpt_irq_handler(dev_priv, iir); 2574 } else { 2575 /* 2576 * Like on previous PCH there seems to be something 2577 * fishy going on with forwarding PCH interrupts. 2578 */ 2579 drm_dbg(&dev_priv->drm, 2580 "The master control interrupt lied (SDE)!\n"); 2581 } 2582 } 2583 2584 return ret; 2585 } 2586 2587 static inline u32 gen8_master_intr_disable(void __iomem * const regs) 2588 { 2589 raw_reg_write(regs, GEN8_MASTER_IRQ, 0); 2590 2591 /* 2592 * Now with master disabled, get a sample of level indications 2593 * for this interrupt. Indications will be cleared on related acks. 2594 * New indications can and will light up during processing, 2595 * and will generate new interrupt after enabling master. 
2596 */ 2597 return raw_reg_read(regs, GEN8_MASTER_IRQ); 2598 } 2599 2600 static inline void gen8_master_intr_enable(void __iomem * const regs) 2601 { 2602 raw_reg_write(regs, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 2603 } 2604 2605 static irqreturn_t gen8_irq_handler(int irq, void *arg) 2606 { 2607 struct drm_i915_private *dev_priv = arg; 2608 void __iomem * const regs = dev_priv->uncore.regs; 2609 u32 master_ctl; 2610 2611 if (!intel_irqs_enabled(dev_priv)) 2612 return IRQ_NONE; 2613 2614 master_ctl = gen8_master_intr_disable(regs); 2615 if (!master_ctl) { 2616 gen8_master_intr_enable(regs); 2617 return IRQ_NONE; 2618 } 2619 2620 /* Find, queue (onto bottom-halves), then clear each source */ 2621 gen8_gt_irq_handler(to_gt(dev_priv), master_ctl); 2622 2623 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2624 if (master_ctl & ~GEN8_GT_IRQS) { 2625 disable_rpm_wakeref_asserts(&dev_priv->runtime_pm); 2626 gen8_de_irq_handler(dev_priv, master_ctl); 2627 enable_rpm_wakeref_asserts(&dev_priv->runtime_pm); 2628 } 2629 2630 gen8_master_intr_enable(regs); 2631 2632 pmu_irq_stats(dev_priv, IRQ_HANDLED); 2633 2634 return IRQ_HANDLED; 2635 } 2636 2637 static u32 2638 gen11_gu_misc_irq_ack(struct drm_i915_private *i915, const u32 master_ctl) 2639 { 2640 void __iomem * const regs = i915->uncore.regs; 2641 u32 iir; 2642 2643 if (!(master_ctl & GEN11_GU_MISC_IRQ)) 2644 return 0; 2645 2646 iir = raw_reg_read(regs, GEN11_GU_MISC_IIR); 2647 if (likely(iir)) 2648 raw_reg_write(regs, GEN11_GU_MISC_IIR, iir); 2649 2650 return iir; 2651 } 2652 2653 static void 2654 gen11_gu_misc_irq_handler(struct drm_i915_private *i915, const u32 iir) 2655 { 2656 if (iir & GEN11_GU_MISC_GSE) 2657 intel_opregion_asle_intr(i915); 2658 } 2659 2660 static inline u32 gen11_master_intr_disable(void __iomem * const regs) 2661 { 2662 raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0); 2663 2664 /* 2665 * Now with master disabled, get a sample of level indications 2666 * for this interrupt. Indications will be cleared on related acks. 2667 * New indications can and will light up during processing, 2668 * and will generate new interrupt after enabling master. 2669 */ 2670 return raw_reg_read(regs, GEN11_GFX_MSTR_IRQ); 2671 } 2672 2673 static inline void gen11_master_intr_enable(void __iomem * const regs) 2674 { 2675 raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ); 2676 } 2677 2678 static void 2679 gen11_display_irq_handler(struct drm_i915_private *i915) 2680 { 2681 void __iomem * const regs = i915->uncore.regs; 2682 const u32 disp_ctl = raw_reg_read(regs, GEN11_DISPLAY_INT_CTL); 2683 2684 disable_rpm_wakeref_asserts(&i915->runtime_pm); 2685 /* 2686 * GEN11_DISPLAY_INT_CTL has same format as GEN8_MASTER_IRQ 2687 * for the display related bits. 
2688 */ 2689 raw_reg_write(regs, GEN11_DISPLAY_INT_CTL, 0x0); 2690 gen8_de_irq_handler(i915, disp_ctl); 2691 raw_reg_write(regs, GEN11_DISPLAY_INT_CTL, 2692 GEN11_DISPLAY_IRQ_ENABLE); 2693 2694 enable_rpm_wakeref_asserts(&i915->runtime_pm); 2695 } 2696 2697 static irqreturn_t gen11_irq_handler(int irq, void *arg) 2698 { 2699 struct drm_i915_private *i915 = arg; 2700 void __iomem * const regs = i915->uncore.regs; 2701 struct intel_gt *gt = to_gt(i915); 2702 u32 master_ctl; 2703 u32 gu_misc_iir; 2704 2705 if (!intel_irqs_enabled(i915)) 2706 return IRQ_NONE; 2707 2708 master_ctl = gen11_master_intr_disable(regs); 2709 if (!master_ctl) { 2710 gen11_master_intr_enable(regs); 2711 return IRQ_NONE; 2712 } 2713 2714 /* Find, queue (onto bottom-halves), then clear each source */ 2715 gen11_gt_irq_handler(gt, master_ctl); 2716 2717 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2718 if (master_ctl & GEN11_DISPLAY_IRQ) 2719 gen11_display_irq_handler(i915); 2720 2721 gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl); 2722 2723 gen11_master_intr_enable(regs); 2724 2725 gen11_gu_misc_irq_handler(i915, gu_misc_iir); 2726 2727 pmu_irq_stats(i915, IRQ_HANDLED); 2728 2729 return IRQ_HANDLED; 2730 } 2731 2732 static inline u32 dg1_master_intr_disable(void __iomem * const regs) 2733 { 2734 u32 val; 2735 2736 /* First disable interrupts */ 2737 raw_reg_write(regs, DG1_MSTR_TILE_INTR, 0); 2738 2739 /* Get the indication levels and ack the master unit */ 2740 val = raw_reg_read(regs, DG1_MSTR_TILE_INTR); 2741 if (unlikely(!val)) 2742 return 0; 2743 2744 raw_reg_write(regs, DG1_MSTR_TILE_INTR, val); 2745 2746 return val; 2747 } 2748 2749 static inline void dg1_master_intr_enable(void __iomem * const regs) 2750 { 2751 raw_reg_write(regs, DG1_MSTR_TILE_INTR, DG1_MSTR_IRQ); 2752 } 2753 2754 static irqreturn_t dg1_irq_handler(int irq, void *arg) 2755 { 2756 struct drm_i915_private * const i915 = arg; 2757 struct intel_gt *gt = to_gt(i915); 2758 void __iomem * const regs = gt->uncore->regs; 2759 u32 master_tile_ctl, master_ctl; 2760 u32 gu_misc_iir; 2761 2762 if (!intel_irqs_enabled(i915)) 2763 return IRQ_NONE; 2764 2765 master_tile_ctl = dg1_master_intr_disable(regs); 2766 if (!master_tile_ctl) { 2767 dg1_master_intr_enable(regs); 2768 return IRQ_NONE; 2769 } 2770 2771 /* FIXME: we only support tile 0 for now. 
*/ 2772 if (master_tile_ctl & DG1_MSTR_TILE(0)) { 2773 master_ctl = raw_reg_read(regs, GEN11_GFX_MSTR_IRQ); 2774 raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, master_ctl); 2775 } else { 2776 DRM_ERROR("Tile not supported: 0x%08x\n", master_tile_ctl); 2777 dg1_master_intr_enable(regs); 2778 return IRQ_NONE; 2779 } 2780 2781 gen11_gt_irq_handler(gt, master_ctl); 2782 2783 if (master_ctl & GEN11_DISPLAY_IRQ) 2784 gen11_display_irq_handler(i915); 2785 2786 gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl); 2787 2788 dg1_master_intr_enable(regs); 2789 2790 gen11_gu_misc_irq_handler(i915, gu_misc_iir); 2791 2792 pmu_irq_stats(i915, IRQ_HANDLED); 2793 2794 return IRQ_HANDLED; 2795 } 2796 2797 /* Called from drm generic code, passed 'crtc' which 2798 * we use as a pipe index 2799 */ 2800 int i8xx_enable_vblank(struct drm_crtc *crtc) 2801 { 2802 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 2803 enum pipe pipe = to_intel_crtc(crtc)->pipe; 2804 unsigned long irqflags; 2805 2806 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2807 i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS); 2808 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2809 2810 return 0; 2811 } 2812 2813 int i915gm_enable_vblank(struct drm_crtc *crtc) 2814 { 2815 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 2816 2817 /* 2818 * Vblank interrupts fail to wake the device up from C2+. 2819 * Disabling render clock gating during C-states avoids 2820 * the problem. There is a small power cost so we do this 2821 * only when vblank interrupts are actually enabled. 2822 */ 2823 if (dev_priv->vblank_enabled++ == 0) 2824 intel_uncore_write(&dev_priv->uncore, SCPD0, _MASKED_BIT_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE)); 2825 2826 return i8xx_enable_vblank(crtc); 2827 } 2828 2829 int i965_enable_vblank(struct drm_crtc *crtc) 2830 { 2831 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 2832 enum pipe pipe = to_intel_crtc(crtc)->pipe; 2833 unsigned long irqflags; 2834 2835 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2836 i915_enable_pipestat(dev_priv, pipe, 2837 PIPE_START_VBLANK_INTERRUPT_STATUS); 2838 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2839 2840 return 0; 2841 } 2842 2843 int ilk_enable_vblank(struct drm_crtc *crtc) 2844 { 2845 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 2846 enum pipe pipe = to_intel_crtc(crtc)->pipe; 2847 unsigned long irqflags; 2848 u32 bit = DISPLAY_VER(dev_priv) >= 7 ? 2849 DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe); 2850 2851 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2852 ilk_enable_display_irq(dev_priv, bit); 2853 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2854 2855 /* Even though there is no DMC, frame counter can get stuck when 2856 * PSR is active as no frames are generated. 2857 */ 2858 if (HAS_PSR(dev_priv)) 2859 drm_crtc_vblank_restore(crtc); 2860 2861 return 0; 2862 } 2863 2864 static bool gen11_dsi_configure_te(struct intel_crtc *intel_crtc, 2865 bool enable) 2866 { 2867 struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev); 2868 enum port port; 2869 2870 if (!(intel_crtc->mode_flags & 2871 (I915_MODE_FLAG_DSI_USE_TE1 | I915_MODE_FLAG_DSI_USE_TE0))) 2872 return false; 2873 2874 /* for dual link cases we consider TE from slave */ 2875 if (intel_crtc->mode_flags & I915_MODE_FLAG_DSI_USE_TE1) 2876 port = PORT_B; 2877 else 2878 port = PORT_A; 2879 2880 intel_uncore_rmw(&dev_priv->uncore, DSI_INTR_MASK_REG(port), DSI_TE_EVENT, 2881 enable ? 
0 : DSI_TE_EVENT); 2882 2883 intel_uncore_rmw(&dev_priv->uncore, DSI_INTR_IDENT_REG(port), 0, 0); 2884 2885 return true; 2886 } 2887 2888 int bdw_enable_vblank(struct drm_crtc *_crtc) 2889 { 2890 struct intel_crtc *crtc = to_intel_crtc(_crtc); 2891 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 2892 enum pipe pipe = crtc->pipe; 2893 unsigned long irqflags; 2894 2895 if (gen11_dsi_configure_te(crtc, true)) 2896 return 0; 2897 2898 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2899 bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK); 2900 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2901 2902 /* Even if there is no DMC, frame counter can get stuck when 2903 * PSR is active as no frames are generated, so check only for PSR. 2904 */ 2905 if (HAS_PSR(dev_priv)) 2906 drm_crtc_vblank_restore(&crtc->base); 2907 2908 return 0; 2909 } 2910 2911 /* Called from drm generic code, passed 'crtc' which 2912 * we use as a pipe index 2913 */ 2914 void i8xx_disable_vblank(struct drm_crtc *crtc) 2915 { 2916 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 2917 enum pipe pipe = to_intel_crtc(crtc)->pipe; 2918 unsigned long irqflags; 2919 2920 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2921 i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS); 2922 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2923 } 2924 2925 void i915gm_disable_vblank(struct drm_crtc *crtc) 2926 { 2927 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 2928 2929 i8xx_disable_vblank(crtc); 2930 2931 if (--dev_priv->vblank_enabled == 0) 2932 intel_uncore_write(&dev_priv->uncore, SCPD0, _MASKED_BIT_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE)); 2933 } 2934 2935 void i965_disable_vblank(struct drm_crtc *crtc) 2936 { 2937 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 2938 enum pipe pipe = to_intel_crtc(crtc)->pipe; 2939 unsigned long irqflags; 2940 2941 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2942 i915_disable_pipestat(dev_priv, pipe, 2943 PIPE_START_VBLANK_INTERRUPT_STATUS); 2944 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2945 } 2946 2947 void ilk_disable_vblank(struct drm_crtc *crtc) 2948 { 2949 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 2950 enum pipe pipe = to_intel_crtc(crtc)->pipe; 2951 unsigned long irqflags; 2952 u32 bit = DISPLAY_VER(dev_priv) >= 7 ? 
2953 DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe); 2954 2955 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2956 ilk_disable_display_irq(dev_priv, bit); 2957 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2958 } 2959 2960 void bdw_disable_vblank(struct drm_crtc *_crtc) 2961 { 2962 struct intel_crtc *crtc = to_intel_crtc(_crtc); 2963 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 2964 enum pipe pipe = crtc->pipe; 2965 unsigned long irqflags; 2966 2967 if (gen11_dsi_configure_te(crtc, false)) 2968 return; 2969 2970 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2971 bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK); 2972 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2973 } 2974 2975 static void ibx_irq_reset(struct drm_i915_private *dev_priv) 2976 { 2977 struct intel_uncore *uncore = &dev_priv->uncore; 2978 2979 if (HAS_PCH_NOP(dev_priv)) 2980 return; 2981 2982 GEN3_IRQ_RESET(uncore, SDE); 2983 2984 if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv)) 2985 intel_uncore_write(&dev_priv->uncore, SERR_INT, 0xffffffff); 2986 } 2987 2988 static void vlv_display_irq_reset(struct drm_i915_private *dev_priv) 2989 { 2990 struct intel_uncore *uncore = &dev_priv->uncore; 2991 2992 if (IS_CHERRYVIEW(dev_priv)) 2993 intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_CHV); 2994 else 2995 intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_VLV); 2996 2997 i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0); 2998 intel_uncore_rmw(uncore, PORT_HOTPLUG_STAT, 0, 0); 2999 3000 i9xx_pipestat_irq_reset(dev_priv); 3001 3002 GEN3_IRQ_RESET(uncore, VLV_); 3003 dev_priv->irq_mask = ~0u; 3004 } 3005 3006 static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv) 3007 { 3008 struct intel_uncore *uncore = &dev_priv->uncore; 3009 3010 u32 pipestat_mask; 3011 u32 enable_mask; 3012 enum pipe pipe; 3013 3014 pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS; 3015 3016 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); 3017 for_each_pipe(dev_priv, pipe) 3018 i915_enable_pipestat(dev_priv, pipe, pipestat_mask); 3019 3020 enable_mask = I915_DISPLAY_PORT_INTERRUPT | 3021 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3022 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3023 I915_LPE_PIPE_A_INTERRUPT | 3024 I915_LPE_PIPE_B_INTERRUPT; 3025 3026 if (IS_CHERRYVIEW(dev_priv)) 3027 enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT | 3028 I915_LPE_PIPE_C_INTERRUPT; 3029 3030 drm_WARN_ON(&dev_priv->drm, dev_priv->irq_mask != ~0u); 3031 3032 dev_priv->irq_mask = ~enable_mask; 3033 3034 GEN3_IRQ_INIT(uncore, VLV_, dev_priv->irq_mask, enable_mask); 3035 } 3036 3037 /* drm_dma.h hooks 3038 */ 3039 static void ilk_irq_reset(struct drm_i915_private *dev_priv) 3040 { 3041 struct intel_uncore *uncore = &dev_priv->uncore; 3042 3043 GEN3_IRQ_RESET(uncore, DE); 3044 dev_priv->irq_mask = ~0u; 3045 3046 if (GRAPHICS_VER(dev_priv) == 7) 3047 intel_uncore_write(uncore, GEN7_ERR_INT, 0xffffffff); 3048 3049 if (IS_HASWELL(dev_priv)) { 3050 intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff); 3051 intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff); 3052 } 3053 3054 gen5_gt_irq_reset(to_gt(dev_priv)); 3055 3056 ibx_irq_reset(dev_priv); 3057 } 3058 3059 static void valleyview_irq_reset(struct drm_i915_private *dev_priv) 3060 { 3061 intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, 0); 3062 intel_uncore_posting_read(&dev_priv->uncore, VLV_MASTER_IER); 3063 3064 gen5_gt_irq_reset(to_gt(dev_priv)); 3065 3066 spin_lock_irq(&dev_priv->irq_lock); 3067 if 
(dev_priv->display_irqs_enabled) 3068 vlv_display_irq_reset(dev_priv); 3069 spin_unlock_irq(&dev_priv->irq_lock); 3070 } 3071 3072 static void gen8_display_irq_reset(struct drm_i915_private *dev_priv) 3073 { 3074 struct intel_uncore *uncore = &dev_priv->uncore; 3075 enum pipe pipe; 3076 3077 if (!HAS_DISPLAY(dev_priv)) 3078 return; 3079 3080 intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff); 3081 intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff); 3082 3083 for_each_pipe(dev_priv, pipe) 3084 if (intel_display_power_is_enabled(dev_priv, 3085 POWER_DOMAIN_PIPE(pipe))) 3086 GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe); 3087 3088 GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_); 3089 GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_); 3090 } 3091 3092 static void gen8_irq_reset(struct drm_i915_private *dev_priv) 3093 { 3094 struct intel_uncore *uncore = &dev_priv->uncore; 3095 3096 gen8_master_intr_disable(uncore->regs); 3097 3098 gen8_gt_irq_reset(to_gt(dev_priv)); 3099 gen8_display_irq_reset(dev_priv); 3100 GEN3_IRQ_RESET(uncore, GEN8_PCU_); 3101 3102 if (HAS_PCH_SPLIT(dev_priv)) 3103 ibx_irq_reset(dev_priv); 3104 3105 } 3106 3107 static void gen11_display_irq_reset(struct drm_i915_private *dev_priv) 3108 { 3109 struct intel_uncore *uncore = &dev_priv->uncore; 3110 enum pipe pipe; 3111 u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | 3112 BIT(TRANSCODER_C) | BIT(TRANSCODER_D); 3113 3114 if (!HAS_DISPLAY(dev_priv)) 3115 return; 3116 3117 intel_uncore_write(uncore, GEN11_DISPLAY_INT_CTL, 0); 3118 3119 if (DISPLAY_VER(dev_priv) >= 12) { 3120 enum transcoder trans; 3121 3122 for_each_cpu_transcoder_masked(dev_priv, trans, trans_mask) { 3123 enum intel_display_power_domain domain; 3124 3125 domain = POWER_DOMAIN_TRANSCODER(trans); 3126 if (!intel_display_power_is_enabled(dev_priv, domain)) 3127 continue; 3128 3129 intel_uncore_write(uncore, TRANS_PSR_IMR(trans), 0xffffffff); 3130 intel_uncore_write(uncore, TRANS_PSR_IIR(trans), 0xffffffff); 3131 } 3132 } else { 3133 intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff); 3134 intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff); 3135 } 3136 3137 for_each_pipe(dev_priv, pipe) 3138 if (intel_display_power_is_enabled(dev_priv, 3139 POWER_DOMAIN_PIPE(pipe))) 3140 GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe); 3141 3142 GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_); 3143 GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_); 3144 GEN3_IRQ_RESET(uncore, GEN11_DE_HPD_); 3145 3146 if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) 3147 GEN3_IRQ_RESET(uncore, SDE); 3148 } 3149 3150 static void gen11_irq_reset(struct drm_i915_private *dev_priv) 3151 { 3152 struct intel_gt *gt = to_gt(dev_priv); 3153 struct intel_uncore *uncore = gt->uncore; 3154 3155 gen11_master_intr_disable(dev_priv->uncore.regs); 3156 3157 gen11_gt_irq_reset(gt); 3158 gen11_display_irq_reset(dev_priv); 3159 3160 GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_); 3161 GEN3_IRQ_RESET(uncore, GEN8_PCU_); 3162 } 3163 3164 static void dg1_irq_reset(struct drm_i915_private *dev_priv) 3165 { 3166 struct intel_gt *gt = to_gt(dev_priv); 3167 struct intel_uncore *uncore = gt->uncore; 3168 3169 dg1_master_intr_disable(dev_priv->uncore.regs); 3170 3171 gen11_gt_irq_reset(gt); 3172 gen11_display_irq_reset(dev_priv); 3173 3174 GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_); 3175 GEN3_IRQ_RESET(uncore, GEN8_PCU_); 3176 } 3177 3178 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv, 3179 u8 pipe_mask) 3180 { 3181 struct intel_uncore *uncore = &dev_priv->uncore; 3182 u32 extra_ier = GEN8_PIPE_VBLANK | 3183 gen8_de_pipe_underrun_mask(dev_priv) | 3184 
gen8_de_pipe_flip_done_mask(dev_priv); 3185 enum pipe pipe; 3186 3187 spin_lock_irq(&dev_priv->irq_lock); 3188 3189 if (!intel_irqs_enabled(dev_priv)) { 3190 spin_unlock_irq(&dev_priv->irq_lock); 3191 return; 3192 } 3193 3194 for_each_pipe_masked(dev_priv, pipe, pipe_mask) 3195 GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe, 3196 dev_priv->de_irq_mask[pipe], 3197 ~dev_priv->de_irq_mask[pipe] | extra_ier); 3198 3199 spin_unlock_irq(&dev_priv->irq_lock); 3200 } 3201 3202 void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv, 3203 u8 pipe_mask) 3204 { 3205 struct intel_uncore *uncore = &dev_priv->uncore; 3206 enum pipe pipe; 3207 3208 spin_lock_irq(&dev_priv->irq_lock); 3209 3210 if (!intel_irqs_enabled(dev_priv)) { 3211 spin_unlock_irq(&dev_priv->irq_lock); 3212 return; 3213 } 3214 3215 for_each_pipe_masked(dev_priv, pipe, pipe_mask) 3216 GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe); 3217 3218 spin_unlock_irq(&dev_priv->irq_lock); 3219 3220 /* make sure we're done processing display irqs */ 3221 intel_synchronize_irq(dev_priv); 3222 } 3223 3224 static void cherryview_irq_reset(struct drm_i915_private *dev_priv) 3225 { 3226 struct intel_uncore *uncore = &dev_priv->uncore; 3227 3228 intel_uncore_write(uncore, GEN8_MASTER_IRQ, 0); 3229 intel_uncore_posting_read(&dev_priv->uncore, GEN8_MASTER_IRQ); 3230 3231 gen8_gt_irq_reset(to_gt(dev_priv)); 3232 3233 GEN3_IRQ_RESET(uncore, GEN8_PCU_); 3234 3235 spin_lock_irq(&dev_priv->irq_lock); 3236 if (dev_priv->display_irqs_enabled) 3237 vlv_display_irq_reset(dev_priv); 3238 spin_unlock_irq(&dev_priv->irq_lock); 3239 } 3240 3241 static u32 ibx_hotplug_enables(struct drm_i915_private *i915, 3242 enum hpd_pin pin) 3243 { 3244 switch (pin) { 3245 case HPD_PORT_A: 3246 /* 3247 * When CPU and PCH are on the same package, port A 3248 * HPD must be enabled in both north and south. 3249 */ 3250 return HAS_PCH_LPT_LP(i915) ? 3251 PORTA_HOTPLUG_ENABLE : 0; 3252 case HPD_PORT_B: 3253 return PORTB_HOTPLUG_ENABLE | 3254 PORTB_PULSE_DURATION_2ms; 3255 case HPD_PORT_C: 3256 return PORTC_HOTPLUG_ENABLE | 3257 PORTC_PULSE_DURATION_2ms; 3258 case HPD_PORT_D: 3259 return PORTD_HOTPLUG_ENABLE | 3260 PORTD_PULSE_DURATION_2ms; 3261 default: 3262 return 0; 3263 } 3264 } 3265 3266 static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv) 3267 { 3268 /* 3269 * Enable digital hotplug on the PCH, and configure the DP short pulse 3270 * duration to 2ms (which is the minimum in the Display Port spec). 3271 * The pulse duration bits are reserved on LPT+. 
3272 */ 3273 intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG, 3274 PORTA_HOTPLUG_ENABLE | 3275 PORTB_HOTPLUG_ENABLE | 3276 PORTC_HOTPLUG_ENABLE | 3277 PORTD_HOTPLUG_ENABLE | 3278 PORTB_PULSE_DURATION_MASK | 3279 PORTC_PULSE_DURATION_MASK | 3280 PORTD_PULSE_DURATION_MASK, 3281 intel_hpd_hotplug_enables(dev_priv, ibx_hotplug_enables)); 3282 } 3283 3284 static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv) 3285 { 3286 u32 hotplug_irqs, enabled_irqs; 3287 3288 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd); 3289 hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd); 3290 3291 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 3292 3293 ibx_hpd_detection_setup(dev_priv); 3294 } 3295 3296 static u32 icp_ddi_hotplug_enables(struct drm_i915_private *i915, 3297 enum hpd_pin pin) 3298 { 3299 switch (pin) { 3300 case HPD_PORT_A: 3301 case HPD_PORT_B: 3302 case HPD_PORT_C: 3303 case HPD_PORT_D: 3304 return SHOTPLUG_CTL_DDI_HPD_ENABLE(pin); 3305 default: 3306 return 0; 3307 } 3308 } 3309 3310 static u32 icp_tc_hotplug_enables(struct drm_i915_private *i915, 3311 enum hpd_pin pin) 3312 { 3313 switch (pin) { 3314 case HPD_PORT_TC1: 3315 case HPD_PORT_TC2: 3316 case HPD_PORT_TC3: 3317 case HPD_PORT_TC4: 3318 case HPD_PORT_TC5: 3319 case HPD_PORT_TC6: 3320 return ICP_TC_HPD_ENABLE(pin); 3321 default: 3322 return 0; 3323 } 3324 } 3325 3326 static void icp_ddi_hpd_detection_setup(struct drm_i915_private *dev_priv) 3327 { 3328 intel_uncore_rmw(&dev_priv->uncore, SHOTPLUG_CTL_DDI, 3329 SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_A) | 3330 SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_B) | 3331 SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_C) | 3332 SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_D), 3333 intel_hpd_hotplug_enables(dev_priv, icp_ddi_hotplug_enables)); 3334 } 3335 3336 static void icp_tc_hpd_detection_setup(struct drm_i915_private *dev_priv) 3337 { 3338 intel_uncore_rmw(&dev_priv->uncore, SHOTPLUG_CTL_TC, 3339 ICP_TC_HPD_ENABLE(HPD_PORT_TC1) | 3340 ICP_TC_HPD_ENABLE(HPD_PORT_TC2) | 3341 ICP_TC_HPD_ENABLE(HPD_PORT_TC3) | 3342 ICP_TC_HPD_ENABLE(HPD_PORT_TC4) | 3343 ICP_TC_HPD_ENABLE(HPD_PORT_TC5) | 3344 ICP_TC_HPD_ENABLE(HPD_PORT_TC6), 3345 intel_hpd_hotplug_enables(dev_priv, icp_tc_hotplug_enables)); 3346 } 3347 3348 static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv) 3349 { 3350 u32 hotplug_irqs, enabled_irqs; 3351 3352 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd); 3353 hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd); 3354 3355 if (INTEL_PCH_TYPE(dev_priv) <= PCH_TGP) 3356 intel_uncore_write(&dev_priv->uncore, SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ); 3357 3358 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 3359 3360 icp_ddi_hpd_detection_setup(dev_priv); 3361 icp_tc_hpd_detection_setup(dev_priv); 3362 } 3363 3364 static u32 gen11_hotplug_enables(struct drm_i915_private *i915, 3365 enum hpd_pin pin) 3366 { 3367 switch (pin) { 3368 case HPD_PORT_TC1: 3369 case HPD_PORT_TC2: 3370 case HPD_PORT_TC3: 3371 case HPD_PORT_TC4: 3372 case HPD_PORT_TC5: 3373 case HPD_PORT_TC6: 3374 return GEN11_HOTPLUG_CTL_ENABLE(pin); 3375 default: 3376 return 0; 3377 } 3378 } 3379 3380 static void dg1_hpd_invert(struct drm_i915_private *i915) 3381 { 3382 u32 val = (INVERT_DDIA_HPD | 3383 INVERT_DDIB_HPD | 3384 INVERT_DDIC_HPD | 3385 INVERT_DDID_HPD); 3386 intel_uncore_rmw(&i915->uncore, SOUTH_CHICKEN1, 0, val); 3387 } 3388 3389 static void 
dg1_hpd_irq_setup(struct drm_i915_private *dev_priv) 3390 { 3391 dg1_hpd_invert(dev_priv); 3392 icp_hpd_irq_setup(dev_priv); 3393 } 3394 3395 static void gen11_tc_hpd_detection_setup(struct drm_i915_private *dev_priv) 3396 { 3397 intel_uncore_rmw(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL, 3398 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC1) | 3399 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC2) | 3400 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC3) | 3401 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC4) | 3402 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC5) | 3403 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC6), 3404 intel_hpd_hotplug_enables(dev_priv, gen11_hotplug_enables)); 3405 } 3406 3407 static void gen11_tbt_hpd_detection_setup(struct drm_i915_private *dev_priv) 3408 { 3409 intel_uncore_rmw(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL, 3410 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC1) | 3411 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC2) | 3412 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC3) | 3413 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC4) | 3414 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC5) | 3415 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC6), 3416 intel_hpd_hotplug_enables(dev_priv, gen11_hotplug_enables)); 3417 } 3418 3419 static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv) 3420 { 3421 u32 hotplug_irqs, enabled_irqs; 3422 3423 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.hpd); 3424 hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.hpd); 3425 3426 intel_uncore_rmw(&dev_priv->uncore, GEN11_DE_HPD_IMR, hotplug_irqs, 3427 ~enabled_irqs & hotplug_irqs); 3428 intel_uncore_posting_read(&dev_priv->uncore, GEN11_DE_HPD_IMR); 3429 3430 gen11_tc_hpd_detection_setup(dev_priv); 3431 gen11_tbt_hpd_detection_setup(dev_priv); 3432 3433 if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) 3434 icp_hpd_irq_setup(dev_priv); 3435 } 3436 3437 static u32 spt_hotplug_enables(struct drm_i915_private *i915, 3438 enum hpd_pin pin) 3439 { 3440 switch (pin) { 3441 case HPD_PORT_A: 3442 return PORTA_HOTPLUG_ENABLE; 3443 case HPD_PORT_B: 3444 return PORTB_HOTPLUG_ENABLE; 3445 case HPD_PORT_C: 3446 return PORTC_HOTPLUG_ENABLE; 3447 case HPD_PORT_D: 3448 return PORTD_HOTPLUG_ENABLE; 3449 default: 3450 return 0; 3451 } 3452 } 3453 3454 static u32 spt_hotplug2_enables(struct drm_i915_private *i915, 3455 enum hpd_pin pin) 3456 { 3457 switch (pin) { 3458 case HPD_PORT_E: 3459 return PORTE_HOTPLUG_ENABLE; 3460 default: 3461 return 0; 3462 } 3463 } 3464 3465 static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv) 3466 { 3467 /* Display WA #1179 WaHardHangonHotPlug: cnp */ 3468 if (HAS_PCH_CNP(dev_priv)) { 3469 intel_uncore_rmw(&dev_priv->uncore, SOUTH_CHICKEN1, CHASSIS_CLK_REQ_DURATION_MASK, 3470 CHASSIS_CLK_REQ_DURATION(0xf)); 3471 } 3472 3473 /* Enable digital hotplug on the PCH */ 3474 intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG, 3475 PORTA_HOTPLUG_ENABLE | 3476 PORTB_HOTPLUG_ENABLE | 3477 PORTC_HOTPLUG_ENABLE | 3478 PORTD_HOTPLUG_ENABLE, 3479 intel_hpd_hotplug_enables(dev_priv, spt_hotplug_enables)); 3480 3481 intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG2, PORTE_HOTPLUG_ENABLE, 3482 intel_hpd_hotplug_enables(dev_priv, spt_hotplug2_enables)); 3483 } 3484 3485 static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv) 3486 { 3487 u32 hotplug_irqs, enabled_irqs; 3488 3489 if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP) 3490 intel_uncore_write(&dev_priv->uncore, SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ); 3491 3492 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd); 3493 hotplug_irqs = 
intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd); 3494 3495 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 3496 3497 spt_hpd_detection_setup(dev_priv); 3498 } 3499 3500 static u32 ilk_hotplug_enables(struct drm_i915_private *i915, 3501 enum hpd_pin pin) 3502 { 3503 switch (pin) { 3504 case HPD_PORT_A: 3505 return DIGITAL_PORTA_HOTPLUG_ENABLE | 3506 DIGITAL_PORTA_PULSE_DURATION_2ms; 3507 default: 3508 return 0; 3509 } 3510 } 3511 3512 static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv) 3513 { 3514 /* 3515 * Enable digital hotplug on the CPU, and configure the DP short pulse 3516 * duration to 2ms (which is the minimum in the Display Port spec) 3517 * The pulse duration bits are reserved on HSW+. 3518 */ 3519 intel_uncore_rmw(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL, 3520 DIGITAL_PORTA_HOTPLUG_ENABLE | DIGITAL_PORTA_PULSE_DURATION_MASK, 3521 intel_hpd_hotplug_enables(dev_priv, ilk_hotplug_enables)); 3522 } 3523 3524 static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv) 3525 { 3526 u32 hotplug_irqs, enabled_irqs; 3527 3528 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.hpd); 3529 hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.hpd); 3530 3531 if (DISPLAY_VER(dev_priv) >= 8) 3532 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs); 3533 else 3534 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs); 3535 3536 ilk_hpd_detection_setup(dev_priv); 3537 3538 ibx_hpd_irq_setup(dev_priv); 3539 } 3540 3541 static u32 bxt_hotplug_enables(struct drm_i915_private *i915, 3542 enum hpd_pin pin) 3543 { 3544 u32 hotplug; 3545 3546 switch (pin) { 3547 case HPD_PORT_A: 3548 hotplug = PORTA_HOTPLUG_ENABLE; 3549 if (intel_bios_is_port_hpd_inverted(i915, PORT_A)) 3550 hotplug |= BXT_DDIA_HPD_INVERT; 3551 return hotplug; 3552 case HPD_PORT_B: 3553 hotplug = PORTB_HOTPLUG_ENABLE; 3554 if (intel_bios_is_port_hpd_inverted(i915, PORT_B)) 3555 hotplug |= BXT_DDIB_HPD_INVERT; 3556 return hotplug; 3557 case HPD_PORT_C: 3558 hotplug = PORTC_HOTPLUG_ENABLE; 3559 if (intel_bios_is_port_hpd_inverted(i915, PORT_C)) 3560 hotplug |= BXT_DDIC_HPD_INVERT; 3561 return hotplug; 3562 default: 3563 return 0; 3564 } 3565 } 3566 3567 static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv) 3568 { 3569 intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG, 3570 PORTA_HOTPLUG_ENABLE | 3571 PORTB_HOTPLUG_ENABLE | 3572 PORTC_HOTPLUG_ENABLE | 3573 BXT_DDI_HPD_INVERT_MASK, 3574 intel_hpd_hotplug_enables(dev_priv, bxt_hotplug_enables)); 3575 } 3576 3577 static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv) 3578 { 3579 u32 hotplug_irqs, enabled_irqs; 3580 3581 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.hpd); 3582 hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.hpd); 3583 3584 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs); 3585 3586 bxt_hpd_detection_setup(dev_priv); 3587 } 3588 3589 /* 3590 * SDEIER is also touched by the interrupt handler to work around missed PCH 3591 * interrupts. Hence we can't update it after the interrupt handler is enabled - 3592 * instead we unconditionally enable all PCH interrupt sources here, but then 3593 * only unmask them as needed with SDEIMR. 3594 * 3595 * Note that we currently do this after installing the interrupt handler, 3596 * but before we enable the master interrupt. That should be sufficient 3597 * to avoid races with the irq handler, assuming we have MSI. 
Shared legacy 3598 * interrupts could still race. 3599 */ 3600 static void ibx_irq_postinstall(struct drm_i915_private *dev_priv) 3601 { 3602 struct intel_uncore *uncore = &dev_priv->uncore; 3603 u32 mask; 3604 3605 if (HAS_PCH_NOP(dev_priv)) 3606 return; 3607 3608 if (HAS_PCH_IBX(dev_priv)) 3609 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON; 3610 else if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv)) 3611 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT; 3612 else 3613 mask = SDE_GMBUS_CPT; 3614 3615 GEN3_IRQ_INIT(uncore, SDE, ~mask, 0xffffffff); 3616 } 3617 3618 static void ilk_irq_postinstall(struct drm_i915_private *dev_priv) 3619 { 3620 struct intel_uncore *uncore = &dev_priv->uncore; 3621 u32 display_mask, extra_mask; 3622 3623 if (GRAPHICS_VER(dev_priv) >= 7) { 3624 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | 3625 DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB); 3626 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB | 3627 DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB | 3628 DE_PLANE_FLIP_DONE_IVB(PLANE_C) | 3629 DE_PLANE_FLIP_DONE_IVB(PLANE_B) | 3630 DE_PLANE_FLIP_DONE_IVB(PLANE_A) | 3631 DE_DP_A_HOTPLUG_IVB); 3632 } else { 3633 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | 3634 DE_AUX_CHANNEL_A | DE_PIPEB_CRC_DONE | 3635 DE_PIPEA_CRC_DONE | DE_POISON); 3636 extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | 3637 DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN | 3638 DE_PLANE_FLIP_DONE(PLANE_A) | 3639 DE_PLANE_FLIP_DONE(PLANE_B) | 3640 DE_DP_A_HOTPLUG); 3641 } 3642 3643 if (IS_HASWELL(dev_priv)) { 3644 gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR); 3645 display_mask |= DE_EDP_PSR_INT_HSW; 3646 } 3647 3648 if (IS_IRONLAKE_M(dev_priv)) 3649 extra_mask |= DE_PCU_EVENT; 3650 3651 dev_priv->irq_mask = ~display_mask; 3652 3653 ibx_irq_postinstall(dev_priv); 3654 3655 gen5_gt_irq_postinstall(to_gt(dev_priv)); 3656 3657 GEN3_IRQ_INIT(uncore, DE, dev_priv->irq_mask, 3658 display_mask | extra_mask); 3659 } 3660 3661 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv) 3662 { 3663 lockdep_assert_held(&dev_priv->irq_lock); 3664 3665 if (dev_priv->display_irqs_enabled) 3666 return; 3667 3668 dev_priv->display_irqs_enabled = true; 3669 3670 if (intel_irqs_enabled(dev_priv)) { 3671 vlv_display_irq_reset(dev_priv); 3672 vlv_display_irq_postinstall(dev_priv); 3673 } 3674 } 3675 3676 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv) 3677 { 3678 lockdep_assert_held(&dev_priv->irq_lock); 3679 3680 if (!dev_priv->display_irqs_enabled) 3681 return; 3682 3683 dev_priv->display_irqs_enabled = false; 3684 3685 if (intel_irqs_enabled(dev_priv)) 3686 vlv_display_irq_reset(dev_priv); 3687 } 3688 3689 3690 static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv) 3691 { 3692 gen5_gt_irq_postinstall(to_gt(dev_priv)); 3693 3694 spin_lock_irq(&dev_priv->irq_lock); 3695 if (dev_priv->display_irqs_enabled) 3696 vlv_display_irq_postinstall(dev_priv); 3697 spin_unlock_irq(&dev_priv->irq_lock); 3698 3699 intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); 3700 intel_uncore_posting_read(&dev_priv->uncore, VLV_MASTER_IER); 3701 } 3702 3703 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) 3704 { 3705 struct intel_uncore *uncore = &dev_priv->uncore; 3706 3707 u32 de_pipe_masked = gen8_de_pipe_fault_mask(dev_priv) | 3708 GEN8_PIPE_CDCLK_CRC_DONE; 3709 u32 de_pipe_enables; 3710 u32 de_port_masked = gen8_de_port_aux_mask(dev_priv); 3711 u32 de_port_enables; 3712 u32 de_misc_masked = GEN8_DE_EDP_PSR; 3713 u32 

static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	u32 de_pipe_masked = gen8_de_pipe_fault_mask(dev_priv) |
		GEN8_PIPE_CDCLK_CRC_DONE;
	u32 de_pipe_enables;
	u32 de_port_masked = gen8_de_port_aux_mask(dev_priv);
	u32 de_port_enables;
	u32 de_misc_masked = GEN8_DE_EDP_PSR;
	u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
		BIT(TRANSCODER_C) | BIT(TRANSCODER_D);
	enum pipe pipe;

	if (!HAS_DISPLAY(dev_priv))
		return;

	if (DISPLAY_VER(dev_priv) <= 10)
		de_misc_masked |= GEN8_DE_MISC_GSE;

	if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
		de_port_masked |= BXT_DE_PORT_GMBUS;

	if (DISPLAY_VER(dev_priv) >= 11) {
		enum port port;

		if (intel_bios_is_dsi_present(dev_priv, &port))
			de_port_masked |= DSI0_TE | DSI1_TE;
	}

	de_pipe_enables = de_pipe_masked |
		GEN8_PIPE_VBLANK |
		gen8_de_pipe_underrun_mask(dev_priv) |
		gen8_de_pipe_flip_done_mask(dev_priv);

	de_port_enables = de_port_masked;
	if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
		de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
	else if (IS_BROADWELL(dev_priv))
		de_port_enables |= BDW_DE_PORT_HOTPLUG_MASK;

	if (DISPLAY_VER(dev_priv) >= 12) {
		enum transcoder trans;

		for_each_cpu_transcoder_masked(dev_priv, trans, trans_mask) {
			enum intel_display_power_domain domain;

			domain = POWER_DOMAIN_TRANSCODER(trans);
			if (!intel_display_power_is_enabled(dev_priv, domain))
				continue;

			gen3_assert_iir_is_zero(uncore, TRANS_PSR_IIR(trans));
		}
	} else {
		gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
	}

	for_each_pipe(dev_priv, pipe) {
		dev_priv->de_irq_mask[pipe] = ~de_pipe_masked;

		if (intel_display_power_is_enabled(dev_priv,
						   POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
					  dev_priv->de_irq_mask[pipe],
					  de_pipe_enables);
	}

	GEN3_IRQ_INIT(uncore, GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
	GEN3_IRQ_INIT(uncore, GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);

	if (DISPLAY_VER(dev_priv) >= 11) {
		u32 de_hpd_masked = 0;
		u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK |
				     GEN11_DE_TBT_HOTPLUG_MASK;

		GEN3_IRQ_INIT(uncore, GEN11_DE_HPD_, ~de_hpd_masked,
			      de_hpd_enables);
	}
}

static void icp_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 mask = SDE_GMBUS_ICP;

	GEN3_IRQ_INIT(uncore, SDE, ~mask, 0xffffffff);
}

static void gen8_irq_postinstall(struct drm_i915_private *dev_priv)
{
	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
		icp_irq_postinstall(dev_priv);
	else if (HAS_PCH_SPLIT(dev_priv))
		ibx_irq_postinstall(dev_priv);

	gen8_gt_irq_postinstall(to_gt(dev_priv));
	gen8_de_irq_postinstall(dev_priv);

	gen8_master_intr_enable(dev_priv->uncore.regs);
}

static void gen11_de_irq_postinstall(struct drm_i915_private *dev_priv)
{
	if (!HAS_DISPLAY(dev_priv))
		return;

	gen8_de_irq_postinstall(dev_priv);

	intel_uncore_write(&dev_priv->uncore, GEN11_DISPLAY_INT_CTL,
			   GEN11_DISPLAY_IRQ_ENABLE);
}

static void gen11_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_gt *gt = to_gt(dev_priv);
	struct intel_uncore *uncore = gt->uncore;
	u32 gu_misc_masked = GEN11_GU_MISC_GSE;

	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
		icp_irq_postinstall(dev_priv);

	gen11_gt_irq_postinstall(gt);
	gen11_de_irq_postinstall(dev_priv);

	GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);

	gen11_master_intr_enable(uncore->regs);
	intel_uncore_posting_read(&dev_priv->uncore, GEN11_GFX_MSTR_IRQ);
}
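
/*
 * Ordering note for the gen11/dg1 postinstall paths above and below: the
 * GT, display and GU_MISC banks are unmasked first and the top level
 * master interrupt is enabled last, with a posting read afterwards to
 * flush the enable before any interrupt can be taken.
 */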

static void dg1_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_gt *gt = to_gt(dev_priv);
	struct intel_uncore *uncore = gt->uncore;
	u32 gu_misc_masked = GEN11_GU_MISC_GSE;

	gen11_gt_irq_postinstall(gt);

	GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);

	if (HAS_DISPLAY(dev_priv)) {
		icp_irq_postinstall(dev_priv);
		gen8_de_irq_postinstall(dev_priv);
		intel_uncore_write(&dev_priv->uncore, GEN11_DISPLAY_INT_CTL,
				   GEN11_DISPLAY_IRQ_ENABLE);
	}

	dg1_master_intr_enable(uncore->regs);
	intel_uncore_posting_read(uncore, DG1_MSTR_TILE_INTR);
}

static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv)
{
	gen8_gt_irq_postinstall(to_gt(dev_priv));

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_postinstall(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
	intel_uncore_posting_read(&dev_priv->uncore, GEN8_MASTER_IRQ);
}

static void i8xx_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	i9xx_pipestat_irq_reset(dev_priv);

	gen2_irq_reset(uncore);
	dev_priv->irq_mask = ~0u;
}

static void i8xx_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u16 enable_mask;

	intel_uncore_write16(uncore,
			     EMR,
			     ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_MASTER_ERROR_INTERRUPT);

	enable_mask =
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_MASTER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	gen2_irq_init(uncore, dev_priv->irq_mask, enable_mask);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);
}
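
/*
 * Gen2 exposes 16-bit interrupt and error registers, hence the
 * intel_uncore_*16() accessors in the i8xx error handling below; the
 * i9xx_error_irq_ack()/_handler() pair further down is the 32-bit
 * equivalent used by the gen3/gen4 handlers.
 */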

static void i8xx_error_irq_ack(struct drm_i915_private *i915,
			       u16 *eir, u16 *eir_stuck)
{
	struct intel_uncore *uncore = &i915->uncore;
	u16 emr;

	*eir = intel_uncore_read16(uncore, EIR);

	if (*eir)
		intel_uncore_write16(uncore, EIR, *eir);

	*eir_stuck = intel_uncore_read16(uncore, EIR);
	if (*eir_stuck == 0)
		return;

	/*
	 * Toggle all EMR bits to make sure we get an edge
	 * in the ISR master error bit if we don't clear
	 * all the EIR bits. Otherwise the edge triggered
	 * IIR on i965/g4x wouldn't notice that an interrupt
	 * is still pending. Also some EIR bits can't be
	 * cleared except by handling the underlying error
	 * (or by a GPU reset) so we mask any bit that
	 * remains set.
	 */
	emr = intel_uncore_read16(uncore, EMR);
	intel_uncore_write16(uncore, EMR, 0xffff);
	intel_uncore_write16(uncore, EMR, emr | *eir_stuck);
}

static void i8xx_error_irq_handler(struct drm_i915_private *dev_priv,
				   u16 eir, u16 eir_stuck)
{
	DRM_DEBUG("Master Error: EIR 0x%04x\n", eir);

	if (eir_stuck)
		drm_dbg(&dev_priv->drm, "EIR stuck: 0x%04x, masked\n",
			eir_stuck);
}

static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv,
			       u32 *eir, u32 *eir_stuck)
{
	u32 emr;

	*eir = intel_uncore_rmw(&dev_priv->uncore, EIR, 0, 0);

	*eir_stuck = intel_uncore_read(&dev_priv->uncore, EIR);
	if (*eir_stuck == 0)
		return;

	/*
	 * Toggle all EMR bits to make sure we get an edge
	 * in the ISR master error bit if we don't clear
	 * all the EIR bits. Otherwise the edge triggered
	 * IIR on i965/g4x wouldn't notice that an interrupt
	 * is still pending. Also some EIR bits can't be
	 * cleared except by handling the underlying error
	 * (or by a GPU reset) so we mask any bit that
	 * remains set.
	 */
	emr = intel_uncore_rmw(&dev_priv->uncore, EMR, ~0, 0xffffffff);
	intel_uncore_write(&dev_priv->uncore, EMR, emr | *eir_stuck);
}

static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv,
				   u32 eir, u32 eir_stuck)
{
	DRM_DEBUG("Master Error, EIR 0x%08x\n", eir);

	if (eir_stuck)
		drm_dbg(&dev_priv->drm, "EIR stuck: 0x%08x, masked\n",
			eir_stuck);
}

static irqreturn_t i8xx_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u16 eir = 0, eir_stuck = 0;
		u16 iir;

		iir = intel_uncore_read16(&dev_priv->uncore, GEN2_IIR);
		if (iir == 0)
			break;

		ret = IRQ_HANDLED;

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i8xx_error_irq_ack(dev_priv, &eir, &eir_stuck);

		intel_uncore_write16(&dev_priv->uncore, GEN2_IIR, iir);

		if (iir & I915_USER_INTERRUPT)
			intel_engine_cs_irq(to_gt(dev_priv)->engine[RCS0], iir);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i8xx_error_irq_handler(dev_priv, eir, eir_stuck);

		i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats);
	} while (0);

	pmu_irq_stats(dev_priv, ret);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}

static void i915_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	if (I915_HAS_HOTPLUG(dev_priv)) {
		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
		intel_uncore_rmw(&dev_priv->uncore, PORT_HOTPLUG_STAT, 0, 0);
	}

	i9xx_pipestat_irq_reset(dev_priv);

	GEN3_IRQ_RESET(uncore, GEN2_);
	dev_priv->irq_mask = ~0u;
}
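
/*
 * Note: GEN3_IRQ_INIT()/GEN3_IRQ_RESET() are helpers defined earlier in
 * this file; roughly, they program the IMR/IER pair for one interrupt
 * bank (checking that IIR is clear first) rather than being raw
 * register writes.
 */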

static void i915_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 enable_mask;

	intel_uncore_write(uncore, EMR,
			   ~(I915_ERROR_PAGE_TABLE |
			     I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_MASTER_ERROR_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_MASTER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	if (I915_HAS_HOTPLUG(dev_priv)) {
		/* Enable in IER... */
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
		/* and unmask in IMR */
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
	}

	GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	i915_enable_asle_pipestat(dev_priv);
}

static irqreturn_t i915_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 eir = 0, eir_stuck = 0;
		u32 hotplug_status = 0;
		u32 iir;

		iir = intel_uncore_read(&dev_priv->uncore, GEN2_IIR);
		if (iir == 0)
			break;

		ret = IRQ_HANDLED;

		if (I915_HAS_HOTPLUG(dev_priv) &&
		    iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);

		intel_uncore_write(&dev_priv->uncore, GEN2_IIR, iir);

		if (iir & I915_USER_INTERRUPT)
			intel_engine_cs_irq(to_gt(dev_priv)->engine[RCS0], iir);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_handler(dev_priv, eir, eir_stuck);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		i915_pipestat_irq_handler(dev_priv, iir, pipe_stats);
	} while (0);

	pmu_irq_stats(dev_priv, ret);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}

static void i965_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
	intel_uncore_rmw(uncore, PORT_HOTPLUG_STAT, 0, 0);

	i9xx_pipestat_irq_reset(dev_priv);

	GEN3_IRQ_RESET(uncore, GEN2_);
	dev_priv->irq_mask = ~0u;
}
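
/*
 * The i965/g4x postinstall below mirrors the gen3 one, with a wider
 * error mask on G4X, the display port interrupt always enabled, an
 * additional GMBUS pipestat on pipe A, and an extra BSD ring user
 * interrupt on G4X.
 */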

static void i965_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 enable_mask;
	u32 error_mask;

	/*
	 * Enable some error detection, note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev_priv)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	intel_uncore_write(uncore, EMR, error_mask);

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PORT_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_MASTER_ERROR_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PORT_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_MASTER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	if (IS_G4X(dev_priv))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	i915_enable_asle_pipestat(dev_priv);
}

static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_en;

	lockdep_assert_held(&dev_priv->irq_lock);

	/* Note HDMI and DP share hotplug bits */
	/* enable bits are the same for all generations */
	hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915);
	/*
	 * Programming the CRT detection parameters tends to generate a
	 * spurious hotplug event about three seconds later. So just do
	 * it once.
	 */
	if (IS_G4X(dev_priv))
		hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
	hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;

	/* Ignore TV since it's buggy */
	i915_hotplug_interrupt_update_locked(dev_priv,
					     HOTPLUG_INT_EN_MASK |
					     CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
					     CRT_HOTPLUG_ACTIVATION_PERIOD_64,
					     hotplug_en);
}

static irqreturn_t i965_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 eir = 0, eir_stuck = 0;
		u32 hotplug_status = 0;
		u32 iir;

		iir = intel_uncore_read(&dev_priv->uncore, GEN2_IIR);
		if (iir == 0)
			break;

		ret = IRQ_HANDLED;

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);

		intel_uncore_write(&dev_priv->uncore, GEN2_IIR, iir);

		if (iir & I915_USER_INTERRUPT)
			intel_engine_cs_irq(to_gt(dev_priv)->engine[RCS0],
					    iir);

		if (iir & I915_BSD_USER_INTERRUPT)
			intel_engine_cs_irq(to_gt(dev_priv)->engine[VCS0],
					    iir >> 25);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_handler(dev_priv, eir, eir_stuck);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		i965_pipestat_irq_handler(dev_priv, iir, pipe_stats);
	} while (0);

	pmu_irq_stats(dev_priv, IRQ_HANDLED);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}

struct intel_hotplug_funcs {
	void (*hpd_irq_setup)(struct drm_i915_private *i915);
};

#define HPD_FUNCS(platform) \
static const struct intel_hotplug_funcs platform##_hpd_funcs = { \
	.hpd_irq_setup = platform##_hpd_irq_setup, \
}

HPD_FUNCS(i915);
HPD_FUNCS(dg1);
HPD_FUNCS(gen11);
HPD_FUNCS(bxt);
HPD_FUNCS(icp);
HPD_FUNCS(spt);
HPD_FUNCS(ilk);
#undef HPD_FUNCS

void intel_hpd_irq_setup(struct drm_i915_private *i915)
{
	if (i915->display_irqs_enabled && i915->display.funcs.hotplug)
		i915->display.funcs.hotplug->hpd_irq_setup(i915);
}
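
/*
 * For reference, HPD_FUNCS(ilk) above expands to:
 *
 *	static const struct intel_hotplug_funcs ilk_hpd_funcs = {
 *		.hpd_irq_setup = ilk_hpd_irq_setup,
 *	};
 *
 * intel_irq_init() below selects one of these vtables per platform and
 * intel_hpd_irq_setup() dispatches through it.
 */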

/**
 * intel_irq_init - initializes irq support
 * @dev_priv: i915 device instance
 *
 * This function initializes all the irq support including work items, timers
 * and all the vtables. It does not set up the interrupt itself though.
 */
void intel_irq_init(struct drm_i915_private *dev_priv)
{
	int i;

	INIT_WORK(&dev_priv->l3_parity.error_work, ivb_parity_work);
	for (i = 0; i < MAX_L3_SLICES; ++i)
		dev_priv->l3_parity.remap_info[i] = NULL;

	/* pre-gen11 the guc irqs bits are in the upper 16 bits of the pm reg */
	if (HAS_GT_UC(dev_priv) && GRAPHICS_VER(dev_priv) < 11)
		to_gt(dev_priv)->pm_guc_events = GUC_INTR_GUC2HOST << 16;

	if (!HAS_DISPLAY(dev_priv))
		return;

	intel_hpd_init_pins(dev_priv);

	intel_hpd_init_early(dev_priv);

	dev_priv->drm.vblank_disable_immediate = true;

	/* Most platforms treat the display irq block as an always-on
	 * power domain. vlv/chv can disable it at runtime and need
	 * special care to avoid writing any of the display block registers
	 * outside of the power domain. We defer setting up the display irqs
	 * in this case to the runtime pm.
	 */
	dev_priv->display_irqs_enabled = true;
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		dev_priv->display_irqs_enabled = false;

	if (HAS_GMCH(dev_priv)) {
		if (I915_HAS_HOTPLUG(dev_priv))
			dev_priv->display.funcs.hotplug = &i915_hpd_funcs;
	} else {
		if (HAS_PCH_DG2(dev_priv))
			dev_priv->display.funcs.hotplug = &icp_hpd_funcs;
		else if (HAS_PCH_DG1(dev_priv))
			dev_priv->display.funcs.hotplug = &dg1_hpd_funcs;
		else if (DISPLAY_VER(dev_priv) >= 11)
			dev_priv->display.funcs.hotplug = &gen11_hpd_funcs;
		else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
			dev_priv->display.funcs.hotplug = &bxt_hpd_funcs;
		else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
			dev_priv->display.funcs.hotplug = &icp_hpd_funcs;
		else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
			dev_priv->display.funcs.hotplug = &spt_hpd_funcs;
		else
			dev_priv->display.funcs.hotplug = &ilk_hpd_funcs;
	}
}
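
/*
 * The l3_parity.remap_info[] slots cleared in intel_irq_init() are, as
 * far as this file is concerned, only freed in intel_irq_fini() below;
 * allocation is assumed to happen on demand elsewhere in the L3 parity
 * handling.
 */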

/**
 * intel_irq_fini - deinitializes IRQ support
 * @i915: i915 device instance
 *
 * This function deinitializes all the IRQ support.
 */
void intel_irq_fini(struct drm_i915_private *i915)
{
	int i;

	for (i = 0; i < MAX_L3_SLICES; ++i)
		kfree(i915->l3_parity.remap_info[i]);
}

static irq_handler_t intel_irq_handler(struct drm_i915_private *dev_priv)
{
	if (HAS_GMCH(dev_priv)) {
		if (IS_CHERRYVIEW(dev_priv))
			return cherryview_irq_handler;
		else if (IS_VALLEYVIEW(dev_priv))
			return valleyview_irq_handler;
		else if (GRAPHICS_VER(dev_priv) == 4)
			return i965_irq_handler;
		else if (GRAPHICS_VER(dev_priv) == 3)
			return i915_irq_handler;
		else
			return i8xx_irq_handler;
	} else {
		if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
			return dg1_irq_handler;
		else if (GRAPHICS_VER(dev_priv) >= 11)
			return gen11_irq_handler;
		else if (GRAPHICS_VER(dev_priv) >= 8)
			return gen8_irq_handler;
		else
			return ilk_irq_handler;
	}
}

static void intel_irq_reset(struct drm_i915_private *dev_priv)
{
	if (HAS_GMCH(dev_priv)) {
		if (IS_CHERRYVIEW(dev_priv))
			cherryview_irq_reset(dev_priv);
		else if (IS_VALLEYVIEW(dev_priv))
			valleyview_irq_reset(dev_priv);
		else if (GRAPHICS_VER(dev_priv) == 4)
			i965_irq_reset(dev_priv);
		else if (GRAPHICS_VER(dev_priv) == 3)
			i915_irq_reset(dev_priv);
		else
			i8xx_irq_reset(dev_priv);
	} else {
		if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
			dg1_irq_reset(dev_priv);
		else if (GRAPHICS_VER(dev_priv) >= 11)
			gen11_irq_reset(dev_priv);
		else if (GRAPHICS_VER(dev_priv) >= 8)
			gen8_irq_reset(dev_priv);
		else
			ilk_irq_reset(dev_priv);
	}
}

static void intel_irq_postinstall(struct drm_i915_private *dev_priv)
{
	if (HAS_GMCH(dev_priv)) {
		if (IS_CHERRYVIEW(dev_priv))
			cherryview_irq_postinstall(dev_priv);
		else if (IS_VALLEYVIEW(dev_priv))
			valleyview_irq_postinstall(dev_priv);
		else if (GRAPHICS_VER(dev_priv) == 4)
			i965_irq_postinstall(dev_priv);
		else if (GRAPHICS_VER(dev_priv) == 3)
			i915_irq_postinstall(dev_priv);
		else
			i8xx_irq_postinstall(dev_priv);
	} else {
		if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
			dg1_irq_postinstall(dev_priv);
		else if (GRAPHICS_VER(dev_priv) >= 11)
			gen11_irq_postinstall(dev_priv);
		else if (GRAPHICS_VER(dev_priv) >= 8)
			gen8_irq_postinstall(dev_priv);
		else
			ilk_irq_postinstall(dev_priv);
	}
}
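
/*
 * Install/uninstall lifecycle below: intel_irq_install() marks irqs as
 * enabled, resets the hardware to an all-masked state, requests the
 * (shared) interrupt line and only then runs the postinstall hooks;
 * intel_irq_uninstall() reverses this and additionally cancels any
 * pending hotplug work.
 */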

/**
 * intel_irq_install - enables the hardware interrupt
 * @dev_priv: i915 device instance
 *
 * This function enables the hardware interrupt handling, but leaves the hotplug
 * handling still disabled. It is called after intel_irq_init().
 *
 * In the driver load and resume code we need working interrupts in a few places
 * but don't want to deal with the hassle of concurrent probe and hotplug
 * workers. Hence the split into this two-stage approach.
 */
int intel_irq_install(struct drm_i915_private *dev_priv)
{
	int irq = to_pci_dev(dev_priv->drm.dev)->irq;
	int ret;

	/*
	 * We enable some interrupt sources in our postinstall hooks, so mark
	 * interrupts as enabled _before_ actually enabling them to avoid
	 * special cases in our ordering checks.
	 */
	dev_priv->runtime_pm.irqs_enabled = true;

	dev_priv->irq_enabled = true;

	intel_irq_reset(dev_priv);

	ret = request_irq(irq, intel_irq_handler(dev_priv),
			  IRQF_SHARED, DRIVER_NAME, dev_priv);
	if (ret < 0) {
		dev_priv->irq_enabled = false;
		return ret;
	}

	intel_irq_postinstall(dev_priv);

	return ret;
}

/**
 * intel_irq_uninstall - finalizes all irq handling
 * @dev_priv: i915 device instance
 *
 * This stops interrupt and hotplug handling and unregisters and frees all
 * resources acquired in the init functions.
 */
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
{
	int irq = to_pci_dev(dev_priv->drm.dev)->irq;

	/*
	 * FIXME we can get called twice during driver probe
	 * error handling as well as during driver remove due to
	 * intel_modeset_driver_remove() calling us out of sequence.
	 * Would be nice if it didn't do that...
	 */
	if (!dev_priv->irq_enabled)
		return;

	dev_priv->irq_enabled = false;

	intel_irq_reset(dev_priv);

	free_irq(irq, dev_priv);

	intel_hpd_cancel_work(dev_priv);
	dev_priv->runtime_pm.irqs_enabled = false;
}

/**
 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
 * @dev_priv: i915 device instance
 *
 * This function is used to disable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
{
	intel_irq_reset(dev_priv);
	dev_priv->runtime_pm.irqs_enabled = false;
	intel_synchronize_irq(dev_priv);
}

/**
 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
 * @dev_priv: i915 device instance
 *
 * This function is used to enable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->runtime_pm.irqs_enabled = true;
	intel_irq_reset(dev_priv);
	intel_irq_postinstall(dev_priv);
}

bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
{
	return dev_priv->runtime_pm.irqs_enabled;
}

void intel_synchronize_irq(struct drm_i915_private *i915)
{
	synchronize_irq(to_pci_dev(i915->drm.dev)->irq);
}

void intel_synchronize_hardirq(struct drm_i915_private *i915)
{
	synchronize_hardirq(to_pci_dev(i915->drm.dev)->irq);
}