/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/circ_buf.h>
#include <linux/slab.h>
#include <linux/sysrq.h>

#include <drm/drm_drv.h>

#include "display/intel_de.h"
#include "display/intel_display_trace.h"
#include "display/intel_display_types.h"
#include "display/intel_fifo_underrun.h"
#include "display/intel_hotplug.h"
#include "display/intel_lpe_audio.h"
#include "display/intel_psr.h"

#include "gt/intel_breadcrumbs.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_irq.h"
#include "gt/intel_gt_pm_irq.h"
#include "gt/intel_rps.h"

#include "i915_drv.h"
#include "i915_irq.h"
#include "intel_pm.h"

/**
 * DOC: interrupt handling
 *
 * These functions provide the basic support for enabling and disabling the
 * interrupt handling support. There's a lot more functionality in i915_irq.c
 * and related files, but that will be described in separate chapters.
 */

/*
 * Interrupt statistic for PMU. Increments the counter only if the
 * interrupt originated from the GPU so interrupts from a device which
 * shares the interrupt line are not accounted.
 */
static inline void pmu_irq_stats(struct drm_i915_private *i915,
				 irqreturn_t res)
{
	if (unlikely(res != IRQ_HANDLED))
		return;

	/*
	 * A clever compiler translates that into INC. A not so clever one
	 * should at least prevent store tearing.
	 */
	WRITE_ONCE(i915->pmu.irq_count, i915->pmu.irq_count + 1);
}
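/*
 * The typedefs and tables below describe hotplug decoding: each table maps
 * an enum hpd_pin to the hotplug interrupt bit(s) used for that pin on a
 * given platform/PCH, while the callback types decode long vs. short
 * hotplug pulses and compute per-pin hotplug enable bits.
 */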
typedef bool (*long_pulse_detect_func)(enum hpd_pin pin, u32 val);
typedef u32 (*hotplug_enables_func)(struct drm_i915_private *i915,
				    enum hpd_pin pin);

static const u32 hpd_ilk[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG,
};

static const u32 hpd_ivb[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
};

static const u32 hpd_bdw[HPD_NUM_PINS] = {
	[HPD_PORT_A] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_A),
};

static const u32 hpd_ibx[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG,
};

static const u32 hpd_cpt[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
};

static const u32 hpd_spt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
	[HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT,
};

static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN,
};

static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS,
};

static const u32 hpd_status_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS,
};

static const u32 hpd_bxt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_A),
	[HPD_PORT_B] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_B),
	[HPD_PORT_C] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_C),
};

static const u32 hpd_gen11[HPD_NUM_PINS] = {
	[HPD_PORT_TC1] = GEN11_TC_HOTPLUG(HPD_PORT_TC1) | GEN11_TBT_HOTPLUG(HPD_PORT_TC1),
	[HPD_PORT_TC2] = GEN11_TC_HOTPLUG(HPD_PORT_TC2) | GEN11_TBT_HOTPLUG(HPD_PORT_TC2),
	[HPD_PORT_TC3] = GEN11_TC_HOTPLUG(HPD_PORT_TC3) | GEN11_TBT_HOTPLUG(HPD_PORT_TC3),
	[HPD_PORT_TC4] = GEN11_TC_HOTPLUG(HPD_PORT_TC4) | GEN11_TBT_HOTPLUG(HPD_PORT_TC4),
	[HPD_PORT_TC5] = GEN11_TC_HOTPLUG(HPD_PORT_TC5) | GEN11_TBT_HOTPLUG(HPD_PORT_TC5),
	[HPD_PORT_TC6] = GEN11_TC_HOTPLUG(HPD_PORT_TC6) | GEN11_TBT_HOTPLUG(HPD_PORT_TC6),
};

static const u32 hpd_icp[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_A),
	[HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_B),
	[HPD_PORT_C] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_C),
	[HPD_PORT_TC1] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC1),
	[HPD_PORT_TC2] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC2),
	[HPD_PORT_TC3] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC3),
	[HPD_PORT_TC4] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC4),
	[HPD_PORT_TC5] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC5),
	[HPD_PORT_TC6] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC6),
};

static const u32 hpd_sde_dg1[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_A),
	[HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_B),
	[HPD_PORT_C] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_C),
	[HPD_PORT_D] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_D),
};

static void intel_hpd_init_pins(struct drm_i915_private *dev_priv)
{
	struct i915_hotplug *hpd = &dev_priv->hotplug;

	if (HAS_GMCH(dev_priv)) {
		if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
		    IS_CHERRYVIEW(dev_priv))
			hpd->hpd = hpd_status_g4x;
		else
			hpd->hpd = hpd_status_i915;
		return;
	}

	if (DISPLAY_VER(dev_priv) >= 11)
		hpd->hpd = hpd_gen11;
	else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
		hpd->hpd = hpd_bxt;
	else if (DISPLAY_VER(dev_priv) >= 8)
		hpd->hpd = hpd_bdw;
	else if (DISPLAY_VER(dev_priv) >= 7)
		hpd->hpd = hpd_ivb;
	else
		hpd->hpd = hpd_ilk;

	if ((INTEL_PCH_TYPE(dev_priv) < PCH_DG1) &&
	    (!HAS_PCH_SPLIT(dev_priv) || HAS_PCH_NOP(dev_priv)))
		return;

	if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1)
		hpd->pch_hpd = hpd_sde_dg1;
	else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
		hpd->pch_hpd = hpd_icp;
	else if (HAS_PCH_CNP(dev_priv) || HAS_PCH_SPT(dev_priv))
		hpd->pch_hpd = hpd_spt;
	else if (HAS_PCH_LPT(dev_priv) || HAS_PCH_CPT(dev_priv))
		hpd->pch_hpd = hpd_cpt;
	else if (HAS_PCH_IBX(dev_priv))
		hpd->pch_hpd = hpd_ibx;
	else
		MISSING_CASE(INTEL_PCH_TYPE(dev_priv));
}

static void
intel_handle_vblank(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);

	drm_crtc_handle_vblank(&crtc->base);
}

void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr,
		    i915_reg_t iir, i915_reg_t ier)
{
	intel_uncore_write(uncore, imr, 0xffffffff);
	intel_uncore_posting_read(uncore, imr);

	intel_uncore_write(uncore, ier, 0);

	/* IIR can theoretically queue up two events. Be paranoid. */
	intel_uncore_write(uncore, iir, 0xffffffff);
	intel_uncore_posting_read(uncore, iir);
	intel_uncore_write(uncore, iir, 0xffffffff);
	intel_uncore_posting_read(uncore, iir);
}

void gen2_irq_reset(struct intel_uncore *uncore)
{
	intel_uncore_write16(uncore, GEN2_IMR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IMR);

	intel_uncore_write16(uncore, GEN2_IER, 0);

	/* IIR can theoretically queue up two events. Be paranoid. */
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
}

/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
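/*
 * As a minimal sketch of that lifecycle with the gen3 helpers below
 * (DEIMR/DEIIR/DEIER stand in for whichever IMR/IIR/IER triplet the
 * caller actually owns):
 *
 *	gen3_irq_reset(uncore, DEIMR, DEIIR, DEIER);	at preinstall/uninstall
 *	gen3_irq_init(uncore, DEIMR, imr_val,
 *		      DEIER, ier_val, DEIIR);		at postinstall
 *
 * gen3_irq_init() starts by asserting that IIR is already zero.
 */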
static void gen3_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg)
{
	u32 val = intel_uncore_read(uncore, reg);

	if (val == 0)
		return;

	drm_WARN(&uncore->i915->drm, 1,
		 "Interrupt register 0x%x is not zero: 0x%08x\n",
		 i915_mmio_reg_offset(reg), val);
	intel_uncore_write(uncore, reg, 0xffffffff);
	intel_uncore_posting_read(uncore, reg);
	intel_uncore_write(uncore, reg, 0xffffffff);
	intel_uncore_posting_read(uncore, reg);
}

static void gen2_assert_iir_is_zero(struct intel_uncore *uncore)
{
	u16 val = intel_uncore_read16(uncore, GEN2_IIR);

	if (val == 0)
		return;

	drm_WARN(&uncore->i915->drm, 1,
		 "Interrupt register 0x%x is not zero: 0x%08x\n",
		 i915_mmio_reg_offset(GEN2_IIR), val);
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
}

void gen3_irq_init(struct intel_uncore *uncore,
		   i915_reg_t imr, u32 imr_val,
		   i915_reg_t ier, u32 ier_val,
		   i915_reg_t iir)
{
	gen3_assert_iir_is_zero(uncore, iir);

	intel_uncore_write(uncore, ier, ier_val);
	intel_uncore_write(uncore, imr, imr_val);
	intel_uncore_posting_read(uncore, imr);
}

void gen2_irq_init(struct intel_uncore *uncore,
		   u32 imr_val, u32 ier_val)
{
	gen2_assert_iir_is_zero(uncore);

	intel_uncore_write16(uncore, GEN2_IER, ier_val);
	intel_uncore_write16(uncore, GEN2_IMR, imr_val);
	intel_uncore_posting_read16(uncore, GEN2_IMR);
}

/* For display hotplug interrupt */
static inline void
i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
				     u32 mask,
				     u32 bits)
{
	u32 val;

	lockdep_assert_held(&dev_priv->irq_lock);
	drm_WARN_ON(&dev_priv->drm, bits & ~mask);

	val = intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_EN);
	val &= ~mask;
	val |= bits;
	intel_uncore_write(&dev_priv->uncore, PORT_HOTPLUG_EN, val);
}

/**
 * i915_hotplug_interrupt_update - update hotplug interrupt enable
 * @dev_priv: driver private
 * @mask: bits to update
 * @bits: bits to enable
 * NOTE: the HPD enable bits are modified both inside and outside
 * of an interrupt context. To prevent these read-modify-write cycles
 * from interfering with each other, the bits are protected by a
 * spinlock. Since this function is usually not called from a context
 * where the lock is held already, this function acquires the lock
 * itself. A non-locking version is also available.
 */
void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
				   u32 mask,
				   u32 bits)
{
	spin_lock_irq(&dev_priv->irq_lock);
	i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
	spin_unlock_irq(&dev_priv->irq_lock);
}

/**
 * ilk_update_display_irq - update DEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_display_irq(struct drm_i915_private *dev_priv,
				   u32 interrupt_mask, u32 enabled_irq_mask)
{
	u32 new_val;

	lockdep_assert_held(&dev_priv->irq_lock);
	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

	new_val = dev_priv->irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->irq_mask &&
	    !drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv))) {
		dev_priv->irq_mask = new_val;
		intel_uncore_write(&dev_priv->uncore, DEIMR, dev_priv->irq_mask);
		intel_uncore_posting_read(&dev_priv->uncore, DEIMR);
	}
}

void ilk_enable_display_irq(struct drm_i915_private *i915, u32 bits)
{
	ilk_update_display_irq(i915, bits, bits);
}

void ilk_disable_display_irq(struct drm_i915_private *i915, u32 bits)
{
	ilk_update_display_irq(i915, bits, 0);
}
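/*
 * Note the two-mask convention shared by the *_update_* helpers above and
 * below: @interrupt_mask selects which IMR bits to touch, and
 * @enabled_irq_mask selects which of those end up unmasked, i.e.
 *
 *	new = (old & ~interrupt_mask) | (~enabled_irq_mask & interrupt_mask);
 *
 * so (as a sketch) ilk_update_display_irq(i915, bits, bits) unmasks "bits"
 * and ilk_update_display_irq(i915, bits, 0) masks them, since a set IMR
 * bit disables the interrupt.
 */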
/**
 * bdw_update_port_irq - update DE port interrupt
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
				u32 interrupt_mask,
				u32 enabled_irq_mask)
{
	u32 new_val;
	u32 old_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
		return;

	old_val = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PORT_IMR);

	new_val = old_val;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != old_val) {
		intel_uncore_write(&dev_priv->uncore, GEN8_DE_PORT_IMR, new_val);
		intel_uncore_posting_read(&dev_priv->uncore, GEN8_DE_PORT_IMR);
	}
}

/**
 * bdw_update_pipe_irq - update DE pipe interrupt
 * @dev_priv: driver private
 * @pipe: pipe whose interrupt to update
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
				enum pipe pipe, u32 interrupt_mask,
				u32 enabled_irq_mask)
{
	u32 new_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->de_irq_mask[pipe];
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->de_irq_mask[pipe]) {
		dev_priv->de_irq_mask[pipe] = new_val;
		intel_uncore_write(&dev_priv->uncore, GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
		intel_uncore_posting_read(&dev_priv->uncore, GEN8_DE_PIPE_IMR(pipe));
	}
}

void bdw_enable_pipe_irq(struct drm_i915_private *i915,
			 enum pipe pipe, u32 bits)
{
	bdw_update_pipe_irq(i915, pipe, bits, bits);
}

void bdw_disable_pipe_irq(struct drm_i915_private *i915,
			  enum pipe pipe, u32 bits)
{
	bdw_update_pipe_irq(i915, pipe, bits, 0);
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
					 u32 interrupt_mask,
					 u32 enabled_irq_mask)
{
	u32 sdeimr = intel_uncore_read(&dev_priv->uncore, SDEIMR);

	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

	lockdep_assert_held(&dev_priv->irq_lock);

	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
		return;

	intel_uncore_write(&dev_priv->uncore, SDEIMR, sdeimr);
	intel_uncore_posting_read(&dev_priv->uncore, SDEIMR);
}

void ibx_enable_display_interrupt(struct drm_i915_private *i915, u32 bits)
{
	ibx_display_interrupt_update(i915, bits, bits);
}

void ibx_disable_display_interrupt(struct drm_i915_private *i915, u32 bits)
{
	ibx_display_interrupt_update(i915, bits, 0);
}

u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
			      enum pipe pipe)
{
	u32 status_mask = dev_priv->pipestat_irq_mask[pipe];
	u32 enable_mask = status_mask << 16;

	lockdep_assert_held(&dev_priv->irq_lock);

	if (DISPLAY_VER(dev_priv) < 5)
		goto out;

	/*
	 * On pipe A we don't support the PSR interrupt yet,
	 * on pipe B and C the same bit MBZ.
	 */
	if (drm_WARN_ON_ONCE(&dev_priv->drm,
			     status_mask & PIPE_A_PSR_STATUS_VLV))
		return 0;
	/*
	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
	 * A the same bit is for perf counters which we don't use either.
	 */
	if (drm_WARN_ON_ONCE(&dev_priv->drm,
			     status_mask & PIPE_B_PSR_STATUS_VLV))
		return 0;

	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

out:
	drm_WARN_ONCE(&dev_priv->drm,
		      enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask);

	return enable_mask;
}

void i915_enable_pipestat(struct drm_i915_private *dev_priv,
			  enum pipe pipe, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 enable_mask;

	drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: status_mask=0x%x\n",
		      pipe_name(pipe), status_mask);

	lockdep_assert_held(&dev_priv->irq_lock);
	drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));

	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == status_mask)
		return;

	dev_priv->pipestat_irq_mask[pipe] |= status_mask;
	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

	intel_uncore_write(&dev_priv->uncore, reg, enable_mask | status_mask);
	intel_uncore_posting_read(&dev_priv->uncore, reg);
}

void i915_disable_pipestat(struct drm_i915_private *dev_priv,
			   enum pipe pipe, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 enable_mask;

	drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: status_mask=0x%x\n",
		      pipe_name(pipe), status_mask);

	lockdep_assert_held(&dev_priv->irq_lock);
	drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));

	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == 0)
		return;

	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

	intel_uncore_write(&dev_priv->uncore, reg, enable_mask | status_mask);
	intel_uncore_posting_read(&dev_priv->uncore, reg);
}

static bool i915_has_asle(struct drm_i915_private *dev_priv)
{
	if (!dev_priv->opregion.asle)
		return false;

	return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
}

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 * @dev_priv: i915 device private
 */
static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
{
	if (!i915_has_asle(dev_priv))
		return;

	spin_lock_irq(&dev_priv->irq_lock);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
	if (DISPLAY_VER(dev_priv) >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_STATUS);

	spin_unlock_irq(&dev_priv->irq_lock);
}

/*
 * This timing diagram depicts the video signal in and
 * around the vertical blanking period.
 *
 * Assumptions about the fictitious mode used in this example:
 *	vblank_start >= 3
 *	vsync_start = vblank_start + 1
 *	vsync_end = vblank_start + 2
 *	vtotal = vblank_start + 3
 *
 *           start of vblank:
 *           latch double buffered registers
 *           increment frame counter (ctg+)
 *           generate start of vblank interrupt (gen4+)
 *           |
 *           |          frame start:
 *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
 *           |          may be shifted forward 1-3 extra lines via PIPECONF
 *           |          |
 *           |          |  start of vsync:
 *           |          |  generate vsync interrupt
 *           |          |  |
 * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
 *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
 * ----va---> <-----------------vb--------------------> <--------va-------------
 *       |          |       <----vs----->                     |
 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
 *       |          |                                         |
 *       last visible pixel                                   first visible pixel
 *                  |                                         increment frame counter (gen3/4)
 *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
 *
 * x  = horizontal active
 * _  = horizontal blanking
 * hs = horizontal sync
 * va = vertical active
 * vb = vertical blanking
 * vs = vertical sync
 * vbs = vblank_start (number)
 *
 * Summary:
 * - most events happen at the start of horizontal sync
 * - frame start happens at the start of horizontal blank, 1-4 lines
 *   (depending on PIPECONF settings) after the start of vblank
 * - gen3/4 pixel and frame counter are synchronized with the start
 *   of horizontal active on the first line of vertical active
 */
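/*
 * i915_get_vblank_counter() below leans on the gen3/4 behaviour from the
 * summary above: the hardware frame counter increments at the start of
 * active, so a vblank counter is cooked up by sampling the pixel counter
 * alongside the frame counter and bumping the frame count by one once the
 * pixel counter has passed vblank start (both expressed in pixels from the
 * start of hsync).
 */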
/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
u32 i915_get_vblank_counter(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[drm_crtc_index(crtc)];
	const struct drm_display_mode *mode = &vblank->hwmode;
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	i915_reg_t high_frame, low_frame;
	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
	unsigned long irqflags;

	/*
	 * On i965gm TV output the frame counter only works up to
	 * the point when we enable the TV encoder. After that the
	 * frame counter ceases to work and reads zero. We need a
	 * vblank wait before enabling the TV encoder and so we
	 * have to enable vblank interrupts while the frame counter
	 * is still in a working state. However the core vblank code
	 * does not like us returning non-zero frame counter values
	 * when we've told it that we don't have a working frame
	 * counter. Thus we must stop non-zero values leaking out.
	 */
	if (!vblank->max_vblank_count)
		return 0;

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vbl_start = mode->crtc_vblank_start;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vbl_start = DIV_ROUND_UP(vbl_start, 2);

	/* Convert to pixel count */
	vbl_start *= htotal;

	/* Start of vblank event occurs at start of hsync */
	vbl_start -= htotal - hsync_start;

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = intel_de_read_fw(dev_priv, high_frame) & PIPE_FRAME_HIGH_MASK;
		low   = intel_de_read_fw(dev_priv, low_frame);
		high2 = intel_de_read_fw(dev_priv, high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * The frame counter increments at beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}

u32 g4x_get_vblank_counter(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[drm_crtc_index(crtc)];
	enum pipe pipe = to_intel_crtc(crtc)->pipe;

	if (!vblank->max_vblank_count)
		return 0;

	return intel_uncore_read(&dev_priv->uncore, PIPE_FRMCOUNT_G4X(pipe));
}

static u32 intel_crtc_scanlines_since_frame_timestamp(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct drm_vblank_crtc *vblank =
		&crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
	const struct drm_display_mode *mode = &vblank->hwmode;
	u32 htotal = mode->crtc_htotal;
	u32 clock = mode->crtc_clock;
	u32 scan_prev_time, scan_curr_time, scan_post_time;

	/*
	 * To avoid the race condition where we might cross into the
	 * next vblank between the PIPE_FRMTMSTMP and TIMESTAMP_CTR
	 * reads, make sure we read PIPE_FRMTMSTMP and TIMESTAMP_CTR
	 * during the same frame.
	 */
	do {
		/*
		 * This field provides read back of the display
		 * pipe frame time stamp. The time stamp value
		 * is sampled at every start of vertical blank.
		 */
		scan_prev_time = intel_de_read_fw(dev_priv,
						  PIPE_FRMTMSTMP(crtc->pipe));

		/*
		 * The TIMESTAMP_CTR register has the current
		 * time stamp value.
		 */
		scan_curr_time = intel_de_read_fw(dev_priv, IVB_TIMESTAMP_CTR);

		scan_post_time = intel_de_read_fw(dev_priv,
						  PIPE_FRMTMSTMP(crtc->pipe));
	} while (scan_post_time != scan_prev_time);

	return div_u64(mul_u32_u32(scan_curr_time - scan_prev_time,
				   clock), 1000 * htotal);
}

/*
 * On certain encoders on certain platforms, the pipe scanline
 * register will not work to get the scanline, since the timings
 * are driven from the PORT or there are issues with scanline
 * register updates.
 * This function will use the Framestamp and current
 * timestamp registers to calculate the scanline.
 */
static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc)
{
	struct drm_vblank_crtc *vblank =
		&crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
	const struct drm_display_mode *mode = &vblank->hwmode;
	u32 vblank_start = mode->crtc_vblank_start;
	u32 vtotal = mode->crtc_vtotal;
	u32 scanline;

	scanline = intel_crtc_scanlines_since_frame_timestamp(crtc);
	scanline = min(scanline, vtotal - 1);
	scanline = (scanline + vblank_start) % vtotal;

	return scanline;
}

/*
 * intel_de_read_fw(), only for fast reads of display block, no need for
 * forcewake etc.
 */
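/*
 * Read the current scanline of @crtc via PIPEDSL (or via the frame/
 * timestamp counters on encoders where the scanline register is
 * unreliable), apply the scanline_offset correction described in
 * update_scanline_offset(), and return the result modulo vtotal.
 * Returns 0 for an inactive crtc.
 */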
static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct drm_display_mode *mode;
	struct drm_vblank_crtc *vblank;
	enum pipe pipe = crtc->pipe;
	int position, vtotal;

	if (!crtc->active)
		return 0;

	vblank = &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
	mode = &vblank->hwmode;

	if (crtc->mode_flags & I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP)
		return __intel_get_crtc_scanline_from_timestamp(crtc);

	vtotal = mode->crtc_vtotal;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vtotal /= 2;

	if (DISPLAY_VER(dev_priv) == 2)
		position = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
	else
		position = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

	/*
	 * On HSW, the DSL reg (0x70000) appears to return 0 if we
	 * read it just before the start of vblank. So try it again
	 * so we don't accidentally end up spanning a vblank frame
	 * increment, causing the pipe_update_end() code to squawk at us.
	 *
	 * The nature of this problem means we can't simply check the ISR
	 * bit and return the vblank start value; nor can we use the scanline
	 * debug register in the transcoder as it appears to have the same
	 * problem. We may need to extend this to include other platforms,
	 * but so far testing only shows the problem on HSW.
	 */
	if (HAS_DDI(dev_priv) && !position) {
		int i, temp;

		for (i = 0; i < 100; i++) {
			udelay(1);
			temp = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
			if (temp != position) {
				position = temp;
				break;
			}
		}
	}

	/*
	 * See update_scanline_offset() for the details on the
	 * scanline_offset adjustment.
	 */
	return (position + crtc->scanline_offset) % vtotal;
}

static bool i915_get_crtc_scanoutpos(struct drm_crtc *_crtc,
				     bool in_vblank_irq,
				     int *vpos, int *hpos,
				     ktime_t *stime, ktime_t *etime,
				     const struct drm_display_mode *mode)
{
	struct drm_device *dev = _crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *crtc = to_intel_crtc(_crtc);
	enum pipe pipe = crtc->pipe;
	int position;
	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
	unsigned long irqflags;
	bool use_scanline_counter = DISPLAY_VER(dev_priv) >= 5 ||
		IS_G4X(dev_priv) || DISPLAY_VER(dev_priv) == 2 ||
		crtc->mode_flags & I915_MODE_FLAG_USE_SCANLINE_COUNTER;

	if (drm_WARN_ON(&dev_priv->drm, !mode->crtc_clock)) {
		drm_dbg(&dev_priv->drm,
			"trying to get scanoutpos for disabled "
			"pipe %c\n", pipe_name(pipe));
		return false;
	}

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (crtc->mode_flags & I915_MODE_FLAG_VRR) {
		int scanlines = intel_crtc_scanlines_since_frame_timestamp(crtc);

		position = __intel_get_crtc_scanline(crtc);

		/*
		 * Already exiting vblank? If so, shift our position
		 * so it looks like we're already approaching the full
		 * vblank end. This should make the generated timestamp
		 * more or less match when the active portion will start.
		 */
		if (position >= vbl_start && scanlines < position)
			position = min(crtc->vmax_vblank_start + scanlines, vtotal - 1);
	} else if (use_scanline_counter) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = __intel_get_crtc_scanline(crtc);
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (intel_de_read_fw(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;

		/*
		 * In interlaced modes, the pixel counter counts all pixels,
		 * so one field will have htotal more pixels. In order to avoid
		 * the reported position from jumping backwards when the pixel
		 * counter is beyond the length of the shorter field, just
		 * clamp the position to the length of the shorter field. This
		 * matches how the scanline counter based position works since
		 * the scanline counter doesn't count the two half lines.
		 */
		if (position >= vtotal)
			position = vtotal - 1;

		/*
		 * Start of vblank interrupt is triggered at start of hsync,
		 * just prior to the first active line of vblank. However we
		 * consider lines to start at the leading edge of horizontal
		 * active. So, should we get here before we've crossed into
		 * the horizontal active of the first line in vblank, we would
		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
		 * always add htotal-hsync_start to the current pixel position.
		 */
		position = (position + htotal - hsync_start) % vtotal;
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
	 */
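	/*
	 * Worked example (hypothetical numbers): with vtotal = vbl_end = 1125
	 * and vbl_start = 1084, a scanline position of 1100 (inside vblank)
	 * becomes 1100 - 1125 = -25, while a position of 500 (in the active
	 * portion) stays 500 + (1125 - 1125) = 500.
	 */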
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;

	if (use_scanline_counter) {
		*vpos = position;
		*hpos = 0;
	} else {
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	return true;
}

bool intel_crtc_get_vblank_timestamp(struct drm_crtc *crtc, int *max_error,
				     ktime_t *vblank_time, bool in_vblank_irq)
{
	return drm_crtc_vblank_helper_get_vblank_timestamp_internal(
		crtc, max_error, vblank_time, in_vblank_irq,
		i915_get_crtc_scanoutpos);
}

int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	unsigned long irqflags;
	int position;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	position = __intel_get_crtc_scanline(crtc);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	return position;
}

/**
 * ivb_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows, since statistically
 * the same row is likely to go bad again.
 */
static void ivb_parity_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), l3_parity.error_work);
	struct intel_gt *gt = to_gt(dev_priv);
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	u32 misccpctl;
	u8 slice = 0;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->drm.struct_mutex);

	/* If we've screwed up tracking, just let the interrupt fire again */
	if (drm_WARN_ON(&dev_priv->drm, !dev_priv->l3_parity.which_slice))
		goto out;

	misccpctl = intel_uncore_read(&dev_priv->uncore, GEN7_MISCCPCTL);
	intel_uncore_write(&dev_priv->uncore, GEN7_MISCCPCTL,
			   misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	intel_uncore_posting_read(&dev_priv->uncore, GEN7_MISCCPCTL);

	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		i915_reg_t reg;

		slice--;
		if (drm_WARN_ON_ONCE(&dev_priv->drm,
				     slice >= NUM_L3_SLICES(dev_priv)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1 << slice);

		reg = GEN7_L3CDERRST1(slice);

		error_status = intel_uncore_read(&dev_priv->uncore, reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		intel_uncore_write(&dev_priv->uncore, reg,
				   GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		intel_uncore_posting_read(&dev_priv->uncore, reg);

		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

		kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj,
				   KOBJ_CHANGE, parity_event);

		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			  slice, row, bank, subbank);

		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
	}

	intel_uncore_write(&dev_priv->uncore, GEN7_MISCCPCTL, misccpctl);

out:
	drm_WARN_ON(&dev_priv->drm, dev_priv->l3_parity.which_slice);
	spin_lock_irq(&gt->irq_lock);
	gen5_gt_enable_irq(gt, GT_PARITY_ERROR(dev_priv));
	spin_unlock_irq(&gt->irq_lock);

	mutex_unlock(&dev_priv->drm.struct_mutex);
}

static bool gen11_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_TC1:
	case HPD_PORT_TC2:
	case HPD_PORT_TC3:
	case HPD_PORT_TC4:
	case HPD_PORT_TC5:
	case HPD_PORT_TC6:
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(pin);
	default:
		return false;
	}
}

static bool bxt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_A:
		return val & PORTA_HOTPLUG_LONG_DETECT;
	case HPD_PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case HPD_PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool icp_ddi_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_A:
	case HPD_PORT_B:
	case HPD_PORT_C:
	case HPD_PORT_D:
		return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(pin);
	default:
		return false;
	}
}

static bool icp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_TC1:
	case HPD_PORT_TC2:
	case HPD_PORT_TC3:
	case HPD_PORT_TC4:
	case HPD_PORT_TC5:
	case HPD_PORT_TC6:
		return val & ICP_TC_HPD_LONG_DETECT(pin);
	default:
		return false;
	}
}
static bool spt_port_hotplug2_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_E:
		return val & PORTE_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool spt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_A:
		return val & PORTA_HOTPLUG_LONG_DETECT;
	case HPD_PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case HPD_PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	case HPD_PORT_D:
		return val & PORTD_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool ilk_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_A:
		return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool pch_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case HPD_PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	case HPD_PORT_D:
		return val & PORTD_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool i9xx_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_B:
		return val & PORTB_HOTPLUG_INT_LONG_PULSE;
	case HPD_PORT_C:
		return val & PORTC_HOTPLUG_INT_LONG_PULSE;
	case HPD_PORT_D:
		return val & PORTD_HOTPLUG_INT_LONG_PULSE;
	default:
		return false;
	}
}
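/*
 * A typical decode sequence using these helpers, as a sketch (this mirrors
 * i9xx_hpd_irq_handler() and ibx_hpd_irq_handler() below):
 *
 *	u32 pin_mask = 0, long_mask = 0;
 *
 *	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
 *			   hotplug_trigger, dig_hotplug_reg,
 *			   dev_priv->hotplug.pch_hpd,
 *			   pch_port_hotplug_long_detect);
 *	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
 */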
/*
 * Get a bit mask of pins that have triggered, and which ones may be long.
 * This can be called multiple times with the same masks to accumulate
 * hotplug detection results from several registers.
 *
 * Note that the caller is expected to zero out the masks initially.
 */
static void intel_get_hpd_pins(struct drm_i915_private *dev_priv,
			       u32 *pin_mask, u32 *long_mask,
			       u32 hotplug_trigger, u32 dig_hotplug_reg,
			       const u32 hpd[HPD_NUM_PINS],
			       bool long_pulse_detect(enum hpd_pin pin, u32 val))
{
	enum hpd_pin pin;

	BUILD_BUG_ON(BITS_PER_TYPE(*pin_mask) < HPD_NUM_PINS);

	for_each_hpd_pin(pin) {
		if ((hpd[pin] & hotplug_trigger) == 0)
			continue;

		*pin_mask |= BIT(pin);

		if (long_pulse_detect(pin, dig_hotplug_reg))
			*long_mask |= BIT(pin);
	}

	drm_dbg(&dev_priv->drm,
		"hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x, long 0x%08x\n",
		hotplug_trigger, dig_hotplug_reg, *pin_mask, *long_mask);
}

static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv,
				  const u32 hpd[HPD_NUM_PINS])
{
	struct intel_encoder *encoder;
	u32 enabled_irqs = 0;

	for_each_intel_encoder(&dev_priv->drm, encoder)
		if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
			enabled_irqs |= hpd[encoder->hpd_pin];

	return enabled_irqs;
}

static u32 intel_hpd_hotplug_irqs(struct drm_i915_private *dev_priv,
				  const u32 hpd[HPD_NUM_PINS])
{
	struct intel_encoder *encoder;
	u32 hotplug_irqs = 0;

	for_each_intel_encoder(&dev_priv->drm, encoder)
		hotplug_irqs |= hpd[encoder->hpd_pin];

	return hotplug_irqs;
}

static u32 intel_hpd_hotplug_enables(struct drm_i915_private *i915,
				     hotplug_enables_func hotplug_enables)
{
	struct intel_encoder *encoder;
	u32 hotplug = 0;

	for_each_intel_encoder(&i915->drm, encoder)
		hotplug |= hotplug_enables(i915, encoder->hpd_pin);

	return hotplug;
}

static void gmbus_irq_handler(struct drm_i915_private *dev_priv)
{
	wake_up_all(&dev_priv->gmbus_wait_queue);
}

static void dp_aux_irq_handler(struct drm_i915_private *dev_priv)
{
	wake_up_all(&dev_priv->gmbus_wait_queue);
}

#if defined(CONFIG_DEBUG_FS)
static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
					 enum pipe pipe,
					 u32 crc0, u32 crc1,
					 u32 crc2, u32 crc3,
					 u32 crc4)
{
	struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);
	struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc;
	u32 crcs[5] = { crc0, crc1, crc2, crc3, crc4 };

	trace_intel_pipe_crc(crtc, crcs);

	spin_lock(&pipe_crc->lock);
	/*
	 * For some not yet identified reason, the first CRC is
	 * bonkers. So let's just wait for the next vblank and read
	 * out the buggy result.
	 *
	 * On GEN8+ sometimes the second CRC is bonkers as well, so
	 * don't trust that one either.
	 */
	if (pipe_crc->skipped <= 0 ||
	    (DISPLAY_VER(dev_priv) >= 8 && pipe_crc->skipped == 1)) {
		pipe_crc->skipped++;
		spin_unlock(&pipe_crc->lock);
		return;
	}
	spin_unlock(&pipe_crc->lock);

	drm_crtc_add_crc_entry(&crtc->base, true,
			       drm_crtc_accurate_vblank_count(&crtc->base),
			       crcs);
}
#else
static inline void
display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
			     enum pipe pipe,
			     u32 crc0, u32 crc1,
			     u32 crc2, u32 crc3,
			     u32 crc4) {}
#endif

static void flip_done_handler(struct drm_i915_private *i915,
			      enum pipe pipe)
{
	struct intel_crtc *crtc = intel_crtc_for_pipe(i915, pipe);
	struct drm_crtc_state *crtc_state = crtc->base.state;
	struct drm_pending_vblank_event *e = crtc_state->event;
	struct drm_device *dev = &i915->drm;
	unsigned long irqflags;

	spin_lock_irqsave(&dev->event_lock, irqflags);

	crtc_state->event = NULL;

	drm_crtc_send_vblank_event(&crtc->base, e);

	spin_unlock_irqrestore(&dev->event_lock, irqflags);
}

static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	display_pipe_crc_irq_handler(dev_priv, pipe,
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_1_IVB(pipe)),
				     0, 0, 0, 0);
}

static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	display_pipe_crc_irq_handler(dev_priv, pipe,
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_1_IVB(pipe)),
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_2_IVB(pipe)),
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_3_IVB(pipe)),
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_4_IVB(pipe)),
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_5_IVB(pipe)));
}

static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	u32 res1, res2;

	if (DISPLAY_VER(dev_priv) >= 3)
		res1 = intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RES1_I915(pipe));
	else
		res1 = 0;

	if (DISPLAY_VER(dev_priv) >= 5 || IS_G4X(dev_priv))
		res2 = intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RES2_G4X(pipe));
	else
		res2 = 0;

	display_pipe_crc_irq_handler(dev_priv, pipe,
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RED(pipe)),
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_GREEN(pipe)),
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_BLUE(pipe)),
				     res1, res2);
}

static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		intel_uncore_write(&dev_priv->uncore, PIPESTAT(pipe),
				   PIPESTAT_INT_STATUS_MASK |
				   PIPE_FIFO_UNDERRUN_STATUS);

		dev_priv->pipestat_irq_mask[pipe] = 0;
	}
}

static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
				  u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	enum pipe pipe;

	spin_lock(&dev_priv->irq_lock);

	if (!dev_priv->display_irqs_enabled) {
		spin_unlock(&dev_priv->irq_lock);
		return;
	}

	for_each_pipe(dev_priv, pipe) {
		i915_reg_t reg;
		u32 status_mask, enable_mask, iir_bit = 0;

		/*
		 * PIPESTAT bits get signalled even when the interrupt is
		 * disabled with the mask bits, and some of the status bits do
		 * not generate interrupts at all (like the underrun bit). Hence
		 * we need to be careful that we only handle what we want to
		 * handle.
		 */

		/* fifo underruns are filtered in the underrun handler. */
		status_mask = PIPE_FIFO_UNDERRUN_STATUS;

		switch (pipe) {
		default:
		case PIPE_A:
			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
			break;
		case PIPE_B:
			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
			break;
		case PIPE_C:
			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
			break;
		}
		if (iir & iir_bit)
			status_mask |= dev_priv->pipestat_irq_mask[pipe];

		if (!status_mask)
			continue;

		reg = PIPESTAT(pipe);
		pipe_stats[pipe] = intel_uncore_read(&dev_priv->uncore, reg) & status_mask;
		enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

		/*
		 * Clear the PIPE*STAT regs before the IIR
		 *
		 * Toggle the enable bits to make sure we get an
		 * edge in the ISR pipe event bit if we don't clear
		 * all the enabled status bits. Otherwise the edge
		 * triggered IIR on i965/g4x wouldn't notice that
		 * an interrupt is still pending.
		 */
		if (pipe_stats[pipe]) {
			intel_uncore_write(&dev_priv->uncore, reg, pipe_stats[pipe]);
			intel_uncore_write(&dev_priv->uncore, reg, enable_mask);
		}
	}
	spin_unlock(&dev_priv->irq_lock);
}

static void i8xx_pipestat_irq_handler(struct drm_i915_private *dev_priv,
				      u16 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
			intel_handle_vblank(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}
}

static void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv,
				      u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	bool blc_event = false;
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
			intel_handle_vblank(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
			blc_event = true;

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (blc_event || (iir & I915_ASLE_INTERRUPT))
		intel_opregion_asle_intr(dev_priv);
}

static void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv,
				      u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	bool blc_event = false;
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
			intel_handle_vblank(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
			blc_event = true;

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (blc_event || (iir & I915_ASLE_INTERRUPT))
		intel_opregion_asle_intr(dev_priv);

	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		gmbus_irq_handler(dev_priv);
}
static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
					    u32 pipe_stats[I915_MAX_PIPES])
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
			intel_handle_vblank(dev_priv, pipe);

		if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV)
			flip_done_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		gmbus_irq_handler(dev_priv);
}

static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
{
	u32 hotplug_status = 0, hotplug_status_mask;
	int i;

	if (IS_G4X(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		hotplug_status_mask = HOTPLUG_INT_STATUS_G4X |
			DP_AUX_CHANNEL_MASK_INT_STATUS_G4X;
	else
		hotplug_status_mask = HOTPLUG_INT_STATUS_I915;

	/*
	 * We absolutely have to clear all the pending interrupt
	 * bits in PORT_HOTPLUG_STAT. Otherwise the ISR port
	 * interrupt bit won't have an edge, and the i965/g4x
	 * edge triggered IIR will not notice that an interrupt
	 * is still pending. We can't use PORT_HOTPLUG_EN to
	 * guarantee the edge as the act of toggling the enable
	 * bits can itself generate a new hotplug interrupt :(
	 */
	for (i = 0; i < 10; i++) {
		u32 tmp = intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT) & hotplug_status_mask;

		if (tmp == 0)
			return hotplug_status;

		hotplug_status |= tmp;
		intel_uncore_write(&dev_priv->uncore, PORT_HOTPLUG_STAT, hotplug_status);
	}

	drm_WARN_ONCE(&dev_priv->drm, 1,
		      "PORT_HOTPLUG_STAT did not clear (0x%08x)\n",
		      intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT));

	return hotplug_status;
}

static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv,
				 u32 hotplug_status)
{
	u32 pin_mask = 0, long_mask = 0;
	u32 hotplug_trigger;

	if (IS_G4X(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
	else
		hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

	if (hotplug_trigger) {
		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
				   hotplug_trigger, hotplug_trigger,
				   dev_priv->hotplug.hpd,
				   i9xx_port_hotplug_long_detect);

		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
	}

	if ((IS_G4X(dev_priv) ||
	     IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
		dp_aux_irq_handler(dev_priv);
}

static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 iir, gt_iir, pm_iir;
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 hotplug_status = 0;
		u32 ier = 0;

		gt_iir = intel_uncore_read(&dev_priv->uncore, GTIIR);
		pm_iir = intel_uncore_read(&dev_priv->uncore, GEN6_PMIIR);
		iir = intel_uncore_read(&dev_priv->uncore, VLV_IIR);

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			break;

		ret = IRQ_HANDLED;

		/*
		 * Theory on interrupt generation, based on empirical evidence:
		 *
		 * x = ((VLV_IIR & VLV_IER) ||
		 *      (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) &&
		 *       (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE)));
		 *
		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
		 * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to
		 * guarantee the CPU interrupt will be raised again even if we
		 * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR
		 * bits this time around.
		 */
		intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, 0);
		ier = intel_uncore_read(&dev_priv->uncore, VLV_IER);
		intel_uncore_write(&dev_priv->uncore, VLV_IER, 0);

		if (gt_iir)
			intel_uncore_write(&dev_priv->uncore, GTIIR, gt_iir);
		if (pm_iir)
			intel_uncore_write(&dev_priv->uncore, GEN6_PMIIR, pm_iir);

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & (I915_LPE_PIPE_A_INTERRUPT |
			   I915_LPE_PIPE_B_INTERRUPT))
			intel_lpe_audio_irq_handler(dev_priv);

		/*
		 * VLV_IIR is single buffered, and reflects the level
		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
		 */
		if (iir)
			intel_uncore_write(&dev_priv->uncore, VLV_IIR, iir);

		intel_uncore_write(&dev_priv->uncore, VLV_IER, ier);
		intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);

		if (gt_iir)
			gen6_gt_irq_handler(to_gt(dev_priv), gt_iir);
		if (pm_iir)
			gen6_rps_irq_handler(&to_gt(dev_priv)->rps, pm_iir);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
	} while (0);

	pmu_irq_stats(dev_priv, ret);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}

static irqreturn_t cherryview_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 master_ctl, iir;
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 hotplug_status = 0;
		u32 ier = 0;

		master_ctl = intel_uncore_read(&dev_priv->uncore, GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
		iir = intel_uncore_read(&dev_priv->uncore, VLV_IIR);

		if (master_ctl == 0 && iir == 0)
			break;

		ret = IRQ_HANDLED;

		/*
		 * Theory on interrupt generation, based on empirical evidence:
		 *
		 * x = ((VLV_IIR & VLV_IER) ||
		 *      ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) &&
		 *       (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL)));
		 *
		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
		 * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to
		 * guarantee the CPU interrupt will be raised again even if we
		 * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL
		 * bits this time around.
		 */
1775 */ 1776 intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, 0); 1777 ier = intel_uncore_read(&dev_priv->uncore, VLV_IER); 1778 intel_uncore_write(&dev_priv->uncore, VLV_IER, 0); 1779 1780 gen8_gt_irq_handler(to_gt(dev_priv), master_ctl); 1781 1782 if (iir & I915_DISPLAY_PORT_INTERRUPT) 1783 hotplug_status = i9xx_hpd_irq_ack(dev_priv); 1784 1785 /* Call regardless, as some status bits might not be 1786 * signalled in iir */ 1787 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats); 1788 1789 if (iir & (I915_LPE_PIPE_A_INTERRUPT | 1790 I915_LPE_PIPE_B_INTERRUPT | 1791 I915_LPE_PIPE_C_INTERRUPT)) 1792 intel_lpe_audio_irq_handler(dev_priv); 1793 1794 /* 1795 * VLV_IIR is single buffered, and reflects the level 1796 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last. 1797 */ 1798 if (iir) 1799 intel_uncore_write(&dev_priv->uncore, VLV_IIR, iir); 1800 1801 intel_uncore_write(&dev_priv->uncore, VLV_IER, ier); 1802 intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 1803 1804 if (hotplug_status) 1805 i9xx_hpd_irq_handler(dev_priv, hotplug_status); 1806 1807 valleyview_pipestat_irq_handler(dev_priv, pipe_stats); 1808 } while (0); 1809 1810 pmu_irq_stats(dev_priv, ret); 1811 1812 enable_rpm_wakeref_asserts(&dev_priv->runtime_pm); 1813 1814 return ret; 1815 } 1816 1817 static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv, 1818 u32 hotplug_trigger) 1819 { 1820 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; 1821 1822 /* 1823 * Somehow the PCH doesn't seem to really ack the interrupt to the CPU 1824 * unless we touch the hotplug register, even if hotplug_trigger is 1825 * zero. Not acking leads to "The master control interrupt lied (SDE)!" 1826 * errors. 1827 */ 1828 dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG); 1829 if (!hotplug_trigger) { 1830 u32 mask = PORTA_HOTPLUG_STATUS_MASK | 1831 PORTD_HOTPLUG_STATUS_MASK | 1832 PORTC_HOTPLUG_STATUS_MASK | 1833 PORTB_HOTPLUG_STATUS_MASK; 1834 dig_hotplug_reg &= ~mask; 1835 } 1836 1837 intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, dig_hotplug_reg); 1838 if (!hotplug_trigger) 1839 return; 1840 1841 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 1842 hotplug_trigger, dig_hotplug_reg, 1843 dev_priv->hotplug.pch_hpd, 1844 pch_port_hotplug_long_detect); 1845 1846 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 1847 } 1848 1849 static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) 1850 { 1851 enum pipe pipe; 1852 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK; 1853 1854 ibx_hpd_irq_handler(dev_priv, hotplug_trigger); 1855 1856 if (pch_iir & SDE_AUDIO_POWER_MASK) { 1857 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >> 1858 SDE_AUDIO_POWER_SHIFT); 1859 drm_dbg(&dev_priv->drm, "PCH audio power change on port %c\n", 1860 port_name(port)); 1861 } 1862 1863 if (pch_iir & SDE_AUX_MASK) 1864 dp_aux_irq_handler(dev_priv); 1865 1866 if (pch_iir & SDE_GMBUS) 1867 gmbus_irq_handler(dev_priv); 1868 1869 if (pch_iir & SDE_AUDIO_HDCP_MASK) 1870 drm_dbg(&dev_priv->drm, "PCH HDCP audio interrupt\n"); 1871 1872 if (pch_iir & SDE_AUDIO_TRANS_MASK) 1873 drm_dbg(&dev_priv->drm, "PCH transcoder audio interrupt\n"); 1874 1875 if (pch_iir & SDE_POISON) 1876 drm_err(&dev_priv->drm, "PCH poison interrupt\n"); 1877 1878 if (pch_iir & SDE_FDI_MASK) { 1879 for_each_pipe(dev_priv, pipe) 1880 drm_dbg(&dev_priv->drm, " pipe %c FDI IIR: 0x%08x\n", 1881 pipe_name(pipe), 1882 intel_uncore_read(&dev_priv->uncore, FDI_RX_IIR(pipe))); 1883 } 1884 1885 if (pch_iir &
(SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE)) 1886 drm_dbg(&dev_priv->drm, "PCH transcoder CRC done interrupt\n"); 1887 1888 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR)) 1889 drm_dbg(&dev_priv->drm, 1890 "PCH transcoder CRC error interrupt\n"); 1891 1892 if (pch_iir & SDE_TRANSA_FIFO_UNDER) 1893 intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A); 1894 1895 if (pch_iir & SDE_TRANSB_FIFO_UNDER) 1896 intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_B); 1897 } 1898 1899 static void ivb_err_int_handler(struct drm_i915_private *dev_priv) 1900 { 1901 u32 err_int = intel_uncore_read(&dev_priv->uncore, GEN7_ERR_INT); 1902 enum pipe pipe; 1903 1904 if (err_int & ERR_INT_POISON) 1905 drm_err(&dev_priv->drm, "Poison interrupt\n"); 1906 1907 for_each_pipe(dev_priv, pipe) { 1908 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) 1909 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 1910 1911 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) { 1912 if (IS_IVYBRIDGE(dev_priv)) 1913 ivb_pipe_crc_irq_handler(dev_priv, pipe); 1914 else 1915 hsw_pipe_crc_irq_handler(dev_priv, pipe); 1916 } 1917 } 1918 1919 intel_uncore_write(&dev_priv->uncore, GEN7_ERR_INT, err_int); 1920 } 1921 1922 static void cpt_serr_int_handler(struct drm_i915_private *dev_priv) 1923 { 1924 u32 serr_int = intel_uncore_read(&dev_priv->uncore, SERR_INT); 1925 enum pipe pipe; 1926 1927 if (serr_int & SERR_INT_POISON) 1928 drm_err(&dev_priv->drm, "PCH poison interrupt\n"); 1929 1930 for_each_pipe(dev_priv, pipe) 1931 if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe)) 1932 intel_pch_fifo_underrun_irq_handler(dev_priv, pipe); 1933 1934 intel_uncore_write(&dev_priv->uncore, SERR_INT, serr_int); 1935 } 1936 1937 static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) 1938 { 1939 enum pipe pipe; 1940 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; 1941 1942 ibx_hpd_irq_handler(dev_priv, hotplug_trigger); 1943 1944 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) { 1945 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> 1946 SDE_AUDIO_POWER_SHIFT_CPT); 1947 drm_dbg(&dev_priv->drm, "PCH audio power change on port %c\n", 1948 port_name(port)); 1949 } 1950 1951 if (pch_iir & SDE_AUX_MASK_CPT) 1952 dp_aux_irq_handler(dev_priv); 1953 1954 if (pch_iir & SDE_GMBUS_CPT) 1955 gmbus_irq_handler(dev_priv); 1956 1957 if (pch_iir & SDE_AUDIO_CP_REQ_CPT) 1958 drm_dbg(&dev_priv->drm, "Audio CP request interrupt\n"); 1959 1960 if (pch_iir & SDE_AUDIO_CP_CHG_CPT) 1961 drm_dbg(&dev_priv->drm, "Audio CP change interrupt\n"); 1962 1963 if (pch_iir & SDE_FDI_MASK_CPT) { 1964 for_each_pipe(dev_priv, pipe) 1965 drm_dbg(&dev_priv->drm, " pipe %c FDI IIR: 0x%08x\n", 1966 pipe_name(pipe), 1967 intel_uncore_read(&dev_priv->uncore, FDI_RX_IIR(pipe))); 1968 } 1969 1970 if (pch_iir & SDE_ERROR_CPT) 1971 cpt_serr_int_handler(dev_priv); 1972 } 1973 1974 static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) 1975 { 1976 u32 ddi_hotplug_trigger = pch_iir & SDE_DDI_HOTPLUG_MASK_ICP; 1977 u32 tc_hotplug_trigger = pch_iir & SDE_TC_HOTPLUG_MASK_ICP; 1978 u32 pin_mask = 0, long_mask = 0; 1979 1980 if (ddi_hotplug_trigger) { 1981 u32 dig_hotplug_reg; 1982 1983 dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, SHOTPLUG_CTL_DDI); 1984 intel_uncore_write(&dev_priv->uncore, SHOTPLUG_CTL_DDI, dig_hotplug_reg); 1985 1986 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 1987 ddi_hotplug_trigger, dig_hotplug_reg, 1988 dev_priv->hotplug.pch_hpd, 1989 icp_ddi_port_hotplug_long_detect); 1990 } 1991 1992 if (tc_hotplug_trigger) { 1993 u32 
dig_hotplug_reg; 1994 1995 dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, SHOTPLUG_CTL_TC); 1996 intel_uncore_write(&dev_priv->uncore, SHOTPLUG_CTL_TC, dig_hotplug_reg); 1997 1998 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 1999 tc_hotplug_trigger, dig_hotplug_reg, 2000 dev_priv->hotplug.pch_hpd, 2001 icp_tc_port_hotplug_long_detect); 2002 } 2003 2004 if (pin_mask) 2005 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2006 2007 if (pch_iir & SDE_GMBUS_ICP) 2008 gmbus_irq_handler(dev_priv); 2009 } 2010 2011 static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) 2012 { 2013 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT & 2014 ~SDE_PORTE_HOTPLUG_SPT; 2015 u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT; 2016 u32 pin_mask = 0, long_mask = 0; 2017 2018 if (hotplug_trigger) { 2019 u32 dig_hotplug_reg; 2020 2021 dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG); 2022 intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, dig_hotplug_reg); 2023 2024 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 2025 hotplug_trigger, dig_hotplug_reg, 2026 dev_priv->hotplug.pch_hpd, 2027 spt_port_hotplug_long_detect); 2028 } 2029 2030 if (hotplug2_trigger) { 2031 u32 dig_hotplug_reg; 2032 2033 dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG2); 2034 intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG2, dig_hotplug_reg); 2035 2036 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 2037 hotplug2_trigger, dig_hotplug_reg, 2038 dev_priv->hotplug.pch_hpd, 2039 spt_port_hotplug2_long_detect); 2040 } 2041 2042 if (pin_mask) 2043 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2044 2045 if (pch_iir & SDE_GMBUS_CPT) 2046 gmbus_irq_handler(dev_priv); 2047 } 2048 2049 static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv, 2050 u32 hotplug_trigger) 2051 { 2052 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; 2053 2054 dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL); 2055 intel_uncore_write(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg); 2056 2057 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 2058 hotplug_trigger, dig_hotplug_reg, 2059 dev_priv->hotplug.hpd, 2060 ilk_port_hotplug_long_detect); 2061 2062 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2063 } 2064 2065 static void ilk_display_irq_handler(struct drm_i915_private *dev_priv, 2066 u32 de_iir) 2067 { 2068 enum pipe pipe; 2069 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG; 2070 2071 if (hotplug_trigger) 2072 ilk_hpd_irq_handler(dev_priv, hotplug_trigger); 2073 2074 if (de_iir & DE_AUX_CHANNEL_A) 2075 dp_aux_irq_handler(dev_priv); 2076 2077 if (de_iir & DE_GSE) 2078 intel_opregion_asle_intr(dev_priv); 2079 2080 if (de_iir & DE_POISON) 2081 drm_err(&dev_priv->drm, "Poison interrupt\n"); 2082 2083 for_each_pipe(dev_priv, pipe) { 2084 if (de_iir & DE_PIPE_VBLANK(pipe)) 2085 intel_handle_vblank(dev_priv, pipe); 2086 2087 if (de_iir & DE_PLANE_FLIP_DONE(pipe)) 2088 flip_done_handler(dev_priv, pipe); 2089 2090 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe)) 2091 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2092 2093 if (de_iir & DE_PIPE_CRC_DONE(pipe)) 2094 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 2095 } 2096 2097 /* check event from PCH */ 2098 if (de_iir & DE_PCH_EVENT) { 2099 u32 pch_iir = intel_uncore_read(&dev_priv->uncore, SDEIIR); 2100 2101 if (HAS_PCH_CPT(dev_priv)) 2102 cpt_irq_handler(dev_priv, pch_iir); 2103 else 2104 ibx_irq_handler(dev_priv, pch_iir); 2105 2106 /* 
clear the PCH hotplug event before clearing the CPU irq */ 2107 intel_uncore_write(&dev_priv->uncore, SDEIIR, pch_iir); 2108 } 2109 2110 if (DISPLAY_VER(dev_priv) == 5 && de_iir & DE_PCU_EVENT) 2111 gen5_rps_irq_handler(&to_gt(dev_priv)->rps); 2112 } 2113 2114 static void ivb_display_irq_handler(struct drm_i915_private *dev_priv, 2115 u32 de_iir) 2116 { 2117 enum pipe pipe; 2118 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB; 2119 2120 if (hotplug_trigger) 2121 ilk_hpd_irq_handler(dev_priv, hotplug_trigger); 2122 2123 if (de_iir & DE_ERR_INT_IVB) 2124 ivb_err_int_handler(dev_priv); 2125 2126 if (de_iir & DE_AUX_CHANNEL_A_IVB) 2127 dp_aux_irq_handler(dev_priv); 2128 2129 if (de_iir & DE_GSE_IVB) 2130 intel_opregion_asle_intr(dev_priv); 2131 2132 for_each_pipe(dev_priv, pipe) { 2133 if (de_iir & DE_PIPE_VBLANK_IVB(pipe)) 2134 intel_handle_vblank(dev_priv, pipe); 2135 2136 if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) 2137 flip_done_handler(dev_priv, pipe); 2138 } 2139 2140 /* check event from PCH */ 2141 if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) { 2142 u32 pch_iir = intel_uncore_read(&dev_priv->uncore, SDEIIR); 2143 2144 cpt_irq_handler(dev_priv, pch_iir); 2145 2146 /* clear the PCH hotplug event before clearing the CPU irq */ 2147 intel_uncore_write(&dev_priv->uncore, SDEIIR, pch_iir); 2148 } 2149 } 2150 2151 /* 2152 * To handle irqs with the minimum potential for races with fresh interrupts, we: 2153 * 1 - Disable Master Interrupt Control. 2154 * 2 - Find the source(s) of the interrupt. 2155 * 3 - Clear the Interrupt Identity bits (IIR). 2156 * 4 - Process the interrupt(s) that had bits set in the IIRs. 2157 * 5 - Re-enable Master Interrupt Control. 2158 */ 2159 static irqreturn_t ilk_irq_handler(int irq, void *arg) 2160 { 2161 struct drm_i915_private *i915 = arg; 2162 void __iomem * const regs = i915->uncore.regs; 2163 u32 de_iir, gt_iir, de_ier, sde_ier = 0; 2164 irqreturn_t ret = IRQ_NONE; 2165 2166 if (unlikely(!intel_irqs_enabled(i915))) 2167 return IRQ_NONE; 2168 2169 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2170 disable_rpm_wakeref_asserts(&i915->runtime_pm); 2171 2172 /* disable master interrupt before clearing iir */ 2173 de_ier = raw_reg_read(regs, DEIER); 2174 raw_reg_write(regs, DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); 2175 2176 /* Disable south interrupts. We'll only write to SDEIIR once, so further 2177 * interrupts will be stored on its back queue, and then we'll be 2178 * able to process them after we restore SDEIER (as soon as we restore 2179 * it, we'll get an interrupt if SDEIIR still has something to process 2180 * due to its back queue).
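 *
 * SDEIER itself is only restored at the very end of the handler,
 * after DEIER, so anything still queued in SDEIIR retriggers the
 * interrupt at that point.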
*/ 2181 if (!HAS_PCH_NOP(i915)) { 2182 sde_ier = raw_reg_read(regs, SDEIER); 2183 raw_reg_write(regs, SDEIER, 0); 2184 } 2185 2186 /* Find, clear, then process each source of interrupt */ 2187 2188 gt_iir = raw_reg_read(regs, GTIIR); 2189 if (gt_iir) { 2190 raw_reg_write(regs, GTIIR, gt_iir); 2191 if (GRAPHICS_VER(i915) >= 6) 2192 gen6_gt_irq_handler(to_gt(i915), gt_iir); 2193 else 2194 gen5_gt_irq_handler(to_gt(i915), gt_iir); 2195 ret = IRQ_HANDLED; 2196 } 2197 2198 de_iir = raw_reg_read(regs, DEIIR); 2199 if (de_iir) { 2200 raw_reg_write(regs, DEIIR, de_iir); 2201 if (DISPLAY_VER(i915) >= 7) 2202 ivb_display_irq_handler(i915, de_iir); 2203 else 2204 ilk_display_irq_handler(i915, de_iir); 2205 ret = IRQ_HANDLED; 2206 } 2207 2208 if (GRAPHICS_VER(i915) >= 6) { 2209 u32 pm_iir = raw_reg_read(regs, GEN6_PMIIR); 2210 if (pm_iir) { 2211 raw_reg_write(regs, GEN6_PMIIR, pm_iir); 2212 gen6_rps_irq_handler(&to_gt(i915)->rps, pm_iir); 2213 ret = IRQ_HANDLED; 2214 } 2215 } 2216 2217 raw_reg_write(regs, DEIER, de_ier); 2218 if (sde_ier) 2219 raw_reg_write(regs, SDEIER, sde_ier); 2220 2221 pmu_irq_stats(i915, ret); 2222 2223 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2224 enable_rpm_wakeref_asserts(&i915->runtime_pm); 2225 2226 return ret; 2227 } 2228 2229 static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv, 2230 u32 hotplug_trigger) 2231 { 2232 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; 2233 2234 dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG); 2235 intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, dig_hotplug_reg); 2236 2237 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 2238 hotplug_trigger, dig_hotplug_reg, 2239 dev_priv->hotplug.hpd, 2240 bxt_port_hotplug_long_detect); 2241 2242 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2243 } 2244 2245 static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir) 2246 { 2247 u32 pin_mask = 0, long_mask = 0; 2248 u32 trigger_tc = iir & GEN11_DE_TC_HOTPLUG_MASK; 2249 u32 trigger_tbt = iir & GEN11_DE_TBT_HOTPLUG_MASK; 2250 2251 if (trigger_tc) { 2252 u32 dig_hotplug_reg; 2253 2254 dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL); 2255 intel_uncore_write(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL, dig_hotplug_reg); 2256 2257 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 2258 trigger_tc, dig_hotplug_reg, 2259 dev_priv->hotplug.hpd, 2260 gen11_port_hotplug_long_detect); 2261 } 2262 2263 if (trigger_tbt) { 2264 u32 dig_hotplug_reg; 2265 2266 dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL); 2267 intel_uncore_write(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL, dig_hotplug_reg); 2268 2269 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 2270 trigger_tbt, dig_hotplug_reg, 2271 dev_priv->hotplug.hpd, 2272 gen11_port_hotplug_long_detect); 2273 } 2274 2275 if (pin_mask) 2276 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2277 else 2278 drm_err(&dev_priv->drm, 2279 "Unexpected DE HPD interrupt 0x%08x\n", iir); 2280 } 2281 2282 static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv) 2283 { 2284 u32 mask; 2285 2286 if (DISPLAY_VER(dev_priv) >= 13) 2287 return TGL_DE_PORT_AUX_DDIA | 2288 TGL_DE_PORT_AUX_DDIB | 2289 TGL_DE_PORT_AUX_DDIC | 2290 XELPD_DE_PORT_AUX_DDID | 2291 XELPD_DE_PORT_AUX_DDIE | 2292 TGL_DE_PORT_AUX_USBC1 | 2293 TGL_DE_PORT_AUX_USBC2 | 2294 TGL_DE_PORT_AUX_USBC3 | 2295 TGL_DE_PORT_AUX_USBC4; 2296 else if (DISPLAY_VER(dev_priv) >= 12) 2297 return TGL_DE_PORT_AUX_DDIA | 
2298 TGL_DE_PORT_AUX_DDIB | 2299 TGL_DE_PORT_AUX_DDIC | 2300 TGL_DE_PORT_AUX_USBC1 | 2301 TGL_DE_PORT_AUX_USBC2 | 2302 TGL_DE_PORT_AUX_USBC3 | 2303 TGL_DE_PORT_AUX_USBC4 | 2304 TGL_DE_PORT_AUX_USBC5 | 2305 TGL_DE_PORT_AUX_USBC6; 2306 2307 2308 mask = GEN8_AUX_CHANNEL_A; 2309 if (DISPLAY_VER(dev_priv) >= 9) 2310 mask |= GEN9_AUX_CHANNEL_B | 2311 GEN9_AUX_CHANNEL_C | 2312 GEN9_AUX_CHANNEL_D; 2313 2314 if (DISPLAY_VER(dev_priv) == 11) { 2315 mask |= ICL_AUX_CHANNEL_F; 2316 mask |= ICL_AUX_CHANNEL_E; 2317 } 2318 2319 return mask; 2320 } 2321 2322 static u32 gen8_de_pipe_fault_mask(struct drm_i915_private *dev_priv) 2323 { 2324 if (DISPLAY_VER(dev_priv) >= 13 || HAS_D12_PLANE_MINIMIZATION(dev_priv)) 2325 return RKL_DE_PIPE_IRQ_FAULT_ERRORS; 2326 else if (DISPLAY_VER(dev_priv) >= 11) 2327 return GEN11_DE_PIPE_IRQ_FAULT_ERRORS; 2328 else if (DISPLAY_VER(dev_priv) >= 9) 2329 return GEN9_DE_PIPE_IRQ_FAULT_ERRORS; 2330 else 2331 return GEN8_DE_PIPE_IRQ_FAULT_ERRORS; 2332 } 2333 2334 static void 2335 gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir) 2336 { 2337 bool found = false; 2338 2339 if (iir & GEN8_DE_MISC_GSE) { 2340 intel_opregion_asle_intr(dev_priv); 2341 found = true; 2342 } 2343 2344 if (iir & GEN8_DE_EDP_PSR) { 2345 struct intel_encoder *encoder; 2346 u32 psr_iir; 2347 i915_reg_t iir_reg; 2348 2349 for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) { 2350 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 2351 2352 if (DISPLAY_VER(dev_priv) >= 12) 2353 iir_reg = TRANS_PSR_IIR(intel_dp->psr.transcoder); 2354 else 2355 iir_reg = EDP_PSR_IIR; 2356 2357 psr_iir = intel_uncore_read(&dev_priv->uncore, iir_reg); 2358 intel_uncore_write(&dev_priv->uncore, iir_reg, psr_iir); 2359 2360 if (psr_iir) 2361 found = true; 2362 2363 intel_psr_irq_handler(intel_dp, psr_iir); 2364 2365 /* prior to GEN12 there is only one EDP PSR */ 2366 if (DISPLAY_VER(dev_priv) < 12) 2367 break; 2368 } 2369 } 2370 2371 if (!found) 2372 drm_err(&dev_priv->drm, "Unexpected DE Misc interrupt\n"); 2373 } 2374 2375 static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv, 2376 u32 te_trigger) 2377 { 2378 enum pipe pipe = INVALID_PIPE; 2379 enum transcoder dsi_trans; 2380 enum port port; 2381 u32 val, tmp; 2382 2383 /* 2384 * In case of dual link, TE comes from DSI_1, 2385 * so check whether dual link is enabled 2386 */ 2387 val = intel_uncore_read(&dev_priv->uncore, TRANS_DDI_FUNC_CTL2(TRANSCODER_DSI_0)); 2388 val &= PORT_SYNC_MODE_ENABLE; 2389 2390 /* 2391 * if dual link is enabled, then read DSI_0 2392 * transcoder registers 2393 */ 2394 port = ((te_trigger & DSI1_TE && val) || (te_trigger & DSI0_TE)) ? 2395 PORT_A : PORT_B; 2396 dsi_trans = (port == PORT_A) ?
TRANSCODER_DSI_0 : TRANSCODER_DSI_1; 2397 2398 /* Check if DSI configured in command mode */ 2399 val = intel_uncore_read(&dev_priv->uncore, DSI_TRANS_FUNC_CONF(dsi_trans)); 2400 val = val & OP_MODE_MASK; 2401 2402 if (val != CMD_MODE_NO_GATE && val != CMD_MODE_TE_GATE) { 2403 drm_err(&dev_priv->drm, "DSI transcoder not configured in command mode\n"); 2404 return; 2405 } 2406 2407 /* Get PIPE for handling VBLANK event */ 2408 val = intel_uncore_read(&dev_priv->uncore, TRANS_DDI_FUNC_CTL(dsi_trans)); 2409 switch (val & TRANS_DDI_EDP_INPUT_MASK) { 2410 case TRANS_DDI_EDP_INPUT_A_ON: 2411 pipe = PIPE_A; 2412 break; 2413 case TRANS_DDI_EDP_INPUT_B_ONOFF: 2414 pipe = PIPE_B; 2415 break; 2416 case TRANS_DDI_EDP_INPUT_C_ONOFF: 2417 pipe = PIPE_C; 2418 break; 2419 default: 2420 drm_err(&dev_priv->drm, "Invalid PIPE\n"); 2421 return; 2422 } 2423 2424 intel_handle_vblank(dev_priv, pipe); 2425 2426 /* clear TE in dsi IIR */ 2427 port = (te_trigger & DSI1_TE) ? PORT_B : PORT_A; 2428 tmp = intel_uncore_read(&dev_priv->uncore, DSI_INTR_IDENT_REG(port)); 2429 intel_uncore_write(&dev_priv->uncore, DSI_INTR_IDENT_REG(port), tmp); 2430 } 2431 2432 static u32 gen8_de_pipe_flip_done_mask(struct drm_i915_private *i915) 2433 { 2434 if (DISPLAY_VER(i915) >= 9) 2435 return GEN9_PIPE_PLANE1_FLIP_DONE; 2436 else 2437 return GEN8_PIPE_PRIMARY_FLIP_DONE; 2438 } 2439 2440 u32 gen8_de_pipe_underrun_mask(struct drm_i915_private *dev_priv) 2441 { 2442 u32 mask = GEN8_PIPE_FIFO_UNDERRUN; 2443 2444 if (DISPLAY_VER(dev_priv) >= 13) 2445 mask |= XELPD_PIPE_SOFT_UNDERRUN | 2446 XELPD_PIPE_HARD_UNDERRUN; 2447 2448 return mask; 2449 } 2450 2451 static irqreturn_t 2452 gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) 2453 { 2454 irqreturn_t ret = IRQ_NONE; 2455 u32 iir; 2456 enum pipe pipe; 2457 2458 drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_DISPLAY(dev_priv)); 2459 2460 if (master_ctl & GEN8_DE_MISC_IRQ) { 2461 iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_MISC_IIR); 2462 if (iir) { 2463 intel_uncore_write(&dev_priv->uncore, GEN8_DE_MISC_IIR, iir); 2464 ret = IRQ_HANDLED; 2465 gen8_de_misc_irq_handler(dev_priv, iir); 2466 } else { 2467 drm_err(&dev_priv->drm, 2468 "The master control interrupt lied (DE MISC)!\n"); 2469 } 2470 } 2471 2472 if (DISPLAY_VER(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) { 2473 iir = intel_uncore_read(&dev_priv->uncore, GEN11_DE_HPD_IIR); 2474 if (iir) { 2475 intel_uncore_write(&dev_priv->uncore, GEN11_DE_HPD_IIR, iir); 2476 ret = IRQ_HANDLED; 2477 gen11_hpd_irq_handler(dev_priv, iir); 2478 } else { 2479 drm_err(&dev_priv->drm, 2480 "The master control interrupt lied (DE HPD)!\n"); 2481 } 2482 } 2483 2484 if (master_ctl & GEN8_DE_PORT_IRQ) { 2485 iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PORT_IIR); 2486 if (iir) { 2487 bool found = false; 2488 2489 intel_uncore_write(&dev_priv->uncore, GEN8_DE_PORT_IIR, iir); 2490 ret = IRQ_HANDLED; 2491 2492 if (iir & gen8_de_port_aux_mask(dev_priv)) { 2493 dp_aux_irq_handler(dev_priv); 2494 found = true; 2495 } 2496 2497 if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) { 2498 u32 hotplug_trigger = iir & BXT_DE_PORT_HOTPLUG_MASK; 2499 2500 if (hotplug_trigger) { 2501 bxt_hpd_irq_handler(dev_priv, hotplug_trigger); 2502 found = true; 2503 } 2504 } else if (IS_BROADWELL(dev_priv)) { 2505 u32 hotplug_trigger = iir & BDW_DE_PORT_HOTPLUG_MASK; 2506 2507 if (hotplug_trigger) { 2508 ilk_hpd_irq_handler(dev_priv, hotplug_trigger); 2509 found = true; 2510 } 2511 } 2512 2513 if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
2514 (iir & BXT_DE_PORT_GMBUS)) { 2515 gmbus_irq_handler(dev_priv); 2516 found = true; 2517 } 2518 2519 if (DISPLAY_VER(dev_priv) >= 11) { 2520 u32 te_trigger = iir & (DSI0_TE | DSI1_TE); 2521 2522 if (te_trigger) { 2523 gen11_dsi_te_interrupt_handler(dev_priv, te_trigger); 2524 found = true; 2525 } 2526 } 2527 2528 if (!found) 2529 drm_err(&dev_priv->drm, 2530 "Unexpected DE Port interrupt\n"); 2531 } else { 2532 drm_err(&dev_priv->drm, 2533 "The master control interrupt lied (DE PORT)!\n"); 2534 } 2535 } 2536 2537 for_each_pipe(dev_priv, pipe) { 2538 u32 fault_errors; 2539 2540 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe))) 2541 continue; 2542 2543 iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PIPE_IIR(pipe)); 2544 if (!iir) { 2545 drm_err(&dev_priv->drm, 2546 "The master control interrupt lied (DE PIPE)!\n"); 2547 continue; 2548 } 2549 2550 ret = IRQ_HANDLED; 2551 intel_uncore_write(&dev_priv->uncore, GEN8_DE_PIPE_IIR(pipe), iir); 2552 2553 if (iir & GEN8_PIPE_VBLANK) 2554 intel_handle_vblank(dev_priv, pipe); 2555 2556 if (iir & gen8_de_pipe_flip_done_mask(dev_priv)) 2557 flip_done_handler(dev_priv, pipe); 2558 2559 if (iir & GEN8_PIPE_CDCLK_CRC_DONE) 2560 hsw_pipe_crc_irq_handler(dev_priv, pipe); 2561 2562 if (iir & gen8_de_pipe_underrun_mask(dev_priv)) 2563 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2564 2565 fault_errors = iir & gen8_de_pipe_fault_mask(dev_priv); 2566 if (fault_errors) 2567 drm_err(&dev_priv->drm, 2568 "Fault errors on pipe %c: 0x%08x\n", 2569 pipe_name(pipe), 2570 fault_errors); 2571 } 2572 2573 if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) && 2574 master_ctl & GEN8_DE_PCH_IRQ) { 2575 /* 2576 * FIXME(BDW): Assume for now that the new interrupt handling 2577 * scheme also closed the SDE interrupt handling race we've seen 2578 * on older pch-split platforms. But this needs testing. 2579 */ 2580 iir = intel_uncore_read(&dev_priv->uncore, SDEIIR); 2581 if (iir) { 2582 intel_uncore_write(&dev_priv->uncore, SDEIIR, iir); 2583 ret = IRQ_HANDLED; 2584 2585 if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) 2586 icp_irq_handler(dev_priv, iir); 2587 else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT) 2588 spt_irq_handler(dev_priv, iir); 2589 else 2590 cpt_irq_handler(dev_priv, iir); 2591 } else { 2592 /* 2593 * Like on previous PCH there seems to be something 2594 * fishy going on with forwarding PCH interrupts. 2595 */ 2596 drm_dbg(&dev_priv->drm, 2597 "The master control interrupt lied (SDE)!\n"); 2598 } 2599 } 2600 2601 return ret; 2602 } 2603 2604 static inline u32 gen8_master_intr_disable(void __iomem * const regs) 2605 { 2606 raw_reg_write(regs, GEN8_MASTER_IRQ, 0); 2607 2608 /* 2609 * Now with master disabled, get a sample of level indications 2610 * for this interrupt. Indications will be cleared on related acks. 2611 * New indications can and will light up during processing, 2612 * and will generate new interrupt after enabling master.
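 *
 * In the interrupt handler the returned sample is what
 * gen8_irq_handler() dispatches on; a zero sample means the interrupt
 * was not ours and the caller simply re-enables the master bit.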
2613 */ 2614 return raw_reg_read(regs, GEN8_MASTER_IRQ); 2615 } 2616 2617 static inline void gen8_master_intr_enable(void __iomem * const regs) 2618 { 2619 raw_reg_write(regs, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 2620 } 2621 2622 static irqreturn_t gen8_irq_handler(int irq, void *arg) 2623 { 2624 struct drm_i915_private *dev_priv = arg; 2625 void __iomem * const regs = dev_priv->uncore.regs; 2626 u32 master_ctl; 2627 2628 if (!intel_irqs_enabled(dev_priv)) 2629 return IRQ_NONE; 2630 2631 master_ctl = gen8_master_intr_disable(regs); 2632 if (!master_ctl) { 2633 gen8_master_intr_enable(regs); 2634 return IRQ_NONE; 2635 } 2636 2637 /* Find, queue (onto bottom-halves), then clear each source */ 2638 gen8_gt_irq_handler(to_gt(dev_priv), master_ctl); 2639 2640 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2641 if (master_ctl & ~GEN8_GT_IRQS) { 2642 disable_rpm_wakeref_asserts(&dev_priv->runtime_pm); 2643 gen8_de_irq_handler(dev_priv, master_ctl); 2644 enable_rpm_wakeref_asserts(&dev_priv->runtime_pm); 2645 } 2646 2647 gen8_master_intr_enable(regs); 2648 2649 pmu_irq_stats(dev_priv, IRQ_HANDLED); 2650 2651 return IRQ_HANDLED; 2652 } 2653 2654 static u32 2655 gen11_gu_misc_irq_ack(struct intel_gt *gt, const u32 master_ctl) 2656 { 2657 void __iomem * const regs = gt->uncore->regs; 2658 u32 iir; 2659 2660 if (!(master_ctl & GEN11_GU_MISC_IRQ)) 2661 return 0; 2662 2663 iir = raw_reg_read(regs, GEN11_GU_MISC_IIR); 2664 if (likely(iir)) 2665 raw_reg_write(regs, GEN11_GU_MISC_IIR, iir); 2666 2667 return iir; 2668 } 2669 2670 static void 2671 gen11_gu_misc_irq_handler(struct intel_gt *gt, const u32 iir) 2672 { 2673 if (iir & GEN11_GU_MISC_GSE) 2674 intel_opregion_asle_intr(gt->i915); 2675 } 2676 2677 static inline u32 gen11_master_intr_disable(void __iomem * const regs) 2678 { 2679 raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0); 2680 2681 /* 2682 * Now with master disabled, get a sample of level indications 2683 * for this interrupt. Indications will be cleared on related acks. 2684 * New indications can and will light up during processing, 2685 * and will generate new interrupt after enabling master. 2686 */ 2687 return raw_reg_read(regs, GEN11_GFX_MSTR_IRQ); 2688 } 2689 2690 static inline void gen11_master_intr_enable(void __iomem * const regs) 2691 { 2692 raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ); 2693 } 2694 2695 static void 2696 gen11_display_irq_handler(struct drm_i915_private *i915) 2697 { 2698 void __iomem * const regs = i915->uncore.regs; 2699 const u32 disp_ctl = raw_reg_read(regs, GEN11_DISPLAY_INT_CTL); 2700 2701 disable_rpm_wakeref_asserts(&i915->runtime_pm); 2702 /* 2703 * GEN11_DISPLAY_INT_CTL has same format as GEN8_MASTER_IRQ 2704 * for the display related bits. 
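 * Writing 0 keeps the display interrupt disabled while the DE handler
 * runs; GEN11_DISPLAY_IRQ_ENABLE is written back below once it is done.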
2705 */ 2706 raw_reg_write(regs, GEN11_DISPLAY_INT_CTL, 0x0); 2707 gen8_de_irq_handler(i915, disp_ctl); 2708 raw_reg_write(regs, GEN11_DISPLAY_INT_CTL, 2709 GEN11_DISPLAY_IRQ_ENABLE); 2710 2711 enable_rpm_wakeref_asserts(&i915->runtime_pm); 2712 } 2713 2714 static irqreturn_t gen11_irq_handler(int irq, void *arg) 2715 { 2716 struct drm_i915_private *i915 = arg; 2717 void __iomem * const regs = i915->uncore.regs; 2718 struct intel_gt *gt = to_gt(i915); 2719 u32 master_ctl; 2720 u32 gu_misc_iir; 2721 2722 if (!intel_irqs_enabled(i915)) 2723 return IRQ_NONE; 2724 2725 master_ctl = gen11_master_intr_disable(regs); 2726 if (!master_ctl) { 2727 gen11_master_intr_enable(regs); 2728 return IRQ_NONE; 2729 } 2730 2731 /* Find, queue (onto bottom-halves), then clear each source */ 2732 gen11_gt_irq_handler(gt, master_ctl); 2733 2734 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2735 if (master_ctl & GEN11_DISPLAY_IRQ) 2736 gen11_display_irq_handler(i915); 2737 2738 gu_misc_iir = gen11_gu_misc_irq_ack(gt, master_ctl); 2739 2740 gen11_master_intr_enable(regs); 2741 2742 gen11_gu_misc_irq_handler(gt, gu_misc_iir); 2743 2744 pmu_irq_stats(i915, IRQ_HANDLED); 2745 2746 return IRQ_HANDLED; 2747 } 2748 2749 static inline u32 dg1_master_intr_disable(void __iomem * const regs) 2750 { 2751 u32 val; 2752 2753 /* First disable interrupts */ 2754 raw_reg_write(regs, DG1_MSTR_TILE_INTR, 0); 2755 2756 /* Get the indication levels and ack the master unit */ 2757 val = raw_reg_read(regs, DG1_MSTR_TILE_INTR); 2758 if (unlikely(!val)) 2759 return 0; 2760 2761 raw_reg_write(regs, DG1_MSTR_TILE_INTR, val); 2762 2763 return val; 2764 } 2765 2766 static inline void dg1_master_intr_enable(void __iomem * const regs) 2767 { 2768 raw_reg_write(regs, DG1_MSTR_TILE_INTR, DG1_MSTR_IRQ); 2769 } 2770 2771 static irqreturn_t dg1_irq_handler(int irq, void *arg) 2772 { 2773 struct drm_i915_private * const i915 = arg; 2774 struct intel_gt *gt = to_gt(i915); 2775 void __iomem * const regs = gt->uncore->regs; 2776 u32 master_tile_ctl, master_ctl; 2777 u32 gu_misc_iir; 2778 2779 if (!intel_irqs_enabled(i915)) 2780 return IRQ_NONE; 2781 2782 master_tile_ctl = dg1_master_intr_disable(regs); 2783 if (!master_tile_ctl) { 2784 dg1_master_intr_enable(regs); 2785 return IRQ_NONE; 2786 } 2787 2788 /* FIXME: we only support tile 0 for now. 
*/ 2789 if (master_tile_ctl & DG1_MSTR_TILE(0)) { 2790 master_ctl = raw_reg_read(regs, GEN11_GFX_MSTR_IRQ); 2791 raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, master_ctl); 2792 } else { 2793 drm_err(&i915->drm, "Tile not supported: 0x%08x\n", master_tile_ctl); 2794 dg1_master_intr_enable(regs); 2795 return IRQ_NONE; 2796 } 2797 2798 gen11_gt_irq_handler(gt, master_ctl); 2799 2800 if (master_ctl & GEN11_DISPLAY_IRQ) 2801 gen11_display_irq_handler(i915); 2802 2803 gu_misc_iir = gen11_gu_misc_irq_ack(gt, master_ctl); 2804 2805 dg1_master_intr_enable(regs); 2806 2807 gen11_gu_misc_irq_handler(gt, gu_misc_iir); 2808 2809 pmu_irq_stats(i915, IRQ_HANDLED); 2810 2811 return IRQ_HANDLED; 2812 } 2813 2814 /* Called from drm generic code, passed 'crtc' which 2815 * we use as a pipe index 2816 */ 2817 int i8xx_enable_vblank(struct drm_crtc *crtc) 2818 { 2819 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 2820 enum pipe pipe = to_intel_crtc(crtc)->pipe; 2821 unsigned long irqflags; 2822 2823 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2824 i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS); 2825 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2826 2827 return 0; 2828 } 2829 2830 int i915gm_enable_vblank(struct drm_crtc *crtc) 2831 { 2832 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 2833 2834 /* 2835 * Vblank interrupts fail to wake the device up from C2+. 2836 * Disabling render clock gating during C-states avoids 2837 * the problem. There is a small power cost so we do this 2838 * only when vblank interrupts are actually enabled. 2839 */ 2840 if (dev_priv->vblank_enabled++ == 0) 2841 intel_uncore_write(&dev_priv->uncore, SCPD0, _MASKED_BIT_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE)); 2842 2843 return i8xx_enable_vblank(crtc); 2844 } 2845 2846 int i965_enable_vblank(struct drm_crtc *crtc) 2847 { 2848 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 2849 enum pipe pipe = to_intel_crtc(crtc)->pipe; 2850 unsigned long irqflags; 2851 2852 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2853 i915_enable_pipestat(dev_priv, pipe, 2854 PIPE_START_VBLANK_INTERRUPT_STATUS); 2855 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2856 2857 return 0; 2858 } 2859 2860 int ilk_enable_vblank(struct drm_crtc *crtc) 2861 { 2862 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 2863 enum pipe pipe = to_intel_crtc(crtc)->pipe; 2864 unsigned long irqflags; 2865 u32 bit = DISPLAY_VER(dev_priv) >= 7 ? 2866 DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe); 2867 2868 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2869 ilk_enable_display_irq(dev_priv, bit); 2870 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2871 2872 /* Even though there is no DMC, frame counter can get stuck when 2873 * PSR is active as no frames are generated.
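 * Resync the counter via drm_crtc_vblank_restore() when vblanks are
 * re-enabled, as bdw_enable_vblank() does as well.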
2874 */ 2875 if (HAS_PSR(dev_priv)) 2876 drm_crtc_vblank_restore(crtc); 2877 2878 return 0; 2879 } 2880 2881 static bool gen11_dsi_configure_te(struct intel_crtc *intel_crtc, 2882 bool enable) 2883 { 2884 struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev); 2885 enum port port; 2886 u32 tmp; 2887 2888 if (!(intel_crtc->mode_flags & 2889 (I915_MODE_FLAG_DSI_USE_TE1 | I915_MODE_FLAG_DSI_USE_TE0))) 2890 return false; 2891 2892 /* for dual link cases we consider TE from slave */ 2893 if (intel_crtc->mode_flags & I915_MODE_FLAG_DSI_USE_TE1) 2894 port = PORT_B; 2895 else 2896 port = PORT_A; 2897 2898 tmp = intel_uncore_read(&dev_priv->uncore, DSI_INTR_MASK_REG(port)); 2899 if (enable) 2900 tmp &= ~DSI_TE_EVENT; 2901 else 2902 tmp |= DSI_TE_EVENT; 2903 2904 intel_uncore_write(&dev_priv->uncore, DSI_INTR_MASK_REG(port), tmp); 2905 2906 tmp = intel_uncore_read(&dev_priv->uncore, DSI_INTR_IDENT_REG(port)); 2907 intel_uncore_write(&dev_priv->uncore, DSI_INTR_IDENT_REG(port), tmp); 2908 2909 return true; 2910 } 2911 2912 int bdw_enable_vblank(struct drm_crtc *_crtc) 2913 { 2914 struct intel_crtc *crtc = to_intel_crtc(_crtc); 2915 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 2916 enum pipe pipe = crtc->pipe; 2917 unsigned long irqflags; 2918 2919 if (gen11_dsi_configure_te(crtc, true)) 2920 return 0; 2921 2922 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2923 bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK); 2924 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2925 2926 /* Even if there is no DMC, frame counter can get stuck when 2927 * PSR is active as no frames are generated, so check only for PSR. 2928 */ 2929 if (HAS_PSR(dev_priv)) 2930 drm_crtc_vblank_restore(&crtc->base); 2931 2932 return 0; 2933 } 2934 2935 /* Called from drm generic code, passed 'crtc' which 2936 * we use as a pipe index 2937 */ 2938 void i8xx_disable_vblank(struct drm_crtc *crtc) 2939 { 2940 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 2941 enum pipe pipe = to_intel_crtc(crtc)->pipe; 2942 unsigned long irqflags; 2943 2944 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2945 i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS); 2946 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2947 } 2948 2949 void i915gm_disable_vblank(struct drm_crtc *crtc) 2950 { 2951 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 2952 2953 i8xx_disable_vblank(crtc); 2954 2955 if (--dev_priv->vblank_enabled == 0) 2956 intel_uncore_write(&dev_priv->uncore, SCPD0, _MASKED_BIT_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE)); 2957 } 2958 2959 void i965_disable_vblank(struct drm_crtc *crtc) 2960 { 2961 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 2962 enum pipe pipe = to_intel_crtc(crtc)->pipe; 2963 unsigned long irqflags; 2964 2965 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2966 i915_disable_pipestat(dev_priv, pipe, 2967 PIPE_START_VBLANK_INTERRUPT_STATUS); 2968 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2969 } 2970 2971 void ilk_disable_vblank(struct drm_crtc *crtc) 2972 { 2973 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 2974 enum pipe pipe = to_intel_crtc(crtc)->pipe; 2975 unsigned long irqflags; 2976 u32 bit = DISPLAY_VER(dev_priv) >= 7 ? 
2977 DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe); 2978 2979 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2980 ilk_disable_display_irq(dev_priv, bit); 2981 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2982 } 2983 2984 void bdw_disable_vblank(struct drm_crtc *_crtc) 2985 { 2986 struct intel_crtc *crtc = to_intel_crtc(_crtc); 2987 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 2988 enum pipe pipe = crtc->pipe; 2989 unsigned long irqflags; 2990 2991 if (gen11_dsi_configure_te(crtc, false)) 2992 return; 2993 2994 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2995 bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK); 2996 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2997 } 2998 2999 static void ibx_irq_reset(struct drm_i915_private *dev_priv) 3000 { 3001 struct intel_uncore *uncore = &dev_priv->uncore; 3002 3003 if (HAS_PCH_NOP(dev_priv)) 3004 return; 3005 3006 GEN3_IRQ_RESET(uncore, SDE); 3007 3008 if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv)) 3009 intel_uncore_write(&dev_priv->uncore, SERR_INT, 0xffffffff); 3010 } 3011 3012 static void vlv_display_irq_reset(struct drm_i915_private *dev_priv) 3013 { 3014 struct intel_uncore *uncore = &dev_priv->uncore; 3015 3016 if (IS_CHERRYVIEW(dev_priv)) 3017 intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_CHV); 3018 else 3019 intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_VLV); 3020 3021 i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0); 3022 intel_uncore_write(uncore, PORT_HOTPLUG_STAT, intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT)); 3023 3024 i9xx_pipestat_irq_reset(dev_priv); 3025 3026 GEN3_IRQ_RESET(uncore, VLV_); 3027 dev_priv->irq_mask = ~0u; 3028 } 3029 3030 static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv) 3031 { 3032 struct intel_uncore *uncore = &dev_priv->uncore; 3033 3034 u32 pipestat_mask; 3035 u32 enable_mask; 3036 enum pipe pipe; 3037 3038 pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS; 3039 3040 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); 3041 for_each_pipe(dev_priv, pipe) 3042 i915_enable_pipestat(dev_priv, pipe, pipestat_mask); 3043 3044 enable_mask = I915_DISPLAY_PORT_INTERRUPT | 3045 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3046 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3047 I915_LPE_PIPE_A_INTERRUPT | 3048 I915_LPE_PIPE_B_INTERRUPT; 3049 3050 if (IS_CHERRYVIEW(dev_priv)) 3051 enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT | 3052 I915_LPE_PIPE_C_INTERRUPT; 3053 3054 drm_WARN_ON(&dev_priv->drm, dev_priv->irq_mask != ~0u); 3055 3056 dev_priv->irq_mask = ~enable_mask; 3057 3058 GEN3_IRQ_INIT(uncore, VLV_, dev_priv->irq_mask, enable_mask); 3059 } 3060 3061 /* drm_dma.h hooks 3062 */ 3063 static void ilk_irq_reset(struct drm_i915_private *dev_priv) 3064 { 3065 struct intel_uncore *uncore = &dev_priv->uncore; 3066 3067 GEN3_IRQ_RESET(uncore, DE); 3068 dev_priv->irq_mask = ~0u; 3069 3070 if (GRAPHICS_VER(dev_priv) == 7) 3071 intel_uncore_write(uncore, GEN7_ERR_INT, 0xffffffff); 3072 3073 if (IS_HASWELL(dev_priv)) { 3074 intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff); 3075 intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff); 3076 } 3077 3078 gen5_gt_irq_reset(to_gt(dev_priv)); 3079 3080 ibx_irq_reset(dev_priv); 3081 } 3082 3083 static void valleyview_irq_reset(struct drm_i915_private *dev_priv) 3084 { 3085 intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, 0); 3086 intel_uncore_posting_read(&dev_priv->uncore, VLV_MASTER_IER); 3087 3088 gen5_gt_irq_reset(to_gt(dev_priv)); 3089 3090 
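/* display_irqs_enabled is protected by irq_lock, hence the locking here */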
spin_lock_irq(&dev_priv->irq_lock); 3091 if (dev_priv->display_irqs_enabled) 3092 vlv_display_irq_reset(dev_priv); 3093 spin_unlock_irq(&dev_priv->irq_lock); 3094 } 3095 3096 static void gen8_display_irq_reset(struct drm_i915_private *dev_priv) 3097 { 3098 struct intel_uncore *uncore = &dev_priv->uncore; 3099 enum pipe pipe; 3100 3101 if (!HAS_DISPLAY(dev_priv)) 3102 return; 3103 3104 intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff); 3105 intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff); 3106 3107 for_each_pipe(dev_priv, pipe) 3108 if (intel_display_power_is_enabled(dev_priv, 3109 POWER_DOMAIN_PIPE(pipe))) 3110 GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe); 3111 3112 GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_); 3113 GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_); 3114 } 3115 3116 static void gen8_irq_reset(struct drm_i915_private *dev_priv) 3117 { 3118 struct intel_uncore *uncore = &dev_priv->uncore; 3119 3120 gen8_master_intr_disable(dev_priv->uncore.regs); 3121 3122 gen8_gt_irq_reset(to_gt(dev_priv)); 3123 gen8_display_irq_reset(dev_priv); 3124 GEN3_IRQ_RESET(uncore, GEN8_PCU_); 3125 3126 if (HAS_PCH_SPLIT(dev_priv)) 3127 ibx_irq_reset(dev_priv); 3128 3129 } 3130 3131 static void gen11_display_irq_reset(struct drm_i915_private *dev_priv) 3132 { 3133 struct intel_uncore *uncore = &dev_priv->uncore; 3134 enum pipe pipe; 3135 u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | 3136 BIT(TRANSCODER_C) | BIT(TRANSCODER_D); 3137 3138 if (!HAS_DISPLAY(dev_priv)) 3139 return; 3140 3141 intel_uncore_write(uncore, GEN11_DISPLAY_INT_CTL, 0); 3142 3143 if (DISPLAY_VER(dev_priv) >= 12) { 3144 enum transcoder trans; 3145 3146 for_each_cpu_transcoder_masked(dev_priv, trans, trans_mask) { 3147 enum intel_display_power_domain domain; 3148 3149 domain = POWER_DOMAIN_TRANSCODER(trans); 3150 if (!intel_display_power_is_enabled(dev_priv, domain)) 3151 continue; 3152 3153 intel_uncore_write(uncore, TRANS_PSR_IMR(trans), 0xffffffff); 3154 intel_uncore_write(uncore, TRANS_PSR_IIR(trans), 0xffffffff); 3155 } 3156 } else { 3157 intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff); 3158 intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff); 3159 } 3160 3161 for_each_pipe(dev_priv, pipe) 3162 if (intel_display_power_is_enabled(dev_priv, 3163 POWER_DOMAIN_PIPE(pipe))) 3164 GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe); 3165 3166 GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_); 3167 GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_); 3168 GEN3_IRQ_RESET(uncore, GEN11_DE_HPD_); 3169 3170 if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) 3171 GEN3_IRQ_RESET(uncore, SDE); 3172 } 3173 3174 static void gen11_irq_reset(struct drm_i915_private *dev_priv) 3175 { 3176 struct intel_gt *gt = to_gt(dev_priv); 3177 struct intel_uncore *uncore = gt->uncore; 3178 3179 gen11_master_intr_disable(dev_priv->uncore.regs); 3180 3181 gen11_gt_irq_reset(gt); 3182 gen11_display_irq_reset(dev_priv); 3183 3184 GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_); 3185 GEN3_IRQ_RESET(uncore, GEN8_PCU_); 3186 } 3187 3188 static void dg1_irq_reset(struct drm_i915_private *dev_priv) 3189 { 3190 struct intel_gt *gt = to_gt(dev_priv); 3191 struct intel_uncore *uncore = gt->uncore; 3192 3193 dg1_master_intr_disable(dev_priv->uncore.regs); 3194 3195 gen11_gt_irq_reset(gt); 3196 gen11_display_irq_reset(dev_priv); 3197 3198 GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_); 3199 GEN3_IRQ_RESET(uncore, GEN8_PCU_); 3200 } 3201 3202 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv, 3203 u8 pipe_mask) 3204 { 3205 struct intel_uncore *uncore = &dev_priv->uncore; 3206 u32 extra_ier = GEN8_PIPE_VBLANK | 3207 
gen8_de_pipe_underrun_mask(dev_priv) | 3208 gen8_de_pipe_flip_done_mask(dev_priv); 3209 enum pipe pipe; 3210 3211 spin_lock_irq(&dev_priv->irq_lock); 3212 3213 if (!intel_irqs_enabled(dev_priv)) { 3214 spin_unlock_irq(&dev_priv->irq_lock); 3215 return; 3216 } 3217 3218 for_each_pipe_masked(dev_priv, pipe, pipe_mask) 3219 GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe, 3220 dev_priv->de_irq_mask[pipe], 3221 ~dev_priv->de_irq_mask[pipe] | extra_ier); 3222 3223 spin_unlock_irq(&dev_priv->irq_lock); 3224 } 3225 3226 void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv, 3227 u8 pipe_mask) 3228 { 3229 struct intel_uncore *uncore = &dev_priv->uncore; 3230 enum pipe pipe; 3231 3232 spin_lock_irq(&dev_priv->irq_lock); 3233 3234 if (!intel_irqs_enabled(dev_priv)) { 3235 spin_unlock_irq(&dev_priv->irq_lock); 3236 return; 3237 } 3238 3239 for_each_pipe_masked(dev_priv, pipe, pipe_mask) 3240 GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe); 3241 3242 spin_unlock_irq(&dev_priv->irq_lock); 3243 3244 /* make sure we're done processing display irqs */ 3245 intel_synchronize_irq(dev_priv); 3246 } 3247 3248 static void cherryview_irq_reset(struct drm_i915_private *dev_priv) 3249 { 3250 struct intel_uncore *uncore = &dev_priv->uncore; 3251 3252 intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, 0); 3253 intel_uncore_posting_read(&dev_priv->uncore, GEN8_MASTER_IRQ); 3254 3255 gen8_gt_irq_reset(to_gt(dev_priv)); 3256 3257 GEN3_IRQ_RESET(uncore, GEN8_PCU_); 3258 3259 spin_lock_irq(&dev_priv->irq_lock); 3260 if (dev_priv->display_irqs_enabled) 3261 vlv_display_irq_reset(dev_priv); 3262 spin_unlock_irq(&dev_priv->irq_lock); 3263 } 3264 3265 static u32 ibx_hotplug_enables(struct drm_i915_private *i915, 3266 enum hpd_pin pin) 3267 { 3268 switch (pin) { 3269 case HPD_PORT_A: 3270 /* 3271 * When CPU and PCH are on the same package, port A 3272 * HPD must be enabled in both north and south. 3273 */ 3274 return HAS_PCH_LPT_LP(i915) ? 3275 PORTA_HOTPLUG_ENABLE : 0; 3276 case HPD_PORT_B: 3277 return PORTB_HOTPLUG_ENABLE | 3278 PORTB_PULSE_DURATION_2ms; 3279 case HPD_PORT_C: 3280 return PORTC_HOTPLUG_ENABLE | 3281 PORTC_PULSE_DURATION_2ms; 3282 case HPD_PORT_D: 3283 return PORTD_HOTPLUG_ENABLE | 3284 PORTD_PULSE_DURATION_2ms; 3285 default: 3286 return 0; 3287 } 3288 } 3289 3290 static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv) 3291 { 3292 u32 hotplug; 3293 3294 /* 3295 * Enable digital hotplug on the PCH, and configure the DP short pulse 3296 * duration to 2ms (which is the minimum in the Display Port spec). 3297 * The pulse duration bits are reserved on LPT+. 
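 *
 * The per-pin enable and pulse duration bits are supplied by
 * ibx_hotplug_enables() via intel_hpd_hotplug_enables().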
3298 */ 3299 hotplug = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG); 3300 hotplug &= ~(PORTA_HOTPLUG_ENABLE | 3301 PORTB_HOTPLUG_ENABLE | 3302 PORTC_HOTPLUG_ENABLE | 3303 PORTD_HOTPLUG_ENABLE | 3304 PORTB_PULSE_DURATION_MASK | 3305 PORTC_PULSE_DURATION_MASK | 3306 PORTD_PULSE_DURATION_MASK); 3307 hotplug |= intel_hpd_hotplug_enables(dev_priv, ibx_hotplug_enables); 3308 intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, hotplug); 3309 } 3310 3311 static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv) 3312 { 3313 u32 hotplug_irqs, enabled_irqs; 3314 3315 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.pch_hpd); 3316 hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.pch_hpd); 3317 3318 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 3319 3320 ibx_hpd_detection_setup(dev_priv); 3321 } 3322 3323 static u32 icp_ddi_hotplug_enables(struct drm_i915_private *i915, 3324 enum hpd_pin pin) 3325 { 3326 switch (pin) { 3327 case HPD_PORT_A: 3328 case HPD_PORT_B: 3329 case HPD_PORT_C: 3330 case HPD_PORT_D: 3331 return SHOTPLUG_CTL_DDI_HPD_ENABLE(pin); 3332 default: 3333 return 0; 3334 } 3335 } 3336 3337 static u32 icp_tc_hotplug_enables(struct drm_i915_private *i915, 3338 enum hpd_pin pin) 3339 { 3340 switch (pin) { 3341 case HPD_PORT_TC1: 3342 case HPD_PORT_TC2: 3343 case HPD_PORT_TC3: 3344 case HPD_PORT_TC4: 3345 case HPD_PORT_TC5: 3346 case HPD_PORT_TC6: 3347 return ICP_TC_HPD_ENABLE(pin); 3348 default: 3349 return 0; 3350 } 3351 } 3352 3353 static void icp_ddi_hpd_detection_setup(struct drm_i915_private *dev_priv) 3354 { 3355 u32 hotplug; 3356 3357 hotplug = intel_uncore_read(&dev_priv->uncore, SHOTPLUG_CTL_DDI); 3358 hotplug &= ~(SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_A) | 3359 SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_B) | 3360 SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_C) | 3361 SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_D)); 3362 hotplug |= intel_hpd_hotplug_enables(dev_priv, icp_ddi_hotplug_enables); 3363 intel_uncore_write(&dev_priv->uncore, SHOTPLUG_CTL_DDI, hotplug); 3364 } 3365 3366 static void icp_tc_hpd_detection_setup(struct drm_i915_private *dev_priv) 3367 { 3368 u32 hotplug; 3369 3370 hotplug = intel_uncore_read(&dev_priv->uncore, SHOTPLUG_CTL_TC); 3371 hotplug &= ~(ICP_TC_HPD_ENABLE(HPD_PORT_TC1) | 3372 ICP_TC_HPD_ENABLE(HPD_PORT_TC2) | 3373 ICP_TC_HPD_ENABLE(HPD_PORT_TC3) | 3374 ICP_TC_HPD_ENABLE(HPD_PORT_TC4) | 3375 ICP_TC_HPD_ENABLE(HPD_PORT_TC5) | 3376 ICP_TC_HPD_ENABLE(HPD_PORT_TC6)); 3377 hotplug |= intel_hpd_hotplug_enables(dev_priv, icp_tc_hotplug_enables); 3378 intel_uncore_write(&dev_priv->uncore, SHOTPLUG_CTL_TC, hotplug); 3379 } 3380 3381 static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv) 3382 { 3383 u32 hotplug_irqs, enabled_irqs; 3384 3385 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.pch_hpd); 3386 hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.pch_hpd); 3387 3388 if (INTEL_PCH_TYPE(dev_priv) <= PCH_TGP) 3389 intel_uncore_write(&dev_priv->uncore, SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ); 3390 3391 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 3392 3393 icp_ddi_hpd_detection_setup(dev_priv); 3394 icp_tc_hpd_detection_setup(dev_priv); 3395 } 3396 3397 static u32 gen11_hotplug_enables(struct drm_i915_private *i915, 3398 enum hpd_pin pin) 3399 { 3400 switch (pin) { 3401 case HPD_PORT_TC1: 3402 case HPD_PORT_TC2: 3403 case HPD_PORT_TC3: 3404 case HPD_PORT_TC4: 3405 case HPD_PORT_TC5: 3406 case HPD_PORT_TC6: 3407 return 
GEN11_HOTPLUG_CTL_ENABLE(pin); 3408 default: 3409 return 0; 3410 } 3411 } 3412 3413 static void dg1_hpd_irq_setup(struct drm_i915_private *dev_priv) 3414 { 3415 u32 val; 3416 3417 val = intel_uncore_read(&dev_priv->uncore, SOUTH_CHICKEN1); 3418 val |= (INVERT_DDIA_HPD | 3419 INVERT_DDIB_HPD | 3420 INVERT_DDIC_HPD | 3421 INVERT_DDID_HPD); 3422 intel_uncore_write(&dev_priv->uncore, SOUTH_CHICKEN1, val); 3423 3424 icp_hpd_irq_setup(dev_priv); 3425 } 3426 3427 static void gen11_tc_hpd_detection_setup(struct drm_i915_private *dev_priv) 3428 { 3429 u32 hotplug; 3430 3431 hotplug = intel_uncore_read(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL); 3432 hotplug &= ~(GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC1) | 3433 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC2) | 3434 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC3) | 3435 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC4) | 3436 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC5) | 3437 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC6)); 3438 hotplug |= intel_hpd_hotplug_enables(dev_priv, gen11_hotplug_enables); 3439 intel_uncore_write(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL, hotplug); 3440 } 3441 3442 static void gen11_tbt_hpd_detection_setup(struct drm_i915_private *dev_priv) 3443 { 3444 u32 hotplug; 3445 3446 hotplug = intel_uncore_read(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL); 3447 hotplug &= ~(GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC1) | 3448 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC2) | 3449 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC3) | 3450 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC4) | 3451 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC5) | 3452 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC6)); 3453 hotplug |= intel_hpd_hotplug_enables(dev_priv, gen11_hotplug_enables); 3454 intel_uncore_write(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL, hotplug); 3455 } 3456 3457 static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv) 3458 { 3459 u32 hotplug_irqs, enabled_irqs; 3460 u32 val; 3461 3462 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd); 3463 hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.hpd); 3464 3465 val = intel_uncore_read(&dev_priv->uncore, GEN11_DE_HPD_IMR); 3466 val &= ~hotplug_irqs; 3467 val |= ~enabled_irqs & hotplug_irqs; 3468 intel_uncore_write(&dev_priv->uncore, GEN11_DE_HPD_IMR, val); 3469 intel_uncore_posting_read(&dev_priv->uncore, GEN11_DE_HPD_IMR); 3470 3471 gen11_tc_hpd_detection_setup(dev_priv); 3472 gen11_tbt_hpd_detection_setup(dev_priv); 3473 3474 if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) 3475 icp_hpd_irq_setup(dev_priv); 3476 } 3477 3478 static u32 spt_hotplug_enables(struct drm_i915_private *i915, 3479 enum hpd_pin pin) 3480 { 3481 switch (pin) { 3482 case HPD_PORT_A: 3483 return PORTA_HOTPLUG_ENABLE; 3484 case HPD_PORT_B: 3485 return PORTB_HOTPLUG_ENABLE; 3486 case HPD_PORT_C: 3487 return PORTC_HOTPLUG_ENABLE; 3488 case HPD_PORT_D: 3489 return PORTD_HOTPLUG_ENABLE; 3490 default: 3491 return 0; 3492 } 3493 } 3494 3495 static u32 spt_hotplug2_enables(struct drm_i915_private *i915, 3496 enum hpd_pin pin) 3497 { 3498 switch (pin) { 3499 case HPD_PORT_E: 3500 return PORTE_HOTPLUG_ENABLE; 3501 default: 3502 return 0; 3503 } 3504 } 3505 3506 static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv) 3507 { 3508 u32 val, hotplug; 3509 3510 /* Display WA #1179 WaHardHangonHotPlug: cnp */ 3511 if (HAS_PCH_CNP(dev_priv)) { 3512 val = intel_uncore_read(&dev_priv->uncore, SOUTH_CHICKEN1); 3513 val &= ~CHASSIS_CLK_REQ_DURATION_MASK; 3514 val |= CHASSIS_CLK_REQ_DURATION(0xf); 3515 intel_uncore_write(&dev_priv->uncore, SOUTH_CHICKEN1, val); 3516 } 3517 3518 /* Enable digital 
hotplug on the PCH */ 3519 hotplug = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG); 3520 hotplug &= ~(PORTA_HOTPLUG_ENABLE | 3521 PORTB_HOTPLUG_ENABLE | 3522 PORTC_HOTPLUG_ENABLE | 3523 PORTD_HOTPLUG_ENABLE); 3524 hotplug |= intel_hpd_hotplug_enables(dev_priv, spt_hotplug_enables); 3525 intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, hotplug); 3526 3527 hotplug = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG2); 3528 hotplug &= ~PORTE_HOTPLUG_ENABLE; 3529 hotplug |= intel_hpd_hotplug_enables(dev_priv, spt_hotplug2_enables); 3530 intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG2, hotplug); 3531 } 3532 3533 static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv) 3534 { 3535 u32 hotplug_irqs, enabled_irqs; 3536 3537 if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP) 3538 intel_uncore_write(&dev_priv->uncore, SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ); 3539 3540 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.pch_hpd); 3541 hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.pch_hpd); 3542 3543 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 3544 3545 spt_hpd_detection_setup(dev_priv); 3546 } 3547 3548 static u32 ilk_hotplug_enables(struct drm_i915_private *i915, 3549 enum hpd_pin pin) 3550 { 3551 switch (pin) { 3552 case HPD_PORT_A: 3553 return DIGITAL_PORTA_HOTPLUG_ENABLE | 3554 DIGITAL_PORTA_PULSE_DURATION_2ms; 3555 default: 3556 return 0; 3557 } 3558 } 3559 3560 static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv) 3561 { 3562 u32 hotplug; 3563 3564 /* 3565 * Enable digital hotplug on the CPU, and configure the DP short pulse 3566 * duration to 2ms (which is the minimum in the Display Port spec) 3567 * The pulse duration bits are reserved on HSW+. 3568 */ 3569 hotplug = intel_uncore_read(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL); 3570 hotplug &= ~(DIGITAL_PORTA_HOTPLUG_ENABLE | 3571 DIGITAL_PORTA_PULSE_DURATION_MASK); 3572 hotplug |= intel_hpd_hotplug_enables(dev_priv, ilk_hotplug_enables); 3573 intel_uncore_write(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL, hotplug); 3574 } 3575 3576 static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv) 3577 { 3578 u32 hotplug_irqs, enabled_irqs; 3579 3580 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd); 3581 hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.hpd); 3582 3583 if (DISPLAY_VER(dev_priv) >= 8) 3584 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs); 3585 else 3586 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs); 3587 3588 ilk_hpd_detection_setup(dev_priv); 3589 3590 ibx_hpd_irq_setup(dev_priv); 3591 } 3592 3593 static u32 bxt_hotplug_enables(struct drm_i915_private *i915, 3594 enum hpd_pin pin) 3595 { 3596 u32 hotplug; 3597 3598 switch (pin) { 3599 case HPD_PORT_A: 3600 hotplug = PORTA_HOTPLUG_ENABLE; 3601 if (intel_bios_is_port_hpd_inverted(i915, PORT_A)) 3602 hotplug |= BXT_DDIA_HPD_INVERT; 3603 return hotplug; 3604 case HPD_PORT_B: 3605 hotplug = PORTB_HOTPLUG_ENABLE; 3606 if (intel_bios_is_port_hpd_inverted(i915, PORT_B)) 3607 hotplug |= BXT_DDIB_HPD_INVERT; 3608 return hotplug; 3609 case HPD_PORT_C: 3610 hotplug = PORTC_HOTPLUG_ENABLE; 3611 if (intel_bios_is_port_hpd_inverted(i915, PORT_C)) 3612 hotplug |= BXT_DDIC_HPD_INVERT; 3613 return hotplug; 3614 default: 3615 return 0; 3616 } 3617 } 3618 3619 static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv) 3620 { 3621 u32 hotplug; 3622 3623 hotplug = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG); 
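/* Clear the per-port enable and invert bits before applying the new ones */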

static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, enabled_irqs;

	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd);
	hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.hpd);

	bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);

	bxt_hpd_detection_setup(dev_priv);
}

/*
 * SDEIER is also touched by the interrupt handler to work around missed PCH
 * interrupts. Hence we can't update it after the interrupt handler is enabled -
 * instead we unconditionally enable all PCH interrupt sources here, but then
 * only unmask them as needed with SDEIMR.
 *
 * Note that we currently do this after installing the interrupt handler,
 * but before we enable the master interrupt. That should be sufficient
 * to avoid races with the irq handler, assuming we have MSI. Shared legacy
 * interrupts could still race.
 */
static void ibx_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 mask;

	if (HAS_PCH_NOP(dev_priv))
		return;

	if (HAS_PCH_IBX(dev_priv))
		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
	else if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
	else
		mask = SDE_GMBUS_CPT;

	GEN3_IRQ_INIT(uncore, SDE, ~mask, 0xffffffff);
}

static void ilk_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 display_mask, extra_mask;

	if (GRAPHICS_VER(dev_priv) >= 7) {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
				DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB);
		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
			      DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
			      DE_PLANE_FLIP_DONE_IVB(PLANE_C) |
			      DE_PLANE_FLIP_DONE_IVB(PLANE_B) |
			      DE_PLANE_FLIP_DONE_IVB(PLANE_A) |
			      DE_DP_A_HOTPLUG_IVB);
	} else {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
				DE_AUX_CHANNEL_A | DE_PIPEB_CRC_DONE |
				DE_PIPEA_CRC_DONE | DE_POISON);
		extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK |
			      DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
			      DE_PLANE_FLIP_DONE(PLANE_A) |
			      DE_PLANE_FLIP_DONE(PLANE_B) |
			      DE_DP_A_HOTPLUG);
	}

	if (IS_HASWELL(dev_priv)) {
		gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
		display_mask |= DE_EDP_PSR_INT_HSW;
	}

	if (IS_IRONLAKE_M(dev_priv))
		extra_mask |= DE_PCU_EVENT;

	dev_priv->irq_mask = ~display_mask;

	ibx_irq_postinstall(dev_priv);

	gen5_gt_irq_postinstall(to_gt(dev_priv));

	GEN3_IRQ_INIT(uncore, DE, dev_priv->irq_mask,
		      display_mask | extra_mask);
}

void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
{
	lockdep_assert_held(&dev_priv->irq_lock);

	if (dev_priv->display_irqs_enabled)
		return;

	dev_priv->display_irqs_enabled = true;

	if (intel_irqs_enabled(dev_priv)) {
		vlv_display_irq_reset(dev_priv);
		vlv_display_irq_postinstall(dev_priv);
	}
}
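
/*
 * valleyview_enable_display_irqs() and its disable counterpart below
 * exist because on vlv/chv the display interrupt block sits in a power
 * well that may be powered down at runtime. Both are meant to be
 * called with irq_lock held (see the lockdep assert); a typical
 * caller, e.g. in the display power well code, looks roughly like:
 *
 *	spin_lock_irq(&dev_priv->irq_lock);
 *	valleyview_enable_display_irqs(dev_priv);
 *	spin_unlock_irq(&dev_priv->irq_lock);
 */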

void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
{
	lockdep_assert_held(&dev_priv->irq_lock);

	if (!dev_priv->display_irqs_enabled)
		return;

	dev_priv->display_irqs_enabled = false;

	if (intel_irqs_enabled(dev_priv))
		vlv_display_irq_reset(dev_priv);
}

static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv)
{
	gen5_gt_irq_postinstall(to_gt(dev_priv));

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_postinstall(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
	intel_uncore_posting_read(&dev_priv->uncore, VLV_MASTER_IER);
}

static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	u32 de_pipe_masked = gen8_de_pipe_fault_mask(dev_priv) |
		GEN8_PIPE_CDCLK_CRC_DONE;
	u32 de_pipe_enables;
	u32 de_port_masked = gen8_de_port_aux_mask(dev_priv);
	u32 de_port_enables;
	u32 de_misc_masked = GEN8_DE_EDP_PSR;
	u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
		BIT(TRANSCODER_C) | BIT(TRANSCODER_D);
	enum pipe pipe;

	if (!HAS_DISPLAY(dev_priv))
		return;

	if (DISPLAY_VER(dev_priv) <= 10)
		de_misc_masked |= GEN8_DE_MISC_GSE;

	if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
		de_port_masked |= BXT_DE_PORT_GMBUS;

	if (DISPLAY_VER(dev_priv) >= 11) {
		enum port port;

		if (intel_bios_is_dsi_present(dev_priv, &port))
			de_port_masked |= DSI0_TE | DSI1_TE;
	}

	de_pipe_enables = de_pipe_masked |
		GEN8_PIPE_VBLANK |
		gen8_de_pipe_underrun_mask(dev_priv) |
		gen8_de_pipe_flip_done_mask(dev_priv);

	de_port_enables = de_port_masked;
	if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
		de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
	else if (IS_BROADWELL(dev_priv))
		de_port_enables |= BDW_DE_PORT_HOTPLUG_MASK;

	if (DISPLAY_VER(dev_priv) >= 12) {
		enum transcoder trans;

		for_each_cpu_transcoder_masked(dev_priv, trans, trans_mask) {
			enum intel_display_power_domain domain;

			domain = POWER_DOMAIN_TRANSCODER(trans);
			if (!intel_display_power_is_enabled(dev_priv, domain))
				continue;

			gen3_assert_iir_is_zero(uncore, TRANS_PSR_IIR(trans));
		}
	} else {
		gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
	}

	for_each_pipe(dev_priv, pipe) {
		dev_priv->de_irq_mask[pipe] = ~de_pipe_masked;

		if (intel_display_power_is_enabled(dev_priv,
						   POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
					  dev_priv->de_irq_mask[pipe],
					  de_pipe_enables);
	}

	GEN3_IRQ_INIT(uncore, GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
	GEN3_IRQ_INIT(uncore, GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);

	if (DISPLAY_VER(dev_priv) >= 11) {
		u32 de_hpd_masked = 0;
		u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK |
				     GEN11_DE_TBT_HOTPLUG_MASK;

		GEN3_IRQ_INIT(uncore, GEN11_DE_HPD_, ~de_hpd_masked,
			      de_hpd_enables);
	}
}

static void icp_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 mask = SDE_GMBUS_ICP;

	GEN3_IRQ_INIT(uncore, SDE, ~mask, 0xffffffff);
}
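
/*
 * Note the recurring GEN3_IRQ_INIT(uncore, SDE, ~mask, 0xffffffff)
 * idiom in the PCH postinstall hooks: IER is opened up for every
 * source while IMR masks everything except the bits in mask, so that
 * individual sources can later be unmasked on demand purely through
 * SDEIMR. In bare register terms this boils down to roughly:
 *
 *	intel_uncore_write(uncore, SDEIER, 0xffffffff);
 *	intel_uncore_write(uncore, SDEIMR, ~mask);
 *
 * on top of the IIR sanity checking the real macro performs first.
 */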

static void gen8_irq_postinstall(struct drm_i915_private *dev_priv)
{
	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
		icp_irq_postinstall(dev_priv);
	else if (HAS_PCH_SPLIT(dev_priv))
		ibx_irq_postinstall(dev_priv);

	gen8_gt_irq_postinstall(to_gt(dev_priv));
	gen8_de_irq_postinstall(dev_priv);

	gen8_master_intr_enable(dev_priv->uncore.regs);
}

static void gen11_de_irq_postinstall(struct drm_i915_private *dev_priv)
{
	if (!HAS_DISPLAY(dev_priv))
		return;

	gen8_de_irq_postinstall(dev_priv);

	intel_uncore_write(&dev_priv->uncore, GEN11_DISPLAY_INT_CTL,
			   GEN11_DISPLAY_IRQ_ENABLE);
}

static void gen11_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_gt *gt = to_gt(dev_priv);
	struct intel_uncore *uncore = gt->uncore;
	u32 gu_misc_masked = GEN11_GU_MISC_GSE;

	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
		icp_irq_postinstall(dev_priv);

	gen11_gt_irq_postinstall(gt);
	gen11_de_irq_postinstall(dev_priv);

	GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);

	gen11_master_intr_enable(uncore->regs);
	intel_uncore_posting_read(&dev_priv->uncore, GEN11_GFX_MSTR_IRQ);
}

static void dg1_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_gt *gt = to_gt(dev_priv);
	struct intel_uncore *uncore = gt->uncore;
	u32 gu_misc_masked = GEN11_GU_MISC_GSE;

	gen11_gt_irq_postinstall(gt);

	GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);

	if (HAS_DISPLAY(dev_priv)) {
		icp_irq_postinstall(dev_priv);
		gen8_de_irq_postinstall(dev_priv);
		intel_uncore_write(&dev_priv->uncore, GEN11_DISPLAY_INT_CTL,
				   GEN11_DISPLAY_IRQ_ENABLE);
	}

	dg1_master_intr_enable(uncore->regs);
	intel_uncore_posting_read(uncore, DG1_MSTR_TILE_INTR);
}

static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv)
{
	gen8_gt_irq_postinstall(to_gt(dev_priv));

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_postinstall(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
	intel_uncore_posting_read(&dev_priv->uncore, GEN8_MASTER_IRQ);
}

static void i8xx_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	i9xx_pipestat_irq_reset(dev_priv);

	GEN2_IRQ_RESET(uncore);
	dev_priv->irq_mask = ~0u;
}

static void i8xx_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u16 enable_mask;

	intel_uncore_write16(uncore,
			     EMR,
			     ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_MASTER_ERROR_INTERRUPT);

	enable_mask =
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_MASTER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	GEN2_IRQ_INIT(uncore, dev_priv->irq_mask, enable_mask);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);
}
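
/*
 * The interrupt registers are only 16 bits wide on gen2, which is why
 * the i8xx paths use u16 values with the intel_uncore_read16()/
 * intel_uncore_write16() accessors and the GEN2_IRQ_* macro family,
 * while all later platforms go through the 32-bit GEN3_IRQ_* variants.
 */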

static void i8xx_error_irq_ack(struct drm_i915_private *i915,
			       u16 *eir, u16 *eir_stuck)
{
	struct intel_uncore *uncore = &i915->uncore;
	u16 emr;

	*eir = intel_uncore_read16(uncore, EIR);

	if (*eir)
		intel_uncore_write16(uncore, EIR, *eir);

	*eir_stuck = intel_uncore_read16(uncore, EIR);
	if (*eir_stuck == 0)
		return;

	/*
	 * Toggle all EMR bits to make sure we get an edge
	 * in the ISR master error bit if we don't clear
	 * all the EIR bits. Otherwise the edge triggered
	 * IIR on i965/g4x wouldn't notice that an interrupt
	 * is still pending. Also some EIR bits can't be
	 * cleared except by handling the underlying error
	 * (or by a GPU reset) so we mask any bit that
	 * remains set.
	 */
	emr = intel_uncore_read16(uncore, EMR);
	intel_uncore_write16(uncore, EMR, 0xffff);
	intel_uncore_write16(uncore, EMR, emr | *eir_stuck);
}

static void i8xx_error_irq_handler(struct drm_i915_private *dev_priv,
				   u16 eir, u16 eir_stuck)
{
	DRM_DEBUG("Master Error: EIR 0x%04x\n", eir);

	if (eir_stuck)
		drm_dbg(&dev_priv->drm, "EIR stuck: 0x%04x, masked\n",
			eir_stuck);
}

static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv,
			       u32 *eir, u32 *eir_stuck)
{
	u32 emr;

	*eir = intel_uncore_read(&dev_priv->uncore, EIR);

	intel_uncore_write(&dev_priv->uncore, EIR, *eir);

	*eir_stuck = intel_uncore_read(&dev_priv->uncore, EIR);
	if (*eir_stuck == 0)
		return;

	/*
	 * Toggle all EMR bits to make sure we get an edge
	 * in the ISR master error bit if we don't clear
	 * all the EIR bits. Otherwise the edge triggered
	 * IIR on i965/g4x wouldn't notice that an interrupt
	 * is still pending. Also some EIR bits can't be
	 * cleared except by handling the underlying error
	 * (or by a GPU reset) so we mask any bit that
	 * remains set.
	 */
	emr = intel_uncore_read(&dev_priv->uncore, EMR);
	intel_uncore_write(&dev_priv->uncore, EMR, 0xffffffff);
	intel_uncore_write(&dev_priv->uncore, EMR, emr | *eir_stuck);
}

static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv,
				   u32 eir, u32 eir_stuck)
{
	DRM_DEBUG("Master Error, EIR 0x%08x\n", eir);

	if (eir_stuck)
		drm_dbg(&dev_priv->drm, "EIR stuck: 0x%08x, masked\n",
			eir_stuck);
}

static irqreturn_t i8xx_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u16 eir = 0, eir_stuck = 0;
		u16 iir;

		iir = intel_uncore_read16(&dev_priv->uncore, GEN2_IIR);
		if (iir == 0)
			break;

		ret = IRQ_HANDLED;

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i8xx_error_irq_ack(dev_priv, &eir, &eir_stuck);

		intel_uncore_write16(&dev_priv->uncore, GEN2_IIR, iir);

		if (iir & I915_USER_INTERRUPT)
			intel_engine_cs_irq(to_gt(dev_priv)->engine[RCS0], iir);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i8xx_error_irq_handler(dev_priv, eir, eir_stuck);

		i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats);
	} while (0);

	pmu_irq_stats(dev_priv, ret);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}

static void i915_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	if (I915_HAS_HOTPLUG(dev_priv)) {
		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
		intel_uncore_write(&dev_priv->uncore, PORT_HOTPLUG_STAT, intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT));
	}

	i9xx_pipestat_irq_reset(dev_priv);

	GEN3_IRQ_RESET(uncore, GEN2_);
	dev_priv->irq_mask = ~0u;
}

static void i915_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 enable_mask;

	intel_uncore_write(&dev_priv->uncore, EMR, ~(I915_ERROR_PAGE_TABLE |
						     I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_MASTER_ERROR_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_MASTER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	if (I915_HAS_HOTPLUG(dev_priv)) {
		/* Enable in IER... */
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
		/* and unmask in IMR */
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
	}

	GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	i915_enable_asle_pipestat(dev_priv);
}
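
/*
 * The legacy irq handlers below (and i8xx_irq_handler above) all share
 * the same shape: read IIR once, ack the secondary status registers
 * (pipestat, EIR, hotplug) first, write IIR back, and only then
 * dispatch to the engine/display handlers. Acking IIR before
 * dispatching means an event arriving while we are still processing
 * re-asserts IIR and re-raises the interrupt rather than being lost.
 */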

static irqreturn_t i915_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 eir = 0, eir_stuck = 0;
		u32 hotplug_status = 0;
		u32 iir;

		iir = intel_uncore_read(&dev_priv->uncore, GEN2_IIR);
		if (iir == 0)
			break;

		ret = IRQ_HANDLED;

		if (I915_HAS_HOTPLUG(dev_priv) &&
		    iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);

		intel_uncore_write(&dev_priv->uncore, GEN2_IIR, iir);

		if (iir & I915_USER_INTERRUPT)
			intel_engine_cs_irq(to_gt(dev_priv)->engine[RCS0], iir);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_handler(dev_priv, eir, eir_stuck);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		i915_pipestat_irq_handler(dev_priv, iir, pipe_stats);
	} while (0);

	pmu_irq_stats(dev_priv, ret);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}

static void i965_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
	intel_uncore_write(&dev_priv->uncore, PORT_HOTPLUG_STAT, intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT));

	i9xx_pipestat_irq_reset(dev_priv);

	GEN3_IRQ_RESET(uncore, GEN2_);
	dev_priv->irq_mask = ~0u;
}

static void i965_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 enable_mask;
	u32 error_mask;

	/*
	 * Enable some error detection, note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev_priv)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	intel_uncore_write(&dev_priv->uncore, EMR, error_mask);

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PORT_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_MASTER_ERROR_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PORT_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_MASTER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	if (IS_G4X(dev_priv))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	i915_enable_asle_pipestat(dev_priv);
}

static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_en;

	lockdep_assert_held(&dev_priv->irq_lock);

	/* Note HDMI and DP share hotplug bits */
	/* enable bits are the same for all generations */
	hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915);
	/*
	 * Programming the CRT detection parameters tends to generate a
	 * spurious hotplug event about three seconds later. So just do
	 * it once.
	 */
	if (IS_G4X(dev_priv))
		hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
	hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;

	/* Ignore TV since it's buggy */
	i915_hotplug_interrupt_update_locked(dev_priv,
					     HOTPLUG_INT_EN_MASK |
					     CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
					     CRT_HOTPLUG_ACTIVATION_PERIOD_64,
					     hotplug_en);
}

static irqreturn_t i965_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 eir = 0, eir_stuck = 0;
		u32 hotplug_status = 0;
		u32 iir;

		iir = intel_uncore_read(&dev_priv->uncore, GEN2_IIR);
		if (iir == 0)
			break;

		ret = IRQ_HANDLED;

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);

		intel_uncore_write(&dev_priv->uncore, GEN2_IIR, iir);

		if (iir & I915_USER_INTERRUPT)
			intel_engine_cs_irq(to_gt(dev_priv)->engine[RCS0],
					    iir);

		if (iir & I915_BSD_USER_INTERRUPT)
			intel_engine_cs_irq(to_gt(dev_priv)->engine[VCS0],
					    iir >> 25);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_handler(dev_priv, eir, eir_stuck);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		i965_pipestat_irq_handler(dev_priv, iir, pipe_stats);
	} while (0);

	pmu_irq_stats(dev_priv, ret);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}
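
/*
 * HPD_FUNCS() stamps out one hotplug vtable per platform; e.g.
 * HPD_FUNCS(i915) expands to:
 *
 *	static const struct intel_hotplug_funcs i915_hpd_funcs = {
 *		.hpd_irq_setup = i915_hpd_irq_setup,
 *	};
 */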

#define HPD_FUNCS(platform) \
static const struct intel_hotplug_funcs platform##_hpd_funcs = { \
	.hpd_irq_setup = platform##_hpd_irq_setup, \
}

HPD_FUNCS(i915);
HPD_FUNCS(dg1);
HPD_FUNCS(gen11);
HPD_FUNCS(bxt);
HPD_FUNCS(icp);
HPD_FUNCS(spt);
HPD_FUNCS(ilk);
#undef HPD_FUNCS

/**
 * intel_irq_init - initializes irq support
 * @dev_priv: i915 device instance
 *
 * This function initializes all the irq support including work items, timers
 * and all the vtables. It does not setup the interrupt itself though.
 */
void intel_irq_init(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	int i;

	INIT_WORK(&dev_priv->l3_parity.error_work, ivb_parity_work);
	for (i = 0; i < MAX_L3_SLICES; ++i)
		dev_priv->l3_parity.remap_info[i] = NULL;

	/* pre-gen11 the guc irqs bits are in the upper 16 bits of the pm reg */
	if (HAS_GT_UC(dev_priv) && GRAPHICS_VER(dev_priv) < 11)
		to_gt(dev_priv)->pm_guc_events = GUC_INTR_GUC2HOST << 16;

	if (!HAS_DISPLAY(dev_priv))
		return;

	intel_hpd_init_pins(dev_priv);

	intel_hpd_init_work(dev_priv);

	dev->vblank_disable_immediate = true;

	/*
	 * Most platforms treat the display irq block as an always-on
	 * power domain. vlv/chv can disable it at runtime and need
	 * special care to avoid writing any of the display block registers
	 * outside of the power domain. We defer setting up the display irqs
	 * in this case to the runtime pm.
	 */
	dev_priv->display_irqs_enabled = true;
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		dev_priv->display_irqs_enabled = false;

	dev_priv->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD;
	/*
	 * If we have MST support, we want to avoid doing short HPD IRQ storm
	 * detection, as short HPD storms will occur as a natural part of
	 * sideband messaging with MST.
	 * On older platforms however, IRQ storms can occur with both long and
	 * short pulses, as seen on some G4x systems.
	 */
	dev_priv->hotplug.hpd_short_storm_enabled = !HAS_DP_MST(dev_priv);

	if (HAS_GMCH(dev_priv)) {
		if (I915_HAS_HOTPLUG(dev_priv))
			dev_priv->hotplug_funcs = &i915_hpd_funcs;
	} else {
		if (HAS_PCH_DG1(dev_priv))
			dev_priv->hotplug_funcs = &dg1_hpd_funcs;
		else if (DISPLAY_VER(dev_priv) >= 11)
			dev_priv->hotplug_funcs = &gen11_hpd_funcs;
		else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
			dev_priv->hotplug_funcs = &bxt_hpd_funcs;
		else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
			dev_priv->hotplug_funcs = &icp_hpd_funcs;
		else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
			dev_priv->hotplug_funcs = &spt_hpd_funcs;
		else
			dev_priv->hotplug_funcs = &ilk_hpd_funcs;
	}
}
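
/*
 * Taken together, the expected driver-lifetime ordering of these entry
 * points is roughly the following (illustrative only; the actual call
 * sites live in the probe/remove and suspend/resume paths):
 *
 *	intel_irq_init(i915);
 *	ret = intel_irq_install(i915);
 *	...
 *	intel_irq_uninstall(i915);
 *	intel_irq_fini(i915);
 */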

/**
 * intel_irq_fini - deinitializes IRQ support
 * @i915: i915 device instance
 *
 * This function deinitializes all the IRQ support.
 */
void intel_irq_fini(struct drm_i915_private *i915)
{
	int i;

	for (i = 0; i < MAX_L3_SLICES; ++i)
		kfree(i915->l3_parity.remap_info[i]);
}

static irq_handler_t intel_irq_handler(struct drm_i915_private *dev_priv)
{
	if (HAS_GMCH(dev_priv)) {
		if (IS_CHERRYVIEW(dev_priv))
			return cherryview_irq_handler;
		else if (IS_VALLEYVIEW(dev_priv))
			return valleyview_irq_handler;
		else if (GRAPHICS_VER(dev_priv) == 4)
			return i965_irq_handler;
		else if (GRAPHICS_VER(dev_priv) == 3)
			return i915_irq_handler;
		else
			return i8xx_irq_handler;
	} else {
		if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
			return dg1_irq_handler;
		else if (GRAPHICS_VER(dev_priv) >= 11)
			return gen11_irq_handler;
		else if (GRAPHICS_VER(dev_priv) >= 8)
			return gen8_irq_handler;
		else
			return ilk_irq_handler;
	}
}

static void intel_irq_reset(struct drm_i915_private *dev_priv)
{
	if (HAS_GMCH(dev_priv)) {
		if (IS_CHERRYVIEW(dev_priv))
			cherryview_irq_reset(dev_priv);
		else if (IS_VALLEYVIEW(dev_priv))
			valleyview_irq_reset(dev_priv);
		else if (GRAPHICS_VER(dev_priv) == 4)
			i965_irq_reset(dev_priv);
		else if (GRAPHICS_VER(dev_priv) == 3)
			i915_irq_reset(dev_priv);
		else
			i8xx_irq_reset(dev_priv);
	} else {
		if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
			dg1_irq_reset(dev_priv);
		else if (GRAPHICS_VER(dev_priv) >= 11)
			gen11_irq_reset(dev_priv);
		else if (GRAPHICS_VER(dev_priv) >= 8)
			gen8_irq_reset(dev_priv);
		else
			ilk_irq_reset(dev_priv);
	}
}

static void intel_irq_postinstall(struct drm_i915_private *dev_priv)
{
	if (HAS_GMCH(dev_priv)) {
		if (IS_CHERRYVIEW(dev_priv))
			cherryview_irq_postinstall(dev_priv);
		else if (IS_VALLEYVIEW(dev_priv))
			valleyview_irq_postinstall(dev_priv);
		else if (GRAPHICS_VER(dev_priv) == 4)
			i965_irq_postinstall(dev_priv);
		else if (GRAPHICS_VER(dev_priv) == 3)
			i915_irq_postinstall(dev_priv);
		else
			i8xx_irq_postinstall(dev_priv);
	} else {
		if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
			dg1_irq_postinstall(dev_priv);
		else if (GRAPHICS_VER(dev_priv) >= 11)
			gen11_irq_postinstall(dev_priv);
		else if (GRAPHICS_VER(dev_priv) >= 8)
			gen8_irq_postinstall(dev_priv);
		else
			ilk_irq_postinstall(dev_priv);
	}
}

/**
 * intel_irq_install - enables the hardware interrupt
 * @dev_priv: i915 device instance
 *
 * This function enables the hardware interrupt handling, but leaves the hotplug
 * handling still disabled. It is called after intel_irq_init().
 *
 * In the driver load and resume code we need working interrupts in a few places
 * but don't want to deal with the hassle of concurrent probe and hotplug
 * workers. Hence the split into this two-stage approach.
 */
int intel_irq_install(struct drm_i915_private *dev_priv)
{
	int irq = to_pci_dev(dev_priv->drm.dev)->irq;
	int ret;

	/*
	 * We enable some interrupt sources in our postinstall hooks, so mark
	 * interrupts as enabled _before_ actually enabling them to avoid
	 * special cases in our ordering checks.
	 */
	dev_priv->runtime_pm.irqs_enabled = true;

	dev_priv->irq_enabled = true;

	intel_irq_reset(dev_priv);

	ret = request_irq(irq, intel_irq_handler(dev_priv),
			  IRQF_SHARED, DRIVER_NAME, dev_priv);
	if (ret < 0) {
		dev_priv->irq_enabled = false;
		return ret;
	}

	intel_irq_postinstall(dev_priv);

	return ret;
}
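
/*
 * Since the line is requested with IRQF_SHARED, our handler may be
 * invoked for interrupts raised by another device sharing a legacy
 * interrupt line. That is why every handler bails out with IRQ_NONE
 * when intel_irqs_enabled() is false or the IIR read shows nothing
 * pending, letting the core attribute the interrupt elsewhere.
 */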

/**
 * intel_irq_uninstall - finalizes all irq handling
 * @dev_priv: i915 device instance
 *
 * This stops interrupt and hotplug handling and unregisters and frees all
 * resources acquired in the init functions.
 */
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
{
	int irq = to_pci_dev(dev_priv->drm.dev)->irq;

	/*
	 * FIXME we can get called twice during driver probe
	 * error handling as well as during driver remove due to
	 * intel_modeset_driver_remove() calling us out of sequence.
	 * Would be nice if it didn't do that...
	 */
	if (!dev_priv->irq_enabled)
		return;

	dev_priv->irq_enabled = false;

	intel_irq_reset(dev_priv);

	free_irq(irq, dev_priv);

	intel_hpd_cancel_work(dev_priv);
	dev_priv->runtime_pm.irqs_enabled = false;
}

/**
 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
 * @dev_priv: i915 device instance
 *
 * This function is used to disable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
{
	intel_irq_reset(dev_priv);
	dev_priv->runtime_pm.irqs_enabled = false;
	intel_synchronize_irq(dev_priv);
}

/**
 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
 * @dev_priv: i915 device instance
 *
 * This function is used to enable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->runtime_pm.irqs_enabled = true;
	intel_irq_reset(dev_priv);
	intel_irq_postinstall(dev_priv);
}

bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
{
	return dev_priv->runtime_pm.irqs_enabled;
}

void intel_synchronize_irq(struct drm_i915_private *i915)
{
	synchronize_irq(to_pci_dev(i915->drm.dev)->irq);
}

void intel_synchronize_hardirq(struct drm_i915_private *i915)
{
	synchronize_hardirq(to_pci_dev(i915->drm.dev)->irq);
}
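
/*
 * The two runtime pm helpers above are meant to be used as a pair
 * around low-power transitions, roughly (illustrative; the real call
 * sites are in the suspend/resume and runtime pm code):
 *
 *	intel_runtime_pm_disable_interrupts(i915);
 *	... device quiesced or powered down ...
 *	intel_runtime_pm_enable_interrupts(i915);
 *
 * Note that the disable side finishes with intel_synchronize_irq() so
 * that a handler still running on another CPU completes before the
 * device is powered down.
 */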