/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/circ_buf.h>
#include <linux/slab.h>
#include <linux/sysrq.h>

#include <drm/drm_drv.h>

#include "display/intel_de.h"
#include "display/intel_display_types.h"
#include "display/intel_fifo_underrun.h"
#include "display/intel_hotplug.h"
#include "display/intel_lpe_audio.h"
#include "display/intel_psr.h"

#include "gt/intel_breadcrumbs.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_irq.h"
#include "gt/intel_gt_pm_irq.h"
#include "gt/intel_rps.h"

#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_trace.h"
#include "intel_pm.h"

/**
 * DOC: interrupt handling
 *
 * These functions provide the basic support for enabling and disabling the
 * interrupt handling support. There's a lot more functionality in i915_irq.c
 * and related files, but that will be described in separate chapters.
 */

/*
 * Interrupt statistic for PMU. Increments the counter only if the
 * interrupt originated from the GPU so interrupts from a device which
 * shares the interrupt line are not accounted.
 */
static inline void pmu_irq_stats(struct drm_i915_private *i915,
				 irqreturn_t res)
{
	if (unlikely(res != IRQ_HANDLED))
		return;

	/*
	 * A clever compiler translates that into INC. A not so clever one
	 * should at least prevent store tearing.
	 */
	WRITE_ONCE(i915->pmu.irq_count, i915->pmu.irq_count + 1);
}
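/*
 * The hpd_* tables below map each hpd_pin to the platform-specific
 * hotplug trigger/status bits for that pin. intel_hpd_init_pins()
 * selects the table matching the display engine (hpd->hpd) and, where
 * applicable, the PCH (hpd->pch_hpd). The IRQ handlers later walk
 * these tables in the opposite direction to translate raw trigger
 * bits back into pins, roughly (sketch of intel_get_hpd_pins() below):
 *
 *	for_each_hpd_pin(pin)
 *		if (hpd[pin] & hotplug_trigger)
 *			*pin_mask |= BIT(pin);
 */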
typedef bool (*long_pulse_detect_func)(enum hpd_pin pin, u32 val);
typedef u32 (*hotplug_enables_func)(struct drm_i915_private *i915,
				    enum hpd_pin pin);

static const u32 hpd_ilk[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG,
};

static const u32 hpd_ivb[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
};

static const u32 hpd_bdw[HPD_NUM_PINS] = {
	[HPD_PORT_A] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_A),
};

static const u32 hpd_ibx[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG,
};

static const u32 hpd_cpt[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
};

static const u32 hpd_spt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
	[HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT,
};

static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN,
};

static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS,
};

static const u32 hpd_status_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS,
};

static const u32 hpd_bxt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_A),
	[HPD_PORT_B] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_B),
	[HPD_PORT_C] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_C),
};

static const u32 hpd_gen11[HPD_NUM_PINS] = {
	[HPD_PORT_TC1] = GEN11_TC_HOTPLUG(HPD_PORT_TC1) | GEN11_TBT_HOTPLUG(HPD_PORT_TC1),
	[HPD_PORT_TC2] = GEN11_TC_HOTPLUG(HPD_PORT_TC2) | GEN11_TBT_HOTPLUG(HPD_PORT_TC2),
	[HPD_PORT_TC3] = GEN11_TC_HOTPLUG(HPD_PORT_TC3) | GEN11_TBT_HOTPLUG(HPD_PORT_TC3),
	[HPD_PORT_TC4] = GEN11_TC_HOTPLUG(HPD_PORT_TC4) | GEN11_TBT_HOTPLUG(HPD_PORT_TC4),
	[HPD_PORT_TC5] = GEN11_TC_HOTPLUG(HPD_PORT_TC5) | GEN11_TBT_HOTPLUG(HPD_PORT_TC5),
	[HPD_PORT_TC6] = GEN11_TC_HOTPLUG(HPD_PORT_TC6) | GEN11_TBT_HOTPLUG(HPD_PORT_TC6),
};

static const u32 hpd_icp[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_A),
	[HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_B),
	[HPD_PORT_C] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_C),
	[HPD_PORT_TC1] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC1),
	[HPD_PORT_TC2] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC2),
	[HPD_PORT_TC3] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC3),
	[HPD_PORT_TC4] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC4),
	[HPD_PORT_TC5] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC5),
	[HPD_PORT_TC6] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC6),
};
static const u32 hpd_sde_dg1[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_A),
	[HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_B),
	[HPD_PORT_C] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_C),
	[HPD_PORT_D] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_D),
};

static void intel_hpd_init_pins(struct drm_i915_private *dev_priv)
{
	struct i915_hotplug *hpd = &dev_priv->hotplug;

	if (HAS_GMCH(dev_priv)) {
		if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
		    IS_CHERRYVIEW(dev_priv))
			hpd->hpd = hpd_status_g4x;
		else
			hpd->hpd = hpd_status_i915;
		return;
	}

	if (DISPLAY_VER(dev_priv) >= 11)
		hpd->hpd = hpd_gen11;
	else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
		hpd->hpd = hpd_bxt;
	else if (DISPLAY_VER(dev_priv) >= 8)
		hpd->hpd = hpd_bdw;
	else if (DISPLAY_VER(dev_priv) >= 7)
		hpd->hpd = hpd_ivb;
	else
		hpd->hpd = hpd_ilk;

	if ((INTEL_PCH_TYPE(dev_priv) < PCH_DG1) &&
	    (!HAS_PCH_SPLIT(dev_priv) || HAS_PCH_NOP(dev_priv)))
		return;

	if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1)
		hpd->pch_hpd = hpd_sde_dg1;
	else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
		hpd->pch_hpd = hpd_icp;
	else if (HAS_PCH_CNP(dev_priv) || HAS_PCH_SPT(dev_priv))
		hpd->pch_hpd = hpd_spt;
	else if (HAS_PCH_LPT(dev_priv) || HAS_PCH_CPT(dev_priv))
		hpd->pch_hpd = hpd_cpt;
	else if (HAS_PCH_IBX(dev_priv))
		hpd->pch_hpd = hpd_ibx;
	else
		MISSING_CASE(INTEL_PCH_TYPE(dev_priv));
}

static void
intel_handle_vblank(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);

	drm_crtc_handle_vblank(&crtc->base);
}

void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr,
		    i915_reg_t iir, i915_reg_t ier)
{
	intel_uncore_write(uncore, imr, 0xffffffff);
	intel_uncore_posting_read(uncore, imr);

	intel_uncore_write(uncore, ier, 0);

	/* IIR can theoretically queue up two events. Be paranoid. */
	intel_uncore_write(uncore, iir, 0xffffffff);
	intel_uncore_posting_read(uncore, iir);
	intel_uncore_write(uncore, iir, 0xffffffff);
	intel_uncore_posting_read(uncore, iir);
}

void gen2_irq_reset(struct intel_uncore *uncore)
{
	intel_uncore_write16(uncore, GEN2_IMR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IMR);

	intel_uncore_write16(uncore, GEN2_IER, 0);

	/* IIR can theoretically queue up two events. Be paranoid. */
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
}
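/*
 * Usage sketch for the reset/init pair above (the DE register names
 * here are only illustrative): gen3_irq_reset() masks everything and
 * drains IIR twice, gen3_irq_init() asserts IIR really is clean before
 * re-enabling, so a stale event can never fire into a fresh setup:
 *
 *	gen3_irq_reset(uncore, DEIMR, DEIIR, DEIER);
 *	...
 *	gen3_irq_init(uncore, DEIMR, imr_val, DEIER, ier_val, DEIIR);
 */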
/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
static void gen3_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg)
{
	u32 val = intel_uncore_read(uncore, reg);

	if (val == 0)
		return;

	drm_WARN(&uncore->i915->drm, 1,
		 "Interrupt register 0x%x is not zero: 0x%08x\n",
		 i915_mmio_reg_offset(reg), val);
	intel_uncore_write(uncore, reg, 0xffffffff);
	intel_uncore_posting_read(uncore, reg);
	intel_uncore_write(uncore, reg, 0xffffffff);
	intel_uncore_posting_read(uncore, reg);
}

static void gen2_assert_iir_is_zero(struct intel_uncore *uncore)
{
	u16 val = intel_uncore_read16(uncore, GEN2_IIR);

	if (val == 0)
		return;

	drm_WARN(&uncore->i915->drm, 1,
		 "Interrupt register 0x%x is not zero: 0x%08x\n",
		 i915_mmio_reg_offset(GEN2_IIR), val);
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
}

void gen3_irq_init(struct intel_uncore *uncore,
		   i915_reg_t imr, u32 imr_val,
		   i915_reg_t ier, u32 ier_val,
		   i915_reg_t iir)
{
	gen3_assert_iir_is_zero(uncore, iir);

	intel_uncore_write(uncore, ier, ier_val);
	intel_uncore_write(uncore, imr, imr_val);
	intel_uncore_posting_read(uncore, imr);
}

void gen2_irq_init(struct intel_uncore *uncore,
		   u32 imr_val, u32 ier_val)
{
	gen2_assert_iir_is_zero(uncore);

	intel_uncore_write16(uncore, GEN2_IER, ier_val);
	intel_uncore_write16(uncore, GEN2_IMR, imr_val);
	intel_uncore_posting_read16(uncore, GEN2_IMR);
}

/* For display hotplug interrupt */
static inline void
i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
				     u32 mask,
				     u32 bits)
{
	u32 val;

	lockdep_assert_held(&dev_priv->irq_lock);
	drm_WARN_ON(&dev_priv->drm, bits & ~mask);

	val = intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_EN);
	val &= ~mask;
	val |= bits;
	intel_uncore_write(&dev_priv->uncore, PORT_HOTPLUG_EN, val);
}
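/*
 * Example (illustrative values): to flip only port B's enable bit while
 * preserving all other PORT_HOTPLUG_EN bits via the read-modify-write:
 *
 *	i915_hotplug_interrupt_update_locked(i915, PORTB_HOTPLUG_INT_EN,
 *					     PORTB_HOTPLUG_INT_EN);  <- enable
 *	i915_hotplug_interrupt_update_locked(i915, PORTB_HOTPLUG_INT_EN,
 *					     0);                     <- disable
 *
 * bits must always be a subset of mask, as the drm_WARN_ON() enforces.
 */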
/**
 * i915_hotplug_interrupt_update - update hotplug interrupt enable
 * @dev_priv: driver private
 * @mask: bits to update
 * @bits: bits to enable
 * NOTE: the HPD enable bits are modified both inside and outside
 * of an interrupt context. To avoid that read-modify-write cycles
 * interfere, these bits are protected by a spinlock. Since this
 * function is usually not called from a context where the lock is
 * held already, this function acquires the lock itself. A non-locking
 * version is also available.
 */
void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
				   u32 mask,
				   u32 bits)
{
	spin_lock_irq(&dev_priv->irq_lock);
	i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
	spin_unlock_irq(&dev_priv->irq_lock);
}

/**
 * ilk_update_display_irq - update DEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_display_irq(struct drm_i915_private *dev_priv,
				   u32 interrupt_mask, u32 enabled_irq_mask)
{
	u32 new_val;

	lockdep_assert_held(&dev_priv->irq_lock);
	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

	new_val = dev_priv->irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->irq_mask &&
	    !drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv))) {
		dev_priv->irq_mask = new_val;
		intel_uncore_write(&dev_priv->uncore, DEIMR, dev_priv->irq_mask);
		intel_uncore_posting_read(&dev_priv->uncore, DEIMR);
	}
}

void ilk_enable_display_irq(struct drm_i915_private *i915, u32 bits)
{
	ilk_update_display_irq(i915, bits, bits);
}

void ilk_disable_display_irq(struct drm_i915_private *i915, u32 bits)
{
	ilk_update_display_irq(i915, bits, 0);
}

/**
 * bdw_update_port_irq - update DE port interrupt
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
				u32 interrupt_mask,
				u32 enabled_irq_mask)
{
	u32 new_val;
	u32 old_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
		return;

	old_val = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PORT_IMR);

	new_val = old_val;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != old_val) {
		intel_uncore_write(&dev_priv->uncore, GEN8_DE_PORT_IMR, new_val);
		intel_uncore_posting_read(&dev_priv->uncore, GEN8_DE_PORT_IMR);
	}
}

/**
 * bdw_update_pipe_irq - update DE pipe interrupt
 * @dev_priv: driver private
 * @pipe: pipe whose interrupt to update
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
				enum pipe pipe, u32 interrupt_mask,
				u32 enabled_irq_mask)
{
	u32 new_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->de_irq_mask[pipe];
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->de_irq_mask[pipe]) {
		dev_priv->de_irq_mask[pipe] = new_val;
		intel_uncore_write(&dev_priv->uncore, GEN8_DE_PIPE_IMR(pipe),
				   dev_priv->de_irq_mask[pipe]);
		intel_uncore_posting_read(&dev_priv->uncore, GEN8_DE_PIPE_IMR(pipe));
	}
}

void bdw_enable_pipe_irq(struct drm_i915_private *i915,
			 enum pipe pipe, u32 bits)
{
	bdw_update_pipe_irq(i915, pipe, bits, bits);
}
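/*
 * All the *_update_*_irq() helpers above compute the new mask value as
 *
 *	new_val = (old & ~interrupt_mask) | (~enabled_irq_mask & interrupt_mask);
 *
 * where a set bit in an IMR register masks (disables) that interrupt.
 * Worked example: interrupt_mask = BIT(3) with enabled_irq_mask = BIT(3)
 * clears bit 3 in the IMR (unmasking the interrupt), while
 * enabled_irq_mask = 0 sets bit 3 (masking it). Bits outside
 * interrupt_mask are always left untouched.
 */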
void bdw_disable_pipe_irq(struct drm_i915_private *i915,
			  enum pipe pipe, u32 bits)
{
	bdw_update_pipe_irq(i915, pipe, bits, 0);
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
					 u32 interrupt_mask,
					 u32 enabled_irq_mask)
{
	u32 sdeimr = intel_uncore_read(&dev_priv->uncore, SDEIMR);

	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

	lockdep_assert_held(&dev_priv->irq_lock);

	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
		return;

	intel_uncore_write(&dev_priv->uncore, SDEIMR, sdeimr);
	intel_uncore_posting_read(&dev_priv->uncore, SDEIMR);
}

void ibx_enable_display_interrupt(struct drm_i915_private *i915, u32 bits)
{
	ibx_display_interrupt_update(i915, bits, bits);
}

void ibx_disable_display_interrupt(struct drm_i915_private *i915, u32 bits)
{
	ibx_display_interrupt_update(i915, bits, 0);
}

u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
			      enum pipe pipe)
{
	u32 status_mask = dev_priv->pipestat_irq_mask[pipe];
	u32 enable_mask = status_mask << 16;

	lockdep_assert_held(&dev_priv->irq_lock);

	if (DISPLAY_VER(dev_priv) < 5)
		goto out;

	/*
	 * On pipe A we don't support the PSR interrupt yet,
	 * on pipe B and C the same bit MBZ.
	 */
	if (drm_WARN_ON_ONCE(&dev_priv->drm,
			     status_mask & PIPE_A_PSR_STATUS_VLV))
		return 0;
	/*
	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
	 * A the same bit is for perf counters which we don't use either.
	 */
	if (drm_WARN_ON_ONCE(&dev_priv->drm,
			     status_mask & PIPE_B_PSR_STATUS_VLV))
		return 0;

	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

out:
	drm_WARN_ONCE(&dev_priv->drm,
		      enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask);

	return enable_mask;
}

void i915_enable_pipestat(struct drm_i915_private *dev_priv,
			  enum pipe pipe, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 enable_mask;

	drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: status_mask=0x%x\n",
		      pipe_name(pipe), status_mask);

	lockdep_assert_held(&dev_priv->irq_lock);
	drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));

	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == status_mask)
		return;

	dev_priv->pipestat_irq_mask[pipe] |= status_mask;
	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

	intel_uncore_write(&dev_priv->uncore, reg, enable_mask | status_mask);
	intel_uncore_posting_read(&dev_priv->uncore, reg);
}

void i915_disable_pipestat(struct drm_i915_private *dev_priv,
			   enum pipe pipe, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 enable_mask;

	drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: status_mask=0x%x\n",
		      pipe_name(pipe), status_mask);

	lockdep_assert_held(&dev_priv->irq_lock);
	drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));

	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == 0)
		return;

	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

	intel_uncore_write(&dev_priv->uncore, reg, enable_mask | status_mask);
	intel_uncore_posting_read(&dev_priv->uncore, reg);
}

static bool i915_has_asle(struct drm_i915_private *dev_priv)
{
	if (!dev_priv->opregion.asle)
		return false;

	return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
}

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 * @dev_priv: i915 device private
 */
static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
{
	if (!i915_has_asle(dev_priv))
		return;

	spin_lock_irq(&dev_priv->irq_lock);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
	if (DISPLAY_VER(dev_priv) >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_STATUS);

	spin_unlock_irq(&dev_priv->irq_lock);
}
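/*
 * PIPESTAT keeps status bits in its low half and the corresponding
 * enable bits 16 positions higher, which is why
 * i915_pipestat_enable_mask() starts from status_mask << 16. E.g.
 * enabling PIPE_VBLANK_INTERRUPT_STATUS ends up writing
 * (PIPE_VBLANK_INTERRUPT_STATUS << 16) | PIPE_VBLANK_INTERRUPT_STATUS,
 * which arms the enable bit and clears any stale status (the status
 * bits are write-1-to-clear) in a single write.
 */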
/*
 * This timing diagram depicts the video signal in and
 * around the vertical blanking period.
 *
 * Assumptions about the fictitious mode used in this example:
 *	vblank_start >= 3
 *	vsync_start = vblank_start + 1
 *	vsync_end = vblank_start + 2
 *	vtotal = vblank_start + 3
 *
 *           start of vblank:
 *           latch double buffered registers
 *           increment frame counter (ctg+)
 *           generate start of vblank interrupt (gen4+)
 *           |
 *           |          frame start:
 *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
 *           |          may be shifted forward 1-3 extra lines via PIPECONF
 *           |          |
 *           |          |  start of vsync:
 *           |          |  generate vsync interrupt
 *           |          |  |
 * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
 *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
 * ----va---> <-----------------vb--------------------> <--------va-------------
 *       |          |       <----vs----->                     |
 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
 *       |          |                                         |
 *       last visible pixel                                   first visible pixel
 *                  |                                         increment frame counter (gen3/4)
 *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
 *
 * x  = horizontal active
 * _  = horizontal blanking
 * hs = horizontal sync
 * va = vertical active
 * vb = vertical blanking
 * vs = vertical sync
 * vbs = vblank_start (number)
 *
 * Summary:
 * - most events happen at the start of horizontal sync
 * - frame start happens at the start of horizontal blank, 1-4 lines
 *   (depending on PIPECONF settings) after the start of vblank
 * - gen3/4 pixel and frame counter are synchronized with the start
 *   of horizontal active on the first line of vertical active
 */

/*
 * Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
u32 i915_get_vblank_counter(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[drm_crtc_index(crtc)];
	const struct drm_display_mode *mode = &vblank->hwmode;
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	i915_reg_t high_frame, low_frame;
	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
	unsigned long irqflags;

	/*
	 * On i965gm TV output the frame counter only works up to
	 * the point when we enable the TV encoder. After that the
	 * frame counter ceases to work and reads zero. We need a
	 * vblank wait before enabling the TV encoder and so we
	 * have to enable vblank interrupts while the frame counter
	 * is still in a working state. However the core vblank code
	 * does not like us returning non-zero frame counter values
	 * when we've told it that we don't have a working frame
	 * counter. Thus we must stop non-zero values leaking out.
	 */
	if (!vblank->max_vblank_count)
		return 0;

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vbl_start = mode->crtc_vblank_start;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vbl_start = DIV_ROUND_UP(vbl_start, 2);

	/* Convert to pixel count */
	vbl_start *= htotal;

	/* Start of vblank event occurs at start of hsync */
	vbl_start -= htotal - hsync_start;

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = intel_de_read_fw(dev_priv, high_frame) & PIPE_FRAME_HIGH_MASK;
		low = intel_de_read_fw(dev_priv, low_frame);
		high2 = intel_de_read_fw(dev_priv, high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * The frame counter increments at beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}
u32 g4x_get_vblank_counter(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[drm_crtc_index(crtc)];
	enum pipe pipe = to_intel_crtc(crtc)->pipe;

	if (!vblank->max_vblank_count)
		return 0;

	return intel_uncore_read(&dev_priv->uncore, PIPE_FRMCOUNT_G4X(pipe));
}

static u32 intel_crtc_scanlines_since_frame_timestamp(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct drm_vblank_crtc *vblank =
		&crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
	const struct drm_display_mode *mode = &vblank->hwmode;
	u32 htotal = mode->crtc_htotal;
	u32 clock = mode->crtc_clock;
	u32 scan_prev_time, scan_curr_time, scan_post_time;

	/*
	 * To avoid the race condition where we might cross into the
	 * next vblank just between the PIPE_FRMTMSTMP and TIMESTAMP_CTR
	 * reads, we make sure to read PIPE_FRMTMSTMP and TIMESTAMP_CTR
	 * during the same frame.
	 */
	do {
		/*
		 * This field provides read back of the display
		 * pipe frame time stamp. The time stamp value
		 * is sampled at every start of vertical blank.
		 */
		scan_prev_time = intel_de_read_fw(dev_priv,
						  PIPE_FRMTMSTMP(crtc->pipe));

		/*
		 * The TIMESTAMP_CTR register has the current
		 * time stamp value.
		 */
		scan_curr_time = intel_de_read_fw(dev_priv, IVB_TIMESTAMP_CTR);

		scan_post_time = intel_de_read_fw(dev_priv,
						  PIPE_FRMTMSTMP(crtc->pipe));
	} while (scan_post_time != scan_prev_time);

	return div_u64(mul_u32_u32(scan_curr_time - scan_prev_time,
				   clock), 1000 * htotal);
}

/*
 * On certain encoders on certain platforms, the pipe scanline register
 * cannot be used to get the scanline, either because the timings are
 * driven from the PORT or because of issues with scanline register
 * updates. This function uses the framestamp and current timestamp
 * registers to calculate the scanline instead.
 */
static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc)
{
	struct drm_vblank_crtc *vblank =
		&crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
	const struct drm_display_mode *mode = &vblank->hwmode;
	u32 vblank_start = mode->crtc_vblank_start;
	u32 vtotal = mode->crtc_vtotal;
	u32 scanline;

	scanline = intel_crtc_scanlines_since_frame_timestamp(crtc);
	scanline = min(scanline, vtotal - 1);
	scanline = (scanline + vblank_start) % vtotal;

	return scanline;
}
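/*
 * The divisor in intel_crtc_scanlines_since_frame_timestamp() implies
 * the two timestamp counters tick once per microsecond: with crtc_clock
 * in kHz, scanlines = delta_us * clock / (1000 * htotal). Worked
 * example (illustrative numbers): delta_us = 1000, clock = 148500 and
 * htotal = 2200 give 148500000 / 2200000 = 67 scanlines since the last
 * start of vblank.
 */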
/*
 * intel_de_read_fw(), only for fast reads of display block, no need for
 * forcewake etc.
 */
static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct drm_display_mode *mode;
	struct drm_vblank_crtc *vblank;
	enum pipe pipe = crtc->pipe;
	int position, vtotal;

	if (!crtc->active)
		return 0;

	vblank = &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
	mode = &vblank->hwmode;

	if (crtc->mode_flags & I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP)
		return __intel_get_crtc_scanline_from_timestamp(crtc);

	vtotal = mode->crtc_vtotal;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vtotal /= 2;

	if (DISPLAY_VER(dev_priv) == 2)
		position = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
	else
		position = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

	/*
	 * On HSW, the DSL reg (0x70000) appears to return 0 if we
	 * read it just before the start of vblank. So try it again
	 * so we don't accidentally end up spanning a vblank frame
	 * increment, causing the pipe_update_end() code to squawk at us.
	 *
	 * The nature of this problem means we can't simply check the ISR
	 * bit and return the vblank start value; nor can we use the scanline
	 * debug register in the transcoder as it appears to have the same
	 * problem. We may need to extend this to include other platforms,
	 * but so far testing only shows the problem on HSW.
	 */
	if (HAS_DDI(dev_priv) && !position) {
		int i, temp;

		for (i = 0; i < 100; i++) {
			udelay(1);
			temp = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
			if (temp != position) {
				position = temp;
				break;
			}
		}
	}

	/*
	 * See update_scanline_offset() for the details on the
	 * scanline_offset adjustment.
	 */
	return (position + crtc->scanline_offset) % vtotal;
}

static bool i915_get_crtc_scanoutpos(struct drm_crtc *_crtc,
				     bool in_vblank_irq,
				     int *vpos, int *hpos,
				     ktime_t *stime, ktime_t *etime,
				     const struct drm_display_mode *mode)
{
	struct drm_device *dev = _crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *crtc = to_intel_crtc(_crtc);
	enum pipe pipe = crtc->pipe;
	int position;
	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
	unsigned long irqflags;
	bool use_scanline_counter = DISPLAY_VER(dev_priv) >= 5 ||
		IS_G4X(dev_priv) || DISPLAY_VER(dev_priv) == 2 ||
		crtc->mode_flags & I915_MODE_FLAG_USE_SCANLINE_COUNTER;

	if (drm_WARN_ON(&dev_priv->drm, !mode->crtc_clock)) {
		drm_dbg(&dev_priv->drm,
			"trying to get scanoutpos for disabled pipe %c\n",
			pipe_name(pipe));
		return false;
	}

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (crtc->mode_flags & I915_MODE_FLAG_VRR) {
		int scanlines = intel_crtc_scanlines_since_frame_timestamp(crtc);

		position = __intel_get_crtc_scanline(crtc);

		/*
		 * Already exiting vblank? If so, shift our position
		 * so it looks like we're already approaching the full
		 * vblank end. This should make the generated timestamp
		 * more or less match when the active portion will start.
		 */
		if (position >= vbl_start && scanlines < position)
			position = min(crtc->vmax_vblank_start + scanlines, vtotal - 1);
	} else if (use_scanline_counter) {
		/*
		 * No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = __intel_get_crtc_scanline(crtc);
	} else {
		/*
		 * Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (intel_de_read_fw(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;

		/*
		 * In interlaced modes, the pixel counter counts all pixels,
		 * so one field will have htotal more pixels. In order to avoid
		 * the reported position from jumping backwards when the pixel
		 * counter is beyond the length of the shorter field, just
		 * clamp the position to the length of the shorter field. This
		 * matches how the scanline counter based position works since
		 * the scanline counter doesn't count the two half lines.
		 */
		if (position >= vtotal)
			position = vtotal - 1;

		/*
		 * Start of vblank interrupt is triggered at start of hsync,
		 * just prior to the first active line of vblank. However we
		 * consider lines to start at the leading edge of horizontal
		 * active. So, should we get here before we've crossed into
		 * the horizontal active of the first line in vblank, we would
		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
		 * always add htotal-hsync_start to the current pixel position.
		 */
		position = (position + htotal - hsync_start) % vtotal;
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;

	if (use_scanline_counter) {
		*vpos = position;
		*hpos = 0;
	} else {
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	return true;
}

bool intel_crtc_get_vblank_timestamp(struct drm_crtc *crtc, int *max_error,
				     ktime_t *vblank_time, bool in_vblank_irq)
{
	return drm_crtc_vblank_helper_get_vblank_timestamp_internal(
		crtc, max_error, vblank_time, in_vblank_irq,
		i915_get_crtc_scanoutpos);
}

int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	unsigned long irqflags;
	int position;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	position = __intel_get_crtc_scanline(crtc);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	return position;
}
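/*
 * Worked example for the vblank-relative position computed in
 * i915_get_crtc_scanoutpos() (illustrative 1080p-like timings): with
 * vtotal = 1125, vbl_start = 1080 and vbl_end = 1125, scanline 1100
 * yields 1100 - 1125 = -25 (inside vblank, counting up towards 0 at
 * vbl_end), while scanline 500 yields 500 + (1125 - 1125) = 500
 * (scanlines into the active portion).
 */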
/**
 * ivb_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * the same row is more likely to go bad again.
 */
static void ivb_parity_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), l3_parity.error_work);
	struct intel_gt *gt = &dev_priv->gt;
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	u32 misccpctl;
	u8 slice = 0;

	/*
	 * We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->drm.struct_mutex);

	/* If we've screwed up tracking, just let the interrupt fire again */
	if (drm_WARN_ON(&dev_priv->drm, !dev_priv->l3_parity.which_slice))
		goto out;

	misccpctl = intel_uncore_read(&dev_priv->uncore, GEN7_MISCCPCTL);
	intel_uncore_write(&dev_priv->uncore, GEN7_MISCCPCTL,
			   misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	intel_uncore_posting_read(&dev_priv->uncore, GEN7_MISCCPCTL);

	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		i915_reg_t reg;

		slice--;
		if (drm_WARN_ON_ONCE(&dev_priv->drm,
				     slice >= NUM_L3_SLICES(dev_priv)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1 << slice);

		reg = GEN7_L3CDERRST1(slice);

		error_status = intel_uncore_read(&dev_priv->uncore, reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		intel_uncore_write(&dev_priv->uncore, reg,
				   GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		intel_uncore_posting_read(&dev_priv->uncore, reg);

		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

		kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj,
				   KOBJ_CHANGE, parity_event);

		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			  slice, row, bank, subbank);

		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
	}

	intel_uncore_write(&dev_priv->uncore, GEN7_MISCCPCTL, misccpctl);

out:
	drm_WARN_ON(&dev_priv->drm, dev_priv->l3_parity.which_slice);
	spin_lock_irq(&gt->irq_lock);
	gen5_gt_enable_irq(gt, GT_PARITY_ERROR(dev_priv));
	spin_unlock_irq(&gt->irq_lock);

	mutex_unlock(&dev_priv->drm.struct_mutex);
}

static bool gen11_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_TC1:
	case HPD_PORT_TC2:
	case HPD_PORT_TC3:
	case HPD_PORT_TC4:
	case HPD_PORT_TC5:
	case HPD_PORT_TC6:
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(pin);
	default:
		return false;
	}
}

static bool bxt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_A:
		return val & PORTA_HOTPLUG_LONG_DETECT;
	case HPD_PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case HPD_PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool icp_ddi_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_A:
	case HPD_PORT_B:
	case HPD_PORT_C:
	case HPD_PORT_D:
		return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(pin);
	default:
		return false;
	}
}

static bool icp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_TC1:
	case HPD_PORT_TC2:
	case HPD_PORT_TC3:
	case HPD_PORT_TC4:
	case HPD_PORT_TC5:
	case HPD_PORT_TC6:
		return val & ICP_TC_HPD_LONG_DETECT(pin);
	default:
		return false;
	}
}
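/*
 * A "long" HPD pulse signals connect/disconnect, while a "short" pulse
 * (IRQ_HPD in DisplayPort terms) asks for attention on an already
 * connected sink, e.g. for link retraining. The *_long_detect()
 * helpers above and below only decode what the hardware latched;
 * the actual policy lives in intel_hpd_irq_handler().
 */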
static bool spt_port_hotplug2_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_E:
		return val & PORTE_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool spt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_A:
		return val & PORTA_HOTPLUG_LONG_DETECT;
	case HPD_PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case HPD_PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	case HPD_PORT_D:
		return val & PORTD_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool ilk_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_A:
		return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool pch_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case HPD_PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	case HPD_PORT_D:
		return val & PORTD_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool i9xx_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_B:
		return val & PORTB_HOTPLUG_INT_LONG_PULSE;
	case HPD_PORT_C:
		return val & PORTC_HOTPLUG_INT_LONG_PULSE;
	case HPD_PORT_D:
		return val & PORTD_HOTPLUG_INT_LONG_PULSE;
	default:
		return false;
	}
}
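/*
 * Accumulation sketch for intel_get_hpd_pins() below (variable names
 * are illustrative; cf. icp_irq_handler(), which folds the DDI and TC
 * triggers into one set of masks):
 *
 *	u32 pin_mask = 0, long_mask = 0;
 *
 *	intel_get_hpd_pins(i915, &pin_mask, &long_mask, ddi_trigger,
 *			   ddi_reg, i915->hotplug.pch_hpd,
 *			   icp_ddi_port_hotplug_long_detect);
 *	intel_get_hpd_pins(i915, &pin_mask, &long_mask, tc_trigger,
 *			   tc_reg, i915->hotplug.pch_hpd,
 *			   icp_tc_port_hotplug_long_detect);
 *	intel_hpd_irq_handler(i915, pin_mask, long_mask);
 */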
/*
 * Get a bit mask of pins that have triggered, and which ones may be long.
 * This can be called multiple times with the same masks to accumulate
 * hotplug detection results from several registers.
 *
 * Note that the caller is expected to zero out the masks initially.
 */
static void intel_get_hpd_pins(struct drm_i915_private *dev_priv,
			       u32 *pin_mask, u32 *long_mask,
			       u32 hotplug_trigger, u32 dig_hotplug_reg,
			       const u32 hpd[HPD_NUM_PINS],
			       bool long_pulse_detect(enum hpd_pin pin, u32 val))
{
	enum hpd_pin pin;

	BUILD_BUG_ON(BITS_PER_TYPE(*pin_mask) < HPD_NUM_PINS);

	for_each_hpd_pin(pin) {
		if ((hpd[pin] & hotplug_trigger) == 0)
			continue;

		*pin_mask |= BIT(pin);

		if (long_pulse_detect(pin, dig_hotplug_reg))
			*long_mask |= BIT(pin);
	}

	drm_dbg(&dev_priv->drm,
		"hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x, long 0x%08x\n",
		hotplug_trigger, dig_hotplug_reg, *pin_mask, *long_mask);
}

static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv,
				  const u32 hpd[HPD_NUM_PINS])
{
	struct intel_encoder *encoder;
	u32 enabled_irqs = 0;

	for_each_intel_encoder(&dev_priv->drm, encoder)
		if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
			enabled_irqs |= hpd[encoder->hpd_pin];

	return enabled_irqs;
}

static u32 intel_hpd_hotplug_irqs(struct drm_i915_private *dev_priv,
				  const u32 hpd[HPD_NUM_PINS])
{
	struct intel_encoder *encoder;
	u32 hotplug_irqs = 0;

	for_each_intel_encoder(&dev_priv->drm, encoder)
		hotplug_irqs |= hpd[encoder->hpd_pin];

	return hotplug_irqs;
}

static u32 intel_hpd_hotplug_enables(struct drm_i915_private *i915,
				     hotplug_enables_func hotplug_enables)
{
	struct intel_encoder *encoder;
	u32 hotplug = 0;

	for_each_intel_encoder(&i915->drm, encoder)
		hotplug |= hotplug_enables(i915, encoder->hpd_pin);

	return hotplug;
}

static void gmbus_irq_handler(struct drm_i915_private *dev_priv)
{
	wake_up_all(&dev_priv->gmbus_wait_queue);
}

static void dp_aux_irq_handler(struct drm_i915_private *dev_priv)
{
	wake_up_all(&dev_priv->gmbus_wait_queue);
}
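/*
 * dp_aux_irq_handler() reuses gmbus_wait_queue rather than a dedicated
 * wait queue: GMBUS and DP AUX completions are both waited for with
 * wait_event-style helpers, and a spurious wakeup of the other party
 * is harmless since every waiter rechecks its own completion condition.
 */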
#if defined(CONFIG_DEBUG_FS)
static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
					 enum pipe pipe,
					 u32 crc0, u32 crc1,
					 u32 crc2, u32 crc3,
					 u32 crc4)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
	struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc;
	u32 crcs[5] = { crc0, crc1, crc2, crc3, crc4 };

	trace_intel_pipe_crc(crtc, crcs);

	spin_lock(&pipe_crc->lock);
	/*
	 * For some not yet identified reason, the first CRC is
	 * bonkers. So let's just wait for the next vblank and read
	 * out the buggy result.
	 *
	 * On GEN8+ sometimes the second CRC is bonkers as well, so
	 * don't trust that one either.
	 */
	if (pipe_crc->skipped <= 0 ||
	    (DISPLAY_VER(dev_priv) >= 8 && pipe_crc->skipped == 1)) {
		pipe_crc->skipped++;
		spin_unlock(&pipe_crc->lock);
		return;
	}
	spin_unlock(&pipe_crc->lock);

	drm_crtc_add_crc_entry(&crtc->base, true,
			       drm_crtc_accurate_vblank_count(&crtc->base),
			       crcs);
}
#else
static inline void
display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
			     enum pipe pipe,
			     u32 crc0, u32 crc1,
			     u32 crc2, u32 crc3,
			     u32 crc4) {}
#endif

static void flip_done_handler(struct drm_i915_private *i915,
			      enum pipe pipe)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(i915, pipe);
	struct drm_crtc_state *crtc_state = crtc->base.state;
	struct drm_pending_vblank_event *e = crtc_state->event;
	struct drm_device *dev = &i915->drm;
	unsigned long irqflags;

	spin_lock_irqsave(&dev->event_lock, irqflags);

	crtc_state->event = NULL;

	drm_crtc_send_vblank_event(&crtc->base, e);

	spin_unlock_irqrestore(&dev->event_lock, irqflags);
}

static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	display_pipe_crc_irq_handler(dev_priv, pipe,
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_1_IVB(pipe)),
				     0, 0, 0, 0);
}

static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	display_pipe_crc_irq_handler(dev_priv, pipe,
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_1_IVB(pipe)),
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_2_IVB(pipe)),
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_3_IVB(pipe)),
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_4_IVB(pipe)),
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_5_IVB(pipe)));
}

static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	u32 res1, res2;

	if (DISPLAY_VER(dev_priv) >= 3)
		res1 = intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RES1_I915(pipe));
	else
		res1 = 0;

	if (DISPLAY_VER(dev_priv) >= 5 || IS_G4X(dev_priv))
		res2 = intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RES2_G4X(pipe));
	else
		res2 = 0;

	display_pipe_crc_irq_handler(dev_priv, pipe,
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RED(pipe)),
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_GREEN(pipe)),
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_BLUE(pipe)),
				     res1, res2);
}

static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		intel_uncore_write(&dev_priv->uncore, PIPESTAT(pipe),
				   PIPESTAT_INT_STATUS_MASK |
				   PIPE_FIFO_UNDERRUN_STATUS);

		dev_priv->pipestat_irq_mask[pipe] = 0;
	}
}

static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
				  u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	enum pipe pipe;

	spin_lock(&dev_priv->irq_lock);

	if (!dev_priv->display_irqs_enabled) {
		spin_unlock(&dev_priv->irq_lock);
		return;
	}

	for_each_pipe(dev_priv, pipe) {
		i915_reg_t reg;
		u32 status_mask, enable_mask, iir_bit = 0;

		/*
		 * PIPESTAT bits get signalled even when the interrupt is
		 * disabled with the mask bits, and some of the status bits do
		 * not generate interrupts at all (like the underrun bit).
		 * Hence we need to be careful that we only handle what we
		 * want to handle.
		 */
		/* fifo underruns are filtered in the underrun handler. */
		status_mask = PIPE_FIFO_UNDERRUN_STATUS;

		switch (pipe) {
		default:
		case PIPE_A:
			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
			break;
		case PIPE_B:
			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
			break;
		case PIPE_C:
			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
			break;
		}
		if (iir & iir_bit)
			status_mask |= dev_priv->pipestat_irq_mask[pipe];

		if (!status_mask)
			continue;

		reg = PIPESTAT(pipe);
		pipe_stats[pipe] = intel_uncore_read(&dev_priv->uncore, reg) & status_mask;
		enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

		/*
		 * Clear the PIPE*STAT regs before the IIR
		 *
		 * Toggle the enable bits to make sure we get an
		 * edge in the ISR pipe event bit if we don't clear
		 * all the enabled status bits. Otherwise the edge
		 * triggered IIR on i965/g4x wouldn't notice that
		 * an interrupt is still pending.
		 */
		if (pipe_stats[pipe]) {
			intel_uncore_write(&dev_priv->uncore, reg, pipe_stats[pipe]);
			intel_uncore_write(&dev_priv->uncore, reg, enable_mask);
		}
	}
	spin_unlock(&dev_priv->irq_lock);
}

static void i8xx_pipestat_irq_handler(struct drm_i915_private *dev_priv,
				      u16 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
			intel_handle_vblank(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}
}

static void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv,
				      u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	bool blc_event = false;
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
			intel_handle_vblank(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
			blc_event = true;

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (blc_event || (iir & I915_ASLE_INTERRUPT))
		intel_opregion_asle_intr(dev_priv);
}

static void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv,
				      u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	bool blc_event = false;
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
			intel_handle_vblank(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
			blc_event = true;

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (blc_event || (iir & I915_ASLE_INTERRUPT))
		intel_opregion_asle_intr(dev_priv);

	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		gmbus_irq_handler(dev_priv);
}
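/*
 * The pipestat handlers above differ mainly in what each generation can
 * signal: i8xx handles only vblank, CRC and underruns, i915 adds the
 * legacy backlight (ASLE) event, and i965/g4x switch to the start of
 * vblank status bit (PIPE_START_VBLANK_INTERRUPT_STATUS) and gain the
 * GMBUS status bit reported through pipe A's PIPESTAT.
 */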
static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
					    u32 pipe_stats[I915_MAX_PIPES])
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
			intel_handle_vblank(dev_priv, pipe);

		if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV)
			flip_done_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		gmbus_irq_handler(dev_priv);
}

static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
{
	u32 hotplug_status = 0, hotplug_status_mask;
	int i;

	if (IS_G4X(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		hotplug_status_mask = HOTPLUG_INT_STATUS_G4X |
			DP_AUX_CHANNEL_MASK_INT_STATUS_G4X;
	else
		hotplug_status_mask = HOTPLUG_INT_STATUS_I915;

	/*
	 * We absolutely have to clear all the pending interrupt
	 * bits in PORT_HOTPLUG_STAT. Otherwise the ISR port
	 * interrupt bit won't have an edge, and the i965/g4x
	 * edge triggered IIR will not notice that an interrupt
	 * is still pending. We can't use PORT_HOTPLUG_EN to
	 * guarantee the edge as the act of toggling the enable
	 * bits can itself generate a new hotplug interrupt :(
	 */
	for (i = 0; i < 10; i++) {
		u32 tmp = intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT) & hotplug_status_mask;

		if (tmp == 0)
			return hotplug_status;

		hotplug_status |= tmp;
		intel_uncore_write(&dev_priv->uncore, PORT_HOTPLUG_STAT, hotplug_status);
	}

	drm_WARN_ONCE(&dev_priv->drm, 1,
		      "PORT_HOTPLUG_STAT did not clear (0x%08x)\n",
		      intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT));

	return hotplug_status;
}

static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv,
				 u32 hotplug_status)
{
	u32 pin_mask = 0, long_mask = 0;
	u32 hotplug_trigger;

	if (IS_G4X(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
	else
		hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

	if (hotplug_trigger) {
		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
				   hotplug_trigger, hotplug_trigger,
				   dev_priv->hotplug.hpd,
				   i9xx_port_hotplug_long_detect);

		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
	}

	if ((IS_G4X(dev_priv) ||
	     IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
		dp_aux_irq_handler(dev_priv);
}
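/*
 * Flow shared by the VLV/CHV top-level handlers below: disable the
 * master enable and zero VLV_IER so a fresh 0->1 edge is guaranteed,
 * ack the GT/PM (or GEN8 master) sources, ack hotplug and PIPESTAT,
 * clear the single-buffered VLV_IIR last, then restore VLV_IER and the
 * master enable before running the deferred handlers.
 */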
static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 iir, gt_iir, pm_iir;
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 hotplug_status = 0;
		u32 ier = 0;

		gt_iir = intel_uncore_read(&dev_priv->uncore, GTIIR);
		pm_iir = intel_uncore_read(&dev_priv->uncore, GEN6_PMIIR);
		iir = intel_uncore_read(&dev_priv->uncore, VLV_IIR);

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			break;

		ret = IRQ_HANDLED;

		/*
		 * Theory on interrupt generation, based on empirical evidence:
		 *
		 * x = ((VLV_IIR & VLV_IER) ||
		 *      (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) &&
		 *       (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE)));
		 *
		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
		 * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to
		 * guarantee the CPU interrupt will be raised again even if we
		 * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR
		 * bits this time around.
		 */
		intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, 0);
		ier = intel_uncore_read(&dev_priv->uncore, VLV_IER);
		intel_uncore_write(&dev_priv->uncore, VLV_IER, 0);

		if (gt_iir)
			intel_uncore_write(&dev_priv->uncore, GTIIR, gt_iir);
		if (pm_iir)
			intel_uncore_write(&dev_priv->uncore, GEN6_PMIIR, pm_iir);

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/*
		 * Call regardless, as some status bits might not be
		 * signalled in iir
		 */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & (I915_LPE_PIPE_A_INTERRUPT |
			   I915_LPE_PIPE_B_INTERRUPT))
			intel_lpe_audio_irq_handler(dev_priv);

		/*
		 * VLV_IIR is single buffered, and reflects the level
		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
		 */
		if (iir)
			intel_uncore_write(&dev_priv->uncore, VLV_IIR, iir);

		intel_uncore_write(&dev_priv->uncore, VLV_IER, ier);
		intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);

		if (gt_iir)
			gen6_gt_irq_handler(&dev_priv->gt, gt_iir);
		if (pm_iir)
			gen6_rps_irq_handler(&dev_priv->gt.rps, pm_iir);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
	} while (0);

	pmu_irq_stats(dev_priv, ret);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}

static irqreturn_t cherryview_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 master_ctl, iir;
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 hotplug_status = 0;
		u32 ier = 0;

		master_ctl = intel_uncore_read(&dev_priv->uncore, GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
		iir = intel_uncore_read(&dev_priv->uncore, VLV_IIR);

		if (master_ctl == 0 && iir == 0)
			break;

		ret = IRQ_HANDLED;

		/*
		 * Theory on interrupt generation, based on empirical evidence:
		 *
		 * x = ((VLV_IIR & VLV_IER) ||
		 *      ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) &&
		 *       (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL)));
		 *
		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
		 * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to
		 * guarantee the CPU interrupt will be raised again even if we
		 * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL
		 * bits this time around.
		 */
		 */
		intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, 0);
		ier = intel_uncore_read(&dev_priv->uncore, VLV_IER);
		intel_uncore_write(&dev_priv->uncore, VLV_IER, 0);

		gen8_gt_irq_handler(&dev_priv->gt, master_ctl);

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & (I915_LPE_PIPE_A_INTERRUPT |
			   I915_LPE_PIPE_B_INTERRUPT |
			   I915_LPE_PIPE_C_INTERRUPT))
			intel_lpe_audio_irq_handler(dev_priv);

		/*
		 * VLV_IIR is single buffered, and reflects the level
		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
		 */
		if (iir)
			intel_uncore_write(&dev_priv->uncore, VLV_IIR, iir);

		intel_uncore_write(&dev_priv->uncore, VLV_IER, ier);
		intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
	} while (0);

	pmu_irq_stats(dev_priv, ret);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}

static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
				u32 hotplug_trigger)
{
	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;

	/*
	 * Somehow the PCH doesn't seem to really ack the interrupt to the CPU
	 * unless we touch the hotplug register, even if hotplug_trigger is
	 * zero. Not acking leads to "The master control interrupt lied (SDE)!"
	 * errors.
	 */
	dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
	if (!hotplug_trigger) {
		u32 mask = PORTA_HOTPLUG_STATUS_MASK |
			PORTD_HOTPLUG_STATUS_MASK |
			PORTC_HOTPLUG_STATUS_MASK |
			PORTB_HOTPLUG_STATUS_MASK;
		dig_hotplug_reg &= ~mask;
	}

	intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, dig_hotplug_reg);
	if (!hotplug_trigger)
		return;

	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
			   hotplug_trigger, dig_hotplug_reg,
			   dev_priv->hotplug.pch_hpd,
			   pch_port_hotplug_long_detect);

	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
}

static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
	enum pipe pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;

	ibx_hpd_irq_handler(dev_priv, hotplug_trigger);

	if (pch_iir & SDE_AUDIO_POWER_MASK) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
			       SDE_AUDIO_POWER_SHIFT);
		drm_dbg(&dev_priv->drm, "PCH audio power change on port %c\n",
			port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK)
		dp_aux_irq_handler(dev_priv);

	if (pch_iir & SDE_GMBUS)
		gmbus_irq_handler(dev_priv);

	if (pch_iir & SDE_AUDIO_HDCP_MASK)
		drm_dbg(&dev_priv->drm, "PCH HDCP audio interrupt\n");

	if (pch_iir & SDE_AUDIO_TRANS_MASK)
		drm_dbg(&dev_priv->drm, "PCH transcoder audio interrupt\n");

	if (pch_iir & SDE_POISON)
		drm_err(&dev_priv->drm, "PCH poison interrupt\n");

	if (pch_iir & SDE_FDI_MASK) {
		for_each_pipe(dev_priv, pipe)
			drm_dbg(&dev_priv->drm, "  pipe %c FDI IIR: 0x%08x\n",
				pipe_name(pipe),
				intel_uncore_read(&dev_priv->uncore, FDI_RX_IIR(pipe)));
	}

	if (pch_iir &
(SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE)) 1886 drm_dbg(&dev_priv->drm, "PCH transcoder CRC done interrupt\n"); 1887 1888 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR)) 1889 drm_dbg(&dev_priv->drm, 1890 "PCH transcoder CRC error interrupt\n"); 1891 1892 if (pch_iir & SDE_TRANSA_FIFO_UNDER) 1893 intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A); 1894 1895 if (pch_iir & SDE_TRANSB_FIFO_UNDER) 1896 intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_B); 1897 } 1898 1899 static void ivb_err_int_handler(struct drm_i915_private *dev_priv) 1900 { 1901 u32 err_int = intel_uncore_read(&dev_priv->uncore, GEN7_ERR_INT); 1902 enum pipe pipe; 1903 1904 if (err_int & ERR_INT_POISON) 1905 drm_err(&dev_priv->drm, "Poison interrupt\n"); 1906 1907 for_each_pipe(dev_priv, pipe) { 1908 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) 1909 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 1910 1911 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) { 1912 if (IS_IVYBRIDGE(dev_priv)) 1913 ivb_pipe_crc_irq_handler(dev_priv, pipe); 1914 else 1915 hsw_pipe_crc_irq_handler(dev_priv, pipe); 1916 } 1917 } 1918 1919 intel_uncore_write(&dev_priv->uncore, GEN7_ERR_INT, err_int); 1920 } 1921 1922 static void cpt_serr_int_handler(struct drm_i915_private *dev_priv) 1923 { 1924 u32 serr_int = intel_uncore_read(&dev_priv->uncore, SERR_INT); 1925 enum pipe pipe; 1926 1927 if (serr_int & SERR_INT_POISON) 1928 drm_err(&dev_priv->drm, "PCH poison interrupt\n"); 1929 1930 for_each_pipe(dev_priv, pipe) 1931 if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe)) 1932 intel_pch_fifo_underrun_irq_handler(dev_priv, pipe); 1933 1934 intel_uncore_write(&dev_priv->uncore, SERR_INT, serr_int); 1935 } 1936 1937 static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) 1938 { 1939 enum pipe pipe; 1940 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; 1941 1942 ibx_hpd_irq_handler(dev_priv, hotplug_trigger); 1943 1944 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) { 1945 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> 1946 SDE_AUDIO_POWER_SHIFT_CPT); 1947 drm_dbg(&dev_priv->drm, "PCH audio power change on port %c\n", 1948 port_name(port)); 1949 } 1950 1951 if (pch_iir & SDE_AUX_MASK_CPT) 1952 dp_aux_irq_handler(dev_priv); 1953 1954 if (pch_iir & SDE_GMBUS_CPT) 1955 gmbus_irq_handler(dev_priv); 1956 1957 if (pch_iir & SDE_AUDIO_CP_REQ_CPT) 1958 drm_dbg(&dev_priv->drm, "Audio CP request interrupt\n"); 1959 1960 if (pch_iir & SDE_AUDIO_CP_CHG_CPT) 1961 drm_dbg(&dev_priv->drm, "Audio CP change interrupt\n"); 1962 1963 if (pch_iir & SDE_FDI_MASK_CPT) { 1964 for_each_pipe(dev_priv, pipe) 1965 drm_dbg(&dev_priv->drm, " pipe %c FDI IIR: 0x%08x\n", 1966 pipe_name(pipe), 1967 intel_uncore_read(&dev_priv->uncore, FDI_RX_IIR(pipe))); 1968 } 1969 1970 if (pch_iir & SDE_ERROR_CPT) 1971 cpt_serr_int_handler(dev_priv); 1972 } 1973 1974 static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) 1975 { 1976 u32 ddi_hotplug_trigger = pch_iir & SDE_DDI_HOTPLUG_MASK_ICP; 1977 u32 tc_hotplug_trigger = pch_iir & SDE_TC_HOTPLUG_MASK_ICP; 1978 u32 pin_mask = 0, long_mask = 0; 1979 1980 if (ddi_hotplug_trigger) { 1981 u32 dig_hotplug_reg; 1982 1983 dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, SHOTPLUG_CTL_DDI); 1984 intel_uncore_write(&dev_priv->uncore, SHOTPLUG_CTL_DDI, dig_hotplug_reg); 1985 1986 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 1987 ddi_hotplug_trigger, dig_hotplug_reg, 1988 dev_priv->hotplug.pch_hpd, 1989 icp_ddi_port_hotplug_long_detect); 1990 } 1991 1992 if (tc_hotplug_trigger) { 1993 u32 
dig_hotplug_reg; 1994 1995 dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, SHOTPLUG_CTL_TC); 1996 intel_uncore_write(&dev_priv->uncore, SHOTPLUG_CTL_TC, dig_hotplug_reg); 1997 1998 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 1999 tc_hotplug_trigger, dig_hotplug_reg, 2000 dev_priv->hotplug.pch_hpd, 2001 icp_tc_port_hotplug_long_detect); 2002 } 2003 2004 if (pin_mask) 2005 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2006 2007 if (pch_iir & SDE_GMBUS_ICP) 2008 gmbus_irq_handler(dev_priv); 2009 } 2010 2011 static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) 2012 { 2013 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT & 2014 ~SDE_PORTE_HOTPLUG_SPT; 2015 u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT; 2016 u32 pin_mask = 0, long_mask = 0; 2017 2018 if (hotplug_trigger) { 2019 u32 dig_hotplug_reg; 2020 2021 dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG); 2022 intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, dig_hotplug_reg); 2023 2024 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 2025 hotplug_trigger, dig_hotplug_reg, 2026 dev_priv->hotplug.pch_hpd, 2027 spt_port_hotplug_long_detect); 2028 } 2029 2030 if (hotplug2_trigger) { 2031 u32 dig_hotplug_reg; 2032 2033 dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG2); 2034 intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG2, dig_hotplug_reg); 2035 2036 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 2037 hotplug2_trigger, dig_hotplug_reg, 2038 dev_priv->hotplug.pch_hpd, 2039 spt_port_hotplug2_long_detect); 2040 } 2041 2042 if (pin_mask) 2043 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2044 2045 if (pch_iir & SDE_GMBUS_CPT) 2046 gmbus_irq_handler(dev_priv); 2047 } 2048 2049 static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv, 2050 u32 hotplug_trigger) 2051 { 2052 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; 2053 2054 dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL); 2055 intel_uncore_write(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg); 2056 2057 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 2058 hotplug_trigger, dig_hotplug_reg, 2059 dev_priv->hotplug.hpd, 2060 ilk_port_hotplug_long_detect); 2061 2062 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2063 } 2064 2065 static void ilk_display_irq_handler(struct drm_i915_private *dev_priv, 2066 u32 de_iir) 2067 { 2068 enum pipe pipe; 2069 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG; 2070 2071 if (hotplug_trigger) 2072 ilk_hpd_irq_handler(dev_priv, hotplug_trigger); 2073 2074 if (de_iir & DE_AUX_CHANNEL_A) 2075 dp_aux_irq_handler(dev_priv); 2076 2077 if (de_iir & DE_GSE) 2078 intel_opregion_asle_intr(dev_priv); 2079 2080 if (de_iir & DE_POISON) 2081 drm_err(&dev_priv->drm, "Poison interrupt\n"); 2082 2083 for_each_pipe(dev_priv, pipe) { 2084 if (de_iir & DE_PIPE_VBLANK(pipe)) 2085 intel_handle_vblank(dev_priv, pipe); 2086 2087 if (de_iir & DE_PLANE_FLIP_DONE(pipe)) 2088 flip_done_handler(dev_priv, pipe); 2089 2090 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe)) 2091 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2092 2093 if (de_iir & DE_PIPE_CRC_DONE(pipe)) 2094 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 2095 } 2096 2097 /* check event from PCH */ 2098 if (de_iir & DE_PCH_EVENT) { 2099 u32 pch_iir = intel_uncore_read(&dev_priv->uncore, SDEIIR); 2100 2101 if (HAS_PCH_CPT(dev_priv)) 2102 cpt_irq_handler(dev_priv, pch_iir); 2103 else 2104 ibx_irq_handler(dev_priv, pch_iir); 2105 2106 /* 
should clear PCH hotplug event before clearing CPU irq */
		intel_uncore_write(&dev_priv->uncore, SDEIIR, pch_iir);
	}

	if (DISPLAY_VER(dev_priv) == 5 && de_iir & DE_PCU_EVENT)
		gen5_rps_irq_handler(&dev_priv->gt.rps);
}

static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
				    u32 de_iir)
{
	enum pipe pipe;
	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;

	if (hotplug_trigger)
		ilk_hpd_irq_handler(dev_priv, hotplug_trigger);

	if (de_iir & DE_ERR_INT_IVB)
		ivb_err_int_handler(dev_priv);

	if (de_iir & DE_AUX_CHANNEL_A_IVB)
		dp_aux_irq_handler(dev_priv);

	if (de_iir & DE_GSE_IVB)
		intel_opregion_asle_intr(dev_priv);

	for_each_pipe(dev_priv, pipe) {
		if (de_iir & DE_PIPE_VBLANK_IVB(pipe))
			intel_handle_vblank(dev_priv, pipe);

		if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe))
			flip_done_handler(dev_priv, pipe);
	}

	/* check event from PCH */
	if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) {
		u32 pch_iir = intel_uncore_read(&dev_priv->uncore, SDEIIR);

		cpt_irq_handler(dev_priv, pch_iir);

		/* clear PCH hotplug event before clearing CPU irq */
		intel_uncore_write(&dev_priv->uncore, SDEIIR, pch_iir);
	}
}

/*
 * To handle irqs with the minimum potential races with fresh interrupts, we:
 * 1 - Disable Master Interrupt Control.
 * 2 - Find the source(s) of the interrupt.
 * 3 - Clear the Interrupt Identity bits (IIR).
 * 4 - Process the interrupt(s) that had bits set in the IIRs.
 * 5 - Re-enable Master Interrupt Control.
 */
static irqreturn_t ilk_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *i915 = arg;
	void __iomem * const regs = i915->uncore.regs;
	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
	irqreturn_t ret = IRQ_NONE;

	if (unlikely(!intel_irqs_enabled(i915)))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&i915->runtime_pm);

	/* disable master interrupt before clearing iir */
	de_ier = raw_reg_read(regs, DEIER);
	raw_reg_write(regs, DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);

	/*
	 * Disable south interrupts. We'll only write to SDEIIR once, so
	 * further interrupts will be stored on its back queue, and then we'll
	 * be able to process them after we restore SDEIER (as soon as we
	 * restore it, we'll get an interrupt if SDEIIR still has something to
	 * process due to its back queue).
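	 *
	 * Roughly, the intended ordering is (a sketch, not additional code):
	 *
	 *   sde_ier = SDEIER; SDEIER = 0;       // gate south interrupts
	 *   pch_iir = SDEIIR; SDEIIR = pch_iir; // single read + ack
	 *   ... process pch_iir ...
	 *   SDEIER = sde_ier;  // re-arm; a still-set SDEIIR re-raises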
*/ 2181 if (!HAS_PCH_NOP(i915)) { 2182 sde_ier = raw_reg_read(regs, SDEIER); 2183 raw_reg_write(regs, SDEIER, 0); 2184 } 2185 2186 /* Find, clear, then process each source of interrupt */ 2187 2188 gt_iir = raw_reg_read(regs, GTIIR); 2189 if (gt_iir) { 2190 raw_reg_write(regs, GTIIR, gt_iir); 2191 if (GRAPHICS_VER(i915) >= 6) 2192 gen6_gt_irq_handler(&i915->gt, gt_iir); 2193 else 2194 gen5_gt_irq_handler(&i915->gt, gt_iir); 2195 ret = IRQ_HANDLED; 2196 } 2197 2198 de_iir = raw_reg_read(regs, DEIIR); 2199 if (de_iir) { 2200 raw_reg_write(regs, DEIIR, de_iir); 2201 if (DISPLAY_VER(i915) >= 7) 2202 ivb_display_irq_handler(i915, de_iir); 2203 else 2204 ilk_display_irq_handler(i915, de_iir); 2205 ret = IRQ_HANDLED; 2206 } 2207 2208 if (GRAPHICS_VER(i915) >= 6) { 2209 u32 pm_iir = raw_reg_read(regs, GEN6_PMIIR); 2210 if (pm_iir) { 2211 raw_reg_write(regs, GEN6_PMIIR, pm_iir); 2212 gen6_rps_irq_handler(&i915->gt.rps, pm_iir); 2213 ret = IRQ_HANDLED; 2214 } 2215 } 2216 2217 raw_reg_write(regs, DEIER, de_ier); 2218 if (sde_ier) 2219 raw_reg_write(regs, SDEIER, sde_ier); 2220 2221 pmu_irq_stats(i915, ret); 2222 2223 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2224 enable_rpm_wakeref_asserts(&i915->runtime_pm); 2225 2226 return ret; 2227 } 2228 2229 static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv, 2230 u32 hotplug_trigger) 2231 { 2232 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; 2233 2234 dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG); 2235 intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, dig_hotplug_reg); 2236 2237 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 2238 hotplug_trigger, dig_hotplug_reg, 2239 dev_priv->hotplug.hpd, 2240 bxt_port_hotplug_long_detect); 2241 2242 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2243 } 2244 2245 static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir) 2246 { 2247 u32 pin_mask = 0, long_mask = 0; 2248 u32 trigger_tc = iir & GEN11_DE_TC_HOTPLUG_MASK; 2249 u32 trigger_tbt = iir & GEN11_DE_TBT_HOTPLUG_MASK; 2250 2251 if (trigger_tc) { 2252 u32 dig_hotplug_reg; 2253 2254 dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL); 2255 intel_uncore_write(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL, dig_hotplug_reg); 2256 2257 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 2258 trigger_tc, dig_hotplug_reg, 2259 dev_priv->hotplug.hpd, 2260 gen11_port_hotplug_long_detect); 2261 } 2262 2263 if (trigger_tbt) { 2264 u32 dig_hotplug_reg; 2265 2266 dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL); 2267 intel_uncore_write(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL, dig_hotplug_reg); 2268 2269 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 2270 trigger_tbt, dig_hotplug_reg, 2271 dev_priv->hotplug.hpd, 2272 gen11_port_hotplug_long_detect); 2273 } 2274 2275 if (pin_mask) 2276 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2277 else 2278 drm_err(&dev_priv->drm, 2279 "Unexpected DE HPD interrupt 0x%08x\n", iir); 2280 } 2281 2282 static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv) 2283 { 2284 u32 mask; 2285 2286 if (DISPLAY_VER(dev_priv) >= 13) 2287 return TGL_DE_PORT_AUX_DDIA | 2288 TGL_DE_PORT_AUX_DDIB | 2289 TGL_DE_PORT_AUX_DDIC | 2290 XELPD_DE_PORT_AUX_DDID | 2291 XELPD_DE_PORT_AUX_DDIE | 2292 TGL_DE_PORT_AUX_USBC1 | 2293 TGL_DE_PORT_AUX_USBC2 | 2294 TGL_DE_PORT_AUX_USBC3 | 2295 TGL_DE_PORT_AUX_USBC4; 2296 else if (DISPLAY_VER(dev_priv) >= 12) 2297 return TGL_DE_PORT_AUX_DDIA | 2298 
		       TGL_DE_PORT_AUX_DDIB |
		       TGL_DE_PORT_AUX_DDIC |
		       TGL_DE_PORT_AUX_USBC1 |
		       TGL_DE_PORT_AUX_USBC2 |
		       TGL_DE_PORT_AUX_USBC3 |
		       TGL_DE_PORT_AUX_USBC4 |
		       TGL_DE_PORT_AUX_USBC5 |
		       TGL_DE_PORT_AUX_USBC6;

	mask = GEN8_AUX_CHANNEL_A;
	if (DISPLAY_VER(dev_priv) >= 9)
		mask |= GEN9_AUX_CHANNEL_B |
			GEN9_AUX_CHANNEL_C |
			GEN9_AUX_CHANNEL_D;

	if (DISPLAY_VER(dev_priv) == 11) {
		mask |= ICL_AUX_CHANNEL_F;
		mask |= ICL_AUX_CHANNEL_E;
	}

	return mask;
}

static u32 gen8_de_pipe_fault_mask(struct drm_i915_private *dev_priv)
{
	if (DISPLAY_VER(dev_priv) >= 13 || HAS_D12_PLANE_MINIMIZATION(dev_priv))
		return RKL_DE_PIPE_IRQ_FAULT_ERRORS;
	else if (DISPLAY_VER(dev_priv) >= 11)
		return GEN11_DE_PIPE_IRQ_FAULT_ERRORS;
	else if (DISPLAY_VER(dev_priv) >= 9)
		return GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
	else
		return GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
}

static void
gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
{
	bool found = false;

	if (iir & GEN8_DE_MISC_GSE) {
		intel_opregion_asle_intr(dev_priv);
		found = true;
	}

	if (iir & GEN8_DE_EDP_PSR) {
		struct intel_encoder *encoder;
		u32 psr_iir;
		i915_reg_t iir_reg;

		for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
			struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

			if (DISPLAY_VER(dev_priv) >= 12)
				iir_reg = TRANS_PSR_IIR(intel_dp->psr.transcoder);
			else
				iir_reg = EDP_PSR_IIR;

			psr_iir = intel_uncore_read(&dev_priv->uncore, iir_reg);
			intel_uncore_write(&dev_priv->uncore, iir_reg, psr_iir);

			if (psr_iir)
				found = true;

			intel_psr_irq_handler(intel_dp, psr_iir);

			/* prior to GEN12 there is only one EDP PSR */
			if (DISPLAY_VER(dev_priv) < 12)
				break;
		}
	}

	if (!found)
		drm_err(&dev_priv->drm, "Unexpected DE Misc interrupt\n");
}

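/*
 * Note: for command-mode DSI panels the panel's TE (tearing effect)
 * signal, not a CRTC timing generator, marks the end of a frame, which
 * is why the TE handler below ends up driving the vblank machinery.
 */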
static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv,
					   u32 te_trigger)
{
	enum pipe pipe = INVALID_PIPE;
	enum transcoder dsi_trans;
	enum port port;
	u32 val, tmp;

	/*
	 * In case of dual link, TE comes from DSI_1;
	 * this is to check if dual link is enabled.
	 */
	val = intel_uncore_read(&dev_priv->uncore, TRANS_DDI_FUNC_CTL2(TRANSCODER_DSI_0));
	val &= PORT_SYNC_MODE_ENABLE;

	/*
	 * If dual link is enabled, then read the DSI_0
	 * transcoder registers.
	 */
	port = ((te_trigger & DSI1_TE && val) || (te_trigger & DSI0_TE)) ?
	       PORT_A : PORT_B;
	dsi_trans = (port == PORT_A) ? TRANSCODER_DSI_0 : TRANSCODER_DSI_1;

	/* Check if DSI is configured in command mode */
	val = intel_uncore_read(&dev_priv->uncore, DSI_TRANS_FUNC_CONF(dsi_trans));
	val = val & OP_MODE_MASK;

	if (val != CMD_MODE_NO_GATE && val != CMD_MODE_TE_GATE) {
		drm_err(&dev_priv->drm, "DSI transcoder not configured in command mode\n");
		return;
	}

	/* Get the PIPE for handling the VBLANK event */
	val = intel_uncore_read(&dev_priv->uncore, TRANS_DDI_FUNC_CTL(dsi_trans));
	switch (val & TRANS_DDI_EDP_INPUT_MASK) {
	case TRANS_DDI_EDP_INPUT_A_ON:
		pipe = PIPE_A;
		break;
	case TRANS_DDI_EDP_INPUT_B_ONOFF:
		pipe = PIPE_B;
		break;
	case TRANS_DDI_EDP_INPUT_C_ONOFF:
		pipe = PIPE_C;
		break;
	default:
		drm_err(&dev_priv->drm, "Invalid PIPE\n");
		return;
	}

	intel_handle_vblank(dev_priv, pipe);

	/* clear TE in dsi IIR */
	port = (te_trigger & DSI1_TE) ? PORT_B : PORT_A;
	tmp = intel_uncore_read(&dev_priv->uncore, DSI_INTR_IDENT_REG(port));
	intel_uncore_write(&dev_priv->uncore, DSI_INTR_IDENT_REG(port), tmp);
}

static u32 gen8_de_pipe_flip_done_mask(struct drm_i915_private *i915)
{
	if (DISPLAY_VER(i915) >= 9)
		return GEN9_PIPE_PLANE1_FLIP_DONE;
	else
		return GEN8_PIPE_PRIMARY_FLIP_DONE;
}

u32 gen8_de_pipe_underrun_mask(struct drm_i915_private *dev_priv)
{
	u32 mask = GEN8_PIPE_FIFO_UNDERRUN;

	if (DISPLAY_VER(dev_priv) >= 13)
		mask |= XELPD_PIPE_SOFT_UNDERRUN |
			XELPD_PIPE_HARD_UNDERRUN;

	return mask;
}

static irqreturn_t
gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
{
	irqreturn_t ret = IRQ_NONE;
	u32 iir;
	enum pipe pipe;

	drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_DISPLAY(dev_priv));

	if (master_ctl & GEN8_DE_MISC_IRQ) {
		iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_MISC_IIR);
		if (iir) {
			intel_uncore_write(&dev_priv->uncore, GEN8_DE_MISC_IIR, iir);
			ret = IRQ_HANDLED;
			gen8_de_misc_irq_handler(dev_priv, iir);
		} else {
			drm_err(&dev_priv->drm,
				"The master control interrupt lied (DE MISC)!\n");
		}
	}

	if (DISPLAY_VER(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) {
		iir = intel_uncore_read(&dev_priv->uncore, GEN11_DE_HPD_IIR);
		if (iir) {
			intel_uncore_write(&dev_priv->uncore, GEN11_DE_HPD_IIR, iir);
			ret = IRQ_HANDLED;
			gen11_hpd_irq_handler(dev_priv, iir);
		} else {
			drm_err(&dev_priv->drm,
				"The master control interrupt lied (DE HPD)!\n");
		}
	}

	if (master_ctl & GEN8_DE_PORT_IRQ) {
		iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PORT_IIR);
		if (iir) {
			bool found = false;

			intel_uncore_write(&dev_priv->uncore, GEN8_DE_PORT_IIR, iir);
			ret = IRQ_HANDLED;

			if (iir & gen8_de_port_aux_mask(dev_priv)) {
				dp_aux_irq_handler(dev_priv);
				found = true;
			}

			if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
				u32 hotplug_trigger = iir & BXT_DE_PORT_HOTPLUG_MASK;

				if (hotplug_trigger) {
					bxt_hpd_irq_handler(dev_priv, hotplug_trigger);
					found = true;
				}
			} else if (IS_BROADWELL(dev_priv)) {
				u32 hotplug_trigger = iir & BDW_DE_PORT_HOTPLUG_MASK;

				if (hotplug_trigger) {
					ilk_hpd_irq_handler(dev_priv, hotplug_trigger);
					found = true;
				}
			}

			if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
2514 (iir & BXT_DE_PORT_GMBUS)) { 2515 gmbus_irq_handler(dev_priv); 2516 found = true; 2517 } 2518 2519 if (DISPLAY_VER(dev_priv) >= 11) { 2520 u32 te_trigger = iir & (DSI0_TE | DSI1_TE); 2521 2522 if (te_trigger) { 2523 gen11_dsi_te_interrupt_handler(dev_priv, te_trigger); 2524 found = true; 2525 } 2526 } 2527 2528 if (!found) 2529 drm_err(&dev_priv->drm, 2530 "Unexpected DE Port interrupt\n"); 2531 } 2532 else 2533 drm_err(&dev_priv->drm, 2534 "The master control interrupt lied (DE PORT)!\n"); 2535 } 2536 2537 for_each_pipe(dev_priv, pipe) { 2538 u32 fault_errors; 2539 2540 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe))) 2541 continue; 2542 2543 iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PIPE_IIR(pipe)); 2544 if (!iir) { 2545 drm_err(&dev_priv->drm, 2546 "The master control interrupt lied (DE PIPE)!\n"); 2547 continue; 2548 } 2549 2550 ret = IRQ_HANDLED; 2551 intel_uncore_write(&dev_priv->uncore, GEN8_DE_PIPE_IIR(pipe), iir); 2552 2553 if (iir & GEN8_PIPE_VBLANK) 2554 intel_handle_vblank(dev_priv, pipe); 2555 2556 if (iir & gen8_de_pipe_flip_done_mask(dev_priv)) 2557 flip_done_handler(dev_priv, pipe); 2558 2559 if (iir & GEN8_PIPE_CDCLK_CRC_DONE) 2560 hsw_pipe_crc_irq_handler(dev_priv, pipe); 2561 2562 if (iir & gen8_de_pipe_underrun_mask(dev_priv)) 2563 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2564 2565 fault_errors = iir & gen8_de_pipe_fault_mask(dev_priv); 2566 if (fault_errors) 2567 drm_err(&dev_priv->drm, 2568 "Fault errors on pipe %c: 0x%08x\n", 2569 pipe_name(pipe), 2570 fault_errors); 2571 } 2572 2573 if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) && 2574 master_ctl & GEN8_DE_PCH_IRQ) { 2575 /* 2576 * FIXME(BDW): Assume for now that the new interrupt handling 2577 * scheme also closed the SDE interrupt handling race we've seen 2578 * on older pch-split platforms. But this needs testing. 2579 */ 2580 iir = intel_uncore_read(&dev_priv->uncore, SDEIIR); 2581 if (iir) { 2582 intel_uncore_write(&dev_priv->uncore, SDEIIR, iir); 2583 ret = IRQ_HANDLED; 2584 2585 if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) 2586 icp_irq_handler(dev_priv, iir); 2587 else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT) 2588 spt_irq_handler(dev_priv, iir); 2589 else 2590 cpt_irq_handler(dev_priv, iir); 2591 } else { 2592 /* 2593 * Like on previous PCH there seems to be something 2594 * fishy going on with forwarding PCH interrupts. 2595 */ 2596 drm_dbg(&dev_priv->drm, 2597 "The master control interrupt lied (SDE)!\n"); 2598 } 2599 } 2600 2601 return ret; 2602 } 2603 2604 static inline u32 gen8_master_intr_disable(void __iomem * const regs) 2605 { 2606 raw_reg_write(regs, GEN8_MASTER_IRQ, 0); 2607 2608 /* 2609 * Now with master disabled, get a sample of level indications 2610 * for this interrupt. Indications will be cleared on related acks. 2611 * New indications can and will light up during processing, 2612 * and will generate new interrupt after enabling master. 
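	 *
	 * I.e. the returned value is a one-shot snapshot to iterate over;
	 * anything that latches afterwards is not lost, it simply fires
	 * once the master bit is set again.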
2613 */ 2614 return raw_reg_read(regs, GEN8_MASTER_IRQ); 2615 } 2616 2617 static inline void gen8_master_intr_enable(void __iomem * const regs) 2618 { 2619 raw_reg_write(regs, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 2620 } 2621 2622 static irqreturn_t gen8_irq_handler(int irq, void *arg) 2623 { 2624 struct drm_i915_private *dev_priv = arg; 2625 void __iomem * const regs = dev_priv->uncore.regs; 2626 u32 master_ctl; 2627 2628 if (!intel_irqs_enabled(dev_priv)) 2629 return IRQ_NONE; 2630 2631 master_ctl = gen8_master_intr_disable(regs); 2632 if (!master_ctl) { 2633 gen8_master_intr_enable(regs); 2634 return IRQ_NONE; 2635 } 2636 2637 /* Find, queue (onto bottom-halves), then clear each source */ 2638 gen8_gt_irq_handler(&dev_priv->gt, master_ctl); 2639 2640 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2641 if (master_ctl & ~GEN8_GT_IRQS) { 2642 disable_rpm_wakeref_asserts(&dev_priv->runtime_pm); 2643 gen8_de_irq_handler(dev_priv, master_ctl); 2644 enable_rpm_wakeref_asserts(&dev_priv->runtime_pm); 2645 } 2646 2647 gen8_master_intr_enable(regs); 2648 2649 pmu_irq_stats(dev_priv, IRQ_HANDLED); 2650 2651 return IRQ_HANDLED; 2652 } 2653 2654 static u32 2655 gen11_gu_misc_irq_ack(struct intel_gt *gt, const u32 master_ctl) 2656 { 2657 void __iomem * const regs = gt->uncore->regs; 2658 u32 iir; 2659 2660 if (!(master_ctl & GEN11_GU_MISC_IRQ)) 2661 return 0; 2662 2663 iir = raw_reg_read(regs, GEN11_GU_MISC_IIR); 2664 if (likely(iir)) 2665 raw_reg_write(regs, GEN11_GU_MISC_IIR, iir); 2666 2667 return iir; 2668 } 2669 2670 static void 2671 gen11_gu_misc_irq_handler(struct intel_gt *gt, const u32 iir) 2672 { 2673 if (iir & GEN11_GU_MISC_GSE) 2674 intel_opregion_asle_intr(gt->i915); 2675 } 2676 2677 static inline u32 gen11_master_intr_disable(void __iomem * const regs) 2678 { 2679 raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0); 2680 2681 /* 2682 * Now with master disabled, get a sample of level indications 2683 * for this interrupt. Indications will be cleared on related acks. 2684 * New indications can and will light up during processing, 2685 * and will generate new interrupt after enabling master. 2686 */ 2687 return raw_reg_read(regs, GEN11_GFX_MSTR_IRQ); 2688 } 2689 2690 static inline void gen11_master_intr_enable(void __iomem * const regs) 2691 { 2692 raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ); 2693 } 2694 2695 static void 2696 gen11_display_irq_handler(struct drm_i915_private *i915) 2697 { 2698 void __iomem * const regs = i915->uncore.regs; 2699 const u32 disp_ctl = raw_reg_read(regs, GEN11_DISPLAY_INT_CTL); 2700 2701 disable_rpm_wakeref_asserts(&i915->runtime_pm); 2702 /* 2703 * GEN11_DISPLAY_INT_CTL has same format as GEN8_MASTER_IRQ 2704 * for the display related bits. 
2705 */ 2706 raw_reg_write(regs, GEN11_DISPLAY_INT_CTL, 0x0); 2707 gen8_de_irq_handler(i915, disp_ctl); 2708 raw_reg_write(regs, GEN11_DISPLAY_INT_CTL, 2709 GEN11_DISPLAY_IRQ_ENABLE); 2710 2711 enable_rpm_wakeref_asserts(&i915->runtime_pm); 2712 } 2713 2714 static irqreturn_t gen11_irq_handler(int irq, void *arg) 2715 { 2716 struct drm_i915_private *i915 = arg; 2717 void __iomem * const regs = i915->uncore.regs; 2718 struct intel_gt *gt = &i915->gt; 2719 u32 master_ctl; 2720 u32 gu_misc_iir; 2721 2722 if (!intel_irqs_enabled(i915)) 2723 return IRQ_NONE; 2724 2725 master_ctl = gen11_master_intr_disable(regs); 2726 if (!master_ctl) { 2727 gen11_master_intr_enable(regs); 2728 return IRQ_NONE; 2729 } 2730 2731 /* Find, queue (onto bottom-halves), then clear each source */ 2732 gen11_gt_irq_handler(gt, master_ctl); 2733 2734 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2735 if (master_ctl & GEN11_DISPLAY_IRQ) 2736 gen11_display_irq_handler(i915); 2737 2738 gu_misc_iir = gen11_gu_misc_irq_ack(gt, master_ctl); 2739 2740 gen11_master_intr_enable(regs); 2741 2742 gen11_gu_misc_irq_handler(gt, gu_misc_iir); 2743 2744 pmu_irq_stats(i915, IRQ_HANDLED); 2745 2746 return IRQ_HANDLED; 2747 } 2748 2749 static inline u32 dg1_master_intr_disable(void __iomem * const regs) 2750 { 2751 u32 val; 2752 2753 /* First disable interrupts */ 2754 raw_reg_write(regs, DG1_MSTR_TILE_INTR, 0); 2755 2756 /* Get the indication levels and ack the master unit */ 2757 val = raw_reg_read(regs, DG1_MSTR_TILE_INTR); 2758 if (unlikely(!val)) 2759 return 0; 2760 2761 raw_reg_write(regs, DG1_MSTR_TILE_INTR, val); 2762 2763 return val; 2764 } 2765 2766 static inline void dg1_master_intr_enable(void __iomem * const regs) 2767 { 2768 raw_reg_write(regs, DG1_MSTR_TILE_INTR, DG1_MSTR_IRQ); 2769 } 2770 2771 static irqreturn_t dg1_irq_handler(int irq, void *arg) 2772 { 2773 struct drm_i915_private * const i915 = arg; 2774 struct intel_gt *gt = &i915->gt; 2775 void __iomem * const regs = i915->uncore.regs; 2776 u32 master_tile_ctl, master_ctl; 2777 u32 gu_misc_iir; 2778 2779 if (!intel_irqs_enabled(i915)) 2780 return IRQ_NONE; 2781 2782 master_tile_ctl = dg1_master_intr_disable(regs); 2783 if (!master_tile_ctl) { 2784 dg1_master_intr_enable(regs); 2785 return IRQ_NONE; 2786 } 2787 2788 /* FIXME: we only support tile 0 for now. 
*/ 2789 if (master_tile_ctl & DG1_MSTR_TILE(0)) { 2790 master_ctl = raw_reg_read(regs, GEN11_GFX_MSTR_IRQ); 2791 raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, master_ctl); 2792 } else { 2793 DRM_ERROR("Tile not supported: 0x%08x\n", master_tile_ctl); 2794 dg1_master_intr_enable(regs); 2795 return IRQ_NONE; 2796 } 2797 2798 gen11_gt_irq_handler(gt, master_ctl); 2799 2800 if (master_ctl & GEN11_DISPLAY_IRQ) 2801 gen11_display_irq_handler(i915); 2802 2803 gu_misc_iir = gen11_gu_misc_irq_ack(gt, master_ctl); 2804 2805 dg1_master_intr_enable(regs); 2806 2807 gen11_gu_misc_irq_handler(gt, gu_misc_iir); 2808 2809 pmu_irq_stats(i915, IRQ_HANDLED); 2810 2811 return IRQ_HANDLED; 2812 } 2813 2814 /* Called from drm generic code, passed 'crtc' which 2815 * we use as a pipe index 2816 */ 2817 int i8xx_enable_vblank(struct drm_crtc *crtc) 2818 { 2819 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 2820 enum pipe pipe = to_intel_crtc(crtc)->pipe; 2821 unsigned long irqflags; 2822 2823 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2824 i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS); 2825 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2826 2827 return 0; 2828 } 2829 2830 int i915gm_enable_vblank(struct drm_crtc *crtc) 2831 { 2832 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 2833 2834 /* 2835 * Vblank interrupts fail to wake the device up from C2+. 2836 * Disabling render clock gating during C-states avoids 2837 * the problem. There is a small power cost so we do this 2838 * only when vblank interrupts are actually enabled. 2839 */ 2840 if (dev_priv->vblank_enabled++ == 0) 2841 intel_uncore_write(&dev_priv->uncore, SCPD0, _MASKED_BIT_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE)); 2842 2843 return i8xx_enable_vblank(crtc); 2844 } 2845 2846 int i965_enable_vblank(struct drm_crtc *crtc) 2847 { 2848 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 2849 enum pipe pipe = to_intel_crtc(crtc)->pipe; 2850 unsigned long irqflags; 2851 2852 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2853 i915_enable_pipestat(dev_priv, pipe, 2854 PIPE_START_VBLANK_INTERRUPT_STATUS); 2855 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2856 2857 return 0; 2858 } 2859 2860 int ilk_enable_vblank(struct drm_crtc *crtc) 2861 { 2862 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 2863 enum pipe pipe = to_intel_crtc(crtc)->pipe; 2864 unsigned long irqflags; 2865 u32 bit = DISPLAY_VER(dev_priv) >= 7 ? 2866 DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe); 2867 2868 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2869 ilk_enable_display_irq(dev_priv, bit); 2870 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2871 2872 /* Even though there is no DMC, frame counter can get stuck when 2873 * PSR is active as no frames are generated. 
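	 * drm_crtc_vblank_restore() re-estimates the missed vblanks so the
	 * software counter does not go stale in the meantime.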
2874 */ 2875 if (HAS_PSR(dev_priv)) 2876 drm_crtc_vblank_restore(crtc); 2877 2878 return 0; 2879 } 2880 2881 static bool gen11_dsi_configure_te(struct intel_crtc *intel_crtc, 2882 bool enable) 2883 { 2884 struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev); 2885 enum port port; 2886 u32 tmp; 2887 2888 if (!(intel_crtc->mode_flags & 2889 (I915_MODE_FLAG_DSI_USE_TE1 | I915_MODE_FLAG_DSI_USE_TE0))) 2890 return false; 2891 2892 /* for dual link cases we consider TE from slave */ 2893 if (intel_crtc->mode_flags & I915_MODE_FLAG_DSI_USE_TE1) 2894 port = PORT_B; 2895 else 2896 port = PORT_A; 2897 2898 tmp = intel_uncore_read(&dev_priv->uncore, DSI_INTR_MASK_REG(port)); 2899 if (enable) 2900 tmp &= ~DSI_TE_EVENT; 2901 else 2902 tmp |= DSI_TE_EVENT; 2903 2904 intel_uncore_write(&dev_priv->uncore, DSI_INTR_MASK_REG(port), tmp); 2905 2906 tmp = intel_uncore_read(&dev_priv->uncore, DSI_INTR_IDENT_REG(port)); 2907 intel_uncore_write(&dev_priv->uncore, DSI_INTR_IDENT_REG(port), tmp); 2908 2909 return true; 2910 } 2911 2912 int bdw_enable_vblank(struct drm_crtc *_crtc) 2913 { 2914 struct intel_crtc *crtc = to_intel_crtc(_crtc); 2915 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 2916 enum pipe pipe = crtc->pipe; 2917 unsigned long irqflags; 2918 2919 if (gen11_dsi_configure_te(crtc, true)) 2920 return 0; 2921 2922 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2923 bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK); 2924 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2925 2926 /* Even if there is no DMC, frame counter can get stuck when 2927 * PSR is active as no frames are generated, so check only for PSR. 2928 */ 2929 if (HAS_PSR(dev_priv)) 2930 drm_crtc_vblank_restore(&crtc->base); 2931 2932 return 0; 2933 } 2934 2935 /* Called from drm generic code, passed 'crtc' which 2936 * we use as a pipe index 2937 */ 2938 void i8xx_disable_vblank(struct drm_crtc *crtc) 2939 { 2940 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 2941 enum pipe pipe = to_intel_crtc(crtc)->pipe; 2942 unsigned long irqflags; 2943 2944 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2945 i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS); 2946 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2947 } 2948 2949 void i915gm_disable_vblank(struct drm_crtc *crtc) 2950 { 2951 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 2952 2953 i8xx_disable_vblank(crtc); 2954 2955 if (--dev_priv->vblank_enabled == 0) 2956 intel_uncore_write(&dev_priv->uncore, SCPD0, _MASKED_BIT_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE)); 2957 } 2958 2959 void i965_disable_vblank(struct drm_crtc *crtc) 2960 { 2961 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 2962 enum pipe pipe = to_intel_crtc(crtc)->pipe; 2963 unsigned long irqflags; 2964 2965 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2966 i915_disable_pipestat(dev_priv, pipe, 2967 PIPE_START_VBLANK_INTERRUPT_STATUS); 2968 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2969 } 2970 2971 void ilk_disable_vblank(struct drm_crtc *crtc) 2972 { 2973 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 2974 enum pipe pipe = to_intel_crtc(crtc)->pipe; 2975 unsigned long irqflags; 2976 u32 bit = DISPLAY_VER(dev_priv) >= 7 ? 
2977 DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe); 2978 2979 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2980 ilk_disable_display_irq(dev_priv, bit); 2981 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2982 } 2983 2984 void bdw_disable_vblank(struct drm_crtc *_crtc) 2985 { 2986 struct intel_crtc *crtc = to_intel_crtc(_crtc); 2987 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 2988 enum pipe pipe = crtc->pipe; 2989 unsigned long irqflags; 2990 2991 if (gen11_dsi_configure_te(crtc, false)) 2992 return; 2993 2994 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2995 bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK); 2996 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2997 } 2998 2999 static void ibx_irq_reset(struct drm_i915_private *dev_priv) 3000 { 3001 struct intel_uncore *uncore = &dev_priv->uncore; 3002 3003 if (HAS_PCH_NOP(dev_priv)) 3004 return; 3005 3006 GEN3_IRQ_RESET(uncore, SDE); 3007 3008 if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv)) 3009 intel_uncore_write(&dev_priv->uncore, SERR_INT, 0xffffffff); 3010 } 3011 3012 static void vlv_display_irq_reset(struct drm_i915_private *dev_priv) 3013 { 3014 struct intel_uncore *uncore = &dev_priv->uncore; 3015 3016 if (IS_CHERRYVIEW(dev_priv)) 3017 intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_CHV); 3018 else 3019 intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK); 3020 3021 i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0); 3022 intel_uncore_write(uncore, PORT_HOTPLUG_STAT, intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT)); 3023 3024 i9xx_pipestat_irq_reset(dev_priv); 3025 3026 GEN3_IRQ_RESET(uncore, VLV_); 3027 dev_priv->irq_mask = ~0u; 3028 } 3029 3030 static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv) 3031 { 3032 struct intel_uncore *uncore = &dev_priv->uncore; 3033 3034 u32 pipestat_mask; 3035 u32 enable_mask; 3036 enum pipe pipe; 3037 3038 pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS; 3039 3040 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); 3041 for_each_pipe(dev_priv, pipe) 3042 i915_enable_pipestat(dev_priv, pipe, pipestat_mask); 3043 3044 enable_mask = I915_DISPLAY_PORT_INTERRUPT | 3045 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3046 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3047 I915_LPE_PIPE_A_INTERRUPT | 3048 I915_LPE_PIPE_B_INTERRUPT; 3049 3050 if (IS_CHERRYVIEW(dev_priv)) 3051 enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT | 3052 I915_LPE_PIPE_C_INTERRUPT; 3053 3054 drm_WARN_ON(&dev_priv->drm, dev_priv->irq_mask != ~0u); 3055 3056 dev_priv->irq_mask = ~enable_mask; 3057 3058 GEN3_IRQ_INIT(uncore, VLV_, dev_priv->irq_mask, enable_mask); 3059 } 3060 3061 /* drm_dma.h hooks 3062 */ 3063 static void ilk_irq_reset(struct drm_i915_private *dev_priv) 3064 { 3065 struct intel_uncore *uncore = &dev_priv->uncore; 3066 3067 GEN3_IRQ_RESET(uncore, DE); 3068 dev_priv->irq_mask = ~0u; 3069 3070 if (GRAPHICS_VER(dev_priv) == 7) 3071 intel_uncore_write(uncore, GEN7_ERR_INT, 0xffffffff); 3072 3073 if (IS_HASWELL(dev_priv)) { 3074 intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff); 3075 intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff); 3076 } 3077 3078 gen5_gt_irq_reset(&dev_priv->gt); 3079 3080 ibx_irq_reset(dev_priv); 3081 } 3082 3083 static void valleyview_irq_reset(struct drm_i915_private *dev_priv) 3084 { 3085 intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, 0); 3086 intel_uncore_posting_read(&dev_priv->uncore, VLV_MASTER_IER); 3087 3088 gen5_gt_irq_reset(&dev_priv->gt); 3089 3090 
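	/*
	 * Display IRQs are only reset if they were actually enabled;
	 * dev_priv->display_irqs_enabled is protected by irq_lock, hence
	 * the lock around the check below.
	 */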
spin_lock_irq(&dev_priv->irq_lock); 3091 if (dev_priv->display_irqs_enabled) 3092 vlv_display_irq_reset(dev_priv); 3093 spin_unlock_irq(&dev_priv->irq_lock); 3094 } 3095 3096 static void gen8_display_irq_reset(struct drm_i915_private *dev_priv) 3097 { 3098 struct intel_uncore *uncore = &dev_priv->uncore; 3099 enum pipe pipe; 3100 3101 if (!HAS_DISPLAY(dev_priv)) 3102 return; 3103 3104 intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff); 3105 intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff); 3106 3107 for_each_pipe(dev_priv, pipe) 3108 if (intel_display_power_is_enabled(dev_priv, 3109 POWER_DOMAIN_PIPE(pipe))) 3110 GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe); 3111 3112 GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_); 3113 GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_); 3114 } 3115 3116 static void gen8_irq_reset(struct drm_i915_private *dev_priv) 3117 { 3118 struct intel_uncore *uncore = &dev_priv->uncore; 3119 3120 gen8_master_intr_disable(dev_priv->uncore.regs); 3121 3122 gen8_gt_irq_reset(&dev_priv->gt); 3123 gen8_display_irq_reset(dev_priv); 3124 GEN3_IRQ_RESET(uncore, GEN8_PCU_); 3125 3126 if (HAS_PCH_SPLIT(dev_priv)) 3127 ibx_irq_reset(dev_priv); 3128 3129 } 3130 3131 static void gen11_display_irq_reset(struct drm_i915_private *dev_priv) 3132 { 3133 struct intel_uncore *uncore = &dev_priv->uncore; 3134 enum pipe pipe; 3135 u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | 3136 BIT(TRANSCODER_C) | BIT(TRANSCODER_D); 3137 3138 if (!HAS_DISPLAY(dev_priv)) 3139 return; 3140 3141 intel_uncore_write(uncore, GEN11_DISPLAY_INT_CTL, 0); 3142 3143 if (DISPLAY_VER(dev_priv) >= 12) { 3144 enum transcoder trans; 3145 3146 for_each_cpu_transcoder_masked(dev_priv, trans, trans_mask) { 3147 enum intel_display_power_domain domain; 3148 3149 domain = POWER_DOMAIN_TRANSCODER(trans); 3150 if (!intel_display_power_is_enabled(dev_priv, domain)) 3151 continue; 3152 3153 intel_uncore_write(uncore, TRANS_PSR_IMR(trans), 0xffffffff); 3154 intel_uncore_write(uncore, TRANS_PSR_IIR(trans), 0xffffffff); 3155 } 3156 } else { 3157 intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff); 3158 intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff); 3159 } 3160 3161 for_each_pipe(dev_priv, pipe) 3162 if (intel_display_power_is_enabled(dev_priv, 3163 POWER_DOMAIN_PIPE(pipe))) 3164 GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe); 3165 3166 GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_); 3167 GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_); 3168 GEN3_IRQ_RESET(uncore, GEN11_DE_HPD_); 3169 3170 if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) 3171 GEN3_IRQ_RESET(uncore, SDE); 3172 } 3173 3174 static void gen11_irq_reset(struct drm_i915_private *dev_priv) 3175 { 3176 struct intel_uncore *uncore = &dev_priv->uncore; 3177 3178 gen11_master_intr_disable(dev_priv->uncore.regs); 3179 3180 gen11_gt_irq_reset(&dev_priv->gt); 3181 gen11_display_irq_reset(dev_priv); 3182 3183 GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_); 3184 GEN3_IRQ_RESET(uncore, GEN8_PCU_); 3185 } 3186 3187 static void dg1_irq_reset(struct drm_i915_private *dev_priv) 3188 { 3189 struct intel_uncore *uncore = &dev_priv->uncore; 3190 3191 dg1_master_intr_disable(dev_priv->uncore.regs); 3192 3193 gen11_gt_irq_reset(&dev_priv->gt); 3194 gen11_display_irq_reset(dev_priv); 3195 3196 GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_); 3197 GEN3_IRQ_RESET(uncore, GEN8_PCU_); 3198 } 3199 3200 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv, 3201 u8 pipe_mask) 3202 { 3203 struct intel_uncore *uncore = &dev_priv->uncore; 3204 u32 extra_ier = GEN8_PIPE_VBLANK | 3205 gen8_de_pipe_underrun_mask(dev_priv) | 3206 
gen8_de_pipe_flip_done_mask(dev_priv); 3207 enum pipe pipe; 3208 3209 spin_lock_irq(&dev_priv->irq_lock); 3210 3211 if (!intel_irqs_enabled(dev_priv)) { 3212 spin_unlock_irq(&dev_priv->irq_lock); 3213 return; 3214 } 3215 3216 for_each_pipe_masked(dev_priv, pipe, pipe_mask) 3217 GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe, 3218 dev_priv->de_irq_mask[pipe], 3219 ~dev_priv->de_irq_mask[pipe] | extra_ier); 3220 3221 spin_unlock_irq(&dev_priv->irq_lock); 3222 } 3223 3224 void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv, 3225 u8 pipe_mask) 3226 { 3227 struct intel_uncore *uncore = &dev_priv->uncore; 3228 enum pipe pipe; 3229 3230 spin_lock_irq(&dev_priv->irq_lock); 3231 3232 if (!intel_irqs_enabled(dev_priv)) { 3233 spin_unlock_irq(&dev_priv->irq_lock); 3234 return; 3235 } 3236 3237 for_each_pipe_masked(dev_priv, pipe, pipe_mask) 3238 GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe); 3239 3240 spin_unlock_irq(&dev_priv->irq_lock); 3241 3242 /* make sure we're done processing display irqs */ 3243 intel_synchronize_irq(dev_priv); 3244 } 3245 3246 static void cherryview_irq_reset(struct drm_i915_private *dev_priv) 3247 { 3248 struct intel_uncore *uncore = &dev_priv->uncore; 3249 3250 intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, 0); 3251 intel_uncore_posting_read(&dev_priv->uncore, GEN8_MASTER_IRQ); 3252 3253 gen8_gt_irq_reset(&dev_priv->gt); 3254 3255 GEN3_IRQ_RESET(uncore, GEN8_PCU_); 3256 3257 spin_lock_irq(&dev_priv->irq_lock); 3258 if (dev_priv->display_irqs_enabled) 3259 vlv_display_irq_reset(dev_priv); 3260 spin_unlock_irq(&dev_priv->irq_lock); 3261 } 3262 3263 static u32 ibx_hotplug_enables(struct drm_i915_private *i915, 3264 enum hpd_pin pin) 3265 { 3266 switch (pin) { 3267 case HPD_PORT_A: 3268 /* 3269 * When CPU and PCH are on the same package, port A 3270 * HPD must be enabled in both north and south. 3271 */ 3272 return HAS_PCH_LPT_LP(i915) ? 3273 PORTA_HOTPLUG_ENABLE : 0; 3274 case HPD_PORT_B: 3275 return PORTB_HOTPLUG_ENABLE | 3276 PORTB_PULSE_DURATION_2ms; 3277 case HPD_PORT_C: 3278 return PORTC_HOTPLUG_ENABLE | 3279 PORTC_PULSE_DURATION_2ms; 3280 case HPD_PORT_D: 3281 return PORTD_HOTPLUG_ENABLE | 3282 PORTD_PULSE_DURATION_2ms; 3283 default: 3284 return 0; 3285 } 3286 } 3287 3288 static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv) 3289 { 3290 u32 hotplug; 3291 3292 /* 3293 * Enable digital hotplug on the PCH, and configure the DP short pulse 3294 * duration to 2ms (which is the minimum in the Display Port spec). 3295 * The pulse duration bits are reserved on LPT+. 
3296 */ 3297 hotplug = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG); 3298 hotplug &= ~(PORTA_HOTPLUG_ENABLE | 3299 PORTB_HOTPLUG_ENABLE | 3300 PORTC_HOTPLUG_ENABLE | 3301 PORTD_HOTPLUG_ENABLE | 3302 PORTB_PULSE_DURATION_MASK | 3303 PORTC_PULSE_DURATION_MASK | 3304 PORTD_PULSE_DURATION_MASK); 3305 hotplug |= intel_hpd_hotplug_enables(dev_priv, ibx_hotplug_enables); 3306 intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, hotplug); 3307 } 3308 3309 static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv) 3310 { 3311 u32 hotplug_irqs, enabled_irqs; 3312 3313 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.pch_hpd); 3314 hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.pch_hpd); 3315 3316 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 3317 3318 ibx_hpd_detection_setup(dev_priv); 3319 } 3320 3321 static u32 icp_ddi_hotplug_enables(struct drm_i915_private *i915, 3322 enum hpd_pin pin) 3323 { 3324 switch (pin) { 3325 case HPD_PORT_A: 3326 case HPD_PORT_B: 3327 case HPD_PORT_C: 3328 case HPD_PORT_D: 3329 return SHOTPLUG_CTL_DDI_HPD_ENABLE(pin); 3330 default: 3331 return 0; 3332 } 3333 } 3334 3335 static u32 icp_tc_hotplug_enables(struct drm_i915_private *i915, 3336 enum hpd_pin pin) 3337 { 3338 switch (pin) { 3339 case HPD_PORT_TC1: 3340 case HPD_PORT_TC2: 3341 case HPD_PORT_TC3: 3342 case HPD_PORT_TC4: 3343 case HPD_PORT_TC5: 3344 case HPD_PORT_TC6: 3345 return ICP_TC_HPD_ENABLE(pin); 3346 default: 3347 return 0; 3348 } 3349 } 3350 3351 static void icp_ddi_hpd_detection_setup(struct drm_i915_private *dev_priv) 3352 { 3353 u32 hotplug; 3354 3355 hotplug = intel_uncore_read(&dev_priv->uncore, SHOTPLUG_CTL_DDI); 3356 hotplug &= ~(SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_A) | 3357 SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_B) | 3358 SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_C) | 3359 SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_D)); 3360 hotplug |= intel_hpd_hotplug_enables(dev_priv, icp_ddi_hotplug_enables); 3361 intel_uncore_write(&dev_priv->uncore, SHOTPLUG_CTL_DDI, hotplug); 3362 } 3363 3364 static void icp_tc_hpd_detection_setup(struct drm_i915_private *dev_priv) 3365 { 3366 u32 hotplug; 3367 3368 hotplug = intel_uncore_read(&dev_priv->uncore, SHOTPLUG_CTL_TC); 3369 hotplug &= ~(ICP_TC_HPD_ENABLE(HPD_PORT_TC1) | 3370 ICP_TC_HPD_ENABLE(HPD_PORT_TC2) | 3371 ICP_TC_HPD_ENABLE(HPD_PORT_TC3) | 3372 ICP_TC_HPD_ENABLE(HPD_PORT_TC4) | 3373 ICP_TC_HPD_ENABLE(HPD_PORT_TC5) | 3374 ICP_TC_HPD_ENABLE(HPD_PORT_TC6)); 3375 hotplug |= intel_hpd_hotplug_enables(dev_priv, icp_tc_hotplug_enables); 3376 intel_uncore_write(&dev_priv->uncore, SHOTPLUG_CTL_TC, hotplug); 3377 } 3378 3379 static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv) 3380 { 3381 u32 hotplug_irqs, enabled_irqs; 3382 3383 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.pch_hpd); 3384 hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.pch_hpd); 3385 3386 if (INTEL_PCH_TYPE(dev_priv) <= PCH_TGP) 3387 intel_uncore_write(&dev_priv->uncore, SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ); 3388 3389 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 3390 3391 icp_ddi_hpd_detection_setup(dev_priv); 3392 icp_tc_hpd_detection_setup(dev_priv); 3393 } 3394 3395 static u32 gen11_hotplug_enables(struct drm_i915_private *i915, 3396 enum hpd_pin pin) 3397 { 3398 switch (pin) { 3399 case HPD_PORT_TC1: 3400 case HPD_PORT_TC2: 3401 case HPD_PORT_TC3: 3402 case HPD_PORT_TC4: 3403 case HPD_PORT_TC5: 3404 case HPD_PORT_TC6: 3405 return 
GEN11_HOTPLUG_CTL_ENABLE(pin); 3406 default: 3407 return 0; 3408 } 3409 } 3410 3411 static void dg1_hpd_irq_setup(struct drm_i915_private *dev_priv) 3412 { 3413 u32 val; 3414 3415 val = intel_uncore_read(&dev_priv->uncore, SOUTH_CHICKEN1); 3416 val |= (INVERT_DDIA_HPD | 3417 INVERT_DDIB_HPD | 3418 INVERT_DDIC_HPD | 3419 INVERT_DDID_HPD); 3420 intel_uncore_write(&dev_priv->uncore, SOUTH_CHICKEN1, val); 3421 3422 icp_hpd_irq_setup(dev_priv); 3423 } 3424 3425 static void gen11_tc_hpd_detection_setup(struct drm_i915_private *dev_priv) 3426 { 3427 u32 hotplug; 3428 3429 hotplug = intel_uncore_read(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL); 3430 hotplug &= ~(GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC1) | 3431 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC2) | 3432 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC3) | 3433 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC4) | 3434 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC5) | 3435 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC6)); 3436 hotplug |= intel_hpd_hotplug_enables(dev_priv, gen11_hotplug_enables); 3437 intel_uncore_write(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL, hotplug); 3438 } 3439 3440 static void gen11_tbt_hpd_detection_setup(struct drm_i915_private *dev_priv) 3441 { 3442 u32 hotplug; 3443 3444 hotplug = intel_uncore_read(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL); 3445 hotplug &= ~(GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC1) | 3446 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC2) | 3447 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC3) | 3448 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC4) | 3449 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC5) | 3450 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC6)); 3451 hotplug |= intel_hpd_hotplug_enables(dev_priv, gen11_hotplug_enables); 3452 intel_uncore_write(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL, hotplug); 3453 } 3454 3455 static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv) 3456 { 3457 u32 hotplug_irqs, enabled_irqs; 3458 u32 val; 3459 3460 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd); 3461 hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.hpd); 3462 3463 val = intel_uncore_read(&dev_priv->uncore, GEN11_DE_HPD_IMR); 3464 val &= ~hotplug_irqs; 3465 val |= ~enabled_irqs & hotplug_irqs; 3466 intel_uncore_write(&dev_priv->uncore, GEN11_DE_HPD_IMR, val); 3467 intel_uncore_posting_read(&dev_priv->uncore, GEN11_DE_HPD_IMR); 3468 3469 gen11_tc_hpd_detection_setup(dev_priv); 3470 gen11_tbt_hpd_detection_setup(dev_priv); 3471 3472 if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) 3473 icp_hpd_irq_setup(dev_priv); 3474 } 3475 3476 static u32 spt_hotplug_enables(struct drm_i915_private *i915, 3477 enum hpd_pin pin) 3478 { 3479 switch (pin) { 3480 case HPD_PORT_A: 3481 return PORTA_HOTPLUG_ENABLE; 3482 case HPD_PORT_B: 3483 return PORTB_HOTPLUG_ENABLE; 3484 case HPD_PORT_C: 3485 return PORTC_HOTPLUG_ENABLE; 3486 case HPD_PORT_D: 3487 return PORTD_HOTPLUG_ENABLE; 3488 default: 3489 return 0; 3490 } 3491 } 3492 3493 static u32 spt_hotplug2_enables(struct drm_i915_private *i915, 3494 enum hpd_pin pin) 3495 { 3496 switch (pin) { 3497 case HPD_PORT_E: 3498 return PORTE_HOTPLUG_ENABLE; 3499 default: 3500 return 0; 3501 } 3502 } 3503 3504 static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv) 3505 { 3506 u32 val, hotplug; 3507 3508 /* Display WA #1179 WaHardHangonHotPlug: cnp */ 3509 if (HAS_PCH_CNP(dev_priv)) { 3510 val = intel_uncore_read(&dev_priv->uncore, SOUTH_CHICKEN1); 3511 val &= ~CHASSIS_CLK_REQ_DURATION_MASK; 3512 val |= CHASSIS_CLK_REQ_DURATION(0xf); 3513 intel_uncore_write(&dev_priv->uncore, SOUTH_CHICKEN1, val); 3514 } 3515 3516 /* Enable digital 
hotplug on the PCH */ 3517 hotplug = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG); 3518 hotplug &= ~(PORTA_HOTPLUG_ENABLE | 3519 PORTB_HOTPLUG_ENABLE | 3520 PORTC_HOTPLUG_ENABLE | 3521 PORTD_HOTPLUG_ENABLE); 3522 hotplug |= intel_hpd_hotplug_enables(dev_priv, spt_hotplug_enables); 3523 intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, hotplug); 3524 3525 hotplug = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG2); 3526 hotplug &= ~PORTE_HOTPLUG_ENABLE; 3527 hotplug |= intel_hpd_hotplug_enables(dev_priv, spt_hotplug2_enables); 3528 intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG2, hotplug); 3529 } 3530 3531 static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv) 3532 { 3533 u32 hotplug_irqs, enabled_irqs; 3534 3535 if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP) 3536 intel_uncore_write(&dev_priv->uncore, SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ); 3537 3538 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.pch_hpd); 3539 hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.pch_hpd); 3540 3541 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 3542 3543 spt_hpd_detection_setup(dev_priv); 3544 } 3545 3546 static u32 ilk_hotplug_enables(struct drm_i915_private *i915, 3547 enum hpd_pin pin) 3548 { 3549 switch (pin) { 3550 case HPD_PORT_A: 3551 return DIGITAL_PORTA_HOTPLUG_ENABLE | 3552 DIGITAL_PORTA_PULSE_DURATION_2ms; 3553 default: 3554 return 0; 3555 } 3556 } 3557 3558 static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv) 3559 { 3560 u32 hotplug; 3561 3562 /* 3563 * Enable digital hotplug on the CPU, and configure the DP short pulse 3564 * duration to 2ms (which is the minimum in the Display Port spec) 3565 * The pulse duration bits are reserved on HSW+. 3566 */ 3567 hotplug = intel_uncore_read(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL); 3568 hotplug &= ~(DIGITAL_PORTA_HOTPLUG_ENABLE | 3569 DIGITAL_PORTA_PULSE_DURATION_MASK); 3570 hotplug |= intel_hpd_hotplug_enables(dev_priv, ilk_hotplug_enables); 3571 intel_uncore_write(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL, hotplug); 3572 } 3573 3574 static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv) 3575 { 3576 u32 hotplug_irqs, enabled_irqs; 3577 3578 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd); 3579 hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.hpd); 3580 3581 if (DISPLAY_VER(dev_priv) >= 8) 3582 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs); 3583 else 3584 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs); 3585 3586 ilk_hpd_detection_setup(dev_priv); 3587 3588 ibx_hpd_irq_setup(dev_priv); 3589 } 3590 3591 static u32 bxt_hotplug_enables(struct drm_i915_private *i915, 3592 enum hpd_pin pin) 3593 { 3594 u32 hotplug; 3595 3596 switch (pin) { 3597 case HPD_PORT_A: 3598 hotplug = PORTA_HOTPLUG_ENABLE; 3599 if (intel_bios_is_port_hpd_inverted(i915, PORT_A)) 3600 hotplug |= BXT_DDIA_HPD_INVERT; 3601 return hotplug; 3602 case HPD_PORT_B: 3603 hotplug = PORTB_HOTPLUG_ENABLE; 3604 if (intel_bios_is_port_hpd_inverted(i915, PORT_B)) 3605 hotplug |= BXT_DDIB_HPD_INVERT; 3606 return hotplug; 3607 case HPD_PORT_C: 3608 hotplug = PORTC_HOTPLUG_ENABLE; 3609 if (intel_bios_is_port_hpd_inverted(i915, PORT_C)) 3610 hotplug |= BXT_DDIC_HPD_INVERT; 3611 return hotplug; 3612 default: 3613 return 0; 3614 } 3615 } 3616 3617 static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv) 3618 { 3619 u32 hotplug; 3620 3621 hotplug = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG); 
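	/*
	 * Classic read-modify-write: clear only the enable/invert bits we
	 * own, then OR back in whatever the per-pin callback asks for.
	 */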

static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug;

	hotplug = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
	hotplug &= ~(PORTA_HOTPLUG_ENABLE |
		     PORTB_HOTPLUG_ENABLE |
		     PORTC_HOTPLUG_ENABLE |
		     BXT_DDIA_HPD_INVERT |
		     BXT_DDIB_HPD_INVERT |
		     BXT_DDIC_HPD_INVERT);
	hotplug |= intel_hpd_hotplug_enables(dev_priv, bxt_hotplug_enables);
	intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, hotplug);
}

static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, enabled_irqs;

	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd);
	hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.hpd);

	bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);

	bxt_hpd_detection_setup(dev_priv);
}

/*
 * SDEIER is also touched by the interrupt handler to work around missed PCH
 * interrupts. Hence we can't update it after the interrupt handler is enabled -
 * instead we unconditionally enable all PCH interrupt sources here, but then
 * only unmask them as needed with SDEIMR.
 *
 * Note that we currently do this after installing the interrupt handler,
 * but before we enable the master interrupt. That should be sufficient
 * to avoid races with the irq handler, assuming we have MSI. Shared legacy
 * interrupts could still race.
 */
static void ibx_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 mask;

	if (HAS_PCH_NOP(dev_priv))
		return;

	if (HAS_PCH_IBX(dev_priv))
		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
	else if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
	else
		mask = SDE_GMBUS_CPT;

	GEN3_IRQ_INIT(uncore, SDE, ~mask, 0xffffffff);
}

static void ilk_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 display_mask, extra_mask;

	if (GRAPHICS_VER(dev_priv) >= 7) {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
				DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB);
		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
			      DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
			      DE_PLANE_FLIP_DONE_IVB(PLANE_C) |
			      DE_PLANE_FLIP_DONE_IVB(PLANE_B) |
			      DE_PLANE_FLIP_DONE_IVB(PLANE_A) |
			      DE_DP_A_HOTPLUG_IVB);
	} else {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
				DE_AUX_CHANNEL_A | DE_PIPEB_CRC_DONE |
				DE_PIPEA_CRC_DONE | DE_POISON);
		extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK |
			      DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
			      DE_PLANE_FLIP_DONE(PLANE_A) |
			      DE_PLANE_FLIP_DONE(PLANE_B) |
			      DE_DP_A_HOTPLUG);
	}

	if (IS_HASWELL(dev_priv)) {
		gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
		display_mask |= DE_EDP_PSR_INT_HSW;
	}

	if (IS_IRONLAKE_M(dev_priv))
		extra_mask |= DE_PCU_EVENT;

	dev_priv->irq_mask = ~display_mask;

	ibx_irq_postinstall(dev_priv);

	gen5_gt_irq_postinstall(&dev_priv->gt);

	GEN3_IRQ_INIT(uncore, DE, dev_priv->irq_mask,
		      display_mask | extra_mask);
}
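
/*
 * For reference, a simplified sketch of what the GEN3_IRQ_INIT(uncore, SDE,
 * ~mask, 0xffffffff) call above boils down to (an approximation of the real
 * macro, defined elsewhere in the driver, which also insists that IIR is
 * already clear before enabling anything):
 *
 *	gen3_assert_iir_is_zero(uncore, SDEIIR);
 *	intel_uncore_write(uncore, SDEIER, 0xffffffff);
 *	intel_uncore_write(uncore, SDEIMR, ~mask);
 *	intel_uncore_posting_read(uncore, SDEIMR);
 *
 * i.e. every PCH source is enabled in SDEIER while SDEIMR gates actual
 * delivery down to just "mask" - exactly the scheme the comment above
 * ibx_irq_postinstall() describes.
 */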

void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
{
	lockdep_assert_held(&dev_priv->irq_lock);

	if (dev_priv->display_irqs_enabled)
		return;

	dev_priv->display_irqs_enabled = true;

	if (intel_irqs_enabled(dev_priv)) {
		vlv_display_irq_reset(dev_priv);
		vlv_display_irq_postinstall(dev_priv);
	}
}

void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
{
	lockdep_assert_held(&dev_priv->irq_lock);

	if (!dev_priv->display_irqs_enabled)
		return;

	dev_priv->display_irqs_enabled = false;

	if (intel_irqs_enabled(dev_priv))
		vlv_display_irq_reset(dev_priv);
}

static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv)
{
	gen5_gt_irq_postinstall(&dev_priv->gt);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_postinstall(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
	intel_uncore_posting_read(&dev_priv->uncore, VLV_MASTER_IER);
}

static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	u32 de_pipe_masked = gen8_de_pipe_fault_mask(dev_priv) |
		GEN8_PIPE_CDCLK_CRC_DONE;
	u32 de_pipe_enables;
	u32 de_port_masked = gen8_de_port_aux_mask(dev_priv);
	u32 de_port_enables;
	u32 de_misc_masked = GEN8_DE_EDP_PSR;
	u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
		BIT(TRANSCODER_C) | BIT(TRANSCODER_D);
	enum pipe pipe;

	if (!HAS_DISPLAY(dev_priv))
		return;

	if (DISPLAY_VER(dev_priv) <= 10)
		de_misc_masked |= GEN8_DE_MISC_GSE;

	if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
		de_port_masked |= BXT_DE_PORT_GMBUS;

	if (DISPLAY_VER(dev_priv) >= 11) {
		enum port port;

		if (intel_bios_is_dsi_present(dev_priv, &port))
			de_port_masked |= DSI0_TE | DSI1_TE;
	}

	de_pipe_enables = de_pipe_masked |
		GEN8_PIPE_VBLANK |
		gen8_de_pipe_underrun_mask(dev_priv) |
		gen8_de_pipe_flip_done_mask(dev_priv);

	de_port_enables = de_port_masked;
	if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
		de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
	else if (IS_BROADWELL(dev_priv))
		de_port_enables |= BDW_DE_PORT_HOTPLUG_MASK;

	if (DISPLAY_VER(dev_priv) >= 12) {
		enum transcoder trans;

		for_each_cpu_transcoder_masked(dev_priv, trans, trans_mask) {
			enum intel_display_power_domain domain;

			domain = POWER_DOMAIN_TRANSCODER(trans);
			if (!intel_display_power_is_enabled(dev_priv, domain))
				continue;

			gen3_assert_iir_is_zero(uncore, TRANS_PSR_IIR(trans));
		}
	} else {
		gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
	}

	for_each_pipe(dev_priv, pipe) {
		dev_priv->de_irq_mask[pipe] = ~de_pipe_masked;

		if (intel_display_power_is_enabled(dev_priv,
						   POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
					  dev_priv->de_irq_mask[pipe],
					  de_pipe_enables);
	}

	GEN3_IRQ_INIT(uncore, GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
	GEN3_IRQ_INIT(uncore, GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);

	if (DISPLAY_VER(dev_priv) >= 11) {
		u32 de_hpd_masked = 0;
		u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK |
				     GEN11_DE_TBT_HOTPLUG_MASK;

		GEN3_IRQ_INIT(uncore, GEN11_DE_HPD_, ~de_hpd_masked,
			      de_hpd_enables);
	}
}
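
/*
 * A note on the *_masked/*_enables pairs above (a reading of the macro call
 * sites, not new behaviour): the "enables" value lands in IER and the
 * complement of "masked" lands in IMR, so for each pipe
 *
 *	GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe, ~de_pipe_masked, de_pipe_enables);
 *
 * roughly amounts to
 *
 *	intel_uncore_write(uncore, GEN8_DE_PIPE_IER(pipe), de_pipe_enables);
 *	intel_uncore_write(uncore, GEN8_DE_PIPE_IMR(pipe), ~de_pipe_masked);
 *
 * Bits such as GEN8_PIPE_VBLANK are therefore armed in IER but remain masked
 * in IMR until something like drm vblank enabling unmasks them at runtime.
 */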

static void icp_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 mask = SDE_GMBUS_ICP;

	GEN3_IRQ_INIT(uncore, SDE, ~mask, 0xffffffff);
}

static void gen8_irq_postinstall(struct drm_i915_private *dev_priv)
{
	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
		icp_irq_postinstall(dev_priv);
	else if (HAS_PCH_SPLIT(dev_priv))
		ibx_irq_postinstall(dev_priv);

	gen8_gt_irq_postinstall(&dev_priv->gt);
	gen8_de_irq_postinstall(dev_priv);

	gen8_master_intr_enable(dev_priv->uncore.regs);
}

static void gen11_de_irq_postinstall(struct drm_i915_private *dev_priv)
{
	if (!HAS_DISPLAY(dev_priv))
		return;

	gen8_de_irq_postinstall(dev_priv);

	intel_uncore_write(&dev_priv->uncore, GEN11_DISPLAY_INT_CTL,
			   GEN11_DISPLAY_IRQ_ENABLE);
}

static void gen11_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 gu_misc_masked = GEN11_GU_MISC_GSE;

	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
		icp_irq_postinstall(dev_priv);

	gen11_gt_irq_postinstall(&dev_priv->gt);
	gen11_de_irq_postinstall(dev_priv);

	GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);

	gen11_master_intr_enable(uncore->regs);
	intel_uncore_posting_read(&dev_priv->uncore, GEN11_GFX_MSTR_IRQ);
}

static void dg1_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 gu_misc_masked = GEN11_GU_MISC_GSE;

	gen11_gt_irq_postinstall(&dev_priv->gt);

	GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);

	if (HAS_DISPLAY(dev_priv)) {
		icp_irq_postinstall(dev_priv);
		gen8_de_irq_postinstall(dev_priv);
		intel_uncore_write(&dev_priv->uncore, GEN11_DISPLAY_INT_CTL,
				   GEN11_DISPLAY_IRQ_ENABLE);
	}

	dg1_master_intr_enable(dev_priv->uncore.regs);
	intel_uncore_posting_read(&dev_priv->uncore, DG1_MSTR_TILE_INTR);
}

static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv)
{
	gen8_gt_irq_postinstall(&dev_priv->gt);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_postinstall(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
	intel_uncore_posting_read(&dev_priv->uncore, GEN8_MASTER_IRQ);
}
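
/*
 * Ordering note for the postinstall hooks above (an observation about the
 * code as written, not a new requirement): each variant programs the
 * subordinate GT/DE/PCH interrupt registers first and flips the top-level
 * master enable last, e.g. for gen11, roughly:
 *
 *	gen11_gt_irq_postinstall(&dev_priv->gt);
 *	gen11_de_irq_postinstall(dev_priv);
 *	gen11_master_intr_enable(uncore->regs);
 *
 * so no source can raise an interrupt before its handling state is in place.
 */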

static void i8xx_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	i9xx_pipestat_irq_reset(dev_priv);

	GEN2_IRQ_RESET(uncore);
	dev_priv->irq_mask = ~0u;
}

static void i8xx_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u16 enable_mask;

	intel_uncore_write16(uncore,
			     EMR,
			     ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_MASTER_ERROR_INTERRUPT);

	enable_mask =
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_MASTER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	GEN2_IRQ_INIT(uncore, dev_priv->irq_mask, enable_mask);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);
}

static void i8xx_error_irq_ack(struct drm_i915_private *i915,
			       u16 *eir, u16 *eir_stuck)
{
	struct intel_uncore *uncore = &i915->uncore;
	u16 emr;

	*eir = intel_uncore_read16(uncore, EIR);

	if (*eir)
		intel_uncore_write16(uncore, EIR, *eir);

	*eir_stuck = intel_uncore_read16(uncore, EIR);
	if (*eir_stuck == 0)
		return;

	/*
	 * Toggle all EMR bits to make sure we get an edge
	 * in the ISR master error bit if we don't clear
	 * all the EIR bits. Otherwise the edge triggered
	 * IIR on i965/g4x wouldn't notice that an interrupt
	 * is still pending. Also some EIR bits can't be
	 * cleared except by handling the underlying error
	 * (or by a GPU reset) so we mask any bit that
	 * remains set.
	 */
	emr = intel_uncore_read16(uncore, EMR);
	intel_uncore_write16(uncore, EMR, 0xffff);
	intel_uncore_write16(uncore, EMR, emr | *eir_stuck);
}

static void i8xx_error_irq_handler(struct drm_i915_private *dev_priv,
				   u16 eir, u16 eir_stuck)
{
	DRM_DEBUG("Master Error: EIR 0x%04x\n", eir);

	if (eir_stuck)
		drm_dbg(&dev_priv->drm, "EIR stuck: 0x%04x, masked\n",
			eir_stuck);
}

static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv,
			       u32 *eir, u32 *eir_stuck)
{
	u32 emr;

	*eir = intel_uncore_read(&dev_priv->uncore, EIR);

	intel_uncore_write(&dev_priv->uncore, EIR, *eir);

	*eir_stuck = intel_uncore_read(&dev_priv->uncore, EIR);
	if (*eir_stuck == 0)
		return;

	/*
	 * Toggle all EMR bits to make sure we get an edge
	 * in the ISR master error bit if we don't clear
	 * all the EIR bits. Otherwise the edge triggered
	 * IIR on i965/g4x wouldn't notice that an interrupt
	 * is still pending. Also some EIR bits can't be
	 * cleared except by handling the underlying error
	 * (or by a GPU reset) so we mask any bit that
	 * remains set.
	 */
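	/*
	 * Illustrative numbers for the toggle below (made up for this
	 * comment, not taken from real hardware): if EMR was 0xffffff2f and
	 * *eir_stuck reads back 0x10, the two writes produce first
	 * EMR == 0xffffffff (everything masked, so the ISR master error bit
	 * drops) and then EMR == 0xffffff3f (the old mask plus the stuck
	 * bit), giving the edge-triggered IIR a fresh edge for any EIR bit
	 * that is still set and unmasked.
	 */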
	emr = intel_uncore_read(&dev_priv->uncore, EMR);
	intel_uncore_write(&dev_priv->uncore, EMR, 0xffffffff);
	intel_uncore_write(&dev_priv->uncore, EMR, emr | *eir_stuck);
}

static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv,
				   u32 eir, u32 eir_stuck)
{
	DRM_DEBUG("Master Error, EIR 0x%08x\n", eir);

	if (eir_stuck)
		drm_dbg(&dev_priv->drm, "EIR stuck: 0x%08x, masked\n",
			eir_stuck);
}

static irqreturn_t i8xx_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u16 eir = 0, eir_stuck = 0;
		u16 iir;

		iir = intel_uncore_read16(&dev_priv->uncore, GEN2_IIR);
		if (iir == 0)
			break;

		ret = IRQ_HANDLED;

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i8xx_error_irq_ack(dev_priv, &eir, &eir_stuck);

		intel_uncore_write16(&dev_priv->uncore, GEN2_IIR, iir);

		if (iir & I915_USER_INTERRUPT)
			intel_engine_cs_irq(dev_priv->gt.engine[RCS0], iir);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i8xx_error_irq_handler(dev_priv, eir, eir_stuck);

		i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats);
	} while (0);

	pmu_irq_stats(dev_priv, ret);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}

static void i915_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	if (I915_HAS_HOTPLUG(dev_priv)) {
		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
		intel_uncore_write(&dev_priv->uncore, PORT_HOTPLUG_STAT,
				   intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT));
	}

	i9xx_pipestat_irq_reset(dev_priv);

	GEN3_IRQ_RESET(uncore, GEN2_);
	dev_priv->irq_mask = ~0u;
}

static void i915_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 enable_mask;

	intel_uncore_write(&dev_priv->uncore, EMR,
			   ~(I915_ERROR_PAGE_TABLE |
			     I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_MASTER_ERROR_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_MASTER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	if (I915_HAS_HOTPLUG(dev_priv)) {
		/* Enable in IER... */
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
		/* and unmask in IMR */
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
	}

	GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	i915_enable_asle_pipestat(dev_priv);
}

static irqreturn_t i915_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 eir = 0, eir_stuck = 0;
		u32 hotplug_status = 0;
		u32 iir;

		iir = intel_uncore_read(&dev_priv->uncore, GEN2_IIR);
		if (iir == 0)
			break;

		ret = IRQ_HANDLED;

		if (I915_HAS_HOTPLUG(dev_priv) &&
		    iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);

		intel_uncore_write(&dev_priv->uncore, GEN2_IIR, iir);

		if (iir & I915_USER_INTERRUPT)
			intel_engine_cs_irq(dev_priv->gt.engine[RCS0], iir);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_handler(dev_priv, eir, eir_stuck);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		i915_pipestat_irq_handler(dev_priv, iir, pipe_stats);
	} while (0);

	pmu_irq_stats(dev_priv, ret);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}

static void i965_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
	intel_uncore_write(&dev_priv->uncore, PORT_HOTPLUG_STAT,
			   intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT));

	i9xx_pipestat_irq_reset(dev_priv);

	GEN3_IRQ_RESET(uncore, GEN2_);
	dev_priv->irq_mask = ~0u;
}

static void i965_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 enable_mask;
	u32 error_mask;

	/*
	 * Enable some error detection, note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev_priv)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	intel_uncore_write(&dev_priv->uncore, EMR, error_mask);

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PORT_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_MASTER_ERROR_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PORT_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_MASTER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	if (IS_G4X(dev_priv))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	i915_enable_asle_pipestat(dev_priv);
}

static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_en;

	lockdep_assert_held(&dev_priv->irq_lock);

	/* Note HDMI and DP share hotplug bits */
	/* enable bits are the same for all generations */
	hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915);
	/*
	 * Programming the CRT detection parameters tends to generate a
	 * spurious hotplug event about three seconds later. So just do it
	 * once.
	 */
	if (IS_G4X(dev_priv))
		hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
	hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;

	/* Ignore TV since it's buggy */
	i915_hotplug_interrupt_update_locked(dev_priv,
					     HOTPLUG_INT_EN_MASK |
					     CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
					     CRT_HOTPLUG_ACTIVATION_PERIOD_64,
					     hotplug_en);
}

static irqreturn_t i965_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 eir = 0, eir_stuck = 0;
		u32 hotplug_status = 0;
		u32 iir;

		iir = intel_uncore_read(&dev_priv->uncore, GEN2_IIR);
		if (iir == 0)
			break;

		ret = IRQ_HANDLED;

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);

		intel_uncore_write(&dev_priv->uncore, GEN2_IIR, iir);

		if (iir & I915_USER_INTERRUPT)
			intel_engine_cs_irq(dev_priv->gt.engine[RCS0],
					    iir);

		if (iir & I915_BSD_USER_INTERRUPT)
			intel_engine_cs_irq(dev_priv->gt.engine[VCS0],
					    iir >> 25);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_handler(dev_priv, eir, eir_stuck);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		i965_pipestat_irq_handler(dev_priv, iir, pipe_stats);
	} while (0);

	pmu_irq_stats(dev_priv, ret);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}
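
/*
 * The i8xx/i915/i965 handlers above share the same ack-first shape (an
 * observation for readers, not new behaviour): latch IIR, ack the secondary
 * status registers while the master bits are still set, clear IIR, and only
 * then process the events, roughly:
 *
 *	iir = intel_uncore_read(&dev_priv->uncore, GEN2_IIR);
 *	i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
 *	intel_uncore_write(&dev_priv->uncore, GEN2_IIR, iir);
 *	... handle user/error/hotplug/pipestat events ...
 *
 * Clearing IIR before handling means an event that arrives mid-handler
 * re-asserts the interrupt instead of being lost.
 */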

#define HPD_FUNCS(platform)					 \
static const struct intel_hotplug_funcs platform##_hpd_funcs = { \
	.hpd_irq_setup = platform##_hpd_irq_setup,		 \
}

HPD_FUNCS(i915);
HPD_FUNCS(dg1);
HPD_FUNCS(gen11);
HPD_FUNCS(bxt);
HPD_FUNCS(icp);
HPD_FUNCS(spt);
HPD_FUNCS(ilk);
#undef HPD_FUNCS

/**
 * intel_irq_init - initializes irq support
 * @dev_priv: i915 device instance
 *
 * This function initializes all the irq support including work items, timers
 * and all the vtables. It does not set up the interrupt itself though.
 */
void intel_irq_init(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	int i;

	INIT_WORK(&dev_priv->l3_parity.error_work, ivb_parity_work);
	for (i = 0; i < MAX_L3_SLICES; ++i)
		dev_priv->l3_parity.remap_info[i] = NULL;

	/* pre-gen11 the guc irqs bits are in the upper 16 bits of the pm reg */
	if (HAS_GT_UC(dev_priv) && GRAPHICS_VER(dev_priv) < 11)
		dev_priv->gt.pm_guc_events = GUC_INTR_GUC2HOST << 16;

	if (!HAS_DISPLAY(dev_priv))
		return;

	intel_hpd_init_pins(dev_priv);

	intel_hpd_init_work(dev_priv);

	dev->vblank_disable_immediate = true;

	/* Most platforms treat the display irq block as an always-on
	 * power domain. vlv/chv can disable it at runtime and need
	 * special care to avoid writing any of the display block registers
	 * outside of the power domain. We defer setting up the display irqs
	 * in this case to the runtime pm.
	 */
	dev_priv->display_irqs_enabled = true;
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		dev_priv->display_irqs_enabled = false;

	dev_priv->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD;
	/* If we have MST support, we want to avoid doing short HPD IRQ storm
	 * detection, as short HPD storms will occur as a natural part of
	 * sideband messaging with MST.
	 * On older platforms however, IRQ storms can occur with both long and
	 * short pulses, as seen on some G4x systems.
	 */
	dev_priv->hotplug.hpd_short_storm_enabled = !HAS_DP_MST(dev_priv);

	if (HAS_GMCH(dev_priv)) {
		if (I915_HAS_HOTPLUG(dev_priv))
			dev_priv->hotplug_funcs = &i915_hpd_funcs;
	} else {
		if (HAS_PCH_DG1(dev_priv))
			dev_priv->hotplug_funcs = &dg1_hpd_funcs;
		else if (DISPLAY_VER(dev_priv) >= 11)
			dev_priv->hotplug_funcs = &gen11_hpd_funcs;
		else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
			dev_priv->hotplug_funcs = &bxt_hpd_funcs;
		else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
			dev_priv->hotplug_funcs = &icp_hpd_funcs;
		else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
			dev_priv->hotplug_funcs = &spt_hpd_funcs;
		else
			dev_priv->hotplug_funcs = &ilk_hpd_funcs;
	}
}
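
/*
 * For illustration: a sketch of how the hotplug_funcs vtable selected in
 * intel_irq_init() is presumably consumed by the hotplug code (the real
 * caller lives with the rest of the hotplug handling, not in this function;
 * shown here only as an assumption):
 *
 *	if (i915->hotplug_funcs)
 *		i915->hotplug_funcs->hpd_irq_setup(i915);
 *
 * Platforms that never assign the pointer (GMCH parts without hotplug
 * support) simply skip the setup.
 */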

/**
 * intel_irq_fini - deinitializes IRQ support
 * @i915: i915 device instance
 *
 * This function deinitializes all the IRQ support.
 */
void intel_irq_fini(struct drm_i915_private *i915)
{
	int i;

	for (i = 0; i < MAX_L3_SLICES; ++i)
		kfree(i915->l3_parity.remap_info[i]);
}

static irq_handler_t intel_irq_handler(struct drm_i915_private *dev_priv)
{
	if (HAS_GMCH(dev_priv)) {
		if (IS_CHERRYVIEW(dev_priv))
			return cherryview_irq_handler;
		else if (IS_VALLEYVIEW(dev_priv))
			return valleyview_irq_handler;
		else if (GRAPHICS_VER(dev_priv) == 4)
			return i965_irq_handler;
		else if (GRAPHICS_VER(dev_priv) == 3)
			return i915_irq_handler;
		else
			return i8xx_irq_handler;
	} else {
		if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
			return dg1_irq_handler;
		else if (GRAPHICS_VER(dev_priv) >= 11)
			return gen11_irq_handler;
		else if (GRAPHICS_VER(dev_priv) >= 8)
			return gen8_irq_handler;
		else
			return ilk_irq_handler;
	}
}

static void intel_irq_reset(struct drm_i915_private *dev_priv)
{
	if (HAS_GMCH(dev_priv)) {
		if (IS_CHERRYVIEW(dev_priv))
			cherryview_irq_reset(dev_priv);
		else if (IS_VALLEYVIEW(dev_priv))
			valleyview_irq_reset(dev_priv);
		else if (GRAPHICS_VER(dev_priv) == 4)
			i965_irq_reset(dev_priv);
		else if (GRAPHICS_VER(dev_priv) == 3)
			i915_irq_reset(dev_priv);
		else
			i8xx_irq_reset(dev_priv);
	} else {
		if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
			dg1_irq_reset(dev_priv);
		else if (GRAPHICS_VER(dev_priv) >= 11)
			gen11_irq_reset(dev_priv);
		else if (GRAPHICS_VER(dev_priv) >= 8)
			gen8_irq_reset(dev_priv);
		else
			ilk_irq_reset(dev_priv);
	}
}

static void intel_irq_postinstall(struct drm_i915_private *dev_priv)
{
	if (HAS_GMCH(dev_priv)) {
		if (IS_CHERRYVIEW(dev_priv))
			cherryview_irq_postinstall(dev_priv);
		else if (IS_VALLEYVIEW(dev_priv))
			valleyview_irq_postinstall(dev_priv);
		else if (GRAPHICS_VER(dev_priv) == 4)
			i965_irq_postinstall(dev_priv);
		else if (GRAPHICS_VER(dev_priv) == 3)
			i915_irq_postinstall(dev_priv);
		else
			i8xx_irq_postinstall(dev_priv);
	} else {
		if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
			dg1_irq_postinstall(dev_priv);
		else if (GRAPHICS_VER(dev_priv) >= 11)
			gen11_irq_postinstall(dev_priv);
		else if (GRAPHICS_VER(dev_priv) >= 8)
			gen8_irq_postinstall(dev_priv);
		else
			ilk_irq_postinstall(dev_priv);
	}
}
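
/*
 * Typical driver-load usage of the functions below (a sketch of the expected
 * call order under the two-stage scheme documented at intel_irq_install();
 * the real call sites are in the probe code, not in this file):
 *
 *	intel_irq_init(i915);
 *	ret = intel_irq_install(i915);
 *	if (ret)
 *		goto err;
 *	... bring up modeset state ...
 *	intel_hpd_init(i915);
 */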

/**
 * intel_irq_install - enables the hardware interrupt
 * @dev_priv: i915 device instance
 *
 * This function enables the hardware interrupt handling, but leaves the hotplug
 * handling still disabled. It is called after intel_irq_init().
 *
 * In the driver load and resume code we need working interrupts in a few places
 * but don't want to deal with the hassle of concurrent probe and hotplug
 * workers. Hence the split into this two-stage approach.
 */
int intel_irq_install(struct drm_i915_private *dev_priv)
{
	int irq = to_pci_dev(dev_priv->drm.dev)->irq;
	int ret;

	/*
	 * We enable some interrupt sources in our postinstall hooks, so mark
	 * interrupts as enabled _before_ actually enabling them to avoid
	 * special cases in our ordering checks.
	 */
	dev_priv->runtime_pm.irqs_enabled = true;

	dev_priv->irq_enabled = true;

	intel_irq_reset(dev_priv);

	ret = request_irq(irq, intel_irq_handler(dev_priv),
			  IRQF_SHARED, DRIVER_NAME, dev_priv);
	if (ret < 0) {
		dev_priv->irq_enabled = false;
		return ret;
	}

	intel_irq_postinstall(dev_priv);

	return ret;
}

/**
 * intel_irq_uninstall - finalizes all irq handling
 * @dev_priv: i915 device instance
 *
 * This stops interrupt and hotplug handling and unregisters and frees all
 * resources acquired in the init functions.
 */
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
{
	int irq = to_pci_dev(dev_priv->drm.dev)->irq;

	/*
	 * FIXME we can get called twice during driver probe
	 * error handling as well as during driver remove due to
	 * intel_modeset_driver_remove() calling us out of sequence.
	 * Would be nice if it didn't do that...
	 */
	if (!dev_priv->irq_enabled)
		return;

	dev_priv->irq_enabled = false;

	intel_irq_reset(dev_priv);

	free_irq(irq, dev_priv);

	intel_hpd_cancel_work(dev_priv);
	dev_priv->runtime_pm.irqs_enabled = false;
}

/**
 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
 * @dev_priv: i915 device instance
 *
 * This function is used to disable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
{
	intel_irq_reset(dev_priv);
	dev_priv->runtime_pm.irqs_enabled = false;
	intel_synchronize_irq(dev_priv);
}

/**
 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
 * @dev_priv: i915 device instance
 *
 * This function is used to enable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->runtime_pm.irqs_enabled = true;
	intel_irq_reset(dev_priv);
	intel_irq_postinstall(dev_priv);
}

bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
{
	return dev_priv->runtime_pm.irqs_enabled;
}

void intel_synchronize_irq(struct drm_i915_private *i915)
{
	synchronize_irq(to_pci_dev(i915->drm.dev)->irq);
}

void intel_synchronize_hardirq(struct drm_i915_private *i915)
{
	synchronize_hardirq(to_pci_dev(i915->drm.dev)->irq);
}