/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/circ_buf.h>
#include <linux/slab.h>
#include <linux/sysrq.h>

#include <drm/drm_drv.h>
#include <drm/drm_irq.h>

#include "display/intel_display_types.h"
#include "display/intel_fifo_underrun.h"
#include "display/intel_hotplug.h"
#include "display/intel_lpe_audio.h"
#include "display/intel_psr.h"

#include "gt/intel_breadcrumbs.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_irq.h"
#include "gt/intel_gt_pm_irq.h"
#include "gt/intel_rps.h"

#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_trace.h"
#include "intel_pm.h"

/**
 * DOC: interrupt handling
 *
 * These functions provide the basic support for enabling and disabling the
 * interrupt handling support. There's a lot more functionality in i915_irq.c
 * and related files, but that will be described in separate chapters.
 */

/*
 * Interrupt statistic for PMU. Increments the counter only if the
 * interrupt originated from the GPU so interrupts from a device which
 * shares the interrupt line are not accounted.
 */
static inline void pmu_irq_stats(struct drm_i915_private *i915,
				 irqreturn_t res)
{
	if (unlikely(res != IRQ_HANDLED))
		return;

	/*
	 * A clever compiler translates that into INC. A not so clever one
	 * should at least prevent store tearing.
	 */
	WRITE_ONCE(i915->pmu.irq_count, i915->pmu.irq_count + 1);
}
typedef bool (*long_pulse_detect_func)(enum hpd_pin pin, u32 val);
typedef u32 (*hotplug_enables_func)(struct drm_i915_private *i915,
				    enum hpd_pin pin);

static const u32 hpd_ilk[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG,
};

static const u32 hpd_ivb[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
};

static const u32 hpd_bdw[HPD_NUM_PINS] = {
	[HPD_PORT_A] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_A),
};

static const u32 hpd_ibx[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG,
};

static const u32 hpd_cpt[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
};

static const u32 hpd_spt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
	[HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT,
};

static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN,
};

static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS,
};

static const u32 hpd_status_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS,
};

static const u32 hpd_bxt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_A),
	[HPD_PORT_B] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_B),
	[HPD_PORT_C] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_C),
};

static const u32 hpd_gen11[HPD_NUM_PINS] = {
	[HPD_PORT_TC1] = GEN11_TC_HOTPLUG(HPD_PORT_TC1) | GEN11_TBT_HOTPLUG(HPD_PORT_TC1),
	[HPD_PORT_TC2] = GEN11_TC_HOTPLUG(HPD_PORT_TC2) | GEN11_TBT_HOTPLUG(HPD_PORT_TC2),
	[HPD_PORT_TC3] = GEN11_TC_HOTPLUG(HPD_PORT_TC3) | GEN11_TBT_HOTPLUG(HPD_PORT_TC3),
	[HPD_PORT_TC4] = GEN11_TC_HOTPLUG(HPD_PORT_TC4) | GEN11_TBT_HOTPLUG(HPD_PORT_TC4),
	[HPD_PORT_TC5] = GEN11_TC_HOTPLUG(HPD_PORT_TC5) | GEN11_TBT_HOTPLUG(HPD_PORT_TC5),
	[HPD_PORT_TC6] = GEN11_TC_HOTPLUG(HPD_PORT_TC6) | GEN11_TBT_HOTPLUG(HPD_PORT_TC6),
};

static const u32 hpd_icp[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_A),
	[HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_B),
	[HPD_PORT_C] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_C),
	[HPD_PORT_TC1] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC1),
	[HPD_PORT_TC2] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC2),
	[HPD_PORT_TC3] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC3),
	[HPD_PORT_TC4] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC4),
	[HPD_PORT_TC5] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC5),
	[HPD_PORT_TC6] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC6),
};

static const u32 hpd_sde_dg1[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_A),
	[HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_B),
	[HPD_PORT_C] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_C),
	[HPD_PORT_D] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_D),
};

static void intel_hpd_init_pins(struct drm_i915_private *dev_priv)
{
	struct i915_hotplug *hpd = &dev_priv->hotplug;

	if (HAS_GMCH(dev_priv)) {
		if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
		    IS_CHERRYVIEW(dev_priv))
			hpd->hpd = hpd_status_g4x;
		else
			hpd->hpd = hpd_status_i915;
		return;
	}

	if (INTEL_GEN(dev_priv) >= 11)
		hpd->hpd = hpd_gen11;
	else if (IS_GEN9_LP(dev_priv))
		hpd->hpd = hpd_bxt;
	else if (INTEL_GEN(dev_priv) >= 8)
		hpd->hpd = hpd_bdw;
	else if (INTEL_GEN(dev_priv) >= 7)
		hpd->hpd = hpd_ivb;
	else
		hpd->hpd = hpd_ilk;

	if ((INTEL_PCH_TYPE(dev_priv) < PCH_DG1) &&
	    (!HAS_PCH_SPLIT(dev_priv) || HAS_PCH_NOP(dev_priv)))
		return;

	if (HAS_PCH_DG1(dev_priv))
		hpd->pch_hpd = hpd_sde_dg1;
	else if (HAS_PCH_TGP(dev_priv) || HAS_PCH_JSP(dev_priv) ||
		 HAS_PCH_ICP(dev_priv) || HAS_PCH_MCC(dev_priv))
		hpd->pch_hpd = hpd_icp;
	else if (HAS_PCH_CNP(dev_priv) || HAS_PCH_SPT(dev_priv))
		hpd->pch_hpd = hpd_spt;
	else if (HAS_PCH_LPT(dev_priv) || HAS_PCH_CPT(dev_priv))
		hpd->pch_hpd = hpd_cpt;
	else if (HAS_PCH_IBX(dev_priv))
		hpd->pch_hpd = hpd_ibx;
	else
		MISSING_CASE(INTEL_PCH_TYPE(dev_priv));
}
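/*
 * Illustrative example (a sketch, not driver code): with an ICP-class
 * PCH the selection above yields hpd->pch_hpd == hpd_icp, so
 * dev_priv->hotplug.pch_hpd[HPD_PORT_TC1] recovers
 * SDE_TC_HOTPLUG_ICP(HPD_PORT_TC1), the SDEIIR trigger bit that
 * icp_irq_handler() further down matches via intel_get_hpd_pins().
 */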
static void
intel_handle_vblank(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);

	drm_crtc_handle_vblank(&crtc->base);
}

void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr,
		    i915_reg_t iir, i915_reg_t ier)
{
	intel_uncore_write(uncore, imr, 0xffffffff);
	intel_uncore_posting_read(uncore, imr);

	intel_uncore_write(uncore, ier, 0);

	/* IIR can theoretically queue up two events. Be paranoid. */
	intel_uncore_write(uncore, iir, 0xffffffff);
	intel_uncore_posting_read(uncore, iir);
	intel_uncore_write(uncore, iir, 0xffffffff);
	intel_uncore_posting_read(uncore, iir);
}

void gen2_irq_reset(struct intel_uncore *uncore)
{
	intel_uncore_write16(uncore, GEN2_IMR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IMR);

	intel_uncore_write16(uncore, GEN2_IER, 0);

	/* IIR can theoretically queue up two events. Be paranoid. */
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
}
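/*
 * Background note (an assumption drawn from the "queue up two events"
 * comments above): since IIR can latch a second event while the first
 * is still set, a single clear may immediately re-assert the bit,
 * which is why the helpers above use a write/post/write/post sequence.
 */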
/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
static void gen3_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg)
{
	u32 val = intel_uncore_read(uncore, reg);

	if (val == 0)
		return;

	drm_WARN(&uncore->i915->drm, 1,
		 "Interrupt register 0x%x is not zero: 0x%08x\n",
		 i915_mmio_reg_offset(reg), val);
	intel_uncore_write(uncore, reg, 0xffffffff);
	intel_uncore_posting_read(uncore, reg);
	intel_uncore_write(uncore, reg, 0xffffffff);
	intel_uncore_posting_read(uncore, reg);
}

static void gen2_assert_iir_is_zero(struct intel_uncore *uncore)
{
	u16 val = intel_uncore_read16(uncore, GEN2_IIR);

	if (val == 0)
		return;

	drm_WARN(&uncore->i915->drm, 1,
		 "Interrupt register 0x%x is not zero: 0x%08x\n",
		 i915_mmio_reg_offset(GEN2_IIR), val);
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
}

void gen3_irq_init(struct intel_uncore *uncore,
		   i915_reg_t imr, u32 imr_val,
		   i915_reg_t ier, u32 ier_val,
		   i915_reg_t iir)
{
	gen3_assert_iir_is_zero(uncore, iir);

	intel_uncore_write(uncore, ier, ier_val);
	intel_uncore_write(uncore, imr, imr_val);
	intel_uncore_posting_read(uncore, imr);
}

void gen2_irq_init(struct intel_uncore *uncore,
		   u32 imr_val, u32 ier_val)
{
	gen2_assert_iir_is_zero(uncore);

	intel_uncore_write16(uncore, GEN2_IER, ier_val);
	intel_uncore_write16(uncore, GEN2_IMR, imr_val);
	intel_uncore_posting_read16(uncore, GEN2_IMR);
}
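/*
 * Usage sketch (illustrative values; the real callers are the
 * per-platform irq_reset/irq_postinstall hooks): a display engine irq
 * chip would tear down with
 *
 *	gen3_irq_reset(uncore, DEIMR, DEIIR, DEIER);
 *
 * and later re-arm with
 *
 *	gen3_irq_init(uncore, DEIMR, ~enable_mask, DEIER, enable_mask, DEIIR);
 *
 * where gen3_irq_init() first asserts that DEIIR is already zero.
 */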
/* For display hotplug interrupt */
static inline void
i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
				     u32 mask,
				     u32 bits)
{
	u32 val;

	lockdep_assert_held(&dev_priv->irq_lock);
	drm_WARN_ON(&dev_priv->drm, bits & ~mask);

	val = intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_EN);
	val &= ~mask;
	val |= bits;
	intel_uncore_write(&dev_priv->uncore, PORT_HOTPLUG_EN, val);
}

/**
 * i915_hotplug_interrupt_update - update hotplug interrupt enable
 * @dev_priv: driver private
 * @mask: bits to update
 * @bits: bits to enable
 * NOTE: the HPD enable bits are modified both inside and outside
 * of an interrupt context. To prevent concurrent read-modify-write
 * cycles from interfering, these bits are protected by a spinlock.
 * Since this function is usually not called from a context where the
 * lock is held already, this function acquires the lock itself. A
 * non-locking version is also available.
 */
void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
				   u32 mask,
				   u32 bits)
{
	spin_lock_irq(&dev_priv->irq_lock);
	i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
	spin_unlock_irq(&dev_priv->irq_lock);
}

/**
 * ilk_update_display_irq - update DEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ilk_update_display_irq(struct drm_i915_private *dev_priv,
			    u32 interrupt_mask,
			    u32 enabled_irq_mask)
{
	u32 new_val;

	lockdep_assert_held(&dev_priv->irq_lock);
	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

	new_val = dev_priv->irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->irq_mask &&
	    !drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv))) {
		dev_priv->irq_mask = new_val;
		intel_uncore_write(&dev_priv->uncore, DEIMR, dev_priv->irq_mask);
		intel_uncore_posting_read(&dev_priv->uncore, DEIMR);
	}
}
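/*
 * Worked example of the mask math above (IMR semantics: a set bit
 * masks the interrupt off). Enabling DE_AUX_CHANNEL_A:
 *
 *	ilk_update_display_irq(dev_priv, DE_AUX_CHANNEL_A, DE_AUX_CHANNEL_A);
 *
 * clears the bit in the cached irq_mask and in DEIMR, while
 *
 *	ilk_update_display_irq(dev_priv, DE_AUX_CHANNEL_A, 0);
 *
 * sets it again, disabling the interrupt. irq_lock must be held.
 */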
/**
 * bdw_update_port_irq - update DE port interrupt
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
				u32 interrupt_mask,
				u32 enabled_irq_mask)
{
	u32 new_val;
	u32 old_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
		return;

	old_val = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PORT_IMR);

	new_val = old_val;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != old_val) {
		intel_uncore_write(&dev_priv->uncore, GEN8_DE_PORT_IMR, new_val);
		intel_uncore_posting_read(&dev_priv->uncore, GEN8_DE_PORT_IMR);
	}
}

/**
 * bdw_update_pipe_irq - update DE pipe interrupt
 * @dev_priv: driver private
 * @pipe: pipe whose interrupt to update
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
			 enum pipe pipe,
			 u32 interrupt_mask,
			 u32 enabled_irq_mask)
{
	u32 new_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->de_irq_mask[pipe];
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->de_irq_mask[pipe]) {
		dev_priv->de_irq_mask[pipe] = new_val;
		intel_uncore_write(&dev_priv->uncore, GEN8_DE_PIPE_IMR(pipe),
				   dev_priv->de_irq_mask[pipe]);
		intel_uncore_posting_read(&dev_priv->uncore, GEN8_DE_PIPE_IMR(pipe));
	}
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
				  u32 interrupt_mask,
				  u32 enabled_irq_mask)
{
	u32 sdeimr = intel_uncore_read(&dev_priv->uncore, SDEIMR);

	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

	lockdep_assert_held(&dev_priv->irq_lock);

	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
		return;

	intel_uncore_write(&dev_priv->uncore, SDEIMR, sdeimr);
	intel_uncore_posting_read(&dev_priv->uncore, SDEIMR);
}

u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
			      enum pipe pipe)
{
	u32 status_mask = dev_priv->pipestat_irq_mask[pipe];
	u32 enable_mask = status_mask << 16;

	lockdep_assert_held(&dev_priv->irq_lock);

	if (INTEL_GEN(dev_priv) < 5)
		goto out;

	/*
	 * On pipe A we don't support the PSR interrupt yet,
	 * on pipe B and C the same bit MBZ.
	 */
	if (drm_WARN_ON_ONCE(&dev_priv->drm,
			     status_mask & PIPE_A_PSR_STATUS_VLV))
		return 0;
	/*
	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
	 * A the same bit is for perf counters which we don't use either.
	 */
	if (drm_WARN_ON_ONCE(&dev_priv->drm,
			     status_mask & PIPE_B_PSR_STATUS_VLV))
		return 0;

	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

out:
	drm_WARN_ONCE(&dev_priv->drm,
		      enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask);

	return enable_mask;
}

void i915_enable_pipestat(struct drm_i915_private *dev_priv,
			  enum pipe pipe, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 enable_mask;

	drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: status_mask=0x%x\n",
		      pipe_name(pipe), status_mask);

	lockdep_assert_held(&dev_priv->irq_lock);
	drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));

	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == status_mask)
		return;

	dev_priv->pipestat_irq_mask[pipe] |= status_mask;
	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

	intel_uncore_write(&dev_priv->uncore, reg, enable_mask | status_mask);
	intel_uncore_posting_read(&dev_priv->uncore, reg);
}

void i915_disable_pipestat(struct drm_i915_private *dev_priv,
			   enum pipe pipe, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 enable_mask;

	drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: status_mask=0x%x\n",
		      pipe_name(pipe), status_mask);

	lockdep_assert_held(&dev_priv->irq_lock);
	drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));

	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == 0)
		return;

	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

	intel_uncore_write(&dev_priv->uncore, reg, enable_mask | status_mask);
	intel_uncore_posting_read(&dev_priv->uncore, reg);
}
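/*
 * Usage sketch (mirrors i915_enable_asle_pipestat() below): pipestat
 * updates must run under irq_lock, e.g.
 *
 *	spin_lock_irq(&dev_priv->irq_lock);
 *	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
 *	spin_unlock_irq(&dev_priv->irq_lock);
 */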
static bool i915_has_asle(struct drm_i915_private *dev_priv)
{
	if (!dev_priv->opregion.asle)
		return false;

	return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
}

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 * @dev_priv: i915 device private
 */
static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
{
	if (!i915_has_asle(dev_priv))
		return;

	spin_lock_irq(&dev_priv->irq_lock);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
	if (INTEL_GEN(dev_priv) >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_STATUS);

	spin_unlock_irq(&dev_priv->irq_lock);
}

/*
 * This timing diagram depicts the video signal in and
 * around the vertical blanking period.
 *
 * Assumptions about the fictitious mode used in this example:
 *	vblank_start >= 3
 *	vsync_start = vblank_start + 1
 *	vsync_end = vblank_start + 2
 *	vtotal = vblank_start + 3
 *
 *           start of vblank:
 *           latch double buffered registers
 *           increment frame counter (ctg+)
 *           generate start of vblank interrupt (gen4+)
 *           |
 *           |          frame start:
 *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
 *           |          may be shifted forward 1-3 extra lines via PIPECONF
 *           |          |
 *           |          |  start of vsync:
 *           |          |  generate vsync interrupt
 *           |          |  |
 * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
 *       . \hs/ .      \hs/          \hs/          \hs/ .      \hs/
 * ----va---> <-----------------vb--------------------> <--------va-------------
 *       |          |       <----vs----->                     |
 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
 *       |          |                                         |
 *       last visible pixel                                   first visible pixel
 *                  |                                         increment frame counter (gen3/4)
 *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
 *
 * x  = horizontal active
 * _  = horizontal blanking
 * hs = horizontal sync
 * va = vertical active
 * vb = vertical blanking
 * vs = vertical sync
 * vbs = vblank_start (number)
 *
 * Summary:
 * - most events happen at the start of horizontal sync
 * - frame start happens at the start of horizontal blank, 1-4 lines
 *   (depending on PIPECONF settings) after the start of vblank
 * - gen3/4 pixel and frame counter are synchronized with the start
 *   of horizontal active on the first line of vertical active
 */

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
u32 i915_get_vblank_counter(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[drm_crtc_index(crtc)];
	const struct drm_display_mode *mode = &vblank->hwmode;
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	i915_reg_t high_frame, low_frame;
	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
	unsigned long irqflags;

	/*
	 * On i965gm TV output the frame counter only works up to
	 * the point when we enable the TV encoder. After that the
	 * frame counter ceases to work and reads zero. We need a
	 * vblank wait before enabling the TV encoder and so we
	 * have to enable vblank interrupts while the frame counter
	 * is still in a working state. However the core vblank code
	 * does not like us returning non-zero frame counter values
	 * when we've told it that we don't have a working frame
	 * counter. Thus we must stop non-zero values leaking out.
	 */
	if (!vblank->max_vblank_count)
		return 0;

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vbl_start = mode->crtc_vblank_start;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vbl_start = DIV_ROUND_UP(vbl_start, 2);

	/* Convert to pixel count */
	vbl_start *= htotal;

	/* Start of vblank event occurs at start of hsync */
	vbl_start -= htotal - hsync_start;

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = intel_de_read_fw(dev_priv, high_frame) & PIPE_FRAME_HIGH_MASK;
		low = intel_de_read_fw(dev_priv, low_frame);
		high2 = intel_de_read_fw(dev_priv, high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * The frame counter increments at beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}
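/*
 * Worked example of the cook-up above (illustrative numbers): the
 * hardware frame counter only ticks at the start of active, so if it
 * reads 41 while the pixel counter is already at or past the
 * hsync-adjusted vbl_start, we are in the vblank of the next frame and
 * the "+ (pixel >= vbl_start)" term makes us report 42.
 */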
u32 g4x_get_vblank_counter(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[drm_crtc_index(crtc)];
	enum pipe pipe = to_intel_crtc(crtc)->pipe;

	if (!vblank->max_vblank_count)
		return 0;

	return intel_uncore_read(&dev_priv->uncore, PIPE_FRMCOUNT_G4X(pipe));
}

/*
 * On certain encoders on certain platforms, the pipe
 * scanline register will not work to get the scanline,
 * either because the timings are driven from the PORT or
 * because of issues with scanline register updates.
 * This function will use the Framestamp and current
 * timestamp registers to calculate the scanline.
 */
static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct drm_vblank_crtc *vblank =
		&crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
	const struct drm_display_mode *mode = &vblank->hwmode;
	u32 vblank_start = mode->crtc_vblank_start;
	u32 vtotal = mode->crtc_vtotal;
	u32 htotal = mode->crtc_htotal;
	u32 clock = mode->crtc_clock;
	u32 scanline, scan_prev_time, scan_curr_time, scan_post_time;

	/*
	 * To avoid the race condition where we might cross into the
	 * next vblank just between the PIPE_FRMTMSTMP and TIMESTAMP_CTR
	 * reads, we make sure we read PIPE_FRMTMSTMP and TIMESTAMP_CTR
	 * during the same frame.
	 */
	do {
		/*
		 * This field provides read back of the display
		 * pipe frame time stamp. The time stamp value
		 * is sampled at every start of vertical blank.
		 */
		scan_prev_time = intel_de_read_fw(dev_priv,
						  PIPE_FRMTMSTMP(crtc->pipe));

		/*
		 * The TIMESTAMP_CTR register has the current
		 * time stamp value.
		 */
		scan_curr_time = intel_de_read_fw(dev_priv, IVB_TIMESTAMP_CTR);

		scan_post_time = intel_de_read_fw(dev_priv,
						  PIPE_FRMTMSTMP(crtc->pipe));
	} while (scan_post_time != scan_prev_time);

	scanline = div_u64(mul_u32_u32(scan_curr_time - scan_prev_time,
				       clock), 1000 * htotal);
	scanline = min(scanline, vtotal - 1);
	scanline = (scanline + vblank_start) % vtotal;

	return scanline;
}
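/*
 * Worked example of the conversion above (illustrative numbers,
 * assuming the timestamp delta is in usec, consistent with the
 * division by 1000 * htotal with crtc_clock in kHz): with
 * crtc_clock = 148500 and htotal = 2200, one line takes roughly
 * 2200 * 1000 / 148500 ~= 14.8 us, so a delta of 1482 yields
 * 1482 * 148500 / (1000 * 2200) ~= 100, i.e. scanline 100 before the
 * vtotal clamp and vblank_start rebase.
 */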
/*
 * intel_de_read_fw(), only for fast reads of display block, no need for
 * forcewake etc.
 */
static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct drm_display_mode *mode;
	struct drm_vblank_crtc *vblank;
	enum pipe pipe = crtc->pipe;
	int position, vtotal;

	if (!crtc->active)
		return -1;

	vblank = &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
	mode = &vblank->hwmode;

	if (crtc->mode_flags & I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP)
		return __intel_get_crtc_scanline_from_timestamp(crtc);

	vtotal = mode->crtc_vtotal;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vtotal /= 2;

	if (IS_GEN(dev_priv, 2))
		position = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
	else
		position = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

	/*
	 * On HSW, the DSL reg (0x70000) appears to return 0 if we
	 * read it just before the start of vblank. So try it again
	 * so we don't accidentally end up spanning a vblank frame
	 * increment, causing the pipe_update_end() code to squawk at us.
	 *
	 * The nature of this problem means we can't simply check the ISR
	 * bit and return the vblank start value; nor can we use the scanline
	 * debug register in the transcoder as it appears to have the same
	 * problem. We may need to extend this to include other platforms,
	 * but so far testing only shows the problem on HSW.
	 */
	if (HAS_DDI(dev_priv) && !position) {
		int i, temp;

		for (i = 0; i < 100; i++) {
			udelay(1);
			temp = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
			if (temp != position) {
				position = temp;
				break;
			}
		}
	}

	/*
	 * See update_scanline_offset() for the details on the
	 * scanline_offset adjustment.
	 */
	return (position + crtc->scanline_offset) % vtotal;
}

static bool i915_get_crtc_scanoutpos(struct drm_crtc *_crtc,
				     bool in_vblank_irq,
				     int *vpos, int *hpos,
				     ktime_t *stime, ktime_t *etime,
				     const struct drm_display_mode *mode)
{
	struct drm_device *dev = _crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *crtc = to_intel_crtc(_crtc);
	enum pipe pipe = crtc->pipe;
	int position;
	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
	unsigned long irqflags;
	bool use_scanline_counter = INTEL_GEN(dev_priv) >= 5 ||
		IS_G4X(dev_priv) || IS_GEN(dev_priv, 2) ||
		crtc->mode_flags & I915_MODE_FLAG_USE_SCANLINE_COUNTER;

	if (drm_WARN_ON(&dev_priv->drm, !mode->crtc_clock)) {
		drm_dbg(&dev_priv->drm,
			"trying to get scanoutpos for disabled pipe %c\n",
			pipe_name(pipe));
		return false;
	}

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (use_scanline_counter) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = __intel_get_crtc_scanline(crtc);
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (intel_de_read_fw(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;

		/*
		 * In interlaced modes, the pixel counter counts all pixels,
		 * so one field will have htotal more pixels. In order to avoid
		 * the reported position from jumping backwards when the pixel
		 * counter is beyond the length of the shorter field, just
		 * clamp the position to the length of the shorter field. This
		 * matches how the scanline counter based position works since
		 * the scanline counter doesn't count the two half lines.
		 */
		if (position >= vtotal)
			position = vtotal - 1;

		/*
		 * Start of vblank interrupt is triggered at start of hsync,
		 * just prior to the first active line of vblank. However we
		 * consider lines to start at the leading edge of horizontal
		 * active. So, should we get here before we've crossed into
		 * the horizontal active of the first line in vblank, we would
		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
		 * always add htotal-hsync_start to the current pixel position.
		 */
		position = (position + htotal - hsync_start) % vtotal;
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;

	if (use_scanline_counter) {
		*vpos = position;
		*hpos = 0;
	} else {
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	return true;
}
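/*
 * Worked example of the rebase above (illustrative mode): with
 * vbl_start = 1080 and vbl_end = vtotal = 1125, scanline 1080 becomes
 * -45 (first vblank line), 1124 becomes -1 (last vblank line), and 0
 * stays 0 (first active line).
 */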
bool intel_crtc_get_vblank_timestamp(struct drm_crtc *crtc, int *max_error,
				     ktime_t *vblank_time, bool in_vblank_irq)
{
	return drm_crtc_vblank_helper_get_vblank_timestamp_internal(
		crtc, max_error, vblank_time, in_vblank_irq,
		i915_get_crtc_scanoutpos);
}

int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	unsigned long irqflags;
	int position;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	position = __intel_get_crtc_scanline(crtc);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	return position;
}

/**
 * ivb_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows, since statistically
 * the same row is likely to go bad again.
 */
static void ivb_parity_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), l3_parity.error_work);
	struct intel_gt *gt = &dev_priv->gt;
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	u32 misccpctl;
	u8 slice = 0;

	/*
	 * We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->drm.struct_mutex);

	/* If we've screwed up tracking, just let the interrupt fire again */
	if (drm_WARN_ON(&dev_priv->drm, !dev_priv->l3_parity.which_slice))
		goto out;

	misccpctl = intel_uncore_read(&dev_priv->uncore, GEN7_MISCCPCTL);
	intel_uncore_write(&dev_priv->uncore, GEN7_MISCCPCTL,
			   misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	intel_uncore_posting_read(&dev_priv->uncore, GEN7_MISCCPCTL);

	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		i915_reg_t reg;

		slice--;
		if (drm_WARN_ON_ONCE(&dev_priv->drm,
				     slice >= NUM_L3_SLICES(dev_priv)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1 << slice);

		reg = GEN7_L3CDERRST1(slice);

		error_status = intel_uncore_read(&dev_priv->uncore, reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		intel_uncore_write(&dev_priv->uncore, reg,
				   GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		intel_uncore_posting_read(&dev_priv->uncore, reg);

		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

		kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj,
				   KOBJ_CHANGE, parity_event);

		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			  slice, row, bank, subbank);

		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
	}

	intel_uncore_write(&dev_priv->uncore, GEN7_MISCCPCTL, misccpctl);

out:
	drm_WARN_ON(&dev_priv->drm, dev_priv->l3_parity.which_slice);
	spin_lock_irq(&gt->irq_lock);
	gen5_gt_enable_irq(gt, GT_PARITY_ERROR(dev_priv));
	spin_unlock_irq(&gt->irq_lock);

	mutex_unlock(&dev_priv->drm.struct_mutex);
}
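/*
 * Illustrative result (hypothetical values): a parity error in slice 0,
 * row 1, bank 2, subbank 3 emits a KOBJ_CHANGE uevent whose environment
 * carries I915_L3_PARITY_UEVENT=1 ROW=1 BANK=2 SUBBANK=3 SLICE=0 (the
 * first key spelled as the macro expands), which userspace can use to
 * remap the bad row.
 */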
static bool gen11_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_TC1:
	case HPD_PORT_TC2:
	case HPD_PORT_TC3:
	case HPD_PORT_TC4:
	case HPD_PORT_TC5:
	case HPD_PORT_TC6:
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(pin);
	default:
		return false;
	}
}

static bool bxt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_A:
		return val & PORTA_HOTPLUG_LONG_DETECT;
	case HPD_PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case HPD_PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool icp_ddi_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_A:
	case HPD_PORT_B:
	case HPD_PORT_C:
	case HPD_PORT_D:
		return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(pin);
	default:
		return false;
	}
}

static bool icp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_TC1:
	case HPD_PORT_TC2:
	case HPD_PORT_TC3:
	case HPD_PORT_TC4:
	case HPD_PORT_TC5:
	case HPD_PORT_TC6:
		return val & ICP_TC_HPD_LONG_DETECT(pin);
	default:
		return false;
	}
}

static bool spt_port_hotplug2_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_E:
		return val & PORTE_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool spt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_A:
		return val & PORTA_HOTPLUG_LONG_DETECT;
	case HPD_PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case HPD_PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	case HPD_PORT_D:
		return val & PORTD_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool ilk_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_A:
		return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool pch_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case HPD_PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	case HPD_PORT_D:
		return val & PORTD_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool i9xx_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_B:
		return val & PORTB_HOTPLUG_INT_LONG_PULSE;
	case HPD_PORT_C:
		return val & PORTC_HOTPLUG_INT_LONG_PULSE;
	case HPD_PORT_D:
		return val & PORTD_HOTPLUG_INT_LONG_PULSE;
	default:
		return false;
	}
}
/*
 * Get a bit mask of pins that have triggered, and which ones may be long.
 * This can be called multiple times with the same masks to accumulate
 * hotplug detection results from several registers.
 *
 * Note that the caller is expected to zero out the masks initially.
 */
static void intel_get_hpd_pins(struct drm_i915_private *dev_priv,
			       u32 *pin_mask, u32 *long_mask,
			       u32 hotplug_trigger, u32 dig_hotplug_reg,
			       const u32 hpd[HPD_NUM_PINS],
			       bool long_pulse_detect(enum hpd_pin pin, u32 val))
{
	enum hpd_pin pin;

	BUILD_BUG_ON(BITS_PER_TYPE(*pin_mask) < HPD_NUM_PINS);

	for_each_hpd_pin(pin) {
		if ((hpd[pin] & hotplug_trigger) == 0)
			continue;

		*pin_mask |= BIT(pin);

		if (long_pulse_detect(pin, dig_hotplug_reg))
			*long_mask |= BIT(pin);
	}

	drm_dbg(&dev_priv->drm,
		"hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x, long 0x%08x\n",
		hotplug_trigger, dig_hotplug_reg, *pin_mask, *long_mask);
}
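/*
 * Usage sketch (mirrors spt_irq_handler() further down; the variable
 * names here are illustrative): results accumulate across calls, so two
 * hotplug registers can feed a single pin_mask/long_mask pair:
 *
 *	u32 pin_mask = 0, long_mask = 0;
 *
 *	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
 *			   hotplug_trigger, dig_hotplug_reg,
 *			   dev_priv->hotplug.pch_hpd,
 *			   spt_port_hotplug_long_detect);
 *	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
 *			   hotplug2_trigger, dig_hotplug_reg2,
 *			   dev_priv->hotplug.pch_hpd,
 *			   spt_port_hotplug2_long_detect);
 *	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
 */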
static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv,
				  const u32 hpd[HPD_NUM_PINS])
{
	struct intel_encoder *encoder;
	u32 enabled_irqs = 0;

	for_each_intel_encoder(&dev_priv->drm, encoder)
		if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
			enabled_irqs |= hpd[encoder->hpd_pin];

	return enabled_irqs;
}

static u32 intel_hpd_hotplug_irqs(struct drm_i915_private *dev_priv,
				  const u32 hpd[HPD_NUM_PINS])
{
	struct intel_encoder *encoder;
	u32 hotplug_irqs = 0;

	for_each_intel_encoder(&dev_priv->drm, encoder)
		hotplug_irqs |= hpd[encoder->hpd_pin];

	return hotplug_irqs;
}

static u32 intel_hpd_hotplug_enables(struct drm_i915_private *i915,
				     hotplug_enables_func hotplug_enables)
{
	struct intel_encoder *encoder;
	u32 hotplug = 0;

	for_each_intel_encoder(&i915->drm, encoder)
		hotplug |= hotplug_enables(i915, encoder->hpd_pin);

	return hotplug;
}

static void gmbus_irq_handler(struct drm_i915_private *dev_priv)
{
	wake_up_all(&dev_priv->gmbus_wait_queue);
}

static void dp_aux_irq_handler(struct drm_i915_private *dev_priv)
{
	wake_up_all(&dev_priv->gmbus_wait_queue);
}

#if defined(CONFIG_DEBUG_FS)
static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
					 enum pipe pipe,
					 u32 crc0, u32 crc1,
					 u32 crc2, u32 crc3,
					 u32 crc4)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
	struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc;
	u32 crcs[5] = { crc0, crc1, crc2, crc3, crc4 };

	trace_intel_pipe_crc(crtc, crcs);

	spin_lock(&pipe_crc->lock);
	/*
	 * For some not yet identified reason, the first CRC is
	 * bonkers. So let's just wait for the next vblank and read
	 * out the buggy result.
	 *
	 * On GEN8+ sometimes the second CRC is bonkers as well, so
	 * don't trust that one either.
	 */
	if (pipe_crc->skipped <= 0 ||
	    (INTEL_GEN(dev_priv) >= 8 && pipe_crc->skipped == 1)) {
		pipe_crc->skipped++;
		spin_unlock(&pipe_crc->lock);
		return;
	}
	spin_unlock(&pipe_crc->lock);

	drm_crtc_add_crc_entry(&crtc->base, true,
			       drm_crtc_accurate_vblank_count(&crtc->base),
			       crcs);
}
#else
static inline void
display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
			     enum pipe pipe,
			     u32 crc0, u32 crc1,
			     u32 crc2, u32 crc3,
			     u32 crc4) {}
#endif

static void flip_done_handler(struct drm_i915_private *i915,
			      enum pipe pipe)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(i915, pipe);
	struct drm_crtc_state *crtc_state = crtc->base.state;
	struct drm_pending_vblank_event *e = crtc_state->event;
	struct drm_device *dev = &i915->drm;
	unsigned long irqflags;

	spin_lock_irqsave(&dev->event_lock, irqflags);

	crtc_state->event = NULL;

	drm_crtc_send_vblank_event(&crtc->base, e);

	spin_unlock_irqrestore(&dev->event_lock, irqflags);
}

static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	display_pipe_crc_irq_handler(dev_priv, pipe,
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_1_IVB(pipe)),
				     0, 0, 0, 0);
}

static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	display_pipe_crc_irq_handler(dev_priv, pipe,
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_1_IVB(pipe)),
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_2_IVB(pipe)),
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_3_IVB(pipe)),
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_4_IVB(pipe)),
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_5_IVB(pipe)));
}

static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	u32 res1, res2;

	if (INTEL_GEN(dev_priv) >= 3)
		res1 = intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RES1_I915(pipe));
	else
		res1 = 0;

	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
		res2 = intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RES2_G4X(pipe));
	else
		res2 = 0;

	display_pipe_crc_irq_handler(dev_priv, pipe,
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RED(pipe)),
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_GREEN(pipe)),
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_BLUE(pipe)),
				     res1, res2);
}

static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		intel_uncore_write(&dev_priv->uncore, PIPESTAT(pipe),
				   PIPESTAT_INT_STATUS_MASK |
				   PIPE_FIFO_UNDERRUN_STATUS);

		dev_priv->pipestat_irq_mask[pipe] = 0;
	}
}

static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
				  u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	enum pipe pipe;

	spin_lock(&dev_priv->irq_lock);

	if (!dev_priv->display_irqs_enabled) {
		spin_unlock(&dev_priv->irq_lock);
		return;
	}

	for_each_pipe(dev_priv, pipe) {
		i915_reg_t reg;
		u32 status_mask, enable_mask, iir_bit = 0;

		/*
		 * PIPESTAT bits get signalled even when the interrupt is
		 * disabled with the mask bits, and some of the status bits do
		 * not generate interrupts at all (like the underrun bit). Hence
		 * we need to be careful that we only handle what we want to
		 * handle.
		 */
		/* fifo underruns are filtered in the underrun handler. */
		status_mask = PIPE_FIFO_UNDERRUN_STATUS;

		switch (pipe) {
		default:
		case PIPE_A:
			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
			break;
		case PIPE_B:
			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
			break;
		case PIPE_C:
			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
			break;
		}
		if (iir & iir_bit)
			status_mask |= dev_priv->pipestat_irq_mask[pipe];

		if (!status_mask)
			continue;

		reg = PIPESTAT(pipe);
		pipe_stats[pipe] = intel_uncore_read(&dev_priv->uncore, reg) & status_mask;
		enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

		/*
		 * Clear the PIPE*STAT regs before the IIR
		 *
		 * Toggle the enable bits to make sure we get an
		 * edge in the ISR pipe event bit if we don't clear
		 * all the enabled status bits. Otherwise the edge
		 * triggered IIR on i965/g4x wouldn't notice that
		 * an interrupt is still pending.
		 */
		if (pipe_stats[pipe]) {
			intel_uncore_write(&dev_priv->uncore, reg, pipe_stats[pipe]);
			intel_uncore_write(&dev_priv->uncore, reg, enable_mask);
		}
	}
	spin_unlock(&dev_priv->irq_lock);
}

static void i8xx_pipestat_irq_handler(struct drm_i915_private *dev_priv,
				      u16 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
			intel_handle_vblank(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}
}

static void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv,
				      u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	bool blc_event = false;
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
			intel_handle_vblank(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
			blc_event = true;

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (blc_event || (iir & I915_ASLE_INTERRUPT))
		intel_opregion_asle_intr(dev_priv);
}

static void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv,
				      u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	bool blc_event = false;
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
			intel_handle_vblank(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
			blc_event = true;

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (blc_event || (iir & I915_ASLE_INTERRUPT))
		intel_opregion_asle_intr(dev_priv);

	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		gmbus_irq_handler(dev_priv);
}
static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
					    u32 pipe_stats[I915_MAX_PIPES])
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
			intel_handle_vblank(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		gmbus_irq_handler(dev_priv);
}

static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
{
	u32 hotplug_status = 0, hotplug_status_mask;
	int i;

	if (IS_G4X(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		hotplug_status_mask = HOTPLUG_INT_STATUS_G4X |
			DP_AUX_CHANNEL_MASK_INT_STATUS_G4X;
	else
		hotplug_status_mask = HOTPLUG_INT_STATUS_I915;

	/*
	 * We absolutely have to clear all the pending interrupt
	 * bits in PORT_HOTPLUG_STAT. Otherwise the ISR port
	 * interrupt bit won't have an edge, and the i965/g4x
	 * edge triggered IIR will not notice that an interrupt
	 * is still pending. We can't use PORT_HOTPLUG_EN to
	 * guarantee the edge as the act of toggling the enable
	 * bits can itself generate a new hotplug interrupt :(
	 */
	for (i = 0; i < 10; i++) {
		u32 tmp = intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT) & hotplug_status_mask;

		if (tmp == 0)
			return hotplug_status;

		hotplug_status |= tmp;
		intel_uncore_write(&dev_priv->uncore, PORT_HOTPLUG_STAT, hotplug_status);
	}

	drm_WARN_ONCE(&dev_priv->drm, 1,
		      "PORT_HOTPLUG_STAT did not clear (0x%08x)\n",
		      intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT));

	return hotplug_status;
}

static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv,
				 u32 hotplug_status)
{
	u32 pin_mask = 0, long_mask = 0;
	u32 hotplug_trigger;

	if (IS_G4X(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
	else
		hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

	if (hotplug_trigger) {
		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
				   hotplug_trigger, hotplug_trigger,
				   dev_priv->hotplug.hpd,
				   i9xx_port_hotplug_long_detect);

		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
	}

	if ((IS_G4X(dev_priv) ||
	     IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
		dp_aux_irq_handler(dev_priv);
}

static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 iir, gt_iir, pm_iir;
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 hotplug_status = 0;
		u32 ier = 0;

		gt_iir = intel_uncore_read(&dev_priv->uncore, GTIIR);
		pm_iir = intel_uncore_read(&dev_priv->uncore, GEN6_PMIIR);
		iir = intel_uncore_read(&dev_priv->uncore, VLV_IIR);

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			break;

		ret = IRQ_HANDLED;

		/*
		 * Theory on interrupt generation, based on empirical evidence:
		 *
		 * x = ((VLV_IIR & VLV_IER) ||
		 *      (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) &&
		 *       (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE)));
		 *
		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
		 * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to
		 * guarantee the CPU interrupt will be raised again even if we
		 * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR
		 * bits this time around.
		 */
		intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, 0);
		ier = intel_uncore_read(&dev_priv->uncore, VLV_IER);
		intel_uncore_write(&dev_priv->uncore, VLV_IER, 0);

		if (gt_iir)
			intel_uncore_write(&dev_priv->uncore, GTIIR, gt_iir);
		if (pm_iir)
			intel_uncore_write(&dev_priv->uncore, GEN6_PMIIR, pm_iir);

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & (I915_LPE_PIPE_A_INTERRUPT |
			   I915_LPE_PIPE_B_INTERRUPT))
			intel_lpe_audio_irq_handler(dev_priv);

		/*
		 * VLV_IIR is single buffered, and reflects the level
		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
		 */
		if (iir)
			intel_uncore_write(&dev_priv->uncore, VLV_IIR, iir);

		intel_uncore_write(&dev_priv->uncore, VLV_IER, ier);
		intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);

		if (gt_iir)
			gen6_gt_irq_handler(&dev_priv->gt, gt_iir);
		if (pm_iir)
			gen6_rps_irq_handler(&dev_priv->gt.rps, pm_iir);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
	} while (0);

	pmu_irq_stats(dev_priv, ret);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}

static irqreturn_t cherryview_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 master_ctl, iir;
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 hotplug_status = 0;
		u32 ier = 0;

		master_ctl = intel_uncore_read(&dev_priv->uncore, GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
		iir = intel_uncore_read(&dev_priv->uncore, VLV_IIR);

		if (master_ctl == 0 && iir == 0)
			break;

		ret = IRQ_HANDLED;

		/*
		 * Theory on interrupt generation, based on empirical evidence:
		 *
		 * x = ((VLV_IIR & VLV_IER) ||
		 *      ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) &&
		 *       (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL)));
		 *
		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
		 * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to
		 * guarantee the CPU interrupt will be raised again even if we
		 * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL
		 * bits this time around.
		 */
		intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, 0);
		ier = intel_uncore_read(&dev_priv->uncore, VLV_IER);
		intel_uncore_write(&dev_priv->uncore, VLV_IER, 0);

		gen8_gt_irq_handler(&dev_priv->gt, master_ctl);

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & (I915_LPE_PIPE_A_INTERRUPT |
			   I915_LPE_PIPE_B_INTERRUPT |
			   I915_LPE_PIPE_C_INTERRUPT))
			intel_lpe_audio_irq_handler(dev_priv);

		/*
		 * VLV_IIR is single buffered, and reflects the level
		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
		 */
		if (iir)
			intel_uncore_write(&dev_priv->uncore, VLV_IIR, iir);

		intel_uncore_write(&dev_priv->uncore, VLV_IER, ier);
		intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
	} while (0);

	pmu_irq_stats(dev_priv, ret);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}

static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
				u32 hotplug_trigger)
{
	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;

	/*
	 * Somehow the PCH doesn't seem to really ack the interrupt to the CPU
	 * unless we touch the hotplug register, even if hotplug_trigger is
	 * zero. Not acking leads to "The master control interrupt lied (SDE)!"
	 * errors.
	 */
	dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
	if (!hotplug_trigger) {
		u32 mask = PORTA_HOTPLUG_STATUS_MASK |
			PORTD_HOTPLUG_STATUS_MASK |
			PORTC_HOTPLUG_STATUS_MASK |
			PORTB_HOTPLUG_STATUS_MASK;
		dig_hotplug_reg &= ~mask;
	}

	intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, dig_hotplug_reg);
	if (!hotplug_trigger)
		return;

	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
			   hotplug_trigger, dig_hotplug_reg,
			   dev_priv->hotplug.pch_hpd,
			   pch_port_hotplug_long_detect);

	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
}

static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
	enum pipe pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;

	ibx_hpd_irq_handler(dev_priv, hotplug_trigger);

	if (pch_iir & SDE_AUDIO_POWER_MASK) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
			       SDE_AUDIO_POWER_SHIFT);
		drm_dbg(&dev_priv->drm, "PCH audio power change on port %c\n",
			port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK)
		dp_aux_irq_handler(dev_priv);

	if (pch_iir & SDE_GMBUS)
		gmbus_irq_handler(dev_priv);

	if (pch_iir & SDE_AUDIO_HDCP_MASK)
		drm_dbg(&dev_priv->drm, "PCH HDCP audio interrupt\n");

	if (pch_iir & SDE_AUDIO_TRANS_MASK)
		drm_dbg(&dev_priv->drm, "PCH transcoder audio interrupt\n");

	if (pch_iir & SDE_POISON)
		drm_err(&dev_priv->drm, "PCH poison interrupt\n");

	if (pch_iir & SDE_FDI_MASK) {
		for_each_pipe(dev_priv, pipe)
			drm_dbg(&dev_priv->drm, "  pipe %c FDI IIR: 0x%08x\n",
				pipe_name(pipe),
				intel_uncore_read(&dev_priv->uncore, FDI_RX_IIR(pipe)));
	}

(SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE)) 1831 drm_dbg(&dev_priv->drm, "PCH transcoder CRC done interrupt\n"); 1832 1833 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR)) 1834 drm_dbg(&dev_priv->drm, 1835 "PCH transcoder CRC error interrupt\n"); 1836 1837 if (pch_iir & SDE_TRANSA_FIFO_UNDER) 1838 intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A); 1839 1840 if (pch_iir & SDE_TRANSB_FIFO_UNDER) 1841 intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_B); 1842 } 1843 1844 static void ivb_err_int_handler(struct drm_i915_private *dev_priv) 1845 { 1846 u32 err_int = intel_uncore_read(&dev_priv->uncore, GEN7_ERR_INT); 1847 enum pipe pipe; 1848 1849 if (err_int & ERR_INT_POISON) 1850 drm_err(&dev_priv->drm, "Poison interrupt\n"); 1851 1852 for_each_pipe(dev_priv, pipe) { 1853 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) 1854 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 1855 1856 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) { 1857 if (IS_IVYBRIDGE(dev_priv)) 1858 ivb_pipe_crc_irq_handler(dev_priv, pipe); 1859 else 1860 hsw_pipe_crc_irq_handler(dev_priv, pipe); 1861 } 1862 } 1863 1864 intel_uncore_write(&dev_priv->uncore, GEN7_ERR_INT, err_int); 1865 } 1866 1867 static void cpt_serr_int_handler(struct drm_i915_private *dev_priv) 1868 { 1869 u32 serr_int = intel_uncore_read(&dev_priv->uncore, SERR_INT); 1870 enum pipe pipe; 1871 1872 if (serr_int & SERR_INT_POISON) 1873 drm_err(&dev_priv->drm, "PCH poison interrupt\n"); 1874 1875 for_each_pipe(dev_priv, pipe) 1876 if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe)) 1877 intel_pch_fifo_underrun_irq_handler(dev_priv, pipe); 1878 1879 intel_uncore_write(&dev_priv->uncore, SERR_INT, serr_int); 1880 } 1881 1882 static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) 1883 { 1884 enum pipe pipe; 1885 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; 1886 1887 ibx_hpd_irq_handler(dev_priv, hotplug_trigger); 1888 1889 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) { 1890 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> 1891 SDE_AUDIO_POWER_SHIFT_CPT); 1892 drm_dbg(&dev_priv->drm, "PCH audio power change on port %c\n", 1893 port_name(port)); 1894 } 1895 1896 if (pch_iir & SDE_AUX_MASK_CPT) 1897 dp_aux_irq_handler(dev_priv); 1898 1899 if (pch_iir & SDE_GMBUS_CPT) 1900 gmbus_irq_handler(dev_priv); 1901 1902 if (pch_iir & SDE_AUDIO_CP_REQ_CPT) 1903 drm_dbg(&dev_priv->drm, "Audio CP request interrupt\n"); 1904 1905 if (pch_iir & SDE_AUDIO_CP_CHG_CPT) 1906 drm_dbg(&dev_priv->drm, "Audio CP change interrupt\n"); 1907 1908 if (pch_iir & SDE_FDI_MASK_CPT) { 1909 for_each_pipe(dev_priv, pipe) 1910 drm_dbg(&dev_priv->drm, " pipe %c FDI IIR: 0x%08x\n", 1911 pipe_name(pipe), 1912 intel_uncore_read(&dev_priv->uncore, FDI_RX_IIR(pipe))); 1913 } 1914 1915 if (pch_iir & SDE_ERROR_CPT) 1916 cpt_serr_int_handler(dev_priv); 1917 } 1918 1919 static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) 1920 { 1921 u32 ddi_hotplug_trigger = pch_iir & SDE_DDI_HOTPLUG_MASK_ICP; 1922 u32 tc_hotplug_trigger = pch_iir & SDE_TC_HOTPLUG_MASK_ICP; 1923 u32 pin_mask = 0, long_mask = 0; 1924 1925 if (ddi_hotplug_trigger) { 1926 u32 dig_hotplug_reg; 1927 1928 dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, SHOTPLUG_CTL_DDI); 1929 intel_uncore_write(&dev_priv->uncore, SHOTPLUG_CTL_DDI, dig_hotplug_reg); 1930 1931 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 1932 ddi_hotplug_trigger, dig_hotplug_reg, 1933 dev_priv->hotplug.pch_hpd, 1934 icp_ddi_port_hotplug_long_detect); 1935 } 1936 1937 if (tc_hotplug_trigger) { 1938 u32 
dig_hotplug_reg;
1939
1940 dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, SHOTPLUG_CTL_TC);
1941 intel_uncore_write(&dev_priv->uncore, SHOTPLUG_CTL_TC, dig_hotplug_reg);
1942
1943 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1944 tc_hotplug_trigger, dig_hotplug_reg,
1945 dev_priv->hotplug.pch_hpd,
1946 icp_tc_port_hotplug_long_detect);
1947 }
1948
1949 if (pin_mask)
1950 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1951
1952 if (pch_iir & SDE_GMBUS_ICP)
1953 gmbus_irq_handler(dev_priv);
1954 }
1955
1956 static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
1957 {
1958 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
1959 ~SDE_PORTE_HOTPLUG_SPT;
1960 u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
1961 u32 pin_mask = 0, long_mask = 0;
1962
1963 if (hotplug_trigger) {
1964 u32 dig_hotplug_reg;
1965
1966 dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
1967 intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, dig_hotplug_reg);
1968
1969 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1970 hotplug_trigger, dig_hotplug_reg,
1971 dev_priv->hotplug.pch_hpd,
1972 spt_port_hotplug_long_detect);
1973 }
1974
1975 if (hotplug2_trigger) {
1976 u32 dig_hotplug_reg;
1977
1978 dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG2);
1979 intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG2, dig_hotplug_reg);
1980
1981 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1982 hotplug2_trigger, dig_hotplug_reg,
1983 dev_priv->hotplug.pch_hpd,
1984 spt_port_hotplug2_long_detect);
1985 }
1986
1987 if (pin_mask)
1988 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1989
1990 if (pch_iir & SDE_GMBUS_CPT)
1991 gmbus_irq_handler(dev_priv);
1992 }
1993
1994 static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv,
1995 u32 hotplug_trigger)
1996 {
1997 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
1998
1999 dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL);
2000 intel_uncore_write(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);
2001
2002 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2003 hotplug_trigger, dig_hotplug_reg,
2004 dev_priv->hotplug.hpd,
2005 ilk_port_hotplug_long_detect);
2006
2007 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2008 }
2009
2010 static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
2011 u32 de_iir)
2012 {
2013 enum pipe pipe;
2014 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;
2015
2016 if (hotplug_trigger)
2017 ilk_hpd_irq_handler(dev_priv, hotplug_trigger);
2018
2019 if (de_iir & DE_AUX_CHANNEL_A)
2020 dp_aux_irq_handler(dev_priv);
2021
2022 if (de_iir & DE_GSE)
2023 intel_opregion_asle_intr(dev_priv);
2024
2025 if (de_iir & DE_POISON)
2026 drm_err(&dev_priv->drm, "Poison interrupt\n");
2027
2028 for_each_pipe(dev_priv, pipe) {
2029 if (de_iir & DE_PIPE_VBLANK(pipe))
2030 intel_handle_vblank(dev_priv, pipe);
2031
2032 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
2033 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2034
2035 if (de_iir & DE_PIPE_CRC_DONE(pipe))
2036 i9xx_pipe_crc_irq_handler(dev_priv, pipe);
2037 }
2038
2039 /* check event from PCH */
2040 if (de_iir & DE_PCH_EVENT) {
2041 u32 pch_iir = intel_uncore_read(&dev_priv->uncore, SDEIIR);
2042
2043 if (HAS_PCH_CPT(dev_priv))
2044 cpt_irq_handler(dev_priv, pch_iir);
2045 else
2046 ibx_irq_handler(dev_priv, pch_iir);
2047
2048 /* should clear PCH hotplug event before clearing CPU irq */
2049
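/*
 * The pch_iir sample was already handed to the PCH handlers above, so
 * acking only those bits in SDEIIR here cannot drop events: anything
 * that arrived after the read is still latched and will re-trigger.
 */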
intel_uncore_write(&dev_priv->uncore, SDEIIR, pch_iir);
2050 }
2051
2052 if (IS_GEN(dev_priv, 5) && de_iir & DE_PCU_EVENT)
2053 gen5_rps_irq_handler(&dev_priv->gt.rps);
2054 }
2055
2056 static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
2057 u32 de_iir)
2058 {
2059 enum pipe pipe;
2060 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;
2061
2062 if (hotplug_trigger)
2063 ilk_hpd_irq_handler(dev_priv, hotplug_trigger);
2064
2065 if (de_iir & DE_ERR_INT_IVB)
2066 ivb_err_int_handler(dev_priv);
2067
2068 if (de_iir & DE_EDP_PSR_INT_HSW) {
2069 u32 psr_iir = intel_uncore_read(&dev_priv->uncore, EDP_PSR_IIR);
2070
2071 intel_psr_irq_handler(dev_priv, psr_iir);
2072 intel_uncore_write(&dev_priv->uncore, EDP_PSR_IIR, psr_iir);
2073 }
2074
2075 if (de_iir & DE_AUX_CHANNEL_A_IVB)
2076 dp_aux_irq_handler(dev_priv);
2077
2078 if (de_iir & DE_GSE_IVB)
2079 intel_opregion_asle_intr(dev_priv);
2080
2081 for_each_pipe(dev_priv, pipe) {
2082 if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)))
2083 intel_handle_vblank(dev_priv, pipe);
2084 }
2085
2086 /* check event from PCH */
2087 if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) {
2088 u32 pch_iir = intel_uncore_read(&dev_priv->uncore, SDEIIR);
2089
2090 cpt_irq_handler(dev_priv, pch_iir);
2091
2092 /* clear PCH hotplug event before clearing CPU irq */
2093 intel_uncore_write(&dev_priv->uncore, SDEIIR, pch_iir);
2094 }
2095 }
2096
2097 /*
2098 * To handle irqs with the minimum potential races with fresh interrupts, we:
2099 * 1 - Disable Master Interrupt Control.
2100 * 2 - Find the source(s) of the interrupt.
2101 * 3 - Clear the Interrupt Identity bits (IIR).
2102 * 4 - Process the interrupt(s) that had bits set in the IIRs.
2103 * 5 - Re-enable Master Interrupt Control.
2104 */
2105 static irqreturn_t ilk_irq_handler(int irq, void *arg)
2106 {
2107 struct drm_i915_private *i915 = arg;
2108 void __iomem * const regs = i915->uncore.regs;
2109 u32 de_iir, gt_iir, de_ier, sde_ier = 0;
2110 irqreturn_t ret = IRQ_NONE;
2111
2112 if (unlikely(!intel_irqs_enabled(i915)))
2113 return IRQ_NONE;
2114
2115 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
2116 disable_rpm_wakeref_asserts(&i915->runtime_pm);
2117
2118 /* disable master interrupt before clearing iir */
2119 de_ier = raw_reg_read(regs, DEIER);
2120 raw_reg_write(regs, DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
2121
2122 /* Disable south interrupts. We'll only write to SDEIIR once, so further
2123 * interrupts will be stored on its back queue, and then we'll be
2124 * able to process them after we restore SDEIER (as soon as we restore
2125 * it, we'll get an interrupt if SDEIIR still has something to process
2126 * due to its back queue).
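 *
 * In short: zero SDEIER, read and ack SDEIIR exactly once from the
 * display irq handlers below, then restore SDEIER at the end; the
 * restore raises a fresh interrupt if SDEIIR latched anything in the
 * meantime, so no PCH event is lost.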
*/ 2127 if (!HAS_PCH_NOP(i915)) { 2128 sde_ier = raw_reg_read(regs, SDEIER); 2129 raw_reg_write(regs, SDEIER, 0); 2130 } 2131 2132 /* Find, clear, then process each source of interrupt */ 2133 2134 gt_iir = raw_reg_read(regs, GTIIR); 2135 if (gt_iir) { 2136 raw_reg_write(regs, GTIIR, gt_iir); 2137 if (INTEL_GEN(i915) >= 6) 2138 gen6_gt_irq_handler(&i915->gt, gt_iir); 2139 else 2140 gen5_gt_irq_handler(&i915->gt, gt_iir); 2141 ret = IRQ_HANDLED; 2142 } 2143 2144 de_iir = raw_reg_read(regs, DEIIR); 2145 if (de_iir) { 2146 raw_reg_write(regs, DEIIR, de_iir); 2147 if (INTEL_GEN(i915) >= 7) 2148 ivb_display_irq_handler(i915, de_iir); 2149 else 2150 ilk_display_irq_handler(i915, de_iir); 2151 ret = IRQ_HANDLED; 2152 } 2153 2154 if (INTEL_GEN(i915) >= 6) { 2155 u32 pm_iir = raw_reg_read(regs, GEN6_PMIIR); 2156 if (pm_iir) { 2157 raw_reg_write(regs, GEN6_PMIIR, pm_iir); 2158 gen6_rps_irq_handler(&i915->gt.rps, pm_iir); 2159 ret = IRQ_HANDLED; 2160 } 2161 } 2162 2163 raw_reg_write(regs, DEIER, de_ier); 2164 if (sde_ier) 2165 raw_reg_write(regs, SDEIER, sde_ier); 2166 2167 pmu_irq_stats(i915, ret); 2168 2169 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2170 enable_rpm_wakeref_asserts(&i915->runtime_pm); 2171 2172 return ret; 2173 } 2174 2175 static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv, 2176 u32 hotplug_trigger) 2177 { 2178 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; 2179 2180 dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG); 2181 intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, dig_hotplug_reg); 2182 2183 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 2184 hotplug_trigger, dig_hotplug_reg, 2185 dev_priv->hotplug.hpd, 2186 bxt_port_hotplug_long_detect); 2187 2188 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2189 } 2190 2191 static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir) 2192 { 2193 u32 pin_mask = 0, long_mask = 0; 2194 u32 trigger_tc = iir & GEN11_DE_TC_HOTPLUG_MASK; 2195 u32 trigger_tbt = iir & GEN11_DE_TBT_HOTPLUG_MASK; 2196 2197 if (trigger_tc) { 2198 u32 dig_hotplug_reg; 2199 2200 dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL); 2201 intel_uncore_write(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL, dig_hotplug_reg); 2202 2203 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 2204 trigger_tc, dig_hotplug_reg, 2205 dev_priv->hotplug.hpd, 2206 gen11_port_hotplug_long_detect); 2207 } 2208 2209 if (trigger_tbt) { 2210 u32 dig_hotplug_reg; 2211 2212 dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL); 2213 intel_uncore_write(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL, dig_hotplug_reg); 2214 2215 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 2216 trigger_tbt, dig_hotplug_reg, 2217 dev_priv->hotplug.hpd, 2218 gen11_port_hotplug_long_detect); 2219 } 2220 2221 if (pin_mask) 2222 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2223 else 2224 drm_err(&dev_priv->drm, 2225 "Unexpected DE HPD interrupt 0x%08x\n", iir); 2226 } 2227 2228 static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv) 2229 { 2230 u32 mask; 2231 2232 if (INTEL_GEN(dev_priv) >= 12) 2233 return TGL_DE_PORT_AUX_DDIA | 2234 TGL_DE_PORT_AUX_DDIB | 2235 TGL_DE_PORT_AUX_DDIC | 2236 TGL_DE_PORT_AUX_USBC1 | 2237 TGL_DE_PORT_AUX_USBC2 | 2238 TGL_DE_PORT_AUX_USBC3 | 2239 TGL_DE_PORT_AUX_USBC4 | 2240 TGL_DE_PORT_AUX_USBC5 | 2241 TGL_DE_PORT_AUX_USBC6; 2242 2243 2244 mask = GEN8_AUX_CHANNEL_A; 2245 if (INTEL_GEN(dev_priv) >= 9) 2246 mask |= 
GEN9_AUX_CHANNEL_B |
2247 GEN9_AUX_CHANNEL_C |
2248 GEN9_AUX_CHANNEL_D;
2249
2250 if (IS_CNL_WITH_PORT_F(dev_priv) || IS_GEN(dev_priv, 11))
2251 mask |= CNL_AUX_CHANNEL_F;
2252
2253 if (IS_GEN(dev_priv, 11))
2254 mask |= ICL_AUX_CHANNEL_E;
2255
2256 return mask;
2257 }
2258
2259 static u32 gen8_de_pipe_fault_mask(struct drm_i915_private *dev_priv)
2260 {
2261 if (IS_ROCKETLAKE(dev_priv))
2262 return RKL_DE_PIPE_IRQ_FAULT_ERRORS;
2263 else if (INTEL_GEN(dev_priv) >= 11)
2264 return GEN11_DE_PIPE_IRQ_FAULT_ERRORS;
2265 else if (INTEL_GEN(dev_priv) >= 9)
2266 return GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
2267 else
2268 return GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
2269 }
2270
2271 static void
2272 gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
2273 {
2274 bool found = false;
2275
2276 if (iir & GEN8_DE_MISC_GSE) {
2277 intel_opregion_asle_intr(dev_priv);
2278 found = true;
2279 }
2280
2281 if (iir & GEN8_DE_EDP_PSR) {
2282 u32 psr_iir;
2283 i915_reg_t iir_reg;
2284
2285 if (INTEL_GEN(dev_priv) >= 12)
2286 iir_reg = TRANS_PSR_IIR(dev_priv->psr.transcoder);
2287 else
2288 iir_reg = EDP_PSR_IIR;
2289
2290 psr_iir = intel_uncore_read(&dev_priv->uncore, iir_reg);
2291 intel_uncore_write(&dev_priv->uncore, iir_reg, psr_iir);
2292
2293 if (psr_iir)
2294 found = true;
2295
2296 intel_psr_irq_handler(dev_priv, psr_iir);
2297 }
2298
2299 if (!found)
2300 drm_err(&dev_priv->drm, "Unexpected DE Misc interrupt\n");
2301 }
2302
2303 static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv,
2304 u32 te_trigger)
2305 {
2306 enum pipe pipe = INVALID_PIPE;
2307 enum transcoder dsi_trans;
2308 enum port port;
2309 u32 val, tmp;
2310
2311 /*
2312 * In case of dual link, TE comes from DSI_1;
2313 * this is to check if dual link is enabled
2314 */
2315 val = intel_uncore_read(&dev_priv->uncore, TRANS_DDI_FUNC_CTL2(TRANSCODER_DSI_0));
2316 val &= PORT_SYNC_MODE_ENABLE;
2317
2318 /*
2319 * If dual link is enabled, then read the DSI_0
2320 * transcoder registers
2321 */
2322 port = ((te_trigger & DSI1_TE && val) || (te_trigger & DSI0_TE)) ?
2323 PORT_A : PORT_B;
2324 dsi_trans = (port == PORT_A) ? TRANSCODER_DSI_0 : TRANSCODER_DSI_1;
2325
2326 /* Check if DSI is configured in command mode */
2327 val = intel_uncore_read(&dev_priv->uncore, DSI_TRANS_FUNC_CONF(dsi_trans));
2328 val = val & OP_MODE_MASK;
2329
2330 if (val != CMD_MODE_NO_GATE && val != CMD_MODE_TE_GATE) {
2331 drm_err(&dev_priv->drm, "DSI transcoder not configured in command mode\n");
2332 return;
2333 }
2334
2335 /* Get PIPE for handling VBLANK event */
2336 val = intel_uncore_read(&dev_priv->uncore, TRANS_DDI_FUNC_CTL(dsi_trans));
2337 switch (val & TRANS_DDI_EDP_INPUT_MASK) {
2338 case TRANS_DDI_EDP_INPUT_A_ON:
2339 pipe = PIPE_A;
2340 break;
2341 case TRANS_DDI_EDP_INPUT_B_ONOFF:
2342 pipe = PIPE_B;
2343 break;
2344 case TRANS_DDI_EDP_INPUT_C_ONOFF:
2345 pipe = PIPE_C;
2346 break;
2347 default:
2348 drm_err(&dev_priv->drm, "Invalid PIPE\n");
2349 return;
2350 }
2351
2352 intel_handle_vblank(dev_priv, pipe);
2353
2354 /* clear TE in dsi IIR */
2355 port = (te_trigger & DSI1_TE) ?
PORT_B : PORT_A; 2356 tmp = intel_uncore_read(&dev_priv->uncore, DSI_INTR_IDENT_REG(port)); 2357 intel_uncore_write(&dev_priv->uncore, DSI_INTR_IDENT_REG(port), tmp); 2358 } 2359 2360 static irqreturn_t 2361 gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) 2362 { 2363 irqreturn_t ret = IRQ_NONE; 2364 u32 iir; 2365 enum pipe pipe; 2366 2367 if (master_ctl & GEN8_DE_MISC_IRQ) { 2368 iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_MISC_IIR); 2369 if (iir) { 2370 intel_uncore_write(&dev_priv->uncore, GEN8_DE_MISC_IIR, iir); 2371 ret = IRQ_HANDLED; 2372 gen8_de_misc_irq_handler(dev_priv, iir); 2373 } else { 2374 drm_err(&dev_priv->drm, 2375 "The master control interrupt lied (DE MISC)!\n"); 2376 } 2377 } 2378 2379 if (INTEL_GEN(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) { 2380 iir = intel_uncore_read(&dev_priv->uncore, GEN11_DE_HPD_IIR); 2381 if (iir) { 2382 intel_uncore_write(&dev_priv->uncore, GEN11_DE_HPD_IIR, iir); 2383 ret = IRQ_HANDLED; 2384 gen11_hpd_irq_handler(dev_priv, iir); 2385 } else { 2386 drm_err(&dev_priv->drm, 2387 "The master control interrupt lied, (DE HPD)!\n"); 2388 } 2389 } 2390 2391 if (master_ctl & GEN8_DE_PORT_IRQ) { 2392 iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PORT_IIR); 2393 if (iir) { 2394 bool found = false; 2395 2396 intel_uncore_write(&dev_priv->uncore, GEN8_DE_PORT_IIR, iir); 2397 ret = IRQ_HANDLED; 2398 2399 if (iir & gen8_de_port_aux_mask(dev_priv)) { 2400 dp_aux_irq_handler(dev_priv); 2401 found = true; 2402 } 2403 2404 if (IS_GEN9_LP(dev_priv)) { 2405 u32 hotplug_trigger = iir & BXT_DE_PORT_HOTPLUG_MASK; 2406 2407 if (hotplug_trigger) { 2408 bxt_hpd_irq_handler(dev_priv, hotplug_trigger); 2409 found = true; 2410 } 2411 } else if (IS_BROADWELL(dev_priv)) { 2412 u32 hotplug_trigger = iir & BDW_DE_PORT_HOTPLUG_MASK; 2413 2414 if (hotplug_trigger) { 2415 ilk_hpd_irq_handler(dev_priv, hotplug_trigger); 2416 found = true; 2417 } 2418 } 2419 2420 if (IS_GEN9_LP(dev_priv) && (iir & BXT_DE_PORT_GMBUS)) { 2421 gmbus_irq_handler(dev_priv); 2422 found = true; 2423 } 2424 2425 if (INTEL_GEN(dev_priv) >= 11) { 2426 u32 te_trigger = iir & (DSI0_TE | DSI1_TE); 2427 2428 if (te_trigger) { 2429 gen11_dsi_te_interrupt_handler(dev_priv, te_trigger); 2430 found = true; 2431 } 2432 } 2433 2434 if (!found) 2435 drm_err(&dev_priv->drm, 2436 "Unexpected DE Port interrupt\n"); 2437 } 2438 else 2439 drm_err(&dev_priv->drm, 2440 "The master control interrupt lied (DE PORT)!\n"); 2441 } 2442 2443 for_each_pipe(dev_priv, pipe) { 2444 u32 fault_errors; 2445 2446 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe))) 2447 continue; 2448 2449 iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PIPE_IIR(pipe)); 2450 if (!iir) { 2451 drm_err(&dev_priv->drm, 2452 "The master control interrupt lied (DE PIPE)!\n"); 2453 continue; 2454 } 2455 2456 ret = IRQ_HANDLED; 2457 intel_uncore_write(&dev_priv->uncore, GEN8_DE_PIPE_IIR(pipe), iir); 2458 2459 if (iir & GEN8_PIPE_VBLANK) 2460 intel_handle_vblank(dev_priv, pipe); 2461 2462 if (iir & GEN9_PIPE_PLANE1_FLIP_DONE) 2463 flip_done_handler(dev_priv, pipe); 2464 2465 if (iir & GEN8_PIPE_CDCLK_CRC_DONE) 2466 hsw_pipe_crc_irq_handler(dev_priv, pipe); 2467 2468 if (iir & GEN8_PIPE_FIFO_UNDERRUN) 2469 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2470 2471 fault_errors = iir & gen8_de_pipe_fault_mask(dev_priv); 2472 if (fault_errors) 2473 drm_err(&dev_priv->drm, 2474 "Fault errors on pipe %c: 0x%08x\n", 2475 pipe_name(pipe), 2476 fault_errors); 2477 } 2478 2479 if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) && 
2480 master_ctl & GEN8_DE_PCH_IRQ) { 2481 /* 2482 * FIXME(BDW): Assume for now that the new interrupt handling 2483 * scheme also closed the SDE interrupt handling race we've seen 2484 * on older pch-split platforms. But this needs testing. 2485 */ 2486 iir = intel_uncore_read(&dev_priv->uncore, SDEIIR); 2487 if (iir) { 2488 intel_uncore_write(&dev_priv->uncore, SDEIIR, iir); 2489 ret = IRQ_HANDLED; 2490 2491 if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) 2492 icp_irq_handler(dev_priv, iir); 2493 else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT) 2494 spt_irq_handler(dev_priv, iir); 2495 else 2496 cpt_irq_handler(dev_priv, iir); 2497 } else { 2498 /* 2499 * Like on previous PCH there seems to be something 2500 * fishy going on with forwarding PCH interrupts. 2501 */ 2502 drm_dbg(&dev_priv->drm, 2503 "The master control interrupt lied (SDE)!\n"); 2504 } 2505 } 2506 2507 return ret; 2508 } 2509 2510 static inline u32 gen8_master_intr_disable(void __iomem * const regs) 2511 { 2512 raw_reg_write(regs, GEN8_MASTER_IRQ, 0); 2513 2514 /* 2515 * Now with master disabled, get a sample of level indications 2516 * for this interrupt. Indications will be cleared on related acks. 2517 * New indications can and will light up during processing, 2518 * and will generate new interrupt after enabling master. 2519 */ 2520 return raw_reg_read(regs, GEN8_MASTER_IRQ); 2521 } 2522 2523 static inline void gen8_master_intr_enable(void __iomem * const regs) 2524 { 2525 raw_reg_write(regs, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 2526 } 2527 2528 static irqreturn_t gen8_irq_handler(int irq, void *arg) 2529 { 2530 struct drm_i915_private *dev_priv = arg; 2531 void __iomem * const regs = dev_priv->uncore.regs; 2532 u32 master_ctl; 2533 2534 if (!intel_irqs_enabled(dev_priv)) 2535 return IRQ_NONE; 2536 2537 master_ctl = gen8_master_intr_disable(regs); 2538 if (!master_ctl) { 2539 gen8_master_intr_enable(regs); 2540 return IRQ_NONE; 2541 } 2542 2543 /* Find, queue (onto bottom-halves), then clear each source */ 2544 gen8_gt_irq_handler(&dev_priv->gt, master_ctl); 2545 2546 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2547 if (master_ctl & ~GEN8_GT_IRQS) { 2548 disable_rpm_wakeref_asserts(&dev_priv->runtime_pm); 2549 gen8_de_irq_handler(dev_priv, master_ctl); 2550 enable_rpm_wakeref_asserts(&dev_priv->runtime_pm); 2551 } 2552 2553 gen8_master_intr_enable(regs); 2554 2555 pmu_irq_stats(dev_priv, IRQ_HANDLED); 2556 2557 return IRQ_HANDLED; 2558 } 2559 2560 static u32 2561 gen11_gu_misc_irq_ack(struct intel_gt *gt, const u32 master_ctl) 2562 { 2563 void __iomem * const regs = gt->uncore->regs; 2564 u32 iir; 2565 2566 if (!(master_ctl & GEN11_GU_MISC_IRQ)) 2567 return 0; 2568 2569 iir = raw_reg_read(regs, GEN11_GU_MISC_IIR); 2570 if (likely(iir)) 2571 raw_reg_write(regs, GEN11_GU_MISC_IIR, iir); 2572 2573 return iir; 2574 } 2575 2576 static void 2577 gen11_gu_misc_irq_handler(struct intel_gt *gt, const u32 iir) 2578 { 2579 if (iir & GEN11_GU_MISC_GSE) 2580 intel_opregion_asle_intr(gt->i915); 2581 } 2582 2583 static inline u32 gen11_master_intr_disable(void __iomem * const regs) 2584 { 2585 raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0); 2586 2587 /* 2588 * Now with master disabled, get a sample of level indications 2589 * for this interrupt. Indications will be cleared on related acks. 2590 * New indications can and will light up during processing, 2591 * and will generate new interrupt after enabling master. 
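 *
 * A minimal usage sketch (this mirrors gen8_irq_handler above and
 * __gen11_irq_handler below, it is not a separate mechanism):
 *
 *	master_ctl = gen11_master_intr_disable(regs);
 *	if (!master_ctl) {
 *		gen11_master_intr_enable(regs);
 *		return IRQ_NONE;
 *	}
 *	... ack and handle each source flagged in master_ctl ...
 *	gen11_master_intr_enable(regs);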
2592 */ 2593 return raw_reg_read(regs, GEN11_GFX_MSTR_IRQ); 2594 } 2595 2596 static inline void gen11_master_intr_enable(void __iomem * const regs) 2597 { 2598 raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ); 2599 } 2600 2601 static void 2602 gen11_display_irq_handler(struct drm_i915_private *i915) 2603 { 2604 void __iomem * const regs = i915->uncore.regs; 2605 const u32 disp_ctl = raw_reg_read(regs, GEN11_DISPLAY_INT_CTL); 2606 2607 disable_rpm_wakeref_asserts(&i915->runtime_pm); 2608 /* 2609 * GEN11_DISPLAY_INT_CTL has same format as GEN8_MASTER_IRQ 2610 * for the display related bits. 2611 */ 2612 raw_reg_write(regs, GEN11_DISPLAY_INT_CTL, 0x0); 2613 gen8_de_irq_handler(i915, disp_ctl); 2614 raw_reg_write(regs, GEN11_DISPLAY_INT_CTL, 2615 GEN11_DISPLAY_IRQ_ENABLE); 2616 2617 enable_rpm_wakeref_asserts(&i915->runtime_pm); 2618 } 2619 2620 static __always_inline irqreturn_t 2621 __gen11_irq_handler(struct drm_i915_private * const i915, 2622 u32 (*intr_disable)(void __iomem * const regs), 2623 void (*intr_enable)(void __iomem * const regs)) 2624 { 2625 void __iomem * const regs = i915->uncore.regs; 2626 struct intel_gt *gt = &i915->gt; 2627 u32 master_ctl; 2628 u32 gu_misc_iir; 2629 2630 if (!intel_irqs_enabled(i915)) 2631 return IRQ_NONE; 2632 2633 master_ctl = intr_disable(regs); 2634 if (!master_ctl) { 2635 intr_enable(regs); 2636 return IRQ_NONE; 2637 } 2638 2639 /* Find, queue (onto bottom-halves), then clear each source */ 2640 gen11_gt_irq_handler(gt, master_ctl); 2641 2642 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2643 if (master_ctl & GEN11_DISPLAY_IRQ) 2644 gen11_display_irq_handler(i915); 2645 2646 gu_misc_iir = gen11_gu_misc_irq_ack(gt, master_ctl); 2647 2648 intr_enable(regs); 2649 2650 gen11_gu_misc_irq_handler(gt, gu_misc_iir); 2651 2652 pmu_irq_stats(i915, IRQ_HANDLED); 2653 2654 return IRQ_HANDLED; 2655 } 2656 2657 static irqreturn_t gen11_irq_handler(int irq, void *arg) 2658 { 2659 return __gen11_irq_handler(arg, 2660 gen11_master_intr_disable, 2661 gen11_master_intr_enable); 2662 } 2663 2664 static u32 dg1_master_intr_disable_and_ack(void __iomem * const regs) 2665 { 2666 u32 val; 2667 2668 /* First disable interrupts */ 2669 raw_reg_write(regs, DG1_MSTR_UNIT_INTR, 0); 2670 2671 /* Get the indication levels and ack the master unit */ 2672 val = raw_reg_read(regs, DG1_MSTR_UNIT_INTR); 2673 if (unlikely(!val)) 2674 return 0; 2675 2676 raw_reg_write(regs, DG1_MSTR_UNIT_INTR, val); 2677 2678 /* 2679 * Now with master disabled, get a sample of level indications 2680 * for this interrupt and ack them right away - we keep GEN11_MASTER_IRQ 2681 * out as this bit doesn't exist anymore for DG1 2682 */ 2683 val = raw_reg_read(regs, GEN11_GFX_MSTR_IRQ) & ~GEN11_MASTER_IRQ; 2684 if (unlikely(!val)) 2685 return 0; 2686 2687 raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, val); 2688 2689 return val; 2690 } 2691 2692 static inline void dg1_master_intr_enable(void __iomem * const regs) 2693 { 2694 raw_reg_write(regs, DG1_MSTR_UNIT_INTR, DG1_MSTR_IRQ); 2695 } 2696 2697 static irqreturn_t dg1_irq_handler(int irq, void *arg) 2698 { 2699 return __gen11_irq_handler(arg, 2700 dg1_master_intr_disable_and_ack, 2701 dg1_master_intr_enable); 2702 } 2703 2704 /* Called from drm generic code, passed 'crtc' which 2705 * we use as a pipe index 2706 */ 2707 int i8xx_enable_vblank(struct drm_crtc *crtc) 2708 { 2709 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 2710 enum pipe pipe = to_intel_crtc(crtc)->pipe; 2711 unsigned long irqflags; 2712 2713 
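/*
 * Vblank on these platforms is enabled via PIPESTAT rather than a
 * dedicated IER bit; irq_lock serializes the read-modify-write of
 * the PIPESTAT enable bits against the interrupt handler.
 */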
spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2714 i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS); 2715 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2716 2717 return 0; 2718 } 2719 2720 int i915gm_enable_vblank(struct drm_crtc *crtc) 2721 { 2722 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 2723 2724 /* 2725 * Vblank interrupts fail to wake the device up from C2+. 2726 * Disabling render clock gating during C-states avoids 2727 * the problem. There is a small power cost so we do this 2728 * only when vblank interrupts are actually enabled. 2729 */ 2730 if (dev_priv->vblank_enabled++ == 0) 2731 intel_uncore_write(&dev_priv->uncore, SCPD0, _MASKED_BIT_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE)); 2732 2733 return i8xx_enable_vblank(crtc); 2734 } 2735 2736 int i965_enable_vblank(struct drm_crtc *crtc) 2737 { 2738 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 2739 enum pipe pipe = to_intel_crtc(crtc)->pipe; 2740 unsigned long irqflags; 2741 2742 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2743 i915_enable_pipestat(dev_priv, pipe, 2744 PIPE_START_VBLANK_INTERRUPT_STATUS); 2745 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2746 2747 return 0; 2748 } 2749 2750 int ilk_enable_vblank(struct drm_crtc *crtc) 2751 { 2752 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 2753 enum pipe pipe = to_intel_crtc(crtc)->pipe; 2754 unsigned long irqflags; 2755 u32 bit = INTEL_GEN(dev_priv) >= 7 ? 2756 DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe); 2757 2758 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2759 ilk_enable_display_irq(dev_priv, bit); 2760 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2761 2762 /* Even though there is no DMC, frame counter can get stuck when 2763 * PSR is active as no frames are generated. 2764 */ 2765 if (HAS_PSR(dev_priv)) 2766 drm_crtc_vblank_restore(crtc); 2767 2768 return 0; 2769 } 2770 2771 static bool gen11_dsi_configure_te(struct intel_crtc *intel_crtc, 2772 bool enable) 2773 { 2774 struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev); 2775 enum port port; 2776 u32 tmp; 2777 2778 if (!(intel_crtc->mode_flags & 2779 (I915_MODE_FLAG_DSI_USE_TE1 | I915_MODE_FLAG_DSI_USE_TE0))) 2780 return false; 2781 2782 /* for dual link cases we consider TE from slave */ 2783 if (intel_crtc->mode_flags & I915_MODE_FLAG_DSI_USE_TE1) 2784 port = PORT_B; 2785 else 2786 port = PORT_A; 2787 2788 tmp = intel_uncore_read(&dev_priv->uncore, DSI_INTR_MASK_REG(port)); 2789 if (enable) 2790 tmp &= ~DSI_TE_EVENT; 2791 else 2792 tmp |= DSI_TE_EVENT; 2793 2794 intel_uncore_write(&dev_priv->uncore, DSI_INTR_MASK_REG(port), tmp); 2795 2796 tmp = intel_uncore_read(&dev_priv->uncore, DSI_INTR_IDENT_REG(port)); 2797 intel_uncore_write(&dev_priv->uncore, DSI_INTR_IDENT_REG(port), tmp); 2798 2799 return true; 2800 } 2801 2802 int bdw_enable_vblank(struct drm_crtc *crtc) 2803 { 2804 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 2805 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2806 enum pipe pipe = intel_crtc->pipe; 2807 unsigned long irqflags; 2808 2809 if (gen11_dsi_configure_te(intel_crtc, true)) 2810 return 0; 2811 2812 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2813 bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK); 2814 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2815 2816 /* Even if there is no DMC, frame counter can get stuck when 2817 * PSR is active as no frames are generated, so check only for PSR. 
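 * drm_crtc_vblank_restore() resynchronizes the software vblank count
 * with the hardware after the counter may have stalled, which is why
 * it is called here only when PSR could have been active.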
2818 */ 2819 if (HAS_PSR(dev_priv)) 2820 drm_crtc_vblank_restore(crtc); 2821 2822 return 0; 2823 } 2824 2825 void skl_enable_flip_done(struct intel_crtc *crtc) 2826 { 2827 struct drm_i915_private *i915 = to_i915(crtc->base.dev); 2828 enum pipe pipe = crtc->pipe; 2829 unsigned long irqflags; 2830 2831 spin_lock_irqsave(&i915->irq_lock, irqflags); 2832 2833 bdw_enable_pipe_irq(i915, pipe, GEN9_PIPE_PLANE1_FLIP_DONE); 2834 2835 spin_unlock_irqrestore(&i915->irq_lock, irqflags); 2836 } 2837 2838 /* Called from drm generic code, passed 'crtc' which 2839 * we use as a pipe index 2840 */ 2841 void i8xx_disable_vblank(struct drm_crtc *crtc) 2842 { 2843 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 2844 enum pipe pipe = to_intel_crtc(crtc)->pipe; 2845 unsigned long irqflags; 2846 2847 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2848 i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS); 2849 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2850 } 2851 2852 void i915gm_disable_vblank(struct drm_crtc *crtc) 2853 { 2854 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 2855 2856 i8xx_disable_vblank(crtc); 2857 2858 if (--dev_priv->vblank_enabled == 0) 2859 intel_uncore_write(&dev_priv->uncore, SCPD0, _MASKED_BIT_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE)); 2860 } 2861 2862 void i965_disable_vblank(struct drm_crtc *crtc) 2863 { 2864 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 2865 enum pipe pipe = to_intel_crtc(crtc)->pipe; 2866 unsigned long irqflags; 2867 2868 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2869 i915_disable_pipestat(dev_priv, pipe, 2870 PIPE_START_VBLANK_INTERRUPT_STATUS); 2871 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2872 } 2873 2874 void ilk_disable_vblank(struct drm_crtc *crtc) 2875 { 2876 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 2877 enum pipe pipe = to_intel_crtc(crtc)->pipe; 2878 unsigned long irqflags; 2879 u32 bit = INTEL_GEN(dev_priv) >= 7 ? 
2880 DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe); 2881 2882 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2883 ilk_disable_display_irq(dev_priv, bit); 2884 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2885 } 2886 2887 void bdw_disable_vblank(struct drm_crtc *crtc) 2888 { 2889 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 2890 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2891 enum pipe pipe = intel_crtc->pipe; 2892 unsigned long irqflags; 2893 2894 if (gen11_dsi_configure_te(intel_crtc, false)) 2895 return; 2896 2897 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2898 bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK); 2899 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2900 } 2901 2902 void skl_disable_flip_done(struct intel_crtc *crtc) 2903 { 2904 struct drm_i915_private *i915 = to_i915(crtc->base.dev); 2905 enum pipe pipe = crtc->pipe; 2906 unsigned long irqflags; 2907 2908 spin_lock_irqsave(&i915->irq_lock, irqflags); 2909 2910 bdw_disable_pipe_irq(i915, pipe, GEN9_PIPE_PLANE1_FLIP_DONE); 2911 2912 spin_unlock_irqrestore(&i915->irq_lock, irqflags); 2913 } 2914 2915 static void ibx_irq_reset(struct drm_i915_private *dev_priv) 2916 { 2917 struct intel_uncore *uncore = &dev_priv->uncore; 2918 2919 if (HAS_PCH_NOP(dev_priv)) 2920 return; 2921 2922 GEN3_IRQ_RESET(uncore, SDE); 2923 2924 if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv)) 2925 intel_uncore_write(&dev_priv->uncore, SERR_INT, 0xffffffff); 2926 } 2927 2928 static void vlv_display_irq_reset(struct drm_i915_private *dev_priv) 2929 { 2930 struct intel_uncore *uncore = &dev_priv->uncore; 2931 2932 if (IS_CHERRYVIEW(dev_priv)) 2933 intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_CHV); 2934 else 2935 intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK); 2936 2937 i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0); 2938 intel_uncore_write(uncore, PORT_HOTPLUG_STAT, intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT)); 2939 2940 i9xx_pipestat_irq_reset(dev_priv); 2941 2942 GEN3_IRQ_RESET(uncore, VLV_); 2943 dev_priv->irq_mask = ~0u; 2944 } 2945 2946 static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv) 2947 { 2948 struct intel_uncore *uncore = &dev_priv->uncore; 2949 2950 u32 pipestat_mask; 2951 u32 enable_mask; 2952 enum pipe pipe; 2953 2954 pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS; 2955 2956 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); 2957 for_each_pipe(dev_priv, pipe) 2958 i915_enable_pipestat(dev_priv, pipe, pipestat_mask); 2959 2960 enable_mask = I915_DISPLAY_PORT_INTERRUPT | 2961 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 2962 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 2963 I915_LPE_PIPE_A_INTERRUPT | 2964 I915_LPE_PIPE_B_INTERRUPT; 2965 2966 if (IS_CHERRYVIEW(dev_priv)) 2967 enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT | 2968 I915_LPE_PIPE_C_INTERRUPT; 2969 2970 drm_WARN_ON(&dev_priv->drm, dev_priv->irq_mask != ~0u); 2971 2972 dev_priv->irq_mask = ~enable_mask; 2973 2974 GEN3_IRQ_INIT(uncore, VLV_, dev_priv->irq_mask, enable_mask); 2975 } 2976 2977 /* drm_dma.h hooks 2978 */ 2979 static void ilk_irq_reset(struct drm_i915_private *dev_priv) 2980 { 2981 struct intel_uncore *uncore = &dev_priv->uncore; 2982 2983 GEN3_IRQ_RESET(uncore, DE); 2984 dev_priv->irq_mask = ~0u; 2985 2986 if (IS_GEN(dev_priv, 7)) 2987 intel_uncore_write(uncore, GEN7_ERR_INT, 0xffffffff); 2988 2989 if (IS_HASWELL(dev_priv)) { 2990 intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff); 2991 intel_uncore_write(uncore, EDP_PSR_IIR, 
0xffffffff); 2992 } 2993 2994 gen5_gt_irq_reset(&dev_priv->gt); 2995 2996 ibx_irq_reset(dev_priv); 2997 } 2998 2999 static void valleyview_irq_reset(struct drm_i915_private *dev_priv) 3000 { 3001 intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, 0); 3002 intel_uncore_posting_read(&dev_priv->uncore, VLV_MASTER_IER); 3003 3004 gen5_gt_irq_reset(&dev_priv->gt); 3005 3006 spin_lock_irq(&dev_priv->irq_lock); 3007 if (dev_priv->display_irqs_enabled) 3008 vlv_display_irq_reset(dev_priv); 3009 spin_unlock_irq(&dev_priv->irq_lock); 3010 } 3011 3012 static void gen8_irq_reset(struct drm_i915_private *dev_priv) 3013 { 3014 struct intel_uncore *uncore = &dev_priv->uncore; 3015 enum pipe pipe; 3016 3017 gen8_master_intr_disable(dev_priv->uncore.regs); 3018 3019 gen8_gt_irq_reset(&dev_priv->gt); 3020 3021 intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff); 3022 intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff); 3023 3024 for_each_pipe(dev_priv, pipe) 3025 if (intel_display_power_is_enabled(dev_priv, 3026 POWER_DOMAIN_PIPE(pipe))) 3027 GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe); 3028 3029 GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_); 3030 GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_); 3031 GEN3_IRQ_RESET(uncore, GEN8_PCU_); 3032 3033 if (HAS_PCH_SPLIT(dev_priv)) 3034 ibx_irq_reset(dev_priv); 3035 } 3036 3037 static void gen11_display_irq_reset(struct drm_i915_private *dev_priv) 3038 { 3039 struct intel_uncore *uncore = &dev_priv->uncore; 3040 enum pipe pipe; 3041 u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | 3042 BIT(TRANSCODER_C) | BIT(TRANSCODER_D); 3043 3044 intel_uncore_write(uncore, GEN11_DISPLAY_INT_CTL, 0); 3045 3046 if (INTEL_GEN(dev_priv) >= 12) { 3047 enum transcoder trans; 3048 3049 for_each_cpu_transcoder_masked(dev_priv, trans, trans_mask) { 3050 enum intel_display_power_domain domain; 3051 3052 domain = POWER_DOMAIN_TRANSCODER(trans); 3053 if (!intel_display_power_is_enabled(dev_priv, domain)) 3054 continue; 3055 3056 intel_uncore_write(uncore, TRANS_PSR_IMR(trans), 0xffffffff); 3057 intel_uncore_write(uncore, TRANS_PSR_IIR(trans), 0xffffffff); 3058 } 3059 } else { 3060 intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff); 3061 intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff); 3062 } 3063 3064 for_each_pipe(dev_priv, pipe) 3065 if (intel_display_power_is_enabled(dev_priv, 3066 POWER_DOMAIN_PIPE(pipe))) 3067 GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe); 3068 3069 GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_); 3070 GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_); 3071 GEN3_IRQ_RESET(uncore, GEN11_DE_HPD_); 3072 3073 if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) 3074 GEN3_IRQ_RESET(uncore, SDE); 3075 3076 /* Wa_14010685332:cnp/cmp,tgp,adp */ 3077 if (INTEL_PCH_TYPE(dev_priv) == PCH_CNP || 3078 (INTEL_PCH_TYPE(dev_priv) >= PCH_TGP && 3079 INTEL_PCH_TYPE(dev_priv) < PCH_DG1)) { 3080 intel_uncore_rmw(uncore, SOUTH_CHICKEN1, 3081 SBCLK_RUN_REFCLK_DIS, SBCLK_RUN_REFCLK_DIS); 3082 intel_uncore_rmw(uncore, SOUTH_CHICKEN1, 3083 SBCLK_RUN_REFCLK_DIS, 0); 3084 } 3085 } 3086 3087 static void gen11_irq_reset(struct drm_i915_private *dev_priv) 3088 { 3089 struct intel_uncore *uncore = &dev_priv->uncore; 3090 3091 if (HAS_MASTER_UNIT_IRQ(dev_priv)) 3092 dg1_master_intr_disable_and_ack(dev_priv->uncore.regs); 3093 else 3094 gen11_master_intr_disable(dev_priv->uncore.regs); 3095 3096 gen11_gt_irq_reset(&dev_priv->gt); 3097 gen11_display_irq_reset(dev_priv); 3098 3099 GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_); 3100 GEN3_IRQ_RESET(uncore, GEN8_PCU_); 3101 } 3102 3103 void gen8_irq_power_well_post_enable(struct drm_i915_private 
*dev_priv, 3104 u8 pipe_mask) 3105 { 3106 struct intel_uncore *uncore = &dev_priv->uncore; 3107 3108 u32 extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN; 3109 enum pipe pipe; 3110 3111 if (INTEL_GEN(dev_priv) >= 9) 3112 extra_ier |= GEN9_PIPE_PLANE1_FLIP_DONE; 3113 3114 spin_lock_irq(&dev_priv->irq_lock); 3115 3116 if (!intel_irqs_enabled(dev_priv)) { 3117 spin_unlock_irq(&dev_priv->irq_lock); 3118 return; 3119 } 3120 3121 for_each_pipe_masked(dev_priv, pipe, pipe_mask) 3122 GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe, 3123 dev_priv->de_irq_mask[pipe], 3124 ~dev_priv->de_irq_mask[pipe] | extra_ier); 3125 3126 spin_unlock_irq(&dev_priv->irq_lock); 3127 } 3128 3129 void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv, 3130 u8 pipe_mask) 3131 { 3132 struct intel_uncore *uncore = &dev_priv->uncore; 3133 enum pipe pipe; 3134 3135 spin_lock_irq(&dev_priv->irq_lock); 3136 3137 if (!intel_irqs_enabled(dev_priv)) { 3138 spin_unlock_irq(&dev_priv->irq_lock); 3139 return; 3140 } 3141 3142 for_each_pipe_masked(dev_priv, pipe, pipe_mask) 3143 GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe); 3144 3145 spin_unlock_irq(&dev_priv->irq_lock); 3146 3147 /* make sure we're done processing display irqs */ 3148 intel_synchronize_irq(dev_priv); 3149 } 3150 3151 static void cherryview_irq_reset(struct drm_i915_private *dev_priv) 3152 { 3153 struct intel_uncore *uncore = &dev_priv->uncore; 3154 3155 intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, 0); 3156 intel_uncore_posting_read(&dev_priv->uncore, GEN8_MASTER_IRQ); 3157 3158 gen8_gt_irq_reset(&dev_priv->gt); 3159 3160 GEN3_IRQ_RESET(uncore, GEN8_PCU_); 3161 3162 spin_lock_irq(&dev_priv->irq_lock); 3163 if (dev_priv->display_irqs_enabled) 3164 vlv_display_irq_reset(dev_priv); 3165 spin_unlock_irq(&dev_priv->irq_lock); 3166 } 3167 3168 static u32 ibx_hotplug_enables(struct drm_i915_private *i915, 3169 enum hpd_pin pin) 3170 { 3171 switch (pin) { 3172 case HPD_PORT_A: 3173 /* 3174 * When CPU and PCH are on the same package, port A 3175 * HPD must be enabled in both north and south. 3176 */ 3177 return HAS_PCH_LPT_LP(i915) ? 3178 PORTA_HOTPLUG_ENABLE : 0; 3179 case HPD_PORT_B: 3180 return PORTB_HOTPLUG_ENABLE | 3181 PORTB_PULSE_DURATION_2ms; 3182 case HPD_PORT_C: 3183 return PORTC_HOTPLUG_ENABLE | 3184 PORTC_PULSE_DURATION_2ms; 3185 case HPD_PORT_D: 3186 return PORTD_HOTPLUG_ENABLE | 3187 PORTD_PULSE_DURATION_2ms; 3188 default: 3189 return 0; 3190 } 3191 } 3192 3193 static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv) 3194 { 3195 u32 hotplug; 3196 3197 /* 3198 * Enable digital hotplug on the PCH, and configure the DP short pulse 3199 * duration to 2ms (which is the minimum in the Display Port spec). 3200 * The pulse duration bits are reserved on LPT+. 
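 *
 * The update below is a read-modify-write: all enable and pulse
 * duration fields are cleared first, then the per-pin values collected
 * by intel_hpd_hotplug_enables() from ibx_hotplug_enables() are ORed
 * back in, leaving unused pins disabled.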
3201 */ 3202 hotplug = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG); 3203 hotplug &= ~(PORTA_HOTPLUG_ENABLE | 3204 PORTB_HOTPLUG_ENABLE | 3205 PORTC_HOTPLUG_ENABLE | 3206 PORTD_HOTPLUG_ENABLE | 3207 PORTB_PULSE_DURATION_MASK | 3208 PORTC_PULSE_DURATION_MASK | 3209 PORTD_PULSE_DURATION_MASK); 3210 hotplug |= intel_hpd_hotplug_enables(dev_priv, ibx_hotplug_enables); 3211 intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, hotplug); 3212 } 3213 3214 static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv) 3215 { 3216 u32 hotplug_irqs, enabled_irqs; 3217 3218 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.pch_hpd); 3219 hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.pch_hpd); 3220 3221 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 3222 3223 ibx_hpd_detection_setup(dev_priv); 3224 } 3225 3226 static u32 icp_ddi_hotplug_enables(struct drm_i915_private *i915, 3227 enum hpd_pin pin) 3228 { 3229 switch (pin) { 3230 case HPD_PORT_A: 3231 case HPD_PORT_B: 3232 case HPD_PORT_C: 3233 case HPD_PORT_D: 3234 return SHOTPLUG_CTL_DDI_HPD_ENABLE(pin); 3235 default: 3236 return 0; 3237 } 3238 } 3239 3240 static u32 icp_tc_hotplug_enables(struct drm_i915_private *i915, 3241 enum hpd_pin pin) 3242 { 3243 switch (pin) { 3244 case HPD_PORT_TC1: 3245 case HPD_PORT_TC2: 3246 case HPD_PORT_TC3: 3247 case HPD_PORT_TC4: 3248 case HPD_PORT_TC5: 3249 case HPD_PORT_TC6: 3250 return ICP_TC_HPD_ENABLE(pin); 3251 default: 3252 return 0; 3253 } 3254 } 3255 3256 static void icp_ddi_hpd_detection_setup(struct drm_i915_private *dev_priv) 3257 { 3258 u32 hotplug; 3259 3260 hotplug = intel_uncore_read(&dev_priv->uncore, SHOTPLUG_CTL_DDI); 3261 hotplug &= ~(SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_A) | 3262 SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_B) | 3263 SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_C) | 3264 SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_D)); 3265 hotplug |= intel_hpd_hotplug_enables(dev_priv, icp_ddi_hotplug_enables); 3266 intel_uncore_write(&dev_priv->uncore, SHOTPLUG_CTL_DDI, hotplug); 3267 } 3268 3269 static void icp_tc_hpd_detection_setup(struct drm_i915_private *dev_priv) 3270 { 3271 u32 hotplug; 3272 3273 hotplug = intel_uncore_read(&dev_priv->uncore, SHOTPLUG_CTL_TC); 3274 hotplug &= ~(ICP_TC_HPD_ENABLE(HPD_PORT_TC1) | 3275 ICP_TC_HPD_ENABLE(HPD_PORT_TC2) | 3276 ICP_TC_HPD_ENABLE(HPD_PORT_TC3) | 3277 ICP_TC_HPD_ENABLE(HPD_PORT_TC4) | 3278 ICP_TC_HPD_ENABLE(HPD_PORT_TC5) | 3279 ICP_TC_HPD_ENABLE(HPD_PORT_TC6)); 3280 hotplug |= intel_hpd_hotplug_enables(dev_priv, icp_tc_hotplug_enables); 3281 intel_uncore_write(&dev_priv->uncore, SHOTPLUG_CTL_TC, hotplug); 3282 } 3283 3284 static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv) 3285 { 3286 u32 hotplug_irqs, enabled_irqs; 3287 3288 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.pch_hpd); 3289 hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.pch_hpd); 3290 3291 if (INTEL_PCH_TYPE(dev_priv) <= PCH_TGP) 3292 intel_uncore_write(&dev_priv->uncore, SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ); 3293 3294 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 3295 3296 icp_ddi_hpd_detection_setup(dev_priv); 3297 icp_tc_hpd_detection_setup(dev_priv); 3298 } 3299 3300 static u32 gen11_hotplug_enables(struct drm_i915_private *i915, 3301 enum hpd_pin pin) 3302 { 3303 switch (pin) { 3304 case HPD_PORT_TC1: 3305 case HPD_PORT_TC2: 3306 case HPD_PORT_TC3: 3307 case HPD_PORT_TC4: 3308 case HPD_PORT_TC5: 3309 case HPD_PORT_TC6: 3310 return 
GEN11_HOTPLUG_CTL_ENABLE(pin); 3311 default: 3312 return 0; 3313 } 3314 } 3315 3316 static void dg1_hpd_irq_setup(struct drm_i915_private *dev_priv) 3317 { 3318 u32 val; 3319 3320 val = intel_uncore_read(&dev_priv->uncore, SOUTH_CHICKEN1); 3321 val |= (INVERT_DDIA_HPD | 3322 INVERT_DDIB_HPD | 3323 INVERT_DDIC_HPD | 3324 INVERT_DDID_HPD); 3325 intel_uncore_write(&dev_priv->uncore, SOUTH_CHICKEN1, val); 3326 3327 icp_hpd_irq_setup(dev_priv); 3328 } 3329 3330 static void gen11_tc_hpd_detection_setup(struct drm_i915_private *dev_priv) 3331 { 3332 u32 hotplug; 3333 3334 hotplug = intel_uncore_read(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL); 3335 hotplug &= ~(GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC1) | 3336 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC2) | 3337 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC3) | 3338 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC4) | 3339 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC5) | 3340 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC6)); 3341 hotplug |= intel_hpd_hotplug_enables(dev_priv, gen11_hotplug_enables); 3342 intel_uncore_write(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL, hotplug); 3343 } 3344 3345 static void gen11_tbt_hpd_detection_setup(struct drm_i915_private *dev_priv) 3346 { 3347 u32 hotplug; 3348 3349 hotplug = intel_uncore_read(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL); 3350 hotplug &= ~(GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC1) | 3351 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC2) | 3352 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC3) | 3353 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC4) | 3354 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC5) | 3355 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC6)); 3356 hotplug |= intel_hpd_hotplug_enables(dev_priv, gen11_hotplug_enables); 3357 intel_uncore_write(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL, hotplug); 3358 } 3359 3360 static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv) 3361 { 3362 u32 hotplug_irqs, enabled_irqs; 3363 u32 val; 3364 3365 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd); 3366 hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.hpd); 3367 3368 val = intel_uncore_read(&dev_priv->uncore, GEN11_DE_HPD_IMR); 3369 val &= ~hotplug_irqs; 3370 val |= ~enabled_irqs & hotplug_irqs; 3371 intel_uncore_write(&dev_priv->uncore, GEN11_DE_HPD_IMR, val); 3372 intel_uncore_posting_read(&dev_priv->uncore, GEN11_DE_HPD_IMR); 3373 3374 gen11_tc_hpd_detection_setup(dev_priv); 3375 gen11_tbt_hpd_detection_setup(dev_priv); 3376 3377 if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) 3378 icp_hpd_irq_setup(dev_priv); 3379 } 3380 3381 static u32 spt_hotplug_enables(struct drm_i915_private *i915, 3382 enum hpd_pin pin) 3383 { 3384 switch (pin) { 3385 case HPD_PORT_A: 3386 return PORTA_HOTPLUG_ENABLE; 3387 case HPD_PORT_B: 3388 return PORTB_HOTPLUG_ENABLE; 3389 case HPD_PORT_C: 3390 return PORTC_HOTPLUG_ENABLE; 3391 case HPD_PORT_D: 3392 return PORTD_HOTPLUG_ENABLE; 3393 default: 3394 return 0; 3395 } 3396 } 3397 3398 static u32 spt_hotplug2_enables(struct drm_i915_private *i915, 3399 enum hpd_pin pin) 3400 { 3401 switch (pin) { 3402 case HPD_PORT_E: 3403 return PORTE_HOTPLUG_ENABLE; 3404 default: 3405 return 0; 3406 } 3407 } 3408 3409 static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv) 3410 { 3411 u32 val, hotplug; 3412 3413 /* Display WA #1179 WaHardHangonHotPlug: cnp */ 3414 if (HAS_PCH_CNP(dev_priv)) { 3415 val = intel_uncore_read(&dev_priv->uncore, SOUTH_CHICKEN1); 3416 val &= ~CHASSIS_CLK_REQ_DURATION_MASK; 3417 val |= CHASSIS_CLK_REQ_DURATION(0xf); 3418 intel_uncore_write(&dev_priv->uncore, SOUTH_CHICKEN1, val); 3419 } 3420 3421 /* Enable digital 
hotplug on the PCH */ 3422 hotplug = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG); 3423 hotplug &= ~(PORTA_HOTPLUG_ENABLE | 3424 PORTB_HOTPLUG_ENABLE | 3425 PORTC_HOTPLUG_ENABLE | 3426 PORTD_HOTPLUG_ENABLE); 3427 hotplug |= intel_hpd_hotplug_enables(dev_priv, spt_hotplug_enables); 3428 intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, hotplug); 3429 3430 hotplug = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG2); 3431 hotplug &= ~PORTE_HOTPLUG_ENABLE; 3432 hotplug |= intel_hpd_hotplug_enables(dev_priv, spt_hotplug2_enables); 3433 intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG2, hotplug); 3434 } 3435 3436 static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv) 3437 { 3438 u32 hotplug_irqs, enabled_irqs; 3439 3440 if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP) 3441 intel_uncore_write(&dev_priv->uncore, SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ); 3442 3443 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.pch_hpd); 3444 hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.pch_hpd); 3445 3446 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 3447 3448 spt_hpd_detection_setup(dev_priv); 3449 } 3450 3451 static u32 ilk_hotplug_enables(struct drm_i915_private *i915, 3452 enum hpd_pin pin) 3453 { 3454 switch (pin) { 3455 case HPD_PORT_A: 3456 return DIGITAL_PORTA_HOTPLUG_ENABLE | 3457 DIGITAL_PORTA_PULSE_DURATION_2ms; 3458 default: 3459 return 0; 3460 } 3461 } 3462 3463 static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv) 3464 { 3465 u32 hotplug; 3466 3467 /* 3468 * Enable digital hotplug on the CPU, and configure the DP short pulse 3469 * duration to 2ms (which is the minimum in the Display Port spec) 3470 * The pulse duration bits are reserved on HSW+. 3471 */ 3472 hotplug = intel_uncore_read(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL); 3473 hotplug &= ~(DIGITAL_PORTA_HOTPLUG_ENABLE | 3474 DIGITAL_PORTA_PULSE_DURATION_MASK); 3475 hotplug |= intel_hpd_hotplug_enables(dev_priv, ilk_hotplug_enables); 3476 intel_uncore_write(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL, hotplug); 3477 } 3478 3479 static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv) 3480 { 3481 u32 hotplug_irqs, enabled_irqs; 3482 3483 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd); 3484 hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.hpd); 3485 3486 if (INTEL_GEN(dev_priv) >= 8) 3487 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs); 3488 else 3489 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs); 3490 3491 ilk_hpd_detection_setup(dev_priv); 3492 3493 ibx_hpd_irq_setup(dev_priv); 3494 } 3495 3496 static u32 bxt_hotplug_enables(struct drm_i915_private *i915, 3497 enum hpd_pin pin) 3498 { 3499 u32 hotplug; 3500 3501 switch (pin) { 3502 case HPD_PORT_A: 3503 hotplug = PORTA_HOTPLUG_ENABLE; 3504 if (intel_bios_is_port_hpd_inverted(i915, PORT_A)) 3505 hotplug |= BXT_DDIA_HPD_INVERT; 3506 return hotplug; 3507 case HPD_PORT_B: 3508 hotplug = PORTB_HOTPLUG_ENABLE; 3509 if (intel_bios_is_port_hpd_inverted(i915, PORT_B)) 3510 hotplug |= BXT_DDIB_HPD_INVERT; 3511 return hotplug; 3512 case HPD_PORT_C: 3513 hotplug = PORTC_HOTPLUG_ENABLE; 3514 if (intel_bios_is_port_hpd_inverted(i915, PORT_C)) 3515 hotplug |= BXT_DDIC_HPD_INVERT; 3516 return hotplug; 3517 default: 3518 return 0; 3519 } 3520 } 3521 3522 static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv) 3523 { 3524 u32 hotplug; 3525 3526 hotplug = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG); 
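/*
 * Read-modify-write, like the other *_hpd_detection_setup() helpers:
 * clear all enable and invert bits, then OR back the values from
 * bxt_hotplug_enables(), which also applies any per-port HPD invert
 * quirk requested by the VBT.
 */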
3527 hotplug &= ~(PORTA_HOTPLUG_ENABLE | 3528 PORTB_HOTPLUG_ENABLE | 3529 PORTC_HOTPLUG_ENABLE | 3530 BXT_DDIA_HPD_INVERT | 3531 BXT_DDIB_HPD_INVERT | 3532 BXT_DDIC_HPD_INVERT); 3533 hotplug |= intel_hpd_hotplug_enables(dev_priv, bxt_hotplug_enables); 3534 intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, hotplug); 3535 } 3536 3537 static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv) 3538 { 3539 u32 hotplug_irqs, enabled_irqs; 3540 3541 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd); 3542 hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.hpd); 3543 3544 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs); 3545 3546 bxt_hpd_detection_setup(dev_priv); 3547 } 3548 3549 /* 3550 * SDEIER is also touched by the interrupt handler to work around missed PCH 3551 * interrupts. Hence we can't update it after the interrupt handler is enabled - 3552 * instead we unconditionally enable all PCH interrupt sources here, but then 3553 * only unmask them as needed with SDEIMR. 3554 * 3555 * Note that we currently do this after installing the interrupt handler, 3556 * but before we enable the master interrupt. That should be sufficient 3557 * to avoid races with the irq handler, assuming we have MSI. Shared legacy 3558 * interrupts could still race. 3559 */ 3560 static void ibx_irq_postinstall(struct drm_i915_private *dev_priv) 3561 { 3562 struct intel_uncore *uncore = &dev_priv->uncore; 3563 u32 mask; 3564 3565 if (HAS_PCH_NOP(dev_priv)) 3566 return; 3567 3568 if (HAS_PCH_IBX(dev_priv)) 3569 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON; 3570 else if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv)) 3571 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT; 3572 else 3573 mask = SDE_GMBUS_CPT; 3574 3575 GEN3_IRQ_INIT(uncore, SDE, ~mask, 0xffffffff); 3576 } 3577 3578 static void ilk_irq_postinstall(struct drm_i915_private *dev_priv) 3579 { 3580 struct intel_uncore *uncore = &dev_priv->uncore; 3581 u32 display_mask, extra_mask; 3582 3583 if (INTEL_GEN(dev_priv) >= 7) { 3584 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | 3585 DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB); 3586 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB | 3587 DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB | 3588 DE_DP_A_HOTPLUG_IVB); 3589 } else { 3590 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | 3591 DE_AUX_CHANNEL_A | DE_PIPEB_CRC_DONE | 3592 DE_PIPEA_CRC_DONE | DE_POISON); 3593 extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | 3594 DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN | 3595 DE_DP_A_HOTPLUG); 3596 } 3597 3598 if (IS_HASWELL(dev_priv)) { 3599 gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR); 3600 display_mask |= DE_EDP_PSR_INT_HSW; 3601 } 3602 3603 if (IS_IRONLAKE_M(dev_priv)) 3604 extra_mask |= DE_PCU_EVENT; 3605 3606 dev_priv->irq_mask = ~display_mask; 3607 3608 ibx_irq_postinstall(dev_priv); 3609 3610 gen5_gt_irq_postinstall(&dev_priv->gt); 3611 3612 GEN3_IRQ_INIT(uncore, DE, dev_priv->irq_mask, 3613 display_mask | extra_mask); 3614 } 3615 3616 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv) 3617 { 3618 lockdep_assert_held(&dev_priv->irq_lock); 3619 3620 if (dev_priv->display_irqs_enabled) 3621 return; 3622 3623 dev_priv->display_irqs_enabled = true; 3624 3625 if (intel_irqs_enabled(dev_priv)) { 3626 vlv_display_irq_reset(dev_priv); 3627 vlv_display_irq_postinstall(dev_priv); 3628 } 3629 } 3630 3631 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv) 3632 { 3633 lockdep_assert_held(&dev_priv->irq_lock); 3634 3635 if 
(!dev_priv->display_irqs_enabled) 3636 return; 3637 3638 dev_priv->display_irqs_enabled = false; 3639 3640 if (intel_irqs_enabled(dev_priv)) 3641 vlv_display_irq_reset(dev_priv); 3642 } 3643 3644 3645 static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv) 3646 { 3647 gen5_gt_irq_postinstall(&dev_priv->gt); 3648 3649 spin_lock_irq(&dev_priv->irq_lock); 3650 if (dev_priv->display_irqs_enabled) 3651 vlv_display_irq_postinstall(dev_priv); 3652 spin_unlock_irq(&dev_priv->irq_lock); 3653 3654 intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); 3655 intel_uncore_posting_read(&dev_priv->uncore, VLV_MASTER_IER); 3656 } 3657 3658 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) 3659 { 3660 struct intel_uncore *uncore = &dev_priv->uncore; 3661 3662 u32 de_pipe_masked = gen8_de_pipe_fault_mask(dev_priv) | 3663 GEN8_PIPE_CDCLK_CRC_DONE; 3664 u32 de_pipe_enables; 3665 u32 de_port_masked = gen8_de_port_aux_mask(dev_priv); 3666 u32 de_port_enables; 3667 u32 de_misc_masked = GEN8_DE_EDP_PSR; 3668 u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | 3669 BIT(TRANSCODER_C) | BIT(TRANSCODER_D); 3670 enum pipe pipe; 3671 3672 if (INTEL_GEN(dev_priv) <= 10) 3673 de_misc_masked |= GEN8_DE_MISC_GSE; 3674 3675 if (IS_GEN9_LP(dev_priv)) 3676 de_port_masked |= BXT_DE_PORT_GMBUS; 3677 3678 if (INTEL_GEN(dev_priv) >= 11) { 3679 enum port port; 3680 3681 if (intel_bios_is_dsi_present(dev_priv, &port)) 3682 de_port_masked |= DSI0_TE | DSI1_TE; 3683 } 3684 3685 de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK | 3686 GEN8_PIPE_FIFO_UNDERRUN; 3687 3688 if (INTEL_GEN(dev_priv) >= 9) 3689 de_pipe_enables |= GEN9_PIPE_PLANE1_FLIP_DONE; 3690 3691 de_port_enables = de_port_masked; 3692 if (IS_GEN9_LP(dev_priv)) 3693 de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK; 3694 else if (IS_BROADWELL(dev_priv)) 3695 de_port_enables |= BDW_DE_PORT_HOTPLUG_MASK; 3696 3697 if (INTEL_GEN(dev_priv) >= 12) { 3698 enum transcoder trans; 3699 3700 for_each_cpu_transcoder_masked(dev_priv, trans, trans_mask) { 3701 enum intel_display_power_domain domain; 3702 3703 domain = POWER_DOMAIN_TRANSCODER(trans); 3704 if (!intel_display_power_is_enabled(dev_priv, domain)) 3705 continue; 3706 3707 gen3_assert_iir_is_zero(uncore, TRANS_PSR_IIR(trans)); 3708 } 3709 } else { 3710 gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR); 3711 } 3712 3713 for_each_pipe(dev_priv, pipe) { 3714 dev_priv->de_irq_mask[pipe] = ~de_pipe_masked; 3715 3716 if (intel_display_power_is_enabled(dev_priv, 3717 POWER_DOMAIN_PIPE(pipe))) 3718 GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe, 3719 dev_priv->de_irq_mask[pipe], 3720 de_pipe_enables); 3721 } 3722 3723 GEN3_IRQ_INIT(uncore, GEN8_DE_PORT_, ~de_port_masked, de_port_enables); 3724 GEN3_IRQ_INIT(uncore, GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked); 3725 3726 if (INTEL_GEN(dev_priv) >= 11) { 3727 u32 de_hpd_masked = 0; 3728 u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK | 3729 GEN11_DE_TBT_HOTPLUG_MASK; 3730 3731 GEN3_IRQ_INIT(uncore, GEN11_DE_HPD_, ~de_hpd_masked, 3732 de_hpd_enables); 3733 } 3734 } 3735 3736 static void gen8_irq_postinstall(struct drm_i915_private *dev_priv) 3737 { 3738 if (HAS_PCH_SPLIT(dev_priv)) 3739 ibx_irq_postinstall(dev_priv); 3740 3741 gen8_gt_irq_postinstall(&dev_priv->gt); 3742 gen8_de_irq_postinstall(dev_priv); 3743 3744 gen8_master_intr_enable(dev_priv->uncore.regs); 3745 } 3746 3747 static void icp_irq_postinstall(struct drm_i915_private *dev_priv) 3748 { 3749 struct intel_uncore *uncore = &dev_priv->uncore; 3750 u32 mask = 

static void icp_irq_postinstall(struct drm_i915_private *dev_priv)
{
        struct intel_uncore *uncore = &dev_priv->uncore;
        u32 mask = SDE_GMBUS_ICP;

        GEN3_IRQ_INIT(uncore, SDE, ~mask, 0xffffffff);
}

static void gen11_irq_postinstall(struct drm_i915_private *dev_priv)
{
        struct intel_uncore *uncore = &dev_priv->uncore;
        u32 gu_misc_masked = GEN11_GU_MISC_GSE;

        if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
                icp_irq_postinstall(dev_priv);

        gen11_gt_irq_postinstall(&dev_priv->gt);
        gen8_de_irq_postinstall(dev_priv);

        GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);

        intel_uncore_write(&dev_priv->uncore, GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);

        if (HAS_MASTER_UNIT_IRQ(dev_priv)) {
                dg1_master_intr_enable(uncore->regs);
                intel_uncore_posting_read(&dev_priv->uncore, DG1_MSTR_UNIT_INTR);
        } else {
                gen11_master_intr_enable(uncore->regs);
                intel_uncore_posting_read(&dev_priv->uncore, GEN11_GFX_MSTR_IRQ);
        }
}

static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv)
{
        gen8_gt_irq_postinstall(&dev_priv->gt);

        spin_lock_irq(&dev_priv->irq_lock);
        if (dev_priv->display_irqs_enabled)
                vlv_display_irq_postinstall(dev_priv);
        spin_unlock_irq(&dev_priv->irq_lock);

        intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
        intel_uncore_posting_read(&dev_priv->uncore, GEN8_MASTER_IRQ);
}

static void i8xx_irq_reset(struct drm_i915_private *dev_priv)
{
        struct intel_uncore *uncore = &dev_priv->uncore;

        i9xx_pipestat_irq_reset(dev_priv);

        GEN2_IRQ_RESET(uncore);
        dev_priv->irq_mask = ~0u;
}

static void i8xx_irq_postinstall(struct drm_i915_private *dev_priv)
{
        struct intel_uncore *uncore = &dev_priv->uncore;
        u16 enable_mask;

        intel_uncore_write16(uncore,
                             EMR,
                             ~(I915_ERROR_PAGE_TABLE |
                               I915_ERROR_MEMORY_REFRESH));

        /* Unmask the interrupts that we always want on. */
        dev_priv->irq_mask =
                ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
                  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
                  I915_MASTER_ERROR_INTERRUPT);

        enable_mask =
                I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
                I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
                I915_MASTER_ERROR_INTERRUPT |
                I915_USER_INTERRUPT;

        GEN2_IRQ_INIT(uncore, dev_priv->irq_mask, enable_mask);

        /* Interrupt setup is already guaranteed to be single-threaded, this is
         * just to make the assert_spin_locked check happy. */
        spin_lock_irq(&dev_priv->irq_lock);
        i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
        i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
        spin_unlock_irq(&dev_priv->irq_lock);
}
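
/*
 * Gen2 only has 16-bit interrupt registers, hence the
 * intel_uncore_{read,write}16() accessors and u16 masks in the i8xx error
 * paths below; the i9xx_* variants that follow are the same logic widened
 * to the 32-bit registers of later generations.
 */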

static void i8xx_error_irq_ack(struct drm_i915_private *i915,
                               u16 *eir, u16 *eir_stuck)
{
        struct intel_uncore *uncore = &i915->uncore;
        u16 emr;

        *eir = intel_uncore_read16(uncore, EIR);

        if (*eir)
                intel_uncore_write16(uncore, EIR, *eir);

        *eir_stuck = intel_uncore_read16(uncore, EIR);
        if (*eir_stuck == 0)
                return;

        /*
         * Toggle all EMR bits to make sure we get an edge
         * in the ISR master error bit if we don't clear
         * all the EIR bits. Otherwise the edge triggered
         * IIR on i965/g4x wouldn't notice that an interrupt
         * is still pending. Also some EIR bits can't be
         * cleared except by handling the underlying error
         * (or by a GPU reset) so we mask any bit that
         * remains set.
         */
        emr = intel_uncore_read16(uncore, EMR);
        intel_uncore_write16(uncore, EMR, 0xffff);
        intel_uncore_write16(uncore, EMR, emr | *eir_stuck);
}

static void i8xx_error_irq_handler(struct drm_i915_private *dev_priv,
                                   u16 eir, u16 eir_stuck)
{
        DRM_DEBUG("Master Error: EIR 0x%04x\n", eir);

        if (eir_stuck)
                drm_dbg(&dev_priv->drm, "EIR stuck: 0x%04x, masked\n",
                        eir_stuck);
}

static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv,
                               u32 *eir, u32 *eir_stuck)
{
        u32 emr;

        *eir = intel_uncore_read(&dev_priv->uncore, EIR);

        intel_uncore_write(&dev_priv->uncore, EIR, *eir);

        *eir_stuck = intel_uncore_read(&dev_priv->uncore, EIR);
        if (*eir_stuck == 0)
                return;

        /*
         * Toggle all EMR bits to make sure we get an edge
         * in the ISR master error bit if we don't clear
         * all the EIR bits. Otherwise the edge triggered
         * IIR on i965/g4x wouldn't notice that an interrupt
         * is still pending. Also some EIR bits can't be
         * cleared except by handling the underlying error
         * (or by a GPU reset) so we mask any bit that
         * remains set.
         */
        emr = intel_uncore_read(&dev_priv->uncore, EMR);
        intel_uncore_write(&dev_priv->uncore, EMR, 0xffffffff);
        intel_uncore_write(&dev_priv->uncore, EMR, emr | *eir_stuck);
}

static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv,
                                   u32 eir, u32 eir_stuck)
{
        DRM_DEBUG("Master Error, EIR 0x%08x\n", eir);

        if (eir_stuck)
                drm_dbg(&dev_priv->drm, "EIR stuck: 0x%08x, masked\n",
                        eir_stuck);
}
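
/*
 * The legacy interrupt handlers below run a single pass through a
 * do { } while (0) block so that they can bail out early with break.
 * Returning IRQ_NONE when IIR reads back zero matters on shared lines:
 * it tells the core IRQ code (and pmu_irq_stats()) that the interrupt
 * came from another device sharing the line.
 */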

static irqreturn_t i8xx_irq_handler(int irq, void *arg)
{
        struct drm_i915_private *dev_priv = arg;
        irqreturn_t ret = IRQ_NONE;

        if (!intel_irqs_enabled(dev_priv))
                return IRQ_NONE;

        /* IRQs are synced during runtime_suspend, we don't require a wakeref */
        disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

        do {
                u32 pipe_stats[I915_MAX_PIPES] = {};
                u16 eir = 0, eir_stuck = 0;
                u16 iir;

                iir = intel_uncore_read16(&dev_priv->uncore, GEN2_IIR);
                if (iir == 0)
                        break;

                ret = IRQ_HANDLED;

                /* Call regardless, as some status bits might not be
                 * signalled in iir */
                i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

                if (iir & I915_MASTER_ERROR_INTERRUPT)
                        i8xx_error_irq_ack(dev_priv, &eir, &eir_stuck);

                intel_uncore_write16(&dev_priv->uncore, GEN2_IIR, iir);

                if (iir & I915_USER_INTERRUPT)
                        intel_engine_signal_breadcrumbs(dev_priv->gt.engine[RCS0]);

                if (iir & I915_MASTER_ERROR_INTERRUPT)
                        i8xx_error_irq_handler(dev_priv, eir, eir_stuck);

                i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats);
        } while (0);

        pmu_irq_stats(dev_priv, ret);

        enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

        return ret;
}
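
/*
 * i915/i965 keep their interrupt registers at the same offsets as gen2
 * (hence the GEN2_ prefix in the GEN3_IRQ_RESET()/GEN3_IRQ_INIT() calls
 * below), but the registers are 32 bits wide, so the full-width accessors
 * are used from here on.
 */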

static void i915_irq_reset(struct drm_i915_private *dev_priv)
{
        struct intel_uncore *uncore = &dev_priv->uncore;

        if (I915_HAS_HOTPLUG(dev_priv)) {
                i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
                intel_uncore_write(&dev_priv->uncore, PORT_HOTPLUG_STAT, intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT));
        }

        i9xx_pipestat_irq_reset(dev_priv);

        GEN3_IRQ_RESET(uncore, GEN2_);
        dev_priv->irq_mask = ~0u;
}

static void i915_irq_postinstall(struct drm_i915_private *dev_priv)
{
        struct intel_uncore *uncore = &dev_priv->uncore;
        u32 enable_mask;

        intel_uncore_write(&dev_priv->uncore, EMR,
                           ~(I915_ERROR_PAGE_TABLE |
                             I915_ERROR_MEMORY_REFRESH));

        /* Unmask the interrupts that we always want on. */
        dev_priv->irq_mask =
                ~(I915_ASLE_INTERRUPT |
                  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
                  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
                  I915_MASTER_ERROR_INTERRUPT);

        enable_mask =
                I915_ASLE_INTERRUPT |
                I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
                I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
                I915_MASTER_ERROR_INTERRUPT |
                I915_USER_INTERRUPT;

        if (I915_HAS_HOTPLUG(dev_priv)) {
                /* Enable in IER... */
                enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
                /* and unmask in IMR */
                dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
        }

        GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);

        /* Interrupt setup is already guaranteed to be single-threaded, this is
         * just to make the assert_spin_locked check happy. */
        spin_lock_irq(&dev_priv->irq_lock);
        i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
        i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
        spin_unlock_irq(&dev_priv->irq_lock);

        i915_enable_asle_pipestat(dev_priv);
}

static irqreturn_t i915_irq_handler(int irq, void *arg)
{
        struct drm_i915_private *dev_priv = arg;
        irqreturn_t ret = IRQ_NONE;

        if (!intel_irqs_enabled(dev_priv))
                return IRQ_NONE;

        /* IRQs are synced during runtime_suspend, we don't require a wakeref */
        disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

        do {
                u32 pipe_stats[I915_MAX_PIPES] = {};
                u32 eir = 0, eir_stuck = 0;
                u32 hotplug_status = 0;
                u32 iir;

                iir = intel_uncore_read(&dev_priv->uncore, GEN2_IIR);
                if (iir == 0)
                        break;

                ret = IRQ_HANDLED;

                if (I915_HAS_HOTPLUG(dev_priv) &&
                    iir & I915_DISPLAY_PORT_INTERRUPT)
                        hotplug_status = i9xx_hpd_irq_ack(dev_priv);

                /* Call regardless, as some status bits might not be
                 * signalled in iir */
                i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

                if (iir & I915_MASTER_ERROR_INTERRUPT)
                        i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);

                intel_uncore_write(&dev_priv->uncore, GEN2_IIR, iir);

                if (iir & I915_USER_INTERRUPT)
                        intel_engine_signal_breadcrumbs(dev_priv->gt.engine[RCS0]);

                if (iir & I915_MASTER_ERROR_INTERRUPT)
                        i9xx_error_irq_handler(dev_priv, eir, eir_stuck);

                if (hotplug_status)
                        i9xx_hpd_irq_handler(dev_priv, hotplug_status);

                i915_pipestat_irq_handler(dev_priv, iir, pipe_stats);
        } while (0);

        pmu_irq_stats(dev_priv, ret);

        enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

        return ret;
}

static void i965_irq_reset(struct drm_i915_private *dev_priv)
{
        struct intel_uncore *uncore = &dev_priv->uncore;

        i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
        intel_uncore_write(&dev_priv->uncore, PORT_HOTPLUG_STAT, intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT));

        i9xx_pipestat_irq_reset(dev_priv);

        GEN3_IRQ_RESET(uncore, GEN2_);
        dev_priv->irq_mask = ~0u;
}

static void i965_irq_postinstall(struct drm_i915_private *dev_priv)
{
        struct intel_uncore *uncore = &dev_priv->uncore;
        u32 enable_mask;
        u32 error_mask;

        /*
         * Enable some error detection, note the instruction error mask
         * bit is reserved, so we leave it masked.
         */
        if (IS_G4X(dev_priv)) {
                error_mask = ~(GM45_ERROR_PAGE_TABLE |
                               GM45_ERROR_MEM_PRIV |
                               GM45_ERROR_CP_PRIV |
                               I915_ERROR_MEMORY_REFRESH);
        } else {
                error_mask = ~(I915_ERROR_PAGE_TABLE |
                               I915_ERROR_MEMORY_REFRESH);
        }
        intel_uncore_write(&dev_priv->uncore, EMR, error_mask);

        /* Unmask the interrupts that we always want on. */
        dev_priv->irq_mask =
                ~(I915_ASLE_INTERRUPT |
                  I915_DISPLAY_PORT_INTERRUPT |
                  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
                  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
                  I915_MASTER_ERROR_INTERRUPT);

        enable_mask =
                I915_ASLE_INTERRUPT |
                I915_DISPLAY_PORT_INTERRUPT |
                I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
                I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
                I915_MASTER_ERROR_INTERRUPT |
                I915_USER_INTERRUPT;

        if (IS_G4X(dev_priv))
                enable_mask |= I915_BSD_USER_INTERRUPT;

        GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);

        /* Interrupt setup is already guaranteed to be single-threaded, this is
         * just to make the assert_spin_locked check happy. */
        spin_lock_irq(&dev_priv->irq_lock);
        i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
        i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
        i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
        spin_unlock_irq(&dev_priv->irq_lock);

        i915_enable_asle_pipestat(dev_priv);
}
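
/*
 * i915_hpd_irq_setup() is the GMCH hotplug hook: intel_irq_init() below
 * installs it as dev_priv->display.hpd_irq_setup, and callers must hold
 * dev_priv->irq_lock, hence the lockdep assert.
 */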

static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
        u32 hotplug_en;

        lockdep_assert_held(&dev_priv->irq_lock);

        /* Note HDMI and DP share hotplug bits */
        /* enable bits are the same for all generations */
        hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915);
        /*
         * Programming the CRT detection parameters tends to generate a
         * spurious hotplug event about three seconds later. So just do it
         * once.
         */
        if (IS_G4X(dev_priv))
                hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
        hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;

        /* Ignore TV since it's buggy */
        i915_hotplug_interrupt_update_locked(dev_priv,
                                             HOTPLUG_INT_EN_MASK |
                                             CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
                                             CRT_HOTPLUG_ACTIVATION_PERIOD_64,
                                             hotplug_en);
}

static irqreturn_t i965_irq_handler(int irq, void *arg)
{
        struct drm_i915_private *dev_priv = arg;
        irqreturn_t ret = IRQ_NONE;

        if (!intel_irqs_enabled(dev_priv))
                return IRQ_NONE;

        /* IRQs are synced during runtime_suspend, we don't require a wakeref */
        disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

        do {
                u32 pipe_stats[I915_MAX_PIPES] = {};
                u32 eir = 0, eir_stuck = 0;
                u32 hotplug_status = 0;
                u32 iir;

                iir = intel_uncore_read(&dev_priv->uncore, GEN2_IIR);
                if (iir == 0)
                        break;

                ret = IRQ_HANDLED;

                if (iir & I915_DISPLAY_PORT_INTERRUPT)
                        hotplug_status = i9xx_hpd_irq_ack(dev_priv);

                /* Call regardless, as some status bits might not be
                 * signalled in iir */
                i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

                if (iir & I915_MASTER_ERROR_INTERRUPT)
                        i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);

                intel_uncore_write(&dev_priv->uncore, GEN2_IIR, iir);

                if (iir & I915_USER_INTERRUPT)
                        intel_engine_signal_breadcrumbs(dev_priv->gt.engine[RCS0]);

                if (iir & I915_BSD_USER_INTERRUPT)
                        intel_engine_signal_breadcrumbs(dev_priv->gt.engine[VCS0]);

                if (iir & I915_MASTER_ERROR_INTERRUPT)
                        i9xx_error_irq_handler(dev_priv, eir, eir_stuck);

                if (hotplug_status)
                        i9xx_hpd_irq_handler(dev_priv, hotplug_status);

                i965_pipestat_irq_handler(dev_priv, iir, pipe_stats);
        } while (0);

        /*
         * Report the real result so interrupts raised by another device
         * sharing the line are not counted, matching the other legacy
         * handlers above.
         */
        pmu_irq_stats(dev_priv, ret);

        enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

        return ret;
}
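
/*
 * Everything below this point is the platform-independent entry points.
 * Over the life of the driver the calling order is: intel_irq_init() sets
 * up state and vtables, intel_irq_install() requests the interrupt and
 * runs the reset/postinstall hooks, the runtime pm helpers toggle
 * interrupts around suspend/resume, and intel_irq_uninstall() tears it
 * all down again.
 */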

/**
 * intel_irq_init - initializes irq support
 * @dev_priv: i915 device instance
 *
 * This function initializes all the irq support including work items, timers
 * and all the vtables. It does not set up the interrupt itself though.
 */
void intel_irq_init(struct drm_i915_private *dev_priv)
{
        struct drm_device *dev = &dev_priv->drm;
        int i;

        INIT_WORK(&dev_priv->l3_parity.error_work, ivb_parity_work);
        for (i = 0; i < MAX_L3_SLICES; ++i)
                dev_priv->l3_parity.remap_info[i] = NULL;

        /* pre-gen11 the guc irqs bits are in the upper 16 bits of the pm reg */
        if (HAS_GT_UC(dev_priv) && INTEL_GEN(dev_priv) < 11)
                dev_priv->gt.pm_guc_events = GUC_INTR_GUC2HOST << 16;

        if (!HAS_DISPLAY(dev_priv))
                return;

        intel_hpd_init_pins(dev_priv);

        intel_hpd_init_work(dev_priv);

        dev->vblank_disable_immediate = true;

        /* Most platforms treat the display irq block as an always-on
         * power domain. vlv/chv can disable it at runtime and need
         * special care to avoid writing any of the display block registers
         * outside of the power domain. We defer setting up the display irqs
         * in this case to the runtime pm.
         */
        dev_priv->display_irqs_enabled = true;
        if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                dev_priv->display_irqs_enabled = false;

        dev_priv->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD;
        /* If we have MST support, we want to avoid doing short HPD IRQ storm
         * detection, as short HPD storms will occur as a natural part of
         * sideband messaging with MST.
         * On older platforms however, IRQ storms can occur with both long and
         * short pulses, as seen on some G4x systems.
         */
        dev_priv->hotplug.hpd_short_storm_enabled = !HAS_DP_MST(dev_priv);

        if (HAS_GMCH(dev_priv)) {
                if (I915_HAS_HOTPLUG(dev_priv))
                        dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
        } else {
                if (HAS_PCH_DG1(dev_priv))
                        dev_priv->display.hpd_irq_setup = dg1_hpd_irq_setup;
                else if (INTEL_GEN(dev_priv) >= 11)
                        dev_priv->display.hpd_irq_setup = gen11_hpd_irq_setup;
                else if (IS_GEN9_LP(dev_priv))
                        dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
                else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
                        dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
                else
                        dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
        }
}

/**
 * intel_irq_fini - deinitializes IRQ support
 * @i915: i915 device instance
 *
 * This function deinitializes all the IRQ support.
 */
void intel_irq_fini(struct drm_i915_private *i915)
{
        int i;

        for (i = 0; i < MAX_L3_SLICES; ++i)
                kfree(i915->l3_parity.remap_info[i]);
}

static irq_handler_t intel_irq_handler(struct drm_i915_private *dev_priv)
{
        if (HAS_GMCH(dev_priv)) {
                if (IS_CHERRYVIEW(dev_priv))
                        return cherryview_irq_handler;
                else if (IS_VALLEYVIEW(dev_priv))
                        return valleyview_irq_handler;
                else if (IS_GEN(dev_priv, 4))
                        return i965_irq_handler;
                else if (IS_GEN(dev_priv, 3))
                        return i915_irq_handler;
                else
                        return i8xx_irq_handler;
        } else {
                if (HAS_MASTER_UNIT_IRQ(dev_priv))
                        return dg1_irq_handler;
                if (INTEL_GEN(dev_priv) >= 11)
                        return gen11_irq_handler;
                else if (INTEL_GEN(dev_priv) >= 8)
                        return gen8_irq_handler;
                else
                        return ilk_irq_handler;
        }
}

static void intel_irq_reset(struct drm_i915_private *dev_priv)
{
        if (HAS_GMCH(dev_priv)) {
                if (IS_CHERRYVIEW(dev_priv))
                        cherryview_irq_reset(dev_priv);
                else if (IS_VALLEYVIEW(dev_priv))
                        valleyview_irq_reset(dev_priv);
                else if (IS_GEN(dev_priv, 4))
                        i965_irq_reset(dev_priv);
                else if (IS_GEN(dev_priv, 3))
                        i915_irq_reset(dev_priv);
                else
                        i8xx_irq_reset(dev_priv);
        } else {
                if (INTEL_GEN(dev_priv) >= 11)
                        gen11_irq_reset(dev_priv);
                else if (INTEL_GEN(dev_priv) >= 8)
                        gen8_irq_reset(dev_priv);
                else
                        ilk_irq_reset(dev_priv);
        }
}

static void intel_irq_postinstall(struct drm_i915_private *dev_priv)
{
        if (HAS_GMCH(dev_priv)) {
                if (IS_CHERRYVIEW(dev_priv))
                        cherryview_irq_postinstall(dev_priv);
                else if (IS_VALLEYVIEW(dev_priv))
                        valleyview_irq_postinstall(dev_priv);
                else if (IS_GEN(dev_priv, 4))
                        i965_irq_postinstall(dev_priv);
                else if (IS_GEN(dev_priv, 3))
                        i915_irq_postinstall(dev_priv);
                else
                        i8xx_irq_postinstall(dev_priv);
        } else {
                if (INTEL_GEN(dev_priv) >= 11)
                        gen11_irq_postinstall(dev_priv);
                else if (INTEL_GEN(dev_priv) >= 8)
                        gen8_irq_postinstall(dev_priv);
                else
                        ilk_irq_postinstall(dev_priv);
        }
}
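
/*
 * intel_irq_reset()/intel_irq_postinstall() above dispatch to the
 * per-platform implementations; intel_irq_install() below pairs them with
 * request_irq(), while the runtime pm helpers further down reuse them to
 * cycle interrupt state without re-requesting the line.
 */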

/**
 * intel_irq_install - enables the hardware interrupt
 * @dev_priv: i915 device instance
 *
 * This function enables the hardware interrupt handling, but leaves hotplug
 * handling disabled. It is called after intel_irq_init().
 *
 * In the driver load and resume code we need working interrupts in a few places
 * but don't want to deal with the hassle of concurrent probe and hotplug
 * workers. Hence the split into this two-stage approach.
 */
int intel_irq_install(struct drm_i915_private *dev_priv)
{
        int irq = dev_priv->drm.pdev->irq;
        int ret;

        /*
         * We enable some interrupt sources in our postinstall hooks, so mark
         * interrupts as enabled _before_ actually enabling them to avoid
         * special cases in our ordering checks.
         */
        dev_priv->runtime_pm.irqs_enabled = true;

        dev_priv->drm.irq_enabled = true;

        intel_irq_reset(dev_priv);

        ret = request_irq(irq, intel_irq_handler(dev_priv),
                          IRQF_SHARED, DRIVER_NAME, dev_priv);
        if (ret < 0) {
                dev_priv->drm.irq_enabled = false;
                return ret;
        }

        intel_irq_postinstall(dev_priv);

        return ret;
}

/**
 * intel_irq_uninstall - finalizes all irq handling
 * @dev_priv: i915 device instance
 *
 * This stops interrupt and hotplug handling and unregisters and frees all
 * resources acquired in the init functions.
 */
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
{
        int irq = dev_priv->drm.pdev->irq;

        /*
         * FIXME we can get called twice during driver probe
         * error handling as well as during driver remove due to
         * intel_modeset_driver_remove() calling us out of sequence.
         * Would be nice if it didn't do that...
         */
        if (!dev_priv->drm.irq_enabled)
                return;

        dev_priv->drm.irq_enabled = false;

        intel_irq_reset(dev_priv);

        free_irq(irq, dev_priv);

        intel_hpd_cancel_work(dev_priv);
        dev_priv->runtime_pm.irqs_enabled = false;
}

/**
 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
 * @dev_priv: i915 device instance
 *
 * This function is used to disable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
{
        intel_irq_reset(dev_priv);
        dev_priv->runtime_pm.irqs_enabled = false;
        intel_synchronize_irq(dev_priv);
}

/**
 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
 * @dev_priv: i915 device instance
 *
 * This function is used to enable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
{
        dev_priv->runtime_pm.irqs_enabled = true;
        intel_irq_reset(dev_priv);
        intel_irq_postinstall(dev_priv);
}

bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
{
        /*
         * We only use drm_irq_uninstall() at unload and VT switch, so
         * this is the only thing we need to check.
         */
        return dev_priv->runtime_pm.irqs_enabled;
}

void intel_synchronize_irq(struct drm_i915_private *i915)
{
        synchronize_irq(i915->drm.pdev->irq);
}