1 /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*- 2 */ 3 /* 4 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. 5 * All Rights Reserved. 6 * 7 * Permission is hereby granted, free of charge, to any person obtaining a 8 * copy of this software and associated documentation files (the 9 * "Software"), to deal in the Software without restriction, including 10 * without limitation the rights to use, copy, modify, merge, publish, 11 * distribute, sub license, and/or sell copies of the Software, and to 12 * permit persons to whom the Software is furnished to do so, subject to 13 * the following conditions: 14 * 15 * The above copyright notice and this permission notice (including the 16 * next paragraph) shall be included in all copies or substantial portions 17 * of the Software. 18 * 19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 22 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR 23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, 24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 26 * 27 */ 28 29 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 30 31 #include <linux/circ_buf.h> 32 #include <linux/slab.h> 33 #include <linux/sysrq.h> 34 35 #include <drm/drm_drv.h> 36 #include <drm/drm_irq.h> 37 38 #include "display/intel_display_types.h" 39 #include "display/intel_fifo_underrun.h" 40 #include "display/intel_hotplug.h" 41 #include "display/intel_lpe_audio.h" 42 #include "display/intel_psr.h" 43 44 #include "gt/intel_breadcrumbs.h" 45 #include "gt/intel_gt.h" 46 #include "gt/intel_gt_irq.h" 47 #include "gt/intel_gt_pm_irq.h" 48 #include "gt/intel_rps.h" 49 50 #include "i915_drv.h" 51 #include "i915_irq.h" 52 #include "i915_trace.h" 53 #include "intel_pm.h" 54 55 /** 56 * DOC: interrupt handling 57 * 58 * These functions provide the basic support for enabling and disabling the 59 * interrupt handling support. There's a lot more functionality in i915_irq.c 60 * and related files, but that will be described in separate chapters. 61 */ 62 63 typedef bool (*long_pulse_detect_func)(enum hpd_pin pin, u32 val); 64 65 static const u32 hpd_ilk[HPD_NUM_PINS] = { 66 [HPD_PORT_A] = DE_DP_A_HOTPLUG, 67 }; 68 69 static const u32 hpd_ivb[HPD_NUM_PINS] = { 70 [HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB, 71 }; 72 73 static const u32 hpd_bdw[HPD_NUM_PINS] = { 74 [HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG, 75 }; 76 77 static const u32 hpd_ibx[HPD_NUM_PINS] = { 78 [HPD_CRT] = SDE_CRT_HOTPLUG, 79 [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG, 80 [HPD_PORT_B] = SDE_PORTB_HOTPLUG, 81 [HPD_PORT_C] = SDE_PORTC_HOTPLUG, 82 [HPD_PORT_D] = SDE_PORTD_HOTPLUG, 83 }; 84 85 static const u32 hpd_cpt[HPD_NUM_PINS] = { 86 [HPD_CRT] = SDE_CRT_HOTPLUG_CPT, 87 [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT, 88 [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT, 89 [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT, 90 [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT, 91 }; 92 93 static const u32 hpd_spt[HPD_NUM_PINS] = { 94 [HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT, 95 [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT, 96 [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT, 97 [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT, 98 [HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT, 99 }; 100 101 static const u32 hpd_mask_i915[HPD_NUM_PINS] = { 102 [HPD_CRT] = CRT_HOTPLUG_INT_EN, 103 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN, 104 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN, 105 [HPD_PORT_B] = PORTB_HOTPLUG_INT_EN, 106 [HPD_PORT_C] = PORTC_HOTPLUG_INT_EN, 107 [HPD_PORT_D] = PORTD_HOTPLUG_INT_EN, 108 }; 109 110 static const u32 hpd_status_g4x[HPD_NUM_PINS] = { 111 [HPD_CRT] = CRT_HOTPLUG_INT_STATUS, 112 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X, 113 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X, 114 [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS, 115 [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS, 116 [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS, 117 }; 118 119 static const u32 hpd_status_i915[HPD_NUM_PINS] = { 120 [HPD_CRT] = CRT_HOTPLUG_INT_STATUS, 121 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915, 122 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915, 123 [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS, 124 [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS, 125 [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS, 126 }; 127 128 static const u32 hpd_bxt[HPD_NUM_PINS] = { 129 [HPD_PORT_A] = BXT_DE_PORT_HP_DDIA, 130 [HPD_PORT_B] = BXT_DE_PORT_HP_DDIB, 131 [HPD_PORT_C] = BXT_DE_PORT_HP_DDIC, 132 }; 133 134 static const u32 hpd_gen11[HPD_NUM_PINS] = { 135 [HPD_PORT_C] = GEN11_TC1_HOTPLUG | GEN11_TBT1_HOTPLUG, 136 [HPD_PORT_D] = GEN11_TC2_HOTPLUG | GEN11_TBT2_HOTPLUG, 137 [HPD_PORT_E] = GEN11_TC3_HOTPLUG | GEN11_TBT3_HOTPLUG, 138 [HPD_PORT_F] = GEN11_TC4_HOTPLUG | GEN11_TBT4_HOTPLUG, 139 }; 140 141 static const u32 hpd_gen12[HPD_NUM_PINS] = { 142 [HPD_PORT_D] = GEN11_TC1_HOTPLUG | GEN11_TBT1_HOTPLUG, 143 [HPD_PORT_E] = GEN11_TC2_HOTPLUG | GEN11_TBT2_HOTPLUG, 144 [HPD_PORT_F] = GEN11_TC3_HOTPLUG | GEN11_TBT3_HOTPLUG, 145 [HPD_PORT_G] = GEN11_TC4_HOTPLUG | GEN11_TBT4_HOTPLUG, 146 [HPD_PORT_H] = GEN12_TC5_HOTPLUG | GEN12_TBT5_HOTPLUG, 147 [HPD_PORT_I] = GEN12_TC6_HOTPLUG | GEN12_TBT6_HOTPLUG, 148 }; 149 150 static const u32 hpd_icp[HPD_NUM_PINS] = { 151 [HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(PORT_A), 152 [HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(PORT_B), 153 [HPD_PORT_C] = SDE_TC_HOTPLUG_ICP(PORT_TC1), 154 [HPD_PORT_D] = SDE_TC_HOTPLUG_ICP(PORT_TC2), 155 [HPD_PORT_E] = SDE_TC_HOTPLUG_ICP(PORT_TC3), 156 [HPD_PORT_F] = SDE_TC_HOTPLUG_ICP(PORT_TC4), 157 }; 158 159 static const u32 hpd_tgp[HPD_NUM_PINS] = { 160 [HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(PORT_A), 161 [HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(PORT_B), 162 [HPD_PORT_C] = SDE_DDI_HOTPLUG_ICP(PORT_C), 163 [HPD_PORT_D] = SDE_TC_HOTPLUG_ICP(PORT_TC1), 164 [HPD_PORT_E] = SDE_TC_HOTPLUG_ICP(PORT_TC2), 165 [HPD_PORT_F] = SDE_TC_HOTPLUG_ICP(PORT_TC3), 166 [HPD_PORT_G] = SDE_TC_HOTPLUG_ICP(PORT_TC4), 167 [HPD_PORT_H] = SDE_TC_HOTPLUG_ICP(PORT_TC5), 168 [HPD_PORT_I] = SDE_TC_HOTPLUG_ICP(PORT_TC6), 169 }; 170 171 static void intel_hpd_init_pins(struct drm_i915_private *dev_priv) 172 { 173 struct i915_hotplug *hpd = &dev_priv->hotplug; 174 175 if (HAS_GMCH(dev_priv)) { 176 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || 177 IS_CHERRYVIEW(dev_priv)) 178 hpd->hpd = hpd_status_g4x; 179 else 180 hpd->hpd = hpd_status_i915; 181 return; 182 } 183 184 if (INTEL_GEN(dev_priv) >= 12) 185 hpd->hpd = hpd_gen12; 186 else if (INTEL_GEN(dev_priv) >= 11) 187 hpd->hpd = hpd_gen11; 188 else if (IS_GEN9_LP(dev_priv)) 189 hpd->hpd = hpd_bxt; 190 else if (INTEL_GEN(dev_priv) >= 8) 191 hpd->hpd = hpd_bdw; 192 else if (INTEL_GEN(dev_priv) >= 7) 193 hpd->hpd = hpd_ivb; 194 else 195 hpd->hpd = hpd_ilk; 196 197 if (!HAS_PCH_SPLIT(dev_priv) || HAS_PCH_NOP(dev_priv)) 198 return; 199 200 if (HAS_PCH_TGP(dev_priv) || HAS_PCH_JSP(dev_priv)) 201 hpd->pch_hpd = hpd_tgp; 202 else if (HAS_PCH_ICP(dev_priv) || HAS_PCH_MCC(dev_priv)) 203 hpd->pch_hpd = hpd_icp; 204 else if (HAS_PCH_CNP(dev_priv) || HAS_PCH_SPT(dev_priv)) 205 hpd->pch_hpd = hpd_spt; 206 else if (HAS_PCH_LPT(dev_priv) || HAS_PCH_CPT(dev_priv)) 207 hpd->pch_hpd = hpd_cpt; 208 else if (HAS_PCH_IBX(dev_priv)) 209 hpd->pch_hpd = hpd_ibx; 210 else 211 MISSING_CASE(INTEL_PCH_TYPE(dev_priv)); 212 } 213 214 static void 215 intel_handle_vblank(struct drm_i915_private *dev_priv, enum pipe pipe) 216 { 217 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); 218 219 drm_crtc_handle_vblank(&crtc->base); 220 } 221 222 void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr, 223 i915_reg_t iir, i915_reg_t ier) 224 { 225 intel_uncore_write(uncore, imr, 0xffffffff); 226 intel_uncore_posting_read(uncore, imr); 227 228 intel_uncore_write(uncore, ier, 0); 229 230 /* IIR can theoretically queue up two events. Be paranoid. */ 231 intel_uncore_write(uncore, iir, 0xffffffff); 232 intel_uncore_posting_read(uncore, iir); 233 intel_uncore_write(uncore, iir, 0xffffffff); 234 intel_uncore_posting_read(uncore, iir); 235 } 236 237 void gen2_irq_reset(struct intel_uncore *uncore) 238 { 239 intel_uncore_write16(uncore, GEN2_IMR, 0xffff); 240 intel_uncore_posting_read16(uncore, GEN2_IMR); 241 242 intel_uncore_write16(uncore, GEN2_IER, 0); 243 244 /* IIR can theoretically queue up two events. Be paranoid. */ 245 intel_uncore_write16(uncore, GEN2_IIR, 0xffff); 246 intel_uncore_posting_read16(uncore, GEN2_IIR); 247 intel_uncore_write16(uncore, GEN2_IIR, 0xffff); 248 intel_uncore_posting_read16(uncore, GEN2_IIR); 249 } 250 251 /* 252 * We should clear IMR at preinstall/uninstall, and just check at postinstall. 253 */ 254 static void gen3_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg) 255 { 256 u32 val = intel_uncore_read(uncore, reg); 257 258 if (val == 0) 259 return; 260 261 drm_WARN(&uncore->i915->drm, 1, 262 "Interrupt register 0x%x is not zero: 0x%08x\n", 263 i915_mmio_reg_offset(reg), val); 264 intel_uncore_write(uncore, reg, 0xffffffff); 265 intel_uncore_posting_read(uncore, reg); 266 intel_uncore_write(uncore, reg, 0xffffffff); 267 intel_uncore_posting_read(uncore, reg); 268 } 269 270 static void gen2_assert_iir_is_zero(struct intel_uncore *uncore) 271 { 272 u16 val = intel_uncore_read16(uncore, GEN2_IIR); 273 274 if (val == 0) 275 return; 276 277 drm_WARN(&uncore->i915->drm, 1, 278 "Interrupt register 0x%x is not zero: 0x%08x\n", 279 i915_mmio_reg_offset(GEN2_IIR), val); 280 intel_uncore_write16(uncore, GEN2_IIR, 0xffff); 281 intel_uncore_posting_read16(uncore, GEN2_IIR); 282 intel_uncore_write16(uncore, GEN2_IIR, 0xffff); 283 intel_uncore_posting_read16(uncore, GEN2_IIR); 284 } 285 286 void gen3_irq_init(struct intel_uncore *uncore, 287 i915_reg_t imr, u32 imr_val, 288 i915_reg_t ier, u32 ier_val, 289 i915_reg_t iir) 290 { 291 gen3_assert_iir_is_zero(uncore, iir); 292 293 intel_uncore_write(uncore, ier, ier_val); 294 intel_uncore_write(uncore, imr, imr_val); 295 intel_uncore_posting_read(uncore, imr); 296 } 297 298 void gen2_irq_init(struct intel_uncore *uncore, 299 u32 imr_val, u32 ier_val) 300 { 301 gen2_assert_iir_is_zero(uncore); 302 303 intel_uncore_write16(uncore, GEN2_IER, ier_val); 304 intel_uncore_write16(uncore, GEN2_IMR, imr_val); 305 intel_uncore_posting_read16(uncore, GEN2_IMR); 306 } 307 308 /* For display hotplug interrupt */ 309 static inline void 310 i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv, 311 u32 mask, 312 u32 bits) 313 { 314 u32 val; 315 316 lockdep_assert_held(&dev_priv->irq_lock); 317 drm_WARN_ON(&dev_priv->drm, bits & ~mask); 318 319 val = I915_READ(PORT_HOTPLUG_EN); 320 val &= ~mask; 321 val |= bits; 322 I915_WRITE(PORT_HOTPLUG_EN, val); 323 } 324 325 /** 326 * i915_hotplug_interrupt_update - update hotplug interrupt enable 327 * @dev_priv: driver private 328 * @mask: bits to update 329 * @bits: bits to enable 330 * NOTE: the HPD enable bits are modified both inside and outside 331 * of an interrupt context. To avoid that read-modify-write cycles 332 * interfer, these bits are protected by a spinlock. Since this 333 * function is usually not called from a context where the lock is 334 * held already, this function acquires the lock itself. A non-locking 335 * version is also available. 336 */ 337 void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv, 338 u32 mask, 339 u32 bits) 340 { 341 spin_lock_irq(&dev_priv->irq_lock); 342 i915_hotplug_interrupt_update_locked(dev_priv, mask, bits); 343 spin_unlock_irq(&dev_priv->irq_lock); 344 } 345 346 /** 347 * ilk_update_display_irq - update DEIMR 348 * @dev_priv: driver private 349 * @interrupt_mask: mask of interrupt bits to update 350 * @enabled_irq_mask: mask of interrupt bits to enable 351 */ 352 void ilk_update_display_irq(struct drm_i915_private *dev_priv, 353 u32 interrupt_mask, 354 u32 enabled_irq_mask) 355 { 356 u32 new_val; 357 358 lockdep_assert_held(&dev_priv->irq_lock); 359 360 drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask); 361 362 if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv))) 363 return; 364 365 new_val = dev_priv->irq_mask; 366 new_val &= ~interrupt_mask; 367 new_val |= (~enabled_irq_mask & interrupt_mask); 368 369 if (new_val != dev_priv->irq_mask) { 370 dev_priv->irq_mask = new_val; 371 I915_WRITE(DEIMR, dev_priv->irq_mask); 372 POSTING_READ(DEIMR); 373 } 374 } 375 376 /** 377 * bdw_update_port_irq - update DE port interrupt 378 * @dev_priv: driver private 379 * @interrupt_mask: mask of interrupt bits to update 380 * @enabled_irq_mask: mask of interrupt bits to enable 381 */ 382 static void bdw_update_port_irq(struct drm_i915_private *dev_priv, 383 u32 interrupt_mask, 384 u32 enabled_irq_mask) 385 { 386 u32 new_val; 387 u32 old_val; 388 389 lockdep_assert_held(&dev_priv->irq_lock); 390 391 drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask); 392 393 if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv))) 394 return; 395 396 old_val = I915_READ(GEN8_DE_PORT_IMR); 397 398 new_val = old_val; 399 new_val &= ~interrupt_mask; 400 new_val |= (~enabled_irq_mask & interrupt_mask); 401 402 if (new_val != old_val) { 403 I915_WRITE(GEN8_DE_PORT_IMR, new_val); 404 POSTING_READ(GEN8_DE_PORT_IMR); 405 } 406 } 407 408 /** 409 * bdw_update_pipe_irq - update DE pipe interrupt 410 * @dev_priv: driver private 411 * @pipe: pipe whose interrupt to update 412 * @interrupt_mask: mask of interrupt bits to update 413 * @enabled_irq_mask: mask of interrupt bits to enable 414 */ 415 void bdw_update_pipe_irq(struct drm_i915_private *dev_priv, 416 enum pipe pipe, 417 u32 interrupt_mask, 418 u32 enabled_irq_mask) 419 { 420 u32 new_val; 421 422 lockdep_assert_held(&dev_priv->irq_lock); 423 424 drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask); 425 426 if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv))) 427 return; 428 429 new_val = dev_priv->de_irq_mask[pipe]; 430 new_val &= ~interrupt_mask; 431 new_val |= (~enabled_irq_mask & interrupt_mask); 432 433 if (new_val != dev_priv->de_irq_mask[pipe]) { 434 dev_priv->de_irq_mask[pipe] = new_val; 435 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); 436 POSTING_READ(GEN8_DE_PIPE_IMR(pipe)); 437 } 438 } 439 440 /** 441 * ibx_display_interrupt_update - update SDEIMR 442 * @dev_priv: driver private 443 * @interrupt_mask: mask of interrupt bits to update 444 * @enabled_irq_mask: mask of interrupt bits to enable 445 */ 446 void ibx_display_interrupt_update(struct drm_i915_private *dev_priv, 447 u32 interrupt_mask, 448 u32 enabled_irq_mask) 449 { 450 u32 sdeimr = I915_READ(SDEIMR); 451 sdeimr &= ~interrupt_mask; 452 sdeimr |= (~enabled_irq_mask & interrupt_mask); 453 454 drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask); 455 456 lockdep_assert_held(&dev_priv->irq_lock); 457 458 if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv))) 459 return; 460 461 I915_WRITE(SDEIMR, sdeimr); 462 POSTING_READ(SDEIMR); 463 } 464 465 u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv, 466 enum pipe pipe) 467 { 468 u32 status_mask = dev_priv->pipestat_irq_mask[pipe]; 469 u32 enable_mask = status_mask << 16; 470 471 lockdep_assert_held(&dev_priv->irq_lock); 472 473 if (INTEL_GEN(dev_priv) < 5) 474 goto out; 475 476 /* 477 * On pipe A we don't support the PSR interrupt yet, 478 * on pipe B and C the same bit MBZ. 479 */ 480 if (drm_WARN_ON_ONCE(&dev_priv->drm, 481 status_mask & PIPE_A_PSR_STATUS_VLV)) 482 return 0; 483 /* 484 * On pipe B and C we don't support the PSR interrupt yet, on pipe 485 * A the same bit is for perf counters which we don't use either. 486 */ 487 if (drm_WARN_ON_ONCE(&dev_priv->drm, 488 status_mask & PIPE_B_PSR_STATUS_VLV)) 489 return 0; 490 491 enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS | 492 SPRITE0_FLIP_DONE_INT_EN_VLV | 493 SPRITE1_FLIP_DONE_INT_EN_VLV); 494 if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV) 495 enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV; 496 if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV) 497 enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV; 498 499 out: 500 drm_WARN_ONCE(&dev_priv->drm, 501 enable_mask & ~PIPESTAT_INT_ENABLE_MASK || 502 status_mask & ~PIPESTAT_INT_STATUS_MASK, 503 "pipe %c: enable_mask=0x%x, status_mask=0x%x\n", 504 pipe_name(pipe), enable_mask, status_mask); 505 506 return enable_mask; 507 } 508 509 void i915_enable_pipestat(struct drm_i915_private *dev_priv, 510 enum pipe pipe, u32 status_mask) 511 { 512 i915_reg_t reg = PIPESTAT(pipe); 513 u32 enable_mask; 514 515 drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK, 516 "pipe %c: status_mask=0x%x\n", 517 pipe_name(pipe), status_mask); 518 519 lockdep_assert_held(&dev_priv->irq_lock); 520 drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)); 521 522 if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == status_mask) 523 return; 524 525 dev_priv->pipestat_irq_mask[pipe] |= status_mask; 526 enable_mask = i915_pipestat_enable_mask(dev_priv, pipe); 527 528 I915_WRITE(reg, enable_mask | status_mask); 529 POSTING_READ(reg); 530 } 531 532 void i915_disable_pipestat(struct drm_i915_private *dev_priv, 533 enum pipe pipe, u32 status_mask) 534 { 535 i915_reg_t reg = PIPESTAT(pipe); 536 u32 enable_mask; 537 538 drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK, 539 "pipe %c: status_mask=0x%x\n", 540 pipe_name(pipe), status_mask); 541 542 lockdep_assert_held(&dev_priv->irq_lock); 543 drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)); 544 545 if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == 0) 546 return; 547 548 dev_priv->pipestat_irq_mask[pipe] &= ~status_mask; 549 enable_mask = i915_pipestat_enable_mask(dev_priv, pipe); 550 551 I915_WRITE(reg, enable_mask | status_mask); 552 POSTING_READ(reg); 553 } 554 555 static bool i915_has_asle(struct drm_i915_private *dev_priv) 556 { 557 if (!dev_priv->opregion.asle) 558 return false; 559 560 return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv); 561 } 562 563 /** 564 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion 565 * @dev_priv: i915 device private 566 */ 567 static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv) 568 { 569 if (!i915_has_asle(dev_priv)) 570 return; 571 572 spin_lock_irq(&dev_priv->irq_lock); 573 574 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS); 575 if (INTEL_GEN(dev_priv) >= 4) 576 i915_enable_pipestat(dev_priv, PIPE_A, 577 PIPE_LEGACY_BLC_EVENT_STATUS); 578 579 spin_unlock_irq(&dev_priv->irq_lock); 580 } 581 582 /* 583 * This timing diagram depicts the video signal in and 584 * around the vertical blanking period. 585 * 586 * Assumptions about the fictitious mode used in this example: 587 * vblank_start >= 3 588 * vsync_start = vblank_start + 1 589 * vsync_end = vblank_start + 2 590 * vtotal = vblank_start + 3 591 * 592 * start of vblank: 593 * latch double buffered registers 594 * increment frame counter (ctg+) 595 * generate start of vblank interrupt (gen4+) 596 * | 597 * | frame start: 598 * | generate frame start interrupt (aka. vblank interrupt) (gmch) 599 * | may be shifted forward 1-3 extra lines via PIPECONF 600 * | | 601 * | | start of vsync: 602 * | | generate vsync interrupt 603 * | | | 604 * ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx 605 * . \hs/ . \hs/ \hs/ \hs/ . \hs/ 606 * ----va---> <-----------------vb--------------------> <--------va------------- 607 * | | <----vs-----> | 608 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2) 609 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+) 610 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi) 611 * | | | 612 * last visible pixel first visible pixel 613 * | increment frame counter (gen3/4) 614 * pixel counter = vblank_start * htotal pixel counter = 0 (gen3/4) 615 * 616 * x = horizontal active 617 * _ = horizontal blanking 618 * hs = horizontal sync 619 * va = vertical active 620 * vb = vertical blanking 621 * vs = vertical sync 622 * vbs = vblank_start (number) 623 * 624 * Summary: 625 * - most events happen at the start of horizontal sync 626 * - frame start happens at the start of horizontal blank, 1-4 lines 627 * (depending on PIPECONF settings) after the start of vblank 628 * - gen3/4 pixel and frame counter are synchronized with the start 629 * of horizontal active on the first line of vertical active 630 */ 631 632 /* Called from drm generic code, passed a 'crtc', which 633 * we use as a pipe index 634 */ 635 u32 i915_get_vblank_counter(struct drm_crtc *crtc) 636 { 637 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 638 struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[drm_crtc_index(crtc)]; 639 const struct drm_display_mode *mode = &vblank->hwmode; 640 enum pipe pipe = to_intel_crtc(crtc)->pipe; 641 i915_reg_t high_frame, low_frame; 642 u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal; 643 unsigned long irqflags; 644 645 /* 646 * On i965gm TV output the frame counter only works up to 647 * the point when we enable the TV encoder. After that the 648 * frame counter ceases to work and reads zero. We need a 649 * vblank wait before enabling the TV encoder and so we 650 * have to enable vblank interrupts while the frame counter 651 * is still in a working state. However the core vblank code 652 * does not like us returning non-zero frame counter values 653 * when we've told it that we don't have a working frame 654 * counter. Thus we must stop non-zero values leaking out. 655 */ 656 if (!vblank->max_vblank_count) 657 return 0; 658 659 htotal = mode->crtc_htotal; 660 hsync_start = mode->crtc_hsync_start; 661 vbl_start = mode->crtc_vblank_start; 662 if (mode->flags & DRM_MODE_FLAG_INTERLACE) 663 vbl_start = DIV_ROUND_UP(vbl_start, 2); 664 665 /* Convert to pixel count */ 666 vbl_start *= htotal; 667 668 /* Start of vblank event occurs at start of hsync */ 669 vbl_start -= htotal - hsync_start; 670 671 high_frame = PIPEFRAME(pipe); 672 low_frame = PIPEFRAMEPIXEL(pipe); 673 674 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 675 676 /* 677 * High & low register fields aren't synchronized, so make sure 678 * we get a low value that's stable across two reads of the high 679 * register. 680 */ 681 do { 682 high1 = intel_de_read_fw(dev_priv, high_frame) & PIPE_FRAME_HIGH_MASK; 683 low = intel_de_read_fw(dev_priv, low_frame); 684 high2 = intel_de_read_fw(dev_priv, high_frame) & PIPE_FRAME_HIGH_MASK; 685 } while (high1 != high2); 686 687 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 688 689 high1 >>= PIPE_FRAME_HIGH_SHIFT; 690 pixel = low & PIPE_PIXEL_MASK; 691 low >>= PIPE_FRAME_LOW_SHIFT; 692 693 /* 694 * The frame counter increments at beginning of active. 695 * Cook up a vblank counter by also checking the pixel 696 * counter against vblank start. 697 */ 698 return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff; 699 } 700 701 u32 g4x_get_vblank_counter(struct drm_crtc *crtc) 702 { 703 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 704 enum pipe pipe = to_intel_crtc(crtc)->pipe; 705 706 return I915_READ(PIPE_FRMCOUNT_G4X(pipe)); 707 } 708 709 /* 710 * On certain encoders on certain platforms, pipe 711 * scanline register will not work to get the scanline, 712 * since the timings are driven from the PORT or issues 713 * with scanline register updates. 714 * This function will use Framestamp and current 715 * timestamp registers to calculate the scanline. 716 */ 717 static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc) 718 { 719 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 720 struct drm_vblank_crtc *vblank = 721 &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)]; 722 const struct drm_display_mode *mode = &vblank->hwmode; 723 u32 vblank_start = mode->crtc_vblank_start; 724 u32 vtotal = mode->crtc_vtotal; 725 u32 htotal = mode->crtc_htotal; 726 u32 clock = mode->crtc_clock; 727 u32 scanline, scan_prev_time, scan_curr_time, scan_post_time; 728 729 /* 730 * To avoid the race condition where we might cross into the 731 * next vblank just between the PIPE_FRMTMSTMP and TIMESTAMP_CTR 732 * reads. We make sure we read PIPE_FRMTMSTMP and TIMESTAMP_CTR 733 * during the same frame. 734 */ 735 do { 736 /* 737 * This field provides read back of the display 738 * pipe frame time stamp. The time stamp value 739 * is sampled at every start of vertical blank. 740 */ 741 scan_prev_time = intel_de_read_fw(dev_priv, 742 PIPE_FRMTMSTMP(crtc->pipe)); 743 744 /* 745 * The TIMESTAMP_CTR register has the current 746 * time stamp value. 747 */ 748 scan_curr_time = intel_de_read_fw(dev_priv, IVB_TIMESTAMP_CTR); 749 750 scan_post_time = intel_de_read_fw(dev_priv, 751 PIPE_FRMTMSTMP(crtc->pipe)); 752 } while (scan_post_time != scan_prev_time); 753 754 scanline = div_u64(mul_u32_u32(scan_curr_time - scan_prev_time, 755 clock), 1000 * htotal); 756 scanline = min(scanline, vtotal - 1); 757 scanline = (scanline + vblank_start) % vtotal; 758 759 return scanline; 760 } 761 762 /* 763 * intel_de_read_fw(), only for fast reads of display block, no need for 764 * forcewake etc. 765 */ 766 static int __intel_get_crtc_scanline(struct intel_crtc *crtc) 767 { 768 struct drm_device *dev = crtc->base.dev; 769 struct drm_i915_private *dev_priv = to_i915(dev); 770 const struct drm_display_mode *mode; 771 struct drm_vblank_crtc *vblank; 772 enum pipe pipe = crtc->pipe; 773 int position, vtotal; 774 775 if (!crtc->active) 776 return -1; 777 778 vblank = &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)]; 779 mode = &vblank->hwmode; 780 781 if (crtc->mode_flags & I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP) 782 return __intel_get_crtc_scanline_from_timestamp(crtc); 783 784 vtotal = mode->crtc_vtotal; 785 if (mode->flags & DRM_MODE_FLAG_INTERLACE) 786 vtotal /= 2; 787 788 if (IS_GEN(dev_priv, 2)) 789 position = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2; 790 else 791 position = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3; 792 793 /* 794 * On HSW, the DSL reg (0x70000) appears to return 0 if we 795 * read it just before the start of vblank. So try it again 796 * so we don't accidentally end up spanning a vblank frame 797 * increment, causing the pipe_update_end() code to squak at us. 798 * 799 * The nature of this problem means we can't simply check the ISR 800 * bit and return the vblank start value; nor can we use the scanline 801 * debug register in the transcoder as it appears to have the same 802 * problem. We may need to extend this to include other platforms, 803 * but so far testing only shows the problem on HSW. 804 */ 805 if (HAS_DDI(dev_priv) && !position) { 806 int i, temp; 807 808 for (i = 0; i < 100; i++) { 809 udelay(1); 810 temp = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3; 811 if (temp != position) { 812 position = temp; 813 break; 814 } 815 } 816 } 817 818 /* 819 * See update_scanline_offset() for the details on the 820 * scanline_offset adjustment. 821 */ 822 return (position + crtc->scanline_offset) % vtotal; 823 } 824 825 static bool i915_get_crtc_scanoutpos(struct drm_crtc *_crtc, 826 bool in_vblank_irq, 827 int *vpos, int *hpos, 828 ktime_t *stime, ktime_t *etime, 829 const struct drm_display_mode *mode) 830 { 831 struct drm_device *dev = _crtc->dev; 832 struct drm_i915_private *dev_priv = to_i915(dev); 833 struct intel_crtc *crtc = to_intel_crtc(_crtc); 834 enum pipe pipe = crtc->pipe; 835 int position; 836 int vbl_start, vbl_end, hsync_start, htotal, vtotal; 837 unsigned long irqflags; 838 bool use_scanline_counter = INTEL_GEN(dev_priv) >= 5 || 839 IS_G4X(dev_priv) || IS_GEN(dev_priv, 2) || 840 crtc->mode_flags & I915_MODE_FLAG_USE_SCANLINE_COUNTER; 841 842 if (drm_WARN_ON(&dev_priv->drm, !mode->crtc_clock)) { 843 drm_dbg(&dev_priv->drm, 844 "trying to get scanoutpos for disabled " 845 "pipe %c\n", pipe_name(pipe)); 846 return false; 847 } 848 849 htotal = mode->crtc_htotal; 850 hsync_start = mode->crtc_hsync_start; 851 vtotal = mode->crtc_vtotal; 852 vbl_start = mode->crtc_vblank_start; 853 vbl_end = mode->crtc_vblank_end; 854 855 if (mode->flags & DRM_MODE_FLAG_INTERLACE) { 856 vbl_start = DIV_ROUND_UP(vbl_start, 2); 857 vbl_end /= 2; 858 vtotal /= 2; 859 } 860 861 /* 862 * Lock uncore.lock, as we will do multiple timing critical raw 863 * register reads, potentially with preemption disabled, so the 864 * following code must not block on uncore.lock. 865 */ 866 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 867 868 /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */ 869 870 /* Get optional system timestamp before query. */ 871 if (stime) 872 *stime = ktime_get(); 873 874 if (use_scanline_counter) { 875 /* No obvious pixelcount register. Only query vertical 876 * scanout position from Display scan line register. 877 */ 878 position = __intel_get_crtc_scanline(crtc); 879 } else { 880 /* Have access to pixelcount since start of frame. 881 * We can split this into vertical and horizontal 882 * scanout position. 883 */ 884 position = (intel_de_read_fw(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT; 885 886 /* convert to pixel counts */ 887 vbl_start *= htotal; 888 vbl_end *= htotal; 889 vtotal *= htotal; 890 891 /* 892 * In interlaced modes, the pixel counter counts all pixels, 893 * so one field will have htotal more pixels. In order to avoid 894 * the reported position from jumping backwards when the pixel 895 * counter is beyond the length of the shorter field, just 896 * clamp the position the length of the shorter field. This 897 * matches how the scanline counter based position works since 898 * the scanline counter doesn't count the two half lines. 899 */ 900 if (position >= vtotal) 901 position = vtotal - 1; 902 903 /* 904 * Start of vblank interrupt is triggered at start of hsync, 905 * just prior to the first active line of vblank. However we 906 * consider lines to start at the leading edge of horizontal 907 * active. So, should we get here before we've crossed into 908 * the horizontal active of the first line in vblank, we would 909 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that, 910 * always add htotal-hsync_start to the current pixel position. 911 */ 912 position = (position + htotal - hsync_start) % vtotal; 913 } 914 915 /* Get optional system timestamp after query. */ 916 if (etime) 917 *etime = ktime_get(); 918 919 /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */ 920 921 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 922 923 /* 924 * While in vblank, position will be negative 925 * counting up towards 0 at vbl_end. And outside 926 * vblank, position will be positive counting 927 * up since vbl_end. 928 */ 929 if (position >= vbl_start) 930 position -= vbl_end; 931 else 932 position += vtotal - vbl_end; 933 934 if (use_scanline_counter) { 935 *vpos = position; 936 *hpos = 0; 937 } else { 938 *vpos = position / htotal; 939 *hpos = position - (*vpos * htotal); 940 } 941 942 return true; 943 } 944 945 bool intel_crtc_get_vblank_timestamp(struct drm_crtc *crtc, int *max_error, 946 ktime_t *vblank_time, bool in_vblank_irq) 947 { 948 return drm_crtc_vblank_helper_get_vblank_timestamp_internal( 949 crtc, max_error, vblank_time, in_vblank_irq, 950 i915_get_crtc_scanoutpos); 951 } 952 953 int intel_get_crtc_scanline(struct intel_crtc *crtc) 954 { 955 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 956 unsigned long irqflags; 957 int position; 958 959 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 960 position = __intel_get_crtc_scanline(crtc); 961 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 962 963 return position; 964 } 965 966 /** 967 * ivb_parity_work - Workqueue called when a parity error interrupt 968 * occurred. 969 * @work: workqueue struct 970 * 971 * Doesn't actually do anything except notify userspace. As a consequence of 972 * this event, userspace should try to remap the bad rows since statistically 973 * it is likely the same row is more likely to go bad again. 974 */ 975 static void ivb_parity_work(struct work_struct *work) 976 { 977 struct drm_i915_private *dev_priv = 978 container_of(work, typeof(*dev_priv), l3_parity.error_work); 979 struct intel_gt *gt = &dev_priv->gt; 980 u32 error_status, row, bank, subbank; 981 char *parity_event[6]; 982 u32 misccpctl; 983 u8 slice = 0; 984 985 /* We must turn off DOP level clock gating to access the L3 registers. 986 * In order to prevent a get/put style interface, acquire struct mutex 987 * any time we access those registers. 988 */ 989 mutex_lock(&dev_priv->drm.struct_mutex); 990 991 /* If we've screwed up tracking, just let the interrupt fire again */ 992 if (drm_WARN_ON(&dev_priv->drm, !dev_priv->l3_parity.which_slice)) 993 goto out; 994 995 misccpctl = I915_READ(GEN7_MISCCPCTL); 996 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE); 997 POSTING_READ(GEN7_MISCCPCTL); 998 999 while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) { 1000 i915_reg_t reg; 1001 1002 slice--; 1003 if (drm_WARN_ON_ONCE(&dev_priv->drm, 1004 slice >= NUM_L3_SLICES(dev_priv))) 1005 break; 1006 1007 dev_priv->l3_parity.which_slice &= ~(1<<slice); 1008 1009 reg = GEN7_L3CDERRST1(slice); 1010 1011 error_status = I915_READ(reg); 1012 row = GEN7_PARITY_ERROR_ROW(error_status); 1013 bank = GEN7_PARITY_ERROR_BANK(error_status); 1014 subbank = GEN7_PARITY_ERROR_SUBBANK(error_status); 1015 1016 I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE); 1017 POSTING_READ(reg); 1018 1019 parity_event[0] = I915_L3_PARITY_UEVENT "=1"; 1020 parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row); 1021 parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank); 1022 parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank); 1023 parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice); 1024 parity_event[5] = NULL; 1025 1026 kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj, 1027 KOBJ_CHANGE, parity_event); 1028 1029 DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n", 1030 slice, row, bank, subbank); 1031 1032 kfree(parity_event[4]); 1033 kfree(parity_event[3]); 1034 kfree(parity_event[2]); 1035 kfree(parity_event[1]); 1036 } 1037 1038 I915_WRITE(GEN7_MISCCPCTL, misccpctl); 1039 1040 out: 1041 drm_WARN_ON(&dev_priv->drm, dev_priv->l3_parity.which_slice); 1042 spin_lock_irq(>->irq_lock); 1043 gen5_gt_enable_irq(gt, GT_PARITY_ERROR(dev_priv)); 1044 spin_unlock_irq(>->irq_lock); 1045 1046 mutex_unlock(&dev_priv->drm.struct_mutex); 1047 } 1048 1049 static bool gen11_port_hotplug_long_detect(enum hpd_pin pin, u32 val) 1050 { 1051 switch (pin) { 1052 case HPD_PORT_C: 1053 return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC1); 1054 case HPD_PORT_D: 1055 return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC2); 1056 case HPD_PORT_E: 1057 return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC3); 1058 case HPD_PORT_F: 1059 return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC4); 1060 default: 1061 return false; 1062 } 1063 } 1064 1065 static bool gen12_port_hotplug_long_detect(enum hpd_pin pin, u32 val) 1066 { 1067 switch (pin) { 1068 case HPD_PORT_D: 1069 return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC1); 1070 case HPD_PORT_E: 1071 return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC2); 1072 case HPD_PORT_F: 1073 return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC3); 1074 case HPD_PORT_G: 1075 return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC4); 1076 case HPD_PORT_H: 1077 return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC5); 1078 case HPD_PORT_I: 1079 return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC6); 1080 default: 1081 return false; 1082 } 1083 } 1084 1085 static bool bxt_port_hotplug_long_detect(enum hpd_pin pin, u32 val) 1086 { 1087 switch (pin) { 1088 case HPD_PORT_A: 1089 return val & PORTA_HOTPLUG_LONG_DETECT; 1090 case HPD_PORT_B: 1091 return val & PORTB_HOTPLUG_LONG_DETECT; 1092 case HPD_PORT_C: 1093 return val & PORTC_HOTPLUG_LONG_DETECT; 1094 default: 1095 return false; 1096 } 1097 } 1098 1099 static bool icp_ddi_port_hotplug_long_detect(enum hpd_pin pin, u32 val) 1100 { 1101 switch (pin) { 1102 case HPD_PORT_A: 1103 return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(PORT_A); 1104 case HPD_PORT_B: 1105 return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(PORT_B); 1106 case HPD_PORT_C: 1107 return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(PORT_C); 1108 default: 1109 return false; 1110 } 1111 } 1112 1113 static bool icp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val) 1114 { 1115 switch (pin) { 1116 case HPD_PORT_C: 1117 return val & ICP_TC_HPD_LONG_DETECT(PORT_TC1); 1118 case HPD_PORT_D: 1119 return val & ICP_TC_HPD_LONG_DETECT(PORT_TC2); 1120 case HPD_PORT_E: 1121 return val & ICP_TC_HPD_LONG_DETECT(PORT_TC3); 1122 case HPD_PORT_F: 1123 return val & ICP_TC_HPD_LONG_DETECT(PORT_TC4); 1124 default: 1125 return false; 1126 } 1127 } 1128 1129 static bool tgp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val) 1130 { 1131 switch (pin) { 1132 case HPD_PORT_D: 1133 return val & ICP_TC_HPD_LONG_DETECT(PORT_TC1); 1134 case HPD_PORT_E: 1135 return val & ICP_TC_HPD_LONG_DETECT(PORT_TC2); 1136 case HPD_PORT_F: 1137 return val & ICP_TC_HPD_LONG_DETECT(PORT_TC3); 1138 case HPD_PORT_G: 1139 return val & ICP_TC_HPD_LONG_DETECT(PORT_TC4); 1140 case HPD_PORT_H: 1141 return val & ICP_TC_HPD_LONG_DETECT(PORT_TC5); 1142 case HPD_PORT_I: 1143 return val & ICP_TC_HPD_LONG_DETECT(PORT_TC6); 1144 default: 1145 return false; 1146 } 1147 } 1148 1149 static bool spt_port_hotplug2_long_detect(enum hpd_pin pin, u32 val) 1150 { 1151 switch (pin) { 1152 case HPD_PORT_E: 1153 return val & PORTE_HOTPLUG_LONG_DETECT; 1154 default: 1155 return false; 1156 } 1157 } 1158 1159 static bool spt_port_hotplug_long_detect(enum hpd_pin pin, u32 val) 1160 { 1161 switch (pin) { 1162 case HPD_PORT_A: 1163 return val & PORTA_HOTPLUG_LONG_DETECT; 1164 case HPD_PORT_B: 1165 return val & PORTB_HOTPLUG_LONG_DETECT; 1166 case HPD_PORT_C: 1167 return val & PORTC_HOTPLUG_LONG_DETECT; 1168 case HPD_PORT_D: 1169 return val & PORTD_HOTPLUG_LONG_DETECT; 1170 default: 1171 return false; 1172 } 1173 } 1174 1175 static bool ilk_port_hotplug_long_detect(enum hpd_pin pin, u32 val) 1176 { 1177 switch (pin) { 1178 case HPD_PORT_A: 1179 return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT; 1180 default: 1181 return false; 1182 } 1183 } 1184 1185 static bool pch_port_hotplug_long_detect(enum hpd_pin pin, u32 val) 1186 { 1187 switch (pin) { 1188 case HPD_PORT_B: 1189 return val & PORTB_HOTPLUG_LONG_DETECT; 1190 case HPD_PORT_C: 1191 return val & PORTC_HOTPLUG_LONG_DETECT; 1192 case HPD_PORT_D: 1193 return val & PORTD_HOTPLUG_LONG_DETECT; 1194 default: 1195 return false; 1196 } 1197 } 1198 1199 static bool i9xx_port_hotplug_long_detect(enum hpd_pin pin, u32 val) 1200 { 1201 switch (pin) { 1202 case HPD_PORT_B: 1203 return val & PORTB_HOTPLUG_INT_LONG_PULSE; 1204 case HPD_PORT_C: 1205 return val & PORTC_HOTPLUG_INT_LONG_PULSE; 1206 case HPD_PORT_D: 1207 return val & PORTD_HOTPLUG_INT_LONG_PULSE; 1208 default: 1209 return false; 1210 } 1211 } 1212 1213 /* 1214 * Get a bit mask of pins that have triggered, and which ones may be long. 1215 * This can be called multiple times with the same masks to accumulate 1216 * hotplug detection results from several registers. 1217 * 1218 * Note that the caller is expected to zero out the masks initially. 1219 */ 1220 static void intel_get_hpd_pins(struct drm_i915_private *dev_priv, 1221 u32 *pin_mask, u32 *long_mask, 1222 u32 hotplug_trigger, u32 dig_hotplug_reg, 1223 const u32 hpd[HPD_NUM_PINS], 1224 bool long_pulse_detect(enum hpd_pin pin, u32 val)) 1225 { 1226 enum hpd_pin pin; 1227 1228 BUILD_BUG_ON(BITS_PER_TYPE(*pin_mask) < HPD_NUM_PINS); 1229 1230 for_each_hpd_pin(pin) { 1231 if ((hpd[pin] & hotplug_trigger) == 0) 1232 continue; 1233 1234 *pin_mask |= BIT(pin); 1235 1236 if (long_pulse_detect(pin, dig_hotplug_reg)) 1237 *long_mask |= BIT(pin); 1238 } 1239 1240 drm_dbg(&dev_priv->drm, 1241 "hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x, long 0x%08x\n", 1242 hotplug_trigger, dig_hotplug_reg, *pin_mask, *long_mask); 1243 1244 } 1245 1246 static void gmbus_irq_handler(struct drm_i915_private *dev_priv) 1247 { 1248 wake_up_all(&dev_priv->gmbus_wait_queue); 1249 } 1250 1251 static void dp_aux_irq_handler(struct drm_i915_private *dev_priv) 1252 { 1253 wake_up_all(&dev_priv->gmbus_wait_queue); 1254 } 1255 1256 #if defined(CONFIG_DEBUG_FS) 1257 static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 1258 enum pipe pipe, 1259 u32 crc0, u32 crc1, 1260 u32 crc2, u32 crc3, 1261 u32 crc4) 1262 { 1263 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); 1264 struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc; 1265 u32 crcs[5] = { crc0, crc1, crc2, crc3, crc4 }; 1266 1267 trace_intel_pipe_crc(crtc, crcs); 1268 1269 spin_lock(&pipe_crc->lock); 1270 /* 1271 * For some not yet identified reason, the first CRC is 1272 * bonkers. So let's just wait for the next vblank and read 1273 * out the buggy result. 1274 * 1275 * On GEN8+ sometimes the second CRC is bonkers as well, so 1276 * don't trust that one either. 1277 */ 1278 if (pipe_crc->skipped <= 0 || 1279 (INTEL_GEN(dev_priv) >= 8 && pipe_crc->skipped == 1)) { 1280 pipe_crc->skipped++; 1281 spin_unlock(&pipe_crc->lock); 1282 return; 1283 } 1284 spin_unlock(&pipe_crc->lock); 1285 1286 drm_crtc_add_crc_entry(&crtc->base, true, 1287 drm_crtc_accurate_vblank_count(&crtc->base), 1288 crcs); 1289 } 1290 #else 1291 static inline void 1292 display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 1293 enum pipe pipe, 1294 u32 crc0, u32 crc1, 1295 u32 crc2, u32 crc3, 1296 u32 crc4) {} 1297 #endif 1298 1299 1300 static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 1301 enum pipe pipe) 1302 { 1303 display_pipe_crc_irq_handler(dev_priv, pipe, 1304 I915_READ(PIPE_CRC_RES_1_IVB(pipe)), 1305 0, 0, 0, 0); 1306 } 1307 1308 static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 1309 enum pipe pipe) 1310 { 1311 display_pipe_crc_irq_handler(dev_priv, pipe, 1312 I915_READ(PIPE_CRC_RES_1_IVB(pipe)), 1313 I915_READ(PIPE_CRC_RES_2_IVB(pipe)), 1314 I915_READ(PIPE_CRC_RES_3_IVB(pipe)), 1315 I915_READ(PIPE_CRC_RES_4_IVB(pipe)), 1316 I915_READ(PIPE_CRC_RES_5_IVB(pipe))); 1317 } 1318 1319 static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 1320 enum pipe pipe) 1321 { 1322 u32 res1, res2; 1323 1324 if (INTEL_GEN(dev_priv) >= 3) 1325 res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe)); 1326 else 1327 res1 = 0; 1328 1329 if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) 1330 res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe)); 1331 else 1332 res2 = 0; 1333 1334 display_pipe_crc_irq_handler(dev_priv, pipe, 1335 I915_READ(PIPE_CRC_RES_RED(pipe)), 1336 I915_READ(PIPE_CRC_RES_GREEN(pipe)), 1337 I915_READ(PIPE_CRC_RES_BLUE(pipe)), 1338 res1, res2); 1339 } 1340 1341 static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv) 1342 { 1343 enum pipe pipe; 1344 1345 for_each_pipe(dev_priv, pipe) { 1346 I915_WRITE(PIPESTAT(pipe), 1347 PIPESTAT_INT_STATUS_MASK | 1348 PIPE_FIFO_UNDERRUN_STATUS); 1349 1350 dev_priv->pipestat_irq_mask[pipe] = 0; 1351 } 1352 } 1353 1354 static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv, 1355 u32 iir, u32 pipe_stats[I915_MAX_PIPES]) 1356 { 1357 enum pipe pipe; 1358 1359 spin_lock(&dev_priv->irq_lock); 1360 1361 if (!dev_priv->display_irqs_enabled) { 1362 spin_unlock(&dev_priv->irq_lock); 1363 return; 1364 } 1365 1366 for_each_pipe(dev_priv, pipe) { 1367 i915_reg_t reg; 1368 u32 status_mask, enable_mask, iir_bit = 0; 1369 1370 /* 1371 * PIPESTAT bits get signalled even when the interrupt is 1372 * disabled with the mask bits, and some of the status bits do 1373 * not generate interrupts at all (like the underrun bit). Hence 1374 * we need to be careful that we only handle what we want to 1375 * handle. 1376 */ 1377 1378 /* fifo underruns are filterered in the underrun handler. */ 1379 status_mask = PIPE_FIFO_UNDERRUN_STATUS; 1380 1381 switch (pipe) { 1382 default: 1383 case PIPE_A: 1384 iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT; 1385 break; 1386 case PIPE_B: 1387 iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; 1388 break; 1389 case PIPE_C: 1390 iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT; 1391 break; 1392 } 1393 if (iir & iir_bit) 1394 status_mask |= dev_priv->pipestat_irq_mask[pipe]; 1395 1396 if (!status_mask) 1397 continue; 1398 1399 reg = PIPESTAT(pipe); 1400 pipe_stats[pipe] = I915_READ(reg) & status_mask; 1401 enable_mask = i915_pipestat_enable_mask(dev_priv, pipe); 1402 1403 /* 1404 * Clear the PIPE*STAT regs before the IIR 1405 * 1406 * Toggle the enable bits to make sure we get an 1407 * edge in the ISR pipe event bit if we don't clear 1408 * all the enabled status bits. Otherwise the edge 1409 * triggered IIR on i965/g4x wouldn't notice that 1410 * an interrupt is still pending. 1411 */ 1412 if (pipe_stats[pipe]) { 1413 I915_WRITE(reg, pipe_stats[pipe]); 1414 I915_WRITE(reg, enable_mask); 1415 } 1416 } 1417 spin_unlock(&dev_priv->irq_lock); 1418 } 1419 1420 static void i8xx_pipestat_irq_handler(struct drm_i915_private *dev_priv, 1421 u16 iir, u32 pipe_stats[I915_MAX_PIPES]) 1422 { 1423 enum pipe pipe; 1424 1425 for_each_pipe(dev_priv, pipe) { 1426 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS) 1427 intel_handle_vblank(dev_priv, pipe); 1428 1429 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 1430 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 1431 1432 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 1433 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 1434 } 1435 } 1436 1437 static void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv, 1438 u32 iir, u32 pipe_stats[I915_MAX_PIPES]) 1439 { 1440 bool blc_event = false; 1441 enum pipe pipe; 1442 1443 for_each_pipe(dev_priv, pipe) { 1444 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS) 1445 intel_handle_vblank(dev_priv, pipe); 1446 1447 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 1448 blc_event = true; 1449 1450 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 1451 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 1452 1453 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 1454 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 1455 } 1456 1457 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 1458 intel_opregion_asle_intr(dev_priv); 1459 } 1460 1461 static void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv, 1462 u32 iir, u32 pipe_stats[I915_MAX_PIPES]) 1463 { 1464 bool blc_event = false; 1465 enum pipe pipe; 1466 1467 for_each_pipe(dev_priv, pipe) { 1468 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS) 1469 intel_handle_vblank(dev_priv, pipe); 1470 1471 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 1472 blc_event = true; 1473 1474 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 1475 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 1476 1477 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 1478 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 1479 } 1480 1481 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 1482 intel_opregion_asle_intr(dev_priv); 1483 1484 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 1485 gmbus_irq_handler(dev_priv); 1486 } 1487 1488 static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv, 1489 u32 pipe_stats[I915_MAX_PIPES]) 1490 { 1491 enum pipe pipe; 1492 1493 for_each_pipe(dev_priv, pipe) { 1494 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS) 1495 intel_handle_vblank(dev_priv, pipe); 1496 1497 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 1498 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 1499 1500 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 1501 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 1502 } 1503 1504 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 1505 gmbus_irq_handler(dev_priv); 1506 } 1507 1508 static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv) 1509 { 1510 u32 hotplug_status = 0, hotplug_status_mask; 1511 int i; 1512 1513 if (IS_G4X(dev_priv) || 1514 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 1515 hotplug_status_mask = HOTPLUG_INT_STATUS_G4X | 1516 DP_AUX_CHANNEL_MASK_INT_STATUS_G4X; 1517 else 1518 hotplug_status_mask = HOTPLUG_INT_STATUS_I915; 1519 1520 /* 1521 * We absolutely have to clear all the pending interrupt 1522 * bits in PORT_HOTPLUG_STAT. Otherwise the ISR port 1523 * interrupt bit won't have an edge, and the i965/g4x 1524 * edge triggered IIR will not notice that an interrupt 1525 * is still pending. We can't use PORT_HOTPLUG_EN to 1526 * guarantee the edge as the act of toggling the enable 1527 * bits can itself generate a new hotplug interrupt :( 1528 */ 1529 for (i = 0; i < 10; i++) { 1530 u32 tmp = I915_READ(PORT_HOTPLUG_STAT) & hotplug_status_mask; 1531 1532 if (tmp == 0) 1533 return hotplug_status; 1534 1535 hotplug_status |= tmp; 1536 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); 1537 } 1538 1539 drm_WARN_ONCE(&dev_priv->drm, 1, 1540 "PORT_HOTPLUG_STAT did not clear (0x%08x)\n", 1541 I915_READ(PORT_HOTPLUG_STAT)); 1542 1543 return hotplug_status; 1544 } 1545 1546 static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv, 1547 u32 hotplug_status) 1548 { 1549 u32 pin_mask = 0, long_mask = 0; 1550 u32 hotplug_trigger; 1551 1552 if (IS_G4X(dev_priv) || 1553 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 1554 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X; 1555 else 1556 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; 1557 1558 if (hotplug_trigger) { 1559 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 1560 hotplug_trigger, hotplug_trigger, 1561 dev_priv->hotplug.hpd, 1562 i9xx_port_hotplug_long_detect); 1563 1564 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 1565 } 1566 1567 if ((IS_G4X(dev_priv) || 1568 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && 1569 hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X) 1570 dp_aux_irq_handler(dev_priv); 1571 } 1572 1573 static irqreturn_t valleyview_irq_handler(int irq, void *arg) 1574 { 1575 struct drm_i915_private *dev_priv = arg; 1576 irqreturn_t ret = IRQ_NONE; 1577 1578 if (!intel_irqs_enabled(dev_priv)) 1579 return IRQ_NONE; 1580 1581 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 1582 disable_rpm_wakeref_asserts(&dev_priv->runtime_pm); 1583 1584 do { 1585 u32 iir, gt_iir, pm_iir; 1586 u32 pipe_stats[I915_MAX_PIPES] = {}; 1587 u32 hotplug_status = 0; 1588 u32 ier = 0; 1589 1590 gt_iir = I915_READ(GTIIR); 1591 pm_iir = I915_READ(GEN6_PMIIR); 1592 iir = I915_READ(VLV_IIR); 1593 1594 if (gt_iir == 0 && pm_iir == 0 && iir == 0) 1595 break; 1596 1597 ret = IRQ_HANDLED; 1598 1599 /* 1600 * Theory on interrupt generation, based on empirical evidence: 1601 * 1602 * x = ((VLV_IIR & VLV_IER) || 1603 * (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) && 1604 * (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE))); 1605 * 1606 * A CPU interrupt will only be raised when 'x' has a 0->1 edge. 1607 * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to 1608 * guarantee the CPU interrupt will be raised again even if we 1609 * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR 1610 * bits this time around. 1611 */ 1612 I915_WRITE(VLV_MASTER_IER, 0); 1613 ier = I915_READ(VLV_IER); 1614 I915_WRITE(VLV_IER, 0); 1615 1616 if (gt_iir) 1617 I915_WRITE(GTIIR, gt_iir); 1618 if (pm_iir) 1619 I915_WRITE(GEN6_PMIIR, pm_iir); 1620 1621 if (iir & I915_DISPLAY_PORT_INTERRUPT) 1622 hotplug_status = i9xx_hpd_irq_ack(dev_priv); 1623 1624 /* Call regardless, as some status bits might not be 1625 * signalled in iir */ 1626 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats); 1627 1628 if (iir & (I915_LPE_PIPE_A_INTERRUPT | 1629 I915_LPE_PIPE_B_INTERRUPT)) 1630 intel_lpe_audio_irq_handler(dev_priv); 1631 1632 /* 1633 * VLV_IIR is single buffered, and reflects the level 1634 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last. 1635 */ 1636 if (iir) 1637 I915_WRITE(VLV_IIR, iir); 1638 1639 I915_WRITE(VLV_IER, ier); 1640 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); 1641 1642 if (gt_iir) 1643 gen6_gt_irq_handler(&dev_priv->gt, gt_iir); 1644 if (pm_iir) 1645 gen6_rps_irq_handler(&dev_priv->gt.rps, pm_iir); 1646 1647 if (hotplug_status) 1648 i9xx_hpd_irq_handler(dev_priv, hotplug_status); 1649 1650 valleyview_pipestat_irq_handler(dev_priv, pipe_stats); 1651 } while (0); 1652 1653 enable_rpm_wakeref_asserts(&dev_priv->runtime_pm); 1654 1655 return ret; 1656 } 1657 1658 static irqreturn_t cherryview_irq_handler(int irq, void *arg) 1659 { 1660 struct drm_i915_private *dev_priv = arg; 1661 irqreturn_t ret = IRQ_NONE; 1662 1663 if (!intel_irqs_enabled(dev_priv)) 1664 return IRQ_NONE; 1665 1666 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 1667 disable_rpm_wakeref_asserts(&dev_priv->runtime_pm); 1668 1669 do { 1670 u32 master_ctl, iir; 1671 u32 pipe_stats[I915_MAX_PIPES] = {}; 1672 u32 hotplug_status = 0; 1673 u32 ier = 0; 1674 1675 master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL; 1676 iir = I915_READ(VLV_IIR); 1677 1678 if (master_ctl == 0 && iir == 0) 1679 break; 1680 1681 ret = IRQ_HANDLED; 1682 1683 /* 1684 * Theory on interrupt generation, based on empirical evidence: 1685 * 1686 * x = ((VLV_IIR & VLV_IER) || 1687 * ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) && 1688 * (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL))); 1689 * 1690 * A CPU interrupt will only be raised when 'x' has a 0->1 edge. 1691 * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to 1692 * guarantee the CPU interrupt will be raised again even if we 1693 * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL 1694 * bits this time around. 1695 */ 1696 I915_WRITE(GEN8_MASTER_IRQ, 0); 1697 ier = I915_READ(VLV_IER); 1698 I915_WRITE(VLV_IER, 0); 1699 1700 gen8_gt_irq_handler(&dev_priv->gt, master_ctl); 1701 1702 if (iir & I915_DISPLAY_PORT_INTERRUPT) 1703 hotplug_status = i9xx_hpd_irq_ack(dev_priv); 1704 1705 /* Call regardless, as some status bits might not be 1706 * signalled in iir */ 1707 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats); 1708 1709 if (iir & (I915_LPE_PIPE_A_INTERRUPT | 1710 I915_LPE_PIPE_B_INTERRUPT | 1711 I915_LPE_PIPE_C_INTERRUPT)) 1712 intel_lpe_audio_irq_handler(dev_priv); 1713 1714 /* 1715 * VLV_IIR is single buffered, and reflects the level 1716 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last. 1717 */ 1718 if (iir) 1719 I915_WRITE(VLV_IIR, iir); 1720 1721 I915_WRITE(VLV_IER, ier); 1722 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 1723 1724 if (hotplug_status) 1725 i9xx_hpd_irq_handler(dev_priv, hotplug_status); 1726 1727 valleyview_pipestat_irq_handler(dev_priv, pipe_stats); 1728 } while (0); 1729 1730 enable_rpm_wakeref_asserts(&dev_priv->runtime_pm); 1731 1732 return ret; 1733 } 1734 1735 static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv, 1736 u32 hotplug_trigger) 1737 { 1738 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; 1739 1740 /* 1741 * Somehow the PCH doesn't seem to really ack the interrupt to the CPU 1742 * unless we touch the hotplug register, even if hotplug_trigger is 1743 * zero. Not acking leads to "The master control interrupt lied (SDE)!" 1744 * errors. 1745 */ 1746 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); 1747 if (!hotplug_trigger) { 1748 u32 mask = PORTA_HOTPLUG_STATUS_MASK | 1749 PORTD_HOTPLUG_STATUS_MASK | 1750 PORTC_HOTPLUG_STATUS_MASK | 1751 PORTB_HOTPLUG_STATUS_MASK; 1752 dig_hotplug_reg &= ~mask; 1753 } 1754 1755 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); 1756 if (!hotplug_trigger) 1757 return; 1758 1759 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 1760 hotplug_trigger, dig_hotplug_reg, 1761 dev_priv->hotplug.pch_hpd, 1762 pch_port_hotplug_long_detect); 1763 1764 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 1765 } 1766 1767 static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) 1768 { 1769 enum pipe pipe; 1770 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK; 1771 1772 ibx_hpd_irq_handler(dev_priv, hotplug_trigger); 1773 1774 if (pch_iir & SDE_AUDIO_POWER_MASK) { 1775 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >> 1776 SDE_AUDIO_POWER_SHIFT); 1777 drm_dbg(&dev_priv->drm, "PCH audio power change on port %d\n", 1778 port_name(port)); 1779 } 1780 1781 if (pch_iir & SDE_AUX_MASK) 1782 dp_aux_irq_handler(dev_priv); 1783 1784 if (pch_iir & SDE_GMBUS) 1785 gmbus_irq_handler(dev_priv); 1786 1787 if (pch_iir & SDE_AUDIO_HDCP_MASK) 1788 drm_dbg(&dev_priv->drm, "PCH HDCP audio interrupt\n"); 1789 1790 if (pch_iir & SDE_AUDIO_TRANS_MASK) 1791 drm_dbg(&dev_priv->drm, "PCH transcoder audio interrupt\n"); 1792 1793 if (pch_iir & SDE_POISON) 1794 drm_err(&dev_priv->drm, "PCH poison interrupt\n"); 1795 1796 if (pch_iir & SDE_FDI_MASK) { 1797 for_each_pipe(dev_priv, pipe) 1798 drm_dbg(&dev_priv->drm, " pipe %c FDI IIR: 0x%08x\n", 1799 pipe_name(pipe), 1800 I915_READ(FDI_RX_IIR(pipe))); 1801 } 1802 1803 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE)) 1804 drm_dbg(&dev_priv->drm, "PCH transcoder CRC done interrupt\n"); 1805 1806 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR)) 1807 drm_dbg(&dev_priv->drm, 1808 "PCH transcoder CRC error interrupt\n"); 1809 1810 if (pch_iir & SDE_TRANSA_FIFO_UNDER) 1811 intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A); 1812 1813 if (pch_iir & SDE_TRANSB_FIFO_UNDER) 1814 intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_B); 1815 } 1816 1817 static void ivb_err_int_handler(struct drm_i915_private *dev_priv) 1818 { 1819 u32 err_int = I915_READ(GEN7_ERR_INT); 1820 enum pipe pipe; 1821 1822 if (err_int & ERR_INT_POISON) 1823 drm_err(&dev_priv->drm, "Poison interrupt\n"); 1824 1825 for_each_pipe(dev_priv, pipe) { 1826 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) 1827 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 1828 1829 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) { 1830 if (IS_IVYBRIDGE(dev_priv)) 1831 ivb_pipe_crc_irq_handler(dev_priv, pipe); 1832 else 1833 hsw_pipe_crc_irq_handler(dev_priv, pipe); 1834 } 1835 } 1836 1837 I915_WRITE(GEN7_ERR_INT, err_int); 1838 } 1839 1840 static void cpt_serr_int_handler(struct drm_i915_private *dev_priv) 1841 { 1842 u32 serr_int = I915_READ(SERR_INT); 1843 enum pipe pipe; 1844 1845 if (serr_int & SERR_INT_POISON) 1846 drm_err(&dev_priv->drm, "PCH poison interrupt\n"); 1847 1848 for_each_pipe(dev_priv, pipe) 1849 if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe)) 1850 intel_pch_fifo_underrun_irq_handler(dev_priv, pipe); 1851 1852 I915_WRITE(SERR_INT, serr_int); 1853 } 1854 1855 static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) 1856 { 1857 enum pipe pipe; 1858 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; 1859 1860 ibx_hpd_irq_handler(dev_priv, hotplug_trigger); 1861 1862 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) { 1863 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> 1864 SDE_AUDIO_POWER_SHIFT_CPT); 1865 drm_dbg(&dev_priv->drm, "PCH audio power change on port %c\n", 1866 port_name(port)); 1867 } 1868 1869 if (pch_iir & SDE_AUX_MASK_CPT) 1870 dp_aux_irq_handler(dev_priv); 1871 1872 if (pch_iir & SDE_GMBUS_CPT) 1873 gmbus_irq_handler(dev_priv); 1874 1875 if (pch_iir & SDE_AUDIO_CP_REQ_CPT) 1876 drm_dbg(&dev_priv->drm, "Audio CP request interrupt\n"); 1877 1878 if (pch_iir & SDE_AUDIO_CP_CHG_CPT) 1879 drm_dbg(&dev_priv->drm, "Audio CP change interrupt\n"); 1880 1881 if (pch_iir & SDE_FDI_MASK_CPT) { 1882 for_each_pipe(dev_priv, pipe) 1883 drm_dbg(&dev_priv->drm, " pipe %c FDI IIR: 0x%08x\n", 1884 pipe_name(pipe), 1885 I915_READ(FDI_RX_IIR(pipe))); 1886 } 1887 1888 if (pch_iir & SDE_ERROR_CPT) 1889 cpt_serr_int_handler(dev_priv); 1890 } 1891 1892 static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) 1893 { 1894 u32 ddi_hotplug_trigger, tc_hotplug_trigger; 1895 u32 pin_mask = 0, long_mask = 0; 1896 bool (*tc_port_hotplug_long_detect)(enum hpd_pin pin, u32 val); 1897 1898 if (HAS_PCH_TGP(dev_priv)) { 1899 ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_TGP; 1900 tc_hotplug_trigger = pch_iir & SDE_TC_MASK_TGP; 1901 tc_port_hotplug_long_detect = tgp_tc_port_hotplug_long_detect; 1902 } else if (HAS_PCH_JSP(dev_priv)) { 1903 ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_TGP; 1904 tc_hotplug_trigger = 0; 1905 } else if (HAS_PCH_MCC(dev_priv)) { 1906 ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_ICP; 1907 tc_hotplug_trigger = pch_iir & SDE_TC_HOTPLUG_ICP(PORT_TC1); 1908 tc_port_hotplug_long_detect = icp_tc_port_hotplug_long_detect; 1909 } else { 1910 drm_WARN(&dev_priv->drm, !HAS_PCH_ICP(dev_priv), 1911 "Unrecognized PCH type 0x%x\n", 1912 INTEL_PCH_TYPE(dev_priv)); 1913 1914 ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_ICP; 1915 tc_hotplug_trigger = pch_iir & SDE_TC_MASK_ICP; 1916 tc_port_hotplug_long_detect = icp_tc_port_hotplug_long_detect; 1917 } 1918 1919 if (ddi_hotplug_trigger) { 1920 u32 dig_hotplug_reg; 1921 1922 dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_DDI); 1923 I915_WRITE(SHOTPLUG_CTL_DDI, dig_hotplug_reg); 1924 1925 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 1926 ddi_hotplug_trigger, dig_hotplug_reg, 1927 dev_priv->hotplug.pch_hpd, 1928 icp_ddi_port_hotplug_long_detect); 1929 } 1930 1931 if (tc_hotplug_trigger) { 1932 u32 dig_hotplug_reg; 1933 1934 dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_TC); 1935 I915_WRITE(SHOTPLUG_CTL_TC, dig_hotplug_reg); 1936 1937 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 1938 tc_hotplug_trigger, dig_hotplug_reg, 1939 dev_priv->hotplug.pch_hpd, 1940 tc_port_hotplug_long_detect); 1941 } 1942 1943 if (pin_mask) 1944 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 1945 1946 if (pch_iir & SDE_GMBUS_ICP) 1947 gmbus_irq_handler(dev_priv); 1948 } 1949 1950 static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) 1951 { 1952 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT & 1953 ~SDE_PORTE_HOTPLUG_SPT; 1954 u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT; 1955 u32 pin_mask = 0, long_mask = 0; 1956 1957 if (hotplug_trigger) { 1958 u32 dig_hotplug_reg; 1959 1960 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); 1961 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); 1962 1963 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 1964 hotplug_trigger, dig_hotplug_reg, 1965 dev_priv->hotplug.pch_hpd, 1966 spt_port_hotplug_long_detect); 1967 } 1968 1969 if (hotplug2_trigger) { 1970 u32 dig_hotplug_reg; 1971 1972 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2); 1973 I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg); 1974 1975 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 1976 hotplug2_trigger, dig_hotplug_reg, 1977 dev_priv->hotplug.pch_hpd, 1978 spt_port_hotplug2_long_detect); 1979 } 1980 1981 if (pin_mask) 1982 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 1983 1984 if (pch_iir & SDE_GMBUS_CPT) 1985 gmbus_irq_handler(dev_priv); 1986 } 1987 1988 static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv, 1989 u32 hotplug_trigger) 1990 { 1991 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; 1992 1993 dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL); 1994 I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg); 1995 1996 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 1997 hotplug_trigger, dig_hotplug_reg, 1998 dev_priv->hotplug.hpd, 1999 ilk_port_hotplug_long_detect); 2000 2001 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2002 } 2003 2004 static void ilk_display_irq_handler(struct drm_i915_private *dev_priv, 2005 u32 de_iir) 2006 { 2007 enum pipe pipe; 2008 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG; 2009 2010 if (hotplug_trigger) 2011 ilk_hpd_irq_handler(dev_priv, hotplug_trigger); 2012 2013 if (de_iir & DE_AUX_CHANNEL_A) 2014 dp_aux_irq_handler(dev_priv); 2015 2016 if (de_iir & DE_GSE) 2017 intel_opregion_asle_intr(dev_priv); 2018 2019 if (de_iir & DE_POISON) 2020 drm_err(&dev_priv->drm, "Poison interrupt\n"); 2021 2022 for_each_pipe(dev_priv, pipe) { 2023 if (de_iir & DE_PIPE_VBLANK(pipe)) 2024 intel_handle_vblank(dev_priv, pipe); 2025 2026 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe)) 2027 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2028 2029 if (de_iir & DE_PIPE_CRC_DONE(pipe)) 2030 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 2031 } 2032 2033 /* check event from PCH */ 2034 if (de_iir & DE_PCH_EVENT) { 2035 u32 pch_iir = I915_READ(SDEIIR); 2036 2037 if (HAS_PCH_CPT(dev_priv)) 2038 cpt_irq_handler(dev_priv, pch_iir); 2039 else 2040 ibx_irq_handler(dev_priv, pch_iir); 2041 2042 /* should clear PCH hotplug event before clear CPU irq */ 2043 I915_WRITE(SDEIIR, pch_iir); 2044 } 2045 2046 if (IS_GEN(dev_priv, 5) && de_iir & DE_PCU_EVENT) 2047 gen5_rps_irq_handler(&dev_priv->gt.rps); 2048 } 2049 2050 static void ivb_display_irq_handler(struct drm_i915_private *dev_priv, 2051 u32 de_iir) 2052 { 2053 enum pipe pipe; 2054 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB; 2055 2056 if (hotplug_trigger) 2057 ilk_hpd_irq_handler(dev_priv, hotplug_trigger); 2058 2059 if (de_iir & DE_ERR_INT_IVB) 2060 ivb_err_int_handler(dev_priv); 2061 2062 if (de_iir & DE_EDP_PSR_INT_HSW) { 2063 u32 psr_iir = I915_READ(EDP_PSR_IIR); 2064 2065 intel_psr_irq_handler(dev_priv, psr_iir); 2066 I915_WRITE(EDP_PSR_IIR, psr_iir); 2067 } 2068 2069 if (de_iir & DE_AUX_CHANNEL_A_IVB) 2070 dp_aux_irq_handler(dev_priv); 2071 2072 if (de_iir & DE_GSE_IVB) 2073 intel_opregion_asle_intr(dev_priv); 2074 2075 for_each_pipe(dev_priv, pipe) { 2076 if (de_iir & (DE_PIPE_VBLANK_IVB(pipe))) 2077 intel_handle_vblank(dev_priv, pipe); 2078 } 2079 2080 /* check event from PCH */ 2081 if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) { 2082 u32 pch_iir = I915_READ(SDEIIR); 2083 2084 cpt_irq_handler(dev_priv, pch_iir); 2085 2086 /* clear PCH hotplug event before clear CPU irq */ 2087 I915_WRITE(SDEIIR, pch_iir); 2088 } 2089 } 2090 2091 /* 2092 * To handle irqs with the minimum potential races with fresh interrupts, we: 2093 * 1 - Disable Master Interrupt Control. 2094 * 2 - Find the source(s) of the interrupt. 2095 * 3 - Clear the Interrupt Identity bits (IIR). 2096 * 4 - Process the interrupt(s) that had bits set in the IIRs. 2097 * 5 - Re-enable Master Interrupt Control. 2098 */ 2099 static irqreturn_t ilk_irq_handler(int irq, void *arg) 2100 { 2101 struct drm_i915_private *i915 = arg; 2102 void __iomem * const regs = i915->uncore.regs; 2103 u32 de_iir, gt_iir, de_ier, sde_ier = 0; 2104 irqreturn_t ret = IRQ_NONE; 2105 2106 if (unlikely(!intel_irqs_enabled(i915))) 2107 return IRQ_NONE; 2108 2109 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2110 disable_rpm_wakeref_asserts(&i915->runtime_pm); 2111 2112 /* disable master interrupt before clearing iir */ 2113 de_ier = raw_reg_read(regs, DEIER); 2114 raw_reg_write(regs, DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); 2115 2116 /* Disable south interrupts. We'll only write to SDEIIR once, so further 2117 * interrupts will will be stored on its back queue, and then we'll be 2118 * able to process them after we restore SDEIER (as soon as we restore 2119 * it, we'll get an interrupt if SDEIIR still has something to process 2120 * due to its back queue). */ 2121 if (!HAS_PCH_NOP(i915)) { 2122 sde_ier = raw_reg_read(regs, SDEIER); 2123 raw_reg_write(regs, SDEIER, 0); 2124 } 2125 2126 /* Find, clear, then process each source of interrupt */ 2127 2128 gt_iir = raw_reg_read(regs, GTIIR); 2129 if (gt_iir) { 2130 raw_reg_write(regs, GTIIR, gt_iir); 2131 if (INTEL_GEN(i915) >= 6) 2132 gen6_gt_irq_handler(&i915->gt, gt_iir); 2133 else 2134 gen5_gt_irq_handler(&i915->gt, gt_iir); 2135 ret = IRQ_HANDLED; 2136 } 2137 2138 de_iir = raw_reg_read(regs, DEIIR); 2139 if (de_iir) { 2140 raw_reg_write(regs, DEIIR, de_iir); 2141 if (INTEL_GEN(i915) >= 7) 2142 ivb_display_irq_handler(i915, de_iir); 2143 else 2144 ilk_display_irq_handler(i915, de_iir); 2145 ret = IRQ_HANDLED; 2146 } 2147 2148 if (INTEL_GEN(i915) >= 6) { 2149 u32 pm_iir = raw_reg_read(regs, GEN6_PMIIR); 2150 if (pm_iir) { 2151 raw_reg_write(regs, GEN6_PMIIR, pm_iir); 2152 gen6_rps_irq_handler(&i915->gt.rps, pm_iir); 2153 ret = IRQ_HANDLED; 2154 } 2155 } 2156 2157 raw_reg_write(regs, DEIER, de_ier); 2158 if (sde_ier) 2159 raw_reg_write(regs, SDEIER, sde_ier); 2160 2161 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2162 enable_rpm_wakeref_asserts(&i915->runtime_pm); 2163 2164 return ret; 2165 } 2166 2167 static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv, 2168 u32 hotplug_trigger) 2169 { 2170 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; 2171 2172 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); 2173 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); 2174 2175 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 2176 hotplug_trigger, dig_hotplug_reg, 2177 dev_priv->hotplug.hpd, 2178 bxt_port_hotplug_long_detect); 2179 2180 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2181 } 2182 2183 static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir) 2184 { 2185 u32 pin_mask = 0, long_mask = 0; 2186 u32 trigger_tc = iir & GEN11_DE_TC_HOTPLUG_MASK; 2187 u32 trigger_tbt = iir & GEN11_DE_TBT_HOTPLUG_MASK; 2188 long_pulse_detect_func long_pulse_detect; 2189 2190 if (INTEL_GEN(dev_priv) >= 12) 2191 long_pulse_detect = gen12_port_hotplug_long_detect; 2192 else 2193 long_pulse_detect = gen11_port_hotplug_long_detect; 2194 2195 if (trigger_tc) { 2196 u32 dig_hotplug_reg; 2197 2198 dig_hotplug_reg = I915_READ(GEN11_TC_HOTPLUG_CTL); 2199 I915_WRITE(GEN11_TC_HOTPLUG_CTL, dig_hotplug_reg); 2200 2201 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 2202 trigger_tc, dig_hotplug_reg, 2203 dev_priv->hotplug.hpd, 2204 long_pulse_detect); 2205 } 2206 2207 if (trigger_tbt) { 2208 u32 dig_hotplug_reg; 2209 2210 dig_hotplug_reg = I915_READ(GEN11_TBT_HOTPLUG_CTL); 2211 I915_WRITE(GEN11_TBT_HOTPLUG_CTL, dig_hotplug_reg); 2212 2213 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 2214 trigger_tbt, dig_hotplug_reg, 2215 dev_priv->hotplug.hpd, 2216 long_pulse_detect); 2217 } 2218 2219 if (pin_mask) 2220 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2221 else 2222 drm_err(&dev_priv->drm, 2223 "Unexpected DE HPD interrupt 0x%08x\n", iir); 2224 } 2225 2226 static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv) 2227 { 2228 u32 mask; 2229 2230 if (INTEL_GEN(dev_priv) >= 12) 2231 return TGL_DE_PORT_AUX_DDIA | 2232 TGL_DE_PORT_AUX_DDIB | 2233 TGL_DE_PORT_AUX_DDIC | 2234 TGL_DE_PORT_AUX_USBC1 | 2235 TGL_DE_PORT_AUX_USBC2 | 2236 TGL_DE_PORT_AUX_USBC3 | 2237 TGL_DE_PORT_AUX_USBC4 | 2238 TGL_DE_PORT_AUX_USBC5 | 2239 TGL_DE_PORT_AUX_USBC6; 2240 2241 2242 mask = GEN8_AUX_CHANNEL_A; 2243 if (INTEL_GEN(dev_priv) >= 9) 2244 mask |= GEN9_AUX_CHANNEL_B | 2245 GEN9_AUX_CHANNEL_C | 2246 GEN9_AUX_CHANNEL_D; 2247 2248 if (IS_CNL_WITH_PORT_F(dev_priv) || IS_GEN(dev_priv, 11)) 2249 mask |= CNL_AUX_CHANNEL_F; 2250 2251 if (IS_GEN(dev_priv, 11)) 2252 mask |= ICL_AUX_CHANNEL_E; 2253 2254 return mask; 2255 } 2256 2257 static u32 gen8_de_pipe_fault_mask(struct drm_i915_private *dev_priv) 2258 { 2259 if (IS_ROCKETLAKE(dev_priv)) 2260 return RKL_DE_PIPE_IRQ_FAULT_ERRORS; 2261 else if (INTEL_GEN(dev_priv) >= 11) 2262 return GEN11_DE_PIPE_IRQ_FAULT_ERRORS; 2263 else if (INTEL_GEN(dev_priv) >= 9) 2264 return GEN9_DE_PIPE_IRQ_FAULT_ERRORS; 2265 else 2266 return GEN8_DE_PIPE_IRQ_FAULT_ERRORS; 2267 } 2268 2269 static void 2270 gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir) 2271 { 2272 bool found = false; 2273 2274 if (iir & GEN8_DE_MISC_GSE) { 2275 intel_opregion_asle_intr(dev_priv); 2276 found = true; 2277 } 2278 2279 if (iir & GEN8_DE_EDP_PSR) { 2280 u32 psr_iir; 2281 i915_reg_t iir_reg; 2282 2283 if (INTEL_GEN(dev_priv) >= 12) 2284 iir_reg = TRANS_PSR_IIR(dev_priv->psr.transcoder); 2285 else 2286 iir_reg = EDP_PSR_IIR; 2287 2288 psr_iir = I915_READ(iir_reg); 2289 I915_WRITE(iir_reg, psr_iir); 2290 2291 if (psr_iir) 2292 found = true; 2293 2294 intel_psr_irq_handler(dev_priv, psr_iir); 2295 } 2296 2297 if (!found) 2298 drm_err(&dev_priv->drm, "Unexpected DE Misc interrupt\n"); 2299 } 2300 2301 static irqreturn_t 2302 gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) 2303 { 2304 irqreturn_t ret = IRQ_NONE; 2305 u32 iir; 2306 enum pipe pipe; 2307 2308 if (master_ctl & GEN8_DE_MISC_IRQ) { 2309 iir = I915_READ(GEN8_DE_MISC_IIR); 2310 if (iir) { 2311 I915_WRITE(GEN8_DE_MISC_IIR, iir); 2312 ret = IRQ_HANDLED; 2313 gen8_de_misc_irq_handler(dev_priv, iir); 2314 } else { 2315 drm_err(&dev_priv->drm, 2316 "The master control interrupt lied (DE MISC)!\n"); 2317 } 2318 } 2319 2320 if (INTEL_GEN(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) { 2321 iir = I915_READ(GEN11_DE_HPD_IIR); 2322 if (iir) { 2323 I915_WRITE(GEN11_DE_HPD_IIR, iir); 2324 ret = IRQ_HANDLED; 2325 gen11_hpd_irq_handler(dev_priv, iir); 2326 } else { 2327 drm_err(&dev_priv->drm, 2328 "The master control interrupt lied, (DE HPD)!\n"); 2329 } 2330 } 2331 2332 if (master_ctl & GEN8_DE_PORT_IRQ) { 2333 iir = I915_READ(GEN8_DE_PORT_IIR); 2334 if (iir) { 2335 u32 tmp_mask; 2336 bool found = false; 2337 2338 I915_WRITE(GEN8_DE_PORT_IIR, iir); 2339 ret = IRQ_HANDLED; 2340 2341 if (iir & gen8_de_port_aux_mask(dev_priv)) { 2342 dp_aux_irq_handler(dev_priv); 2343 found = true; 2344 } 2345 2346 if (IS_GEN9_LP(dev_priv)) { 2347 tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK; 2348 if (tmp_mask) { 2349 bxt_hpd_irq_handler(dev_priv, tmp_mask); 2350 found = true; 2351 } 2352 } else if (IS_BROADWELL(dev_priv)) { 2353 tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG; 2354 if (tmp_mask) { 2355 ilk_hpd_irq_handler(dev_priv, tmp_mask); 2356 found = true; 2357 } 2358 } 2359 2360 if (IS_GEN9_LP(dev_priv) && (iir & BXT_DE_PORT_GMBUS)) { 2361 gmbus_irq_handler(dev_priv); 2362 found = true; 2363 } 2364 2365 if (!found) 2366 drm_err(&dev_priv->drm, 2367 "Unexpected DE Port interrupt\n"); 2368 } 2369 else 2370 drm_err(&dev_priv->drm, 2371 "The master control interrupt lied (DE PORT)!\n"); 2372 } 2373 2374 for_each_pipe(dev_priv, pipe) { 2375 u32 fault_errors; 2376 2377 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe))) 2378 continue; 2379 2380 iir = I915_READ(GEN8_DE_PIPE_IIR(pipe)); 2381 if (!iir) { 2382 drm_err(&dev_priv->drm, 2383 "The master control interrupt lied (DE PIPE)!\n"); 2384 continue; 2385 } 2386 2387 ret = IRQ_HANDLED; 2388 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir); 2389 2390 if (iir & GEN8_PIPE_VBLANK) 2391 intel_handle_vblank(dev_priv, pipe); 2392 2393 if (iir & GEN8_PIPE_CDCLK_CRC_DONE) 2394 hsw_pipe_crc_irq_handler(dev_priv, pipe); 2395 2396 if (iir & GEN8_PIPE_FIFO_UNDERRUN) 2397 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2398 2399 fault_errors = iir & gen8_de_pipe_fault_mask(dev_priv); 2400 if (fault_errors) 2401 drm_err(&dev_priv->drm, 2402 "Fault errors on pipe %c: 0x%08x\n", 2403 pipe_name(pipe), 2404 fault_errors); 2405 } 2406 2407 if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) && 2408 master_ctl & GEN8_DE_PCH_IRQ) { 2409 /* 2410 * FIXME(BDW): Assume for now that the new interrupt handling 2411 * scheme also closed the SDE interrupt handling race we've seen 2412 * on older pch-split platforms. But this needs testing. 2413 */ 2414 iir = I915_READ(SDEIIR); 2415 if (iir) { 2416 I915_WRITE(SDEIIR, iir); 2417 ret = IRQ_HANDLED; 2418 2419 if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) 2420 icp_irq_handler(dev_priv, iir); 2421 else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT) 2422 spt_irq_handler(dev_priv, iir); 2423 else 2424 cpt_irq_handler(dev_priv, iir); 2425 } else { 2426 /* 2427 * Like on previous PCH there seems to be something 2428 * fishy going on with forwarding PCH interrupts. 2429 */ 2430 drm_dbg(&dev_priv->drm, 2431 "The master control interrupt lied (SDE)!\n"); 2432 } 2433 } 2434 2435 return ret; 2436 } 2437 2438 static inline u32 gen8_master_intr_disable(void __iomem * const regs) 2439 { 2440 raw_reg_write(regs, GEN8_MASTER_IRQ, 0); 2441 2442 /* 2443 * Now with master disabled, get a sample of level indications 2444 * for this interrupt. Indications will be cleared on related acks. 2445 * New indications can and will light up during processing, 2446 * and will generate new interrupt after enabling master. 2447 */ 2448 return raw_reg_read(regs, GEN8_MASTER_IRQ); 2449 } 2450 2451 static inline void gen8_master_intr_enable(void __iomem * const regs) 2452 { 2453 raw_reg_write(regs, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 2454 } 2455 2456 static irqreturn_t gen8_irq_handler(int irq, void *arg) 2457 { 2458 struct drm_i915_private *dev_priv = arg; 2459 void __iomem * const regs = dev_priv->uncore.regs; 2460 u32 master_ctl; 2461 2462 if (!intel_irqs_enabled(dev_priv)) 2463 return IRQ_NONE; 2464 2465 master_ctl = gen8_master_intr_disable(regs); 2466 if (!master_ctl) { 2467 gen8_master_intr_enable(regs); 2468 return IRQ_NONE; 2469 } 2470 2471 /* Find, queue (onto bottom-halves), then clear each source */ 2472 gen8_gt_irq_handler(&dev_priv->gt, master_ctl); 2473 2474 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2475 if (master_ctl & ~GEN8_GT_IRQS) { 2476 disable_rpm_wakeref_asserts(&dev_priv->runtime_pm); 2477 gen8_de_irq_handler(dev_priv, master_ctl); 2478 enable_rpm_wakeref_asserts(&dev_priv->runtime_pm); 2479 } 2480 2481 gen8_master_intr_enable(regs); 2482 2483 return IRQ_HANDLED; 2484 } 2485 2486 static u32 2487 gen11_gu_misc_irq_ack(struct intel_gt *gt, const u32 master_ctl) 2488 { 2489 void __iomem * const regs = gt->uncore->regs; 2490 u32 iir; 2491 2492 if (!(master_ctl & GEN11_GU_MISC_IRQ)) 2493 return 0; 2494 2495 iir = raw_reg_read(regs, GEN11_GU_MISC_IIR); 2496 if (likely(iir)) 2497 raw_reg_write(regs, GEN11_GU_MISC_IIR, iir); 2498 2499 return iir; 2500 } 2501 2502 static void 2503 gen11_gu_misc_irq_handler(struct intel_gt *gt, const u32 iir) 2504 { 2505 if (iir & GEN11_GU_MISC_GSE) 2506 intel_opregion_asle_intr(gt->i915); 2507 } 2508 2509 static inline u32 gen11_master_intr_disable(void __iomem * const regs) 2510 { 2511 raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0); 2512 2513 /* 2514 * Now with master disabled, get a sample of level indications 2515 * for this interrupt. Indications will be cleared on related acks. 2516 * New indications can and will light up during processing, 2517 * and will generate new interrupt after enabling master. 2518 */ 2519 return raw_reg_read(regs, GEN11_GFX_MSTR_IRQ); 2520 } 2521 2522 static inline void gen11_master_intr_enable(void __iomem * const regs) 2523 { 2524 raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ); 2525 } 2526 2527 static void 2528 gen11_display_irq_handler(struct drm_i915_private *i915) 2529 { 2530 void __iomem * const regs = i915->uncore.regs; 2531 const u32 disp_ctl = raw_reg_read(regs, GEN11_DISPLAY_INT_CTL); 2532 2533 disable_rpm_wakeref_asserts(&i915->runtime_pm); 2534 /* 2535 * GEN11_DISPLAY_INT_CTL has same format as GEN8_MASTER_IRQ 2536 * for the display related bits. 2537 */ 2538 raw_reg_write(regs, GEN11_DISPLAY_INT_CTL, 0x0); 2539 gen8_de_irq_handler(i915, disp_ctl); 2540 raw_reg_write(regs, GEN11_DISPLAY_INT_CTL, 2541 GEN11_DISPLAY_IRQ_ENABLE); 2542 2543 enable_rpm_wakeref_asserts(&i915->runtime_pm); 2544 } 2545 2546 static __always_inline irqreturn_t 2547 __gen11_irq_handler(struct drm_i915_private * const i915, 2548 u32 (*intr_disable)(void __iomem * const regs), 2549 void (*intr_enable)(void __iomem * const regs)) 2550 { 2551 void __iomem * const regs = i915->uncore.regs; 2552 struct intel_gt *gt = &i915->gt; 2553 u32 master_ctl; 2554 u32 gu_misc_iir; 2555 2556 if (!intel_irqs_enabled(i915)) 2557 return IRQ_NONE; 2558 2559 master_ctl = intr_disable(regs); 2560 if (!master_ctl) { 2561 intr_enable(regs); 2562 return IRQ_NONE; 2563 } 2564 2565 /* Find, queue (onto bottom-halves), then clear each source */ 2566 gen11_gt_irq_handler(gt, master_ctl); 2567 2568 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2569 if (master_ctl & GEN11_DISPLAY_IRQ) 2570 gen11_display_irq_handler(i915); 2571 2572 gu_misc_iir = gen11_gu_misc_irq_ack(gt, master_ctl); 2573 2574 intr_enable(regs); 2575 2576 gen11_gu_misc_irq_handler(gt, gu_misc_iir); 2577 2578 return IRQ_HANDLED; 2579 } 2580 2581 static irqreturn_t gen11_irq_handler(int irq, void *arg) 2582 { 2583 return __gen11_irq_handler(arg, 2584 gen11_master_intr_disable, 2585 gen11_master_intr_enable); 2586 } 2587 2588 static u32 dg1_master_intr_disable_and_ack(void __iomem * const regs) 2589 { 2590 u32 val; 2591 2592 /* First disable interrupts */ 2593 raw_reg_write(regs, DG1_MSTR_UNIT_INTR, 0); 2594 2595 /* Get the indication levels and ack the master unit */ 2596 val = raw_reg_read(regs, DG1_MSTR_UNIT_INTR); 2597 if (unlikely(!val)) 2598 return 0; 2599 2600 raw_reg_write(regs, DG1_MSTR_UNIT_INTR, val); 2601 2602 /* 2603 * Now with master disabled, get a sample of level indications 2604 * for this interrupt and ack them right away - we keep GEN11_MASTER_IRQ 2605 * out as this bit doesn't exist anymore for DG1 2606 */ 2607 val = raw_reg_read(regs, GEN11_GFX_MSTR_IRQ) & ~GEN11_MASTER_IRQ; 2608 if (unlikely(!val)) 2609 return 0; 2610 2611 raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, val); 2612 2613 return val; 2614 } 2615 2616 static inline void dg1_master_intr_enable(void __iomem * const regs) 2617 { 2618 raw_reg_write(regs, DG1_MSTR_UNIT_INTR, DG1_MSTR_IRQ); 2619 } 2620 2621 static irqreturn_t dg1_irq_handler(int irq, void *arg) 2622 { 2623 return __gen11_irq_handler(arg, 2624 dg1_master_intr_disable_and_ack, 2625 dg1_master_intr_enable); 2626 } 2627 2628 /* Called from drm generic code, passed 'crtc' which 2629 * we use as a pipe index 2630 */ 2631 int i8xx_enable_vblank(struct drm_crtc *crtc) 2632 { 2633 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 2634 enum pipe pipe = to_intel_crtc(crtc)->pipe; 2635 unsigned long irqflags; 2636 2637 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2638 i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS); 2639 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2640 2641 return 0; 2642 } 2643 2644 int i915gm_enable_vblank(struct drm_crtc *crtc) 2645 { 2646 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 2647 2648 /* 2649 * Vblank interrupts fail to wake the device up from C2+. 2650 * Disabling render clock gating during C-states avoids 2651 * the problem. There is a small power cost so we do this 2652 * only when vblank interrupts are actually enabled. 2653 */ 2654 if (dev_priv->vblank_enabled++ == 0) 2655 I915_WRITE(SCPD0, _MASKED_BIT_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE)); 2656 2657 return i8xx_enable_vblank(crtc); 2658 } 2659 2660 int i965_enable_vblank(struct drm_crtc *crtc) 2661 { 2662 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 2663 enum pipe pipe = to_intel_crtc(crtc)->pipe; 2664 unsigned long irqflags; 2665 2666 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2667 i915_enable_pipestat(dev_priv, pipe, 2668 PIPE_START_VBLANK_INTERRUPT_STATUS); 2669 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2670 2671 return 0; 2672 } 2673 2674 int ilk_enable_vblank(struct drm_crtc *crtc) 2675 { 2676 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 2677 enum pipe pipe = to_intel_crtc(crtc)->pipe; 2678 unsigned long irqflags; 2679 u32 bit = INTEL_GEN(dev_priv) >= 7 ? 2680 DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe); 2681 2682 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2683 ilk_enable_display_irq(dev_priv, bit); 2684 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2685 2686 /* Even though there is no DMC, frame counter can get stuck when 2687 * PSR is active as no frames are generated. 2688 */ 2689 if (HAS_PSR(dev_priv)) 2690 drm_crtc_vblank_restore(crtc); 2691 2692 return 0; 2693 } 2694 2695 int bdw_enable_vblank(struct drm_crtc *crtc) 2696 { 2697 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 2698 enum pipe pipe = to_intel_crtc(crtc)->pipe; 2699 unsigned long irqflags; 2700 2701 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2702 bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK); 2703 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2704 2705 /* Even if there is no DMC, frame counter can get stuck when 2706 * PSR is active as no frames are generated, so check only for PSR. 2707 */ 2708 if (HAS_PSR(dev_priv)) 2709 drm_crtc_vblank_restore(crtc); 2710 2711 return 0; 2712 } 2713 2714 /* Called from drm generic code, passed 'crtc' which 2715 * we use as a pipe index 2716 */ 2717 void i8xx_disable_vblank(struct drm_crtc *crtc) 2718 { 2719 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 2720 enum pipe pipe = to_intel_crtc(crtc)->pipe; 2721 unsigned long irqflags; 2722 2723 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2724 i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS); 2725 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2726 } 2727 2728 void i915gm_disable_vblank(struct drm_crtc *crtc) 2729 { 2730 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 2731 2732 i8xx_disable_vblank(crtc); 2733 2734 if (--dev_priv->vblank_enabled == 0) 2735 I915_WRITE(SCPD0, _MASKED_BIT_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE)); 2736 } 2737 2738 void i965_disable_vblank(struct drm_crtc *crtc) 2739 { 2740 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 2741 enum pipe pipe = to_intel_crtc(crtc)->pipe; 2742 unsigned long irqflags; 2743 2744 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2745 i915_disable_pipestat(dev_priv, pipe, 2746 PIPE_START_VBLANK_INTERRUPT_STATUS); 2747 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2748 } 2749 2750 void ilk_disable_vblank(struct drm_crtc *crtc) 2751 { 2752 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 2753 enum pipe pipe = to_intel_crtc(crtc)->pipe; 2754 unsigned long irqflags; 2755 u32 bit = INTEL_GEN(dev_priv) >= 7 ? 2756 DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe); 2757 2758 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2759 ilk_disable_display_irq(dev_priv, bit); 2760 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2761 } 2762 2763 void bdw_disable_vblank(struct drm_crtc *crtc) 2764 { 2765 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 2766 enum pipe pipe = to_intel_crtc(crtc)->pipe; 2767 unsigned long irqflags; 2768 2769 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2770 bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK); 2771 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2772 } 2773 2774 static void ibx_irq_reset(struct drm_i915_private *dev_priv) 2775 { 2776 struct intel_uncore *uncore = &dev_priv->uncore; 2777 2778 if (HAS_PCH_NOP(dev_priv)) 2779 return; 2780 2781 GEN3_IRQ_RESET(uncore, SDE); 2782 2783 if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv)) 2784 I915_WRITE(SERR_INT, 0xffffffff); 2785 } 2786 2787 /* 2788 * SDEIER is also touched by the interrupt handler to work around missed PCH 2789 * interrupts. Hence we can't update it after the interrupt handler is enabled - 2790 * instead we unconditionally enable all PCH interrupt sources here, but then 2791 * only unmask them as needed with SDEIMR. 2792 * 2793 * This function needs to be called before interrupts are enabled. 2794 */ 2795 static void ibx_irq_pre_postinstall(struct drm_i915_private *dev_priv) 2796 { 2797 if (HAS_PCH_NOP(dev_priv)) 2798 return; 2799 2800 drm_WARN_ON(&dev_priv->drm, I915_READ(SDEIER) != 0); 2801 I915_WRITE(SDEIER, 0xffffffff); 2802 POSTING_READ(SDEIER); 2803 } 2804 2805 static void vlv_display_irq_reset(struct drm_i915_private *dev_priv) 2806 { 2807 struct intel_uncore *uncore = &dev_priv->uncore; 2808 2809 if (IS_CHERRYVIEW(dev_priv)) 2810 intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_CHV); 2811 else 2812 intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK); 2813 2814 i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0); 2815 intel_uncore_write(uncore, PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 2816 2817 i9xx_pipestat_irq_reset(dev_priv); 2818 2819 GEN3_IRQ_RESET(uncore, VLV_); 2820 dev_priv->irq_mask = ~0u; 2821 } 2822 2823 static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv) 2824 { 2825 struct intel_uncore *uncore = &dev_priv->uncore; 2826 2827 u32 pipestat_mask; 2828 u32 enable_mask; 2829 enum pipe pipe; 2830 2831 pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS; 2832 2833 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); 2834 for_each_pipe(dev_priv, pipe) 2835 i915_enable_pipestat(dev_priv, pipe, pipestat_mask); 2836 2837 enable_mask = I915_DISPLAY_PORT_INTERRUPT | 2838 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 2839 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 2840 I915_LPE_PIPE_A_INTERRUPT | 2841 I915_LPE_PIPE_B_INTERRUPT; 2842 2843 if (IS_CHERRYVIEW(dev_priv)) 2844 enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT | 2845 I915_LPE_PIPE_C_INTERRUPT; 2846 2847 drm_WARN_ON(&dev_priv->drm, dev_priv->irq_mask != ~0u); 2848 2849 dev_priv->irq_mask = ~enable_mask; 2850 2851 GEN3_IRQ_INIT(uncore, VLV_, dev_priv->irq_mask, enable_mask); 2852 } 2853 2854 /* drm_dma.h hooks 2855 */ 2856 static void ilk_irq_reset(struct drm_i915_private *dev_priv) 2857 { 2858 struct intel_uncore *uncore = &dev_priv->uncore; 2859 2860 GEN3_IRQ_RESET(uncore, DE); 2861 if (IS_GEN(dev_priv, 7)) 2862 intel_uncore_write(uncore, GEN7_ERR_INT, 0xffffffff); 2863 2864 if (IS_HASWELL(dev_priv)) { 2865 intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff); 2866 intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff); 2867 } 2868 2869 gen5_gt_irq_reset(&dev_priv->gt); 2870 2871 ibx_irq_reset(dev_priv); 2872 } 2873 2874 static void valleyview_irq_reset(struct drm_i915_private *dev_priv) 2875 { 2876 I915_WRITE(VLV_MASTER_IER, 0); 2877 POSTING_READ(VLV_MASTER_IER); 2878 2879 gen5_gt_irq_reset(&dev_priv->gt); 2880 2881 spin_lock_irq(&dev_priv->irq_lock); 2882 if (dev_priv->display_irqs_enabled) 2883 vlv_display_irq_reset(dev_priv); 2884 spin_unlock_irq(&dev_priv->irq_lock); 2885 } 2886 2887 static void gen8_irq_reset(struct drm_i915_private *dev_priv) 2888 { 2889 struct intel_uncore *uncore = &dev_priv->uncore; 2890 enum pipe pipe; 2891 2892 gen8_master_intr_disable(dev_priv->uncore.regs); 2893 2894 gen8_gt_irq_reset(&dev_priv->gt); 2895 2896 intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff); 2897 intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff); 2898 2899 for_each_pipe(dev_priv, pipe) 2900 if (intel_display_power_is_enabled(dev_priv, 2901 POWER_DOMAIN_PIPE(pipe))) 2902 GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe); 2903 2904 GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_); 2905 GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_); 2906 GEN3_IRQ_RESET(uncore, GEN8_PCU_); 2907 2908 if (HAS_PCH_SPLIT(dev_priv)) 2909 ibx_irq_reset(dev_priv); 2910 } 2911 2912 static void gen11_display_irq_reset(struct drm_i915_private *dev_priv) 2913 { 2914 struct intel_uncore *uncore = &dev_priv->uncore; 2915 enum pipe pipe; 2916 u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | 2917 BIT(TRANSCODER_C) | BIT(TRANSCODER_D); 2918 2919 intel_uncore_write(uncore, GEN11_DISPLAY_INT_CTL, 0); 2920 2921 if (INTEL_GEN(dev_priv) >= 12) { 2922 enum transcoder trans; 2923 2924 for_each_cpu_transcoder_masked(dev_priv, trans, trans_mask) { 2925 enum intel_display_power_domain domain; 2926 2927 domain = POWER_DOMAIN_TRANSCODER(trans); 2928 if (!intel_display_power_is_enabled(dev_priv, domain)) 2929 continue; 2930 2931 intel_uncore_write(uncore, TRANS_PSR_IMR(trans), 0xffffffff); 2932 intel_uncore_write(uncore, TRANS_PSR_IIR(trans), 0xffffffff); 2933 } 2934 } else { 2935 intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff); 2936 intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff); 2937 } 2938 2939 for_each_pipe(dev_priv, pipe) 2940 if (intel_display_power_is_enabled(dev_priv, 2941 POWER_DOMAIN_PIPE(pipe))) 2942 GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe); 2943 2944 GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_); 2945 GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_); 2946 GEN3_IRQ_RESET(uncore, GEN11_DE_HPD_); 2947 2948 if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) 2949 GEN3_IRQ_RESET(uncore, SDE); 2950 2951 /* Wa_14010685332:icl,jsl,ehl,tgl,rkl */ 2952 if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) { 2953 intel_uncore_rmw(uncore, SOUTH_CHICKEN1, 2954 SBCLK_RUN_REFCLK_DIS, SBCLK_RUN_REFCLK_DIS); 2955 intel_uncore_rmw(uncore, SOUTH_CHICKEN1, 2956 SBCLK_RUN_REFCLK_DIS, 0); 2957 } 2958 } 2959 2960 static void gen11_irq_reset(struct drm_i915_private *dev_priv) 2961 { 2962 struct intel_uncore *uncore = &dev_priv->uncore; 2963 2964 if (HAS_MASTER_UNIT_IRQ(dev_priv)) 2965 dg1_master_intr_disable_and_ack(dev_priv->uncore.regs); 2966 else 2967 gen11_master_intr_disable(dev_priv->uncore.regs); 2968 2969 gen11_gt_irq_reset(&dev_priv->gt); 2970 gen11_display_irq_reset(dev_priv); 2971 2972 GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_); 2973 GEN3_IRQ_RESET(uncore, GEN8_PCU_); 2974 } 2975 2976 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv, 2977 u8 pipe_mask) 2978 { 2979 struct intel_uncore *uncore = &dev_priv->uncore; 2980 2981 u32 extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN; 2982 enum pipe pipe; 2983 2984 spin_lock_irq(&dev_priv->irq_lock); 2985 2986 if (!intel_irqs_enabled(dev_priv)) { 2987 spin_unlock_irq(&dev_priv->irq_lock); 2988 return; 2989 } 2990 2991 for_each_pipe_masked(dev_priv, pipe, pipe_mask) 2992 GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe, 2993 dev_priv->de_irq_mask[pipe], 2994 ~dev_priv->de_irq_mask[pipe] | extra_ier); 2995 2996 spin_unlock_irq(&dev_priv->irq_lock); 2997 } 2998 2999 void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv, 3000 u8 pipe_mask) 3001 { 3002 struct intel_uncore *uncore = &dev_priv->uncore; 3003 enum pipe pipe; 3004 3005 spin_lock_irq(&dev_priv->irq_lock); 3006 3007 if (!intel_irqs_enabled(dev_priv)) { 3008 spin_unlock_irq(&dev_priv->irq_lock); 3009 return; 3010 } 3011 3012 for_each_pipe_masked(dev_priv, pipe, pipe_mask) 3013 GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe); 3014 3015 spin_unlock_irq(&dev_priv->irq_lock); 3016 3017 /* make sure we're done processing display irqs */ 3018 intel_synchronize_irq(dev_priv); 3019 } 3020 3021 static void cherryview_irq_reset(struct drm_i915_private *dev_priv) 3022 { 3023 struct intel_uncore *uncore = &dev_priv->uncore; 3024 3025 I915_WRITE(GEN8_MASTER_IRQ, 0); 3026 POSTING_READ(GEN8_MASTER_IRQ); 3027 3028 gen8_gt_irq_reset(&dev_priv->gt); 3029 3030 GEN3_IRQ_RESET(uncore, GEN8_PCU_); 3031 3032 spin_lock_irq(&dev_priv->irq_lock); 3033 if (dev_priv->display_irqs_enabled) 3034 vlv_display_irq_reset(dev_priv); 3035 spin_unlock_irq(&dev_priv->irq_lock); 3036 } 3037 3038 static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv, 3039 const u32 hpd[HPD_NUM_PINS]) 3040 { 3041 struct intel_encoder *encoder; 3042 u32 enabled_irqs = 0; 3043 3044 for_each_intel_encoder(&dev_priv->drm, encoder) 3045 if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED) 3046 enabled_irqs |= hpd[encoder->hpd_pin]; 3047 3048 return enabled_irqs; 3049 } 3050 3051 static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv) 3052 { 3053 u32 hotplug; 3054 3055 /* 3056 * Enable digital hotplug on the PCH, and configure the DP short pulse 3057 * duration to 2ms (which is the minimum in the Display Port spec). 3058 * The pulse duration bits are reserved on LPT+. 3059 */ 3060 hotplug = I915_READ(PCH_PORT_HOTPLUG); 3061 hotplug &= ~(PORTB_PULSE_DURATION_MASK | 3062 PORTC_PULSE_DURATION_MASK | 3063 PORTD_PULSE_DURATION_MASK); 3064 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms; 3065 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms; 3066 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms; 3067 /* 3068 * When CPU and PCH are on the same package, port A 3069 * HPD must be enabled in both north and south. 3070 */ 3071 if (HAS_PCH_LPT_LP(dev_priv)) 3072 hotplug |= PORTA_HOTPLUG_ENABLE; 3073 I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 3074 } 3075 3076 static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv) 3077 { 3078 u32 hotplug_irqs, enabled_irqs; 3079 3080 if (HAS_PCH_IBX(dev_priv)) 3081 hotplug_irqs = SDE_HOTPLUG_MASK; 3082 else 3083 hotplug_irqs = SDE_HOTPLUG_MASK_CPT; 3084 3085 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.pch_hpd); 3086 3087 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 3088 3089 ibx_hpd_detection_setup(dev_priv); 3090 } 3091 3092 static void icp_hpd_detection_setup(struct drm_i915_private *dev_priv, 3093 u32 ddi_hotplug_enable_mask, 3094 u32 tc_hotplug_enable_mask) 3095 { 3096 u32 hotplug; 3097 3098 hotplug = I915_READ(SHOTPLUG_CTL_DDI); 3099 hotplug |= ddi_hotplug_enable_mask; 3100 I915_WRITE(SHOTPLUG_CTL_DDI, hotplug); 3101 3102 if (tc_hotplug_enable_mask) { 3103 hotplug = I915_READ(SHOTPLUG_CTL_TC); 3104 hotplug |= tc_hotplug_enable_mask; 3105 I915_WRITE(SHOTPLUG_CTL_TC, hotplug); 3106 } 3107 } 3108 3109 static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv, 3110 u32 sde_ddi_mask, u32 sde_tc_mask, 3111 u32 ddi_enable_mask, u32 tc_enable_mask) 3112 { 3113 u32 hotplug_irqs, enabled_irqs; 3114 3115 hotplug_irqs = sde_ddi_mask | sde_tc_mask; 3116 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.pch_hpd); 3117 3118 if (INTEL_PCH_TYPE(dev_priv) <= PCH_TGP) 3119 I915_WRITE(SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ); 3120 3121 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 3122 3123 icp_hpd_detection_setup(dev_priv, ddi_enable_mask, tc_enable_mask); 3124 } 3125 3126 /* 3127 * EHL doesn't need most of gen11_hpd_irq_setup, it's handling only the 3128 * equivalent of SDE. 3129 */ 3130 static void mcc_hpd_irq_setup(struct drm_i915_private *dev_priv) 3131 { 3132 icp_hpd_irq_setup(dev_priv, 3133 SDE_DDI_MASK_ICP, SDE_TC_HOTPLUG_ICP(PORT_TC1), 3134 ICP_DDI_HPD_ENABLE_MASK, ICP_TC_HPD_ENABLE(PORT_TC1)); 3135 } 3136 3137 /* 3138 * JSP behaves exactly the same as MCC above except that port C is mapped to 3139 * the DDI-C pins instead of the TC1 pins. This means we should follow TGP's 3140 * masks & tables rather than ICP's masks & tables. 3141 */ 3142 static void jsp_hpd_irq_setup(struct drm_i915_private *dev_priv) 3143 { 3144 icp_hpd_irq_setup(dev_priv, 3145 SDE_DDI_MASK_TGP, 0, 3146 TGP_DDI_HPD_ENABLE_MASK, 0); 3147 } 3148 3149 static void gen11_hpd_detection_setup(struct drm_i915_private *dev_priv) 3150 { 3151 u32 hotplug; 3152 3153 hotplug = I915_READ(GEN11_TC_HOTPLUG_CTL); 3154 hotplug |= GEN11_HOTPLUG_CTL_ENABLE(PORT_TC1) | 3155 GEN11_HOTPLUG_CTL_ENABLE(PORT_TC2) | 3156 GEN11_HOTPLUG_CTL_ENABLE(PORT_TC3) | 3157 GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4); 3158 I915_WRITE(GEN11_TC_HOTPLUG_CTL, hotplug); 3159 3160 hotplug = I915_READ(GEN11_TBT_HOTPLUG_CTL); 3161 hotplug |= GEN11_HOTPLUG_CTL_ENABLE(PORT_TC1) | 3162 GEN11_HOTPLUG_CTL_ENABLE(PORT_TC2) | 3163 GEN11_HOTPLUG_CTL_ENABLE(PORT_TC3) | 3164 GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4); 3165 I915_WRITE(GEN11_TBT_HOTPLUG_CTL, hotplug); 3166 } 3167 3168 static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv) 3169 { 3170 u32 hotplug_irqs, enabled_irqs; 3171 u32 val; 3172 3173 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd); 3174 hotplug_irqs = GEN11_DE_TC_HOTPLUG_MASK | GEN11_DE_TBT_HOTPLUG_MASK; 3175 3176 val = I915_READ(GEN11_DE_HPD_IMR); 3177 val &= ~hotplug_irqs; 3178 val |= ~enabled_irqs & hotplug_irqs; 3179 I915_WRITE(GEN11_DE_HPD_IMR, val); 3180 POSTING_READ(GEN11_DE_HPD_IMR); 3181 3182 gen11_hpd_detection_setup(dev_priv); 3183 3184 if (INTEL_PCH_TYPE(dev_priv) >= PCH_TGP) 3185 icp_hpd_irq_setup(dev_priv, SDE_DDI_MASK_TGP, SDE_TC_MASK_TGP, 3186 TGP_DDI_HPD_ENABLE_MASK, TGP_TC_HPD_ENABLE_MASK); 3187 else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) 3188 icp_hpd_irq_setup(dev_priv, SDE_DDI_MASK_ICP, SDE_TC_MASK_ICP, 3189 ICP_DDI_HPD_ENABLE_MASK, ICP_TC_HPD_ENABLE_MASK); 3190 } 3191 3192 static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv) 3193 { 3194 u32 val, hotplug; 3195 3196 /* Display WA #1179 WaHardHangonHotPlug: cnp */ 3197 if (HAS_PCH_CNP(dev_priv)) { 3198 val = I915_READ(SOUTH_CHICKEN1); 3199 val &= ~CHASSIS_CLK_REQ_DURATION_MASK; 3200 val |= CHASSIS_CLK_REQ_DURATION(0xf); 3201 I915_WRITE(SOUTH_CHICKEN1, val); 3202 } 3203 3204 /* Enable digital hotplug on the PCH */ 3205 hotplug = I915_READ(PCH_PORT_HOTPLUG); 3206 hotplug |= PORTA_HOTPLUG_ENABLE | 3207 PORTB_HOTPLUG_ENABLE | 3208 PORTC_HOTPLUG_ENABLE | 3209 PORTD_HOTPLUG_ENABLE; 3210 I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 3211 3212 hotplug = I915_READ(PCH_PORT_HOTPLUG2); 3213 hotplug |= PORTE_HOTPLUG_ENABLE; 3214 I915_WRITE(PCH_PORT_HOTPLUG2, hotplug); 3215 } 3216 3217 static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv) 3218 { 3219 u32 hotplug_irqs, enabled_irqs; 3220 3221 if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP) 3222 I915_WRITE(SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ); 3223 3224 hotplug_irqs = SDE_HOTPLUG_MASK_SPT; 3225 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.pch_hpd); 3226 3227 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 3228 3229 spt_hpd_detection_setup(dev_priv); 3230 } 3231 3232 static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv) 3233 { 3234 u32 hotplug; 3235 3236 /* 3237 * Enable digital hotplug on the CPU, and configure the DP short pulse 3238 * duration to 2ms (which is the minimum in the Display Port spec) 3239 * The pulse duration bits are reserved on HSW+. 3240 */ 3241 hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL); 3242 hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK; 3243 hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE | 3244 DIGITAL_PORTA_PULSE_DURATION_2ms; 3245 I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug); 3246 } 3247 3248 static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv) 3249 { 3250 u32 hotplug_irqs, enabled_irqs; 3251 3252 if (INTEL_GEN(dev_priv) >= 8) { 3253 hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG; 3254 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd); 3255 3256 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs); 3257 } else if (INTEL_GEN(dev_priv) >= 7) { 3258 hotplug_irqs = DE_DP_A_HOTPLUG_IVB; 3259 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd); 3260 3261 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs); 3262 } else { 3263 hotplug_irqs = DE_DP_A_HOTPLUG; 3264 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd); 3265 3266 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs); 3267 } 3268 3269 ilk_hpd_detection_setup(dev_priv); 3270 3271 ibx_hpd_irq_setup(dev_priv); 3272 } 3273 3274 static void __bxt_hpd_detection_setup(struct drm_i915_private *dev_priv, 3275 u32 enabled_irqs) 3276 { 3277 u32 hotplug; 3278 3279 hotplug = I915_READ(PCH_PORT_HOTPLUG); 3280 hotplug |= PORTA_HOTPLUG_ENABLE | 3281 PORTB_HOTPLUG_ENABLE | 3282 PORTC_HOTPLUG_ENABLE; 3283 3284 drm_dbg_kms(&dev_priv->drm, 3285 "Invert bit setting: hp_ctl:%x hp_port:%x\n", 3286 hotplug, enabled_irqs); 3287 hotplug &= ~BXT_DDI_HPD_INVERT_MASK; 3288 3289 /* 3290 * For BXT invert bit has to be set based on AOB design 3291 * for HPD detection logic, update it based on VBT fields. 3292 */ 3293 if ((enabled_irqs & BXT_DE_PORT_HP_DDIA) && 3294 intel_bios_is_port_hpd_inverted(dev_priv, PORT_A)) 3295 hotplug |= BXT_DDIA_HPD_INVERT; 3296 if ((enabled_irqs & BXT_DE_PORT_HP_DDIB) && 3297 intel_bios_is_port_hpd_inverted(dev_priv, PORT_B)) 3298 hotplug |= BXT_DDIB_HPD_INVERT; 3299 if ((enabled_irqs & BXT_DE_PORT_HP_DDIC) && 3300 intel_bios_is_port_hpd_inverted(dev_priv, PORT_C)) 3301 hotplug |= BXT_DDIC_HPD_INVERT; 3302 3303 I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 3304 } 3305 3306 static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv) 3307 { 3308 __bxt_hpd_detection_setup(dev_priv, BXT_DE_PORT_HOTPLUG_MASK); 3309 } 3310 3311 static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv) 3312 { 3313 u32 hotplug_irqs, enabled_irqs; 3314 3315 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd); 3316 hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK; 3317 3318 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs); 3319 3320 __bxt_hpd_detection_setup(dev_priv, enabled_irqs); 3321 } 3322 3323 static void ibx_irq_postinstall(struct drm_i915_private *dev_priv) 3324 { 3325 u32 mask; 3326 3327 if (HAS_PCH_NOP(dev_priv)) 3328 return; 3329 3330 if (HAS_PCH_IBX(dev_priv)) 3331 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON; 3332 else if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv)) 3333 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT; 3334 else 3335 mask = SDE_GMBUS_CPT; 3336 3337 gen3_assert_iir_is_zero(&dev_priv->uncore, SDEIIR); 3338 I915_WRITE(SDEIMR, ~mask); 3339 3340 if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) || 3341 HAS_PCH_LPT(dev_priv)) 3342 ibx_hpd_detection_setup(dev_priv); 3343 else 3344 spt_hpd_detection_setup(dev_priv); 3345 } 3346 3347 static void ilk_irq_postinstall(struct drm_i915_private *dev_priv) 3348 { 3349 struct intel_uncore *uncore = &dev_priv->uncore; 3350 u32 display_mask, extra_mask; 3351 3352 if (INTEL_GEN(dev_priv) >= 7) { 3353 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | 3354 DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB); 3355 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB | 3356 DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB | 3357 DE_DP_A_HOTPLUG_IVB); 3358 } else { 3359 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | 3360 DE_AUX_CHANNEL_A | DE_PIPEB_CRC_DONE | 3361 DE_PIPEA_CRC_DONE | DE_POISON); 3362 extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT | 3363 DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN | 3364 DE_DP_A_HOTPLUG); 3365 } 3366 3367 if (IS_HASWELL(dev_priv)) { 3368 gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR); 3369 display_mask |= DE_EDP_PSR_INT_HSW; 3370 } 3371 3372 dev_priv->irq_mask = ~display_mask; 3373 3374 ibx_irq_pre_postinstall(dev_priv); 3375 3376 GEN3_IRQ_INIT(uncore, DE, dev_priv->irq_mask, 3377 display_mask | extra_mask); 3378 3379 gen5_gt_irq_postinstall(&dev_priv->gt); 3380 3381 ilk_hpd_detection_setup(dev_priv); 3382 3383 ibx_irq_postinstall(dev_priv); 3384 3385 if (IS_IRONLAKE_M(dev_priv)) { 3386 /* Enable PCU event interrupts 3387 * 3388 * spinlocking not required here for correctness since interrupt 3389 * setup is guaranteed to run in single-threaded context. But we 3390 * need it to make the assert_spin_locked happy. */ 3391 spin_lock_irq(&dev_priv->irq_lock); 3392 ilk_enable_display_irq(dev_priv, DE_PCU_EVENT); 3393 spin_unlock_irq(&dev_priv->irq_lock); 3394 } 3395 } 3396 3397 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv) 3398 { 3399 lockdep_assert_held(&dev_priv->irq_lock); 3400 3401 if (dev_priv->display_irqs_enabled) 3402 return; 3403 3404 dev_priv->display_irqs_enabled = true; 3405 3406 if (intel_irqs_enabled(dev_priv)) { 3407 vlv_display_irq_reset(dev_priv); 3408 vlv_display_irq_postinstall(dev_priv); 3409 } 3410 } 3411 3412 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv) 3413 { 3414 lockdep_assert_held(&dev_priv->irq_lock); 3415 3416 if (!dev_priv->display_irqs_enabled) 3417 return; 3418 3419 dev_priv->display_irqs_enabled = false; 3420 3421 if (intel_irqs_enabled(dev_priv)) 3422 vlv_display_irq_reset(dev_priv); 3423 } 3424 3425 3426 static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv) 3427 { 3428 gen5_gt_irq_postinstall(&dev_priv->gt); 3429 3430 spin_lock_irq(&dev_priv->irq_lock); 3431 if (dev_priv->display_irqs_enabled) 3432 vlv_display_irq_postinstall(dev_priv); 3433 spin_unlock_irq(&dev_priv->irq_lock); 3434 3435 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); 3436 POSTING_READ(VLV_MASTER_IER); 3437 } 3438 3439 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) 3440 { 3441 struct intel_uncore *uncore = &dev_priv->uncore; 3442 3443 u32 de_pipe_masked = gen8_de_pipe_fault_mask(dev_priv) | 3444 GEN8_PIPE_CDCLK_CRC_DONE; 3445 u32 de_pipe_enables; 3446 u32 de_port_masked = gen8_de_port_aux_mask(dev_priv); 3447 u32 de_port_enables; 3448 u32 de_misc_masked = GEN8_DE_EDP_PSR; 3449 u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | 3450 BIT(TRANSCODER_C) | BIT(TRANSCODER_D); 3451 enum pipe pipe; 3452 3453 if (INTEL_GEN(dev_priv) <= 10) 3454 de_misc_masked |= GEN8_DE_MISC_GSE; 3455 3456 if (IS_GEN9_LP(dev_priv)) 3457 de_port_masked |= BXT_DE_PORT_GMBUS; 3458 3459 de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK | 3460 GEN8_PIPE_FIFO_UNDERRUN; 3461 3462 de_port_enables = de_port_masked; 3463 if (IS_GEN9_LP(dev_priv)) 3464 de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK; 3465 else if (IS_BROADWELL(dev_priv)) 3466 de_port_enables |= GEN8_PORT_DP_A_HOTPLUG; 3467 3468 if (INTEL_GEN(dev_priv) >= 12) { 3469 enum transcoder trans; 3470 3471 for_each_cpu_transcoder_masked(dev_priv, trans, trans_mask) { 3472 enum intel_display_power_domain domain; 3473 3474 domain = POWER_DOMAIN_TRANSCODER(trans); 3475 if (!intel_display_power_is_enabled(dev_priv, domain)) 3476 continue; 3477 3478 gen3_assert_iir_is_zero(uncore, TRANS_PSR_IIR(trans)); 3479 } 3480 } else { 3481 gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR); 3482 } 3483 3484 for_each_pipe(dev_priv, pipe) { 3485 dev_priv->de_irq_mask[pipe] = ~de_pipe_masked; 3486 3487 if (intel_display_power_is_enabled(dev_priv, 3488 POWER_DOMAIN_PIPE(pipe))) 3489 GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe, 3490 dev_priv->de_irq_mask[pipe], 3491 de_pipe_enables); 3492 } 3493 3494 GEN3_IRQ_INIT(uncore, GEN8_DE_PORT_, ~de_port_masked, de_port_enables); 3495 GEN3_IRQ_INIT(uncore, GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked); 3496 3497 if (INTEL_GEN(dev_priv) >= 11) { 3498 u32 de_hpd_masked = 0; 3499 u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK | 3500 GEN11_DE_TBT_HOTPLUG_MASK; 3501 3502 GEN3_IRQ_INIT(uncore, GEN11_DE_HPD_, ~de_hpd_masked, 3503 de_hpd_enables); 3504 gen11_hpd_detection_setup(dev_priv); 3505 } else if (IS_GEN9_LP(dev_priv)) { 3506 bxt_hpd_detection_setup(dev_priv); 3507 } else if (IS_BROADWELL(dev_priv)) { 3508 ilk_hpd_detection_setup(dev_priv); 3509 } 3510 } 3511 3512 static void gen8_irq_postinstall(struct drm_i915_private *dev_priv) 3513 { 3514 if (HAS_PCH_SPLIT(dev_priv)) 3515 ibx_irq_pre_postinstall(dev_priv); 3516 3517 gen8_gt_irq_postinstall(&dev_priv->gt); 3518 gen8_de_irq_postinstall(dev_priv); 3519 3520 if (HAS_PCH_SPLIT(dev_priv)) 3521 ibx_irq_postinstall(dev_priv); 3522 3523 gen8_master_intr_enable(dev_priv->uncore.regs); 3524 } 3525 3526 static void icp_irq_postinstall(struct drm_i915_private *dev_priv) 3527 { 3528 u32 mask = SDE_GMBUS_ICP; 3529 3530 drm_WARN_ON(&dev_priv->drm, I915_READ(SDEIER) != 0); 3531 I915_WRITE(SDEIER, 0xffffffff); 3532 POSTING_READ(SDEIER); 3533 3534 gen3_assert_iir_is_zero(&dev_priv->uncore, SDEIIR); 3535 I915_WRITE(SDEIMR, ~mask); 3536 3537 if (HAS_PCH_TGP(dev_priv)) 3538 icp_hpd_detection_setup(dev_priv, TGP_DDI_HPD_ENABLE_MASK, 3539 TGP_TC_HPD_ENABLE_MASK); 3540 else if (HAS_PCH_JSP(dev_priv)) 3541 icp_hpd_detection_setup(dev_priv, TGP_DDI_HPD_ENABLE_MASK, 0); 3542 else if (HAS_PCH_MCC(dev_priv)) 3543 icp_hpd_detection_setup(dev_priv, ICP_DDI_HPD_ENABLE_MASK, 3544 ICP_TC_HPD_ENABLE(PORT_TC1)); 3545 else 3546 icp_hpd_detection_setup(dev_priv, ICP_DDI_HPD_ENABLE_MASK, 3547 ICP_TC_HPD_ENABLE_MASK); 3548 } 3549 3550 static void gen11_irq_postinstall(struct drm_i915_private *dev_priv) 3551 { 3552 struct intel_uncore *uncore = &dev_priv->uncore; 3553 u32 gu_misc_masked = GEN11_GU_MISC_GSE; 3554 3555 if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) 3556 icp_irq_postinstall(dev_priv); 3557 3558 gen11_gt_irq_postinstall(&dev_priv->gt); 3559 gen8_de_irq_postinstall(dev_priv); 3560 3561 GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked); 3562 3563 I915_WRITE(GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE); 3564 3565 if (HAS_MASTER_UNIT_IRQ(dev_priv)) { 3566 dg1_master_intr_enable(uncore->regs); 3567 POSTING_READ(DG1_MSTR_UNIT_INTR); 3568 } else { 3569 gen11_master_intr_enable(uncore->regs); 3570 POSTING_READ(GEN11_GFX_MSTR_IRQ); 3571 } 3572 } 3573 3574 static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv) 3575 { 3576 gen8_gt_irq_postinstall(&dev_priv->gt); 3577 3578 spin_lock_irq(&dev_priv->irq_lock); 3579 if (dev_priv->display_irqs_enabled) 3580 vlv_display_irq_postinstall(dev_priv); 3581 spin_unlock_irq(&dev_priv->irq_lock); 3582 3583 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 3584 POSTING_READ(GEN8_MASTER_IRQ); 3585 } 3586 3587 static void i8xx_irq_reset(struct drm_i915_private *dev_priv) 3588 { 3589 struct intel_uncore *uncore = &dev_priv->uncore; 3590 3591 i9xx_pipestat_irq_reset(dev_priv); 3592 3593 GEN2_IRQ_RESET(uncore); 3594 } 3595 3596 static void i8xx_irq_postinstall(struct drm_i915_private *dev_priv) 3597 { 3598 struct intel_uncore *uncore = &dev_priv->uncore; 3599 u16 enable_mask; 3600 3601 intel_uncore_write16(uncore, 3602 EMR, 3603 ~(I915_ERROR_PAGE_TABLE | 3604 I915_ERROR_MEMORY_REFRESH)); 3605 3606 /* Unmask the interrupts that we always want on. */ 3607 dev_priv->irq_mask = 3608 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3609 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3610 I915_MASTER_ERROR_INTERRUPT); 3611 3612 enable_mask = 3613 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3614 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3615 I915_MASTER_ERROR_INTERRUPT | 3616 I915_USER_INTERRUPT; 3617 3618 GEN2_IRQ_INIT(uncore, dev_priv->irq_mask, enable_mask); 3619 3620 /* Interrupt setup is already guaranteed to be single-threaded, this is 3621 * just to make the assert_spin_locked check happy. */ 3622 spin_lock_irq(&dev_priv->irq_lock); 3623 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 3624 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 3625 spin_unlock_irq(&dev_priv->irq_lock); 3626 } 3627 3628 static void i8xx_error_irq_ack(struct drm_i915_private *i915, 3629 u16 *eir, u16 *eir_stuck) 3630 { 3631 struct intel_uncore *uncore = &i915->uncore; 3632 u16 emr; 3633 3634 *eir = intel_uncore_read16(uncore, EIR); 3635 3636 if (*eir) 3637 intel_uncore_write16(uncore, EIR, *eir); 3638 3639 *eir_stuck = intel_uncore_read16(uncore, EIR); 3640 if (*eir_stuck == 0) 3641 return; 3642 3643 /* 3644 * Toggle all EMR bits to make sure we get an edge 3645 * in the ISR master error bit if we don't clear 3646 * all the EIR bits. Otherwise the edge triggered 3647 * IIR on i965/g4x wouldn't notice that an interrupt 3648 * is still pending. Also some EIR bits can't be 3649 * cleared except by handling the underlying error 3650 * (or by a GPU reset) so we mask any bit that 3651 * remains set. 3652 */ 3653 emr = intel_uncore_read16(uncore, EMR); 3654 intel_uncore_write16(uncore, EMR, 0xffff); 3655 intel_uncore_write16(uncore, EMR, emr | *eir_stuck); 3656 } 3657 3658 static void i8xx_error_irq_handler(struct drm_i915_private *dev_priv, 3659 u16 eir, u16 eir_stuck) 3660 { 3661 DRM_DEBUG("Master Error: EIR 0x%04x\n", eir); 3662 3663 if (eir_stuck) 3664 drm_dbg(&dev_priv->drm, "EIR stuck: 0x%04x, masked\n", 3665 eir_stuck); 3666 } 3667 3668 static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv, 3669 u32 *eir, u32 *eir_stuck) 3670 { 3671 u32 emr; 3672 3673 *eir = I915_READ(EIR); 3674 3675 I915_WRITE(EIR, *eir); 3676 3677 *eir_stuck = I915_READ(EIR); 3678 if (*eir_stuck == 0) 3679 return; 3680 3681 /* 3682 * Toggle all EMR bits to make sure we get an edge 3683 * in the ISR master error bit if we don't clear 3684 * all the EIR bits. Otherwise the edge triggered 3685 * IIR on i965/g4x wouldn't notice that an interrupt 3686 * is still pending. Also some EIR bits can't be 3687 * cleared except by handling the underlying error 3688 * (or by a GPU reset) so we mask any bit that 3689 * remains set. 3690 */ 3691 emr = I915_READ(EMR); 3692 I915_WRITE(EMR, 0xffffffff); 3693 I915_WRITE(EMR, emr | *eir_stuck); 3694 } 3695 3696 static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv, 3697 u32 eir, u32 eir_stuck) 3698 { 3699 DRM_DEBUG("Master Error, EIR 0x%08x\n", eir); 3700 3701 if (eir_stuck) 3702 drm_dbg(&dev_priv->drm, "EIR stuck: 0x%08x, masked\n", 3703 eir_stuck); 3704 } 3705 3706 static irqreturn_t i8xx_irq_handler(int irq, void *arg) 3707 { 3708 struct drm_i915_private *dev_priv = arg; 3709 irqreturn_t ret = IRQ_NONE; 3710 3711 if (!intel_irqs_enabled(dev_priv)) 3712 return IRQ_NONE; 3713 3714 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 3715 disable_rpm_wakeref_asserts(&dev_priv->runtime_pm); 3716 3717 do { 3718 u32 pipe_stats[I915_MAX_PIPES] = {}; 3719 u16 eir = 0, eir_stuck = 0; 3720 u16 iir; 3721 3722 iir = intel_uncore_read16(&dev_priv->uncore, GEN2_IIR); 3723 if (iir == 0) 3724 break; 3725 3726 ret = IRQ_HANDLED; 3727 3728 /* Call regardless, as some status bits might not be 3729 * signalled in iir */ 3730 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats); 3731 3732 if (iir & I915_MASTER_ERROR_INTERRUPT) 3733 i8xx_error_irq_ack(dev_priv, &eir, &eir_stuck); 3734 3735 intel_uncore_write16(&dev_priv->uncore, GEN2_IIR, iir); 3736 3737 if (iir & I915_USER_INTERRUPT) 3738 intel_engine_signal_breadcrumbs(dev_priv->gt.engine[RCS0]); 3739 3740 if (iir & I915_MASTER_ERROR_INTERRUPT) 3741 i8xx_error_irq_handler(dev_priv, eir, eir_stuck); 3742 3743 i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats); 3744 } while (0); 3745 3746 enable_rpm_wakeref_asserts(&dev_priv->runtime_pm); 3747 3748 return ret; 3749 } 3750 3751 static void i915_irq_reset(struct drm_i915_private *dev_priv) 3752 { 3753 struct intel_uncore *uncore = &dev_priv->uncore; 3754 3755 if (I915_HAS_HOTPLUG(dev_priv)) { 3756 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); 3757 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3758 } 3759 3760 i9xx_pipestat_irq_reset(dev_priv); 3761 3762 GEN3_IRQ_RESET(uncore, GEN2_); 3763 } 3764 3765 static void i915_irq_postinstall(struct drm_i915_private *dev_priv) 3766 { 3767 struct intel_uncore *uncore = &dev_priv->uncore; 3768 u32 enable_mask; 3769 3770 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | 3771 I915_ERROR_MEMORY_REFRESH)); 3772 3773 /* Unmask the interrupts that we always want on. */ 3774 dev_priv->irq_mask = 3775 ~(I915_ASLE_INTERRUPT | 3776 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3777 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3778 I915_MASTER_ERROR_INTERRUPT); 3779 3780 enable_mask = 3781 I915_ASLE_INTERRUPT | 3782 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3783 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3784 I915_MASTER_ERROR_INTERRUPT | 3785 I915_USER_INTERRUPT; 3786 3787 if (I915_HAS_HOTPLUG(dev_priv)) { 3788 /* Enable in IER... */ 3789 enable_mask |= I915_DISPLAY_PORT_INTERRUPT; 3790 /* and unmask in IMR */ 3791 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT; 3792 } 3793 3794 GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask); 3795 3796 /* Interrupt setup is already guaranteed to be single-threaded, this is 3797 * just to make the assert_spin_locked check happy. */ 3798 spin_lock_irq(&dev_priv->irq_lock); 3799 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 3800 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 3801 spin_unlock_irq(&dev_priv->irq_lock); 3802 3803 i915_enable_asle_pipestat(dev_priv); 3804 } 3805 3806 static irqreturn_t i915_irq_handler(int irq, void *arg) 3807 { 3808 struct drm_i915_private *dev_priv = arg; 3809 irqreturn_t ret = IRQ_NONE; 3810 3811 if (!intel_irqs_enabled(dev_priv)) 3812 return IRQ_NONE; 3813 3814 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 3815 disable_rpm_wakeref_asserts(&dev_priv->runtime_pm); 3816 3817 do { 3818 u32 pipe_stats[I915_MAX_PIPES] = {}; 3819 u32 eir = 0, eir_stuck = 0; 3820 u32 hotplug_status = 0; 3821 u32 iir; 3822 3823 iir = I915_READ(GEN2_IIR); 3824 if (iir == 0) 3825 break; 3826 3827 ret = IRQ_HANDLED; 3828 3829 if (I915_HAS_HOTPLUG(dev_priv) && 3830 iir & I915_DISPLAY_PORT_INTERRUPT) 3831 hotplug_status = i9xx_hpd_irq_ack(dev_priv); 3832 3833 /* Call regardless, as some status bits might not be 3834 * signalled in iir */ 3835 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats); 3836 3837 if (iir & I915_MASTER_ERROR_INTERRUPT) 3838 i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck); 3839 3840 I915_WRITE(GEN2_IIR, iir); 3841 3842 if (iir & I915_USER_INTERRUPT) 3843 intel_engine_signal_breadcrumbs(dev_priv->gt.engine[RCS0]); 3844 3845 if (iir & I915_MASTER_ERROR_INTERRUPT) 3846 i9xx_error_irq_handler(dev_priv, eir, eir_stuck); 3847 3848 if (hotplug_status) 3849 i9xx_hpd_irq_handler(dev_priv, hotplug_status); 3850 3851 i915_pipestat_irq_handler(dev_priv, iir, pipe_stats); 3852 } while (0); 3853 3854 enable_rpm_wakeref_asserts(&dev_priv->runtime_pm); 3855 3856 return ret; 3857 } 3858 3859 static void i965_irq_reset(struct drm_i915_private *dev_priv) 3860 { 3861 struct intel_uncore *uncore = &dev_priv->uncore; 3862 3863 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); 3864 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3865 3866 i9xx_pipestat_irq_reset(dev_priv); 3867 3868 GEN3_IRQ_RESET(uncore, GEN2_); 3869 } 3870 3871 static void i965_irq_postinstall(struct drm_i915_private *dev_priv) 3872 { 3873 struct intel_uncore *uncore = &dev_priv->uncore; 3874 u32 enable_mask; 3875 u32 error_mask; 3876 3877 /* 3878 * Enable some error detection, note the instruction error mask 3879 * bit is reserved, so we leave it masked. 3880 */ 3881 if (IS_G4X(dev_priv)) { 3882 error_mask = ~(GM45_ERROR_PAGE_TABLE | 3883 GM45_ERROR_MEM_PRIV | 3884 GM45_ERROR_CP_PRIV | 3885 I915_ERROR_MEMORY_REFRESH); 3886 } else { 3887 error_mask = ~(I915_ERROR_PAGE_TABLE | 3888 I915_ERROR_MEMORY_REFRESH); 3889 } 3890 I915_WRITE(EMR, error_mask); 3891 3892 /* Unmask the interrupts that we always want on. */ 3893 dev_priv->irq_mask = 3894 ~(I915_ASLE_INTERRUPT | 3895 I915_DISPLAY_PORT_INTERRUPT | 3896 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3897 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3898 I915_MASTER_ERROR_INTERRUPT); 3899 3900 enable_mask = 3901 I915_ASLE_INTERRUPT | 3902 I915_DISPLAY_PORT_INTERRUPT | 3903 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3904 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3905 I915_MASTER_ERROR_INTERRUPT | 3906 I915_USER_INTERRUPT; 3907 3908 if (IS_G4X(dev_priv)) 3909 enable_mask |= I915_BSD_USER_INTERRUPT; 3910 3911 GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask); 3912 3913 /* Interrupt setup is already guaranteed to be single-threaded, this is 3914 * just to make the assert_spin_locked check happy. */ 3915 spin_lock_irq(&dev_priv->irq_lock); 3916 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); 3917 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 3918 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 3919 spin_unlock_irq(&dev_priv->irq_lock); 3920 3921 i915_enable_asle_pipestat(dev_priv); 3922 } 3923 3924 static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv) 3925 { 3926 u32 hotplug_en; 3927 3928 lockdep_assert_held(&dev_priv->irq_lock); 3929 3930 /* Note HDMI and DP share hotplug bits */ 3931 /* enable bits are the same for all generations */ 3932 hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915); 3933 /* Programming the CRT detection parameters tends 3934 to generate a spurious hotplug event about three 3935 seconds later. So just do it once. 3936 */ 3937 if (IS_G4X(dev_priv)) 3938 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64; 3939 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; 3940 3941 /* Ignore TV since it's buggy */ 3942 i915_hotplug_interrupt_update_locked(dev_priv, 3943 HOTPLUG_INT_EN_MASK | 3944 CRT_HOTPLUG_VOLTAGE_COMPARE_MASK | 3945 CRT_HOTPLUG_ACTIVATION_PERIOD_64, 3946 hotplug_en); 3947 } 3948 3949 static irqreturn_t i965_irq_handler(int irq, void *arg) 3950 { 3951 struct drm_i915_private *dev_priv = arg; 3952 irqreturn_t ret = IRQ_NONE; 3953 3954 if (!intel_irqs_enabled(dev_priv)) 3955 return IRQ_NONE; 3956 3957 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 3958 disable_rpm_wakeref_asserts(&dev_priv->runtime_pm); 3959 3960 do { 3961 u32 pipe_stats[I915_MAX_PIPES] = {}; 3962 u32 eir = 0, eir_stuck = 0; 3963 u32 hotplug_status = 0; 3964 u32 iir; 3965 3966 iir = I915_READ(GEN2_IIR); 3967 if (iir == 0) 3968 break; 3969 3970 ret = IRQ_HANDLED; 3971 3972 if (iir & I915_DISPLAY_PORT_INTERRUPT) 3973 hotplug_status = i9xx_hpd_irq_ack(dev_priv); 3974 3975 /* Call regardless, as some status bits might not be 3976 * signalled in iir */ 3977 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats); 3978 3979 if (iir & I915_MASTER_ERROR_INTERRUPT) 3980 i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck); 3981 3982 I915_WRITE(GEN2_IIR, iir); 3983 3984 if (iir & I915_USER_INTERRUPT) 3985 intel_engine_signal_breadcrumbs(dev_priv->gt.engine[RCS0]); 3986 3987 if (iir & I915_BSD_USER_INTERRUPT) 3988 intel_engine_signal_breadcrumbs(dev_priv->gt.engine[VCS0]); 3989 3990 if (iir & I915_MASTER_ERROR_INTERRUPT) 3991 i9xx_error_irq_handler(dev_priv, eir, eir_stuck); 3992 3993 if (hotplug_status) 3994 i9xx_hpd_irq_handler(dev_priv, hotplug_status); 3995 3996 i965_pipestat_irq_handler(dev_priv, iir, pipe_stats); 3997 } while (0); 3998 3999 enable_rpm_wakeref_asserts(&dev_priv->runtime_pm); 4000 4001 return ret; 4002 } 4003 4004 /** 4005 * intel_irq_init - initializes irq support 4006 * @dev_priv: i915 device instance 4007 * 4008 * This function initializes all the irq support including work items, timers 4009 * and all the vtables. It does not setup the interrupt itself though. 4010 */ 4011 void intel_irq_init(struct drm_i915_private *dev_priv) 4012 { 4013 struct drm_device *dev = &dev_priv->drm; 4014 int i; 4015 4016 intel_hpd_init_pins(dev_priv); 4017 4018 intel_hpd_init_work(dev_priv); 4019 4020 INIT_WORK(&dev_priv->l3_parity.error_work, ivb_parity_work); 4021 for (i = 0; i < MAX_L3_SLICES; ++i) 4022 dev_priv->l3_parity.remap_info[i] = NULL; 4023 4024 /* pre-gen11 the guc irqs bits are in the upper 16 bits of the pm reg */ 4025 if (HAS_GT_UC(dev_priv) && INTEL_GEN(dev_priv) < 11) 4026 dev_priv->gt.pm_guc_events = GUC_INTR_GUC2HOST << 16; 4027 4028 dev->vblank_disable_immediate = true; 4029 4030 /* Most platforms treat the display irq block as an always-on 4031 * power domain. vlv/chv can disable it at runtime and need 4032 * special care to avoid writing any of the display block registers 4033 * outside of the power domain. We defer setting up the display irqs 4034 * in this case to the runtime pm. 4035 */ 4036 dev_priv->display_irqs_enabled = true; 4037 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 4038 dev_priv->display_irqs_enabled = false; 4039 4040 dev_priv->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD; 4041 /* If we have MST support, we want to avoid doing short HPD IRQ storm 4042 * detection, as short HPD storms will occur as a natural part of 4043 * sideband messaging with MST. 4044 * On older platforms however, IRQ storms can occur with both long and 4045 * short pulses, as seen on some G4x systems. 4046 */ 4047 dev_priv->hotplug.hpd_short_storm_enabled = !HAS_DP_MST(dev_priv); 4048 4049 if (HAS_GMCH(dev_priv)) { 4050 if (I915_HAS_HOTPLUG(dev_priv)) 4051 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 4052 } else { 4053 if (HAS_PCH_JSP(dev_priv)) 4054 dev_priv->display.hpd_irq_setup = jsp_hpd_irq_setup; 4055 else if (HAS_PCH_MCC(dev_priv)) 4056 dev_priv->display.hpd_irq_setup = mcc_hpd_irq_setup; 4057 else if (INTEL_GEN(dev_priv) >= 11) 4058 dev_priv->display.hpd_irq_setup = gen11_hpd_irq_setup; 4059 else if (IS_GEN9_LP(dev_priv)) 4060 dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup; 4061 else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT) 4062 dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup; 4063 else 4064 dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup; 4065 } 4066 } 4067 4068 /** 4069 * intel_irq_fini - deinitializes IRQ support 4070 * @i915: i915 device instance 4071 * 4072 * This function deinitializes all the IRQ support. 4073 */ 4074 void intel_irq_fini(struct drm_i915_private *i915) 4075 { 4076 int i; 4077 4078 for (i = 0; i < MAX_L3_SLICES; ++i) 4079 kfree(i915->l3_parity.remap_info[i]); 4080 } 4081 4082 static irq_handler_t intel_irq_handler(struct drm_i915_private *dev_priv) 4083 { 4084 if (HAS_GMCH(dev_priv)) { 4085 if (IS_CHERRYVIEW(dev_priv)) 4086 return cherryview_irq_handler; 4087 else if (IS_VALLEYVIEW(dev_priv)) 4088 return valleyview_irq_handler; 4089 else if (IS_GEN(dev_priv, 4)) 4090 return i965_irq_handler; 4091 else if (IS_GEN(dev_priv, 3)) 4092 return i915_irq_handler; 4093 else 4094 return i8xx_irq_handler; 4095 } else { 4096 if (HAS_MASTER_UNIT_IRQ(dev_priv)) 4097 return dg1_irq_handler; 4098 if (INTEL_GEN(dev_priv) >= 11) 4099 return gen11_irq_handler; 4100 else if (INTEL_GEN(dev_priv) >= 8) 4101 return gen8_irq_handler; 4102 else 4103 return ilk_irq_handler; 4104 } 4105 } 4106 4107 static void intel_irq_reset(struct drm_i915_private *dev_priv) 4108 { 4109 if (HAS_GMCH(dev_priv)) { 4110 if (IS_CHERRYVIEW(dev_priv)) 4111 cherryview_irq_reset(dev_priv); 4112 else if (IS_VALLEYVIEW(dev_priv)) 4113 valleyview_irq_reset(dev_priv); 4114 else if (IS_GEN(dev_priv, 4)) 4115 i965_irq_reset(dev_priv); 4116 else if (IS_GEN(dev_priv, 3)) 4117 i915_irq_reset(dev_priv); 4118 else 4119 i8xx_irq_reset(dev_priv); 4120 } else { 4121 if (INTEL_GEN(dev_priv) >= 11) 4122 gen11_irq_reset(dev_priv); 4123 else if (INTEL_GEN(dev_priv) >= 8) 4124 gen8_irq_reset(dev_priv); 4125 else 4126 ilk_irq_reset(dev_priv); 4127 } 4128 } 4129 4130 static void intel_irq_postinstall(struct drm_i915_private *dev_priv) 4131 { 4132 if (HAS_GMCH(dev_priv)) { 4133 if (IS_CHERRYVIEW(dev_priv)) 4134 cherryview_irq_postinstall(dev_priv); 4135 else if (IS_VALLEYVIEW(dev_priv)) 4136 valleyview_irq_postinstall(dev_priv); 4137 else if (IS_GEN(dev_priv, 4)) 4138 i965_irq_postinstall(dev_priv); 4139 else if (IS_GEN(dev_priv, 3)) 4140 i915_irq_postinstall(dev_priv); 4141 else 4142 i8xx_irq_postinstall(dev_priv); 4143 } else { 4144 if (INTEL_GEN(dev_priv) >= 11) 4145 gen11_irq_postinstall(dev_priv); 4146 else if (INTEL_GEN(dev_priv) >= 8) 4147 gen8_irq_postinstall(dev_priv); 4148 else 4149 ilk_irq_postinstall(dev_priv); 4150 } 4151 } 4152 4153 /** 4154 * intel_irq_install - enables the hardware interrupt 4155 * @dev_priv: i915 device instance 4156 * 4157 * This function enables the hardware interrupt handling, but leaves the hotplug 4158 * handling still disabled. It is called after intel_irq_init(). 4159 * 4160 * In the driver load and resume code we need working interrupts in a few places 4161 * but don't want to deal with the hassle of concurrent probe and hotplug 4162 * workers. Hence the split into this two-stage approach. 4163 */ 4164 int intel_irq_install(struct drm_i915_private *dev_priv) 4165 { 4166 int irq = dev_priv->drm.pdev->irq; 4167 int ret; 4168 4169 /* 4170 * We enable some interrupt sources in our postinstall hooks, so mark 4171 * interrupts as enabled _before_ actually enabling them to avoid 4172 * special cases in our ordering checks. 4173 */ 4174 dev_priv->runtime_pm.irqs_enabled = true; 4175 4176 dev_priv->drm.irq_enabled = true; 4177 4178 intel_irq_reset(dev_priv); 4179 4180 ret = request_irq(irq, intel_irq_handler(dev_priv), 4181 IRQF_SHARED, DRIVER_NAME, dev_priv); 4182 if (ret < 0) { 4183 dev_priv->drm.irq_enabled = false; 4184 return ret; 4185 } 4186 4187 intel_irq_postinstall(dev_priv); 4188 4189 return ret; 4190 } 4191 4192 /** 4193 * intel_irq_uninstall - finilizes all irq handling 4194 * @dev_priv: i915 device instance 4195 * 4196 * This stops interrupt and hotplug handling and unregisters and frees all 4197 * resources acquired in the init functions. 4198 */ 4199 void intel_irq_uninstall(struct drm_i915_private *dev_priv) 4200 { 4201 int irq = dev_priv->drm.pdev->irq; 4202 4203 /* 4204 * FIXME we can get called twice during driver probe 4205 * error handling as well as during driver remove due to 4206 * intel_modeset_driver_remove() calling us out of sequence. 4207 * Would be nice if it didn't do that... 4208 */ 4209 if (!dev_priv->drm.irq_enabled) 4210 return; 4211 4212 dev_priv->drm.irq_enabled = false; 4213 4214 intel_irq_reset(dev_priv); 4215 4216 free_irq(irq, dev_priv); 4217 4218 intel_hpd_cancel_work(dev_priv); 4219 dev_priv->runtime_pm.irqs_enabled = false; 4220 } 4221 4222 /** 4223 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling 4224 * @dev_priv: i915 device instance 4225 * 4226 * This function is used to disable interrupts at runtime, both in the runtime 4227 * pm and the system suspend/resume code. 4228 */ 4229 void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv) 4230 { 4231 intel_irq_reset(dev_priv); 4232 dev_priv->runtime_pm.irqs_enabled = false; 4233 intel_synchronize_irq(dev_priv); 4234 } 4235 4236 /** 4237 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling 4238 * @dev_priv: i915 device instance 4239 * 4240 * This function is used to enable interrupts at runtime, both in the runtime 4241 * pm and the system suspend/resume code. 4242 */ 4243 void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv) 4244 { 4245 dev_priv->runtime_pm.irqs_enabled = true; 4246 intel_irq_reset(dev_priv); 4247 intel_irq_postinstall(dev_priv); 4248 } 4249 4250 bool intel_irqs_enabled(struct drm_i915_private *dev_priv) 4251 { 4252 /* 4253 * We only use drm_irq_uninstall() at unload and VT switch, so 4254 * this is the only thing we need to check. 4255 */ 4256 return dev_priv->runtime_pm.irqs_enabled; 4257 } 4258 4259 void intel_synchronize_irq(struct drm_i915_private *i915) 4260 { 4261 synchronize_irq(i915->drm.pdev->irq); 4262 } 4263