/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <drm/drm_irq.h>
#include <drm/drm_drv.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

/**
 * DOC: interrupt handling
 *
 * These functions provide the basic support for enabling and disabling
 * interrupt handling. There's a lot more functionality in i915_irq.c and
 * related files, but that will be described in separate chapters.
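 *
 * The hpd_* tables that follow map each hotplug (HPD) pin to the
 * platform specific trigger bits consumed by the hotplug handlers in
 * this file.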
47 */ 48 49 static const u32 hpd_ilk[HPD_NUM_PINS] = { 50 [HPD_PORT_A] = DE_DP_A_HOTPLUG, 51 }; 52 53 static const u32 hpd_ivb[HPD_NUM_PINS] = { 54 [HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB, 55 }; 56 57 static const u32 hpd_bdw[HPD_NUM_PINS] = { 58 [HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG, 59 }; 60 61 static const u32 hpd_ibx[HPD_NUM_PINS] = { 62 [HPD_CRT] = SDE_CRT_HOTPLUG, 63 [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG, 64 [HPD_PORT_B] = SDE_PORTB_HOTPLUG, 65 [HPD_PORT_C] = SDE_PORTC_HOTPLUG, 66 [HPD_PORT_D] = SDE_PORTD_HOTPLUG 67 }; 68 69 static const u32 hpd_cpt[HPD_NUM_PINS] = { 70 [HPD_CRT] = SDE_CRT_HOTPLUG_CPT, 71 [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT, 72 [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT, 73 [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT, 74 [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT 75 }; 76 77 static const u32 hpd_spt[HPD_NUM_PINS] = { 78 [HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT, 79 [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT, 80 [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT, 81 [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT, 82 [HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT 83 }; 84 85 static const u32 hpd_mask_i915[HPD_NUM_PINS] = { 86 [HPD_CRT] = CRT_HOTPLUG_INT_EN, 87 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN, 88 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN, 89 [HPD_PORT_B] = PORTB_HOTPLUG_INT_EN, 90 [HPD_PORT_C] = PORTC_HOTPLUG_INT_EN, 91 [HPD_PORT_D] = PORTD_HOTPLUG_INT_EN 92 }; 93 94 static const u32 hpd_status_g4x[HPD_NUM_PINS] = { 95 [HPD_CRT] = CRT_HOTPLUG_INT_STATUS, 96 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X, 97 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X, 98 [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS, 99 [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS, 100 [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS 101 }; 102 103 static const u32 hpd_status_i915[HPD_NUM_PINS] = { 104 [HPD_CRT] = CRT_HOTPLUG_INT_STATUS, 105 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915, 106 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915, 107 [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS, 108 [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS, 109 [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS 110 }; 111 112 /* BXT hpd list */ 113 static const u32 hpd_bxt[HPD_NUM_PINS] = { 114 [HPD_PORT_A] = BXT_DE_PORT_HP_DDIA, 115 [HPD_PORT_B] = BXT_DE_PORT_HP_DDIB, 116 [HPD_PORT_C] = BXT_DE_PORT_HP_DDIC 117 }; 118 119 static const u32 hpd_gen11[HPD_NUM_PINS] = { 120 [HPD_PORT_C] = GEN11_TC1_HOTPLUG | GEN11_TBT1_HOTPLUG, 121 [HPD_PORT_D] = GEN11_TC2_HOTPLUG | GEN11_TBT2_HOTPLUG, 122 [HPD_PORT_E] = GEN11_TC3_HOTPLUG | GEN11_TBT3_HOTPLUG, 123 [HPD_PORT_F] = GEN11_TC4_HOTPLUG | GEN11_TBT4_HOTPLUG 124 }; 125 126 static const u32 hpd_icp[HPD_NUM_PINS] = { 127 [HPD_PORT_A] = SDE_DDIA_HOTPLUG_ICP, 128 [HPD_PORT_B] = SDE_DDIB_HOTPLUG_ICP, 129 [HPD_PORT_C] = SDE_TC1_HOTPLUG_ICP, 130 [HPD_PORT_D] = SDE_TC2_HOTPLUG_ICP, 131 [HPD_PORT_E] = SDE_TC3_HOTPLUG_ICP, 132 [HPD_PORT_F] = SDE_TC4_HOTPLUG_ICP 133 }; 134 135 /* IIR can theoretically queue up two events. Be paranoid. 
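 * That is why each of the reset macros below clears IIR twice: the first
 * write retires the event currently latched, the second catches one that
 * may have been queued up behind it.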
*/ 136 #define GEN8_IRQ_RESET_NDX(type, which) do { \ 137 I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \ 138 POSTING_READ(GEN8_##type##_IMR(which)); \ 139 I915_WRITE(GEN8_##type##_IER(which), 0); \ 140 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \ 141 POSTING_READ(GEN8_##type##_IIR(which)); \ 142 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \ 143 POSTING_READ(GEN8_##type##_IIR(which)); \ 144 } while (0) 145 146 #define GEN3_IRQ_RESET(type) do { \ 147 I915_WRITE(type##IMR, 0xffffffff); \ 148 POSTING_READ(type##IMR); \ 149 I915_WRITE(type##IER, 0); \ 150 I915_WRITE(type##IIR, 0xffffffff); \ 151 POSTING_READ(type##IIR); \ 152 I915_WRITE(type##IIR, 0xffffffff); \ 153 POSTING_READ(type##IIR); \ 154 } while (0) 155 156 #define GEN2_IRQ_RESET(type) do { \ 157 I915_WRITE16(type##IMR, 0xffff); \ 158 POSTING_READ16(type##IMR); \ 159 I915_WRITE16(type##IER, 0); \ 160 I915_WRITE16(type##IIR, 0xffff); \ 161 POSTING_READ16(type##IIR); \ 162 I915_WRITE16(type##IIR, 0xffff); \ 163 POSTING_READ16(type##IIR); \ 164 } while (0) 165 166 /* 167 * We should clear IMR at preinstall/uninstall, and just check at postinstall. 168 */ 169 static void gen3_assert_iir_is_zero(struct drm_i915_private *dev_priv, 170 i915_reg_t reg) 171 { 172 u32 val = I915_READ(reg); 173 174 if (val == 0) 175 return; 176 177 WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n", 178 i915_mmio_reg_offset(reg), val); 179 I915_WRITE(reg, 0xffffffff); 180 POSTING_READ(reg); 181 I915_WRITE(reg, 0xffffffff); 182 POSTING_READ(reg); 183 } 184 185 static void gen2_assert_iir_is_zero(struct drm_i915_private *dev_priv, 186 i915_reg_t reg) 187 { 188 u16 val = I915_READ16(reg); 189 190 if (val == 0) 191 return; 192 193 WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n", 194 i915_mmio_reg_offset(reg), val); 195 I915_WRITE16(reg, 0xffff); 196 POSTING_READ16(reg); 197 I915_WRITE16(reg, 0xffff); 198 POSTING_READ16(reg); 199 } 200 201 #define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \ 202 gen3_assert_iir_is_zero(dev_priv, GEN8_##type##_IIR(which)); \ 203 I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \ 204 I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \ 205 POSTING_READ(GEN8_##type##_IMR(which)); \ 206 } while (0) 207 208 #define GEN3_IRQ_INIT(type, imr_val, ier_val) do { \ 209 gen3_assert_iir_is_zero(dev_priv, type##IIR); \ 210 I915_WRITE(type##IER, (ier_val)); \ 211 I915_WRITE(type##IMR, (imr_val)); \ 212 POSTING_READ(type##IMR); \ 213 } while (0) 214 215 #define GEN2_IRQ_INIT(type, imr_val, ier_val) do { \ 216 gen2_assert_iir_is_zero(dev_priv, type##IIR); \ 217 I915_WRITE16(type##IER, (ier_val)); \ 218 I915_WRITE16(type##IMR, (imr_val)); \ 219 POSTING_READ16(type##IMR); \ 220 } while (0) 221 222 static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir); 223 static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir); 224 225 /* For display hotplug interrupt */ 226 static inline void 227 i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv, 228 u32 mask, 229 u32 bits) 230 { 231 u32 val; 232 233 lockdep_assert_held(&dev_priv->irq_lock); 234 WARN_ON(bits & ~mask); 235 236 val = I915_READ(PORT_HOTPLUG_EN); 237 val &= ~mask; 238 val |= bits; 239 I915_WRITE(PORT_HOTPLUG_EN, val); 240 } 241 242 /** 243 * i915_hotplug_interrupt_update - update hotplug interrupt enable 244 * @dev_priv: driver private 245 * @mask: bits to update 246 * @bits: bits to enable 247 * NOTE: the HPD enable bits are modified both inside and outside 248 * of an interrupt context. 
To avoid read-modify-write cycles
 * interfering with each other, these bits are protected by a spinlock.
 * Since this function is usually not called from a context where the lock
 * is held already, this function acquires the lock itself. A non-locking
 * version is also available.
 */
void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
				   u32 mask,
				   u32 bits)
{
	spin_lock_irq(&dev_priv->irq_lock);
	i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
	spin_unlock_irq(&dev_priv->irq_lock);
}

static u32
gen11_gt_engine_identity(struct drm_i915_private * const i915,
			 const unsigned int bank, const unsigned int bit);

static bool gen11_reset_one_iir(struct drm_i915_private * const i915,
				const unsigned int bank,
				const unsigned int bit)
{
	void __iomem * const regs = i915->regs;
	u32 dw;

	lockdep_assert_held(&i915->irq_lock);

	dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
	if (dw & BIT(bit)) {
		/*
		 * According to the BSpec, DW_IIR bits cannot be cleared without
		 * first servicing the Selector & Shared IIR registers.
		 */
		gen11_gt_engine_identity(i915, bank, bit);

		/*
		 * We locked GT INT DW by reading it. If we want to (try
		 * to) recover from this successfully, we need to clear
		 * our bit, otherwise we are locking the register for
		 * everybody.
		 */
		raw_reg_write(regs, GEN11_GT_INTR_DW(bank), BIT(bit));

		return true;
	}

	return false;
}

/**
 * ilk_update_display_irq - update DEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ilk_update_display_irq(struct drm_i915_private *dev_priv,
			    u32 interrupt_mask,
			    u32 enabled_irq_mask)
{
	u32 new_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->irq_mask) {
		dev_priv->irq_mask = new_val;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
			      u32 interrupt_mask,
			      u32 enabled_irq_mask)
{
	lockdep_assert_held(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	dev_priv->gt_irq_mask &= ~interrupt_mask;
	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
}

void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	ilk_update_gt_irq(dev_priv, mask, mask);
	POSTING_READ_FW(GTIMR);
}

void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	ilk_update_gt_irq(dev_priv, mask, 0);
}

static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv)
{
	WARN_ON_ONCE(INTEL_GEN(dev_priv) >= 11);

	return INTEL_GEN(dev_priv) >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
}

static i915_reg_t gen6_pm_imr(struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) >= 11)
		return GEN11_GPM_WGBOXPERF_INTR_MASK;
	else if (INTEL_GEN(dev_priv) >= 8)
		return GEN8_GT_IMR(2);
	else
		return GEN6_PMIMR;
}

static i915_reg_t gen6_pm_ier(struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) >= 11)
		return GEN11_GPM_WGBOXPERF_INTR_ENABLE;
	else if (INTEL_GEN(dev_priv) >= 8)
		return GEN8_GT_IER(2);
	else
		return GEN6_PMIER;
}

/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
			      u32 interrupt_mask,
			      u32 enabled_irq_mask)
{
	u32 new_val;

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	lockdep_assert_held(&dev_priv->irq_lock);

	new_val = dev_priv->pm_imr;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_imr) {
		dev_priv->pm_imr = new_val;
		I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_imr);
		POSTING_READ(gen6_pm_imr(dev_priv));
	}
}

void gen6_unmask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	snb_update_pm_irq(dev_priv, mask, mask);
}

static void __gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	snb_update_pm_irq(dev_priv, mask, 0);
}

void gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	__gen6_mask_pm_irq(dev_priv, mask);
}

static void gen6_reset_pm_iir(struct drm_i915_private *dev_priv, u32 reset_mask)
{
	i915_reg_t reg = gen6_pm_iir(dev_priv);

	lockdep_assert_held(&dev_priv->irq_lock);

	I915_WRITE(reg, reset_mask);
	I915_WRITE(reg, reset_mask);
	POSTING_READ(reg);
}

static void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, u32 enable_mask)
{
	lockdep_assert_held(&dev_priv->irq_lock);

	dev_priv->pm_ier |= enable_mask;
	I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->pm_ier);
	gen6_unmask_pm_irq(dev_priv, enable_mask);
	/* unmask_pm_irq provides an implicit barrier (POSTING_READ) */
}

static void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, u32 disable_mask)
{
	lockdep_assert_held(&dev_priv->irq_lock);

	dev_priv->pm_ier &= ~disable_mask;
	__gen6_mask_pm_irq(dev_priv, disable_mask);
	I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->pm_ier);
	/* A barrier is missing here, but we don't really need one. */
}

void gen11_reset_rps_interrupts(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);

	while (gen11_reset_one_iir(dev_priv, 0, GEN11_GTPM))
		;

	dev_priv->gt_pm.rps.pm_iir = 0;

	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	gen6_reset_pm_iir(dev_priv, GEN6_PM_RPS_EVENTS);
	dev_priv->gt_pm.rps.pm_iir = 0;
	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
{
	struct intel_rps *rps = &dev_priv->gt_pm.rps;

	if
(READ_ONCE(rps->interrupts_enabled)) 492 return; 493 494 spin_lock_irq(&dev_priv->irq_lock); 495 WARN_ON_ONCE(rps->pm_iir); 496 497 if (INTEL_GEN(dev_priv) >= 11) 498 WARN_ON_ONCE(gen11_reset_one_iir(dev_priv, 0, GEN11_GTPM)); 499 else 500 WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events); 501 502 rps->interrupts_enabled = true; 503 gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events); 504 505 spin_unlock_irq(&dev_priv->irq_lock); 506 } 507 508 void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv) 509 { 510 struct intel_rps *rps = &dev_priv->gt_pm.rps; 511 512 if (!READ_ONCE(rps->interrupts_enabled)) 513 return; 514 515 spin_lock_irq(&dev_priv->irq_lock); 516 rps->interrupts_enabled = false; 517 518 I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0u)); 519 520 gen6_disable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS); 521 522 spin_unlock_irq(&dev_priv->irq_lock); 523 synchronize_irq(dev_priv->drm.irq); 524 525 /* Now that we will not be generating any more work, flush any 526 * outstanding tasks. As we are called on the RPS idle path, 527 * we will reset the GPU to minimum frequencies, so the current 528 * state of the worker can be discarded. 529 */ 530 cancel_work_sync(&rps->work); 531 if (INTEL_GEN(dev_priv) >= 11) 532 gen11_reset_rps_interrupts(dev_priv); 533 else 534 gen6_reset_rps_interrupts(dev_priv); 535 } 536 537 void gen9_reset_guc_interrupts(struct drm_i915_private *dev_priv) 538 { 539 assert_rpm_wakelock_held(dev_priv); 540 541 spin_lock_irq(&dev_priv->irq_lock); 542 gen6_reset_pm_iir(dev_priv, dev_priv->pm_guc_events); 543 spin_unlock_irq(&dev_priv->irq_lock); 544 } 545 546 void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv) 547 { 548 assert_rpm_wakelock_held(dev_priv); 549 550 spin_lock_irq(&dev_priv->irq_lock); 551 if (!dev_priv->guc.interrupts_enabled) { 552 WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & 553 dev_priv->pm_guc_events); 554 dev_priv->guc.interrupts_enabled = true; 555 gen6_enable_pm_irq(dev_priv, dev_priv->pm_guc_events); 556 } 557 spin_unlock_irq(&dev_priv->irq_lock); 558 } 559 560 void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv) 561 { 562 assert_rpm_wakelock_held(dev_priv); 563 564 spin_lock_irq(&dev_priv->irq_lock); 565 dev_priv->guc.interrupts_enabled = false; 566 567 gen6_disable_pm_irq(dev_priv, dev_priv->pm_guc_events); 568 569 spin_unlock_irq(&dev_priv->irq_lock); 570 synchronize_irq(dev_priv->drm.irq); 571 572 gen9_reset_guc_interrupts(dev_priv); 573 } 574 575 /** 576 * bdw_update_port_irq - update DE port interrupt 577 * @dev_priv: driver private 578 * @interrupt_mask: mask of interrupt bits to update 579 * @enabled_irq_mask: mask of interrupt bits to enable 580 */ 581 static void bdw_update_port_irq(struct drm_i915_private *dev_priv, 582 u32 interrupt_mask, 583 u32 enabled_irq_mask) 584 { 585 u32 new_val; 586 u32 old_val; 587 588 lockdep_assert_held(&dev_priv->irq_lock); 589 590 WARN_ON(enabled_irq_mask & ~interrupt_mask); 591 592 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 593 return; 594 595 old_val = I915_READ(GEN8_DE_PORT_IMR); 596 597 new_val = old_val; 598 new_val &= ~interrupt_mask; 599 new_val |= (~enabled_irq_mask & interrupt_mask); 600 601 if (new_val != old_val) { 602 I915_WRITE(GEN8_DE_PORT_IMR, new_val); 603 POSTING_READ(GEN8_DE_PORT_IMR); 604 } 605 } 606 607 /** 608 * bdw_update_pipe_irq - update DE pipe interrupt 609 * @dev_priv: driver private 610 * @pipe: pipe whose interrupt to update 611 * @interrupt_mask: mask of interrupt bits to update 612 * 
@enabled_irq_mask: mask of interrupt bits to enable 613 */ 614 void bdw_update_pipe_irq(struct drm_i915_private *dev_priv, 615 enum pipe pipe, 616 u32 interrupt_mask, 617 u32 enabled_irq_mask) 618 { 619 u32 new_val; 620 621 lockdep_assert_held(&dev_priv->irq_lock); 622 623 WARN_ON(enabled_irq_mask & ~interrupt_mask); 624 625 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 626 return; 627 628 new_val = dev_priv->de_irq_mask[pipe]; 629 new_val &= ~interrupt_mask; 630 new_val |= (~enabled_irq_mask & interrupt_mask); 631 632 if (new_val != dev_priv->de_irq_mask[pipe]) { 633 dev_priv->de_irq_mask[pipe] = new_val; 634 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); 635 POSTING_READ(GEN8_DE_PIPE_IMR(pipe)); 636 } 637 } 638 639 /** 640 * ibx_display_interrupt_update - update SDEIMR 641 * @dev_priv: driver private 642 * @interrupt_mask: mask of interrupt bits to update 643 * @enabled_irq_mask: mask of interrupt bits to enable 644 */ 645 void ibx_display_interrupt_update(struct drm_i915_private *dev_priv, 646 u32 interrupt_mask, 647 u32 enabled_irq_mask) 648 { 649 u32 sdeimr = I915_READ(SDEIMR); 650 sdeimr &= ~interrupt_mask; 651 sdeimr |= (~enabled_irq_mask & interrupt_mask); 652 653 WARN_ON(enabled_irq_mask & ~interrupt_mask); 654 655 lockdep_assert_held(&dev_priv->irq_lock); 656 657 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 658 return; 659 660 I915_WRITE(SDEIMR, sdeimr); 661 POSTING_READ(SDEIMR); 662 } 663 664 u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv, 665 enum pipe pipe) 666 { 667 u32 status_mask = dev_priv->pipestat_irq_mask[pipe]; 668 u32 enable_mask = status_mask << 16; 669 670 lockdep_assert_held(&dev_priv->irq_lock); 671 672 if (INTEL_GEN(dev_priv) < 5) 673 goto out; 674 675 /* 676 * On pipe A we don't support the PSR interrupt yet, 677 * on pipe B and C the same bit MBZ. 678 */ 679 if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV)) 680 return 0; 681 /* 682 * On pipe B and C we don't support the PSR interrupt yet, on pipe 683 * A the same bit is for perf counters which we don't use either. 
684 */ 685 if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV)) 686 return 0; 687 688 enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS | 689 SPRITE0_FLIP_DONE_INT_EN_VLV | 690 SPRITE1_FLIP_DONE_INT_EN_VLV); 691 if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV) 692 enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV; 693 if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV) 694 enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV; 695 696 out: 697 WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK || 698 status_mask & ~PIPESTAT_INT_STATUS_MASK, 699 "pipe %c: enable_mask=0x%x, status_mask=0x%x\n", 700 pipe_name(pipe), enable_mask, status_mask); 701 702 return enable_mask; 703 } 704 705 void i915_enable_pipestat(struct drm_i915_private *dev_priv, 706 enum pipe pipe, u32 status_mask) 707 { 708 i915_reg_t reg = PIPESTAT(pipe); 709 u32 enable_mask; 710 711 WARN_ONCE(status_mask & ~PIPESTAT_INT_STATUS_MASK, 712 "pipe %c: status_mask=0x%x\n", 713 pipe_name(pipe), status_mask); 714 715 lockdep_assert_held(&dev_priv->irq_lock); 716 WARN_ON(!intel_irqs_enabled(dev_priv)); 717 718 if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == status_mask) 719 return; 720 721 dev_priv->pipestat_irq_mask[pipe] |= status_mask; 722 enable_mask = i915_pipestat_enable_mask(dev_priv, pipe); 723 724 I915_WRITE(reg, enable_mask | status_mask); 725 POSTING_READ(reg); 726 } 727 728 void i915_disable_pipestat(struct drm_i915_private *dev_priv, 729 enum pipe pipe, u32 status_mask) 730 { 731 i915_reg_t reg = PIPESTAT(pipe); 732 u32 enable_mask; 733 734 WARN_ONCE(status_mask & ~PIPESTAT_INT_STATUS_MASK, 735 "pipe %c: status_mask=0x%x\n", 736 pipe_name(pipe), status_mask); 737 738 lockdep_assert_held(&dev_priv->irq_lock); 739 WARN_ON(!intel_irqs_enabled(dev_priv)); 740 741 if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == 0) 742 return; 743 744 dev_priv->pipestat_irq_mask[pipe] &= ~status_mask; 745 enable_mask = i915_pipestat_enable_mask(dev_priv, pipe); 746 747 I915_WRITE(reg, enable_mask | status_mask); 748 POSTING_READ(reg); 749 } 750 751 /** 752 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion 753 * @dev_priv: i915 device private 754 */ 755 static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv) 756 { 757 if (!dev_priv->opregion.asle || !IS_MOBILE(dev_priv)) 758 return; 759 760 spin_lock_irq(&dev_priv->irq_lock); 761 762 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS); 763 if (INTEL_GEN(dev_priv) >= 4) 764 i915_enable_pipestat(dev_priv, PIPE_A, 765 PIPE_LEGACY_BLC_EVENT_STATUS); 766 767 spin_unlock_irq(&dev_priv->irq_lock); 768 } 769 770 /* 771 * This timing diagram depicts the video signal in and 772 * around the vertical blanking period. 773 * 774 * Assumptions about the fictitious mode used in this example: 775 * vblank_start >= 3 776 * vsync_start = vblank_start + 1 777 * vsync_end = vblank_start + 2 778 * vtotal = vblank_start + 3 779 * 780 * start of vblank: 781 * latch double buffered registers 782 * increment frame counter (ctg+) 783 * generate start of vblank interrupt (gen4+) 784 * | 785 * | frame start: 786 * | generate frame start interrupt (aka. vblank interrupt) (gmch) 787 * | may be shifted forward 1-3 extra lines via PIPECONF 788 * | | 789 * | | start of vsync: 790 * | | generate vsync interrupt 791 * | | | 792 * ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx 793 * . \hs/ . \hs/ \hs/ \hs/ . 
\hs/ 794 * ----va---> <-----------------vb--------------------> <--------va------------- 795 * | | <----vs-----> | 796 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2) 797 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+) 798 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi) 799 * | | | 800 * last visible pixel first visible pixel 801 * | increment frame counter (gen3/4) 802 * pixel counter = vblank_start * htotal pixel counter = 0 (gen3/4) 803 * 804 * x = horizontal active 805 * _ = horizontal blanking 806 * hs = horizontal sync 807 * va = vertical active 808 * vb = vertical blanking 809 * vs = vertical sync 810 * vbs = vblank_start (number) 811 * 812 * Summary: 813 * - most events happen at the start of horizontal sync 814 * - frame start happens at the start of horizontal blank, 1-4 lines 815 * (depending on PIPECONF settings) after the start of vblank 816 * - gen3/4 pixel and frame counter are synchronized with the start 817 * of horizontal active on the first line of vertical active 818 */ 819 820 /* Called from drm generic code, passed a 'crtc', which 821 * we use as a pipe index 822 */ 823 static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe) 824 { 825 struct drm_i915_private *dev_priv = to_i915(dev); 826 struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; 827 const struct drm_display_mode *mode = &vblank->hwmode; 828 i915_reg_t high_frame, low_frame; 829 u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal; 830 unsigned long irqflags; 831 832 /* 833 * On i965gm TV output the frame counter only works up to 834 * the point when we enable the TV encoder. After that the 835 * frame counter ceases to work and reads zero. We need a 836 * vblank wait before enabling the TV encoder and so we 837 * have to enable vblank interrupts while the frame counter 838 * is still in a working state. However the core vblank code 839 * does not like us returning non-zero frame counter values 840 * when we've told it that we don't have a working frame 841 * counter. Thus we must stop non-zero values leaking out. 842 */ 843 if (!vblank->max_vblank_count) 844 return 0; 845 846 htotal = mode->crtc_htotal; 847 hsync_start = mode->crtc_hsync_start; 848 vbl_start = mode->crtc_vblank_start; 849 if (mode->flags & DRM_MODE_FLAG_INTERLACE) 850 vbl_start = DIV_ROUND_UP(vbl_start, 2); 851 852 /* Convert to pixel count */ 853 vbl_start *= htotal; 854 855 /* Start of vblank event occurs at start of hsync */ 856 vbl_start -= htotal - hsync_start; 857 858 high_frame = PIPEFRAME(pipe); 859 low_frame = PIPEFRAMEPIXEL(pipe); 860 861 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 862 863 /* 864 * High & low register fields aren't synchronized, so make sure 865 * we get a low value that's stable across two reads of the high 866 * register. 867 */ 868 do { 869 high1 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK; 870 low = I915_READ_FW(low_frame); 871 high2 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK; 872 } while (high1 != high2); 873 874 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 875 876 high1 >>= PIPE_FRAME_HIGH_SHIFT; 877 pixel = low & PIPE_PIXEL_MASK; 878 low >>= PIPE_FRAME_LOW_SHIFT; 879 880 /* 881 * The frame counter increments at beginning of active. 882 * Cook up a vblank counter by also checking the pixel 883 * counter against vblank start. 
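	 * Adding '(pixel >= vbl_start)' below bumps the count by one once the
	 * pixel counter has crossed into vblank, so the cooked counter rolls
	 * over at vblank start rather than at the start of active.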
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}

static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
}

/*
 * On certain encoders on certain platforms, the pipe scanline register
 * cannot be used to get the scanline, either because the timings are
 * driven from the PORT or because of issues with scanline register
 * updates. This function instead uses the framestamp and current
 * timestamp registers to calculate the scanline.
 */
static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct drm_vblank_crtc *vblank =
		&crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
	const struct drm_display_mode *mode = &vblank->hwmode;
	u32 vblank_start = mode->crtc_vblank_start;
	u32 vtotal = mode->crtc_vtotal;
	u32 htotal = mode->crtc_htotal;
	u32 clock = mode->crtc_clock;
	u32 scanline, scan_prev_time, scan_curr_time, scan_post_time;

	/*
	 * To avoid the race condition where we might cross into the
	 * next vblank just between the PIPE_FRMTMSTMP and TIMESTAMP_CTR
	 * reads, we make sure we read PIPE_FRMTMSTMP and TIMESTAMP_CTR
	 * during the same frame.
	 */
	do {
		/*
		 * This field provides read back of the display
		 * pipe frame time stamp. The time stamp value
		 * is sampled at every start of vertical blank.
		 */
		scan_prev_time = I915_READ_FW(PIPE_FRMTMSTMP(crtc->pipe));

		/*
		 * The TIMESTAMP_CTR register has the current
		 * time stamp value.
		 */
		scan_curr_time = I915_READ_FW(IVB_TIMESTAMP_CTR);

		scan_post_time = I915_READ_FW(PIPE_FRMTMSTMP(crtc->pipe));
	} while (scan_post_time != scan_prev_time);

	scanline = div_u64(mul_u32_u32(scan_curr_time - scan_prev_time,
				       clock), 1000 * htotal);
	scanline = min(scanline, vtotal - 1);
	scanline = (scanline + vblank_start) % vtotal;

	return scanline;
}

/* I915_READ_FW, only for fast reads of display block, no need for forcewake etc. */
static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct drm_display_mode *mode;
	struct drm_vblank_crtc *vblank;
	enum pipe pipe = crtc->pipe;
	int position, vtotal;

	if (!crtc->active)
		return -1;

	vblank = &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
	mode = &vblank->hwmode;

	if (mode->private_flags & I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP)
		return __intel_get_crtc_scanline_from_timestamp(crtc);

	vtotal = mode->crtc_vtotal;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vtotal /= 2;

	if (IS_GEN(dev_priv, 2))
		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
	else
		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

	/*
	 * On HSW, the DSL reg (0x70000) appears to return 0 if we
	 * read it just before the start of vblank. So try it again
	 * so we don't accidentally end up spanning a vblank frame
	 * increment, causing the pipe_update_end() code to squawk at us.
	 *
	 * The nature of this problem means we can't simply check the ISR
	 * bit and return the vblank start value; nor can we use the scanline
	 * debug register in the transcoder as it appears to have the same
	 * problem. We may need to extend this to include other platforms,
	 * but so far testing only shows the problem on HSW.
	 */
	if (HAS_DDI(dev_priv) && !position) {
		int i, temp;

		for (i = 0; i < 100; i++) {
			udelay(1);
			temp = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
			if (temp != position) {
				position = temp;
				break;
			}
		}
	}

	/*
	 * See update_scanline_offset() for the details on the
	 * scanline_offset adjustment.
	 */
	return (position + crtc->scanline_offset) % vtotal;
}

static bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
				     bool in_vblank_irq, int *vpos, int *hpos,
				     ktime_t *stime, ktime_t *etime,
				     const struct drm_display_mode *mode)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv,
								pipe);
	int position;
	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
	unsigned long irqflags;
	bool use_scanline_counter = INTEL_GEN(dev_priv) >= 5 ||
		IS_G4X(dev_priv) || IS_GEN(dev_priv, 2) ||
		mode->private_flags & I915_MODE_FLAG_USE_SCANLINE_COUNTER;

	if (WARN_ON(!mode->crtc_clock)) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return false;
	}

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (use_scanline_counter) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = __intel_get_crtc_scanline(intel_crtc);
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (I915_READ_FW(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;

		/*
		 * In interlaced modes, the pixel counter counts all pixels,
		 * so one field will have htotal more pixels. To keep the
		 * reported position from jumping backwards when the pixel
		 * counter is beyond the length of the shorter field, just
		 * clamp the position to the length of the shorter field. This
		 * matches how the scanline counter based position works since
		 * the scanline counter doesn't count the two half lines.
1077 */ 1078 if (position >= vtotal) 1079 position = vtotal - 1; 1080 1081 /* 1082 * Start of vblank interrupt is triggered at start of hsync, 1083 * just prior to the first active line of vblank. However we 1084 * consider lines to start at the leading edge of horizontal 1085 * active. So, should we get here before we've crossed into 1086 * the horizontal active of the first line in vblank, we would 1087 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that, 1088 * always add htotal-hsync_start to the current pixel position. 1089 */ 1090 position = (position + htotal - hsync_start) % vtotal; 1091 } 1092 1093 /* Get optional system timestamp after query. */ 1094 if (etime) 1095 *etime = ktime_get(); 1096 1097 /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */ 1098 1099 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 1100 1101 /* 1102 * While in vblank, position will be negative 1103 * counting up towards 0 at vbl_end. And outside 1104 * vblank, position will be positive counting 1105 * up since vbl_end. 1106 */ 1107 if (position >= vbl_start) 1108 position -= vbl_end; 1109 else 1110 position += vtotal - vbl_end; 1111 1112 if (use_scanline_counter) { 1113 *vpos = position; 1114 *hpos = 0; 1115 } else { 1116 *vpos = position / htotal; 1117 *hpos = position - (*vpos * htotal); 1118 } 1119 1120 return true; 1121 } 1122 1123 int intel_get_crtc_scanline(struct intel_crtc *crtc) 1124 { 1125 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1126 unsigned long irqflags; 1127 int position; 1128 1129 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 1130 position = __intel_get_crtc_scanline(crtc); 1131 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 1132 1133 return position; 1134 } 1135 1136 static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv) 1137 { 1138 u32 busy_up, busy_down, max_avg, min_avg; 1139 u8 new_delay; 1140 1141 spin_lock(&mchdev_lock); 1142 1143 I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS)); 1144 1145 new_delay = dev_priv->ips.cur_delay; 1146 1147 I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG); 1148 busy_up = I915_READ(RCPREVBSYTUPAVG); 1149 busy_down = I915_READ(RCPREVBSYTDNAVG); 1150 max_avg = I915_READ(RCBMAXAVG); 1151 min_avg = I915_READ(RCBMINAVG); 1152 1153 /* Handle RCS change request from hw */ 1154 if (busy_up > max_avg) { 1155 if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay) 1156 new_delay = dev_priv->ips.cur_delay - 1; 1157 if (new_delay < dev_priv->ips.max_delay) 1158 new_delay = dev_priv->ips.max_delay; 1159 } else if (busy_down < min_avg) { 1160 if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay) 1161 new_delay = dev_priv->ips.cur_delay + 1; 1162 if (new_delay > dev_priv->ips.min_delay) 1163 new_delay = dev_priv->ips.min_delay; 1164 } 1165 1166 if (ironlake_set_drps(dev_priv, new_delay)) 1167 dev_priv->ips.cur_delay = new_delay; 1168 1169 spin_unlock(&mchdev_lock); 1170 1171 return; 1172 } 1173 1174 static void vlv_c0_read(struct drm_i915_private *dev_priv, 1175 struct intel_rps_ei *ei) 1176 { 1177 ei->ktime = ktime_get_raw(); 1178 ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT); 1179 ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT); 1180 } 1181 1182 void gen6_rps_reset_ei(struct drm_i915_private *dev_priv) 1183 { 1184 memset(&dev_priv->gt_pm.rps.ei, 0, sizeof(dev_priv->gt_pm.rps.ei)); 1185 } 1186 1187 static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir) 1188 { 1189 struct intel_rps *rps = &dev_priv->gt_pm.rps; 1190 const struct intel_rps_ei *prev = &rps->ei; 1191 
struct intel_rps_ei now; 1192 u32 events = 0; 1193 1194 if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0) 1195 return 0; 1196 1197 vlv_c0_read(dev_priv, &now); 1198 1199 if (prev->ktime) { 1200 u64 time, c0; 1201 u32 render, media; 1202 1203 time = ktime_us_delta(now.ktime, prev->ktime); 1204 1205 time *= dev_priv->czclk_freq; 1206 1207 /* Workload can be split between render + media, 1208 * e.g. SwapBuffers being blitted in X after being rendered in 1209 * mesa. To account for this we need to combine both engines 1210 * into our activity counter. 1211 */ 1212 render = now.render_c0 - prev->render_c0; 1213 media = now.media_c0 - prev->media_c0; 1214 c0 = max(render, media); 1215 c0 *= 1000 * 100 << 8; /* to usecs and scale to threshold% */ 1216 1217 if (c0 > time * rps->power.up_threshold) 1218 events = GEN6_PM_RP_UP_THRESHOLD; 1219 else if (c0 < time * rps->power.down_threshold) 1220 events = GEN6_PM_RP_DOWN_THRESHOLD; 1221 } 1222 1223 rps->ei = now; 1224 return events; 1225 } 1226 1227 static void gen6_pm_rps_work(struct work_struct *work) 1228 { 1229 struct drm_i915_private *dev_priv = 1230 container_of(work, struct drm_i915_private, gt_pm.rps.work); 1231 struct intel_rps *rps = &dev_priv->gt_pm.rps; 1232 bool client_boost = false; 1233 int new_delay, adj, min, max; 1234 u32 pm_iir = 0; 1235 1236 spin_lock_irq(&dev_priv->irq_lock); 1237 if (rps->interrupts_enabled) { 1238 pm_iir = fetch_and_zero(&rps->pm_iir); 1239 client_boost = atomic_read(&rps->num_waiters); 1240 } 1241 spin_unlock_irq(&dev_priv->irq_lock); 1242 1243 /* Make sure we didn't queue anything we're not going to process. */ 1244 WARN_ON(pm_iir & ~dev_priv->pm_rps_events); 1245 if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost) 1246 goto out; 1247 1248 mutex_lock(&dev_priv->pcu_lock); 1249 1250 pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir); 1251 1252 adj = rps->last_adj; 1253 new_delay = rps->cur_freq; 1254 min = rps->min_freq_softlimit; 1255 max = rps->max_freq_softlimit; 1256 if (client_boost) 1257 max = rps->max_freq; 1258 if (client_boost && new_delay < rps->boost_freq) { 1259 new_delay = rps->boost_freq; 1260 adj = 0; 1261 } else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) { 1262 if (adj > 0) 1263 adj *= 2; 1264 else /* CHV needs even encode values */ 1265 adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1; 1266 1267 if (new_delay >= rps->max_freq_softlimit) 1268 adj = 0; 1269 } else if (client_boost) { 1270 adj = 0; 1271 } else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) { 1272 if (rps->cur_freq > rps->efficient_freq) 1273 new_delay = rps->efficient_freq; 1274 else if (rps->cur_freq > rps->min_freq_softlimit) 1275 new_delay = rps->min_freq_softlimit; 1276 adj = 0; 1277 } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) { 1278 if (adj < 0) 1279 adj *= 2; 1280 else /* CHV needs even encode values */ 1281 adj = IS_CHERRYVIEW(dev_priv) ? 
-2 : -1;

		if (new_delay <= rps->min_freq_softlimit)
			adj = 0;
	} else { /* unknown event */
		adj = 0;
	}

	rps->last_adj = adj;

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	new_delay += adj;
	new_delay = clamp_t(int, new_delay, min, max);

	if (intel_set_rps(dev_priv, new_delay)) {
		DRM_DEBUG_DRIVER("Failed to set new GPU frequency\n");
		rps->last_adj = 0;
	}

	mutex_unlock(&dev_priv->pcu_lock);

out:
	/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
	spin_lock_irq(&dev_priv->irq_lock);
	if (rps->interrupts_enabled)
		gen6_unmask_pm_irq(dev_priv, dev_priv->pm_rps_events);
	spin_unlock_irq(&dev_priv->irq_lock);
}


/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * the same row is likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	u32 misccpctl;
	u8 slice = 0;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->drm.struct_mutex);

	/* If we've screwed up tracking, just let the interrupt fire again */
	if (WARN_ON(!dev_priv->l3_parity.which_slice))
		goto out;

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		i915_reg_t reg;

		slice--;
		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1<<slice);

		reg = GEN7_L3CDERRST1(slice);

		error_status = I915_READ(reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		POSTING_READ(reg);

		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

		kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj,
				   KOBJ_CHANGE, parity_event);

		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			  slice, row, bank, subbank);

		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
	}

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

out:
	WARN_ON(dev_priv->l3_parity.which_slice);
	spin_lock_irq(&dev_priv->irq_lock);
	gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
spin_unlock_irq(&dev_priv->irq_lock); 1390 1391 mutex_unlock(&dev_priv->drm.struct_mutex); 1392 } 1393 1394 static void ivybridge_parity_error_irq_handler(struct drm_i915_private *dev_priv, 1395 u32 iir) 1396 { 1397 if (!HAS_L3_DPF(dev_priv)) 1398 return; 1399 1400 spin_lock(&dev_priv->irq_lock); 1401 gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv)); 1402 spin_unlock(&dev_priv->irq_lock); 1403 1404 iir &= GT_PARITY_ERROR(dev_priv); 1405 if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1) 1406 dev_priv->l3_parity.which_slice |= 1 << 1; 1407 1408 if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT) 1409 dev_priv->l3_parity.which_slice |= 1 << 0; 1410 1411 queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work); 1412 } 1413 1414 static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv, 1415 u32 gt_iir) 1416 { 1417 if (gt_iir & GT_RENDER_USER_INTERRUPT) 1418 intel_engine_breadcrumbs_irq(dev_priv->engine[RCS]); 1419 if (gt_iir & ILK_BSD_USER_INTERRUPT) 1420 intel_engine_breadcrumbs_irq(dev_priv->engine[VCS]); 1421 } 1422 1423 static void snb_gt_irq_handler(struct drm_i915_private *dev_priv, 1424 u32 gt_iir) 1425 { 1426 if (gt_iir & GT_RENDER_USER_INTERRUPT) 1427 intel_engine_breadcrumbs_irq(dev_priv->engine[RCS]); 1428 if (gt_iir & GT_BSD_USER_INTERRUPT) 1429 intel_engine_breadcrumbs_irq(dev_priv->engine[VCS]); 1430 if (gt_iir & GT_BLT_USER_INTERRUPT) 1431 intel_engine_breadcrumbs_irq(dev_priv->engine[BCS]); 1432 1433 if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT | 1434 GT_BSD_CS_ERROR_INTERRUPT | 1435 GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) 1436 DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir); 1437 1438 if (gt_iir & GT_PARITY_ERROR(dev_priv)) 1439 ivybridge_parity_error_irq_handler(dev_priv, gt_iir); 1440 } 1441 1442 static void 1443 gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir) 1444 { 1445 bool tasklet = false; 1446 1447 if (iir & GT_CONTEXT_SWITCH_INTERRUPT) 1448 tasklet = true; 1449 1450 if (iir & GT_RENDER_USER_INTERRUPT) { 1451 intel_engine_breadcrumbs_irq(engine); 1452 tasklet |= USES_GUC_SUBMISSION(engine->i915); 1453 } 1454 1455 if (tasklet) 1456 tasklet_hi_schedule(&engine->execlists.tasklet); 1457 } 1458 1459 static void gen8_gt_irq_ack(struct drm_i915_private *i915, 1460 u32 master_ctl, u32 gt_iir[4]) 1461 { 1462 void __iomem * const regs = i915->regs; 1463 1464 #define GEN8_GT_IRQS (GEN8_GT_RCS_IRQ | \ 1465 GEN8_GT_BCS_IRQ | \ 1466 GEN8_GT_VCS1_IRQ | \ 1467 GEN8_GT_VCS2_IRQ | \ 1468 GEN8_GT_VECS_IRQ | \ 1469 GEN8_GT_PM_IRQ | \ 1470 GEN8_GT_GUC_IRQ) 1471 1472 if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) { 1473 gt_iir[0] = raw_reg_read(regs, GEN8_GT_IIR(0)); 1474 if (likely(gt_iir[0])) 1475 raw_reg_write(regs, GEN8_GT_IIR(0), gt_iir[0]); 1476 } 1477 1478 if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) { 1479 gt_iir[1] = raw_reg_read(regs, GEN8_GT_IIR(1)); 1480 if (likely(gt_iir[1])) 1481 raw_reg_write(regs, GEN8_GT_IIR(1), gt_iir[1]); 1482 } 1483 1484 if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) { 1485 gt_iir[2] = raw_reg_read(regs, GEN8_GT_IIR(2)); 1486 if (likely(gt_iir[2])) 1487 raw_reg_write(regs, GEN8_GT_IIR(2), gt_iir[2]); 1488 } 1489 1490 if (master_ctl & GEN8_GT_VECS_IRQ) { 1491 gt_iir[3] = raw_reg_read(regs, GEN8_GT_IIR(3)); 1492 if (likely(gt_iir[3])) 1493 raw_reg_write(regs, GEN8_GT_IIR(3), gt_iir[3]); 1494 } 1495 } 1496 1497 static void gen8_gt_irq_handler(struct drm_i915_private *i915, 1498 u32 master_ctl, u32 gt_iir[4]) 1499 { 1500 if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) { 1501 
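		/*
		 * GT IIR(0) carries both the RCS and BCS events; hand each
		 * engine the slice of bits sitting at its IRQ shift.
		 */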
gen8_cs_irq_handler(i915->engine[RCS], 1502 gt_iir[0] >> GEN8_RCS_IRQ_SHIFT); 1503 gen8_cs_irq_handler(i915->engine[BCS], 1504 gt_iir[0] >> GEN8_BCS_IRQ_SHIFT); 1505 } 1506 1507 if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) { 1508 gen8_cs_irq_handler(i915->engine[VCS], 1509 gt_iir[1] >> GEN8_VCS1_IRQ_SHIFT); 1510 gen8_cs_irq_handler(i915->engine[VCS2], 1511 gt_iir[1] >> GEN8_VCS2_IRQ_SHIFT); 1512 } 1513 1514 if (master_ctl & GEN8_GT_VECS_IRQ) { 1515 gen8_cs_irq_handler(i915->engine[VECS], 1516 gt_iir[3] >> GEN8_VECS_IRQ_SHIFT); 1517 } 1518 1519 if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) { 1520 gen6_rps_irq_handler(i915, gt_iir[2]); 1521 gen9_guc_irq_handler(i915, gt_iir[2]); 1522 } 1523 } 1524 1525 static bool gen11_port_hotplug_long_detect(enum hpd_pin pin, u32 val) 1526 { 1527 switch (pin) { 1528 case HPD_PORT_C: 1529 return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC1); 1530 case HPD_PORT_D: 1531 return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC2); 1532 case HPD_PORT_E: 1533 return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC3); 1534 case HPD_PORT_F: 1535 return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC4); 1536 default: 1537 return false; 1538 } 1539 } 1540 1541 static bool bxt_port_hotplug_long_detect(enum hpd_pin pin, u32 val) 1542 { 1543 switch (pin) { 1544 case HPD_PORT_A: 1545 return val & PORTA_HOTPLUG_LONG_DETECT; 1546 case HPD_PORT_B: 1547 return val & PORTB_HOTPLUG_LONG_DETECT; 1548 case HPD_PORT_C: 1549 return val & PORTC_HOTPLUG_LONG_DETECT; 1550 default: 1551 return false; 1552 } 1553 } 1554 1555 static bool icp_ddi_port_hotplug_long_detect(enum hpd_pin pin, u32 val) 1556 { 1557 switch (pin) { 1558 case HPD_PORT_A: 1559 return val & ICP_DDIA_HPD_LONG_DETECT; 1560 case HPD_PORT_B: 1561 return val & ICP_DDIB_HPD_LONG_DETECT; 1562 default: 1563 return false; 1564 } 1565 } 1566 1567 static bool icp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val) 1568 { 1569 switch (pin) { 1570 case HPD_PORT_C: 1571 return val & ICP_TC_HPD_LONG_DETECT(PORT_TC1); 1572 case HPD_PORT_D: 1573 return val & ICP_TC_HPD_LONG_DETECT(PORT_TC2); 1574 case HPD_PORT_E: 1575 return val & ICP_TC_HPD_LONG_DETECT(PORT_TC3); 1576 case HPD_PORT_F: 1577 return val & ICP_TC_HPD_LONG_DETECT(PORT_TC4); 1578 default: 1579 return false; 1580 } 1581 } 1582 1583 static bool spt_port_hotplug2_long_detect(enum hpd_pin pin, u32 val) 1584 { 1585 switch (pin) { 1586 case HPD_PORT_E: 1587 return val & PORTE_HOTPLUG_LONG_DETECT; 1588 default: 1589 return false; 1590 } 1591 } 1592 1593 static bool spt_port_hotplug_long_detect(enum hpd_pin pin, u32 val) 1594 { 1595 switch (pin) { 1596 case HPD_PORT_A: 1597 return val & PORTA_HOTPLUG_LONG_DETECT; 1598 case HPD_PORT_B: 1599 return val & PORTB_HOTPLUG_LONG_DETECT; 1600 case HPD_PORT_C: 1601 return val & PORTC_HOTPLUG_LONG_DETECT; 1602 case HPD_PORT_D: 1603 return val & PORTD_HOTPLUG_LONG_DETECT; 1604 default: 1605 return false; 1606 } 1607 } 1608 1609 static bool ilk_port_hotplug_long_detect(enum hpd_pin pin, u32 val) 1610 { 1611 switch (pin) { 1612 case HPD_PORT_A: 1613 return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT; 1614 default: 1615 return false; 1616 } 1617 } 1618 1619 static bool pch_port_hotplug_long_detect(enum hpd_pin pin, u32 val) 1620 { 1621 switch (pin) { 1622 case HPD_PORT_B: 1623 return val & PORTB_HOTPLUG_LONG_DETECT; 1624 case HPD_PORT_C: 1625 return val & PORTC_HOTPLUG_LONG_DETECT; 1626 case HPD_PORT_D: 1627 return val & PORTD_HOTPLUG_LONG_DETECT; 1628 default: 1629 return false; 1630 } 1631 } 1632 1633 static bool 
i9xx_port_hotplug_long_detect(enum hpd_pin pin, u32 val) 1634 { 1635 switch (pin) { 1636 case HPD_PORT_B: 1637 return val & PORTB_HOTPLUG_INT_LONG_PULSE; 1638 case HPD_PORT_C: 1639 return val & PORTC_HOTPLUG_INT_LONG_PULSE; 1640 case HPD_PORT_D: 1641 return val & PORTD_HOTPLUG_INT_LONG_PULSE; 1642 default: 1643 return false; 1644 } 1645 } 1646 1647 /* 1648 * Get a bit mask of pins that have triggered, and which ones may be long. 1649 * This can be called multiple times with the same masks to accumulate 1650 * hotplug detection results from several registers. 1651 * 1652 * Note that the caller is expected to zero out the masks initially. 1653 */ 1654 static void intel_get_hpd_pins(struct drm_i915_private *dev_priv, 1655 u32 *pin_mask, u32 *long_mask, 1656 u32 hotplug_trigger, u32 dig_hotplug_reg, 1657 const u32 hpd[HPD_NUM_PINS], 1658 bool long_pulse_detect(enum hpd_pin pin, u32 val)) 1659 { 1660 enum hpd_pin pin; 1661 1662 for_each_hpd_pin(pin) { 1663 if ((hpd[pin] & hotplug_trigger) == 0) 1664 continue; 1665 1666 *pin_mask |= BIT(pin); 1667 1668 if (long_pulse_detect(pin, dig_hotplug_reg)) 1669 *long_mask |= BIT(pin); 1670 } 1671 1672 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x, long 0x%08x\n", 1673 hotplug_trigger, dig_hotplug_reg, *pin_mask, *long_mask); 1674 1675 } 1676 1677 static void gmbus_irq_handler(struct drm_i915_private *dev_priv) 1678 { 1679 wake_up_all(&dev_priv->gmbus_wait_queue); 1680 } 1681 1682 static void dp_aux_irq_handler(struct drm_i915_private *dev_priv) 1683 { 1684 wake_up_all(&dev_priv->gmbus_wait_queue); 1685 } 1686 1687 #if defined(CONFIG_DEBUG_FS) 1688 static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 1689 enum pipe pipe, 1690 u32 crc0, u32 crc1, 1691 u32 crc2, u32 crc3, 1692 u32 crc4) 1693 { 1694 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe]; 1695 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); 1696 u32 crcs[5]; 1697 1698 spin_lock(&pipe_crc->lock); 1699 /* 1700 * For some not yet identified reason, the first CRC is 1701 * bonkers. So let's just wait for the next vblank and read 1702 * out the buggy result. 1703 * 1704 * On GEN8+ sometimes the second CRC is bonkers as well, so 1705 * don't trust that one either. 
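	 * Hence the first CRC gets thrown away on every platform, and the
	 * second one as well on gen8+.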
1706 */ 1707 if (pipe_crc->skipped <= 0 || 1708 (INTEL_GEN(dev_priv) >= 8 && pipe_crc->skipped == 1)) { 1709 pipe_crc->skipped++; 1710 spin_unlock(&pipe_crc->lock); 1711 return; 1712 } 1713 spin_unlock(&pipe_crc->lock); 1714 1715 crcs[0] = crc0; 1716 crcs[1] = crc1; 1717 crcs[2] = crc2; 1718 crcs[3] = crc3; 1719 crcs[4] = crc4; 1720 drm_crtc_add_crc_entry(&crtc->base, true, 1721 drm_crtc_accurate_vblank_count(&crtc->base), 1722 crcs); 1723 } 1724 #else 1725 static inline void 1726 display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 1727 enum pipe pipe, 1728 u32 crc0, u32 crc1, 1729 u32 crc2, u32 crc3, 1730 u32 crc4) {} 1731 #endif 1732 1733 1734 static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 1735 enum pipe pipe) 1736 { 1737 display_pipe_crc_irq_handler(dev_priv, pipe, 1738 I915_READ(PIPE_CRC_RES_1_IVB(pipe)), 1739 0, 0, 0, 0); 1740 } 1741 1742 static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 1743 enum pipe pipe) 1744 { 1745 display_pipe_crc_irq_handler(dev_priv, pipe, 1746 I915_READ(PIPE_CRC_RES_1_IVB(pipe)), 1747 I915_READ(PIPE_CRC_RES_2_IVB(pipe)), 1748 I915_READ(PIPE_CRC_RES_3_IVB(pipe)), 1749 I915_READ(PIPE_CRC_RES_4_IVB(pipe)), 1750 I915_READ(PIPE_CRC_RES_5_IVB(pipe))); 1751 } 1752 1753 static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 1754 enum pipe pipe) 1755 { 1756 u32 res1, res2; 1757 1758 if (INTEL_GEN(dev_priv) >= 3) 1759 res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe)); 1760 else 1761 res1 = 0; 1762 1763 if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) 1764 res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe)); 1765 else 1766 res2 = 0; 1767 1768 display_pipe_crc_irq_handler(dev_priv, pipe, 1769 I915_READ(PIPE_CRC_RES_RED(pipe)), 1770 I915_READ(PIPE_CRC_RES_GREEN(pipe)), 1771 I915_READ(PIPE_CRC_RES_BLUE(pipe)), 1772 res1, res2); 1773 } 1774 1775 /* The RPS events need forcewake, so we add them to a work queue and mask their 1776 * IMR bits until the work is done. Other interrupts can be processed without 1777 * the work queue. 
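 * gen6_pm_rps_work() unmasks the RPS bits again once it has consumed the
 * queued pm_iir, so the events only stay masked while the work is pending.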
 */
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	struct intel_rps *rps = &dev_priv->gt_pm.rps;

	if (pm_iir & dev_priv->pm_rps_events) {
		spin_lock(&dev_priv->irq_lock);
		gen6_mask_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
		if (rps->interrupts_enabled) {
			rps->pm_iir |= pm_iir & dev_priv->pm_rps_events;
			schedule_work(&rps->work);
		}
		spin_unlock(&dev_priv->irq_lock);
	}

	if (INTEL_GEN(dev_priv) >= 8)
		return;

	if (HAS_VEBOX(dev_priv)) {
		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
			intel_engine_breadcrumbs_irq(dev_priv->engine[VECS]);

		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
	}
}

static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 gt_iir)
{
	if (gt_iir & GEN9_GUC_TO_HOST_INT_EVENT)
		intel_guc_to_host_event_handler(&dev_priv->guc);
}

static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		I915_WRITE(PIPESTAT(pipe),
			   PIPESTAT_INT_STATUS_MASK |
			   PIPE_FIFO_UNDERRUN_STATUS);

		dev_priv->pipestat_irq_mask[pipe] = 0;
	}
}

static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
				  u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	int pipe;

	spin_lock(&dev_priv->irq_lock);

	if (!dev_priv->display_irqs_enabled) {
		spin_unlock(&dev_priv->irq_lock);
		return;
	}

	for_each_pipe(dev_priv, pipe) {
		i915_reg_t reg;
		u32 status_mask, enable_mask, iir_bit = 0;

		/*
		 * PIPESTAT bits get signalled even when the interrupt is
		 * disabled with the mask bits, and some of the status bits do
		 * not generate interrupts at all (like the underrun bit). Hence
		 * we need to be careful that we only handle what we want to
		 * handle.
		 */

		/* fifo underruns are filtered in the underrun handler. */
		status_mask = PIPE_FIFO_UNDERRUN_STATUS;

		switch (pipe) {
		case PIPE_A:
			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
			break;
		case PIPE_B:
			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
			break;
		case PIPE_C:
			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
			break;
		}
		if (iir & iir_bit)
			status_mask |= dev_priv->pipestat_irq_mask[pipe];

		if (!status_mask)
			continue;

		reg = PIPESTAT(pipe);
		pipe_stats[pipe] = I915_READ(reg) & status_mask;
		enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

		/*
		 * Clear the PIPE*STAT regs before the IIR
		 *
		 * Toggle the enable bits to make sure we get an
		 * edge in the ISR pipe event bit if we don't clear
		 * all the enabled status bits. Otherwise the edge
		 * triggered IIR on i965/g4x wouldn't notice that
		 * an interrupt is still pending.
1879 */ 1880 if (pipe_stats[pipe]) { 1881 I915_WRITE(reg, pipe_stats[pipe]); 1882 I915_WRITE(reg, enable_mask); 1883 } 1884 } 1885 spin_unlock(&dev_priv->irq_lock); 1886 } 1887 1888 static void i8xx_pipestat_irq_handler(struct drm_i915_private *dev_priv, 1889 u16 iir, u32 pipe_stats[I915_MAX_PIPES]) 1890 { 1891 enum pipe pipe; 1892 1893 for_each_pipe(dev_priv, pipe) { 1894 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS) 1895 drm_handle_vblank(&dev_priv->drm, pipe); 1896 1897 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 1898 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 1899 1900 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 1901 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 1902 } 1903 } 1904 1905 static void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv, 1906 u32 iir, u32 pipe_stats[I915_MAX_PIPES]) 1907 { 1908 bool blc_event = false; 1909 enum pipe pipe; 1910 1911 for_each_pipe(dev_priv, pipe) { 1912 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS) 1913 drm_handle_vblank(&dev_priv->drm, pipe); 1914 1915 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 1916 blc_event = true; 1917 1918 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 1919 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 1920 1921 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 1922 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 1923 } 1924 1925 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 1926 intel_opregion_asle_intr(dev_priv); 1927 } 1928 1929 static void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv, 1930 u32 iir, u32 pipe_stats[I915_MAX_PIPES]) 1931 { 1932 bool blc_event = false; 1933 enum pipe pipe; 1934 1935 for_each_pipe(dev_priv, pipe) { 1936 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS) 1937 drm_handle_vblank(&dev_priv->drm, pipe); 1938 1939 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 1940 blc_event = true; 1941 1942 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 1943 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 1944 1945 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 1946 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 1947 } 1948 1949 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 1950 intel_opregion_asle_intr(dev_priv); 1951 1952 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 1953 gmbus_irq_handler(dev_priv); 1954 } 1955 1956 static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv, 1957 u32 pipe_stats[I915_MAX_PIPES]) 1958 { 1959 enum pipe pipe; 1960 1961 for_each_pipe(dev_priv, pipe) { 1962 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS) 1963 drm_handle_vblank(&dev_priv->drm, pipe); 1964 1965 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 1966 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 1967 1968 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 1969 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 1970 } 1971 1972 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 1973 gmbus_irq_handler(dev_priv); 1974 } 1975 1976 static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv) 1977 { 1978 u32 hotplug_status = 0, hotplug_status_mask; 1979 int i; 1980 1981 if (IS_G4X(dev_priv) || 1982 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 1983 hotplug_status_mask = HOTPLUG_INT_STATUS_G4X | 1984 DP_AUX_CHANNEL_MASK_INT_STATUS_G4X; 1985 else 1986 hotplug_status_mask = HOTPLUG_INT_STATUS_I915; 1987 1988 /* 1989 * We absolutely have to clear all the pending interrupt 1990 * bits in PORT_HOTPLUG_STAT. 
Otherwise the ISR port 1991 * interrupt bit won't have an edge, and the i965/g4x 1992 * edge triggered IIR will not notice that an interrupt 1993 * is still pending. We can't use PORT_HOTPLUG_EN to 1994 * guarantee the edge as the act of toggling the enable 1995 * bits can itself generate a new hotplug interrupt :( 1996 */ 1997 for (i = 0; i < 10; i++) { 1998 u32 tmp = I915_READ(PORT_HOTPLUG_STAT) & hotplug_status_mask; 1999 2000 if (tmp == 0) 2001 return hotplug_status; 2002 2003 hotplug_status |= tmp; 2004 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); 2005 } 2006 2007 WARN_ONCE(1, 2008 "PORT_HOTPLUG_STAT did not clear (0x%08x)\n", 2009 I915_READ(PORT_HOTPLUG_STAT)); 2010 2011 return hotplug_status; 2012 } 2013 2014 static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv, 2015 u32 hotplug_status) 2016 { 2017 u32 pin_mask = 0, long_mask = 0; 2018 2019 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || 2020 IS_CHERRYVIEW(dev_priv)) { 2021 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X; 2022 2023 if (hotplug_trigger) { 2024 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 2025 hotplug_trigger, hotplug_trigger, 2026 hpd_status_g4x, 2027 i9xx_port_hotplug_long_detect); 2028 2029 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2030 } 2031 2032 if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X) 2033 dp_aux_irq_handler(dev_priv); 2034 } else { 2035 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; 2036 2037 if (hotplug_trigger) { 2038 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 2039 hotplug_trigger, hotplug_trigger, 2040 hpd_status_i915, 2041 i9xx_port_hotplug_long_detect); 2042 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2043 } 2044 } 2045 } 2046 2047 static irqreturn_t valleyview_irq_handler(int irq, void *arg) 2048 { 2049 struct drm_device *dev = arg; 2050 struct drm_i915_private *dev_priv = to_i915(dev); 2051 irqreturn_t ret = IRQ_NONE; 2052 2053 if (!intel_irqs_enabled(dev_priv)) 2054 return IRQ_NONE; 2055 2056 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2057 disable_rpm_wakeref_asserts(dev_priv); 2058 2059 do { 2060 u32 iir, gt_iir, pm_iir; 2061 u32 pipe_stats[I915_MAX_PIPES] = {}; 2062 u32 hotplug_status = 0; 2063 u32 ier = 0; 2064 2065 gt_iir = I915_READ(GTIIR); 2066 pm_iir = I915_READ(GEN6_PMIIR); 2067 iir = I915_READ(VLV_IIR); 2068 2069 if (gt_iir == 0 && pm_iir == 0 && iir == 0) 2070 break; 2071 2072 ret = IRQ_HANDLED; 2073 2074 /* 2075 * Theory on interrupt generation, based on empirical evidence: 2076 * 2077 * x = ((VLV_IIR & VLV_IER) || 2078 * (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) && 2079 * (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE))); 2080 * 2081 * A CPU interrupt will only be raised when 'x' has a 0->1 edge. 2082 * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to 2083 * guarantee the CPU interrupt will be raised again even if we 2084 * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR 2085 * bits this time around. 
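 *
 * Both registers are restored near the bottom of the loop (VLV_IER
 * first, then VLV_MASTER_IER), so any source still pending at that
 * point produces a fresh 0->1 edge on 'x' and re-raises the interrupt.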
2086 */ 2087 I915_WRITE(VLV_MASTER_IER, 0); 2088 ier = I915_READ(VLV_IER); 2089 I915_WRITE(VLV_IER, 0); 2090 2091 if (gt_iir) 2092 I915_WRITE(GTIIR, gt_iir); 2093 if (pm_iir) 2094 I915_WRITE(GEN6_PMIIR, pm_iir); 2095 2096 if (iir & I915_DISPLAY_PORT_INTERRUPT) 2097 hotplug_status = i9xx_hpd_irq_ack(dev_priv); 2098 2099 /* Call regardless, as some status bits might not be 2100 * signalled in iir */ 2101 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats); 2102 2103 if (iir & (I915_LPE_PIPE_A_INTERRUPT | 2104 I915_LPE_PIPE_B_INTERRUPT)) 2105 intel_lpe_audio_irq_handler(dev_priv); 2106 2107 /* 2108 * VLV_IIR is single buffered, and reflects the level 2109 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last. 2110 */ 2111 if (iir) 2112 I915_WRITE(VLV_IIR, iir); 2113 2114 I915_WRITE(VLV_IER, ier); 2115 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); 2116 2117 if (gt_iir) 2118 snb_gt_irq_handler(dev_priv, gt_iir); 2119 if (pm_iir) 2120 gen6_rps_irq_handler(dev_priv, pm_iir); 2121 2122 if (hotplug_status) 2123 i9xx_hpd_irq_handler(dev_priv, hotplug_status); 2124 2125 valleyview_pipestat_irq_handler(dev_priv, pipe_stats); 2126 } while (0); 2127 2128 enable_rpm_wakeref_asserts(dev_priv); 2129 2130 return ret; 2131 } 2132 2133 static irqreturn_t cherryview_irq_handler(int irq, void *arg) 2134 { 2135 struct drm_device *dev = arg; 2136 struct drm_i915_private *dev_priv = to_i915(dev); 2137 irqreturn_t ret = IRQ_NONE; 2138 2139 if (!intel_irqs_enabled(dev_priv)) 2140 return IRQ_NONE; 2141 2142 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2143 disable_rpm_wakeref_asserts(dev_priv); 2144 2145 do { 2146 u32 master_ctl, iir; 2147 u32 pipe_stats[I915_MAX_PIPES] = {}; 2148 u32 hotplug_status = 0; 2149 u32 gt_iir[4]; 2150 u32 ier = 0; 2151 2152 master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL; 2153 iir = I915_READ(VLV_IIR); 2154 2155 if (master_ctl == 0 && iir == 0) 2156 break; 2157 2158 ret = IRQ_HANDLED; 2159 2160 /* 2161 * Theory on interrupt generation, based on empirical evidence: 2162 * 2163 * x = ((VLV_IIR & VLV_IER) || 2164 * ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) && 2165 * (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL))); 2166 * 2167 * A CPU interrupt will only be raised when 'x' has a 0->1 edge. 2168 * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to 2169 * guarantee the CPU interrupt will be raised again even if we 2170 * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL 2171 * bits this time around. 2172 */ 2173 I915_WRITE(GEN8_MASTER_IRQ, 0); 2174 ier = I915_READ(VLV_IER); 2175 I915_WRITE(VLV_IER, 0); 2176 2177 gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir); 2178 2179 if (iir & I915_DISPLAY_PORT_INTERRUPT) 2180 hotplug_status = i9xx_hpd_irq_ack(dev_priv); 2181 2182 /* Call regardless, as some status bits might not be 2183 * signalled in iir */ 2184 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats); 2185 2186 if (iir & (I915_LPE_PIPE_A_INTERRUPT | 2187 I915_LPE_PIPE_B_INTERRUPT | 2188 I915_LPE_PIPE_C_INTERRUPT)) 2189 intel_lpe_audio_irq_handler(dev_priv); 2190 2191 /* 2192 * VLV_IIR is single buffered, and reflects the level 2193 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last. 
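 *
 * Clearing VLV_IIR before those status registers have been acked would
 * achieve nothing: the still-asserted level would simply show up in
 * VLV_IIR again.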
2194 */ 2195 if (iir) 2196 I915_WRITE(VLV_IIR, iir); 2197 2198 I915_WRITE(VLV_IER, ier); 2199 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 2200 2201 gen8_gt_irq_handler(dev_priv, master_ctl, gt_iir); 2202 2203 if (hotplug_status) 2204 i9xx_hpd_irq_handler(dev_priv, hotplug_status); 2205 2206 valleyview_pipestat_irq_handler(dev_priv, pipe_stats); 2207 } while (0); 2208 2209 enable_rpm_wakeref_asserts(dev_priv); 2210 2211 return ret; 2212 } 2213 2214 static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv, 2215 u32 hotplug_trigger, 2216 const u32 hpd[HPD_NUM_PINS]) 2217 { 2218 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; 2219 2220 /* 2221 * Somehow the PCH doesn't seem to really ack the interrupt to the CPU 2222 * unless we touch the hotplug register, even if hotplug_trigger is 2223 * zero. Not acking leads to "The master control interrupt lied (SDE)!" 2224 * errors. 2225 */ 2226 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); 2227 if (!hotplug_trigger) { 2228 u32 mask = PORTA_HOTPLUG_STATUS_MASK | 2229 PORTD_HOTPLUG_STATUS_MASK | 2230 PORTC_HOTPLUG_STATUS_MASK | 2231 PORTB_HOTPLUG_STATUS_MASK; 2232 dig_hotplug_reg &= ~mask; 2233 } 2234 2235 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); 2236 if (!hotplug_trigger) 2237 return; 2238 2239 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger, 2240 dig_hotplug_reg, hpd, 2241 pch_port_hotplug_long_detect); 2242 2243 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2244 } 2245 2246 static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) 2247 { 2248 int pipe; 2249 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK; 2250 2251 ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ibx); 2252 2253 if (pch_iir & SDE_AUDIO_POWER_MASK) { 2254 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >> 2255 SDE_AUDIO_POWER_SHIFT); 2256 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", 2257 port_name(port)); 2258 } 2259 2260 if (pch_iir & SDE_AUX_MASK) 2261 dp_aux_irq_handler(dev_priv); 2262 2263 if (pch_iir & SDE_GMBUS) 2264 gmbus_irq_handler(dev_priv); 2265 2266 if (pch_iir & SDE_AUDIO_HDCP_MASK) 2267 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n"); 2268 2269 if (pch_iir & SDE_AUDIO_TRANS_MASK) 2270 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n"); 2271 2272 if (pch_iir & SDE_POISON) 2273 DRM_ERROR("PCH poison interrupt\n"); 2274 2275 if (pch_iir & SDE_FDI_MASK) 2276 for_each_pipe(dev_priv, pipe) 2277 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 2278 pipe_name(pipe), 2279 I915_READ(FDI_RX_IIR(pipe))); 2280 2281 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE)) 2282 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n"); 2283 2284 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR)) 2285 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n"); 2286 2287 if (pch_iir & SDE_TRANSA_FIFO_UNDER) 2288 intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A); 2289 2290 if (pch_iir & SDE_TRANSB_FIFO_UNDER) 2291 intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_B); 2292 } 2293 2294 static void ivb_err_int_handler(struct drm_i915_private *dev_priv) 2295 { 2296 u32 err_int = I915_READ(GEN7_ERR_INT); 2297 enum pipe pipe; 2298 2299 if (err_int & ERR_INT_POISON) 2300 DRM_ERROR("Poison interrupt\n"); 2301 2302 for_each_pipe(dev_priv, pipe) { 2303 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) 2304 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2305 2306 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) { 2307 if (IS_IVYBRIDGE(dev_priv)) 2308 ivb_pipe_crc_irq_handler(dev_priv, pipe); 2309 else 2310 
hsw_pipe_crc_irq_handler(dev_priv, pipe); 2311 } 2312 } 2313 2314 I915_WRITE(GEN7_ERR_INT, err_int); 2315 } 2316 2317 static void cpt_serr_int_handler(struct drm_i915_private *dev_priv) 2318 { 2319 u32 serr_int = I915_READ(SERR_INT); 2320 enum pipe pipe; 2321 2322 if (serr_int & SERR_INT_POISON) 2323 DRM_ERROR("PCH poison interrupt\n"); 2324 2325 for_each_pipe(dev_priv, pipe) 2326 if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe)) 2327 intel_pch_fifo_underrun_irq_handler(dev_priv, pipe); 2328 2329 I915_WRITE(SERR_INT, serr_int); 2330 } 2331 2332 static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) 2333 { 2334 int pipe; 2335 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; 2336 2337 ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_cpt); 2338 2339 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) { 2340 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> 2341 SDE_AUDIO_POWER_SHIFT_CPT); 2342 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n", 2343 port_name(port)); 2344 } 2345 2346 if (pch_iir & SDE_AUX_MASK_CPT) 2347 dp_aux_irq_handler(dev_priv); 2348 2349 if (pch_iir & SDE_GMBUS_CPT) 2350 gmbus_irq_handler(dev_priv); 2351 2352 if (pch_iir & SDE_AUDIO_CP_REQ_CPT) 2353 DRM_DEBUG_DRIVER("Audio CP request interrupt\n"); 2354 2355 if (pch_iir & SDE_AUDIO_CP_CHG_CPT) 2356 DRM_DEBUG_DRIVER("Audio CP change interrupt\n"); 2357 2358 if (pch_iir & SDE_FDI_MASK_CPT) 2359 for_each_pipe(dev_priv, pipe) 2360 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 2361 pipe_name(pipe), 2362 I915_READ(FDI_RX_IIR(pipe))); 2363 2364 if (pch_iir & SDE_ERROR_CPT) 2365 cpt_serr_int_handler(dev_priv); 2366 } 2367 2368 static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) 2369 { 2370 u32 ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_ICP; 2371 u32 tc_hotplug_trigger = pch_iir & SDE_TC_MASK_ICP; 2372 u32 pin_mask = 0, long_mask = 0; 2373 2374 if (ddi_hotplug_trigger) { 2375 u32 dig_hotplug_reg; 2376 2377 dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_DDI); 2378 I915_WRITE(SHOTPLUG_CTL_DDI, dig_hotplug_reg); 2379 2380 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 2381 ddi_hotplug_trigger, 2382 dig_hotplug_reg, hpd_icp, 2383 icp_ddi_port_hotplug_long_detect); 2384 } 2385 2386 if (tc_hotplug_trigger) { 2387 u32 dig_hotplug_reg; 2388 2389 dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_TC); 2390 I915_WRITE(SHOTPLUG_CTL_TC, dig_hotplug_reg); 2391 2392 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 2393 tc_hotplug_trigger, 2394 dig_hotplug_reg, hpd_icp, 2395 icp_tc_port_hotplug_long_detect); 2396 } 2397 2398 if (pin_mask) 2399 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2400 2401 if (pch_iir & SDE_GMBUS_ICP) 2402 gmbus_irq_handler(dev_priv); 2403 } 2404 2405 static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) 2406 { 2407 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT & 2408 ~SDE_PORTE_HOTPLUG_SPT; 2409 u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT; 2410 u32 pin_mask = 0, long_mask = 0; 2411 2412 if (hotplug_trigger) { 2413 u32 dig_hotplug_reg; 2414 2415 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); 2416 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); 2417 2418 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 2419 hotplug_trigger, dig_hotplug_reg, hpd_spt, 2420 spt_port_hotplug_long_detect); 2421 } 2422 2423 if (hotplug2_trigger) { 2424 u32 dig_hotplug_reg; 2425 2426 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2); 2427 I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg); 2428 2429 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 2430 
hotplug2_trigger, dig_hotplug_reg, hpd_spt, 2431 spt_port_hotplug2_long_detect); 2432 } 2433 2434 if (pin_mask) 2435 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2436 2437 if (pch_iir & SDE_GMBUS_CPT) 2438 gmbus_irq_handler(dev_priv); 2439 } 2440 2441 static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv, 2442 u32 hotplug_trigger, 2443 const u32 hpd[HPD_NUM_PINS]) 2444 { 2445 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; 2446 2447 dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL); 2448 I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg); 2449 2450 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger, 2451 dig_hotplug_reg, hpd, 2452 ilk_port_hotplug_long_detect); 2453 2454 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2455 } 2456 2457 static void ilk_display_irq_handler(struct drm_i915_private *dev_priv, 2458 u32 de_iir) 2459 { 2460 enum pipe pipe; 2461 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG; 2462 2463 if (hotplug_trigger) 2464 ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ilk); 2465 2466 if (de_iir & DE_AUX_CHANNEL_A) 2467 dp_aux_irq_handler(dev_priv); 2468 2469 if (de_iir & DE_GSE) 2470 intel_opregion_asle_intr(dev_priv); 2471 2472 if (de_iir & DE_POISON) 2473 DRM_ERROR("Poison interrupt\n"); 2474 2475 for_each_pipe(dev_priv, pipe) { 2476 if (de_iir & DE_PIPE_VBLANK(pipe)) 2477 drm_handle_vblank(&dev_priv->drm, pipe); 2478 2479 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe)) 2480 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2481 2482 if (de_iir & DE_PIPE_CRC_DONE(pipe)) 2483 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 2484 } 2485 2486 /* check event from PCH */ 2487 if (de_iir & DE_PCH_EVENT) { 2488 u32 pch_iir = I915_READ(SDEIIR); 2489 2490 if (HAS_PCH_CPT(dev_priv)) 2491 cpt_irq_handler(dev_priv, pch_iir); 2492 else 2493 ibx_irq_handler(dev_priv, pch_iir); 2494 2495 /* should clear PCH hotplug event before clear CPU irq */ 2496 I915_WRITE(SDEIIR, pch_iir); 2497 } 2498 2499 if (IS_GEN(dev_priv, 5) && de_iir & DE_PCU_EVENT) 2500 ironlake_rps_change_irq_handler(dev_priv); 2501 } 2502 2503 static void ivb_display_irq_handler(struct drm_i915_private *dev_priv, 2504 u32 de_iir) 2505 { 2506 enum pipe pipe; 2507 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB; 2508 2509 if (hotplug_trigger) 2510 ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ivb); 2511 2512 if (de_iir & DE_ERR_INT_IVB) 2513 ivb_err_int_handler(dev_priv); 2514 2515 if (de_iir & DE_EDP_PSR_INT_HSW) { 2516 u32 psr_iir = I915_READ(EDP_PSR_IIR); 2517 2518 intel_psr_irq_handler(dev_priv, psr_iir); 2519 I915_WRITE(EDP_PSR_IIR, psr_iir); 2520 } 2521 2522 if (de_iir & DE_AUX_CHANNEL_A_IVB) 2523 dp_aux_irq_handler(dev_priv); 2524 2525 if (de_iir & DE_GSE_IVB) 2526 intel_opregion_asle_intr(dev_priv); 2527 2528 for_each_pipe(dev_priv, pipe) { 2529 if (de_iir & (DE_PIPE_VBLANK_IVB(pipe))) 2530 drm_handle_vblank(&dev_priv->drm, pipe); 2531 } 2532 2533 /* check event from PCH */ 2534 if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) { 2535 u32 pch_iir = I915_READ(SDEIIR); 2536 2537 cpt_irq_handler(dev_priv, pch_iir); 2538 2539 /* clear PCH hotplug event before clear CPU irq */ 2540 I915_WRITE(SDEIIR, pch_iir); 2541 } 2542 } 2543 2544 /* 2545 * To handle irqs with the minimum potential races with fresh interrupts, we: 2546 * 1 - Disable Master Interrupt Control. 2547 * 2 - Find the source(s) of the interrupt. 2548 * 3 - Clear the Interrupt Identity bits (IIR). 2549 * 4 - Process the interrupt(s) that had bits set in the IIRs. 
2550 * 5 - Re-enable Master Interrupt Control. 2551 */ 2552 static irqreturn_t ironlake_irq_handler(int irq, void *arg) 2553 { 2554 struct drm_device *dev = arg; 2555 struct drm_i915_private *dev_priv = to_i915(dev); 2556 u32 de_iir, gt_iir, de_ier, sde_ier = 0; 2557 irqreturn_t ret = IRQ_NONE; 2558 2559 if (!intel_irqs_enabled(dev_priv)) 2560 return IRQ_NONE; 2561 2562 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2563 disable_rpm_wakeref_asserts(dev_priv); 2564 2565 /* disable master interrupt before clearing iir */ 2566 de_ier = I915_READ(DEIER); 2567 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); 2568 2569 /* Disable south interrupts. We'll only write to SDEIIR once, so further 2570 * interrupts will will be stored on its back queue, and then we'll be 2571 * able to process them after we restore SDEIER (as soon as we restore 2572 * it, we'll get an interrupt if SDEIIR still has something to process 2573 * due to its back queue). */ 2574 if (!HAS_PCH_NOP(dev_priv)) { 2575 sde_ier = I915_READ(SDEIER); 2576 I915_WRITE(SDEIER, 0); 2577 } 2578 2579 /* Find, clear, then process each source of interrupt */ 2580 2581 gt_iir = I915_READ(GTIIR); 2582 if (gt_iir) { 2583 I915_WRITE(GTIIR, gt_iir); 2584 ret = IRQ_HANDLED; 2585 if (INTEL_GEN(dev_priv) >= 6) 2586 snb_gt_irq_handler(dev_priv, gt_iir); 2587 else 2588 ilk_gt_irq_handler(dev_priv, gt_iir); 2589 } 2590 2591 de_iir = I915_READ(DEIIR); 2592 if (de_iir) { 2593 I915_WRITE(DEIIR, de_iir); 2594 ret = IRQ_HANDLED; 2595 if (INTEL_GEN(dev_priv) >= 7) 2596 ivb_display_irq_handler(dev_priv, de_iir); 2597 else 2598 ilk_display_irq_handler(dev_priv, de_iir); 2599 } 2600 2601 if (INTEL_GEN(dev_priv) >= 6) { 2602 u32 pm_iir = I915_READ(GEN6_PMIIR); 2603 if (pm_iir) { 2604 I915_WRITE(GEN6_PMIIR, pm_iir); 2605 ret = IRQ_HANDLED; 2606 gen6_rps_irq_handler(dev_priv, pm_iir); 2607 } 2608 } 2609 2610 I915_WRITE(DEIER, de_ier); 2611 if (!HAS_PCH_NOP(dev_priv)) 2612 I915_WRITE(SDEIER, sde_ier); 2613 2614 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2615 enable_rpm_wakeref_asserts(dev_priv); 2616 2617 return ret; 2618 } 2619 2620 static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv, 2621 u32 hotplug_trigger, 2622 const u32 hpd[HPD_NUM_PINS]) 2623 { 2624 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; 2625 2626 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); 2627 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); 2628 2629 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger, 2630 dig_hotplug_reg, hpd, 2631 bxt_port_hotplug_long_detect); 2632 2633 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2634 } 2635 2636 static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir) 2637 { 2638 u32 pin_mask = 0, long_mask = 0; 2639 u32 trigger_tc = iir & GEN11_DE_TC_HOTPLUG_MASK; 2640 u32 trigger_tbt = iir & GEN11_DE_TBT_HOTPLUG_MASK; 2641 2642 if (trigger_tc) { 2643 u32 dig_hotplug_reg; 2644 2645 dig_hotplug_reg = I915_READ(GEN11_TC_HOTPLUG_CTL); 2646 I915_WRITE(GEN11_TC_HOTPLUG_CTL, dig_hotplug_reg); 2647 2648 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tc, 2649 dig_hotplug_reg, hpd_gen11, 2650 gen11_port_hotplug_long_detect); 2651 } 2652 2653 if (trigger_tbt) { 2654 u32 dig_hotplug_reg; 2655 2656 dig_hotplug_reg = I915_READ(GEN11_TBT_HOTPLUG_CTL); 2657 I915_WRITE(GEN11_TBT_HOTPLUG_CTL, dig_hotplug_reg); 2658 2659 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tbt, 2660 dig_hotplug_reg, hpd_gen11, 2661 gen11_port_hotplug_long_detect); 
2662 } 2663 2664 if (pin_mask) 2665 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2666 else 2667 DRM_ERROR("Unexpected DE HPD interrupt 0x%08x\n", iir); 2668 } 2669 2670 static irqreturn_t 2671 gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) 2672 { 2673 irqreturn_t ret = IRQ_NONE; 2674 u32 iir; 2675 enum pipe pipe; 2676 2677 if (master_ctl & GEN8_DE_MISC_IRQ) { 2678 iir = I915_READ(GEN8_DE_MISC_IIR); 2679 if (iir) { 2680 bool found = false; 2681 2682 I915_WRITE(GEN8_DE_MISC_IIR, iir); 2683 ret = IRQ_HANDLED; 2684 2685 if (iir & GEN8_DE_MISC_GSE) { 2686 intel_opregion_asle_intr(dev_priv); 2687 found = true; 2688 } 2689 2690 if (iir & GEN8_DE_EDP_PSR) { 2691 u32 psr_iir = I915_READ(EDP_PSR_IIR); 2692 2693 intel_psr_irq_handler(dev_priv, psr_iir); 2694 I915_WRITE(EDP_PSR_IIR, psr_iir); 2695 found = true; 2696 } 2697 2698 if (!found) 2699 DRM_ERROR("Unexpected DE Misc interrupt\n"); 2700 } 2701 else 2702 DRM_ERROR("The master control interrupt lied (DE MISC)!\n"); 2703 } 2704 2705 if (INTEL_GEN(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) { 2706 iir = I915_READ(GEN11_DE_HPD_IIR); 2707 if (iir) { 2708 I915_WRITE(GEN11_DE_HPD_IIR, iir); 2709 ret = IRQ_HANDLED; 2710 gen11_hpd_irq_handler(dev_priv, iir); 2711 } else { 2712 DRM_ERROR("The master control interrupt lied, (DE HPD)!\n"); 2713 } 2714 } 2715 2716 if (master_ctl & GEN8_DE_PORT_IRQ) { 2717 iir = I915_READ(GEN8_DE_PORT_IIR); 2718 if (iir) { 2719 u32 tmp_mask; 2720 bool found = false; 2721 2722 I915_WRITE(GEN8_DE_PORT_IIR, iir); 2723 ret = IRQ_HANDLED; 2724 2725 tmp_mask = GEN8_AUX_CHANNEL_A; 2726 if (INTEL_GEN(dev_priv) >= 9) 2727 tmp_mask |= GEN9_AUX_CHANNEL_B | 2728 GEN9_AUX_CHANNEL_C | 2729 GEN9_AUX_CHANNEL_D; 2730 2731 if (INTEL_GEN(dev_priv) >= 11) 2732 tmp_mask |= ICL_AUX_CHANNEL_E; 2733 2734 if (IS_CNL_WITH_PORT_F(dev_priv) || 2735 INTEL_GEN(dev_priv) >= 11) 2736 tmp_mask |= CNL_AUX_CHANNEL_F; 2737 2738 if (iir & tmp_mask) { 2739 dp_aux_irq_handler(dev_priv); 2740 found = true; 2741 } 2742 2743 if (IS_GEN9_LP(dev_priv)) { 2744 tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK; 2745 if (tmp_mask) { 2746 bxt_hpd_irq_handler(dev_priv, tmp_mask, 2747 hpd_bxt); 2748 found = true; 2749 } 2750 } else if (IS_BROADWELL(dev_priv)) { 2751 tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG; 2752 if (tmp_mask) { 2753 ilk_hpd_irq_handler(dev_priv, 2754 tmp_mask, hpd_bdw); 2755 found = true; 2756 } 2757 } 2758 2759 if (IS_GEN9_LP(dev_priv) && (iir & BXT_DE_PORT_GMBUS)) { 2760 gmbus_irq_handler(dev_priv); 2761 found = true; 2762 } 2763 2764 if (!found) 2765 DRM_ERROR("Unexpected DE Port interrupt\n"); 2766 } 2767 else 2768 DRM_ERROR("The master control interrupt lied (DE PORT)!\n"); 2769 } 2770 2771 for_each_pipe(dev_priv, pipe) { 2772 u32 fault_errors; 2773 2774 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe))) 2775 continue; 2776 2777 iir = I915_READ(GEN8_DE_PIPE_IIR(pipe)); 2778 if (!iir) { 2779 DRM_ERROR("The master control interrupt lied (DE PIPE)!\n"); 2780 continue; 2781 } 2782 2783 ret = IRQ_HANDLED; 2784 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir); 2785 2786 if (iir & GEN8_PIPE_VBLANK) 2787 drm_handle_vblank(&dev_priv->drm, pipe); 2788 2789 if (iir & GEN8_PIPE_CDCLK_CRC_DONE) 2790 hsw_pipe_crc_irq_handler(dev_priv, pipe); 2791 2792 if (iir & GEN8_PIPE_FIFO_UNDERRUN) 2793 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2794 2795 fault_errors = iir; 2796 if (INTEL_GEN(dev_priv) >= 9) 2797 fault_errors &= GEN9_DE_PIPE_IRQ_FAULT_ERRORS; 2798 else 2799 fault_errors &= GEN8_DE_PIPE_IRQ_FAULT_ERRORS; 2800 2801 if (fault_errors) 2802 
DRM_ERROR("Fault errors on pipe %c: 0x%08x\n", 2803 pipe_name(pipe), 2804 fault_errors); 2805 } 2806 2807 if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) && 2808 master_ctl & GEN8_DE_PCH_IRQ) { 2809 /* 2810 * FIXME(BDW): Assume for now that the new interrupt handling 2811 * scheme also closed the SDE interrupt handling race we've seen 2812 * on older pch-split platforms. But this needs testing. 2813 */ 2814 iir = I915_READ(SDEIIR); 2815 if (iir) { 2816 I915_WRITE(SDEIIR, iir); 2817 ret = IRQ_HANDLED; 2818 2819 if (HAS_PCH_ICP(dev_priv)) 2820 icp_irq_handler(dev_priv, iir); 2821 else if (HAS_PCH_SPT(dev_priv) || 2822 HAS_PCH_KBP(dev_priv) || 2823 HAS_PCH_CNP(dev_priv)) 2824 spt_irq_handler(dev_priv, iir); 2825 else 2826 cpt_irq_handler(dev_priv, iir); 2827 } else { 2828 /* 2829 * Like on previous PCH there seems to be something 2830 * fishy going on with forwarding PCH interrupts. 2831 */ 2832 DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n"); 2833 } 2834 } 2835 2836 return ret; 2837 } 2838 2839 static inline u32 gen8_master_intr_disable(void __iomem * const regs) 2840 { 2841 raw_reg_write(regs, GEN8_MASTER_IRQ, 0); 2842 2843 /* 2844 * Now with master disabled, get a sample of level indications 2845 * for this interrupt. Indications will be cleared on related acks. 2846 * New indications can and will light up during processing, 2847 * and will generate new interrupt after enabling master. 2848 */ 2849 return raw_reg_read(regs, GEN8_MASTER_IRQ); 2850 } 2851 2852 static inline void gen8_master_intr_enable(void __iomem * const regs) 2853 { 2854 raw_reg_write(regs, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 2855 } 2856 2857 static irqreturn_t gen8_irq_handler(int irq, void *arg) 2858 { 2859 struct drm_i915_private *dev_priv = to_i915(arg); 2860 void __iomem * const regs = dev_priv->regs; 2861 u32 master_ctl; 2862 u32 gt_iir[4]; 2863 2864 if (!intel_irqs_enabled(dev_priv)) 2865 return IRQ_NONE; 2866 2867 master_ctl = gen8_master_intr_disable(regs); 2868 if (!master_ctl) { 2869 gen8_master_intr_enable(regs); 2870 return IRQ_NONE; 2871 } 2872 2873 /* Find, clear, then process each source of interrupt */ 2874 gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir); 2875 2876 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2877 if (master_ctl & ~GEN8_GT_IRQS) { 2878 disable_rpm_wakeref_asserts(dev_priv); 2879 gen8_de_irq_handler(dev_priv, master_ctl); 2880 enable_rpm_wakeref_asserts(dev_priv); 2881 } 2882 2883 gen8_master_intr_enable(regs); 2884 2885 gen8_gt_irq_handler(dev_priv, master_ctl, gt_iir); 2886 2887 return IRQ_HANDLED; 2888 } 2889 2890 static u32 2891 gen11_gt_engine_identity(struct drm_i915_private * const i915, 2892 const unsigned int bank, const unsigned int bit) 2893 { 2894 void __iomem * const regs = i915->regs; 2895 u32 timeout_ts; 2896 u32 ident; 2897 2898 lockdep_assert_held(&i915->irq_lock); 2899 2900 raw_reg_write(regs, GEN11_IIR_REG_SELECTOR(bank), BIT(bit)); 2901 2902 /* 2903 * NB: Specs do not specify how long to spin wait, 2904 * so we do ~100us as an educated guess. 
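 *
 * local_clock() returns nanoseconds, so shifting it right by 10 gives
 * roughly microseconds; the loop below polls the identity register
 * until GEN11_INTR_DATA_VALID is set or ~100 of those ticks elapse.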
2905 */ 2906 timeout_ts = (local_clock() >> 10) + 100; 2907 do { 2908 ident = raw_reg_read(regs, GEN11_INTR_IDENTITY_REG(bank)); 2909 } while (!(ident & GEN11_INTR_DATA_VALID) && 2910 !time_after32(local_clock() >> 10, timeout_ts)); 2911 2912 if (unlikely(!(ident & GEN11_INTR_DATA_VALID))) { 2913 DRM_ERROR("INTR_IDENTITY_REG%u:%u 0x%08x not valid!\n", 2914 bank, bit, ident); 2915 return 0; 2916 } 2917 2918 raw_reg_write(regs, GEN11_INTR_IDENTITY_REG(bank), 2919 GEN11_INTR_DATA_VALID); 2920 2921 return ident; 2922 } 2923 2924 static void 2925 gen11_other_irq_handler(struct drm_i915_private * const i915, 2926 const u8 instance, const u16 iir) 2927 { 2928 if (instance == OTHER_GTPM_INSTANCE) 2929 return gen6_rps_irq_handler(i915, iir); 2930 2931 WARN_ONCE(1, "unhandled other interrupt instance=0x%x, iir=0x%x\n", 2932 instance, iir); 2933 } 2934 2935 static void 2936 gen11_engine_irq_handler(struct drm_i915_private * const i915, 2937 const u8 class, const u8 instance, const u16 iir) 2938 { 2939 struct intel_engine_cs *engine; 2940 2941 if (instance <= MAX_ENGINE_INSTANCE) 2942 engine = i915->engine_class[class][instance]; 2943 else 2944 engine = NULL; 2945 2946 if (likely(engine)) 2947 return gen8_cs_irq_handler(engine, iir); 2948 2949 WARN_ONCE(1, "unhandled engine interrupt class=0x%x, instance=0x%x\n", 2950 class, instance); 2951 } 2952 2953 static void 2954 gen11_gt_identity_handler(struct drm_i915_private * const i915, 2955 const u32 identity) 2956 { 2957 const u8 class = GEN11_INTR_ENGINE_CLASS(identity); 2958 const u8 instance = GEN11_INTR_ENGINE_INSTANCE(identity); 2959 const u16 intr = GEN11_INTR_ENGINE_INTR(identity); 2960 2961 if (unlikely(!intr)) 2962 return; 2963 2964 if (class <= COPY_ENGINE_CLASS) 2965 return gen11_engine_irq_handler(i915, class, instance, intr); 2966 2967 if (class == OTHER_CLASS) 2968 return gen11_other_irq_handler(i915, instance, intr); 2969 2970 WARN_ONCE(1, "unknown interrupt class=0x%x, instance=0x%x, intr=0x%x\n", 2971 class, instance, intr); 2972 } 2973 2974 static void 2975 gen11_gt_bank_handler(struct drm_i915_private * const i915, 2976 const unsigned int bank) 2977 { 2978 void __iomem * const regs = i915->regs; 2979 unsigned long intr_dw; 2980 unsigned int bit; 2981 2982 lockdep_assert_held(&i915->irq_lock); 2983 2984 intr_dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank)); 2985 2986 if (unlikely(!intr_dw)) { 2987 DRM_ERROR("GT_INTR_DW%u blank!\n", bank); 2988 return; 2989 } 2990 2991 for_each_set_bit(bit, &intr_dw, 32) { 2992 const u32 ident = gen11_gt_engine_identity(i915, 2993 bank, bit); 2994 2995 gen11_gt_identity_handler(i915, ident); 2996 } 2997 2998 /* Clear must be after shared has been served for engine */ 2999 raw_reg_write(regs, GEN11_GT_INTR_DW(bank), intr_dw); 3000 } 3001 3002 static void 3003 gen11_gt_irq_handler(struct drm_i915_private * const i915, 3004 const u32 master_ctl) 3005 { 3006 unsigned int bank; 3007 3008 spin_lock(&i915->irq_lock); 3009 3010 for (bank = 0; bank < 2; bank++) { 3011 if (master_ctl & GEN11_GT_DW_IRQ(bank)) 3012 gen11_gt_bank_handler(i915, bank); 3013 } 3014 3015 spin_unlock(&i915->irq_lock); 3016 } 3017 3018 static u32 3019 gen11_gu_misc_irq_ack(struct drm_i915_private *dev_priv, const u32 master_ctl) 3020 { 3021 void __iomem * const regs = dev_priv->regs; 3022 u32 iir; 3023 3024 if (!(master_ctl & GEN11_GU_MISC_IRQ)) 3025 return 0; 3026 3027 iir = raw_reg_read(regs, GEN11_GU_MISC_IIR); 3028 if (likely(iir)) 3029 raw_reg_write(regs, GEN11_GU_MISC_IIR, iir); 3030 3031 return iir; 3032 } 3033 3034 static void 3035 
gen11_gu_misc_irq_handler(struct drm_i915_private *dev_priv, const u32 iir) 3036 { 3037 if (iir & GEN11_GU_MISC_GSE) 3038 intel_opregion_asle_intr(dev_priv); 3039 } 3040 3041 static inline u32 gen11_master_intr_disable(void __iomem * const regs) 3042 { 3043 raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0); 3044 3045 /* 3046 * Now with master disabled, get a sample of level indications 3047 * for this interrupt. Indications will be cleared on related acks. 3048 * New indications can and will light up during processing, 3049 * and will generate new interrupt after enabling master. 3050 */ 3051 return raw_reg_read(regs, GEN11_GFX_MSTR_IRQ); 3052 } 3053 3054 static inline void gen11_master_intr_enable(void __iomem * const regs) 3055 { 3056 raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ); 3057 } 3058 3059 static irqreturn_t gen11_irq_handler(int irq, void *arg) 3060 { 3061 struct drm_i915_private * const i915 = to_i915(arg); 3062 void __iomem * const regs = i915->regs; 3063 u32 master_ctl; 3064 u32 gu_misc_iir; 3065 3066 if (!intel_irqs_enabled(i915)) 3067 return IRQ_NONE; 3068 3069 master_ctl = gen11_master_intr_disable(regs); 3070 if (!master_ctl) { 3071 gen11_master_intr_enable(regs); 3072 return IRQ_NONE; 3073 } 3074 3075 /* Find, clear, then process each source of interrupt. */ 3076 gen11_gt_irq_handler(i915, master_ctl); 3077 3078 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 3079 if (master_ctl & GEN11_DISPLAY_IRQ) { 3080 const u32 disp_ctl = raw_reg_read(regs, GEN11_DISPLAY_INT_CTL); 3081 3082 disable_rpm_wakeref_asserts(i915); 3083 /* 3084 * GEN11_DISPLAY_INT_CTL has same format as GEN8_MASTER_IRQ 3085 * for the display related bits. 3086 */ 3087 gen8_de_irq_handler(i915, disp_ctl); 3088 enable_rpm_wakeref_asserts(i915); 3089 } 3090 3091 gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl); 3092 3093 gen11_master_intr_enable(regs); 3094 3095 gen11_gu_misc_irq_handler(i915, gu_misc_iir); 3096 3097 return IRQ_HANDLED; 3098 } 3099 3100 /* Called from drm generic code, passed 'crtc' which 3101 * we use as a pipe index 3102 */ 3103 static int i8xx_enable_vblank(struct drm_device *dev, unsigned int pipe) 3104 { 3105 struct drm_i915_private *dev_priv = to_i915(dev); 3106 unsigned long irqflags; 3107 3108 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3109 i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS); 3110 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3111 3112 return 0; 3113 } 3114 3115 static int i965_enable_vblank(struct drm_device *dev, unsigned int pipe) 3116 { 3117 struct drm_i915_private *dev_priv = to_i915(dev); 3118 unsigned long irqflags; 3119 3120 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3121 i915_enable_pipestat(dev_priv, pipe, 3122 PIPE_START_VBLANK_INTERRUPT_STATUS); 3123 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3124 3125 return 0; 3126 } 3127 3128 static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe) 3129 { 3130 struct drm_i915_private *dev_priv = to_i915(dev); 3131 unsigned long irqflags; 3132 u32 bit = INTEL_GEN(dev_priv) >= 7 ? 3133 DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe); 3134 3135 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3136 ilk_enable_display_irq(dev_priv, bit); 3137 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3138 3139 /* Even though there is no DMC, frame counter can get stuck when 3140 * PSR is active as no frames are generated. 
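 *
 * drm_vblank_restore() is the DRM core helper meant to account for
 * vblanks missed while the counter was stuck, so the software vblank
 * count does not go stale across such periods.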
3141 */ 3142 if (HAS_PSR(dev_priv)) 3143 drm_vblank_restore(dev, pipe); 3144 3145 return 0; 3146 } 3147 3148 static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe) 3149 { 3150 struct drm_i915_private *dev_priv = to_i915(dev); 3151 unsigned long irqflags; 3152 3153 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3154 bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK); 3155 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3156 3157 /* Even if there is no DMC, frame counter can get stuck when 3158 * PSR is active as no frames are generated, so check only for PSR. 3159 */ 3160 if (HAS_PSR(dev_priv)) 3161 drm_vblank_restore(dev, pipe); 3162 3163 return 0; 3164 } 3165 3166 /* Called from drm generic code, passed 'crtc' which 3167 * we use as a pipe index 3168 */ 3169 static void i8xx_disable_vblank(struct drm_device *dev, unsigned int pipe) 3170 { 3171 struct drm_i915_private *dev_priv = to_i915(dev); 3172 unsigned long irqflags; 3173 3174 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3175 i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS); 3176 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3177 } 3178 3179 static void i965_disable_vblank(struct drm_device *dev, unsigned int pipe) 3180 { 3181 struct drm_i915_private *dev_priv = to_i915(dev); 3182 unsigned long irqflags; 3183 3184 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3185 i915_disable_pipestat(dev_priv, pipe, 3186 PIPE_START_VBLANK_INTERRUPT_STATUS); 3187 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3188 } 3189 3190 static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe) 3191 { 3192 struct drm_i915_private *dev_priv = to_i915(dev); 3193 unsigned long irqflags; 3194 u32 bit = INTEL_GEN(dev_priv) >= 7 ? 3195 DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe); 3196 3197 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3198 ilk_disable_display_irq(dev_priv, bit); 3199 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3200 } 3201 3202 static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe) 3203 { 3204 struct drm_i915_private *dev_priv = to_i915(dev); 3205 unsigned long irqflags; 3206 3207 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3208 bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK); 3209 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3210 } 3211 3212 static void ibx_irq_reset(struct drm_i915_private *dev_priv) 3213 { 3214 if (HAS_PCH_NOP(dev_priv)) 3215 return; 3216 3217 GEN3_IRQ_RESET(SDE); 3218 3219 if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv)) 3220 I915_WRITE(SERR_INT, 0xffffffff); 3221 } 3222 3223 /* 3224 * SDEIER is also touched by the interrupt handler to work around missed PCH 3225 * interrupts. Hence we can't update it after the interrupt handler is enabled - 3226 * instead we unconditionally enable all PCH interrupt sources here, but then 3227 * only unmask them as needed with SDEIMR. 3228 * 3229 * This function needs to be called before interrupts are enabled. 
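 *
 * The WARN_ON() below enforces that ordering: it fires if SDEIER is
 * already non-zero, i.e. if someone programmed the PCH interrupt
 * sources before this ran.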
3230 */ 3231 static void ibx_irq_pre_postinstall(struct drm_device *dev) 3232 { 3233 struct drm_i915_private *dev_priv = to_i915(dev); 3234 3235 if (HAS_PCH_NOP(dev_priv)) 3236 return; 3237 3238 WARN_ON(I915_READ(SDEIER) != 0); 3239 I915_WRITE(SDEIER, 0xffffffff); 3240 POSTING_READ(SDEIER); 3241 } 3242 3243 static void gen5_gt_irq_reset(struct drm_i915_private *dev_priv) 3244 { 3245 GEN3_IRQ_RESET(GT); 3246 if (INTEL_GEN(dev_priv) >= 6) 3247 GEN3_IRQ_RESET(GEN6_PM); 3248 } 3249 3250 static void vlv_display_irq_reset(struct drm_i915_private *dev_priv) 3251 { 3252 if (IS_CHERRYVIEW(dev_priv)) 3253 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV); 3254 else 3255 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK); 3256 3257 i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0); 3258 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3259 3260 i9xx_pipestat_irq_reset(dev_priv); 3261 3262 GEN3_IRQ_RESET(VLV_); 3263 dev_priv->irq_mask = ~0u; 3264 } 3265 3266 static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv) 3267 { 3268 u32 pipestat_mask; 3269 u32 enable_mask; 3270 enum pipe pipe; 3271 3272 pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS; 3273 3274 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); 3275 for_each_pipe(dev_priv, pipe) 3276 i915_enable_pipestat(dev_priv, pipe, pipestat_mask); 3277 3278 enable_mask = I915_DISPLAY_PORT_INTERRUPT | 3279 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3280 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3281 I915_LPE_PIPE_A_INTERRUPT | 3282 I915_LPE_PIPE_B_INTERRUPT; 3283 3284 if (IS_CHERRYVIEW(dev_priv)) 3285 enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT | 3286 I915_LPE_PIPE_C_INTERRUPT; 3287 3288 WARN_ON(dev_priv->irq_mask != ~0u); 3289 3290 dev_priv->irq_mask = ~enable_mask; 3291 3292 GEN3_IRQ_INIT(VLV_, dev_priv->irq_mask, enable_mask); 3293 } 3294 3295 /* drm_dma.h hooks 3296 */ 3297 static void ironlake_irq_reset(struct drm_device *dev) 3298 { 3299 struct drm_i915_private *dev_priv = to_i915(dev); 3300 3301 GEN3_IRQ_RESET(DE); 3302 if (IS_GEN(dev_priv, 7)) 3303 I915_WRITE(GEN7_ERR_INT, 0xffffffff); 3304 3305 if (IS_HASWELL(dev_priv)) { 3306 I915_WRITE(EDP_PSR_IMR, 0xffffffff); 3307 I915_WRITE(EDP_PSR_IIR, 0xffffffff); 3308 } 3309 3310 gen5_gt_irq_reset(dev_priv); 3311 3312 ibx_irq_reset(dev_priv); 3313 } 3314 3315 static void valleyview_irq_reset(struct drm_device *dev) 3316 { 3317 struct drm_i915_private *dev_priv = to_i915(dev); 3318 3319 I915_WRITE(VLV_MASTER_IER, 0); 3320 POSTING_READ(VLV_MASTER_IER); 3321 3322 gen5_gt_irq_reset(dev_priv); 3323 3324 spin_lock_irq(&dev_priv->irq_lock); 3325 if (dev_priv->display_irqs_enabled) 3326 vlv_display_irq_reset(dev_priv); 3327 spin_unlock_irq(&dev_priv->irq_lock); 3328 } 3329 3330 static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv) 3331 { 3332 GEN8_IRQ_RESET_NDX(GT, 0); 3333 GEN8_IRQ_RESET_NDX(GT, 1); 3334 GEN8_IRQ_RESET_NDX(GT, 2); 3335 GEN8_IRQ_RESET_NDX(GT, 3); 3336 } 3337 3338 static void gen8_irq_reset(struct drm_device *dev) 3339 { 3340 struct drm_i915_private *dev_priv = to_i915(dev); 3341 int pipe; 3342 3343 gen8_master_intr_disable(dev_priv->regs); 3344 3345 gen8_gt_irq_reset(dev_priv); 3346 3347 I915_WRITE(EDP_PSR_IMR, 0xffffffff); 3348 I915_WRITE(EDP_PSR_IIR, 0xffffffff); 3349 3350 for_each_pipe(dev_priv, pipe) 3351 if (intel_display_power_is_enabled(dev_priv, 3352 POWER_DOMAIN_PIPE(pipe))) 3353 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe); 3354 3355 GEN3_IRQ_RESET(GEN8_DE_PORT_); 3356 GEN3_IRQ_RESET(GEN8_DE_MISC_); 3357 GEN3_IRQ_RESET(GEN8_PCU_); 3358 3359 
if (HAS_PCH_SPLIT(dev_priv)) 3360 ibx_irq_reset(dev_priv); 3361 } 3362 3363 static void gen11_gt_irq_reset(struct drm_i915_private *dev_priv) 3364 { 3365 /* Disable RCS, BCS, VCS and VECS class engines. */ 3366 I915_WRITE(GEN11_RENDER_COPY_INTR_ENABLE, 0); 3367 I915_WRITE(GEN11_VCS_VECS_INTR_ENABLE, 0); 3368 3369 /* Restore masks irqs on RCS, BCS, VCS and VECS engines. */ 3370 I915_WRITE(GEN11_RCS0_RSVD_INTR_MASK, ~0); 3371 I915_WRITE(GEN11_BCS_RSVD_INTR_MASK, ~0); 3372 I915_WRITE(GEN11_VCS0_VCS1_INTR_MASK, ~0); 3373 I915_WRITE(GEN11_VCS2_VCS3_INTR_MASK, ~0); 3374 I915_WRITE(GEN11_VECS0_VECS1_INTR_MASK, ~0); 3375 3376 I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0); 3377 I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_MASK, ~0); 3378 } 3379 3380 static void gen11_irq_reset(struct drm_device *dev) 3381 { 3382 struct drm_i915_private *dev_priv = dev->dev_private; 3383 int pipe; 3384 3385 gen11_master_intr_disable(dev_priv->regs); 3386 3387 gen11_gt_irq_reset(dev_priv); 3388 3389 I915_WRITE(GEN11_DISPLAY_INT_CTL, 0); 3390 3391 I915_WRITE(EDP_PSR_IMR, 0xffffffff); 3392 I915_WRITE(EDP_PSR_IIR, 0xffffffff); 3393 3394 for_each_pipe(dev_priv, pipe) 3395 if (intel_display_power_is_enabled(dev_priv, 3396 POWER_DOMAIN_PIPE(pipe))) 3397 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe); 3398 3399 GEN3_IRQ_RESET(GEN8_DE_PORT_); 3400 GEN3_IRQ_RESET(GEN8_DE_MISC_); 3401 GEN3_IRQ_RESET(GEN11_DE_HPD_); 3402 GEN3_IRQ_RESET(GEN11_GU_MISC_); 3403 GEN3_IRQ_RESET(GEN8_PCU_); 3404 3405 if (HAS_PCH_ICP(dev_priv)) 3406 GEN3_IRQ_RESET(SDE); 3407 } 3408 3409 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv, 3410 u8 pipe_mask) 3411 { 3412 u32 extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN; 3413 enum pipe pipe; 3414 3415 spin_lock_irq(&dev_priv->irq_lock); 3416 3417 if (!intel_irqs_enabled(dev_priv)) { 3418 spin_unlock_irq(&dev_priv->irq_lock); 3419 return; 3420 } 3421 3422 for_each_pipe_masked(dev_priv, pipe, pipe_mask) 3423 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe, 3424 dev_priv->de_irq_mask[pipe], 3425 ~dev_priv->de_irq_mask[pipe] | extra_ier); 3426 3427 spin_unlock_irq(&dev_priv->irq_lock); 3428 } 3429 3430 void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv, 3431 u8 pipe_mask) 3432 { 3433 enum pipe pipe; 3434 3435 spin_lock_irq(&dev_priv->irq_lock); 3436 3437 if (!intel_irqs_enabled(dev_priv)) { 3438 spin_unlock_irq(&dev_priv->irq_lock); 3439 return; 3440 } 3441 3442 for_each_pipe_masked(dev_priv, pipe, pipe_mask) 3443 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe); 3444 3445 spin_unlock_irq(&dev_priv->irq_lock); 3446 3447 /* make sure we're done processing display irqs */ 3448 synchronize_irq(dev_priv->drm.irq); 3449 } 3450 3451 static void cherryview_irq_reset(struct drm_device *dev) 3452 { 3453 struct drm_i915_private *dev_priv = to_i915(dev); 3454 3455 I915_WRITE(GEN8_MASTER_IRQ, 0); 3456 POSTING_READ(GEN8_MASTER_IRQ); 3457 3458 gen8_gt_irq_reset(dev_priv); 3459 3460 GEN3_IRQ_RESET(GEN8_PCU_); 3461 3462 spin_lock_irq(&dev_priv->irq_lock); 3463 if (dev_priv->display_irqs_enabled) 3464 vlv_display_irq_reset(dev_priv); 3465 spin_unlock_irq(&dev_priv->irq_lock); 3466 } 3467 3468 static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv, 3469 const u32 hpd[HPD_NUM_PINS]) 3470 { 3471 struct intel_encoder *encoder; 3472 u32 enabled_irqs = 0; 3473 3474 for_each_intel_encoder(&dev_priv->drm, encoder) 3475 if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED) 3476 enabled_irqs |= hpd[encoder->hpd_pin]; 3477 3478 return enabled_irqs; 3479 } 3480 3481 static void ibx_hpd_detection_setup(struct 
drm_i915_private *dev_priv) 3482 { 3483 u32 hotplug; 3484 3485 /* 3486 * Enable digital hotplug on the PCH, and configure the DP short pulse 3487 * duration to 2ms (which is the minimum in the Display Port spec). 3488 * The pulse duration bits are reserved on LPT+. 3489 */ 3490 hotplug = I915_READ(PCH_PORT_HOTPLUG); 3491 hotplug &= ~(PORTB_PULSE_DURATION_MASK | 3492 PORTC_PULSE_DURATION_MASK | 3493 PORTD_PULSE_DURATION_MASK); 3494 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms; 3495 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms; 3496 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms; 3497 /* 3498 * When CPU and PCH are on the same package, port A 3499 * HPD must be enabled in both north and south. 3500 */ 3501 if (HAS_PCH_LPT_LP(dev_priv)) 3502 hotplug |= PORTA_HOTPLUG_ENABLE; 3503 I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 3504 } 3505 3506 static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv) 3507 { 3508 u32 hotplug_irqs, enabled_irqs; 3509 3510 if (HAS_PCH_IBX(dev_priv)) { 3511 hotplug_irqs = SDE_HOTPLUG_MASK; 3512 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ibx); 3513 } else { 3514 hotplug_irqs = SDE_HOTPLUG_MASK_CPT; 3515 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_cpt); 3516 } 3517 3518 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 3519 3520 ibx_hpd_detection_setup(dev_priv); 3521 } 3522 3523 static void icp_hpd_detection_setup(struct drm_i915_private *dev_priv) 3524 { 3525 u32 hotplug; 3526 3527 hotplug = I915_READ(SHOTPLUG_CTL_DDI); 3528 hotplug |= ICP_DDIA_HPD_ENABLE | 3529 ICP_DDIB_HPD_ENABLE; 3530 I915_WRITE(SHOTPLUG_CTL_DDI, hotplug); 3531 3532 hotplug = I915_READ(SHOTPLUG_CTL_TC); 3533 hotplug |= ICP_TC_HPD_ENABLE(PORT_TC1) | 3534 ICP_TC_HPD_ENABLE(PORT_TC2) | 3535 ICP_TC_HPD_ENABLE(PORT_TC3) | 3536 ICP_TC_HPD_ENABLE(PORT_TC4); 3537 I915_WRITE(SHOTPLUG_CTL_TC, hotplug); 3538 } 3539 3540 static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv) 3541 { 3542 u32 hotplug_irqs, enabled_irqs; 3543 3544 hotplug_irqs = SDE_DDI_MASK_ICP | SDE_TC_MASK_ICP; 3545 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_icp); 3546 3547 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 3548 3549 icp_hpd_detection_setup(dev_priv); 3550 } 3551 3552 static void gen11_hpd_detection_setup(struct drm_i915_private *dev_priv) 3553 { 3554 u32 hotplug; 3555 3556 hotplug = I915_READ(GEN11_TC_HOTPLUG_CTL); 3557 hotplug |= GEN11_HOTPLUG_CTL_ENABLE(PORT_TC1) | 3558 GEN11_HOTPLUG_CTL_ENABLE(PORT_TC2) | 3559 GEN11_HOTPLUG_CTL_ENABLE(PORT_TC3) | 3560 GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4); 3561 I915_WRITE(GEN11_TC_HOTPLUG_CTL, hotplug); 3562 3563 hotplug = I915_READ(GEN11_TBT_HOTPLUG_CTL); 3564 hotplug |= GEN11_HOTPLUG_CTL_ENABLE(PORT_TC1) | 3565 GEN11_HOTPLUG_CTL_ENABLE(PORT_TC2) | 3566 GEN11_HOTPLUG_CTL_ENABLE(PORT_TC3) | 3567 GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4); 3568 I915_WRITE(GEN11_TBT_HOTPLUG_CTL, hotplug); 3569 } 3570 3571 static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv) 3572 { 3573 u32 hotplug_irqs, enabled_irqs; 3574 u32 val; 3575 3576 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_gen11); 3577 hotplug_irqs = GEN11_DE_TC_HOTPLUG_MASK | GEN11_DE_TBT_HOTPLUG_MASK; 3578 3579 val = I915_READ(GEN11_DE_HPD_IMR); 3580 val &= ~hotplug_irqs; 3581 I915_WRITE(GEN11_DE_HPD_IMR, val); 3582 POSTING_READ(GEN11_DE_HPD_IMR); 3583 3584 gen11_hpd_detection_setup(dev_priv); 3585 3586 if (HAS_PCH_ICP(dev_priv)) 3587 icp_hpd_irq_setup(dev_priv); 3588 } 3589 3590 static void spt_hpd_detection_setup(struct 
drm_i915_private *dev_priv) 3591 { 3592 u32 val, hotplug; 3593 3594 /* Display WA #1179 WaHardHangonHotPlug: cnp */ 3595 if (HAS_PCH_CNP(dev_priv)) { 3596 val = I915_READ(SOUTH_CHICKEN1); 3597 val &= ~CHASSIS_CLK_REQ_DURATION_MASK; 3598 val |= CHASSIS_CLK_REQ_DURATION(0xf); 3599 I915_WRITE(SOUTH_CHICKEN1, val); 3600 } 3601 3602 /* Enable digital hotplug on the PCH */ 3603 hotplug = I915_READ(PCH_PORT_HOTPLUG); 3604 hotplug |= PORTA_HOTPLUG_ENABLE | 3605 PORTB_HOTPLUG_ENABLE | 3606 PORTC_HOTPLUG_ENABLE | 3607 PORTD_HOTPLUG_ENABLE; 3608 I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 3609 3610 hotplug = I915_READ(PCH_PORT_HOTPLUG2); 3611 hotplug |= PORTE_HOTPLUG_ENABLE; 3612 I915_WRITE(PCH_PORT_HOTPLUG2, hotplug); 3613 } 3614 3615 static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv) 3616 { 3617 u32 hotplug_irqs, enabled_irqs; 3618 3619 hotplug_irqs = SDE_HOTPLUG_MASK_SPT; 3620 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_spt); 3621 3622 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 3623 3624 spt_hpd_detection_setup(dev_priv); 3625 } 3626 3627 static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv) 3628 { 3629 u32 hotplug; 3630 3631 /* 3632 * Enable digital hotplug on the CPU, and configure the DP short pulse 3633 * duration to 2ms (which is the minimum in the Display Port spec) 3634 * The pulse duration bits are reserved on HSW+. 3635 */ 3636 hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL); 3637 hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK; 3638 hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE | 3639 DIGITAL_PORTA_PULSE_DURATION_2ms; 3640 I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug); 3641 } 3642 3643 static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv) 3644 { 3645 u32 hotplug_irqs, enabled_irqs; 3646 3647 if (INTEL_GEN(dev_priv) >= 8) { 3648 hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG; 3649 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bdw); 3650 3651 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs); 3652 } else if (INTEL_GEN(dev_priv) >= 7) { 3653 hotplug_irqs = DE_DP_A_HOTPLUG_IVB; 3654 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ivb); 3655 3656 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs); 3657 } else { 3658 hotplug_irqs = DE_DP_A_HOTPLUG; 3659 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ilk); 3660 3661 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs); 3662 } 3663 3664 ilk_hpd_detection_setup(dev_priv); 3665 3666 ibx_hpd_irq_setup(dev_priv); 3667 } 3668 3669 static void __bxt_hpd_detection_setup(struct drm_i915_private *dev_priv, 3670 u32 enabled_irqs) 3671 { 3672 u32 hotplug; 3673 3674 hotplug = I915_READ(PCH_PORT_HOTPLUG); 3675 hotplug |= PORTA_HOTPLUG_ENABLE | 3676 PORTB_HOTPLUG_ENABLE | 3677 PORTC_HOTPLUG_ENABLE; 3678 3679 DRM_DEBUG_KMS("Invert bit setting: hp_ctl:%x hp_port:%x\n", 3680 hotplug, enabled_irqs); 3681 hotplug &= ~BXT_DDI_HPD_INVERT_MASK; 3682 3683 /* 3684 * For BXT invert bit has to be set based on AOB design 3685 * for HPD detection logic, update it based on VBT fields. 
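 *
 * Concretely: each DDI's invert bit is set below only when that port's
 * hotplug interrupt is enabled and the VBT marks the port as inverted.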
3686 */ 3687 if ((enabled_irqs & BXT_DE_PORT_HP_DDIA) && 3688 intel_bios_is_port_hpd_inverted(dev_priv, PORT_A)) 3689 hotplug |= BXT_DDIA_HPD_INVERT; 3690 if ((enabled_irqs & BXT_DE_PORT_HP_DDIB) && 3691 intel_bios_is_port_hpd_inverted(dev_priv, PORT_B)) 3692 hotplug |= BXT_DDIB_HPD_INVERT; 3693 if ((enabled_irqs & BXT_DE_PORT_HP_DDIC) && 3694 intel_bios_is_port_hpd_inverted(dev_priv, PORT_C)) 3695 hotplug |= BXT_DDIC_HPD_INVERT; 3696 3697 I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 3698 } 3699 3700 static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv) 3701 { 3702 __bxt_hpd_detection_setup(dev_priv, BXT_DE_PORT_HOTPLUG_MASK); 3703 } 3704 3705 static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv) 3706 { 3707 u32 hotplug_irqs, enabled_irqs; 3708 3709 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bxt); 3710 hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK; 3711 3712 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs); 3713 3714 __bxt_hpd_detection_setup(dev_priv, enabled_irqs); 3715 } 3716 3717 static void ibx_irq_postinstall(struct drm_device *dev) 3718 { 3719 struct drm_i915_private *dev_priv = to_i915(dev); 3720 u32 mask; 3721 3722 if (HAS_PCH_NOP(dev_priv)) 3723 return; 3724 3725 if (HAS_PCH_IBX(dev_priv)) 3726 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON; 3727 else if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv)) 3728 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT; 3729 else 3730 mask = SDE_GMBUS_CPT; 3731 3732 gen3_assert_iir_is_zero(dev_priv, SDEIIR); 3733 I915_WRITE(SDEIMR, ~mask); 3734 3735 if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) || 3736 HAS_PCH_LPT(dev_priv)) 3737 ibx_hpd_detection_setup(dev_priv); 3738 else 3739 spt_hpd_detection_setup(dev_priv); 3740 } 3741 3742 static void gen5_gt_irq_postinstall(struct drm_device *dev) 3743 { 3744 struct drm_i915_private *dev_priv = to_i915(dev); 3745 u32 pm_irqs, gt_irqs; 3746 3747 pm_irqs = gt_irqs = 0; 3748 3749 dev_priv->gt_irq_mask = ~0; 3750 if (HAS_L3_DPF(dev_priv)) { 3751 /* L3 parity interrupt is always unmasked. */ 3752 dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev_priv); 3753 gt_irqs |= GT_PARITY_ERROR(dev_priv); 3754 } 3755 3756 gt_irqs |= GT_RENDER_USER_INTERRUPT; 3757 if (IS_GEN(dev_priv, 5)) { 3758 gt_irqs |= ILK_BSD_USER_INTERRUPT; 3759 } else { 3760 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT; 3761 } 3762 3763 GEN3_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs); 3764 3765 if (INTEL_GEN(dev_priv) >= 6) { 3766 /* 3767 * RPS interrupts will get enabled/disabled on demand when RPS 3768 * itself is enabled/disabled. 
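 *
 * Only the VEBOX user interrupt (when present) is requested statically
 * below; every other GEN6_PM source stays masked behind
 * pm_imr = 0xffffffff until the RPS code unmasks it on demand.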
3769 */ 3770 if (HAS_VEBOX(dev_priv)) { 3771 pm_irqs |= PM_VEBOX_USER_INTERRUPT; 3772 dev_priv->pm_ier |= PM_VEBOX_USER_INTERRUPT; 3773 } 3774 3775 dev_priv->pm_imr = 0xffffffff; 3776 GEN3_IRQ_INIT(GEN6_PM, dev_priv->pm_imr, pm_irqs); 3777 } 3778 } 3779 3780 static int ironlake_irq_postinstall(struct drm_device *dev) 3781 { 3782 struct drm_i915_private *dev_priv = to_i915(dev); 3783 u32 display_mask, extra_mask; 3784 3785 if (INTEL_GEN(dev_priv) >= 7) { 3786 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | 3787 DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB); 3788 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB | 3789 DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB | 3790 DE_DP_A_HOTPLUG_IVB); 3791 } else { 3792 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | 3793 DE_AUX_CHANNEL_A | DE_PIPEB_CRC_DONE | 3794 DE_PIPEA_CRC_DONE | DE_POISON); 3795 extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT | 3796 DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN | 3797 DE_DP_A_HOTPLUG); 3798 } 3799 3800 if (IS_HASWELL(dev_priv)) { 3801 gen3_assert_iir_is_zero(dev_priv, EDP_PSR_IIR); 3802 intel_psr_irq_control(dev_priv, dev_priv->psr.debug); 3803 display_mask |= DE_EDP_PSR_INT_HSW; 3804 } 3805 3806 dev_priv->irq_mask = ~display_mask; 3807 3808 ibx_irq_pre_postinstall(dev); 3809 3810 GEN3_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask); 3811 3812 gen5_gt_irq_postinstall(dev); 3813 3814 ilk_hpd_detection_setup(dev_priv); 3815 3816 ibx_irq_postinstall(dev); 3817 3818 if (IS_IRONLAKE_M(dev_priv)) { 3819 /* Enable PCU event interrupts 3820 * 3821 * spinlocking not required here for correctness since interrupt 3822 * setup is guaranteed to run in single-threaded context. But we 3823 * need it to make the assert_spin_locked happy. */ 3824 spin_lock_irq(&dev_priv->irq_lock); 3825 ilk_enable_display_irq(dev_priv, DE_PCU_EVENT); 3826 spin_unlock_irq(&dev_priv->irq_lock); 3827 } 3828 3829 return 0; 3830 } 3831 3832 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv) 3833 { 3834 lockdep_assert_held(&dev_priv->irq_lock); 3835 3836 if (dev_priv->display_irqs_enabled) 3837 return; 3838 3839 dev_priv->display_irqs_enabled = true; 3840 3841 if (intel_irqs_enabled(dev_priv)) { 3842 vlv_display_irq_reset(dev_priv); 3843 vlv_display_irq_postinstall(dev_priv); 3844 } 3845 } 3846 3847 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv) 3848 { 3849 lockdep_assert_held(&dev_priv->irq_lock); 3850 3851 if (!dev_priv->display_irqs_enabled) 3852 return; 3853 3854 dev_priv->display_irqs_enabled = false; 3855 3856 if (intel_irqs_enabled(dev_priv)) 3857 vlv_display_irq_reset(dev_priv); 3858 } 3859 3860 3861 static int valleyview_irq_postinstall(struct drm_device *dev) 3862 { 3863 struct drm_i915_private *dev_priv = to_i915(dev); 3864 3865 gen5_gt_irq_postinstall(dev); 3866 3867 spin_lock_irq(&dev_priv->irq_lock); 3868 if (dev_priv->display_irqs_enabled) 3869 vlv_display_irq_postinstall(dev_priv); 3870 spin_unlock_irq(&dev_priv->irq_lock); 3871 3872 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); 3873 POSTING_READ(VLV_MASTER_IER); 3874 3875 return 0; 3876 } 3877 3878 static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv) 3879 { 3880 /* These are interrupts we'll toggle with the ring mask register */ 3881 u32 gt_interrupts[] = { 3882 GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT | 3883 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT | 3884 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT | 3885 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT, 

static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
{
	/* These are interrupts we'll toggle with the ring mask register */
	u32 gt_interrupts[] = {
		GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
			GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
		GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
			GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
		0,
		GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
	};

	dev_priv->pm_ier = 0x0;
	dev_priv->pm_imr = ~dev_priv->pm_ier;
	GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
	GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
	/*
	 * RPS interrupts will get enabled/disabled on demand when RPS itself
	 * is enabled/disabled. The same will be the case for GuC interrupts.
	 */
	GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_imr, dev_priv->pm_ier);
	GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
}
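
/*
 * Explanatory note (not driver code): each GEN8 GT interrupt bank packs two
 * engines into one 32-bit IMR/IIR/IER register, with the GEN8_*_IRQ_SHIFT
 * values selecting where an engine's bits land. In the array above, entry 0
 * covers RCS and BCS, entry 1 covers VCS1 and VCS2, entry 2 is left to the
 * PM/RPS bits managed through pm_imr/pm_ier, and entry 3 covers VECS. A
 * handler that wants to test a single engine's bit applies the same shift,
 * for example (gt_iir being a value read from the bank 0 IIR):
 *
 *	if (gt_iir & (GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT))
 *		...;	// user interrupt from the blitter engine
 */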

static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
{
	u32 de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
	u32 de_pipe_enables;
	u32 de_port_masked = GEN8_AUX_CHANNEL_A;
	u32 de_port_enables;
	u32 de_misc_masked = GEN8_DE_EDP_PSR;
	enum pipe pipe;

	if (INTEL_GEN(dev_priv) <= 10)
		de_misc_masked |= GEN8_DE_MISC_GSE;

	if (INTEL_GEN(dev_priv) >= 9) {
		de_pipe_masked |= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
		de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
				  GEN9_AUX_CHANNEL_D;
		if (IS_GEN9_LP(dev_priv))
			de_port_masked |= BXT_DE_PORT_GMBUS;
	} else {
		de_pipe_masked |= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
	}

	if (INTEL_GEN(dev_priv) >= 11)
		de_port_masked |= ICL_AUX_CHANNEL_E;

	if (IS_CNL_WITH_PORT_F(dev_priv) || INTEL_GEN(dev_priv) >= 11)
		de_port_masked |= CNL_AUX_CHANNEL_F;

	de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
			  GEN8_PIPE_FIFO_UNDERRUN;

	de_port_enables = de_port_masked;
	if (IS_GEN9_LP(dev_priv))
		de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
	else if (IS_BROADWELL(dev_priv))
		de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;

	gen3_assert_iir_is_zero(dev_priv, EDP_PSR_IIR);
	intel_psr_irq_control(dev_priv, dev_priv->psr.debug);

	for_each_pipe(dev_priv, pipe) {
		dev_priv->de_irq_mask[pipe] = ~de_pipe_masked;

		if (intel_display_power_is_enabled(dev_priv,
						   POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
					  dev_priv->de_irq_mask[pipe],
					  de_pipe_enables);
	}

	GEN3_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
	GEN3_IRQ_INIT(GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);

	if (INTEL_GEN(dev_priv) >= 11) {
		u32 de_hpd_masked = 0;
		u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK |
				     GEN11_DE_TBT_HOTPLUG_MASK;

		GEN3_IRQ_INIT(GEN11_DE_HPD_, ~de_hpd_masked, de_hpd_enables);
		gen11_hpd_detection_setup(dev_priv);
	} else if (IS_GEN9_LP(dev_priv)) {
		bxt_hpd_detection_setup(dev_priv);
	} else if (IS_BROADWELL(dev_priv)) {
		ilk_hpd_detection_setup(dev_priv);
	}
}

static int gen8_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (HAS_PCH_SPLIT(dev_priv))
		ibx_irq_pre_postinstall(dev);

	gen8_gt_irq_postinstall(dev_priv);
	gen8_de_irq_postinstall(dev_priv);

	if (HAS_PCH_SPLIT(dev_priv))
		ibx_irq_postinstall(dev);

	gen8_master_intr_enable(dev_priv->regs);

	return 0;
}

static void
gen11_gt_irq_postinstall(struct drm_i915_private *dev_priv)
{
	const u32 irqs = GT_RENDER_USER_INTERRUPT | GT_CONTEXT_SWITCH_INTERRUPT;

	BUILD_BUG_ON(irqs & 0xffff0000);

	/* Enable RCS, BCS, VCS and VECS class interrupts. */
	I915_WRITE(GEN11_RENDER_COPY_INTR_ENABLE, irqs << 16 | irqs);
	I915_WRITE(GEN11_VCS_VECS_INTR_ENABLE, irqs << 16 | irqs);

	/* Unmask irqs on RCS, BCS, VCS and VECS engines. */
	I915_WRITE(GEN11_RCS0_RSVD_INTR_MASK, ~(irqs << 16));
	I915_WRITE(GEN11_BCS_RSVD_INTR_MASK, ~(irqs << 16));
	I915_WRITE(GEN11_VCS0_VCS1_INTR_MASK, ~(irqs | irqs << 16));
	I915_WRITE(GEN11_VCS2_VCS3_INTR_MASK, ~(irqs | irqs << 16));
	I915_WRITE(GEN11_VECS0_VECS1_INTR_MASK, ~(irqs | irqs << 16));

	/*
	 * RPS interrupts will get enabled/disabled on demand when RPS itself
	 * is enabled/disabled.
	 */
	dev_priv->pm_ier = 0x0;
	dev_priv->pm_imr = ~dev_priv->pm_ier;
	I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
	I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_MASK, ~0);
}

static void icp_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 mask = SDE_GMBUS_ICP;

	WARN_ON(I915_READ(SDEIER) != 0);
	I915_WRITE(SDEIER, 0xffffffff);
	POSTING_READ(SDEIER);

	gen3_assert_iir_is_zero(dev_priv, SDEIIR);
	I915_WRITE(SDEIMR, ~mask);

	icp_hpd_detection_setup(dev_priv);
}

static int gen11_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 gu_misc_masked = GEN11_GU_MISC_GSE;

	if (HAS_PCH_ICP(dev_priv))
		icp_irq_postinstall(dev);

	gen11_gt_irq_postinstall(dev_priv);
	gen8_de_irq_postinstall(dev_priv);

	GEN3_IRQ_INIT(GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);

	I915_WRITE(GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);

	gen11_master_intr_enable(dev_priv->regs);
	POSTING_READ(GEN11_GFX_MSTR_IRQ);

	return 0;
}

static int cherryview_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	gen8_gt_irq_postinstall(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_postinstall(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
	POSTING_READ(GEN8_MASTER_IRQ);

	return 0;
}

static void i8xx_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	i9xx_pipestat_irq_reset(dev_priv);

	GEN2_IRQ_RESET();
}

static int i8xx_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u16 enable_mask;

	I915_WRITE16(EMR, ~(I915_ERROR_PAGE_TABLE |
			    I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_MASTER_ERROR_INTERRUPT);

	enable_mask =
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_MASTER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	GEN2_IRQ_INIT(, dev_priv->irq_mask, enable_mask);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	return 0;
}

static void i8xx_error_irq_ack(struct drm_i915_private *dev_priv,
			       u16 *eir, u16 *eir_stuck)
{
	u16 emr;

	*eir = I915_READ16(EIR);

	if (*eir)
		I915_WRITE16(EIR, *eir);

	*eir_stuck = I915_READ16(EIR);
	if (*eir_stuck == 0)
		return;

	/*
	 * Toggle all EMR bits to make sure we get an edge
	 * in the ISR master error bit if we don't clear
	 * all the EIR bits. Otherwise the edge triggered
	 * IIR on i965/g4x wouldn't notice that an interrupt
	 * is still pending. Also some EIR bits can't be
	 * cleared except by handling the underlying error
	 * (or by a GPU reset) so we mask any bit that
	 * remains set.
	 */
	emr = I915_READ16(EMR);
	I915_WRITE16(EMR, 0xffff);
	I915_WRITE16(EMR, emr | *eir_stuck);
}

static void i8xx_error_irq_handler(struct drm_i915_private *dev_priv,
				   u16 eir, u16 eir_stuck)
{
	DRM_DEBUG("Master Error: EIR 0x%04x\n", eir);

	if (eir_stuck)
		DRM_DEBUG_DRIVER("EIR stuck: 0x%04x, masked\n", eir_stuck);
}

static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv,
			       u32 *eir, u32 *eir_stuck)
{
	u32 emr;

	*eir = I915_READ(EIR);

	I915_WRITE(EIR, *eir);

	*eir_stuck = I915_READ(EIR);
	if (*eir_stuck == 0)
		return;

	/*
	 * Toggle all EMR bits to make sure we get an edge
	 * in the ISR master error bit if we don't clear
	 * all the EIR bits. Otherwise the edge triggered
	 * IIR on i965/g4x wouldn't notice that an interrupt
	 * is still pending. Also some EIR bits can't be
	 * cleared except by handling the underlying error
	 * (or by a GPU reset) so we mask any bit that
	 * remains set.
	 */
	emr = I915_READ(EMR);
	I915_WRITE(EMR, 0xffffffff);
	I915_WRITE(EMR, emr | *eir_stuck);
}

static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv,
				   u32 eir, u32 eir_stuck)
{
	DRM_DEBUG("Master Error, EIR 0x%08x\n", eir);

	if (eir_stuck)
		DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masked\n", eir_stuck);
}

static irqreturn_t i8xx_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = to_i915(dev);
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	do {
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u16 eir = 0, eir_stuck = 0;
		u16 iir;

		iir = I915_READ16(IIR);
		if (iir == 0)
			break;

		ret = IRQ_HANDLED;

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i8xx_error_irq_ack(dev_priv, &eir, &eir_stuck);

		I915_WRITE16(IIR, iir);

		if (iir & I915_USER_INTERRUPT)
			intel_engine_breadcrumbs_irq(dev_priv->engine[RCS]);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i8xx_error_irq_handler(dev_priv, eir, eir_stuck);

		i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats);
	} while (0);

	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}

static void i915_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (I915_HAS_HOTPLUG(dev_priv)) {
		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	i9xx_pipestat_irq_reset(dev_priv);

	GEN3_IRQ_RESET();
}

static int i915_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 enable_mask;

	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE |
			  I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_MASTER_ERROR_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_MASTER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	if (I915_HAS_HOTPLUG(dev_priv)) {
		/* Enable in IER... */
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
		/* and unmask in IMR */
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
	}

	GEN3_IRQ_INIT(, dev_priv->irq_mask, enable_mask);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	i915_enable_asle_pipestat(dev_priv);

	return 0;
}

static irqreturn_t i915_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = to_i915(dev);
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	do {
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 eir = 0, eir_stuck = 0;
		u32 hotplug_status = 0;
		u32 iir;

		iir = I915_READ(IIR);
		if (iir == 0)
			break;

		ret = IRQ_HANDLED;

		if (I915_HAS_HOTPLUG(dev_priv) &&
		    iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);

		I915_WRITE(IIR, iir);

		if (iir & I915_USER_INTERRUPT)
			intel_engine_breadcrumbs_irq(dev_priv->engine[RCS]);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_handler(dev_priv, eir, eir_stuck);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		i915_pipestat_irq_handler(dev_priv, iir, pipe_stats);
	} while (0);

	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}

static void i965_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	i9xx_pipestat_irq_reset(dev_priv);

	GEN3_IRQ_RESET();
}

static int i965_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 enable_mask;
	u32 error_mask;

	/*
	 * Enable some error detection, note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev_priv)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	I915_WRITE(EMR, error_mask);

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PORT_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_MASTER_ERROR_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PORT_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_MASTER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	if (IS_G4X(dev_priv))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	GEN3_IRQ_INIT(, dev_priv->irq_mask, enable_mask);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	i915_enable_asle_pipestat(dev_priv);

	return 0;
}

static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_en;

	lockdep_assert_held(&dev_priv->irq_lock);

	/* Note HDMI and DP share hotplug bits */
	/* enable bits are the same for all generations */
	hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915);
	/*
	 * Programming the CRT detection parameters tends to generate a
	 * spurious hotplug event about three seconds later. So just do it
	 * once.
	 */
	if (IS_G4X(dev_priv))
		hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
	hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;

	/* Ignore TV since it's buggy */
	i915_hotplug_interrupt_update_locked(dev_priv,
					     HOTPLUG_INT_EN_MASK |
					     CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
					     CRT_HOTPLUG_ACTIVATION_PERIOD_64,
					     hotplug_en);
}

static irqreturn_t i965_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = to_i915(dev);
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	do {
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 eir = 0, eir_stuck = 0;
		u32 hotplug_status = 0;
		u32 iir;

		iir = I915_READ(IIR);
		if (iir == 0)
			break;

		ret = IRQ_HANDLED;

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);

		I915_WRITE(IIR, iir);

		if (iir & I915_USER_INTERRUPT)
			intel_engine_breadcrumbs_irq(dev_priv->engine[RCS]);

		if (iir & I915_BSD_USER_INTERRUPT)
			intel_engine_breadcrumbs_irq(dev_priv->engine[VCS]);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_handler(dev_priv, eir, eir_stuck);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		i965_pipestat_irq_handler(dev_priv, iir, pipe_stats);
	} while (0);

	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}

/**
 * intel_irq_init - initializes irq support
 * @dev_priv: i915 device instance
 *
 * This function initializes all the irq support including work items, timers
 * and all the vtables. It does not set up the interrupt itself though.
 */
void intel_irq_init(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	int i;

	intel_hpd_init_work(dev_priv);

	INIT_WORK(&rps->work, gen6_pm_rps_work);

	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
	for (i = 0; i < MAX_L3_SLICES; ++i)
		dev_priv->l3_parity.remap_info[i] = NULL;

	if (HAS_GUC_SCHED(dev_priv))
		dev_priv->pm_guc_events = GEN9_GUC_TO_HOST_INT_EVENT;

	/* Let's track the enabled rps events */
	if (IS_VALLEYVIEW(dev_priv))
		/* WaGsvRC0ResidencyMethod:vlv */
		dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
	else
		dev_priv->pm_rps_events = (GEN6_PM_RP_UP_THRESHOLD |
					   GEN6_PM_RP_DOWN_THRESHOLD |
					   GEN6_PM_RP_DOWN_TIMEOUT);

	rps->pm_intrmsk_mbz = 0;

	/*
	 * SNB, IVB and HSW can hard hang (and VLV and CHV may as well) on a
	 * looping batchbuffer if GEN6_PM_UP_EI_EXPIRED is masked.
	 *
	 * TODO: verify if this can be reproduced on VLV,CHV.
	 */
	if (INTEL_GEN(dev_priv) <= 7)
		rps->pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED;

	if (INTEL_GEN(dev_priv) >= 8)
		rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;

	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
		dev->driver->get_vblank_counter = g4x_get_vblank_counter;
	else if (INTEL_GEN(dev_priv) >= 3)
		dev->driver->get_vblank_counter = i915_get_vblank_counter;

	/*
	 * Opt out of the vblank disable timer on everything except gen2.
	 * Gen2 doesn't have a hardware frame counter and so depends on
	 * vblank interrupts to produce sane vblank sequence numbers.
	 */
	if (!IS_GEN(dev_priv, 2))
		dev->vblank_disable_immediate = true;

	/* Most platforms treat the display irq block as an always-on
	 * power domain. vlv/chv can disable it at runtime and need
	 * special care to avoid writing any of the display block registers
	 * outside of the power domain. We defer setting up the display irqs
	 * in this case to the runtime pm.
	 */
	dev_priv->display_irqs_enabled = true;
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		dev_priv->display_irqs_enabled = false;

	dev_priv->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD;
	/* If we have MST support, we want to avoid doing short HPD IRQ storm
	 * detection, as short HPD storms will occur as a natural part of
	 * sideband messaging with MST.
	 * On older platforms however, IRQ storms can occur with both long and
	 * short pulses, as seen on some G4x systems.
	 */
	dev_priv->hotplug.hpd_short_storm_enabled = !HAS_DP_MST(dev_priv);

	dev->driver->get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos;
	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;

	if (IS_CHERRYVIEW(dev_priv)) {
		dev->driver->irq_handler = cherryview_irq_handler;
		dev->driver->irq_preinstall = cherryview_irq_reset;
		dev->driver->irq_postinstall = cherryview_irq_postinstall;
		dev->driver->irq_uninstall = cherryview_irq_reset;
		dev->driver->enable_vblank = i965_enable_vblank;
		dev->driver->disable_vblank = i965_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		dev->driver->irq_handler = valleyview_irq_handler;
		dev->driver->irq_preinstall = valleyview_irq_reset;
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
		dev->driver->irq_uninstall = valleyview_irq_reset;
		dev->driver->enable_vblank = i965_enable_vblank;
		dev->driver->disable_vblank = i965_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (INTEL_GEN(dev_priv) >= 11) {
		dev->driver->irq_handler = gen11_irq_handler;
		dev->driver->irq_preinstall = gen11_irq_reset;
		dev->driver->irq_postinstall = gen11_irq_postinstall;
		dev->driver->irq_uninstall = gen11_irq_reset;
		dev->driver->enable_vblank = gen8_enable_vblank;
		dev->driver->disable_vblank = gen8_disable_vblank;
		dev_priv->display.hpd_irq_setup = gen11_hpd_irq_setup;
	} else if (INTEL_GEN(dev_priv) >= 8) {
		dev->driver->irq_handler = gen8_irq_handler;
		dev->driver->irq_preinstall = gen8_irq_reset;
		dev->driver->irq_postinstall = gen8_irq_postinstall;
		dev->driver->irq_uninstall = gen8_irq_reset;
		dev->driver->enable_vblank = gen8_enable_vblank;
		dev->driver->disable_vblank = gen8_disable_vblank;
		if (IS_GEN9_LP(dev_priv))
			dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
		else if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv) ||
			 HAS_PCH_CNP(dev_priv))
			dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
		else
			dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		dev->driver->irq_handler = ironlake_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_reset;
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_reset;
		dev->driver->enable_vblank = ironlake_enable_vblank;
		dev->driver->disable_vblank = ironlake_disable_vblank;
		dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
	} else {
		if (IS_GEN(dev_priv, 2)) {
			dev->driver->irq_preinstall = i8xx_irq_reset;
			dev->driver->irq_postinstall = i8xx_irq_postinstall;
			dev->driver->irq_handler = i8xx_irq_handler;
			dev->driver->irq_uninstall = i8xx_irq_reset;
			dev->driver->enable_vblank = i8xx_enable_vblank;
			dev->driver->disable_vblank = i8xx_disable_vblank;
		} else if (IS_GEN(dev_priv, 3)) {
			dev->driver->irq_preinstall = i915_irq_reset;
			dev->driver->irq_postinstall = i915_irq_postinstall;
			dev->driver->irq_uninstall = i915_irq_reset;
			dev->driver->irq_handler = i915_irq_handler;
			dev->driver->enable_vblank = i8xx_enable_vblank;
			dev->driver->disable_vblank = i8xx_disable_vblank;
		} else {
			dev->driver->irq_preinstall = i965_irq_reset;
			dev->driver->irq_postinstall = i965_irq_postinstall;
			dev->driver->irq_uninstall = i965_irq_reset;
			dev->driver->irq_handler = i965_irq_handler;
			dev->driver->enable_vblank = i965_enable_vblank;
			dev->driver->disable_vblank = i965_disable_vblank;
		}
		if (I915_HAS_HOTPLUG(dev_priv))
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	}
}
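
/*
 * Illustrative sketch (not part of this file): the ->hpd_irq_setup hook
 * assigned above is expected to be invoked with irq_lock held; the hotplug
 * init path (in intel_hotplug.c) does roughly the following. The exact call
 * site lives outside this file, so treat this as an approximation rather
 * than a verbatim quote:
 *
 *	spin_lock_irq(&dev_priv->irq_lock);
 *	if (dev_priv->display_irqs_enabled && dev_priv->display.hpd_irq_setup)
 *		dev_priv->display.hpd_irq_setup(dev_priv);
 *	spin_unlock_irq(&dev_priv->irq_lock);
 *
 * This matches the lockdep_assert_held(&dev_priv->irq_lock) checks in the
 * i915_hpd_irq_setup() style implementations above.
 */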

/**
 * intel_irq_fini - deinitializes IRQ support
 * @i915: i915 device instance
 *
 * This function deinitializes all the IRQ support.
 */
void intel_irq_fini(struct drm_i915_private *i915)
{
	int i;

	for (i = 0; i < MAX_L3_SLICES; ++i)
		kfree(i915->l3_parity.remap_info[i]);
}

/**
 * intel_irq_install - enables the hardware interrupt
 * @dev_priv: i915 device instance
 *
 * This function enables the hardware interrupt handling, but leaves hotplug
 * handling disabled. It is called after intel_irq_init().
 *
 * In the driver load and resume code we need working interrupts in a few
 * places but don't want to deal with the hassle of concurrent probe and
 * hotplug workers. Hence the split into this two-stage approach.
 */
int intel_irq_install(struct drm_i915_private *dev_priv)
{
	/*
	 * We enable some interrupt sources in our postinstall hooks, so mark
	 * interrupts as enabled _before_ actually enabling them to avoid
	 * special cases in our ordering checks.
	 */
	dev_priv->runtime_pm.irqs_enabled = true;

	return drm_irq_install(&dev_priv->drm, dev_priv->drm.pdev->irq);
}

/**
 * intel_irq_uninstall - finalizes all irq handling
 * @dev_priv: i915 device instance
 *
 * This stops interrupt and hotplug handling and unregisters and frees all
 * resources acquired in the init functions.
 */
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
{
	drm_irq_uninstall(&dev_priv->drm);
	intel_hpd_cancel_work(dev_priv);
	dev_priv->runtime_pm.irqs_enabled = false;
}

/**
 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
 * @dev_priv: i915 device instance
 *
 * This function is used to disable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->drm.driver->irq_uninstall(&dev_priv->drm);
	dev_priv->runtime_pm.irqs_enabled = false;
	synchronize_irq(dev_priv->drm.irq);
}

/**
 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
 * @dev_priv: i915 device instance
 *
 * This function is used to enable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->runtime_pm.irqs_enabled = true;
	dev_priv->drm.driver->irq_preinstall(&dev_priv->drm);
	dev_priv->drm.driver->irq_postinstall(&dev_priv->drm);
}
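
/*
 * Illustrative sketch (not part of the driver): how the helpers above fit
 * together over the life of the device. The actual call sites live elsewhere
 * (e.g. i915_drv.c), so the ordering shown here is an assumption based on
 * the kerneldoc comments above rather than a copy of that code:
 *
 *	// driver load
 *	intel_irq_init(dev_priv);		// vtables and work items, no IRQ yet
 *	ret = intel_irq_install(dev_priv);	// request the IRQ, run postinstall hooks
 *
 *	// runtime suspend / system suspend
 *	intel_runtime_pm_disable_interrupts(dev_priv);
 *
 *	// runtime resume / system resume
 *	intel_runtime_pm_enable_interrupts(dev_priv);
 *
 *	// driver unload
 *	intel_irq_uninstall(dev_priv);		// free the IRQ, cancel hotplug work
 *	intel_irq_fini(dev_priv);		// free the l3 parity remap buffers
 *
 * Init before install, and uninstall before fini, follow from the kerneldoc
 * above; the runtime pm pair simply re-runs the preinstall/postinstall hooks.
 */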