1 /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*- 2 */ 3 /* 4 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. 5 * All Rights Reserved. 6 * 7 * Permission is hereby granted, free of charge, to any person obtaining a 8 * copy of this software and associated documentation files (the 9 * "Software"), to deal in the Software without restriction, including 10 * without limitation the rights to use, copy, modify, merge, publish, 11 * distribute, sub license, and/or sell copies of the Software, and to 12 * permit persons to whom the Software is furnished to do so, subject to 13 * the following conditions: 14 * 15 * The above copyright notice and this permission notice (including the 16 * next paragraph) shall be included in all copies or substantial portions 17 * of the Software. 18 * 19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 22 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR 23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, 24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 26 * 27 */ 28 29 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 30 31 #include <linux/sysrq.h> 32 #include <linux/slab.h> 33 #include <linux/circ_buf.h> 34 #include <drm/drmP.h> 35 #include <drm/i915_drm.h> 36 #include "i915_drv.h" 37 #include "i915_trace.h" 38 #include "intel_drv.h" 39 40 /** 41 * DOC: interrupt handling 42 * 43 * These functions provide the basic support for enabling and disabling the 44 * interrupt handling support. There's a lot more functionality in i915_irq.c 45 * and related files, but that will be described in separate chapters. 46 */ 47 48 static const u32 hpd_ilk[HPD_NUM_PINS] = { 49 [HPD_PORT_A] = DE_DP_A_HOTPLUG, 50 }; 51 52 static const u32 hpd_ivb[HPD_NUM_PINS] = { 53 [HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB, 54 }; 55 56 static const u32 hpd_bdw[HPD_NUM_PINS] = { 57 [HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG, 58 }; 59 60 static const u32 hpd_ibx[HPD_NUM_PINS] = { 61 [HPD_CRT] = SDE_CRT_HOTPLUG, 62 [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG, 63 [HPD_PORT_B] = SDE_PORTB_HOTPLUG, 64 [HPD_PORT_C] = SDE_PORTC_HOTPLUG, 65 [HPD_PORT_D] = SDE_PORTD_HOTPLUG 66 }; 67 68 static const u32 hpd_cpt[HPD_NUM_PINS] = { 69 [HPD_CRT] = SDE_CRT_HOTPLUG_CPT, 70 [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT, 71 [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT, 72 [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT, 73 [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT 74 }; 75 76 static const u32 hpd_spt[HPD_NUM_PINS] = { 77 [HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT, 78 [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT, 79 [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT, 80 [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT, 81 [HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT 82 }; 83 84 static const u32 hpd_mask_i915[HPD_NUM_PINS] = { 85 [HPD_CRT] = CRT_HOTPLUG_INT_EN, 86 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN, 87 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN, 88 [HPD_PORT_B] = PORTB_HOTPLUG_INT_EN, 89 [HPD_PORT_C] = PORTC_HOTPLUG_INT_EN, 90 [HPD_PORT_D] = PORTD_HOTPLUG_INT_EN 91 }; 92 93 static const u32 hpd_status_g4x[HPD_NUM_PINS] = { 94 [HPD_CRT] = CRT_HOTPLUG_INT_STATUS, 95 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X, 96 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X, 97 [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS, 98 [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS, 99 [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS 100 }; 101 102 static const u32 hpd_status_i915[HPD_NUM_PINS] = { 103 [HPD_CRT] = CRT_HOTPLUG_INT_STATUS, 104 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915, 105 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915, 106 [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS, 107 [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS, 108 [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS 109 }; 110 111 /* BXT hpd list */ 112 static const u32 hpd_bxt[HPD_NUM_PINS] = { 113 [HPD_PORT_A] = BXT_DE_PORT_HP_DDIA, 114 [HPD_PORT_B] = BXT_DE_PORT_HP_DDIB, 115 [HPD_PORT_C] = BXT_DE_PORT_HP_DDIC 116 }; 117 118 static const u32 hpd_gen11[HPD_NUM_PINS] = { 119 [HPD_PORT_C] = GEN11_TC1_HOTPLUG | GEN11_TBT1_HOTPLUG, 120 [HPD_PORT_D] = GEN11_TC2_HOTPLUG | GEN11_TBT2_HOTPLUG, 121 [HPD_PORT_E] = GEN11_TC3_HOTPLUG | GEN11_TBT3_HOTPLUG, 122 [HPD_PORT_F] = GEN11_TC4_HOTPLUG | GEN11_TBT4_HOTPLUG 123 }; 124 125 static const u32 hpd_icp[HPD_NUM_PINS] = { 126 [HPD_PORT_A] = SDE_DDIA_HOTPLUG_ICP, 127 [HPD_PORT_B] = SDE_DDIB_HOTPLUG_ICP, 128 [HPD_PORT_C] = SDE_TC1_HOTPLUG_ICP, 129 [HPD_PORT_D] = SDE_TC2_HOTPLUG_ICP, 130 [HPD_PORT_E] = SDE_TC3_HOTPLUG_ICP, 131 [HPD_PORT_F] = SDE_TC4_HOTPLUG_ICP 132 }; 133 134 /* IIR can theoretically queue up two events. Be paranoid. */ 135 #define GEN8_IRQ_RESET_NDX(type, which) do { \ 136 I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \ 137 POSTING_READ(GEN8_##type##_IMR(which)); \ 138 I915_WRITE(GEN8_##type##_IER(which), 0); \ 139 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \ 140 POSTING_READ(GEN8_##type##_IIR(which)); \ 141 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \ 142 POSTING_READ(GEN8_##type##_IIR(which)); \ 143 } while (0) 144 145 #define GEN3_IRQ_RESET(type) do { \ 146 I915_WRITE(type##IMR, 0xffffffff); \ 147 POSTING_READ(type##IMR); \ 148 I915_WRITE(type##IER, 0); \ 149 I915_WRITE(type##IIR, 0xffffffff); \ 150 POSTING_READ(type##IIR); \ 151 I915_WRITE(type##IIR, 0xffffffff); \ 152 POSTING_READ(type##IIR); \ 153 } while (0) 154 155 #define GEN2_IRQ_RESET(type) do { \ 156 I915_WRITE16(type##IMR, 0xffff); \ 157 POSTING_READ16(type##IMR); \ 158 I915_WRITE16(type##IER, 0); \ 159 I915_WRITE16(type##IIR, 0xffff); \ 160 POSTING_READ16(type##IIR); \ 161 I915_WRITE16(type##IIR, 0xffff); \ 162 POSTING_READ16(type##IIR); \ 163 } while (0) 164 165 /* 166 * We should clear IMR at preinstall/uninstall, and just check at postinstall. 167 */ 168 static void gen3_assert_iir_is_zero(struct drm_i915_private *dev_priv, 169 i915_reg_t reg) 170 { 171 u32 val = I915_READ(reg); 172 173 if (val == 0) 174 return; 175 176 WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n", 177 i915_mmio_reg_offset(reg), val); 178 I915_WRITE(reg, 0xffffffff); 179 POSTING_READ(reg); 180 I915_WRITE(reg, 0xffffffff); 181 POSTING_READ(reg); 182 } 183 184 static void gen2_assert_iir_is_zero(struct drm_i915_private *dev_priv, 185 i915_reg_t reg) 186 { 187 u16 val = I915_READ16(reg); 188 189 if (val == 0) 190 return; 191 192 WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n", 193 i915_mmio_reg_offset(reg), val); 194 I915_WRITE16(reg, 0xffff); 195 POSTING_READ16(reg); 196 I915_WRITE16(reg, 0xffff); 197 POSTING_READ16(reg); 198 } 199 200 #define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \ 201 gen3_assert_iir_is_zero(dev_priv, GEN8_##type##_IIR(which)); \ 202 I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \ 203 I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \ 204 POSTING_READ(GEN8_##type##_IMR(which)); \ 205 } while (0) 206 207 #define GEN3_IRQ_INIT(type, imr_val, ier_val) do { \ 208 gen3_assert_iir_is_zero(dev_priv, type##IIR); \ 209 I915_WRITE(type##IER, (ier_val)); \ 210 I915_WRITE(type##IMR, (imr_val)); \ 211 POSTING_READ(type##IMR); \ 212 } while (0) 213 214 #define GEN2_IRQ_INIT(type, imr_val, ier_val) do { \ 215 gen2_assert_iir_is_zero(dev_priv, type##IIR); \ 216 I915_WRITE16(type##IER, (ier_val)); \ 217 I915_WRITE16(type##IMR, (imr_val)); \ 218 POSTING_READ16(type##IMR); \ 219 } while (0) 220 221 static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir); 222 static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir); 223 224 /* For display hotplug interrupt */ 225 static inline void 226 i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv, 227 uint32_t mask, 228 uint32_t bits) 229 { 230 uint32_t val; 231 232 lockdep_assert_held(&dev_priv->irq_lock); 233 WARN_ON(bits & ~mask); 234 235 val = I915_READ(PORT_HOTPLUG_EN); 236 val &= ~mask; 237 val |= bits; 238 I915_WRITE(PORT_HOTPLUG_EN, val); 239 } 240 241 /** 242 * i915_hotplug_interrupt_update - update hotplug interrupt enable 243 * @dev_priv: driver private 244 * @mask: bits to update 245 * @bits: bits to enable 246 * NOTE: the HPD enable bits are modified both inside and outside 247 * of an interrupt context. To avoid that read-modify-write cycles 248 * interfer, these bits are protected by a spinlock. Since this 249 * function is usually not called from a context where the lock is 250 * held already, this function acquires the lock itself. A non-locking 251 * version is also available. 252 */ 253 void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv, 254 uint32_t mask, 255 uint32_t bits) 256 { 257 spin_lock_irq(&dev_priv->irq_lock); 258 i915_hotplug_interrupt_update_locked(dev_priv, mask, bits); 259 spin_unlock_irq(&dev_priv->irq_lock); 260 } 261 262 static u32 263 gen11_gt_engine_identity(struct drm_i915_private * const i915, 264 const unsigned int bank, const unsigned int bit); 265 266 static bool gen11_reset_one_iir(struct drm_i915_private * const i915, 267 const unsigned int bank, 268 const unsigned int bit) 269 { 270 void __iomem * const regs = i915->regs; 271 u32 dw; 272 273 lockdep_assert_held(&i915->irq_lock); 274 275 dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank)); 276 if (dw & BIT(bit)) { 277 /* 278 * According to the BSpec, DW_IIR bits cannot be cleared without 279 * first servicing the Selector & Shared IIR registers. 280 */ 281 gen11_gt_engine_identity(i915, bank, bit); 282 283 /* 284 * We locked GT INT DW by reading it. If we want to (try 285 * to) recover from this succesfully, we need to clear 286 * our bit, otherwise we are locking the register for 287 * everybody. 288 */ 289 raw_reg_write(regs, GEN11_GT_INTR_DW(bank), BIT(bit)); 290 291 return true; 292 } 293 294 return false; 295 } 296 297 /** 298 * ilk_update_display_irq - update DEIMR 299 * @dev_priv: driver private 300 * @interrupt_mask: mask of interrupt bits to update 301 * @enabled_irq_mask: mask of interrupt bits to enable 302 */ 303 void ilk_update_display_irq(struct drm_i915_private *dev_priv, 304 uint32_t interrupt_mask, 305 uint32_t enabled_irq_mask) 306 { 307 uint32_t new_val; 308 309 lockdep_assert_held(&dev_priv->irq_lock); 310 311 WARN_ON(enabled_irq_mask & ~interrupt_mask); 312 313 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 314 return; 315 316 new_val = dev_priv->irq_mask; 317 new_val &= ~interrupt_mask; 318 new_val |= (~enabled_irq_mask & interrupt_mask); 319 320 if (new_val != dev_priv->irq_mask) { 321 dev_priv->irq_mask = new_val; 322 I915_WRITE(DEIMR, dev_priv->irq_mask); 323 POSTING_READ(DEIMR); 324 } 325 } 326 327 /** 328 * ilk_update_gt_irq - update GTIMR 329 * @dev_priv: driver private 330 * @interrupt_mask: mask of interrupt bits to update 331 * @enabled_irq_mask: mask of interrupt bits to enable 332 */ 333 static void ilk_update_gt_irq(struct drm_i915_private *dev_priv, 334 uint32_t interrupt_mask, 335 uint32_t enabled_irq_mask) 336 { 337 lockdep_assert_held(&dev_priv->irq_lock); 338 339 WARN_ON(enabled_irq_mask & ~interrupt_mask); 340 341 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 342 return; 343 344 dev_priv->gt_irq_mask &= ~interrupt_mask; 345 dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask); 346 I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 347 } 348 349 void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask) 350 { 351 ilk_update_gt_irq(dev_priv, mask, mask); 352 POSTING_READ_FW(GTIMR); 353 } 354 355 void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask) 356 { 357 ilk_update_gt_irq(dev_priv, mask, 0); 358 } 359 360 static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv) 361 { 362 WARN_ON_ONCE(INTEL_GEN(dev_priv) >= 11); 363 364 return INTEL_GEN(dev_priv) >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR; 365 } 366 367 static i915_reg_t gen6_pm_imr(struct drm_i915_private *dev_priv) 368 { 369 if (INTEL_GEN(dev_priv) >= 11) 370 return GEN11_GPM_WGBOXPERF_INTR_MASK; 371 else if (INTEL_GEN(dev_priv) >= 8) 372 return GEN8_GT_IMR(2); 373 else 374 return GEN6_PMIMR; 375 } 376 377 static i915_reg_t gen6_pm_ier(struct drm_i915_private *dev_priv) 378 { 379 if (INTEL_GEN(dev_priv) >= 11) 380 return GEN11_GPM_WGBOXPERF_INTR_ENABLE; 381 else if (INTEL_GEN(dev_priv) >= 8) 382 return GEN8_GT_IER(2); 383 else 384 return GEN6_PMIER; 385 } 386 387 /** 388 * snb_update_pm_irq - update GEN6_PMIMR 389 * @dev_priv: driver private 390 * @interrupt_mask: mask of interrupt bits to update 391 * @enabled_irq_mask: mask of interrupt bits to enable 392 */ 393 static void snb_update_pm_irq(struct drm_i915_private *dev_priv, 394 uint32_t interrupt_mask, 395 uint32_t enabled_irq_mask) 396 { 397 uint32_t new_val; 398 399 WARN_ON(enabled_irq_mask & ~interrupt_mask); 400 401 lockdep_assert_held(&dev_priv->irq_lock); 402 403 new_val = dev_priv->pm_imr; 404 new_val &= ~interrupt_mask; 405 new_val |= (~enabled_irq_mask & interrupt_mask); 406 407 if (new_val != dev_priv->pm_imr) { 408 dev_priv->pm_imr = new_val; 409 I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_imr); 410 POSTING_READ(gen6_pm_imr(dev_priv)); 411 } 412 } 413 414 void gen6_unmask_pm_irq(struct drm_i915_private *dev_priv, u32 mask) 415 { 416 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 417 return; 418 419 snb_update_pm_irq(dev_priv, mask, mask); 420 } 421 422 static void __gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask) 423 { 424 snb_update_pm_irq(dev_priv, mask, 0); 425 } 426 427 void gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask) 428 { 429 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 430 return; 431 432 __gen6_mask_pm_irq(dev_priv, mask); 433 } 434 435 static void gen6_reset_pm_iir(struct drm_i915_private *dev_priv, u32 reset_mask) 436 { 437 i915_reg_t reg = gen6_pm_iir(dev_priv); 438 439 lockdep_assert_held(&dev_priv->irq_lock); 440 441 I915_WRITE(reg, reset_mask); 442 I915_WRITE(reg, reset_mask); 443 POSTING_READ(reg); 444 } 445 446 static void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, u32 enable_mask) 447 { 448 lockdep_assert_held(&dev_priv->irq_lock); 449 450 dev_priv->pm_ier |= enable_mask; 451 I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->pm_ier); 452 gen6_unmask_pm_irq(dev_priv, enable_mask); 453 /* unmask_pm_irq provides an implicit barrier (POSTING_READ) */ 454 } 455 456 static void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, u32 disable_mask) 457 { 458 lockdep_assert_held(&dev_priv->irq_lock); 459 460 dev_priv->pm_ier &= ~disable_mask; 461 __gen6_mask_pm_irq(dev_priv, disable_mask); 462 I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->pm_ier); 463 /* though a barrier is missing here, but don't really need a one */ 464 } 465 466 void gen11_reset_rps_interrupts(struct drm_i915_private *dev_priv) 467 { 468 spin_lock_irq(&dev_priv->irq_lock); 469 470 while (gen11_reset_one_iir(dev_priv, 0, GEN11_GTPM)) 471 ; 472 473 dev_priv->gt_pm.rps.pm_iir = 0; 474 475 spin_unlock_irq(&dev_priv->irq_lock); 476 } 477 478 void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv) 479 { 480 spin_lock_irq(&dev_priv->irq_lock); 481 gen6_reset_pm_iir(dev_priv, GEN6_PM_RPS_EVENTS); 482 dev_priv->gt_pm.rps.pm_iir = 0; 483 spin_unlock_irq(&dev_priv->irq_lock); 484 } 485 486 void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv) 487 { 488 struct intel_rps *rps = &dev_priv->gt_pm.rps; 489 490 if (READ_ONCE(rps->interrupts_enabled)) 491 return; 492 493 spin_lock_irq(&dev_priv->irq_lock); 494 WARN_ON_ONCE(rps->pm_iir); 495 496 if (INTEL_GEN(dev_priv) >= 11) 497 WARN_ON_ONCE(gen11_reset_one_iir(dev_priv, 0, GEN11_GTPM)); 498 else 499 WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events); 500 501 rps->interrupts_enabled = true; 502 gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events); 503 504 spin_unlock_irq(&dev_priv->irq_lock); 505 } 506 507 void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv) 508 { 509 struct intel_rps *rps = &dev_priv->gt_pm.rps; 510 511 if (!READ_ONCE(rps->interrupts_enabled)) 512 return; 513 514 spin_lock_irq(&dev_priv->irq_lock); 515 rps->interrupts_enabled = false; 516 517 I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0u)); 518 519 gen6_disable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS); 520 521 spin_unlock_irq(&dev_priv->irq_lock); 522 synchronize_irq(dev_priv->drm.irq); 523 524 /* Now that we will not be generating any more work, flush any 525 * outstanding tasks. As we are called on the RPS idle path, 526 * we will reset the GPU to minimum frequencies, so the current 527 * state of the worker can be discarded. 528 */ 529 cancel_work_sync(&rps->work); 530 if (INTEL_GEN(dev_priv) >= 11) 531 gen11_reset_rps_interrupts(dev_priv); 532 else 533 gen6_reset_rps_interrupts(dev_priv); 534 } 535 536 void gen9_reset_guc_interrupts(struct drm_i915_private *dev_priv) 537 { 538 assert_rpm_wakelock_held(dev_priv); 539 540 spin_lock_irq(&dev_priv->irq_lock); 541 gen6_reset_pm_iir(dev_priv, dev_priv->pm_guc_events); 542 spin_unlock_irq(&dev_priv->irq_lock); 543 } 544 545 void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv) 546 { 547 assert_rpm_wakelock_held(dev_priv); 548 549 spin_lock_irq(&dev_priv->irq_lock); 550 if (!dev_priv->guc.interrupts_enabled) { 551 WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & 552 dev_priv->pm_guc_events); 553 dev_priv->guc.interrupts_enabled = true; 554 gen6_enable_pm_irq(dev_priv, dev_priv->pm_guc_events); 555 } 556 spin_unlock_irq(&dev_priv->irq_lock); 557 } 558 559 void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv) 560 { 561 assert_rpm_wakelock_held(dev_priv); 562 563 spin_lock_irq(&dev_priv->irq_lock); 564 dev_priv->guc.interrupts_enabled = false; 565 566 gen6_disable_pm_irq(dev_priv, dev_priv->pm_guc_events); 567 568 spin_unlock_irq(&dev_priv->irq_lock); 569 synchronize_irq(dev_priv->drm.irq); 570 571 gen9_reset_guc_interrupts(dev_priv); 572 } 573 574 /** 575 * bdw_update_port_irq - update DE port interrupt 576 * @dev_priv: driver private 577 * @interrupt_mask: mask of interrupt bits to update 578 * @enabled_irq_mask: mask of interrupt bits to enable 579 */ 580 static void bdw_update_port_irq(struct drm_i915_private *dev_priv, 581 uint32_t interrupt_mask, 582 uint32_t enabled_irq_mask) 583 { 584 uint32_t new_val; 585 uint32_t old_val; 586 587 lockdep_assert_held(&dev_priv->irq_lock); 588 589 WARN_ON(enabled_irq_mask & ~interrupt_mask); 590 591 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 592 return; 593 594 old_val = I915_READ(GEN8_DE_PORT_IMR); 595 596 new_val = old_val; 597 new_val &= ~interrupt_mask; 598 new_val |= (~enabled_irq_mask & interrupt_mask); 599 600 if (new_val != old_val) { 601 I915_WRITE(GEN8_DE_PORT_IMR, new_val); 602 POSTING_READ(GEN8_DE_PORT_IMR); 603 } 604 } 605 606 /** 607 * bdw_update_pipe_irq - update DE pipe interrupt 608 * @dev_priv: driver private 609 * @pipe: pipe whose interrupt to update 610 * @interrupt_mask: mask of interrupt bits to update 611 * @enabled_irq_mask: mask of interrupt bits to enable 612 */ 613 void bdw_update_pipe_irq(struct drm_i915_private *dev_priv, 614 enum pipe pipe, 615 uint32_t interrupt_mask, 616 uint32_t enabled_irq_mask) 617 { 618 uint32_t new_val; 619 620 lockdep_assert_held(&dev_priv->irq_lock); 621 622 WARN_ON(enabled_irq_mask & ~interrupt_mask); 623 624 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 625 return; 626 627 new_val = dev_priv->de_irq_mask[pipe]; 628 new_val &= ~interrupt_mask; 629 new_val |= (~enabled_irq_mask & interrupt_mask); 630 631 if (new_val != dev_priv->de_irq_mask[pipe]) { 632 dev_priv->de_irq_mask[pipe] = new_val; 633 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); 634 POSTING_READ(GEN8_DE_PIPE_IMR(pipe)); 635 } 636 } 637 638 /** 639 * ibx_display_interrupt_update - update SDEIMR 640 * @dev_priv: driver private 641 * @interrupt_mask: mask of interrupt bits to update 642 * @enabled_irq_mask: mask of interrupt bits to enable 643 */ 644 void ibx_display_interrupt_update(struct drm_i915_private *dev_priv, 645 uint32_t interrupt_mask, 646 uint32_t enabled_irq_mask) 647 { 648 uint32_t sdeimr = I915_READ(SDEIMR); 649 sdeimr &= ~interrupt_mask; 650 sdeimr |= (~enabled_irq_mask & interrupt_mask); 651 652 WARN_ON(enabled_irq_mask & ~interrupt_mask); 653 654 lockdep_assert_held(&dev_priv->irq_lock); 655 656 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 657 return; 658 659 I915_WRITE(SDEIMR, sdeimr); 660 POSTING_READ(SDEIMR); 661 } 662 663 u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv, 664 enum pipe pipe) 665 { 666 u32 status_mask = dev_priv->pipestat_irq_mask[pipe]; 667 u32 enable_mask = status_mask << 16; 668 669 lockdep_assert_held(&dev_priv->irq_lock); 670 671 if (INTEL_GEN(dev_priv) < 5) 672 goto out; 673 674 /* 675 * On pipe A we don't support the PSR interrupt yet, 676 * on pipe B and C the same bit MBZ. 677 */ 678 if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV)) 679 return 0; 680 /* 681 * On pipe B and C we don't support the PSR interrupt yet, on pipe 682 * A the same bit is for perf counters which we don't use either. 683 */ 684 if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV)) 685 return 0; 686 687 enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS | 688 SPRITE0_FLIP_DONE_INT_EN_VLV | 689 SPRITE1_FLIP_DONE_INT_EN_VLV); 690 if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV) 691 enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV; 692 if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV) 693 enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV; 694 695 out: 696 WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK || 697 status_mask & ~PIPESTAT_INT_STATUS_MASK, 698 "pipe %c: enable_mask=0x%x, status_mask=0x%x\n", 699 pipe_name(pipe), enable_mask, status_mask); 700 701 return enable_mask; 702 } 703 704 void i915_enable_pipestat(struct drm_i915_private *dev_priv, 705 enum pipe pipe, u32 status_mask) 706 { 707 i915_reg_t reg = PIPESTAT(pipe); 708 u32 enable_mask; 709 710 WARN_ONCE(status_mask & ~PIPESTAT_INT_STATUS_MASK, 711 "pipe %c: status_mask=0x%x\n", 712 pipe_name(pipe), status_mask); 713 714 lockdep_assert_held(&dev_priv->irq_lock); 715 WARN_ON(!intel_irqs_enabled(dev_priv)); 716 717 if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == status_mask) 718 return; 719 720 dev_priv->pipestat_irq_mask[pipe] |= status_mask; 721 enable_mask = i915_pipestat_enable_mask(dev_priv, pipe); 722 723 I915_WRITE(reg, enable_mask | status_mask); 724 POSTING_READ(reg); 725 } 726 727 void i915_disable_pipestat(struct drm_i915_private *dev_priv, 728 enum pipe pipe, u32 status_mask) 729 { 730 i915_reg_t reg = PIPESTAT(pipe); 731 u32 enable_mask; 732 733 WARN_ONCE(status_mask & ~PIPESTAT_INT_STATUS_MASK, 734 "pipe %c: status_mask=0x%x\n", 735 pipe_name(pipe), status_mask); 736 737 lockdep_assert_held(&dev_priv->irq_lock); 738 WARN_ON(!intel_irqs_enabled(dev_priv)); 739 740 if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == 0) 741 return; 742 743 dev_priv->pipestat_irq_mask[pipe] &= ~status_mask; 744 enable_mask = i915_pipestat_enable_mask(dev_priv, pipe); 745 746 I915_WRITE(reg, enable_mask | status_mask); 747 POSTING_READ(reg); 748 } 749 750 /** 751 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion 752 * @dev_priv: i915 device private 753 */ 754 static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv) 755 { 756 if (!dev_priv->opregion.asle || !IS_MOBILE(dev_priv)) 757 return; 758 759 spin_lock_irq(&dev_priv->irq_lock); 760 761 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS); 762 if (INTEL_GEN(dev_priv) >= 4) 763 i915_enable_pipestat(dev_priv, PIPE_A, 764 PIPE_LEGACY_BLC_EVENT_STATUS); 765 766 spin_unlock_irq(&dev_priv->irq_lock); 767 } 768 769 /* 770 * This timing diagram depicts the video signal in and 771 * around the vertical blanking period. 772 * 773 * Assumptions about the fictitious mode used in this example: 774 * vblank_start >= 3 775 * vsync_start = vblank_start + 1 776 * vsync_end = vblank_start + 2 777 * vtotal = vblank_start + 3 778 * 779 * start of vblank: 780 * latch double buffered registers 781 * increment frame counter (ctg+) 782 * generate start of vblank interrupt (gen4+) 783 * | 784 * | frame start: 785 * | generate frame start interrupt (aka. vblank interrupt) (gmch) 786 * | may be shifted forward 1-3 extra lines via PIPECONF 787 * | | 788 * | | start of vsync: 789 * | | generate vsync interrupt 790 * | | | 791 * ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx 792 * . \hs/ . \hs/ \hs/ \hs/ . \hs/ 793 * ----va---> <-----------------vb--------------------> <--------va------------- 794 * | | <----vs-----> | 795 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2) 796 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+) 797 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi) 798 * | | | 799 * last visible pixel first visible pixel 800 * | increment frame counter (gen3/4) 801 * pixel counter = vblank_start * htotal pixel counter = 0 (gen3/4) 802 * 803 * x = horizontal active 804 * _ = horizontal blanking 805 * hs = horizontal sync 806 * va = vertical active 807 * vb = vertical blanking 808 * vs = vertical sync 809 * vbs = vblank_start (number) 810 * 811 * Summary: 812 * - most events happen at the start of horizontal sync 813 * - frame start happens at the start of horizontal blank, 1-4 lines 814 * (depending on PIPECONF settings) after the start of vblank 815 * - gen3/4 pixel and frame counter are synchronized with the start 816 * of horizontal active on the first line of vertical active 817 */ 818 819 /* Called from drm generic code, passed a 'crtc', which 820 * we use as a pipe index 821 */ 822 static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe) 823 { 824 struct drm_i915_private *dev_priv = to_i915(dev); 825 i915_reg_t high_frame, low_frame; 826 u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal; 827 const struct drm_display_mode *mode = &dev->vblank[pipe].hwmode; 828 unsigned long irqflags; 829 830 htotal = mode->crtc_htotal; 831 hsync_start = mode->crtc_hsync_start; 832 vbl_start = mode->crtc_vblank_start; 833 if (mode->flags & DRM_MODE_FLAG_INTERLACE) 834 vbl_start = DIV_ROUND_UP(vbl_start, 2); 835 836 /* Convert to pixel count */ 837 vbl_start *= htotal; 838 839 /* Start of vblank event occurs at start of hsync */ 840 vbl_start -= htotal - hsync_start; 841 842 high_frame = PIPEFRAME(pipe); 843 low_frame = PIPEFRAMEPIXEL(pipe); 844 845 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 846 847 /* 848 * High & low register fields aren't synchronized, so make sure 849 * we get a low value that's stable across two reads of the high 850 * register. 851 */ 852 do { 853 high1 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK; 854 low = I915_READ_FW(low_frame); 855 high2 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK; 856 } while (high1 != high2); 857 858 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 859 860 high1 >>= PIPE_FRAME_HIGH_SHIFT; 861 pixel = low & PIPE_PIXEL_MASK; 862 low >>= PIPE_FRAME_LOW_SHIFT; 863 864 /* 865 * The frame counter increments at beginning of active. 866 * Cook up a vblank counter by also checking the pixel 867 * counter against vblank start. 868 */ 869 return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff; 870 } 871 872 static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe) 873 { 874 struct drm_i915_private *dev_priv = to_i915(dev); 875 876 return I915_READ(PIPE_FRMCOUNT_G4X(pipe)); 877 } 878 879 /* 880 * On certain encoders on certain platforms, pipe 881 * scanline register will not work to get the scanline, 882 * since the timings are driven from the PORT or issues 883 * with scanline register updates. 884 * This function will use Framestamp and current 885 * timestamp registers to calculate the scanline. 886 */ 887 static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc) 888 { 889 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 890 struct drm_vblank_crtc *vblank = 891 &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)]; 892 const struct drm_display_mode *mode = &vblank->hwmode; 893 u32 vblank_start = mode->crtc_vblank_start; 894 u32 vtotal = mode->crtc_vtotal; 895 u32 htotal = mode->crtc_htotal; 896 u32 clock = mode->crtc_clock; 897 u32 scanline, scan_prev_time, scan_curr_time, scan_post_time; 898 899 /* 900 * To avoid the race condition where we might cross into the 901 * next vblank just between the PIPE_FRMTMSTMP and TIMESTAMP_CTR 902 * reads. We make sure we read PIPE_FRMTMSTMP and TIMESTAMP_CTR 903 * during the same frame. 904 */ 905 do { 906 /* 907 * This field provides read back of the display 908 * pipe frame time stamp. The time stamp value 909 * is sampled at every start of vertical blank. 910 */ 911 scan_prev_time = I915_READ_FW(PIPE_FRMTMSTMP(crtc->pipe)); 912 913 /* 914 * The TIMESTAMP_CTR register has the current 915 * time stamp value. 916 */ 917 scan_curr_time = I915_READ_FW(IVB_TIMESTAMP_CTR); 918 919 scan_post_time = I915_READ_FW(PIPE_FRMTMSTMP(crtc->pipe)); 920 } while (scan_post_time != scan_prev_time); 921 922 scanline = div_u64(mul_u32_u32(scan_curr_time - scan_prev_time, 923 clock), 1000 * htotal); 924 scanline = min(scanline, vtotal - 1); 925 scanline = (scanline + vblank_start) % vtotal; 926 927 return scanline; 928 } 929 930 /* I915_READ_FW, only for fast reads of display block, no need for forcewake etc. */ 931 static int __intel_get_crtc_scanline(struct intel_crtc *crtc) 932 { 933 struct drm_device *dev = crtc->base.dev; 934 struct drm_i915_private *dev_priv = to_i915(dev); 935 const struct drm_display_mode *mode; 936 struct drm_vblank_crtc *vblank; 937 enum pipe pipe = crtc->pipe; 938 int position, vtotal; 939 940 if (!crtc->active) 941 return -1; 942 943 vblank = &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)]; 944 mode = &vblank->hwmode; 945 946 if (mode->private_flags & I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP) 947 return __intel_get_crtc_scanline_from_timestamp(crtc); 948 949 vtotal = mode->crtc_vtotal; 950 if (mode->flags & DRM_MODE_FLAG_INTERLACE) 951 vtotal /= 2; 952 953 if (IS_GEN2(dev_priv)) 954 position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2; 955 else 956 position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3; 957 958 /* 959 * On HSW, the DSL reg (0x70000) appears to return 0 if we 960 * read it just before the start of vblank. So try it again 961 * so we don't accidentally end up spanning a vblank frame 962 * increment, causing the pipe_update_end() code to squak at us. 963 * 964 * The nature of this problem means we can't simply check the ISR 965 * bit and return the vblank start value; nor can we use the scanline 966 * debug register in the transcoder as it appears to have the same 967 * problem. We may need to extend this to include other platforms, 968 * but so far testing only shows the problem on HSW. 969 */ 970 if (HAS_DDI(dev_priv) && !position) { 971 int i, temp; 972 973 for (i = 0; i < 100; i++) { 974 udelay(1); 975 temp = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3; 976 if (temp != position) { 977 position = temp; 978 break; 979 } 980 } 981 } 982 983 /* 984 * See update_scanline_offset() for the details on the 985 * scanline_offset adjustment. 986 */ 987 return (position + crtc->scanline_offset) % vtotal; 988 } 989 990 static bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe, 991 bool in_vblank_irq, int *vpos, int *hpos, 992 ktime_t *stime, ktime_t *etime, 993 const struct drm_display_mode *mode) 994 { 995 struct drm_i915_private *dev_priv = to_i915(dev); 996 struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv, 997 pipe); 998 int position; 999 int vbl_start, vbl_end, hsync_start, htotal, vtotal; 1000 unsigned long irqflags; 1001 1002 if (WARN_ON(!mode->crtc_clock)) { 1003 DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled " 1004 "pipe %c\n", pipe_name(pipe)); 1005 return false; 1006 } 1007 1008 htotal = mode->crtc_htotal; 1009 hsync_start = mode->crtc_hsync_start; 1010 vtotal = mode->crtc_vtotal; 1011 vbl_start = mode->crtc_vblank_start; 1012 vbl_end = mode->crtc_vblank_end; 1013 1014 if (mode->flags & DRM_MODE_FLAG_INTERLACE) { 1015 vbl_start = DIV_ROUND_UP(vbl_start, 2); 1016 vbl_end /= 2; 1017 vtotal /= 2; 1018 } 1019 1020 /* 1021 * Lock uncore.lock, as we will do multiple timing critical raw 1022 * register reads, potentially with preemption disabled, so the 1023 * following code must not block on uncore.lock. 1024 */ 1025 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 1026 1027 /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */ 1028 1029 /* Get optional system timestamp before query. */ 1030 if (stime) 1031 *stime = ktime_get(); 1032 1033 if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) { 1034 /* No obvious pixelcount register. Only query vertical 1035 * scanout position from Display scan line register. 1036 */ 1037 position = __intel_get_crtc_scanline(intel_crtc); 1038 } else { 1039 /* Have access to pixelcount since start of frame. 1040 * We can split this into vertical and horizontal 1041 * scanout position. 1042 */ 1043 position = (I915_READ_FW(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT; 1044 1045 /* convert to pixel counts */ 1046 vbl_start *= htotal; 1047 vbl_end *= htotal; 1048 vtotal *= htotal; 1049 1050 /* 1051 * In interlaced modes, the pixel counter counts all pixels, 1052 * so one field will have htotal more pixels. In order to avoid 1053 * the reported position from jumping backwards when the pixel 1054 * counter is beyond the length of the shorter field, just 1055 * clamp the position the length of the shorter field. This 1056 * matches how the scanline counter based position works since 1057 * the scanline counter doesn't count the two half lines. 1058 */ 1059 if (position >= vtotal) 1060 position = vtotal - 1; 1061 1062 /* 1063 * Start of vblank interrupt is triggered at start of hsync, 1064 * just prior to the first active line of vblank. However we 1065 * consider lines to start at the leading edge of horizontal 1066 * active. So, should we get here before we've crossed into 1067 * the horizontal active of the first line in vblank, we would 1068 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that, 1069 * always add htotal-hsync_start to the current pixel position. 1070 */ 1071 position = (position + htotal - hsync_start) % vtotal; 1072 } 1073 1074 /* Get optional system timestamp after query. */ 1075 if (etime) 1076 *etime = ktime_get(); 1077 1078 /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */ 1079 1080 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 1081 1082 /* 1083 * While in vblank, position will be negative 1084 * counting up towards 0 at vbl_end. And outside 1085 * vblank, position will be positive counting 1086 * up since vbl_end. 1087 */ 1088 if (position >= vbl_start) 1089 position -= vbl_end; 1090 else 1091 position += vtotal - vbl_end; 1092 1093 if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) { 1094 *vpos = position; 1095 *hpos = 0; 1096 } else { 1097 *vpos = position / htotal; 1098 *hpos = position - (*vpos * htotal); 1099 } 1100 1101 return true; 1102 } 1103 1104 int intel_get_crtc_scanline(struct intel_crtc *crtc) 1105 { 1106 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1107 unsigned long irqflags; 1108 int position; 1109 1110 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 1111 position = __intel_get_crtc_scanline(crtc); 1112 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 1113 1114 return position; 1115 } 1116 1117 static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv) 1118 { 1119 u32 busy_up, busy_down, max_avg, min_avg; 1120 u8 new_delay; 1121 1122 spin_lock(&mchdev_lock); 1123 1124 I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS)); 1125 1126 new_delay = dev_priv->ips.cur_delay; 1127 1128 I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG); 1129 busy_up = I915_READ(RCPREVBSYTUPAVG); 1130 busy_down = I915_READ(RCPREVBSYTDNAVG); 1131 max_avg = I915_READ(RCBMAXAVG); 1132 min_avg = I915_READ(RCBMINAVG); 1133 1134 /* Handle RCS change request from hw */ 1135 if (busy_up > max_avg) { 1136 if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay) 1137 new_delay = dev_priv->ips.cur_delay - 1; 1138 if (new_delay < dev_priv->ips.max_delay) 1139 new_delay = dev_priv->ips.max_delay; 1140 } else if (busy_down < min_avg) { 1141 if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay) 1142 new_delay = dev_priv->ips.cur_delay + 1; 1143 if (new_delay > dev_priv->ips.min_delay) 1144 new_delay = dev_priv->ips.min_delay; 1145 } 1146 1147 if (ironlake_set_drps(dev_priv, new_delay)) 1148 dev_priv->ips.cur_delay = new_delay; 1149 1150 spin_unlock(&mchdev_lock); 1151 1152 return; 1153 } 1154 1155 static void notify_ring(struct intel_engine_cs *engine) 1156 { 1157 const u32 seqno = intel_engine_get_seqno(engine); 1158 struct i915_request *rq = NULL; 1159 struct task_struct *tsk = NULL; 1160 struct intel_wait *wait; 1161 1162 if (unlikely(!engine->breadcrumbs.irq_armed)) 1163 return; 1164 1165 rcu_read_lock(); 1166 1167 spin_lock(&engine->breadcrumbs.irq_lock); 1168 wait = engine->breadcrumbs.irq_wait; 1169 if (wait) { 1170 /* 1171 * We use a callback from the dma-fence to submit 1172 * requests after waiting on our own requests. To 1173 * ensure minimum delay in queuing the next request to 1174 * hardware, signal the fence now rather than wait for 1175 * the signaler to be woken up. We still wake up the 1176 * waiter in order to handle the irq-seqno coherency 1177 * issues (we may receive the interrupt before the 1178 * seqno is written, see __i915_request_irq_complete()) 1179 * and to handle coalescing of multiple seqno updates 1180 * and many waiters. 1181 */ 1182 if (i915_seqno_passed(seqno, wait->seqno)) { 1183 struct i915_request *waiter = wait->request; 1184 1185 if (waiter && 1186 !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, 1187 &waiter->fence.flags) && 1188 intel_wait_check_request(wait, waiter)) 1189 rq = i915_request_get(waiter); 1190 1191 tsk = wait->tsk; 1192 } else { 1193 if (engine->irq_seqno_barrier && 1194 i915_seqno_passed(seqno, wait->seqno - 1)) { 1195 set_bit(ENGINE_IRQ_BREADCRUMB, 1196 &engine->irq_posted); 1197 tsk = wait->tsk; 1198 } 1199 } 1200 1201 engine->breadcrumbs.irq_count++; 1202 } else { 1203 if (engine->breadcrumbs.irq_armed) 1204 __intel_engine_disarm_breadcrumbs(engine); 1205 } 1206 spin_unlock(&engine->breadcrumbs.irq_lock); 1207 1208 if (rq) { 1209 spin_lock(&rq->lock); 1210 dma_fence_signal_locked(&rq->fence); 1211 GEM_BUG_ON(!i915_request_completed(rq)); 1212 spin_unlock(&rq->lock); 1213 1214 i915_request_put(rq); 1215 } 1216 1217 if (tsk && tsk->state & TASK_NORMAL) 1218 wake_up_process(tsk); 1219 1220 rcu_read_unlock(); 1221 1222 trace_intel_engine_notify(engine, wait); 1223 } 1224 1225 static void vlv_c0_read(struct drm_i915_private *dev_priv, 1226 struct intel_rps_ei *ei) 1227 { 1228 ei->ktime = ktime_get_raw(); 1229 ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT); 1230 ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT); 1231 } 1232 1233 void gen6_rps_reset_ei(struct drm_i915_private *dev_priv) 1234 { 1235 memset(&dev_priv->gt_pm.rps.ei, 0, sizeof(dev_priv->gt_pm.rps.ei)); 1236 } 1237 1238 static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir) 1239 { 1240 struct intel_rps *rps = &dev_priv->gt_pm.rps; 1241 const struct intel_rps_ei *prev = &rps->ei; 1242 struct intel_rps_ei now; 1243 u32 events = 0; 1244 1245 if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0) 1246 return 0; 1247 1248 vlv_c0_read(dev_priv, &now); 1249 1250 if (prev->ktime) { 1251 u64 time, c0; 1252 u32 render, media; 1253 1254 time = ktime_us_delta(now.ktime, prev->ktime); 1255 1256 time *= dev_priv->czclk_freq; 1257 1258 /* Workload can be split between render + media, 1259 * e.g. SwapBuffers being blitted in X after being rendered in 1260 * mesa. To account for this we need to combine both engines 1261 * into our activity counter. 1262 */ 1263 render = now.render_c0 - prev->render_c0; 1264 media = now.media_c0 - prev->media_c0; 1265 c0 = max(render, media); 1266 c0 *= 1000 * 100 << 8; /* to usecs and scale to threshold% */ 1267 1268 if (c0 > time * rps->power.up_threshold) 1269 events = GEN6_PM_RP_UP_THRESHOLD; 1270 else if (c0 < time * rps->power.down_threshold) 1271 events = GEN6_PM_RP_DOWN_THRESHOLD; 1272 } 1273 1274 rps->ei = now; 1275 return events; 1276 } 1277 1278 static void gen6_pm_rps_work(struct work_struct *work) 1279 { 1280 struct drm_i915_private *dev_priv = 1281 container_of(work, struct drm_i915_private, gt_pm.rps.work); 1282 struct intel_rps *rps = &dev_priv->gt_pm.rps; 1283 bool client_boost = false; 1284 int new_delay, adj, min, max; 1285 u32 pm_iir = 0; 1286 1287 spin_lock_irq(&dev_priv->irq_lock); 1288 if (rps->interrupts_enabled) { 1289 pm_iir = fetch_and_zero(&rps->pm_iir); 1290 client_boost = atomic_read(&rps->num_waiters); 1291 } 1292 spin_unlock_irq(&dev_priv->irq_lock); 1293 1294 /* Make sure we didn't queue anything we're not going to process. */ 1295 WARN_ON(pm_iir & ~dev_priv->pm_rps_events); 1296 if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost) 1297 goto out; 1298 1299 mutex_lock(&dev_priv->pcu_lock); 1300 1301 pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir); 1302 1303 adj = rps->last_adj; 1304 new_delay = rps->cur_freq; 1305 min = rps->min_freq_softlimit; 1306 max = rps->max_freq_softlimit; 1307 if (client_boost) 1308 max = rps->max_freq; 1309 if (client_boost && new_delay < rps->boost_freq) { 1310 new_delay = rps->boost_freq; 1311 adj = 0; 1312 } else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) { 1313 if (adj > 0) 1314 adj *= 2; 1315 else /* CHV needs even encode values */ 1316 adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1; 1317 1318 if (new_delay >= rps->max_freq_softlimit) 1319 adj = 0; 1320 } else if (client_boost) { 1321 adj = 0; 1322 } else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) { 1323 if (rps->cur_freq > rps->efficient_freq) 1324 new_delay = rps->efficient_freq; 1325 else if (rps->cur_freq > rps->min_freq_softlimit) 1326 new_delay = rps->min_freq_softlimit; 1327 adj = 0; 1328 } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) { 1329 if (adj < 0) 1330 adj *= 2; 1331 else /* CHV needs even encode values */ 1332 adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1; 1333 1334 if (new_delay <= rps->min_freq_softlimit) 1335 adj = 0; 1336 } else { /* unknown event */ 1337 adj = 0; 1338 } 1339 1340 rps->last_adj = adj; 1341 1342 /* sysfs frequency interfaces may have snuck in while servicing the 1343 * interrupt 1344 */ 1345 new_delay += adj; 1346 new_delay = clamp_t(int, new_delay, min, max); 1347 1348 if (intel_set_rps(dev_priv, new_delay)) { 1349 DRM_DEBUG_DRIVER("Failed to set new GPU frequency\n"); 1350 rps->last_adj = 0; 1351 } 1352 1353 mutex_unlock(&dev_priv->pcu_lock); 1354 1355 out: 1356 /* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */ 1357 spin_lock_irq(&dev_priv->irq_lock); 1358 if (rps->interrupts_enabled) 1359 gen6_unmask_pm_irq(dev_priv, dev_priv->pm_rps_events); 1360 spin_unlock_irq(&dev_priv->irq_lock); 1361 } 1362 1363 1364 /** 1365 * ivybridge_parity_work - Workqueue called when a parity error interrupt 1366 * occurred. 1367 * @work: workqueue struct 1368 * 1369 * Doesn't actually do anything except notify userspace. As a consequence of 1370 * this event, userspace should try to remap the bad rows since statistically 1371 * it is likely the same row is more likely to go bad again. 1372 */ 1373 static void ivybridge_parity_work(struct work_struct *work) 1374 { 1375 struct drm_i915_private *dev_priv = 1376 container_of(work, typeof(*dev_priv), l3_parity.error_work); 1377 u32 error_status, row, bank, subbank; 1378 char *parity_event[6]; 1379 uint32_t misccpctl; 1380 uint8_t slice = 0; 1381 1382 /* We must turn off DOP level clock gating to access the L3 registers. 1383 * In order to prevent a get/put style interface, acquire struct mutex 1384 * any time we access those registers. 1385 */ 1386 mutex_lock(&dev_priv->drm.struct_mutex); 1387 1388 /* If we've screwed up tracking, just let the interrupt fire again */ 1389 if (WARN_ON(!dev_priv->l3_parity.which_slice)) 1390 goto out; 1391 1392 misccpctl = I915_READ(GEN7_MISCCPCTL); 1393 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE); 1394 POSTING_READ(GEN7_MISCCPCTL); 1395 1396 while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) { 1397 i915_reg_t reg; 1398 1399 slice--; 1400 if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv))) 1401 break; 1402 1403 dev_priv->l3_parity.which_slice &= ~(1<<slice); 1404 1405 reg = GEN7_L3CDERRST1(slice); 1406 1407 error_status = I915_READ(reg); 1408 row = GEN7_PARITY_ERROR_ROW(error_status); 1409 bank = GEN7_PARITY_ERROR_BANK(error_status); 1410 subbank = GEN7_PARITY_ERROR_SUBBANK(error_status); 1411 1412 I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE); 1413 POSTING_READ(reg); 1414 1415 parity_event[0] = I915_L3_PARITY_UEVENT "=1"; 1416 parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row); 1417 parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank); 1418 parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank); 1419 parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice); 1420 parity_event[5] = NULL; 1421 1422 kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj, 1423 KOBJ_CHANGE, parity_event); 1424 1425 DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n", 1426 slice, row, bank, subbank); 1427 1428 kfree(parity_event[4]); 1429 kfree(parity_event[3]); 1430 kfree(parity_event[2]); 1431 kfree(parity_event[1]); 1432 } 1433 1434 I915_WRITE(GEN7_MISCCPCTL, misccpctl); 1435 1436 out: 1437 WARN_ON(dev_priv->l3_parity.which_slice); 1438 spin_lock_irq(&dev_priv->irq_lock); 1439 gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv)); 1440 spin_unlock_irq(&dev_priv->irq_lock); 1441 1442 mutex_unlock(&dev_priv->drm.struct_mutex); 1443 } 1444 1445 static void ivybridge_parity_error_irq_handler(struct drm_i915_private *dev_priv, 1446 u32 iir) 1447 { 1448 if (!HAS_L3_DPF(dev_priv)) 1449 return; 1450 1451 spin_lock(&dev_priv->irq_lock); 1452 gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv)); 1453 spin_unlock(&dev_priv->irq_lock); 1454 1455 iir &= GT_PARITY_ERROR(dev_priv); 1456 if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1) 1457 dev_priv->l3_parity.which_slice |= 1 << 1; 1458 1459 if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT) 1460 dev_priv->l3_parity.which_slice |= 1 << 0; 1461 1462 queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work); 1463 } 1464 1465 static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv, 1466 u32 gt_iir) 1467 { 1468 if (gt_iir & GT_RENDER_USER_INTERRUPT) 1469 notify_ring(dev_priv->engine[RCS]); 1470 if (gt_iir & ILK_BSD_USER_INTERRUPT) 1471 notify_ring(dev_priv->engine[VCS]); 1472 } 1473 1474 static void snb_gt_irq_handler(struct drm_i915_private *dev_priv, 1475 u32 gt_iir) 1476 { 1477 if (gt_iir & GT_RENDER_USER_INTERRUPT) 1478 notify_ring(dev_priv->engine[RCS]); 1479 if (gt_iir & GT_BSD_USER_INTERRUPT) 1480 notify_ring(dev_priv->engine[VCS]); 1481 if (gt_iir & GT_BLT_USER_INTERRUPT) 1482 notify_ring(dev_priv->engine[BCS]); 1483 1484 if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT | 1485 GT_BSD_CS_ERROR_INTERRUPT | 1486 GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) 1487 DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir); 1488 1489 if (gt_iir & GT_PARITY_ERROR(dev_priv)) 1490 ivybridge_parity_error_irq_handler(dev_priv, gt_iir); 1491 } 1492 1493 static void 1494 gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir) 1495 { 1496 bool tasklet = false; 1497 1498 if (iir & GT_CONTEXT_SWITCH_INTERRUPT) 1499 tasklet = true; 1500 1501 if (iir & GT_RENDER_USER_INTERRUPT) { 1502 notify_ring(engine); 1503 tasklet |= USES_GUC_SUBMISSION(engine->i915); 1504 } 1505 1506 if (tasklet) 1507 tasklet_hi_schedule(&engine->execlists.tasklet); 1508 } 1509 1510 static void gen8_gt_irq_ack(struct drm_i915_private *i915, 1511 u32 master_ctl, u32 gt_iir[4]) 1512 { 1513 void __iomem * const regs = i915->regs; 1514 1515 #define GEN8_GT_IRQS (GEN8_GT_RCS_IRQ | \ 1516 GEN8_GT_BCS_IRQ | \ 1517 GEN8_GT_VCS1_IRQ | \ 1518 GEN8_GT_VCS2_IRQ | \ 1519 GEN8_GT_VECS_IRQ | \ 1520 GEN8_GT_PM_IRQ | \ 1521 GEN8_GT_GUC_IRQ) 1522 1523 if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) { 1524 gt_iir[0] = raw_reg_read(regs, GEN8_GT_IIR(0)); 1525 if (likely(gt_iir[0])) 1526 raw_reg_write(regs, GEN8_GT_IIR(0), gt_iir[0]); 1527 } 1528 1529 if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) { 1530 gt_iir[1] = raw_reg_read(regs, GEN8_GT_IIR(1)); 1531 if (likely(gt_iir[1])) 1532 raw_reg_write(regs, GEN8_GT_IIR(1), gt_iir[1]); 1533 } 1534 1535 if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) { 1536 gt_iir[2] = raw_reg_read(regs, GEN8_GT_IIR(2)); 1537 if (likely(gt_iir[2])) 1538 raw_reg_write(regs, GEN8_GT_IIR(2), gt_iir[2]); 1539 } 1540 1541 if (master_ctl & GEN8_GT_VECS_IRQ) { 1542 gt_iir[3] = raw_reg_read(regs, GEN8_GT_IIR(3)); 1543 if (likely(gt_iir[3])) 1544 raw_reg_write(regs, GEN8_GT_IIR(3), gt_iir[3]); 1545 } 1546 } 1547 1548 static void gen8_gt_irq_handler(struct drm_i915_private *i915, 1549 u32 master_ctl, u32 gt_iir[4]) 1550 { 1551 if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) { 1552 gen8_cs_irq_handler(i915->engine[RCS], 1553 gt_iir[0] >> GEN8_RCS_IRQ_SHIFT); 1554 gen8_cs_irq_handler(i915->engine[BCS], 1555 gt_iir[0] >> GEN8_BCS_IRQ_SHIFT); 1556 } 1557 1558 if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) { 1559 gen8_cs_irq_handler(i915->engine[VCS], 1560 gt_iir[1] >> GEN8_VCS1_IRQ_SHIFT); 1561 gen8_cs_irq_handler(i915->engine[VCS2], 1562 gt_iir[1] >> GEN8_VCS2_IRQ_SHIFT); 1563 } 1564 1565 if (master_ctl & GEN8_GT_VECS_IRQ) { 1566 gen8_cs_irq_handler(i915->engine[VECS], 1567 gt_iir[3] >> GEN8_VECS_IRQ_SHIFT); 1568 } 1569 1570 if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) { 1571 gen6_rps_irq_handler(i915, gt_iir[2]); 1572 gen9_guc_irq_handler(i915, gt_iir[2]); 1573 } 1574 } 1575 1576 static bool gen11_port_hotplug_long_detect(enum hpd_pin pin, u32 val) 1577 { 1578 switch (pin) { 1579 case HPD_PORT_C: 1580 return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC1); 1581 case HPD_PORT_D: 1582 return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC2); 1583 case HPD_PORT_E: 1584 return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC3); 1585 case HPD_PORT_F: 1586 return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC4); 1587 default: 1588 return false; 1589 } 1590 } 1591 1592 static bool bxt_port_hotplug_long_detect(enum hpd_pin pin, u32 val) 1593 { 1594 switch (pin) { 1595 case HPD_PORT_A: 1596 return val & PORTA_HOTPLUG_LONG_DETECT; 1597 case HPD_PORT_B: 1598 return val & PORTB_HOTPLUG_LONG_DETECT; 1599 case HPD_PORT_C: 1600 return val & PORTC_HOTPLUG_LONG_DETECT; 1601 default: 1602 return false; 1603 } 1604 } 1605 1606 static bool icp_ddi_port_hotplug_long_detect(enum hpd_pin pin, u32 val) 1607 { 1608 switch (pin) { 1609 case HPD_PORT_A: 1610 return val & ICP_DDIA_HPD_LONG_DETECT; 1611 case HPD_PORT_B: 1612 return val & ICP_DDIB_HPD_LONG_DETECT; 1613 default: 1614 return false; 1615 } 1616 } 1617 1618 static bool icp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val) 1619 { 1620 switch (pin) { 1621 case HPD_PORT_C: 1622 return val & ICP_TC_HPD_LONG_DETECT(PORT_TC1); 1623 case HPD_PORT_D: 1624 return val & ICP_TC_HPD_LONG_DETECT(PORT_TC2); 1625 case HPD_PORT_E: 1626 return val & ICP_TC_HPD_LONG_DETECT(PORT_TC3); 1627 case HPD_PORT_F: 1628 return val & ICP_TC_HPD_LONG_DETECT(PORT_TC4); 1629 default: 1630 return false; 1631 } 1632 } 1633 1634 static bool spt_port_hotplug2_long_detect(enum hpd_pin pin, u32 val) 1635 { 1636 switch (pin) { 1637 case HPD_PORT_E: 1638 return val & PORTE_HOTPLUG_LONG_DETECT; 1639 default: 1640 return false; 1641 } 1642 } 1643 1644 static bool spt_port_hotplug_long_detect(enum hpd_pin pin, u32 val) 1645 { 1646 switch (pin) { 1647 case HPD_PORT_A: 1648 return val & PORTA_HOTPLUG_LONG_DETECT; 1649 case HPD_PORT_B: 1650 return val & PORTB_HOTPLUG_LONG_DETECT; 1651 case HPD_PORT_C: 1652 return val & PORTC_HOTPLUG_LONG_DETECT; 1653 case HPD_PORT_D: 1654 return val & PORTD_HOTPLUG_LONG_DETECT; 1655 default: 1656 return false; 1657 } 1658 } 1659 1660 static bool ilk_port_hotplug_long_detect(enum hpd_pin pin, u32 val) 1661 { 1662 switch (pin) { 1663 case HPD_PORT_A: 1664 return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT; 1665 default: 1666 return false; 1667 } 1668 } 1669 1670 static bool pch_port_hotplug_long_detect(enum hpd_pin pin, u32 val) 1671 { 1672 switch (pin) { 1673 case HPD_PORT_B: 1674 return val & PORTB_HOTPLUG_LONG_DETECT; 1675 case HPD_PORT_C: 1676 return val & PORTC_HOTPLUG_LONG_DETECT; 1677 case HPD_PORT_D: 1678 return val & PORTD_HOTPLUG_LONG_DETECT; 1679 default: 1680 return false; 1681 } 1682 } 1683 1684 static bool i9xx_port_hotplug_long_detect(enum hpd_pin pin, u32 val) 1685 { 1686 switch (pin) { 1687 case HPD_PORT_B: 1688 return val & PORTB_HOTPLUG_INT_LONG_PULSE; 1689 case HPD_PORT_C: 1690 return val & PORTC_HOTPLUG_INT_LONG_PULSE; 1691 case HPD_PORT_D: 1692 return val & PORTD_HOTPLUG_INT_LONG_PULSE; 1693 default: 1694 return false; 1695 } 1696 } 1697 1698 /* 1699 * Get a bit mask of pins that have triggered, and which ones may be long. 1700 * This can be called multiple times with the same masks to accumulate 1701 * hotplug detection results from several registers. 1702 * 1703 * Note that the caller is expected to zero out the masks initially. 1704 */ 1705 static void intel_get_hpd_pins(struct drm_i915_private *dev_priv, 1706 u32 *pin_mask, u32 *long_mask, 1707 u32 hotplug_trigger, u32 dig_hotplug_reg, 1708 const u32 hpd[HPD_NUM_PINS], 1709 bool long_pulse_detect(enum hpd_pin pin, u32 val)) 1710 { 1711 enum hpd_pin pin; 1712 1713 for_each_hpd_pin(pin) { 1714 if ((hpd[pin] & hotplug_trigger) == 0) 1715 continue; 1716 1717 *pin_mask |= BIT(pin); 1718 1719 if (long_pulse_detect(pin, dig_hotplug_reg)) 1720 *long_mask |= BIT(pin); 1721 } 1722 1723 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x, long 0x%08x\n", 1724 hotplug_trigger, dig_hotplug_reg, *pin_mask, *long_mask); 1725 1726 } 1727 1728 static void gmbus_irq_handler(struct drm_i915_private *dev_priv) 1729 { 1730 wake_up_all(&dev_priv->gmbus_wait_queue); 1731 } 1732 1733 static void dp_aux_irq_handler(struct drm_i915_private *dev_priv) 1734 { 1735 wake_up_all(&dev_priv->gmbus_wait_queue); 1736 } 1737 1738 #if defined(CONFIG_DEBUG_FS) 1739 static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 1740 enum pipe pipe, 1741 uint32_t crc0, uint32_t crc1, 1742 uint32_t crc2, uint32_t crc3, 1743 uint32_t crc4) 1744 { 1745 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe]; 1746 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); 1747 uint32_t crcs[5]; 1748 1749 spin_lock(&pipe_crc->lock); 1750 /* 1751 * For some not yet identified reason, the first CRC is 1752 * bonkers. So let's just wait for the next vblank and read 1753 * out the buggy result. 1754 * 1755 * On GEN8+ sometimes the second CRC is bonkers as well, so 1756 * don't trust that one either. 1757 */ 1758 if (pipe_crc->skipped <= 0 || 1759 (INTEL_GEN(dev_priv) >= 8 && pipe_crc->skipped == 1)) { 1760 pipe_crc->skipped++; 1761 spin_unlock(&pipe_crc->lock); 1762 return; 1763 } 1764 spin_unlock(&pipe_crc->lock); 1765 1766 crcs[0] = crc0; 1767 crcs[1] = crc1; 1768 crcs[2] = crc2; 1769 crcs[3] = crc3; 1770 crcs[4] = crc4; 1771 drm_crtc_add_crc_entry(&crtc->base, true, 1772 drm_crtc_accurate_vblank_count(&crtc->base), 1773 crcs); 1774 } 1775 #else 1776 static inline void 1777 display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 1778 enum pipe pipe, 1779 uint32_t crc0, uint32_t crc1, 1780 uint32_t crc2, uint32_t crc3, 1781 uint32_t crc4) {} 1782 #endif 1783 1784 1785 static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 1786 enum pipe pipe) 1787 { 1788 display_pipe_crc_irq_handler(dev_priv, pipe, 1789 I915_READ(PIPE_CRC_RES_1_IVB(pipe)), 1790 0, 0, 0, 0); 1791 } 1792 1793 static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 1794 enum pipe pipe) 1795 { 1796 display_pipe_crc_irq_handler(dev_priv, pipe, 1797 I915_READ(PIPE_CRC_RES_1_IVB(pipe)), 1798 I915_READ(PIPE_CRC_RES_2_IVB(pipe)), 1799 I915_READ(PIPE_CRC_RES_3_IVB(pipe)), 1800 I915_READ(PIPE_CRC_RES_4_IVB(pipe)), 1801 I915_READ(PIPE_CRC_RES_5_IVB(pipe))); 1802 } 1803 1804 static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 1805 enum pipe pipe) 1806 { 1807 uint32_t res1, res2; 1808 1809 if (INTEL_GEN(dev_priv) >= 3) 1810 res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe)); 1811 else 1812 res1 = 0; 1813 1814 if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) 1815 res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe)); 1816 else 1817 res2 = 0; 1818 1819 display_pipe_crc_irq_handler(dev_priv, pipe, 1820 I915_READ(PIPE_CRC_RES_RED(pipe)), 1821 I915_READ(PIPE_CRC_RES_GREEN(pipe)), 1822 I915_READ(PIPE_CRC_RES_BLUE(pipe)), 1823 res1, res2); 1824 } 1825 1826 /* The RPS events need forcewake, so we add them to a work queue and mask their 1827 * IMR bits until the work is done. Other interrupts can be processed without 1828 * the work queue. */ 1829 static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir) 1830 { 1831 struct intel_rps *rps = &dev_priv->gt_pm.rps; 1832 1833 if (pm_iir & dev_priv->pm_rps_events) { 1834 spin_lock(&dev_priv->irq_lock); 1835 gen6_mask_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events); 1836 if (rps->interrupts_enabled) { 1837 rps->pm_iir |= pm_iir & dev_priv->pm_rps_events; 1838 schedule_work(&rps->work); 1839 } 1840 spin_unlock(&dev_priv->irq_lock); 1841 } 1842 1843 if (INTEL_GEN(dev_priv) >= 8) 1844 return; 1845 1846 if (HAS_VEBOX(dev_priv)) { 1847 if (pm_iir & PM_VEBOX_USER_INTERRUPT) 1848 notify_ring(dev_priv->engine[VECS]); 1849 1850 if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) 1851 DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir); 1852 } 1853 } 1854 1855 static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 gt_iir) 1856 { 1857 if (gt_iir & GEN9_GUC_TO_HOST_INT_EVENT) 1858 intel_guc_to_host_event_handler(&dev_priv->guc); 1859 } 1860 1861 static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv) 1862 { 1863 enum pipe pipe; 1864 1865 for_each_pipe(dev_priv, pipe) { 1866 I915_WRITE(PIPESTAT(pipe), 1867 PIPESTAT_INT_STATUS_MASK | 1868 PIPE_FIFO_UNDERRUN_STATUS); 1869 1870 dev_priv->pipestat_irq_mask[pipe] = 0; 1871 } 1872 } 1873 1874 static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv, 1875 u32 iir, u32 pipe_stats[I915_MAX_PIPES]) 1876 { 1877 int pipe; 1878 1879 spin_lock(&dev_priv->irq_lock); 1880 1881 if (!dev_priv->display_irqs_enabled) { 1882 spin_unlock(&dev_priv->irq_lock); 1883 return; 1884 } 1885 1886 for_each_pipe(dev_priv, pipe) { 1887 i915_reg_t reg; 1888 u32 status_mask, enable_mask, iir_bit = 0; 1889 1890 /* 1891 * PIPESTAT bits get signalled even when the interrupt is 1892 * disabled with the mask bits, and some of the status bits do 1893 * not generate interrupts at all (like the underrun bit). Hence 1894 * we need to be careful that we only handle what we want to 1895 * handle. 1896 */ 1897 1898 /* fifo underruns are filterered in the underrun handler. */ 1899 status_mask = PIPE_FIFO_UNDERRUN_STATUS; 1900 1901 switch (pipe) { 1902 case PIPE_A: 1903 iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT; 1904 break; 1905 case PIPE_B: 1906 iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; 1907 break; 1908 case PIPE_C: 1909 iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT; 1910 break; 1911 } 1912 if (iir & iir_bit) 1913 status_mask |= dev_priv->pipestat_irq_mask[pipe]; 1914 1915 if (!status_mask) 1916 continue; 1917 1918 reg = PIPESTAT(pipe); 1919 pipe_stats[pipe] = I915_READ(reg) & status_mask; 1920 enable_mask = i915_pipestat_enable_mask(dev_priv, pipe); 1921 1922 /* 1923 * Clear the PIPE*STAT regs before the IIR 1924 * 1925 * Toggle the enable bits to make sure we get an 1926 * edge in the ISR pipe event bit if we don't clear 1927 * all the enabled status bits. Otherwise the edge 1928 * triggered IIR on i965/g4x wouldn't notice that 1929 * an interrupt is still pending. 1930 */ 1931 if (pipe_stats[pipe]) { 1932 I915_WRITE(reg, pipe_stats[pipe]); 1933 I915_WRITE(reg, enable_mask); 1934 } 1935 } 1936 spin_unlock(&dev_priv->irq_lock); 1937 } 1938 1939 static void i8xx_pipestat_irq_handler(struct drm_i915_private *dev_priv, 1940 u16 iir, u32 pipe_stats[I915_MAX_PIPES]) 1941 { 1942 enum pipe pipe; 1943 1944 for_each_pipe(dev_priv, pipe) { 1945 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS) 1946 drm_handle_vblank(&dev_priv->drm, pipe); 1947 1948 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 1949 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 1950 1951 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 1952 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 1953 } 1954 } 1955 1956 static void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv, 1957 u32 iir, u32 pipe_stats[I915_MAX_PIPES]) 1958 { 1959 bool blc_event = false; 1960 enum pipe pipe; 1961 1962 for_each_pipe(dev_priv, pipe) { 1963 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS) 1964 drm_handle_vblank(&dev_priv->drm, pipe); 1965 1966 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 1967 blc_event = true; 1968 1969 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 1970 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 1971 1972 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 1973 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 1974 } 1975 1976 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 1977 intel_opregion_asle_intr(dev_priv); 1978 } 1979 1980 static void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv, 1981 u32 iir, u32 pipe_stats[I915_MAX_PIPES]) 1982 { 1983 bool blc_event = false; 1984 enum pipe pipe; 1985 1986 for_each_pipe(dev_priv, pipe) { 1987 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS) 1988 drm_handle_vblank(&dev_priv->drm, pipe); 1989 1990 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 1991 blc_event = true; 1992 1993 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 1994 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 1995 1996 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 1997 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 1998 } 1999 2000 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 2001 intel_opregion_asle_intr(dev_priv); 2002 2003 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 2004 gmbus_irq_handler(dev_priv); 2005 } 2006 2007 static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv, 2008 u32 pipe_stats[I915_MAX_PIPES]) 2009 { 2010 enum pipe pipe; 2011 2012 for_each_pipe(dev_priv, pipe) { 2013 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS) 2014 drm_handle_vblank(&dev_priv->drm, pipe); 2015 2016 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 2017 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 2018 2019 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 2020 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2021 } 2022 2023 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 2024 gmbus_irq_handler(dev_priv); 2025 } 2026 2027 static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv) 2028 { 2029 u32 hotplug_status = 0, hotplug_status_mask; 2030 int i; 2031 2032 if (IS_G4X(dev_priv) || 2033 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 2034 hotplug_status_mask = HOTPLUG_INT_STATUS_G4X | 2035 DP_AUX_CHANNEL_MASK_INT_STATUS_G4X; 2036 else 2037 hotplug_status_mask = HOTPLUG_INT_STATUS_I915; 2038 2039 /* 2040 * We absolutely have to clear all the pending interrupt 2041 * bits in PORT_HOTPLUG_STAT. Otherwise the ISR port 2042 * interrupt bit won't have an edge, and the i965/g4x 2043 * edge triggered IIR will not notice that an interrupt 2044 * is still pending. We can't use PORT_HOTPLUG_EN to 2045 * guarantee the edge as the act of toggling the enable 2046 * bits can itself generate a new hotplug interrupt :( 2047 */ 2048 for (i = 0; i < 10; i++) { 2049 u32 tmp = I915_READ(PORT_HOTPLUG_STAT) & hotplug_status_mask; 2050 2051 if (tmp == 0) 2052 return hotplug_status; 2053 2054 hotplug_status |= tmp; 2055 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); 2056 } 2057 2058 WARN_ONCE(1, 2059 "PORT_HOTPLUG_STAT did not clear (0x%08x)\n", 2060 I915_READ(PORT_HOTPLUG_STAT)); 2061 2062 return hotplug_status; 2063 } 2064 2065 static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv, 2066 u32 hotplug_status) 2067 { 2068 u32 pin_mask = 0, long_mask = 0; 2069 2070 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || 2071 IS_CHERRYVIEW(dev_priv)) { 2072 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X; 2073 2074 if (hotplug_trigger) { 2075 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 2076 hotplug_trigger, hotplug_trigger, 2077 hpd_status_g4x, 2078 i9xx_port_hotplug_long_detect); 2079 2080 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2081 } 2082 2083 if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X) 2084 dp_aux_irq_handler(dev_priv); 2085 } else { 2086 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; 2087 2088 if (hotplug_trigger) { 2089 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 2090 hotplug_trigger, hotplug_trigger, 2091 hpd_status_i915, 2092 i9xx_port_hotplug_long_detect); 2093 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2094 } 2095 } 2096 } 2097 2098 static irqreturn_t valleyview_irq_handler(int irq, void *arg) 2099 { 2100 struct drm_device *dev = arg; 2101 struct drm_i915_private *dev_priv = to_i915(dev); 2102 irqreturn_t ret = IRQ_NONE; 2103 2104 if (!intel_irqs_enabled(dev_priv)) 2105 return IRQ_NONE; 2106 2107 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2108 disable_rpm_wakeref_asserts(dev_priv); 2109 2110 do { 2111 u32 iir, gt_iir, pm_iir; 2112 u32 pipe_stats[I915_MAX_PIPES] = {}; 2113 u32 hotplug_status = 0; 2114 u32 ier = 0; 2115 2116 gt_iir = I915_READ(GTIIR); 2117 pm_iir = I915_READ(GEN6_PMIIR); 2118 iir = I915_READ(VLV_IIR); 2119 2120 if (gt_iir == 0 && pm_iir == 0 && iir == 0) 2121 break; 2122 2123 ret = IRQ_HANDLED; 2124 2125 /* 2126 * Theory on interrupt generation, based on empirical evidence: 2127 * 2128 * x = ((VLV_IIR & VLV_IER) || 2129 * (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) && 2130 * (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE))); 2131 * 2132 * A CPU interrupt will only be raised when 'x' has a 0->1 edge. 2133 * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to 2134 * guarantee the CPU interrupt will be raised again even if we 2135 * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR 2136 * bits this time around. 2137 */ 2138 I915_WRITE(VLV_MASTER_IER, 0); 2139 ier = I915_READ(VLV_IER); 2140 I915_WRITE(VLV_IER, 0); 2141 2142 if (gt_iir) 2143 I915_WRITE(GTIIR, gt_iir); 2144 if (pm_iir) 2145 I915_WRITE(GEN6_PMIIR, pm_iir); 2146 2147 if (iir & I915_DISPLAY_PORT_INTERRUPT) 2148 hotplug_status = i9xx_hpd_irq_ack(dev_priv); 2149 2150 /* Call regardless, as some status bits might not be 2151 * signalled in iir */ 2152 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats); 2153 2154 if (iir & (I915_LPE_PIPE_A_INTERRUPT | 2155 I915_LPE_PIPE_B_INTERRUPT)) 2156 intel_lpe_audio_irq_handler(dev_priv); 2157 2158 /* 2159 * VLV_IIR is single buffered, and reflects the level 2160 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last. 2161 */ 2162 if (iir) 2163 I915_WRITE(VLV_IIR, iir); 2164 2165 I915_WRITE(VLV_IER, ier); 2166 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); 2167 2168 if (gt_iir) 2169 snb_gt_irq_handler(dev_priv, gt_iir); 2170 if (pm_iir) 2171 gen6_rps_irq_handler(dev_priv, pm_iir); 2172 2173 if (hotplug_status) 2174 i9xx_hpd_irq_handler(dev_priv, hotplug_status); 2175 2176 valleyview_pipestat_irq_handler(dev_priv, pipe_stats); 2177 } while (0); 2178 2179 enable_rpm_wakeref_asserts(dev_priv); 2180 2181 return ret; 2182 } 2183 2184 static irqreturn_t cherryview_irq_handler(int irq, void *arg) 2185 { 2186 struct drm_device *dev = arg; 2187 struct drm_i915_private *dev_priv = to_i915(dev); 2188 irqreturn_t ret = IRQ_NONE; 2189 2190 if (!intel_irqs_enabled(dev_priv)) 2191 return IRQ_NONE; 2192 2193 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2194 disable_rpm_wakeref_asserts(dev_priv); 2195 2196 do { 2197 u32 master_ctl, iir; 2198 u32 pipe_stats[I915_MAX_PIPES] = {}; 2199 u32 hotplug_status = 0; 2200 u32 gt_iir[4]; 2201 u32 ier = 0; 2202 2203 master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL; 2204 iir = I915_READ(VLV_IIR); 2205 2206 if (master_ctl == 0 && iir == 0) 2207 break; 2208 2209 ret = IRQ_HANDLED; 2210 2211 /* 2212 * Theory on interrupt generation, based on empirical evidence: 2213 * 2214 * x = ((VLV_IIR & VLV_IER) || 2215 * ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) && 2216 * (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL))); 2217 * 2218 * A CPU interrupt will only be raised when 'x' has a 0->1 edge. 2219 * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to 2220 * guarantee the CPU interrupt will be raised again even if we 2221 * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL 2222 * bits this time around. 2223 */ 2224 I915_WRITE(GEN8_MASTER_IRQ, 0); 2225 ier = I915_READ(VLV_IER); 2226 I915_WRITE(VLV_IER, 0); 2227 2228 gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir); 2229 2230 if (iir & I915_DISPLAY_PORT_INTERRUPT) 2231 hotplug_status = i9xx_hpd_irq_ack(dev_priv); 2232 2233 /* Call regardless, as some status bits might not be 2234 * signalled in iir */ 2235 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats); 2236 2237 if (iir & (I915_LPE_PIPE_A_INTERRUPT | 2238 I915_LPE_PIPE_B_INTERRUPT | 2239 I915_LPE_PIPE_C_INTERRUPT)) 2240 intel_lpe_audio_irq_handler(dev_priv); 2241 2242 /* 2243 * VLV_IIR is single buffered, and reflects the level 2244 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last. 2245 */ 2246 if (iir) 2247 I915_WRITE(VLV_IIR, iir); 2248 2249 I915_WRITE(VLV_IER, ier); 2250 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 2251 2252 gen8_gt_irq_handler(dev_priv, master_ctl, gt_iir); 2253 2254 if (hotplug_status) 2255 i9xx_hpd_irq_handler(dev_priv, hotplug_status); 2256 2257 valleyview_pipestat_irq_handler(dev_priv, pipe_stats); 2258 } while (0); 2259 2260 enable_rpm_wakeref_asserts(dev_priv); 2261 2262 return ret; 2263 } 2264 2265 static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv, 2266 u32 hotplug_trigger, 2267 const u32 hpd[HPD_NUM_PINS]) 2268 { 2269 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; 2270 2271 /* 2272 * Somehow the PCH doesn't seem to really ack the interrupt to the CPU 2273 * unless we touch the hotplug register, even if hotplug_trigger is 2274 * zero. Not acking leads to "The master control interrupt lied (SDE)!" 2275 * errors. 2276 */ 2277 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); 2278 if (!hotplug_trigger) { 2279 u32 mask = PORTA_HOTPLUG_STATUS_MASK | 2280 PORTD_HOTPLUG_STATUS_MASK | 2281 PORTC_HOTPLUG_STATUS_MASK | 2282 PORTB_HOTPLUG_STATUS_MASK; 2283 dig_hotplug_reg &= ~mask; 2284 } 2285 2286 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); 2287 if (!hotplug_trigger) 2288 return; 2289 2290 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger, 2291 dig_hotplug_reg, hpd, 2292 pch_port_hotplug_long_detect); 2293 2294 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2295 } 2296 2297 static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) 2298 { 2299 int pipe; 2300 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK; 2301 2302 ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ibx); 2303 2304 if (pch_iir & SDE_AUDIO_POWER_MASK) { 2305 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >> 2306 SDE_AUDIO_POWER_SHIFT); 2307 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", 2308 port_name(port)); 2309 } 2310 2311 if (pch_iir & SDE_AUX_MASK) 2312 dp_aux_irq_handler(dev_priv); 2313 2314 if (pch_iir & SDE_GMBUS) 2315 gmbus_irq_handler(dev_priv); 2316 2317 if (pch_iir & SDE_AUDIO_HDCP_MASK) 2318 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n"); 2319 2320 if (pch_iir & SDE_AUDIO_TRANS_MASK) 2321 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n"); 2322 2323 if (pch_iir & SDE_POISON) 2324 DRM_ERROR("PCH poison interrupt\n"); 2325 2326 if (pch_iir & SDE_FDI_MASK) 2327 for_each_pipe(dev_priv, pipe) 2328 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 2329 pipe_name(pipe), 2330 I915_READ(FDI_RX_IIR(pipe))); 2331 2332 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE)) 2333 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n"); 2334 2335 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR)) 2336 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n"); 2337 2338 if (pch_iir & SDE_TRANSA_FIFO_UNDER) 2339 intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A); 2340 2341 if (pch_iir & SDE_TRANSB_FIFO_UNDER) 2342 intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_B); 2343 } 2344 2345 static void ivb_err_int_handler(struct drm_i915_private *dev_priv) 2346 { 2347 u32 err_int = I915_READ(GEN7_ERR_INT); 2348 enum pipe pipe; 2349 2350 if (err_int & ERR_INT_POISON) 2351 DRM_ERROR("Poison interrupt\n"); 2352 2353 for_each_pipe(dev_priv, pipe) { 2354 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) 2355 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2356 2357 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) { 2358 if (IS_IVYBRIDGE(dev_priv)) 2359 ivb_pipe_crc_irq_handler(dev_priv, pipe); 2360 else 2361 hsw_pipe_crc_irq_handler(dev_priv, pipe); 2362 } 2363 } 2364 2365 I915_WRITE(GEN7_ERR_INT, err_int); 2366 } 2367 2368 static void cpt_serr_int_handler(struct drm_i915_private *dev_priv) 2369 { 2370 u32 serr_int = I915_READ(SERR_INT); 2371 enum pipe pipe; 2372 2373 if (serr_int & SERR_INT_POISON) 2374 DRM_ERROR("PCH poison interrupt\n"); 2375 2376 for_each_pipe(dev_priv, pipe) 2377 if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe)) 2378 intel_pch_fifo_underrun_irq_handler(dev_priv, pipe); 2379 2380 I915_WRITE(SERR_INT, serr_int); 2381 } 2382 2383 static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) 2384 { 2385 int pipe; 2386 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; 2387 2388 ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_cpt); 2389 2390 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) { 2391 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> 2392 SDE_AUDIO_POWER_SHIFT_CPT); 2393 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n", 2394 port_name(port)); 2395 } 2396 2397 if (pch_iir & SDE_AUX_MASK_CPT) 2398 dp_aux_irq_handler(dev_priv); 2399 2400 if (pch_iir & SDE_GMBUS_CPT) 2401 gmbus_irq_handler(dev_priv); 2402 2403 if (pch_iir & SDE_AUDIO_CP_REQ_CPT) 2404 DRM_DEBUG_DRIVER("Audio CP request interrupt\n"); 2405 2406 if (pch_iir & SDE_AUDIO_CP_CHG_CPT) 2407 DRM_DEBUG_DRIVER("Audio CP change interrupt\n"); 2408 2409 if (pch_iir & SDE_FDI_MASK_CPT) 2410 for_each_pipe(dev_priv, pipe) 2411 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 2412 pipe_name(pipe), 2413 I915_READ(FDI_RX_IIR(pipe))); 2414 2415 if (pch_iir & SDE_ERROR_CPT) 2416 cpt_serr_int_handler(dev_priv); 2417 } 2418 2419 static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) 2420 { 2421 u32 ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_ICP; 2422 u32 tc_hotplug_trigger = pch_iir & SDE_TC_MASK_ICP; 2423 u32 pin_mask = 0, long_mask = 0; 2424 2425 if (ddi_hotplug_trigger) { 2426 u32 dig_hotplug_reg; 2427 2428 dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_DDI); 2429 I915_WRITE(SHOTPLUG_CTL_DDI, dig_hotplug_reg); 2430 2431 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 2432 ddi_hotplug_trigger, 2433 dig_hotplug_reg, hpd_icp, 2434 icp_ddi_port_hotplug_long_detect); 2435 } 2436 2437 if (tc_hotplug_trigger) { 2438 u32 dig_hotplug_reg; 2439 2440 dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_TC); 2441 I915_WRITE(SHOTPLUG_CTL_TC, dig_hotplug_reg); 2442 2443 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 2444 tc_hotplug_trigger, 2445 dig_hotplug_reg, hpd_icp, 2446 icp_tc_port_hotplug_long_detect); 2447 } 2448 2449 if (pin_mask) 2450 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2451 2452 if (pch_iir & SDE_GMBUS_ICP) 2453 gmbus_irq_handler(dev_priv); 2454 } 2455 2456 static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) 2457 { 2458 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT & 2459 ~SDE_PORTE_HOTPLUG_SPT; 2460 u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT; 2461 u32 pin_mask = 0, long_mask = 0; 2462 2463 if (hotplug_trigger) { 2464 u32 dig_hotplug_reg; 2465 2466 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); 2467 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); 2468 2469 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 2470 hotplug_trigger, dig_hotplug_reg, hpd_spt, 2471 spt_port_hotplug_long_detect); 2472 } 2473 2474 if (hotplug2_trigger) { 2475 u32 dig_hotplug_reg; 2476 2477 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2); 2478 I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg); 2479 2480 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 2481 hotplug2_trigger, dig_hotplug_reg, hpd_spt, 2482 spt_port_hotplug2_long_detect); 2483 } 2484 2485 if (pin_mask) 2486 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2487 2488 if (pch_iir & SDE_GMBUS_CPT) 2489 gmbus_irq_handler(dev_priv); 2490 } 2491 2492 static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv, 2493 u32 hotplug_trigger, 2494 const u32 hpd[HPD_NUM_PINS]) 2495 { 2496 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; 2497 2498 dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL); 2499 I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg); 2500 2501 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger, 2502 dig_hotplug_reg, hpd, 2503 ilk_port_hotplug_long_detect); 2504 2505 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2506 } 2507 2508 static void ilk_display_irq_handler(struct drm_i915_private *dev_priv, 2509 u32 de_iir) 2510 { 2511 enum pipe pipe; 2512 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG; 2513 2514 if (hotplug_trigger) 2515 ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ilk); 2516 2517 if (de_iir & DE_AUX_CHANNEL_A) 2518 dp_aux_irq_handler(dev_priv); 2519 2520 if (de_iir & DE_GSE) 2521 intel_opregion_asle_intr(dev_priv); 2522 2523 if (de_iir & DE_POISON) 2524 DRM_ERROR("Poison interrupt\n"); 2525 2526 for_each_pipe(dev_priv, pipe) { 2527 if (de_iir & DE_PIPE_VBLANK(pipe)) 2528 drm_handle_vblank(&dev_priv->drm, pipe); 2529 2530 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe)) 2531 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2532 2533 if (de_iir & DE_PIPE_CRC_DONE(pipe)) 2534 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 2535 } 2536 2537 /* check event from PCH */ 2538 if (de_iir & DE_PCH_EVENT) { 2539 u32 pch_iir = I915_READ(SDEIIR); 2540 2541 if (HAS_PCH_CPT(dev_priv)) 2542 cpt_irq_handler(dev_priv, pch_iir); 2543 else 2544 ibx_irq_handler(dev_priv, pch_iir); 2545 2546 /* should clear PCH hotplug event before clear CPU irq */ 2547 I915_WRITE(SDEIIR, pch_iir); 2548 } 2549 2550 if (IS_GEN5(dev_priv) && de_iir & DE_PCU_EVENT) 2551 ironlake_rps_change_irq_handler(dev_priv); 2552 } 2553 2554 static void ivb_display_irq_handler(struct drm_i915_private *dev_priv, 2555 u32 de_iir) 2556 { 2557 enum pipe pipe; 2558 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB; 2559 2560 if (hotplug_trigger) 2561 ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ivb); 2562 2563 if (de_iir & DE_ERR_INT_IVB) 2564 ivb_err_int_handler(dev_priv); 2565 2566 if (de_iir & DE_EDP_PSR_INT_HSW) { 2567 u32 psr_iir = I915_READ(EDP_PSR_IIR); 2568 2569 intel_psr_irq_handler(dev_priv, psr_iir); 2570 I915_WRITE(EDP_PSR_IIR, psr_iir); 2571 } 2572 2573 if (de_iir & DE_AUX_CHANNEL_A_IVB) 2574 dp_aux_irq_handler(dev_priv); 2575 2576 if (de_iir & DE_GSE_IVB) 2577 intel_opregion_asle_intr(dev_priv); 2578 2579 for_each_pipe(dev_priv, pipe) { 2580 if (de_iir & (DE_PIPE_VBLANK_IVB(pipe))) 2581 drm_handle_vblank(&dev_priv->drm, pipe); 2582 } 2583 2584 /* check event from PCH */ 2585 if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) { 2586 u32 pch_iir = I915_READ(SDEIIR); 2587 2588 cpt_irq_handler(dev_priv, pch_iir); 2589 2590 /* clear PCH hotplug event before clear CPU irq */ 2591 I915_WRITE(SDEIIR, pch_iir); 2592 } 2593 } 2594 2595 /* 2596 * To handle irqs with the minimum potential races with fresh interrupts, we: 2597 * 1 - Disable Master Interrupt Control. 2598 * 2 - Find the source(s) of the interrupt. 2599 * 3 - Clear the Interrupt Identity bits (IIR). 2600 * 4 - Process the interrupt(s) that had bits set in the IIRs. 2601 * 5 - Re-enable Master Interrupt Control. 2602 */ 2603 static irqreturn_t ironlake_irq_handler(int irq, void *arg) 2604 { 2605 struct drm_device *dev = arg; 2606 struct drm_i915_private *dev_priv = to_i915(dev); 2607 u32 de_iir, gt_iir, de_ier, sde_ier = 0; 2608 irqreturn_t ret = IRQ_NONE; 2609 2610 if (!intel_irqs_enabled(dev_priv)) 2611 return IRQ_NONE; 2612 2613 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2614 disable_rpm_wakeref_asserts(dev_priv); 2615 2616 /* disable master interrupt before clearing iir */ 2617 de_ier = I915_READ(DEIER); 2618 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); 2619 2620 /* Disable south interrupts. We'll only write to SDEIIR once, so further 2621 * interrupts will will be stored on its back queue, and then we'll be 2622 * able to process them after we restore SDEIER (as soon as we restore 2623 * it, we'll get an interrupt if SDEIIR still has something to process 2624 * due to its back queue). */ 2625 if (!HAS_PCH_NOP(dev_priv)) { 2626 sde_ier = I915_READ(SDEIER); 2627 I915_WRITE(SDEIER, 0); 2628 } 2629 2630 /* Find, clear, then process each source of interrupt */ 2631 2632 gt_iir = I915_READ(GTIIR); 2633 if (gt_iir) { 2634 I915_WRITE(GTIIR, gt_iir); 2635 ret = IRQ_HANDLED; 2636 if (INTEL_GEN(dev_priv) >= 6) 2637 snb_gt_irq_handler(dev_priv, gt_iir); 2638 else 2639 ilk_gt_irq_handler(dev_priv, gt_iir); 2640 } 2641 2642 de_iir = I915_READ(DEIIR); 2643 if (de_iir) { 2644 I915_WRITE(DEIIR, de_iir); 2645 ret = IRQ_HANDLED; 2646 if (INTEL_GEN(dev_priv) >= 7) 2647 ivb_display_irq_handler(dev_priv, de_iir); 2648 else 2649 ilk_display_irq_handler(dev_priv, de_iir); 2650 } 2651 2652 if (INTEL_GEN(dev_priv) >= 6) { 2653 u32 pm_iir = I915_READ(GEN6_PMIIR); 2654 if (pm_iir) { 2655 I915_WRITE(GEN6_PMIIR, pm_iir); 2656 ret = IRQ_HANDLED; 2657 gen6_rps_irq_handler(dev_priv, pm_iir); 2658 } 2659 } 2660 2661 I915_WRITE(DEIER, de_ier); 2662 if (!HAS_PCH_NOP(dev_priv)) 2663 I915_WRITE(SDEIER, sde_ier); 2664 2665 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2666 enable_rpm_wakeref_asserts(dev_priv); 2667 2668 return ret; 2669 } 2670 2671 static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv, 2672 u32 hotplug_trigger, 2673 const u32 hpd[HPD_NUM_PINS]) 2674 { 2675 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; 2676 2677 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); 2678 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); 2679 2680 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger, 2681 dig_hotplug_reg, hpd, 2682 bxt_port_hotplug_long_detect); 2683 2684 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2685 } 2686 2687 static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir) 2688 { 2689 u32 pin_mask = 0, long_mask = 0; 2690 u32 trigger_tc = iir & GEN11_DE_TC_HOTPLUG_MASK; 2691 u32 trigger_tbt = iir & GEN11_DE_TBT_HOTPLUG_MASK; 2692 2693 if (trigger_tc) { 2694 u32 dig_hotplug_reg; 2695 2696 dig_hotplug_reg = I915_READ(GEN11_TC_HOTPLUG_CTL); 2697 I915_WRITE(GEN11_TC_HOTPLUG_CTL, dig_hotplug_reg); 2698 2699 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tc, 2700 dig_hotplug_reg, hpd_gen11, 2701 gen11_port_hotplug_long_detect); 2702 } 2703 2704 if (trigger_tbt) { 2705 u32 dig_hotplug_reg; 2706 2707 dig_hotplug_reg = I915_READ(GEN11_TBT_HOTPLUG_CTL); 2708 I915_WRITE(GEN11_TBT_HOTPLUG_CTL, dig_hotplug_reg); 2709 2710 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tbt, 2711 dig_hotplug_reg, hpd_gen11, 2712 gen11_port_hotplug_long_detect); 2713 } 2714 2715 if (pin_mask) 2716 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2717 else 2718 DRM_ERROR("Unexpected DE HPD interrupt 0x%08x\n", iir); 2719 } 2720 2721 static irqreturn_t 2722 gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) 2723 { 2724 irqreturn_t ret = IRQ_NONE; 2725 u32 iir; 2726 enum pipe pipe; 2727 2728 if (master_ctl & GEN8_DE_MISC_IRQ) { 2729 iir = I915_READ(GEN8_DE_MISC_IIR); 2730 if (iir) { 2731 bool found = false; 2732 2733 I915_WRITE(GEN8_DE_MISC_IIR, iir); 2734 ret = IRQ_HANDLED; 2735 2736 if (iir & GEN8_DE_MISC_GSE) { 2737 intel_opregion_asle_intr(dev_priv); 2738 found = true; 2739 } 2740 2741 if (iir & GEN8_DE_EDP_PSR) { 2742 u32 psr_iir = I915_READ(EDP_PSR_IIR); 2743 2744 intel_psr_irq_handler(dev_priv, psr_iir); 2745 I915_WRITE(EDP_PSR_IIR, psr_iir); 2746 found = true; 2747 } 2748 2749 if (!found) 2750 DRM_ERROR("Unexpected DE Misc interrupt\n"); 2751 } 2752 else 2753 DRM_ERROR("The master control interrupt lied (DE MISC)!\n"); 2754 } 2755 2756 if (INTEL_GEN(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) { 2757 iir = I915_READ(GEN11_DE_HPD_IIR); 2758 if (iir) { 2759 I915_WRITE(GEN11_DE_HPD_IIR, iir); 2760 ret = IRQ_HANDLED; 2761 gen11_hpd_irq_handler(dev_priv, iir); 2762 } else { 2763 DRM_ERROR("The master control interrupt lied, (DE HPD)!\n"); 2764 } 2765 } 2766 2767 if (master_ctl & GEN8_DE_PORT_IRQ) { 2768 iir = I915_READ(GEN8_DE_PORT_IIR); 2769 if (iir) { 2770 u32 tmp_mask; 2771 bool found = false; 2772 2773 I915_WRITE(GEN8_DE_PORT_IIR, iir); 2774 ret = IRQ_HANDLED; 2775 2776 tmp_mask = GEN8_AUX_CHANNEL_A; 2777 if (INTEL_GEN(dev_priv) >= 9) 2778 tmp_mask |= GEN9_AUX_CHANNEL_B | 2779 GEN9_AUX_CHANNEL_C | 2780 GEN9_AUX_CHANNEL_D; 2781 2782 if (INTEL_GEN(dev_priv) >= 11) 2783 tmp_mask |= ICL_AUX_CHANNEL_E; 2784 2785 if (IS_CNL_WITH_PORT_F(dev_priv) || 2786 INTEL_GEN(dev_priv) >= 11) 2787 tmp_mask |= CNL_AUX_CHANNEL_F; 2788 2789 if (iir & tmp_mask) { 2790 dp_aux_irq_handler(dev_priv); 2791 found = true; 2792 } 2793 2794 if (IS_GEN9_LP(dev_priv)) { 2795 tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK; 2796 if (tmp_mask) { 2797 bxt_hpd_irq_handler(dev_priv, tmp_mask, 2798 hpd_bxt); 2799 found = true; 2800 } 2801 } else if (IS_BROADWELL(dev_priv)) { 2802 tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG; 2803 if (tmp_mask) { 2804 ilk_hpd_irq_handler(dev_priv, 2805 tmp_mask, hpd_bdw); 2806 found = true; 2807 } 2808 } 2809 2810 if (IS_GEN9_LP(dev_priv) && (iir & BXT_DE_PORT_GMBUS)) { 2811 gmbus_irq_handler(dev_priv); 2812 found = true; 2813 } 2814 2815 if (!found) 2816 DRM_ERROR("Unexpected DE Port interrupt\n"); 2817 } 2818 else 2819 DRM_ERROR("The master control interrupt lied (DE PORT)!\n"); 2820 } 2821 2822 for_each_pipe(dev_priv, pipe) { 2823 u32 fault_errors; 2824 2825 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe))) 2826 continue; 2827 2828 iir = I915_READ(GEN8_DE_PIPE_IIR(pipe)); 2829 if (!iir) { 2830 DRM_ERROR("The master control interrupt lied (DE PIPE)!\n"); 2831 continue; 2832 } 2833 2834 ret = IRQ_HANDLED; 2835 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir); 2836 2837 if (iir & GEN8_PIPE_VBLANK) 2838 drm_handle_vblank(&dev_priv->drm, pipe); 2839 2840 if (iir & GEN8_PIPE_CDCLK_CRC_DONE) 2841 hsw_pipe_crc_irq_handler(dev_priv, pipe); 2842 2843 if (iir & GEN8_PIPE_FIFO_UNDERRUN) 2844 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2845 2846 fault_errors = iir; 2847 if (INTEL_GEN(dev_priv) >= 9) 2848 fault_errors &= GEN9_DE_PIPE_IRQ_FAULT_ERRORS; 2849 else 2850 fault_errors &= GEN8_DE_PIPE_IRQ_FAULT_ERRORS; 2851 2852 if (fault_errors) 2853 DRM_ERROR("Fault errors on pipe %c: 0x%08x\n", 2854 pipe_name(pipe), 2855 fault_errors); 2856 } 2857 2858 if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) && 2859 master_ctl & GEN8_DE_PCH_IRQ) { 2860 /* 2861 * FIXME(BDW): Assume for now that the new interrupt handling 2862 * scheme also closed the SDE interrupt handling race we've seen 2863 * on older pch-split platforms. But this needs testing. 2864 */ 2865 iir = I915_READ(SDEIIR); 2866 if (iir) { 2867 I915_WRITE(SDEIIR, iir); 2868 ret = IRQ_HANDLED; 2869 2870 if (HAS_PCH_ICP(dev_priv)) 2871 icp_irq_handler(dev_priv, iir); 2872 else if (HAS_PCH_SPT(dev_priv) || 2873 HAS_PCH_KBP(dev_priv) || 2874 HAS_PCH_CNP(dev_priv)) 2875 spt_irq_handler(dev_priv, iir); 2876 else 2877 cpt_irq_handler(dev_priv, iir); 2878 } else { 2879 /* 2880 * Like on previous PCH there seems to be something 2881 * fishy going on with forwarding PCH interrupts. 2882 */ 2883 DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n"); 2884 } 2885 } 2886 2887 return ret; 2888 } 2889 2890 static irqreturn_t gen8_irq_handler(int irq, void *arg) 2891 { 2892 struct drm_i915_private *dev_priv = to_i915(arg); 2893 u32 master_ctl; 2894 u32 gt_iir[4]; 2895 2896 if (!intel_irqs_enabled(dev_priv)) 2897 return IRQ_NONE; 2898 2899 master_ctl = I915_READ_FW(GEN8_MASTER_IRQ); 2900 master_ctl &= ~GEN8_MASTER_IRQ_CONTROL; 2901 if (!master_ctl) 2902 return IRQ_NONE; 2903 2904 I915_WRITE_FW(GEN8_MASTER_IRQ, 0); 2905 2906 /* Find, clear, then process each source of interrupt */ 2907 gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir); 2908 2909 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2910 if (master_ctl & ~GEN8_GT_IRQS) { 2911 disable_rpm_wakeref_asserts(dev_priv); 2912 gen8_de_irq_handler(dev_priv, master_ctl); 2913 enable_rpm_wakeref_asserts(dev_priv); 2914 } 2915 2916 I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 2917 2918 gen8_gt_irq_handler(dev_priv, master_ctl, gt_iir); 2919 2920 return IRQ_HANDLED; 2921 } 2922 2923 struct wedge_me { 2924 struct delayed_work work; 2925 struct drm_i915_private *i915; 2926 const char *name; 2927 }; 2928 2929 static void wedge_me(struct work_struct *work) 2930 { 2931 struct wedge_me *w = container_of(work, typeof(*w), work.work); 2932 2933 dev_err(w->i915->drm.dev, 2934 "%s timed out, cancelling all in-flight rendering.\n", 2935 w->name); 2936 i915_gem_set_wedged(w->i915); 2937 } 2938 2939 static void __init_wedge(struct wedge_me *w, 2940 struct drm_i915_private *i915, 2941 long timeout, 2942 const char *name) 2943 { 2944 w->i915 = i915; 2945 w->name = name; 2946 2947 INIT_DELAYED_WORK_ONSTACK(&w->work, wedge_me); 2948 schedule_delayed_work(&w->work, timeout); 2949 } 2950 2951 static void __fini_wedge(struct wedge_me *w) 2952 { 2953 cancel_delayed_work_sync(&w->work); 2954 destroy_delayed_work_on_stack(&w->work); 2955 w->i915 = NULL; 2956 } 2957 2958 #define i915_wedge_on_timeout(W, DEV, TIMEOUT) \ 2959 for (__init_wedge((W), (DEV), (TIMEOUT), __func__); \ 2960 (W)->i915; \ 2961 __fini_wedge((W))) 2962 2963 static u32 2964 gen11_gt_engine_identity(struct drm_i915_private * const i915, 2965 const unsigned int bank, const unsigned int bit) 2966 { 2967 void __iomem * const regs = i915->regs; 2968 u32 timeout_ts; 2969 u32 ident; 2970 2971 lockdep_assert_held(&i915->irq_lock); 2972 2973 raw_reg_write(regs, GEN11_IIR_REG_SELECTOR(bank), BIT(bit)); 2974 2975 /* 2976 * NB: Specs do not specify how long to spin wait, 2977 * so we do ~100us as an educated guess. 2978 */ 2979 timeout_ts = (local_clock() >> 10) + 100; 2980 do { 2981 ident = raw_reg_read(regs, GEN11_INTR_IDENTITY_REG(bank)); 2982 } while (!(ident & GEN11_INTR_DATA_VALID) && 2983 !time_after32(local_clock() >> 10, timeout_ts)); 2984 2985 if (unlikely(!(ident & GEN11_INTR_DATA_VALID))) { 2986 DRM_ERROR("INTR_IDENTITY_REG%u:%u 0x%08x not valid!\n", 2987 bank, bit, ident); 2988 return 0; 2989 } 2990 2991 raw_reg_write(regs, GEN11_INTR_IDENTITY_REG(bank), 2992 GEN11_INTR_DATA_VALID); 2993 2994 return ident; 2995 } 2996 2997 static void 2998 gen11_other_irq_handler(struct drm_i915_private * const i915, 2999 const u8 instance, const u16 iir) 3000 { 3001 if (instance == OTHER_GTPM_INSTANCE) 3002 return gen6_rps_irq_handler(i915, iir); 3003 3004 WARN_ONCE(1, "unhandled other interrupt instance=0x%x, iir=0x%x\n", 3005 instance, iir); 3006 } 3007 3008 static void 3009 gen11_engine_irq_handler(struct drm_i915_private * const i915, 3010 const u8 class, const u8 instance, const u16 iir) 3011 { 3012 struct intel_engine_cs *engine; 3013 3014 if (instance <= MAX_ENGINE_INSTANCE) 3015 engine = i915->engine_class[class][instance]; 3016 else 3017 engine = NULL; 3018 3019 if (likely(engine)) 3020 return gen8_cs_irq_handler(engine, iir); 3021 3022 WARN_ONCE(1, "unhandled engine interrupt class=0x%x, instance=0x%x\n", 3023 class, instance); 3024 } 3025 3026 static void 3027 gen11_gt_identity_handler(struct drm_i915_private * const i915, 3028 const u32 identity) 3029 { 3030 const u8 class = GEN11_INTR_ENGINE_CLASS(identity); 3031 const u8 instance = GEN11_INTR_ENGINE_INSTANCE(identity); 3032 const u16 intr = GEN11_INTR_ENGINE_INTR(identity); 3033 3034 if (unlikely(!intr)) 3035 return; 3036 3037 if (class <= COPY_ENGINE_CLASS) 3038 return gen11_engine_irq_handler(i915, class, instance, intr); 3039 3040 if (class == OTHER_CLASS) 3041 return gen11_other_irq_handler(i915, instance, intr); 3042 3043 WARN_ONCE(1, "unknown interrupt class=0x%x, instance=0x%x, intr=0x%x\n", 3044 class, instance, intr); 3045 } 3046 3047 static void 3048 gen11_gt_bank_handler(struct drm_i915_private * const i915, 3049 const unsigned int bank) 3050 { 3051 void __iomem * const regs = i915->regs; 3052 unsigned long intr_dw; 3053 unsigned int bit; 3054 3055 lockdep_assert_held(&i915->irq_lock); 3056 3057 intr_dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank)); 3058 3059 if (unlikely(!intr_dw)) { 3060 DRM_ERROR("GT_INTR_DW%u blank!\n", bank); 3061 return; 3062 } 3063 3064 for_each_set_bit(bit, &intr_dw, 32) { 3065 const u32 ident = gen11_gt_engine_identity(i915, 3066 bank, bit); 3067 3068 gen11_gt_identity_handler(i915, ident); 3069 } 3070 3071 /* Clear must be after shared has been served for engine */ 3072 raw_reg_write(regs, GEN11_GT_INTR_DW(bank), intr_dw); 3073 } 3074 3075 static void 3076 gen11_gt_irq_handler(struct drm_i915_private * const i915, 3077 const u32 master_ctl) 3078 { 3079 unsigned int bank; 3080 3081 spin_lock(&i915->irq_lock); 3082 3083 for (bank = 0; bank < 2; bank++) { 3084 if (master_ctl & GEN11_GT_DW_IRQ(bank)) 3085 gen11_gt_bank_handler(i915, bank); 3086 } 3087 3088 spin_unlock(&i915->irq_lock); 3089 } 3090 3091 static u32 3092 gen11_gu_misc_irq_ack(struct drm_i915_private *dev_priv, const u32 master_ctl) 3093 { 3094 void __iomem * const regs = dev_priv->regs; 3095 u32 iir; 3096 3097 if (!(master_ctl & GEN11_GU_MISC_IRQ)) 3098 return 0; 3099 3100 iir = raw_reg_read(regs, GEN11_GU_MISC_IIR); 3101 if (likely(iir)) 3102 raw_reg_write(regs, GEN11_GU_MISC_IIR, iir); 3103 3104 return iir; 3105 } 3106 3107 static void 3108 gen11_gu_misc_irq_handler(struct drm_i915_private *dev_priv, const u32 iir) 3109 { 3110 if (iir & GEN11_GU_MISC_GSE) 3111 intel_opregion_asle_intr(dev_priv); 3112 } 3113 3114 static irqreturn_t gen11_irq_handler(int irq, void *arg) 3115 { 3116 struct drm_i915_private * const i915 = to_i915(arg); 3117 void __iomem * const regs = i915->regs; 3118 u32 master_ctl; 3119 u32 gu_misc_iir; 3120 3121 if (!intel_irqs_enabled(i915)) 3122 return IRQ_NONE; 3123 3124 master_ctl = raw_reg_read(regs, GEN11_GFX_MSTR_IRQ); 3125 master_ctl &= ~GEN11_MASTER_IRQ; 3126 if (!master_ctl) 3127 return IRQ_NONE; 3128 3129 /* Disable interrupts. */ 3130 raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0); 3131 3132 /* Find, clear, then process each source of interrupt. */ 3133 gen11_gt_irq_handler(i915, master_ctl); 3134 3135 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 3136 if (master_ctl & GEN11_DISPLAY_IRQ) { 3137 const u32 disp_ctl = raw_reg_read(regs, GEN11_DISPLAY_INT_CTL); 3138 3139 disable_rpm_wakeref_asserts(i915); 3140 /* 3141 * GEN11_DISPLAY_INT_CTL has same format as GEN8_MASTER_IRQ 3142 * for the display related bits. 3143 */ 3144 gen8_de_irq_handler(i915, disp_ctl); 3145 enable_rpm_wakeref_asserts(i915); 3146 } 3147 3148 gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl); 3149 3150 /* Acknowledge and enable interrupts. */ 3151 raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ | master_ctl); 3152 3153 gen11_gu_misc_irq_handler(i915, gu_misc_iir); 3154 3155 return IRQ_HANDLED; 3156 } 3157 3158 static void i915_reset_device(struct drm_i915_private *dev_priv, 3159 u32 engine_mask, 3160 const char *reason) 3161 { 3162 struct i915_gpu_error *error = &dev_priv->gpu_error; 3163 struct kobject *kobj = &dev_priv->drm.primary->kdev->kobj; 3164 char *error_event[] = { I915_ERROR_UEVENT "=1", NULL }; 3165 char *reset_event[] = { I915_RESET_UEVENT "=1", NULL }; 3166 char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL }; 3167 struct wedge_me w; 3168 3169 kobject_uevent_env(kobj, KOBJ_CHANGE, error_event); 3170 3171 DRM_DEBUG_DRIVER("resetting chip\n"); 3172 kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event); 3173 3174 /* Use a watchdog to ensure that our reset completes */ 3175 i915_wedge_on_timeout(&w, dev_priv, 5*HZ) { 3176 intel_prepare_reset(dev_priv); 3177 3178 error->reason = reason; 3179 error->stalled_mask = engine_mask; 3180 3181 /* Signal that locked waiters should reset the GPU */ 3182 smp_mb__before_atomic(); 3183 set_bit(I915_RESET_HANDOFF, &error->flags); 3184 wake_up_all(&error->wait_queue); 3185 3186 /* Wait for anyone holding the lock to wakeup, without 3187 * blocking indefinitely on struct_mutex. 3188 */ 3189 do { 3190 if (mutex_trylock(&dev_priv->drm.struct_mutex)) { 3191 i915_reset(dev_priv, engine_mask, reason); 3192 mutex_unlock(&dev_priv->drm.struct_mutex); 3193 } 3194 } while (wait_on_bit_timeout(&error->flags, 3195 I915_RESET_HANDOFF, 3196 TASK_UNINTERRUPTIBLE, 3197 1)); 3198 3199 error->stalled_mask = 0; 3200 error->reason = NULL; 3201 3202 intel_finish_reset(dev_priv); 3203 } 3204 3205 if (!test_bit(I915_WEDGED, &error->flags)) 3206 kobject_uevent_env(kobj, KOBJ_CHANGE, reset_done_event); 3207 } 3208 3209 void i915_clear_error_registers(struct drm_i915_private *dev_priv) 3210 { 3211 u32 eir; 3212 3213 if (!IS_GEN2(dev_priv)) 3214 I915_WRITE(PGTBL_ER, I915_READ(PGTBL_ER)); 3215 3216 if (INTEL_GEN(dev_priv) < 4) 3217 I915_WRITE(IPEIR, I915_READ(IPEIR)); 3218 else 3219 I915_WRITE(IPEIR_I965, I915_READ(IPEIR_I965)); 3220 3221 I915_WRITE(EIR, I915_READ(EIR)); 3222 eir = I915_READ(EIR); 3223 if (eir) { 3224 /* 3225 * some errors might have become stuck, 3226 * mask them. 3227 */ 3228 DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masking\n", eir); 3229 I915_WRITE(EMR, I915_READ(EMR) | eir); 3230 I915_WRITE(IIR, I915_MASTER_ERROR_INTERRUPT); 3231 } 3232 3233 if (INTEL_GEN(dev_priv) >= 8) { 3234 I915_WRITE(GEN8_RING_FAULT_REG, 3235 I915_READ(GEN8_RING_FAULT_REG) & ~RING_FAULT_VALID); 3236 POSTING_READ(GEN8_RING_FAULT_REG); 3237 } else if (INTEL_GEN(dev_priv) >= 6) { 3238 struct intel_engine_cs *engine; 3239 enum intel_engine_id id; 3240 3241 for_each_engine(engine, dev_priv, id) { 3242 I915_WRITE(RING_FAULT_REG(engine), 3243 I915_READ(RING_FAULT_REG(engine)) & 3244 ~RING_FAULT_VALID); 3245 } 3246 POSTING_READ(RING_FAULT_REG(dev_priv->engine[RCS])); 3247 } 3248 } 3249 3250 /** 3251 * i915_handle_error - handle a gpu error 3252 * @dev_priv: i915 device private 3253 * @engine_mask: mask representing engines that are hung 3254 * @flags: control flags 3255 * @fmt: Error message format string 3256 * 3257 * Do some basic checking of register state at error time and 3258 * dump it to the syslog. Also call i915_capture_error_state() to make 3259 * sure we get a record and make it available in debugfs. Fire a uevent 3260 * so userspace knows something bad happened (should trigger collection 3261 * of a ring dump etc.). 3262 */ 3263 void i915_handle_error(struct drm_i915_private *dev_priv, 3264 u32 engine_mask, 3265 unsigned long flags, 3266 const char *fmt, ...) 3267 { 3268 struct intel_engine_cs *engine; 3269 unsigned int tmp; 3270 char error_msg[80]; 3271 char *msg = NULL; 3272 3273 if (fmt) { 3274 va_list args; 3275 3276 va_start(args, fmt); 3277 vscnprintf(error_msg, sizeof(error_msg), fmt, args); 3278 va_end(args); 3279 3280 msg = error_msg; 3281 } 3282 3283 /* 3284 * In most cases it's guaranteed that we get here with an RPM 3285 * reference held, for example because there is a pending GPU 3286 * request that won't finish until the reset is done. This 3287 * isn't the case at least when we get here by doing a 3288 * simulated reset via debugfs, so get an RPM reference. 3289 */ 3290 intel_runtime_pm_get(dev_priv); 3291 3292 engine_mask &= INTEL_INFO(dev_priv)->ring_mask; 3293 3294 if (flags & I915_ERROR_CAPTURE) { 3295 i915_capture_error_state(dev_priv, engine_mask, msg); 3296 i915_clear_error_registers(dev_priv); 3297 } 3298 3299 /* 3300 * Try engine reset when available. We fall back to full reset if 3301 * single reset fails. 3302 */ 3303 if (intel_has_reset_engine(dev_priv) && 3304 !i915_terminally_wedged(&dev_priv->gpu_error)) { 3305 for_each_engine_masked(engine, dev_priv, engine_mask, tmp) { 3306 BUILD_BUG_ON(I915_RESET_MODESET >= I915_RESET_ENGINE); 3307 if (test_and_set_bit(I915_RESET_ENGINE + engine->id, 3308 &dev_priv->gpu_error.flags)) 3309 continue; 3310 3311 if (i915_reset_engine(engine, msg) == 0) 3312 engine_mask &= ~intel_engine_flag(engine); 3313 3314 clear_bit(I915_RESET_ENGINE + engine->id, 3315 &dev_priv->gpu_error.flags); 3316 wake_up_bit(&dev_priv->gpu_error.flags, 3317 I915_RESET_ENGINE + engine->id); 3318 } 3319 } 3320 3321 if (!engine_mask) 3322 goto out; 3323 3324 /* Full reset needs the mutex, stop any other user trying to do so. */ 3325 if (test_and_set_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags)) { 3326 wait_event(dev_priv->gpu_error.reset_queue, 3327 !test_bit(I915_RESET_BACKOFF, 3328 &dev_priv->gpu_error.flags)); 3329 goto out; 3330 } 3331 3332 /* Prevent any other reset-engine attempt. */ 3333 for_each_engine(engine, dev_priv, tmp) { 3334 while (test_and_set_bit(I915_RESET_ENGINE + engine->id, 3335 &dev_priv->gpu_error.flags)) 3336 wait_on_bit(&dev_priv->gpu_error.flags, 3337 I915_RESET_ENGINE + engine->id, 3338 TASK_UNINTERRUPTIBLE); 3339 } 3340 3341 i915_reset_device(dev_priv, engine_mask, msg); 3342 3343 for_each_engine(engine, dev_priv, tmp) { 3344 clear_bit(I915_RESET_ENGINE + engine->id, 3345 &dev_priv->gpu_error.flags); 3346 } 3347 3348 clear_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags); 3349 wake_up_all(&dev_priv->gpu_error.reset_queue); 3350 3351 out: 3352 intel_runtime_pm_put(dev_priv); 3353 } 3354 3355 /* Called from drm generic code, passed 'crtc' which 3356 * we use as a pipe index 3357 */ 3358 static int i8xx_enable_vblank(struct drm_device *dev, unsigned int pipe) 3359 { 3360 struct drm_i915_private *dev_priv = to_i915(dev); 3361 unsigned long irqflags; 3362 3363 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3364 i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS); 3365 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3366 3367 return 0; 3368 } 3369 3370 static int i965_enable_vblank(struct drm_device *dev, unsigned int pipe) 3371 { 3372 struct drm_i915_private *dev_priv = to_i915(dev); 3373 unsigned long irqflags; 3374 3375 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3376 i915_enable_pipestat(dev_priv, pipe, 3377 PIPE_START_VBLANK_INTERRUPT_STATUS); 3378 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3379 3380 return 0; 3381 } 3382 3383 static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe) 3384 { 3385 struct drm_i915_private *dev_priv = to_i915(dev); 3386 unsigned long irqflags; 3387 uint32_t bit = INTEL_GEN(dev_priv) >= 7 ? 3388 DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe); 3389 3390 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3391 ilk_enable_display_irq(dev_priv, bit); 3392 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3393 3394 /* Even though there is no DMC, frame counter can get stuck when 3395 * PSR is active as no frames are generated. 3396 */ 3397 if (HAS_PSR(dev_priv)) 3398 drm_vblank_restore(dev, pipe); 3399 3400 return 0; 3401 } 3402 3403 static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe) 3404 { 3405 struct drm_i915_private *dev_priv = to_i915(dev); 3406 unsigned long irqflags; 3407 3408 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3409 bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK); 3410 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3411 3412 /* Even if there is no DMC, frame counter can get stuck when 3413 * PSR is active as no frames are generated, so check only for PSR. 3414 */ 3415 if (HAS_PSR(dev_priv)) 3416 drm_vblank_restore(dev, pipe); 3417 3418 return 0; 3419 } 3420 3421 /* Called from drm generic code, passed 'crtc' which 3422 * we use as a pipe index 3423 */ 3424 static void i8xx_disable_vblank(struct drm_device *dev, unsigned int pipe) 3425 { 3426 struct drm_i915_private *dev_priv = to_i915(dev); 3427 unsigned long irqflags; 3428 3429 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3430 i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS); 3431 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3432 } 3433 3434 static void i965_disable_vblank(struct drm_device *dev, unsigned int pipe) 3435 { 3436 struct drm_i915_private *dev_priv = to_i915(dev); 3437 unsigned long irqflags; 3438 3439 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3440 i915_disable_pipestat(dev_priv, pipe, 3441 PIPE_START_VBLANK_INTERRUPT_STATUS); 3442 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3443 } 3444 3445 static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe) 3446 { 3447 struct drm_i915_private *dev_priv = to_i915(dev); 3448 unsigned long irqflags; 3449 uint32_t bit = INTEL_GEN(dev_priv) >= 7 ? 3450 DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe); 3451 3452 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3453 ilk_disable_display_irq(dev_priv, bit); 3454 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3455 } 3456 3457 static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe) 3458 { 3459 struct drm_i915_private *dev_priv = to_i915(dev); 3460 unsigned long irqflags; 3461 3462 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3463 bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK); 3464 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3465 } 3466 3467 static void ibx_irq_reset(struct drm_i915_private *dev_priv) 3468 { 3469 if (HAS_PCH_NOP(dev_priv)) 3470 return; 3471 3472 GEN3_IRQ_RESET(SDE); 3473 3474 if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv)) 3475 I915_WRITE(SERR_INT, 0xffffffff); 3476 } 3477 3478 /* 3479 * SDEIER is also touched by the interrupt handler to work around missed PCH 3480 * interrupts. Hence we can't update it after the interrupt handler is enabled - 3481 * instead we unconditionally enable all PCH interrupt sources here, but then 3482 * only unmask them as needed with SDEIMR. 3483 * 3484 * This function needs to be called before interrupts are enabled. 3485 */ 3486 static void ibx_irq_pre_postinstall(struct drm_device *dev) 3487 { 3488 struct drm_i915_private *dev_priv = to_i915(dev); 3489 3490 if (HAS_PCH_NOP(dev_priv)) 3491 return; 3492 3493 WARN_ON(I915_READ(SDEIER) != 0); 3494 I915_WRITE(SDEIER, 0xffffffff); 3495 POSTING_READ(SDEIER); 3496 } 3497 3498 static void gen5_gt_irq_reset(struct drm_i915_private *dev_priv) 3499 { 3500 GEN3_IRQ_RESET(GT); 3501 if (INTEL_GEN(dev_priv) >= 6) 3502 GEN3_IRQ_RESET(GEN6_PM); 3503 } 3504 3505 static void vlv_display_irq_reset(struct drm_i915_private *dev_priv) 3506 { 3507 if (IS_CHERRYVIEW(dev_priv)) 3508 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV); 3509 else 3510 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK); 3511 3512 i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0); 3513 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3514 3515 i9xx_pipestat_irq_reset(dev_priv); 3516 3517 GEN3_IRQ_RESET(VLV_); 3518 dev_priv->irq_mask = ~0u; 3519 } 3520 3521 static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv) 3522 { 3523 u32 pipestat_mask; 3524 u32 enable_mask; 3525 enum pipe pipe; 3526 3527 pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS; 3528 3529 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); 3530 for_each_pipe(dev_priv, pipe) 3531 i915_enable_pipestat(dev_priv, pipe, pipestat_mask); 3532 3533 enable_mask = I915_DISPLAY_PORT_INTERRUPT | 3534 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3535 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3536 I915_LPE_PIPE_A_INTERRUPT | 3537 I915_LPE_PIPE_B_INTERRUPT; 3538 3539 if (IS_CHERRYVIEW(dev_priv)) 3540 enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT | 3541 I915_LPE_PIPE_C_INTERRUPT; 3542 3543 WARN_ON(dev_priv->irq_mask != ~0u); 3544 3545 dev_priv->irq_mask = ~enable_mask; 3546 3547 GEN3_IRQ_INIT(VLV_, dev_priv->irq_mask, enable_mask); 3548 } 3549 3550 /* drm_dma.h hooks 3551 */ 3552 static void ironlake_irq_reset(struct drm_device *dev) 3553 { 3554 struct drm_i915_private *dev_priv = to_i915(dev); 3555 3556 if (IS_GEN5(dev_priv)) 3557 I915_WRITE(HWSTAM, 0xffffffff); 3558 3559 GEN3_IRQ_RESET(DE); 3560 if (IS_GEN7(dev_priv)) 3561 I915_WRITE(GEN7_ERR_INT, 0xffffffff); 3562 3563 if (IS_HASWELL(dev_priv)) { 3564 I915_WRITE(EDP_PSR_IMR, 0xffffffff); 3565 I915_WRITE(EDP_PSR_IIR, 0xffffffff); 3566 } 3567 3568 gen5_gt_irq_reset(dev_priv); 3569 3570 ibx_irq_reset(dev_priv); 3571 } 3572 3573 static void valleyview_irq_reset(struct drm_device *dev) 3574 { 3575 struct drm_i915_private *dev_priv = to_i915(dev); 3576 3577 I915_WRITE(VLV_MASTER_IER, 0); 3578 POSTING_READ(VLV_MASTER_IER); 3579 3580 gen5_gt_irq_reset(dev_priv); 3581 3582 spin_lock_irq(&dev_priv->irq_lock); 3583 if (dev_priv->display_irqs_enabled) 3584 vlv_display_irq_reset(dev_priv); 3585 spin_unlock_irq(&dev_priv->irq_lock); 3586 } 3587 3588 static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv) 3589 { 3590 GEN8_IRQ_RESET_NDX(GT, 0); 3591 GEN8_IRQ_RESET_NDX(GT, 1); 3592 GEN8_IRQ_RESET_NDX(GT, 2); 3593 GEN8_IRQ_RESET_NDX(GT, 3); 3594 } 3595 3596 static void gen8_irq_reset(struct drm_device *dev) 3597 { 3598 struct drm_i915_private *dev_priv = to_i915(dev); 3599 int pipe; 3600 3601 I915_WRITE(GEN8_MASTER_IRQ, 0); 3602 POSTING_READ(GEN8_MASTER_IRQ); 3603 3604 gen8_gt_irq_reset(dev_priv); 3605 3606 I915_WRITE(EDP_PSR_IMR, 0xffffffff); 3607 I915_WRITE(EDP_PSR_IIR, 0xffffffff); 3608 3609 for_each_pipe(dev_priv, pipe) 3610 if (intel_display_power_is_enabled(dev_priv, 3611 POWER_DOMAIN_PIPE(pipe))) 3612 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe); 3613 3614 GEN3_IRQ_RESET(GEN8_DE_PORT_); 3615 GEN3_IRQ_RESET(GEN8_DE_MISC_); 3616 GEN3_IRQ_RESET(GEN8_PCU_); 3617 3618 if (HAS_PCH_SPLIT(dev_priv)) 3619 ibx_irq_reset(dev_priv); 3620 } 3621 3622 static void gen11_gt_irq_reset(struct drm_i915_private *dev_priv) 3623 { 3624 /* Disable RCS, BCS, VCS and VECS class engines. */ 3625 I915_WRITE(GEN11_RENDER_COPY_INTR_ENABLE, 0); 3626 I915_WRITE(GEN11_VCS_VECS_INTR_ENABLE, 0); 3627 3628 /* Restore masks irqs on RCS, BCS, VCS and VECS engines. */ 3629 I915_WRITE(GEN11_RCS0_RSVD_INTR_MASK, ~0); 3630 I915_WRITE(GEN11_BCS_RSVD_INTR_MASK, ~0); 3631 I915_WRITE(GEN11_VCS0_VCS1_INTR_MASK, ~0); 3632 I915_WRITE(GEN11_VCS2_VCS3_INTR_MASK, ~0); 3633 I915_WRITE(GEN11_VECS0_VECS1_INTR_MASK, ~0); 3634 3635 I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0); 3636 I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_MASK, ~0); 3637 } 3638 3639 static void gen11_irq_reset(struct drm_device *dev) 3640 { 3641 struct drm_i915_private *dev_priv = dev->dev_private; 3642 int pipe; 3643 3644 I915_WRITE(GEN11_GFX_MSTR_IRQ, 0); 3645 POSTING_READ(GEN11_GFX_MSTR_IRQ); 3646 3647 gen11_gt_irq_reset(dev_priv); 3648 3649 I915_WRITE(GEN11_DISPLAY_INT_CTL, 0); 3650 3651 for_each_pipe(dev_priv, pipe) 3652 if (intel_display_power_is_enabled(dev_priv, 3653 POWER_DOMAIN_PIPE(pipe))) 3654 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe); 3655 3656 GEN3_IRQ_RESET(GEN8_DE_PORT_); 3657 GEN3_IRQ_RESET(GEN8_DE_MISC_); 3658 GEN3_IRQ_RESET(GEN11_DE_HPD_); 3659 GEN3_IRQ_RESET(GEN11_GU_MISC_); 3660 GEN3_IRQ_RESET(GEN8_PCU_); 3661 3662 if (HAS_PCH_ICP(dev_priv)) 3663 GEN3_IRQ_RESET(SDE); 3664 } 3665 3666 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv, 3667 u8 pipe_mask) 3668 { 3669 uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN; 3670 enum pipe pipe; 3671 3672 spin_lock_irq(&dev_priv->irq_lock); 3673 3674 if (!intel_irqs_enabled(dev_priv)) { 3675 spin_unlock_irq(&dev_priv->irq_lock); 3676 return; 3677 } 3678 3679 for_each_pipe_masked(dev_priv, pipe, pipe_mask) 3680 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe, 3681 dev_priv->de_irq_mask[pipe], 3682 ~dev_priv->de_irq_mask[pipe] | extra_ier); 3683 3684 spin_unlock_irq(&dev_priv->irq_lock); 3685 } 3686 3687 void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv, 3688 u8 pipe_mask) 3689 { 3690 enum pipe pipe; 3691 3692 spin_lock_irq(&dev_priv->irq_lock); 3693 3694 if (!intel_irqs_enabled(dev_priv)) { 3695 spin_unlock_irq(&dev_priv->irq_lock); 3696 return; 3697 } 3698 3699 for_each_pipe_masked(dev_priv, pipe, pipe_mask) 3700 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe); 3701 3702 spin_unlock_irq(&dev_priv->irq_lock); 3703 3704 /* make sure we're done processing display irqs */ 3705 synchronize_irq(dev_priv->drm.irq); 3706 } 3707 3708 static void cherryview_irq_reset(struct drm_device *dev) 3709 { 3710 struct drm_i915_private *dev_priv = to_i915(dev); 3711 3712 I915_WRITE(GEN8_MASTER_IRQ, 0); 3713 POSTING_READ(GEN8_MASTER_IRQ); 3714 3715 gen8_gt_irq_reset(dev_priv); 3716 3717 GEN3_IRQ_RESET(GEN8_PCU_); 3718 3719 spin_lock_irq(&dev_priv->irq_lock); 3720 if (dev_priv->display_irqs_enabled) 3721 vlv_display_irq_reset(dev_priv); 3722 spin_unlock_irq(&dev_priv->irq_lock); 3723 } 3724 3725 static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv, 3726 const u32 hpd[HPD_NUM_PINS]) 3727 { 3728 struct intel_encoder *encoder; 3729 u32 enabled_irqs = 0; 3730 3731 for_each_intel_encoder(&dev_priv->drm, encoder) 3732 if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED) 3733 enabled_irqs |= hpd[encoder->hpd_pin]; 3734 3735 return enabled_irqs; 3736 } 3737 3738 static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv) 3739 { 3740 u32 hotplug; 3741 3742 /* 3743 * Enable digital hotplug on the PCH, and configure the DP short pulse 3744 * duration to 2ms (which is the minimum in the Display Port spec). 3745 * The pulse duration bits are reserved on LPT+. 3746 */ 3747 hotplug = I915_READ(PCH_PORT_HOTPLUG); 3748 hotplug &= ~(PORTB_PULSE_DURATION_MASK | 3749 PORTC_PULSE_DURATION_MASK | 3750 PORTD_PULSE_DURATION_MASK); 3751 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms; 3752 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms; 3753 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms; 3754 /* 3755 * When CPU and PCH are on the same package, port A 3756 * HPD must be enabled in both north and south. 3757 */ 3758 if (HAS_PCH_LPT_LP(dev_priv)) 3759 hotplug |= PORTA_HOTPLUG_ENABLE; 3760 I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 3761 } 3762 3763 static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv) 3764 { 3765 u32 hotplug_irqs, enabled_irqs; 3766 3767 if (HAS_PCH_IBX(dev_priv)) { 3768 hotplug_irqs = SDE_HOTPLUG_MASK; 3769 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ibx); 3770 } else { 3771 hotplug_irqs = SDE_HOTPLUG_MASK_CPT; 3772 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_cpt); 3773 } 3774 3775 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 3776 3777 ibx_hpd_detection_setup(dev_priv); 3778 } 3779 3780 static void icp_hpd_detection_setup(struct drm_i915_private *dev_priv) 3781 { 3782 u32 hotplug; 3783 3784 hotplug = I915_READ(SHOTPLUG_CTL_DDI); 3785 hotplug |= ICP_DDIA_HPD_ENABLE | 3786 ICP_DDIB_HPD_ENABLE; 3787 I915_WRITE(SHOTPLUG_CTL_DDI, hotplug); 3788 3789 hotplug = I915_READ(SHOTPLUG_CTL_TC); 3790 hotplug |= ICP_TC_HPD_ENABLE(PORT_TC1) | 3791 ICP_TC_HPD_ENABLE(PORT_TC2) | 3792 ICP_TC_HPD_ENABLE(PORT_TC3) | 3793 ICP_TC_HPD_ENABLE(PORT_TC4); 3794 I915_WRITE(SHOTPLUG_CTL_TC, hotplug); 3795 } 3796 3797 static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv) 3798 { 3799 u32 hotplug_irqs, enabled_irqs; 3800 3801 hotplug_irqs = SDE_DDI_MASK_ICP | SDE_TC_MASK_ICP; 3802 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_icp); 3803 3804 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 3805 3806 icp_hpd_detection_setup(dev_priv); 3807 } 3808 3809 static void gen11_hpd_detection_setup(struct drm_i915_private *dev_priv) 3810 { 3811 u32 hotplug; 3812 3813 hotplug = I915_READ(GEN11_TC_HOTPLUG_CTL); 3814 hotplug |= GEN11_HOTPLUG_CTL_ENABLE(PORT_TC1) | 3815 GEN11_HOTPLUG_CTL_ENABLE(PORT_TC2) | 3816 GEN11_HOTPLUG_CTL_ENABLE(PORT_TC3) | 3817 GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4); 3818 I915_WRITE(GEN11_TC_HOTPLUG_CTL, hotplug); 3819 3820 hotplug = I915_READ(GEN11_TBT_HOTPLUG_CTL); 3821 hotplug |= GEN11_HOTPLUG_CTL_ENABLE(PORT_TC1) | 3822 GEN11_HOTPLUG_CTL_ENABLE(PORT_TC2) | 3823 GEN11_HOTPLUG_CTL_ENABLE(PORT_TC3) | 3824 GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4); 3825 I915_WRITE(GEN11_TBT_HOTPLUG_CTL, hotplug); 3826 } 3827 3828 static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv) 3829 { 3830 u32 hotplug_irqs, enabled_irqs; 3831 u32 val; 3832 3833 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_gen11); 3834 hotplug_irqs = GEN11_DE_TC_HOTPLUG_MASK | GEN11_DE_TBT_HOTPLUG_MASK; 3835 3836 val = I915_READ(GEN11_DE_HPD_IMR); 3837 val &= ~hotplug_irqs; 3838 I915_WRITE(GEN11_DE_HPD_IMR, val); 3839 POSTING_READ(GEN11_DE_HPD_IMR); 3840 3841 gen11_hpd_detection_setup(dev_priv); 3842 3843 if (HAS_PCH_ICP(dev_priv)) 3844 icp_hpd_irq_setup(dev_priv); 3845 } 3846 3847 static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv) 3848 { 3849 u32 val, hotplug; 3850 3851 /* Display WA #1179 WaHardHangonHotPlug: cnp */ 3852 if (HAS_PCH_CNP(dev_priv)) { 3853 val = I915_READ(SOUTH_CHICKEN1); 3854 val &= ~CHASSIS_CLK_REQ_DURATION_MASK; 3855 val |= CHASSIS_CLK_REQ_DURATION(0xf); 3856 I915_WRITE(SOUTH_CHICKEN1, val); 3857 } 3858 3859 /* Enable digital hotplug on the PCH */ 3860 hotplug = I915_READ(PCH_PORT_HOTPLUG); 3861 hotplug |= PORTA_HOTPLUG_ENABLE | 3862 PORTB_HOTPLUG_ENABLE | 3863 PORTC_HOTPLUG_ENABLE | 3864 PORTD_HOTPLUG_ENABLE; 3865 I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 3866 3867 hotplug = I915_READ(PCH_PORT_HOTPLUG2); 3868 hotplug |= PORTE_HOTPLUG_ENABLE; 3869 I915_WRITE(PCH_PORT_HOTPLUG2, hotplug); 3870 } 3871 3872 static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv) 3873 { 3874 u32 hotplug_irqs, enabled_irqs; 3875 3876 hotplug_irqs = SDE_HOTPLUG_MASK_SPT; 3877 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_spt); 3878 3879 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 3880 3881 spt_hpd_detection_setup(dev_priv); 3882 } 3883 3884 static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv) 3885 { 3886 u32 hotplug; 3887 3888 /* 3889 * Enable digital hotplug on the CPU, and configure the DP short pulse 3890 * duration to 2ms (which is the minimum in the Display Port spec) 3891 * The pulse duration bits are reserved on HSW+. 3892 */ 3893 hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL); 3894 hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK; 3895 hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE | 3896 DIGITAL_PORTA_PULSE_DURATION_2ms; 3897 I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug); 3898 } 3899 3900 static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv) 3901 { 3902 u32 hotplug_irqs, enabled_irqs; 3903 3904 if (INTEL_GEN(dev_priv) >= 8) { 3905 hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG; 3906 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bdw); 3907 3908 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs); 3909 } else if (INTEL_GEN(dev_priv) >= 7) { 3910 hotplug_irqs = DE_DP_A_HOTPLUG_IVB; 3911 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ivb); 3912 3913 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs); 3914 } else { 3915 hotplug_irqs = DE_DP_A_HOTPLUG; 3916 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ilk); 3917 3918 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs); 3919 } 3920 3921 ilk_hpd_detection_setup(dev_priv); 3922 3923 ibx_hpd_irq_setup(dev_priv); 3924 } 3925 3926 static void __bxt_hpd_detection_setup(struct drm_i915_private *dev_priv, 3927 u32 enabled_irqs) 3928 { 3929 u32 hotplug; 3930 3931 hotplug = I915_READ(PCH_PORT_HOTPLUG); 3932 hotplug |= PORTA_HOTPLUG_ENABLE | 3933 PORTB_HOTPLUG_ENABLE | 3934 PORTC_HOTPLUG_ENABLE; 3935 3936 DRM_DEBUG_KMS("Invert bit setting: hp_ctl:%x hp_port:%x\n", 3937 hotplug, enabled_irqs); 3938 hotplug &= ~BXT_DDI_HPD_INVERT_MASK; 3939 3940 /* 3941 * For BXT invert bit has to be set based on AOB design 3942 * for HPD detection logic, update it based on VBT fields. 3943 */ 3944 if ((enabled_irqs & BXT_DE_PORT_HP_DDIA) && 3945 intel_bios_is_port_hpd_inverted(dev_priv, PORT_A)) 3946 hotplug |= BXT_DDIA_HPD_INVERT; 3947 if ((enabled_irqs & BXT_DE_PORT_HP_DDIB) && 3948 intel_bios_is_port_hpd_inverted(dev_priv, PORT_B)) 3949 hotplug |= BXT_DDIB_HPD_INVERT; 3950 if ((enabled_irqs & BXT_DE_PORT_HP_DDIC) && 3951 intel_bios_is_port_hpd_inverted(dev_priv, PORT_C)) 3952 hotplug |= BXT_DDIC_HPD_INVERT; 3953 3954 I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 3955 } 3956 3957 static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv) 3958 { 3959 __bxt_hpd_detection_setup(dev_priv, BXT_DE_PORT_HOTPLUG_MASK); 3960 } 3961 3962 static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv) 3963 { 3964 u32 hotplug_irqs, enabled_irqs; 3965 3966 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bxt); 3967 hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK; 3968 3969 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs); 3970 3971 __bxt_hpd_detection_setup(dev_priv, enabled_irqs); 3972 } 3973 3974 static void ibx_irq_postinstall(struct drm_device *dev) 3975 { 3976 struct drm_i915_private *dev_priv = to_i915(dev); 3977 u32 mask; 3978 3979 if (HAS_PCH_NOP(dev_priv)) 3980 return; 3981 3982 if (HAS_PCH_IBX(dev_priv)) 3983 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON; 3984 else if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv)) 3985 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT; 3986 else 3987 mask = SDE_GMBUS_CPT; 3988 3989 gen3_assert_iir_is_zero(dev_priv, SDEIIR); 3990 I915_WRITE(SDEIMR, ~mask); 3991 3992 if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) || 3993 HAS_PCH_LPT(dev_priv)) 3994 ibx_hpd_detection_setup(dev_priv); 3995 else 3996 spt_hpd_detection_setup(dev_priv); 3997 } 3998 3999 static void gen5_gt_irq_postinstall(struct drm_device *dev) 4000 { 4001 struct drm_i915_private *dev_priv = to_i915(dev); 4002 u32 pm_irqs, gt_irqs; 4003 4004 pm_irqs = gt_irqs = 0; 4005 4006 dev_priv->gt_irq_mask = ~0; 4007 if (HAS_L3_DPF(dev_priv)) { 4008 /* L3 parity interrupt is always unmasked. */ 4009 dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev_priv); 4010 gt_irqs |= GT_PARITY_ERROR(dev_priv); 4011 } 4012 4013 gt_irqs |= GT_RENDER_USER_INTERRUPT; 4014 if (IS_GEN5(dev_priv)) { 4015 gt_irqs |= ILK_BSD_USER_INTERRUPT; 4016 } else { 4017 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT; 4018 } 4019 4020 GEN3_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs); 4021 4022 if (INTEL_GEN(dev_priv) >= 6) { 4023 /* 4024 * RPS interrupts will get enabled/disabled on demand when RPS 4025 * itself is enabled/disabled. 4026 */ 4027 if (HAS_VEBOX(dev_priv)) { 4028 pm_irqs |= PM_VEBOX_USER_INTERRUPT; 4029 dev_priv->pm_ier |= PM_VEBOX_USER_INTERRUPT; 4030 } 4031 4032 dev_priv->pm_imr = 0xffffffff; 4033 GEN3_IRQ_INIT(GEN6_PM, dev_priv->pm_imr, pm_irqs); 4034 } 4035 } 4036 4037 static int ironlake_irq_postinstall(struct drm_device *dev) 4038 { 4039 struct drm_i915_private *dev_priv = to_i915(dev); 4040 u32 display_mask, extra_mask; 4041 4042 if (INTEL_GEN(dev_priv) >= 7) { 4043 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | 4044 DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB); 4045 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB | 4046 DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB | 4047 DE_DP_A_HOTPLUG_IVB); 4048 } else { 4049 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | 4050 DE_AUX_CHANNEL_A | DE_PIPEB_CRC_DONE | 4051 DE_PIPEA_CRC_DONE | DE_POISON); 4052 extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT | 4053 DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN | 4054 DE_DP_A_HOTPLUG); 4055 } 4056 4057 if (IS_HASWELL(dev_priv)) { 4058 gen3_assert_iir_is_zero(dev_priv, EDP_PSR_IIR); 4059 intel_psr_irq_control(dev_priv, dev_priv->psr.debug); 4060 display_mask |= DE_EDP_PSR_INT_HSW; 4061 } 4062 4063 dev_priv->irq_mask = ~display_mask; 4064 4065 ibx_irq_pre_postinstall(dev); 4066 4067 GEN3_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask); 4068 4069 gen5_gt_irq_postinstall(dev); 4070 4071 ilk_hpd_detection_setup(dev_priv); 4072 4073 ibx_irq_postinstall(dev); 4074 4075 if (IS_IRONLAKE_M(dev_priv)) { 4076 /* Enable PCU event interrupts 4077 * 4078 * spinlocking not required here for correctness since interrupt 4079 * setup is guaranteed to run in single-threaded context. But we 4080 * need it to make the assert_spin_locked happy. */ 4081 spin_lock_irq(&dev_priv->irq_lock); 4082 ilk_enable_display_irq(dev_priv, DE_PCU_EVENT); 4083 spin_unlock_irq(&dev_priv->irq_lock); 4084 } 4085 4086 return 0; 4087 } 4088 4089 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv) 4090 { 4091 lockdep_assert_held(&dev_priv->irq_lock); 4092 4093 if (dev_priv->display_irqs_enabled) 4094 return; 4095 4096 dev_priv->display_irqs_enabled = true; 4097 4098 if (intel_irqs_enabled(dev_priv)) { 4099 vlv_display_irq_reset(dev_priv); 4100 vlv_display_irq_postinstall(dev_priv); 4101 } 4102 } 4103 4104 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv) 4105 { 4106 lockdep_assert_held(&dev_priv->irq_lock); 4107 4108 if (!dev_priv->display_irqs_enabled) 4109 return; 4110 4111 dev_priv->display_irqs_enabled = false; 4112 4113 if (intel_irqs_enabled(dev_priv)) 4114 vlv_display_irq_reset(dev_priv); 4115 } 4116 4117 4118 static int valleyview_irq_postinstall(struct drm_device *dev) 4119 { 4120 struct drm_i915_private *dev_priv = to_i915(dev); 4121 4122 gen5_gt_irq_postinstall(dev); 4123 4124 spin_lock_irq(&dev_priv->irq_lock); 4125 if (dev_priv->display_irqs_enabled) 4126 vlv_display_irq_postinstall(dev_priv); 4127 spin_unlock_irq(&dev_priv->irq_lock); 4128 4129 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); 4130 POSTING_READ(VLV_MASTER_IER); 4131 4132 return 0; 4133 } 4134 4135 static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv) 4136 { 4137 /* These are interrupts we'll toggle with the ring mask register */ 4138 uint32_t gt_interrupts[] = { 4139 GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT | 4140 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT | 4141 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT | 4142 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT, 4143 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT | 4144 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT | 4145 GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT | 4146 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT, 4147 0, 4148 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT | 4149 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT 4150 }; 4151 4152 if (HAS_L3_DPF(dev_priv)) 4153 gt_interrupts[0] |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT; 4154 4155 dev_priv->pm_ier = 0x0; 4156 dev_priv->pm_imr = ~dev_priv->pm_ier; 4157 GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]); 4158 GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]); 4159 /* 4160 * RPS interrupts will get enabled/disabled on demand when RPS itself 4161 * is enabled/disabled. Same wil be the case for GuC interrupts. 4162 */ 4163 GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_imr, dev_priv->pm_ier); 4164 GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]); 4165 } 4166 4167 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) 4168 { 4169 uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE; 4170 uint32_t de_pipe_enables; 4171 u32 de_port_masked = GEN8_AUX_CHANNEL_A; 4172 u32 de_port_enables; 4173 u32 de_misc_masked = GEN8_DE_EDP_PSR; 4174 enum pipe pipe; 4175 4176 if (INTEL_GEN(dev_priv) <= 10) 4177 de_misc_masked |= GEN8_DE_MISC_GSE; 4178 4179 if (INTEL_GEN(dev_priv) >= 9) { 4180 de_pipe_masked |= GEN9_DE_PIPE_IRQ_FAULT_ERRORS; 4181 de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C | 4182 GEN9_AUX_CHANNEL_D; 4183 if (IS_GEN9_LP(dev_priv)) 4184 de_port_masked |= BXT_DE_PORT_GMBUS; 4185 } else { 4186 de_pipe_masked |= GEN8_DE_PIPE_IRQ_FAULT_ERRORS; 4187 } 4188 4189 if (INTEL_GEN(dev_priv) >= 11) 4190 de_port_masked |= ICL_AUX_CHANNEL_E; 4191 4192 if (IS_CNL_WITH_PORT_F(dev_priv) || INTEL_GEN(dev_priv) >= 11) 4193 de_port_masked |= CNL_AUX_CHANNEL_F; 4194 4195 de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK | 4196 GEN8_PIPE_FIFO_UNDERRUN; 4197 4198 de_port_enables = de_port_masked; 4199 if (IS_GEN9_LP(dev_priv)) 4200 de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK; 4201 else if (IS_BROADWELL(dev_priv)) 4202 de_port_enables |= GEN8_PORT_DP_A_HOTPLUG; 4203 4204 gen3_assert_iir_is_zero(dev_priv, EDP_PSR_IIR); 4205 intel_psr_irq_control(dev_priv, dev_priv->psr.debug); 4206 4207 for_each_pipe(dev_priv, pipe) { 4208 dev_priv->de_irq_mask[pipe] = ~de_pipe_masked; 4209 4210 if (intel_display_power_is_enabled(dev_priv, 4211 POWER_DOMAIN_PIPE(pipe))) 4212 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe, 4213 dev_priv->de_irq_mask[pipe], 4214 de_pipe_enables); 4215 } 4216 4217 GEN3_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables); 4218 GEN3_IRQ_INIT(GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked); 4219 4220 if (INTEL_GEN(dev_priv) >= 11) { 4221 u32 de_hpd_masked = 0; 4222 u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK | 4223 GEN11_DE_TBT_HOTPLUG_MASK; 4224 4225 GEN3_IRQ_INIT(GEN11_DE_HPD_, ~de_hpd_masked, de_hpd_enables); 4226 gen11_hpd_detection_setup(dev_priv); 4227 } else if (IS_GEN9_LP(dev_priv)) { 4228 bxt_hpd_detection_setup(dev_priv); 4229 } else if (IS_BROADWELL(dev_priv)) { 4230 ilk_hpd_detection_setup(dev_priv); 4231 } 4232 } 4233 4234 static int gen8_irq_postinstall(struct drm_device *dev) 4235 { 4236 struct drm_i915_private *dev_priv = to_i915(dev); 4237 4238 if (HAS_PCH_SPLIT(dev_priv)) 4239 ibx_irq_pre_postinstall(dev); 4240 4241 gen8_gt_irq_postinstall(dev_priv); 4242 gen8_de_irq_postinstall(dev_priv); 4243 4244 if (HAS_PCH_SPLIT(dev_priv)) 4245 ibx_irq_postinstall(dev); 4246 4247 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 4248 POSTING_READ(GEN8_MASTER_IRQ); 4249 4250 return 0; 4251 } 4252 4253 static void gen11_gt_irq_postinstall(struct drm_i915_private *dev_priv) 4254 { 4255 const u32 irqs = GT_RENDER_USER_INTERRUPT | GT_CONTEXT_SWITCH_INTERRUPT; 4256 4257 BUILD_BUG_ON(irqs & 0xffff0000); 4258 4259 /* Enable RCS, BCS, VCS and VECS class interrupts. */ 4260 I915_WRITE(GEN11_RENDER_COPY_INTR_ENABLE, irqs << 16 | irqs); 4261 I915_WRITE(GEN11_VCS_VECS_INTR_ENABLE, irqs << 16 | irqs); 4262 4263 /* Unmask irqs on RCS, BCS, VCS and VECS engines. */ 4264 I915_WRITE(GEN11_RCS0_RSVD_INTR_MASK, ~(irqs << 16)); 4265 I915_WRITE(GEN11_BCS_RSVD_INTR_MASK, ~(irqs << 16)); 4266 I915_WRITE(GEN11_VCS0_VCS1_INTR_MASK, ~(irqs | irqs << 16)); 4267 I915_WRITE(GEN11_VCS2_VCS3_INTR_MASK, ~(irqs | irqs << 16)); 4268 I915_WRITE(GEN11_VECS0_VECS1_INTR_MASK, ~(irqs | irqs << 16)); 4269 4270 /* 4271 * RPS interrupts will get enabled/disabled on demand when RPS itself 4272 * is enabled/disabled. 4273 */ 4274 dev_priv->pm_ier = 0x0; 4275 dev_priv->pm_imr = ~dev_priv->pm_ier; 4276 I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0); 4277 I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_MASK, ~0); 4278 } 4279 4280 static void icp_irq_postinstall(struct drm_device *dev) 4281 { 4282 struct drm_i915_private *dev_priv = to_i915(dev); 4283 u32 mask = SDE_GMBUS_ICP; 4284 4285 WARN_ON(I915_READ(SDEIER) != 0); 4286 I915_WRITE(SDEIER, 0xffffffff); 4287 POSTING_READ(SDEIER); 4288 4289 gen3_assert_iir_is_zero(dev_priv, SDEIIR); 4290 I915_WRITE(SDEIMR, ~mask); 4291 4292 icp_hpd_detection_setup(dev_priv); 4293 } 4294 4295 static int gen11_irq_postinstall(struct drm_device *dev) 4296 { 4297 struct drm_i915_private *dev_priv = dev->dev_private; 4298 u32 gu_misc_masked = GEN11_GU_MISC_GSE; 4299 4300 if (HAS_PCH_ICP(dev_priv)) 4301 icp_irq_postinstall(dev); 4302 4303 gen11_gt_irq_postinstall(dev_priv); 4304 gen8_de_irq_postinstall(dev_priv); 4305 4306 GEN3_IRQ_INIT(GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked); 4307 4308 I915_WRITE(GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE); 4309 4310 I915_WRITE(GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ); 4311 POSTING_READ(GEN11_GFX_MSTR_IRQ); 4312 4313 return 0; 4314 } 4315 4316 static int cherryview_irq_postinstall(struct drm_device *dev) 4317 { 4318 struct drm_i915_private *dev_priv = to_i915(dev); 4319 4320 gen8_gt_irq_postinstall(dev_priv); 4321 4322 spin_lock_irq(&dev_priv->irq_lock); 4323 if (dev_priv->display_irqs_enabled) 4324 vlv_display_irq_postinstall(dev_priv); 4325 spin_unlock_irq(&dev_priv->irq_lock); 4326 4327 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 4328 POSTING_READ(GEN8_MASTER_IRQ); 4329 4330 return 0; 4331 } 4332 4333 static void i8xx_irq_reset(struct drm_device *dev) 4334 { 4335 struct drm_i915_private *dev_priv = to_i915(dev); 4336 4337 i9xx_pipestat_irq_reset(dev_priv); 4338 4339 I915_WRITE16(HWSTAM, 0xffff); 4340 4341 GEN2_IRQ_RESET(); 4342 } 4343 4344 static int i8xx_irq_postinstall(struct drm_device *dev) 4345 { 4346 struct drm_i915_private *dev_priv = to_i915(dev); 4347 u16 enable_mask; 4348 4349 I915_WRITE16(EMR, ~(I915_ERROR_PAGE_TABLE | 4350 I915_ERROR_MEMORY_REFRESH)); 4351 4352 /* Unmask the interrupts that we always want on. */ 4353 dev_priv->irq_mask = 4354 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 4355 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 4356 I915_MASTER_ERROR_INTERRUPT); 4357 4358 enable_mask = 4359 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 4360 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 4361 I915_MASTER_ERROR_INTERRUPT | 4362 I915_USER_INTERRUPT; 4363 4364 GEN2_IRQ_INIT(, dev_priv->irq_mask, enable_mask); 4365 4366 /* Interrupt setup is already guaranteed to be single-threaded, this is 4367 * just to make the assert_spin_locked check happy. */ 4368 spin_lock_irq(&dev_priv->irq_lock); 4369 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 4370 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 4371 spin_unlock_irq(&dev_priv->irq_lock); 4372 4373 return 0; 4374 } 4375 4376 static void i8xx_error_irq_ack(struct drm_i915_private *dev_priv, 4377 u16 *eir, u16 *eir_stuck) 4378 { 4379 u16 emr; 4380 4381 *eir = I915_READ16(EIR); 4382 4383 if (*eir) 4384 I915_WRITE16(EIR, *eir); 4385 4386 *eir_stuck = I915_READ16(EIR); 4387 if (*eir_stuck == 0) 4388 return; 4389 4390 /* 4391 * Toggle all EMR bits to make sure we get an edge 4392 * in the ISR master error bit if we don't clear 4393 * all the EIR bits. Otherwise the edge triggered 4394 * IIR on i965/g4x wouldn't notice that an interrupt 4395 * is still pending. Also some EIR bits can't be 4396 * cleared except by handling the underlying error 4397 * (or by a GPU reset) so we mask any bit that 4398 * remains set. 4399 */ 4400 emr = I915_READ16(EMR); 4401 I915_WRITE16(EMR, 0xffff); 4402 I915_WRITE16(EMR, emr | *eir_stuck); 4403 } 4404 4405 static void i8xx_error_irq_handler(struct drm_i915_private *dev_priv, 4406 u16 eir, u16 eir_stuck) 4407 { 4408 DRM_DEBUG("Master Error: EIR 0x%04x\n", eir); 4409 4410 if (eir_stuck) 4411 DRM_DEBUG_DRIVER("EIR stuck: 0x%04x, masked\n", eir_stuck); 4412 } 4413 4414 static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv, 4415 u32 *eir, u32 *eir_stuck) 4416 { 4417 u32 emr; 4418 4419 *eir = I915_READ(EIR); 4420 4421 I915_WRITE(EIR, *eir); 4422 4423 *eir_stuck = I915_READ(EIR); 4424 if (*eir_stuck == 0) 4425 return; 4426 4427 /* 4428 * Toggle all EMR bits to make sure we get an edge 4429 * in the ISR master error bit if we don't clear 4430 * all the EIR bits. Otherwise the edge triggered 4431 * IIR on i965/g4x wouldn't notice that an interrupt 4432 * is still pending. Also some EIR bits can't be 4433 * cleared except by handling the underlying error 4434 * (or by a GPU reset) so we mask any bit that 4435 * remains set. 4436 */ 4437 emr = I915_READ(EMR); 4438 I915_WRITE(EMR, 0xffffffff); 4439 I915_WRITE(EMR, emr | *eir_stuck); 4440 } 4441 4442 static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv, 4443 u32 eir, u32 eir_stuck) 4444 { 4445 DRM_DEBUG("Master Error, EIR 0x%08x\n", eir); 4446 4447 if (eir_stuck) 4448 DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masked\n", eir_stuck); 4449 } 4450 4451 static irqreturn_t i8xx_irq_handler(int irq, void *arg) 4452 { 4453 struct drm_device *dev = arg; 4454 struct drm_i915_private *dev_priv = to_i915(dev); 4455 irqreturn_t ret = IRQ_NONE; 4456 4457 if (!intel_irqs_enabled(dev_priv)) 4458 return IRQ_NONE; 4459 4460 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 4461 disable_rpm_wakeref_asserts(dev_priv); 4462 4463 do { 4464 u32 pipe_stats[I915_MAX_PIPES] = {}; 4465 u16 eir = 0, eir_stuck = 0; 4466 u16 iir; 4467 4468 iir = I915_READ16(IIR); 4469 if (iir == 0) 4470 break; 4471 4472 ret = IRQ_HANDLED; 4473 4474 /* Call regardless, as some status bits might not be 4475 * signalled in iir */ 4476 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats); 4477 4478 if (iir & I915_MASTER_ERROR_INTERRUPT) 4479 i8xx_error_irq_ack(dev_priv, &eir, &eir_stuck); 4480 4481 I915_WRITE16(IIR, iir); 4482 4483 if (iir & I915_USER_INTERRUPT) 4484 notify_ring(dev_priv->engine[RCS]); 4485 4486 if (iir & I915_MASTER_ERROR_INTERRUPT) 4487 i8xx_error_irq_handler(dev_priv, eir, eir_stuck); 4488 4489 i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats); 4490 } while (0); 4491 4492 enable_rpm_wakeref_asserts(dev_priv); 4493 4494 return ret; 4495 } 4496 4497 static void i915_irq_reset(struct drm_device *dev) 4498 { 4499 struct drm_i915_private *dev_priv = to_i915(dev); 4500 4501 if (I915_HAS_HOTPLUG(dev_priv)) { 4502 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); 4503 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 4504 } 4505 4506 i9xx_pipestat_irq_reset(dev_priv); 4507 4508 I915_WRITE(HWSTAM, 0xffffffff); 4509 4510 GEN3_IRQ_RESET(); 4511 } 4512 4513 static int i915_irq_postinstall(struct drm_device *dev) 4514 { 4515 struct drm_i915_private *dev_priv = to_i915(dev); 4516 u32 enable_mask; 4517 4518 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | 4519 I915_ERROR_MEMORY_REFRESH)); 4520 4521 /* Unmask the interrupts that we always want on. */ 4522 dev_priv->irq_mask = 4523 ~(I915_ASLE_INTERRUPT | 4524 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 4525 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 4526 I915_MASTER_ERROR_INTERRUPT); 4527 4528 enable_mask = 4529 I915_ASLE_INTERRUPT | 4530 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 4531 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 4532 I915_MASTER_ERROR_INTERRUPT | 4533 I915_USER_INTERRUPT; 4534 4535 if (I915_HAS_HOTPLUG(dev_priv)) { 4536 /* Enable in IER... */ 4537 enable_mask |= I915_DISPLAY_PORT_INTERRUPT; 4538 /* and unmask in IMR */ 4539 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT; 4540 } 4541 4542 GEN3_IRQ_INIT(, dev_priv->irq_mask, enable_mask); 4543 4544 /* Interrupt setup is already guaranteed to be single-threaded, this is 4545 * just to make the assert_spin_locked check happy. */ 4546 spin_lock_irq(&dev_priv->irq_lock); 4547 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 4548 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 4549 spin_unlock_irq(&dev_priv->irq_lock); 4550 4551 i915_enable_asle_pipestat(dev_priv); 4552 4553 return 0; 4554 } 4555 4556 static irqreturn_t i915_irq_handler(int irq, void *arg) 4557 { 4558 struct drm_device *dev = arg; 4559 struct drm_i915_private *dev_priv = to_i915(dev); 4560 irqreturn_t ret = IRQ_NONE; 4561 4562 if (!intel_irqs_enabled(dev_priv)) 4563 return IRQ_NONE; 4564 4565 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 4566 disable_rpm_wakeref_asserts(dev_priv); 4567 4568 do { 4569 u32 pipe_stats[I915_MAX_PIPES] = {}; 4570 u32 eir = 0, eir_stuck = 0; 4571 u32 hotplug_status = 0; 4572 u32 iir; 4573 4574 iir = I915_READ(IIR); 4575 if (iir == 0) 4576 break; 4577 4578 ret = IRQ_HANDLED; 4579 4580 if (I915_HAS_HOTPLUG(dev_priv) && 4581 iir & I915_DISPLAY_PORT_INTERRUPT) 4582 hotplug_status = i9xx_hpd_irq_ack(dev_priv); 4583 4584 /* Call regardless, as some status bits might not be 4585 * signalled in iir */ 4586 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats); 4587 4588 if (iir & I915_MASTER_ERROR_INTERRUPT) 4589 i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck); 4590 4591 I915_WRITE(IIR, iir); 4592 4593 if (iir & I915_USER_INTERRUPT) 4594 notify_ring(dev_priv->engine[RCS]); 4595 4596 if (iir & I915_MASTER_ERROR_INTERRUPT) 4597 i9xx_error_irq_handler(dev_priv, eir, eir_stuck); 4598 4599 if (hotplug_status) 4600 i9xx_hpd_irq_handler(dev_priv, hotplug_status); 4601 4602 i915_pipestat_irq_handler(dev_priv, iir, pipe_stats); 4603 } while (0); 4604 4605 enable_rpm_wakeref_asserts(dev_priv); 4606 4607 return ret; 4608 } 4609 4610 static void i965_irq_reset(struct drm_device *dev) 4611 { 4612 struct drm_i915_private *dev_priv = to_i915(dev); 4613 4614 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); 4615 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 4616 4617 i9xx_pipestat_irq_reset(dev_priv); 4618 4619 I915_WRITE(HWSTAM, 0xffffffff); 4620 4621 GEN3_IRQ_RESET(); 4622 } 4623 4624 static int i965_irq_postinstall(struct drm_device *dev) 4625 { 4626 struct drm_i915_private *dev_priv = to_i915(dev); 4627 u32 enable_mask; 4628 u32 error_mask; 4629 4630 /* 4631 * Enable some error detection, note the instruction error mask 4632 * bit is reserved, so we leave it masked. 4633 */ 4634 if (IS_G4X(dev_priv)) { 4635 error_mask = ~(GM45_ERROR_PAGE_TABLE | 4636 GM45_ERROR_MEM_PRIV | 4637 GM45_ERROR_CP_PRIV | 4638 I915_ERROR_MEMORY_REFRESH); 4639 } else { 4640 error_mask = ~(I915_ERROR_PAGE_TABLE | 4641 I915_ERROR_MEMORY_REFRESH); 4642 } 4643 I915_WRITE(EMR, error_mask); 4644 4645 /* Unmask the interrupts that we always want on. */ 4646 dev_priv->irq_mask = 4647 ~(I915_ASLE_INTERRUPT | 4648 I915_DISPLAY_PORT_INTERRUPT | 4649 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 4650 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 4651 I915_MASTER_ERROR_INTERRUPT); 4652 4653 enable_mask = 4654 I915_ASLE_INTERRUPT | 4655 I915_DISPLAY_PORT_INTERRUPT | 4656 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 4657 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 4658 I915_MASTER_ERROR_INTERRUPT | 4659 I915_USER_INTERRUPT; 4660 4661 if (IS_G4X(dev_priv)) 4662 enable_mask |= I915_BSD_USER_INTERRUPT; 4663 4664 GEN3_IRQ_INIT(, dev_priv->irq_mask, enable_mask); 4665 4666 /* Interrupt setup is already guaranteed to be single-threaded, this is 4667 * just to make the assert_spin_locked check happy. */ 4668 spin_lock_irq(&dev_priv->irq_lock); 4669 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); 4670 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 4671 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 4672 spin_unlock_irq(&dev_priv->irq_lock); 4673 4674 i915_enable_asle_pipestat(dev_priv); 4675 4676 return 0; 4677 } 4678 4679 static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv) 4680 { 4681 u32 hotplug_en; 4682 4683 lockdep_assert_held(&dev_priv->irq_lock); 4684 4685 /* Note HDMI and DP share hotplug bits */ 4686 /* enable bits are the same for all generations */ 4687 hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915); 4688 /* Programming the CRT detection parameters tends 4689 to generate a spurious hotplug event about three 4690 seconds later. So just do it once. 4691 */ 4692 if (IS_G4X(dev_priv)) 4693 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64; 4694 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; 4695 4696 /* Ignore TV since it's buggy */ 4697 i915_hotplug_interrupt_update_locked(dev_priv, 4698 HOTPLUG_INT_EN_MASK | 4699 CRT_HOTPLUG_VOLTAGE_COMPARE_MASK | 4700 CRT_HOTPLUG_ACTIVATION_PERIOD_64, 4701 hotplug_en); 4702 } 4703 4704 static irqreturn_t i965_irq_handler(int irq, void *arg) 4705 { 4706 struct drm_device *dev = arg; 4707 struct drm_i915_private *dev_priv = to_i915(dev); 4708 irqreturn_t ret = IRQ_NONE; 4709 4710 if (!intel_irqs_enabled(dev_priv)) 4711 return IRQ_NONE; 4712 4713 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 4714 disable_rpm_wakeref_asserts(dev_priv); 4715 4716 do { 4717 u32 pipe_stats[I915_MAX_PIPES] = {}; 4718 u32 eir = 0, eir_stuck = 0; 4719 u32 hotplug_status = 0; 4720 u32 iir; 4721 4722 iir = I915_READ(IIR); 4723 if (iir == 0) 4724 break; 4725 4726 ret = IRQ_HANDLED; 4727 4728 if (iir & I915_DISPLAY_PORT_INTERRUPT) 4729 hotplug_status = i9xx_hpd_irq_ack(dev_priv); 4730 4731 /* Call regardless, as some status bits might not be 4732 * signalled in iir */ 4733 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats); 4734 4735 if (iir & I915_MASTER_ERROR_INTERRUPT) 4736 i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck); 4737 4738 I915_WRITE(IIR, iir); 4739 4740 if (iir & I915_USER_INTERRUPT) 4741 notify_ring(dev_priv->engine[RCS]); 4742 4743 if (iir & I915_BSD_USER_INTERRUPT) 4744 notify_ring(dev_priv->engine[VCS]); 4745 4746 if (iir & I915_MASTER_ERROR_INTERRUPT) 4747 i9xx_error_irq_handler(dev_priv, eir, eir_stuck); 4748 4749 if (hotplug_status) 4750 i9xx_hpd_irq_handler(dev_priv, hotplug_status); 4751 4752 i965_pipestat_irq_handler(dev_priv, iir, pipe_stats); 4753 } while (0); 4754 4755 enable_rpm_wakeref_asserts(dev_priv); 4756 4757 return ret; 4758 } 4759 4760 /** 4761 * intel_irq_init - initializes irq support 4762 * @dev_priv: i915 device instance 4763 * 4764 * This function initializes all the irq support including work items, timers 4765 * and all the vtables. It does not setup the interrupt itself though. 4766 */ 4767 void intel_irq_init(struct drm_i915_private *dev_priv) 4768 { 4769 struct drm_device *dev = &dev_priv->drm; 4770 struct intel_rps *rps = &dev_priv->gt_pm.rps; 4771 int i; 4772 4773 intel_hpd_init_work(dev_priv); 4774 4775 INIT_WORK(&rps->work, gen6_pm_rps_work); 4776 4777 INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work); 4778 for (i = 0; i < MAX_L3_SLICES; ++i) 4779 dev_priv->l3_parity.remap_info[i] = NULL; 4780 4781 if (HAS_GUC_SCHED(dev_priv)) 4782 dev_priv->pm_guc_events = GEN9_GUC_TO_HOST_INT_EVENT; 4783 4784 /* Let's track the enabled rps events */ 4785 if (IS_VALLEYVIEW(dev_priv)) 4786 /* WaGsvRC0ResidencyMethod:vlv */ 4787 dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED; 4788 else 4789 dev_priv->pm_rps_events = (GEN6_PM_RP_UP_THRESHOLD | 4790 GEN6_PM_RP_DOWN_THRESHOLD | 4791 GEN6_PM_RP_DOWN_TIMEOUT); 4792 4793 rps->pm_intrmsk_mbz = 0; 4794 4795 /* 4796 * SNB,IVB,HSW can while VLV,CHV may hard hang on looping batchbuffer 4797 * if GEN6_PM_UP_EI_EXPIRED is masked. 4798 * 4799 * TODO: verify if this can be reproduced on VLV,CHV. 4800 */ 4801 if (INTEL_GEN(dev_priv) <= 7) 4802 rps->pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED; 4803 4804 if (INTEL_GEN(dev_priv) >= 8) 4805 rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC; 4806 4807 if (IS_GEN2(dev_priv)) { 4808 /* Gen2 doesn't have a hardware frame counter */ 4809 dev->max_vblank_count = 0; 4810 } else if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) { 4811 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */ 4812 dev->driver->get_vblank_counter = g4x_get_vblank_counter; 4813 } else { 4814 dev->driver->get_vblank_counter = i915_get_vblank_counter; 4815 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ 4816 } 4817 4818 /* 4819 * Opt out of the vblank disable timer on everything except gen2. 4820 * Gen2 doesn't have a hardware frame counter and so depends on 4821 * vblank interrupts to produce sane vblank seuquence numbers. 4822 */ 4823 if (!IS_GEN2(dev_priv)) 4824 dev->vblank_disable_immediate = true; 4825 4826 /* Most platforms treat the display irq block as an always-on 4827 * power domain. vlv/chv can disable it at runtime and need 4828 * special care to avoid writing any of the display block registers 4829 * outside of the power domain. We defer setting up the display irqs 4830 * in this case to the runtime pm. 4831 */ 4832 dev_priv->display_irqs_enabled = true; 4833 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 4834 dev_priv->display_irqs_enabled = false; 4835 4836 dev_priv->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD; 4837 4838 dev->driver->get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos; 4839 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos; 4840 4841 if (IS_CHERRYVIEW(dev_priv)) { 4842 dev->driver->irq_handler = cherryview_irq_handler; 4843 dev->driver->irq_preinstall = cherryview_irq_reset; 4844 dev->driver->irq_postinstall = cherryview_irq_postinstall; 4845 dev->driver->irq_uninstall = cherryview_irq_reset; 4846 dev->driver->enable_vblank = i965_enable_vblank; 4847 dev->driver->disable_vblank = i965_disable_vblank; 4848 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 4849 } else if (IS_VALLEYVIEW(dev_priv)) { 4850 dev->driver->irq_handler = valleyview_irq_handler; 4851 dev->driver->irq_preinstall = valleyview_irq_reset; 4852 dev->driver->irq_postinstall = valleyview_irq_postinstall; 4853 dev->driver->irq_uninstall = valleyview_irq_reset; 4854 dev->driver->enable_vblank = i965_enable_vblank; 4855 dev->driver->disable_vblank = i965_disable_vblank; 4856 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 4857 } else if (INTEL_GEN(dev_priv) >= 11) { 4858 dev->driver->irq_handler = gen11_irq_handler; 4859 dev->driver->irq_preinstall = gen11_irq_reset; 4860 dev->driver->irq_postinstall = gen11_irq_postinstall; 4861 dev->driver->irq_uninstall = gen11_irq_reset; 4862 dev->driver->enable_vblank = gen8_enable_vblank; 4863 dev->driver->disable_vblank = gen8_disable_vblank; 4864 dev_priv->display.hpd_irq_setup = gen11_hpd_irq_setup; 4865 } else if (INTEL_GEN(dev_priv) >= 8) { 4866 dev->driver->irq_handler = gen8_irq_handler; 4867 dev->driver->irq_preinstall = gen8_irq_reset; 4868 dev->driver->irq_postinstall = gen8_irq_postinstall; 4869 dev->driver->irq_uninstall = gen8_irq_reset; 4870 dev->driver->enable_vblank = gen8_enable_vblank; 4871 dev->driver->disable_vblank = gen8_disable_vblank; 4872 if (IS_GEN9_LP(dev_priv)) 4873 dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup; 4874 else if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv) || 4875 HAS_PCH_CNP(dev_priv)) 4876 dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup; 4877 else 4878 dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup; 4879 } else if (HAS_PCH_SPLIT(dev_priv)) { 4880 dev->driver->irq_handler = ironlake_irq_handler; 4881 dev->driver->irq_preinstall = ironlake_irq_reset; 4882 dev->driver->irq_postinstall = ironlake_irq_postinstall; 4883 dev->driver->irq_uninstall = ironlake_irq_reset; 4884 dev->driver->enable_vblank = ironlake_enable_vblank; 4885 dev->driver->disable_vblank = ironlake_disable_vblank; 4886 dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup; 4887 } else { 4888 if (IS_GEN2(dev_priv)) { 4889 dev->driver->irq_preinstall = i8xx_irq_reset; 4890 dev->driver->irq_postinstall = i8xx_irq_postinstall; 4891 dev->driver->irq_handler = i8xx_irq_handler; 4892 dev->driver->irq_uninstall = i8xx_irq_reset; 4893 dev->driver->enable_vblank = i8xx_enable_vblank; 4894 dev->driver->disable_vblank = i8xx_disable_vblank; 4895 } else if (IS_GEN3(dev_priv)) { 4896 dev->driver->irq_preinstall = i915_irq_reset; 4897 dev->driver->irq_postinstall = i915_irq_postinstall; 4898 dev->driver->irq_uninstall = i915_irq_reset; 4899 dev->driver->irq_handler = i915_irq_handler; 4900 dev->driver->enable_vblank = i8xx_enable_vblank; 4901 dev->driver->disable_vblank = i8xx_disable_vblank; 4902 } else { 4903 dev->driver->irq_preinstall = i965_irq_reset; 4904 dev->driver->irq_postinstall = i965_irq_postinstall; 4905 dev->driver->irq_uninstall = i965_irq_reset; 4906 dev->driver->irq_handler = i965_irq_handler; 4907 dev->driver->enable_vblank = i965_enable_vblank; 4908 dev->driver->disable_vblank = i965_disable_vblank; 4909 } 4910 if (I915_HAS_HOTPLUG(dev_priv)) 4911 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 4912 } 4913 } 4914 4915 /** 4916 * intel_irq_fini - deinitializes IRQ support 4917 * @i915: i915 device instance 4918 * 4919 * This function deinitializes all the IRQ support. 4920 */ 4921 void intel_irq_fini(struct drm_i915_private *i915) 4922 { 4923 int i; 4924 4925 for (i = 0; i < MAX_L3_SLICES; ++i) 4926 kfree(i915->l3_parity.remap_info[i]); 4927 } 4928 4929 /** 4930 * intel_irq_install - enables the hardware interrupt 4931 * @dev_priv: i915 device instance 4932 * 4933 * This function enables the hardware interrupt handling, but leaves the hotplug 4934 * handling still disabled. It is called after intel_irq_init(). 4935 * 4936 * In the driver load and resume code we need working interrupts in a few places 4937 * but don't want to deal with the hassle of concurrent probe and hotplug 4938 * workers. Hence the split into this two-stage approach. 4939 */ 4940 int intel_irq_install(struct drm_i915_private *dev_priv) 4941 { 4942 /* 4943 * We enable some interrupt sources in our postinstall hooks, so mark 4944 * interrupts as enabled _before_ actually enabling them to avoid 4945 * special cases in our ordering checks. 4946 */ 4947 dev_priv->runtime_pm.irqs_enabled = true; 4948 4949 return drm_irq_install(&dev_priv->drm, dev_priv->drm.pdev->irq); 4950 } 4951 4952 /** 4953 * intel_irq_uninstall - finilizes all irq handling 4954 * @dev_priv: i915 device instance 4955 * 4956 * This stops interrupt and hotplug handling and unregisters and frees all 4957 * resources acquired in the init functions. 4958 */ 4959 void intel_irq_uninstall(struct drm_i915_private *dev_priv) 4960 { 4961 drm_irq_uninstall(&dev_priv->drm); 4962 intel_hpd_cancel_work(dev_priv); 4963 dev_priv->runtime_pm.irqs_enabled = false; 4964 } 4965 4966 /** 4967 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling 4968 * @dev_priv: i915 device instance 4969 * 4970 * This function is used to disable interrupts at runtime, both in the runtime 4971 * pm and the system suspend/resume code. 4972 */ 4973 void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv) 4974 { 4975 dev_priv->drm.driver->irq_uninstall(&dev_priv->drm); 4976 dev_priv->runtime_pm.irqs_enabled = false; 4977 synchronize_irq(dev_priv->drm.irq); 4978 } 4979 4980 /** 4981 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling 4982 * @dev_priv: i915 device instance 4983 * 4984 * This function is used to enable interrupts at runtime, both in the runtime 4985 * pm and the system suspend/resume code. 4986 */ 4987 void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv) 4988 { 4989 dev_priv->runtime_pm.irqs_enabled = true; 4990 dev_priv->drm.driver->irq_preinstall(&dev_priv->drm); 4991 dev_priv->drm.driver->irq_postinstall(&dev_priv->drm); 4992 } 4993