/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

/**
 * DOC: interrupt handling
 *
 * These functions provide the basic support for enabling and disabling the
 * interrupt handling. There's a lot more functionality in i915_irq.c and
 * related files, but that will be described in separate chapters.
 */

static const u32 hpd_ilk[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG,
};

static const u32 hpd_ivb[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
};

static const u32 hpd_bdw[HPD_NUM_PINS] = {
	[HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG,
};

static const u32 hpd_ibx[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_spt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
	[HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT
};

static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

/* BXT hpd list */
static const u32 hpd_bxt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = BXT_DE_PORT_HP_DDIA,
	[HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
	[HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
};

static const u32 hpd_gen11[HPD_NUM_PINS] = {
	[HPD_PORT_C] = GEN11_TC1_HOTPLUG | GEN11_TBT1_HOTPLUG,
	[HPD_PORT_D] = GEN11_TC2_HOTPLUG | GEN11_TBT2_HOTPLUG,
	[HPD_PORT_E] = GEN11_TC3_HOTPLUG | GEN11_TBT3_HOTPLUG,
	[HPD_PORT_F] = GEN11_TC4_HOTPLUG | GEN11_TBT4_HOTPLUG
};

static const u32 hpd_icp[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_DDIA_HOTPLUG_ICP,
	[HPD_PORT_B] = SDE_DDIB_HOTPLUG_ICP,
	[HPD_PORT_C] = SDE_TC1_HOTPLUG_ICP,
	[HPD_PORT_D] = SDE_TC2_HOTPLUG_ICP,
	[HPD_PORT_E] = SDE_TC3_HOTPLUG_ICP,
	[HPD_PORT_F] = SDE_TC4_HOTPLUG_ICP
};
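
/*
 * The GEN*_IRQ_RESET macros below all follow the same pattern: mask
 * everything in IMR, disable generation in IER, then clear any latched
 * bits in IIR. The IIR clear (and its posting read) is done twice
 * because, as noted below, IIR can queue up a second event behind the
 * one currently latched.
 */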
/* IIR can theoretically queue up two events. Be paranoid. */
#define GEN8_IRQ_RESET_NDX(type, which) do { \
	I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), 0); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
} while (0)

#define GEN3_IRQ_RESET(type) do { \
	I915_WRITE(type##IMR, 0xffffffff); \
	POSTING_READ(type##IMR); \
	I915_WRITE(type##IER, 0); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
} while (0)

#define GEN2_IRQ_RESET(type) do { \
	I915_WRITE16(type##IMR, 0xffff); \
	POSTING_READ16(type##IMR); \
	I915_WRITE16(type##IER, 0); \
	I915_WRITE16(type##IIR, 0xffff); \
	POSTING_READ16(type##IIR); \
	I915_WRITE16(type##IIR, 0xffff); \
	POSTING_READ16(type##IIR); \
} while (0)

/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
static void gen3_assert_iir_is_zero(struct drm_i915_private *dev_priv,
				    i915_reg_t reg)
{
	u32 val = I915_READ(reg);

	if (val == 0)
		return;

	WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
	     i915_mmio_reg_offset(reg), val);
	I915_WRITE(reg, 0xffffffff);
	POSTING_READ(reg);
	I915_WRITE(reg, 0xffffffff);
	POSTING_READ(reg);
}

static void gen2_assert_iir_is_zero(struct drm_i915_private *dev_priv,
				    i915_reg_t reg)
{
	u16 val = I915_READ16(reg);

	if (val == 0)
		return;

	WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
	     i915_mmio_reg_offset(reg), val);
	I915_WRITE16(reg, 0xffff);
	POSTING_READ16(reg);
	I915_WRITE16(reg, 0xffff);
	POSTING_READ16(reg);
}

#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
	gen3_assert_iir_is_zero(dev_priv, GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
	I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
} while (0)

#define GEN3_IRQ_INIT(type, imr_val, ier_val) do { \
	gen3_assert_iir_is_zero(dev_priv, type##IIR); \
	I915_WRITE(type##IER, (ier_val)); \
	I915_WRITE(type##IMR, (imr_val)); \
	POSTING_READ(type##IMR); \
} while (0)

#define GEN2_IRQ_INIT(type, imr_val, ier_val) do { \
	gen2_assert_iir_is_zero(dev_priv, type##IIR); \
	I915_WRITE16(type##IER, (ier_val)); \
	I915_WRITE16(type##IMR, (imr_val)); \
	POSTING_READ16(type##IMR); \
} while (0)

static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);

/* For display hotplug interrupt */
static inline void
i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
				     uint32_t mask,
				     uint32_t bits)
{
	uint32_t val;

	lockdep_assert_held(&dev_priv->irq_lock);
	WARN_ON(bits & ~mask);

	val = I915_READ(PORT_HOTPLUG_EN);
	val &= ~mask;
	val |= bits;
	I915_WRITE(PORT_HOTPLUG_EN, val);
}

/**
 * i915_hotplug_interrupt_update - update hotplug interrupt enable
 * @dev_priv: driver private
 * @mask: bits to update
 * @bits: bits to enable
 * NOTE: the HPD enable bits are modified both inside and outside
 * of an interrupt context. To avoid read-modify-write cycles
 * interfering with each other, these bits are protected by a
 * spinlock. Since this function is usually not called from a context
 * where the lock is held already, this function acquires the lock
 * itself. A non-locking version is also available.
 */
void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
				   uint32_t mask,
				   uint32_t bits)
{
	spin_lock_irq(&dev_priv->irq_lock);
	i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
	spin_unlock_irq(&dev_priv->irq_lock);
}

static u32
gen11_gt_engine_identity(struct drm_i915_private * const i915,
			 const unsigned int bank, const unsigned int bit);

static bool gen11_reset_one_iir(struct drm_i915_private * const i915,
				const unsigned int bank,
				const unsigned int bit)
{
	void __iomem * const regs = i915->regs;
	u32 dw;

	lockdep_assert_held(&i915->irq_lock);

	dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
	if (dw & BIT(bit)) {
		/*
		 * According to the BSpec, DW_IIR bits cannot be cleared without
		 * first servicing the Selector & Shared IIR registers.
		 */
		gen11_gt_engine_identity(i915, bank, bit);

		/*
		 * We locked GT INT DW by reading it. If we want to (try
		 * to) recover from this successfully, we need to clear
		 * our bit, otherwise we are locking the register for
		 * everybody.
		 */
		raw_reg_write(regs, GEN11_GT_INTR_DW(bank), BIT(bit));

		return true;
	}

	return false;
}

/**
 * ilk_update_display_irq - update DEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ilk_update_display_irq(struct drm_i915_private *dev_priv,
			    uint32_t interrupt_mask,
			    uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->irq_mask) {
		dev_priv->irq_mask = new_val;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	lockdep_assert_held(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	dev_priv->gt_irq_mask &= ~interrupt_mask;
	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
}

void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, mask);
	POSTING_READ_FW(GTIMR);
}

void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, 0);
}

static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv)
{
	WARN_ON_ONCE(INTEL_GEN(dev_priv) >= 11);

	return INTEL_GEN(dev_priv) >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
}
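
/*
 * On gen11 there is no single PM IIR register for gen6_pm_iir() above to
 * return (hence the WARN): pending PM bits are handled through the banked
 * GEN11_GT_INTR_DW/selector flow, see gen11_reset_one_iir(). The IMR/IER
 * equivalents still exist and map to the GPM/WGBOXPERF registers returned
 * by the helpers below.
 */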

static i915_reg_t gen6_pm_imr(struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) >= 11)
		return GEN11_GPM_WGBOXPERF_INTR_MASK;
	else if (INTEL_GEN(dev_priv) >= 8)
		return GEN8_GT_IMR(2);
	else
		return GEN6_PMIMR;
}

static i915_reg_t gen6_pm_ier(struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) >= 11)
		return GEN11_GPM_WGBOXPERF_INTR_ENABLE;
	else if (INTEL_GEN(dev_priv) >= 8)
		return GEN8_GT_IER(2);
	else
		return GEN6_PMIER;
}

/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	lockdep_assert_held(&dev_priv->irq_lock);

	new_val = dev_priv->pm_imr;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_imr) {
		dev_priv->pm_imr = new_val;
		I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_imr);
		POSTING_READ(gen6_pm_imr(dev_priv));
	}
}

void gen6_unmask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	snb_update_pm_irq(dev_priv, mask, mask);
}

static void __gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	snb_update_pm_irq(dev_priv, mask, 0);
}

void gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	__gen6_mask_pm_irq(dev_priv, mask);
}

static void gen6_reset_pm_iir(struct drm_i915_private *dev_priv, u32 reset_mask)
{
	i915_reg_t reg = gen6_pm_iir(dev_priv);

	lockdep_assert_held(&dev_priv->irq_lock);

	/* IIR can queue up to two events, so clear it twice (see the reset macros above). */
	I915_WRITE(reg, reset_mask);
	I915_WRITE(reg, reset_mask);
	POSTING_READ(reg);
}

static void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, u32 enable_mask)
{
	lockdep_assert_held(&dev_priv->irq_lock);

	dev_priv->pm_ier |= enable_mask;
	I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->pm_ier);
	gen6_unmask_pm_irq(dev_priv, enable_mask);
	/* unmask_pm_irq provides an implicit barrier (POSTING_READ) */
}

static void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, u32 disable_mask)
{
	lockdep_assert_held(&dev_priv->irq_lock);

	dev_priv->pm_ier &= ~disable_mask;
	__gen6_mask_pm_irq(dev_priv, disable_mask);
	I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->pm_ier);
	/* though a barrier is missing here, we don't really need one */
}

void gen11_reset_rps_interrupts(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);

	while (gen11_reset_one_iir(dev_priv, 0, GEN11_GTPM))
		;

	dev_priv->gt_pm.rps.pm_iir = 0;

	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	gen6_reset_pm_iir(dev_priv, dev_priv->pm_rps_events);
	dev_priv->gt_pm.rps.pm_iir = 0;
	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
{
	struct intel_rps *rps = &dev_priv->gt_pm.rps;

	if (READ_ONCE(rps->interrupts_enabled))
		return;

	spin_lock_irq(&dev_priv->irq_lock);
	WARN_ON_ONCE(rps->pm_iir);

	if (INTEL_GEN(dev_priv) >= 11)
		WARN_ON_ONCE(gen11_reset_one_iir(dev_priv, 0, GEN11_GTPM));
	else
		WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);

	rps->interrupts_enabled = true;
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);

	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
{
	struct intel_rps *rps = &dev_priv->gt_pm.rps;

	if (!READ_ONCE(rps->interrupts_enabled))
		return;

	spin_lock_irq(&dev_priv->irq_lock);
	rps->interrupts_enabled = false;

	I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0u));

	gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);

	spin_unlock_irq(&dev_priv->irq_lock);
	synchronize_irq(dev_priv->drm.irq);

	/* Now that we will not be generating any more work, flush any
	 * outstanding tasks. As we are called on the RPS idle path,
	 * we will reset the GPU to minimum frequencies, so the current
	 * state of the worker can be discarded.
	 */
	cancel_work_sync(&rps->work);
	if (INTEL_GEN(dev_priv) >= 11)
		gen11_reset_rps_interrupts(dev_priv);
	else
		gen6_reset_rps_interrupts(dev_priv);
}

void gen9_reset_guc_interrupts(struct drm_i915_private *dev_priv)
{
	assert_rpm_wakelock_held(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	gen6_reset_pm_iir(dev_priv, dev_priv->pm_guc_events);
	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv)
{
	assert_rpm_wakelock_held(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	if (!dev_priv->guc.interrupts_enabled) {
		WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) &
			     dev_priv->pm_guc_events);
		dev_priv->guc.interrupts_enabled = true;
		gen6_enable_pm_irq(dev_priv, dev_priv->pm_guc_events);
	}
	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv)
{
	assert_rpm_wakelock_held(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	dev_priv->guc.interrupts_enabled = false;

	gen6_disable_pm_irq(dev_priv, dev_priv->pm_guc_events);

	spin_unlock_irq(&dev_priv->irq_lock);
	synchronize_irq(dev_priv->drm.irq);

	gen9_reset_guc_interrupts(dev_priv);
}

/**
 * bdw_update_port_irq - update DE port interrupt
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
				uint32_t interrupt_mask,
				uint32_t enabled_irq_mask)
{
	uint32_t new_val;
	uint32_t old_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	old_val = I915_READ(GEN8_DE_PORT_IMR);

	new_val = old_val;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != old_val) {
		I915_WRITE(GEN8_DE_PORT_IMR, new_val);
		POSTING_READ(GEN8_DE_PORT_IMR);
	}
}

/**
 * bdw_update_pipe_irq - update DE pipe interrupt
 * @dev_priv: driver private
 * @pipe: pipe whose interrupt to update
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
			 enum pipe pipe,
			 uint32_t interrupt_mask,
			 uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->de_irq_mask[pipe];
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->de_irq_mask[pipe]) {
		dev_priv->de_irq_mask[pipe] = new_val;
		I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
		POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
	}
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
				  uint32_t interrupt_mask,
				  uint32_t enabled_irq_mask)
{
	uint32_t sdeimr = I915_READ(SDEIMR);

	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	lockdep_assert_held(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}
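
/*
 * PIPESTAT registers pack the interrupt enable bits in the upper 16 bits
 * and the corresponding status bits in the lower 16 bits, which is why
 * i915_pipestat_enable_mask() below starts from status_mask << 16 and why
 * the enable/disable helpers write both halves back in a single register
 * write.
 */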

u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
			      enum pipe pipe)
{
	u32 status_mask = dev_priv->pipestat_irq_mask[pipe];
	u32 enable_mask = status_mask << 16;

	lockdep_assert_held(&dev_priv->irq_lock);

	if (INTEL_GEN(dev_priv) < 5)
		goto out;

	/*
	 * On pipe A we don't support the PSR interrupt yet,
	 * on pipe B and C the same bit MBZ.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
		return 0;
	/*
	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
	 * A the same bit is for perf counters which we don't use either.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
		return 0;

	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

out:
	WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		  status_mask & ~PIPESTAT_INT_STATUS_MASK,
		  "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		  pipe_name(pipe), enable_mask, status_mask);

	return enable_mask;
}

void i915_enable_pipestat(struct drm_i915_private *dev_priv,
			  enum pipe pipe, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 enable_mask;

	WARN_ONCE(status_mask & ~PIPESTAT_INT_STATUS_MASK,
		  "pipe %c: status_mask=0x%x\n",
		  pipe_name(pipe), status_mask);

	lockdep_assert_held(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == status_mask)
		return;

	dev_priv->pipestat_irq_mask[pipe] |= status_mask;
	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

	I915_WRITE(reg, enable_mask | status_mask);
	POSTING_READ(reg);
}

void i915_disable_pipestat(struct drm_i915_private *dev_priv,
			   enum pipe pipe, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 enable_mask;

	WARN_ONCE(status_mask & ~PIPESTAT_INT_STATUS_MASK,
		  "pipe %c: status_mask=0x%x\n",
		  pipe_name(pipe), status_mask);

	lockdep_assert_held(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == 0)
		return;

	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

	I915_WRITE(reg, enable_mask | status_mask);
	POSTING_READ(reg);
}

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 * @dev_priv: i915 device private
 */
static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
{
	if (!dev_priv->opregion.asle || !IS_MOBILE(dev_priv))
		return;

	spin_lock_irq(&dev_priv->irq_lock);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
	if (INTEL_GEN(dev_priv) >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_STATUS);

	spin_unlock_irq(&dev_priv->irq_lock);
}

/*
 * This timing diagram depicts the video signal in and
 * around the vertical blanking period.
 *
 * Assumptions about the fictitious mode used in this example:
 *  vblank_start >= 3
 *  vsync_start = vblank_start + 1
 *  vsync_end = vblank_start + 2
 *  vtotal = vblank_start + 3
 *
 *           start of vblank:
 *           latch double buffered registers
 *           increment frame counter (ctg+)
 *           generate start of vblank interrupt (gen4+)
 *           |
 *           |          frame start:
 *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
 *           |          may be shifted forward 1-3 extra lines via PIPECONF
 *           |          |
 *           |          |  start of vsync:
 *           |          |  generate vsync interrupt
 *           |          |  |
 * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
 *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
 * ----va---> <-----------------vb--------------------> <--------va-------------
 *       |          |       <----vs----->                     |
 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
 *       |          |                                         |
 *       last visible pixel                                   first visible pixel
 *                  |                                         increment frame counter (gen3/4)
 *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
 *
 * x  = horizontal active
 * _  = horizontal blanking
 * hs = horizontal sync
 * va = vertical active
 * vb = vertical blanking
 * vs = vertical sync
 * vbs = vblank_start (number)
 *
 * Summary:
 * - most events happen at the start of horizontal sync
 * - frame start happens at the start of horizontal blank, 1-4 lines
 *   (depending on PIPECONF settings) after the start of vblank
 * - gen3/4 pixel and frame counter are synchronized with the start
 *   of horizontal active on the first line of vertical active
 */

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	i915_reg_t high_frame, low_frame;
	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
	const struct drm_display_mode *mode = &dev->vblank[pipe].hwmode;
	unsigned long irqflags;

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vbl_start = mode->crtc_vblank_start;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vbl_start = DIV_ROUND_UP(vbl_start, 2);

	/* Convert to pixel count */
	vbl_start *= htotal;

	/* Start of vblank event occurs at start of hsync */
	vbl_start -= htotal - hsync_start;

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK;
		low = I915_READ_FW(low_frame);
		high2 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * The frame counter increments at beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}

static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
}

/*
 * On certain encoders on certain platforms, the pipe scanline register
 * will not work to get the scanline, either because the timings are
 * driven from the PORT or because of issues with scanline register
 * updates. This function will use the Framestamp and current timestamp
 * registers to calculate the scanline.
 */
static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct drm_vblank_crtc *vblank =
		&crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
	const struct drm_display_mode *mode = &vblank->hwmode;
	u32 vblank_start = mode->crtc_vblank_start;
	u32 vtotal = mode->crtc_vtotal;
	u32 htotal = mode->crtc_htotal;
	u32 clock = mode->crtc_clock;
	u32 scanline, scan_prev_time, scan_curr_time, scan_post_time;

	/*
	 * To avoid the race condition where we might cross into the
	 * next vblank just between the PIPE_FRMTMSTMP and TIMESTAMP_CTR
	 * reads, make sure we read PIPE_FRMTMSTMP and TIMESTAMP_CTR
	 * during the same frame.
	 */
	do {
		/*
		 * This field provides read back of the display
		 * pipe frame time stamp. The time stamp value
		 * is sampled at every start of vertical blank.
		 */
		scan_prev_time = I915_READ_FW(PIPE_FRMTMSTMP(crtc->pipe));

		/*
		 * The TIMESTAMP_CTR register has the current
		 * time stamp value.
		 */
		scan_curr_time = I915_READ_FW(IVB_TIMESTAMP_CTR);

		scan_post_time = I915_READ_FW(PIPE_FRMTMSTMP(crtc->pipe));
	} while (scan_post_time != scan_prev_time);

	scanline = div_u64(mul_u32_u32(scan_curr_time - scan_prev_time,
				       clock), 1000 * htotal);
	scanline = min(scanline, vtotal - 1);
	scanline = (scanline + vblank_start) % vtotal;

	return scanline;
}

/* I915_READ_FW, only for fast reads of display block, no need for forcewake etc. */
static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct drm_display_mode *mode;
	struct drm_vblank_crtc *vblank;
	enum pipe pipe = crtc->pipe;
	int position, vtotal;

	if (!crtc->active)
		return -1;

	vblank = &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
	mode = &vblank->hwmode;

	if (mode->private_flags & I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP)
		return __intel_get_crtc_scanline_from_timestamp(crtc);

	vtotal = mode->crtc_vtotal;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vtotal /= 2;

	if (IS_GEN2(dev_priv))
		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
	else
		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

	/*
	 * On HSW, the DSL reg (0x70000) appears to return 0 if we
	 * read it just before the start of vblank. So try it again
	 * so we don't accidentally end up spanning a vblank frame
	 * increment, causing the pipe_update_end() code to squawk at us.
	 *
	 * The nature of this problem means we can't simply check the ISR
	 * bit and return the vblank start value; nor can we use the scanline
	 * debug register in the transcoder as it appears to have the same
	 * problem. We may need to extend this to include other platforms,
	 * but so far testing only shows the problem on HSW.
	 */
	if (HAS_DDI(dev_priv) && !position) {
		int i, temp;

		for (i = 0; i < 100; i++) {
			udelay(1);
			temp = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
			if (temp != position) {
				position = temp;
				break;
			}
		}
	}

	/*
	 * See update_scanline_offset() for the details on the
	 * scanline_offset adjustment.
	 */
	return (position + crtc->scanline_offset) % vtotal;
}

static bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
				     bool in_vblank_irq, int *vpos, int *hpos,
				     ktime_t *stime, ktime_t *etime,
				     const struct drm_display_mode *mode)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv,
								pipe);
	int position;
	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
	unsigned long irqflags;

	if (WARN_ON(!mode->crtc_clock)) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return false;
	}

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = __intel_get_crtc_scanline(intel_crtc);
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (I915_READ_FW(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;

		/*
		 * In interlaced modes, the pixel counter counts all pixels,
		 * so one field will have htotal more pixels. In order to avoid
		 * the reported position from jumping backwards when the pixel
		 * counter is beyond the length of the shorter field, just
		 * clamp the position to the length of the shorter field. This
		 * matches how the scanline counter based position works since
		 * the scanline counter doesn't count the two half lines.
		 */
		if (position >= vtotal)
			position = vtotal - 1;

		/*
		 * Start of vblank interrupt is triggered at start of hsync,
		 * just prior to the first active line of vblank. However we
		 * consider lines to start at the leading edge of horizontal
		 * active. So, should we get here before we've crossed into
		 * the horizontal active of the first line in vblank, we would
		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
		 * always add htotal-hsync_start to the current pixel position.
		 */
		position = (position + htotal - hsync_start) % vtotal;
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;

	if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
		*vpos = position;
		*hpos = 0;
	} else {
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	return true;
}

int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	unsigned long irqflags;
	int position;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	position = __intel_get_crtc_scanline(crtc);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	return position;
}

static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv)
{
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;

	spin_lock(&mchdev_lock);

	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev_priv, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	spin_unlock(&mchdev_lock);

	return;
}

static void notify_ring(struct intel_engine_cs *engine)
{
	const u32 seqno = intel_engine_get_seqno(engine);
	struct i915_request *rq = NULL;
	struct task_struct *tsk = NULL;
	struct intel_wait *wait;

	if (unlikely(!engine->breadcrumbs.irq_armed))
		return;

	rcu_read_lock();

	spin_lock(&engine->breadcrumbs.irq_lock);
	wait = engine->breadcrumbs.irq_wait;
	if (wait) {
		/*
		 * We use a callback from the dma-fence to submit
		 * requests after waiting on our own requests. To
		 * ensure minimum delay in queuing the next request to
		 * hardware, signal the fence now rather than wait for
		 * the signaler to be woken up. We still wake up the
		 * waiter in order to handle the irq-seqno coherency
		 * issues (we may receive the interrupt before the
		 * seqno is written, see __i915_request_irq_complete())
		 * and to handle coalescing of multiple seqno updates
		 * and many waiters.
		 */
		if (i915_seqno_passed(seqno, wait->seqno)) {
			struct i915_request *waiter = wait->request;

			if (waiter &&
			    !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
				      &waiter->fence.flags) &&
			    intel_wait_check_request(wait, waiter))
				rq = i915_request_get(waiter);

			tsk = wait->tsk;
		} else {
			if (engine->irq_seqno_barrier &&
			    i915_seqno_passed(seqno, wait->seqno - 1)) {
				set_bit(ENGINE_IRQ_BREADCRUMB,
					&engine->irq_posted);
				tsk = wait->tsk;
			}
		}

		engine->breadcrumbs.irq_count++;
	} else {
		if (engine->breadcrumbs.irq_armed)
			__intel_engine_disarm_breadcrumbs(engine);
	}
	spin_unlock(&engine->breadcrumbs.irq_lock);

	if (rq) {
		spin_lock(&rq->lock);
		dma_fence_signal_locked(&rq->fence);
		GEM_BUG_ON(!i915_request_completed(rq));
		spin_unlock(&rq->lock);

		i915_request_put(rq);
	}

	if (tsk && tsk->state & TASK_NORMAL)
		wake_up_process(tsk);

	rcu_read_unlock();

	trace_intel_engine_notify(engine, wait);
}

static void vlv_c0_read(struct drm_i915_private *dev_priv,
			struct intel_rps_ei *ei)
{
	ei->ktime = ktime_get_raw();
	ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT);
	ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
}

void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
{
	memset(&dev_priv->gt_pm.rps.ei, 0, sizeof(dev_priv->gt_pm.rps.ei));
}

static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	const struct intel_rps_ei *prev = &rps->ei;
	struct intel_rps_ei now;
	u32 events = 0;

	if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0)
		return 0;

	vlv_c0_read(dev_priv, &now);

	if (prev->ktime) {
		u64 time, c0;
		u32 render, media;

		time = ktime_us_delta(now.ktime, prev->ktime);

		time *= dev_priv->czclk_freq;

		/* Workload can be split between render + media,
		 * e.g. SwapBuffers being blitted in X after being rendered in
		 * mesa. To account for this we need to combine both engines
		 * into our activity counter.
		 */
		render = now.render_c0 - prev->render_c0;
		media = now.media_c0 - prev->media_c0;
		c0 = max(render, media);
		c0 *= 1000 * 100 << 8; /* to usecs and scale to threshold% */

		if (c0 > time * rps->power.up_threshold)
			events = GEN6_PM_RP_UP_THRESHOLD;
		else if (c0 < time * rps->power.down_threshold)
			events = GEN6_PM_RP_DOWN_THRESHOLD;
	}

	rps->ei = now;
	return events;
}

static void gen6_pm_rps_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, gt_pm.rps.work);
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	bool client_boost = false;
	int new_delay, adj, min, max;
	u32 pm_iir = 0;

	spin_lock_irq(&dev_priv->irq_lock);
	if (rps->interrupts_enabled) {
		pm_iir = fetch_and_zero(&rps->pm_iir);
		client_boost = atomic_read(&rps->num_waiters);
	}
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Make sure we didn't queue anything we're not going to process. */
	WARN_ON(pm_iir & ~dev_priv->pm_rps_events);
	if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
		goto out;

	mutex_lock(&dev_priv->pcu_lock);

	pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);

	adj = rps->last_adj;
	new_delay = rps->cur_freq;
	min = rps->min_freq_softlimit;
	max = rps->max_freq_softlimit;
	if (client_boost)
		max = rps->max_freq;
	if (client_boost && new_delay < rps->boost_freq) {
		new_delay = rps->boost_freq;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else /* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;

		if (new_delay >= rps->max_freq_softlimit)
			adj = 0;
	} else if (client_boost) {
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
		if (rps->cur_freq > rps->efficient_freq)
			new_delay = rps->efficient_freq;
		else if (rps->cur_freq > rps->min_freq_softlimit)
			new_delay = rps->min_freq_softlimit;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
		if (adj < 0)
			adj *= 2;
		else /* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;

		if (new_delay <= rps->min_freq_softlimit)
			adj = 0;
	} else { /* unknown event */
		adj = 0;
	}

	rps->last_adj = adj;

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	new_delay += adj;
	new_delay = clamp_t(int, new_delay, min, max);

	if (intel_set_rps(dev_priv, new_delay)) {
		DRM_DEBUG_DRIVER("Failed to set new GPU frequency\n");
		rps->last_adj = 0;
	}

	mutex_unlock(&dev_priv->pcu_lock);

out:
	/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
	spin_lock_irq(&dev_priv->irq_lock);
	if (rps->interrupts_enabled)
		gen6_unmask_pm_irq(dev_priv, dev_priv->pm_rps_events);
	spin_unlock_irq(&dev_priv->irq_lock);
}


/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * the same row is likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	uint32_t misccpctl;
	uint8_t slice = 0;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->drm.struct_mutex);

	/* If we've screwed up tracking, just let the interrupt fire again */
	if (WARN_ON(!dev_priv->l3_parity.which_slice))
		goto out;

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		i915_reg_t reg;

		slice--;
		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1<<slice);

		reg = GEN7_L3CDERRST1(slice);

		error_status = I915_READ(reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		POSTING_READ(reg);

		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

		kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj,
				   KOBJ_CHANGE, parity_event);

		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			  slice, row, bank, subbank);

		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
	}

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

out:
	WARN_ON(dev_priv->l3_parity.which_slice);
	spin_lock_irq(&dev_priv->irq_lock);
	gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
	spin_unlock_irq(&dev_priv->irq_lock);

	mutex_unlock(&dev_priv->drm.struct_mutex);
}

static void ivybridge_parity_error_irq_handler(struct drm_i915_private *dev_priv,
					       u32 iir)
{
	if (!HAS_L3_DPF(dev_priv))
		return;

	spin_lock(&dev_priv->irq_lock);
	gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
	spin_unlock(&dev_priv->irq_lock);

	iir &= GT_PARITY_ERROR(dev_priv);
	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
		dev_priv->l3_parity.which_slice |= 1 << 1;

	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
		dev_priv->l3_parity.which_slice |= 1 << 0;

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}

static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir & GT_RENDER_USER_INTERRUPT)
		notify_ring(dev_priv->engine[RCS]);
	if (gt_iir & ILK_BSD_USER_INTERRUPT)
		notify_ring(dev_priv->engine[VCS]);
}

static void snb_gt_irq_handler(struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir & GT_RENDER_USER_INTERRUPT)
		notify_ring(dev_priv->engine[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(dev_priv->engine[VCS]);
	if (gt_iir & GT_BLT_USER_INTERRUPT)
		notify_ring(dev_priv->engine[BCS]);

	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
		      GT_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
		DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);

	if (gt_iir & GT_PARITY_ERROR(dev_priv))
		ivybridge_parity_error_irq_handler(dev_priv, gt_iir);
}

static void
gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir)
{
	bool tasklet = false;

	if (iir & GT_CONTEXT_SWITCH_INTERRUPT)
		tasklet = true;

	if (iir & GT_RENDER_USER_INTERRUPT) {
		notify_ring(engine);
		tasklet |= USES_GUC_SUBMISSION(engine->i915);
	}

	if (tasklet)
		tasklet_hi_schedule(&engine->execlists.tasklet);
}

static void gen8_gt_irq_ack(struct drm_i915_private *i915,
			    u32 master_ctl, u32 gt_iir[4])
{
	void __iomem * const regs = i915->regs;

#define GEN8_GT_IRQS (GEN8_GT_RCS_IRQ | \
		      GEN8_GT_BCS_IRQ | \
		      GEN8_GT_VCS1_IRQ | \
		      GEN8_GT_VCS2_IRQ | \
		      GEN8_GT_VECS_IRQ | \
		      GEN8_GT_PM_IRQ | \
		      GEN8_GT_GUC_IRQ)

	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
		gt_iir[0] = raw_reg_read(regs, GEN8_GT_IIR(0));
		if (likely(gt_iir[0]))
			raw_reg_write(regs, GEN8_GT_IIR(0), gt_iir[0]);
	}

	if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
		gt_iir[1] = raw_reg_read(regs, GEN8_GT_IIR(1));
		if (likely(gt_iir[1]))
			raw_reg_write(regs, GEN8_GT_IIR(1), gt_iir[1]);
	}

	if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
		gt_iir[2] = raw_reg_read(regs, GEN8_GT_IIR(2));
		if (likely(gt_iir[2] & (i915->pm_rps_events |
					i915->pm_guc_events)))
			raw_reg_write(regs, GEN8_GT_IIR(2),
				      gt_iir[2] & (i915->pm_rps_events |
						   i915->pm_guc_events));
	}

	if (master_ctl & GEN8_GT_VECS_IRQ) {
		gt_iir[3] = raw_reg_read(regs, GEN8_GT_IIR(3));
		if (likely(gt_iir[3]))
			raw_reg_write(regs, GEN8_GT_IIR(3), gt_iir[3]);
	}
}

static void gen8_gt_irq_handler(struct drm_i915_private *i915,
				u32 master_ctl, u32 gt_iir[4])
{
	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
		gen8_cs_irq_handler(i915->engine[RCS],
				    gt_iir[0] >> GEN8_RCS_IRQ_SHIFT);
		gen8_cs_irq_handler(i915->engine[BCS],
				    gt_iir[0] >> GEN8_BCS_IRQ_SHIFT);
	}

	if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
		gen8_cs_irq_handler(i915->engine[VCS],
				    gt_iir[1] >> GEN8_VCS1_IRQ_SHIFT);
		gen8_cs_irq_handler(i915->engine[VCS2],
				    gt_iir[1] >> GEN8_VCS2_IRQ_SHIFT);
	}

	if (master_ctl & GEN8_GT_VECS_IRQ) {
		gen8_cs_irq_handler(i915->engine[VECS],
				    gt_iir[3] >> GEN8_VECS_IRQ_SHIFT);
	}

	if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
		gen6_rps_irq_handler(i915, gt_iir[2]);
		gen9_guc_irq_handler(i915, gt_iir[2]);
	}
}
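
/*
 * Note the two-phase structure above: gen8_gt_irq_ack() only reads and
 * clears the per-bank GT IIR registers into gt_iir[] (using the raw
 * register accessors), while gen8_gt_irq_handler() then dispatches the
 * latched bits to the engine, RPS and GuC handlers.
 */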

static bool gen11_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_C:
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC1);
	case HPD_PORT_D:
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC2);
	case HPD_PORT_E:
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC3);
	case HPD_PORT_F:
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC4);
	default:
		return false;
	}
}

static bool bxt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_A:
		return val & PORTA_HOTPLUG_LONG_DETECT;
	case HPD_PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case HPD_PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool icp_ddi_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_A:
		return val & ICP_DDIA_HPD_LONG_DETECT;
	case HPD_PORT_B:
		return val & ICP_DDIB_HPD_LONG_DETECT;
	default:
		return false;
	}
}

static bool icp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_C:
		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC1);
	case HPD_PORT_D:
		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC2);
	case HPD_PORT_E:
		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC3);
	case HPD_PORT_F:
		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC4);
	default:
		return false;
	}
}

static bool spt_port_hotplug2_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_E:
		return val & PORTE_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool spt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_A:
		return val & PORTA_HOTPLUG_LONG_DETECT;
	case HPD_PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case HPD_PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	case HPD_PORT_D:
		return val & PORTD_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool ilk_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_A:
		return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool pch_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case HPD_PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	case HPD_PORT_D:
		return val & PORTD_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool i9xx_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_B:
		return val & PORTB_HOTPLUG_INT_LONG_PULSE;
	case HPD_PORT_C:
		return val & PORTC_HOTPLUG_INT_LONG_PULSE;
	case HPD_PORT_D:
		return val & PORTD_HOTPLUG_INT_LONG_PULSE;
	default:
		return false;
	}
}

/*
 * Get a bit mask of pins that have triggered, and which ones may be long.
 * This can be called multiple times with the same masks to accumulate
 * hotplug detection results from several registers.
 *
 * Note that the caller is expected to zero out the masks initially.
 */
static void intel_get_hpd_pins(struct drm_i915_private *dev_priv,
			       u32 *pin_mask, u32 *long_mask,
			       u32 hotplug_trigger, u32 dig_hotplug_reg,
			       const u32 hpd[HPD_NUM_PINS],
			       bool long_pulse_detect(enum hpd_pin pin, u32 val))
{
	enum hpd_pin pin;

	for_each_hpd_pin(pin) {
		if ((hpd[pin] & hotplug_trigger) == 0)
			continue;

		*pin_mask |= BIT(pin);

		if (long_pulse_detect(pin, dig_hotplug_reg))
			*long_mask |= BIT(pin);
	}

	DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x, long 0x%08x\n",
			 hotplug_trigger, dig_hotplug_reg, *pin_mask, *long_mask);

}

static void gmbus_irq_handler(struct drm_i915_private *dev_priv)
{
	wake_up_all(&dev_priv->gmbus_wait_queue);
}

static void dp_aux_irq_handler(struct drm_i915_private *dev_priv)
{
	wake_up_all(&dev_priv->gmbus_wait_queue);
}

#if defined(CONFIG_DEBUG_FS)
static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
					 enum pipe pipe,
					 uint32_t crc0, uint32_t crc1,
					 uint32_t crc2, uint32_t crc3,
					 uint32_t crc4)
{
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
	uint32_t crcs[5];

	spin_lock(&pipe_crc->lock);
	/*
	 * For some not yet identified reason, the first CRC is
	 * bonkers. So let's just wait for the next vblank and read
	 * out the buggy result.
	 *
	 * On GEN8+ sometimes the second CRC is bonkers as well, so
	 * don't trust that one either.
	 */
	if (pipe_crc->skipped <= 0 ||
	    (INTEL_GEN(dev_priv) >= 8 && pipe_crc->skipped == 1)) {
		pipe_crc->skipped++;
		spin_unlock(&pipe_crc->lock);
		return;
	}
	spin_unlock(&pipe_crc->lock);

	crcs[0] = crc0;
	crcs[1] = crc1;
	crcs[2] = crc2;
	crcs[3] = crc3;
	crcs[4] = crc4;
	drm_crtc_add_crc_entry(&crtc->base, true,
			       drm_crtc_accurate_vblank_count(&crtc->base),
			       crcs);
}
#else
static inline void
display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
			     enum pipe pipe,
			     uint32_t crc0, uint32_t crc1,
			     uint32_t crc2, uint32_t crc3,
			     uint32_t crc4) {}
#endif


static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	display_pipe_crc_irq_handler(dev_priv, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     0, 0, 0, 0);
}

static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	display_pipe_crc_irq_handler(dev_priv, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
}

static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	uint32_t res1, res2;

	if (INTEL_GEN(dev_priv) >= 3)
		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
	else
		res1 = 0;

	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
	else
		res2 = 0;

	display_pipe_crc_irq_handler(dev_priv, pipe,
				     I915_READ(PIPE_CRC_RES_RED(pipe)),
				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
				     res1, res2);
}
The RPS events need forcewake, so we add them to a work queue and mask their 1830 * IMR bits until the work is done. Other interrupts can be processed without 1831 * the work queue. */ 1832 static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir) 1833 { 1834 struct intel_rps *rps = &dev_priv->gt_pm.rps; 1835 1836 if (pm_iir & dev_priv->pm_rps_events) { 1837 spin_lock(&dev_priv->irq_lock); 1838 gen6_mask_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events); 1839 if (rps->interrupts_enabled) { 1840 rps->pm_iir |= pm_iir & dev_priv->pm_rps_events; 1841 schedule_work(&rps->work); 1842 } 1843 spin_unlock(&dev_priv->irq_lock); 1844 } 1845 1846 if (INTEL_GEN(dev_priv) >= 8) 1847 return; 1848 1849 if (HAS_VEBOX(dev_priv)) { 1850 if (pm_iir & PM_VEBOX_USER_INTERRUPT) 1851 notify_ring(dev_priv->engine[VECS]); 1852 1853 if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) 1854 DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir); 1855 } 1856 } 1857 1858 static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 gt_iir) 1859 { 1860 if (gt_iir & GEN9_GUC_TO_HOST_INT_EVENT) 1861 intel_guc_to_host_event_handler(&dev_priv->guc); 1862 } 1863 1864 static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv) 1865 { 1866 enum pipe pipe; 1867 1868 for_each_pipe(dev_priv, pipe) { 1869 I915_WRITE(PIPESTAT(pipe), 1870 PIPESTAT_INT_STATUS_MASK | 1871 PIPE_FIFO_UNDERRUN_STATUS); 1872 1873 dev_priv->pipestat_irq_mask[pipe] = 0; 1874 } 1875 } 1876 1877 static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv, 1878 u32 iir, u32 pipe_stats[I915_MAX_PIPES]) 1879 { 1880 int pipe; 1881 1882 spin_lock(&dev_priv->irq_lock); 1883 1884 if (!dev_priv->display_irqs_enabled) { 1885 spin_unlock(&dev_priv->irq_lock); 1886 return; 1887 } 1888 1889 for_each_pipe(dev_priv, pipe) { 1890 i915_reg_t reg; 1891 u32 status_mask, enable_mask, iir_bit = 0; 1892 1893 /* 1894 * PIPESTAT bits get signalled even when the interrupt is 1895 * disabled with the mask bits, and some of the status bits do 1896 * not generate interrupts at all (like the underrun bit). Hence 1897 * we need to be careful that we only handle what we want to 1898 * handle. 1899 */ 1900 1901 /* fifo underruns are filtered in the underrun handler. */ 1902 status_mask = PIPE_FIFO_UNDERRUN_STATUS; 1903 1904 switch (pipe) { 1905 case PIPE_A: 1906 iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT; 1907 break; 1908 case PIPE_B: 1909 iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; 1910 break; 1911 case PIPE_C: 1912 iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT; 1913 break; 1914 } 1915 if (iir & iir_bit) 1916 status_mask |= dev_priv->pipestat_irq_mask[pipe]; 1917 1918 if (!status_mask) 1919 continue; 1920 1921 reg = PIPESTAT(pipe); 1922 pipe_stats[pipe] = I915_READ(reg) & status_mask; 1923 enable_mask = i915_pipestat_enable_mask(dev_priv, pipe); 1924 1925 /* 1926 * Clear the PIPE*STAT regs before the IIR 1927 * 1928 * Toggle the enable bits to make sure we get an 1929 * edge in the ISR pipe event bit if we don't clear 1930 * all the enabled status bits. Otherwise the edge 1931 * triggered IIR on i965/g4x wouldn't notice that 1932 * an interrupt is still pending.
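 *
 * The two writes below implement that toggle: the first write clears the
 * latched status bits (and momentarily writes the enable bits as 0), the
 * second write restores the enable mask computed above.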
1933 */ 1934 if (pipe_stats[pipe]) { 1935 I915_WRITE(reg, pipe_stats[pipe]); 1936 I915_WRITE(reg, enable_mask); 1937 } 1938 } 1939 spin_unlock(&dev_priv->irq_lock); 1940 } 1941 1942 static void i8xx_pipestat_irq_handler(struct drm_i915_private *dev_priv, 1943 u16 iir, u32 pipe_stats[I915_MAX_PIPES]) 1944 { 1945 enum pipe pipe; 1946 1947 for_each_pipe(dev_priv, pipe) { 1948 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS) 1949 drm_handle_vblank(&dev_priv->drm, pipe); 1950 1951 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 1952 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 1953 1954 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 1955 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 1956 } 1957 } 1958 1959 static void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv, 1960 u32 iir, u32 pipe_stats[I915_MAX_PIPES]) 1961 { 1962 bool blc_event = false; 1963 enum pipe pipe; 1964 1965 for_each_pipe(dev_priv, pipe) { 1966 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS) 1967 drm_handle_vblank(&dev_priv->drm, pipe); 1968 1969 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 1970 blc_event = true; 1971 1972 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 1973 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 1974 1975 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 1976 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 1977 } 1978 1979 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 1980 intel_opregion_asle_intr(dev_priv); 1981 } 1982 1983 static void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv, 1984 u32 iir, u32 pipe_stats[I915_MAX_PIPES]) 1985 { 1986 bool blc_event = false; 1987 enum pipe pipe; 1988 1989 for_each_pipe(dev_priv, pipe) { 1990 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS) 1991 drm_handle_vblank(&dev_priv->drm, pipe); 1992 1993 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 1994 blc_event = true; 1995 1996 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 1997 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 1998 1999 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 2000 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2001 } 2002 2003 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 2004 intel_opregion_asle_intr(dev_priv); 2005 2006 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 2007 gmbus_irq_handler(dev_priv); 2008 } 2009 2010 static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv, 2011 u32 pipe_stats[I915_MAX_PIPES]) 2012 { 2013 enum pipe pipe; 2014 2015 for_each_pipe(dev_priv, pipe) { 2016 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS) 2017 drm_handle_vblank(&dev_priv->drm, pipe); 2018 2019 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 2020 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 2021 2022 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 2023 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2024 } 2025 2026 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 2027 gmbus_irq_handler(dev_priv); 2028 } 2029 2030 static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv) 2031 { 2032 u32 hotplug_status = 0, hotplug_status_mask; 2033 int i; 2034 2035 if (IS_G4X(dev_priv) || 2036 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 2037 hotplug_status_mask = HOTPLUG_INT_STATUS_G4X | 2038 DP_AUX_CHANNEL_MASK_INT_STATUS_G4X; 2039 else 2040 hotplug_status_mask = HOTPLUG_INT_STATUS_I915; 2041 2042 /* 2043 * We absolutely have to clear all the pending interrupt 2044 * bits in PORT_HOTPLUG_STAT. 
Otherwise the ISR port 2045 * interrupt bit won't have an edge, and the i965/g4x 2046 * edge triggered IIR will not notice that an interrupt 2047 * is still pending. We can't use PORT_HOTPLUG_EN to 2048 * guarantee the edge as the act of toggling the enable 2049 * bits can itself generate a new hotplug interrupt :( 2050 */ 2051 for (i = 0; i < 10; i++) { 2052 u32 tmp = I915_READ(PORT_HOTPLUG_STAT) & hotplug_status_mask; 2053 2054 if (tmp == 0) 2055 return hotplug_status; 2056 2057 hotplug_status |= tmp; 2058 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); 2059 } 2060 2061 WARN_ONCE(1, 2062 "PORT_HOTPLUG_STAT did not clear (0x%08x)\n", 2063 I915_READ(PORT_HOTPLUG_STAT)); 2064 2065 return hotplug_status; 2066 } 2067 2068 static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv, 2069 u32 hotplug_status) 2070 { 2071 u32 pin_mask = 0, long_mask = 0; 2072 2073 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || 2074 IS_CHERRYVIEW(dev_priv)) { 2075 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X; 2076 2077 if (hotplug_trigger) { 2078 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 2079 hotplug_trigger, hotplug_trigger, 2080 hpd_status_g4x, 2081 i9xx_port_hotplug_long_detect); 2082 2083 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2084 } 2085 2086 if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X) 2087 dp_aux_irq_handler(dev_priv); 2088 } else { 2089 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; 2090 2091 if (hotplug_trigger) { 2092 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 2093 hotplug_trigger, hotplug_trigger, 2094 hpd_status_i915, 2095 i9xx_port_hotplug_long_detect); 2096 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2097 } 2098 } 2099 } 2100 2101 static irqreturn_t valleyview_irq_handler(int irq, void *arg) 2102 { 2103 struct drm_device *dev = arg; 2104 struct drm_i915_private *dev_priv = to_i915(dev); 2105 irqreturn_t ret = IRQ_NONE; 2106 2107 if (!intel_irqs_enabled(dev_priv)) 2108 return IRQ_NONE; 2109 2110 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2111 disable_rpm_wakeref_asserts(dev_priv); 2112 2113 do { 2114 u32 iir, gt_iir, pm_iir; 2115 u32 pipe_stats[I915_MAX_PIPES] = {}; 2116 u32 hotplug_status = 0; 2117 u32 ier = 0; 2118 2119 gt_iir = I915_READ(GTIIR); 2120 pm_iir = I915_READ(GEN6_PMIIR); 2121 iir = I915_READ(VLV_IIR); 2122 2123 if (gt_iir == 0 && pm_iir == 0 && iir == 0) 2124 break; 2125 2126 ret = IRQ_HANDLED; 2127 2128 /* 2129 * Theory on interrupt generation, based on empirical evidence: 2130 * 2131 * x = ((VLV_IIR & VLV_IER) || 2132 * (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) && 2133 * (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE))); 2134 * 2135 * A CPU interrupt will only be raised when 'x' has a 0->1 edge. 2136 * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to 2137 * guarantee the CPU interrupt will be raised again even if we 2138 * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR 2139 * bits this time around. 
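 *
 * The code below follows that recipe: the master enable and VLV_IER are
 * zeroed up front, every IIR is acked (VLV_IIR last, since it mirrors the
 * PIPESTAT/PORT_HOTPLUG_STAT levels), and only then are VLV_IER and the
 * master enable restored.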
2140 */ 2141 I915_WRITE(VLV_MASTER_IER, 0); 2142 ier = I915_READ(VLV_IER); 2143 I915_WRITE(VLV_IER, 0); 2144 2145 if (gt_iir) 2146 I915_WRITE(GTIIR, gt_iir); 2147 if (pm_iir) 2148 I915_WRITE(GEN6_PMIIR, pm_iir); 2149 2150 if (iir & I915_DISPLAY_PORT_INTERRUPT) 2151 hotplug_status = i9xx_hpd_irq_ack(dev_priv); 2152 2153 /* Call regardless, as some status bits might not be 2154 * signalled in iir */ 2155 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats); 2156 2157 if (iir & (I915_LPE_PIPE_A_INTERRUPT | 2158 I915_LPE_PIPE_B_INTERRUPT)) 2159 intel_lpe_audio_irq_handler(dev_priv); 2160 2161 /* 2162 * VLV_IIR is single buffered, and reflects the level 2163 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last. 2164 */ 2165 if (iir) 2166 I915_WRITE(VLV_IIR, iir); 2167 2168 I915_WRITE(VLV_IER, ier); 2169 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); 2170 2171 if (gt_iir) 2172 snb_gt_irq_handler(dev_priv, gt_iir); 2173 if (pm_iir) 2174 gen6_rps_irq_handler(dev_priv, pm_iir); 2175 2176 if (hotplug_status) 2177 i9xx_hpd_irq_handler(dev_priv, hotplug_status); 2178 2179 valleyview_pipestat_irq_handler(dev_priv, pipe_stats); 2180 } while (0); 2181 2182 enable_rpm_wakeref_asserts(dev_priv); 2183 2184 return ret; 2185 } 2186 2187 static irqreturn_t cherryview_irq_handler(int irq, void *arg) 2188 { 2189 struct drm_device *dev = arg; 2190 struct drm_i915_private *dev_priv = to_i915(dev); 2191 irqreturn_t ret = IRQ_NONE; 2192 2193 if (!intel_irqs_enabled(dev_priv)) 2194 return IRQ_NONE; 2195 2196 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2197 disable_rpm_wakeref_asserts(dev_priv); 2198 2199 do { 2200 u32 master_ctl, iir; 2201 u32 pipe_stats[I915_MAX_PIPES] = {}; 2202 u32 hotplug_status = 0; 2203 u32 gt_iir[4]; 2204 u32 ier = 0; 2205 2206 master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL; 2207 iir = I915_READ(VLV_IIR); 2208 2209 if (master_ctl == 0 && iir == 0) 2210 break; 2211 2212 ret = IRQ_HANDLED; 2213 2214 /* 2215 * Theory on interrupt generation, based on empirical evidence: 2216 * 2217 * x = ((VLV_IIR & VLV_IER) || 2218 * ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) && 2219 * (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL))); 2220 * 2221 * A CPU interrupt will only be raised when 'x' has a 0->1 edge. 2222 * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to 2223 * guarantee the CPU interrupt will be raised again even if we 2224 * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL 2225 * bits this time around. 2226 */ 2227 I915_WRITE(GEN8_MASTER_IRQ, 0); 2228 ier = I915_READ(VLV_IER); 2229 I915_WRITE(VLV_IER, 0); 2230 2231 gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir); 2232 2233 if (iir & I915_DISPLAY_PORT_INTERRUPT) 2234 hotplug_status = i9xx_hpd_irq_ack(dev_priv); 2235 2236 /* Call regardless, as some status bits might not be 2237 * signalled in iir */ 2238 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats); 2239 2240 if (iir & (I915_LPE_PIPE_A_INTERRUPT | 2241 I915_LPE_PIPE_B_INTERRUPT | 2242 I915_LPE_PIPE_C_INTERRUPT)) 2243 intel_lpe_audio_irq_handler(dev_priv); 2244 2245 /* 2246 * VLV_IIR is single buffered, and reflects the level 2247 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last. 
2248 */ 2249 if (iir) 2250 I915_WRITE(VLV_IIR, iir); 2251 2252 I915_WRITE(VLV_IER, ier); 2253 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 2254 2255 gen8_gt_irq_handler(dev_priv, master_ctl, gt_iir); 2256 2257 if (hotplug_status) 2258 i9xx_hpd_irq_handler(dev_priv, hotplug_status); 2259 2260 valleyview_pipestat_irq_handler(dev_priv, pipe_stats); 2261 } while (0); 2262 2263 enable_rpm_wakeref_asserts(dev_priv); 2264 2265 return ret; 2266 } 2267 2268 static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv, 2269 u32 hotplug_trigger, 2270 const u32 hpd[HPD_NUM_PINS]) 2271 { 2272 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; 2273 2274 /* 2275 * Somehow the PCH doesn't seem to really ack the interrupt to the CPU 2276 * unless we touch the hotplug register, even if hotplug_trigger is 2277 * zero. Not acking leads to "The master control interrupt lied (SDE)!" 2278 * errors. 2279 */ 2280 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); 2281 if (!hotplug_trigger) { 2282 u32 mask = PORTA_HOTPLUG_STATUS_MASK | 2283 PORTD_HOTPLUG_STATUS_MASK | 2284 PORTC_HOTPLUG_STATUS_MASK | 2285 PORTB_HOTPLUG_STATUS_MASK; 2286 dig_hotplug_reg &= ~mask; 2287 } 2288 2289 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); 2290 if (!hotplug_trigger) 2291 return; 2292 2293 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger, 2294 dig_hotplug_reg, hpd, 2295 pch_port_hotplug_long_detect); 2296 2297 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2298 } 2299 2300 static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) 2301 { 2302 int pipe; 2303 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK; 2304 2305 ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ibx); 2306 2307 if (pch_iir & SDE_AUDIO_POWER_MASK) { 2308 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >> 2309 SDE_AUDIO_POWER_SHIFT); 2310 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n", 2311 port_name(port)); 2312 } 2313 2314 if (pch_iir & SDE_AUX_MASK) 2315 dp_aux_irq_handler(dev_priv); 2316 2317 if (pch_iir & SDE_GMBUS) 2318 gmbus_irq_handler(dev_priv); 2319 2320 if (pch_iir & SDE_AUDIO_HDCP_MASK) 2321 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n"); 2322 2323 if (pch_iir & SDE_AUDIO_TRANS_MASK) 2324 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n"); 2325 2326 if (pch_iir & SDE_POISON) 2327 DRM_ERROR("PCH poison interrupt\n"); 2328 2329 if (pch_iir & SDE_FDI_MASK) 2330 for_each_pipe(dev_priv, pipe) 2331 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 2332 pipe_name(pipe), 2333 I915_READ(FDI_RX_IIR(pipe))); 2334 2335 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE)) 2336 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n"); 2337 2338 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR)) 2339 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n"); 2340 2341 if (pch_iir & SDE_TRANSA_FIFO_UNDER) 2342 intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A); 2343 2344 if (pch_iir & SDE_TRANSB_FIFO_UNDER) 2345 intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_B); 2346 } 2347 2348 static void ivb_err_int_handler(struct drm_i915_private *dev_priv) 2349 { 2350 u32 err_int = I915_READ(GEN7_ERR_INT); 2351 enum pipe pipe; 2352 2353 if (err_int & ERR_INT_POISON) 2354 DRM_ERROR("Poison interrupt\n"); 2355 2356 for_each_pipe(dev_priv, pipe) { 2357 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) 2358 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2359 2360 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) { 2361 if (IS_IVYBRIDGE(dev_priv)) 2362 ivb_pipe_crc_irq_handler(dev_priv, pipe); 2363 else 2364
hsw_pipe_crc_irq_handler(dev_priv, pipe); 2365 } 2366 } 2367 2368 I915_WRITE(GEN7_ERR_INT, err_int); 2369 } 2370 2371 static void cpt_serr_int_handler(struct drm_i915_private *dev_priv) 2372 { 2373 u32 serr_int = I915_READ(SERR_INT); 2374 enum pipe pipe; 2375 2376 if (serr_int & SERR_INT_POISON) 2377 DRM_ERROR("PCH poison interrupt\n"); 2378 2379 for_each_pipe(dev_priv, pipe) 2380 if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe)) 2381 intel_pch_fifo_underrun_irq_handler(dev_priv, pipe); 2382 2383 I915_WRITE(SERR_INT, serr_int); 2384 } 2385 2386 static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) 2387 { 2388 int pipe; 2389 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; 2390 2391 ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_cpt); 2392 2393 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) { 2394 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> 2395 SDE_AUDIO_POWER_SHIFT_CPT); 2396 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n", 2397 port_name(port)); 2398 } 2399 2400 if (pch_iir & SDE_AUX_MASK_CPT) 2401 dp_aux_irq_handler(dev_priv); 2402 2403 if (pch_iir & SDE_GMBUS_CPT) 2404 gmbus_irq_handler(dev_priv); 2405 2406 if (pch_iir & SDE_AUDIO_CP_REQ_CPT) 2407 DRM_DEBUG_DRIVER("Audio CP request interrupt\n"); 2408 2409 if (pch_iir & SDE_AUDIO_CP_CHG_CPT) 2410 DRM_DEBUG_DRIVER("Audio CP change interrupt\n"); 2411 2412 if (pch_iir & SDE_FDI_MASK_CPT) 2413 for_each_pipe(dev_priv, pipe) 2414 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 2415 pipe_name(pipe), 2416 I915_READ(FDI_RX_IIR(pipe))); 2417 2418 if (pch_iir & SDE_ERROR_CPT) 2419 cpt_serr_int_handler(dev_priv); 2420 } 2421 2422 static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) 2423 { 2424 u32 ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_ICP; 2425 u32 tc_hotplug_trigger = pch_iir & SDE_TC_MASK_ICP; 2426 u32 pin_mask = 0, long_mask = 0; 2427 2428 if (ddi_hotplug_trigger) { 2429 u32 dig_hotplug_reg; 2430 2431 dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_DDI); 2432 I915_WRITE(SHOTPLUG_CTL_DDI, dig_hotplug_reg); 2433 2434 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 2435 ddi_hotplug_trigger, 2436 dig_hotplug_reg, hpd_icp, 2437 icp_ddi_port_hotplug_long_detect); 2438 } 2439 2440 if (tc_hotplug_trigger) { 2441 u32 dig_hotplug_reg; 2442 2443 dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_TC); 2444 I915_WRITE(SHOTPLUG_CTL_TC, dig_hotplug_reg); 2445 2446 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 2447 tc_hotplug_trigger, 2448 dig_hotplug_reg, hpd_icp, 2449 icp_tc_port_hotplug_long_detect); 2450 } 2451 2452 if (pin_mask) 2453 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2454 2455 if (pch_iir & SDE_GMBUS_ICP) 2456 gmbus_irq_handler(dev_priv); 2457 } 2458 2459 static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) 2460 { 2461 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT & 2462 ~SDE_PORTE_HOTPLUG_SPT; 2463 u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT; 2464 u32 pin_mask = 0, long_mask = 0; 2465 2466 if (hotplug_trigger) { 2467 u32 dig_hotplug_reg; 2468 2469 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); 2470 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); 2471 2472 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 2473 hotplug_trigger, dig_hotplug_reg, hpd_spt, 2474 spt_port_hotplug_long_detect); 2475 } 2476 2477 if (hotplug2_trigger) { 2478 u32 dig_hotplug_reg; 2479 2480 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2); 2481 I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg); 2482 2483 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 2484 
hotplug2_trigger, dig_hotplug_reg, hpd_spt, 2485 spt_port_hotplug2_long_detect); 2486 } 2487 2488 if (pin_mask) 2489 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2490 2491 if (pch_iir & SDE_GMBUS_CPT) 2492 gmbus_irq_handler(dev_priv); 2493 } 2494 2495 static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv, 2496 u32 hotplug_trigger, 2497 const u32 hpd[HPD_NUM_PINS]) 2498 { 2499 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; 2500 2501 dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL); 2502 I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg); 2503 2504 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger, 2505 dig_hotplug_reg, hpd, 2506 ilk_port_hotplug_long_detect); 2507 2508 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2509 } 2510 2511 static void ilk_display_irq_handler(struct drm_i915_private *dev_priv, 2512 u32 de_iir) 2513 { 2514 enum pipe pipe; 2515 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG; 2516 2517 if (hotplug_trigger) 2518 ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ilk); 2519 2520 if (de_iir & DE_AUX_CHANNEL_A) 2521 dp_aux_irq_handler(dev_priv); 2522 2523 if (de_iir & DE_GSE) 2524 intel_opregion_asle_intr(dev_priv); 2525 2526 if (de_iir & DE_POISON) 2527 DRM_ERROR("Poison interrupt\n"); 2528 2529 for_each_pipe(dev_priv, pipe) { 2530 if (de_iir & DE_PIPE_VBLANK(pipe)) 2531 drm_handle_vblank(&dev_priv->drm, pipe); 2532 2533 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe)) 2534 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2535 2536 if (de_iir & DE_PIPE_CRC_DONE(pipe)) 2537 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 2538 } 2539 2540 /* check event from PCH */ 2541 if (de_iir & DE_PCH_EVENT) { 2542 u32 pch_iir = I915_READ(SDEIIR); 2543 2544 if (HAS_PCH_CPT(dev_priv)) 2545 cpt_irq_handler(dev_priv, pch_iir); 2546 else 2547 ibx_irq_handler(dev_priv, pch_iir); 2548 2549 /* should clear PCH hotplug event before clear CPU irq */ 2550 I915_WRITE(SDEIIR, pch_iir); 2551 } 2552 2553 if (IS_GEN5(dev_priv) && de_iir & DE_PCU_EVENT) 2554 ironlake_rps_change_irq_handler(dev_priv); 2555 } 2556 2557 static void ivb_display_irq_handler(struct drm_i915_private *dev_priv, 2558 u32 de_iir) 2559 { 2560 enum pipe pipe; 2561 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB; 2562 2563 if (hotplug_trigger) 2564 ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ivb); 2565 2566 if (de_iir & DE_ERR_INT_IVB) 2567 ivb_err_int_handler(dev_priv); 2568 2569 if (de_iir & DE_EDP_PSR_INT_HSW) { 2570 u32 psr_iir = I915_READ(EDP_PSR_IIR); 2571 2572 intel_psr_irq_handler(dev_priv, psr_iir); 2573 I915_WRITE(EDP_PSR_IIR, psr_iir); 2574 } 2575 2576 if (de_iir & DE_AUX_CHANNEL_A_IVB) 2577 dp_aux_irq_handler(dev_priv); 2578 2579 if (de_iir & DE_GSE_IVB) 2580 intel_opregion_asle_intr(dev_priv); 2581 2582 for_each_pipe(dev_priv, pipe) { 2583 if (de_iir & (DE_PIPE_VBLANK_IVB(pipe))) 2584 drm_handle_vblank(&dev_priv->drm, pipe); 2585 } 2586 2587 /* check event from PCH */ 2588 if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) { 2589 u32 pch_iir = I915_READ(SDEIIR); 2590 2591 cpt_irq_handler(dev_priv, pch_iir); 2592 2593 /* clear PCH hotplug event before clear CPU irq */ 2594 I915_WRITE(SDEIIR, pch_iir); 2595 } 2596 } 2597 2598 /* 2599 * To handle irqs with the minimum potential races with fresh interrupts, we: 2600 * 1 - Disable Master Interrupt Control. 2601 * 2 - Find the source(s) of the interrupt. 2602 * 3 - Clear the Interrupt Identity bits (IIR). 2603 * 4 - Process the interrupt(s) that had bits set in the IIRs. 
2604 * 5 - Re-enable Master Interrupt Control. 2605 */ 2606 static irqreturn_t ironlake_irq_handler(int irq, void *arg) 2607 { 2608 struct drm_device *dev = arg; 2609 struct drm_i915_private *dev_priv = to_i915(dev); 2610 u32 de_iir, gt_iir, de_ier, sde_ier = 0; 2611 irqreturn_t ret = IRQ_NONE; 2612 2613 if (!intel_irqs_enabled(dev_priv)) 2614 return IRQ_NONE; 2615 2616 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2617 disable_rpm_wakeref_asserts(dev_priv); 2618 2619 /* disable master interrupt before clearing iir */ 2620 de_ier = I915_READ(DEIER); 2621 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); 2622 2623 /* Disable south interrupts. We'll only write to SDEIIR once, so further 2624 * interrupts will be stored on its back queue, and then we'll be 2625 * able to process them after we restore SDEIER (as soon as we restore 2626 * it, we'll get an interrupt if SDEIIR still has something to process 2627 * due to its back queue). */ 2628 if (!HAS_PCH_NOP(dev_priv)) { 2629 sde_ier = I915_READ(SDEIER); 2630 I915_WRITE(SDEIER, 0); 2631 } 2632 2633 /* Find, clear, then process each source of interrupt */ 2634 2635 gt_iir = I915_READ(GTIIR); 2636 if (gt_iir) { 2637 I915_WRITE(GTIIR, gt_iir); 2638 ret = IRQ_HANDLED; 2639 if (INTEL_GEN(dev_priv) >= 6) 2640 snb_gt_irq_handler(dev_priv, gt_iir); 2641 else 2642 ilk_gt_irq_handler(dev_priv, gt_iir); 2643 } 2644 2645 de_iir = I915_READ(DEIIR); 2646 if (de_iir) { 2647 I915_WRITE(DEIIR, de_iir); 2648 ret = IRQ_HANDLED; 2649 if (INTEL_GEN(dev_priv) >= 7) 2650 ivb_display_irq_handler(dev_priv, de_iir); 2651 else 2652 ilk_display_irq_handler(dev_priv, de_iir); 2653 } 2654 2655 if (INTEL_GEN(dev_priv) >= 6) { 2656 u32 pm_iir = I915_READ(GEN6_PMIIR); 2657 if (pm_iir) { 2658 I915_WRITE(GEN6_PMIIR, pm_iir); 2659 ret = IRQ_HANDLED; 2660 gen6_rps_irq_handler(dev_priv, pm_iir); 2661 } 2662 } 2663 2664 I915_WRITE(DEIER, de_ier); 2665 if (!HAS_PCH_NOP(dev_priv)) 2666 I915_WRITE(SDEIER, sde_ier); 2667 2668 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2669 enable_rpm_wakeref_asserts(dev_priv); 2670 2671 return ret; 2672 } 2673 2674 static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv, 2675 u32 hotplug_trigger, 2676 const u32 hpd[HPD_NUM_PINS]) 2677 { 2678 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; 2679 2680 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); 2681 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); 2682 2683 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger, 2684 dig_hotplug_reg, hpd, 2685 bxt_port_hotplug_long_detect); 2686 2687 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2688 } 2689 2690 static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir) 2691 { 2692 u32 pin_mask = 0, long_mask = 0; 2693 u32 trigger_tc = iir & GEN11_DE_TC_HOTPLUG_MASK; 2694 u32 trigger_tbt = iir & GEN11_DE_TBT_HOTPLUG_MASK; 2695 2696 if (trigger_tc) { 2697 u32 dig_hotplug_reg; 2698 2699 dig_hotplug_reg = I915_READ(GEN11_TC_HOTPLUG_CTL); 2700 I915_WRITE(GEN11_TC_HOTPLUG_CTL, dig_hotplug_reg); 2701 2702 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tc, 2703 dig_hotplug_reg, hpd_gen11, 2704 gen11_port_hotplug_long_detect); 2705 } 2706 2707 if (trigger_tbt) { 2708 u32 dig_hotplug_reg; 2709 2710 dig_hotplug_reg = I915_READ(GEN11_TBT_HOTPLUG_CTL); 2711 I915_WRITE(GEN11_TBT_HOTPLUG_CTL, dig_hotplug_reg); 2712 2713 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tbt, 2714 dig_hotplug_reg, hpd_gen11, 2715 gen11_port_hotplug_long_detect);
2716 } 2717 2718 if (pin_mask) 2719 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2720 else 2721 DRM_ERROR("Unexpected DE HPD interrupt 0x%08x\n", iir); 2722 } 2723 2724 static irqreturn_t 2725 gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) 2726 { 2727 irqreturn_t ret = IRQ_NONE; 2728 u32 iir; 2729 enum pipe pipe; 2730 2731 if (master_ctl & GEN8_DE_MISC_IRQ) { 2732 iir = I915_READ(GEN8_DE_MISC_IIR); 2733 if (iir) { 2734 bool found = false; 2735 2736 I915_WRITE(GEN8_DE_MISC_IIR, iir); 2737 ret = IRQ_HANDLED; 2738 2739 if (iir & GEN8_DE_MISC_GSE) { 2740 intel_opregion_asle_intr(dev_priv); 2741 found = true; 2742 } 2743 2744 if (iir & GEN8_DE_EDP_PSR) { 2745 u32 psr_iir = I915_READ(EDP_PSR_IIR); 2746 2747 intel_psr_irq_handler(dev_priv, psr_iir); 2748 I915_WRITE(EDP_PSR_IIR, psr_iir); 2749 found = true; 2750 } 2751 2752 if (!found) 2753 DRM_ERROR("Unexpected DE Misc interrupt\n"); 2754 } 2755 else 2756 DRM_ERROR("The master control interrupt lied (DE MISC)!\n"); 2757 } 2758 2759 if (INTEL_GEN(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) { 2760 iir = I915_READ(GEN11_DE_HPD_IIR); 2761 if (iir) { 2762 I915_WRITE(GEN11_DE_HPD_IIR, iir); 2763 ret = IRQ_HANDLED; 2764 gen11_hpd_irq_handler(dev_priv, iir); 2765 } else { 2766 DRM_ERROR("The master control interrupt lied, (DE HPD)!\n"); 2767 } 2768 } 2769 2770 if (master_ctl & GEN8_DE_PORT_IRQ) { 2771 iir = I915_READ(GEN8_DE_PORT_IIR); 2772 if (iir) { 2773 u32 tmp_mask; 2774 bool found = false; 2775 2776 I915_WRITE(GEN8_DE_PORT_IIR, iir); 2777 ret = IRQ_HANDLED; 2778 2779 tmp_mask = GEN8_AUX_CHANNEL_A; 2780 if (INTEL_GEN(dev_priv) >= 9) 2781 tmp_mask |= GEN9_AUX_CHANNEL_B | 2782 GEN9_AUX_CHANNEL_C | 2783 GEN9_AUX_CHANNEL_D; 2784 2785 if (INTEL_GEN(dev_priv) >= 11) 2786 tmp_mask |= ICL_AUX_CHANNEL_E; 2787 2788 if (IS_CNL_WITH_PORT_F(dev_priv) || 2789 INTEL_GEN(dev_priv) >= 11) 2790 tmp_mask |= CNL_AUX_CHANNEL_F; 2791 2792 if (iir & tmp_mask) { 2793 dp_aux_irq_handler(dev_priv); 2794 found = true; 2795 } 2796 2797 if (IS_GEN9_LP(dev_priv)) { 2798 tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK; 2799 if (tmp_mask) { 2800 bxt_hpd_irq_handler(dev_priv, tmp_mask, 2801 hpd_bxt); 2802 found = true; 2803 } 2804 } else if (IS_BROADWELL(dev_priv)) { 2805 tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG; 2806 if (tmp_mask) { 2807 ilk_hpd_irq_handler(dev_priv, 2808 tmp_mask, hpd_bdw); 2809 found = true; 2810 } 2811 } 2812 2813 if (IS_GEN9_LP(dev_priv) && (iir & BXT_DE_PORT_GMBUS)) { 2814 gmbus_irq_handler(dev_priv); 2815 found = true; 2816 } 2817 2818 if (!found) 2819 DRM_ERROR("Unexpected DE Port interrupt\n"); 2820 } 2821 else 2822 DRM_ERROR("The master control interrupt lied (DE PORT)!\n"); 2823 } 2824 2825 for_each_pipe(dev_priv, pipe) { 2826 u32 fault_errors; 2827 2828 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe))) 2829 continue; 2830 2831 iir = I915_READ(GEN8_DE_PIPE_IIR(pipe)); 2832 if (!iir) { 2833 DRM_ERROR("The master control interrupt lied (DE PIPE)!\n"); 2834 continue; 2835 } 2836 2837 ret = IRQ_HANDLED; 2838 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir); 2839 2840 if (iir & GEN8_PIPE_VBLANK) 2841 drm_handle_vblank(&dev_priv->drm, pipe); 2842 2843 if (iir & GEN8_PIPE_CDCLK_CRC_DONE) 2844 hsw_pipe_crc_irq_handler(dev_priv, pipe); 2845 2846 if (iir & GEN8_PIPE_FIFO_UNDERRUN) 2847 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2848 2849 fault_errors = iir; 2850 if (INTEL_GEN(dev_priv) >= 9) 2851 fault_errors &= GEN9_DE_PIPE_IRQ_FAULT_ERRORS; 2852 else 2853 fault_errors &= GEN8_DE_PIPE_IRQ_FAULT_ERRORS; 2854 2855 if (fault_errors) 2856 
DRM_ERROR("Fault errors on pipe %c: 0x%08x\n", 2857 pipe_name(pipe), 2858 fault_errors); 2859 } 2860 2861 if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) && 2862 master_ctl & GEN8_DE_PCH_IRQ) { 2863 /* 2864 * FIXME(BDW): Assume for now that the new interrupt handling 2865 * scheme also closed the SDE interrupt handling race we've seen 2866 * on older pch-split platforms. But this needs testing. 2867 */ 2868 iir = I915_READ(SDEIIR); 2869 if (iir) { 2870 I915_WRITE(SDEIIR, iir); 2871 ret = IRQ_HANDLED; 2872 2873 if (HAS_PCH_ICP(dev_priv)) 2874 icp_irq_handler(dev_priv, iir); 2875 else if (HAS_PCH_SPT(dev_priv) || 2876 HAS_PCH_KBP(dev_priv) || 2877 HAS_PCH_CNP(dev_priv)) 2878 spt_irq_handler(dev_priv, iir); 2879 else 2880 cpt_irq_handler(dev_priv, iir); 2881 } else { 2882 /* 2883 * Like on previous PCH there seems to be something 2884 * fishy going on with forwarding PCH interrupts. 2885 */ 2886 DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n"); 2887 } 2888 } 2889 2890 return ret; 2891 } 2892 2893 static irqreturn_t gen8_irq_handler(int irq, void *arg) 2894 { 2895 struct drm_i915_private *dev_priv = to_i915(arg); 2896 u32 master_ctl; 2897 u32 gt_iir[4]; 2898 2899 if (!intel_irqs_enabled(dev_priv)) 2900 return IRQ_NONE; 2901 2902 master_ctl = I915_READ_FW(GEN8_MASTER_IRQ); 2903 master_ctl &= ~GEN8_MASTER_IRQ_CONTROL; 2904 if (!master_ctl) 2905 return IRQ_NONE; 2906 2907 I915_WRITE_FW(GEN8_MASTER_IRQ, 0); 2908 2909 /* Find, clear, then process each source of interrupt */ 2910 gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir); 2911 2912 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2913 if (master_ctl & ~GEN8_GT_IRQS) { 2914 disable_rpm_wakeref_asserts(dev_priv); 2915 gen8_de_irq_handler(dev_priv, master_ctl); 2916 enable_rpm_wakeref_asserts(dev_priv); 2917 } 2918 2919 I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 2920 2921 gen8_gt_irq_handler(dev_priv, master_ctl, gt_iir); 2922 2923 return IRQ_HANDLED; 2924 } 2925 2926 struct wedge_me { 2927 struct delayed_work work; 2928 struct drm_i915_private *i915; 2929 const char *name; 2930 }; 2931 2932 static void wedge_me(struct work_struct *work) 2933 { 2934 struct wedge_me *w = container_of(work, typeof(*w), work.work); 2935 2936 dev_err(w->i915->drm.dev, 2937 "%s timed out, cancelling all in-flight rendering.\n", 2938 w->name); 2939 i915_gem_set_wedged(w->i915); 2940 } 2941 2942 static void __init_wedge(struct wedge_me *w, 2943 struct drm_i915_private *i915, 2944 long timeout, 2945 const char *name) 2946 { 2947 w->i915 = i915; 2948 w->name = name; 2949 2950 INIT_DELAYED_WORK_ONSTACK(&w->work, wedge_me); 2951 schedule_delayed_work(&w->work, timeout); 2952 } 2953 2954 static void __fini_wedge(struct wedge_me *w) 2955 { 2956 cancel_delayed_work_sync(&w->work); 2957 destroy_delayed_work_on_stack(&w->work); 2958 w->i915 = NULL; 2959 } 2960 2961 #define i915_wedge_on_timeout(W, DEV, TIMEOUT) \ 2962 for (__init_wedge((W), (DEV), (TIMEOUT), __func__); \ 2963 (W)->i915; \ 2964 __fini_wedge((W))) 2965 2966 static u32 2967 gen11_gt_engine_identity(struct drm_i915_private * const i915, 2968 const unsigned int bank, const unsigned int bit) 2969 { 2970 void __iomem * const regs = i915->regs; 2971 u32 timeout_ts; 2972 u32 ident; 2973 2974 lockdep_assert_held(&i915->irq_lock); 2975 2976 raw_reg_write(regs, GEN11_IIR_REG_SELECTOR(bank), BIT(bit)); 2977 2978 /* 2979 * NB: Specs do not specify how long to spin wait, 2980 * so we do ~100us as an educated guess. 
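 *
 * local_clock() returns nanoseconds, so the >> 10 below is a cheap
 * approximation of microseconds; we poll the identity register until
 * GEN11_INTR_DATA_VALID is set or roughly 100us have elapsed.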
2981 */ 2982 timeout_ts = (local_clock() >> 10) + 100; 2983 do { 2984 ident = raw_reg_read(regs, GEN11_INTR_IDENTITY_REG(bank)); 2985 } while (!(ident & GEN11_INTR_DATA_VALID) && 2986 !time_after32(local_clock() >> 10, timeout_ts)); 2987 2988 if (unlikely(!(ident & GEN11_INTR_DATA_VALID))) { 2989 DRM_ERROR("INTR_IDENTITY_REG%u:%u 0x%08x not valid!\n", 2990 bank, bit, ident); 2991 return 0; 2992 } 2993 2994 raw_reg_write(regs, GEN11_INTR_IDENTITY_REG(bank), 2995 GEN11_INTR_DATA_VALID); 2996 2997 return ident; 2998 } 2999 3000 static void 3001 gen11_other_irq_handler(struct drm_i915_private * const i915, 3002 const u8 instance, const u16 iir) 3003 { 3004 if (instance == OTHER_GTPM_INSTANCE) 3005 return gen6_rps_irq_handler(i915, iir); 3006 3007 WARN_ONCE(1, "unhandled other interrupt instance=0x%x, iir=0x%x\n", 3008 instance, iir); 3009 } 3010 3011 static void 3012 gen11_engine_irq_handler(struct drm_i915_private * const i915, 3013 const u8 class, const u8 instance, const u16 iir) 3014 { 3015 struct intel_engine_cs *engine; 3016 3017 if (instance <= MAX_ENGINE_INSTANCE) 3018 engine = i915->engine_class[class][instance]; 3019 else 3020 engine = NULL; 3021 3022 if (likely(engine)) 3023 return gen8_cs_irq_handler(engine, iir); 3024 3025 WARN_ONCE(1, "unhandled engine interrupt class=0x%x, instance=0x%x\n", 3026 class, instance); 3027 } 3028 3029 static void 3030 gen11_gt_identity_handler(struct drm_i915_private * const i915, 3031 const u32 identity) 3032 { 3033 const u8 class = GEN11_INTR_ENGINE_CLASS(identity); 3034 const u8 instance = GEN11_INTR_ENGINE_INSTANCE(identity); 3035 const u16 intr = GEN11_INTR_ENGINE_INTR(identity); 3036 3037 if (unlikely(!intr)) 3038 return; 3039 3040 if (class <= COPY_ENGINE_CLASS) 3041 return gen11_engine_irq_handler(i915, class, instance, intr); 3042 3043 if (class == OTHER_CLASS) 3044 return gen11_other_irq_handler(i915, instance, intr); 3045 3046 WARN_ONCE(1, "unknown interrupt class=0x%x, instance=0x%x, intr=0x%x\n", 3047 class, instance, intr); 3048 } 3049 3050 static void 3051 gen11_gt_bank_handler(struct drm_i915_private * const i915, 3052 const unsigned int bank) 3053 { 3054 void __iomem * const regs = i915->regs; 3055 unsigned long intr_dw; 3056 unsigned int bit; 3057 3058 lockdep_assert_held(&i915->irq_lock); 3059 3060 intr_dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank)); 3061 3062 if (unlikely(!intr_dw)) { 3063 DRM_ERROR("GT_INTR_DW%u blank!\n", bank); 3064 return; 3065 } 3066 3067 for_each_set_bit(bit, &intr_dw, 32) { 3068 const u32 ident = gen11_gt_engine_identity(i915, 3069 bank, bit); 3070 3071 gen11_gt_identity_handler(i915, ident); 3072 } 3073 3074 /* Clear must be after shared has been served for engine */ 3075 raw_reg_write(regs, GEN11_GT_INTR_DW(bank), intr_dw); 3076 } 3077 3078 static void 3079 gen11_gt_irq_handler(struct drm_i915_private * const i915, 3080 const u32 master_ctl) 3081 { 3082 unsigned int bank; 3083 3084 spin_lock(&i915->irq_lock); 3085 3086 for (bank = 0; bank < 2; bank++) { 3087 if (master_ctl & GEN11_GT_DW_IRQ(bank)) 3088 gen11_gt_bank_handler(i915, bank); 3089 } 3090 3091 spin_unlock(&i915->irq_lock); 3092 } 3093 3094 static void 3095 gen11_gu_misc_irq_ack(struct drm_i915_private *dev_priv, const u32 master_ctl, 3096 u32 *iir) 3097 { 3098 void __iomem * const regs = dev_priv->regs; 3099 3100 if (!(master_ctl & GEN11_GU_MISC_IRQ)) 3101 return; 3102 3103 *iir = raw_reg_read(regs, GEN11_GU_MISC_IIR); 3104 if (likely(*iir)) 3105 raw_reg_write(regs, GEN11_GU_MISC_IIR, *iir); 3106 } 3107 3108 static void 3109 
gen11_gu_misc_irq_handler(struct drm_i915_private *dev_priv, 3110 const u32 master_ctl, const u32 iir) 3111 { 3112 if (!(master_ctl & GEN11_GU_MISC_IRQ)) 3113 return; 3114 3115 if (unlikely(!iir)) { 3116 DRM_ERROR("GU_MISC iir blank!\n"); 3117 return; 3118 } 3119 3120 if (iir & GEN11_GU_MISC_GSE) 3121 intel_opregion_asle_intr(dev_priv); 3122 else 3123 DRM_ERROR("Unexpected GU_MISC interrupt 0x%x\n", iir); 3124 } 3125 3126 static irqreturn_t gen11_irq_handler(int irq, void *arg) 3127 { 3128 struct drm_i915_private * const i915 = to_i915(arg); 3129 void __iomem * const regs = i915->regs; 3130 u32 master_ctl; 3131 u32 gu_misc_iir; 3132 3133 if (!intel_irqs_enabled(i915)) 3134 return IRQ_NONE; 3135 3136 master_ctl = raw_reg_read(regs, GEN11_GFX_MSTR_IRQ); 3137 master_ctl &= ~GEN11_MASTER_IRQ; 3138 if (!master_ctl) 3139 return IRQ_NONE; 3140 3141 /* Disable interrupts. */ 3142 raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0); 3143 3144 /* Find, clear, then process each source of interrupt. */ 3145 gen11_gt_irq_handler(i915, master_ctl); 3146 3147 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 3148 if (master_ctl & GEN11_DISPLAY_IRQ) { 3149 const u32 disp_ctl = raw_reg_read(regs, GEN11_DISPLAY_INT_CTL); 3150 3151 disable_rpm_wakeref_asserts(i915); 3152 /* 3153 * GEN11_DISPLAY_INT_CTL has same format as GEN8_MASTER_IRQ 3154 * for the display related bits. 3155 */ 3156 gen8_de_irq_handler(i915, disp_ctl); 3157 enable_rpm_wakeref_asserts(i915); 3158 } 3159 3160 gen11_gu_misc_irq_ack(i915, master_ctl, &gu_misc_iir); 3161 3162 /* Acknowledge and enable interrupts. */ 3163 raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ | master_ctl); 3164 3165 gen11_gu_misc_irq_handler(i915, master_ctl, gu_misc_iir); 3166 3167 return IRQ_HANDLED; 3168 } 3169 3170 static void i915_reset_device(struct drm_i915_private *dev_priv, 3171 u32 engine_mask, 3172 const char *reason) 3173 { 3174 struct i915_gpu_error *error = &dev_priv->gpu_error; 3175 struct kobject *kobj = &dev_priv->drm.primary->kdev->kobj; 3176 char *error_event[] = { I915_ERROR_UEVENT "=1", NULL }; 3177 char *reset_event[] = { I915_RESET_UEVENT "=1", NULL }; 3178 char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL }; 3179 struct wedge_me w; 3180 3181 kobject_uevent_env(kobj, KOBJ_CHANGE, error_event); 3182 3183 DRM_DEBUG_DRIVER("resetting chip\n"); 3184 kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event); 3185 3186 /* Use a watchdog to ensure that our reset completes */ 3187 i915_wedge_on_timeout(&w, dev_priv, 5*HZ) { 3188 intel_prepare_reset(dev_priv); 3189 3190 error->reason = reason; 3191 error->stalled_mask = engine_mask; 3192 3193 /* Signal that locked waiters should reset the GPU */ 3194 smp_mb__before_atomic(); 3195 set_bit(I915_RESET_HANDOFF, &error->flags); 3196 wake_up_all(&error->wait_queue); 3197 3198 /* Wait for anyone holding the lock to wakeup, without 3199 * blocking indefinitely on struct_mutex. 
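 *
 * The loop below keeps retrying until I915_RESET_HANDOFF is cleared,
 * either because this thread won mutex_trylock() and performed the reset
 * itself, or because a waiter that already holds struct_mutex picked up
 * the handoff and did it for us.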
3200 */ 3201 do { 3202 if (mutex_trylock(&dev_priv->drm.struct_mutex)) { 3203 i915_reset(dev_priv, engine_mask, reason); 3204 mutex_unlock(&dev_priv->drm.struct_mutex); 3205 } 3206 } while (wait_on_bit_timeout(&error->flags, 3207 I915_RESET_HANDOFF, 3208 TASK_UNINTERRUPTIBLE, 3209 1)); 3210 3211 error->stalled_mask = 0; 3212 error->reason = NULL; 3213 3214 intel_finish_reset(dev_priv); 3215 } 3216 3217 if (!test_bit(I915_WEDGED, &error->flags)) 3218 kobject_uevent_env(kobj, KOBJ_CHANGE, reset_done_event); 3219 } 3220 3221 static void i915_clear_error_registers(struct drm_i915_private *dev_priv) 3222 { 3223 u32 eir; 3224 3225 if (!IS_GEN2(dev_priv)) 3226 I915_WRITE(PGTBL_ER, I915_READ(PGTBL_ER)); 3227 3228 if (INTEL_GEN(dev_priv) < 4) 3229 I915_WRITE(IPEIR, I915_READ(IPEIR)); 3230 else 3231 I915_WRITE(IPEIR_I965, I915_READ(IPEIR_I965)); 3232 3233 I915_WRITE(EIR, I915_READ(EIR)); 3234 eir = I915_READ(EIR); 3235 if (eir) { 3236 /* 3237 * some errors might have become stuck, 3238 * mask them. 3239 */ 3240 DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masking\n", eir); 3241 I915_WRITE(EMR, I915_READ(EMR) | eir); 3242 I915_WRITE(IIR, I915_MASTER_ERROR_INTERRUPT); 3243 } 3244 } 3245 3246 /** 3247 * i915_handle_error - handle a gpu error 3248 * @dev_priv: i915 device private 3249 * @engine_mask: mask representing engines that are hung 3250 * @flags: control flags 3251 * @fmt: Error message format string 3252 * 3253 * Do some basic checking of register state at error time and 3254 * dump it to the syslog. Also call i915_capture_error_state() to make 3255 * sure we get a record and make it available in debugfs. Fire a uevent 3256 * so userspace knows something bad happened (should trigger collection 3257 * of a ring dump etc.). 3258 */ 3259 void i915_handle_error(struct drm_i915_private *dev_priv, 3260 u32 engine_mask, 3261 unsigned long flags, 3262 const char *fmt, ...) 3263 { 3264 struct intel_engine_cs *engine; 3265 unsigned int tmp; 3266 char error_msg[80]; 3267 char *msg = NULL; 3268 3269 if (fmt) { 3270 va_list args; 3271 3272 va_start(args, fmt); 3273 vscnprintf(error_msg, sizeof(error_msg), fmt, args); 3274 va_end(args); 3275 3276 msg = error_msg; 3277 } 3278 3279 /* 3280 * In most cases it's guaranteed that we get here with an RPM 3281 * reference held, for example because there is a pending GPU 3282 * request that won't finish until the reset is done. This 3283 * isn't the case at least when we get here by doing a 3284 * simulated reset via debugfs, so get an RPM reference. 3285 */ 3286 intel_runtime_pm_get(dev_priv); 3287 3288 engine_mask &= INTEL_INFO(dev_priv)->ring_mask; 3289 3290 if (flags & I915_ERROR_CAPTURE) { 3291 i915_capture_error_state(dev_priv, engine_mask, msg); 3292 i915_clear_error_registers(dev_priv); 3293 } 3294 3295 /* 3296 * Try engine reset when available. We fall back to full reset if 3297 * single reset fails. 
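 *
 * Engines that are successfully reset below are dropped from
 * engine_mask; only if some engines remain (or engine reset is not
 * supported at all) do we escalate to the full device reset further
 * down.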
3298 */ 3299 if (intel_has_reset_engine(dev_priv)) { 3300 for_each_engine_masked(engine, dev_priv, engine_mask, tmp) { 3301 BUILD_BUG_ON(I915_RESET_MODESET >= I915_RESET_ENGINE); 3302 if (test_and_set_bit(I915_RESET_ENGINE + engine->id, 3303 &dev_priv->gpu_error.flags)) 3304 continue; 3305 3306 if (i915_reset_engine(engine, msg) == 0) 3307 engine_mask &= ~intel_engine_flag(engine); 3308 3309 clear_bit(I915_RESET_ENGINE + engine->id, 3310 &dev_priv->gpu_error.flags); 3311 wake_up_bit(&dev_priv->gpu_error.flags, 3312 I915_RESET_ENGINE + engine->id); 3313 } 3314 } 3315 3316 if (!engine_mask) 3317 goto out; 3318 3319 /* Full reset needs the mutex, stop any other user trying to do so. */ 3320 if (test_and_set_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags)) { 3321 wait_event(dev_priv->gpu_error.reset_queue, 3322 !test_bit(I915_RESET_BACKOFF, 3323 &dev_priv->gpu_error.flags)); 3324 goto out; 3325 } 3326 3327 /* Prevent any other reset-engine attempt. */ 3328 for_each_engine(engine, dev_priv, tmp) { 3329 while (test_and_set_bit(I915_RESET_ENGINE + engine->id, 3330 &dev_priv->gpu_error.flags)) 3331 wait_on_bit(&dev_priv->gpu_error.flags, 3332 I915_RESET_ENGINE + engine->id, 3333 TASK_UNINTERRUPTIBLE); 3334 } 3335 3336 i915_reset_device(dev_priv, engine_mask, msg); 3337 3338 for_each_engine(engine, dev_priv, tmp) { 3339 clear_bit(I915_RESET_ENGINE + engine->id, 3340 &dev_priv->gpu_error.flags); 3341 } 3342 3343 clear_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags); 3344 wake_up_all(&dev_priv->gpu_error.reset_queue); 3345 3346 out: 3347 intel_runtime_pm_put(dev_priv); 3348 } 3349 3350 /* Called from drm generic code, passed 'crtc' which 3351 * we use as a pipe index 3352 */ 3353 static int i8xx_enable_vblank(struct drm_device *dev, unsigned int pipe) 3354 { 3355 struct drm_i915_private *dev_priv = to_i915(dev); 3356 unsigned long irqflags; 3357 3358 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3359 i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS); 3360 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3361 3362 return 0; 3363 } 3364 3365 static int i965_enable_vblank(struct drm_device *dev, unsigned int pipe) 3366 { 3367 struct drm_i915_private *dev_priv = to_i915(dev); 3368 unsigned long irqflags; 3369 3370 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3371 i915_enable_pipestat(dev_priv, pipe, 3372 PIPE_START_VBLANK_INTERRUPT_STATUS); 3373 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3374 3375 return 0; 3376 } 3377 3378 static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe) 3379 { 3380 struct drm_i915_private *dev_priv = to_i915(dev); 3381 unsigned long irqflags; 3382 uint32_t bit = INTEL_GEN(dev_priv) >= 7 ? 3383 DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe); 3384 3385 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3386 ilk_enable_display_irq(dev_priv, bit); 3387 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3388 3389 /* Even though there is no DMC, frame counter can get stuck when 3390 * PSR is active as no frames are generated. 
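 *
 * drm_vblank_restore() below lets the DRM core re-derive the vblank
 * count when the interrupt is re-enabled instead of trusting the
 * possibly stalled hardware frame counter.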
3391 */ 3392 if (HAS_PSR(dev_priv)) 3393 drm_vblank_restore(dev, pipe); 3394 3395 return 0; 3396 } 3397 3398 static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe) 3399 { 3400 struct drm_i915_private *dev_priv = to_i915(dev); 3401 unsigned long irqflags; 3402 3403 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3404 bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK); 3405 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3406 3407 /* Even if there is no DMC, frame counter can get stuck when 3408 * PSR is active as no frames are generated, so check only for PSR. 3409 */ 3410 if (HAS_PSR(dev_priv)) 3411 drm_vblank_restore(dev, pipe); 3412 3413 return 0; 3414 } 3415 3416 /* Called from drm generic code, passed 'crtc' which 3417 * we use as a pipe index 3418 */ 3419 static void i8xx_disable_vblank(struct drm_device *dev, unsigned int pipe) 3420 { 3421 struct drm_i915_private *dev_priv = to_i915(dev); 3422 unsigned long irqflags; 3423 3424 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3425 i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS); 3426 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3427 } 3428 3429 static void i965_disable_vblank(struct drm_device *dev, unsigned int pipe) 3430 { 3431 struct drm_i915_private *dev_priv = to_i915(dev); 3432 unsigned long irqflags; 3433 3434 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3435 i915_disable_pipestat(dev_priv, pipe, 3436 PIPE_START_VBLANK_INTERRUPT_STATUS); 3437 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3438 } 3439 3440 static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe) 3441 { 3442 struct drm_i915_private *dev_priv = to_i915(dev); 3443 unsigned long irqflags; 3444 uint32_t bit = INTEL_GEN(dev_priv) >= 7 ? 3445 DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe); 3446 3447 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3448 ilk_disable_display_irq(dev_priv, bit); 3449 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3450 } 3451 3452 static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe) 3453 { 3454 struct drm_i915_private *dev_priv = to_i915(dev); 3455 unsigned long irqflags; 3456 3457 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3458 bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK); 3459 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3460 } 3461 3462 static void ibx_irq_reset(struct drm_i915_private *dev_priv) 3463 { 3464 if (HAS_PCH_NOP(dev_priv)) 3465 return; 3466 3467 GEN3_IRQ_RESET(SDE); 3468 3469 if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv)) 3470 I915_WRITE(SERR_INT, 0xffffffff); 3471 } 3472 3473 /* 3474 * SDEIER is also touched by the interrupt handler to work around missed PCH 3475 * interrupts. Hence we can't update it after the interrupt handler is enabled - 3476 * instead we unconditionally enable all PCH interrupt sources here, but then 3477 * only unmask them as needed with SDEIMR. 3478 * 3479 * This function needs to be called before interrupts are enabled. 
3480 */ 3481 static void ibx_irq_pre_postinstall(struct drm_device *dev) 3482 { 3483 struct drm_i915_private *dev_priv = to_i915(dev); 3484 3485 if (HAS_PCH_NOP(dev_priv)) 3486 return; 3487 3488 WARN_ON(I915_READ(SDEIER) != 0); 3489 I915_WRITE(SDEIER, 0xffffffff); 3490 POSTING_READ(SDEIER); 3491 } 3492 3493 static void gen5_gt_irq_reset(struct drm_i915_private *dev_priv) 3494 { 3495 GEN3_IRQ_RESET(GT); 3496 if (INTEL_GEN(dev_priv) >= 6) 3497 GEN3_IRQ_RESET(GEN6_PM); 3498 } 3499 3500 static void vlv_display_irq_reset(struct drm_i915_private *dev_priv) 3501 { 3502 if (IS_CHERRYVIEW(dev_priv)) 3503 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV); 3504 else 3505 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK); 3506 3507 i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0); 3508 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3509 3510 i9xx_pipestat_irq_reset(dev_priv); 3511 3512 GEN3_IRQ_RESET(VLV_); 3513 dev_priv->irq_mask = ~0u; 3514 } 3515 3516 static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv) 3517 { 3518 u32 pipestat_mask; 3519 u32 enable_mask; 3520 enum pipe pipe; 3521 3522 pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS; 3523 3524 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); 3525 for_each_pipe(dev_priv, pipe) 3526 i915_enable_pipestat(dev_priv, pipe, pipestat_mask); 3527 3528 enable_mask = I915_DISPLAY_PORT_INTERRUPT | 3529 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3530 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3531 I915_LPE_PIPE_A_INTERRUPT | 3532 I915_LPE_PIPE_B_INTERRUPT; 3533 3534 if (IS_CHERRYVIEW(dev_priv)) 3535 enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT | 3536 I915_LPE_PIPE_C_INTERRUPT; 3537 3538 WARN_ON(dev_priv->irq_mask != ~0u); 3539 3540 dev_priv->irq_mask = ~enable_mask; 3541 3542 GEN3_IRQ_INIT(VLV_, dev_priv->irq_mask, enable_mask); 3543 } 3544 3545 /* drm_dma.h hooks 3546 */ 3547 static void ironlake_irq_reset(struct drm_device *dev) 3548 { 3549 struct drm_i915_private *dev_priv = to_i915(dev); 3550 3551 if (IS_GEN5(dev_priv)) 3552 I915_WRITE(HWSTAM, 0xffffffff); 3553 3554 GEN3_IRQ_RESET(DE); 3555 if (IS_GEN7(dev_priv)) 3556 I915_WRITE(GEN7_ERR_INT, 0xffffffff); 3557 3558 if (IS_HASWELL(dev_priv)) { 3559 I915_WRITE(EDP_PSR_IMR, 0xffffffff); 3560 I915_WRITE(EDP_PSR_IIR, 0xffffffff); 3561 } 3562 3563 gen5_gt_irq_reset(dev_priv); 3564 3565 ibx_irq_reset(dev_priv); 3566 } 3567 3568 static void valleyview_irq_reset(struct drm_device *dev) 3569 { 3570 struct drm_i915_private *dev_priv = to_i915(dev); 3571 3572 I915_WRITE(VLV_MASTER_IER, 0); 3573 POSTING_READ(VLV_MASTER_IER); 3574 3575 gen5_gt_irq_reset(dev_priv); 3576 3577 spin_lock_irq(&dev_priv->irq_lock); 3578 if (dev_priv->display_irqs_enabled) 3579 vlv_display_irq_reset(dev_priv); 3580 spin_unlock_irq(&dev_priv->irq_lock); 3581 } 3582 3583 static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv) 3584 { 3585 GEN8_IRQ_RESET_NDX(GT, 0); 3586 GEN8_IRQ_RESET_NDX(GT, 1); 3587 GEN8_IRQ_RESET_NDX(GT, 2); 3588 GEN8_IRQ_RESET_NDX(GT, 3); 3589 } 3590 3591 static void gen8_irq_reset(struct drm_device *dev) 3592 { 3593 struct drm_i915_private *dev_priv = to_i915(dev); 3594 int pipe; 3595 3596 I915_WRITE(GEN8_MASTER_IRQ, 0); 3597 POSTING_READ(GEN8_MASTER_IRQ); 3598 3599 gen8_gt_irq_reset(dev_priv); 3600 3601 I915_WRITE(EDP_PSR_IMR, 0xffffffff); 3602 I915_WRITE(EDP_PSR_IIR, 0xffffffff); 3603 3604 for_each_pipe(dev_priv, pipe) 3605 if (intel_display_power_is_enabled(dev_priv, 3606 POWER_DOMAIN_PIPE(pipe))) 3607 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe); 3608 3609 
GEN3_IRQ_RESET(GEN8_DE_PORT_); 3610 GEN3_IRQ_RESET(GEN8_DE_MISC_); 3611 GEN3_IRQ_RESET(GEN8_PCU_); 3612 3613 if (HAS_PCH_SPLIT(dev_priv)) 3614 ibx_irq_reset(dev_priv); 3615 } 3616 3617 static void gen11_gt_irq_reset(struct drm_i915_private *dev_priv) 3618 { 3619 /* Disable RCS, BCS, VCS and VECS class engines. */ 3620 I915_WRITE(GEN11_RENDER_COPY_INTR_ENABLE, 0); 3621 I915_WRITE(GEN11_VCS_VECS_INTR_ENABLE, 0); 3622 3623 /* Restore masks irqs on RCS, BCS, VCS and VECS engines. */ 3624 I915_WRITE(GEN11_RCS0_RSVD_INTR_MASK, ~0); 3625 I915_WRITE(GEN11_BCS_RSVD_INTR_MASK, ~0); 3626 I915_WRITE(GEN11_VCS0_VCS1_INTR_MASK, ~0); 3627 I915_WRITE(GEN11_VCS2_VCS3_INTR_MASK, ~0); 3628 I915_WRITE(GEN11_VECS0_VECS1_INTR_MASK, ~0); 3629 3630 I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0); 3631 I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_MASK, ~0); 3632 } 3633 3634 static void gen11_irq_reset(struct drm_device *dev) 3635 { 3636 struct drm_i915_private *dev_priv = dev->dev_private; 3637 int pipe; 3638 3639 I915_WRITE(GEN11_GFX_MSTR_IRQ, 0); 3640 POSTING_READ(GEN11_GFX_MSTR_IRQ); 3641 3642 gen11_gt_irq_reset(dev_priv); 3643 3644 I915_WRITE(GEN11_DISPLAY_INT_CTL, 0); 3645 3646 for_each_pipe(dev_priv, pipe) 3647 if (intel_display_power_is_enabled(dev_priv, 3648 POWER_DOMAIN_PIPE(pipe))) 3649 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe); 3650 3651 GEN3_IRQ_RESET(GEN8_DE_PORT_); 3652 GEN3_IRQ_RESET(GEN8_DE_MISC_); 3653 GEN3_IRQ_RESET(GEN11_DE_HPD_); 3654 GEN3_IRQ_RESET(GEN11_GU_MISC_); 3655 GEN3_IRQ_RESET(GEN8_PCU_); 3656 3657 if (HAS_PCH_ICP(dev_priv)) 3658 GEN3_IRQ_RESET(SDE); 3659 } 3660 3661 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv, 3662 u8 pipe_mask) 3663 { 3664 uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN; 3665 enum pipe pipe; 3666 3667 spin_lock_irq(&dev_priv->irq_lock); 3668 3669 if (!intel_irqs_enabled(dev_priv)) { 3670 spin_unlock_irq(&dev_priv->irq_lock); 3671 return; 3672 } 3673 3674 for_each_pipe_masked(dev_priv, pipe, pipe_mask) 3675 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe, 3676 dev_priv->de_irq_mask[pipe], 3677 ~dev_priv->de_irq_mask[pipe] | extra_ier); 3678 3679 spin_unlock_irq(&dev_priv->irq_lock); 3680 } 3681 3682 void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv, 3683 u8 pipe_mask) 3684 { 3685 enum pipe pipe; 3686 3687 spin_lock_irq(&dev_priv->irq_lock); 3688 3689 if (!intel_irqs_enabled(dev_priv)) { 3690 spin_unlock_irq(&dev_priv->irq_lock); 3691 return; 3692 } 3693 3694 for_each_pipe_masked(dev_priv, pipe, pipe_mask) 3695 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe); 3696 3697 spin_unlock_irq(&dev_priv->irq_lock); 3698 3699 /* make sure we're done processing display irqs */ 3700 synchronize_irq(dev_priv->drm.irq); 3701 } 3702 3703 static void cherryview_irq_reset(struct drm_device *dev) 3704 { 3705 struct drm_i915_private *dev_priv = to_i915(dev); 3706 3707 I915_WRITE(GEN8_MASTER_IRQ, 0); 3708 POSTING_READ(GEN8_MASTER_IRQ); 3709 3710 gen8_gt_irq_reset(dev_priv); 3711 3712 GEN3_IRQ_RESET(GEN8_PCU_); 3713 3714 spin_lock_irq(&dev_priv->irq_lock); 3715 if (dev_priv->display_irqs_enabled) 3716 vlv_display_irq_reset(dev_priv); 3717 spin_unlock_irq(&dev_priv->irq_lock); 3718 } 3719 3720 static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv, 3721 const u32 hpd[HPD_NUM_PINS]) 3722 { 3723 struct intel_encoder *encoder; 3724 u32 enabled_irqs = 0; 3725 3726 for_each_intel_encoder(&dev_priv->drm, encoder) 3727 if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED) 3728 enabled_irqs |= hpd[encoder->hpd_pin]; 3729 3730 return enabled_irqs; 3731 } 
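
/*
 * The hotplug setup below is split in two stages on every platform: the
 * *_hpd_irq_setup() functions unmask the hotplug bits in the relevant
 * interrupt mask register for the pins that intel_hpd_enabled_irqs()
 * reports as enabled, while the *_hpd_detection_setup() helpers program
 * the detection logic itself (enable bits, pulse durations and, on BXT,
 * the HPD inversion) in the hotplug control registers.
 */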
3732 3733 static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv) 3734 { 3735 u32 hotplug; 3736 3737 /* 3738 * Enable digital hotplug on the PCH, and configure the DP short pulse 3739 * duration to 2ms (which is the minimum in the Display Port spec). 3740 * The pulse duration bits are reserved on LPT+. 3741 */ 3742 hotplug = I915_READ(PCH_PORT_HOTPLUG); 3743 hotplug &= ~(PORTB_PULSE_DURATION_MASK | 3744 PORTC_PULSE_DURATION_MASK | 3745 PORTD_PULSE_DURATION_MASK); 3746 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms; 3747 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms; 3748 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms; 3749 /* 3750 * When CPU and PCH are on the same package, port A 3751 * HPD must be enabled in both north and south. 3752 */ 3753 if (HAS_PCH_LPT_LP(dev_priv)) 3754 hotplug |= PORTA_HOTPLUG_ENABLE; 3755 I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 3756 } 3757 3758 static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv) 3759 { 3760 u32 hotplug_irqs, enabled_irqs; 3761 3762 if (HAS_PCH_IBX(dev_priv)) { 3763 hotplug_irqs = SDE_HOTPLUG_MASK; 3764 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ibx); 3765 } else { 3766 hotplug_irqs = SDE_HOTPLUG_MASK_CPT; 3767 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_cpt); 3768 } 3769 3770 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 3771 3772 ibx_hpd_detection_setup(dev_priv); 3773 } 3774 3775 static void icp_hpd_detection_setup(struct drm_i915_private *dev_priv) 3776 { 3777 u32 hotplug; 3778 3779 hotplug = I915_READ(SHOTPLUG_CTL_DDI); 3780 hotplug |= ICP_DDIA_HPD_ENABLE | 3781 ICP_DDIB_HPD_ENABLE; 3782 I915_WRITE(SHOTPLUG_CTL_DDI, hotplug); 3783 3784 hotplug = I915_READ(SHOTPLUG_CTL_TC); 3785 hotplug |= ICP_TC_HPD_ENABLE(PORT_TC1) | 3786 ICP_TC_HPD_ENABLE(PORT_TC2) | 3787 ICP_TC_HPD_ENABLE(PORT_TC3) | 3788 ICP_TC_HPD_ENABLE(PORT_TC4); 3789 I915_WRITE(SHOTPLUG_CTL_TC, hotplug); 3790 } 3791 3792 static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv) 3793 { 3794 u32 hotplug_irqs, enabled_irqs; 3795 3796 hotplug_irqs = SDE_DDI_MASK_ICP | SDE_TC_MASK_ICP; 3797 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_icp); 3798 3799 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 3800 3801 icp_hpd_detection_setup(dev_priv); 3802 } 3803 3804 static void gen11_hpd_detection_setup(struct drm_i915_private *dev_priv) 3805 { 3806 u32 hotplug; 3807 3808 hotplug = I915_READ(GEN11_TC_HOTPLUG_CTL); 3809 hotplug |= GEN11_HOTPLUG_CTL_ENABLE(PORT_TC1) | 3810 GEN11_HOTPLUG_CTL_ENABLE(PORT_TC2) | 3811 GEN11_HOTPLUG_CTL_ENABLE(PORT_TC3) | 3812 GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4); 3813 I915_WRITE(GEN11_TC_HOTPLUG_CTL, hotplug); 3814 3815 hotplug = I915_READ(GEN11_TBT_HOTPLUG_CTL); 3816 hotplug |= GEN11_HOTPLUG_CTL_ENABLE(PORT_TC1) | 3817 GEN11_HOTPLUG_CTL_ENABLE(PORT_TC2) | 3818 GEN11_HOTPLUG_CTL_ENABLE(PORT_TC3) | 3819 GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4); 3820 I915_WRITE(GEN11_TBT_HOTPLUG_CTL, hotplug); 3821 } 3822 3823 static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv) 3824 { 3825 u32 hotplug_irqs, enabled_irqs; 3826 u32 val; 3827 3828 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_gen11); 3829 hotplug_irqs = GEN11_DE_TC_HOTPLUG_MASK | GEN11_DE_TBT_HOTPLUG_MASK; 3830 3831 val = I915_READ(GEN11_DE_HPD_IMR); 3832 val &= ~hotplug_irqs; 3833 I915_WRITE(GEN11_DE_HPD_IMR, val); 3834 POSTING_READ(GEN11_DE_HPD_IMR); 3835 3836 gen11_hpd_detection_setup(dev_priv); 3837 3838 if (HAS_PCH_ICP(dev_priv)) 3839 icp_hpd_irq_setup(dev_priv); 3840 } 
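
/*
 * On SPT+ PCHs port E is reported through a second hotplug register
 * (PCH_PORT_HOTPLUG2), which is why the detection setup below writes two
 * registers and why spt_port_hotplug2_long_detect() decodes the long
 * pulse for that port separately.
 */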
3841 3842 static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv) 3843 { 3844 u32 val, hotplug; 3845 3846 /* Display WA #1179 WaHardHangonHotPlug: cnp */ 3847 if (HAS_PCH_CNP(dev_priv)) { 3848 val = I915_READ(SOUTH_CHICKEN1); 3849 val &= ~CHASSIS_CLK_REQ_DURATION_MASK; 3850 val |= CHASSIS_CLK_REQ_DURATION(0xf); 3851 I915_WRITE(SOUTH_CHICKEN1, val); 3852 } 3853 3854 /* Enable digital hotplug on the PCH */ 3855 hotplug = I915_READ(PCH_PORT_HOTPLUG); 3856 hotplug |= PORTA_HOTPLUG_ENABLE | 3857 PORTB_HOTPLUG_ENABLE | 3858 PORTC_HOTPLUG_ENABLE | 3859 PORTD_HOTPLUG_ENABLE; 3860 I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 3861 3862 hotplug = I915_READ(PCH_PORT_HOTPLUG2); 3863 hotplug |= PORTE_HOTPLUG_ENABLE; 3864 I915_WRITE(PCH_PORT_HOTPLUG2, hotplug); 3865 } 3866 3867 static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv) 3868 { 3869 u32 hotplug_irqs, enabled_irqs; 3870 3871 hotplug_irqs = SDE_HOTPLUG_MASK_SPT; 3872 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_spt); 3873 3874 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 3875 3876 spt_hpd_detection_setup(dev_priv); 3877 } 3878 3879 static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv) 3880 { 3881 u32 hotplug; 3882 3883 /* 3884 * Enable digital hotplug on the CPU, and configure the DP short pulse 3885 * duration to 2ms (which is the minimum in the Display Port spec) 3886 * The pulse duration bits are reserved on HSW+. 3887 */ 3888 hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL); 3889 hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK; 3890 hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE | 3891 DIGITAL_PORTA_PULSE_DURATION_2ms; 3892 I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug); 3893 } 3894 3895 static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv) 3896 { 3897 u32 hotplug_irqs, enabled_irqs; 3898 3899 if (INTEL_GEN(dev_priv) >= 8) { 3900 hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG; 3901 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bdw); 3902 3903 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs); 3904 } else if (INTEL_GEN(dev_priv) >= 7) { 3905 hotplug_irqs = DE_DP_A_HOTPLUG_IVB; 3906 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ivb); 3907 3908 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs); 3909 } else { 3910 hotplug_irqs = DE_DP_A_HOTPLUG; 3911 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ilk); 3912 3913 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs); 3914 } 3915 3916 ilk_hpd_detection_setup(dev_priv); 3917 3918 ibx_hpd_irq_setup(dev_priv); 3919 } 3920 3921 static void __bxt_hpd_detection_setup(struct drm_i915_private *dev_priv, 3922 u32 enabled_irqs) 3923 { 3924 u32 hotplug; 3925 3926 hotplug = I915_READ(PCH_PORT_HOTPLUG); 3927 hotplug |= PORTA_HOTPLUG_ENABLE | 3928 PORTB_HOTPLUG_ENABLE | 3929 PORTC_HOTPLUG_ENABLE; 3930 3931 DRM_DEBUG_KMS("Invert bit setting: hp_ctl:%x hp_port:%x\n", 3932 hotplug, enabled_irqs); 3933 hotplug &= ~BXT_DDI_HPD_INVERT_MASK; 3934 3935 /* 3936 * For BXT invert bit has to be set based on AOB design 3937 * for HPD detection logic, update it based on VBT fields. 
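 * (Example of the intent, as implemented just below: when the VBT marks a
 * port's HPD signal as inverted, intel_bios_is_port_hpd_inverted() returns
 * true for that port and the corresponding BXT_DDIx_HPD_INVERT bit is set.)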
3938 */ 3939 if ((enabled_irqs & BXT_DE_PORT_HP_DDIA) && 3940 intel_bios_is_port_hpd_inverted(dev_priv, PORT_A)) 3941 hotplug |= BXT_DDIA_HPD_INVERT; 3942 if ((enabled_irqs & BXT_DE_PORT_HP_DDIB) && 3943 intel_bios_is_port_hpd_inverted(dev_priv, PORT_B)) 3944 hotplug |= BXT_DDIB_HPD_INVERT; 3945 if ((enabled_irqs & BXT_DE_PORT_HP_DDIC) && 3946 intel_bios_is_port_hpd_inverted(dev_priv, PORT_C)) 3947 hotplug |= BXT_DDIC_HPD_INVERT; 3948 3949 I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 3950 } 3951 3952 static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv) 3953 { 3954 __bxt_hpd_detection_setup(dev_priv, BXT_DE_PORT_HOTPLUG_MASK); 3955 } 3956 3957 static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv) 3958 { 3959 u32 hotplug_irqs, enabled_irqs; 3960 3961 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bxt); 3962 hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK; 3963 3964 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs); 3965 3966 __bxt_hpd_detection_setup(dev_priv, enabled_irqs); 3967 } 3968 3969 static void ibx_irq_postinstall(struct drm_device *dev) 3970 { 3971 struct drm_i915_private *dev_priv = to_i915(dev); 3972 u32 mask; 3973 3974 if (HAS_PCH_NOP(dev_priv)) 3975 return; 3976 3977 if (HAS_PCH_IBX(dev_priv)) 3978 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON; 3979 else if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv)) 3980 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT; 3981 else 3982 mask = SDE_GMBUS_CPT; 3983 3984 gen3_assert_iir_is_zero(dev_priv, SDEIIR); 3985 I915_WRITE(SDEIMR, ~mask); 3986 3987 if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) || 3988 HAS_PCH_LPT(dev_priv)) 3989 ibx_hpd_detection_setup(dev_priv); 3990 else 3991 spt_hpd_detection_setup(dev_priv); 3992 } 3993 3994 static void gen5_gt_irq_postinstall(struct drm_device *dev) 3995 { 3996 struct drm_i915_private *dev_priv = to_i915(dev); 3997 u32 pm_irqs, gt_irqs; 3998 3999 pm_irqs = gt_irqs = 0; 4000 4001 dev_priv->gt_irq_mask = ~0; 4002 if (HAS_L3_DPF(dev_priv)) { 4003 /* L3 parity interrupt is always unmasked. */ 4004 dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev_priv); 4005 gt_irqs |= GT_PARITY_ERROR(dev_priv); 4006 } 4007 4008 gt_irqs |= GT_RENDER_USER_INTERRUPT; 4009 if (IS_GEN5(dev_priv)) { 4010 gt_irqs |= ILK_BSD_USER_INTERRUPT; 4011 } else { 4012 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT; 4013 } 4014 4015 GEN3_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs); 4016 4017 if (INTEL_GEN(dev_priv) >= 6) { 4018 /* 4019 * RPS interrupts will get enabled/disabled on demand when RPS 4020 * itself is enabled/disabled. 
4021 */ 4022 if (HAS_VEBOX(dev_priv)) { 4023 pm_irqs |= PM_VEBOX_USER_INTERRUPT; 4024 dev_priv->pm_ier |= PM_VEBOX_USER_INTERRUPT; 4025 } 4026 4027 dev_priv->pm_imr = 0xffffffff; 4028 GEN3_IRQ_INIT(GEN6_PM, dev_priv->pm_imr, pm_irqs); 4029 } 4030 } 4031 4032 static int ironlake_irq_postinstall(struct drm_device *dev) 4033 { 4034 struct drm_i915_private *dev_priv = to_i915(dev); 4035 u32 display_mask, extra_mask; 4036 4037 if (INTEL_GEN(dev_priv) >= 7) { 4038 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | 4039 DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB); 4040 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB | 4041 DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB | 4042 DE_DP_A_HOTPLUG_IVB); 4043 } else { 4044 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | 4045 DE_AUX_CHANNEL_A | DE_PIPEB_CRC_DONE | 4046 DE_PIPEA_CRC_DONE | DE_POISON); 4047 extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT | 4048 DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN | 4049 DE_DP_A_HOTPLUG); 4050 } 4051 4052 if (IS_HASWELL(dev_priv)) { 4053 gen3_assert_iir_is_zero(dev_priv, EDP_PSR_IIR); 4054 intel_psr_irq_control(dev_priv, dev_priv->psr.debug); 4055 display_mask |= DE_EDP_PSR_INT_HSW; 4056 } 4057 4058 dev_priv->irq_mask = ~display_mask; 4059 4060 ibx_irq_pre_postinstall(dev); 4061 4062 GEN3_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask); 4063 4064 gen5_gt_irq_postinstall(dev); 4065 4066 ilk_hpd_detection_setup(dev_priv); 4067 4068 ibx_irq_postinstall(dev); 4069 4070 if (IS_IRONLAKE_M(dev_priv)) { 4071 /* Enable PCU event interrupts 4072 * 4073 * spinlocking not required here for correctness since interrupt 4074 * setup is guaranteed to run in single-threaded context. But we 4075 * need it to make the assert_spin_locked happy. */ 4076 spin_lock_irq(&dev_priv->irq_lock); 4077 ilk_enable_display_irq(dev_priv, DE_PCU_EVENT); 4078 spin_unlock_irq(&dev_priv->irq_lock); 4079 } 4080 4081 return 0; 4082 } 4083 4084 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv) 4085 { 4086 lockdep_assert_held(&dev_priv->irq_lock); 4087 4088 if (dev_priv->display_irqs_enabled) 4089 return; 4090 4091 dev_priv->display_irqs_enabled = true; 4092 4093 if (intel_irqs_enabled(dev_priv)) { 4094 vlv_display_irq_reset(dev_priv); 4095 vlv_display_irq_postinstall(dev_priv); 4096 } 4097 } 4098 4099 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv) 4100 { 4101 lockdep_assert_held(&dev_priv->irq_lock); 4102 4103 if (!dev_priv->display_irqs_enabled) 4104 return; 4105 4106 dev_priv->display_irqs_enabled = false; 4107 4108 if (intel_irqs_enabled(dev_priv)) 4109 vlv_display_irq_reset(dev_priv); 4110 } 4111 4112 4113 static int valleyview_irq_postinstall(struct drm_device *dev) 4114 { 4115 struct drm_i915_private *dev_priv = to_i915(dev); 4116 4117 gen5_gt_irq_postinstall(dev); 4118 4119 spin_lock_irq(&dev_priv->irq_lock); 4120 if (dev_priv->display_irqs_enabled) 4121 vlv_display_irq_postinstall(dev_priv); 4122 spin_unlock_irq(&dev_priv->irq_lock); 4123 4124 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); 4125 POSTING_READ(VLV_MASTER_IER); 4126 4127 return 0; 4128 } 4129 4130 static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv) 4131 { 4132 /* These are interrupts we'll toggle with the ring mask register */ 4133 uint32_t gt_interrupts[] = { 4134 GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT | 4135 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT | 4136 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT | 4137 GT_CONTEXT_SWITCH_INTERRUPT << 
GEN8_BCS_IRQ_SHIFT,
4138 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
4139 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
4140 GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
4141 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
4142 0,
4143 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
4144 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
4145 };
4146
4147 if (HAS_L3_DPF(dev_priv))
4148 gt_interrupts[0] |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
4149
4150 dev_priv->pm_ier = 0x0;
4151 dev_priv->pm_imr = ~dev_priv->pm_ier;
4152 GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
4153 GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
4154 /*
4155 * RPS interrupts will get enabled/disabled on demand when RPS itself
4156 * is enabled/disabled. Same will be the case for GuC interrupts.
4157 */
4158 GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_imr, dev_priv->pm_ier);
4159 GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
4160 }
4161
4162 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
4163 {
4164 uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
4165 uint32_t de_pipe_enables;
4166 u32 de_port_masked = GEN8_AUX_CHANNEL_A;
4167 u32 de_port_enables;
4168 u32 de_misc_masked = GEN8_DE_EDP_PSR;
4169 enum pipe pipe;
4170
4171 if (INTEL_GEN(dev_priv) <= 10)
4172 de_misc_masked |= GEN8_DE_MISC_GSE;
4173
4174 if (INTEL_GEN(dev_priv) >= 9) {
4175 de_pipe_masked |= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
4176 de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
4177 GEN9_AUX_CHANNEL_D;
4178 if (IS_GEN9_LP(dev_priv))
4179 de_port_masked |= BXT_DE_PORT_GMBUS;
4180 } else {
4181 de_pipe_masked |= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
4182 }
4183
4184 if (INTEL_GEN(dev_priv) >= 11)
4185 de_port_masked |= ICL_AUX_CHANNEL_E;
4186
4187 if (IS_CNL_WITH_PORT_F(dev_priv) || INTEL_GEN(dev_priv) >= 11)
4188 de_port_masked |= CNL_AUX_CHANNEL_F;
4189
4190 de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
4191 GEN8_PIPE_FIFO_UNDERRUN;
4192
4193 de_port_enables = de_port_masked;
4194 if (IS_GEN9_LP(dev_priv))
4195 de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
4196 else if (IS_BROADWELL(dev_priv))
4197 de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;
4198
4199 gen3_assert_iir_is_zero(dev_priv, EDP_PSR_IIR);
4200 intel_psr_irq_control(dev_priv, dev_priv->psr.debug);
4201
4202 for_each_pipe(dev_priv, pipe) {
4203 dev_priv->de_irq_mask[pipe] = ~de_pipe_masked;
4204
4205 if (intel_display_power_is_enabled(dev_priv,
4206 POWER_DOMAIN_PIPE(pipe)))
4207 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
4208 dev_priv->de_irq_mask[pipe],
4209 de_pipe_enables);
4210 }
4211
4212 GEN3_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
4213 GEN3_IRQ_INIT(GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);
4214
4215 if (INTEL_GEN(dev_priv) >= 11) {
4216 u32 de_hpd_masked = 0;
4217 u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK |
4218 GEN11_DE_TBT_HOTPLUG_MASK;
4219
4220 GEN3_IRQ_INIT(GEN11_DE_HPD_, ~de_hpd_masked, de_hpd_enables);
4221 gen11_hpd_detection_setup(dev_priv);
4222 } else if (IS_GEN9_LP(dev_priv)) {
4223 bxt_hpd_detection_setup(dev_priv);
4224 } else if (IS_BROADWELL(dev_priv)) {
4225 ilk_hpd_detection_setup(dev_priv);
4226 }
4227 }
4228
4229 static int gen8_irq_postinstall(struct drm_device *dev)
4230 {
4231 struct drm_i915_private *dev_priv = to_i915(dev);
4232
4233 if (HAS_PCH_SPLIT(dev_priv))
4234 ibx_irq_pre_postinstall(dev);
4235
4236 gen8_gt_irq_postinstall(dev_priv);
4237 gen8_de_irq_postinstall(dev_priv);
4238
4239 if (HAS_PCH_SPLIT(dev_priv))
4240
ibx_irq_postinstall(dev); 4241 4242 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 4243 POSTING_READ(GEN8_MASTER_IRQ); 4244 4245 return 0; 4246 } 4247 4248 static void gen11_gt_irq_postinstall(struct drm_i915_private *dev_priv) 4249 { 4250 const u32 irqs = GT_RENDER_USER_INTERRUPT | GT_CONTEXT_SWITCH_INTERRUPT; 4251 4252 BUILD_BUG_ON(irqs & 0xffff0000); 4253 4254 /* Enable RCS, BCS, VCS and VECS class interrupts. */ 4255 I915_WRITE(GEN11_RENDER_COPY_INTR_ENABLE, irqs << 16 | irqs); 4256 I915_WRITE(GEN11_VCS_VECS_INTR_ENABLE, irqs << 16 | irqs); 4257 4258 /* Unmask irqs on RCS, BCS, VCS and VECS engines. */ 4259 I915_WRITE(GEN11_RCS0_RSVD_INTR_MASK, ~(irqs << 16)); 4260 I915_WRITE(GEN11_BCS_RSVD_INTR_MASK, ~(irqs << 16)); 4261 I915_WRITE(GEN11_VCS0_VCS1_INTR_MASK, ~(irqs | irqs << 16)); 4262 I915_WRITE(GEN11_VCS2_VCS3_INTR_MASK, ~(irqs | irqs << 16)); 4263 I915_WRITE(GEN11_VECS0_VECS1_INTR_MASK, ~(irqs | irqs << 16)); 4264 4265 /* 4266 * RPS interrupts will get enabled/disabled on demand when RPS itself 4267 * is enabled/disabled. 4268 */ 4269 dev_priv->pm_ier = 0x0; 4270 dev_priv->pm_imr = ~dev_priv->pm_ier; 4271 I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0); 4272 I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_MASK, ~0); 4273 } 4274 4275 static void icp_irq_postinstall(struct drm_device *dev) 4276 { 4277 struct drm_i915_private *dev_priv = to_i915(dev); 4278 u32 mask = SDE_GMBUS_ICP; 4279 4280 WARN_ON(I915_READ(SDEIER) != 0); 4281 I915_WRITE(SDEIER, 0xffffffff); 4282 POSTING_READ(SDEIER); 4283 4284 gen3_assert_iir_is_zero(dev_priv, SDEIIR); 4285 I915_WRITE(SDEIMR, ~mask); 4286 4287 icp_hpd_detection_setup(dev_priv); 4288 } 4289 4290 static int gen11_irq_postinstall(struct drm_device *dev) 4291 { 4292 struct drm_i915_private *dev_priv = dev->dev_private; 4293 u32 gu_misc_masked = GEN11_GU_MISC_GSE; 4294 4295 if (HAS_PCH_ICP(dev_priv)) 4296 icp_irq_postinstall(dev); 4297 4298 gen11_gt_irq_postinstall(dev_priv); 4299 gen8_de_irq_postinstall(dev_priv); 4300 4301 GEN3_IRQ_INIT(GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked); 4302 4303 I915_WRITE(GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE); 4304 4305 I915_WRITE(GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ); 4306 POSTING_READ(GEN11_GFX_MSTR_IRQ); 4307 4308 return 0; 4309 } 4310 4311 static int cherryview_irq_postinstall(struct drm_device *dev) 4312 { 4313 struct drm_i915_private *dev_priv = to_i915(dev); 4314 4315 gen8_gt_irq_postinstall(dev_priv); 4316 4317 spin_lock_irq(&dev_priv->irq_lock); 4318 if (dev_priv->display_irqs_enabled) 4319 vlv_display_irq_postinstall(dev_priv); 4320 spin_unlock_irq(&dev_priv->irq_lock); 4321 4322 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 4323 POSTING_READ(GEN8_MASTER_IRQ); 4324 4325 return 0; 4326 } 4327 4328 static void i8xx_irq_reset(struct drm_device *dev) 4329 { 4330 struct drm_i915_private *dev_priv = to_i915(dev); 4331 4332 i9xx_pipestat_irq_reset(dev_priv); 4333 4334 I915_WRITE16(HWSTAM, 0xffff); 4335 4336 GEN2_IRQ_RESET(); 4337 } 4338 4339 static int i8xx_irq_postinstall(struct drm_device *dev) 4340 { 4341 struct drm_i915_private *dev_priv = to_i915(dev); 4342 u16 enable_mask; 4343 4344 I915_WRITE16(EMR, ~(I915_ERROR_PAGE_TABLE | 4345 I915_ERROR_MEMORY_REFRESH)); 4346 4347 /* Unmask the interrupts that we always want on. 
*/ 4348 dev_priv->irq_mask = 4349 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 4350 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 4351 I915_MASTER_ERROR_INTERRUPT); 4352 4353 enable_mask = 4354 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 4355 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 4356 I915_MASTER_ERROR_INTERRUPT | 4357 I915_USER_INTERRUPT; 4358 4359 GEN2_IRQ_INIT(, dev_priv->irq_mask, enable_mask); 4360 4361 /* Interrupt setup is already guaranteed to be single-threaded, this is 4362 * just to make the assert_spin_locked check happy. */ 4363 spin_lock_irq(&dev_priv->irq_lock); 4364 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 4365 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 4366 spin_unlock_irq(&dev_priv->irq_lock); 4367 4368 return 0; 4369 } 4370 4371 static void i8xx_error_irq_ack(struct drm_i915_private *dev_priv, 4372 u16 *eir, u16 *eir_stuck) 4373 { 4374 u16 emr; 4375 4376 *eir = I915_READ16(EIR); 4377 4378 if (*eir) 4379 I915_WRITE16(EIR, *eir); 4380 4381 *eir_stuck = I915_READ16(EIR); 4382 if (*eir_stuck == 0) 4383 return; 4384 4385 /* 4386 * Toggle all EMR bits to make sure we get an edge 4387 * in the ISR master error bit if we don't clear 4388 * all the EIR bits. Otherwise the edge triggered 4389 * IIR on i965/g4x wouldn't notice that an interrupt 4390 * is still pending. Also some EIR bits can't be 4391 * cleared except by handling the underlying error 4392 * (or by a GPU reset) so we mask any bit that 4393 * remains set. 4394 */ 4395 emr = I915_READ16(EMR); 4396 I915_WRITE16(EMR, 0xffff); 4397 I915_WRITE16(EMR, emr | *eir_stuck); 4398 } 4399 4400 static void i8xx_error_irq_handler(struct drm_i915_private *dev_priv, 4401 u16 eir, u16 eir_stuck) 4402 { 4403 DRM_DEBUG("Master Error: EIR 0x%04x\n", eir); 4404 4405 if (eir_stuck) 4406 DRM_DEBUG_DRIVER("EIR stuck: 0x%04x, masked\n", eir_stuck); 4407 } 4408 4409 static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv, 4410 u32 *eir, u32 *eir_stuck) 4411 { 4412 u32 emr; 4413 4414 *eir = I915_READ(EIR); 4415 4416 I915_WRITE(EIR, *eir); 4417 4418 *eir_stuck = I915_READ(EIR); 4419 if (*eir_stuck == 0) 4420 return; 4421 4422 /* 4423 * Toggle all EMR bits to make sure we get an edge 4424 * in the ISR master error bit if we don't clear 4425 * all the EIR bits. Otherwise the edge triggered 4426 * IIR on i965/g4x wouldn't notice that an interrupt 4427 * is still pending. Also some EIR bits can't be 4428 * cleared except by handling the underlying error 4429 * (or by a GPU reset) so we mask any bit that 4430 * remains set. 
4431 */ 4432 emr = I915_READ(EMR); 4433 I915_WRITE(EMR, 0xffffffff); 4434 I915_WRITE(EMR, emr | *eir_stuck); 4435 } 4436 4437 static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv, 4438 u32 eir, u32 eir_stuck) 4439 { 4440 DRM_DEBUG("Master Error, EIR 0x%08x\n", eir); 4441 4442 if (eir_stuck) 4443 DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masked\n", eir_stuck); 4444 } 4445 4446 static irqreturn_t i8xx_irq_handler(int irq, void *arg) 4447 { 4448 struct drm_device *dev = arg; 4449 struct drm_i915_private *dev_priv = to_i915(dev); 4450 irqreturn_t ret = IRQ_NONE; 4451 4452 if (!intel_irqs_enabled(dev_priv)) 4453 return IRQ_NONE; 4454 4455 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 4456 disable_rpm_wakeref_asserts(dev_priv); 4457 4458 do { 4459 u32 pipe_stats[I915_MAX_PIPES] = {}; 4460 u16 eir = 0, eir_stuck = 0; 4461 u16 iir; 4462 4463 iir = I915_READ16(IIR); 4464 if (iir == 0) 4465 break; 4466 4467 ret = IRQ_HANDLED; 4468 4469 /* Call regardless, as some status bits might not be 4470 * signalled in iir */ 4471 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats); 4472 4473 if (iir & I915_MASTER_ERROR_INTERRUPT) 4474 i8xx_error_irq_ack(dev_priv, &eir, &eir_stuck); 4475 4476 I915_WRITE16(IIR, iir); 4477 4478 if (iir & I915_USER_INTERRUPT) 4479 notify_ring(dev_priv->engine[RCS]); 4480 4481 if (iir & I915_MASTER_ERROR_INTERRUPT) 4482 i8xx_error_irq_handler(dev_priv, eir, eir_stuck); 4483 4484 i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats); 4485 } while (0); 4486 4487 enable_rpm_wakeref_asserts(dev_priv); 4488 4489 return ret; 4490 } 4491 4492 static void i915_irq_reset(struct drm_device *dev) 4493 { 4494 struct drm_i915_private *dev_priv = to_i915(dev); 4495 4496 if (I915_HAS_HOTPLUG(dev_priv)) { 4497 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); 4498 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 4499 } 4500 4501 i9xx_pipestat_irq_reset(dev_priv); 4502 4503 I915_WRITE(HWSTAM, 0xffffffff); 4504 4505 GEN3_IRQ_RESET(); 4506 } 4507 4508 static int i915_irq_postinstall(struct drm_device *dev) 4509 { 4510 struct drm_i915_private *dev_priv = to_i915(dev); 4511 u32 enable_mask; 4512 4513 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | 4514 I915_ERROR_MEMORY_REFRESH)); 4515 4516 /* Unmask the interrupts that we always want on. */ 4517 dev_priv->irq_mask = 4518 ~(I915_ASLE_INTERRUPT | 4519 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 4520 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 4521 I915_MASTER_ERROR_INTERRUPT); 4522 4523 enable_mask = 4524 I915_ASLE_INTERRUPT | 4525 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 4526 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 4527 I915_MASTER_ERROR_INTERRUPT | 4528 I915_USER_INTERRUPT; 4529 4530 if (I915_HAS_HOTPLUG(dev_priv)) { 4531 /* Enable in IER... */ 4532 enable_mask |= I915_DISPLAY_PORT_INTERRUPT; 4533 /* and unmask in IMR */ 4534 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT; 4535 } 4536 4537 GEN3_IRQ_INIT(, dev_priv->irq_mask, enable_mask); 4538 4539 /* Interrupt setup is already guaranteed to be single-threaded, this is 4540 * just to make the assert_spin_locked check happy. 
*/ 4541 spin_lock_irq(&dev_priv->irq_lock); 4542 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 4543 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 4544 spin_unlock_irq(&dev_priv->irq_lock); 4545 4546 i915_enable_asle_pipestat(dev_priv); 4547 4548 return 0; 4549 } 4550 4551 static irqreturn_t i915_irq_handler(int irq, void *arg) 4552 { 4553 struct drm_device *dev = arg; 4554 struct drm_i915_private *dev_priv = to_i915(dev); 4555 irqreturn_t ret = IRQ_NONE; 4556 4557 if (!intel_irqs_enabled(dev_priv)) 4558 return IRQ_NONE; 4559 4560 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 4561 disable_rpm_wakeref_asserts(dev_priv); 4562 4563 do { 4564 u32 pipe_stats[I915_MAX_PIPES] = {}; 4565 u32 eir = 0, eir_stuck = 0; 4566 u32 hotplug_status = 0; 4567 u32 iir; 4568 4569 iir = I915_READ(IIR); 4570 if (iir == 0) 4571 break; 4572 4573 ret = IRQ_HANDLED; 4574 4575 if (I915_HAS_HOTPLUG(dev_priv) && 4576 iir & I915_DISPLAY_PORT_INTERRUPT) 4577 hotplug_status = i9xx_hpd_irq_ack(dev_priv); 4578 4579 /* Call regardless, as some status bits might not be 4580 * signalled in iir */ 4581 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats); 4582 4583 if (iir & I915_MASTER_ERROR_INTERRUPT) 4584 i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck); 4585 4586 I915_WRITE(IIR, iir); 4587 4588 if (iir & I915_USER_INTERRUPT) 4589 notify_ring(dev_priv->engine[RCS]); 4590 4591 if (iir & I915_MASTER_ERROR_INTERRUPT) 4592 i9xx_error_irq_handler(dev_priv, eir, eir_stuck); 4593 4594 if (hotplug_status) 4595 i9xx_hpd_irq_handler(dev_priv, hotplug_status); 4596 4597 i915_pipestat_irq_handler(dev_priv, iir, pipe_stats); 4598 } while (0); 4599 4600 enable_rpm_wakeref_asserts(dev_priv); 4601 4602 return ret; 4603 } 4604 4605 static void i965_irq_reset(struct drm_device *dev) 4606 { 4607 struct drm_i915_private *dev_priv = to_i915(dev); 4608 4609 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); 4610 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 4611 4612 i9xx_pipestat_irq_reset(dev_priv); 4613 4614 I915_WRITE(HWSTAM, 0xffffffff); 4615 4616 GEN3_IRQ_RESET(); 4617 } 4618 4619 static int i965_irq_postinstall(struct drm_device *dev) 4620 { 4621 struct drm_i915_private *dev_priv = to_i915(dev); 4622 u32 enable_mask; 4623 u32 error_mask; 4624 4625 /* 4626 * Enable some error detection, note the instruction error mask 4627 * bit is reserved, so we leave it masked. 4628 */ 4629 if (IS_G4X(dev_priv)) { 4630 error_mask = ~(GM45_ERROR_PAGE_TABLE | 4631 GM45_ERROR_MEM_PRIV | 4632 GM45_ERROR_CP_PRIV | 4633 I915_ERROR_MEMORY_REFRESH); 4634 } else { 4635 error_mask = ~(I915_ERROR_PAGE_TABLE | 4636 I915_ERROR_MEMORY_REFRESH); 4637 } 4638 I915_WRITE(EMR, error_mask); 4639 4640 /* Unmask the interrupts that we always want on. */ 4641 dev_priv->irq_mask = 4642 ~(I915_ASLE_INTERRUPT | 4643 I915_DISPLAY_PORT_INTERRUPT | 4644 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 4645 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 4646 I915_MASTER_ERROR_INTERRUPT); 4647 4648 enable_mask = 4649 I915_ASLE_INTERRUPT | 4650 I915_DISPLAY_PORT_INTERRUPT | 4651 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 4652 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 4653 I915_MASTER_ERROR_INTERRUPT | 4654 I915_USER_INTERRUPT; 4655 4656 if (IS_G4X(dev_priv)) 4657 enable_mask |= I915_BSD_USER_INTERRUPT; 4658 4659 GEN3_IRQ_INIT(, dev_priv->irq_mask, enable_mask); 4660 4661 /* Interrupt setup is already guaranteed to be single-threaded, this is 4662 * just to make the assert_spin_locked check happy. 
*/ 4663 spin_lock_irq(&dev_priv->irq_lock); 4664 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); 4665 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 4666 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 4667 spin_unlock_irq(&dev_priv->irq_lock); 4668 4669 i915_enable_asle_pipestat(dev_priv); 4670 4671 return 0; 4672 } 4673 4674 static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv) 4675 { 4676 u32 hotplug_en; 4677 4678 lockdep_assert_held(&dev_priv->irq_lock); 4679 4680 /* Note HDMI and DP share hotplug bits */ 4681 /* enable bits are the same for all generations */ 4682 hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915); 4683 /* Programming the CRT detection parameters tends 4684 to generate a spurious hotplug event about three 4685 seconds later. So just do it once. 4686 */ 4687 if (IS_G4X(dev_priv)) 4688 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64; 4689 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; 4690 4691 /* Ignore TV since it's buggy */ 4692 i915_hotplug_interrupt_update_locked(dev_priv, 4693 HOTPLUG_INT_EN_MASK | 4694 CRT_HOTPLUG_VOLTAGE_COMPARE_MASK | 4695 CRT_HOTPLUG_ACTIVATION_PERIOD_64, 4696 hotplug_en); 4697 } 4698 4699 static irqreturn_t i965_irq_handler(int irq, void *arg) 4700 { 4701 struct drm_device *dev = arg; 4702 struct drm_i915_private *dev_priv = to_i915(dev); 4703 irqreturn_t ret = IRQ_NONE; 4704 4705 if (!intel_irqs_enabled(dev_priv)) 4706 return IRQ_NONE; 4707 4708 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 4709 disable_rpm_wakeref_asserts(dev_priv); 4710 4711 do { 4712 u32 pipe_stats[I915_MAX_PIPES] = {}; 4713 u32 eir = 0, eir_stuck = 0; 4714 u32 hotplug_status = 0; 4715 u32 iir; 4716 4717 iir = I915_READ(IIR); 4718 if (iir == 0) 4719 break; 4720 4721 ret = IRQ_HANDLED; 4722 4723 if (iir & I915_DISPLAY_PORT_INTERRUPT) 4724 hotplug_status = i9xx_hpd_irq_ack(dev_priv); 4725 4726 /* Call regardless, as some status bits might not be 4727 * signalled in iir */ 4728 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats); 4729 4730 if (iir & I915_MASTER_ERROR_INTERRUPT) 4731 i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck); 4732 4733 I915_WRITE(IIR, iir); 4734 4735 if (iir & I915_USER_INTERRUPT) 4736 notify_ring(dev_priv->engine[RCS]); 4737 4738 if (iir & I915_BSD_USER_INTERRUPT) 4739 notify_ring(dev_priv->engine[VCS]); 4740 4741 if (iir & I915_MASTER_ERROR_INTERRUPT) 4742 i9xx_error_irq_handler(dev_priv, eir, eir_stuck); 4743 4744 if (hotplug_status) 4745 i9xx_hpd_irq_handler(dev_priv, hotplug_status); 4746 4747 i965_pipestat_irq_handler(dev_priv, iir, pipe_stats); 4748 } while (0); 4749 4750 enable_rpm_wakeref_asserts(dev_priv); 4751 4752 return ret; 4753 } 4754 4755 /** 4756 * intel_irq_init - initializes irq support 4757 * @dev_priv: i915 device instance 4758 * 4759 * This function initializes all the irq support including work items, timers 4760 * and all the vtables. It does not setup the interrupt itself though. 
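 *
 * A rough sketch of the intended call order (see the install/uninstall
 * helpers at the end of this file): intel_irq_init() runs first during
 * driver load, intel_irq_install() then registers and enables the interrupt,
 * and intel_irq_fini() releases the associated resources on unload.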
4761 */
4762 void intel_irq_init(struct drm_i915_private *dev_priv)
4763 {
4764 struct drm_device *dev = &dev_priv->drm;
4765 struct intel_rps *rps = &dev_priv->gt_pm.rps;
4766 int i;
4767
4768 intel_hpd_init_work(dev_priv);
4769
4770 INIT_WORK(&rps->work, gen6_pm_rps_work);
4771
4772 INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
4773 for (i = 0; i < MAX_L3_SLICES; ++i)
4774 dev_priv->l3_parity.remap_info[i] = NULL;
4775
4776 if (HAS_GUC_SCHED(dev_priv))
4777 dev_priv->pm_guc_events = GEN9_GUC_TO_HOST_INT_EVENT;
4778
4779 /* Let's track the enabled rps events */
4780 if (IS_VALLEYVIEW(dev_priv))
4781 /* WaGsvRC0ResidencyMethod:vlv */
4782 dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
4783 else
4784 dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
4785
4786 rps->pm_intrmsk_mbz = 0;
4787
4788 /*
4789 * SNB, IVB and HSW can hard hang, and VLV and CHV may hard hang, on a
4790 * looping batchbuffer if GEN6_PM_UP_EI_EXPIRED is masked.
4791 *
4792 * TODO: verify if this can be reproduced on VLV,CHV.
4793 */
4794 if (INTEL_GEN(dev_priv) <= 7)
4795 rps->pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED;
4796
4797 if (INTEL_GEN(dev_priv) >= 8)
4798 rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
4799
4800 if (IS_GEN2(dev_priv)) {
4801 /* Gen2 doesn't have a hardware frame counter */
4802 dev->max_vblank_count = 0;
4803 } else if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
4804 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
4805 dev->driver->get_vblank_counter = g4x_get_vblank_counter;
4806 } else {
4807 dev->driver->get_vblank_counter = i915_get_vblank_counter;
4808 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
4809 }
4810
4811 /*
4812 * Opt out of the vblank disable timer on everything except gen2.
4813 * Gen2 doesn't have a hardware frame counter and so depends on
4814 * vblank interrupts to produce sane vblank sequence numbers.
4815 */
4816 if (!IS_GEN2(dev_priv))
4817 dev->vblank_disable_immediate = true;
4818
4819 /* Most platforms treat the display irq block as an always-on
4820 * power domain. vlv/chv can disable it at runtime and need
4821 * special care to avoid writing any of the display block registers
4822 * outside of the power domain. We defer setting up the display irqs
4823 * in this case to the runtime pm.
4824 */ 4825 dev_priv->display_irqs_enabled = true; 4826 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 4827 dev_priv->display_irqs_enabled = false; 4828 4829 dev_priv->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD; 4830 4831 dev->driver->get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos; 4832 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos; 4833 4834 if (IS_CHERRYVIEW(dev_priv)) { 4835 dev->driver->irq_handler = cherryview_irq_handler; 4836 dev->driver->irq_preinstall = cherryview_irq_reset; 4837 dev->driver->irq_postinstall = cherryview_irq_postinstall; 4838 dev->driver->irq_uninstall = cherryview_irq_reset; 4839 dev->driver->enable_vblank = i965_enable_vblank; 4840 dev->driver->disable_vblank = i965_disable_vblank; 4841 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 4842 } else if (IS_VALLEYVIEW(dev_priv)) { 4843 dev->driver->irq_handler = valleyview_irq_handler; 4844 dev->driver->irq_preinstall = valleyview_irq_reset; 4845 dev->driver->irq_postinstall = valleyview_irq_postinstall; 4846 dev->driver->irq_uninstall = valleyview_irq_reset; 4847 dev->driver->enable_vblank = i965_enable_vblank; 4848 dev->driver->disable_vblank = i965_disable_vblank; 4849 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 4850 } else if (INTEL_GEN(dev_priv) >= 11) { 4851 dev->driver->irq_handler = gen11_irq_handler; 4852 dev->driver->irq_preinstall = gen11_irq_reset; 4853 dev->driver->irq_postinstall = gen11_irq_postinstall; 4854 dev->driver->irq_uninstall = gen11_irq_reset; 4855 dev->driver->enable_vblank = gen8_enable_vblank; 4856 dev->driver->disable_vblank = gen8_disable_vblank; 4857 dev_priv->display.hpd_irq_setup = gen11_hpd_irq_setup; 4858 } else if (INTEL_GEN(dev_priv) >= 8) { 4859 dev->driver->irq_handler = gen8_irq_handler; 4860 dev->driver->irq_preinstall = gen8_irq_reset; 4861 dev->driver->irq_postinstall = gen8_irq_postinstall; 4862 dev->driver->irq_uninstall = gen8_irq_reset; 4863 dev->driver->enable_vblank = gen8_enable_vblank; 4864 dev->driver->disable_vblank = gen8_disable_vblank; 4865 if (IS_GEN9_LP(dev_priv)) 4866 dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup; 4867 else if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv) || 4868 HAS_PCH_CNP(dev_priv)) 4869 dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup; 4870 else 4871 dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup; 4872 } else if (HAS_PCH_SPLIT(dev_priv)) { 4873 dev->driver->irq_handler = ironlake_irq_handler; 4874 dev->driver->irq_preinstall = ironlake_irq_reset; 4875 dev->driver->irq_postinstall = ironlake_irq_postinstall; 4876 dev->driver->irq_uninstall = ironlake_irq_reset; 4877 dev->driver->enable_vblank = ironlake_enable_vblank; 4878 dev->driver->disable_vblank = ironlake_disable_vblank; 4879 dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup; 4880 } else { 4881 if (IS_GEN2(dev_priv)) { 4882 dev->driver->irq_preinstall = i8xx_irq_reset; 4883 dev->driver->irq_postinstall = i8xx_irq_postinstall; 4884 dev->driver->irq_handler = i8xx_irq_handler; 4885 dev->driver->irq_uninstall = i8xx_irq_reset; 4886 dev->driver->enable_vblank = i8xx_enable_vblank; 4887 dev->driver->disable_vblank = i8xx_disable_vblank; 4888 } else if (IS_GEN3(dev_priv)) { 4889 dev->driver->irq_preinstall = i915_irq_reset; 4890 dev->driver->irq_postinstall = i915_irq_postinstall; 4891 dev->driver->irq_uninstall = i915_irq_reset; 4892 dev->driver->irq_handler = i915_irq_handler; 4893 dev->driver->enable_vblank = i8xx_enable_vblank; 4894 dev->driver->disable_vblank = i8xx_disable_vblank; 4895 } else { 
4896 dev->driver->irq_preinstall = i965_irq_reset;
4897 dev->driver->irq_postinstall = i965_irq_postinstall;
4898 dev->driver->irq_uninstall = i965_irq_reset;
4899 dev->driver->irq_handler = i965_irq_handler;
4900 dev->driver->enable_vblank = i965_enable_vblank;
4901 dev->driver->disable_vblank = i965_disable_vblank;
4902 }
4903 if (I915_HAS_HOTPLUG(dev_priv))
4904 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4905 }
4906 }
4907
4908 /**
4909 * intel_irq_fini - deinitializes IRQ support
4910 * @i915: i915 device instance
4911 *
4912 * This function deinitializes all the IRQ support.
4913 */
4914 void intel_irq_fini(struct drm_i915_private *i915)
4915 {
4916 int i;
4917
4918 for (i = 0; i < MAX_L3_SLICES; ++i)
4919 kfree(i915->l3_parity.remap_info[i]);
4920 }
4921
4922 /**
4923 * intel_irq_install - enables the hardware interrupt
4924 * @dev_priv: i915 device instance
4925 *
4926 * This function enables the hardware interrupt handling, but leaves the hotplug
4927 * handling still disabled. It is called after intel_irq_init().
4928 *
4929 * In the driver load and resume code we need working interrupts in a few places
4930 * but don't want to deal with the hassle of concurrent probe and hotplug
4931 * workers. Hence the split into this two-stage approach.
4932 */
4933 int intel_irq_install(struct drm_i915_private *dev_priv)
4934 {
4935 /*
4936 * We enable some interrupt sources in our postinstall hooks, so mark
4937 * interrupts as enabled _before_ actually enabling them to avoid
4938 * special cases in our ordering checks.
4939 */
4940 dev_priv->runtime_pm.irqs_enabled = true;
4941
4942 return drm_irq_install(&dev_priv->drm, dev_priv->drm.pdev->irq);
4943 }
4944
4945 /**
4946 * intel_irq_uninstall - finalizes all irq handling
4947 * @dev_priv: i915 device instance
4948 *
4949 * This stops interrupt and hotplug handling and unregisters and frees all
4950 * resources acquired in the init functions.
4951 */
4952 void intel_irq_uninstall(struct drm_i915_private *dev_priv)
4953 {
4954 drm_irq_uninstall(&dev_priv->drm);
4955 intel_hpd_cancel_work(dev_priv);
4956 dev_priv->runtime_pm.irqs_enabled = false;
4957 }
4958
4959 /**
4960 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
4961 * @dev_priv: i915 device instance
4962 *
4963 * This function is used to disable interrupts at runtime, both in the runtime
4964 * pm and the system suspend/resume code.
4965 */
4966 void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
4967 {
4968 dev_priv->drm.driver->irq_uninstall(&dev_priv->drm);
4969 dev_priv->runtime_pm.irqs_enabled = false;
4970 synchronize_irq(dev_priv->drm.irq);
4971 }
4972
4973 /**
4974 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
4975 * @dev_priv: i915 device instance
4976 *
4977 * This function is used to enable interrupts at runtime, both in the runtime
4978 * pm and the system suspend/resume code.
4979 */
4980 void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
4981 {
4982 dev_priv->runtime_pm.irqs_enabled = true;
4983 dev_priv->drm.driver->irq_preinstall(&dev_priv->drm);
4984 dev_priv->drm.driver->irq_postinstall(&dev_priv->drm);
4985 }
4986
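/*
 * Rough usage sketch for the entry points above (a simplified outline of the
 * standard load/unload flow, not copied verbatim from the probe code; the
 * err_irq label is illustrative only):
 *
 *	intel_irq_init(dev_priv);
 *	ret = intel_irq_install(dev_priv);
 *	if (ret)
 *		goto err_irq;
 *	...
 *	intel_irq_uninstall(dev_priv);
 *	intel_irq_fini(dev_priv);
 *
 * intel_runtime_pm_disable_interrupts() and
 * intel_runtime_pm_enable_interrupts() bracket runtime suspend and resume.
 */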