1 /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*- 2 */ 3 /* 4 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. 5 * All Rights Reserved. 6 * 7 * Permission is hereby granted, free of charge, to any person obtaining a 8 * copy of this software and associated documentation files (the 9 * "Software"), to deal in the Software without restriction, including 10 * without limitation the rights to use, copy, modify, merge, publish, 11 * distribute, sub license, and/or sell copies of the Software, and to 12 * permit persons to whom the Software is furnished to do so, subject to 13 * the following conditions: 14 * 15 * The above copyright notice and this permission notice (including the 16 * next paragraph) shall be included in all copies or substantial portions 17 * of the Software. 18 * 19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 22 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR 23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, 24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 26 * 27 */ 28 29 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 30 31 #include <linux/circ_buf.h> 32 #include <linux/cpuidle.h> 33 #include <linux/slab.h> 34 #include <linux/sysrq.h> 35 36 #include <drm/drm_drv.h> 37 #include <drm/drm_irq.h> 38 #include <drm/i915_drm.h> 39 40 #include "i915_drv.h" 41 #include "i915_trace.h" 42 #include "intel_drv.h" 43 #include "intel_psr.h" 44 45 /** 46 * DOC: interrupt handling 47 * 48 * These functions provide the basic support for enabling and disabling the 49 * interrupt handling support. There's a lot more functionality in i915_irq.c 50 * and related files, but that will be described in separate chapters. 
51 */ 52 53 static const u32 hpd_ilk[HPD_NUM_PINS] = { 54 [HPD_PORT_A] = DE_DP_A_HOTPLUG, 55 }; 56 57 static const u32 hpd_ivb[HPD_NUM_PINS] = { 58 [HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB, 59 }; 60 61 static const u32 hpd_bdw[HPD_NUM_PINS] = { 62 [HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG, 63 }; 64 65 static const u32 hpd_ibx[HPD_NUM_PINS] = { 66 [HPD_CRT] = SDE_CRT_HOTPLUG, 67 [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG, 68 [HPD_PORT_B] = SDE_PORTB_HOTPLUG, 69 [HPD_PORT_C] = SDE_PORTC_HOTPLUG, 70 [HPD_PORT_D] = SDE_PORTD_HOTPLUG 71 }; 72 73 static const u32 hpd_cpt[HPD_NUM_PINS] = { 74 [HPD_CRT] = SDE_CRT_HOTPLUG_CPT, 75 [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT, 76 [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT, 77 [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT, 78 [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT 79 }; 80 81 static const u32 hpd_spt[HPD_NUM_PINS] = { 82 [HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT, 83 [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT, 84 [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT, 85 [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT, 86 [HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT 87 }; 88 89 static const u32 hpd_mask_i915[HPD_NUM_PINS] = { 90 [HPD_CRT] = CRT_HOTPLUG_INT_EN, 91 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN, 92 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN, 93 [HPD_PORT_B] = PORTB_HOTPLUG_INT_EN, 94 [HPD_PORT_C] = PORTC_HOTPLUG_INT_EN, 95 [HPD_PORT_D] = PORTD_HOTPLUG_INT_EN 96 }; 97 98 static const u32 hpd_status_g4x[HPD_NUM_PINS] = { 99 [HPD_CRT] = CRT_HOTPLUG_INT_STATUS, 100 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X, 101 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X, 102 [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS, 103 [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS, 104 [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS 105 }; 106 107 static const u32 hpd_status_i915[HPD_NUM_PINS] = { 108 [HPD_CRT] = CRT_HOTPLUG_INT_STATUS, 109 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915, 110 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915, 111 [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS, 112 [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS, 113 [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS 114 }; 115 116 /* BXT hpd list */ 117 static const u32 hpd_bxt[HPD_NUM_PINS] = { 118 [HPD_PORT_A] = BXT_DE_PORT_HP_DDIA, 119 [HPD_PORT_B] = BXT_DE_PORT_HP_DDIB, 120 [HPD_PORT_C] = BXT_DE_PORT_HP_DDIC 121 }; 122 123 static const u32 hpd_gen11[HPD_NUM_PINS] = { 124 [HPD_PORT_C] = GEN11_TC1_HOTPLUG | GEN11_TBT1_HOTPLUG, 125 [HPD_PORT_D] = GEN11_TC2_HOTPLUG | GEN11_TBT2_HOTPLUG, 126 [HPD_PORT_E] = GEN11_TC3_HOTPLUG | GEN11_TBT3_HOTPLUG, 127 [HPD_PORT_F] = GEN11_TC4_HOTPLUG | GEN11_TBT4_HOTPLUG 128 }; 129 130 static const u32 hpd_icp[HPD_NUM_PINS] = { 131 [HPD_PORT_A] = SDE_DDIA_HOTPLUG_ICP, 132 [HPD_PORT_B] = SDE_DDIB_HOTPLUG_ICP, 133 [HPD_PORT_C] = SDE_TC1_HOTPLUG_ICP, 134 [HPD_PORT_D] = SDE_TC2_HOTPLUG_ICP, 135 [HPD_PORT_E] = SDE_TC3_HOTPLUG_ICP, 136 [HPD_PORT_F] = SDE_TC4_HOTPLUG_ICP 137 }; 138 139 static void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr, 140 i915_reg_t iir, i915_reg_t ier) 141 { 142 intel_uncore_write(uncore, imr, 0xffffffff); 143 intel_uncore_posting_read(uncore, imr); 144 145 intel_uncore_write(uncore, ier, 0); 146 147 /* IIR can theoretically queue up two events. Be paranoid. 
*/ 148 intel_uncore_write(uncore, iir, 0xffffffff); 149 intel_uncore_posting_read(uncore, iir); 150 intel_uncore_write(uncore, iir, 0xffffffff); 151 intel_uncore_posting_read(uncore, iir); 152 } 153 154 static void gen2_irq_reset(struct intel_uncore *uncore) 155 { 156 intel_uncore_write16(uncore, GEN2_IMR, 0xffff); 157 intel_uncore_posting_read16(uncore, GEN2_IMR); 158 159 intel_uncore_write16(uncore, GEN2_IER, 0); 160 161 /* IIR can theoretically queue up two events. Be paranoid. */ 162 intel_uncore_write16(uncore, GEN2_IIR, 0xffff); 163 intel_uncore_posting_read16(uncore, GEN2_IIR); 164 intel_uncore_write16(uncore, GEN2_IIR, 0xffff); 165 intel_uncore_posting_read16(uncore, GEN2_IIR); 166 } 167 168 #define GEN8_IRQ_RESET_NDX(uncore, type, which) \ 169 ({ \ 170 unsigned int which_ = which; \ 171 gen3_irq_reset((uncore), GEN8_##type##_IMR(which_), \ 172 GEN8_##type##_IIR(which_), GEN8_##type##_IER(which_)); \ 173 }) 174 175 #define GEN3_IRQ_RESET(uncore, type) \ 176 gen3_irq_reset((uncore), type##IMR, type##IIR, type##IER) 177 178 #define GEN2_IRQ_RESET(uncore) \ 179 gen2_irq_reset(uncore) 180 181 /* 182 * We should clear IMR at preinstall/uninstall, and just check at postinstall. 183 */ 184 static void gen3_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg) 185 { 186 u32 val = intel_uncore_read(uncore, reg); 187 188 if (val == 0) 189 return; 190 191 WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n", 192 i915_mmio_reg_offset(reg), val); 193 intel_uncore_write(uncore, reg, 0xffffffff); 194 intel_uncore_posting_read(uncore, reg); 195 intel_uncore_write(uncore, reg, 0xffffffff); 196 intel_uncore_posting_read(uncore, reg); 197 } 198 199 static void gen2_assert_iir_is_zero(struct intel_uncore *uncore) 200 { 201 u16 val = intel_uncore_read16(uncore, GEN2_IIR); 202 203 if (val == 0) 204 return; 205 206 WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n", 207 i915_mmio_reg_offset(GEN2_IIR), val); 208 intel_uncore_write16(uncore, GEN2_IIR, 0xffff); 209 intel_uncore_posting_read16(uncore, GEN2_IIR); 210 intel_uncore_write16(uncore, GEN2_IIR, 0xffff); 211 intel_uncore_posting_read16(uncore, GEN2_IIR); 212 } 213 214 static void gen3_irq_init(struct intel_uncore *uncore, 215 i915_reg_t imr, u32 imr_val, 216 i915_reg_t ier, u32 ier_val, 217 i915_reg_t iir) 218 { 219 gen3_assert_iir_is_zero(uncore, iir); 220 221 intel_uncore_write(uncore, ier, ier_val); 222 intel_uncore_write(uncore, imr, imr_val); 223 intel_uncore_posting_read(uncore, imr); 224 } 225 226 static void gen2_irq_init(struct intel_uncore *uncore, 227 u32 imr_val, u32 ier_val) 228 { 229 gen2_assert_iir_is_zero(uncore); 230 231 intel_uncore_write16(uncore, GEN2_IER, ier_val); 232 intel_uncore_write16(uncore, GEN2_IMR, imr_val); 233 intel_uncore_posting_read16(uncore, GEN2_IMR); 234 } 235 236 #define GEN8_IRQ_INIT_NDX(uncore, type, which, imr_val, ier_val) \ 237 ({ \ 238 unsigned int which_ = which; \ 239 gen3_irq_init((uncore), \ 240 GEN8_##type##_IMR(which_), imr_val, \ 241 GEN8_##type##_IER(which_), ier_val, \ 242 GEN8_##type##_IIR(which_)); \ 243 }) 244 245 #define GEN3_IRQ_INIT(uncore, type, imr_val, ier_val) \ 246 gen3_irq_init((uncore), \ 247 type##IMR, imr_val, \ 248 type##IER, ier_val, \ 249 type##IIR) 250 251 #define GEN2_IRQ_INIT(uncore, imr_val, ier_val) \ 252 gen2_irq_init((uncore), imr_val, ier_val) 253 254 static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir); 255 static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir); 256 257 /* For display hotplug 
interrupt */
static inline void
i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
                                     u32 mask,
                                     u32 bits)
{
        u32 val;

        lockdep_assert_held(&dev_priv->irq_lock);
        WARN_ON(bits & ~mask);

        val = I915_READ(PORT_HOTPLUG_EN);
        val &= ~mask;
        val |= bits;
        I915_WRITE(PORT_HOTPLUG_EN, val);
}

/**
 * i915_hotplug_interrupt_update - update hotplug interrupt enable
 * @dev_priv: driver private
 * @mask: bits to update
 * @bits: bits to enable
 * NOTE: the HPD enable bits are modified both inside and outside
 * of an interrupt context. To avoid that read-modify-write cycles
 * interfere, these bits are protected by a spinlock. Since this
 * function is usually not called from a context where the lock is
 * held already, this function acquires the lock itself. A non-locking
 * version is also available.
 */
void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
                                   u32 mask,
                                   u32 bits)
{
        spin_lock_irq(&dev_priv->irq_lock);
        i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
        spin_unlock_irq(&dev_priv->irq_lock);
}

static u32
gen11_gt_engine_identity(struct drm_i915_private * const i915,
                         const unsigned int bank, const unsigned int bit);

static bool gen11_reset_one_iir(struct drm_i915_private * const i915,
                                const unsigned int bank,
                                const unsigned int bit)
{
        void __iomem * const regs = i915->uncore.regs;
        u32 dw;

        lockdep_assert_held(&i915->irq_lock);

        dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
        if (dw & BIT(bit)) {
                /*
                 * According to the BSpec, DW_IIR bits cannot be cleared without
                 * first servicing the Selector & Shared IIR registers.
                 */
                gen11_gt_engine_identity(i915, bank, bit);

                /*
                 * We locked GT INT DW by reading it. If we want to (try
                 * to) recover from this successfully, we need to clear
                 * our bit, otherwise we are locking the register for
                 * everybody.
321 */ 322 raw_reg_write(regs, GEN11_GT_INTR_DW(bank), BIT(bit)); 323 324 return true; 325 } 326 327 return false; 328 } 329 330 /** 331 * ilk_update_display_irq - update DEIMR 332 * @dev_priv: driver private 333 * @interrupt_mask: mask of interrupt bits to update 334 * @enabled_irq_mask: mask of interrupt bits to enable 335 */ 336 void ilk_update_display_irq(struct drm_i915_private *dev_priv, 337 u32 interrupt_mask, 338 u32 enabled_irq_mask) 339 { 340 u32 new_val; 341 342 lockdep_assert_held(&dev_priv->irq_lock); 343 344 WARN_ON(enabled_irq_mask & ~interrupt_mask); 345 346 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 347 return; 348 349 new_val = dev_priv->irq_mask; 350 new_val &= ~interrupt_mask; 351 new_val |= (~enabled_irq_mask & interrupt_mask); 352 353 if (new_val != dev_priv->irq_mask) { 354 dev_priv->irq_mask = new_val; 355 I915_WRITE(DEIMR, dev_priv->irq_mask); 356 POSTING_READ(DEIMR); 357 } 358 } 359 360 /** 361 * ilk_update_gt_irq - update GTIMR 362 * @dev_priv: driver private 363 * @interrupt_mask: mask of interrupt bits to update 364 * @enabled_irq_mask: mask of interrupt bits to enable 365 */ 366 static void ilk_update_gt_irq(struct drm_i915_private *dev_priv, 367 u32 interrupt_mask, 368 u32 enabled_irq_mask) 369 { 370 lockdep_assert_held(&dev_priv->irq_lock); 371 372 WARN_ON(enabled_irq_mask & ~interrupt_mask); 373 374 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 375 return; 376 377 dev_priv->gt_irq_mask &= ~interrupt_mask; 378 dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask); 379 I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 380 } 381 382 void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, u32 mask) 383 { 384 ilk_update_gt_irq(dev_priv, mask, mask); 385 POSTING_READ_FW(GTIMR); 386 } 387 388 void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, u32 mask) 389 { 390 ilk_update_gt_irq(dev_priv, mask, 0); 391 } 392 393 static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv) 394 { 395 WARN_ON_ONCE(INTEL_GEN(dev_priv) >= 11); 396 397 return INTEL_GEN(dev_priv) >= 8 ? 
                GEN8_GT_IIR(2) : GEN6_PMIIR;
}

static void write_pm_imr(struct drm_i915_private *dev_priv)
{
        i915_reg_t reg;
        u32 mask = dev_priv->pm_imr;

        if (INTEL_GEN(dev_priv) >= 11) {
                reg = GEN11_GPM_WGBOXPERF_INTR_MASK;
                /* pm is in upper half */
                mask = mask << 16;
        } else if (INTEL_GEN(dev_priv) >= 8) {
                reg = GEN8_GT_IMR(2);
        } else {
                reg = GEN6_PMIMR;
        }

        I915_WRITE(reg, mask);
        POSTING_READ(reg);
}

static void write_pm_ier(struct drm_i915_private *dev_priv)
{
        i915_reg_t reg;
        u32 mask = dev_priv->pm_ier;

        if (INTEL_GEN(dev_priv) >= 11) {
                reg = GEN11_GPM_WGBOXPERF_INTR_ENABLE;
                /* pm is in upper half */
                mask = mask << 16;
        } else if (INTEL_GEN(dev_priv) >= 8) {
                reg = GEN8_GT_IER(2);
        } else {
                reg = GEN6_PMIER;
        }

        I915_WRITE(reg, mask);
}

/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
                              u32 interrupt_mask,
                              u32 enabled_irq_mask)
{
        u32 new_val;

        WARN_ON(enabled_irq_mask & ~interrupt_mask);

        lockdep_assert_held(&dev_priv->irq_lock);

        new_val = dev_priv->pm_imr;
        new_val &= ~interrupt_mask;
        new_val |= (~enabled_irq_mask & interrupt_mask);

        if (new_val != dev_priv->pm_imr) {
                dev_priv->pm_imr = new_val;
                write_pm_imr(dev_priv);
        }
}

void gen6_unmask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
{
        if (WARN_ON(!intel_irqs_enabled(dev_priv)))
                return;

        snb_update_pm_irq(dev_priv, mask, mask);
}

static void __gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
{
        snb_update_pm_irq(dev_priv, mask, 0);
}

void gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
{
        if (WARN_ON(!intel_irqs_enabled(dev_priv)))
                return;

        __gen6_mask_pm_irq(dev_priv, mask);
}

static void gen6_reset_pm_iir(struct drm_i915_private *dev_priv, u32 reset_mask)
{
        i915_reg_t reg = gen6_pm_iir(dev_priv);

        lockdep_assert_held(&dev_priv->irq_lock);

        I915_WRITE(reg, reset_mask);
        I915_WRITE(reg, reset_mask);
        POSTING_READ(reg);
}

static void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, u32 enable_mask)
{
        lockdep_assert_held(&dev_priv->irq_lock);

        dev_priv->pm_ier |= enable_mask;
        write_pm_ier(dev_priv);
        gen6_unmask_pm_irq(dev_priv, enable_mask);
        /* unmask_pm_irq provides an implicit barrier (POSTING_READ) */
}

static void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, u32 disable_mask)
{
        lockdep_assert_held(&dev_priv->irq_lock);

        dev_priv->pm_ier &= ~disable_mask;
        __gen6_mask_pm_irq(dev_priv, disable_mask);
        write_pm_ier(dev_priv);
        /* a barrier is missing here, but we don't really need one */
}

void gen11_reset_rps_interrupts(struct drm_i915_private *dev_priv)
{
        spin_lock_irq(&dev_priv->irq_lock);

        while (gen11_reset_one_iir(dev_priv, 0, GEN11_GTPM))
                ;

        dev_priv->gt_pm.rps.pm_iir = 0;

        spin_unlock_irq(&dev_priv->irq_lock);
}

void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
{
        spin_lock_irq(&dev_priv->irq_lock);
        gen6_reset_pm_iir(dev_priv, GEN6_PM_RPS_EVENTS);
        dev_priv->gt_pm.rps.pm_iir = 0;
spin_unlock_irq(&dev_priv->irq_lock); 533 } 534 535 void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv) 536 { 537 struct intel_rps *rps = &dev_priv->gt_pm.rps; 538 539 if (READ_ONCE(rps->interrupts_enabled)) 540 return; 541 542 spin_lock_irq(&dev_priv->irq_lock); 543 WARN_ON_ONCE(rps->pm_iir); 544 545 if (INTEL_GEN(dev_priv) >= 11) 546 WARN_ON_ONCE(gen11_reset_one_iir(dev_priv, 0, GEN11_GTPM)); 547 else 548 WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events); 549 550 rps->interrupts_enabled = true; 551 gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events); 552 553 spin_unlock_irq(&dev_priv->irq_lock); 554 } 555 556 void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv) 557 { 558 struct intel_rps *rps = &dev_priv->gt_pm.rps; 559 560 if (!READ_ONCE(rps->interrupts_enabled)) 561 return; 562 563 spin_lock_irq(&dev_priv->irq_lock); 564 rps->interrupts_enabled = false; 565 566 I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0u)); 567 568 gen6_disable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS); 569 570 spin_unlock_irq(&dev_priv->irq_lock); 571 synchronize_irq(dev_priv->drm.irq); 572 573 /* Now that we will not be generating any more work, flush any 574 * outstanding tasks. As we are called on the RPS idle path, 575 * we will reset the GPU to minimum frequencies, so the current 576 * state of the worker can be discarded. 577 */ 578 cancel_work_sync(&rps->work); 579 if (INTEL_GEN(dev_priv) >= 11) 580 gen11_reset_rps_interrupts(dev_priv); 581 else 582 gen6_reset_rps_interrupts(dev_priv); 583 } 584 585 void gen9_reset_guc_interrupts(struct drm_i915_private *dev_priv) 586 { 587 assert_rpm_wakelock_held(dev_priv); 588 589 spin_lock_irq(&dev_priv->irq_lock); 590 gen6_reset_pm_iir(dev_priv, dev_priv->pm_guc_events); 591 spin_unlock_irq(&dev_priv->irq_lock); 592 } 593 594 void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv) 595 { 596 assert_rpm_wakelock_held(dev_priv); 597 598 spin_lock_irq(&dev_priv->irq_lock); 599 if (!dev_priv->guc.interrupts_enabled) { 600 WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & 601 dev_priv->pm_guc_events); 602 dev_priv->guc.interrupts_enabled = true; 603 gen6_enable_pm_irq(dev_priv, dev_priv->pm_guc_events); 604 } 605 spin_unlock_irq(&dev_priv->irq_lock); 606 } 607 608 void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv) 609 { 610 assert_rpm_wakelock_held(dev_priv); 611 612 spin_lock_irq(&dev_priv->irq_lock); 613 dev_priv->guc.interrupts_enabled = false; 614 615 gen6_disable_pm_irq(dev_priv, dev_priv->pm_guc_events); 616 617 spin_unlock_irq(&dev_priv->irq_lock); 618 synchronize_irq(dev_priv->drm.irq); 619 620 gen9_reset_guc_interrupts(dev_priv); 621 } 622 623 /** 624 * bdw_update_port_irq - update DE port interrupt 625 * @dev_priv: driver private 626 * @interrupt_mask: mask of interrupt bits to update 627 * @enabled_irq_mask: mask of interrupt bits to enable 628 */ 629 static void bdw_update_port_irq(struct drm_i915_private *dev_priv, 630 u32 interrupt_mask, 631 u32 enabled_irq_mask) 632 { 633 u32 new_val; 634 u32 old_val; 635 636 lockdep_assert_held(&dev_priv->irq_lock); 637 638 WARN_ON(enabled_irq_mask & ~interrupt_mask); 639 640 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 641 return; 642 643 old_val = I915_READ(GEN8_DE_PORT_IMR); 644 645 new_val = old_val; 646 new_val &= ~interrupt_mask; 647 new_val |= (~enabled_irq_mask & interrupt_mask); 648 649 if (new_val != old_val) { 650 I915_WRITE(GEN8_DE_PORT_IMR, new_val); 651 POSTING_READ(GEN8_DE_PORT_IMR); 652 } 653 } 654 655 /** 656 * 
bdw_update_pipe_irq - update DE pipe interrupt 657 * @dev_priv: driver private 658 * @pipe: pipe whose interrupt to update 659 * @interrupt_mask: mask of interrupt bits to update 660 * @enabled_irq_mask: mask of interrupt bits to enable 661 */ 662 void bdw_update_pipe_irq(struct drm_i915_private *dev_priv, 663 enum pipe pipe, 664 u32 interrupt_mask, 665 u32 enabled_irq_mask) 666 { 667 u32 new_val; 668 669 lockdep_assert_held(&dev_priv->irq_lock); 670 671 WARN_ON(enabled_irq_mask & ~interrupt_mask); 672 673 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 674 return; 675 676 new_val = dev_priv->de_irq_mask[pipe]; 677 new_val &= ~interrupt_mask; 678 new_val |= (~enabled_irq_mask & interrupt_mask); 679 680 if (new_val != dev_priv->de_irq_mask[pipe]) { 681 dev_priv->de_irq_mask[pipe] = new_val; 682 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); 683 POSTING_READ(GEN8_DE_PIPE_IMR(pipe)); 684 } 685 } 686 687 /** 688 * ibx_display_interrupt_update - update SDEIMR 689 * @dev_priv: driver private 690 * @interrupt_mask: mask of interrupt bits to update 691 * @enabled_irq_mask: mask of interrupt bits to enable 692 */ 693 void ibx_display_interrupt_update(struct drm_i915_private *dev_priv, 694 u32 interrupt_mask, 695 u32 enabled_irq_mask) 696 { 697 u32 sdeimr = I915_READ(SDEIMR); 698 sdeimr &= ~interrupt_mask; 699 sdeimr |= (~enabled_irq_mask & interrupt_mask); 700 701 WARN_ON(enabled_irq_mask & ~interrupt_mask); 702 703 lockdep_assert_held(&dev_priv->irq_lock); 704 705 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 706 return; 707 708 I915_WRITE(SDEIMR, sdeimr); 709 POSTING_READ(SDEIMR); 710 } 711 712 u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv, 713 enum pipe pipe) 714 { 715 u32 status_mask = dev_priv->pipestat_irq_mask[pipe]; 716 u32 enable_mask = status_mask << 16; 717 718 lockdep_assert_held(&dev_priv->irq_lock); 719 720 if (INTEL_GEN(dev_priv) < 5) 721 goto out; 722 723 /* 724 * On pipe A we don't support the PSR interrupt yet, 725 * on pipe B and C the same bit MBZ. 726 */ 727 if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV)) 728 return 0; 729 /* 730 * On pipe B and C we don't support the PSR interrupt yet, on pipe 731 * A the same bit is for perf counters which we don't use either. 
732 */ 733 if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV)) 734 return 0; 735 736 enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS | 737 SPRITE0_FLIP_DONE_INT_EN_VLV | 738 SPRITE1_FLIP_DONE_INT_EN_VLV); 739 if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV) 740 enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV; 741 if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV) 742 enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV; 743 744 out: 745 WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK || 746 status_mask & ~PIPESTAT_INT_STATUS_MASK, 747 "pipe %c: enable_mask=0x%x, status_mask=0x%x\n", 748 pipe_name(pipe), enable_mask, status_mask); 749 750 return enable_mask; 751 } 752 753 void i915_enable_pipestat(struct drm_i915_private *dev_priv, 754 enum pipe pipe, u32 status_mask) 755 { 756 i915_reg_t reg = PIPESTAT(pipe); 757 u32 enable_mask; 758 759 WARN_ONCE(status_mask & ~PIPESTAT_INT_STATUS_MASK, 760 "pipe %c: status_mask=0x%x\n", 761 pipe_name(pipe), status_mask); 762 763 lockdep_assert_held(&dev_priv->irq_lock); 764 WARN_ON(!intel_irqs_enabled(dev_priv)); 765 766 if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == status_mask) 767 return; 768 769 dev_priv->pipestat_irq_mask[pipe] |= status_mask; 770 enable_mask = i915_pipestat_enable_mask(dev_priv, pipe); 771 772 I915_WRITE(reg, enable_mask | status_mask); 773 POSTING_READ(reg); 774 } 775 776 void i915_disable_pipestat(struct drm_i915_private *dev_priv, 777 enum pipe pipe, u32 status_mask) 778 { 779 i915_reg_t reg = PIPESTAT(pipe); 780 u32 enable_mask; 781 782 WARN_ONCE(status_mask & ~PIPESTAT_INT_STATUS_MASK, 783 "pipe %c: status_mask=0x%x\n", 784 pipe_name(pipe), status_mask); 785 786 lockdep_assert_held(&dev_priv->irq_lock); 787 WARN_ON(!intel_irqs_enabled(dev_priv)); 788 789 if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == 0) 790 return; 791 792 dev_priv->pipestat_irq_mask[pipe] &= ~status_mask; 793 enable_mask = i915_pipestat_enable_mask(dev_priv, pipe); 794 795 I915_WRITE(reg, enable_mask | status_mask); 796 POSTING_READ(reg); 797 } 798 799 static bool i915_has_asle(struct drm_i915_private *dev_priv) 800 { 801 if (!dev_priv->opregion.asle) 802 return false; 803 804 return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv); 805 } 806 807 /** 808 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion 809 * @dev_priv: i915 device private 810 */ 811 static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv) 812 { 813 if (!i915_has_asle(dev_priv)) 814 return; 815 816 spin_lock_irq(&dev_priv->irq_lock); 817 818 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS); 819 if (INTEL_GEN(dev_priv) >= 4) 820 i915_enable_pipestat(dev_priv, PIPE_A, 821 PIPE_LEGACY_BLC_EVENT_STATUS); 822 823 spin_unlock_irq(&dev_priv->irq_lock); 824 } 825 826 /* 827 * This timing diagram depicts the video signal in and 828 * around the vertical blanking period. 829 * 830 * Assumptions about the fictitious mode used in this example: 831 * vblank_start >= 3 832 * vsync_start = vblank_start + 1 833 * vsync_end = vblank_start + 2 834 * vtotal = vblank_start + 3 835 * 836 * start of vblank: 837 * latch double buffered registers 838 * increment frame counter (ctg+) 839 * generate start of vblank interrupt (gen4+) 840 * | 841 * | frame start: 842 * | generate frame start interrupt (aka. vblank interrupt) (gmch) 843 * | may be shifted forward 1-3 extra lines via PIPECONF 844 * | | 845 * | | start of vsync: 846 * | | generate vsync interrupt 847 * | | | 848 * ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx 849 * . \hs/ . 
\hs/ \hs/ \hs/ . \hs/ 850 * ----va---> <-----------------vb--------------------> <--------va------------- 851 * | | <----vs-----> | 852 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2) 853 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+) 854 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi) 855 * | | | 856 * last visible pixel first visible pixel 857 * | increment frame counter (gen3/4) 858 * pixel counter = vblank_start * htotal pixel counter = 0 (gen3/4) 859 * 860 * x = horizontal active 861 * _ = horizontal blanking 862 * hs = horizontal sync 863 * va = vertical active 864 * vb = vertical blanking 865 * vs = vertical sync 866 * vbs = vblank_start (number) 867 * 868 * Summary: 869 * - most events happen at the start of horizontal sync 870 * - frame start happens at the start of horizontal blank, 1-4 lines 871 * (depending on PIPECONF settings) after the start of vblank 872 * - gen3/4 pixel and frame counter are synchronized with the start 873 * of horizontal active on the first line of vertical active 874 */ 875 876 /* Called from drm generic code, passed a 'crtc', which 877 * we use as a pipe index 878 */ 879 static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe) 880 { 881 struct drm_i915_private *dev_priv = to_i915(dev); 882 struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; 883 const struct drm_display_mode *mode = &vblank->hwmode; 884 i915_reg_t high_frame, low_frame; 885 u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal; 886 unsigned long irqflags; 887 888 /* 889 * On i965gm TV output the frame counter only works up to 890 * the point when we enable the TV encoder. After that the 891 * frame counter ceases to work and reads zero. We need a 892 * vblank wait before enabling the TV encoder and so we 893 * have to enable vblank interrupts while the frame counter 894 * is still in a working state. However the core vblank code 895 * does not like us returning non-zero frame counter values 896 * when we've told it that we don't have a working frame 897 * counter. Thus we must stop non-zero values leaking out. 898 */ 899 if (!vblank->max_vblank_count) 900 return 0; 901 902 htotal = mode->crtc_htotal; 903 hsync_start = mode->crtc_hsync_start; 904 vbl_start = mode->crtc_vblank_start; 905 if (mode->flags & DRM_MODE_FLAG_INTERLACE) 906 vbl_start = DIV_ROUND_UP(vbl_start, 2); 907 908 /* Convert to pixel count */ 909 vbl_start *= htotal; 910 911 /* Start of vblank event occurs at start of hsync */ 912 vbl_start -= htotal - hsync_start; 913 914 high_frame = PIPEFRAME(pipe); 915 low_frame = PIPEFRAMEPIXEL(pipe); 916 917 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 918 919 /* 920 * High & low register fields aren't synchronized, so make sure 921 * we get a low value that's stable across two reads of the high 922 * register. 923 */ 924 do { 925 high1 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK; 926 low = I915_READ_FW(low_frame); 927 high2 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK; 928 } while (high1 != high2); 929 930 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 931 932 high1 >>= PIPE_FRAME_HIGH_SHIFT; 933 pixel = low & PIPE_PIXEL_MASK; 934 low >>= PIPE_FRAME_LOW_SHIFT; 935 936 /* 937 * The frame counter increments at beginning of active. 938 * Cook up a vblank counter by also checking the pixel 939 * counter against vblank start. 
 */
        return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}

static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
        struct drm_i915_private *dev_priv = to_i915(dev);

        return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
}

/*
 * On certain encoders on certain platforms, the pipe
 * scanline register will not work to get the scanline,
 * since the timings are driven from the PORT or there are
 * issues with scanline register updates.
 * This function will use Framestamp and current
 * timestamp registers to calculate the scanline.
 */
static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        struct drm_vblank_crtc *vblank =
                &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
        const struct drm_display_mode *mode = &vblank->hwmode;
        u32 vblank_start = mode->crtc_vblank_start;
        u32 vtotal = mode->crtc_vtotal;
        u32 htotal = mode->crtc_htotal;
        u32 clock = mode->crtc_clock;
        u32 scanline, scan_prev_time, scan_curr_time, scan_post_time;

        /*
         * To avoid the race condition where we might cross into the
         * next vblank just between the PIPE_FRMTMSTMP and TIMESTAMP_CTR
         * reads, we make sure we read PIPE_FRMTMSTMP and TIMESTAMP_CTR
         * during the same frame.
         */
        do {
                /*
                 * This field provides read back of the display
                 * pipe frame time stamp. The time stamp value
                 * is sampled at every start of vertical blank.
                 */
                scan_prev_time = I915_READ_FW(PIPE_FRMTMSTMP(crtc->pipe));

                /*
                 * The TIMESTAMP_CTR register has the current
                 * time stamp value.
                 */
                scan_curr_time = I915_READ_FW(IVB_TIMESTAMP_CTR);

                scan_post_time = I915_READ_FW(PIPE_FRMTMSTMP(crtc->pipe));
        } while (scan_post_time != scan_prev_time);

        scanline = div_u64(mul_u32_u32(scan_curr_time - scan_prev_time,
                                       clock), 1000 * htotal);
        scanline = min(scanline, vtotal - 1);
        scanline = (scanline + vblank_start) % vtotal;

        return scanline;
}

/* I915_READ_FW, only for fast reads of display block, no need for forcewake etc. */
static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        const struct drm_display_mode *mode;
        struct drm_vblank_crtc *vblank;
        enum pipe pipe = crtc->pipe;
        int position, vtotal;

        if (!crtc->active)
                return -1;

        vblank = &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
        mode = &vblank->hwmode;

        if (mode->private_flags & I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP)
                return __intel_get_crtc_scanline_from_timestamp(crtc);

        vtotal = mode->crtc_vtotal;
        if (mode->flags & DRM_MODE_FLAG_INTERLACE)
                vtotal /= 2;

        if (IS_GEN(dev_priv, 2))
                position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
        else
                position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

        /*
         * On HSW, the DSL reg (0x70000) appears to return 0 if we
         * read it just before the start of vblank. So try it again
         * so we don't accidentally end up spanning a vblank frame
         * increment, causing the pipe_update_end() code to squawk at us.
         *
         * The nature of this problem means we can't simply check the ISR
         * bit and return the vblank start value; nor can we use the scanline
         * debug register in the transcoder as it appears to have the same
         * problem. We may need to extend this to include other platforms,
         * but so far testing only shows the problem on HSW.
         */
        if (HAS_DDI(dev_priv) && !position) {
                int i, temp;

                for (i = 0; i < 100; i++) {
                        udelay(1);
                        temp = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
                        if (temp != position) {
                                position = temp;
                                break;
                        }
                }
        }

        /*
         * See update_scanline_offset() for the details on the
         * scanline_offset adjustment.
         */
        return (position + crtc->scanline_offset) % vtotal;
}

static bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
                                     bool in_vblank_irq, int *vpos, int *hpos,
                                     ktime_t *stime, ktime_t *etime,
                                     const struct drm_display_mode *mode)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv,
                                                                pipe);
        int position;
        int vbl_start, vbl_end, hsync_start, htotal, vtotal;
        unsigned long irqflags;
        bool use_scanline_counter = INTEL_GEN(dev_priv) >= 5 ||
                IS_G4X(dev_priv) || IS_GEN(dev_priv, 2) ||
                mode->private_flags & I915_MODE_FLAG_USE_SCANLINE_COUNTER;

        if (WARN_ON(!mode->crtc_clock)) {
                DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
                                 "pipe %c\n", pipe_name(pipe));
                return false;
        }

        htotal = mode->crtc_htotal;
        hsync_start = mode->crtc_hsync_start;
        vtotal = mode->crtc_vtotal;
        vbl_start = mode->crtc_vblank_start;
        vbl_end = mode->crtc_vblank_end;

        if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
                vbl_start = DIV_ROUND_UP(vbl_start, 2);
                vbl_end /= 2;
                vtotal /= 2;
        }

        /*
         * Lock uncore.lock, as we will do multiple timing critical raw
         * register reads, potentially with preemption disabled, so the
         * following code must not block on uncore.lock.
         */
        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

        /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

        /* Get optional system timestamp before query. */
        if (stime)
                *stime = ktime_get();

        if (use_scanline_counter) {
                /* No obvious pixelcount register. Only query vertical
                 * scanout position from Display scan line register.
                 */
                position = __intel_get_crtc_scanline(intel_crtc);
        } else {
                /* Have access to pixelcount since start of frame.
                 * We can split this into vertical and horizontal
                 * scanout position.
                 */
                position = (I915_READ_FW(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

                /* convert to pixel counts */
                vbl_start *= htotal;
                vbl_end *= htotal;
                vtotal *= htotal;

                /*
                 * In interlaced modes, the pixel counter counts all pixels,
                 * so one field will have htotal more pixels. In order to avoid
                 * the reported position from jumping backwards when the pixel
                 * counter is beyond the length of the shorter field, just
                 * clamp the position to the length of the shorter field. This
                 * matches how the scanline counter based position works since
                 * the scanline counter doesn't count the two half lines.
1133 */ 1134 if (position >= vtotal) 1135 position = vtotal - 1; 1136 1137 /* 1138 * Start of vblank interrupt is triggered at start of hsync, 1139 * just prior to the first active line of vblank. However we 1140 * consider lines to start at the leading edge of horizontal 1141 * active. So, should we get here before we've crossed into 1142 * the horizontal active of the first line in vblank, we would 1143 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that, 1144 * always add htotal-hsync_start to the current pixel position. 1145 */ 1146 position = (position + htotal - hsync_start) % vtotal; 1147 } 1148 1149 /* Get optional system timestamp after query. */ 1150 if (etime) 1151 *etime = ktime_get(); 1152 1153 /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */ 1154 1155 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 1156 1157 /* 1158 * While in vblank, position will be negative 1159 * counting up towards 0 at vbl_end. And outside 1160 * vblank, position will be positive counting 1161 * up since vbl_end. 1162 */ 1163 if (position >= vbl_start) 1164 position -= vbl_end; 1165 else 1166 position += vtotal - vbl_end; 1167 1168 if (use_scanline_counter) { 1169 *vpos = position; 1170 *hpos = 0; 1171 } else { 1172 *vpos = position / htotal; 1173 *hpos = position - (*vpos * htotal); 1174 } 1175 1176 return true; 1177 } 1178 1179 int intel_get_crtc_scanline(struct intel_crtc *crtc) 1180 { 1181 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1182 unsigned long irqflags; 1183 int position; 1184 1185 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 1186 position = __intel_get_crtc_scanline(crtc); 1187 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 1188 1189 return position; 1190 } 1191 1192 static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv) 1193 { 1194 u32 busy_up, busy_down, max_avg, min_avg; 1195 u8 new_delay; 1196 1197 spin_lock(&mchdev_lock); 1198 1199 I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS)); 1200 1201 new_delay = dev_priv->ips.cur_delay; 1202 1203 I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG); 1204 busy_up = I915_READ(RCPREVBSYTUPAVG); 1205 busy_down = I915_READ(RCPREVBSYTDNAVG); 1206 max_avg = I915_READ(RCBMAXAVG); 1207 min_avg = I915_READ(RCBMINAVG); 1208 1209 /* Handle RCS change request from hw */ 1210 if (busy_up > max_avg) { 1211 if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay) 1212 new_delay = dev_priv->ips.cur_delay - 1; 1213 if (new_delay < dev_priv->ips.max_delay) 1214 new_delay = dev_priv->ips.max_delay; 1215 } else if (busy_down < min_avg) { 1216 if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay) 1217 new_delay = dev_priv->ips.cur_delay + 1; 1218 if (new_delay > dev_priv->ips.min_delay) 1219 new_delay = dev_priv->ips.min_delay; 1220 } 1221 1222 if (ironlake_set_drps(dev_priv, new_delay)) 1223 dev_priv->ips.cur_delay = new_delay; 1224 1225 spin_unlock(&mchdev_lock); 1226 1227 return; 1228 } 1229 1230 static void vlv_c0_read(struct drm_i915_private *dev_priv, 1231 struct intel_rps_ei *ei) 1232 { 1233 ei->ktime = ktime_get_raw(); 1234 ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT); 1235 ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT); 1236 } 1237 1238 void gen6_rps_reset_ei(struct drm_i915_private *dev_priv) 1239 { 1240 memset(&dev_priv->gt_pm.rps.ei, 0, sizeof(dev_priv->gt_pm.rps.ei)); 1241 } 1242 1243 static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir) 1244 { 1245 struct intel_rps *rps = &dev_priv->gt_pm.rps; 1246 const struct intel_rps_ei *prev = &rps->ei; 1247 
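        /*
         * On each UP_EI_EXPIRED interrupt we sample the render and media C0
         * residency counters, compare the busiest engine's delta (scaled to
         * common units) against the elapsed time times the up/down
         * thresholds, and synthesize GEN6_PM_RP_UP/DOWN_THRESHOLD events
         * from the result.
         */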
struct intel_rps_ei now; 1248 u32 events = 0; 1249 1250 if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0) 1251 return 0; 1252 1253 vlv_c0_read(dev_priv, &now); 1254 1255 if (prev->ktime) { 1256 u64 time, c0; 1257 u32 render, media; 1258 1259 time = ktime_us_delta(now.ktime, prev->ktime); 1260 1261 time *= dev_priv->czclk_freq; 1262 1263 /* Workload can be split between render + media, 1264 * e.g. SwapBuffers being blitted in X after being rendered in 1265 * mesa. To account for this we need to combine both engines 1266 * into our activity counter. 1267 */ 1268 render = now.render_c0 - prev->render_c0; 1269 media = now.media_c0 - prev->media_c0; 1270 c0 = max(render, media); 1271 c0 *= 1000 * 100 << 8; /* to usecs and scale to threshold% */ 1272 1273 if (c0 > time * rps->power.up_threshold) 1274 events = GEN6_PM_RP_UP_THRESHOLD; 1275 else if (c0 < time * rps->power.down_threshold) 1276 events = GEN6_PM_RP_DOWN_THRESHOLD; 1277 } 1278 1279 rps->ei = now; 1280 return events; 1281 } 1282 1283 static void gen6_pm_rps_work(struct work_struct *work) 1284 { 1285 struct drm_i915_private *dev_priv = 1286 container_of(work, struct drm_i915_private, gt_pm.rps.work); 1287 struct intel_rps *rps = &dev_priv->gt_pm.rps; 1288 bool client_boost = false; 1289 int new_delay, adj, min, max; 1290 u32 pm_iir = 0; 1291 1292 spin_lock_irq(&dev_priv->irq_lock); 1293 if (rps->interrupts_enabled) { 1294 pm_iir = fetch_and_zero(&rps->pm_iir); 1295 client_boost = atomic_read(&rps->num_waiters); 1296 } 1297 spin_unlock_irq(&dev_priv->irq_lock); 1298 1299 /* Make sure we didn't queue anything we're not going to process. */ 1300 WARN_ON(pm_iir & ~dev_priv->pm_rps_events); 1301 if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost) 1302 goto out; 1303 1304 mutex_lock(&dev_priv->pcu_lock); 1305 1306 pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir); 1307 1308 adj = rps->last_adj; 1309 new_delay = rps->cur_freq; 1310 min = rps->min_freq_softlimit; 1311 max = rps->max_freq_softlimit; 1312 if (client_boost) 1313 max = rps->max_freq; 1314 if (client_boost && new_delay < rps->boost_freq) { 1315 new_delay = rps->boost_freq; 1316 adj = 0; 1317 } else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) { 1318 if (adj > 0) 1319 adj *= 2; 1320 else /* CHV needs even encode values */ 1321 adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1; 1322 1323 if (new_delay >= rps->max_freq_softlimit) 1324 adj = 0; 1325 } else if (client_boost) { 1326 adj = 0; 1327 } else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) { 1328 if (rps->cur_freq > rps->efficient_freq) 1329 new_delay = rps->efficient_freq; 1330 else if (rps->cur_freq > rps->min_freq_softlimit) 1331 new_delay = rps->min_freq_softlimit; 1332 adj = 0; 1333 } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) { 1334 if (adj < 0) 1335 adj *= 2; 1336 else /* CHV needs even encode values */ 1337 adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1; 1338 1339 if (new_delay <= rps->min_freq_softlimit) 1340 adj = 0; 1341 } else { /* unknown event */ 1342 adj = 0; 1343 } 1344 1345 rps->last_adj = adj; 1346 1347 /* 1348 * Limit deboosting and boosting to keep ourselves at the extremes 1349 * when in the respective power modes (i.e. slowly decrease frequencies 1350 * while in the HIGH_POWER zone and slowly increase frequencies while 1351 * in the LOW_POWER zone). On idle, we will hit the timeout and drop 1352 * to the next level quickly, and conversely if busy we expect to 1353 * hit a waitboost and rapidly switch into max power. 
1354 */ 1355 if ((adj < 0 && rps->power.mode == HIGH_POWER) || 1356 (adj > 0 && rps->power.mode == LOW_POWER)) 1357 rps->last_adj = 0; 1358 1359 /* sysfs frequency interfaces may have snuck in while servicing the 1360 * interrupt 1361 */ 1362 new_delay += adj; 1363 new_delay = clamp_t(int, new_delay, min, max); 1364 1365 if (intel_set_rps(dev_priv, new_delay)) { 1366 DRM_DEBUG_DRIVER("Failed to set new GPU frequency\n"); 1367 rps->last_adj = 0; 1368 } 1369 1370 mutex_unlock(&dev_priv->pcu_lock); 1371 1372 out: 1373 /* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */ 1374 spin_lock_irq(&dev_priv->irq_lock); 1375 if (rps->interrupts_enabled) 1376 gen6_unmask_pm_irq(dev_priv, dev_priv->pm_rps_events); 1377 spin_unlock_irq(&dev_priv->irq_lock); 1378 } 1379 1380 1381 /** 1382 * ivybridge_parity_work - Workqueue called when a parity error interrupt 1383 * occurred. 1384 * @work: workqueue struct 1385 * 1386 * Doesn't actually do anything except notify userspace. As a consequence of 1387 * this event, userspace should try to remap the bad rows since statistically 1388 * it is likely the same row is more likely to go bad again. 1389 */ 1390 static void ivybridge_parity_work(struct work_struct *work) 1391 { 1392 struct drm_i915_private *dev_priv = 1393 container_of(work, typeof(*dev_priv), l3_parity.error_work); 1394 u32 error_status, row, bank, subbank; 1395 char *parity_event[6]; 1396 u32 misccpctl; 1397 u8 slice = 0; 1398 1399 /* We must turn off DOP level clock gating to access the L3 registers. 1400 * In order to prevent a get/put style interface, acquire struct mutex 1401 * any time we access those registers. 1402 */ 1403 mutex_lock(&dev_priv->drm.struct_mutex); 1404 1405 /* If we've screwed up tracking, just let the interrupt fire again */ 1406 if (WARN_ON(!dev_priv->l3_parity.which_slice)) 1407 goto out; 1408 1409 misccpctl = I915_READ(GEN7_MISCCPCTL); 1410 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE); 1411 POSTING_READ(GEN7_MISCCPCTL); 1412 1413 while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) { 1414 i915_reg_t reg; 1415 1416 slice--; 1417 if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv))) 1418 break; 1419 1420 dev_priv->l3_parity.which_slice &= ~(1<<slice); 1421 1422 reg = GEN7_L3CDERRST1(slice); 1423 1424 error_status = I915_READ(reg); 1425 row = GEN7_PARITY_ERROR_ROW(error_status); 1426 bank = GEN7_PARITY_ERROR_BANK(error_status); 1427 subbank = GEN7_PARITY_ERROR_SUBBANK(error_status); 1428 1429 I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE); 1430 POSTING_READ(reg); 1431 1432 parity_event[0] = I915_L3_PARITY_UEVENT "=1"; 1433 parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row); 1434 parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank); 1435 parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank); 1436 parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice); 1437 parity_event[5] = NULL; 1438 1439 kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj, 1440 KOBJ_CHANGE, parity_event); 1441 1442 DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n", 1443 slice, row, bank, subbank); 1444 1445 kfree(parity_event[4]); 1446 kfree(parity_event[3]); 1447 kfree(parity_event[2]); 1448 kfree(parity_event[1]); 1449 } 1450 1451 I915_WRITE(GEN7_MISCCPCTL, misccpctl); 1452 1453 out: 1454 WARN_ON(dev_priv->l3_parity.which_slice); 1455 spin_lock_irq(&dev_priv->irq_lock); 1456 gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv)); 1457 spin_unlock_irq(&dev_priv->irq_lock); 1458 1459 
mutex_unlock(&dev_priv->drm.struct_mutex); 1460 } 1461 1462 static void ivybridge_parity_error_irq_handler(struct drm_i915_private *dev_priv, 1463 u32 iir) 1464 { 1465 if (!HAS_L3_DPF(dev_priv)) 1466 return; 1467 1468 spin_lock(&dev_priv->irq_lock); 1469 gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv)); 1470 spin_unlock(&dev_priv->irq_lock); 1471 1472 iir &= GT_PARITY_ERROR(dev_priv); 1473 if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1) 1474 dev_priv->l3_parity.which_slice |= 1 << 1; 1475 1476 if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT) 1477 dev_priv->l3_parity.which_slice |= 1 << 0; 1478 1479 queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work); 1480 } 1481 1482 static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv, 1483 u32 gt_iir) 1484 { 1485 if (gt_iir & GT_RENDER_USER_INTERRUPT) 1486 intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]); 1487 if (gt_iir & ILK_BSD_USER_INTERRUPT) 1488 intel_engine_breadcrumbs_irq(dev_priv->engine[VCS0]); 1489 } 1490 1491 static void snb_gt_irq_handler(struct drm_i915_private *dev_priv, 1492 u32 gt_iir) 1493 { 1494 if (gt_iir & GT_RENDER_USER_INTERRUPT) 1495 intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]); 1496 if (gt_iir & GT_BSD_USER_INTERRUPT) 1497 intel_engine_breadcrumbs_irq(dev_priv->engine[VCS0]); 1498 if (gt_iir & GT_BLT_USER_INTERRUPT) 1499 intel_engine_breadcrumbs_irq(dev_priv->engine[BCS0]); 1500 1501 if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT | 1502 GT_BSD_CS_ERROR_INTERRUPT | 1503 GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) 1504 DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir); 1505 1506 if (gt_iir & GT_PARITY_ERROR(dev_priv)) 1507 ivybridge_parity_error_irq_handler(dev_priv, gt_iir); 1508 } 1509 1510 static void 1511 gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir) 1512 { 1513 bool tasklet = false; 1514 1515 if (iir & GT_CONTEXT_SWITCH_INTERRUPT) 1516 tasklet = true; 1517 1518 if (iir & GT_RENDER_USER_INTERRUPT) { 1519 intel_engine_breadcrumbs_irq(engine); 1520 tasklet |= intel_engine_needs_breadcrumb_tasklet(engine); 1521 } 1522 1523 if (tasklet) 1524 tasklet_hi_schedule(&engine->execlists.tasklet); 1525 } 1526 1527 static void gen8_gt_irq_ack(struct drm_i915_private *i915, 1528 u32 master_ctl, u32 gt_iir[4]) 1529 { 1530 void __iomem * const regs = i915->uncore.regs; 1531 1532 #define GEN8_GT_IRQS (GEN8_GT_RCS_IRQ | \ 1533 GEN8_GT_BCS_IRQ | \ 1534 GEN8_GT_VCS0_IRQ | \ 1535 GEN8_GT_VCS1_IRQ | \ 1536 GEN8_GT_VECS_IRQ | \ 1537 GEN8_GT_PM_IRQ | \ 1538 GEN8_GT_GUC_IRQ) 1539 1540 if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) { 1541 gt_iir[0] = raw_reg_read(regs, GEN8_GT_IIR(0)); 1542 if (likely(gt_iir[0])) 1543 raw_reg_write(regs, GEN8_GT_IIR(0), gt_iir[0]); 1544 } 1545 1546 if (master_ctl & (GEN8_GT_VCS0_IRQ | GEN8_GT_VCS1_IRQ)) { 1547 gt_iir[1] = raw_reg_read(regs, GEN8_GT_IIR(1)); 1548 if (likely(gt_iir[1])) 1549 raw_reg_write(regs, GEN8_GT_IIR(1), gt_iir[1]); 1550 } 1551 1552 if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) { 1553 gt_iir[2] = raw_reg_read(regs, GEN8_GT_IIR(2)); 1554 if (likely(gt_iir[2])) 1555 raw_reg_write(regs, GEN8_GT_IIR(2), gt_iir[2]); 1556 } 1557 1558 if (master_ctl & GEN8_GT_VECS_IRQ) { 1559 gt_iir[3] = raw_reg_read(regs, GEN8_GT_IIR(3)); 1560 if (likely(gt_iir[3])) 1561 raw_reg_write(regs, GEN8_GT_IIR(3), gt_iir[3]); 1562 } 1563 } 1564 1565 static void gen8_gt_irq_handler(struct drm_i915_private *i915, 1566 u32 master_ctl, u32 gt_iir[4]) 1567 { 1568 if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) { 1569 gen8_cs_irq_handler(i915->engine[RCS0], 1570 
gt_iir[0] >> GEN8_RCS_IRQ_SHIFT); 1571 gen8_cs_irq_handler(i915->engine[BCS0], 1572 gt_iir[0] >> GEN8_BCS_IRQ_SHIFT); 1573 } 1574 1575 if (master_ctl & (GEN8_GT_VCS0_IRQ | GEN8_GT_VCS1_IRQ)) { 1576 gen8_cs_irq_handler(i915->engine[VCS0], 1577 gt_iir[1] >> GEN8_VCS0_IRQ_SHIFT); 1578 gen8_cs_irq_handler(i915->engine[VCS1], 1579 gt_iir[1] >> GEN8_VCS1_IRQ_SHIFT); 1580 } 1581 1582 if (master_ctl & GEN8_GT_VECS_IRQ) { 1583 gen8_cs_irq_handler(i915->engine[VECS0], 1584 gt_iir[3] >> GEN8_VECS_IRQ_SHIFT); 1585 } 1586 1587 if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) { 1588 gen6_rps_irq_handler(i915, gt_iir[2]); 1589 gen9_guc_irq_handler(i915, gt_iir[2]); 1590 } 1591 } 1592 1593 static bool gen11_port_hotplug_long_detect(enum hpd_pin pin, u32 val) 1594 { 1595 switch (pin) { 1596 case HPD_PORT_C: 1597 return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC1); 1598 case HPD_PORT_D: 1599 return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC2); 1600 case HPD_PORT_E: 1601 return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC3); 1602 case HPD_PORT_F: 1603 return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC4); 1604 default: 1605 return false; 1606 } 1607 } 1608 1609 static bool bxt_port_hotplug_long_detect(enum hpd_pin pin, u32 val) 1610 { 1611 switch (pin) { 1612 case HPD_PORT_A: 1613 return val & PORTA_HOTPLUG_LONG_DETECT; 1614 case HPD_PORT_B: 1615 return val & PORTB_HOTPLUG_LONG_DETECT; 1616 case HPD_PORT_C: 1617 return val & PORTC_HOTPLUG_LONG_DETECT; 1618 default: 1619 return false; 1620 } 1621 } 1622 1623 static bool icp_ddi_port_hotplug_long_detect(enum hpd_pin pin, u32 val) 1624 { 1625 switch (pin) { 1626 case HPD_PORT_A: 1627 return val & ICP_DDIA_HPD_LONG_DETECT; 1628 case HPD_PORT_B: 1629 return val & ICP_DDIB_HPD_LONG_DETECT; 1630 default: 1631 return false; 1632 } 1633 } 1634 1635 static bool icp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val) 1636 { 1637 switch (pin) { 1638 case HPD_PORT_C: 1639 return val & ICP_TC_HPD_LONG_DETECT(PORT_TC1); 1640 case HPD_PORT_D: 1641 return val & ICP_TC_HPD_LONG_DETECT(PORT_TC2); 1642 case HPD_PORT_E: 1643 return val & ICP_TC_HPD_LONG_DETECT(PORT_TC3); 1644 case HPD_PORT_F: 1645 return val & ICP_TC_HPD_LONG_DETECT(PORT_TC4); 1646 default: 1647 return false; 1648 } 1649 } 1650 1651 static bool spt_port_hotplug2_long_detect(enum hpd_pin pin, u32 val) 1652 { 1653 switch (pin) { 1654 case HPD_PORT_E: 1655 return val & PORTE_HOTPLUG_LONG_DETECT; 1656 default: 1657 return false; 1658 } 1659 } 1660 1661 static bool spt_port_hotplug_long_detect(enum hpd_pin pin, u32 val) 1662 { 1663 switch (pin) { 1664 case HPD_PORT_A: 1665 return val & PORTA_HOTPLUG_LONG_DETECT; 1666 case HPD_PORT_B: 1667 return val & PORTB_HOTPLUG_LONG_DETECT; 1668 case HPD_PORT_C: 1669 return val & PORTC_HOTPLUG_LONG_DETECT; 1670 case HPD_PORT_D: 1671 return val & PORTD_HOTPLUG_LONG_DETECT; 1672 default: 1673 return false; 1674 } 1675 } 1676 1677 static bool ilk_port_hotplug_long_detect(enum hpd_pin pin, u32 val) 1678 { 1679 switch (pin) { 1680 case HPD_PORT_A: 1681 return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT; 1682 default: 1683 return false; 1684 } 1685 } 1686 1687 static bool pch_port_hotplug_long_detect(enum hpd_pin pin, u32 val) 1688 { 1689 switch (pin) { 1690 case HPD_PORT_B: 1691 return val & PORTB_HOTPLUG_LONG_DETECT; 1692 case HPD_PORT_C: 1693 return val & PORTC_HOTPLUG_LONG_DETECT; 1694 case HPD_PORT_D: 1695 return val & PORTD_HOTPLUG_LONG_DETECT; 1696 default: 1697 return false; 1698 } 1699 } 1700 1701 static bool i9xx_port_hotplug_long_detect(enum hpd_pin pin, u32 val) 1702 { 
1703 switch (pin) { 1704 case HPD_PORT_B: 1705 return val & PORTB_HOTPLUG_INT_LONG_PULSE; 1706 case HPD_PORT_C: 1707 return val & PORTC_HOTPLUG_INT_LONG_PULSE; 1708 case HPD_PORT_D: 1709 return val & PORTD_HOTPLUG_INT_LONG_PULSE; 1710 default: 1711 return false; 1712 } 1713 } 1714 1715 /* 1716 * Get a bit mask of pins that have triggered, and which ones may be long. 1717 * This can be called multiple times with the same masks to accumulate 1718 * hotplug detection results from several registers. 1719 * 1720 * Note that the caller is expected to zero out the masks initially. 1721 */ 1722 static void intel_get_hpd_pins(struct drm_i915_private *dev_priv, 1723 u32 *pin_mask, u32 *long_mask, 1724 u32 hotplug_trigger, u32 dig_hotplug_reg, 1725 const u32 hpd[HPD_NUM_PINS], 1726 bool long_pulse_detect(enum hpd_pin pin, u32 val)) 1727 { 1728 enum hpd_pin pin; 1729 1730 for_each_hpd_pin(pin) { 1731 if ((hpd[pin] & hotplug_trigger) == 0) 1732 continue; 1733 1734 *pin_mask |= BIT(pin); 1735 1736 if (long_pulse_detect(pin, dig_hotplug_reg)) 1737 *long_mask |= BIT(pin); 1738 } 1739 1740 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x, long 0x%08x\n", 1741 hotplug_trigger, dig_hotplug_reg, *pin_mask, *long_mask); 1742 1743 } 1744 1745 static void gmbus_irq_handler(struct drm_i915_private *dev_priv) 1746 { 1747 wake_up_all(&dev_priv->gmbus_wait_queue); 1748 } 1749 1750 static void dp_aux_irq_handler(struct drm_i915_private *dev_priv) 1751 { 1752 wake_up_all(&dev_priv->gmbus_wait_queue); 1753 } 1754 1755 #if defined(CONFIG_DEBUG_FS) 1756 static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 1757 enum pipe pipe, 1758 u32 crc0, u32 crc1, 1759 u32 crc2, u32 crc3, 1760 u32 crc4) 1761 { 1762 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe]; 1763 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); 1764 u32 crcs[5] = { crc0, crc1, crc2, crc3, crc4 }; 1765 1766 trace_intel_pipe_crc(crtc, crcs); 1767 1768 spin_lock(&pipe_crc->lock); 1769 /* 1770 * For some not yet identified reason, the first CRC is 1771 * bonkers. So let's just wait for the next vblank and read 1772 * out the buggy result. 1773 * 1774 * On GEN8+ sometimes the second CRC is bonkers as well, so 1775 * don't trust that one either. 
1776 */ 1777 if (pipe_crc->skipped <= 0 || 1778 (INTEL_GEN(dev_priv) >= 8 && pipe_crc->skipped == 1)) { 1779 pipe_crc->skipped++; 1780 spin_unlock(&pipe_crc->lock); 1781 return; 1782 } 1783 spin_unlock(&pipe_crc->lock); 1784 1785 drm_crtc_add_crc_entry(&crtc->base, true, 1786 drm_crtc_accurate_vblank_count(&crtc->base), 1787 crcs); 1788 } 1789 #else 1790 static inline void 1791 display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 1792 enum pipe pipe, 1793 u32 crc0, u32 crc1, 1794 u32 crc2, u32 crc3, 1795 u32 crc4) {} 1796 #endif 1797 1798 1799 static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 1800 enum pipe pipe) 1801 { 1802 display_pipe_crc_irq_handler(dev_priv, pipe, 1803 I915_READ(PIPE_CRC_RES_1_IVB(pipe)), 1804 0, 0, 0, 0); 1805 } 1806 1807 static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 1808 enum pipe pipe) 1809 { 1810 display_pipe_crc_irq_handler(dev_priv, pipe, 1811 I915_READ(PIPE_CRC_RES_1_IVB(pipe)), 1812 I915_READ(PIPE_CRC_RES_2_IVB(pipe)), 1813 I915_READ(PIPE_CRC_RES_3_IVB(pipe)), 1814 I915_READ(PIPE_CRC_RES_4_IVB(pipe)), 1815 I915_READ(PIPE_CRC_RES_5_IVB(pipe))); 1816 } 1817 1818 static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 1819 enum pipe pipe) 1820 { 1821 u32 res1, res2; 1822 1823 if (INTEL_GEN(dev_priv) >= 3) 1824 res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe)); 1825 else 1826 res1 = 0; 1827 1828 if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) 1829 res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe)); 1830 else 1831 res2 = 0; 1832 1833 display_pipe_crc_irq_handler(dev_priv, pipe, 1834 I915_READ(PIPE_CRC_RES_RED(pipe)), 1835 I915_READ(PIPE_CRC_RES_GREEN(pipe)), 1836 I915_READ(PIPE_CRC_RES_BLUE(pipe)), 1837 res1, res2); 1838 } 1839 1840 /* The RPS events need forcewake, so we add them to a work queue and mask their 1841 * IMR bits until the work is done. Other interrupts can be processed without 1842 * the work queue. 
*/
1843 static void gen11_rps_irq_handler(struct drm_i915_private *i915, u32 pm_iir)
1844 {
1845 struct intel_rps *rps = &i915->gt_pm.rps;
1846 const u32 events = i915->pm_rps_events & pm_iir;
1847
1848 lockdep_assert_held(&i915->irq_lock);
1849
1850 if (unlikely(!events))
1851 return;
1852
1853 gen6_mask_pm_irq(i915, events);
1854
1855 if (!rps->interrupts_enabled)
1856 return;
1857
1858 rps->pm_iir |= events;
1859 schedule_work(&rps->work);
1860 }
1861
1862 static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
1863 {
1864 struct intel_rps *rps = &dev_priv->gt_pm.rps;
1865
1866 if (pm_iir & dev_priv->pm_rps_events) {
1867 spin_lock(&dev_priv->irq_lock);
1868 gen6_mask_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
1869 if (rps->interrupts_enabled) {
1870 rps->pm_iir |= pm_iir & dev_priv->pm_rps_events;
1871 schedule_work(&rps->work);
1872 }
1873 spin_unlock(&dev_priv->irq_lock);
1874 }
1875
1876 if (INTEL_GEN(dev_priv) >= 8)
1877 return;
1878
1879 if (pm_iir & PM_VEBOX_USER_INTERRUPT)
1880 intel_engine_breadcrumbs_irq(dev_priv->engine[VECS0]);
1881
1882 if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
1883 DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
1884 }
1885
1886 static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 gt_iir)
1887 {
1888 if (gt_iir & GEN9_GUC_TO_HOST_INT_EVENT)
1889 intel_guc_to_host_event_handler(&dev_priv->guc);
1890 }
1891
1892 static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
1893 {
1894 enum pipe pipe;
1895
1896 for_each_pipe(dev_priv, pipe) {
1897 I915_WRITE(PIPESTAT(pipe),
1898 PIPESTAT_INT_STATUS_MASK |
1899 PIPE_FIFO_UNDERRUN_STATUS);
1900
1901 dev_priv->pipestat_irq_mask[pipe] = 0;
1902 }
1903 }
1904
1905 static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
1906 u32 iir, u32 pipe_stats[I915_MAX_PIPES])
1907 {
1908 int pipe;
1909
1910 spin_lock(&dev_priv->irq_lock);
1911
1912 if (!dev_priv->display_irqs_enabled) {
1913 spin_unlock(&dev_priv->irq_lock);
1914 return;
1915 }
1916
1917 for_each_pipe(dev_priv, pipe) {
1918 i915_reg_t reg;
1919 u32 status_mask, enable_mask, iir_bit = 0;
1920
1921 /*
1922 * PIPESTAT bits get signalled even when the interrupt is
1923 * disabled with the mask bits, and some of the status bits do
1924 * not generate interrupts at all (like the underrun bit). Hence
1925 * we need to be careful that we only handle what we want to
1926 * handle.
1927 */
1928
1929 /* fifo underruns are filtered in the underrun handler. */
1930 status_mask = PIPE_FIFO_UNDERRUN_STATUS;
1931
1932 switch (pipe) {
1933 case PIPE_A:
1934 iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
1935 break;
1936 case PIPE_B:
1937 iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
1938 break;
1939 case PIPE_C:
1940 iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
1941 break;
1942 }
1943 if (iir & iir_bit)
1944 status_mask |= dev_priv->pipestat_irq_mask[pipe];
1945
1946 if (!status_mask)
1947 continue;
1948
1949 reg = PIPESTAT(pipe);
1950 pipe_stats[pipe] = I915_READ(reg) & status_mask;
1951 enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
1952
1953 /*
1954 * Clear the PIPE*STAT regs before the IIR
1955 *
1956 * Toggle the enable bits to make sure we get an
1957 * edge in the ISR pipe event bit if we don't clear
1958 * all the enabled status bits. Otherwise the edge
1959 * triggered IIR on i965/g4x wouldn't notice that
1960 * an interrupt is still pending.
1961 */ 1962 if (pipe_stats[pipe]) { 1963 I915_WRITE(reg, pipe_stats[pipe]); 1964 I915_WRITE(reg, enable_mask); 1965 } 1966 } 1967 spin_unlock(&dev_priv->irq_lock); 1968 } 1969 1970 static void i8xx_pipestat_irq_handler(struct drm_i915_private *dev_priv, 1971 u16 iir, u32 pipe_stats[I915_MAX_PIPES]) 1972 { 1973 enum pipe pipe; 1974 1975 for_each_pipe(dev_priv, pipe) { 1976 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS) 1977 drm_handle_vblank(&dev_priv->drm, pipe); 1978 1979 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 1980 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 1981 1982 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 1983 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 1984 } 1985 } 1986 1987 static void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv, 1988 u32 iir, u32 pipe_stats[I915_MAX_PIPES]) 1989 { 1990 bool blc_event = false; 1991 enum pipe pipe; 1992 1993 for_each_pipe(dev_priv, pipe) { 1994 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS) 1995 drm_handle_vblank(&dev_priv->drm, pipe); 1996 1997 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 1998 blc_event = true; 1999 2000 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 2001 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 2002 2003 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 2004 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2005 } 2006 2007 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 2008 intel_opregion_asle_intr(dev_priv); 2009 } 2010 2011 static void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv, 2012 u32 iir, u32 pipe_stats[I915_MAX_PIPES]) 2013 { 2014 bool blc_event = false; 2015 enum pipe pipe; 2016 2017 for_each_pipe(dev_priv, pipe) { 2018 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS) 2019 drm_handle_vblank(&dev_priv->drm, pipe); 2020 2021 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 2022 blc_event = true; 2023 2024 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 2025 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 2026 2027 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 2028 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2029 } 2030 2031 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 2032 intel_opregion_asle_intr(dev_priv); 2033 2034 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 2035 gmbus_irq_handler(dev_priv); 2036 } 2037 2038 static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv, 2039 u32 pipe_stats[I915_MAX_PIPES]) 2040 { 2041 enum pipe pipe; 2042 2043 for_each_pipe(dev_priv, pipe) { 2044 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS) 2045 drm_handle_vblank(&dev_priv->drm, pipe); 2046 2047 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 2048 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 2049 2050 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 2051 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2052 } 2053 2054 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 2055 gmbus_irq_handler(dev_priv); 2056 } 2057 2058 static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv) 2059 { 2060 u32 hotplug_status = 0, hotplug_status_mask; 2061 int i; 2062 2063 if (IS_G4X(dev_priv) || 2064 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 2065 hotplug_status_mask = HOTPLUG_INT_STATUS_G4X | 2066 DP_AUX_CHANNEL_MASK_INT_STATUS_G4X; 2067 else 2068 hotplug_status_mask = HOTPLUG_INT_STATUS_I915; 2069 2070 /* 2071 * We absolutely have to clear all the pending interrupt 2072 * bits in PORT_HOTPLUG_STAT. 
Otherwise the ISR port 2073 * interrupt bit won't have an edge, and the i965/g4x 2074 * edge triggered IIR will not notice that an interrupt 2075 * is still pending. We can't use PORT_HOTPLUG_EN to 2076 * guarantee the edge as the act of toggling the enable 2077 * bits can itself generate a new hotplug interrupt :( 2078 */ 2079 for (i = 0; i < 10; i++) { 2080 u32 tmp = I915_READ(PORT_HOTPLUG_STAT) & hotplug_status_mask; 2081 2082 if (tmp == 0) 2083 return hotplug_status; 2084 2085 hotplug_status |= tmp; 2086 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); 2087 } 2088 2089 WARN_ONCE(1, 2090 "PORT_HOTPLUG_STAT did not clear (0x%08x)\n", 2091 I915_READ(PORT_HOTPLUG_STAT)); 2092 2093 return hotplug_status; 2094 } 2095 2096 static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv, 2097 u32 hotplug_status) 2098 { 2099 u32 pin_mask = 0, long_mask = 0; 2100 2101 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || 2102 IS_CHERRYVIEW(dev_priv)) { 2103 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X; 2104 2105 if (hotplug_trigger) { 2106 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 2107 hotplug_trigger, hotplug_trigger, 2108 hpd_status_g4x, 2109 i9xx_port_hotplug_long_detect); 2110 2111 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2112 } 2113 2114 if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X) 2115 dp_aux_irq_handler(dev_priv); 2116 } else { 2117 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; 2118 2119 if (hotplug_trigger) { 2120 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 2121 hotplug_trigger, hotplug_trigger, 2122 hpd_status_i915, 2123 i9xx_port_hotplug_long_detect); 2124 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2125 } 2126 } 2127 } 2128 2129 static irqreturn_t valleyview_irq_handler(int irq, void *arg) 2130 { 2131 struct drm_device *dev = arg; 2132 struct drm_i915_private *dev_priv = to_i915(dev); 2133 irqreturn_t ret = IRQ_NONE; 2134 2135 if (!intel_irqs_enabled(dev_priv)) 2136 return IRQ_NONE; 2137 2138 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2139 disable_rpm_wakeref_asserts(dev_priv); 2140 2141 do { 2142 u32 iir, gt_iir, pm_iir; 2143 u32 pipe_stats[I915_MAX_PIPES] = {}; 2144 u32 hotplug_status = 0; 2145 u32 ier = 0; 2146 2147 gt_iir = I915_READ(GTIIR); 2148 pm_iir = I915_READ(GEN6_PMIIR); 2149 iir = I915_READ(VLV_IIR); 2150 2151 if (gt_iir == 0 && pm_iir == 0 && iir == 0) 2152 break; 2153 2154 ret = IRQ_HANDLED; 2155 2156 /* 2157 * Theory on interrupt generation, based on empirical evidence: 2158 * 2159 * x = ((VLV_IIR & VLV_IER) || 2160 * (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) && 2161 * (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE))); 2162 * 2163 * A CPU interrupt will only be raised when 'x' has a 0->1 edge. 2164 * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to 2165 * guarantee the CPU interrupt will be raised again even if we 2166 * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR 2167 * bits this time around. 
2168 */ 2169 I915_WRITE(VLV_MASTER_IER, 0); 2170 ier = I915_READ(VLV_IER); 2171 I915_WRITE(VLV_IER, 0); 2172 2173 if (gt_iir) 2174 I915_WRITE(GTIIR, gt_iir); 2175 if (pm_iir) 2176 I915_WRITE(GEN6_PMIIR, pm_iir); 2177 2178 if (iir & I915_DISPLAY_PORT_INTERRUPT) 2179 hotplug_status = i9xx_hpd_irq_ack(dev_priv); 2180 2181 /* Call regardless, as some status bits might not be 2182 * signalled in iir */ 2183 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats); 2184 2185 if (iir & (I915_LPE_PIPE_A_INTERRUPT | 2186 I915_LPE_PIPE_B_INTERRUPT)) 2187 intel_lpe_audio_irq_handler(dev_priv); 2188 2189 /* 2190 * VLV_IIR is single buffered, and reflects the level 2191 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last. 2192 */ 2193 if (iir) 2194 I915_WRITE(VLV_IIR, iir); 2195 2196 I915_WRITE(VLV_IER, ier); 2197 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); 2198 2199 if (gt_iir) 2200 snb_gt_irq_handler(dev_priv, gt_iir); 2201 if (pm_iir) 2202 gen6_rps_irq_handler(dev_priv, pm_iir); 2203 2204 if (hotplug_status) 2205 i9xx_hpd_irq_handler(dev_priv, hotplug_status); 2206 2207 valleyview_pipestat_irq_handler(dev_priv, pipe_stats); 2208 } while (0); 2209 2210 enable_rpm_wakeref_asserts(dev_priv); 2211 2212 return ret; 2213 } 2214 2215 static irqreturn_t cherryview_irq_handler(int irq, void *arg) 2216 { 2217 struct drm_device *dev = arg; 2218 struct drm_i915_private *dev_priv = to_i915(dev); 2219 irqreturn_t ret = IRQ_NONE; 2220 2221 if (!intel_irqs_enabled(dev_priv)) 2222 return IRQ_NONE; 2223 2224 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2225 disable_rpm_wakeref_asserts(dev_priv); 2226 2227 do { 2228 u32 master_ctl, iir; 2229 u32 pipe_stats[I915_MAX_PIPES] = {}; 2230 u32 hotplug_status = 0; 2231 u32 gt_iir[4]; 2232 u32 ier = 0; 2233 2234 master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL; 2235 iir = I915_READ(VLV_IIR); 2236 2237 if (master_ctl == 0 && iir == 0) 2238 break; 2239 2240 ret = IRQ_HANDLED; 2241 2242 /* 2243 * Theory on interrupt generation, based on empirical evidence: 2244 * 2245 * x = ((VLV_IIR & VLV_IER) || 2246 * ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) && 2247 * (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL))); 2248 * 2249 * A CPU interrupt will only be raised when 'x' has a 0->1 edge. 2250 * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to 2251 * guarantee the CPU interrupt will be raised again even if we 2252 * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL 2253 * bits this time around. 2254 */ 2255 I915_WRITE(GEN8_MASTER_IRQ, 0); 2256 ier = I915_READ(VLV_IER); 2257 I915_WRITE(VLV_IER, 0); 2258 2259 gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir); 2260 2261 if (iir & I915_DISPLAY_PORT_INTERRUPT) 2262 hotplug_status = i9xx_hpd_irq_ack(dev_priv); 2263 2264 /* Call regardless, as some status bits might not be 2265 * signalled in iir */ 2266 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats); 2267 2268 if (iir & (I915_LPE_PIPE_A_INTERRUPT | 2269 I915_LPE_PIPE_B_INTERRUPT | 2270 I915_LPE_PIPE_C_INTERRUPT)) 2271 intel_lpe_audio_irq_handler(dev_priv); 2272 2273 /* 2274 * VLV_IIR is single buffered, and reflects the level 2275 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last. 
2276 */
2277 if (iir)
2278 I915_WRITE(VLV_IIR, iir);
2279
2280 I915_WRITE(VLV_IER, ier);
2281 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
2282
2283 gen8_gt_irq_handler(dev_priv, master_ctl, gt_iir);
2284
2285 if (hotplug_status)
2286 i9xx_hpd_irq_handler(dev_priv, hotplug_status);
2287
2288 valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
2289 } while (0);
2290
2291 enable_rpm_wakeref_asserts(dev_priv);
2292
2293 return ret;
2294 }
2295
2296 static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
2297 u32 hotplug_trigger,
2298 const u32 hpd[HPD_NUM_PINS])
2299 {
2300 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2301
2302 /*
2303 * Somehow the PCH doesn't seem to really ack the interrupt to the CPU
2304 * unless we touch the hotplug register, even if hotplug_trigger is
2305 * zero. Not acking leads to "The master control interrupt lied (SDE)!"
2306 * errors.
2307 */
2308 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2309 if (!hotplug_trigger) {
2310 u32 mask = PORTA_HOTPLUG_STATUS_MASK |
2311 PORTD_HOTPLUG_STATUS_MASK |
2312 PORTC_HOTPLUG_STATUS_MASK |
2313 PORTB_HOTPLUG_STATUS_MASK;
2314 dig_hotplug_reg &= ~mask;
2315 }
2316
2317 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2318 if (!hotplug_trigger)
2319 return;
2320
2321 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger,
2322 dig_hotplug_reg, hpd,
2323 pch_port_hotplug_long_detect);
2324
2325 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2326 }
2327
2328 static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
2329 {
2330 int pipe;
2331 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
2332
2333 ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ibx);
2334
2335 if (pch_iir & SDE_AUDIO_POWER_MASK) {
2336 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
2337 SDE_AUDIO_POWER_SHIFT);
2338 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
2339 port_name(port));
2340 }
2341
2342 if (pch_iir & SDE_AUX_MASK)
2343 dp_aux_irq_handler(dev_priv);
2344
2345 if (pch_iir & SDE_GMBUS)
2346 gmbus_irq_handler(dev_priv);
2347
2348 if (pch_iir & SDE_AUDIO_HDCP_MASK)
2349 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
2350
2351 if (pch_iir & SDE_AUDIO_TRANS_MASK)
2352 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
2353
2354 if (pch_iir & SDE_POISON)
2355 DRM_ERROR("PCH poison interrupt\n");
2356
2357 if (pch_iir & SDE_FDI_MASK)
2358 for_each_pipe(dev_priv, pipe)
2359 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
2360 pipe_name(pipe),
2361 I915_READ(FDI_RX_IIR(pipe)));
2362
2363 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
2364 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
2365
2366 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
2367 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
2368
2369 if (pch_iir & SDE_TRANSA_FIFO_UNDER)
2370 intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A);
2371
2372 if (pch_iir & SDE_TRANSB_FIFO_UNDER)
2373 intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_B);
2374 }
2375
2376 static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
2377 {
2378 u32 err_int = I915_READ(GEN7_ERR_INT);
2379 enum pipe pipe;
2380
2381 if (err_int & ERR_INT_POISON)
2382 DRM_ERROR("Poison interrupt\n");
2383
2384 for_each_pipe(dev_priv, pipe) {
2385 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
2386 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2387
2388 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
2389 if (IS_IVYBRIDGE(dev_priv))
2390 ivb_pipe_crc_irq_handler(dev_priv, pipe);
2391 else
2392
hsw_pipe_crc_irq_handler(dev_priv, pipe); 2393 } 2394 } 2395 2396 I915_WRITE(GEN7_ERR_INT, err_int); 2397 } 2398 2399 static void cpt_serr_int_handler(struct drm_i915_private *dev_priv) 2400 { 2401 u32 serr_int = I915_READ(SERR_INT); 2402 enum pipe pipe; 2403 2404 if (serr_int & SERR_INT_POISON) 2405 DRM_ERROR("PCH poison interrupt\n"); 2406 2407 for_each_pipe(dev_priv, pipe) 2408 if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe)) 2409 intel_pch_fifo_underrun_irq_handler(dev_priv, pipe); 2410 2411 I915_WRITE(SERR_INT, serr_int); 2412 } 2413 2414 static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) 2415 { 2416 int pipe; 2417 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; 2418 2419 ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_cpt); 2420 2421 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) { 2422 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> 2423 SDE_AUDIO_POWER_SHIFT_CPT); 2424 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n", 2425 port_name(port)); 2426 } 2427 2428 if (pch_iir & SDE_AUX_MASK_CPT) 2429 dp_aux_irq_handler(dev_priv); 2430 2431 if (pch_iir & SDE_GMBUS_CPT) 2432 gmbus_irq_handler(dev_priv); 2433 2434 if (pch_iir & SDE_AUDIO_CP_REQ_CPT) 2435 DRM_DEBUG_DRIVER("Audio CP request interrupt\n"); 2436 2437 if (pch_iir & SDE_AUDIO_CP_CHG_CPT) 2438 DRM_DEBUG_DRIVER("Audio CP change interrupt\n"); 2439 2440 if (pch_iir & SDE_FDI_MASK_CPT) 2441 for_each_pipe(dev_priv, pipe) 2442 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 2443 pipe_name(pipe), 2444 I915_READ(FDI_RX_IIR(pipe))); 2445 2446 if (pch_iir & SDE_ERROR_CPT) 2447 cpt_serr_int_handler(dev_priv); 2448 } 2449 2450 static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) 2451 { 2452 u32 ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_ICP; 2453 u32 tc_hotplug_trigger = pch_iir & SDE_TC_MASK_ICP; 2454 u32 pin_mask = 0, long_mask = 0; 2455 2456 if (ddi_hotplug_trigger) { 2457 u32 dig_hotplug_reg; 2458 2459 dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_DDI); 2460 I915_WRITE(SHOTPLUG_CTL_DDI, dig_hotplug_reg); 2461 2462 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 2463 ddi_hotplug_trigger, 2464 dig_hotplug_reg, hpd_icp, 2465 icp_ddi_port_hotplug_long_detect); 2466 } 2467 2468 if (tc_hotplug_trigger) { 2469 u32 dig_hotplug_reg; 2470 2471 dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_TC); 2472 I915_WRITE(SHOTPLUG_CTL_TC, dig_hotplug_reg); 2473 2474 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 2475 tc_hotplug_trigger, 2476 dig_hotplug_reg, hpd_icp, 2477 icp_tc_port_hotplug_long_detect); 2478 } 2479 2480 if (pin_mask) 2481 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2482 2483 if (pch_iir & SDE_GMBUS_ICP) 2484 gmbus_irq_handler(dev_priv); 2485 } 2486 2487 static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) 2488 { 2489 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT & 2490 ~SDE_PORTE_HOTPLUG_SPT; 2491 u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT; 2492 u32 pin_mask = 0, long_mask = 0; 2493 2494 if (hotplug_trigger) { 2495 u32 dig_hotplug_reg; 2496 2497 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); 2498 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); 2499 2500 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 2501 hotplug_trigger, dig_hotplug_reg, hpd_spt, 2502 spt_port_hotplug_long_detect); 2503 } 2504 2505 if (hotplug2_trigger) { 2506 u32 dig_hotplug_reg; 2507 2508 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2); 2509 I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg); 2510 2511 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 2512 
hotplug2_trigger, dig_hotplug_reg, hpd_spt, 2513 spt_port_hotplug2_long_detect); 2514 } 2515 2516 if (pin_mask) 2517 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2518 2519 if (pch_iir & SDE_GMBUS_CPT) 2520 gmbus_irq_handler(dev_priv); 2521 } 2522 2523 static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv, 2524 u32 hotplug_trigger, 2525 const u32 hpd[HPD_NUM_PINS]) 2526 { 2527 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; 2528 2529 dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL); 2530 I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg); 2531 2532 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger, 2533 dig_hotplug_reg, hpd, 2534 ilk_port_hotplug_long_detect); 2535 2536 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2537 } 2538 2539 static void ilk_display_irq_handler(struct drm_i915_private *dev_priv, 2540 u32 de_iir) 2541 { 2542 enum pipe pipe; 2543 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG; 2544 2545 if (hotplug_trigger) 2546 ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ilk); 2547 2548 if (de_iir & DE_AUX_CHANNEL_A) 2549 dp_aux_irq_handler(dev_priv); 2550 2551 if (de_iir & DE_GSE) 2552 intel_opregion_asle_intr(dev_priv); 2553 2554 if (de_iir & DE_POISON) 2555 DRM_ERROR("Poison interrupt\n"); 2556 2557 for_each_pipe(dev_priv, pipe) { 2558 if (de_iir & DE_PIPE_VBLANK(pipe)) 2559 drm_handle_vblank(&dev_priv->drm, pipe); 2560 2561 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe)) 2562 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2563 2564 if (de_iir & DE_PIPE_CRC_DONE(pipe)) 2565 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 2566 } 2567 2568 /* check event from PCH */ 2569 if (de_iir & DE_PCH_EVENT) { 2570 u32 pch_iir = I915_READ(SDEIIR); 2571 2572 if (HAS_PCH_CPT(dev_priv)) 2573 cpt_irq_handler(dev_priv, pch_iir); 2574 else 2575 ibx_irq_handler(dev_priv, pch_iir); 2576 2577 /* should clear PCH hotplug event before clear CPU irq */ 2578 I915_WRITE(SDEIIR, pch_iir); 2579 } 2580 2581 if (IS_GEN(dev_priv, 5) && de_iir & DE_PCU_EVENT) 2582 ironlake_rps_change_irq_handler(dev_priv); 2583 } 2584 2585 static void ivb_display_irq_handler(struct drm_i915_private *dev_priv, 2586 u32 de_iir) 2587 { 2588 enum pipe pipe; 2589 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB; 2590 2591 if (hotplug_trigger) 2592 ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ivb); 2593 2594 if (de_iir & DE_ERR_INT_IVB) 2595 ivb_err_int_handler(dev_priv); 2596 2597 if (de_iir & DE_EDP_PSR_INT_HSW) { 2598 u32 psr_iir = I915_READ(EDP_PSR_IIR); 2599 2600 intel_psr_irq_handler(dev_priv, psr_iir); 2601 I915_WRITE(EDP_PSR_IIR, psr_iir); 2602 } 2603 2604 if (de_iir & DE_AUX_CHANNEL_A_IVB) 2605 dp_aux_irq_handler(dev_priv); 2606 2607 if (de_iir & DE_GSE_IVB) 2608 intel_opregion_asle_intr(dev_priv); 2609 2610 for_each_pipe(dev_priv, pipe) { 2611 if (de_iir & (DE_PIPE_VBLANK_IVB(pipe))) 2612 drm_handle_vblank(&dev_priv->drm, pipe); 2613 } 2614 2615 /* check event from PCH */ 2616 if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) { 2617 u32 pch_iir = I915_READ(SDEIIR); 2618 2619 cpt_irq_handler(dev_priv, pch_iir); 2620 2621 /* clear PCH hotplug event before clear CPU irq */ 2622 I915_WRITE(SDEIIR, pch_iir); 2623 } 2624 } 2625 2626 /* 2627 * To handle irqs with the minimum potential races with fresh interrupts, we: 2628 * 1 - Disable Master Interrupt Control. 2629 * 2 - Find the source(s) of the interrupt. 2630 * 3 - Clear the Interrupt Identity bits (IIR). 2631 * 4 - Process the interrupt(s) that had bits set in the IIRs. 
2632 * 5 - Re-enable Master Interrupt Control.
2633 */
2634 static irqreturn_t ironlake_irq_handler(int irq, void *arg)
2635 {
2636 struct drm_device *dev = arg;
2637 struct drm_i915_private *dev_priv = to_i915(dev);
2638 u32 de_iir, gt_iir, de_ier, sde_ier = 0;
2639 irqreturn_t ret = IRQ_NONE;
2640
2641 if (!intel_irqs_enabled(dev_priv))
2642 return IRQ_NONE;
2643
2644 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
2645 disable_rpm_wakeref_asserts(dev_priv);
2646
2647 /* disable master interrupt before clearing iir */
2648 de_ier = I915_READ(DEIER);
2649 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
2650
2651 /* Disable south interrupts. We'll only write to SDEIIR once, so further
2652 * interrupts will be stored on its back queue, and then we'll be
2653 * able to process them after we restore SDEIER (as soon as we restore
2654 * it, we'll get an interrupt if SDEIIR still has something to process
2655 * due to its back queue). */
2656 if (!HAS_PCH_NOP(dev_priv)) {
2657 sde_ier = I915_READ(SDEIER);
2658 I915_WRITE(SDEIER, 0);
2659 }
2660
2661 /* Find, clear, then process each source of interrupt */
2662
2663 gt_iir = I915_READ(GTIIR);
2664 if (gt_iir) {
2665 I915_WRITE(GTIIR, gt_iir);
2666 ret = IRQ_HANDLED;
2667 if (INTEL_GEN(dev_priv) >= 6)
2668 snb_gt_irq_handler(dev_priv, gt_iir);
2669 else
2670 ilk_gt_irq_handler(dev_priv, gt_iir);
2671 }
2672
2673 de_iir = I915_READ(DEIIR);
2674 if (de_iir) {
2675 I915_WRITE(DEIIR, de_iir);
2676 ret = IRQ_HANDLED;
2677 if (INTEL_GEN(dev_priv) >= 7)
2678 ivb_display_irq_handler(dev_priv, de_iir);
2679 else
2680 ilk_display_irq_handler(dev_priv, de_iir);
2681 }
2682
2683 if (INTEL_GEN(dev_priv) >= 6) {
2684 u32 pm_iir = I915_READ(GEN6_PMIIR);
2685 if (pm_iir) {
2686 I915_WRITE(GEN6_PMIIR, pm_iir);
2687 ret = IRQ_HANDLED;
2688 gen6_rps_irq_handler(dev_priv, pm_iir);
2689 }
2690 }
2691
2692 I915_WRITE(DEIER, de_ier);
2693 if (!HAS_PCH_NOP(dev_priv))
2694 I915_WRITE(SDEIER, sde_ier);
2695
2696 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
2697 enable_rpm_wakeref_asserts(dev_priv);
2698
2699 return ret;
2700 }
2701
2702 static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv,
2703 u32 hotplug_trigger,
2704 const u32 hpd[HPD_NUM_PINS])
2705 {
2706 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2707
2708 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2709 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2710
2711 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger,
2712 dig_hotplug_reg, hpd,
2713 bxt_port_hotplug_long_detect);
2714
2715 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2716 }
2717
2718 static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
2719 {
2720 u32 pin_mask = 0, long_mask = 0;
2721 u32 trigger_tc = iir & GEN11_DE_TC_HOTPLUG_MASK;
2722 u32 trigger_tbt = iir & GEN11_DE_TBT_HOTPLUG_MASK;
2723
2724 if (trigger_tc) {
2725 u32 dig_hotplug_reg;
2726
2727 dig_hotplug_reg = I915_READ(GEN11_TC_HOTPLUG_CTL);
2728 I915_WRITE(GEN11_TC_HOTPLUG_CTL, dig_hotplug_reg);
2729
2730 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tc,
2731 dig_hotplug_reg, hpd_gen11,
2732 gen11_port_hotplug_long_detect);
2733 }
2734
2735 if (trigger_tbt) {
2736 u32 dig_hotplug_reg;
2737
2738 dig_hotplug_reg = I915_READ(GEN11_TBT_HOTPLUG_CTL);
2739 I915_WRITE(GEN11_TBT_HOTPLUG_CTL, dig_hotplug_reg);
2740
2741 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tbt,
2742 dig_hotplug_reg, hpd_gen11,
2743 gen11_port_hotplug_long_detect);
2744 } 2745 2746 if (pin_mask) 2747 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2748 else 2749 DRM_ERROR("Unexpected DE HPD interrupt 0x%08x\n", iir); 2750 } 2751 2752 static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv) 2753 { 2754 u32 mask = GEN8_AUX_CHANNEL_A; 2755 2756 if (INTEL_GEN(dev_priv) >= 9) 2757 mask |= GEN9_AUX_CHANNEL_B | 2758 GEN9_AUX_CHANNEL_C | 2759 GEN9_AUX_CHANNEL_D; 2760 2761 if (IS_CNL_WITH_PORT_F(dev_priv)) 2762 mask |= CNL_AUX_CHANNEL_F; 2763 2764 if (INTEL_GEN(dev_priv) >= 11) 2765 mask |= ICL_AUX_CHANNEL_E | 2766 CNL_AUX_CHANNEL_F; 2767 2768 return mask; 2769 } 2770 2771 static irqreturn_t 2772 gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) 2773 { 2774 irqreturn_t ret = IRQ_NONE; 2775 u32 iir; 2776 enum pipe pipe; 2777 2778 if (master_ctl & GEN8_DE_MISC_IRQ) { 2779 iir = I915_READ(GEN8_DE_MISC_IIR); 2780 if (iir) { 2781 bool found = false; 2782 2783 I915_WRITE(GEN8_DE_MISC_IIR, iir); 2784 ret = IRQ_HANDLED; 2785 2786 if (iir & GEN8_DE_MISC_GSE) { 2787 intel_opregion_asle_intr(dev_priv); 2788 found = true; 2789 } 2790 2791 if (iir & GEN8_DE_EDP_PSR) { 2792 u32 psr_iir = I915_READ(EDP_PSR_IIR); 2793 2794 intel_psr_irq_handler(dev_priv, psr_iir); 2795 I915_WRITE(EDP_PSR_IIR, psr_iir); 2796 found = true; 2797 } 2798 2799 if (!found) 2800 DRM_ERROR("Unexpected DE Misc interrupt\n"); 2801 } 2802 else 2803 DRM_ERROR("The master control interrupt lied (DE MISC)!\n"); 2804 } 2805 2806 if (INTEL_GEN(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) { 2807 iir = I915_READ(GEN11_DE_HPD_IIR); 2808 if (iir) { 2809 I915_WRITE(GEN11_DE_HPD_IIR, iir); 2810 ret = IRQ_HANDLED; 2811 gen11_hpd_irq_handler(dev_priv, iir); 2812 } else { 2813 DRM_ERROR("The master control interrupt lied, (DE HPD)!\n"); 2814 } 2815 } 2816 2817 if (master_ctl & GEN8_DE_PORT_IRQ) { 2818 iir = I915_READ(GEN8_DE_PORT_IIR); 2819 if (iir) { 2820 u32 tmp_mask; 2821 bool found = false; 2822 2823 I915_WRITE(GEN8_DE_PORT_IIR, iir); 2824 ret = IRQ_HANDLED; 2825 2826 if (iir & gen8_de_port_aux_mask(dev_priv)) { 2827 dp_aux_irq_handler(dev_priv); 2828 found = true; 2829 } 2830 2831 if (IS_GEN9_LP(dev_priv)) { 2832 tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK; 2833 if (tmp_mask) { 2834 bxt_hpd_irq_handler(dev_priv, tmp_mask, 2835 hpd_bxt); 2836 found = true; 2837 } 2838 } else if (IS_BROADWELL(dev_priv)) { 2839 tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG; 2840 if (tmp_mask) { 2841 ilk_hpd_irq_handler(dev_priv, 2842 tmp_mask, hpd_bdw); 2843 found = true; 2844 } 2845 } 2846 2847 if (IS_GEN9_LP(dev_priv) && (iir & BXT_DE_PORT_GMBUS)) { 2848 gmbus_irq_handler(dev_priv); 2849 found = true; 2850 } 2851 2852 if (!found) 2853 DRM_ERROR("Unexpected DE Port interrupt\n"); 2854 } 2855 else 2856 DRM_ERROR("The master control interrupt lied (DE PORT)!\n"); 2857 } 2858 2859 for_each_pipe(dev_priv, pipe) { 2860 u32 fault_errors; 2861 2862 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe))) 2863 continue; 2864 2865 iir = I915_READ(GEN8_DE_PIPE_IIR(pipe)); 2866 if (!iir) { 2867 DRM_ERROR("The master control interrupt lied (DE PIPE)!\n"); 2868 continue; 2869 } 2870 2871 ret = IRQ_HANDLED; 2872 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir); 2873 2874 if (iir & GEN8_PIPE_VBLANK) 2875 drm_handle_vblank(&dev_priv->drm, pipe); 2876 2877 if (iir & GEN8_PIPE_CDCLK_CRC_DONE) 2878 hsw_pipe_crc_irq_handler(dev_priv, pipe); 2879 2880 if (iir & GEN8_PIPE_FIFO_UNDERRUN) 2881 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2882 2883 fault_errors = iir; 2884 if (INTEL_GEN(dev_priv) >= 9) 2885 fault_errors &= 
GEN9_DE_PIPE_IRQ_FAULT_ERRORS; 2886 else 2887 fault_errors &= GEN8_DE_PIPE_IRQ_FAULT_ERRORS; 2888 2889 if (fault_errors) 2890 DRM_ERROR("Fault errors on pipe %c: 0x%08x\n", 2891 pipe_name(pipe), 2892 fault_errors); 2893 } 2894 2895 if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) && 2896 master_ctl & GEN8_DE_PCH_IRQ) { 2897 /* 2898 * FIXME(BDW): Assume for now that the new interrupt handling 2899 * scheme also closed the SDE interrupt handling race we've seen 2900 * on older pch-split platforms. But this needs testing. 2901 */ 2902 iir = I915_READ(SDEIIR); 2903 if (iir) { 2904 I915_WRITE(SDEIIR, iir); 2905 ret = IRQ_HANDLED; 2906 2907 if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) 2908 icp_irq_handler(dev_priv, iir); 2909 else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT) 2910 spt_irq_handler(dev_priv, iir); 2911 else 2912 cpt_irq_handler(dev_priv, iir); 2913 } else { 2914 /* 2915 * Like on previous PCH there seems to be something 2916 * fishy going on with forwarding PCH interrupts. 2917 */ 2918 DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n"); 2919 } 2920 } 2921 2922 return ret; 2923 } 2924 2925 static inline u32 gen8_master_intr_disable(void __iomem * const regs) 2926 { 2927 raw_reg_write(regs, GEN8_MASTER_IRQ, 0); 2928 2929 /* 2930 * Now with master disabled, get a sample of level indications 2931 * for this interrupt. Indications will be cleared on related acks. 2932 * New indications can and will light up during processing, 2933 * and will generate new interrupt after enabling master. 2934 */ 2935 return raw_reg_read(regs, GEN8_MASTER_IRQ); 2936 } 2937 2938 static inline void gen8_master_intr_enable(void __iomem * const regs) 2939 { 2940 raw_reg_write(regs, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 2941 } 2942 2943 static irqreturn_t gen8_irq_handler(int irq, void *arg) 2944 { 2945 struct drm_i915_private *dev_priv = to_i915(arg); 2946 void __iomem * const regs = dev_priv->uncore.regs; 2947 u32 master_ctl; 2948 u32 gt_iir[4]; 2949 2950 if (!intel_irqs_enabled(dev_priv)) 2951 return IRQ_NONE; 2952 2953 master_ctl = gen8_master_intr_disable(regs); 2954 if (!master_ctl) { 2955 gen8_master_intr_enable(regs); 2956 return IRQ_NONE; 2957 } 2958 2959 /* Find, clear, then process each source of interrupt */ 2960 gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir); 2961 2962 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2963 if (master_ctl & ~GEN8_GT_IRQS) { 2964 disable_rpm_wakeref_asserts(dev_priv); 2965 gen8_de_irq_handler(dev_priv, master_ctl); 2966 enable_rpm_wakeref_asserts(dev_priv); 2967 } 2968 2969 gen8_master_intr_enable(regs); 2970 2971 gen8_gt_irq_handler(dev_priv, master_ctl, gt_iir); 2972 2973 return IRQ_HANDLED; 2974 } 2975 2976 static u32 2977 gen11_gt_engine_identity(struct drm_i915_private * const i915, 2978 const unsigned int bank, const unsigned int bit) 2979 { 2980 void __iomem * const regs = i915->uncore.regs; 2981 u32 timeout_ts; 2982 u32 ident; 2983 2984 lockdep_assert_held(&i915->irq_lock); 2985 2986 raw_reg_write(regs, GEN11_IIR_REG_SELECTOR(bank), BIT(bit)); 2987 2988 /* 2989 * NB: Specs do not specify how long to spin wait, 2990 * so we do ~100us as an educated guess. 
2991 */ 2992 timeout_ts = (local_clock() >> 10) + 100; 2993 do { 2994 ident = raw_reg_read(regs, GEN11_INTR_IDENTITY_REG(bank)); 2995 } while (!(ident & GEN11_INTR_DATA_VALID) && 2996 !time_after32(local_clock() >> 10, timeout_ts)); 2997 2998 if (unlikely(!(ident & GEN11_INTR_DATA_VALID))) { 2999 DRM_ERROR("INTR_IDENTITY_REG%u:%u 0x%08x not valid!\n", 3000 bank, bit, ident); 3001 return 0; 3002 } 3003 3004 raw_reg_write(regs, GEN11_INTR_IDENTITY_REG(bank), 3005 GEN11_INTR_DATA_VALID); 3006 3007 return ident; 3008 } 3009 3010 static void 3011 gen11_other_irq_handler(struct drm_i915_private * const i915, 3012 const u8 instance, const u16 iir) 3013 { 3014 if (instance == OTHER_GTPM_INSTANCE) 3015 return gen11_rps_irq_handler(i915, iir); 3016 3017 WARN_ONCE(1, "unhandled other interrupt instance=0x%x, iir=0x%x\n", 3018 instance, iir); 3019 } 3020 3021 static void 3022 gen11_engine_irq_handler(struct drm_i915_private * const i915, 3023 const u8 class, const u8 instance, const u16 iir) 3024 { 3025 struct intel_engine_cs *engine; 3026 3027 if (instance <= MAX_ENGINE_INSTANCE) 3028 engine = i915->engine_class[class][instance]; 3029 else 3030 engine = NULL; 3031 3032 if (likely(engine)) 3033 return gen8_cs_irq_handler(engine, iir); 3034 3035 WARN_ONCE(1, "unhandled engine interrupt class=0x%x, instance=0x%x\n", 3036 class, instance); 3037 } 3038 3039 static void 3040 gen11_gt_identity_handler(struct drm_i915_private * const i915, 3041 const u32 identity) 3042 { 3043 const u8 class = GEN11_INTR_ENGINE_CLASS(identity); 3044 const u8 instance = GEN11_INTR_ENGINE_INSTANCE(identity); 3045 const u16 intr = GEN11_INTR_ENGINE_INTR(identity); 3046 3047 if (unlikely(!intr)) 3048 return; 3049 3050 if (class <= COPY_ENGINE_CLASS) 3051 return gen11_engine_irq_handler(i915, class, instance, intr); 3052 3053 if (class == OTHER_CLASS) 3054 return gen11_other_irq_handler(i915, instance, intr); 3055 3056 WARN_ONCE(1, "unknown interrupt class=0x%x, instance=0x%x, intr=0x%x\n", 3057 class, instance, intr); 3058 } 3059 3060 static void 3061 gen11_gt_bank_handler(struct drm_i915_private * const i915, 3062 const unsigned int bank) 3063 { 3064 void __iomem * const regs = i915->uncore.regs; 3065 unsigned long intr_dw; 3066 unsigned int bit; 3067 3068 lockdep_assert_held(&i915->irq_lock); 3069 3070 intr_dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank)); 3071 3072 for_each_set_bit(bit, &intr_dw, 32) { 3073 const u32 ident = gen11_gt_engine_identity(i915, bank, bit); 3074 3075 gen11_gt_identity_handler(i915, ident); 3076 } 3077 3078 /* Clear must be after shared has been served for engine */ 3079 raw_reg_write(regs, GEN11_GT_INTR_DW(bank), intr_dw); 3080 } 3081 3082 static void 3083 gen11_gt_irq_handler(struct drm_i915_private * const i915, 3084 const u32 master_ctl) 3085 { 3086 unsigned int bank; 3087 3088 spin_lock(&i915->irq_lock); 3089 3090 for (bank = 0; bank < 2; bank++) { 3091 if (master_ctl & GEN11_GT_DW_IRQ(bank)) 3092 gen11_gt_bank_handler(i915, bank); 3093 } 3094 3095 spin_unlock(&i915->irq_lock); 3096 } 3097 3098 static u32 3099 gen11_gu_misc_irq_ack(struct drm_i915_private *dev_priv, const u32 master_ctl) 3100 { 3101 void __iomem * const regs = dev_priv->uncore.regs; 3102 u32 iir; 3103 3104 if (!(master_ctl & GEN11_GU_MISC_IRQ)) 3105 return 0; 3106 3107 iir = raw_reg_read(regs, GEN11_GU_MISC_IIR); 3108 if (likely(iir)) 3109 raw_reg_write(regs, GEN11_GU_MISC_IIR, iir); 3110 3111 return iir; 3112 } 3113 3114 static void 3115 gen11_gu_misc_irq_handler(struct drm_i915_private *dev_priv, const u32 iir) 3116 { 3117 if 
(iir & GEN11_GU_MISC_GSE) 3118 intel_opregion_asle_intr(dev_priv); 3119 } 3120 3121 static inline u32 gen11_master_intr_disable(void __iomem * const regs) 3122 { 3123 raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0); 3124 3125 /* 3126 * Now with master disabled, get a sample of level indications 3127 * for this interrupt. Indications will be cleared on related acks. 3128 * New indications can and will light up during processing, 3129 * and will generate new interrupt after enabling master. 3130 */ 3131 return raw_reg_read(regs, GEN11_GFX_MSTR_IRQ); 3132 } 3133 3134 static inline void gen11_master_intr_enable(void __iomem * const regs) 3135 { 3136 raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ); 3137 } 3138 3139 static irqreturn_t gen11_irq_handler(int irq, void *arg) 3140 { 3141 struct drm_i915_private * const i915 = to_i915(arg); 3142 void __iomem * const regs = i915->uncore.regs; 3143 u32 master_ctl; 3144 u32 gu_misc_iir; 3145 3146 if (!intel_irqs_enabled(i915)) 3147 return IRQ_NONE; 3148 3149 master_ctl = gen11_master_intr_disable(regs); 3150 if (!master_ctl) { 3151 gen11_master_intr_enable(regs); 3152 return IRQ_NONE; 3153 } 3154 3155 /* Find, clear, then process each source of interrupt. */ 3156 gen11_gt_irq_handler(i915, master_ctl); 3157 3158 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 3159 if (master_ctl & GEN11_DISPLAY_IRQ) { 3160 const u32 disp_ctl = raw_reg_read(regs, GEN11_DISPLAY_INT_CTL); 3161 3162 disable_rpm_wakeref_asserts(i915); 3163 /* 3164 * GEN11_DISPLAY_INT_CTL has same format as GEN8_MASTER_IRQ 3165 * for the display related bits. 3166 */ 3167 gen8_de_irq_handler(i915, disp_ctl); 3168 enable_rpm_wakeref_asserts(i915); 3169 } 3170 3171 gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl); 3172 3173 gen11_master_intr_enable(regs); 3174 3175 gen11_gu_misc_irq_handler(i915, gu_misc_iir); 3176 3177 return IRQ_HANDLED; 3178 } 3179 3180 /* Called from drm generic code, passed 'crtc' which 3181 * we use as a pipe index 3182 */ 3183 static int i8xx_enable_vblank(struct drm_device *dev, unsigned int pipe) 3184 { 3185 struct drm_i915_private *dev_priv = to_i915(dev); 3186 unsigned long irqflags; 3187 3188 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3189 i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS); 3190 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3191 3192 return 0; 3193 } 3194 3195 static int i945gm_enable_vblank(struct drm_device *dev, unsigned int pipe) 3196 { 3197 struct drm_i915_private *dev_priv = to_i915(dev); 3198 3199 if (dev_priv->i945gm_vblank.enabled++ == 0) 3200 schedule_work(&dev_priv->i945gm_vblank.work); 3201 3202 return i8xx_enable_vblank(dev, pipe); 3203 } 3204 3205 static int i965_enable_vblank(struct drm_device *dev, unsigned int pipe) 3206 { 3207 struct drm_i915_private *dev_priv = to_i915(dev); 3208 unsigned long irqflags; 3209 3210 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3211 i915_enable_pipestat(dev_priv, pipe, 3212 PIPE_START_VBLANK_INTERRUPT_STATUS); 3213 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3214 3215 return 0; 3216 } 3217 3218 static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe) 3219 { 3220 struct drm_i915_private *dev_priv = to_i915(dev); 3221 unsigned long irqflags; 3222 u32 bit = INTEL_GEN(dev_priv) >= 7 ? 
3223 DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe); 3224 3225 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3226 ilk_enable_display_irq(dev_priv, bit); 3227 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3228 3229 /* Even though there is no DMC, frame counter can get stuck when 3230 * PSR is active as no frames are generated. 3231 */ 3232 if (HAS_PSR(dev_priv)) 3233 drm_vblank_restore(dev, pipe); 3234 3235 return 0; 3236 } 3237 3238 static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe) 3239 { 3240 struct drm_i915_private *dev_priv = to_i915(dev); 3241 unsigned long irqflags; 3242 3243 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3244 bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK); 3245 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3246 3247 /* Even if there is no DMC, frame counter can get stuck when 3248 * PSR is active as no frames are generated, so check only for PSR. 3249 */ 3250 if (HAS_PSR(dev_priv)) 3251 drm_vblank_restore(dev, pipe); 3252 3253 return 0; 3254 } 3255 3256 /* Called from drm generic code, passed 'crtc' which 3257 * we use as a pipe index 3258 */ 3259 static void i8xx_disable_vblank(struct drm_device *dev, unsigned int pipe) 3260 { 3261 struct drm_i915_private *dev_priv = to_i915(dev); 3262 unsigned long irqflags; 3263 3264 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3265 i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS); 3266 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3267 } 3268 3269 static void i945gm_disable_vblank(struct drm_device *dev, unsigned int pipe) 3270 { 3271 struct drm_i915_private *dev_priv = to_i915(dev); 3272 3273 i8xx_disable_vblank(dev, pipe); 3274 3275 if (--dev_priv->i945gm_vblank.enabled == 0) 3276 schedule_work(&dev_priv->i945gm_vblank.work); 3277 } 3278 3279 static void i965_disable_vblank(struct drm_device *dev, unsigned int pipe) 3280 { 3281 struct drm_i915_private *dev_priv = to_i915(dev); 3282 unsigned long irqflags; 3283 3284 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3285 i915_disable_pipestat(dev_priv, pipe, 3286 PIPE_START_VBLANK_INTERRUPT_STATUS); 3287 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3288 } 3289 3290 static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe) 3291 { 3292 struct drm_i915_private *dev_priv = to_i915(dev); 3293 unsigned long irqflags; 3294 u32 bit = INTEL_GEN(dev_priv) >= 7 ? 3295 DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe); 3296 3297 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3298 ilk_disable_display_irq(dev_priv, bit); 3299 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3300 } 3301 3302 static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe) 3303 { 3304 struct drm_i915_private *dev_priv = to_i915(dev); 3305 unsigned long irqflags; 3306 3307 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3308 bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK); 3309 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3310 } 3311 3312 static void i945gm_vblank_work_func(struct work_struct *work) 3313 { 3314 struct drm_i915_private *dev_priv = 3315 container_of(work, struct drm_i915_private, i945gm_vblank.work); 3316 3317 /* 3318 * Vblank interrupts fail to wake up the device from C3, 3319 * hence we want to prevent C3 usage while vblank interrupts 3320 * are enabled. 3321 */ 3322 pm_qos_update_request(&dev_priv->i945gm_vblank.pm_qos, 3323 READ_ONCE(dev_priv->i945gm_vblank.enabled) ? 
3324 dev_priv->i945gm_vblank.c3_disable_latency : 3325 PM_QOS_DEFAULT_VALUE); 3326 } 3327 3328 static int cstate_disable_latency(const char *name) 3329 { 3330 const struct cpuidle_driver *drv; 3331 int i; 3332 3333 drv = cpuidle_get_driver(); 3334 if (!drv) 3335 return 0; 3336 3337 for (i = 0; i < drv->state_count; i++) { 3338 const struct cpuidle_state *state = &drv->states[i]; 3339 3340 if (!strcmp(state->name, name)) 3341 return state->exit_latency ? 3342 state->exit_latency - 1 : 0; 3343 } 3344 3345 return 0; 3346 } 3347 3348 static void i945gm_vblank_work_init(struct drm_i915_private *dev_priv) 3349 { 3350 INIT_WORK(&dev_priv->i945gm_vblank.work, 3351 i945gm_vblank_work_func); 3352 3353 dev_priv->i945gm_vblank.c3_disable_latency = 3354 cstate_disable_latency("C3"); 3355 pm_qos_add_request(&dev_priv->i945gm_vblank.pm_qos, 3356 PM_QOS_CPU_DMA_LATENCY, 3357 PM_QOS_DEFAULT_VALUE); 3358 } 3359 3360 static void i945gm_vblank_work_fini(struct drm_i915_private *dev_priv) 3361 { 3362 cancel_work_sync(&dev_priv->i945gm_vblank.work); 3363 pm_qos_remove_request(&dev_priv->i945gm_vblank.pm_qos); 3364 } 3365 3366 static void ibx_irq_reset(struct drm_i915_private *dev_priv) 3367 { 3368 struct intel_uncore *uncore = &dev_priv->uncore; 3369 3370 if (HAS_PCH_NOP(dev_priv)) 3371 return; 3372 3373 GEN3_IRQ_RESET(uncore, SDE); 3374 3375 if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv)) 3376 I915_WRITE(SERR_INT, 0xffffffff); 3377 } 3378 3379 /* 3380 * SDEIER is also touched by the interrupt handler to work around missed PCH 3381 * interrupts. Hence we can't update it after the interrupt handler is enabled - 3382 * instead we unconditionally enable all PCH interrupt sources here, but then 3383 * only unmask them as needed with SDEIMR. 3384 * 3385 * This function needs to be called before interrupts are enabled. 
3386 */ 3387 static void ibx_irq_pre_postinstall(struct drm_device *dev) 3388 { 3389 struct drm_i915_private *dev_priv = to_i915(dev); 3390 3391 if (HAS_PCH_NOP(dev_priv)) 3392 return; 3393 3394 WARN_ON(I915_READ(SDEIER) != 0); 3395 I915_WRITE(SDEIER, 0xffffffff); 3396 POSTING_READ(SDEIER); 3397 } 3398 3399 static void gen5_gt_irq_reset(struct drm_i915_private *dev_priv) 3400 { 3401 struct intel_uncore *uncore = &dev_priv->uncore; 3402 3403 GEN3_IRQ_RESET(uncore, GT); 3404 if (INTEL_GEN(dev_priv) >= 6) 3405 GEN3_IRQ_RESET(uncore, GEN6_PM); 3406 } 3407 3408 static void vlv_display_irq_reset(struct drm_i915_private *dev_priv) 3409 { 3410 struct intel_uncore *uncore = &dev_priv->uncore; 3411 3412 if (IS_CHERRYVIEW(dev_priv)) 3413 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV); 3414 else 3415 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK); 3416 3417 i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0); 3418 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3419 3420 i9xx_pipestat_irq_reset(dev_priv); 3421 3422 GEN3_IRQ_RESET(uncore, VLV_); 3423 dev_priv->irq_mask = ~0u; 3424 } 3425 3426 static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv) 3427 { 3428 struct intel_uncore *uncore = &dev_priv->uncore; 3429 3430 u32 pipestat_mask; 3431 u32 enable_mask; 3432 enum pipe pipe; 3433 3434 pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS; 3435 3436 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); 3437 for_each_pipe(dev_priv, pipe) 3438 i915_enable_pipestat(dev_priv, pipe, pipestat_mask); 3439 3440 enable_mask = I915_DISPLAY_PORT_INTERRUPT | 3441 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3442 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3443 I915_LPE_PIPE_A_INTERRUPT | 3444 I915_LPE_PIPE_B_INTERRUPT; 3445 3446 if (IS_CHERRYVIEW(dev_priv)) 3447 enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT | 3448 I915_LPE_PIPE_C_INTERRUPT; 3449 3450 WARN_ON(dev_priv->irq_mask != ~0u); 3451 3452 dev_priv->irq_mask = ~enable_mask; 3453 3454 GEN3_IRQ_INIT(uncore, VLV_, dev_priv->irq_mask, enable_mask); 3455 } 3456 3457 /* drm_dma.h hooks 3458 */ 3459 static void ironlake_irq_reset(struct drm_device *dev) 3460 { 3461 struct drm_i915_private *dev_priv = to_i915(dev); 3462 struct intel_uncore *uncore = &dev_priv->uncore; 3463 3464 GEN3_IRQ_RESET(uncore, DE); 3465 if (IS_GEN(dev_priv, 7)) 3466 I915_WRITE(GEN7_ERR_INT, 0xffffffff); 3467 3468 if (IS_HASWELL(dev_priv)) { 3469 I915_WRITE(EDP_PSR_IMR, 0xffffffff); 3470 I915_WRITE(EDP_PSR_IIR, 0xffffffff); 3471 } 3472 3473 gen5_gt_irq_reset(dev_priv); 3474 3475 ibx_irq_reset(dev_priv); 3476 } 3477 3478 static void valleyview_irq_reset(struct drm_device *dev) 3479 { 3480 struct drm_i915_private *dev_priv = to_i915(dev); 3481 3482 I915_WRITE(VLV_MASTER_IER, 0); 3483 POSTING_READ(VLV_MASTER_IER); 3484 3485 gen5_gt_irq_reset(dev_priv); 3486 3487 spin_lock_irq(&dev_priv->irq_lock); 3488 if (dev_priv->display_irqs_enabled) 3489 vlv_display_irq_reset(dev_priv); 3490 spin_unlock_irq(&dev_priv->irq_lock); 3491 } 3492 3493 static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv) 3494 { 3495 struct intel_uncore *uncore = &dev_priv->uncore; 3496 3497 GEN8_IRQ_RESET_NDX(uncore, GT, 0); 3498 GEN8_IRQ_RESET_NDX(uncore, GT, 1); 3499 GEN8_IRQ_RESET_NDX(uncore, GT, 2); 3500 GEN8_IRQ_RESET_NDX(uncore, GT, 3); 3501 } 3502 3503 static void gen8_irq_reset(struct drm_device *dev) 3504 { 3505 struct drm_i915_private *dev_priv = to_i915(dev); 3506 struct intel_uncore *uncore = &dev_priv->uncore; 3507 int pipe; 3508 3509 
gen8_master_intr_disable(dev_priv->uncore.regs); 3510 3511 gen8_gt_irq_reset(dev_priv); 3512 3513 I915_WRITE(EDP_PSR_IMR, 0xffffffff); 3514 I915_WRITE(EDP_PSR_IIR, 0xffffffff); 3515 3516 for_each_pipe(dev_priv, pipe) 3517 if (intel_display_power_is_enabled(dev_priv, 3518 POWER_DOMAIN_PIPE(pipe))) 3519 GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe); 3520 3521 GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_); 3522 GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_); 3523 GEN3_IRQ_RESET(uncore, GEN8_PCU_); 3524 3525 if (HAS_PCH_SPLIT(dev_priv)) 3526 ibx_irq_reset(dev_priv); 3527 } 3528 3529 static void gen11_gt_irq_reset(struct drm_i915_private *dev_priv) 3530 { 3531 /* Disable RCS, BCS, VCS and VECS class engines. */ 3532 I915_WRITE(GEN11_RENDER_COPY_INTR_ENABLE, 0); 3533 I915_WRITE(GEN11_VCS_VECS_INTR_ENABLE, 0); 3534 3535 /* Restore masks irqs on RCS, BCS, VCS and VECS engines. */ 3536 I915_WRITE(GEN11_RCS0_RSVD_INTR_MASK, ~0); 3537 I915_WRITE(GEN11_BCS_RSVD_INTR_MASK, ~0); 3538 I915_WRITE(GEN11_VCS0_VCS1_INTR_MASK, ~0); 3539 I915_WRITE(GEN11_VCS2_VCS3_INTR_MASK, ~0); 3540 I915_WRITE(GEN11_VECS0_VECS1_INTR_MASK, ~0); 3541 3542 I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0); 3543 I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_MASK, ~0); 3544 } 3545 3546 static void gen11_irq_reset(struct drm_device *dev) 3547 { 3548 struct drm_i915_private *dev_priv = dev->dev_private; 3549 struct intel_uncore *uncore = &dev_priv->uncore; 3550 int pipe; 3551 3552 gen11_master_intr_disable(dev_priv->uncore.regs); 3553 3554 gen11_gt_irq_reset(dev_priv); 3555 3556 I915_WRITE(GEN11_DISPLAY_INT_CTL, 0); 3557 3558 I915_WRITE(EDP_PSR_IMR, 0xffffffff); 3559 I915_WRITE(EDP_PSR_IIR, 0xffffffff); 3560 3561 for_each_pipe(dev_priv, pipe) 3562 if (intel_display_power_is_enabled(dev_priv, 3563 POWER_DOMAIN_PIPE(pipe))) 3564 GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe); 3565 3566 GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_); 3567 GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_); 3568 GEN3_IRQ_RESET(uncore, GEN11_DE_HPD_); 3569 GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_); 3570 GEN3_IRQ_RESET(uncore, GEN8_PCU_); 3571 3572 if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) 3573 GEN3_IRQ_RESET(uncore, SDE); 3574 } 3575 3576 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv, 3577 u8 pipe_mask) 3578 { 3579 struct intel_uncore *uncore = &dev_priv->uncore; 3580 3581 u32 extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN; 3582 enum pipe pipe; 3583 3584 spin_lock_irq(&dev_priv->irq_lock); 3585 3586 if (!intel_irqs_enabled(dev_priv)) { 3587 spin_unlock_irq(&dev_priv->irq_lock); 3588 return; 3589 } 3590 3591 for_each_pipe_masked(dev_priv, pipe, pipe_mask) 3592 GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe, 3593 dev_priv->de_irq_mask[pipe], 3594 ~dev_priv->de_irq_mask[pipe] | extra_ier); 3595 3596 spin_unlock_irq(&dev_priv->irq_lock); 3597 } 3598 3599 void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv, 3600 u8 pipe_mask) 3601 { 3602 struct intel_uncore *uncore = &dev_priv->uncore; 3603 enum pipe pipe; 3604 3605 spin_lock_irq(&dev_priv->irq_lock); 3606 3607 if (!intel_irqs_enabled(dev_priv)) { 3608 spin_unlock_irq(&dev_priv->irq_lock); 3609 return; 3610 } 3611 3612 for_each_pipe_masked(dev_priv, pipe, pipe_mask) 3613 GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe); 3614 3615 spin_unlock_irq(&dev_priv->irq_lock); 3616 3617 /* make sure we're done processing display irqs */ 3618 synchronize_irq(dev_priv->drm.irq); 3619 } 3620 3621 static void cherryview_irq_reset(struct drm_device *dev) 3622 { 3623 struct drm_i915_private *dev_priv = to_i915(dev); 3624 struct intel_uncore *uncore = 
&dev_priv->uncore; 3625 3626 I915_WRITE(GEN8_MASTER_IRQ, 0); 3627 POSTING_READ(GEN8_MASTER_IRQ); 3628 3629 gen8_gt_irq_reset(dev_priv); 3630 3631 GEN3_IRQ_RESET(uncore, GEN8_PCU_); 3632 3633 spin_lock_irq(&dev_priv->irq_lock); 3634 if (dev_priv->display_irqs_enabled) 3635 vlv_display_irq_reset(dev_priv); 3636 spin_unlock_irq(&dev_priv->irq_lock); 3637 } 3638 3639 static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv, 3640 const u32 hpd[HPD_NUM_PINS]) 3641 { 3642 struct intel_encoder *encoder; 3643 u32 enabled_irqs = 0; 3644 3645 for_each_intel_encoder(&dev_priv->drm, encoder) 3646 if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED) 3647 enabled_irqs |= hpd[encoder->hpd_pin]; 3648 3649 return enabled_irqs; 3650 } 3651 3652 static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv) 3653 { 3654 u32 hotplug; 3655 3656 /* 3657 * Enable digital hotplug on the PCH, and configure the DP short pulse 3658 * duration to 2ms (which is the minimum in the Display Port spec). 3659 * The pulse duration bits are reserved on LPT+. 3660 */ 3661 hotplug = I915_READ(PCH_PORT_HOTPLUG); 3662 hotplug &= ~(PORTB_PULSE_DURATION_MASK | 3663 PORTC_PULSE_DURATION_MASK | 3664 PORTD_PULSE_DURATION_MASK); 3665 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms; 3666 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms; 3667 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms; 3668 /* 3669 * When CPU and PCH are on the same package, port A 3670 * HPD must be enabled in both north and south. 3671 */ 3672 if (HAS_PCH_LPT_LP(dev_priv)) 3673 hotplug |= PORTA_HOTPLUG_ENABLE; 3674 I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 3675 } 3676 3677 static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv) 3678 { 3679 u32 hotplug_irqs, enabled_irqs; 3680 3681 if (HAS_PCH_IBX(dev_priv)) { 3682 hotplug_irqs = SDE_HOTPLUG_MASK; 3683 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ibx); 3684 } else { 3685 hotplug_irqs = SDE_HOTPLUG_MASK_CPT; 3686 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_cpt); 3687 } 3688 3689 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 3690 3691 ibx_hpd_detection_setup(dev_priv); 3692 } 3693 3694 static void icp_hpd_detection_setup(struct drm_i915_private *dev_priv) 3695 { 3696 u32 hotplug; 3697 3698 hotplug = I915_READ(SHOTPLUG_CTL_DDI); 3699 hotplug |= ICP_DDIA_HPD_ENABLE | 3700 ICP_DDIB_HPD_ENABLE; 3701 I915_WRITE(SHOTPLUG_CTL_DDI, hotplug); 3702 3703 hotplug = I915_READ(SHOTPLUG_CTL_TC); 3704 hotplug |= ICP_TC_HPD_ENABLE(PORT_TC1) | 3705 ICP_TC_HPD_ENABLE(PORT_TC2) | 3706 ICP_TC_HPD_ENABLE(PORT_TC3) | 3707 ICP_TC_HPD_ENABLE(PORT_TC4); 3708 I915_WRITE(SHOTPLUG_CTL_TC, hotplug); 3709 } 3710 3711 static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv) 3712 { 3713 u32 hotplug_irqs, enabled_irqs; 3714 3715 hotplug_irqs = SDE_DDI_MASK_ICP | SDE_TC_MASK_ICP; 3716 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_icp); 3717 3718 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 3719 3720 icp_hpd_detection_setup(dev_priv); 3721 } 3722 3723 static void gen11_hpd_detection_setup(struct drm_i915_private *dev_priv) 3724 { 3725 u32 hotplug; 3726 3727 hotplug = I915_READ(GEN11_TC_HOTPLUG_CTL); 3728 hotplug |= GEN11_HOTPLUG_CTL_ENABLE(PORT_TC1) | 3729 GEN11_HOTPLUG_CTL_ENABLE(PORT_TC2) | 3730 GEN11_HOTPLUG_CTL_ENABLE(PORT_TC3) | 3731 GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4); 3732 I915_WRITE(GEN11_TC_HOTPLUG_CTL, hotplug); 3733 3734 hotplug = I915_READ(GEN11_TBT_HOTPLUG_CTL); 3735 hotplug |= 
GEN11_HOTPLUG_CTL_ENABLE(PORT_TC1) | 3736 GEN11_HOTPLUG_CTL_ENABLE(PORT_TC2) | 3737 GEN11_HOTPLUG_CTL_ENABLE(PORT_TC3) | 3738 GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4); 3739 I915_WRITE(GEN11_TBT_HOTPLUG_CTL, hotplug); 3740 } 3741 3742 static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv) 3743 { 3744 u32 hotplug_irqs, enabled_irqs; 3745 u32 val; 3746 3747 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_gen11); 3748 hotplug_irqs = GEN11_DE_TC_HOTPLUG_MASK | GEN11_DE_TBT_HOTPLUG_MASK; 3749 3750 val = I915_READ(GEN11_DE_HPD_IMR); 3751 val &= ~hotplug_irqs; 3752 I915_WRITE(GEN11_DE_HPD_IMR, val); 3753 POSTING_READ(GEN11_DE_HPD_IMR); 3754 3755 gen11_hpd_detection_setup(dev_priv); 3756 3757 if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) 3758 icp_hpd_irq_setup(dev_priv); 3759 } 3760 3761 static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv) 3762 { 3763 u32 val, hotplug; 3764 3765 /* Display WA #1179 WaHardHangonHotPlug: cnp */ 3766 if (HAS_PCH_CNP(dev_priv)) { 3767 val = I915_READ(SOUTH_CHICKEN1); 3768 val &= ~CHASSIS_CLK_REQ_DURATION_MASK; 3769 val |= CHASSIS_CLK_REQ_DURATION(0xf); 3770 I915_WRITE(SOUTH_CHICKEN1, val); 3771 } 3772 3773 /* Enable digital hotplug on the PCH */ 3774 hotplug = I915_READ(PCH_PORT_HOTPLUG); 3775 hotplug |= PORTA_HOTPLUG_ENABLE | 3776 PORTB_HOTPLUG_ENABLE | 3777 PORTC_HOTPLUG_ENABLE | 3778 PORTD_HOTPLUG_ENABLE; 3779 I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 3780 3781 hotplug = I915_READ(PCH_PORT_HOTPLUG2); 3782 hotplug |= PORTE_HOTPLUG_ENABLE; 3783 I915_WRITE(PCH_PORT_HOTPLUG2, hotplug); 3784 } 3785 3786 static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv) 3787 { 3788 u32 hotplug_irqs, enabled_irqs; 3789 3790 hotplug_irqs = SDE_HOTPLUG_MASK_SPT; 3791 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_spt); 3792 3793 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 3794 3795 spt_hpd_detection_setup(dev_priv); 3796 } 3797 3798 static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv) 3799 { 3800 u32 hotplug; 3801 3802 /* 3803 * Enable digital hotplug on the CPU, and configure the DP short pulse 3804 * duration to 2ms (which is the minimum in the Display Port spec) 3805 * The pulse duration bits are reserved on HSW+. 
3806 	 */
3807 	hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
3808 	hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK;
3809 	hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE |
3810 		   DIGITAL_PORTA_PULSE_DURATION_2ms;
3811 	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
3812 }
3813 
3814 static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
3815 {
3816 	u32 hotplug_irqs, enabled_irqs;
3817 
3818 	if (INTEL_GEN(dev_priv) >= 8) {
3819 		hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG;
3820 		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bdw);
3821 
3822 		bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3823 	} else if (INTEL_GEN(dev_priv) >= 7) {
3824 		hotplug_irqs = DE_DP_A_HOTPLUG_IVB;
3825 		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ivb);
3826 
3827 		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
3828 	} else {
3829 		hotplug_irqs = DE_DP_A_HOTPLUG;
3830 		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ilk);
3831 
3832 		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
3833 	}
3834 
3835 	ilk_hpd_detection_setup(dev_priv);
3836 
3837 	ibx_hpd_irq_setup(dev_priv);
3838 }
3839 
3840 static void __bxt_hpd_detection_setup(struct drm_i915_private *dev_priv,
3841 				      u32 enabled_irqs)
3842 {
3843 	u32 hotplug;
3844 
3845 	hotplug = I915_READ(PCH_PORT_HOTPLUG);
3846 	hotplug |= PORTA_HOTPLUG_ENABLE |
3847 		   PORTB_HOTPLUG_ENABLE |
3848 		   PORTC_HOTPLUG_ENABLE;
3849 
3850 	DRM_DEBUG_KMS("Invert bit setting: hp_ctl:%x hp_port:%x\n",
3851 		      hotplug, enabled_irqs);
3852 	hotplug &= ~BXT_DDI_HPD_INVERT_MASK;
3853 
3854 	/*
3855 	 * For BXT, the invert bit has to be set based on the AOB design
3856 	 * for the HPD detection logic; update it based on the VBT fields.
3857 	 */
3858 	if ((enabled_irqs & BXT_DE_PORT_HP_DDIA) &&
3859 	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_A))
3860 		hotplug |= BXT_DDIA_HPD_INVERT;
3861 	if ((enabled_irqs & BXT_DE_PORT_HP_DDIB) &&
3862 	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_B))
3863 		hotplug |= BXT_DDIB_HPD_INVERT;
3864 	if ((enabled_irqs & BXT_DE_PORT_HP_DDIC) &&
3865 	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_C))
3866 		hotplug |= BXT_DDIC_HPD_INVERT;
3867 
3868 	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3869 }
3870 
3871 static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv)
3872 {
3873 	__bxt_hpd_detection_setup(dev_priv, BXT_DE_PORT_HOTPLUG_MASK);
3874 }
3875 
3876 static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
3877 {
3878 	u32 hotplug_irqs, enabled_irqs;
3879 
3880 	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bxt);
3881 	hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK;
3882 
3883 	bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3884 
3885 	__bxt_hpd_detection_setup(dev_priv, enabled_irqs);
3886 }
3887 
3888 static void ibx_irq_postinstall(struct drm_device *dev)
3889 {
3890 	struct drm_i915_private *dev_priv = to_i915(dev);
3891 	u32 mask;
3892 
3893 	if (HAS_PCH_NOP(dev_priv))
3894 		return;
3895 
3896 	if (HAS_PCH_IBX(dev_priv))
3897 		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
3898 	else if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
3899 		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
3900 	else
3901 		mask = SDE_GMBUS_CPT;
3902 
3903 	gen3_assert_iir_is_zero(&dev_priv->uncore, SDEIIR);
3904 	I915_WRITE(SDEIMR, ~mask);
3905 
3906 	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
3907 	    HAS_PCH_LPT(dev_priv))
3908 		ibx_hpd_detection_setup(dev_priv);
3909 	else
3910 		spt_hpd_detection_setup(dev_priv);
3911 }
3912 
3913 static void gen5_gt_irq_postinstall(struct drm_device *dev)
3914 {
3915 	struct drm_i915_private *dev_priv = to_i915(dev);
3916 	struct intel_uncore *uncore =
&dev_priv->uncore; 3917 u32 pm_irqs, gt_irqs; 3918 3919 pm_irqs = gt_irqs = 0; 3920 3921 dev_priv->gt_irq_mask = ~0; 3922 if (HAS_L3_DPF(dev_priv)) { 3923 /* L3 parity interrupt is always unmasked. */ 3924 dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev_priv); 3925 gt_irqs |= GT_PARITY_ERROR(dev_priv); 3926 } 3927 3928 gt_irqs |= GT_RENDER_USER_INTERRUPT; 3929 if (IS_GEN(dev_priv, 5)) { 3930 gt_irqs |= ILK_BSD_USER_INTERRUPT; 3931 } else { 3932 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT; 3933 } 3934 3935 GEN3_IRQ_INIT(uncore, GT, dev_priv->gt_irq_mask, gt_irqs); 3936 3937 if (INTEL_GEN(dev_priv) >= 6) { 3938 /* 3939 * RPS interrupts will get enabled/disabled on demand when RPS 3940 * itself is enabled/disabled. 3941 */ 3942 if (HAS_ENGINE(dev_priv, VECS0)) { 3943 pm_irqs |= PM_VEBOX_USER_INTERRUPT; 3944 dev_priv->pm_ier |= PM_VEBOX_USER_INTERRUPT; 3945 } 3946 3947 dev_priv->pm_imr = 0xffffffff; 3948 GEN3_IRQ_INIT(uncore, GEN6_PM, dev_priv->pm_imr, pm_irqs); 3949 } 3950 } 3951 3952 static int ironlake_irq_postinstall(struct drm_device *dev) 3953 { 3954 struct drm_i915_private *dev_priv = to_i915(dev); 3955 struct intel_uncore *uncore = &dev_priv->uncore; 3956 u32 display_mask, extra_mask; 3957 3958 if (INTEL_GEN(dev_priv) >= 7) { 3959 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | 3960 DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB); 3961 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB | 3962 DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB | 3963 DE_DP_A_HOTPLUG_IVB); 3964 } else { 3965 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | 3966 DE_AUX_CHANNEL_A | DE_PIPEB_CRC_DONE | 3967 DE_PIPEA_CRC_DONE | DE_POISON); 3968 extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT | 3969 DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN | 3970 DE_DP_A_HOTPLUG); 3971 } 3972 3973 if (IS_HASWELL(dev_priv)) { 3974 gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR); 3975 intel_psr_irq_control(dev_priv, dev_priv->psr.debug); 3976 display_mask |= DE_EDP_PSR_INT_HSW; 3977 } 3978 3979 dev_priv->irq_mask = ~display_mask; 3980 3981 ibx_irq_pre_postinstall(dev); 3982 3983 GEN3_IRQ_INIT(uncore, DE, dev_priv->irq_mask, 3984 display_mask | extra_mask); 3985 3986 gen5_gt_irq_postinstall(dev); 3987 3988 ilk_hpd_detection_setup(dev_priv); 3989 3990 ibx_irq_postinstall(dev); 3991 3992 if (IS_IRONLAKE_M(dev_priv)) { 3993 /* Enable PCU event interrupts 3994 * 3995 * spinlocking not required here for correctness since interrupt 3996 * setup is guaranteed to run in single-threaded context. But we 3997 * need it to make the assert_spin_locked happy. 
 */
3998 		spin_lock_irq(&dev_priv->irq_lock);
3999 		ilk_enable_display_irq(dev_priv, DE_PCU_EVENT);
4000 		spin_unlock_irq(&dev_priv->irq_lock);
4001 	}
4002 
4003 	return 0;
4004 }
4005 
4006 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
4007 {
4008 	lockdep_assert_held(&dev_priv->irq_lock);
4009 
4010 	if (dev_priv->display_irqs_enabled)
4011 		return;
4012 
4013 	dev_priv->display_irqs_enabled = true;
4014 
4015 	if (intel_irqs_enabled(dev_priv)) {
4016 		vlv_display_irq_reset(dev_priv);
4017 		vlv_display_irq_postinstall(dev_priv);
4018 	}
4019 }
4020 
4021 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
4022 {
4023 	lockdep_assert_held(&dev_priv->irq_lock);
4024 
4025 	if (!dev_priv->display_irqs_enabled)
4026 		return;
4027 
4028 	dev_priv->display_irqs_enabled = false;
4029 
4030 	if (intel_irqs_enabled(dev_priv))
4031 		vlv_display_irq_reset(dev_priv);
4032 }
4033 
4034 
4035 static int valleyview_irq_postinstall(struct drm_device *dev)
4036 {
4037 	struct drm_i915_private *dev_priv = to_i915(dev);
4038 
4039 	gen5_gt_irq_postinstall(dev);
4040 
4041 	spin_lock_irq(&dev_priv->irq_lock);
4042 	if (dev_priv->display_irqs_enabled)
4043 		vlv_display_irq_postinstall(dev_priv);
4044 	spin_unlock_irq(&dev_priv->irq_lock);
4045 
4046 	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
4047 	POSTING_READ(VLV_MASTER_IER);
4048 
4049 	return 0;
4050 }
4051 
4052 static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
4053 {
4054 	struct intel_uncore *uncore = &dev_priv->uncore;
4055 
4056 	/* These are interrupts we'll toggle with the ring mask register */
4057 	u32 gt_interrupts[] = {
4058 		(GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
4059 		 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
4060 		 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
4061 		 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT),
4062 
4063 		(GT_RENDER_USER_INTERRUPT << GEN8_VCS0_IRQ_SHIFT |
4064 		 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS0_IRQ_SHIFT |
4065 		 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
4066 		 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT),
4067 
4068 		0,
4069 
4070 		(GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
4071 		 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT)
4072 	};
4073 
4074 	dev_priv->pm_ier = 0x0;
4075 	dev_priv->pm_imr = ~dev_priv->pm_ier;
4076 	GEN8_IRQ_INIT_NDX(uncore, GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
4077 	GEN8_IRQ_INIT_NDX(uncore, GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
4078 	/*
4079 	 * RPS interrupts will get enabled/disabled on demand when RPS itself
4080 	 * is enabled/disabled. Same will be the case for GuC interrupts.
4081 */ 4082 GEN8_IRQ_INIT_NDX(uncore, GT, 2, dev_priv->pm_imr, dev_priv->pm_ier); 4083 GEN8_IRQ_INIT_NDX(uncore, GT, 3, ~gt_interrupts[3], gt_interrupts[3]); 4084 } 4085 4086 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) 4087 { 4088 struct intel_uncore *uncore = &dev_priv->uncore; 4089 4090 u32 de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE; 4091 u32 de_pipe_enables; 4092 u32 de_port_masked = GEN8_AUX_CHANNEL_A; 4093 u32 de_port_enables; 4094 u32 de_misc_masked = GEN8_DE_EDP_PSR; 4095 enum pipe pipe; 4096 4097 if (INTEL_GEN(dev_priv) <= 10) 4098 de_misc_masked |= GEN8_DE_MISC_GSE; 4099 4100 if (INTEL_GEN(dev_priv) >= 9) { 4101 de_pipe_masked |= GEN9_DE_PIPE_IRQ_FAULT_ERRORS; 4102 de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C | 4103 GEN9_AUX_CHANNEL_D; 4104 if (IS_GEN9_LP(dev_priv)) 4105 de_port_masked |= BXT_DE_PORT_GMBUS; 4106 } else { 4107 de_pipe_masked |= GEN8_DE_PIPE_IRQ_FAULT_ERRORS; 4108 } 4109 4110 if (INTEL_GEN(dev_priv) >= 11) 4111 de_port_masked |= ICL_AUX_CHANNEL_E; 4112 4113 if (IS_CNL_WITH_PORT_F(dev_priv) || INTEL_GEN(dev_priv) >= 11) 4114 de_port_masked |= CNL_AUX_CHANNEL_F; 4115 4116 de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK | 4117 GEN8_PIPE_FIFO_UNDERRUN; 4118 4119 de_port_enables = de_port_masked; 4120 if (IS_GEN9_LP(dev_priv)) 4121 de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK; 4122 else if (IS_BROADWELL(dev_priv)) 4123 de_port_enables |= GEN8_PORT_DP_A_HOTPLUG; 4124 4125 gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR); 4126 intel_psr_irq_control(dev_priv, dev_priv->psr.debug); 4127 4128 for_each_pipe(dev_priv, pipe) { 4129 dev_priv->de_irq_mask[pipe] = ~de_pipe_masked; 4130 4131 if (intel_display_power_is_enabled(dev_priv, 4132 POWER_DOMAIN_PIPE(pipe))) 4133 GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe, 4134 dev_priv->de_irq_mask[pipe], 4135 de_pipe_enables); 4136 } 4137 4138 GEN3_IRQ_INIT(uncore, GEN8_DE_PORT_, ~de_port_masked, de_port_enables); 4139 GEN3_IRQ_INIT(uncore, GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked); 4140 4141 if (INTEL_GEN(dev_priv) >= 11) { 4142 u32 de_hpd_masked = 0; 4143 u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK | 4144 GEN11_DE_TBT_HOTPLUG_MASK; 4145 4146 GEN3_IRQ_INIT(uncore, GEN11_DE_HPD_, ~de_hpd_masked, 4147 de_hpd_enables); 4148 gen11_hpd_detection_setup(dev_priv); 4149 } else if (IS_GEN9_LP(dev_priv)) { 4150 bxt_hpd_detection_setup(dev_priv); 4151 } else if (IS_BROADWELL(dev_priv)) { 4152 ilk_hpd_detection_setup(dev_priv); 4153 } 4154 } 4155 4156 static int gen8_irq_postinstall(struct drm_device *dev) 4157 { 4158 struct drm_i915_private *dev_priv = to_i915(dev); 4159 4160 if (HAS_PCH_SPLIT(dev_priv)) 4161 ibx_irq_pre_postinstall(dev); 4162 4163 gen8_gt_irq_postinstall(dev_priv); 4164 gen8_de_irq_postinstall(dev_priv); 4165 4166 if (HAS_PCH_SPLIT(dev_priv)) 4167 ibx_irq_postinstall(dev); 4168 4169 gen8_master_intr_enable(dev_priv->uncore.regs); 4170 4171 return 0; 4172 } 4173 4174 static void gen11_gt_irq_postinstall(struct drm_i915_private *dev_priv) 4175 { 4176 const u32 irqs = GT_RENDER_USER_INTERRUPT | GT_CONTEXT_SWITCH_INTERRUPT; 4177 4178 BUILD_BUG_ON(irqs & 0xffff0000); 4179 4180 /* Enable RCS, BCS, VCS and VECS class interrupts. */ 4181 I915_WRITE(GEN11_RENDER_COPY_INTR_ENABLE, irqs << 16 | irqs); 4182 I915_WRITE(GEN11_VCS_VECS_INTR_ENABLE, irqs << 16 | irqs); 4183 4184 /* Unmask irqs on RCS, BCS, VCS and VECS engines. 
*/ 4185 I915_WRITE(GEN11_RCS0_RSVD_INTR_MASK, ~(irqs << 16)); 4186 I915_WRITE(GEN11_BCS_RSVD_INTR_MASK, ~(irqs << 16)); 4187 I915_WRITE(GEN11_VCS0_VCS1_INTR_MASK, ~(irqs | irqs << 16)); 4188 I915_WRITE(GEN11_VCS2_VCS3_INTR_MASK, ~(irqs | irqs << 16)); 4189 I915_WRITE(GEN11_VECS0_VECS1_INTR_MASK, ~(irqs | irqs << 16)); 4190 4191 /* 4192 * RPS interrupts will get enabled/disabled on demand when RPS itself 4193 * is enabled/disabled. 4194 */ 4195 dev_priv->pm_ier = 0x0; 4196 dev_priv->pm_imr = ~dev_priv->pm_ier; 4197 I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0); 4198 I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_MASK, ~0); 4199 } 4200 4201 static void icp_irq_postinstall(struct drm_device *dev) 4202 { 4203 struct drm_i915_private *dev_priv = to_i915(dev); 4204 u32 mask = SDE_GMBUS_ICP; 4205 4206 WARN_ON(I915_READ(SDEIER) != 0); 4207 I915_WRITE(SDEIER, 0xffffffff); 4208 POSTING_READ(SDEIER); 4209 4210 gen3_assert_iir_is_zero(&dev_priv->uncore, SDEIIR); 4211 I915_WRITE(SDEIMR, ~mask); 4212 4213 icp_hpd_detection_setup(dev_priv); 4214 } 4215 4216 static int gen11_irq_postinstall(struct drm_device *dev) 4217 { 4218 struct drm_i915_private *dev_priv = dev->dev_private; 4219 struct intel_uncore *uncore = &dev_priv->uncore; 4220 u32 gu_misc_masked = GEN11_GU_MISC_GSE; 4221 4222 if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) 4223 icp_irq_postinstall(dev); 4224 4225 gen11_gt_irq_postinstall(dev_priv); 4226 gen8_de_irq_postinstall(dev_priv); 4227 4228 GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked); 4229 4230 I915_WRITE(GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE); 4231 4232 gen11_master_intr_enable(dev_priv->uncore.regs); 4233 POSTING_READ(GEN11_GFX_MSTR_IRQ); 4234 4235 return 0; 4236 } 4237 4238 static int cherryview_irq_postinstall(struct drm_device *dev) 4239 { 4240 struct drm_i915_private *dev_priv = to_i915(dev); 4241 4242 gen8_gt_irq_postinstall(dev_priv); 4243 4244 spin_lock_irq(&dev_priv->irq_lock); 4245 if (dev_priv->display_irqs_enabled) 4246 vlv_display_irq_postinstall(dev_priv); 4247 spin_unlock_irq(&dev_priv->irq_lock); 4248 4249 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 4250 POSTING_READ(GEN8_MASTER_IRQ); 4251 4252 return 0; 4253 } 4254 4255 static void i8xx_irq_reset(struct drm_device *dev) 4256 { 4257 struct drm_i915_private *dev_priv = to_i915(dev); 4258 struct intel_uncore *uncore = &dev_priv->uncore; 4259 4260 i9xx_pipestat_irq_reset(dev_priv); 4261 4262 GEN2_IRQ_RESET(uncore); 4263 } 4264 4265 static int i8xx_irq_postinstall(struct drm_device *dev) 4266 { 4267 struct drm_i915_private *dev_priv = to_i915(dev); 4268 struct intel_uncore *uncore = &dev_priv->uncore; 4269 u16 enable_mask; 4270 4271 I915_WRITE16(EMR, ~(I915_ERROR_PAGE_TABLE | 4272 I915_ERROR_MEMORY_REFRESH)); 4273 4274 /* Unmask the interrupts that we always want on. */ 4275 dev_priv->irq_mask = 4276 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 4277 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 4278 I915_MASTER_ERROR_INTERRUPT); 4279 4280 enable_mask = 4281 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 4282 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 4283 I915_MASTER_ERROR_INTERRUPT | 4284 I915_USER_INTERRUPT; 4285 4286 GEN2_IRQ_INIT(uncore, dev_priv->irq_mask, enable_mask); 4287 4288 /* Interrupt setup is already guaranteed to be single-threaded, this is 4289 * just to make the assert_spin_locked check happy. 
*/ 4290 spin_lock_irq(&dev_priv->irq_lock); 4291 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 4292 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 4293 spin_unlock_irq(&dev_priv->irq_lock); 4294 4295 return 0; 4296 } 4297 4298 static void i8xx_error_irq_ack(struct drm_i915_private *dev_priv, 4299 u16 *eir, u16 *eir_stuck) 4300 { 4301 u16 emr; 4302 4303 *eir = I915_READ16(EIR); 4304 4305 if (*eir) 4306 I915_WRITE16(EIR, *eir); 4307 4308 *eir_stuck = I915_READ16(EIR); 4309 if (*eir_stuck == 0) 4310 return; 4311 4312 /* 4313 * Toggle all EMR bits to make sure we get an edge 4314 * in the ISR master error bit if we don't clear 4315 * all the EIR bits. Otherwise the edge triggered 4316 * IIR on i965/g4x wouldn't notice that an interrupt 4317 * is still pending. Also some EIR bits can't be 4318 * cleared except by handling the underlying error 4319 * (or by a GPU reset) so we mask any bit that 4320 * remains set. 4321 */ 4322 emr = I915_READ16(EMR); 4323 I915_WRITE16(EMR, 0xffff); 4324 I915_WRITE16(EMR, emr | *eir_stuck); 4325 } 4326 4327 static void i8xx_error_irq_handler(struct drm_i915_private *dev_priv, 4328 u16 eir, u16 eir_stuck) 4329 { 4330 DRM_DEBUG("Master Error: EIR 0x%04x\n", eir); 4331 4332 if (eir_stuck) 4333 DRM_DEBUG_DRIVER("EIR stuck: 0x%04x, masked\n", eir_stuck); 4334 } 4335 4336 static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv, 4337 u32 *eir, u32 *eir_stuck) 4338 { 4339 u32 emr; 4340 4341 *eir = I915_READ(EIR); 4342 4343 I915_WRITE(EIR, *eir); 4344 4345 *eir_stuck = I915_READ(EIR); 4346 if (*eir_stuck == 0) 4347 return; 4348 4349 /* 4350 * Toggle all EMR bits to make sure we get an edge 4351 * in the ISR master error bit if we don't clear 4352 * all the EIR bits. Otherwise the edge triggered 4353 * IIR on i965/g4x wouldn't notice that an interrupt 4354 * is still pending. Also some EIR bits can't be 4355 * cleared except by handling the underlying error 4356 * (or by a GPU reset) so we mask any bit that 4357 * remains set. 
4358 */ 4359 emr = I915_READ(EMR); 4360 I915_WRITE(EMR, 0xffffffff); 4361 I915_WRITE(EMR, emr | *eir_stuck); 4362 } 4363 4364 static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv, 4365 u32 eir, u32 eir_stuck) 4366 { 4367 DRM_DEBUG("Master Error, EIR 0x%08x\n", eir); 4368 4369 if (eir_stuck) 4370 DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masked\n", eir_stuck); 4371 } 4372 4373 static irqreturn_t i8xx_irq_handler(int irq, void *arg) 4374 { 4375 struct drm_device *dev = arg; 4376 struct drm_i915_private *dev_priv = to_i915(dev); 4377 irqreturn_t ret = IRQ_NONE; 4378 4379 if (!intel_irqs_enabled(dev_priv)) 4380 return IRQ_NONE; 4381 4382 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 4383 disable_rpm_wakeref_asserts(dev_priv); 4384 4385 do { 4386 u32 pipe_stats[I915_MAX_PIPES] = {}; 4387 u16 eir = 0, eir_stuck = 0; 4388 u16 iir; 4389 4390 iir = I915_READ16(GEN2_IIR); 4391 if (iir == 0) 4392 break; 4393 4394 ret = IRQ_HANDLED; 4395 4396 /* Call regardless, as some status bits might not be 4397 * signalled in iir */ 4398 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats); 4399 4400 if (iir & I915_MASTER_ERROR_INTERRUPT) 4401 i8xx_error_irq_ack(dev_priv, &eir, &eir_stuck); 4402 4403 I915_WRITE16(GEN2_IIR, iir); 4404 4405 if (iir & I915_USER_INTERRUPT) 4406 intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]); 4407 4408 if (iir & I915_MASTER_ERROR_INTERRUPT) 4409 i8xx_error_irq_handler(dev_priv, eir, eir_stuck); 4410 4411 i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats); 4412 } while (0); 4413 4414 enable_rpm_wakeref_asserts(dev_priv); 4415 4416 return ret; 4417 } 4418 4419 static void i915_irq_reset(struct drm_device *dev) 4420 { 4421 struct drm_i915_private *dev_priv = to_i915(dev); 4422 struct intel_uncore *uncore = &dev_priv->uncore; 4423 4424 if (I915_HAS_HOTPLUG(dev_priv)) { 4425 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); 4426 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 4427 } 4428 4429 i9xx_pipestat_irq_reset(dev_priv); 4430 4431 GEN3_IRQ_RESET(uncore, GEN2_); 4432 } 4433 4434 static int i915_irq_postinstall(struct drm_device *dev) 4435 { 4436 struct drm_i915_private *dev_priv = to_i915(dev); 4437 struct intel_uncore *uncore = &dev_priv->uncore; 4438 u32 enable_mask; 4439 4440 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | 4441 I915_ERROR_MEMORY_REFRESH)); 4442 4443 /* Unmask the interrupts that we always want on. */ 4444 dev_priv->irq_mask = 4445 ~(I915_ASLE_INTERRUPT | 4446 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 4447 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 4448 I915_MASTER_ERROR_INTERRUPT); 4449 4450 enable_mask = 4451 I915_ASLE_INTERRUPT | 4452 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 4453 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 4454 I915_MASTER_ERROR_INTERRUPT | 4455 I915_USER_INTERRUPT; 4456 4457 if (I915_HAS_HOTPLUG(dev_priv)) { 4458 /* Enable in IER... */ 4459 enable_mask |= I915_DISPLAY_PORT_INTERRUPT; 4460 /* and unmask in IMR */ 4461 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT; 4462 } 4463 4464 GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask); 4465 4466 /* Interrupt setup is already guaranteed to be single-threaded, this is 4467 * just to make the assert_spin_locked check happy. 
*/ 4468 spin_lock_irq(&dev_priv->irq_lock); 4469 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 4470 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 4471 spin_unlock_irq(&dev_priv->irq_lock); 4472 4473 i915_enable_asle_pipestat(dev_priv); 4474 4475 return 0; 4476 } 4477 4478 static irqreturn_t i915_irq_handler(int irq, void *arg) 4479 { 4480 struct drm_device *dev = arg; 4481 struct drm_i915_private *dev_priv = to_i915(dev); 4482 irqreturn_t ret = IRQ_NONE; 4483 4484 if (!intel_irqs_enabled(dev_priv)) 4485 return IRQ_NONE; 4486 4487 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 4488 disable_rpm_wakeref_asserts(dev_priv); 4489 4490 do { 4491 u32 pipe_stats[I915_MAX_PIPES] = {}; 4492 u32 eir = 0, eir_stuck = 0; 4493 u32 hotplug_status = 0; 4494 u32 iir; 4495 4496 iir = I915_READ(GEN2_IIR); 4497 if (iir == 0) 4498 break; 4499 4500 ret = IRQ_HANDLED; 4501 4502 if (I915_HAS_HOTPLUG(dev_priv) && 4503 iir & I915_DISPLAY_PORT_INTERRUPT) 4504 hotplug_status = i9xx_hpd_irq_ack(dev_priv); 4505 4506 /* Call regardless, as some status bits might not be 4507 * signalled in iir */ 4508 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats); 4509 4510 if (iir & I915_MASTER_ERROR_INTERRUPT) 4511 i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck); 4512 4513 I915_WRITE(GEN2_IIR, iir); 4514 4515 if (iir & I915_USER_INTERRUPT) 4516 intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]); 4517 4518 if (iir & I915_MASTER_ERROR_INTERRUPT) 4519 i9xx_error_irq_handler(dev_priv, eir, eir_stuck); 4520 4521 if (hotplug_status) 4522 i9xx_hpd_irq_handler(dev_priv, hotplug_status); 4523 4524 i915_pipestat_irq_handler(dev_priv, iir, pipe_stats); 4525 } while (0); 4526 4527 enable_rpm_wakeref_asserts(dev_priv); 4528 4529 return ret; 4530 } 4531 4532 static void i965_irq_reset(struct drm_device *dev) 4533 { 4534 struct drm_i915_private *dev_priv = to_i915(dev); 4535 struct intel_uncore *uncore = &dev_priv->uncore; 4536 4537 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); 4538 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 4539 4540 i9xx_pipestat_irq_reset(dev_priv); 4541 4542 GEN3_IRQ_RESET(uncore, GEN2_); 4543 } 4544 4545 static int i965_irq_postinstall(struct drm_device *dev) 4546 { 4547 struct drm_i915_private *dev_priv = to_i915(dev); 4548 struct intel_uncore *uncore = &dev_priv->uncore; 4549 u32 enable_mask; 4550 u32 error_mask; 4551 4552 /* 4553 * Enable some error detection, note the instruction error mask 4554 * bit is reserved, so we leave it masked. 4555 */ 4556 if (IS_G4X(dev_priv)) { 4557 error_mask = ~(GM45_ERROR_PAGE_TABLE | 4558 GM45_ERROR_MEM_PRIV | 4559 GM45_ERROR_CP_PRIV | 4560 I915_ERROR_MEMORY_REFRESH); 4561 } else { 4562 error_mask = ~(I915_ERROR_PAGE_TABLE | 4563 I915_ERROR_MEMORY_REFRESH); 4564 } 4565 I915_WRITE(EMR, error_mask); 4566 4567 /* Unmask the interrupts that we always want on. 
*/ 4568 dev_priv->irq_mask = 4569 ~(I915_ASLE_INTERRUPT | 4570 I915_DISPLAY_PORT_INTERRUPT | 4571 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 4572 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 4573 I915_MASTER_ERROR_INTERRUPT); 4574 4575 enable_mask = 4576 I915_ASLE_INTERRUPT | 4577 I915_DISPLAY_PORT_INTERRUPT | 4578 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 4579 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 4580 I915_MASTER_ERROR_INTERRUPT | 4581 I915_USER_INTERRUPT; 4582 4583 if (IS_G4X(dev_priv)) 4584 enable_mask |= I915_BSD_USER_INTERRUPT; 4585 4586 GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask); 4587 4588 /* Interrupt setup is already guaranteed to be single-threaded, this is 4589 * just to make the assert_spin_locked check happy. */ 4590 spin_lock_irq(&dev_priv->irq_lock); 4591 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); 4592 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 4593 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 4594 spin_unlock_irq(&dev_priv->irq_lock); 4595 4596 i915_enable_asle_pipestat(dev_priv); 4597 4598 return 0; 4599 } 4600 4601 static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv) 4602 { 4603 u32 hotplug_en; 4604 4605 lockdep_assert_held(&dev_priv->irq_lock); 4606 4607 /* Note HDMI and DP share hotplug bits */ 4608 /* enable bits are the same for all generations */ 4609 hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915); 4610 /* Programming the CRT detection parameters tends 4611 to generate a spurious hotplug event about three 4612 seconds later. So just do it once. 4613 */ 4614 if (IS_G4X(dev_priv)) 4615 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64; 4616 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; 4617 4618 /* Ignore TV since it's buggy */ 4619 i915_hotplug_interrupt_update_locked(dev_priv, 4620 HOTPLUG_INT_EN_MASK | 4621 CRT_HOTPLUG_VOLTAGE_COMPARE_MASK | 4622 CRT_HOTPLUG_ACTIVATION_PERIOD_64, 4623 hotplug_en); 4624 } 4625 4626 static irqreturn_t i965_irq_handler(int irq, void *arg) 4627 { 4628 struct drm_device *dev = arg; 4629 struct drm_i915_private *dev_priv = to_i915(dev); 4630 irqreturn_t ret = IRQ_NONE; 4631 4632 if (!intel_irqs_enabled(dev_priv)) 4633 return IRQ_NONE; 4634 4635 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 4636 disable_rpm_wakeref_asserts(dev_priv); 4637 4638 do { 4639 u32 pipe_stats[I915_MAX_PIPES] = {}; 4640 u32 eir = 0, eir_stuck = 0; 4641 u32 hotplug_status = 0; 4642 u32 iir; 4643 4644 iir = I915_READ(GEN2_IIR); 4645 if (iir == 0) 4646 break; 4647 4648 ret = IRQ_HANDLED; 4649 4650 if (iir & I915_DISPLAY_PORT_INTERRUPT) 4651 hotplug_status = i9xx_hpd_irq_ack(dev_priv); 4652 4653 /* Call regardless, as some status bits might not be 4654 * signalled in iir */ 4655 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats); 4656 4657 if (iir & I915_MASTER_ERROR_INTERRUPT) 4658 i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck); 4659 4660 I915_WRITE(GEN2_IIR, iir); 4661 4662 if (iir & I915_USER_INTERRUPT) 4663 intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]); 4664 4665 if (iir & I915_BSD_USER_INTERRUPT) 4666 intel_engine_breadcrumbs_irq(dev_priv->engine[VCS0]); 4667 4668 if (iir & I915_MASTER_ERROR_INTERRUPT) 4669 i9xx_error_irq_handler(dev_priv, eir, eir_stuck); 4670 4671 if (hotplug_status) 4672 i9xx_hpd_irq_handler(dev_priv, hotplug_status); 4673 4674 i965_pipestat_irq_handler(dev_priv, iir, pipe_stats); 4675 } while (0); 4676 4677 enable_rpm_wakeref_asserts(dev_priv); 4678 4679 return ret; 4680 } 4681 4682 /** 4683 * 
intel_irq_init - initializes irq support
4684  * @dev_priv: i915 device instance
4685  *
4686  * This function initializes all the irq support including work items, timers
4687  * and all the vtables. It does not setup the interrupt itself though.
4688  */
4689 void intel_irq_init(struct drm_i915_private *dev_priv)
4690 {
4691 	struct drm_device *dev = &dev_priv->drm;
4692 	struct intel_rps *rps = &dev_priv->gt_pm.rps;
4693 	int i;
4694 
4695 	if (IS_I945GM(dev_priv))
4696 		i945gm_vblank_work_init(dev_priv);
4697 
4698 	intel_hpd_init_work(dev_priv);
4699 
4700 	INIT_WORK(&rps->work, gen6_pm_rps_work);
4701 
4702 	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
4703 	for (i = 0; i < MAX_L3_SLICES; ++i)
4704 		dev_priv->l3_parity.remap_info[i] = NULL;
4705 
4706 	if (HAS_GUC_SCHED(dev_priv))
4707 		dev_priv->pm_guc_events = GEN9_GUC_TO_HOST_INT_EVENT;
4708 
4709 	/* Let's track the enabled rps events */
4710 	if (IS_VALLEYVIEW(dev_priv))
4711 		/* WaGsvRC0ResidencyMethod:vlv */
4712 		dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
4713 	else
4714 		dev_priv->pm_rps_events = (GEN6_PM_RP_UP_THRESHOLD |
4715 					   GEN6_PM_RP_DOWN_THRESHOLD |
4716 					   GEN6_PM_RP_DOWN_TIMEOUT);
4717 
4718 	/* We share the register with another engine */
4719 	if (INTEL_GEN(dev_priv) > 9)
4720 		GEM_WARN_ON(dev_priv->pm_rps_events & 0xffff0000);
4721 
4722 	rps->pm_intrmsk_mbz = 0;
4723 
4724 	/*
4725 	 * SNB, IVB and HSW can hard hang on a looping batchbuffer if
4726 	 * GEN6_PM_UP_EI_EXPIRED is masked; VLV and CHV may do the same.
4727 	 *
4728 	 * TODO: verify if this can be reproduced on VLV, CHV.
4729 	 */
4730 	if (INTEL_GEN(dev_priv) <= 7)
4731 		rps->pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED;
4732 
4733 	if (INTEL_GEN(dev_priv) >= 8)
4734 		rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
4735 
4736 	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
4737 		dev->driver->get_vblank_counter = g4x_get_vblank_counter;
4738 	else if (INTEL_GEN(dev_priv) >= 3)
4739 		dev->driver->get_vblank_counter = i915_get_vblank_counter;
4740 
4741 	dev->vblank_disable_immediate = true;
4742 
4743 	/* Most platforms treat the display irq block as an always-on
4744 	 * power domain. vlv/chv can disable it at runtime and need
4745 	 * special care to avoid writing any of the display block registers
4746 	 * outside of the power domain. We defer setting up the display irqs
4747 	 * in this case to the runtime pm.
4748 	 */
4749 	dev_priv->display_irqs_enabled = true;
4750 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
4751 		dev_priv->display_irqs_enabled = false;
4752 
4753 	dev_priv->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD;
4754 	/* If we have MST support, we want to avoid doing short HPD IRQ storm
4755 	 * detection, as short HPD storms will occur as a natural part of
4756 	 * sideband messaging with MST.
4757 	 * On older platforms however, IRQ storms can occur with both long and
4758 	 * short pulses, as seen on some G4x systems.
4759 */ 4760 dev_priv->hotplug.hpd_short_storm_enabled = !HAS_DP_MST(dev_priv); 4761 4762 dev->driver->get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos; 4763 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos; 4764 4765 if (IS_CHERRYVIEW(dev_priv)) { 4766 dev->driver->irq_handler = cherryview_irq_handler; 4767 dev->driver->irq_preinstall = cherryview_irq_reset; 4768 dev->driver->irq_postinstall = cherryview_irq_postinstall; 4769 dev->driver->irq_uninstall = cherryview_irq_reset; 4770 dev->driver->enable_vblank = i965_enable_vblank; 4771 dev->driver->disable_vblank = i965_disable_vblank; 4772 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 4773 } else if (IS_VALLEYVIEW(dev_priv)) { 4774 dev->driver->irq_handler = valleyview_irq_handler; 4775 dev->driver->irq_preinstall = valleyview_irq_reset; 4776 dev->driver->irq_postinstall = valleyview_irq_postinstall; 4777 dev->driver->irq_uninstall = valleyview_irq_reset; 4778 dev->driver->enable_vblank = i965_enable_vblank; 4779 dev->driver->disable_vblank = i965_disable_vblank; 4780 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 4781 } else if (INTEL_GEN(dev_priv) >= 11) { 4782 dev->driver->irq_handler = gen11_irq_handler; 4783 dev->driver->irq_preinstall = gen11_irq_reset; 4784 dev->driver->irq_postinstall = gen11_irq_postinstall; 4785 dev->driver->irq_uninstall = gen11_irq_reset; 4786 dev->driver->enable_vblank = gen8_enable_vblank; 4787 dev->driver->disable_vblank = gen8_disable_vblank; 4788 dev_priv->display.hpd_irq_setup = gen11_hpd_irq_setup; 4789 } else if (INTEL_GEN(dev_priv) >= 8) { 4790 dev->driver->irq_handler = gen8_irq_handler; 4791 dev->driver->irq_preinstall = gen8_irq_reset; 4792 dev->driver->irq_postinstall = gen8_irq_postinstall; 4793 dev->driver->irq_uninstall = gen8_irq_reset; 4794 dev->driver->enable_vblank = gen8_enable_vblank; 4795 dev->driver->disable_vblank = gen8_disable_vblank; 4796 if (IS_GEN9_LP(dev_priv)) 4797 dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup; 4798 else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT) 4799 dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup; 4800 else 4801 dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup; 4802 } else if (HAS_PCH_SPLIT(dev_priv)) { 4803 dev->driver->irq_handler = ironlake_irq_handler; 4804 dev->driver->irq_preinstall = ironlake_irq_reset; 4805 dev->driver->irq_postinstall = ironlake_irq_postinstall; 4806 dev->driver->irq_uninstall = ironlake_irq_reset; 4807 dev->driver->enable_vblank = ironlake_enable_vblank; 4808 dev->driver->disable_vblank = ironlake_disable_vblank; 4809 dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup; 4810 } else { 4811 if (IS_GEN(dev_priv, 2)) { 4812 dev->driver->irq_preinstall = i8xx_irq_reset; 4813 dev->driver->irq_postinstall = i8xx_irq_postinstall; 4814 dev->driver->irq_handler = i8xx_irq_handler; 4815 dev->driver->irq_uninstall = i8xx_irq_reset; 4816 dev->driver->enable_vblank = i8xx_enable_vblank; 4817 dev->driver->disable_vblank = i8xx_disable_vblank; 4818 } else if (IS_I945GM(dev_priv)) { 4819 dev->driver->irq_preinstall = i915_irq_reset; 4820 dev->driver->irq_postinstall = i915_irq_postinstall; 4821 dev->driver->irq_uninstall = i915_irq_reset; 4822 dev->driver->irq_handler = i915_irq_handler; 4823 dev->driver->enable_vblank = i945gm_enable_vblank; 4824 dev->driver->disable_vblank = i945gm_disable_vblank; 4825 } else if (IS_GEN(dev_priv, 3)) { 4826 dev->driver->irq_preinstall = i915_irq_reset; 4827 dev->driver->irq_postinstall = i915_irq_postinstall; 4828 dev->driver->irq_uninstall = i915_irq_reset; 4829 
dev->driver->irq_handler = i915_irq_handler;
4830 			dev->driver->enable_vblank = i8xx_enable_vblank;
4831 			dev->driver->disable_vblank = i8xx_disable_vblank;
4832 		} else {
4833 			dev->driver->irq_preinstall = i965_irq_reset;
4834 			dev->driver->irq_postinstall = i965_irq_postinstall;
4835 			dev->driver->irq_uninstall = i965_irq_reset;
4836 			dev->driver->irq_handler = i965_irq_handler;
4837 			dev->driver->enable_vblank = i965_enable_vblank;
4838 			dev->driver->disable_vblank = i965_disable_vblank;
4839 		}
4840 		if (I915_HAS_HOTPLUG(dev_priv))
4841 			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4842 	}
4843 }
4844 
4845 /**
4846  * intel_irq_fini - deinitializes IRQ support
4847  * @i915: i915 device instance
4848  *
4849  * This function deinitializes all the IRQ support.
4850  */
4851 void intel_irq_fini(struct drm_i915_private *i915)
4852 {
4853 	int i;
4854 
4855 	if (IS_I945GM(i915))
4856 		i945gm_vblank_work_fini(i915);
4857 
4858 	for (i = 0; i < MAX_L3_SLICES; ++i)
4859 		kfree(i915->l3_parity.remap_info[i]);
4860 }
4861 
4862 /**
4863  * intel_irq_install - enables the hardware interrupt
4864  * @dev_priv: i915 device instance
4865  *
4866  * This function enables the hardware interrupt handling, but leaves the hotplug
4867  * handling still disabled. It is called after intel_irq_init().
4868  *
4869  * In the driver load and resume code we need working interrupts in a few places
4870  * but don't want to deal with the hassle of concurrent probe and hotplug
4871  * workers. Hence the split into this two-stage approach.
4872  */
4873 int intel_irq_install(struct drm_i915_private *dev_priv)
4874 {
4875 	/*
4876 	 * We enable some interrupt sources in our postinstall hooks, so mark
4877 	 * interrupts as enabled _before_ actually enabling them to avoid
4878 	 * special cases in our ordering checks.
4879 	 */
4880 	dev_priv->runtime_pm.irqs_enabled = true;
4881 
4882 	return drm_irq_install(&dev_priv->drm, dev_priv->drm.pdev->irq);
4883 }
4884 
4885 /**
4886  * intel_irq_uninstall - finalizes all irq handling
4887  * @dev_priv: i915 device instance
4888  *
4889  * This stops interrupt and hotplug handling and unregisters and frees all
4890  * resources acquired in the init functions.
4891  */
4892 void intel_irq_uninstall(struct drm_i915_private *dev_priv)
4893 {
4894 	drm_irq_uninstall(&dev_priv->drm);
4895 	intel_hpd_cancel_work(dev_priv);
4896 	dev_priv->runtime_pm.irqs_enabled = false;
4897 }
4898 
4899 /**
4900  * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
4901  * @dev_priv: i915 device instance
4902  *
4903  * This function is used to disable interrupts at runtime, both in the runtime
4904  * pm and the system suspend/resume code.
4905  */
4906 void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
4907 {
4908 	dev_priv->drm.driver->irq_uninstall(&dev_priv->drm);
4909 	dev_priv->runtime_pm.irqs_enabled = false;
4910 	synchronize_irq(dev_priv->drm.irq);
4911 }
4912 
4913 /**
4914  * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
4915  * @dev_priv: i915 device instance
4916  *
4917  * This function is used to enable interrupts at runtime, both in the runtime
4918  * pm and the system suspend/resume code.
4919  */
4920 void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
4921 {
4922 	dev_priv->runtime_pm.irqs_enabled = true;
4923 	dev_priv->drm.driver->irq_preinstall(&dev_priv->drm);
4924 	dev_priv->drm.driver->irq_postinstall(&dev_priv->drm);
4925 }
4926 
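/*
 * Editor's illustrative sketch (not part of the driver): the kerneldoc
 * comments above imply an ordering between these entry points, shown here
 * as a hypothetical caller might use them. hypothetical_irq_lifecycle() is
 * a placeholder name, not an i915 function; the real callers (driver load,
 * runtime PM, system suspend/resume, unload) live in other files and carry
 * their own error handling and teardown ordering.
 */
static int __maybe_unused
hypothetical_irq_lifecycle(struct drm_i915_private *i915)
{
	int ret;

	/* 1) One-time setup of work items, timers and vtables. */
	intel_irq_init(i915);

	/* 2) Request the interrupt proper; hotplug handling stays disabled. */
	ret = intel_irq_install(i915);
	if (ret)
		return ret;

	/* 3) Runtime PM / suspend-resume bracket hardware access like this. */
	intel_runtime_pm_disable_interrupts(i915);
	intel_runtime_pm_enable_interrupts(i915);

	/* 4) Unload: stop interrupt and hotplug handling, then free init state. */
	intel_irq_uninstall(i915);
	intel_irq_fini(i915);

	return 0;
}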