1 /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*- 2 */ 3 /* 4 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. 5 * All Rights Reserved. 6 * 7 * Permission is hereby granted, free of charge, to any person obtaining a 8 * copy of this software and associated documentation files (the 9 * "Software"), to deal in the Software without restriction, including 10 * without limitation the rights to use, copy, modify, merge, publish, 11 * distribute, sub license, and/or sell copies of the Software, and to 12 * permit persons to whom the Software is furnished to do so, subject to 13 * the following conditions: 14 * 15 * The above copyright notice and this permission notice (including the 16 * next paragraph) shall be included in all copies or substantial portions 17 * of the Software. 18 * 19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 22 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR 23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, 24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 26 * 27 */ 28 29 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 30 31 #include <linux/circ_buf.h> 32 #include <linux/cpuidle.h> 33 #include <linux/slab.h> 34 #include <linux/sysrq.h> 35 36 #include <drm/drm_drv.h> 37 #include <drm/drm_irq.h> 38 #include <drm/i915_drm.h> 39 40 #include "display/intel_fifo_underrun.h" 41 #include "display/intel_hotplug.h" 42 #include "display/intel_lpe_audio.h" 43 #include "display/intel_psr.h" 44 45 #include "gt/intel_gt.h" 46 47 #include "i915_drv.h" 48 #include "i915_irq.h" 49 #include "i915_trace.h" 50 #include "intel_drv.h" 51 #include "intel_pm.h" 52 53 /** 54 * DOC: interrupt handling 55 * 56 * These functions provide the basic support for enabling and disabling the 57 * interrupt handling support. There's a lot more functionality in i915_irq.c 58 * and related files, but that will be described in separate chapters. 
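 *
 * The gen2/gen3 helpers below capture the common pattern used throughout
 * this file for an IMR/IER/IIR register triplet: on reset, mask everything
 * and clear IIR twice (it can queue up two events); on init, assert that
 * IIR is already clear before programming IER and IMR.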
59 */ 60 61 static const u32 hpd_ilk[HPD_NUM_PINS] = { 62 [HPD_PORT_A] = DE_DP_A_HOTPLUG, 63 }; 64 65 static const u32 hpd_ivb[HPD_NUM_PINS] = { 66 [HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB, 67 }; 68 69 static const u32 hpd_bdw[HPD_NUM_PINS] = { 70 [HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG, 71 }; 72 73 static const u32 hpd_ibx[HPD_NUM_PINS] = { 74 [HPD_CRT] = SDE_CRT_HOTPLUG, 75 [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG, 76 [HPD_PORT_B] = SDE_PORTB_HOTPLUG, 77 [HPD_PORT_C] = SDE_PORTC_HOTPLUG, 78 [HPD_PORT_D] = SDE_PORTD_HOTPLUG 79 }; 80 81 static const u32 hpd_cpt[HPD_NUM_PINS] = { 82 [HPD_CRT] = SDE_CRT_HOTPLUG_CPT, 83 [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT, 84 [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT, 85 [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT, 86 [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT 87 }; 88 89 static const u32 hpd_spt[HPD_NUM_PINS] = { 90 [HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT, 91 [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT, 92 [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT, 93 [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT, 94 [HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT 95 }; 96 97 static const u32 hpd_mask_i915[HPD_NUM_PINS] = { 98 [HPD_CRT] = CRT_HOTPLUG_INT_EN, 99 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN, 100 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN, 101 [HPD_PORT_B] = PORTB_HOTPLUG_INT_EN, 102 [HPD_PORT_C] = PORTC_HOTPLUG_INT_EN, 103 [HPD_PORT_D] = PORTD_HOTPLUG_INT_EN 104 }; 105 106 static const u32 hpd_status_g4x[HPD_NUM_PINS] = { 107 [HPD_CRT] = CRT_HOTPLUG_INT_STATUS, 108 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X, 109 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X, 110 [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS, 111 [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS, 112 [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS 113 }; 114 115 static const u32 hpd_status_i915[HPD_NUM_PINS] = { 116 [HPD_CRT] = CRT_HOTPLUG_INT_STATUS, 117 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915, 118 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915, 119 [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS, 120 [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS, 121 [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS 122 }; 123 124 /* BXT hpd list */ 125 static const u32 hpd_bxt[HPD_NUM_PINS] = { 126 [HPD_PORT_A] = BXT_DE_PORT_HP_DDIA, 127 [HPD_PORT_B] = BXT_DE_PORT_HP_DDIB, 128 [HPD_PORT_C] = BXT_DE_PORT_HP_DDIC 129 }; 130 131 static const u32 hpd_gen11[HPD_NUM_PINS] = { 132 [HPD_PORT_C] = GEN11_TC1_HOTPLUG | GEN11_TBT1_HOTPLUG, 133 [HPD_PORT_D] = GEN11_TC2_HOTPLUG | GEN11_TBT2_HOTPLUG, 134 [HPD_PORT_E] = GEN11_TC3_HOTPLUG | GEN11_TBT3_HOTPLUG, 135 [HPD_PORT_F] = GEN11_TC4_HOTPLUG | GEN11_TBT4_HOTPLUG 136 }; 137 138 static const u32 hpd_icp[HPD_NUM_PINS] = { 139 [HPD_PORT_A] = SDE_DDIA_HOTPLUG_ICP, 140 [HPD_PORT_B] = SDE_DDIB_HOTPLUG_ICP, 141 [HPD_PORT_C] = SDE_TC1_HOTPLUG_ICP, 142 [HPD_PORT_D] = SDE_TC2_HOTPLUG_ICP, 143 [HPD_PORT_E] = SDE_TC3_HOTPLUG_ICP, 144 [HPD_PORT_F] = SDE_TC4_HOTPLUG_ICP 145 }; 146 147 static const u32 hpd_mcc[HPD_NUM_PINS] = { 148 [HPD_PORT_A] = SDE_DDIA_HOTPLUG_ICP, 149 [HPD_PORT_B] = SDE_DDIB_HOTPLUG_ICP, 150 [HPD_PORT_C] = SDE_TC1_HOTPLUG_ICP 151 }; 152 153 static void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr, 154 i915_reg_t iir, i915_reg_t ier) 155 { 156 intel_uncore_write(uncore, imr, 0xffffffff); 157 intel_uncore_posting_read(uncore, imr); 158 159 intel_uncore_write(uncore, ier, 0); 160 161 /* IIR can theoretically queue up two events. Be paranoid. 
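	 * Hence the double clear and posting read of IIR below.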
*/ 162 intel_uncore_write(uncore, iir, 0xffffffff); 163 intel_uncore_posting_read(uncore, iir); 164 intel_uncore_write(uncore, iir, 0xffffffff); 165 intel_uncore_posting_read(uncore, iir); 166 } 167 168 static void gen2_irq_reset(struct intel_uncore *uncore) 169 { 170 intel_uncore_write16(uncore, GEN2_IMR, 0xffff); 171 intel_uncore_posting_read16(uncore, GEN2_IMR); 172 173 intel_uncore_write16(uncore, GEN2_IER, 0); 174 175 /* IIR can theoretically queue up two events. Be paranoid. */ 176 intel_uncore_write16(uncore, GEN2_IIR, 0xffff); 177 intel_uncore_posting_read16(uncore, GEN2_IIR); 178 intel_uncore_write16(uncore, GEN2_IIR, 0xffff); 179 intel_uncore_posting_read16(uncore, GEN2_IIR); 180 } 181 182 #define GEN8_IRQ_RESET_NDX(uncore, type, which) \ 183 ({ \ 184 unsigned int which_ = which; \ 185 gen3_irq_reset((uncore), GEN8_##type##_IMR(which_), \ 186 GEN8_##type##_IIR(which_), GEN8_##type##_IER(which_)); \ 187 }) 188 189 #define GEN3_IRQ_RESET(uncore, type) \ 190 gen3_irq_reset((uncore), type##IMR, type##IIR, type##IER) 191 192 #define GEN2_IRQ_RESET(uncore) \ 193 gen2_irq_reset(uncore) 194 195 /* 196 * We should clear IMR at preinstall/uninstall, and just check at postinstall. 197 */ 198 static void gen3_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg) 199 { 200 u32 val = intel_uncore_read(uncore, reg); 201 202 if (val == 0) 203 return; 204 205 WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n", 206 i915_mmio_reg_offset(reg), val); 207 intel_uncore_write(uncore, reg, 0xffffffff); 208 intel_uncore_posting_read(uncore, reg); 209 intel_uncore_write(uncore, reg, 0xffffffff); 210 intel_uncore_posting_read(uncore, reg); 211 } 212 213 static void gen2_assert_iir_is_zero(struct intel_uncore *uncore) 214 { 215 u16 val = intel_uncore_read16(uncore, GEN2_IIR); 216 217 if (val == 0) 218 return; 219 220 WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n", 221 i915_mmio_reg_offset(GEN2_IIR), val); 222 intel_uncore_write16(uncore, GEN2_IIR, 0xffff); 223 intel_uncore_posting_read16(uncore, GEN2_IIR); 224 intel_uncore_write16(uncore, GEN2_IIR, 0xffff); 225 intel_uncore_posting_read16(uncore, GEN2_IIR); 226 } 227 228 static void gen3_irq_init(struct intel_uncore *uncore, 229 i915_reg_t imr, u32 imr_val, 230 i915_reg_t ier, u32 ier_val, 231 i915_reg_t iir) 232 { 233 gen3_assert_iir_is_zero(uncore, iir); 234 235 intel_uncore_write(uncore, ier, ier_val); 236 intel_uncore_write(uncore, imr, imr_val); 237 intel_uncore_posting_read(uncore, imr); 238 } 239 240 static void gen2_irq_init(struct intel_uncore *uncore, 241 u32 imr_val, u32 ier_val) 242 { 243 gen2_assert_iir_is_zero(uncore); 244 245 intel_uncore_write16(uncore, GEN2_IER, ier_val); 246 intel_uncore_write16(uncore, GEN2_IMR, imr_val); 247 intel_uncore_posting_read16(uncore, GEN2_IMR); 248 } 249 250 #define GEN8_IRQ_INIT_NDX(uncore, type, which, imr_val, ier_val) \ 251 ({ \ 252 unsigned int which_ = which; \ 253 gen3_irq_init((uncore), \ 254 GEN8_##type##_IMR(which_), imr_val, \ 255 GEN8_##type##_IER(which_), ier_val, \ 256 GEN8_##type##_IIR(which_)); \ 257 }) 258 259 #define GEN3_IRQ_INIT(uncore, type, imr_val, ier_val) \ 260 gen3_irq_init((uncore), \ 261 type##IMR, imr_val, \ 262 type##IER, ier_val, \ 263 type##IIR) 264 265 #define GEN2_IRQ_INIT(uncore, imr_val, ier_val) \ 266 gen2_irq_init((uncore), imr_val, ier_val) 267 268 static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir); 269 static void guc_irq_handler(struct intel_guc *guc, u16 guc_iir); 270 271 /* For display hotplug interrupt */ 272 static 
inline void
i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
				     u32 mask,
				     u32 bits)
{
	u32 val;

	lockdep_assert_held(&dev_priv->irq_lock);
	WARN_ON(bits & ~mask);

	val = I915_READ(PORT_HOTPLUG_EN);
	val &= ~mask;
	val |= bits;
	I915_WRITE(PORT_HOTPLUG_EN, val);
}

/**
 * i915_hotplug_interrupt_update - update hotplug interrupt enable
 * @dev_priv: driver private
 * @mask: bits to update
 * @bits: bits to enable
 * NOTE: the HPD enable bits are modified both inside and outside
 * of an interrupt context. To avoid that read-modify-write cycles
 * interfere, these bits are protected by a spinlock. Since this
 * function is usually not called from a context where the lock is
 * held already, this function acquires the lock itself. A non-locking
 * version is also available.
 */
void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
				   u32 mask,
				   u32 bits)
{
	spin_lock_irq(&dev_priv->irq_lock);
	i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
	spin_unlock_irq(&dev_priv->irq_lock);
}

static u32
gen11_gt_engine_identity(struct intel_gt *gt,
			 const unsigned int bank, const unsigned int bit);

static bool gen11_reset_one_iir(struct intel_gt *gt,
				const unsigned int bank,
				const unsigned int bit)
{
	void __iomem * const regs = gt->uncore->regs;
	u32 dw;

	lockdep_assert_held(&gt->i915->irq_lock);

	dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
	if (dw & BIT(bit)) {
		/*
		 * According to the BSpec, DW_IIR bits cannot be cleared without
		 * first servicing the Selector & Shared IIR registers.
		 */
		gen11_gt_engine_identity(gt, bank, bit);

		/*
		 * We locked GT INT DW by reading it. If we want to (try
		 * to) recover from this successfully, we need to clear
		 * our bit, otherwise we are locking the register for
		 * everybody.
335 */ 336 raw_reg_write(regs, GEN11_GT_INTR_DW(bank), BIT(bit)); 337 338 return true; 339 } 340 341 return false; 342 } 343 344 /** 345 * ilk_update_display_irq - update DEIMR 346 * @dev_priv: driver private 347 * @interrupt_mask: mask of interrupt bits to update 348 * @enabled_irq_mask: mask of interrupt bits to enable 349 */ 350 void ilk_update_display_irq(struct drm_i915_private *dev_priv, 351 u32 interrupt_mask, 352 u32 enabled_irq_mask) 353 { 354 u32 new_val; 355 356 lockdep_assert_held(&dev_priv->irq_lock); 357 358 WARN_ON(enabled_irq_mask & ~interrupt_mask); 359 360 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 361 return; 362 363 new_val = dev_priv->irq_mask; 364 new_val &= ~interrupt_mask; 365 new_val |= (~enabled_irq_mask & interrupt_mask); 366 367 if (new_val != dev_priv->irq_mask) { 368 dev_priv->irq_mask = new_val; 369 I915_WRITE(DEIMR, dev_priv->irq_mask); 370 POSTING_READ(DEIMR); 371 } 372 } 373 374 /** 375 * ilk_update_gt_irq - update GTIMR 376 * @dev_priv: driver private 377 * @interrupt_mask: mask of interrupt bits to update 378 * @enabled_irq_mask: mask of interrupt bits to enable 379 */ 380 static void ilk_update_gt_irq(struct drm_i915_private *dev_priv, 381 u32 interrupt_mask, 382 u32 enabled_irq_mask) 383 { 384 lockdep_assert_held(&dev_priv->irq_lock); 385 386 WARN_ON(enabled_irq_mask & ~interrupt_mask); 387 388 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 389 return; 390 391 dev_priv->gt_irq_mask &= ~interrupt_mask; 392 dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask); 393 I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 394 } 395 396 void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, u32 mask) 397 { 398 ilk_update_gt_irq(dev_priv, mask, mask); 399 intel_uncore_posting_read_fw(&dev_priv->uncore, GTIMR); 400 } 401 402 void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, u32 mask) 403 { 404 ilk_update_gt_irq(dev_priv, mask, 0); 405 } 406 407 static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv) 408 { 409 WARN_ON_ONCE(INTEL_GEN(dev_priv) >= 11); 410 411 return INTEL_GEN(dev_priv) >= 8 ? 
		GEN8_GT_IIR(2) : GEN6_PMIIR;
}

static void write_pm_imr(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	struct intel_uncore *uncore = gt->uncore;
	u32 mask = gt->pm_imr;
	i915_reg_t reg;

	if (INTEL_GEN(i915) >= 11) {
		reg = GEN11_GPM_WGBOXPERF_INTR_MASK;
		/* pm is in upper half */
		mask = mask << 16;
	} else if (INTEL_GEN(i915) >= 8) {
		reg = GEN8_GT_IMR(2);
	} else {
		reg = GEN6_PMIMR;
	}

	intel_uncore_write(uncore, reg, mask);
	intel_uncore_posting_read(uncore, reg);
}

static void write_pm_ier(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	struct intel_uncore *uncore = gt->uncore;
	u32 mask = gt->pm_ier;
	i915_reg_t reg;

	if (INTEL_GEN(i915) >= 11) {
		reg = GEN11_GPM_WGBOXPERF_INTR_ENABLE;
		/* pm is in upper half */
		mask = mask << 16;
	} else if (INTEL_GEN(i915) >= 8) {
		reg = GEN8_GT_IER(2);
	} else {
		reg = GEN6_PMIER;
	}

	intel_uncore_write(uncore, reg, mask);
}

/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @gt: gt for the interrupts
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct intel_gt *gt,
			      u32 interrupt_mask,
			      u32 enabled_irq_mask)
{
	u32 new_val;

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	lockdep_assert_held(&gt->i915->irq_lock);

	new_val = gt->pm_imr;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != gt->pm_imr) {
		gt->pm_imr = new_val;
		write_pm_imr(gt);
	}
}

void gen6_unmask_pm_irq(struct intel_gt *gt, u32 mask)
{
	if (WARN_ON(!intel_irqs_enabled(gt->i915)))
		return;

	snb_update_pm_irq(gt, mask, mask);
}

static void __gen6_mask_pm_irq(struct intel_gt *gt, u32 mask)
{
	snb_update_pm_irq(gt, mask, 0);
}

void gen6_mask_pm_irq(struct intel_gt *gt, u32 mask)
{
	if (WARN_ON(!intel_irqs_enabled(gt->i915)))
		return;

	__gen6_mask_pm_irq(gt, mask);
}

static void gen6_reset_pm_iir(struct drm_i915_private *dev_priv, u32 reset_mask)
{
	i915_reg_t reg = gen6_pm_iir(dev_priv);

	lockdep_assert_held(&dev_priv->irq_lock);

	I915_WRITE(reg, reset_mask);
	I915_WRITE(reg, reset_mask);
	POSTING_READ(reg);
}

static void gen6_enable_pm_irq(struct intel_gt *gt, u32 enable_mask)
{
	lockdep_assert_held(&gt->i915->irq_lock);

	gt->pm_ier |= enable_mask;
	write_pm_ier(gt);
	gen6_unmask_pm_irq(gt, enable_mask);
	/* unmask_pm_irq provides an implicit barrier (POSTING_READ) */
}

static void gen6_disable_pm_irq(struct intel_gt *gt, u32 disable_mask)
{
	lockdep_assert_held(&gt->i915->irq_lock);

	gt->pm_ier &= ~disable_mask;
	__gen6_mask_pm_irq(gt, disable_mask);
	write_pm_ier(gt);
	/* a barrier is missing here, but we don't really need one */
}

void gen11_reset_rps_interrupts(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);

	while (gen11_reset_one_iir(&dev_priv->gt, 0, GEN11_GTPM))
		;

	dev_priv->gt_pm.rps.pm_iir = 0;

	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	gen6_reset_pm_iir(dev_priv, GEN6_PM_RPS_EVENTS);
	dev_priv->gt_pm.rps.pm_iir = 0;
	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
{
	struct intel_gt *gt = &dev_priv->gt;
	struct intel_rps *rps = &dev_priv->gt_pm.rps;

	if (READ_ONCE(rps->interrupts_enabled))
		return;

	spin_lock_irq(&dev_priv->irq_lock);
	WARN_ON_ONCE(rps->pm_iir);

	if (INTEL_GEN(dev_priv) >= 11)
		WARN_ON_ONCE(gen11_reset_one_iir(gt, 0, GEN11_GTPM));
	else
		WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);

	rps->interrupts_enabled = true;
	gen6_enable_pm_irq(gt, dev_priv->pm_rps_events);

	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
{
	struct intel_rps *rps = &dev_priv->gt_pm.rps;

	if (!READ_ONCE(rps->interrupts_enabled))
		return;

	spin_lock_irq(&dev_priv->irq_lock);
	rps->interrupts_enabled = false;

	I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0u));

	gen6_disable_pm_irq(&dev_priv->gt, GEN6_PM_RPS_EVENTS);

	spin_unlock_irq(&dev_priv->irq_lock);
	intel_synchronize_irq(dev_priv);

	/* Now that we will not be generating any more work, flush any
	 * outstanding tasks. As we are called on the RPS idle path,
	 * we will reset the GPU to minimum frequencies, so the current
	 * state of the worker can be discarded.
	 */
	cancel_work_sync(&rps->work);
	if (INTEL_GEN(dev_priv) >= 11)
		gen11_reset_rps_interrupts(dev_priv);
	else
		gen6_reset_rps_interrupts(dev_priv);
}

void gen9_reset_guc_interrupts(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);
	struct drm_i915_private *i915 = gt->i915;

	assert_rpm_wakelock_held(&i915->runtime_pm);

	spin_lock_irq(&i915->irq_lock);
	gen6_reset_pm_iir(i915, gt->pm_guc_events);
	spin_unlock_irq(&i915->irq_lock);
}

void gen9_enable_guc_interrupts(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);
	struct drm_i915_private *i915 = gt->i915;

	assert_rpm_wakelock_held(&i915->runtime_pm);

	spin_lock_irq(&i915->irq_lock);
	if (!guc->interrupts.enabled) {
		WARN_ON_ONCE(intel_uncore_read(gt->uncore, gen6_pm_iir(i915)) &
			     gt->pm_guc_events);
		guc->interrupts.enabled = true;
		gen6_enable_pm_irq(gt, gt->pm_guc_events);
	}
	spin_unlock_irq(&i915->irq_lock);
}

void gen9_disable_guc_interrupts(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);
	struct drm_i915_private *i915 = gt->i915;

	assert_rpm_wakelock_held(&i915->runtime_pm);

	spin_lock_irq(&i915->irq_lock);
	guc->interrupts.enabled = false;

	gen6_disable_pm_irq(gt, gt->pm_guc_events);

	spin_unlock_irq(&i915->irq_lock);
	intel_synchronize_irq(i915);

	gen9_reset_guc_interrupts(guc);
}

void gen11_reset_guc_interrupts(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);
	struct drm_i915_private *i915 = gt->i915;

	spin_lock_irq(&i915->irq_lock);
	gen11_reset_one_iir(gt, 0, GEN11_GUC);
	spin_unlock_irq(&i915->irq_lock);
}

void gen11_enable_guc_interrupts(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	spin_lock_irq(&gt->i915->irq_lock);
	if (!guc->interrupts.enabled) {
		u32 events = REG_FIELD_PREP(ENGINE1_MASK, GUC_INTR_GUC2HOST);

		WARN_ON_ONCE(gen11_reset_one_iir(gt, 0, GEN11_GUC));
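		/* Enable and unmask only the GuC-to-host event in the GuC SG registers. */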
intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_ENABLE, events); 671 intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_MASK, ~events); 672 guc->interrupts.enabled = true; 673 } 674 spin_unlock_irq(>->i915->irq_lock); 675 } 676 677 void gen11_disable_guc_interrupts(struct intel_guc *guc) 678 { 679 struct intel_gt *gt = guc_to_gt(guc); 680 struct drm_i915_private *i915 = gt->i915; 681 682 spin_lock_irq(&i915->irq_lock); 683 guc->interrupts.enabled = false; 684 685 intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_MASK, ~0); 686 intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_ENABLE, 0); 687 688 spin_unlock_irq(&i915->irq_lock); 689 intel_synchronize_irq(i915); 690 691 gen11_reset_guc_interrupts(guc); 692 } 693 694 /** 695 * bdw_update_port_irq - update DE port interrupt 696 * @dev_priv: driver private 697 * @interrupt_mask: mask of interrupt bits to update 698 * @enabled_irq_mask: mask of interrupt bits to enable 699 */ 700 static void bdw_update_port_irq(struct drm_i915_private *dev_priv, 701 u32 interrupt_mask, 702 u32 enabled_irq_mask) 703 { 704 u32 new_val; 705 u32 old_val; 706 707 lockdep_assert_held(&dev_priv->irq_lock); 708 709 WARN_ON(enabled_irq_mask & ~interrupt_mask); 710 711 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 712 return; 713 714 old_val = I915_READ(GEN8_DE_PORT_IMR); 715 716 new_val = old_val; 717 new_val &= ~interrupt_mask; 718 new_val |= (~enabled_irq_mask & interrupt_mask); 719 720 if (new_val != old_val) { 721 I915_WRITE(GEN8_DE_PORT_IMR, new_val); 722 POSTING_READ(GEN8_DE_PORT_IMR); 723 } 724 } 725 726 /** 727 * bdw_update_pipe_irq - update DE pipe interrupt 728 * @dev_priv: driver private 729 * @pipe: pipe whose interrupt to update 730 * @interrupt_mask: mask of interrupt bits to update 731 * @enabled_irq_mask: mask of interrupt bits to enable 732 */ 733 void bdw_update_pipe_irq(struct drm_i915_private *dev_priv, 734 enum pipe pipe, 735 u32 interrupt_mask, 736 u32 enabled_irq_mask) 737 { 738 u32 new_val; 739 740 lockdep_assert_held(&dev_priv->irq_lock); 741 742 WARN_ON(enabled_irq_mask & ~interrupt_mask); 743 744 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 745 return; 746 747 new_val = dev_priv->de_irq_mask[pipe]; 748 new_val &= ~interrupt_mask; 749 new_val |= (~enabled_irq_mask & interrupt_mask); 750 751 if (new_val != dev_priv->de_irq_mask[pipe]) { 752 dev_priv->de_irq_mask[pipe] = new_val; 753 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); 754 POSTING_READ(GEN8_DE_PIPE_IMR(pipe)); 755 } 756 } 757 758 /** 759 * ibx_display_interrupt_update - update SDEIMR 760 * @dev_priv: driver private 761 * @interrupt_mask: mask of interrupt bits to update 762 * @enabled_irq_mask: mask of interrupt bits to enable 763 */ 764 void ibx_display_interrupt_update(struct drm_i915_private *dev_priv, 765 u32 interrupt_mask, 766 u32 enabled_irq_mask) 767 { 768 u32 sdeimr = I915_READ(SDEIMR); 769 sdeimr &= ~interrupt_mask; 770 sdeimr |= (~enabled_irq_mask & interrupt_mask); 771 772 WARN_ON(enabled_irq_mask & ~interrupt_mask); 773 774 lockdep_assert_held(&dev_priv->irq_lock); 775 776 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 777 return; 778 779 I915_WRITE(SDEIMR, sdeimr); 780 POSTING_READ(SDEIMR); 781 } 782 783 u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv, 784 enum pipe pipe) 785 { 786 u32 status_mask = dev_priv->pipestat_irq_mask[pipe]; 787 u32 enable_mask = status_mask << 16; 788 789 lockdep_assert_held(&dev_priv->irq_lock); 790 791 if (INTEL_GEN(dev_priv) < 5) 792 goto out; 793 794 /* 795 * On pipe A we don't support the PSR interrupt yet, 796 * on 
pipe B and C the same bit MBZ. 797 */ 798 if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV)) 799 return 0; 800 /* 801 * On pipe B and C we don't support the PSR interrupt yet, on pipe 802 * A the same bit is for perf counters which we don't use either. 803 */ 804 if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV)) 805 return 0; 806 807 enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS | 808 SPRITE0_FLIP_DONE_INT_EN_VLV | 809 SPRITE1_FLIP_DONE_INT_EN_VLV); 810 if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV) 811 enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV; 812 if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV) 813 enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV; 814 815 out: 816 WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK || 817 status_mask & ~PIPESTAT_INT_STATUS_MASK, 818 "pipe %c: enable_mask=0x%x, status_mask=0x%x\n", 819 pipe_name(pipe), enable_mask, status_mask); 820 821 return enable_mask; 822 } 823 824 void i915_enable_pipestat(struct drm_i915_private *dev_priv, 825 enum pipe pipe, u32 status_mask) 826 { 827 i915_reg_t reg = PIPESTAT(pipe); 828 u32 enable_mask; 829 830 WARN_ONCE(status_mask & ~PIPESTAT_INT_STATUS_MASK, 831 "pipe %c: status_mask=0x%x\n", 832 pipe_name(pipe), status_mask); 833 834 lockdep_assert_held(&dev_priv->irq_lock); 835 WARN_ON(!intel_irqs_enabled(dev_priv)); 836 837 if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == status_mask) 838 return; 839 840 dev_priv->pipestat_irq_mask[pipe] |= status_mask; 841 enable_mask = i915_pipestat_enable_mask(dev_priv, pipe); 842 843 I915_WRITE(reg, enable_mask | status_mask); 844 POSTING_READ(reg); 845 } 846 847 void i915_disable_pipestat(struct drm_i915_private *dev_priv, 848 enum pipe pipe, u32 status_mask) 849 { 850 i915_reg_t reg = PIPESTAT(pipe); 851 u32 enable_mask; 852 853 WARN_ONCE(status_mask & ~PIPESTAT_INT_STATUS_MASK, 854 "pipe %c: status_mask=0x%x\n", 855 pipe_name(pipe), status_mask); 856 857 lockdep_assert_held(&dev_priv->irq_lock); 858 WARN_ON(!intel_irqs_enabled(dev_priv)); 859 860 if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == 0) 861 return; 862 863 dev_priv->pipestat_irq_mask[pipe] &= ~status_mask; 864 enable_mask = i915_pipestat_enable_mask(dev_priv, pipe); 865 866 I915_WRITE(reg, enable_mask | status_mask); 867 POSTING_READ(reg); 868 } 869 870 static bool i915_has_asle(struct drm_i915_private *dev_priv) 871 { 872 if (!dev_priv->opregion.asle) 873 return false; 874 875 return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv); 876 } 877 878 /** 879 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion 880 * @dev_priv: i915 device private 881 */ 882 static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv) 883 { 884 if (!i915_has_asle(dev_priv)) 885 return; 886 887 spin_lock_irq(&dev_priv->irq_lock); 888 889 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS); 890 if (INTEL_GEN(dev_priv) >= 4) 891 i915_enable_pipestat(dev_priv, PIPE_A, 892 PIPE_LEGACY_BLC_EVENT_STATUS); 893 894 spin_unlock_irq(&dev_priv->irq_lock); 895 } 896 897 /* 898 * This timing diagram depicts the video signal in and 899 * around the vertical blanking period. 900 * 901 * Assumptions about the fictitious mode used in this example: 902 * vblank_start >= 3 903 * vsync_start = vblank_start + 1 904 * vsync_end = vblank_start + 2 905 * vtotal = vblank_start + 3 906 * 907 * start of vblank: 908 * latch double buffered registers 909 * increment frame counter (ctg+) 910 * generate start of vblank interrupt (gen4+) 911 * | 912 * | frame start: 913 * | generate frame start interrupt (aka. 
vblank interrupt) (gmch) 914 * | may be shifted forward 1-3 extra lines via PIPECONF 915 * | | 916 * | | start of vsync: 917 * | | generate vsync interrupt 918 * | | | 919 * ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx 920 * . \hs/ . \hs/ \hs/ \hs/ . \hs/ 921 * ----va---> <-----------------vb--------------------> <--------va------------- 922 * | | <----vs-----> | 923 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2) 924 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+) 925 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi) 926 * | | | 927 * last visible pixel first visible pixel 928 * | increment frame counter (gen3/4) 929 * pixel counter = vblank_start * htotal pixel counter = 0 (gen3/4) 930 * 931 * x = horizontal active 932 * _ = horizontal blanking 933 * hs = horizontal sync 934 * va = vertical active 935 * vb = vertical blanking 936 * vs = vertical sync 937 * vbs = vblank_start (number) 938 * 939 * Summary: 940 * - most events happen at the start of horizontal sync 941 * - frame start happens at the start of horizontal blank, 1-4 lines 942 * (depending on PIPECONF settings) after the start of vblank 943 * - gen3/4 pixel and frame counter are synchronized with the start 944 * of horizontal active on the first line of vertical active 945 */ 946 947 /* Called from drm generic code, passed a 'crtc', which 948 * we use as a pipe index 949 */ 950 u32 i915_get_vblank_counter(struct drm_crtc *crtc) 951 { 952 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 953 struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[drm_crtc_index(crtc)]; 954 const struct drm_display_mode *mode = &vblank->hwmode; 955 enum pipe pipe = to_intel_crtc(crtc)->pipe; 956 i915_reg_t high_frame, low_frame; 957 u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal; 958 unsigned long irqflags; 959 960 /* 961 * On i965gm TV output the frame counter only works up to 962 * the point when we enable the TV encoder. After that the 963 * frame counter ceases to work and reads zero. We need a 964 * vblank wait before enabling the TV encoder and so we 965 * have to enable vblank interrupts while the frame counter 966 * is still in a working state. However the core vblank code 967 * does not like us returning non-zero frame counter values 968 * when we've told it that we don't have a working frame 969 * counter. Thus we must stop non-zero values leaking out. 970 */ 971 if (!vblank->max_vblank_count) 972 return 0; 973 974 htotal = mode->crtc_htotal; 975 hsync_start = mode->crtc_hsync_start; 976 vbl_start = mode->crtc_vblank_start; 977 if (mode->flags & DRM_MODE_FLAG_INTERLACE) 978 vbl_start = DIV_ROUND_UP(vbl_start, 2); 979 980 /* Convert to pixel count */ 981 vbl_start *= htotal; 982 983 /* Start of vblank event occurs at start of hsync */ 984 vbl_start -= htotal - hsync_start; 985 986 high_frame = PIPEFRAME(pipe); 987 low_frame = PIPEFRAMEPIXEL(pipe); 988 989 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 990 991 /* 992 * High & low register fields aren't synchronized, so make sure 993 * we get a low value that's stable across two reads of the high 994 * register. 
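	 * (The loop below re-reads the high register until two consecutive
	 * reads agree, so the low read is known to belong to the same frame.)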
995 */ 996 do { 997 high1 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK; 998 low = I915_READ_FW(low_frame); 999 high2 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK; 1000 } while (high1 != high2); 1001 1002 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 1003 1004 high1 >>= PIPE_FRAME_HIGH_SHIFT; 1005 pixel = low & PIPE_PIXEL_MASK; 1006 low >>= PIPE_FRAME_LOW_SHIFT; 1007 1008 /* 1009 * The frame counter increments at beginning of active. 1010 * Cook up a vblank counter by also checking the pixel 1011 * counter against vblank start. 1012 */ 1013 return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff; 1014 } 1015 1016 u32 g4x_get_vblank_counter(struct drm_crtc *crtc) 1017 { 1018 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 1019 enum pipe pipe = to_intel_crtc(crtc)->pipe; 1020 1021 return I915_READ(PIPE_FRMCOUNT_G4X(pipe)); 1022 } 1023 1024 /* 1025 * On certain encoders on certain platforms, pipe 1026 * scanline register will not work to get the scanline, 1027 * since the timings are driven from the PORT or issues 1028 * with scanline register updates. 1029 * This function will use Framestamp and current 1030 * timestamp registers to calculate the scanline. 1031 */ 1032 static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc) 1033 { 1034 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1035 struct drm_vblank_crtc *vblank = 1036 &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)]; 1037 const struct drm_display_mode *mode = &vblank->hwmode; 1038 u32 vblank_start = mode->crtc_vblank_start; 1039 u32 vtotal = mode->crtc_vtotal; 1040 u32 htotal = mode->crtc_htotal; 1041 u32 clock = mode->crtc_clock; 1042 u32 scanline, scan_prev_time, scan_curr_time, scan_post_time; 1043 1044 /* 1045 * To avoid the race condition where we might cross into the 1046 * next vblank just between the PIPE_FRMTMSTMP and TIMESTAMP_CTR 1047 * reads. We make sure we read PIPE_FRMTMSTMP and TIMESTAMP_CTR 1048 * during the same frame. 1049 */ 1050 do { 1051 /* 1052 * This field provides read back of the display 1053 * pipe frame time stamp. The time stamp value 1054 * is sampled at every start of vertical blank. 1055 */ 1056 scan_prev_time = I915_READ_FW(PIPE_FRMTMSTMP(crtc->pipe)); 1057 1058 /* 1059 * The TIMESTAMP_CTR register has the current 1060 * time stamp value. 1061 */ 1062 scan_curr_time = I915_READ_FW(IVB_TIMESTAMP_CTR); 1063 1064 scan_post_time = I915_READ_FW(PIPE_FRMTMSTMP(crtc->pipe)); 1065 } while (scan_post_time != scan_prev_time); 1066 1067 scanline = div_u64(mul_u32_u32(scan_curr_time - scan_prev_time, 1068 clock), 1000 * htotal); 1069 scanline = min(scanline, vtotal - 1); 1070 scanline = (scanline + vblank_start) % vtotal; 1071 1072 return scanline; 1073 } 1074 1075 /* I915_READ_FW, only for fast reads of display block, no need for forcewake etc. 
*/ 1076 static int __intel_get_crtc_scanline(struct intel_crtc *crtc) 1077 { 1078 struct drm_device *dev = crtc->base.dev; 1079 struct drm_i915_private *dev_priv = to_i915(dev); 1080 const struct drm_display_mode *mode; 1081 struct drm_vblank_crtc *vblank; 1082 enum pipe pipe = crtc->pipe; 1083 int position, vtotal; 1084 1085 if (!crtc->active) 1086 return -1; 1087 1088 vblank = &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)]; 1089 mode = &vblank->hwmode; 1090 1091 if (mode->private_flags & I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP) 1092 return __intel_get_crtc_scanline_from_timestamp(crtc); 1093 1094 vtotal = mode->crtc_vtotal; 1095 if (mode->flags & DRM_MODE_FLAG_INTERLACE) 1096 vtotal /= 2; 1097 1098 if (IS_GEN(dev_priv, 2)) 1099 position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2; 1100 else 1101 position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3; 1102 1103 /* 1104 * On HSW, the DSL reg (0x70000) appears to return 0 if we 1105 * read it just before the start of vblank. So try it again 1106 * so we don't accidentally end up spanning a vblank frame 1107 * increment, causing the pipe_update_end() code to squak at us. 1108 * 1109 * The nature of this problem means we can't simply check the ISR 1110 * bit and return the vblank start value; nor can we use the scanline 1111 * debug register in the transcoder as it appears to have the same 1112 * problem. We may need to extend this to include other platforms, 1113 * but so far testing only shows the problem on HSW. 1114 */ 1115 if (HAS_DDI(dev_priv) && !position) { 1116 int i, temp; 1117 1118 for (i = 0; i < 100; i++) { 1119 udelay(1); 1120 temp = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3; 1121 if (temp != position) { 1122 position = temp; 1123 break; 1124 } 1125 } 1126 } 1127 1128 /* 1129 * See update_scanline_offset() for the details on the 1130 * scanline_offset adjustment. 1131 */ 1132 return (position + crtc->scanline_offset) % vtotal; 1133 } 1134 1135 bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe, 1136 bool in_vblank_irq, int *vpos, int *hpos, 1137 ktime_t *stime, ktime_t *etime, 1138 const struct drm_display_mode *mode) 1139 { 1140 struct drm_i915_private *dev_priv = to_i915(dev); 1141 struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv, 1142 pipe); 1143 int position; 1144 int vbl_start, vbl_end, hsync_start, htotal, vtotal; 1145 unsigned long irqflags; 1146 bool use_scanline_counter = INTEL_GEN(dev_priv) >= 5 || 1147 IS_G4X(dev_priv) || IS_GEN(dev_priv, 2) || 1148 mode->private_flags & I915_MODE_FLAG_USE_SCANLINE_COUNTER; 1149 1150 if (WARN_ON(!mode->crtc_clock)) { 1151 DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled " 1152 "pipe %c\n", pipe_name(pipe)); 1153 return false; 1154 } 1155 1156 htotal = mode->crtc_htotal; 1157 hsync_start = mode->crtc_hsync_start; 1158 vtotal = mode->crtc_vtotal; 1159 vbl_start = mode->crtc_vblank_start; 1160 vbl_end = mode->crtc_vblank_end; 1161 1162 if (mode->flags & DRM_MODE_FLAG_INTERLACE) { 1163 vbl_start = DIV_ROUND_UP(vbl_start, 2); 1164 vbl_end /= 2; 1165 vtotal /= 2; 1166 } 1167 1168 /* 1169 * Lock uncore.lock, as we will do multiple timing critical raw 1170 * register reads, potentially with preemption disabled, so the 1171 * following code must not block on uncore.lock. 1172 */ 1173 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 1174 1175 /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */ 1176 1177 /* Get optional system timestamp before query. 
 */
	if (stime)
		*stime = ktime_get();

	if (use_scanline_counter) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = __intel_get_crtc_scanline(intel_crtc);
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (I915_READ_FW(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;

		/*
		 * In interlaced modes, the pixel counter counts all pixels,
		 * so one field will have htotal more pixels. In order to avoid
		 * the reported position from jumping backwards when the pixel
		 * counter is beyond the length of the shorter field, just
		 * clamp the position to the length of the shorter field. This
		 * matches how the scanline counter based position works since
		 * the scanline counter doesn't count the two half lines.
		 */
		if (position >= vtotal)
			position = vtotal - 1;

		/*
		 * Start of vblank interrupt is triggered at start of hsync,
		 * just prior to the first active line of vblank. However we
		 * consider lines to start at the leading edge of horizontal
		 * active. So, should we get here before we've crossed into
		 * the horizontal active of the first line in vblank, we would
		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
		 * always add htotal-hsync_start to the current pixel position.
		 */
		position = (position + htotal - hsync_start) % vtotal;
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
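	 *
	 * For example, with vtotal=10, vbl_start=8, vbl_end=10 the mapping
	 * below gives: scanline 8 -> -2, 9 -> -1, 0 -> 0, 7 -> 7.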
1235 */ 1236 if (position >= vbl_start) 1237 position -= vbl_end; 1238 else 1239 position += vtotal - vbl_end; 1240 1241 if (use_scanline_counter) { 1242 *vpos = position; 1243 *hpos = 0; 1244 } else { 1245 *vpos = position / htotal; 1246 *hpos = position - (*vpos * htotal); 1247 } 1248 1249 return true; 1250 } 1251 1252 int intel_get_crtc_scanline(struct intel_crtc *crtc) 1253 { 1254 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1255 unsigned long irqflags; 1256 int position; 1257 1258 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 1259 position = __intel_get_crtc_scanline(crtc); 1260 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 1261 1262 return position; 1263 } 1264 1265 static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv) 1266 { 1267 struct intel_uncore *uncore = &dev_priv->uncore; 1268 u32 busy_up, busy_down, max_avg, min_avg; 1269 u8 new_delay; 1270 1271 spin_lock(&mchdev_lock); 1272 1273 intel_uncore_write16(uncore, 1274 MEMINTRSTS, 1275 intel_uncore_read(uncore, MEMINTRSTS)); 1276 1277 new_delay = dev_priv->ips.cur_delay; 1278 1279 intel_uncore_write16(uncore, MEMINTRSTS, MEMINT_EVAL_CHG); 1280 busy_up = intel_uncore_read(uncore, RCPREVBSYTUPAVG); 1281 busy_down = intel_uncore_read(uncore, RCPREVBSYTDNAVG); 1282 max_avg = intel_uncore_read(uncore, RCBMAXAVG); 1283 min_avg = intel_uncore_read(uncore, RCBMINAVG); 1284 1285 /* Handle RCS change request from hw */ 1286 if (busy_up > max_avg) { 1287 if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay) 1288 new_delay = dev_priv->ips.cur_delay - 1; 1289 if (new_delay < dev_priv->ips.max_delay) 1290 new_delay = dev_priv->ips.max_delay; 1291 } else if (busy_down < min_avg) { 1292 if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay) 1293 new_delay = dev_priv->ips.cur_delay + 1; 1294 if (new_delay > dev_priv->ips.min_delay) 1295 new_delay = dev_priv->ips.min_delay; 1296 } 1297 1298 if (ironlake_set_drps(dev_priv, new_delay)) 1299 dev_priv->ips.cur_delay = new_delay; 1300 1301 spin_unlock(&mchdev_lock); 1302 1303 return; 1304 } 1305 1306 static void vlv_c0_read(struct drm_i915_private *dev_priv, 1307 struct intel_rps_ei *ei) 1308 { 1309 ei->ktime = ktime_get_raw(); 1310 ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT); 1311 ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT); 1312 } 1313 1314 void gen6_rps_reset_ei(struct drm_i915_private *dev_priv) 1315 { 1316 memset(&dev_priv->gt_pm.rps.ei, 0, sizeof(dev_priv->gt_pm.rps.ei)); 1317 } 1318 1319 static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir) 1320 { 1321 struct intel_rps *rps = &dev_priv->gt_pm.rps; 1322 const struct intel_rps_ei *prev = &rps->ei; 1323 struct intel_rps_ei now; 1324 u32 events = 0; 1325 1326 if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0) 1327 return 0; 1328 1329 vlv_c0_read(dev_priv, &now); 1330 1331 if (prev->ktime) { 1332 u64 time, c0; 1333 u32 render, media; 1334 1335 time = ktime_us_delta(now.ktime, prev->ktime); 1336 1337 time *= dev_priv->czclk_freq; 1338 1339 /* Workload can be split between render + media, 1340 * e.g. SwapBuffers being blitted in X after being rendered in 1341 * mesa. To account for this we need to combine both engines 1342 * into our activity counter. 
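		 * The larger of the two C0 residency deltas is then compared
		 * against the elapsed time scaled by the up/down thresholds
		 * to decide which RPS event, if any, to report.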
1343 */ 1344 render = now.render_c0 - prev->render_c0; 1345 media = now.media_c0 - prev->media_c0; 1346 c0 = max(render, media); 1347 c0 *= 1000 * 100 << 8; /* to usecs and scale to threshold% */ 1348 1349 if (c0 > time * rps->power.up_threshold) 1350 events = GEN6_PM_RP_UP_THRESHOLD; 1351 else if (c0 < time * rps->power.down_threshold) 1352 events = GEN6_PM_RP_DOWN_THRESHOLD; 1353 } 1354 1355 rps->ei = now; 1356 return events; 1357 } 1358 1359 static void gen6_pm_rps_work(struct work_struct *work) 1360 { 1361 struct drm_i915_private *dev_priv = 1362 container_of(work, struct drm_i915_private, gt_pm.rps.work); 1363 struct intel_rps *rps = &dev_priv->gt_pm.rps; 1364 bool client_boost = false; 1365 int new_delay, adj, min, max; 1366 u32 pm_iir = 0; 1367 1368 spin_lock_irq(&dev_priv->irq_lock); 1369 if (rps->interrupts_enabled) { 1370 pm_iir = fetch_and_zero(&rps->pm_iir); 1371 client_boost = atomic_read(&rps->num_waiters); 1372 } 1373 spin_unlock_irq(&dev_priv->irq_lock); 1374 1375 /* Make sure we didn't queue anything we're not going to process. */ 1376 WARN_ON(pm_iir & ~dev_priv->pm_rps_events); 1377 if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost) 1378 goto out; 1379 1380 mutex_lock(&rps->lock); 1381 1382 pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir); 1383 1384 adj = rps->last_adj; 1385 new_delay = rps->cur_freq; 1386 min = rps->min_freq_softlimit; 1387 max = rps->max_freq_softlimit; 1388 if (client_boost) 1389 max = rps->max_freq; 1390 if (client_boost && new_delay < rps->boost_freq) { 1391 new_delay = rps->boost_freq; 1392 adj = 0; 1393 } else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) { 1394 if (adj > 0) 1395 adj *= 2; 1396 else /* CHV needs even encode values */ 1397 adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1; 1398 1399 if (new_delay >= rps->max_freq_softlimit) 1400 adj = 0; 1401 } else if (client_boost) { 1402 adj = 0; 1403 } else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) { 1404 if (rps->cur_freq > rps->efficient_freq) 1405 new_delay = rps->efficient_freq; 1406 else if (rps->cur_freq > rps->min_freq_softlimit) 1407 new_delay = rps->min_freq_softlimit; 1408 adj = 0; 1409 } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) { 1410 if (adj < 0) 1411 adj *= 2; 1412 else /* CHV needs even encode values */ 1413 adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1; 1414 1415 if (new_delay <= rps->min_freq_softlimit) 1416 adj = 0; 1417 } else { /* unknown event */ 1418 adj = 0; 1419 } 1420 1421 rps->last_adj = adj; 1422 1423 /* 1424 * Limit deboosting and boosting to keep ourselves at the extremes 1425 * when in the respective power modes (i.e. slowly decrease frequencies 1426 * while in the HIGH_POWER zone and slowly increase frequencies while 1427 * in the LOW_POWER zone). On idle, we will hit the timeout and drop 1428 * to the next level quickly, and conversely if busy we expect to 1429 * hit a waitboost and rapidly switch into max power. 
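	 * Concretely, the check below drops last_adj back to zero whenever the
	 * pending adjustment direction opposes the current power mode.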
1430 */ 1431 if ((adj < 0 && rps->power.mode == HIGH_POWER) || 1432 (adj > 0 && rps->power.mode == LOW_POWER)) 1433 rps->last_adj = 0; 1434 1435 /* sysfs frequency interfaces may have snuck in while servicing the 1436 * interrupt 1437 */ 1438 new_delay += adj; 1439 new_delay = clamp_t(int, new_delay, min, max); 1440 1441 if (intel_set_rps(dev_priv, new_delay)) { 1442 DRM_DEBUG_DRIVER("Failed to set new GPU frequency\n"); 1443 rps->last_adj = 0; 1444 } 1445 1446 mutex_unlock(&rps->lock); 1447 1448 out: 1449 /* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */ 1450 spin_lock_irq(&dev_priv->irq_lock); 1451 if (rps->interrupts_enabled) 1452 gen6_unmask_pm_irq(&dev_priv->gt, dev_priv->pm_rps_events); 1453 spin_unlock_irq(&dev_priv->irq_lock); 1454 } 1455 1456 1457 /** 1458 * ivybridge_parity_work - Workqueue called when a parity error interrupt 1459 * occurred. 1460 * @work: workqueue struct 1461 * 1462 * Doesn't actually do anything except notify userspace. As a consequence of 1463 * this event, userspace should try to remap the bad rows since statistically 1464 * it is likely the same row is more likely to go bad again. 1465 */ 1466 static void ivybridge_parity_work(struct work_struct *work) 1467 { 1468 struct drm_i915_private *dev_priv = 1469 container_of(work, typeof(*dev_priv), l3_parity.error_work); 1470 u32 error_status, row, bank, subbank; 1471 char *parity_event[6]; 1472 u32 misccpctl; 1473 u8 slice = 0; 1474 1475 /* We must turn off DOP level clock gating to access the L3 registers. 1476 * In order to prevent a get/put style interface, acquire struct mutex 1477 * any time we access those registers. 1478 */ 1479 mutex_lock(&dev_priv->drm.struct_mutex); 1480 1481 /* If we've screwed up tracking, just let the interrupt fire again */ 1482 if (WARN_ON(!dev_priv->l3_parity.which_slice)) 1483 goto out; 1484 1485 misccpctl = I915_READ(GEN7_MISCCPCTL); 1486 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE); 1487 POSTING_READ(GEN7_MISCCPCTL); 1488 1489 while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) { 1490 i915_reg_t reg; 1491 1492 slice--; 1493 if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv))) 1494 break; 1495 1496 dev_priv->l3_parity.which_slice &= ~(1<<slice); 1497 1498 reg = GEN7_L3CDERRST1(slice); 1499 1500 error_status = I915_READ(reg); 1501 row = GEN7_PARITY_ERROR_ROW(error_status); 1502 bank = GEN7_PARITY_ERROR_BANK(error_status); 1503 subbank = GEN7_PARITY_ERROR_SUBBANK(error_status); 1504 1505 I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE); 1506 POSTING_READ(reg); 1507 1508 parity_event[0] = I915_L3_PARITY_UEVENT "=1"; 1509 parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row); 1510 parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank); 1511 parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank); 1512 parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice); 1513 parity_event[5] = NULL; 1514 1515 kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj, 1516 KOBJ_CHANGE, parity_event); 1517 1518 DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n", 1519 slice, row, bank, subbank); 1520 1521 kfree(parity_event[4]); 1522 kfree(parity_event[3]); 1523 kfree(parity_event[2]); 1524 kfree(parity_event[1]); 1525 } 1526 1527 I915_WRITE(GEN7_MISCCPCTL, misccpctl); 1528 1529 out: 1530 WARN_ON(dev_priv->l3_parity.which_slice); 1531 spin_lock_irq(&dev_priv->irq_lock); 1532 gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv)); 1533 spin_unlock_irq(&dev_priv->irq_lock); 1534 1535 
mutex_unlock(&dev_priv->drm.struct_mutex); 1536 } 1537 1538 static void ivybridge_parity_error_irq_handler(struct drm_i915_private *dev_priv, 1539 u32 iir) 1540 { 1541 if (!HAS_L3_DPF(dev_priv)) 1542 return; 1543 1544 spin_lock(&dev_priv->irq_lock); 1545 gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv)); 1546 spin_unlock(&dev_priv->irq_lock); 1547 1548 iir &= GT_PARITY_ERROR(dev_priv); 1549 if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1) 1550 dev_priv->l3_parity.which_slice |= 1 << 1; 1551 1552 if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT) 1553 dev_priv->l3_parity.which_slice |= 1 << 0; 1554 1555 queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work); 1556 } 1557 1558 static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv, 1559 u32 gt_iir) 1560 { 1561 if (gt_iir & GT_RENDER_USER_INTERRUPT) 1562 intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]); 1563 if (gt_iir & ILK_BSD_USER_INTERRUPT) 1564 intel_engine_breadcrumbs_irq(dev_priv->engine[VCS0]); 1565 } 1566 1567 static void snb_gt_irq_handler(struct drm_i915_private *dev_priv, 1568 u32 gt_iir) 1569 { 1570 if (gt_iir & GT_RENDER_USER_INTERRUPT) 1571 intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]); 1572 if (gt_iir & GT_BSD_USER_INTERRUPT) 1573 intel_engine_breadcrumbs_irq(dev_priv->engine[VCS0]); 1574 if (gt_iir & GT_BLT_USER_INTERRUPT) 1575 intel_engine_breadcrumbs_irq(dev_priv->engine[BCS0]); 1576 1577 if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT | 1578 GT_BSD_CS_ERROR_INTERRUPT | 1579 GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) 1580 DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir); 1581 1582 if (gt_iir & GT_PARITY_ERROR(dev_priv)) 1583 ivybridge_parity_error_irq_handler(dev_priv, gt_iir); 1584 } 1585 1586 static void 1587 gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir) 1588 { 1589 bool tasklet = false; 1590 1591 if (iir & GT_CONTEXT_SWITCH_INTERRUPT) 1592 tasklet = true; 1593 1594 if (iir & GT_RENDER_USER_INTERRUPT) { 1595 intel_engine_breadcrumbs_irq(engine); 1596 tasklet |= intel_engine_needs_breadcrumb_tasklet(engine); 1597 } 1598 1599 if (tasklet) 1600 tasklet_hi_schedule(&engine->execlists.tasklet); 1601 } 1602 1603 static void gen8_gt_irq_ack(struct drm_i915_private *i915, 1604 u32 master_ctl, u32 gt_iir[4]) 1605 { 1606 void __iomem * const regs = i915->uncore.regs; 1607 1608 #define GEN8_GT_IRQS (GEN8_GT_RCS_IRQ | \ 1609 GEN8_GT_BCS_IRQ | \ 1610 GEN8_GT_VCS0_IRQ | \ 1611 GEN8_GT_VCS1_IRQ | \ 1612 GEN8_GT_VECS_IRQ | \ 1613 GEN8_GT_PM_IRQ | \ 1614 GEN8_GT_GUC_IRQ) 1615 1616 if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) { 1617 gt_iir[0] = raw_reg_read(regs, GEN8_GT_IIR(0)); 1618 if (likely(gt_iir[0])) 1619 raw_reg_write(regs, GEN8_GT_IIR(0), gt_iir[0]); 1620 } 1621 1622 if (master_ctl & (GEN8_GT_VCS0_IRQ | GEN8_GT_VCS1_IRQ)) { 1623 gt_iir[1] = raw_reg_read(regs, GEN8_GT_IIR(1)); 1624 if (likely(gt_iir[1])) 1625 raw_reg_write(regs, GEN8_GT_IIR(1), gt_iir[1]); 1626 } 1627 1628 if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) { 1629 gt_iir[2] = raw_reg_read(regs, GEN8_GT_IIR(2)); 1630 if (likely(gt_iir[2])) 1631 raw_reg_write(regs, GEN8_GT_IIR(2), gt_iir[2]); 1632 } 1633 1634 if (master_ctl & GEN8_GT_VECS_IRQ) { 1635 gt_iir[3] = raw_reg_read(regs, GEN8_GT_IIR(3)); 1636 if (likely(gt_iir[3])) 1637 raw_reg_write(regs, GEN8_GT_IIR(3), gt_iir[3]); 1638 } 1639 } 1640 1641 static void gen8_gt_irq_handler(struct drm_i915_private *i915, 1642 u32 master_ctl, u32 gt_iir[4]) 1643 { 1644 if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) { 1645 gen8_cs_irq_handler(i915->engine[RCS0], 1646 
gt_iir[0] >> GEN8_RCS_IRQ_SHIFT); 1647 gen8_cs_irq_handler(i915->engine[BCS0], 1648 gt_iir[0] >> GEN8_BCS_IRQ_SHIFT); 1649 } 1650 1651 if (master_ctl & (GEN8_GT_VCS0_IRQ | GEN8_GT_VCS1_IRQ)) { 1652 gen8_cs_irq_handler(i915->engine[VCS0], 1653 gt_iir[1] >> GEN8_VCS0_IRQ_SHIFT); 1654 gen8_cs_irq_handler(i915->engine[VCS1], 1655 gt_iir[1] >> GEN8_VCS1_IRQ_SHIFT); 1656 } 1657 1658 if (master_ctl & GEN8_GT_VECS_IRQ) { 1659 gen8_cs_irq_handler(i915->engine[VECS0], 1660 gt_iir[3] >> GEN8_VECS_IRQ_SHIFT); 1661 } 1662 1663 if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) { 1664 gen6_rps_irq_handler(i915, gt_iir[2]); 1665 guc_irq_handler(&i915->gt.uc.guc, gt_iir[2] >> 16); 1666 } 1667 } 1668 1669 static bool gen11_port_hotplug_long_detect(enum hpd_pin pin, u32 val) 1670 { 1671 switch (pin) { 1672 case HPD_PORT_C: 1673 return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC1); 1674 case HPD_PORT_D: 1675 return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC2); 1676 case HPD_PORT_E: 1677 return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC3); 1678 case HPD_PORT_F: 1679 return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC4); 1680 default: 1681 return false; 1682 } 1683 } 1684 1685 static bool bxt_port_hotplug_long_detect(enum hpd_pin pin, u32 val) 1686 { 1687 switch (pin) { 1688 case HPD_PORT_A: 1689 return val & PORTA_HOTPLUG_LONG_DETECT; 1690 case HPD_PORT_B: 1691 return val & PORTB_HOTPLUG_LONG_DETECT; 1692 case HPD_PORT_C: 1693 return val & PORTC_HOTPLUG_LONG_DETECT; 1694 default: 1695 return false; 1696 } 1697 } 1698 1699 static bool icp_ddi_port_hotplug_long_detect(enum hpd_pin pin, u32 val) 1700 { 1701 switch (pin) { 1702 case HPD_PORT_A: 1703 return val & ICP_DDIA_HPD_LONG_DETECT; 1704 case HPD_PORT_B: 1705 return val & ICP_DDIB_HPD_LONG_DETECT; 1706 default: 1707 return false; 1708 } 1709 } 1710 1711 static bool icp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val) 1712 { 1713 switch (pin) { 1714 case HPD_PORT_C: 1715 return val & ICP_TC_HPD_LONG_DETECT(PORT_TC1); 1716 case HPD_PORT_D: 1717 return val & ICP_TC_HPD_LONG_DETECT(PORT_TC2); 1718 case HPD_PORT_E: 1719 return val & ICP_TC_HPD_LONG_DETECT(PORT_TC3); 1720 case HPD_PORT_F: 1721 return val & ICP_TC_HPD_LONG_DETECT(PORT_TC4); 1722 default: 1723 return false; 1724 } 1725 } 1726 1727 static bool spt_port_hotplug2_long_detect(enum hpd_pin pin, u32 val) 1728 { 1729 switch (pin) { 1730 case HPD_PORT_E: 1731 return val & PORTE_HOTPLUG_LONG_DETECT; 1732 default: 1733 return false; 1734 } 1735 } 1736 1737 static bool spt_port_hotplug_long_detect(enum hpd_pin pin, u32 val) 1738 { 1739 switch (pin) { 1740 case HPD_PORT_A: 1741 return val & PORTA_HOTPLUG_LONG_DETECT; 1742 case HPD_PORT_B: 1743 return val & PORTB_HOTPLUG_LONG_DETECT; 1744 case HPD_PORT_C: 1745 return val & PORTC_HOTPLUG_LONG_DETECT; 1746 case HPD_PORT_D: 1747 return val & PORTD_HOTPLUG_LONG_DETECT; 1748 default: 1749 return false; 1750 } 1751 } 1752 1753 static bool ilk_port_hotplug_long_detect(enum hpd_pin pin, u32 val) 1754 { 1755 switch (pin) { 1756 case HPD_PORT_A: 1757 return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT; 1758 default: 1759 return false; 1760 } 1761 } 1762 1763 static bool pch_port_hotplug_long_detect(enum hpd_pin pin, u32 val) 1764 { 1765 switch (pin) { 1766 case HPD_PORT_B: 1767 return val & PORTB_HOTPLUG_LONG_DETECT; 1768 case HPD_PORT_C: 1769 return val & PORTC_HOTPLUG_LONG_DETECT; 1770 case HPD_PORT_D: 1771 return val & PORTD_HOTPLUG_LONG_DETECT; 1772 default: 1773 return false; 1774 } 1775 } 1776 1777 static bool i9xx_port_hotplug_long_detect(enum hpd_pin pin, u32 
val) 1778 { 1779 switch (pin) { 1780 case HPD_PORT_B: 1781 return val & PORTB_HOTPLUG_INT_LONG_PULSE; 1782 case HPD_PORT_C: 1783 return val & PORTC_HOTPLUG_INT_LONG_PULSE; 1784 case HPD_PORT_D: 1785 return val & PORTD_HOTPLUG_INT_LONG_PULSE; 1786 default: 1787 return false; 1788 } 1789 } 1790 1791 /* 1792 * Get a bit mask of pins that have triggered, and which ones may be long. 1793 * This can be called multiple times with the same masks to accumulate 1794 * hotplug detection results from several registers. 1795 * 1796 * Note that the caller is expected to zero out the masks initially. 1797 */ 1798 static void intel_get_hpd_pins(struct drm_i915_private *dev_priv, 1799 u32 *pin_mask, u32 *long_mask, 1800 u32 hotplug_trigger, u32 dig_hotplug_reg, 1801 const u32 hpd[HPD_NUM_PINS], 1802 bool long_pulse_detect(enum hpd_pin pin, u32 val)) 1803 { 1804 enum hpd_pin pin; 1805 1806 for_each_hpd_pin(pin) { 1807 if ((hpd[pin] & hotplug_trigger) == 0) 1808 continue; 1809 1810 *pin_mask |= BIT(pin); 1811 1812 if (long_pulse_detect(pin, dig_hotplug_reg)) 1813 *long_mask |= BIT(pin); 1814 } 1815 1816 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x, long 0x%08x\n", 1817 hotplug_trigger, dig_hotplug_reg, *pin_mask, *long_mask); 1818 1819 } 1820 1821 static void gmbus_irq_handler(struct drm_i915_private *dev_priv) 1822 { 1823 wake_up_all(&dev_priv->gmbus_wait_queue); 1824 } 1825 1826 static void dp_aux_irq_handler(struct drm_i915_private *dev_priv) 1827 { 1828 wake_up_all(&dev_priv->gmbus_wait_queue); 1829 } 1830 1831 #if defined(CONFIG_DEBUG_FS) 1832 static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 1833 enum pipe pipe, 1834 u32 crc0, u32 crc1, 1835 u32 crc2, u32 crc3, 1836 u32 crc4) 1837 { 1838 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe]; 1839 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); 1840 u32 crcs[5] = { crc0, crc1, crc2, crc3, crc4 }; 1841 1842 trace_intel_pipe_crc(crtc, crcs); 1843 1844 spin_lock(&pipe_crc->lock); 1845 /* 1846 * For some not yet identified reason, the first CRC is 1847 * bonkers. So let's just wait for the next vblank and read 1848 * out the buggy result. 1849 * 1850 * On GEN8+ sometimes the second CRC is bonkers as well, so 1851 * don't trust that one either. 
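	 * The skipped counter below tracks how many leading CRC values have
	 * been discarded for this pipe so far.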
1852 */ 1853 if (pipe_crc->skipped <= 0 || 1854 (INTEL_GEN(dev_priv) >= 8 && pipe_crc->skipped == 1)) { 1855 pipe_crc->skipped++; 1856 spin_unlock(&pipe_crc->lock); 1857 return; 1858 } 1859 spin_unlock(&pipe_crc->lock); 1860 1861 drm_crtc_add_crc_entry(&crtc->base, true, 1862 drm_crtc_accurate_vblank_count(&crtc->base), 1863 crcs); 1864 } 1865 #else 1866 static inline void 1867 display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 1868 enum pipe pipe, 1869 u32 crc0, u32 crc1, 1870 u32 crc2, u32 crc3, 1871 u32 crc4) {} 1872 #endif 1873 1874 1875 static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 1876 enum pipe pipe) 1877 { 1878 display_pipe_crc_irq_handler(dev_priv, pipe, 1879 I915_READ(PIPE_CRC_RES_1_IVB(pipe)), 1880 0, 0, 0, 0); 1881 } 1882 1883 static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 1884 enum pipe pipe) 1885 { 1886 display_pipe_crc_irq_handler(dev_priv, pipe, 1887 I915_READ(PIPE_CRC_RES_1_IVB(pipe)), 1888 I915_READ(PIPE_CRC_RES_2_IVB(pipe)), 1889 I915_READ(PIPE_CRC_RES_3_IVB(pipe)), 1890 I915_READ(PIPE_CRC_RES_4_IVB(pipe)), 1891 I915_READ(PIPE_CRC_RES_5_IVB(pipe))); 1892 } 1893 1894 static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 1895 enum pipe pipe) 1896 { 1897 u32 res1, res2; 1898 1899 if (INTEL_GEN(dev_priv) >= 3) 1900 res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe)); 1901 else 1902 res1 = 0; 1903 1904 if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) 1905 res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe)); 1906 else 1907 res2 = 0; 1908 1909 display_pipe_crc_irq_handler(dev_priv, pipe, 1910 I915_READ(PIPE_CRC_RES_RED(pipe)), 1911 I915_READ(PIPE_CRC_RES_GREEN(pipe)), 1912 I915_READ(PIPE_CRC_RES_BLUE(pipe)), 1913 res1, res2); 1914 } 1915 1916 /* The RPS events need forcewake, so we add them to a work queue and mask their 1917 * IMR bits until the work is done. Other interrupts can be processed without 1918 * the work queue. 
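 * The handlers below only latch the newly signalled events into rps->pm_iir under the irq lock and schedule rps->work; the register access that actually needs forcewake is done later from the worker.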
*/ 1919 static void gen11_rps_irq_handler(struct intel_gt *gt, u32 pm_iir) 1920 { 1921 struct drm_i915_private *i915 = gt->i915; 1922 struct intel_rps *rps = &i915->gt_pm.rps; 1923 const u32 events = i915->pm_rps_events & pm_iir; 1924 1925 lockdep_assert_held(&i915->irq_lock); 1926 1927 if (unlikely(!events)) 1928 return; 1929 1930 gen6_mask_pm_irq(gt, events); 1931 1932 if (!rps->interrupts_enabled) 1933 return; 1934 1935 rps->pm_iir |= events; 1936 schedule_work(&rps->work); 1937 } 1938 1939 static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir) 1940 { 1941 struct intel_rps *rps = &dev_priv->gt_pm.rps; 1942 1943 if (pm_iir & dev_priv->pm_rps_events) { 1944 spin_lock(&dev_priv->irq_lock); 1945 gen6_mask_pm_irq(&dev_priv->gt, 1946 pm_iir & dev_priv->pm_rps_events); 1947 if (rps->interrupts_enabled) { 1948 rps->pm_iir |= pm_iir & dev_priv->pm_rps_events; 1949 schedule_work(&rps->work); 1950 } 1951 spin_unlock(&dev_priv->irq_lock); 1952 } 1953 1954 if (INTEL_GEN(dev_priv) >= 8) 1955 return; 1956 1957 if (pm_iir & PM_VEBOX_USER_INTERRUPT) 1958 intel_engine_breadcrumbs_irq(dev_priv->engine[VECS0]); 1959 1960 if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) 1961 DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir); 1962 } 1963 1964 static void guc_irq_handler(struct intel_guc *guc, u16 iir) 1965 { 1966 if (iir & GUC_INTR_GUC2HOST) 1967 intel_guc_to_host_event_handler(guc); 1968 } 1969 1970 static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv) 1971 { 1972 enum pipe pipe; 1973 1974 for_each_pipe(dev_priv, pipe) { 1975 I915_WRITE(PIPESTAT(pipe), 1976 PIPESTAT_INT_STATUS_MASK | 1977 PIPE_FIFO_UNDERRUN_STATUS); 1978 1979 dev_priv->pipestat_irq_mask[pipe] = 0; 1980 } 1981 } 1982 1983 static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv, 1984 u32 iir, u32 pipe_stats[I915_MAX_PIPES]) 1985 { 1986 int pipe; 1987 1988 spin_lock(&dev_priv->irq_lock); 1989 1990 if (!dev_priv->display_irqs_enabled) { 1991 spin_unlock(&dev_priv->irq_lock); 1992 return; 1993 } 1994 1995 for_each_pipe(dev_priv, pipe) { 1996 i915_reg_t reg; 1997 u32 status_mask, enable_mask, iir_bit = 0; 1998 1999 /* 2000 * PIPESTAT bits get signalled even when the interrupt is 2001 * disabled with the mask bits, and some of the status bits do 2002 * not generate interrupts at all (like the underrun bit). Hence 2003 * we need to be careful that we only handle what we want to 2004 * handle. 2005 */ 2006 2007 /* fifo underruns are filtered in the underrun handler. */ 2008 status_mask = PIPE_FIFO_UNDERRUN_STATUS; 2009 2010 switch (pipe) { 2011 case PIPE_A: 2012 iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT; 2013 break; 2014 case PIPE_B: 2015 iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; 2016 break; 2017 case PIPE_C: 2018 iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT; 2019 break; 2020 } 2021 if (iir & iir_bit) 2022 status_mask |= dev_priv->pipestat_irq_mask[pipe]; 2023 2024 if (!status_mask) 2025 continue; 2026 2027 reg = PIPESTAT(pipe); 2028 pipe_stats[pipe] = I915_READ(reg) & status_mask; 2029 enable_mask = i915_pipestat_enable_mask(dev_priv, pipe); 2030 2031 /* 2032 * Clear the PIPE*STAT regs before the IIR 2033 * 2034 * Toggle the enable bits to make sure we get an 2035 * edge in the ISR pipe event bit if we don't clear 2036 * all the enabled status bits. Otherwise the edge 2037 * triggered IIR on i965/g4x wouldn't notice that 2038 * an interrupt is still pending.
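 * In effect the first write below clears the latched status bits (momentarily dropping the enable bits as well) and the second write restores the enable mask, which forces that fresh edge.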
2039 */ 2040 if (pipe_stats[pipe]) { 2041 I915_WRITE(reg, pipe_stats[pipe]); 2042 I915_WRITE(reg, enable_mask); 2043 } 2044 } 2045 spin_unlock(&dev_priv->irq_lock); 2046 } 2047 2048 static void i8xx_pipestat_irq_handler(struct drm_i915_private *dev_priv, 2049 u16 iir, u32 pipe_stats[I915_MAX_PIPES]) 2050 { 2051 enum pipe pipe; 2052 2053 for_each_pipe(dev_priv, pipe) { 2054 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS) 2055 drm_handle_vblank(&dev_priv->drm, pipe); 2056 2057 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 2058 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 2059 2060 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 2061 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2062 } 2063 } 2064 2065 static void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv, 2066 u32 iir, u32 pipe_stats[I915_MAX_PIPES]) 2067 { 2068 bool blc_event = false; 2069 enum pipe pipe; 2070 2071 for_each_pipe(dev_priv, pipe) { 2072 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS) 2073 drm_handle_vblank(&dev_priv->drm, pipe); 2074 2075 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 2076 blc_event = true; 2077 2078 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 2079 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 2080 2081 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 2082 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2083 } 2084 2085 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 2086 intel_opregion_asle_intr(dev_priv); 2087 } 2088 2089 static void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv, 2090 u32 iir, u32 pipe_stats[I915_MAX_PIPES]) 2091 { 2092 bool blc_event = false; 2093 enum pipe pipe; 2094 2095 for_each_pipe(dev_priv, pipe) { 2096 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS) 2097 drm_handle_vblank(&dev_priv->drm, pipe); 2098 2099 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 2100 blc_event = true; 2101 2102 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 2103 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 2104 2105 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 2106 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2107 } 2108 2109 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 2110 intel_opregion_asle_intr(dev_priv); 2111 2112 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 2113 gmbus_irq_handler(dev_priv); 2114 } 2115 2116 static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv, 2117 u32 pipe_stats[I915_MAX_PIPES]) 2118 { 2119 enum pipe pipe; 2120 2121 for_each_pipe(dev_priv, pipe) { 2122 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS) 2123 drm_handle_vblank(&dev_priv->drm, pipe); 2124 2125 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 2126 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 2127 2128 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 2129 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2130 } 2131 2132 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 2133 gmbus_irq_handler(dev_priv); 2134 } 2135 2136 static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv) 2137 { 2138 u32 hotplug_status = 0, hotplug_status_mask; 2139 int i; 2140 2141 if (IS_G4X(dev_priv) || 2142 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 2143 hotplug_status_mask = HOTPLUG_INT_STATUS_G4X | 2144 DP_AUX_CHANNEL_MASK_INT_STATUS_G4X; 2145 else 2146 hotplug_status_mask = HOTPLUG_INT_STATUS_I915; 2147 2148 /* 2149 * We absolutely have to clear all the pending interrupt 2150 * bits in PORT_HOTPLUG_STAT. 
Otherwise the ISR port 2151 * interrupt bit won't have an edge, and the i965/g4x 2152 * edge triggered IIR will not notice that an interrupt 2153 * is still pending. We can't use PORT_HOTPLUG_EN to 2154 * guarantee the edge as the act of toggling the enable 2155 * bits can itself generate a new hotplug interrupt :( 2156 */ 2157 for (i = 0; i < 10; i++) { 2158 u32 tmp = I915_READ(PORT_HOTPLUG_STAT) & hotplug_status_mask; 2159 2160 if (tmp == 0) 2161 return hotplug_status; 2162 2163 hotplug_status |= tmp; 2164 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); 2165 } 2166 2167 WARN_ONCE(1, 2168 "PORT_HOTPLUG_STAT did not clear (0x%08x)\n", 2169 I915_READ(PORT_HOTPLUG_STAT)); 2170 2171 return hotplug_status; 2172 } 2173 2174 static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv, 2175 u32 hotplug_status) 2176 { 2177 u32 pin_mask = 0, long_mask = 0; 2178 2179 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || 2180 IS_CHERRYVIEW(dev_priv)) { 2181 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X; 2182 2183 if (hotplug_trigger) { 2184 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 2185 hotplug_trigger, hotplug_trigger, 2186 hpd_status_g4x, 2187 i9xx_port_hotplug_long_detect); 2188 2189 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2190 } 2191 2192 if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X) 2193 dp_aux_irq_handler(dev_priv); 2194 } else { 2195 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; 2196 2197 if (hotplug_trigger) { 2198 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 2199 hotplug_trigger, hotplug_trigger, 2200 hpd_status_i915, 2201 i9xx_port_hotplug_long_detect); 2202 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2203 } 2204 } 2205 } 2206 2207 static irqreturn_t valleyview_irq_handler(int irq, void *arg) 2208 { 2209 struct drm_i915_private *dev_priv = arg; 2210 irqreturn_t ret = IRQ_NONE; 2211 2212 if (!intel_irqs_enabled(dev_priv)) 2213 return IRQ_NONE; 2214 2215 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2216 disable_rpm_wakeref_asserts(&dev_priv->runtime_pm); 2217 2218 do { 2219 u32 iir, gt_iir, pm_iir; 2220 u32 pipe_stats[I915_MAX_PIPES] = {}; 2221 u32 hotplug_status = 0; 2222 u32 ier = 0; 2223 2224 gt_iir = I915_READ(GTIIR); 2225 pm_iir = I915_READ(GEN6_PMIIR); 2226 iir = I915_READ(VLV_IIR); 2227 2228 if (gt_iir == 0 && pm_iir == 0 && iir == 0) 2229 break; 2230 2231 ret = IRQ_HANDLED; 2232 2233 /* 2234 * Theory on interrupt generation, based on empirical evidence: 2235 * 2236 * x = ((VLV_IIR & VLV_IER) || 2237 * (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) && 2238 * (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE))); 2239 * 2240 * A CPU interrupt will only be raised when 'x' has a 0->1 edge. 2241 * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to 2242 * guarantee the CPU interrupt will be raised again even if we 2243 * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR 2244 * bits this time around. 
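 * Hence the ordering below: drop VLV_MASTER_IER and VLV_IER, ack the GT/PM and display sources, write VLV_IIR last (it is single buffered and reflects the PIPESTAT/PORT_HOTPLUG_STAT level), then restore IER and the master enable.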
2245 */ 2246 I915_WRITE(VLV_MASTER_IER, 0); 2247 ier = I915_READ(VLV_IER); 2248 I915_WRITE(VLV_IER, 0); 2249 2250 if (gt_iir) 2251 I915_WRITE(GTIIR, gt_iir); 2252 if (pm_iir) 2253 I915_WRITE(GEN6_PMIIR, pm_iir); 2254 2255 if (iir & I915_DISPLAY_PORT_INTERRUPT) 2256 hotplug_status = i9xx_hpd_irq_ack(dev_priv); 2257 2258 /* Call regardless, as some status bits might not be 2259 * signalled in iir */ 2260 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats); 2261 2262 if (iir & (I915_LPE_PIPE_A_INTERRUPT | 2263 I915_LPE_PIPE_B_INTERRUPT)) 2264 intel_lpe_audio_irq_handler(dev_priv); 2265 2266 /* 2267 * VLV_IIR is single buffered, and reflects the level 2268 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last. 2269 */ 2270 if (iir) 2271 I915_WRITE(VLV_IIR, iir); 2272 2273 I915_WRITE(VLV_IER, ier); 2274 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); 2275 2276 if (gt_iir) 2277 snb_gt_irq_handler(dev_priv, gt_iir); 2278 if (pm_iir) 2279 gen6_rps_irq_handler(dev_priv, pm_iir); 2280 2281 if (hotplug_status) 2282 i9xx_hpd_irq_handler(dev_priv, hotplug_status); 2283 2284 valleyview_pipestat_irq_handler(dev_priv, pipe_stats); 2285 } while (0); 2286 2287 enable_rpm_wakeref_asserts(&dev_priv->runtime_pm); 2288 2289 return ret; 2290 } 2291 2292 static irqreturn_t cherryview_irq_handler(int irq, void *arg) 2293 { 2294 struct drm_i915_private *dev_priv = arg; 2295 irqreturn_t ret = IRQ_NONE; 2296 2297 if (!intel_irqs_enabled(dev_priv)) 2298 return IRQ_NONE; 2299 2300 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2301 disable_rpm_wakeref_asserts(&dev_priv->runtime_pm); 2302 2303 do { 2304 u32 master_ctl, iir; 2305 u32 pipe_stats[I915_MAX_PIPES] = {}; 2306 u32 hotplug_status = 0; 2307 u32 gt_iir[4]; 2308 u32 ier = 0; 2309 2310 master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL; 2311 iir = I915_READ(VLV_IIR); 2312 2313 if (master_ctl == 0 && iir == 0) 2314 break; 2315 2316 ret = IRQ_HANDLED; 2317 2318 /* 2319 * Theory on interrupt generation, based on empirical evidence: 2320 * 2321 * x = ((VLV_IIR & VLV_IER) || 2322 * ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) && 2323 * (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL))); 2324 * 2325 * A CPU interrupt will only be raised when 'x' has a 0->1 edge. 2326 * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to 2327 * guarantee the CPU interrupt will be raised again even if we 2328 * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL 2329 * bits this time around. 2330 */ 2331 I915_WRITE(GEN8_MASTER_IRQ, 0); 2332 ier = I915_READ(VLV_IER); 2333 I915_WRITE(VLV_IER, 0); 2334 2335 gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir); 2336 2337 if (iir & I915_DISPLAY_PORT_INTERRUPT) 2338 hotplug_status = i9xx_hpd_irq_ack(dev_priv); 2339 2340 /* Call regardless, as some status bits might not be 2341 * signalled in iir */ 2342 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats); 2343 2344 if (iir & (I915_LPE_PIPE_A_INTERRUPT | 2345 I915_LPE_PIPE_B_INTERRUPT | 2346 I915_LPE_PIPE_C_INTERRUPT)) 2347 intel_lpe_audio_irq_handler(dev_priv); 2348 2349 /* 2350 * VLV_IIR is single buffered, and reflects the level 2351 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last. 
2352 */ 2353 if (iir) 2354 I915_WRITE(VLV_IIR, iir); 2355 2356 I915_WRITE(VLV_IER, ier); 2357 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 2358 2359 gen8_gt_irq_handler(dev_priv, master_ctl, gt_iir); 2360 2361 if (hotplug_status) 2362 i9xx_hpd_irq_handler(dev_priv, hotplug_status); 2363 2364 valleyview_pipestat_irq_handler(dev_priv, pipe_stats); 2365 } while (0); 2366 2367 enable_rpm_wakeref_asserts(&dev_priv->runtime_pm); 2368 2369 return ret; 2370 } 2371 2372 static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv, 2373 u32 hotplug_trigger, 2374 const u32 hpd[HPD_NUM_PINS]) 2375 { 2376 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; 2377 2378 /* 2379 * Somehow the PCH doesn't seem to really ack the interrupt to the CPU 2380 * unless we touch the hotplug register, even if hotplug_trigger is 2381 * zero. Not acking leads to "The master control interrupt lied (SDE)!" 2382 * errors. 2383 */ 2384 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); 2385 if (!hotplug_trigger) { 2386 u32 mask = PORTA_HOTPLUG_STATUS_MASK | 2387 PORTD_HOTPLUG_STATUS_MASK | 2388 PORTC_HOTPLUG_STATUS_MASK | 2389 PORTB_HOTPLUG_STATUS_MASK; 2390 dig_hotplug_reg &= ~mask; 2391 } 2392 2393 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); 2394 if (!hotplug_trigger) 2395 return; 2396 2397 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger, 2398 dig_hotplug_reg, hpd, 2399 pch_port_hotplug_long_detect); 2400 2401 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2402 } 2403 2404 static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) 2405 { 2406 int pipe; 2407 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK; 2408 2409 ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ibx); 2410 2411 if (pch_iir & SDE_AUDIO_POWER_MASK) { 2412 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >> 2413 SDE_AUDIO_POWER_SHIFT); 2414 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n", 2415 port_name(port)); 2416 } 2417 2418 if (pch_iir & SDE_AUX_MASK) 2419 dp_aux_irq_handler(dev_priv); 2420 2421 if (pch_iir & SDE_GMBUS) 2422 gmbus_irq_handler(dev_priv); 2423 2424 if (pch_iir & SDE_AUDIO_HDCP_MASK) 2425 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n"); 2426 2427 if (pch_iir & SDE_AUDIO_TRANS_MASK) 2428 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n"); 2429 2430 if (pch_iir & SDE_POISON) 2431 DRM_ERROR("PCH poison interrupt\n"); 2432 2433 if (pch_iir & SDE_FDI_MASK) 2434 for_each_pipe(dev_priv, pipe) 2435 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 2436 pipe_name(pipe), 2437 I915_READ(FDI_RX_IIR(pipe))); 2438 2439 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE)) 2440 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n"); 2441 2442 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR)) 2443 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n"); 2444 2445 if (pch_iir & SDE_TRANSA_FIFO_UNDER) 2446 intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A); 2447 2448 if (pch_iir & SDE_TRANSB_FIFO_UNDER) 2449 intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_B); 2450 } 2451 2452 static void ivb_err_int_handler(struct drm_i915_private *dev_priv) 2453 { 2454 u32 err_int = I915_READ(GEN7_ERR_INT); 2455 enum pipe pipe; 2456 2457 if (err_int & ERR_INT_POISON) 2458 DRM_ERROR("Poison interrupt\n"); 2459 2460 for_each_pipe(dev_priv, pipe) { 2461 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) 2462 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2463 2464 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) { 2465 if (IS_IVYBRIDGE(dev_priv)) 2466 ivb_pipe_crc_irq_handler(dev_priv, pipe); 2467 else
2468 hsw_pipe_crc_irq_handler(dev_priv, pipe); 2469 } 2470 } 2471 2472 I915_WRITE(GEN7_ERR_INT, err_int); 2473 } 2474 2475 static void cpt_serr_int_handler(struct drm_i915_private *dev_priv) 2476 { 2477 u32 serr_int = I915_READ(SERR_INT); 2478 enum pipe pipe; 2479 2480 if (serr_int & SERR_INT_POISON) 2481 DRM_ERROR("PCH poison interrupt\n"); 2482 2483 for_each_pipe(dev_priv, pipe) 2484 if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe)) 2485 intel_pch_fifo_underrun_irq_handler(dev_priv, pipe); 2486 2487 I915_WRITE(SERR_INT, serr_int); 2488 } 2489 2490 static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) 2491 { 2492 int pipe; 2493 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; 2494 2495 ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_cpt); 2496 2497 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) { 2498 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> 2499 SDE_AUDIO_POWER_SHIFT_CPT); 2500 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n", 2501 port_name(port)); 2502 } 2503 2504 if (pch_iir & SDE_AUX_MASK_CPT) 2505 dp_aux_irq_handler(dev_priv); 2506 2507 if (pch_iir & SDE_GMBUS_CPT) 2508 gmbus_irq_handler(dev_priv); 2509 2510 if (pch_iir & SDE_AUDIO_CP_REQ_CPT) 2511 DRM_DEBUG_DRIVER("Audio CP request interrupt\n"); 2512 2513 if (pch_iir & SDE_AUDIO_CP_CHG_CPT) 2514 DRM_DEBUG_DRIVER("Audio CP change interrupt\n"); 2515 2516 if (pch_iir & SDE_FDI_MASK_CPT) 2517 for_each_pipe(dev_priv, pipe) 2518 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 2519 pipe_name(pipe), 2520 I915_READ(FDI_RX_IIR(pipe))); 2521 2522 if (pch_iir & SDE_ERROR_CPT) 2523 cpt_serr_int_handler(dev_priv); 2524 } 2525 2526 static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir, 2527 const u32 *pins) 2528 { 2529 u32 ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_ICP; 2530 u32 tc_hotplug_trigger = pch_iir & SDE_TC_MASK_ICP; 2531 u32 pin_mask = 0, long_mask = 0; 2532 2533 if (ddi_hotplug_trigger) { 2534 u32 dig_hotplug_reg; 2535 2536 dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_DDI); 2537 I915_WRITE(SHOTPLUG_CTL_DDI, dig_hotplug_reg); 2538 2539 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 2540 ddi_hotplug_trigger, 2541 dig_hotplug_reg, pins, 2542 icp_ddi_port_hotplug_long_detect); 2543 } 2544 2545 if (tc_hotplug_trigger) { 2546 u32 dig_hotplug_reg; 2547 2548 dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_TC); 2549 I915_WRITE(SHOTPLUG_CTL_TC, dig_hotplug_reg); 2550 2551 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 2552 tc_hotplug_trigger, 2553 dig_hotplug_reg, pins, 2554 icp_tc_port_hotplug_long_detect); 2555 } 2556 2557 if (pin_mask) 2558 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2559 2560 if (pch_iir & SDE_GMBUS_ICP) 2561 gmbus_irq_handler(dev_priv); 2562 } 2563 2564 static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) 2565 { 2566 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT & 2567 ~SDE_PORTE_HOTPLUG_SPT; 2568 u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT; 2569 u32 pin_mask = 0, long_mask = 0; 2570 2571 if (hotplug_trigger) { 2572 u32 dig_hotplug_reg; 2573 2574 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); 2575 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); 2576 2577 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 2578 hotplug_trigger, dig_hotplug_reg, hpd_spt, 2579 spt_port_hotplug_long_detect); 2580 } 2581 2582 if (hotplug2_trigger) { 2583 u32 dig_hotplug_reg; 2584 2585 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2); 2586 I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg); 2587 2588 intel_get_hpd_pins(dev_priv, 
&pin_mask, &long_mask, 2589 hotplug2_trigger, dig_hotplug_reg, hpd_spt, 2590 spt_port_hotplug2_long_detect); 2591 } 2592 2593 if (pin_mask) 2594 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2595 2596 if (pch_iir & SDE_GMBUS_CPT) 2597 gmbus_irq_handler(dev_priv); 2598 } 2599 2600 static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv, 2601 u32 hotplug_trigger, 2602 const u32 hpd[HPD_NUM_PINS]) 2603 { 2604 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; 2605 2606 dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL); 2607 I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg); 2608 2609 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger, 2610 dig_hotplug_reg, hpd, 2611 ilk_port_hotplug_long_detect); 2612 2613 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2614 } 2615 2616 static void ilk_display_irq_handler(struct drm_i915_private *dev_priv, 2617 u32 de_iir) 2618 { 2619 enum pipe pipe; 2620 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG; 2621 2622 if (hotplug_trigger) 2623 ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ilk); 2624 2625 if (de_iir & DE_AUX_CHANNEL_A) 2626 dp_aux_irq_handler(dev_priv); 2627 2628 if (de_iir & DE_GSE) 2629 intel_opregion_asle_intr(dev_priv); 2630 2631 if (de_iir & DE_POISON) 2632 DRM_ERROR("Poison interrupt\n"); 2633 2634 for_each_pipe(dev_priv, pipe) { 2635 if (de_iir & DE_PIPE_VBLANK(pipe)) 2636 drm_handle_vblank(&dev_priv->drm, pipe); 2637 2638 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe)) 2639 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2640 2641 if (de_iir & DE_PIPE_CRC_DONE(pipe)) 2642 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 2643 } 2644 2645 /* check event from PCH */ 2646 if (de_iir & DE_PCH_EVENT) { 2647 u32 pch_iir = I915_READ(SDEIIR); 2648 2649 if (HAS_PCH_CPT(dev_priv)) 2650 cpt_irq_handler(dev_priv, pch_iir); 2651 else 2652 ibx_irq_handler(dev_priv, pch_iir); 2653 2654 /* should clear PCH hotplug event before clear CPU irq */ 2655 I915_WRITE(SDEIIR, pch_iir); 2656 } 2657 2658 if (IS_GEN(dev_priv, 5) && de_iir & DE_PCU_EVENT) 2659 ironlake_rps_change_irq_handler(dev_priv); 2660 } 2661 2662 static void ivb_display_irq_handler(struct drm_i915_private *dev_priv, 2663 u32 de_iir) 2664 { 2665 enum pipe pipe; 2666 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB; 2667 2668 if (hotplug_trigger) 2669 ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ivb); 2670 2671 if (de_iir & DE_ERR_INT_IVB) 2672 ivb_err_int_handler(dev_priv); 2673 2674 if (de_iir & DE_EDP_PSR_INT_HSW) { 2675 u32 psr_iir = I915_READ(EDP_PSR_IIR); 2676 2677 intel_psr_irq_handler(dev_priv, psr_iir); 2678 I915_WRITE(EDP_PSR_IIR, psr_iir); 2679 } 2680 2681 if (de_iir & DE_AUX_CHANNEL_A_IVB) 2682 dp_aux_irq_handler(dev_priv); 2683 2684 if (de_iir & DE_GSE_IVB) 2685 intel_opregion_asle_intr(dev_priv); 2686 2687 for_each_pipe(dev_priv, pipe) { 2688 if (de_iir & (DE_PIPE_VBLANK_IVB(pipe))) 2689 drm_handle_vblank(&dev_priv->drm, pipe); 2690 } 2691 2692 /* check event from PCH */ 2693 if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) { 2694 u32 pch_iir = I915_READ(SDEIIR); 2695 2696 cpt_irq_handler(dev_priv, pch_iir); 2697 2698 /* clear PCH hotplug event before clear CPU irq */ 2699 I915_WRITE(SDEIIR, pch_iir); 2700 } 2701 } 2702 2703 /* 2704 * To handle irqs with the minimum potential races with fresh interrupts, we: 2705 * 1 - Disable Master Interrupt Control. 2706 * 2 - Find the source(s) of the interrupt. 2707 * 3 - Clear the Interrupt Identity bits (IIR). 2708 * 4 - Process the interrupt(s) that had bits set in the IIRs. 
2709 * 5 - Re-enable Master Interrupt Control. 2710 */ 2711 static irqreturn_t ironlake_irq_handler(int irq, void *arg) 2712 { 2713 struct drm_i915_private *dev_priv = arg; 2714 u32 de_iir, gt_iir, de_ier, sde_ier = 0; 2715 irqreturn_t ret = IRQ_NONE; 2716 2717 if (!intel_irqs_enabled(dev_priv)) 2718 return IRQ_NONE; 2719 2720 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2721 disable_rpm_wakeref_asserts(&dev_priv->runtime_pm); 2722 2723 /* disable master interrupt before clearing iir */ 2724 de_ier = I915_READ(DEIER); 2725 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); 2726 2727 /* Disable south interrupts. We'll only write to SDEIIR once, so further 2728 * interrupts will be stored on its back queue, and then we'll be 2729 * able to process them after we restore SDEIER (as soon as we restore 2730 * it, we'll get an interrupt if SDEIIR still has something to process 2731 * due to its back queue). */ 2732 if (!HAS_PCH_NOP(dev_priv)) { 2733 sde_ier = I915_READ(SDEIER); 2734 I915_WRITE(SDEIER, 0); 2735 } 2736 2737 /* Find, clear, then process each source of interrupt */ 2738 2739 gt_iir = I915_READ(GTIIR); 2740 if (gt_iir) { 2741 I915_WRITE(GTIIR, gt_iir); 2742 ret = IRQ_HANDLED; 2743 if (INTEL_GEN(dev_priv) >= 6) 2744 snb_gt_irq_handler(dev_priv, gt_iir); 2745 else 2746 ilk_gt_irq_handler(dev_priv, gt_iir); 2747 } 2748 2749 de_iir = I915_READ(DEIIR); 2750 if (de_iir) { 2751 I915_WRITE(DEIIR, de_iir); 2752 ret = IRQ_HANDLED; 2753 if (INTEL_GEN(dev_priv) >= 7) 2754 ivb_display_irq_handler(dev_priv, de_iir); 2755 else 2756 ilk_display_irq_handler(dev_priv, de_iir); 2757 } 2758 2759 if (INTEL_GEN(dev_priv) >= 6) { 2760 u32 pm_iir = I915_READ(GEN6_PMIIR); 2761 if (pm_iir) { 2762 I915_WRITE(GEN6_PMIIR, pm_iir); 2763 ret = IRQ_HANDLED; 2764 gen6_rps_irq_handler(dev_priv, pm_iir); 2765 } 2766 } 2767 2768 I915_WRITE(DEIER, de_ier); 2769 if (!HAS_PCH_NOP(dev_priv)) 2770 I915_WRITE(SDEIER, sde_ier); 2771 2772 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2773 enable_rpm_wakeref_asserts(&dev_priv->runtime_pm); 2774 2775 return ret; 2776 } 2777 2778 static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv, 2779 u32 hotplug_trigger, 2780 const u32 hpd[HPD_NUM_PINS]) 2781 { 2782 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; 2783 2784 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); 2785 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); 2786 2787 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger, 2788 dig_hotplug_reg, hpd, 2789 bxt_port_hotplug_long_detect); 2790 2791 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2792 } 2793 2794 static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir) 2795 { 2796 u32 pin_mask = 0, long_mask = 0; 2797 u32 trigger_tc = iir & GEN11_DE_TC_HOTPLUG_MASK; 2798 u32 trigger_tbt = iir & GEN11_DE_TBT_HOTPLUG_MASK; 2799 2800 if (trigger_tc) { 2801 u32 dig_hotplug_reg; 2802 2803 dig_hotplug_reg = I915_READ(GEN11_TC_HOTPLUG_CTL); 2804 I915_WRITE(GEN11_TC_HOTPLUG_CTL, dig_hotplug_reg); 2805 2806 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tc, 2807 dig_hotplug_reg, hpd_gen11, 2808 gen11_port_hotplug_long_detect); 2809 } 2810 2811 if (trigger_tbt) { 2812 u32 dig_hotplug_reg; 2813 2814 dig_hotplug_reg = I915_READ(GEN11_TBT_HOTPLUG_CTL); 2815 I915_WRITE(GEN11_TBT_HOTPLUG_CTL, dig_hotplug_reg); 2816 2817 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tbt, 2818 dig_hotplug_reg, hpd_gen11, 2819 gen11_port_hotplug_long_detect); 2820 } 2821 2822 if
(pin_mask) 2823 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2824 else 2825 DRM_ERROR("Unexpected DE HPD interrupt 0x%08x\n", iir); 2826 } 2827 2828 static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv) 2829 { 2830 u32 mask = GEN8_AUX_CHANNEL_A; 2831 2832 if (INTEL_GEN(dev_priv) >= 9) 2833 mask |= GEN9_AUX_CHANNEL_B | 2834 GEN9_AUX_CHANNEL_C | 2835 GEN9_AUX_CHANNEL_D; 2836 2837 if (IS_CNL_WITH_PORT_F(dev_priv)) 2838 mask |= CNL_AUX_CHANNEL_F; 2839 2840 if (INTEL_GEN(dev_priv) >= 11) 2841 mask |= ICL_AUX_CHANNEL_E | 2842 CNL_AUX_CHANNEL_F; 2843 2844 return mask; 2845 } 2846 2847 static u32 gen8_de_pipe_fault_mask(struct drm_i915_private *dev_priv) 2848 { 2849 if (INTEL_GEN(dev_priv) >= 9) 2850 return GEN9_DE_PIPE_IRQ_FAULT_ERRORS; 2851 else 2852 return GEN8_DE_PIPE_IRQ_FAULT_ERRORS; 2853 } 2854 2855 static irqreturn_t 2856 gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) 2857 { 2858 irqreturn_t ret = IRQ_NONE; 2859 u32 iir; 2860 enum pipe pipe; 2861 2862 if (master_ctl & GEN8_DE_MISC_IRQ) { 2863 iir = I915_READ(GEN8_DE_MISC_IIR); 2864 if (iir) { 2865 bool found = false; 2866 2867 I915_WRITE(GEN8_DE_MISC_IIR, iir); 2868 ret = IRQ_HANDLED; 2869 2870 if (iir & GEN8_DE_MISC_GSE) { 2871 intel_opregion_asle_intr(dev_priv); 2872 found = true; 2873 } 2874 2875 if (iir & GEN8_DE_EDP_PSR) { 2876 u32 psr_iir = I915_READ(EDP_PSR_IIR); 2877 2878 intel_psr_irq_handler(dev_priv, psr_iir); 2879 I915_WRITE(EDP_PSR_IIR, psr_iir); 2880 found = true; 2881 } 2882 2883 if (!found) 2884 DRM_ERROR("Unexpected DE Misc interrupt\n"); 2885 } 2886 else 2887 DRM_ERROR("The master control interrupt lied (DE MISC)!\n"); 2888 } 2889 2890 if (INTEL_GEN(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) { 2891 iir = I915_READ(GEN11_DE_HPD_IIR); 2892 if (iir) { 2893 I915_WRITE(GEN11_DE_HPD_IIR, iir); 2894 ret = IRQ_HANDLED; 2895 gen11_hpd_irq_handler(dev_priv, iir); 2896 } else { 2897 DRM_ERROR("The master control interrupt lied, (DE HPD)!\n"); 2898 } 2899 } 2900 2901 if (master_ctl & GEN8_DE_PORT_IRQ) { 2902 iir = I915_READ(GEN8_DE_PORT_IIR); 2903 if (iir) { 2904 u32 tmp_mask; 2905 bool found = false; 2906 2907 I915_WRITE(GEN8_DE_PORT_IIR, iir); 2908 ret = IRQ_HANDLED; 2909 2910 if (iir & gen8_de_port_aux_mask(dev_priv)) { 2911 dp_aux_irq_handler(dev_priv); 2912 found = true; 2913 } 2914 2915 if (IS_GEN9_LP(dev_priv)) { 2916 tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK; 2917 if (tmp_mask) { 2918 bxt_hpd_irq_handler(dev_priv, tmp_mask, 2919 hpd_bxt); 2920 found = true; 2921 } 2922 } else if (IS_BROADWELL(dev_priv)) { 2923 tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG; 2924 if (tmp_mask) { 2925 ilk_hpd_irq_handler(dev_priv, 2926 tmp_mask, hpd_bdw); 2927 found = true; 2928 } 2929 } 2930 2931 if (IS_GEN9_LP(dev_priv) && (iir & BXT_DE_PORT_GMBUS)) { 2932 gmbus_irq_handler(dev_priv); 2933 found = true; 2934 } 2935 2936 if (!found) 2937 DRM_ERROR("Unexpected DE Port interrupt\n"); 2938 } 2939 else 2940 DRM_ERROR("The master control interrupt lied (DE PORT)!\n"); 2941 } 2942 2943 for_each_pipe(dev_priv, pipe) { 2944 u32 fault_errors; 2945 2946 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe))) 2947 continue; 2948 2949 iir = I915_READ(GEN8_DE_PIPE_IIR(pipe)); 2950 if (!iir) { 2951 DRM_ERROR("The master control interrupt lied (DE PIPE)!\n"); 2952 continue; 2953 } 2954 2955 ret = IRQ_HANDLED; 2956 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir); 2957 2958 if (iir & GEN8_PIPE_VBLANK) 2959 drm_handle_vblank(&dev_priv->drm, pipe); 2960 2961 if (iir & GEN8_PIPE_CDCLK_CRC_DONE) 2962 hsw_pipe_crc_irq_handler(dev_priv, 
pipe); 2963 2964 if (iir & GEN8_PIPE_FIFO_UNDERRUN) 2965 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2966 2967 fault_errors = iir & gen8_de_pipe_fault_mask(dev_priv); 2968 if (fault_errors) 2969 DRM_ERROR("Fault errors on pipe %c: 0x%08x\n", 2970 pipe_name(pipe), 2971 fault_errors); 2972 } 2973 2974 if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) && 2975 master_ctl & GEN8_DE_PCH_IRQ) { 2976 /* 2977 * FIXME(BDW): Assume for now that the new interrupt handling 2978 * scheme also closed the SDE interrupt handling race we've seen 2979 * on older pch-split platforms. But this needs testing. 2980 */ 2981 iir = I915_READ(SDEIIR); 2982 if (iir) { 2983 I915_WRITE(SDEIIR, iir); 2984 ret = IRQ_HANDLED; 2985 2986 if (INTEL_PCH_TYPE(dev_priv) >= PCH_MCC) 2987 icp_irq_handler(dev_priv, iir, hpd_mcc); 2988 else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) 2989 icp_irq_handler(dev_priv, iir, hpd_icp); 2990 else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT) 2991 spt_irq_handler(dev_priv, iir); 2992 else 2993 cpt_irq_handler(dev_priv, iir); 2994 } else { 2995 /* 2996 * Like on previous PCH there seems to be something 2997 * fishy going on with forwarding PCH interrupts. 2998 */ 2999 DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n"); 3000 } 3001 } 3002 3003 return ret; 3004 } 3005 3006 static inline u32 gen8_master_intr_disable(void __iomem * const regs) 3007 { 3008 raw_reg_write(regs, GEN8_MASTER_IRQ, 0); 3009 3010 /* 3011 * Now with master disabled, get a sample of level indications 3012 * for this interrupt. Indications will be cleared on related acks. 3013 * New indications can and will light up during processing, 3014 * and will generate new interrupt after enabling master. 3015 */ 3016 return raw_reg_read(regs, GEN8_MASTER_IRQ); 3017 } 3018 3019 static inline void gen8_master_intr_enable(void __iomem * const regs) 3020 { 3021 raw_reg_write(regs, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 3022 } 3023 3024 static irqreturn_t gen8_irq_handler(int irq, void *arg) 3025 { 3026 struct drm_i915_private *dev_priv = arg; 3027 void __iomem * const regs = dev_priv->uncore.regs; 3028 u32 master_ctl; 3029 u32 gt_iir[4]; 3030 3031 if (!intel_irqs_enabled(dev_priv)) 3032 return IRQ_NONE; 3033 3034 master_ctl = gen8_master_intr_disable(regs); 3035 if (!master_ctl) { 3036 gen8_master_intr_enable(regs); 3037 return IRQ_NONE; 3038 } 3039 3040 /* Find, clear, then process each source of interrupt */ 3041 gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir); 3042 3043 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 3044 if (master_ctl & ~GEN8_GT_IRQS) { 3045 disable_rpm_wakeref_asserts(&dev_priv->runtime_pm); 3046 gen8_de_irq_handler(dev_priv, master_ctl); 3047 enable_rpm_wakeref_asserts(&dev_priv->runtime_pm); 3048 } 3049 3050 gen8_master_intr_enable(regs); 3051 3052 gen8_gt_irq_handler(dev_priv, master_ctl, gt_iir); 3053 3054 return IRQ_HANDLED; 3055 } 3056 3057 static u32 3058 gen11_gt_engine_identity(struct intel_gt *gt, 3059 const unsigned int bank, const unsigned int bit) 3060 { 3061 void __iomem * const regs = gt->uncore->regs; 3062 u32 timeout_ts; 3063 u32 ident; 3064 3065 lockdep_assert_held(>->i915->irq_lock); 3066 3067 raw_reg_write(regs, GEN11_IIR_REG_SELECTOR(bank), BIT(bit)); 3068 3069 /* 3070 * NB: Specs do not specify how long to spin wait, 3071 * so we do ~100us as an educated guess. 
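 * local_clock() returns nanoseconds, so the >> 10 below gives roughly microseconds; the loop polls the identity register until GEN11_INTR_DATA_VALID is set or ~100us have passed.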
3072 */ 3073 timeout_ts = (local_clock() >> 10) + 100; 3074 do { 3075 ident = raw_reg_read(regs, GEN11_INTR_IDENTITY_REG(bank)); 3076 } while (!(ident & GEN11_INTR_DATA_VALID) && 3077 !time_after32(local_clock() >> 10, timeout_ts)); 3078 3079 if (unlikely(!(ident & GEN11_INTR_DATA_VALID))) { 3080 DRM_ERROR("INTR_IDENTITY_REG%u:%u 0x%08x not valid!\n", 3081 bank, bit, ident); 3082 return 0; 3083 } 3084 3085 raw_reg_write(regs, GEN11_INTR_IDENTITY_REG(bank), 3086 GEN11_INTR_DATA_VALID); 3087 3088 return ident; 3089 } 3090 3091 static void 3092 gen11_other_irq_handler(struct intel_gt *gt, const u8 instance, 3093 const u16 iir) 3094 { 3095 if (instance == OTHER_GUC_INSTANCE) 3096 return guc_irq_handler(>->uc.guc, iir); 3097 3098 if (instance == OTHER_GTPM_INSTANCE) 3099 return gen11_rps_irq_handler(gt, iir); 3100 3101 WARN_ONCE(1, "unhandled other interrupt instance=0x%x, iir=0x%x\n", 3102 instance, iir); 3103 } 3104 3105 static void 3106 gen11_engine_irq_handler(struct intel_gt *gt, const u8 class, 3107 const u8 instance, const u16 iir) 3108 { 3109 struct intel_engine_cs *engine; 3110 3111 if (instance <= MAX_ENGINE_INSTANCE) 3112 engine = gt->i915->engine_class[class][instance]; 3113 else 3114 engine = NULL; 3115 3116 if (likely(engine)) 3117 return gen8_cs_irq_handler(engine, iir); 3118 3119 WARN_ONCE(1, "unhandled engine interrupt class=0x%x, instance=0x%x\n", 3120 class, instance); 3121 } 3122 3123 static void 3124 gen11_gt_identity_handler(struct intel_gt *gt, const u32 identity) 3125 { 3126 const u8 class = GEN11_INTR_ENGINE_CLASS(identity); 3127 const u8 instance = GEN11_INTR_ENGINE_INSTANCE(identity); 3128 const u16 intr = GEN11_INTR_ENGINE_INTR(identity); 3129 3130 if (unlikely(!intr)) 3131 return; 3132 3133 if (class <= COPY_ENGINE_CLASS) 3134 return gen11_engine_irq_handler(gt, class, instance, intr); 3135 3136 if (class == OTHER_CLASS) 3137 return gen11_other_irq_handler(gt, instance, intr); 3138 3139 WARN_ONCE(1, "unknown interrupt class=0x%x, instance=0x%x, intr=0x%x\n", 3140 class, instance, intr); 3141 } 3142 3143 static void 3144 gen11_gt_bank_handler(struct intel_gt *gt, const unsigned int bank) 3145 { 3146 void __iomem * const regs = gt->uncore->regs; 3147 unsigned long intr_dw; 3148 unsigned int bit; 3149 3150 lockdep_assert_held(>->i915->irq_lock); 3151 3152 intr_dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank)); 3153 3154 for_each_set_bit(bit, &intr_dw, 32) { 3155 const u32 ident = gen11_gt_engine_identity(gt, bank, bit); 3156 3157 gen11_gt_identity_handler(gt, ident); 3158 } 3159 3160 /* Clear must be after shared has been served for engine */ 3161 raw_reg_write(regs, GEN11_GT_INTR_DW(bank), intr_dw); 3162 } 3163 3164 static void 3165 gen11_gt_irq_handler(struct intel_gt *gt, const u32 master_ctl) 3166 { 3167 struct drm_i915_private *i915 = gt->i915; 3168 unsigned int bank; 3169 3170 spin_lock(&i915->irq_lock); 3171 3172 for (bank = 0; bank < 2; bank++) { 3173 if (master_ctl & GEN11_GT_DW_IRQ(bank)) 3174 gen11_gt_bank_handler(gt, bank); 3175 } 3176 3177 spin_unlock(&i915->irq_lock); 3178 } 3179 3180 static u32 3181 gen11_gu_misc_irq_ack(struct intel_gt *gt, const u32 master_ctl) 3182 { 3183 void __iomem * const regs = gt->uncore->regs; 3184 u32 iir; 3185 3186 if (!(master_ctl & GEN11_GU_MISC_IRQ)) 3187 return 0; 3188 3189 iir = raw_reg_read(regs, GEN11_GU_MISC_IIR); 3190 if (likely(iir)) 3191 raw_reg_write(regs, GEN11_GU_MISC_IIR, iir); 3192 3193 return iir; 3194 } 3195 3196 static void 3197 gen11_gu_misc_irq_handler(struct intel_gt *gt, const u32 iir) 3198 { 3199 if (iir 
& GEN11_GU_MISC_GSE) 3200 intel_opregion_asle_intr(gt->i915); 3201 } 3202 3203 static inline u32 gen11_master_intr_disable(void __iomem * const regs) 3204 { 3205 raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0); 3206 3207 /* 3208 * Now with master disabled, get a sample of level indications 3209 * for this interrupt. Indications will be cleared on related acks. 3210 * New indications can and will light up during processing, 3211 * and will generate new interrupt after enabling master. 3212 */ 3213 return raw_reg_read(regs, GEN11_GFX_MSTR_IRQ); 3214 } 3215 3216 static inline void gen11_master_intr_enable(void __iomem * const regs) 3217 { 3218 raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ); 3219 } 3220 3221 static irqreturn_t gen11_irq_handler(int irq, void *arg) 3222 { 3223 struct drm_i915_private * const i915 = arg; 3224 void __iomem * const regs = i915->uncore.regs; 3225 struct intel_gt *gt = &i915->gt; 3226 u32 master_ctl; 3227 u32 gu_misc_iir; 3228 3229 if (!intel_irqs_enabled(i915)) 3230 return IRQ_NONE; 3231 3232 master_ctl = gen11_master_intr_disable(regs); 3233 if (!master_ctl) { 3234 gen11_master_intr_enable(regs); 3235 return IRQ_NONE; 3236 } 3237 3238 /* Find, clear, then process each source of interrupt. */ 3239 gen11_gt_irq_handler(gt, master_ctl); 3240 3241 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 3242 if (master_ctl & GEN11_DISPLAY_IRQ) { 3243 const u32 disp_ctl = raw_reg_read(regs, GEN11_DISPLAY_INT_CTL); 3244 3245 disable_rpm_wakeref_asserts(&i915->runtime_pm); 3246 /* 3247 * GEN11_DISPLAY_INT_CTL has same format as GEN8_MASTER_IRQ 3248 * for the display related bits. 3249 */ 3250 gen8_de_irq_handler(i915, disp_ctl); 3251 enable_rpm_wakeref_asserts(&i915->runtime_pm); 3252 } 3253 3254 gu_misc_iir = gen11_gu_misc_irq_ack(gt, master_ctl); 3255 3256 gen11_master_intr_enable(regs); 3257 3258 gen11_gu_misc_irq_handler(gt, gu_misc_iir); 3259 3260 return IRQ_HANDLED; 3261 } 3262 3263 /* Called from drm generic code, passed 'crtc' which 3264 * we use as a pipe index 3265 */ 3266 int i8xx_enable_vblank(struct drm_crtc *crtc) 3267 { 3268 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 3269 enum pipe pipe = to_intel_crtc(crtc)->pipe; 3270 unsigned long irqflags; 3271 3272 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3273 i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS); 3274 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3275 3276 return 0; 3277 } 3278 3279 int i945gm_enable_vblank(struct drm_crtc *crtc) 3280 { 3281 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 3282 3283 if (dev_priv->i945gm_vblank.enabled++ == 0) 3284 schedule_work(&dev_priv->i945gm_vblank.work); 3285 3286 return i8xx_enable_vblank(crtc); 3287 } 3288 3289 int i965_enable_vblank(struct drm_crtc *crtc) 3290 { 3291 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 3292 enum pipe pipe = to_intel_crtc(crtc)->pipe; 3293 unsigned long irqflags; 3294 3295 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3296 i915_enable_pipestat(dev_priv, pipe, 3297 PIPE_START_VBLANK_INTERRUPT_STATUS); 3298 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3299 3300 return 0; 3301 } 3302 3303 int ilk_enable_vblank(struct drm_crtc *crtc) 3304 { 3305 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 3306 enum pipe pipe = to_intel_crtc(crtc)->pipe; 3307 unsigned long irqflags; 3308 u32 bit = INTEL_GEN(dev_priv) >= 7 ? 
3309 DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe); 3310 3311 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3312 ilk_enable_display_irq(dev_priv, bit); 3313 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3314 3315 /* Even though there is no DMC, frame counter can get stuck when 3316 * PSR is active as no frames are generated. 3317 */ 3318 if (HAS_PSR(dev_priv)) 3319 drm_crtc_vblank_restore(crtc); 3320 3321 return 0; 3322 } 3323 3324 int bdw_enable_vblank(struct drm_crtc *crtc) 3325 { 3326 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 3327 enum pipe pipe = to_intel_crtc(crtc)->pipe; 3328 unsigned long irqflags; 3329 3330 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3331 bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK); 3332 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3333 3334 /* Even if there is no DMC, frame counter can get stuck when 3335 * PSR is active as no frames are generated, so check only for PSR. 3336 */ 3337 if (HAS_PSR(dev_priv)) 3338 drm_crtc_vblank_restore(crtc); 3339 3340 return 0; 3341 } 3342 3343 /* Called from drm generic code, passed 'crtc' which 3344 * we use as a pipe index 3345 */ 3346 void i8xx_disable_vblank(struct drm_crtc *crtc) 3347 { 3348 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 3349 enum pipe pipe = to_intel_crtc(crtc)->pipe; 3350 unsigned long irqflags; 3351 3352 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3353 i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS); 3354 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3355 } 3356 3357 void i945gm_disable_vblank(struct drm_crtc *crtc) 3358 { 3359 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 3360 3361 i8xx_disable_vblank(crtc); 3362 3363 if (--dev_priv->i945gm_vblank.enabled == 0) 3364 schedule_work(&dev_priv->i945gm_vblank.work); 3365 } 3366 3367 void i965_disable_vblank(struct drm_crtc *crtc) 3368 { 3369 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 3370 enum pipe pipe = to_intel_crtc(crtc)->pipe; 3371 unsigned long irqflags; 3372 3373 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3374 i915_disable_pipestat(dev_priv, pipe, 3375 PIPE_START_VBLANK_INTERRUPT_STATUS); 3376 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3377 } 3378 3379 void ilk_disable_vblank(struct drm_crtc *crtc) 3380 { 3381 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 3382 enum pipe pipe = to_intel_crtc(crtc)->pipe; 3383 unsigned long irqflags; 3384 u32 bit = INTEL_GEN(dev_priv) >= 7 ? 3385 DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe); 3386 3387 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3388 ilk_disable_display_irq(dev_priv, bit); 3389 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3390 } 3391 3392 void bdw_disable_vblank(struct drm_crtc *crtc) 3393 { 3394 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 3395 enum pipe pipe = to_intel_crtc(crtc)->pipe; 3396 unsigned long irqflags; 3397 3398 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3399 bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK); 3400 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3401 } 3402 3403 static void i945gm_vblank_work_func(struct work_struct *work) 3404 { 3405 struct drm_i915_private *dev_priv = 3406 container_of(work, struct drm_i915_private, i945gm_vblank.work); 3407 3408 /* 3409 * Vblank interrupts fail to wake up the device from C3, 3410 * hence we want to prevent C3 usage while vblank interrupts 3411 * are enabled. 
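 * This is done with a PM_QOS_CPU_DMA_LATENCY request: while any vblank interrupt is enabled the request asks for a latency just under the C3 exit latency (see cstate_disable_latency()), otherwise it is relaxed back to PM_QOS_DEFAULT_VALUE.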
3412 */ 3413 pm_qos_update_request(&dev_priv->i945gm_vblank.pm_qos, 3414 READ_ONCE(dev_priv->i945gm_vblank.enabled) ? 3415 dev_priv->i945gm_vblank.c3_disable_latency : 3416 PM_QOS_DEFAULT_VALUE); 3417 } 3418 3419 static int cstate_disable_latency(const char *name) 3420 { 3421 const struct cpuidle_driver *drv; 3422 int i; 3423 3424 drv = cpuidle_get_driver(); 3425 if (!drv) 3426 return 0; 3427 3428 for (i = 0; i < drv->state_count; i++) { 3429 const struct cpuidle_state *state = &drv->states[i]; 3430 3431 if (!strcmp(state->name, name)) 3432 return state->exit_latency ? 3433 state->exit_latency - 1 : 0; 3434 } 3435 3436 return 0; 3437 } 3438 3439 static void i945gm_vblank_work_init(struct drm_i915_private *dev_priv) 3440 { 3441 INIT_WORK(&dev_priv->i945gm_vblank.work, 3442 i945gm_vblank_work_func); 3443 3444 dev_priv->i945gm_vblank.c3_disable_latency = 3445 cstate_disable_latency("C3"); 3446 pm_qos_add_request(&dev_priv->i945gm_vblank.pm_qos, 3447 PM_QOS_CPU_DMA_LATENCY, 3448 PM_QOS_DEFAULT_VALUE); 3449 } 3450 3451 static void i945gm_vblank_work_fini(struct drm_i915_private *dev_priv) 3452 { 3453 cancel_work_sync(&dev_priv->i945gm_vblank.work); 3454 pm_qos_remove_request(&dev_priv->i945gm_vblank.pm_qos); 3455 } 3456 3457 static void ibx_irq_reset(struct drm_i915_private *dev_priv) 3458 { 3459 struct intel_uncore *uncore = &dev_priv->uncore; 3460 3461 if (HAS_PCH_NOP(dev_priv)) 3462 return; 3463 3464 GEN3_IRQ_RESET(uncore, SDE); 3465 3466 if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv)) 3467 I915_WRITE(SERR_INT, 0xffffffff); 3468 } 3469 3470 /* 3471 * SDEIER is also touched by the interrupt handler to work around missed PCH 3472 * interrupts. Hence we can't update it after the interrupt handler is enabled - 3473 * instead we unconditionally enable all PCH interrupt sources here, but then 3474 * only unmask them as needed with SDEIMR. 3475 * 3476 * This function needs to be called before interrupts are enabled. 
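 * (ironlake_irq_handler() above is the path that saves SDEIER, writes it to zero around the SDEIIR read, and restores it afterwards.)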
3477 */ 3478 static void ibx_irq_pre_postinstall(struct drm_i915_private *dev_priv) 3479 { 3480 if (HAS_PCH_NOP(dev_priv)) 3481 return; 3482 3483 WARN_ON(I915_READ(SDEIER) != 0); 3484 I915_WRITE(SDEIER, 0xffffffff); 3485 POSTING_READ(SDEIER); 3486 } 3487 3488 static void gen5_gt_irq_reset(struct drm_i915_private *dev_priv) 3489 { 3490 struct intel_uncore *uncore = &dev_priv->uncore; 3491 3492 GEN3_IRQ_RESET(uncore, GT); 3493 if (INTEL_GEN(dev_priv) >= 6) 3494 GEN3_IRQ_RESET(uncore, GEN6_PM); 3495 } 3496 3497 static void vlv_display_irq_reset(struct drm_i915_private *dev_priv) 3498 { 3499 struct intel_uncore *uncore = &dev_priv->uncore; 3500 3501 if (IS_CHERRYVIEW(dev_priv)) 3502 intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_CHV); 3503 else 3504 intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK); 3505 3506 i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0); 3507 intel_uncore_write(uncore, PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3508 3509 i9xx_pipestat_irq_reset(dev_priv); 3510 3511 GEN3_IRQ_RESET(uncore, VLV_); 3512 dev_priv->irq_mask = ~0u; 3513 } 3514 3515 static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv) 3516 { 3517 struct intel_uncore *uncore = &dev_priv->uncore; 3518 3519 u32 pipestat_mask; 3520 u32 enable_mask; 3521 enum pipe pipe; 3522 3523 pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS; 3524 3525 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); 3526 for_each_pipe(dev_priv, pipe) 3527 i915_enable_pipestat(dev_priv, pipe, pipestat_mask); 3528 3529 enable_mask = I915_DISPLAY_PORT_INTERRUPT | 3530 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3531 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3532 I915_LPE_PIPE_A_INTERRUPT | 3533 I915_LPE_PIPE_B_INTERRUPT; 3534 3535 if (IS_CHERRYVIEW(dev_priv)) 3536 enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT | 3537 I915_LPE_PIPE_C_INTERRUPT; 3538 3539 WARN_ON(dev_priv->irq_mask != ~0u); 3540 3541 dev_priv->irq_mask = ~enable_mask; 3542 3543 GEN3_IRQ_INIT(uncore, VLV_, dev_priv->irq_mask, enable_mask); 3544 } 3545 3546 /* drm_dma.h hooks 3547 */ 3548 static void ironlake_irq_reset(struct drm_i915_private *dev_priv) 3549 { 3550 struct intel_uncore *uncore = &dev_priv->uncore; 3551 3552 GEN3_IRQ_RESET(uncore, DE); 3553 if (IS_GEN(dev_priv, 7)) 3554 intel_uncore_write(uncore, GEN7_ERR_INT, 0xffffffff); 3555 3556 if (IS_HASWELL(dev_priv)) { 3557 intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff); 3558 intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff); 3559 } 3560 3561 gen5_gt_irq_reset(dev_priv); 3562 3563 ibx_irq_reset(dev_priv); 3564 } 3565 3566 static void valleyview_irq_reset(struct drm_i915_private *dev_priv) 3567 { 3568 I915_WRITE(VLV_MASTER_IER, 0); 3569 POSTING_READ(VLV_MASTER_IER); 3570 3571 gen5_gt_irq_reset(dev_priv); 3572 3573 spin_lock_irq(&dev_priv->irq_lock); 3574 if (dev_priv->display_irqs_enabled) 3575 vlv_display_irq_reset(dev_priv); 3576 spin_unlock_irq(&dev_priv->irq_lock); 3577 } 3578 3579 static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv) 3580 { 3581 struct intel_uncore *uncore = &dev_priv->uncore; 3582 3583 GEN8_IRQ_RESET_NDX(uncore, GT, 0); 3584 GEN8_IRQ_RESET_NDX(uncore, GT, 1); 3585 GEN8_IRQ_RESET_NDX(uncore, GT, 2); 3586 GEN8_IRQ_RESET_NDX(uncore, GT, 3); 3587 } 3588 3589 static void gen8_irq_reset(struct drm_i915_private *dev_priv) 3590 { 3591 struct intel_uncore *uncore = &dev_priv->uncore; 3592 int pipe; 3593 3594 gen8_master_intr_disable(dev_priv->uncore.regs); 3595 3596 gen8_gt_irq_reset(dev_priv); 3597 3598 
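	/* Mask and clear any pending PSR interrupts as well. */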
intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff); 3599 intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff); 3600 3601 for_each_pipe(dev_priv, pipe) 3602 if (intel_display_power_is_enabled(dev_priv, 3603 POWER_DOMAIN_PIPE(pipe))) 3604 GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe); 3605 3606 GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_); 3607 GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_); 3608 GEN3_IRQ_RESET(uncore, GEN8_PCU_); 3609 3610 if (HAS_PCH_SPLIT(dev_priv)) 3611 ibx_irq_reset(dev_priv); 3612 } 3613 3614 static void gen11_gt_irq_reset(struct intel_gt *gt) 3615 { 3616 struct intel_uncore *uncore = gt->uncore; 3617 3618 /* Disable RCS, BCS, VCS and VECS class engines. */ 3619 intel_uncore_write(uncore, GEN11_RENDER_COPY_INTR_ENABLE, 0); 3620 intel_uncore_write(uncore, GEN11_VCS_VECS_INTR_ENABLE, 0); 3621 3622 /* Restore masks irqs on RCS, BCS, VCS and VECS engines. */ 3623 intel_uncore_write(uncore, GEN11_RCS0_RSVD_INTR_MASK, ~0); 3624 intel_uncore_write(uncore, GEN11_BCS_RSVD_INTR_MASK, ~0); 3625 intel_uncore_write(uncore, GEN11_VCS0_VCS1_INTR_MASK, ~0); 3626 intel_uncore_write(uncore, GEN11_VCS2_VCS3_INTR_MASK, ~0); 3627 intel_uncore_write(uncore, GEN11_VECS0_VECS1_INTR_MASK, ~0); 3628 3629 intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0); 3630 intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_MASK, ~0); 3631 intel_uncore_write(uncore, GEN11_GUC_SG_INTR_ENABLE, 0); 3632 intel_uncore_write(uncore, GEN11_GUC_SG_INTR_MASK, ~0); 3633 } 3634 3635 static void gen11_irq_reset(struct drm_i915_private *dev_priv) 3636 { 3637 struct intel_uncore *uncore = &dev_priv->uncore; 3638 int pipe; 3639 3640 gen11_master_intr_disable(dev_priv->uncore.regs); 3641 3642 gen11_gt_irq_reset(&dev_priv->gt); 3643 3644 intel_uncore_write(uncore, GEN11_DISPLAY_INT_CTL, 0); 3645 3646 intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff); 3647 intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff); 3648 3649 for_each_pipe(dev_priv, pipe) 3650 if (intel_display_power_is_enabled(dev_priv, 3651 POWER_DOMAIN_PIPE(pipe))) 3652 GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe); 3653 3654 GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_); 3655 GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_); 3656 GEN3_IRQ_RESET(uncore, GEN11_DE_HPD_); 3657 GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_); 3658 GEN3_IRQ_RESET(uncore, GEN8_PCU_); 3659 3660 if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) 3661 GEN3_IRQ_RESET(uncore, SDE); 3662 } 3663 3664 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv, 3665 u8 pipe_mask) 3666 { 3667 struct intel_uncore *uncore = &dev_priv->uncore; 3668 3669 u32 extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN; 3670 enum pipe pipe; 3671 3672 spin_lock_irq(&dev_priv->irq_lock); 3673 3674 if (!intel_irqs_enabled(dev_priv)) { 3675 spin_unlock_irq(&dev_priv->irq_lock); 3676 return; 3677 } 3678 3679 for_each_pipe_masked(dev_priv, pipe, pipe_mask) 3680 GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe, 3681 dev_priv->de_irq_mask[pipe], 3682 ~dev_priv->de_irq_mask[pipe] | extra_ier); 3683 3684 spin_unlock_irq(&dev_priv->irq_lock); 3685 } 3686 3687 void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv, 3688 u8 pipe_mask) 3689 { 3690 struct intel_uncore *uncore = &dev_priv->uncore; 3691 enum pipe pipe; 3692 3693 spin_lock_irq(&dev_priv->irq_lock); 3694 3695 if (!intel_irqs_enabled(dev_priv)) { 3696 spin_unlock_irq(&dev_priv->irq_lock); 3697 return; 3698 } 3699 3700 for_each_pipe_masked(dev_priv, pipe, pipe_mask) 3701 GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe); 3702 3703 spin_unlock_irq(&dev_priv->irq_lock); 3704 3705 /* make sure we're 
done processing display irqs */ 3706 intel_synchronize_irq(dev_priv); 3707 } 3708 3709 static void cherryview_irq_reset(struct drm_i915_private *dev_priv) 3710 { 3711 struct intel_uncore *uncore = &dev_priv->uncore; 3712 3713 I915_WRITE(GEN8_MASTER_IRQ, 0); 3714 POSTING_READ(GEN8_MASTER_IRQ); 3715 3716 gen8_gt_irq_reset(dev_priv); 3717 3718 GEN3_IRQ_RESET(uncore, GEN8_PCU_); 3719 3720 spin_lock_irq(&dev_priv->irq_lock); 3721 if (dev_priv->display_irqs_enabled) 3722 vlv_display_irq_reset(dev_priv); 3723 spin_unlock_irq(&dev_priv->irq_lock); 3724 } 3725 3726 static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv, 3727 const u32 hpd[HPD_NUM_PINS]) 3728 { 3729 struct intel_encoder *encoder; 3730 u32 enabled_irqs = 0; 3731 3732 for_each_intel_encoder(&dev_priv->drm, encoder) 3733 if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED) 3734 enabled_irqs |= hpd[encoder->hpd_pin]; 3735 3736 return enabled_irqs; 3737 } 3738 3739 static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv) 3740 { 3741 u32 hotplug; 3742 3743 /* 3744 * Enable digital hotplug on the PCH, and configure the DP short pulse 3745 * duration to 2ms (which is the minimum in the Display Port spec). 3746 * The pulse duration bits are reserved on LPT+. 3747 */ 3748 hotplug = I915_READ(PCH_PORT_HOTPLUG); 3749 hotplug &= ~(PORTB_PULSE_DURATION_MASK | 3750 PORTC_PULSE_DURATION_MASK | 3751 PORTD_PULSE_DURATION_MASK); 3752 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms; 3753 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms; 3754 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms; 3755 /* 3756 * When CPU and PCH are on the same package, port A 3757 * HPD must be enabled in both north and south. 3758 */ 3759 if (HAS_PCH_LPT_LP(dev_priv)) 3760 hotplug |= PORTA_HOTPLUG_ENABLE; 3761 I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 3762 } 3763 3764 static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv) 3765 { 3766 u32 hotplug_irqs, enabled_irqs; 3767 3768 if (HAS_PCH_IBX(dev_priv)) { 3769 hotplug_irqs = SDE_HOTPLUG_MASK; 3770 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ibx); 3771 } else { 3772 hotplug_irqs = SDE_HOTPLUG_MASK_CPT; 3773 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_cpt); 3774 } 3775 3776 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 3777 3778 ibx_hpd_detection_setup(dev_priv); 3779 } 3780 3781 static void icp_hpd_detection_setup(struct drm_i915_private *dev_priv) 3782 { 3783 u32 hotplug; 3784 3785 hotplug = I915_READ(SHOTPLUG_CTL_DDI); 3786 hotplug |= ICP_DDIA_HPD_ENABLE | 3787 ICP_DDIB_HPD_ENABLE; 3788 I915_WRITE(SHOTPLUG_CTL_DDI, hotplug); 3789 3790 hotplug = I915_READ(SHOTPLUG_CTL_TC); 3791 hotplug |= ICP_TC_HPD_ENABLE(PORT_TC1) | 3792 ICP_TC_HPD_ENABLE(PORT_TC2) | 3793 ICP_TC_HPD_ENABLE(PORT_TC3) | 3794 ICP_TC_HPD_ENABLE(PORT_TC4); 3795 I915_WRITE(SHOTPLUG_CTL_TC, hotplug); 3796 } 3797 3798 static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv) 3799 { 3800 u32 hotplug_irqs, enabled_irqs; 3801 3802 hotplug_irqs = SDE_DDI_MASK_ICP | SDE_TC_MASK_ICP; 3803 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_icp); 3804 3805 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 3806 3807 icp_hpd_detection_setup(dev_priv); 3808 } 3809 3810 static void gen11_hpd_detection_setup(struct drm_i915_private *dev_priv) 3811 { 3812 u32 hotplug; 3813 3814 hotplug = I915_READ(GEN11_TC_HOTPLUG_CTL); 3815 hotplug |= GEN11_HOTPLUG_CTL_ENABLE(PORT_TC1) | 3816 GEN11_HOTPLUG_CTL_ENABLE(PORT_TC2) | 3817 
GEN11_HOTPLUG_CTL_ENABLE(PORT_TC3) | 3818 GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4); 3819 I915_WRITE(GEN11_TC_HOTPLUG_CTL, hotplug); 3820 3821 hotplug = I915_READ(GEN11_TBT_HOTPLUG_CTL); 3822 hotplug |= GEN11_HOTPLUG_CTL_ENABLE(PORT_TC1) | 3823 GEN11_HOTPLUG_CTL_ENABLE(PORT_TC2) | 3824 GEN11_HOTPLUG_CTL_ENABLE(PORT_TC3) | 3825 GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4); 3826 I915_WRITE(GEN11_TBT_HOTPLUG_CTL, hotplug); 3827 } 3828 3829 static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv) 3830 { 3831 u32 hotplug_irqs, enabled_irqs; 3832 u32 val; 3833 3834 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_gen11); 3835 hotplug_irqs = GEN11_DE_TC_HOTPLUG_MASK | GEN11_DE_TBT_HOTPLUG_MASK; 3836 3837 val = I915_READ(GEN11_DE_HPD_IMR); 3838 val &= ~hotplug_irqs; 3839 I915_WRITE(GEN11_DE_HPD_IMR, val); 3840 POSTING_READ(GEN11_DE_HPD_IMR); 3841 3842 gen11_hpd_detection_setup(dev_priv); 3843 3844 if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) 3845 icp_hpd_irq_setup(dev_priv); 3846 } 3847 3848 static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv) 3849 { 3850 u32 val, hotplug; 3851 3852 /* Display WA #1179 WaHardHangonHotPlug: cnp */ 3853 if (HAS_PCH_CNP(dev_priv)) { 3854 val = I915_READ(SOUTH_CHICKEN1); 3855 val &= ~CHASSIS_CLK_REQ_DURATION_MASK; 3856 val |= CHASSIS_CLK_REQ_DURATION(0xf); 3857 I915_WRITE(SOUTH_CHICKEN1, val); 3858 } 3859 3860 /* Enable digital hotplug on the PCH */ 3861 hotplug = I915_READ(PCH_PORT_HOTPLUG); 3862 hotplug |= PORTA_HOTPLUG_ENABLE | 3863 PORTB_HOTPLUG_ENABLE | 3864 PORTC_HOTPLUG_ENABLE | 3865 PORTD_HOTPLUG_ENABLE; 3866 I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 3867 3868 hotplug = I915_READ(PCH_PORT_HOTPLUG2); 3869 hotplug |= PORTE_HOTPLUG_ENABLE; 3870 I915_WRITE(PCH_PORT_HOTPLUG2, hotplug); 3871 } 3872 3873 static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv) 3874 { 3875 u32 hotplug_irqs, enabled_irqs; 3876 3877 hotplug_irqs = SDE_HOTPLUG_MASK_SPT; 3878 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_spt); 3879 3880 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 3881 3882 spt_hpd_detection_setup(dev_priv); 3883 } 3884 3885 static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv) 3886 { 3887 u32 hotplug; 3888 3889 /* 3890 * Enable digital hotplug on the CPU, and configure the DP short pulse 3891 * duration to 2ms (which is the minimum in the Display Port spec) 3892 * The pulse duration bits are reserved on HSW+. 
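	 * This mirrors the PCH-side programming in ibx_hpd_detection_setup()
	 * above, which uses the same 2ms short-pulse duration.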
3893 */ 3894 hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL); 3895 hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK; 3896 hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE | 3897 DIGITAL_PORTA_PULSE_DURATION_2ms; 3898 I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug); 3899 } 3900 3901 static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv) 3902 { 3903 u32 hotplug_irqs, enabled_irqs; 3904 3905 if (INTEL_GEN(dev_priv) >= 8) { 3906 hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG; 3907 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bdw); 3908 3909 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs); 3910 } else if (INTEL_GEN(dev_priv) >= 7) { 3911 hotplug_irqs = DE_DP_A_HOTPLUG_IVB; 3912 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ivb); 3913 3914 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs); 3915 } else { 3916 hotplug_irqs = DE_DP_A_HOTPLUG; 3917 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ilk); 3918 3919 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs); 3920 } 3921 3922 ilk_hpd_detection_setup(dev_priv); 3923 3924 ibx_hpd_irq_setup(dev_priv); 3925 } 3926 3927 static void __bxt_hpd_detection_setup(struct drm_i915_private *dev_priv, 3928 u32 enabled_irqs) 3929 { 3930 u32 hotplug; 3931 3932 hotplug = I915_READ(PCH_PORT_HOTPLUG); 3933 hotplug |= PORTA_HOTPLUG_ENABLE | 3934 PORTB_HOTPLUG_ENABLE | 3935 PORTC_HOTPLUG_ENABLE; 3936 3937 DRM_DEBUG_KMS("Invert bit setting: hp_ctl:%x hp_port:%x\n", 3938 hotplug, enabled_irqs); 3939 hotplug &= ~BXT_DDI_HPD_INVERT_MASK; 3940 3941 /* 3942 * For BXT invert bit has to be set based on AOB design 3943 * for HPD detection logic, update it based on VBT fields. 3944 */ 3945 if ((enabled_irqs & BXT_DE_PORT_HP_DDIA) && 3946 intel_bios_is_port_hpd_inverted(dev_priv, PORT_A)) 3947 hotplug |= BXT_DDIA_HPD_INVERT; 3948 if ((enabled_irqs & BXT_DE_PORT_HP_DDIB) && 3949 intel_bios_is_port_hpd_inverted(dev_priv, PORT_B)) 3950 hotplug |= BXT_DDIB_HPD_INVERT; 3951 if ((enabled_irqs & BXT_DE_PORT_HP_DDIC) && 3952 intel_bios_is_port_hpd_inverted(dev_priv, PORT_C)) 3953 hotplug |= BXT_DDIC_HPD_INVERT; 3954 3955 I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 3956 } 3957 3958 static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv) 3959 { 3960 __bxt_hpd_detection_setup(dev_priv, BXT_DE_PORT_HOTPLUG_MASK); 3961 } 3962 3963 static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv) 3964 { 3965 u32 hotplug_irqs, enabled_irqs; 3966 3967 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bxt); 3968 hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK; 3969 3970 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs); 3971 3972 __bxt_hpd_detection_setup(dev_priv, enabled_irqs); 3973 } 3974 3975 static void ibx_irq_postinstall(struct drm_i915_private *dev_priv) 3976 { 3977 u32 mask; 3978 3979 if (HAS_PCH_NOP(dev_priv)) 3980 return; 3981 3982 if (HAS_PCH_IBX(dev_priv)) 3983 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON; 3984 else if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv)) 3985 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT; 3986 else 3987 mask = SDE_GMBUS_CPT; 3988 3989 gen3_assert_iir_is_zero(&dev_priv->uncore, SDEIIR); 3990 I915_WRITE(SDEIMR, ~mask); 3991 3992 if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) || 3993 HAS_PCH_LPT(dev_priv)) 3994 ibx_hpd_detection_setup(dev_priv); 3995 else 3996 spt_hpd_detection_setup(dev_priv); 3997 } 3998 3999 static void gen5_gt_irq_postinstall(struct drm_i915_private *dev_priv) 4000 { 4001 struct intel_uncore *uncore = &dev_priv->uncore; 4002 u32 pm_irqs, gt_irqs; 4003 4004 pm_irqs = gt_irqs = 0; 4005 4006 
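	/*
	 * Start with every GT interrupt masked; only the L3 parity bit is
	 * unmasked below (when present), while the user-interrupt sources
	 * collected in gt_irqs are written to IER by GEN3_IRQ_INIT().
	 */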
dev_priv->gt_irq_mask = ~0; 4007 if (HAS_L3_DPF(dev_priv)) { 4008 /* L3 parity interrupt is always unmasked. */ 4009 dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev_priv); 4010 gt_irqs |= GT_PARITY_ERROR(dev_priv); 4011 } 4012 4013 gt_irqs |= GT_RENDER_USER_INTERRUPT; 4014 if (IS_GEN(dev_priv, 5)) { 4015 gt_irqs |= ILK_BSD_USER_INTERRUPT; 4016 } else { 4017 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT; 4018 } 4019 4020 GEN3_IRQ_INIT(uncore, GT, dev_priv->gt_irq_mask, gt_irqs); 4021 4022 if (INTEL_GEN(dev_priv) >= 6) { 4023 /* 4024 * RPS interrupts will get enabled/disabled on demand when RPS 4025 * itself is enabled/disabled. 4026 */ 4027 if (HAS_ENGINE(dev_priv, VECS0)) { 4028 pm_irqs |= PM_VEBOX_USER_INTERRUPT; 4029 dev_priv->gt.pm_ier |= PM_VEBOX_USER_INTERRUPT; 4030 } 4031 4032 dev_priv->gt.pm_imr = 0xffffffff; 4033 GEN3_IRQ_INIT(uncore, GEN6_PM, dev_priv->gt.pm_imr, pm_irqs); 4034 } 4035 } 4036 4037 static void ironlake_irq_postinstall(struct drm_i915_private *dev_priv) 4038 { 4039 struct intel_uncore *uncore = &dev_priv->uncore; 4040 u32 display_mask, extra_mask; 4041 4042 if (INTEL_GEN(dev_priv) >= 7) { 4043 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | 4044 DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB); 4045 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB | 4046 DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB | 4047 DE_DP_A_HOTPLUG_IVB); 4048 } else { 4049 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | 4050 DE_AUX_CHANNEL_A | DE_PIPEB_CRC_DONE | 4051 DE_PIPEA_CRC_DONE | DE_POISON); 4052 extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT | 4053 DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN | 4054 DE_DP_A_HOTPLUG); 4055 } 4056 4057 if (IS_HASWELL(dev_priv)) { 4058 gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR); 4059 intel_psr_irq_control(dev_priv, dev_priv->psr.debug); 4060 display_mask |= DE_EDP_PSR_INT_HSW; 4061 } 4062 4063 dev_priv->irq_mask = ~display_mask; 4064 4065 ibx_irq_pre_postinstall(dev_priv); 4066 4067 GEN3_IRQ_INIT(uncore, DE, dev_priv->irq_mask, 4068 display_mask | extra_mask); 4069 4070 gen5_gt_irq_postinstall(dev_priv); 4071 4072 ilk_hpd_detection_setup(dev_priv); 4073 4074 ibx_irq_postinstall(dev_priv); 4075 4076 if (IS_IRONLAKE_M(dev_priv)) { 4077 /* Enable PCU event interrupts 4078 * 4079 * spinlocking not required here for correctness since interrupt 4080 * setup is guaranteed to run in single-threaded context. But we 4081 * need it to make the assert_spin_locked happy. 
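		 * (the assertion is checked in the ilk_enable_display_irq()
		 * call below)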
*/ 4082 spin_lock_irq(&dev_priv->irq_lock); 4083 ilk_enable_display_irq(dev_priv, DE_PCU_EVENT); 4084 spin_unlock_irq(&dev_priv->irq_lock); 4085 } 4086 } 4087 4088 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv) 4089 { 4090 lockdep_assert_held(&dev_priv->irq_lock); 4091 4092 if (dev_priv->display_irqs_enabled) 4093 return; 4094 4095 dev_priv->display_irqs_enabled = true; 4096 4097 if (intel_irqs_enabled(dev_priv)) { 4098 vlv_display_irq_reset(dev_priv); 4099 vlv_display_irq_postinstall(dev_priv); 4100 } 4101 } 4102 4103 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv) 4104 { 4105 lockdep_assert_held(&dev_priv->irq_lock); 4106 4107 if (!dev_priv->display_irqs_enabled) 4108 return; 4109 4110 dev_priv->display_irqs_enabled = false; 4111 4112 if (intel_irqs_enabled(dev_priv)) 4113 vlv_display_irq_reset(dev_priv); 4114 } 4115 4116 4117 static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv) 4118 { 4119 gen5_gt_irq_postinstall(dev_priv); 4120 4121 spin_lock_irq(&dev_priv->irq_lock); 4122 if (dev_priv->display_irqs_enabled) 4123 vlv_display_irq_postinstall(dev_priv); 4124 spin_unlock_irq(&dev_priv->irq_lock); 4125 4126 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); 4127 POSTING_READ(VLV_MASTER_IER); 4128 } 4129 4130 static void gen8_gt_irq_postinstall(struct drm_i915_private *i915) 4131 { 4132 struct intel_gt *gt = &i915->gt; 4133 struct intel_uncore *uncore = gt->uncore; 4134 4135 /* These are interrupts we'll toggle with the ring mask register */ 4136 u32 gt_interrupts[] = { 4137 (GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT | 4138 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT | 4139 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT | 4140 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT), 4141 4142 (GT_RENDER_USER_INTERRUPT << GEN8_VCS0_IRQ_SHIFT | 4143 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS0_IRQ_SHIFT | 4144 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT | 4145 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT), 4146 4147 0, 4148 4149 (GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT | 4150 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT) 4151 }; 4152 4153 gt->pm_ier = 0x0; 4154 gt->pm_imr = ~gt->pm_ier; 4155 GEN8_IRQ_INIT_NDX(uncore, GT, 0, ~gt_interrupts[0], gt_interrupts[0]); 4156 GEN8_IRQ_INIT_NDX(uncore, GT, 1, ~gt_interrupts[1], gt_interrupts[1]); 4157 /* 4158 * RPS interrupts will get enabled/disabled on demand when RPS itself 4159 * is enabled/disabled. Same will be the case for GuC interrupts.
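	 * Until that point GT index 2 (the PM bank) is initialized below with
	 * everything masked (pm_imr = ~0) and nothing enabled (pm_ier = 0).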
4160 */ 4161 GEN8_IRQ_INIT_NDX(uncore, GT, 2, gt->pm_imr, gt->pm_ier); 4162 GEN8_IRQ_INIT_NDX(uncore, GT, 3, ~gt_interrupts[3], gt_interrupts[3]); 4163 } 4164 4165 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) 4166 { 4167 struct intel_uncore *uncore = &dev_priv->uncore; 4168 4169 u32 de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE; 4170 u32 de_pipe_enables; 4171 u32 de_port_masked = GEN8_AUX_CHANNEL_A; 4172 u32 de_port_enables; 4173 u32 de_misc_masked = GEN8_DE_EDP_PSR; 4174 enum pipe pipe; 4175 4176 if (INTEL_GEN(dev_priv) <= 10) 4177 de_misc_masked |= GEN8_DE_MISC_GSE; 4178 4179 if (INTEL_GEN(dev_priv) >= 9) { 4180 de_pipe_masked |= GEN9_DE_PIPE_IRQ_FAULT_ERRORS; 4181 de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C | 4182 GEN9_AUX_CHANNEL_D; 4183 if (IS_GEN9_LP(dev_priv)) 4184 de_port_masked |= BXT_DE_PORT_GMBUS; 4185 } else { 4186 de_pipe_masked |= GEN8_DE_PIPE_IRQ_FAULT_ERRORS; 4187 } 4188 4189 if (INTEL_GEN(dev_priv) >= 11) 4190 de_port_masked |= ICL_AUX_CHANNEL_E; 4191 4192 if (IS_CNL_WITH_PORT_F(dev_priv) || INTEL_GEN(dev_priv) >= 11) 4193 de_port_masked |= CNL_AUX_CHANNEL_F; 4194 4195 de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK | 4196 GEN8_PIPE_FIFO_UNDERRUN; 4197 4198 de_port_enables = de_port_masked; 4199 if (IS_GEN9_LP(dev_priv)) 4200 de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK; 4201 else if (IS_BROADWELL(dev_priv)) 4202 de_port_enables |= GEN8_PORT_DP_A_HOTPLUG; 4203 4204 gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR); 4205 intel_psr_irq_control(dev_priv, dev_priv->psr.debug); 4206 4207 for_each_pipe(dev_priv, pipe) { 4208 dev_priv->de_irq_mask[pipe] = ~de_pipe_masked; 4209 4210 if (intel_display_power_is_enabled(dev_priv, 4211 POWER_DOMAIN_PIPE(pipe))) 4212 GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe, 4213 dev_priv->de_irq_mask[pipe], 4214 de_pipe_enables); 4215 } 4216 4217 GEN3_IRQ_INIT(uncore, GEN8_DE_PORT_, ~de_port_masked, de_port_enables); 4218 GEN3_IRQ_INIT(uncore, GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked); 4219 4220 if (INTEL_GEN(dev_priv) >= 11) { 4221 u32 de_hpd_masked = 0; 4222 u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK | 4223 GEN11_DE_TBT_HOTPLUG_MASK; 4224 4225 GEN3_IRQ_INIT(uncore, GEN11_DE_HPD_, ~de_hpd_masked, 4226 de_hpd_enables); 4227 gen11_hpd_detection_setup(dev_priv); 4228 } else if (IS_GEN9_LP(dev_priv)) { 4229 bxt_hpd_detection_setup(dev_priv); 4230 } else if (IS_BROADWELL(dev_priv)) { 4231 ilk_hpd_detection_setup(dev_priv); 4232 } 4233 } 4234 4235 static void gen8_irq_postinstall(struct drm_i915_private *dev_priv) 4236 { 4237 if (HAS_PCH_SPLIT(dev_priv)) 4238 ibx_irq_pre_postinstall(dev_priv); 4239 4240 gen8_gt_irq_postinstall(dev_priv); 4241 gen8_de_irq_postinstall(dev_priv); 4242 4243 if (HAS_PCH_SPLIT(dev_priv)) 4244 ibx_irq_postinstall(dev_priv); 4245 4246 gen8_master_intr_enable(dev_priv->uncore.regs); 4247 } 4248 4249 static void gen11_gt_irq_postinstall(struct intel_gt *gt) 4250 { 4251 const u32 irqs = GT_RENDER_USER_INTERRUPT | GT_CONTEXT_SWITCH_INTERRUPT; 4252 struct intel_uncore *uncore = gt->uncore; 4253 const u32 dmask = irqs << 16 | irqs; 4254 const u32 smask = irqs << 16; 4255 4256 BUILD_BUG_ON(irqs & 0xffff0000); 4257 4258 /* Enable RCS, BCS, VCS and VECS class interrupts. */ 4259 intel_uncore_write(uncore, GEN11_RENDER_COPY_INTR_ENABLE, dmask); 4260 intel_uncore_write(uncore, GEN11_VCS_VECS_INTR_ENABLE, dmask); 4261 4262 /* Unmask irqs on RCS, BCS, VCS and VECS engines. 
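	 * Each 32-bit mask register written below packs two 16-bit engine
	 * fields: registers shared by two engines take ~dmask (both halves
	 * unmasked), while those whose second field is reserved take ~smask.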
*/ 4263 intel_uncore_write(uncore, GEN11_RCS0_RSVD_INTR_MASK, ~smask); 4264 intel_uncore_write(uncore, GEN11_BCS_RSVD_INTR_MASK, ~smask); 4265 intel_uncore_write(uncore, GEN11_VCS0_VCS1_INTR_MASK, ~dmask); 4266 intel_uncore_write(uncore, GEN11_VCS2_VCS3_INTR_MASK, ~dmask); 4267 intel_uncore_write(uncore, GEN11_VECS0_VECS1_INTR_MASK, ~dmask); 4268 4269 /* 4270 * RPS interrupts will get enabled/disabled on demand when RPS itself 4271 * is enabled/disabled. 4272 */ 4273 gt->pm_ier = 0x0; 4274 gt->pm_imr = ~gt->pm_ier; 4275 intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0); 4276 intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_MASK, ~0); 4277 4278 /* Same thing for GuC interrupts */ 4279 intel_uncore_write(uncore, GEN11_GUC_SG_INTR_ENABLE, 0); 4280 intel_uncore_write(uncore, GEN11_GUC_SG_INTR_MASK, ~0); 4281 } 4282 4283 static void icp_irq_postinstall(struct drm_i915_private *dev_priv) 4284 { 4285 u32 mask = SDE_GMBUS_ICP; 4286 4287 WARN_ON(I915_READ(SDEIER) != 0); 4288 I915_WRITE(SDEIER, 0xffffffff); 4289 POSTING_READ(SDEIER); 4290 4291 gen3_assert_iir_is_zero(&dev_priv->uncore, SDEIIR); 4292 I915_WRITE(SDEIMR, ~mask); 4293 4294 icp_hpd_detection_setup(dev_priv); 4295 } 4296 4297 static void gen11_irq_postinstall(struct drm_i915_private *dev_priv) 4298 { 4299 struct intel_uncore *uncore = &dev_priv->uncore; 4300 u32 gu_misc_masked = GEN11_GU_MISC_GSE; 4301 4302 if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) 4303 icp_irq_postinstall(dev_priv); 4304 4305 gen11_gt_irq_postinstall(&dev_priv->gt); 4306 gen8_de_irq_postinstall(dev_priv); 4307 4308 GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked); 4309 4310 I915_WRITE(GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE); 4311 4312 gen11_master_intr_enable(uncore->regs); 4313 POSTING_READ(GEN11_GFX_MSTR_IRQ); 4314 } 4315 4316 static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv) 4317 { 4318 gen8_gt_irq_postinstall(dev_priv); 4319 4320 spin_lock_irq(&dev_priv->irq_lock); 4321 if (dev_priv->display_irqs_enabled) 4322 vlv_display_irq_postinstall(dev_priv); 4323 spin_unlock_irq(&dev_priv->irq_lock); 4324 4325 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 4326 POSTING_READ(GEN8_MASTER_IRQ); 4327 } 4328 4329 static void i8xx_irq_reset(struct drm_i915_private *dev_priv) 4330 { 4331 struct intel_uncore *uncore = &dev_priv->uncore; 4332 4333 i9xx_pipestat_irq_reset(dev_priv); 4334 4335 GEN2_IRQ_RESET(uncore); 4336 } 4337 4338 static void i8xx_irq_postinstall(struct drm_i915_private *dev_priv) 4339 { 4340 struct intel_uncore *uncore = &dev_priv->uncore; 4341 u16 enable_mask; 4342 4343 intel_uncore_write16(uncore, 4344 EMR, 4345 ~(I915_ERROR_PAGE_TABLE | 4346 I915_ERROR_MEMORY_REFRESH)); 4347 4348 /* Unmask the interrupts that we always want on. */ 4349 dev_priv->irq_mask = 4350 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 4351 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 4352 I915_MASTER_ERROR_INTERRUPT); 4353 4354 enable_mask = 4355 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 4356 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 4357 I915_MASTER_ERROR_INTERRUPT | 4358 I915_USER_INTERRUPT; 4359 4360 GEN2_IRQ_INIT(uncore, dev_priv->irq_mask, enable_mask); 4361 4362 /* Interrupt setup is already guaranteed to be single-threaded, this is 4363 * just to make the assert_spin_locked check happy. 
*/ 4364 spin_lock_irq(&dev_priv->irq_lock); 4365 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 4366 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 4367 spin_unlock_irq(&dev_priv->irq_lock); 4368 } 4369 4370 static void i8xx_error_irq_ack(struct drm_i915_private *i915, 4371 u16 *eir, u16 *eir_stuck) 4372 { 4373 struct intel_uncore *uncore = &i915->uncore; 4374 u16 emr; 4375 4376 *eir = intel_uncore_read16(uncore, EIR); 4377 4378 if (*eir) 4379 intel_uncore_write16(uncore, EIR, *eir); 4380 4381 *eir_stuck = intel_uncore_read16(uncore, EIR); 4382 if (*eir_stuck == 0) 4383 return; 4384 4385 /* 4386 * Toggle all EMR bits to make sure we get an edge 4387 * in the ISR master error bit if we don't clear 4388 * all the EIR bits. Otherwise the edge triggered 4389 * IIR on i965/g4x wouldn't notice that an interrupt 4390 * is still pending. Also some EIR bits can't be 4391 * cleared except by handling the underlying error 4392 * (or by a GPU reset) so we mask any bit that 4393 * remains set. 4394 */ 4395 emr = intel_uncore_read16(uncore, EMR); 4396 intel_uncore_write16(uncore, EMR, 0xffff); 4397 intel_uncore_write16(uncore, EMR, emr | *eir_stuck); 4398 } 4399 4400 static void i8xx_error_irq_handler(struct drm_i915_private *dev_priv, 4401 u16 eir, u16 eir_stuck) 4402 { 4403 DRM_DEBUG("Master Error: EIR 0x%04x\n", eir); 4404 4405 if (eir_stuck) 4406 DRM_DEBUG_DRIVER("EIR stuck: 0x%04x, masked\n", eir_stuck); 4407 } 4408 4409 static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv, 4410 u32 *eir, u32 *eir_stuck) 4411 { 4412 u32 emr; 4413 4414 *eir = I915_READ(EIR); 4415 4416 I915_WRITE(EIR, *eir); 4417 4418 *eir_stuck = I915_READ(EIR); 4419 if (*eir_stuck == 0) 4420 return; 4421 4422 /* 4423 * Toggle all EMR bits to make sure we get an edge 4424 * in the ISR master error bit if we don't clear 4425 * all the EIR bits. Otherwise the edge triggered 4426 * IIR on i965/g4x wouldn't notice that an interrupt 4427 * is still pending. Also some EIR bits can't be 4428 * cleared except by handling the underlying error 4429 * (or by a GPU reset) so we mask any bit that 4430 * remains set. 
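	 * For example, if EIR still reads 0x00000010 after the write-back
	 * above, EMR is flipped to 0xffffffff and then left at
	 * emr | 0x00000010, keeping that stuck error source masked from now on.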
4431 */ 4432 emr = I915_READ(EMR); 4433 I915_WRITE(EMR, 0xffffffff); 4434 I915_WRITE(EMR, emr | *eir_stuck); 4435 } 4436 4437 static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv, 4438 u32 eir, u32 eir_stuck) 4439 { 4440 DRM_DEBUG("Master Error, EIR 0x%08x\n", eir); 4441 4442 if (eir_stuck) 4443 DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masked\n", eir_stuck); 4444 } 4445 4446 static irqreturn_t i8xx_irq_handler(int irq, void *arg) 4447 { 4448 struct drm_i915_private *dev_priv = arg; 4449 irqreturn_t ret = IRQ_NONE; 4450 4451 if (!intel_irqs_enabled(dev_priv)) 4452 return IRQ_NONE; 4453 4454 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 4455 disable_rpm_wakeref_asserts(&dev_priv->runtime_pm); 4456 4457 do { 4458 u32 pipe_stats[I915_MAX_PIPES] = {}; 4459 u16 eir = 0, eir_stuck = 0; 4460 u16 iir; 4461 4462 iir = intel_uncore_read16(&dev_priv->uncore, GEN2_IIR); 4463 if (iir == 0) 4464 break; 4465 4466 ret = IRQ_HANDLED; 4467 4468 /* Call regardless, as some status bits might not be 4469 * signalled in iir */ 4470 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats); 4471 4472 if (iir & I915_MASTER_ERROR_INTERRUPT) 4473 i8xx_error_irq_ack(dev_priv, &eir, &eir_stuck); 4474 4475 intel_uncore_write16(&dev_priv->uncore, GEN2_IIR, iir); 4476 4477 if (iir & I915_USER_INTERRUPT) 4478 intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]); 4479 4480 if (iir & I915_MASTER_ERROR_INTERRUPT) 4481 i8xx_error_irq_handler(dev_priv, eir, eir_stuck); 4482 4483 i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats); 4484 } while (0); 4485 4486 enable_rpm_wakeref_asserts(&dev_priv->runtime_pm); 4487 4488 return ret; 4489 } 4490 4491 static void i915_irq_reset(struct drm_i915_private *dev_priv) 4492 { 4493 struct intel_uncore *uncore = &dev_priv->uncore; 4494 4495 if (I915_HAS_HOTPLUG(dev_priv)) { 4496 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); 4497 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 4498 } 4499 4500 i9xx_pipestat_irq_reset(dev_priv); 4501 4502 GEN3_IRQ_RESET(uncore, GEN2_); 4503 } 4504 4505 static void i915_irq_postinstall(struct drm_i915_private *dev_priv) 4506 { 4507 struct intel_uncore *uncore = &dev_priv->uncore; 4508 u32 enable_mask; 4509 4510 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | 4511 I915_ERROR_MEMORY_REFRESH)); 4512 4513 /* Unmask the interrupts that we always want on. */ 4514 dev_priv->irq_mask = 4515 ~(I915_ASLE_INTERRUPT | 4516 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 4517 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 4518 I915_MASTER_ERROR_INTERRUPT); 4519 4520 enable_mask = 4521 I915_ASLE_INTERRUPT | 4522 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 4523 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 4524 I915_MASTER_ERROR_INTERRUPT | 4525 I915_USER_INTERRUPT; 4526 4527 if (I915_HAS_HOTPLUG(dev_priv)) { 4528 /* Enable in IER... */ 4529 enable_mask |= I915_DISPLAY_PORT_INTERRUPT; 4530 /* and unmask in IMR */ 4531 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT; 4532 } 4533 4534 GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask); 4535 4536 /* Interrupt setup is already guaranteed to be single-threaded, this is 4537 * just to make the assert_spin_locked check happy. 
*/ 4538 spin_lock_irq(&dev_priv->irq_lock); 4539 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 4540 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 4541 spin_unlock_irq(&dev_priv->irq_lock); 4542 4543 i915_enable_asle_pipestat(dev_priv); 4544 } 4545 4546 static irqreturn_t i915_irq_handler(int irq, void *arg) 4547 { 4548 struct drm_i915_private *dev_priv = arg; 4549 irqreturn_t ret = IRQ_NONE; 4550 4551 if (!intel_irqs_enabled(dev_priv)) 4552 return IRQ_NONE; 4553 4554 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 4555 disable_rpm_wakeref_asserts(&dev_priv->runtime_pm); 4556 4557 do { 4558 u32 pipe_stats[I915_MAX_PIPES] = {}; 4559 u32 eir = 0, eir_stuck = 0; 4560 u32 hotplug_status = 0; 4561 u32 iir; 4562 4563 iir = I915_READ(GEN2_IIR); 4564 if (iir == 0) 4565 break; 4566 4567 ret = IRQ_HANDLED; 4568 4569 if (I915_HAS_HOTPLUG(dev_priv) && 4570 iir & I915_DISPLAY_PORT_INTERRUPT) 4571 hotplug_status = i9xx_hpd_irq_ack(dev_priv); 4572 4573 /* Call regardless, as some status bits might not be 4574 * signalled in iir */ 4575 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats); 4576 4577 if (iir & I915_MASTER_ERROR_INTERRUPT) 4578 i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck); 4579 4580 I915_WRITE(GEN2_IIR, iir); 4581 4582 if (iir & I915_USER_INTERRUPT) 4583 intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]); 4584 4585 if (iir & I915_MASTER_ERROR_INTERRUPT) 4586 i9xx_error_irq_handler(dev_priv, eir, eir_stuck); 4587 4588 if (hotplug_status) 4589 i9xx_hpd_irq_handler(dev_priv, hotplug_status); 4590 4591 i915_pipestat_irq_handler(dev_priv, iir, pipe_stats); 4592 } while (0); 4593 4594 enable_rpm_wakeref_asserts(&dev_priv->runtime_pm); 4595 4596 return ret; 4597 } 4598 4599 static void i965_irq_reset(struct drm_i915_private *dev_priv) 4600 { 4601 struct intel_uncore *uncore = &dev_priv->uncore; 4602 4603 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); 4604 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 4605 4606 i9xx_pipestat_irq_reset(dev_priv); 4607 4608 GEN3_IRQ_RESET(uncore, GEN2_); 4609 } 4610 4611 static void i965_irq_postinstall(struct drm_i915_private *dev_priv) 4612 { 4613 struct intel_uncore *uncore = &dev_priv->uncore; 4614 u32 enable_mask; 4615 u32 error_mask; 4616 4617 /* 4618 * Enable some error detection, note the instruction error mask 4619 * bit is reserved, so we leave it masked. 4620 */ 4621 if (IS_G4X(dev_priv)) { 4622 error_mask = ~(GM45_ERROR_PAGE_TABLE | 4623 GM45_ERROR_MEM_PRIV | 4624 GM45_ERROR_CP_PRIV | 4625 I915_ERROR_MEMORY_REFRESH); 4626 } else { 4627 error_mask = ~(I915_ERROR_PAGE_TABLE | 4628 I915_ERROR_MEMORY_REFRESH); 4629 } 4630 I915_WRITE(EMR, error_mask); 4631 4632 /* Unmask the interrupts that we always want on. */ 4633 dev_priv->irq_mask = 4634 ~(I915_ASLE_INTERRUPT | 4635 I915_DISPLAY_PORT_INTERRUPT | 4636 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 4637 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 4638 I915_MASTER_ERROR_INTERRUPT); 4639 4640 enable_mask = 4641 I915_ASLE_INTERRUPT | 4642 I915_DISPLAY_PORT_INTERRUPT | 4643 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 4644 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 4645 I915_MASTER_ERROR_INTERRUPT | 4646 I915_USER_INTERRUPT; 4647 4648 if (IS_G4X(dev_priv)) 4649 enable_mask |= I915_BSD_USER_INTERRUPT; 4650 4651 GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask); 4652 4653 /* Interrupt setup is already guaranteed to be single-threaded, this is 4654 * just to make the assert_spin_locked check happy. 
*/ 4655 spin_lock_irq(&dev_priv->irq_lock); 4656 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); 4657 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 4658 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 4659 spin_unlock_irq(&dev_priv->irq_lock); 4660 4661 i915_enable_asle_pipestat(dev_priv); 4662 } 4663 4664 static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv) 4665 { 4666 u32 hotplug_en; 4667 4668 lockdep_assert_held(&dev_priv->irq_lock); 4669 4670 /* Note HDMI and DP share hotplug bits */ 4671 /* enable bits are the same for all generations */ 4672 hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915); 4673 /* Programming the CRT detection parameters tends 4674 to generate a spurious hotplug event about three 4675 seconds later. So just do it once. 4676 */ 4677 if (IS_G4X(dev_priv)) 4678 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64; 4679 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; 4680 4681 /* Ignore TV since it's buggy */ 4682 i915_hotplug_interrupt_update_locked(dev_priv, 4683 HOTPLUG_INT_EN_MASK | 4684 CRT_HOTPLUG_VOLTAGE_COMPARE_MASK | 4685 CRT_HOTPLUG_ACTIVATION_PERIOD_64, 4686 hotplug_en); 4687 } 4688 4689 static irqreturn_t i965_irq_handler(int irq, void *arg) 4690 { 4691 struct drm_i915_private *dev_priv = arg; 4692 irqreturn_t ret = IRQ_NONE; 4693 4694 if (!intel_irqs_enabled(dev_priv)) 4695 return IRQ_NONE; 4696 4697 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 4698 disable_rpm_wakeref_asserts(&dev_priv->runtime_pm); 4699 4700 do { 4701 u32 pipe_stats[I915_MAX_PIPES] = {}; 4702 u32 eir = 0, eir_stuck = 0; 4703 u32 hotplug_status = 0; 4704 u32 iir; 4705 4706 iir = I915_READ(GEN2_IIR); 4707 if (iir == 0) 4708 break; 4709 4710 ret = IRQ_HANDLED; 4711 4712 if (iir & I915_DISPLAY_PORT_INTERRUPT) 4713 hotplug_status = i9xx_hpd_irq_ack(dev_priv); 4714 4715 /* Call regardless, as some status bits might not be 4716 * signalled in iir */ 4717 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats); 4718 4719 if (iir & I915_MASTER_ERROR_INTERRUPT) 4720 i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck); 4721 4722 I915_WRITE(GEN2_IIR, iir); 4723 4724 if (iir & I915_USER_INTERRUPT) 4725 intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]); 4726 4727 if (iir & I915_BSD_USER_INTERRUPT) 4728 intel_engine_breadcrumbs_irq(dev_priv->engine[VCS0]); 4729 4730 if (iir & I915_MASTER_ERROR_INTERRUPT) 4731 i9xx_error_irq_handler(dev_priv, eir, eir_stuck); 4732 4733 if (hotplug_status) 4734 i9xx_hpd_irq_handler(dev_priv, hotplug_status); 4735 4736 i965_pipestat_irq_handler(dev_priv, iir, pipe_stats); 4737 } while (0); 4738 4739 enable_rpm_wakeref_asserts(&dev_priv->runtime_pm); 4740 4741 return ret; 4742 } 4743 4744 /** 4745 * intel_irq_init - initializes irq support 4746 * @dev_priv: i915 device instance 4747 * 4748 * This function initializes all the irq support including work items, timers 4749 * and all the vtables. It does not setup the interrupt itself though. 
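 * The interrupt itself is requested later, from intel_irq_install().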
4750 */ 4751 void intel_irq_init(struct drm_i915_private *dev_priv) 4752 { 4753 struct drm_device *dev = &dev_priv->drm; 4754 struct intel_rps *rps = &dev_priv->gt_pm.rps; 4755 int i; 4756 4757 if (IS_I945GM(dev_priv)) 4758 i945gm_vblank_work_init(dev_priv); 4759 4760 intel_hpd_init_work(dev_priv); 4761 4762 INIT_WORK(&rps->work, gen6_pm_rps_work); 4763 4764 INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work); 4765 for (i = 0; i < MAX_L3_SLICES; ++i) 4766 dev_priv->l3_parity.remap_info[i] = NULL; 4767 4768 /* pre-gen11 the guc irqs bits are in the upper 16 bits of the pm reg */ 4769 if (HAS_GT_UC(dev_priv) && INTEL_GEN(dev_priv) < 11) 4770 dev_priv->gt.pm_guc_events = GUC_INTR_GUC2HOST << 16; 4771 4772 /* Let's track the enabled rps events */ 4773 if (IS_VALLEYVIEW(dev_priv)) 4774 /* WaGsvRC0ResidencyMethod:vlv */ 4775 dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED; 4776 else 4777 dev_priv->pm_rps_events = (GEN6_PM_RP_UP_THRESHOLD | 4778 GEN6_PM_RP_DOWN_THRESHOLD | 4779 GEN6_PM_RP_DOWN_TIMEOUT); 4780 4781 /* We share the register with other engine */ 4782 if (INTEL_GEN(dev_priv) > 9) 4783 GEM_WARN_ON(dev_priv->pm_rps_events & 0xffff0000); 4784 4785 rps->pm_intrmsk_mbz = 0; 4786 4787 /* 4788 * SNB,IVB,HSW can while VLV,CHV may hard hang on looping batchbuffer 4789 * if GEN6_PM_UP_EI_EXPIRED is masked. 4790 * 4791 * TODO: verify if this can be reproduced on VLV,CHV. 4792 */ 4793 if (INTEL_GEN(dev_priv) <= 7) 4794 rps->pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED; 4795 4796 if (INTEL_GEN(dev_priv) >= 8) 4797 rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC; 4798 4799 dev->vblank_disable_immediate = true; 4800 4801 /* Most platforms treat the display irq block as an always-on 4802 * power domain. vlv/chv can disable it at runtime and need 4803 * special care to avoid writing any of the display block registers 4804 * outside of the power domain. We defer setting up the display irqs 4805 * in this case to the runtime pm. 4806 */ 4807 dev_priv->display_irqs_enabled = true; 4808 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 4809 dev_priv->display_irqs_enabled = false; 4810 4811 dev_priv->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD; 4812 /* If we have MST support, we want to avoid doing short HPD IRQ storm 4813 * detection, as short HPD storms will occur as a natural part of 4814 * sideband messaging with MST. 4815 * On older platforms however, IRQ storms can occur with both long and 4816 * short pulses, as seen on some G4x systems. 4817 */ 4818 dev_priv->hotplug.hpd_short_storm_enabled = !HAS_DP_MST(dev_priv); 4819 4820 if (HAS_GMCH(dev_priv)) { 4821 if (I915_HAS_HOTPLUG(dev_priv)) 4822 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 4823 } else { 4824 if (INTEL_GEN(dev_priv) >= 11) 4825 dev_priv->display.hpd_irq_setup = gen11_hpd_irq_setup; 4826 else if (IS_GEN9_LP(dev_priv)) 4827 dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup; 4828 else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT) 4829 dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup; 4830 else 4831 dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup; 4832 } 4833 } 4834 4835 /** 4836 * intel_irq_fini - deinitializes IRQ support 4837 * @i915: i915 device instance 4838 * 4839 * This function deinitializes all the IRQ support. 
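 * It is the counterpart of intel_irq_init(): it tears down the i945gm
 * vblank work and frees any L3 parity remap buffers.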
4840 */ 4841 void intel_irq_fini(struct drm_i915_private *i915) 4842 { 4843 int i; 4844 4845 if (IS_I945GM(i915)) 4846 i945gm_vblank_work_fini(i915); 4847 4848 for (i = 0; i < MAX_L3_SLICES; ++i) 4849 kfree(i915->l3_parity.remap_info[i]); 4850 } 4851 4852 static irq_handler_t intel_irq_handler(struct drm_i915_private *dev_priv) 4853 { 4854 if (HAS_GMCH(dev_priv)) { 4855 if (IS_CHERRYVIEW(dev_priv)) 4856 return cherryview_irq_handler; 4857 else if (IS_VALLEYVIEW(dev_priv)) 4858 return valleyview_irq_handler; 4859 else if (IS_GEN(dev_priv, 4)) 4860 return i965_irq_handler; 4861 else if (IS_GEN(dev_priv, 3)) 4862 return i915_irq_handler; 4863 else 4864 return i8xx_irq_handler; 4865 } else { 4866 if (INTEL_GEN(dev_priv) >= 11) 4867 return gen11_irq_handler; 4868 else if (INTEL_GEN(dev_priv) >= 8) 4869 return gen8_irq_handler; 4870 else 4871 return ironlake_irq_handler; 4872 } 4873 } 4874 4875 static void intel_irq_reset(struct drm_i915_private *dev_priv) 4876 { 4877 if (HAS_GMCH(dev_priv)) { 4878 if (IS_CHERRYVIEW(dev_priv)) 4879 cherryview_irq_reset(dev_priv); 4880 else if (IS_VALLEYVIEW(dev_priv)) 4881 valleyview_irq_reset(dev_priv); 4882 else if (IS_GEN(dev_priv, 4)) 4883 i965_irq_reset(dev_priv); 4884 else if (IS_GEN(dev_priv, 3)) 4885 i915_irq_reset(dev_priv); 4886 else 4887 i8xx_irq_reset(dev_priv); 4888 } else { 4889 if (INTEL_GEN(dev_priv) >= 11) 4890 gen11_irq_reset(dev_priv); 4891 else if (INTEL_GEN(dev_priv) >= 8) 4892 gen8_irq_reset(dev_priv); 4893 else 4894 ironlake_irq_reset(dev_priv); 4895 } 4896 } 4897 4898 static void intel_irq_postinstall(struct drm_i915_private *dev_priv) 4899 { 4900 if (HAS_GMCH(dev_priv)) { 4901 if (IS_CHERRYVIEW(dev_priv)) 4902 cherryview_irq_postinstall(dev_priv); 4903 else if (IS_VALLEYVIEW(dev_priv)) 4904 valleyview_irq_postinstall(dev_priv); 4905 else if (IS_GEN(dev_priv, 4)) 4906 i965_irq_postinstall(dev_priv); 4907 else if (IS_GEN(dev_priv, 3)) 4908 i915_irq_postinstall(dev_priv); 4909 else 4910 i8xx_irq_postinstall(dev_priv); 4911 } else { 4912 if (INTEL_GEN(dev_priv) >= 11) 4913 gen11_irq_postinstall(dev_priv); 4914 else if (INTEL_GEN(dev_priv) >= 8) 4915 gen8_irq_postinstall(dev_priv); 4916 else 4917 ironlake_irq_postinstall(dev_priv); 4918 } 4919 } 4920 4921 /** 4922 * intel_irq_install - enables the hardware interrupt 4923 * @dev_priv: i915 device instance 4924 * 4925 * This function enables the hardware interrupt handling, but leaves the hotplug 4926 * handling still disabled. It is called after intel_irq_init(). 4927 * 4928 * In the driver load and resume code we need working interrupts in a few places 4929 * but don't want to deal with the hassle of concurrent probe and hotplug 4930 * workers. Hence the split into this two-stage approach. 4931 */ 4932 int intel_irq_install(struct drm_i915_private *dev_priv) 4933 { 4934 int irq = dev_priv->drm.pdev->irq; 4935 int ret; 4936 4937 /* 4938 * We enable some interrupt sources in our postinstall hooks, so mark 4939 * interrupts as enabled _before_ actually enabling them to avoid 4940 * special cases in our ordering checks. 
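	 * intel_irq_reset() below then quiesces the hardware before the
	 * handler is hooked up with request_irq().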
*/ 4942 dev_priv->runtime_pm.irqs_enabled = true; 4943 4944 dev_priv->drm.irq_enabled = true; 4945 4946 intel_irq_reset(dev_priv); 4947 4948 ret = request_irq(irq, intel_irq_handler(dev_priv), 4949 IRQF_SHARED, DRIVER_NAME, dev_priv); 4950 if (ret < 0) { 4951 dev_priv->drm.irq_enabled = false; 4952 return ret; 4953 } 4954 4955 intel_irq_postinstall(dev_priv); 4956 4957 return ret; 4958 } 4959 4960 /** 4961 * intel_irq_uninstall - finalizes all irq handling 4962 * @dev_priv: i915 device instance 4963 * 4964 * This stops interrupt and hotplug handling and unregisters and frees all 4965 * resources acquired in the init functions. 4966 */ 4967 void intel_irq_uninstall(struct drm_i915_private *dev_priv) 4968 { 4969 int irq = dev_priv->drm.pdev->irq; 4970 4971 /* 4972 * FIXME we can get called twice during driver load 4973 * error handling due to intel_modeset_cleanup() 4974 * calling us out of sequence. Would be nice if 4975 * it didn't do that... 4976 */ 4977 if (!dev_priv->drm.irq_enabled) 4978 return; 4979 4980 dev_priv->drm.irq_enabled = false; 4981 4982 intel_irq_reset(dev_priv); 4983 4984 free_irq(irq, dev_priv); 4985 4986 intel_hpd_cancel_work(dev_priv); 4987 dev_priv->runtime_pm.irqs_enabled = false; 4988 } 4989 4990 /** 4991 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling 4992 * @dev_priv: i915 device instance 4993 * 4994 * This function is used to disable interrupts at runtime, both in the runtime 4995 * pm and the system suspend/resume code. 4996 */ 4997 void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv) 4998 { 4999 intel_irq_reset(dev_priv); 5000 dev_priv->runtime_pm.irqs_enabled = false; 5001 intel_synchronize_irq(dev_priv); 5002 } 5003 5004 /** 5005 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling 5006 * @dev_priv: i915 device instance 5007 * 5008 * This function is used to enable interrupts at runtime, both in the runtime 5009 * pm and the system suspend/resume code. 5010 */ 5011 void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv) 5012 { 5013 dev_priv->runtime_pm.irqs_enabled = true; 5014 intel_irq_reset(dev_priv); 5015 intel_irq_postinstall(dev_priv); 5016 } 5017
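/*
 * Rough call order for the entry points above, as described by their
 * kerneldoc (a sketch for orientation only, not an additional API):
 *
 *   intel_irq_init(i915)        - work items, timers, vtables
 *   intel_irq_install(i915)     - reset, request_irq(), postinstall
 *     ...
 *     intel_runtime_pm_disable_interrupts(i915) /
 *     intel_runtime_pm_enable_interrupts(i915)  - around runtime PM and
 *                                                 system suspend/resume
 *     ...
 *   intel_irq_uninstall(i915)   - reset, free_irq(), cancel hotplug work
 *   intel_irq_fini(i915)        - free init-time allocations
 */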