/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

/**
 * DOC: interrupt handling
 *
 * These functions provide the basic support for enabling and disabling the
 * interrupt handling support. There's a lot more functionality in i915_irq.c
 * and related files, but that will be described in separate chapters.
 */

static const u32 hpd_ilk[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG,
};

static const u32 hpd_ivb[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
};

static const u32 hpd_bdw[HPD_NUM_PINS] = {
	[HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG,
};

static const u32 hpd_ibx[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_spt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
	[HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT
};

static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

/* BXT hpd list */
static const u32 hpd_bxt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = BXT_DE_PORT_HP_DDIA,
	[HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
	[HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
};

/* IIR can theoretically queue up two events. Be paranoid. */
#define GEN8_IRQ_RESET_NDX(type, which) do { \
	I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), 0); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
} while (0)

#define GEN3_IRQ_RESET(type) do { \
	I915_WRITE(type##IMR, 0xffffffff); \
	POSTING_READ(type##IMR); \
	I915_WRITE(type##IER, 0); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
} while (0)

#define GEN2_IRQ_RESET(type) do { \
	I915_WRITE16(type##IMR, 0xffff); \
	POSTING_READ16(type##IMR); \
	I915_WRITE16(type##IER, 0); \
	I915_WRITE16(type##IIR, 0xffff); \
	POSTING_READ16(type##IIR); \
	I915_WRITE16(type##IIR, 0xffff); \
	POSTING_READ16(type##IIR); \
} while (0)
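
/*
 * A worked example of the reset pattern above, shown for illustration only:
 * instantiating GEN3_IRQ_RESET(DE) for the display engine registers expands
 * to
 *
 *	I915_WRITE(DEIMR, 0xffffffff);
 *	POSTING_READ(DEIMR);
 *	I915_WRITE(DEIER, 0);
 *	I915_WRITE(DEIIR, 0xffffffff);
 *	POSTING_READ(DEIIR);
 *	I915_WRITE(DEIIR, 0xffffffff);
 *	POSTING_READ(DEIIR);
 *
 * i.e. mask everything, disable all enables, then clear IIR twice in case
 * the hardware has a second event queued behind the one currently latched.
 */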

/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
static void gen3_assert_iir_is_zero(struct drm_i915_private *dev_priv,
				    i915_reg_t reg)
{
	u32 val = I915_READ(reg);

	if (val == 0)
		return;

	WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
	     i915_mmio_reg_offset(reg), val);
	I915_WRITE(reg, 0xffffffff);
	POSTING_READ(reg);
	I915_WRITE(reg, 0xffffffff);
	POSTING_READ(reg);
}

static void gen2_assert_iir_is_zero(struct drm_i915_private *dev_priv,
				    i915_reg_t reg)
{
	u16 val = I915_READ16(reg);

	if (val == 0)
		return;

	WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
	     i915_mmio_reg_offset(reg), val);
	I915_WRITE16(reg, 0xffff);
	POSTING_READ16(reg);
	I915_WRITE16(reg, 0xffff);
	POSTING_READ16(reg);
}

#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
	gen3_assert_iir_is_zero(dev_priv, GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
	I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
} while (0)

#define GEN3_IRQ_INIT(type, imr_val, ier_val) do { \
	gen3_assert_iir_is_zero(dev_priv, type##IIR); \
	I915_WRITE(type##IER, (ier_val)); \
	I915_WRITE(type##IMR, (imr_val)); \
	POSTING_READ(type##IMR); \
} while (0)

#define GEN2_IRQ_INIT(type, imr_val, ier_val) do { \
	gen2_assert_iir_is_zero(dev_priv, type##IIR); \
	I915_WRITE16(type##IER, (ier_val)); \
	I915_WRITE16(type##IMR, (imr_val)); \
	POSTING_READ16(type##IMR); \
} while (0)

static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);

/* For display hotplug interrupt */
static inline void
i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
				     uint32_t mask,
				     uint32_t bits)
{
	uint32_t val;

	lockdep_assert_held(&dev_priv->irq_lock);
	WARN_ON(bits & ~mask);

	val = I915_READ(PORT_HOTPLUG_EN);
	val &= ~mask;
	val |= bits;
	I915_WRITE(PORT_HOTPLUG_EN, val);
}

/**
 * i915_hotplug_interrupt_update - update hotplug interrupt enable
 * @dev_priv: driver private
 * @mask: bits to update
 * @bits: bits to enable
 * NOTE: the HPD enable bits are modified both inside and outside
 * of an interrupt context. To avoid that read-modify-write cycles
 * interfere, these bits are protected by a spinlock. Since this
 * function is usually not called from a context where the lock is
 * held already, this function acquires the lock itself. A non-locking
 * version is also available.
 */
void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
				   uint32_t mask,
				   uint32_t bits)
{
	spin_lock_irq(&dev_priv->irq_lock);
	i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
	spin_unlock_irq(&dev_priv->irq_lock);
}

/**
 * ilk_update_display_irq - update DEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ilk_update_display_irq(struct drm_i915_private *dev_priv,
			    uint32_t interrupt_mask,
			    uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->irq_mask) {
		dev_priv->irq_mask = new_val;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}
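
/*
 * Convention used by the *_update_*_irq() helpers in this file: a bit set in
 * the IMR register masks (disables) that interrupt. As an illustrative
 * example, ilk_update_display_irq(dev_priv, bit, bit) clears the bit in
 * DEIMR (interrupt unmasked), ilk_update_display_irq(dev_priv, bit, 0) sets
 * it again (interrupt masked), and bits outside @interrupt_mask are left
 * untouched.
 */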

/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	lockdep_assert_held(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	dev_priv->gt_irq_mask &= ~interrupt_mask;
	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
}

void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, mask);
	POSTING_READ_FW(GTIMR);
}

void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, 0);
}

static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv)
{
	return INTEL_GEN(dev_priv) >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
}

static i915_reg_t gen6_pm_imr(struct drm_i915_private *dev_priv)
{
	return INTEL_GEN(dev_priv) >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR;
}

static i915_reg_t gen6_pm_ier(struct drm_i915_private *dev_priv)
{
	return INTEL_GEN(dev_priv) >= 8 ?
		GEN8_GT_IER(2) : GEN6_PMIER;
}

/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	lockdep_assert_held(&dev_priv->irq_lock);

	new_val = dev_priv->pm_imr;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_imr) {
		dev_priv->pm_imr = new_val;
		I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_imr);
		POSTING_READ(gen6_pm_imr(dev_priv));
	}
}

void gen6_unmask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	snb_update_pm_irq(dev_priv, mask, mask);
}

static void __gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	snb_update_pm_irq(dev_priv, mask, 0);
}

void gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	__gen6_mask_pm_irq(dev_priv, mask);
}

static void gen6_reset_pm_iir(struct drm_i915_private *dev_priv, u32 reset_mask)
{
	i915_reg_t reg = gen6_pm_iir(dev_priv);

	lockdep_assert_held(&dev_priv->irq_lock);

	I915_WRITE(reg, reset_mask);
	I915_WRITE(reg, reset_mask);
	POSTING_READ(reg);
}

static void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, u32 enable_mask)
{
	lockdep_assert_held(&dev_priv->irq_lock);

	dev_priv->pm_ier |= enable_mask;
	I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->pm_ier);
	gen6_unmask_pm_irq(dev_priv, enable_mask);
	/* unmask_pm_irq provides an implicit barrier (POSTING_READ) */
}

static void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, u32 disable_mask)
{
	lockdep_assert_held(&dev_priv->irq_lock);

	dev_priv->pm_ier &= ~disable_mask;
	__gen6_mask_pm_irq(dev_priv, disable_mask);
	I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->pm_ier);
	/* though a barrier is missing here, we don't really need one */
}

void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	gen6_reset_pm_iir(dev_priv, dev_priv->pm_rps_events);
	dev_priv->gt_pm.rps.pm_iir = 0;
	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
{
	struct intel_rps *rps = &dev_priv->gt_pm.rps;

	if (READ_ONCE(rps->interrupts_enabled))
		return;

	if (WARN_ON_ONCE(IS_GEN11(dev_priv)))
		return;

	spin_lock_irq(&dev_priv->irq_lock);
	WARN_ON_ONCE(rps->pm_iir);
	WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
	rps->interrupts_enabled = true;
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);

	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
{
	struct intel_rps *rps = &dev_priv->gt_pm.rps;

	if (!READ_ONCE(rps->interrupts_enabled))
		return;

	if (WARN_ON_ONCE(IS_GEN11(dev_priv)))
		return;

	spin_lock_irq(&dev_priv->irq_lock);
	rps->interrupts_enabled = false;

	I915_WRITE(GEN6_PMINTRMSK,
		   gen6_sanitize_rps_pm_mask(dev_priv, ~0u));

	gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);

	spin_unlock_irq(&dev_priv->irq_lock);
	synchronize_irq(dev_priv->drm.irq);

	/* Now that we will not be generating any more work, flush any
	 * outstanding tasks. As we are called on the RPS idle path,
	 * we will reset the GPU to minimum frequencies, so the current
	 * state of the worker can be discarded.
	 */
	cancel_work_sync(&rps->work);
	gen6_reset_rps_interrupts(dev_priv);
}

void gen9_reset_guc_interrupts(struct drm_i915_private *dev_priv)
{
	assert_rpm_wakelock_held(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	gen6_reset_pm_iir(dev_priv, dev_priv->pm_guc_events);
	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv)
{
	assert_rpm_wakelock_held(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	if (!dev_priv->guc.interrupts_enabled) {
		WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) &
			     dev_priv->pm_guc_events);
		dev_priv->guc.interrupts_enabled = true;
		gen6_enable_pm_irq(dev_priv, dev_priv->pm_guc_events);
	}
	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv)
{
	assert_rpm_wakelock_held(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	dev_priv->guc.interrupts_enabled = false;

	gen6_disable_pm_irq(dev_priv, dev_priv->pm_guc_events);

	spin_unlock_irq(&dev_priv->irq_lock);
	synchronize_irq(dev_priv->drm.irq);

	gen9_reset_guc_interrupts(dev_priv);
}

/**
 * bdw_update_port_irq - update DE port interrupt
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
				uint32_t interrupt_mask,
				uint32_t enabled_irq_mask)
{
	uint32_t new_val;
	uint32_t old_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	old_val = I915_READ(GEN8_DE_PORT_IMR);

	new_val = old_val;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != old_val) {
		I915_WRITE(GEN8_DE_PORT_IMR, new_val);
		POSTING_READ(GEN8_DE_PORT_IMR);
	}
}

/**
 * bdw_update_pipe_irq - update DE pipe interrupt
 * @dev_priv: driver private
 * @pipe: pipe whose interrupt to update
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
			 enum pipe pipe,
			 uint32_t interrupt_mask,
			 uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->de_irq_mask[pipe];
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->de_irq_mask[pipe]) {
		dev_priv->de_irq_mask[pipe] = new_val;
		I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
		POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
	}
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
				  uint32_t interrupt_mask,
				  uint32_t enabled_irq_mask)
{
	uint32_t sdeimr = I915_READ(SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	lockdep_assert_held(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}

u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
			      enum pipe pipe)
{
	u32 status_mask = dev_priv->pipestat_irq_mask[pipe];
	u32 enable_mask = status_mask << 16;

	lockdep_assert_held(&dev_priv->irq_lock);

	if (INTEL_GEN(dev_priv) < 5)
		goto out;

	/*
	 * On pipe A we don't support the PSR interrupt yet,
	 * on pipe B and C the same bit MBZ.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
		return 0;
	/*
	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
	 * A the same bit is for perf counters which we don't use either.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
		return 0;

	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

out:
	WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		  status_mask & ~PIPESTAT_INT_STATUS_MASK,
		  "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		  pipe_name(pipe), enable_mask, status_mask);

	return enable_mask;
}

void i915_enable_pipestat(struct drm_i915_private *dev_priv,
			  enum pipe pipe, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 enable_mask;

	WARN_ONCE(status_mask & ~PIPESTAT_INT_STATUS_MASK,
		  "pipe %c: status_mask=0x%x\n",
		  pipe_name(pipe), status_mask);

	lockdep_assert_held(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == status_mask)
		return;

	dev_priv->pipestat_irq_mask[pipe] |= status_mask;
	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

	I915_WRITE(reg, enable_mask | status_mask);
	POSTING_READ(reg);
}

void i915_disable_pipestat(struct drm_i915_private *dev_priv,
			   enum pipe pipe, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 enable_mask;

	WARN_ONCE(status_mask & ~PIPESTAT_INT_STATUS_MASK,
		  "pipe %c: status_mask=0x%x\n",
		  pipe_name(pipe), status_mask);

	lockdep_assert_held(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == 0)
		return;

	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

	I915_WRITE(reg, enable_mask | status_mask);
	POSTING_READ(reg);
}
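
/*
 * Layout note for the PIPESTAT helpers above: for most bits the enable bit
 * sits in the upper half of the register, 16 positions above its status bit,
 * which is why i915_pipestat_enable_mask() starts from status_mask << 16 and
 * then patches up the few exceptions. Both helpers write back
 * "enable_mask | status_mask" in one go; writing 1 to a status bit acks
 * (clears) it, so the enable half is reprogrammed and the pending status is
 * cleared in the same register access.
 */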

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 * @dev_priv: i915 device private
 */
static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
{
	if (!dev_priv->opregion.asle || !IS_MOBILE(dev_priv))
		return;

	spin_lock_irq(&dev_priv->irq_lock);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
	if (INTEL_GEN(dev_priv) >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_STATUS);

	spin_unlock_irq(&dev_priv->irq_lock);
}

/*
 * This timing diagram depicts the video signal in and
 * around the vertical blanking period.
 *
 * Assumptions about the fictitious mode used in this example:
 *  vblank_start >= 3
 *  vsync_start = vblank_start + 1
 *  vsync_end = vblank_start + 2
 *  vtotal = vblank_start + 3
 *
 *           start of vblank:
 *           latch double buffered registers
 *           increment frame counter (ctg+)
 *           generate start of vblank interrupt (gen4+)
 *           |
 *           |          frame start:
 *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
 *           |          may be shifted forward 1-3 extra lines via PIPECONF
 *           |          |
 *           |          |  start of vsync:
 *           |          |  generate vsync interrupt
 *           |          |  |
 * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
 *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
 * ----va---> <-----------------vb--------------------> <--------va-------------
 *       |          |       <----vs----->                     |
 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
 *       |          |                                         |
 *       last visible pixel                                   first visible pixel
 *                  |                                         increment frame counter (gen3/4)
 *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
 *
 * x  = horizontal active
 * _  = horizontal blanking
 * hs = horizontal sync
 * va = vertical active
 * vb = vertical blanking
 * vs = vertical sync
 * vbs = vblank_start (number)
 *
 * Summary:
 * - most events happen at the start of horizontal sync
 * - frame start happens at the start of horizontal blank, 1-4 lines
 *   (depending on PIPECONF settings) after the start of vblank
 * - gen3/4 pixel and frame counter are synchronized with the start
 *   of horizontal active on the first line of vertical active
 */
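
/*
 * Note on the vblank counter cook-up used by i915_get_vblank_counter()
 * below: the gen3/4 hardware frame counter only increments at the start of
 * vertical active (see the diagram above), while DRM wants a counter that
 * increments at the start of vblank. The helper therefore converts
 * vblank_start to a pixel count, moves it back by htotal - hsync_start
 * pixels so it lines up with where the start of vblank interrupt fires, and
 * adds 1 to the raw frame counter whenever the pixel counter shows we are
 * already past that point.
 */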

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	i915_reg_t high_frame, low_frame;
	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
	const struct drm_display_mode *mode = &dev->vblank[pipe].hwmode;
	unsigned long irqflags;

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vbl_start = mode->crtc_vblank_start;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vbl_start = DIV_ROUND_UP(vbl_start, 2);

	/* Convert to pixel count */
	vbl_start *= htotal;

	/* Start of vblank event occurs at start of hsync */
	vbl_start -= htotal - hsync_start;

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK;
		low = I915_READ_FW(low_frame);
		high2 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * The frame counter increments at beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}

static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
}

/*
 * On certain encoders on certain platforms, pipe
 * scanline register will not work to get the scanline,
 * since the timings are driven from the PORT or issues
 * with scanline register updates.
 * This function will use Framestamp and current
 * timestamp registers to calculate the scanline.
 */
static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct drm_vblank_crtc *vblank =
		&crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
	const struct drm_display_mode *mode = &vblank->hwmode;
	u32 vblank_start = mode->crtc_vblank_start;
	u32 vtotal = mode->crtc_vtotal;
	u32 htotal = mode->crtc_htotal;
	u32 clock = mode->crtc_clock;
	u32 scanline, scan_prev_time, scan_curr_time, scan_post_time;

	/*
	 * To avoid the race condition where we might cross into the
	 * next vblank just between the PIPE_FRMTMSTMP and TIMESTAMP_CTR
	 * reads, we make sure we read PIPE_FRMTMSTMP and TIMESTAMP_CTR
	 * during the same frame.
	 */
	do {
		/*
		 * This field provides read back of the display
		 * pipe frame time stamp. The time stamp value
		 * is sampled at every start of vertical blank.
		 */
		scan_prev_time = I915_READ_FW(PIPE_FRMTMSTMP(crtc->pipe));

		/*
		 * The TIMESTAMP_CTR register has the current
		 * time stamp value.
		 */
		scan_curr_time = I915_READ_FW(IVB_TIMESTAMP_CTR);

		scan_post_time = I915_READ_FW(PIPE_FRMTMSTMP(crtc->pipe));
	} while (scan_post_time != scan_prev_time);

	scanline = div_u64(mul_u32_u32(scan_curr_time - scan_prev_time,
				       clock), 1000 * htotal);
	scanline = min(scanline, vtotal - 1);
	scanline = (scanline + vblank_start) % vtotal;

	return scanline;
}
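
/*
 * The arithmetic above in short: the frame timestamp is latched at every
 * start of vblank, so (TIMESTAMP_CTR - PIPE_FRMTMSTMP) * crtc_clock /
 * (1000 * htotal) is the number of lines scanned out since vblank started.
 * Clamping to vtotal - 1 guards against rounding past the frame, and adding
 * vblank_start modulo vtotal turns "lines since vblank start" into an
 * absolute scanline number.
 */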

/* I915_READ_FW, only for fast reads of display block, no need for forcewake etc. */
static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct drm_display_mode *mode;
	struct drm_vblank_crtc *vblank;
	enum pipe pipe = crtc->pipe;
	int position, vtotal;

	if (!crtc->active)
		return -1;

	vblank = &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
	mode = &vblank->hwmode;

	if (mode->private_flags & I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP)
		return __intel_get_crtc_scanline_from_timestamp(crtc);

	vtotal = mode->crtc_vtotal;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vtotal /= 2;

	if (IS_GEN2(dev_priv))
		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
	else
		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

	/*
	 * On HSW, the DSL reg (0x70000) appears to return 0 if we
	 * read it just before the start of vblank. So try it again
	 * so we don't accidentally end up spanning a vblank frame
	 * increment, causing the pipe_update_end() code to squawk at us.
	 *
	 * The nature of this problem means we can't simply check the ISR
	 * bit and return the vblank start value; nor can we use the scanline
	 * debug register in the transcoder as it appears to have the same
	 * problem. We may need to extend this to include other platforms,
	 * but so far testing only shows the problem on HSW.
	 */
	if (HAS_DDI(dev_priv) && !position) {
		int i, temp;

		for (i = 0; i < 100; i++) {
			udelay(1);
			temp = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
			if (temp != position) {
				position = temp;
				break;
			}
		}
	}

	/*
	 * See update_scanline_offset() for the details on the
	 * scanline_offset adjustment.
	 */
	return (position + crtc->scanline_offset) % vtotal;
}

static bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
				     bool in_vblank_irq, int *vpos, int *hpos,
				     ktime_t *stime, ktime_t *etime,
				     const struct drm_display_mode *mode)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv,
								pipe);
	int position;
	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
	unsigned long irqflags;

	if (WARN_ON(!mode->crtc_clock)) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return false;
	}

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = __intel_get_crtc_scanline(intel_crtc);
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (I915_READ_FW(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;

		/*
		 * In interlaced modes, the pixel counter counts all pixels,
		 * so one field will have htotal more pixels. In order to avoid
		 * the reported position from jumping backwards when the pixel
		 * counter is beyond the length of the shorter field, just
		 * clamp the position to the length of the shorter field. This
		 * matches how the scanline counter based position works since
		 * the scanline counter doesn't count the two half lines.
		 */
		if (position >= vtotal)
			position = vtotal - 1;

		/*
		 * Start of vblank interrupt is triggered at start of hsync,
		 * just prior to the first active line of vblank. However we
		 * consider lines to start at the leading edge of horizontal
		 * active. So, should we get here before we've crossed into
		 * the horizontal active of the first line in vblank, we would
		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
		 * always add htotal-hsync_start to the current pixel position.
		 */
		position = (position + htotal - hsync_start) % vtotal;
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;

	if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
		*vpos = position;
		*hpos = 0;
	} else {
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	return true;
}
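
/*
 * Worked example of the conversion above (illustrative numbers only): for a
 * progressive mode with vbl_start = 1080 and vbl_end = vtotal = 1111, a
 * scanline of 1090 (inside vblank) yields 1090 - 1111 = -21, counting up
 * towards 0 at the end of vblank, while scanline 500 (inside active) stays
 * at 500. The same arithmetic applies in pixel units on the platforms that
 * use the pixel counter path.
 */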

int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	unsigned long irqflags;
	int position;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	position = __intel_get_crtc_scanline(crtc);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	return position;
}

static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv)
{
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;

	spin_lock(&mchdev_lock);

	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev_priv, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	spin_unlock(&mchdev_lock);

	return;
}

static void notify_ring(struct intel_engine_cs *engine)
{
	struct i915_request *rq = NULL;
	struct intel_wait *wait;

	if (!engine->breadcrumbs.irq_armed)
		return;

	atomic_inc(&engine->irq_count);
	set_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);

	spin_lock(&engine->breadcrumbs.irq_lock);
	wait = engine->breadcrumbs.irq_wait;
	if (wait) {
		bool wakeup = engine->irq_seqno_barrier;

		/* We use a callback from the dma-fence to submit
		 * requests after waiting on our own requests. To
		 * ensure minimum delay in queuing the next request to
		 * hardware, signal the fence now rather than wait for
		 * the signaler to be woken up. We still wake up the
		 * waiter in order to handle the irq-seqno coherency
		 * issues (we may receive the interrupt before the
		 * seqno is written, see __i915_request_irq_complete())
		 * and to handle coalescing of multiple seqno updates
		 * and many waiters.
		 */
		if (i915_seqno_passed(intel_engine_get_seqno(engine),
				      wait->seqno)) {
			struct i915_request *waiter = wait->request;

			wakeup = true;
			if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
				      &waiter->fence.flags) &&
			    intel_wait_check_request(wait, waiter))
				rq = i915_request_get(waiter);
		}

		if (wakeup)
			wake_up_process(wait->tsk);
	} else {
		if (engine->breadcrumbs.irq_armed)
			__intel_engine_disarm_breadcrumbs(engine);
	}
	spin_unlock(&engine->breadcrumbs.irq_lock);

	if (rq) {
		dma_fence_signal(&rq->fence);
		GEM_BUG_ON(!i915_request_completed(rq));
		i915_request_put(rq);
	}

	trace_intel_engine_notify(engine, wait);
}

static void vlv_c0_read(struct drm_i915_private *dev_priv,
			struct intel_rps_ei *ei)
{
	ei->ktime = ktime_get_raw();
	ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT);
	ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
}

void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
{
	memset(&dev_priv->gt_pm.rps.ei, 0, sizeof(dev_priv->gt_pm.rps.ei));
}

static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	const struct intel_rps_ei *prev = &rps->ei;
	struct intel_rps_ei now;
	u32 events = 0;

	if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0)
		return 0;

	vlv_c0_read(dev_priv, &now);

	if (prev->ktime) {
		u64 time, c0;
		u32 render, media;

		time = ktime_us_delta(now.ktime, prev->ktime);

		time *= dev_priv->czclk_freq;

		/* Workload can be split between render + media,
		 * e.g. SwapBuffers being blitted in X after being rendered in
		 * mesa. To account for this we need to combine both engines
		 * into our activity counter.
		 */
		render = now.render_c0 - prev->render_c0;
		media = now.media_c0 - prev->media_c0;
		c0 = max(render, media);
		c0 *= 1000 * 100 << 8; /* to usecs and scale to threshold% */

		if (c0 > time * rps->up_threshold)
			events = GEN6_PM_RP_UP_THRESHOLD;
		else if (c0 < time * rps->down_threshold)
			events = GEN6_PM_RP_DOWN_THRESHOLD;
	}

	rps->ei = now;
	return events;
}
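
/*
 * Unit check for the comparison above: c0 is how busy the busier of the
 * render/media engines was over the evaluation interval, time *
 * dev_priv->czclk_freq is the total length of that interval in the same
 * clock domain, and up/down_threshold are percentages, so after the
 * "1000 * 100 << 8" unit juggling the test effectively reduces to
 * busy% > up_threshold (or busy% < down_threshold).
 */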

static void gen6_pm_rps_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, gt_pm.rps.work);
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	bool client_boost = false;
	int new_delay, adj, min, max;
	u32 pm_iir = 0;

	spin_lock_irq(&dev_priv->irq_lock);
	if (rps->interrupts_enabled) {
		pm_iir = fetch_and_zero(&rps->pm_iir);
		client_boost = atomic_read(&rps->num_waiters);
	}
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Make sure we didn't queue anything we're not going to process. */
	WARN_ON(pm_iir & ~dev_priv->pm_rps_events);
	if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
		goto out;

	mutex_lock(&dev_priv->pcu_lock);

	pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);

	adj = rps->last_adj;
	new_delay = rps->cur_freq;
	min = rps->min_freq_softlimit;
	max = rps->max_freq_softlimit;
	if (client_boost)
		max = rps->max_freq;
	if (client_boost && new_delay < rps->boost_freq) {
		new_delay = rps->boost_freq;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else /* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;

		if (new_delay >= rps->max_freq_softlimit)
			adj = 0;
	} else if (client_boost) {
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
		if (rps->cur_freq > rps->efficient_freq)
			new_delay = rps->efficient_freq;
		else if (rps->cur_freq > rps->min_freq_softlimit)
			new_delay = rps->min_freq_softlimit;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
		if (adj < 0)
			adj *= 2;
		else /* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;

		if (new_delay <= rps->min_freq_softlimit)
			adj = 0;
	} else { /* unknown event */
		adj = 0;
	}

	rps->last_adj = adj;

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	new_delay += adj;
	new_delay = clamp_t(int, new_delay, min, max);

	if (intel_set_rps(dev_priv, new_delay)) {
		DRM_DEBUG_DRIVER("Failed to set new GPU frequency\n");
		rps->last_adj = 0;
	}

	mutex_unlock(&dev_priv->pcu_lock);

out:
	/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
	spin_lock_irq(&dev_priv->irq_lock);
	if (rps->interrupts_enabled)
		gen6_unmask_pm_irq(dev_priv, dev_priv->pm_rps_events);
	spin_unlock_irq(&dev_priv->irq_lock);
}
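
/*
 * Example of the ramping behaviour above (illustrative numbers only): after
 * consecutive GEN6_PM_RP_UP_THRESHOLD events last_adj grows 1, 2, 4, ... so
 * the frequency converges quickly, while a GEN6_PM_RP_DOWN_TIMEOUT snaps
 * straight to the efficient (RPe) or minimum frequency. The result is always
 * clamped to the current soft limits (or to the hardware max while a client
 * boost is pending) before being handed to intel_set_rps().
 */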

/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * the same row is more likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	uint32_t misccpctl;
	uint8_t slice = 0;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->drm.struct_mutex);

	/* If we've screwed up tracking, just let the interrupt fire again */
	if (WARN_ON(!dev_priv->l3_parity.which_slice))
		goto out;

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		i915_reg_t reg;

		slice--;
		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1<<slice);

		reg = GEN7_L3CDERRST1(slice);

		error_status = I915_READ(reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		POSTING_READ(reg);

		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

		kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj,
				   KOBJ_CHANGE, parity_event);

		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			  slice, row, bank, subbank);

		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
	}

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

out:
	WARN_ON(dev_priv->l3_parity.which_slice);
	spin_lock_irq(&dev_priv->irq_lock);
	gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
	spin_unlock_irq(&dev_priv->irq_lock);

	mutex_unlock(&dev_priv->drm.struct_mutex);
}

static void ivybridge_parity_error_irq_handler(struct drm_i915_private *dev_priv,
					       u32 iir)
{
	if (!HAS_L3_DPF(dev_priv))
		return;

	spin_lock(&dev_priv->irq_lock);
	gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
	spin_unlock(&dev_priv->irq_lock);

	iir &= GT_PARITY_ERROR(dev_priv);
	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
		dev_priv->l3_parity.which_slice |= 1 << 1;

	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
		dev_priv->l3_parity.which_slice |= 1 << 0;

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}

static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir & GT_RENDER_USER_INTERRUPT)
		notify_ring(dev_priv->engine[RCS]);
	if (gt_iir & ILK_BSD_USER_INTERRUPT)
		notify_ring(dev_priv->engine[VCS]);
}

static void snb_gt_irq_handler(struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir & GT_RENDER_USER_INTERRUPT)
		notify_ring(dev_priv->engine[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(dev_priv->engine[VCS]);
	if (gt_iir & GT_BLT_USER_INTERRUPT)
		notify_ring(dev_priv->engine[BCS]);

	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
		      GT_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
		DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);

	if (gt_iir & GT_PARITY_ERROR(dev_priv))
		ivybridge_parity_error_irq_handler(dev_priv, gt_iir);
}
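
/*
 * Each 32-bit GT IIR dword on gen8+ carries the bits for up to two engines;
 * the test_shift argument of gen8_cs_irq_handler() below selects the
 * engine's field within that dword (the callers in gen8_gt_irq_handler()
 * pass GEN8_RCS_IRQ_SHIFT, GEN8_BCS_IRQ_SHIFT, and so on), and the common
 * GT_RENDER_USER_INTERRUPT / GT_CONTEXT_SWITCH_INTERRUPT definitions are
 * reused shifted by that amount.
 */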

static void
gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir, int test_shift)
{
	struct intel_engine_execlists * const execlists = &engine->execlists;
	bool tasklet = false;

	if (iir & (GT_CONTEXT_SWITCH_INTERRUPT << test_shift)) {
		if (READ_ONCE(engine->execlists.active)) {
			__set_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
			tasklet = true;
		}
	}

	if (iir & (GT_RENDER_USER_INTERRUPT << test_shift)) {
		notify_ring(engine);
		tasklet |= USES_GUC_SUBMISSION(engine->i915);
	}

	if (tasklet)
		tasklet_hi_schedule(&execlists->tasklet);
}

static void gen8_gt_irq_ack(struct drm_i915_private *i915,
			    u32 master_ctl, u32 gt_iir[4])
{
	void __iomem * const regs = i915->regs;

#define GEN8_GT_IRQS (GEN8_GT_RCS_IRQ | \
		      GEN8_GT_BCS_IRQ | \
		      GEN8_GT_VCS1_IRQ | \
		      GEN8_GT_VCS2_IRQ | \
		      GEN8_GT_VECS_IRQ | \
		      GEN8_GT_PM_IRQ | \
		      GEN8_GT_GUC_IRQ)

	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
		gt_iir[0] = raw_reg_read(regs, GEN8_GT_IIR(0));
		if (likely(gt_iir[0]))
			raw_reg_write(regs, GEN8_GT_IIR(0), gt_iir[0]);
	}

	if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
		gt_iir[1] = raw_reg_read(regs, GEN8_GT_IIR(1));
		if (likely(gt_iir[1]))
			raw_reg_write(regs, GEN8_GT_IIR(1), gt_iir[1]);
	}

	if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
		gt_iir[2] = raw_reg_read(regs, GEN8_GT_IIR(2));
		if (likely(gt_iir[2] & (i915->pm_rps_events |
					i915->pm_guc_events)))
			raw_reg_write(regs, GEN8_GT_IIR(2),
				      gt_iir[2] & (i915->pm_rps_events |
						   i915->pm_guc_events));
	}

	if (master_ctl & GEN8_GT_VECS_IRQ) {
		gt_iir[3] = raw_reg_read(regs, GEN8_GT_IIR(3));
		if (likely(gt_iir[3]))
			raw_reg_write(regs, GEN8_GT_IIR(3), gt_iir[3]);
	}
}

static void gen8_gt_irq_handler(struct drm_i915_private *i915,
				u32 master_ctl, u32 gt_iir[4])
{
	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
		gen8_cs_irq_handler(i915->engine[RCS],
				    gt_iir[0], GEN8_RCS_IRQ_SHIFT);
		gen8_cs_irq_handler(i915->engine[BCS],
				    gt_iir[0], GEN8_BCS_IRQ_SHIFT);
	}

	if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
		gen8_cs_irq_handler(i915->engine[VCS],
				    gt_iir[1], GEN8_VCS1_IRQ_SHIFT);
		gen8_cs_irq_handler(i915->engine[VCS2],
				    gt_iir[1], GEN8_VCS2_IRQ_SHIFT);
	}

	if (master_ctl & GEN8_GT_VECS_IRQ) {
		gen8_cs_irq_handler(i915->engine[VECS],
				    gt_iir[3], GEN8_VECS_IRQ_SHIFT);
	}

	if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
		gen6_rps_irq_handler(i915, gt_iir[2]);
		gen9_guc_irq_handler(i915, gt_iir[2]);
	}
}
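
/*
 * gen8_gt_irq_ack() and gen8_gt_irq_handler() are intentionally split: the
 * caller is expected to read and clear the GT IIR registers (the ack step)
 * while the master interrupt control is still disabled, and only afterwards
 * walk the cached gt_iir[] values to notify engines, RPS and the GuC, which
 * keeps the time spent with the master interrupt disabled short.
 */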

static bool bxt_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_A:
		return val & PORTA_HOTPLUG_LONG_DETECT;
	case PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool spt_port_hotplug2_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_E:
		return val & PORTE_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool spt_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_A:
		return val & PORTA_HOTPLUG_LONG_DETECT;
	case PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	case PORT_D:
		return val & PORTD_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool ilk_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_A:
		return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool pch_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	case PORT_D:
		return val & PORTD_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool i9xx_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_B:
		return val & PORTB_HOTPLUG_INT_LONG_PULSE;
	case PORT_C:
		return val & PORTC_HOTPLUG_INT_LONG_PULSE;
	case PORT_D:
		return val & PORTD_HOTPLUG_INT_LONG_PULSE;
	default:
		return false;
	}
}

/*
 * Get a bit mask of pins that have triggered, and which ones may be long.
 * This can be called multiple times with the same masks to accumulate
 * hotplug detection results from several registers.
 *
 * Note that the caller is expected to zero out the masks initially.
 */
static void intel_get_hpd_pins(struct drm_i915_private *dev_priv,
			       u32 *pin_mask, u32 *long_mask,
			       u32 hotplug_trigger, u32 dig_hotplug_reg,
			       const u32 hpd[HPD_NUM_PINS],
			       bool long_pulse_detect(enum port port, u32 val))
{
	enum port port;
	int i;

	for_each_hpd_pin(i) {
		if ((hpd[i] & hotplug_trigger) == 0)
			continue;

		*pin_mask |= BIT(i);

		port = intel_hpd_pin_to_port(dev_priv, i);
		if (port == PORT_NONE)
			continue;

		if (long_pulse_detect(port, dig_hotplug_reg))
			*long_mask |= BIT(i);
	}

	DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x\n",
			 hotplug_trigger, dig_hotplug_reg, *pin_mask);

}

static void gmbus_irq_handler(struct drm_i915_private *dev_priv)
{
	wake_up_all(&dev_priv->gmbus_wait_queue);
}

static void dp_aux_irq_handler(struct drm_i915_private *dev_priv)
{
	wake_up_all(&dev_priv->gmbus_wait_queue);
}

#if defined(CONFIG_DEBUG_FS)
static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
					 enum pipe pipe,
					 uint32_t crc0, uint32_t crc1,
					 uint32_t crc2, uint32_t crc3,
					 uint32_t crc4)
{
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
	struct intel_pipe_crc_entry *entry;
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
	struct drm_driver *driver = dev_priv->drm.driver;
	uint32_t crcs[5];
	int head, tail;

	spin_lock(&pipe_crc->lock);
	if (pipe_crc->source) {
		if (!pipe_crc->entries) {
			spin_unlock(&pipe_crc->lock);
			DRM_DEBUG_KMS("spurious interrupt\n");
			return;
		}

		head = pipe_crc->head;
		tail = pipe_crc->tail;

		if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
			spin_unlock(&pipe_crc->lock);
			DRM_ERROR("CRC buffer overflowing\n");
			return;
		}

		entry = &pipe_crc->entries[head];

		entry->frame = driver->get_vblank_counter(&dev_priv->drm, pipe);
		entry->crc[0] = crc0;
		entry->crc[1] = crc1;
		entry->crc[2] = crc2;
		entry->crc[3] = crc3;
		entry->crc[4] = crc4;

		head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
		pipe_crc->head = head;

		spin_unlock(&pipe_crc->lock);

		wake_up_interruptible(&pipe_crc->wq);
	} else {
		/*
		 * For some not yet identified reason, the first CRC is
		 * bonkers. So let's just wait for the next vblank and read
		 * out the buggy result.
		 *
		 * On GEN8+ sometimes the second CRC is bonkers as well, so
		 * don't trust that one either.
		 */
		if (pipe_crc->skipped == 0 ||
		    (INTEL_GEN(dev_priv) >= 8 && pipe_crc->skipped == 1)) {
			pipe_crc->skipped++;
			spin_unlock(&pipe_crc->lock);
			return;
		}
		spin_unlock(&pipe_crc->lock);
		crcs[0] = crc0;
		crcs[1] = crc1;
		crcs[2] = crc2;
		crcs[3] = crc3;
		crcs[4] = crc4;
		drm_crtc_add_crc_entry(&crtc->base, true,
				       drm_crtc_accurate_vblank_count(&crtc->base),
				       crcs);
	}
}
#else
static inline void
display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
			     enum pipe pipe,
			     uint32_t crc0, uint32_t crc1,
			     uint32_t crc2, uint32_t crc3,
			     uint32_t crc4) {}
#endif


static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	display_pipe_crc_irq_handler(dev_priv, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     0, 0, 0, 0);
}

static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	display_pipe_crc_irq_handler(dev_priv, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
}

static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	uint32_t res1, res2;

	if (INTEL_GEN(dev_priv) >= 3)
		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
	else
		res1 = 0;

	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
	else
		res2 = 0;

	display_pipe_crc_irq_handler(dev_priv, pipe,
				     I915_READ(PIPE_CRC_RES_RED(pipe)),
				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
				     res1, res2);
}

/* The RPS events need forcewake, so we add them to a work queue and mask their
 * IMR bits until the work is done. Other interrupts can be processed without
 * the work queue.
 */
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	struct intel_rps *rps = &dev_priv->gt_pm.rps;

	if (pm_iir & dev_priv->pm_rps_events) {
		spin_lock(&dev_priv->irq_lock);
		gen6_mask_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
		if (rps->interrupts_enabled) {
			rps->pm_iir |= pm_iir & dev_priv->pm_rps_events;
			schedule_work(&rps->work);
		}
		spin_unlock(&dev_priv->irq_lock);
	}

	if (INTEL_GEN(dev_priv) >= 8)
		return;

	if (HAS_VEBOX(dev_priv)) {
		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
			notify_ring(dev_priv->engine[VECS]);

		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
	}
}

static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 gt_iir)
{
	if (gt_iir & GEN9_GUC_TO_HOST_INT_EVENT) {
		/* Sample the log buffer flush related bits & clear them out now
		 * itself from the message identity register to minimize the
		 * probability of losing a flush interrupt, when there are back
		 * to back flush interrupts.
		 * There can be a new flush interrupt, for different log buffer
		 * type (like for ISR), whilst Host is handling one (for DPC).
		 * Since same bit is used in message register for ISR & DPC, it
		 * could happen that GuC sets the bit for 2nd interrupt but Host
		 * clears out the bit on handling the 1st interrupt.
		 */
		u32 msg, flush;

		msg = I915_READ(SOFT_SCRATCH(15));
		flush = msg & (INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED |
			       INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER);
		if (flush) {
			/* Clear the message bits that are handled */
			I915_WRITE(SOFT_SCRATCH(15), msg & ~flush);

			/* Handle flush interrupt in bottom half */
			queue_work(dev_priv->guc.log.runtime.flush_wq,
				   &dev_priv->guc.log.runtime.flush_work);

			dev_priv->guc.log.flush_interrupt_count++;
		} else {
			/* Leaving the unhandled event bits set won't cause
			 * the interrupt to be re-triggered.
			 */
		}
	}
}

static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		I915_WRITE(PIPESTAT(pipe),
			   PIPESTAT_INT_STATUS_MASK |
			   PIPE_FIFO_UNDERRUN_STATUS);

		dev_priv->pipestat_irq_mask[pipe] = 0;
	}
}

static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
				  u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	int pipe;

	spin_lock(&dev_priv->irq_lock);

	if (!dev_priv->display_irqs_enabled) {
		spin_unlock(&dev_priv->irq_lock);
		return;
	}

	for_each_pipe(dev_priv, pipe) {
		i915_reg_t reg;
		u32 status_mask, enable_mask, iir_bit = 0;

		/*
		 * PIPESTAT bits get signalled even when the interrupt is
		 * disabled with the mask bits, and some of the status bits do
		 * not generate interrupts at all (like the underrun bit). Hence
		 * we need to be careful that we only handle what we want to
		 * handle.
		 */

		/* fifo underruns are filtered in the underrun handler. */
		status_mask = PIPE_FIFO_UNDERRUN_STATUS;

		switch (pipe) {
		case PIPE_A:
			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
			break;
		case PIPE_B:
			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
			break;
		case PIPE_C:
			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
			break;
		}
		if (iir & iir_bit)
			status_mask |= dev_priv->pipestat_irq_mask[pipe];

		if (!status_mask)
			continue;

		reg = PIPESTAT(pipe);
		pipe_stats[pipe] = I915_READ(reg) & status_mask;
		enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

		/*
		 * Clear the PIPE*STAT regs before the IIR
		 */
		if (pipe_stats[pipe])
			I915_WRITE(reg, enable_mask | pipe_stats[pipe]);
	}
	spin_unlock(&dev_priv->irq_lock);
}
*/ 1840 status_mask = PIPE_FIFO_UNDERRUN_STATUS; 1841 1842 switch (pipe) { 1843 case PIPE_A: 1844 iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT; 1845 break; 1846 case PIPE_B: 1847 iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; 1848 break; 1849 case PIPE_C: 1850 iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT; 1851 break; 1852 } 1853 if (iir & iir_bit) 1854 status_mask |= dev_priv->pipestat_irq_mask[pipe]; 1855 1856 if (!status_mask) 1857 continue; 1858 1859 reg = PIPESTAT(pipe); 1860 pipe_stats[pipe] = I915_READ(reg) & status_mask; 1861 enable_mask = i915_pipestat_enable_mask(dev_priv, pipe); 1862 1863 /* 1864 * Clear the PIPE*STAT regs before the IIR 1865 */ 1866 if (pipe_stats[pipe]) 1867 I915_WRITE(reg, enable_mask | pipe_stats[pipe]); 1868 } 1869 spin_unlock(&dev_priv->irq_lock); 1870 } 1871 1872 static void i8xx_pipestat_irq_handler(struct drm_i915_private *dev_priv, 1873 u16 iir, u32 pipe_stats[I915_MAX_PIPES]) 1874 { 1875 enum pipe pipe; 1876 1877 for_each_pipe(dev_priv, pipe) { 1878 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS) 1879 drm_handle_vblank(&dev_priv->drm, pipe); 1880 1881 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 1882 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 1883 1884 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 1885 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 1886 } 1887 } 1888 1889 static void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv, 1890 u32 iir, u32 pipe_stats[I915_MAX_PIPES]) 1891 { 1892 bool blc_event = false; 1893 enum pipe pipe; 1894 1895 for_each_pipe(dev_priv, pipe) { 1896 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS) 1897 drm_handle_vblank(&dev_priv->drm, pipe); 1898 1899 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 1900 blc_event = true; 1901 1902 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 1903 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 1904 1905 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 1906 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 1907 } 1908 1909 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 1910 intel_opregion_asle_intr(dev_priv); 1911 } 1912 1913 static void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv, 1914 u32 iir, u32 pipe_stats[I915_MAX_PIPES]) 1915 { 1916 bool blc_event = false; 1917 enum pipe pipe; 1918 1919 for_each_pipe(dev_priv, pipe) { 1920 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS) 1921 drm_handle_vblank(&dev_priv->drm, pipe); 1922 1923 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 1924 blc_event = true; 1925 1926 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 1927 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 1928 1929 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 1930 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 1931 } 1932 1933 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 1934 intel_opregion_asle_intr(dev_priv); 1935 1936 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 1937 gmbus_irq_handler(dev_priv); 1938 } 1939 1940 static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv, 1941 u32 pipe_stats[I915_MAX_PIPES]) 1942 { 1943 enum pipe pipe; 1944 1945 for_each_pipe(dev_priv, pipe) { 1946 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS) 1947 drm_handle_vblank(&dev_priv->drm, pipe); 1948 1949 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 1950 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 1951 1952 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 1953 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 1954 } 1955 1956 if (pipe_stats[0] & 
PIPE_GMBUS_INTERRUPT_STATUS) 1957 gmbus_irq_handler(dev_priv); 1958 } 1959 1960 static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv) 1961 { 1962 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); 1963 1964 if (hotplug_status) 1965 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); 1966 1967 return hotplug_status; 1968 } 1969 1970 static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv, 1971 u32 hotplug_status) 1972 { 1973 u32 pin_mask = 0, long_mask = 0; 1974 1975 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || 1976 IS_CHERRYVIEW(dev_priv)) { 1977 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X; 1978 1979 if (hotplug_trigger) { 1980 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 1981 hotplug_trigger, hotplug_trigger, 1982 hpd_status_g4x, 1983 i9xx_port_hotplug_long_detect); 1984 1985 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 1986 } 1987 1988 if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X) 1989 dp_aux_irq_handler(dev_priv); 1990 } else { 1991 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; 1992 1993 if (hotplug_trigger) { 1994 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 1995 hotplug_trigger, hotplug_trigger, 1996 hpd_status_i915, 1997 i9xx_port_hotplug_long_detect); 1998 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 1999 } 2000 } 2001 } 2002 2003 static irqreturn_t valleyview_irq_handler(int irq, void *arg) 2004 { 2005 struct drm_device *dev = arg; 2006 struct drm_i915_private *dev_priv = to_i915(dev); 2007 irqreturn_t ret = IRQ_NONE; 2008 2009 if (!intel_irqs_enabled(dev_priv)) 2010 return IRQ_NONE; 2011 2012 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2013 disable_rpm_wakeref_asserts(dev_priv); 2014 2015 do { 2016 u32 iir, gt_iir, pm_iir; 2017 u32 pipe_stats[I915_MAX_PIPES] = {}; 2018 u32 hotplug_status = 0; 2019 u32 ier = 0; 2020 2021 gt_iir = I915_READ(GTIIR); 2022 pm_iir = I915_READ(GEN6_PMIIR); 2023 iir = I915_READ(VLV_IIR); 2024 2025 if (gt_iir == 0 && pm_iir == 0 && iir == 0) 2026 break; 2027 2028 ret = IRQ_HANDLED; 2029 2030 /* 2031 * Theory on interrupt generation, based on empirical evidence: 2032 * 2033 * x = ((VLV_IIR & VLV_IER) || 2034 * (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) && 2035 * (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE))); 2036 * 2037 * A CPU interrupt will only be raised when 'x' has a 0->1 edge. 2038 * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to 2039 * guarantee the CPU interrupt will be raised again even if we 2040 * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR 2041 * bits this time around. 2042 */ 2043 I915_WRITE(VLV_MASTER_IER, 0); 2044 ier = I915_READ(VLV_IER); 2045 I915_WRITE(VLV_IER, 0); 2046 2047 if (gt_iir) 2048 I915_WRITE(GTIIR, gt_iir); 2049 if (pm_iir) 2050 I915_WRITE(GEN6_PMIIR, pm_iir); 2051 2052 if (iir & I915_DISPLAY_PORT_INTERRUPT) 2053 hotplug_status = i9xx_hpd_irq_ack(dev_priv); 2054 2055 /* Call regardless, as some status bits might not be 2056 * signalled in iir */ 2057 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats); 2058 2059 if (iir & (I915_LPE_PIPE_A_INTERRUPT | 2060 I915_LPE_PIPE_B_INTERRUPT)) 2061 intel_lpe_audio_irq_handler(dev_priv); 2062 2063 /* 2064 * VLV_IIR is single buffered, and reflects the level 2065 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last. 
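 * PIPESTAT and PORT_HOTPLUG_STAT have already been cleared above via
 * i9xx_pipestat_irq_ack() and i9xx_hpd_irq_ack(), so the level feeding
 * VLV_IIR should have dropped by now; clearing VLV_IIR any earlier would
 * let it immediately re-latch the still pending status.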
2066 */ 2067 if (iir) 2068 I915_WRITE(VLV_IIR, iir); 2069 2070 I915_WRITE(VLV_IER, ier); 2071 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); 2072 POSTING_READ(VLV_MASTER_IER); 2073 2074 if (gt_iir) 2075 snb_gt_irq_handler(dev_priv, gt_iir); 2076 if (pm_iir) 2077 gen6_rps_irq_handler(dev_priv, pm_iir); 2078 2079 if (hotplug_status) 2080 i9xx_hpd_irq_handler(dev_priv, hotplug_status); 2081 2082 valleyview_pipestat_irq_handler(dev_priv, pipe_stats); 2083 } while (0); 2084 2085 enable_rpm_wakeref_asserts(dev_priv); 2086 2087 return ret; 2088 } 2089 2090 static irqreturn_t cherryview_irq_handler(int irq, void *arg) 2091 { 2092 struct drm_device *dev = arg; 2093 struct drm_i915_private *dev_priv = to_i915(dev); 2094 irqreturn_t ret = IRQ_NONE; 2095 2096 if (!intel_irqs_enabled(dev_priv)) 2097 return IRQ_NONE; 2098 2099 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2100 disable_rpm_wakeref_asserts(dev_priv); 2101 2102 do { 2103 u32 master_ctl, iir; 2104 u32 pipe_stats[I915_MAX_PIPES] = {}; 2105 u32 hotplug_status = 0; 2106 u32 gt_iir[4]; 2107 u32 ier = 0; 2108 2109 master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL; 2110 iir = I915_READ(VLV_IIR); 2111 2112 if (master_ctl == 0 && iir == 0) 2113 break; 2114 2115 ret = IRQ_HANDLED; 2116 2117 /* 2118 * Theory on interrupt generation, based on empirical evidence: 2119 * 2120 * x = ((VLV_IIR & VLV_IER) || 2121 * ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) && 2122 * (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL))); 2123 * 2124 * A CPU interrupt will only be raised when 'x' has a 0->1 edge. 2125 * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to 2126 * guarantee the CPU interrupt will be raised again even if we 2127 * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL 2128 * bits this time around. 2129 */ 2130 I915_WRITE(GEN8_MASTER_IRQ, 0); 2131 ier = I915_READ(VLV_IER); 2132 I915_WRITE(VLV_IER, 0); 2133 2134 gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir); 2135 2136 if (iir & I915_DISPLAY_PORT_INTERRUPT) 2137 hotplug_status = i9xx_hpd_irq_ack(dev_priv); 2138 2139 /* Call regardless, as some status bits might not be 2140 * signalled in iir */ 2141 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats); 2142 2143 if (iir & (I915_LPE_PIPE_A_INTERRUPT | 2144 I915_LPE_PIPE_B_INTERRUPT | 2145 I915_LPE_PIPE_C_INTERRUPT)) 2146 intel_lpe_audio_irq_handler(dev_priv); 2147 2148 /* 2149 * VLV_IIR is single buffered, and reflects the level 2150 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last. 2151 */ 2152 if (iir) 2153 I915_WRITE(VLV_IIR, iir); 2154 2155 I915_WRITE(VLV_IER, ier); 2156 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 2157 POSTING_READ(GEN8_MASTER_IRQ); 2158 2159 gen8_gt_irq_handler(dev_priv, master_ctl, gt_iir); 2160 2161 if (hotplug_status) 2162 i9xx_hpd_irq_handler(dev_priv, hotplug_status); 2163 2164 valleyview_pipestat_irq_handler(dev_priv, pipe_stats); 2165 } while (0); 2166 2167 enable_rpm_wakeref_asserts(dev_priv); 2168 2169 return ret; 2170 } 2171 2172 static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv, 2173 u32 hotplug_trigger, 2174 const u32 hpd[HPD_NUM_PINS]) 2175 { 2176 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; 2177 2178 /* 2179 * Somehow the PCH doesn't seem to really ack the interrupt to the CPU 2180 * unless we touch the hotplug register, even if hotplug_trigger is 2181 * zero. Not acking leads to "The master control interrupt lied (SDE)!" 2182 * errors. 
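 * Hence the register is read back and written below even when
 * hotplug_trigger is zero; in that case the per-port status bits are
 * masked out of the value first so the ack does not also clear hotplug
 * status that has not been serviced yet.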
2183 */ 2184 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); 2185 if (!hotplug_trigger) { 2186 u32 mask = PORTA_HOTPLUG_STATUS_MASK | 2187 PORTD_HOTPLUG_STATUS_MASK | 2188 PORTC_HOTPLUG_STATUS_MASK | 2189 PORTB_HOTPLUG_STATUS_MASK; 2190 dig_hotplug_reg &= ~mask; 2191 } 2192 2193 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); 2194 if (!hotplug_trigger) 2195 return; 2196 2197 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger, 2198 dig_hotplug_reg, hpd, 2199 pch_port_hotplug_long_detect); 2200 2201 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2202 } 2203 2204 static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) 2205 { 2206 int pipe; 2207 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK; 2208 2209 ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ibx); 2210 2211 if (pch_iir & SDE_AUDIO_POWER_MASK) { 2212 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >> 2213 SDE_AUDIO_POWER_SHIFT); 2214 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", 2215 port_name(port)); 2216 } 2217 2218 if (pch_iir & SDE_AUX_MASK) 2219 dp_aux_irq_handler(dev_priv); 2220 2221 if (pch_iir & SDE_GMBUS) 2222 gmbus_irq_handler(dev_priv); 2223 2224 if (pch_iir & SDE_AUDIO_HDCP_MASK) 2225 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n"); 2226 2227 if (pch_iir & SDE_AUDIO_TRANS_MASK) 2228 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n"); 2229 2230 if (pch_iir & SDE_POISON) 2231 DRM_ERROR("PCH poison interrupt\n"); 2232 2233 if (pch_iir & SDE_FDI_MASK) 2234 for_each_pipe(dev_priv, pipe) 2235 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 2236 pipe_name(pipe), 2237 I915_READ(FDI_RX_IIR(pipe))); 2238 2239 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE)) 2240 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n"); 2241 2242 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR)) 2243 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n"); 2244 2245 if (pch_iir & SDE_TRANSA_FIFO_UNDER) 2246 intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A); 2247 2248 if (pch_iir & SDE_TRANSB_FIFO_UNDER) 2249 intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_B); 2250 } 2251 2252 static void ivb_err_int_handler(struct drm_i915_private *dev_priv) 2253 { 2254 u32 err_int = I915_READ(GEN7_ERR_INT); 2255 enum pipe pipe; 2256 2257 if (err_int & ERR_INT_POISON) 2258 DRM_ERROR("Poison interrupt\n"); 2259 2260 for_each_pipe(dev_priv, pipe) { 2261 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) 2262 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2263 2264 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) { 2265 if (IS_IVYBRIDGE(dev_priv)) 2266 ivb_pipe_crc_irq_handler(dev_priv, pipe); 2267 else 2268 hsw_pipe_crc_irq_handler(dev_priv, pipe); 2269 } 2270 } 2271 2272 I915_WRITE(GEN7_ERR_INT, err_int); 2273 } 2274 2275 static void cpt_serr_int_handler(struct drm_i915_private *dev_priv) 2276 { 2277 u32 serr_int = I915_READ(SERR_INT); 2278 enum pipe pipe; 2279 2280 if (serr_int & SERR_INT_POISON) 2281 DRM_ERROR("PCH poison interrupt\n"); 2282 2283 for_each_pipe(dev_priv, pipe) 2284 if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe)) 2285 intel_pch_fifo_underrun_irq_handler(dev_priv, pipe); 2286 2287 I915_WRITE(SERR_INT, serr_int); 2288 } 2289 2290 static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) 2291 { 2292 int pipe; 2293 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; 2294 2295 ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_cpt); 2296 2297 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) { 2298 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> 2299 
SDE_AUDIO_POWER_SHIFT_CPT); 2300 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n", 2301 port_name(port)); 2302 } 2303 2304 if (pch_iir & SDE_AUX_MASK_CPT) 2305 dp_aux_irq_handler(dev_priv); 2306 2307 if (pch_iir & SDE_GMBUS_CPT) 2308 gmbus_irq_handler(dev_priv); 2309 2310 if (pch_iir & SDE_AUDIO_CP_REQ_CPT) 2311 DRM_DEBUG_DRIVER("Audio CP request interrupt\n"); 2312 2313 if (pch_iir & SDE_AUDIO_CP_CHG_CPT) 2314 DRM_DEBUG_DRIVER("Audio CP change interrupt\n"); 2315 2316 if (pch_iir & SDE_FDI_MASK_CPT) 2317 for_each_pipe(dev_priv, pipe) 2318 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 2319 pipe_name(pipe), 2320 I915_READ(FDI_RX_IIR(pipe))); 2321 2322 if (pch_iir & SDE_ERROR_CPT) 2323 cpt_serr_int_handler(dev_priv); 2324 } 2325 2326 static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) 2327 { 2328 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT & 2329 ~SDE_PORTE_HOTPLUG_SPT; 2330 u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT; 2331 u32 pin_mask = 0, long_mask = 0; 2332 2333 if (hotplug_trigger) { 2334 u32 dig_hotplug_reg; 2335 2336 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); 2337 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); 2338 2339 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 2340 hotplug_trigger, dig_hotplug_reg, hpd_spt, 2341 spt_port_hotplug_long_detect); 2342 } 2343 2344 if (hotplug2_trigger) { 2345 u32 dig_hotplug_reg; 2346 2347 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2); 2348 I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg); 2349 2350 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 2351 hotplug2_trigger, dig_hotplug_reg, hpd_spt, 2352 spt_port_hotplug2_long_detect); 2353 } 2354 2355 if (pin_mask) 2356 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2357 2358 if (pch_iir & SDE_GMBUS_CPT) 2359 gmbus_irq_handler(dev_priv); 2360 } 2361 2362 static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv, 2363 u32 hotplug_trigger, 2364 const u32 hpd[HPD_NUM_PINS]) 2365 { 2366 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; 2367 2368 dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL); 2369 I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg); 2370 2371 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger, 2372 dig_hotplug_reg, hpd, 2373 ilk_port_hotplug_long_detect); 2374 2375 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2376 } 2377 2378 static void ilk_display_irq_handler(struct drm_i915_private *dev_priv, 2379 u32 de_iir) 2380 { 2381 enum pipe pipe; 2382 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG; 2383 2384 if (hotplug_trigger) 2385 ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ilk); 2386 2387 if (de_iir & DE_AUX_CHANNEL_A) 2388 dp_aux_irq_handler(dev_priv); 2389 2390 if (de_iir & DE_GSE) 2391 intel_opregion_asle_intr(dev_priv); 2392 2393 if (de_iir & DE_POISON) 2394 DRM_ERROR("Poison interrupt\n"); 2395 2396 for_each_pipe(dev_priv, pipe) { 2397 if (de_iir & DE_PIPE_VBLANK(pipe)) 2398 drm_handle_vblank(&dev_priv->drm, pipe); 2399 2400 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe)) 2401 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2402 2403 if (de_iir & DE_PIPE_CRC_DONE(pipe)) 2404 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 2405 } 2406 2407 /* check event from PCH */ 2408 if (de_iir & DE_PCH_EVENT) { 2409 u32 pch_iir = I915_READ(SDEIIR); 2410 2411 if (HAS_PCH_CPT(dev_priv)) 2412 cpt_irq_handler(dev_priv, pch_iir); 2413 else 2414 ibx_irq_handler(dev_priv, pch_iir); 2415 2416 /* should clear PCH hotplug event before clear CPU irq */ 2417 I915_WRITE(SDEIIR, pch_iir); 2418 } 2419 
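/* On Ironlake (gen5) DE_PCU_EVENT is used for PCU events such as GPU frequency change requests; ironlake_rps_change_irq_handler() re-evaluates the render P-state (DRPS) accordingly. */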
2420 if (IS_GEN5(dev_priv) && de_iir & DE_PCU_EVENT) 2421 ironlake_rps_change_irq_handler(dev_priv); 2422 } 2423 2424 static void ivb_display_irq_handler(struct drm_i915_private *dev_priv, 2425 u32 de_iir) 2426 { 2427 enum pipe pipe; 2428 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB; 2429 2430 if (hotplug_trigger) 2431 ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ivb); 2432 2433 if (de_iir & DE_ERR_INT_IVB) 2434 ivb_err_int_handler(dev_priv); 2435 2436 if (de_iir & DE_AUX_CHANNEL_A_IVB) 2437 dp_aux_irq_handler(dev_priv); 2438 2439 if (de_iir & DE_GSE_IVB) 2440 intel_opregion_asle_intr(dev_priv); 2441 2442 for_each_pipe(dev_priv, pipe) { 2443 if (de_iir & (DE_PIPE_VBLANK_IVB(pipe))) 2444 drm_handle_vblank(&dev_priv->drm, pipe); 2445 } 2446 2447 /* check event from PCH */ 2448 if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) { 2449 u32 pch_iir = I915_READ(SDEIIR); 2450 2451 cpt_irq_handler(dev_priv, pch_iir); 2452 2453 /* clear PCH hotplug event before clear CPU irq */ 2454 I915_WRITE(SDEIIR, pch_iir); 2455 } 2456 } 2457 2458 /* 2459 * To handle irqs with the minimum potential races with fresh interrupts, we: 2460 * 1 - Disable Master Interrupt Control. 2461 * 2 - Find the source(s) of the interrupt. 2462 * 3 - Clear the Interrupt Identity bits (IIR). 2463 * 4 - Process the interrupt(s) that had bits set in the IIRs. 2464 * 5 - Re-enable Master Interrupt Control. 2465 */ 2466 static irqreturn_t ironlake_irq_handler(int irq, void *arg) 2467 { 2468 struct drm_device *dev = arg; 2469 struct drm_i915_private *dev_priv = to_i915(dev); 2470 u32 de_iir, gt_iir, de_ier, sde_ier = 0; 2471 irqreturn_t ret = IRQ_NONE; 2472 2473 if (!intel_irqs_enabled(dev_priv)) 2474 return IRQ_NONE; 2475 2476 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2477 disable_rpm_wakeref_asserts(dev_priv); 2478 2479 /* disable master interrupt before clearing iir */ 2480 de_ier = I915_READ(DEIER); 2481 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); 2482 POSTING_READ(DEIER); 2483 2484 /* Disable south interrupts. We'll only write to SDEIIR once, so further 2485 * interrupts will be stored on its back queue, and then we'll be 2486 * able to process them after we restore SDEIER (as soon as we restore 2487 * it, we'll get an interrupt if SDEIIR still has something to process 2488 * due to its back queue).
*/ 2489 if (!HAS_PCH_NOP(dev_priv)) { 2490 sde_ier = I915_READ(SDEIER); 2491 I915_WRITE(SDEIER, 0); 2492 POSTING_READ(SDEIER); 2493 } 2494 2495 /* Find, clear, then process each source of interrupt */ 2496 2497 gt_iir = I915_READ(GTIIR); 2498 if (gt_iir) { 2499 I915_WRITE(GTIIR, gt_iir); 2500 ret = IRQ_HANDLED; 2501 if (INTEL_GEN(dev_priv) >= 6) 2502 snb_gt_irq_handler(dev_priv, gt_iir); 2503 else 2504 ilk_gt_irq_handler(dev_priv, gt_iir); 2505 } 2506 2507 de_iir = I915_READ(DEIIR); 2508 if (de_iir) { 2509 I915_WRITE(DEIIR, de_iir); 2510 ret = IRQ_HANDLED; 2511 if (INTEL_GEN(dev_priv) >= 7) 2512 ivb_display_irq_handler(dev_priv, de_iir); 2513 else 2514 ilk_display_irq_handler(dev_priv, de_iir); 2515 } 2516 2517 if (INTEL_GEN(dev_priv) >= 6) { 2518 u32 pm_iir = I915_READ(GEN6_PMIIR); 2519 if (pm_iir) { 2520 I915_WRITE(GEN6_PMIIR, pm_iir); 2521 ret = IRQ_HANDLED; 2522 gen6_rps_irq_handler(dev_priv, pm_iir); 2523 } 2524 } 2525 2526 I915_WRITE(DEIER, de_ier); 2527 POSTING_READ(DEIER); 2528 if (!HAS_PCH_NOP(dev_priv)) { 2529 I915_WRITE(SDEIER, sde_ier); 2530 POSTING_READ(SDEIER); 2531 } 2532 2533 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2534 enable_rpm_wakeref_asserts(dev_priv); 2535 2536 return ret; 2537 } 2538 2539 static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv, 2540 u32 hotplug_trigger, 2541 const u32 hpd[HPD_NUM_PINS]) 2542 { 2543 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; 2544 2545 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); 2546 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); 2547 2548 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger, 2549 dig_hotplug_reg, hpd, 2550 bxt_port_hotplug_long_detect); 2551 2552 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2553 } 2554 2555 static irqreturn_t 2556 gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) 2557 { 2558 irqreturn_t ret = IRQ_NONE; 2559 u32 iir; 2560 enum pipe pipe; 2561 2562 if (master_ctl & GEN8_DE_MISC_IRQ) { 2563 iir = I915_READ(GEN8_DE_MISC_IIR); 2564 if (iir) { 2565 I915_WRITE(GEN8_DE_MISC_IIR, iir); 2566 ret = IRQ_HANDLED; 2567 if (iir & GEN8_DE_MISC_GSE) 2568 intel_opregion_asle_intr(dev_priv); 2569 else 2570 DRM_ERROR("Unexpected DE Misc interrupt\n"); 2571 } 2572 else 2573 DRM_ERROR("The master control interrupt lied (DE MISC)!\n"); 2574 } 2575 2576 if (master_ctl & GEN8_DE_PORT_IRQ) { 2577 iir = I915_READ(GEN8_DE_PORT_IIR); 2578 if (iir) { 2579 u32 tmp_mask; 2580 bool found = false; 2581 2582 I915_WRITE(GEN8_DE_PORT_IIR, iir); 2583 ret = IRQ_HANDLED; 2584 2585 tmp_mask = GEN8_AUX_CHANNEL_A; 2586 if (INTEL_GEN(dev_priv) >= 9) 2587 tmp_mask |= GEN9_AUX_CHANNEL_B | 2588 GEN9_AUX_CHANNEL_C | 2589 GEN9_AUX_CHANNEL_D; 2590 2591 if (IS_CNL_WITH_PORT_F(dev_priv)) 2592 tmp_mask |= CNL_AUX_CHANNEL_F; 2593 2594 if (iir & tmp_mask) { 2595 dp_aux_irq_handler(dev_priv); 2596 found = true; 2597 } 2598 2599 if (IS_GEN9_LP(dev_priv)) { 2600 tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK; 2601 if (tmp_mask) { 2602 bxt_hpd_irq_handler(dev_priv, tmp_mask, 2603 hpd_bxt); 2604 found = true; 2605 } 2606 } else if (IS_BROADWELL(dev_priv)) { 2607 tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG; 2608 if (tmp_mask) { 2609 ilk_hpd_irq_handler(dev_priv, 2610 tmp_mask, hpd_bdw); 2611 found = true; 2612 } 2613 } 2614 2615 if (IS_GEN9_LP(dev_priv) && (iir & BXT_DE_PORT_GMBUS)) { 2616 gmbus_irq_handler(dev_priv); 2617 found = true; 2618 } 2619 2620 if (!found) 2621 DRM_ERROR("Unexpected DE Port interrupt\n"); 2622 } 2623 else 2624 DRM_ERROR("The master control 
interrupt lied (DE PORT)!\n"); 2625 } 2626 2627 for_each_pipe(dev_priv, pipe) { 2628 u32 fault_errors; 2629 2630 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe))) 2631 continue; 2632 2633 iir = I915_READ(GEN8_DE_PIPE_IIR(pipe)); 2634 if (!iir) { 2635 DRM_ERROR("The master control interrupt lied (DE PIPE)!\n"); 2636 continue; 2637 } 2638 2639 ret = IRQ_HANDLED; 2640 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir); 2641 2642 if (iir & GEN8_PIPE_VBLANK) 2643 drm_handle_vblank(&dev_priv->drm, pipe); 2644 2645 if (iir & GEN8_PIPE_CDCLK_CRC_DONE) 2646 hsw_pipe_crc_irq_handler(dev_priv, pipe); 2647 2648 if (iir & GEN8_PIPE_FIFO_UNDERRUN) 2649 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2650 2651 fault_errors = iir; 2652 if (INTEL_GEN(dev_priv) >= 9) 2653 fault_errors &= GEN9_DE_PIPE_IRQ_FAULT_ERRORS; 2654 else 2655 fault_errors &= GEN8_DE_PIPE_IRQ_FAULT_ERRORS; 2656 2657 if (fault_errors) 2658 DRM_ERROR("Fault errors on pipe %c: 0x%08x\n", 2659 pipe_name(pipe), 2660 fault_errors); 2661 } 2662 2663 if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) && 2664 master_ctl & GEN8_DE_PCH_IRQ) { 2665 /* 2666 * FIXME(BDW): Assume for now that the new interrupt handling 2667 * scheme also closed the SDE interrupt handling race we've seen 2668 * on older pch-split platforms. But this needs testing. 2669 */ 2670 iir = I915_READ(SDEIIR); 2671 if (iir) { 2672 I915_WRITE(SDEIIR, iir); 2673 ret = IRQ_HANDLED; 2674 2675 if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv) || 2676 HAS_PCH_CNP(dev_priv)) 2677 spt_irq_handler(dev_priv, iir); 2678 else 2679 cpt_irq_handler(dev_priv, iir); 2680 } else { 2681 /* 2682 * Like on previous PCH there seems to be something 2683 * fishy going on with forwarding PCH interrupts. 2684 */ 2685 DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n"); 2686 } 2687 } 2688 2689 return ret; 2690 } 2691 2692 static irqreturn_t gen8_irq_handler(int irq, void *arg) 2693 { 2694 struct drm_i915_private *dev_priv = to_i915(arg); 2695 u32 master_ctl; 2696 u32 gt_iir[4]; 2697 2698 if (!intel_irqs_enabled(dev_priv)) 2699 return IRQ_NONE; 2700 2701 master_ctl = I915_READ_FW(GEN8_MASTER_IRQ); 2702 master_ctl &= ~GEN8_MASTER_IRQ_CONTROL; 2703 if (!master_ctl) 2704 return IRQ_NONE; 2705 2706 I915_WRITE_FW(GEN8_MASTER_IRQ, 0); 2707 2708 /* Find, clear, then process each source of interrupt */ 2709 gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir); 2710 2711 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2712 if (master_ctl & ~GEN8_GT_IRQS) { 2713 disable_rpm_wakeref_asserts(dev_priv); 2714 gen8_de_irq_handler(dev_priv, master_ctl); 2715 enable_rpm_wakeref_asserts(dev_priv); 2716 } 2717 2718 I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 2719 2720 gen8_gt_irq_handler(dev_priv, master_ctl, gt_iir); 2721 2722 return IRQ_HANDLED; 2723 } 2724 2725 struct wedge_me { 2726 struct delayed_work work; 2727 struct drm_i915_private *i915; 2728 const char *name; 2729 }; 2730 2731 static void wedge_me(struct work_struct *work) 2732 { 2733 struct wedge_me *w = container_of(work, typeof(*w), work.work); 2734 2735 dev_err(w->i915->drm.dev, 2736 "%s timed out, cancelling all in-flight rendering.\n", 2737 w->name); 2738 i915_gem_set_wedged(w->i915); 2739 } 2740 2741 static void __init_wedge(struct wedge_me *w, 2742 struct drm_i915_private *i915, 2743 long timeout, 2744 const char *name) 2745 { 2746 w->i915 = i915; 2747 w->name = name; 2748 2749 INIT_DELAYED_WORK_ONSTACK(&w->work, wedge_me); 2750 schedule_delayed_work(&w->work, timeout); 2751 } 2752 2753 static void __fini_wedge(struct 
wedge_me *w) 2754 { 2755 cancel_delayed_work_sync(&w->work); 2756 destroy_delayed_work_on_stack(&w->work); 2757 w->i915 = NULL; 2758 } 2759 2760 #define i915_wedge_on_timeout(W, DEV, TIMEOUT) \ 2761 for (__init_wedge((W), (DEV), (TIMEOUT), __func__); \ 2762 (W)->i915; \ 2763 __fini_wedge((W))) 2764 2765 static __always_inline void 2766 gen11_cs_irq_handler(struct intel_engine_cs * const engine, const u32 iir) 2767 { 2768 gen8_cs_irq_handler(engine, iir, 0); 2769 } 2770 2771 static void 2772 gen11_gt_engine_irq_handler(struct drm_i915_private * const i915, 2773 const unsigned int bank, 2774 const unsigned int engine_n, 2775 const u16 iir) 2776 { 2777 struct intel_engine_cs ** const engine = i915->engine; 2778 2779 switch (bank) { 2780 case 0: 2781 switch (engine_n) { 2782 2783 case GEN11_RCS0: 2784 return gen11_cs_irq_handler(engine[RCS], iir); 2785 2786 case GEN11_BCS: 2787 return gen11_cs_irq_handler(engine[BCS], iir); 2788 } 2789 case 1: 2790 switch (engine_n) { 2791 2792 case GEN11_VCS(0): 2793 return gen11_cs_irq_handler(engine[_VCS(0)], iir); 2794 case GEN11_VCS(1): 2795 return gen11_cs_irq_handler(engine[_VCS(1)], iir); 2796 case GEN11_VCS(2): 2797 return gen11_cs_irq_handler(engine[_VCS(2)], iir); 2798 case GEN11_VCS(3): 2799 return gen11_cs_irq_handler(engine[_VCS(3)], iir); 2800 2801 case GEN11_VECS(0): 2802 return gen11_cs_irq_handler(engine[_VECS(0)], iir); 2803 case GEN11_VECS(1): 2804 return gen11_cs_irq_handler(engine[_VECS(1)], iir); 2805 } 2806 } 2807 } 2808 2809 static u32 2810 gen11_gt_engine_intr(struct drm_i915_private * const i915, 2811 const unsigned int bank, const unsigned int bit) 2812 { 2813 void __iomem * const regs = i915->regs; 2814 u32 timeout_ts; 2815 u32 ident; 2816 2817 raw_reg_write(regs, GEN11_IIR_REG_SELECTOR(bank), BIT(bit)); 2818 2819 /* 2820 * NB: Specs do not specify how long to spin wait, 2821 * so we do ~100us as an educated guess. 
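 * local_clock() returns nanoseconds, so the >> 10 below converts to
 * roughly microseconds (divide by 1024) and the + 100 gives the identity
 * register about 100us to flag its data as valid.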
2822 */ 2823 timeout_ts = (local_clock() >> 10) + 100; 2824 do { 2825 ident = raw_reg_read(regs, GEN11_INTR_IDENTITY_REG(bank)); 2826 } while (!(ident & GEN11_INTR_DATA_VALID) && 2827 !time_after32(local_clock() >> 10, timeout_ts)); 2828 2829 if (unlikely(!(ident & GEN11_INTR_DATA_VALID))) { 2830 DRM_ERROR("INTR_IDENTITY_REG%u:%u 0x%08x not valid!\n", 2831 bank, bit, ident); 2832 return 0; 2833 } 2834 2835 raw_reg_write(regs, GEN11_INTR_IDENTITY_REG(bank), 2836 GEN11_INTR_DATA_VALID); 2837 2838 return ident & GEN11_INTR_ENGINE_MASK; 2839 } 2840 2841 static void 2842 gen11_gt_irq_handler(struct drm_i915_private * const i915, 2843 const u32 master_ctl) 2844 { 2845 void __iomem * const regs = i915->regs; 2846 unsigned int bank; 2847 2848 for (bank = 0; bank < 2; bank++) { 2849 unsigned long intr_dw; 2850 unsigned int bit; 2851 2852 if (!(master_ctl & GEN11_GT_DW_IRQ(bank))) 2853 continue; 2854 2855 intr_dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank)); 2856 2857 if (unlikely(!intr_dw)) { 2858 DRM_ERROR("GT_INTR_DW%u blank!\n", bank); 2859 continue; 2860 } 2861 2862 for_each_set_bit(bit, &intr_dw, 32) { 2863 const u16 iir = gen11_gt_engine_intr(i915, bank, bit); 2864 2865 if (unlikely(!iir)) 2866 continue; 2867 2868 gen11_gt_engine_irq_handler(i915, bank, bit, iir); 2869 } 2870 2871 /* Clear must be after shared has been served for engine */ 2872 raw_reg_write(regs, GEN11_GT_INTR_DW(bank), intr_dw); 2873 } 2874 } 2875 2876 static irqreturn_t gen11_irq_handler(int irq, void *arg) 2877 { 2878 struct drm_i915_private * const i915 = to_i915(arg); 2879 void __iomem * const regs = i915->regs; 2880 u32 master_ctl; 2881 2882 if (!intel_irqs_enabled(i915)) 2883 return IRQ_NONE; 2884 2885 master_ctl = raw_reg_read(regs, GEN11_GFX_MSTR_IRQ); 2886 master_ctl &= ~GEN11_MASTER_IRQ; 2887 if (!master_ctl) 2888 return IRQ_NONE; 2889 2890 /* Disable interrupts. */ 2891 raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0); 2892 2893 /* Find, clear, then process each source of interrupt. */ 2894 gen11_gt_irq_handler(i915, master_ctl); 2895 2896 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2897 if (master_ctl & GEN11_DISPLAY_IRQ) { 2898 const u32 disp_ctl = raw_reg_read(regs, GEN11_DISPLAY_INT_CTL); 2899 2900 disable_rpm_wakeref_asserts(i915); 2901 /* 2902 * GEN11_DISPLAY_INT_CTL has same format as GEN8_MASTER_IRQ 2903 * for the display related bits. 2904 */ 2905 gen8_de_irq_handler(i915, disp_ctl); 2906 enable_rpm_wakeref_asserts(i915); 2907 } 2908 2909 /* Acknowledge and enable interrupts. */ 2910 raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ | master_ctl); 2911 2912 return IRQ_HANDLED; 2913 } 2914 2915 /** 2916 * i915_reset_device - do process context error handling work 2917 * @dev_priv: i915 device private 2918 * 2919 * Fire an error uevent so userspace can see that a hang or error 2920 * was detected. 
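 * The reset sequence below is guarded by a watchdog: if it does not
 * complete within 5 seconds (see i915_wedge_on_timeout()) the device is
 * declared wedged and all in-flight rendering is cancelled.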
2921 */ 2922 static void i915_reset_device(struct drm_i915_private *dev_priv) 2923 { 2924 struct kobject *kobj = &dev_priv->drm.primary->kdev->kobj; 2925 char *error_event[] = { I915_ERROR_UEVENT "=1", NULL }; 2926 char *reset_event[] = { I915_RESET_UEVENT "=1", NULL }; 2927 char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL }; 2928 struct wedge_me w; 2929 2930 kobject_uevent_env(kobj, KOBJ_CHANGE, error_event); 2931 2932 DRM_DEBUG_DRIVER("resetting chip\n"); 2933 kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event); 2934 2935 /* Use a watchdog to ensure that our reset completes */ 2936 i915_wedge_on_timeout(&w, dev_priv, 5*HZ) { 2937 intel_prepare_reset(dev_priv); 2938 2939 /* Signal that locked waiters should reset the GPU */ 2940 set_bit(I915_RESET_HANDOFF, &dev_priv->gpu_error.flags); 2941 wake_up_all(&dev_priv->gpu_error.wait_queue); 2942 2943 /* Wait for anyone holding the lock to wakeup, without 2944 * blocking indefinitely on struct_mutex. 2945 */ 2946 do { 2947 if (mutex_trylock(&dev_priv->drm.struct_mutex)) { 2948 i915_reset(dev_priv, 0); 2949 mutex_unlock(&dev_priv->drm.struct_mutex); 2950 } 2951 } while (wait_on_bit_timeout(&dev_priv->gpu_error.flags, 2952 I915_RESET_HANDOFF, 2953 TASK_UNINTERRUPTIBLE, 2954 1)); 2955 2956 intel_finish_reset(dev_priv); 2957 } 2958 2959 if (!test_bit(I915_WEDGED, &dev_priv->gpu_error.flags)) 2960 kobject_uevent_env(kobj, 2961 KOBJ_CHANGE, reset_done_event); 2962 } 2963 2964 static void i915_clear_error_registers(struct drm_i915_private *dev_priv) 2965 { 2966 u32 eir; 2967 2968 if (!IS_GEN2(dev_priv)) 2969 I915_WRITE(PGTBL_ER, I915_READ(PGTBL_ER)); 2970 2971 if (INTEL_GEN(dev_priv) < 4) 2972 I915_WRITE(IPEIR, I915_READ(IPEIR)); 2973 else 2974 I915_WRITE(IPEIR_I965, I915_READ(IPEIR_I965)); 2975 2976 I915_WRITE(EIR, I915_READ(EIR)); 2977 eir = I915_READ(EIR); 2978 if (eir) { 2979 /* 2980 * some errors might have become stuck, 2981 * mask them. 2982 */ 2983 DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masking\n", eir); 2984 I915_WRITE(EMR, I915_READ(EMR) | eir); 2985 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 2986 } 2987 } 2988 2989 /** 2990 * i915_handle_error - handle a gpu error 2991 * @dev_priv: i915 device private 2992 * @engine_mask: mask representing engines that are hung 2993 * @fmt: Error message format string 2994 * 2995 * Do some basic checking of register state at error time and 2996 * dump it to the syslog. Also call i915_capture_error_state() to make 2997 * sure we get a record and make it available in debugfs. Fire a uevent 2998 * so userspace knows something bad happened (should trigger collection 2999 * of a ring dump etc.). 3000 */ 3001 void i915_handle_error(struct drm_i915_private *dev_priv, 3002 u32 engine_mask, 3003 const char *fmt, ...) 3004 { 3005 struct intel_engine_cs *engine; 3006 unsigned int tmp; 3007 va_list args; 3008 char error_msg[80]; 3009 3010 va_start(args, fmt); 3011 vscnprintf(error_msg, sizeof(error_msg), fmt, args); 3012 va_end(args); 3013 3014 /* 3015 * In most cases it's guaranteed that we get here with an RPM 3016 * reference held, for example because there is a pending GPU 3017 * request that won't finish until the reset is done. This 3018 * isn't the case at least when we get here by doing a 3019 * simulated reset via debugfs, so get an RPM reference. 3020 */ 3021 intel_runtime_pm_get(dev_priv); 3022 3023 i915_capture_error_state(dev_priv, engine_mask, error_msg); 3024 i915_clear_error_registers(dev_priv); 3025 3026 /* 3027 * Try engine reset when available. 
We fall back to full reset if 3028 * single reset fails. 3029 */ 3030 if (intel_has_reset_engine(dev_priv)) { 3031 for_each_engine_masked(engine, dev_priv, engine_mask, tmp) { 3032 BUILD_BUG_ON(I915_RESET_MODESET >= I915_RESET_ENGINE); 3033 if (test_and_set_bit(I915_RESET_ENGINE + engine->id, 3034 &dev_priv->gpu_error.flags)) 3035 continue; 3036 3037 if (i915_reset_engine(engine, 0) == 0) 3038 engine_mask &= ~intel_engine_flag(engine); 3039 3040 clear_bit(I915_RESET_ENGINE + engine->id, 3041 &dev_priv->gpu_error.flags); 3042 wake_up_bit(&dev_priv->gpu_error.flags, 3043 I915_RESET_ENGINE + engine->id); 3044 } 3045 } 3046 3047 if (!engine_mask) 3048 goto out; 3049 3050 /* Full reset needs the mutex, stop any other user trying to do so. */ 3051 if (test_and_set_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags)) { 3052 wait_event(dev_priv->gpu_error.reset_queue, 3053 !test_bit(I915_RESET_BACKOFF, 3054 &dev_priv->gpu_error.flags)); 3055 goto out; 3056 } 3057 3058 /* Prevent any other reset-engine attempt. */ 3059 for_each_engine(engine, dev_priv, tmp) { 3060 while (test_and_set_bit(I915_RESET_ENGINE + engine->id, 3061 &dev_priv->gpu_error.flags)) 3062 wait_on_bit(&dev_priv->gpu_error.flags, 3063 I915_RESET_ENGINE + engine->id, 3064 TASK_UNINTERRUPTIBLE); 3065 } 3066 3067 i915_reset_device(dev_priv); 3068 3069 for_each_engine(engine, dev_priv, tmp) { 3070 clear_bit(I915_RESET_ENGINE + engine->id, 3071 &dev_priv->gpu_error.flags); 3072 } 3073 3074 clear_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags); 3075 wake_up_all(&dev_priv->gpu_error.reset_queue); 3076 3077 out: 3078 intel_runtime_pm_put(dev_priv); 3079 } 3080 3081 /* Called from drm generic code, passed 'crtc' which 3082 * we use as a pipe index 3083 */ 3084 static int i8xx_enable_vblank(struct drm_device *dev, unsigned int pipe) 3085 { 3086 struct drm_i915_private *dev_priv = to_i915(dev); 3087 unsigned long irqflags; 3088 3089 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3090 i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS); 3091 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3092 3093 return 0; 3094 } 3095 3096 static int i965_enable_vblank(struct drm_device *dev, unsigned int pipe) 3097 { 3098 struct drm_i915_private *dev_priv = to_i915(dev); 3099 unsigned long irqflags; 3100 3101 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3102 i915_enable_pipestat(dev_priv, pipe, 3103 PIPE_START_VBLANK_INTERRUPT_STATUS); 3104 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3105 3106 return 0; 3107 } 3108 3109 static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe) 3110 { 3111 struct drm_i915_private *dev_priv = to_i915(dev); 3112 unsigned long irqflags; 3113 uint32_t bit = INTEL_GEN(dev_priv) >= 7 ? 3114 DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe); 3115 3116 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3117 ilk_enable_display_irq(dev_priv, bit); 3118 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3119 3120 /* Even though there is no DMC, frame counter can get stuck when 3121 * PSR is active as no frames are generated. 
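 * drm_vblank_restore() estimates the vblanks missed while the interrupt
 * was off from the stored timestamps and updates the software counter
 * accordingly, so the count does not appear frozen to userspace.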
3122 */ 3123 if (HAS_PSR(dev_priv)) 3124 drm_vblank_restore(dev, pipe); 3125 3126 return 0; 3127 } 3128 3129 static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe) 3130 { 3131 struct drm_i915_private *dev_priv = to_i915(dev); 3132 unsigned long irqflags; 3133 3134 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3135 bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK); 3136 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3137 3138 /* Even if there is no DMC, frame counter can get stuck when 3139 * PSR is active as no frames are generated, so check only for PSR. 3140 */ 3141 if (HAS_PSR(dev_priv)) 3142 drm_vblank_restore(dev, pipe); 3143 3144 return 0; 3145 } 3146 3147 /* Called from drm generic code, passed 'crtc' which 3148 * we use as a pipe index 3149 */ 3150 static void i8xx_disable_vblank(struct drm_device *dev, unsigned int pipe) 3151 { 3152 struct drm_i915_private *dev_priv = to_i915(dev); 3153 unsigned long irqflags; 3154 3155 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3156 i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS); 3157 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3158 } 3159 3160 static void i965_disable_vblank(struct drm_device *dev, unsigned int pipe) 3161 { 3162 struct drm_i915_private *dev_priv = to_i915(dev); 3163 unsigned long irqflags; 3164 3165 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3166 i915_disable_pipestat(dev_priv, pipe, 3167 PIPE_START_VBLANK_INTERRUPT_STATUS); 3168 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3169 } 3170 3171 static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe) 3172 { 3173 struct drm_i915_private *dev_priv = to_i915(dev); 3174 unsigned long irqflags; 3175 uint32_t bit = INTEL_GEN(dev_priv) >= 7 ? 3176 DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe); 3177 3178 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3179 ilk_disable_display_irq(dev_priv, bit); 3180 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3181 } 3182 3183 static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe) 3184 { 3185 struct drm_i915_private *dev_priv = to_i915(dev); 3186 unsigned long irqflags; 3187 3188 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3189 bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK); 3190 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3191 } 3192 3193 static void ibx_irq_reset(struct drm_i915_private *dev_priv) 3194 { 3195 if (HAS_PCH_NOP(dev_priv)) 3196 return; 3197 3198 GEN3_IRQ_RESET(SDE); 3199 3200 if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv)) 3201 I915_WRITE(SERR_INT, 0xffffffff); 3202 } 3203 3204 /* 3205 * SDEIER is also touched by the interrupt handler to work around missed PCH 3206 * interrupts. Hence we can't update it after the interrupt handler is enabled - 3207 * instead we unconditionally enable all PCH interrupt sources here, but then 3208 * only unmask them as needed with SDEIMR. 3209 * 3210 * This function needs to be called before interrupts are enabled. 
3211 */ 3212 static void ibx_irq_pre_postinstall(struct drm_device *dev) 3213 { 3214 struct drm_i915_private *dev_priv = to_i915(dev); 3215 3216 if (HAS_PCH_NOP(dev_priv)) 3217 return; 3218 3219 WARN_ON(I915_READ(SDEIER) != 0); 3220 I915_WRITE(SDEIER, 0xffffffff); 3221 POSTING_READ(SDEIER); 3222 } 3223 3224 static void gen5_gt_irq_reset(struct drm_i915_private *dev_priv) 3225 { 3226 GEN3_IRQ_RESET(GT); 3227 if (INTEL_GEN(dev_priv) >= 6) 3228 GEN3_IRQ_RESET(GEN6_PM); 3229 } 3230 3231 static void vlv_display_irq_reset(struct drm_i915_private *dev_priv) 3232 { 3233 if (IS_CHERRYVIEW(dev_priv)) 3234 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV); 3235 else 3236 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK); 3237 3238 i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0); 3239 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3240 3241 i9xx_pipestat_irq_reset(dev_priv); 3242 3243 GEN3_IRQ_RESET(VLV_); 3244 dev_priv->irq_mask = ~0u; 3245 } 3246 3247 static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv) 3248 { 3249 u32 pipestat_mask; 3250 u32 enable_mask; 3251 enum pipe pipe; 3252 3253 pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS; 3254 3255 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); 3256 for_each_pipe(dev_priv, pipe) 3257 i915_enable_pipestat(dev_priv, pipe, pipestat_mask); 3258 3259 enable_mask = I915_DISPLAY_PORT_INTERRUPT | 3260 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3261 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3262 I915_LPE_PIPE_A_INTERRUPT | 3263 I915_LPE_PIPE_B_INTERRUPT; 3264 3265 if (IS_CHERRYVIEW(dev_priv)) 3266 enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT | 3267 I915_LPE_PIPE_C_INTERRUPT; 3268 3269 WARN_ON(dev_priv->irq_mask != ~0u); 3270 3271 dev_priv->irq_mask = ~enable_mask; 3272 3273 GEN3_IRQ_INIT(VLV_, dev_priv->irq_mask, enable_mask); 3274 } 3275 3276 /* drm_dma.h hooks 3277 */ 3278 static void ironlake_irq_reset(struct drm_device *dev) 3279 { 3280 struct drm_i915_private *dev_priv = to_i915(dev); 3281 3282 if (IS_GEN5(dev_priv)) 3283 I915_WRITE(HWSTAM, 0xffffffff); 3284 3285 GEN3_IRQ_RESET(DE); 3286 if (IS_GEN7(dev_priv)) 3287 I915_WRITE(GEN7_ERR_INT, 0xffffffff); 3288 3289 gen5_gt_irq_reset(dev_priv); 3290 3291 ibx_irq_reset(dev_priv); 3292 } 3293 3294 static void valleyview_irq_reset(struct drm_device *dev) 3295 { 3296 struct drm_i915_private *dev_priv = to_i915(dev); 3297 3298 I915_WRITE(VLV_MASTER_IER, 0); 3299 POSTING_READ(VLV_MASTER_IER); 3300 3301 gen5_gt_irq_reset(dev_priv); 3302 3303 spin_lock_irq(&dev_priv->irq_lock); 3304 if (dev_priv->display_irqs_enabled) 3305 vlv_display_irq_reset(dev_priv); 3306 spin_unlock_irq(&dev_priv->irq_lock); 3307 } 3308 3309 static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv) 3310 { 3311 GEN8_IRQ_RESET_NDX(GT, 0); 3312 GEN8_IRQ_RESET_NDX(GT, 1); 3313 GEN8_IRQ_RESET_NDX(GT, 2); 3314 GEN8_IRQ_RESET_NDX(GT, 3); 3315 } 3316 3317 static void gen8_irq_reset(struct drm_device *dev) 3318 { 3319 struct drm_i915_private *dev_priv = to_i915(dev); 3320 int pipe; 3321 3322 I915_WRITE(GEN8_MASTER_IRQ, 0); 3323 POSTING_READ(GEN8_MASTER_IRQ); 3324 3325 gen8_gt_irq_reset(dev_priv); 3326 3327 for_each_pipe(dev_priv, pipe) 3328 if (intel_display_power_is_enabled(dev_priv, 3329 POWER_DOMAIN_PIPE(pipe))) 3330 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe); 3331 3332 GEN3_IRQ_RESET(GEN8_DE_PORT_); 3333 GEN3_IRQ_RESET(GEN8_DE_MISC_); 3334 GEN3_IRQ_RESET(GEN8_PCU_); 3335 3336 if (HAS_PCH_SPLIT(dev_priv)) 3337 ibx_irq_reset(dev_priv); 3338 } 3339 3340 static void gen11_gt_irq_reset(struct 
drm_i915_private *dev_priv) 3341 { 3342 /* Disable RCS, BCS, VCS and VECS class engines. */ 3343 I915_WRITE(GEN11_RENDER_COPY_INTR_ENABLE, 0); 3344 I915_WRITE(GEN11_VCS_VECS_INTR_ENABLE, 0); 3345 3346 /* Restore masks irqs on RCS, BCS, VCS and VECS engines. */ 3347 I915_WRITE(GEN11_RCS0_RSVD_INTR_MASK, ~0); 3348 I915_WRITE(GEN11_BCS_RSVD_INTR_MASK, ~0); 3349 I915_WRITE(GEN11_VCS0_VCS1_INTR_MASK, ~0); 3350 I915_WRITE(GEN11_VCS2_VCS3_INTR_MASK, ~0); 3351 I915_WRITE(GEN11_VECS0_VECS1_INTR_MASK, ~0); 3352 } 3353 3354 static void gen11_irq_reset(struct drm_device *dev) 3355 { 3356 struct drm_i915_private *dev_priv = dev->dev_private; 3357 int pipe; 3358 3359 I915_WRITE(GEN11_GFX_MSTR_IRQ, 0); 3360 POSTING_READ(GEN11_GFX_MSTR_IRQ); 3361 3362 gen11_gt_irq_reset(dev_priv); 3363 3364 I915_WRITE(GEN11_DISPLAY_INT_CTL, 0); 3365 3366 for_each_pipe(dev_priv, pipe) 3367 if (intel_display_power_is_enabled(dev_priv, 3368 POWER_DOMAIN_PIPE(pipe))) 3369 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe); 3370 3371 GEN3_IRQ_RESET(GEN8_DE_PORT_); 3372 GEN3_IRQ_RESET(GEN8_DE_MISC_); 3373 GEN3_IRQ_RESET(GEN8_PCU_); 3374 } 3375 3376 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv, 3377 u8 pipe_mask) 3378 { 3379 uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN; 3380 enum pipe pipe; 3381 3382 spin_lock_irq(&dev_priv->irq_lock); 3383 3384 if (!intel_irqs_enabled(dev_priv)) { 3385 spin_unlock_irq(&dev_priv->irq_lock); 3386 return; 3387 } 3388 3389 for_each_pipe_masked(dev_priv, pipe, pipe_mask) 3390 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe, 3391 dev_priv->de_irq_mask[pipe], 3392 ~dev_priv->de_irq_mask[pipe] | extra_ier); 3393 3394 spin_unlock_irq(&dev_priv->irq_lock); 3395 } 3396 3397 void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv, 3398 u8 pipe_mask) 3399 { 3400 enum pipe pipe; 3401 3402 spin_lock_irq(&dev_priv->irq_lock); 3403 3404 if (!intel_irqs_enabled(dev_priv)) { 3405 spin_unlock_irq(&dev_priv->irq_lock); 3406 return; 3407 } 3408 3409 for_each_pipe_masked(dev_priv, pipe, pipe_mask) 3410 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe); 3411 3412 spin_unlock_irq(&dev_priv->irq_lock); 3413 3414 /* make sure we're done processing display irqs */ 3415 synchronize_irq(dev_priv->drm.irq); 3416 } 3417 3418 static void cherryview_irq_reset(struct drm_device *dev) 3419 { 3420 struct drm_i915_private *dev_priv = to_i915(dev); 3421 3422 I915_WRITE(GEN8_MASTER_IRQ, 0); 3423 POSTING_READ(GEN8_MASTER_IRQ); 3424 3425 gen8_gt_irq_reset(dev_priv); 3426 3427 GEN3_IRQ_RESET(GEN8_PCU_); 3428 3429 spin_lock_irq(&dev_priv->irq_lock); 3430 if (dev_priv->display_irqs_enabled) 3431 vlv_display_irq_reset(dev_priv); 3432 spin_unlock_irq(&dev_priv->irq_lock); 3433 } 3434 3435 static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv, 3436 const u32 hpd[HPD_NUM_PINS]) 3437 { 3438 struct intel_encoder *encoder; 3439 u32 enabled_irqs = 0; 3440 3441 for_each_intel_encoder(&dev_priv->drm, encoder) 3442 if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED) 3443 enabled_irqs |= hpd[encoder->hpd_pin]; 3444 3445 return enabled_irqs; 3446 } 3447 3448 static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv) 3449 { 3450 u32 hotplug; 3451 3452 /* 3453 * Enable digital hotplug on the PCH, and configure the DP short pulse 3454 * duration to 2ms (which is the minimum in the Display Port spec). 3455 * The pulse duration bits are reserved on LPT+. 
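 * Short pulses are latched separately from long ones by the hardware;
 * the irq handlers rely on that (via the *_long_detect() helpers) to
 * tell a sink-initiated IRQ_HPD from a real plug/unplug event.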
3456 */ 3457 hotplug = I915_READ(PCH_PORT_HOTPLUG); 3458 hotplug &= ~(PORTB_PULSE_DURATION_MASK | 3459 PORTC_PULSE_DURATION_MASK | 3460 PORTD_PULSE_DURATION_MASK); 3461 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms; 3462 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms; 3463 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms; 3464 /* 3465 * When CPU and PCH are on the same package, port A 3466 * HPD must be enabled in both north and south. 3467 */ 3468 if (HAS_PCH_LPT_LP(dev_priv)) 3469 hotplug |= PORTA_HOTPLUG_ENABLE; 3470 I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 3471 } 3472 3473 static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv) 3474 { 3475 u32 hotplug_irqs, enabled_irqs; 3476 3477 if (HAS_PCH_IBX(dev_priv)) { 3478 hotplug_irqs = SDE_HOTPLUG_MASK; 3479 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ibx); 3480 } else { 3481 hotplug_irqs = SDE_HOTPLUG_MASK_CPT; 3482 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_cpt); 3483 } 3484 3485 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 3486 3487 ibx_hpd_detection_setup(dev_priv); 3488 } 3489 3490 static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv) 3491 { 3492 u32 val, hotplug; 3493 3494 /* Display WA #1179 WaHardHangonHotPlug: cnp */ 3495 if (HAS_PCH_CNP(dev_priv)) { 3496 val = I915_READ(SOUTH_CHICKEN1); 3497 val &= ~CHASSIS_CLK_REQ_DURATION_MASK; 3498 val |= CHASSIS_CLK_REQ_DURATION(0xf); 3499 I915_WRITE(SOUTH_CHICKEN1, val); 3500 } 3501 3502 /* Enable digital hotplug on the PCH */ 3503 hotplug = I915_READ(PCH_PORT_HOTPLUG); 3504 hotplug |= PORTA_HOTPLUG_ENABLE | 3505 PORTB_HOTPLUG_ENABLE | 3506 PORTC_HOTPLUG_ENABLE | 3507 PORTD_HOTPLUG_ENABLE; 3508 I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 3509 3510 hotplug = I915_READ(PCH_PORT_HOTPLUG2); 3511 hotplug |= PORTE_HOTPLUG_ENABLE; 3512 I915_WRITE(PCH_PORT_HOTPLUG2, hotplug); 3513 } 3514 3515 static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv) 3516 { 3517 u32 hotplug_irqs, enabled_irqs; 3518 3519 hotplug_irqs = SDE_HOTPLUG_MASK_SPT; 3520 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_spt); 3521 3522 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 3523 3524 spt_hpd_detection_setup(dev_priv); 3525 } 3526 3527 static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv) 3528 { 3529 u32 hotplug; 3530 3531 /* 3532 * Enable digital hotplug on the CPU, and configure the DP short pulse 3533 * duration to 2ms (which is the minimum in the Display Port spec) 3534 * The pulse duration bits are reserved on HSW+. 
3535 */ 3536 hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL); 3537 hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK; 3538 hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE | 3539 DIGITAL_PORTA_PULSE_DURATION_2ms; 3540 I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug); 3541 } 3542 3543 static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv) 3544 { 3545 u32 hotplug_irqs, enabled_irqs; 3546 3547 if (INTEL_GEN(dev_priv) >= 8) { 3548 hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG; 3549 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bdw); 3550 3551 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs); 3552 } else if (INTEL_GEN(dev_priv) >= 7) { 3553 hotplug_irqs = DE_DP_A_HOTPLUG_IVB; 3554 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ivb); 3555 3556 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs); 3557 } else { 3558 hotplug_irqs = DE_DP_A_HOTPLUG; 3559 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ilk); 3560 3561 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs); 3562 } 3563 3564 ilk_hpd_detection_setup(dev_priv); 3565 3566 ibx_hpd_irq_setup(dev_priv); 3567 } 3568 3569 static void __bxt_hpd_detection_setup(struct drm_i915_private *dev_priv, 3570 u32 enabled_irqs) 3571 { 3572 u32 hotplug; 3573 3574 hotplug = I915_READ(PCH_PORT_HOTPLUG); 3575 hotplug |= PORTA_HOTPLUG_ENABLE | 3576 PORTB_HOTPLUG_ENABLE | 3577 PORTC_HOTPLUG_ENABLE; 3578 3579 DRM_DEBUG_KMS("Invert bit setting: hp_ctl:%x hp_port:%x\n", 3580 hotplug, enabled_irqs); 3581 hotplug &= ~BXT_DDI_HPD_INVERT_MASK; 3582 3583 /* 3584 * For BXT invert bit has to be set based on AOB design 3585 * for HPD detection logic, update it based on VBT fields. 3586 */ 3587 if ((enabled_irqs & BXT_DE_PORT_HP_DDIA) && 3588 intel_bios_is_port_hpd_inverted(dev_priv, PORT_A)) 3589 hotplug |= BXT_DDIA_HPD_INVERT; 3590 if ((enabled_irqs & BXT_DE_PORT_HP_DDIB) && 3591 intel_bios_is_port_hpd_inverted(dev_priv, PORT_B)) 3592 hotplug |= BXT_DDIB_HPD_INVERT; 3593 if ((enabled_irqs & BXT_DE_PORT_HP_DDIC) && 3594 intel_bios_is_port_hpd_inverted(dev_priv, PORT_C)) 3595 hotplug |= BXT_DDIC_HPD_INVERT; 3596 3597 I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 3598 } 3599 3600 static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv) 3601 { 3602 __bxt_hpd_detection_setup(dev_priv, BXT_DE_PORT_HOTPLUG_MASK); 3603 } 3604 3605 static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv) 3606 { 3607 u32 hotplug_irqs, enabled_irqs; 3608 3609 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bxt); 3610 hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK; 3611 3612 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs); 3613 3614 __bxt_hpd_detection_setup(dev_priv, enabled_irqs); 3615 } 3616 3617 static void ibx_irq_postinstall(struct drm_device *dev) 3618 { 3619 struct drm_i915_private *dev_priv = to_i915(dev); 3620 u32 mask; 3621 3622 if (HAS_PCH_NOP(dev_priv)) 3623 return; 3624 3625 if (HAS_PCH_IBX(dev_priv)) 3626 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON; 3627 else if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv)) 3628 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT; 3629 else 3630 mask = SDE_GMBUS_CPT; 3631 3632 gen3_assert_iir_is_zero(dev_priv, SDEIIR); 3633 I915_WRITE(SDEIMR, ~mask); 3634 3635 if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) || 3636 HAS_PCH_LPT(dev_priv)) 3637 ibx_hpd_detection_setup(dev_priv); 3638 else 3639 spt_hpd_detection_setup(dev_priv); 3640 } 3641 3642 static void gen5_gt_irq_postinstall(struct drm_device *dev) 3643 { 3644 struct drm_i915_private *dev_priv = to_i915(dev); 3645 u32 pm_irqs, gt_irqs; 3646 3647 pm_irqs = 
gt_irqs = 0; 3648 3649 dev_priv->gt_irq_mask = ~0; 3650 if (HAS_L3_DPF(dev_priv)) { 3651 /* L3 parity interrupt is always unmasked. */ 3652 dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev_priv); 3653 gt_irqs |= GT_PARITY_ERROR(dev_priv); 3654 } 3655 3656 gt_irqs |= GT_RENDER_USER_INTERRUPT; 3657 if (IS_GEN5(dev_priv)) { 3658 gt_irqs |= ILK_BSD_USER_INTERRUPT; 3659 } else { 3660 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT; 3661 } 3662 3663 GEN3_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs); 3664 3665 if (INTEL_GEN(dev_priv) >= 6) { 3666 /* 3667 * RPS interrupts will get enabled/disabled on demand when RPS 3668 * itself is enabled/disabled. 3669 */ 3670 if (HAS_VEBOX(dev_priv)) { 3671 pm_irqs |= PM_VEBOX_USER_INTERRUPT; 3672 dev_priv->pm_ier |= PM_VEBOX_USER_INTERRUPT; 3673 } 3674 3675 dev_priv->pm_imr = 0xffffffff; 3676 GEN3_IRQ_INIT(GEN6_PM, dev_priv->pm_imr, pm_irqs); 3677 } 3678 } 3679 3680 static int ironlake_irq_postinstall(struct drm_device *dev) 3681 { 3682 struct drm_i915_private *dev_priv = to_i915(dev); 3683 u32 display_mask, extra_mask; 3684 3685 if (INTEL_GEN(dev_priv) >= 7) { 3686 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | 3687 DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB); 3688 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB | 3689 DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB | 3690 DE_DP_A_HOTPLUG_IVB); 3691 } else { 3692 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | 3693 DE_AUX_CHANNEL_A | DE_PIPEB_CRC_DONE | 3694 DE_PIPEA_CRC_DONE | DE_POISON); 3695 extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT | 3696 DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN | 3697 DE_DP_A_HOTPLUG); 3698 } 3699 3700 dev_priv->irq_mask = ~display_mask; 3701 3702 ibx_irq_pre_postinstall(dev); 3703 3704 GEN3_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask); 3705 3706 gen5_gt_irq_postinstall(dev); 3707 3708 ilk_hpd_detection_setup(dev_priv); 3709 3710 ibx_irq_postinstall(dev); 3711 3712 if (IS_IRONLAKE_M(dev_priv)) { 3713 /* Enable PCU event interrupts 3714 * 3715 * spinlocking not required here for correctness since interrupt 3716 * setup is guaranteed to run in single-threaded context. But we 3717 * need it to make the assert_spin_locked happy. 
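 * (The display irq update helpers assert that irq_lock is held, which is
 * what the lock/unlock pair below satisfies.)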
*/ 3718 spin_lock_irq(&dev_priv->irq_lock); 3719 ilk_enable_display_irq(dev_priv, DE_PCU_EVENT); 3720 spin_unlock_irq(&dev_priv->irq_lock); 3721 } 3722 3723 return 0; 3724 } 3725 3726 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv) 3727 { 3728 lockdep_assert_held(&dev_priv->irq_lock); 3729 3730 if (dev_priv->display_irqs_enabled) 3731 return; 3732 3733 dev_priv->display_irqs_enabled = true; 3734 3735 if (intel_irqs_enabled(dev_priv)) { 3736 vlv_display_irq_reset(dev_priv); 3737 vlv_display_irq_postinstall(dev_priv); 3738 } 3739 } 3740 3741 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv) 3742 { 3743 lockdep_assert_held(&dev_priv->irq_lock); 3744 3745 if (!dev_priv->display_irqs_enabled) 3746 return; 3747 3748 dev_priv->display_irqs_enabled = false; 3749 3750 if (intel_irqs_enabled(dev_priv)) 3751 vlv_display_irq_reset(dev_priv); 3752 } 3753 3754 3755 static int valleyview_irq_postinstall(struct drm_device *dev) 3756 { 3757 struct drm_i915_private *dev_priv = to_i915(dev); 3758 3759 gen5_gt_irq_postinstall(dev); 3760 3761 spin_lock_irq(&dev_priv->irq_lock); 3762 if (dev_priv->display_irqs_enabled) 3763 vlv_display_irq_postinstall(dev_priv); 3764 spin_unlock_irq(&dev_priv->irq_lock); 3765 3766 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); 3767 POSTING_READ(VLV_MASTER_IER); 3768 3769 return 0; 3770 } 3771 3772 static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv) 3773 { 3774 /* These are interrupts we'll toggle with the ring mask register */ 3775 uint32_t gt_interrupts[] = { 3776 GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT | 3777 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT | 3778 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT | 3779 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT, 3780 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT | 3781 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT | 3782 GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT | 3783 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT, 3784 0, 3785 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT | 3786 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT 3787 }; 3788 3789 if (HAS_L3_DPF(dev_priv)) 3790 gt_interrupts[0] |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT; 3791 3792 dev_priv->pm_ier = 0x0; 3793 dev_priv->pm_imr = ~dev_priv->pm_ier; 3794 GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]); 3795 GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]); 3796 /* 3797 * RPS interrupts will get enabled/disabled on demand when RPS itself 3798 * is enabled/disabled. Same wil be the case for GuC interrupts. 
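 * GT bank 2 (the PM/RPS registers) therefore starts out with nothing
 * enabled and everything masked (pm_ier = 0, pm_imr = ~0); the RPS and
 * GuC enable paths unmask the individual bits later.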
	 */
	GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_imr, dev_priv->pm_ier);
	GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
}

static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
{
	uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
	uint32_t de_pipe_enables;
	u32 de_port_masked = GEN8_AUX_CHANNEL_A;
	u32 de_port_enables;
	u32 de_misc_masked = GEN8_DE_MISC_GSE;
	enum pipe pipe;

	if (INTEL_GEN(dev_priv) >= 9) {
		de_pipe_masked |= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
		de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
				  GEN9_AUX_CHANNEL_D;
		if (IS_GEN9_LP(dev_priv))
			de_port_masked |= BXT_DE_PORT_GMBUS;
	} else {
		de_pipe_masked |= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
	}

	if (IS_CNL_WITH_PORT_F(dev_priv))
		de_port_masked |= CNL_AUX_CHANNEL_F;

	de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
			  GEN8_PIPE_FIFO_UNDERRUN;

	de_port_enables = de_port_masked;
	if (IS_GEN9_LP(dev_priv))
		de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
	else if (IS_BROADWELL(dev_priv))
		de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;

	for_each_pipe(dev_priv, pipe) {
		dev_priv->de_irq_mask[pipe] = ~de_pipe_masked;

		if (intel_display_power_is_enabled(dev_priv,
						   POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
					  dev_priv->de_irq_mask[pipe],
					  de_pipe_enables);
	}

	GEN3_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
	GEN3_IRQ_INIT(GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);

	if (IS_GEN9_LP(dev_priv))
		bxt_hpd_detection_setup(dev_priv);
	else if (IS_BROADWELL(dev_priv))
		ilk_hpd_detection_setup(dev_priv);
}

static int gen8_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (HAS_PCH_SPLIT(dev_priv))
		ibx_irq_pre_postinstall(dev);

	gen8_gt_irq_postinstall(dev_priv);
	gen8_de_irq_postinstall(dev_priv);

	if (HAS_PCH_SPLIT(dev_priv))
		ibx_irq_postinstall(dev);

	I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
	POSTING_READ(GEN8_MASTER_IRQ);

	return 0;
}

static void gen11_gt_irq_postinstall(struct drm_i915_private *dev_priv)
{
	const u32 irqs = GT_RENDER_USER_INTERRUPT | GT_CONTEXT_SWITCH_INTERRUPT;

	BUILD_BUG_ON(irqs & 0xffff0000);

	/* Enable RCS, BCS, VCS and VECS class interrupts. */
	I915_WRITE(GEN11_RENDER_COPY_INTR_ENABLE, irqs << 16 | irqs);
	I915_WRITE(GEN11_VCS_VECS_INTR_ENABLE, irqs << 16 | irqs);

	/* Unmask irqs on RCS, BCS, VCS and VECS engines.
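	 *
	 * Each mask register below covers a pair of engine instances, one in
	 * the upper and one in the lower 16 bits; halves with no engine
	 * behind them are left masked.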
	 */
	I915_WRITE(GEN11_RCS0_RSVD_INTR_MASK, ~(irqs << 16));
	I915_WRITE(GEN11_BCS_RSVD_INTR_MASK, ~(irqs << 16));
	I915_WRITE(GEN11_VCS0_VCS1_INTR_MASK, ~(irqs | irqs << 16));
	I915_WRITE(GEN11_VCS2_VCS3_INTR_MASK, ~(irqs | irqs << 16));
	I915_WRITE(GEN11_VECS0_VECS1_INTR_MASK, ~(irqs | irqs << 16));

	dev_priv->pm_imr = 0xffffffff; /* TODO */
}

static int gen11_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	gen11_gt_irq_postinstall(dev_priv);
	gen8_de_irq_postinstall(dev_priv);

	I915_WRITE(GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);

	I915_WRITE(GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ);
	POSTING_READ(GEN11_GFX_MSTR_IRQ);

	return 0;
}

static int cherryview_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	gen8_gt_irq_postinstall(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_postinstall(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
	POSTING_READ(GEN8_MASTER_IRQ);

	return 0;
}

static void i8xx_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	i9xx_pipestat_irq_reset(dev_priv);

	I915_WRITE16(HWSTAM, 0xffff);

	GEN2_IRQ_RESET();
}

static int i8xx_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u16 enable_mask;

	I915_WRITE16(EMR, ~(I915_ERROR_PAGE_TABLE |
			    I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT);

	enable_mask =
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_USER_INTERRUPT;

	GEN2_IRQ_INIT(, dev_priv->irq_mask, enable_mask);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy.
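	 *
	 * (The CRC-done pipestat bits enabled below feed the pipe CRC
	 * capture support.)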
	 */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	return 0;
}

static irqreturn_t i8xx_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = to_i915(dev);
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	do {
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u16 iir;

		iir = I915_READ16(IIR);
		if (iir == 0)
			break;

		ret = IRQ_HANDLED;

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		I915_WRITE16(IIR, iir);

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev_priv->engine[RCS]);

		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);

		i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats);
	} while (0);

	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}

static void i915_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (I915_HAS_HOTPLUG(dev_priv)) {
		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	i9xx_pipestat_irq_reset(dev_priv);

	I915_WRITE(HWSTAM, 0xffffffff);

	GEN3_IRQ_RESET();
}

static int i915_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 enable_mask;

	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE |
			  I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_USER_INTERRUPT;

	if (I915_HAS_HOTPLUG(dev_priv)) {
		/* Enable in IER... */
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
		/* and unmask in IMR */
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
	}

	GEN3_IRQ_INIT(, dev_priv->irq_mask, enable_mask);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy.
	 */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	i915_enable_asle_pipestat(dev_priv);

	return 0;
}

static irqreturn_t i915_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = to_i915(dev);
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	do {
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 hotplug_status = 0;
		u32 iir;

		iir = I915_READ(IIR);
		if (iir == 0)
			break;

		ret = IRQ_HANDLED;

		if (I915_HAS_HOTPLUG(dev_priv) &&
		    iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		I915_WRITE(IIR, iir);

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev_priv->engine[RCS]);

		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		i915_pipestat_irq_handler(dev_priv, iir, pipe_stats);
	} while (0);

	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}

static void i965_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	i9xx_pipestat_irq_reset(dev_priv);

	I915_WRITE(HWSTAM, 0xffffffff);

	GEN3_IRQ_RESET();
}

static int i965_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 enable_mask;
	u32 error_mask;

	/*
	 * Enable some error detection, note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev_priv)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	I915_WRITE(EMR, error_mask);

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PORT_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PORT_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	if (IS_G4X(dev_priv))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	GEN3_IRQ_INIT(, dev_priv->irq_mask, enable_mask);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy.
	 */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	i915_enable_asle_pipestat(dev_priv);

	return 0;
}

static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_en;

	lockdep_assert_held(&dev_priv->irq_lock);

	/* Note HDMI and DP share hotplug bits */
	/* enable bits are the same for all generations */
	hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915);
	/*
	 * Programming the CRT detection parameters tends to generate a
	 * spurious hotplug event about three seconds later. So just do
	 * it once.
	 */
	if (IS_G4X(dev_priv))
		hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
	hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;

	/* Ignore TV since it's buggy */
	i915_hotplug_interrupt_update_locked(dev_priv,
					     HOTPLUG_INT_EN_MASK |
					     CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
					     CRT_HOTPLUG_ACTIVATION_PERIOD_64,
					     hotplug_en);
}

static irqreturn_t i965_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = to_i915(dev);
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	do {
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 hotplug_status = 0;
		u32 iir;

		iir = I915_READ(IIR);
		if (iir == 0)
			break;

		ret = IRQ_HANDLED;

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		I915_WRITE(IIR, iir);

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev_priv->engine[RCS]);

		if (iir & I915_BSD_USER_INTERRUPT)
			notify_ring(dev_priv->engine[VCS]);

		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		i965_pipestat_irq_handler(dev_priv, iir, pipe_stats);
	} while (0);

	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}

/**
 * intel_irq_init - initializes irq support
 * @dev_priv: i915 device instance
 *
 * This function initializes all the irq support including work items, timers
 * and all the vtables. It does not set up the interrupt itself though.
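 *
 * A rough sketch of the expected ordering over the life of the driver
 * (error handling omitted):
 *
 *	intel_irq_init(dev_priv);
 *	intel_irq_install(dev_priv);
 *	...
 *	intel_irq_uninstall(dev_priv);
 *	intel_irq_fini(dev_priv);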
 */
void intel_irq_init(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	int i;

	intel_hpd_init_work(dev_priv);

	INIT_WORK(&rps->work, gen6_pm_rps_work);

	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
	for (i = 0; i < MAX_L3_SLICES; ++i)
		dev_priv->l3_parity.remap_info[i] = NULL;

	if (HAS_GUC_SCHED(dev_priv))
		dev_priv->pm_guc_events = GEN9_GUC_TO_HOST_INT_EVENT;

	/* Let's track the enabled rps events */
	if (IS_VALLEYVIEW(dev_priv))
		/* WaGsvRC0ResidencyMethod:vlv */
		dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
	else
		dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;

	rps->pm_intrmsk_mbz = 0;

	/*
	 * SNB, IVB and HSW can hard hang, and VLV and CHV may hard hang, on a
	 * looping batchbuffer if GEN6_PM_UP_EI_EXPIRED is masked.
	 *
	 * TODO: verify if this can be reproduced on VLV,CHV.
	 */
	if (INTEL_GEN(dev_priv) <= 7)
		rps->pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED;

	if (INTEL_GEN(dev_priv) >= 8)
		rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;

	if (IS_GEN2(dev_priv)) {
		/* Gen2 doesn't have a hardware frame counter */
		dev->max_vblank_count = 0;
	} else if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
		dev->driver->get_vblank_counter = g4x_get_vblank_counter;
	} else {
		dev->driver->get_vblank_counter = i915_get_vblank_counter;
		dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
	}

	/*
	 * Opt out of the vblank disable timer on everything except gen2.
	 * Gen2 doesn't have a hardware frame counter and so depends on
	 * vblank interrupts to produce sane vblank sequence numbers.
	 */
	if (!IS_GEN2(dev_priv))
		dev->vblank_disable_immediate = true;

	/* Most platforms treat the display irq block as an always-on
	 * power domain. vlv/chv can disable it at runtime and need
	 * special care to avoid writing any of the display block registers
	 * outside of the power domain. We defer setting up the display irqs
	 * in this case to the runtime pm.
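	 *
	 * (That runtime setup happens through valleyview_enable_display_irqs()
	 * and valleyview_disable_display_irqs() above.)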
	 */
	dev_priv->display_irqs_enabled = true;
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		dev_priv->display_irqs_enabled = false;

	dev_priv->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD;

	dev->driver->get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos;
	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;

	if (IS_CHERRYVIEW(dev_priv)) {
		dev->driver->irq_handler = cherryview_irq_handler;
		dev->driver->irq_preinstall = cherryview_irq_reset;
		dev->driver->irq_postinstall = cherryview_irq_postinstall;
		dev->driver->irq_uninstall = cherryview_irq_reset;
		dev->driver->enable_vblank = i965_enable_vblank;
		dev->driver->disable_vblank = i965_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		dev->driver->irq_handler = valleyview_irq_handler;
		dev->driver->irq_preinstall = valleyview_irq_reset;
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
		dev->driver->irq_uninstall = valleyview_irq_reset;
		dev->driver->enable_vblank = i965_enable_vblank;
		dev->driver->disable_vblank = i965_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (INTEL_GEN(dev_priv) >= 11) {
		dev->driver->irq_handler = gen11_irq_handler;
		dev->driver->irq_preinstall = gen11_irq_reset;
		dev->driver->irq_postinstall = gen11_irq_postinstall;
		dev->driver->irq_uninstall = gen11_irq_reset;
		dev->driver->enable_vblank = gen8_enable_vblank;
		dev->driver->disable_vblank = gen8_disable_vblank;
		dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
	} else if (INTEL_GEN(dev_priv) >= 8) {
		dev->driver->irq_handler = gen8_irq_handler;
		dev->driver->irq_preinstall = gen8_irq_reset;
		dev->driver->irq_postinstall = gen8_irq_postinstall;
		dev->driver->irq_uninstall = gen8_irq_reset;
		dev->driver->enable_vblank = gen8_enable_vblank;
		dev->driver->disable_vblank = gen8_disable_vblank;
		if (IS_GEN9_LP(dev_priv))
			dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
		else if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv) ||
			 HAS_PCH_CNP(dev_priv))
			dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
		else
			dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		dev->driver->irq_handler = ironlake_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_reset;
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_reset;
		dev->driver->enable_vblank = ironlake_enable_vblank;
		dev->driver->disable_vblank = ironlake_disable_vblank;
		dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
	} else {
		if (IS_GEN2(dev_priv)) {
			dev->driver->irq_preinstall = i8xx_irq_reset;
			dev->driver->irq_postinstall = i8xx_irq_postinstall;
			dev->driver->irq_handler = i8xx_irq_handler;
			dev->driver->irq_uninstall = i8xx_irq_reset;
			dev->driver->enable_vblank = i8xx_enable_vblank;
			dev->driver->disable_vblank = i8xx_disable_vblank;
		} else if (IS_GEN3(dev_priv)) {
			dev->driver->irq_preinstall = i915_irq_reset;
			dev->driver->irq_postinstall = i915_irq_postinstall;
			dev->driver->irq_uninstall = i915_irq_reset;
			dev->driver->irq_handler = i915_irq_handler;
			dev->driver->enable_vblank = i8xx_enable_vblank;
			dev->driver->disable_vblank = i8xx_disable_vblank;
		} else {
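			/* gen4: i965 and g4x */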
			dev->driver->irq_preinstall = i965_irq_reset;
			dev->driver->irq_postinstall = i965_irq_postinstall;
			dev->driver->irq_uninstall = i965_irq_reset;
			dev->driver->irq_handler = i965_irq_handler;
			dev->driver->enable_vblank = i965_enable_vblank;
			dev->driver->disable_vblank = i965_disable_vblank;
		}
		if (I915_HAS_HOTPLUG(dev_priv))
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	}
}

/**
 * intel_irq_fini - deinitializes IRQ support
 * @i915: i915 device instance
 *
 * This function deinitializes all the IRQ support.
 */
void intel_irq_fini(struct drm_i915_private *i915)
{
	int i;

	for (i = 0; i < MAX_L3_SLICES; ++i)
		kfree(i915->l3_parity.remap_info[i]);
}

/**
 * intel_irq_install - enables the hardware interrupt
 * @dev_priv: i915 device instance
 *
 * This function enables the hardware interrupt handling, but leaves the
 * hotplug handling disabled. It is called after intel_irq_init().
 *
 * In the driver load and resume code we need working interrupts in a few
 * places but don't want to deal with the hassle of concurrent probe and
 * hotplug workers. Hence the split into a two-stage approach.
 */
int intel_irq_install(struct drm_i915_private *dev_priv)
{
	/*
	 * We enable some interrupt sources in our postinstall hooks, so mark
	 * interrupts as enabled _before_ actually enabling them to avoid
	 * special cases in our ordering checks.
	 */
	dev_priv->runtime_pm.irqs_enabled = true;

	return drm_irq_install(&dev_priv->drm, dev_priv->drm.pdev->irq);
}

/**
 * intel_irq_uninstall - finalizes all irq handling
 * @dev_priv: i915 device instance
 *
 * This stops interrupt and hotplug handling and unregisters and frees all
 * resources acquired in the init functions.
 */
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
{
	drm_irq_uninstall(&dev_priv->drm);
	intel_hpd_cancel_work(dev_priv);
	dev_priv->runtime_pm.irqs_enabled = false;
}

/**
 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
 * @dev_priv: i915 device instance
 *
 * This function is used to disable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->drm.driver->irq_uninstall(&dev_priv->drm);
	dev_priv->runtime_pm.irqs_enabled = false;
	synchronize_irq(dev_priv->drm.irq);
}

/**
 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
 * @dev_priv: i915 device instance
 *
 * This function is used to enable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->runtime_pm.irqs_enabled = true;
	dev_priv->drm.driver->irq_preinstall(&dev_priv->drm);
	dev_priv->drm.driver->irq_postinstall(&dev_priv->drm);
}
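
/*
 * A rough sketch of how the two runtime-pm helpers above pair up in the
 * suspend/resume paths (the actual callers live outside this file):
 *
 *	intel_runtime_pm_disable_interrupts(dev_priv);
 *	...device powered down and back up...
 *	intel_runtime_pm_enable_interrupts(dev_priv);
 */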