1 /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*- 2 */ 3 /* 4 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. 5 * All Rights Reserved. 6 * 7 * Permission is hereby granted, free of charge, to any person obtaining a 8 * copy of this software and associated documentation files (the 9 * "Software"), to deal in the Software without restriction, including 10 * without limitation the rights to use, copy, modify, merge, publish, 11 * distribute, sub license, and/or sell copies of the Software, and to 12 * permit persons to whom the Software is furnished to do so, subject to 13 * the following conditions: 14 * 15 * The above copyright notice and this permission notice (including the 16 * next paragraph) shall be included in all copies or substantial portions 17 * of the Software. 18 * 19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 22 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR 23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, 24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 26 * 27 */ 28 29 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 30 31 #include <linux/sysrq.h> 32 #include <linux/slab.h> 33 #include <linux/circ_buf.h> 34 #include <drm/drmP.h> 35 #include <drm/i915_drm.h> 36 #include "i915_drv.h" 37 #include "i915_trace.h" 38 #include "intel_drv.h" 39 40 /** 41 * DOC: interrupt handling 42 * 43 * These functions provide the basic support for enabling and disabling the 44 * interrupt handling support. There's a lot more functionality in i915_irq.c 45 * and related files, but that will be described in separate chapters. 
46 */ 47 48 static const u32 hpd_ilk[HPD_NUM_PINS] = { 49 [HPD_PORT_A] = DE_DP_A_HOTPLUG, 50 }; 51 52 static const u32 hpd_ivb[HPD_NUM_PINS] = { 53 [HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB, 54 }; 55 56 static const u32 hpd_bdw[HPD_NUM_PINS] = { 57 [HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG, 58 }; 59 60 static const u32 hpd_ibx[HPD_NUM_PINS] = { 61 [HPD_CRT] = SDE_CRT_HOTPLUG, 62 [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG, 63 [HPD_PORT_B] = SDE_PORTB_HOTPLUG, 64 [HPD_PORT_C] = SDE_PORTC_HOTPLUG, 65 [HPD_PORT_D] = SDE_PORTD_HOTPLUG 66 }; 67 68 static const u32 hpd_cpt[HPD_NUM_PINS] = { 69 [HPD_CRT] = SDE_CRT_HOTPLUG_CPT, 70 [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT, 71 [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT, 72 [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT, 73 [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT 74 }; 75 76 static const u32 hpd_spt[HPD_NUM_PINS] = { 77 [HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT, 78 [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT, 79 [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT, 80 [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT, 81 [HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT 82 }; 83 84 static const u32 hpd_mask_i915[HPD_NUM_PINS] = { 85 [HPD_CRT] = CRT_HOTPLUG_INT_EN, 86 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN, 87 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN, 88 [HPD_PORT_B] = PORTB_HOTPLUG_INT_EN, 89 [HPD_PORT_C] = PORTC_HOTPLUG_INT_EN, 90 [HPD_PORT_D] = PORTD_HOTPLUG_INT_EN 91 }; 92 93 static const u32 hpd_status_g4x[HPD_NUM_PINS] = { 94 [HPD_CRT] = CRT_HOTPLUG_INT_STATUS, 95 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X, 96 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X, 97 [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS, 98 [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS, 99 [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS 100 }; 101 102 static const u32 hpd_status_i915[HPD_NUM_PINS] = { 103 [HPD_CRT] = CRT_HOTPLUG_INT_STATUS, 104 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915, 105 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915, 106 [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS, 107 [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS, 108 [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS 109 }; 110 111 /* BXT hpd list */ 112 static const u32 hpd_bxt[HPD_NUM_PINS] = { 113 [HPD_PORT_A] = BXT_DE_PORT_HP_DDIA, 114 [HPD_PORT_B] = BXT_DE_PORT_HP_DDIB, 115 [HPD_PORT_C] = BXT_DE_PORT_HP_DDIC 116 }; 117 118 static const u32 hpd_gen11[HPD_NUM_PINS] = { 119 [HPD_PORT_C] = GEN11_TC1_HOTPLUG | GEN11_TBT1_HOTPLUG, 120 [HPD_PORT_D] = GEN11_TC2_HOTPLUG | GEN11_TBT2_HOTPLUG, 121 [HPD_PORT_E] = GEN11_TC3_HOTPLUG | GEN11_TBT3_HOTPLUG, 122 [HPD_PORT_F] = GEN11_TC4_HOTPLUG | GEN11_TBT4_HOTPLUG 123 }; 124 125 static const u32 hpd_icp[HPD_NUM_PINS] = { 126 [HPD_PORT_A] = SDE_DDIA_HOTPLUG_ICP, 127 [HPD_PORT_B] = SDE_DDIB_HOTPLUG_ICP, 128 [HPD_PORT_C] = SDE_TC1_HOTPLUG_ICP, 129 [HPD_PORT_D] = SDE_TC2_HOTPLUG_ICP, 130 [HPD_PORT_E] = SDE_TC3_HOTPLUG_ICP, 131 [HPD_PORT_F] = SDE_TC4_HOTPLUG_ICP 132 }; 133 134 /* IIR can theoretically queue up two events. Be paranoid. 
*/ 135 #define GEN8_IRQ_RESET_NDX(type, which) do { \ 136 I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \ 137 POSTING_READ(GEN8_##type##_IMR(which)); \ 138 I915_WRITE(GEN8_##type##_IER(which), 0); \ 139 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \ 140 POSTING_READ(GEN8_##type##_IIR(which)); \ 141 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \ 142 POSTING_READ(GEN8_##type##_IIR(which)); \ 143 } while (0) 144 145 #define GEN3_IRQ_RESET(type) do { \ 146 I915_WRITE(type##IMR, 0xffffffff); \ 147 POSTING_READ(type##IMR); \ 148 I915_WRITE(type##IER, 0); \ 149 I915_WRITE(type##IIR, 0xffffffff); \ 150 POSTING_READ(type##IIR); \ 151 I915_WRITE(type##IIR, 0xffffffff); \ 152 POSTING_READ(type##IIR); \ 153 } while (0) 154 155 #define GEN2_IRQ_RESET(type) do { \ 156 I915_WRITE16(type##IMR, 0xffff); \ 157 POSTING_READ16(type##IMR); \ 158 I915_WRITE16(type##IER, 0); \ 159 I915_WRITE16(type##IIR, 0xffff); \ 160 POSTING_READ16(type##IIR); \ 161 I915_WRITE16(type##IIR, 0xffff); \ 162 POSTING_READ16(type##IIR); \ 163 } while (0) 164 165 /* 166 * We should clear IMR at preinstall/uninstall, and just check at postinstall. 167 */ 168 static void gen3_assert_iir_is_zero(struct drm_i915_private *dev_priv, 169 i915_reg_t reg) 170 { 171 u32 val = I915_READ(reg); 172 173 if (val == 0) 174 return; 175 176 WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n", 177 i915_mmio_reg_offset(reg), val); 178 I915_WRITE(reg, 0xffffffff); 179 POSTING_READ(reg); 180 I915_WRITE(reg, 0xffffffff); 181 POSTING_READ(reg); 182 } 183 184 static void gen2_assert_iir_is_zero(struct drm_i915_private *dev_priv, 185 i915_reg_t reg) 186 { 187 u16 val = I915_READ16(reg); 188 189 if (val == 0) 190 return; 191 192 WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n", 193 i915_mmio_reg_offset(reg), val); 194 I915_WRITE16(reg, 0xffff); 195 POSTING_READ16(reg); 196 I915_WRITE16(reg, 0xffff); 197 POSTING_READ16(reg); 198 } 199 200 #define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \ 201 gen3_assert_iir_is_zero(dev_priv, GEN8_##type##_IIR(which)); \ 202 I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \ 203 I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \ 204 POSTING_READ(GEN8_##type##_IMR(which)); \ 205 } while (0) 206 207 #define GEN3_IRQ_INIT(type, imr_val, ier_val) do { \ 208 gen3_assert_iir_is_zero(dev_priv, type##IIR); \ 209 I915_WRITE(type##IER, (ier_val)); \ 210 I915_WRITE(type##IMR, (imr_val)); \ 211 POSTING_READ(type##IMR); \ 212 } while (0) 213 214 #define GEN2_IRQ_INIT(type, imr_val, ier_val) do { \ 215 gen2_assert_iir_is_zero(dev_priv, type##IIR); \ 216 I915_WRITE16(type##IER, (ier_val)); \ 217 I915_WRITE16(type##IMR, (imr_val)); \ 218 POSTING_READ16(type##IMR); \ 219 } while (0) 220 221 static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir); 222 static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir); 223 224 /* For display hotplug interrupt */ 225 static inline void 226 i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv, 227 uint32_t mask, 228 uint32_t bits) 229 { 230 uint32_t val; 231 232 lockdep_assert_held(&dev_priv->irq_lock); 233 WARN_ON(bits & ~mask); 234 235 val = I915_READ(PORT_HOTPLUG_EN); 236 val &= ~mask; 237 val |= bits; 238 I915_WRITE(PORT_HOTPLUG_EN, val); 239 } 240 241 /** 242 * i915_hotplug_interrupt_update - update hotplug interrupt enable 243 * @dev_priv: driver private 244 * @mask: bits to update 245 * @bits: bits to enable 246 * NOTE: the HPD enable bits are modified both inside and outside 247 * of an interrupt 
context. To avoid that read-modify-write cycles 248 * interfer, these bits are protected by a spinlock. Since this 249 * function is usually not called from a context where the lock is 250 * held already, this function acquires the lock itself. A non-locking 251 * version is also available. 252 */ 253 void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv, 254 uint32_t mask, 255 uint32_t bits) 256 { 257 spin_lock_irq(&dev_priv->irq_lock); 258 i915_hotplug_interrupt_update_locked(dev_priv, mask, bits); 259 spin_unlock_irq(&dev_priv->irq_lock); 260 } 261 262 static u32 263 gen11_gt_engine_identity(struct drm_i915_private * const i915, 264 const unsigned int bank, const unsigned int bit); 265 266 static bool gen11_reset_one_iir(struct drm_i915_private * const i915, 267 const unsigned int bank, 268 const unsigned int bit) 269 { 270 void __iomem * const regs = i915->regs; 271 u32 dw; 272 273 lockdep_assert_held(&i915->irq_lock); 274 275 dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank)); 276 if (dw & BIT(bit)) { 277 /* 278 * According to the BSpec, DW_IIR bits cannot be cleared without 279 * first servicing the Selector & Shared IIR registers. 280 */ 281 gen11_gt_engine_identity(i915, bank, bit); 282 283 /* 284 * We locked GT INT DW by reading it. If we want to (try 285 * to) recover from this succesfully, we need to clear 286 * our bit, otherwise we are locking the register for 287 * everybody. 288 */ 289 raw_reg_write(regs, GEN11_GT_INTR_DW(bank), BIT(bit)); 290 291 return true; 292 } 293 294 return false; 295 } 296 297 /** 298 * ilk_update_display_irq - update DEIMR 299 * @dev_priv: driver private 300 * @interrupt_mask: mask of interrupt bits to update 301 * @enabled_irq_mask: mask of interrupt bits to enable 302 */ 303 void ilk_update_display_irq(struct drm_i915_private *dev_priv, 304 uint32_t interrupt_mask, 305 uint32_t enabled_irq_mask) 306 { 307 uint32_t new_val; 308 309 lockdep_assert_held(&dev_priv->irq_lock); 310 311 WARN_ON(enabled_irq_mask & ~interrupt_mask); 312 313 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 314 return; 315 316 new_val = dev_priv->irq_mask; 317 new_val &= ~interrupt_mask; 318 new_val |= (~enabled_irq_mask & interrupt_mask); 319 320 if (new_val != dev_priv->irq_mask) { 321 dev_priv->irq_mask = new_val; 322 I915_WRITE(DEIMR, dev_priv->irq_mask); 323 POSTING_READ(DEIMR); 324 } 325 } 326 327 /** 328 * ilk_update_gt_irq - update GTIMR 329 * @dev_priv: driver private 330 * @interrupt_mask: mask of interrupt bits to update 331 * @enabled_irq_mask: mask of interrupt bits to enable 332 */ 333 static void ilk_update_gt_irq(struct drm_i915_private *dev_priv, 334 uint32_t interrupt_mask, 335 uint32_t enabled_irq_mask) 336 { 337 lockdep_assert_held(&dev_priv->irq_lock); 338 339 WARN_ON(enabled_irq_mask & ~interrupt_mask); 340 341 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 342 return; 343 344 dev_priv->gt_irq_mask &= ~interrupt_mask; 345 dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask); 346 I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 347 } 348 349 void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask) 350 { 351 ilk_update_gt_irq(dev_priv, mask, mask); 352 POSTING_READ_FW(GTIMR); 353 } 354 355 void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask) 356 { 357 ilk_update_gt_irq(dev_priv, mask, 0); 358 } 359 360 static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv) 361 { 362 WARN_ON_ONCE(INTEL_GEN(dev_priv) >= 11); 363 364 return INTEL_GEN(dev_priv) >= 8 ? 
GEN8_GT_IIR(2) : GEN6_PMIIR; 365 } 366 367 static i915_reg_t gen6_pm_imr(struct drm_i915_private *dev_priv) 368 { 369 if (INTEL_GEN(dev_priv) >= 11) 370 return GEN11_GPM_WGBOXPERF_INTR_MASK; 371 else if (INTEL_GEN(dev_priv) >= 8) 372 return GEN8_GT_IMR(2); 373 else 374 return GEN6_PMIMR; 375 } 376 377 static i915_reg_t gen6_pm_ier(struct drm_i915_private *dev_priv) 378 { 379 if (INTEL_GEN(dev_priv) >= 11) 380 return GEN11_GPM_WGBOXPERF_INTR_ENABLE; 381 else if (INTEL_GEN(dev_priv) >= 8) 382 return GEN8_GT_IER(2); 383 else 384 return GEN6_PMIER; 385 } 386 387 /** 388 * snb_update_pm_irq - update GEN6_PMIMR 389 * @dev_priv: driver private 390 * @interrupt_mask: mask of interrupt bits to update 391 * @enabled_irq_mask: mask of interrupt bits to enable 392 */ 393 static void snb_update_pm_irq(struct drm_i915_private *dev_priv, 394 uint32_t interrupt_mask, 395 uint32_t enabled_irq_mask) 396 { 397 uint32_t new_val; 398 399 WARN_ON(enabled_irq_mask & ~interrupt_mask); 400 401 lockdep_assert_held(&dev_priv->irq_lock); 402 403 new_val = dev_priv->pm_imr; 404 new_val &= ~interrupt_mask; 405 new_val |= (~enabled_irq_mask & interrupt_mask); 406 407 if (new_val != dev_priv->pm_imr) { 408 dev_priv->pm_imr = new_val; 409 I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_imr); 410 POSTING_READ(gen6_pm_imr(dev_priv)); 411 } 412 } 413 414 void gen6_unmask_pm_irq(struct drm_i915_private *dev_priv, u32 mask) 415 { 416 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 417 return; 418 419 snb_update_pm_irq(dev_priv, mask, mask); 420 } 421 422 static void __gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask) 423 { 424 snb_update_pm_irq(dev_priv, mask, 0); 425 } 426 427 void gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask) 428 { 429 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 430 return; 431 432 __gen6_mask_pm_irq(dev_priv, mask); 433 } 434 435 static void gen6_reset_pm_iir(struct drm_i915_private *dev_priv, u32 reset_mask) 436 { 437 i915_reg_t reg = gen6_pm_iir(dev_priv); 438 439 lockdep_assert_held(&dev_priv->irq_lock); 440 441 I915_WRITE(reg, reset_mask); 442 I915_WRITE(reg, reset_mask); 443 POSTING_READ(reg); 444 } 445 446 static void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, u32 enable_mask) 447 { 448 lockdep_assert_held(&dev_priv->irq_lock); 449 450 dev_priv->pm_ier |= enable_mask; 451 I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->pm_ier); 452 gen6_unmask_pm_irq(dev_priv, enable_mask); 453 /* unmask_pm_irq provides an implicit barrier (POSTING_READ) */ 454 } 455 456 static void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, u32 disable_mask) 457 { 458 lockdep_assert_held(&dev_priv->irq_lock); 459 460 dev_priv->pm_ier &= ~disable_mask; 461 __gen6_mask_pm_irq(dev_priv, disable_mask); 462 I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->pm_ier); 463 /* though a barrier is missing here, but don't really need a one */ 464 } 465 466 void gen11_reset_rps_interrupts(struct drm_i915_private *dev_priv) 467 { 468 spin_lock_irq(&dev_priv->irq_lock); 469 470 while (gen11_reset_one_iir(dev_priv, 0, GEN11_GTPM)) 471 ; 472 473 dev_priv->gt_pm.rps.pm_iir = 0; 474 475 spin_unlock_irq(&dev_priv->irq_lock); 476 } 477 478 void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv) 479 { 480 spin_lock_irq(&dev_priv->irq_lock); 481 gen6_reset_pm_iir(dev_priv, GEN6_PM_RPS_EVENTS); 482 dev_priv->gt_pm.rps.pm_iir = 0; 483 spin_unlock_irq(&dev_priv->irq_lock); 484 } 485 486 void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv) 487 { 488 struct intel_rps *rps = &dev_priv->gt_pm.rps; 489 
490 if (READ_ONCE(rps->interrupts_enabled)) 491 return; 492 493 spin_lock_irq(&dev_priv->irq_lock); 494 WARN_ON_ONCE(rps->pm_iir); 495 496 if (INTEL_GEN(dev_priv) >= 11) 497 WARN_ON_ONCE(gen11_reset_one_iir(dev_priv, 0, GEN11_GTPM)); 498 else 499 WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events); 500 501 rps->interrupts_enabled = true; 502 gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events); 503 504 spin_unlock_irq(&dev_priv->irq_lock); 505 } 506 507 void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv) 508 { 509 struct intel_rps *rps = &dev_priv->gt_pm.rps; 510 511 if (!READ_ONCE(rps->interrupts_enabled)) 512 return; 513 514 spin_lock_irq(&dev_priv->irq_lock); 515 rps->interrupts_enabled = false; 516 517 I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0u)); 518 519 gen6_disable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS); 520 521 spin_unlock_irq(&dev_priv->irq_lock); 522 synchronize_irq(dev_priv->drm.irq); 523 524 /* Now that we will not be generating any more work, flush any 525 * outstanding tasks. As we are called on the RPS idle path, 526 * we will reset the GPU to minimum frequencies, so the current 527 * state of the worker can be discarded. 528 */ 529 cancel_work_sync(&rps->work); 530 if (INTEL_GEN(dev_priv) >= 11) 531 gen11_reset_rps_interrupts(dev_priv); 532 else 533 gen6_reset_rps_interrupts(dev_priv); 534 } 535 536 void gen9_reset_guc_interrupts(struct drm_i915_private *dev_priv) 537 { 538 assert_rpm_wakelock_held(dev_priv); 539 540 spin_lock_irq(&dev_priv->irq_lock); 541 gen6_reset_pm_iir(dev_priv, dev_priv->pm_guc_events); 542 spin_unlock_irq(&dev_priv->irq_lock); 543 } 544 545 void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv) 546 { 547 assert_rpm_wakelock_held(dev_priv); 548 549 spin_lock_irq(&dev_priv->irq_lock); 550 if (!dev_priv->guc.interrupts_enabled) { 551 WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & 552 dev_priv->pm_guc_events); 553 dev_priv->guc.interrupts_enabled = true; 554 gen6_enable_pm_irq(dev_priv, dev_priv->pm_guc_events); 555 } 556 spin_unlock_irq(&dev_priv->irq_lock); 557 } 558 559 void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv) 560 { 561 assert_rpm_wakelock_held(dev_priv); 562 563 spin_lock_irq(&dev_priv->irq_lock); 564 dev_priv->guc.interrupts_enabled = false; 565 566 gen6_disable_pm_irq(dev_priv, dev_priv->pm_guc_events); 567 568 spin_unlock_irq(&dev_priv->irq_lock); 569 synchronize_irq(dev_priv->drm.irq); 570 571 gen9_reset_guc_interrupts(dev_priv); 572 } 573 574 /** 575 * bdw_update_port_irq - update DE port interrupt 576 * @dev_priv: driver private 577 * @interrupt_mask: mask of interrupt bits to update 578 * @enabled_irq_mask: mask of interrupt bits to enable 579 */ 580 static void bdw_update_port_irq(struct drm_i915_private *dev_priv, 581 uint32_t interrupt_mask, 582 uint32_t enabled_irq_mask) 583 { 584 uint32_t new_val; 585 uint32_t old_val; 586 587 lockdep_assert_held(&dev_priv->irq_lock); 588 589 WARN_ON(enabled_irq_mask & ~interrupt_mask); 590 591 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 592 return; 593 594 old_val = I915_READ(GEN8_DE_PORT_IMR); 595 596 new_val = old_val; 597 new_val &= ~interrupt_mask; 598 new_val |= (~enabled_irq_mask & interrupt_mask); 599 600 if (new_val != old_val) { 601 I915_WRITE(GEN8_DE_PORT_IMR, new_val); 602 POSTING_READ(GEN8_DE_PORT_IMR); 603 } 604 } 605 606 /** 607 * bdw_update_pipe_irq - update DE pipe interrupt 608 * @dev_priv: driver private 609 * @pipe: pipe whose interrupt to update 610 * @interrupt_mask: mask of interrupt 
bits to update 611 * @enabled_irq_mask: mask of interrupt bits to enable 612 */ 613 void bdw_update_pipe_irq(struct drm_i915_private *dev_priv, 614 enum pipe pipe, 615 uint32_t interrupt_mask, 616 uint32_t enabled_irq_mask) 617 { 618 uint32_t new_val; 619 620 lockdep_assert_held(&dev_priv->irq_lock); 621 622 WARN_ON(enabled_irq_mask & ~interrupt_mask); 623 624 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 625 return; 626 627 new_val = dev_priv->de_irq_mask[pipe]; 628 new_val &= ~interrupt_mask; 629 new_val |= (~enabled_irq_mask & interrupt_mask); 630 631 if (new_val != dev_priv->de_irq_mask[pipe]) { 632 dev_priv->de_irq_mask[pipe] = new_val; 633 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); 634 POSTING_READ(GEN8_DE_PIPE_IMR(pipe)); 635 } 636 } 637 638 /** 639 * ibx_display_interrupt_update - update SDEIMR 640 * @dev_priv: driver private 641 * @interrupt_mask: mask of interrupt bits to update 642 * @enabled_irq_mask: mask of interrupt bits to enable 643 */ 644 void ibx_display_interrupt_update(struct drm_i915_private *dev_priv, 645 uint32_t interrupt_mask, 646 uint32_t enabled_irq_mask) 647 { 648 uint32_t sdeimr = I915_READ(SDEIMR); 649 sdeimr &= ~interrupt_mask; 650 sdeimr |= (~enabled_irq_mask & interrupt_mask); 651 652 WARN_ON(enabled_irq_mask & ~interrupt_mask); 653 654 lockdep_assert_held(&dev_priv->irq_lock); 655 656 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 657 return; 658 659 I915_WRITE(SDEIMR, sdeimr); 660 POSTING_READ(SDEIMR); 661 } 662 663 u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv, 664 enum pipe pipe) 665 { 666 u32 status_mask = dev_priv->pipestat_irq_mask[pipe]; 667 u32 enable_mask = status_mask << 16; 668 669 lockdep_assert_held(&dev_priv->irq_lock); 670 671 if (INTEL_GEN(dev_priv) < 5) 672 goto out; 673 674 /* 675 * On pipe A we don't support the PSR interrupt yet, 676 * on pipe B and C the same bit MBZ. 677 */ 678 if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV)) 679 return 0; 680 /* 681 * On pipe B and C we don't support the PSR interrupt yet, on pipe 682 * A the same bit is for perf counters which we don't use either. 
683 */ 684 if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV)) 685 return 0; 686 687 enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS | 688 SPRITE0_FLIP_DONE_INT_EN_VLV | 689 SPRITE1_FLIP_DONE_INT_EN_VLV); 690 if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV) 691 enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV; 692 if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV) 693 enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV; 694 695 out: 696 WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK || 697 status_mask & ~PIPESTAT_INT_STATUS_MASK, 698 "pipe %c: enable_mask=0x%x, status_mask=0x%x\n", 699 pipe_name(pipe), enable_mask, status_mask); 700 701 return enable_mask; 702 } 703 704 void i915_enable_pipestat(struct drm_i915_private *dev_priv, 705 enum pipe pipe, u32 status_mask) 706 { 707 i915_reg_t reg = PIPESTAT(pipe); 708 u32 enable_mask; 709 710 WARN_ONCE(status_mask & ~PIPESTAT_INT_STATUS_MASK, 711 "pipe %c: status_mask=0x%x\n", 712 pipe_name(pipe), status_mask); 713 714 lockdep_assert_held(&dev_priv->irq_lock); 715 WARN_ON(!intel_irqs_enabled(dev_priv)); 716 717 if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == status_mask) 718 return; 719 720 dev_priv->pipestat_irq_mask[pipe] |= status_mask; 721 enable_mask = i915_pipestat_enable_mask(dev_priv, pipe); 722 723 I915_WRITE(reg, enable_mask | status_mask); 724 POSTING_READ(reg); 725 } 726 727 void i915_disable_pipestat(struct drm_i915_private *dev_priv, 728 enum pipe pipe, u32 status_mask) 729 { 730 i915_reg_t reg = PIPESTAT(pipe); 731 u32 enable_mask; 732 733 WARN_ONCE(status_mask & ~PIPESTAT_INT_STATUS_MASK, 734 "pipe %c: status_mask=0x%x\n", 735 pipe_name(pipe), status_mask); 736 737 lockdep_assert_held(&dev_priv->irq_lock); 738 WARN_ON(!intel_irqs_enabled(dev_priv)); 739 740 if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == 0) 741 return; 742 743 dev_priv->pipestat_irq_mask[pipe] &= ~status_mask; 744 enable_mask = i915_pipestat_enable_mask(dev_priv, pipe); 745 746 I915_WRITE(reg, enable_mask | status_mask); 747 POSTING_READ(reg); 748 } 749 750 /** 751 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion 752 * @dev_priv: i915 device private 753 */ 754 static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv) 755 { 756 if (!dev_priv->opregion.asle || !IS_MOBILE(dev_priv)) 757 return; 758 759 spin_lock_irq(&dev_priv->irq_lock); 760 761 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS); 762 if (INTEL_GEN(dev_priv) >= 4) 763 i915_enable_pipestat(dev_priv, PIPE_A, 764 PIPE_LEGACY_BLC_EVENT_STATUS); 765 766 spin_unlock_irq(&dev_priv->irq_lock); 767 } 768 769 /* 770 * This timing diagram depicts the video signal in and 771 * around the vertical blanking period. 772 * 773 * Assumptions about the fictitious mode used in this example: 774 * vblank_start >= 3 775 * vsync_start = vblank_start + 1 776 * vsync_end = vblank_start + 2 777 * vtotal = vblank_start + 3 778 * 779 * start of vblank: 780 * latch double buffered registers 781 * increment frame counter (ctg+) 782 * generate start of vblank interrupt (gen4+) 783 * | 784 * | frame start: 785 * | generate frame start interrupt (aka. vblank interrupt) (gmch) 786 * | may be shifted forward 1-3 extra lines via PIPECONF 787 * | | 788 * | | start of vsync: 789 * | | generate vsync interrupt 790 * | | | 791 * ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx 792 * . \hs/ . \hs/ \hs/ \hs/ . 
\hs/ 793 * ----va---> <-----------------vb--------------------> <--------va------------- 794 * | | <----vs-----> | 795 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2) 796 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+) 797 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi) 798 * | | | 799 * last visible pixel first visible pixel 800 * | increment frame counter (gen3/4) 801 * pixel counter = vblank_start * htotal pixel counter = 0 (gen3/4) 802 * 803 * x = horizontal active 804 * _ = horizontal blanking 805 * hs = horizontal sync 806 * va = vertical active 807 * vb = vertical blanking 808 * vs = vertical sync 809 * vbs = vblank_start (number) 810 * 811 * Summary: 812 * - most events happen at the start of horizontal sync 813 * - frame start happens at the start of horizontal blank, 1-4 lines 814 * (depending on PIPECONF settings) after the start of vblank 815 * - gen3/4 pixel and frame counter are synchronized with the start 816 * of horizontal active on the first line of vertical active 817 */ 818 819 /* Called from drm generic code, passed a 'crtc', which 820 * we use as a pipe index 821 */ 822 static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe) 823 { 824 struct drm_i915_private *dev_priv = to_i915(dev); 825 i915_reg_t high_frame, low_frame; 826 u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal; 827 const struct drm_display_mode *mode = &dev->vblank[pipe].hwmode; 828 unsigned long irqflags; 829 830 htotal = mode->crtc_htotal; 831 hsync_start = mode->crtc_hsync_start; 832 vbl_start = mode->crtc_vblank_start; 833 if (mode->flags & DRM_MODE_FLAG_INTERLACE) 834 vbl_start = DIV_ROUND_UP(vbl_start, 2); 835 836 /* Convert to pixel count */ 837 vbl_start *= htotal; 838 839 /* Start of vblank event occurs at start of hsync */ 840 vbl_start -= htotal - hsync_start; 841 842 high_frame = PIPEFRAME(pipe); 843 low_frame = PIPEFRAMEPIXEL(pipe); 844 845 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 846 847 /* 848 * High & low register fields aren't synchronized, so make sure 849 * we get a low value that's stable across two reads of the high 850 * register. 851 */ 852 do { 853 high1 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK; 854 low = I915_READ_FW(low_frame); 855 high2 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK; 856 } while (high1 != high2); 857 858 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 859 860 high1 >>= PIPE_FRAME_HIGH_SHIFT; 861 pixel = low & PIPE_PIXEL_MASK; 862 low >>= PIPE_FRAME_LOW_SHIFT; 863 864 /* 865 * The frame counter increments at beginning of active. 866 * Cook up a vblank counter by also checking the pixel 867 * counter against vblank start. 868 */ 869 return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff; 870 } 871 872 static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe) 873 { 874 struct drm_i915_private *dev_priv = to_i915(dev); 875 876 return I915_READ(PIPE_FRMCOUNT_G4X(pipe)); 877 } 878 879 /* 880 * On certain encoders on certain platforms, pipe 881 * scanline register will not work to get the scanline, 882 * since the timings are driven from the PORT or issues 883 * with scanline register updates. 884 * This function will use Framestamp and current 885 * timestamp registers to calculate the scanline. 
886 */ 887 static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc) 888 { 889 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 890 struct drm_vblank_crtc *vblank = 891 &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)]; 892 const struct drm_display_mode *mode = &vblank->hwmode; 893 u32 vblank_start = mode->crtc_vblank_start; 894 u32 vtotal = mode->crtc_vtotal; 895 u32 htotal = mode->crtc_htotal; 896 u32 clock = mode->crtc_clock; 897 u32 scanline, scan_prev_time, scan_curr_time, scan_post_time; 898 899 /* 900 * To avoid the race condition where we might cross into the 901 * next vblank just between the PIPE_FRMTMSTMP and TIMESTAMP_CTR 902 * reads. We make sure we read PIPE_FRMTMSTMP and TIMESTAMP_CTR 903 * during the same frame. 904 */ 905 do { 906 /* 907 * This field provides read back of the display 908 * pipe frame time stamp. The time stamp value 909 * is sampled at every start of vertical blank. 910 */ 911 scan_prev_time = I915_READ_FW(PIPE_FRMTMSTMP(crtc->pipe)); 912 913 /* 914 * The TIMESTAMP_CTR register has the current 915 * time stamp value. 916 */ 917 scan_curr_time = I915_READ_FW(IVB_TIMESTAMP_CTR); 918 919 scan_post_time = I915_READ_FW(PIPE_FRMTMSTMP(crtc->pipe)); 920 } while (scan_post_time != scan_prev_time); 921 922 scanline = div_u64(mul_u32_u32(scan_curr_time - scan_prev_time, 923 clock), 1000 * htotal); 924 scanline = min(scanline, vtotal - 1); 925 scanline = (scanline + vblank_start) % vtotal; 926 927 return scanline; 928 } 929 930 /* I915_READ_FW, only for fast reads of display block, no need for forcewake etc. */ 931 static int __intel_get_crtc_scanline(struct intel_crtc *crtc) 932 { 933 struct drm_device *dev = crtc->base.dev; 934 struct drm_i915_private *dev_priv = to_i915(dev); 935 const struct drm_display_mode *mode; 936 struct drm_vblank_crtc *vblank; 937 enum pipe pipe = crtc->pipe; 938 int position, vtotal; 939 940 if (!crtc->active) 941 return -1; 942 943 vblank = &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)]; 944 mode = &vblank->hwmode; 945 946 if (mode->private_flags & I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP) 947 return __intel_get_crtc_scanline_from_timestamp(crtc); 948 949 vtotal = mode->crtc_vtotal; 950 if (mode->flags & DRM_MODE_FLAG_INTERLACE) 951 vtotal /= 2; 952 953 if (IS_GEN2(dev_priv)) 954 position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2; 955 else 956 position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3; 957 958 /* 959 * On HSW, the DSL reg (0x70000) appears to return 0 if we 960 * read it just before the start of vblank. So try it again 961 * so we don't accidentally end up spanning a vblank frame 962 * increment, causing the pipe_update_end() code to squak at us. 963 * 964 * The nature of this problem means we can't simply check the ISR 965 * bit and return the vblank start value; nor can we use the scanline 966 * debug register in the transcoder as it appears to have the same 967 * problem. We may need to extend this to include other platforms, 968 * but so far testing only shows the problem on HSW. 969 */ 970 if (HAS_DDI(dev_priv) && !position) { 971 int i, temp; 972 973 for (i = 0; i < 100; i++) { 974 udelay(1); 975 temp = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3; 976 if (temp != position) { 977 position = temp; 978 break; 979 } 980 } 981 } 982 983 /* 984 * See update_scanline_offset() for the details on the 985 * scanline_offset adjustment. 
986 */ 987 return (position + crtc->scanline_offset) % vtotal; 988 } 989 990 static bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe, 991 bool in_vblank_irq, int *vpos, int *hpos, 992 ktime_t *stime, ktime_t *etime, 993 const struct drm_display_mode *mode) 994 { 995 struct drm_i915_private *dev_priv = to_i915(dev); 996 struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv, 997 pipe); 998 int position; 999 int vbl_start, vbl_end, hsync_start, htotal, vtotal; 1000 unsigned long irqflags; 1001 1002 if (WARN_ON(!mode->crtc_clock)) { 1003 DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled " 1004 "pipe %c\n", pipe_name(pipe)); 1005 return false; 1006 } 1007 1008 htotal = mode->crtc_htotal; 1009 hsync_start = mode->crtc_hsync_start; 1010 vtotal = mode->crtc_vtotal; 1011 vbl_start = mode->crtc_vblank_start; 1012 vbl_end = mode->crtc_vblank_end; 1013 1014 if (mode->flags & DRM_MODE_FLAG_INTERLACE) { 1015 vbl_start = DIV_ROUND_UP(vbl_start, 2); 1016 vbl_end /= 2; 1017 vtotal /= 2; 1018 } 1019 1020 /* 1021 * Lock uncore.lock, as we will do multiple timing critical raw 1022 * register reads, potentially with preemption disabled, so the 1023 * following code must not block on uncore.lock. 1024 */ 1025 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 1026 1027 /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */ 1028 1029 /* Get optional system timestamp before query. */ 1030 if (stime) 1031 *stime = ktime_get(); 1032 1033 if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) { 1034 /* No obvious pixelcount register. Only query vertical 1035 * scanout position from Display scan line register. 1036 */ 1037 position = __intel_get_crtc_scanline(intel_crtc); 1038 } else { 1039 /* Have access to pixelcount since start of frame. 1040 * We can split this into vertical and horizontal 1041 * scanout position. 1042 */ 1043 position = (I915_READ_FW(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT; 1044 1045 /* convert to pixel counts */ 1046 vbl_start *= htotal; 1047 vbl_end *= htotal; 1048 vtotal *= htotal; 1049 1050 /* 1051 * In interlaced modes, the pixel counter counts all pixels, 1052 * so one field will have htotal more pixels. In order to avoid 1053 * the reported position from jumping backwards when the pixel 1054 * counter is beyond the length of the shorter field, just 1055 * clamp the position the length of the shorter field. This 1056 * matches how the scanline counter based position works since 1057 * the scanline counter doesn't count the two half lines. 1058 */ 1059 if (position >= vtotal) 1060 position = vtotal - 1; 1061 1062 /* 1063 * Start of vblank interrupt is triggered at start of hsync, 1064 * just prior to the first active line of vblank. However we 1065 * consider lines to start at the leading edge of horizontal 1066 * active. So, should we get here before we've crossed into 1067 * the horizontal active of the first line in vblank, we would 1068 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that, 1069 * always add htotal-hsync_start to the current pixel position. 1070 */ 1071 position = (position + htotal - hsync_start) % vtotal; 1072 } 1073 1074 /* Get optional system timestamp after query. */ 1075 if (etime) 1076 *etime = ktime_get(); 1077 1078 /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */ 1079 1080 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 1081 1082 /* 1083 * While in vblank, position will be negative 1084 * counting up towards 0 at vbl_end. 
And outside 1085 * vblank, position will be positive counting 1086 * up since vbl_end. 1087 */ 1088 if (position >= vbl_start) 1089 position -= vbl_end; 1090 else 1091 position += vtotal - vbl_end; 1092 1093 if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) { 1094 *vpos = position; 1095 *hpos = 0; 1096 } else { 1097 *vpos = position / htotal; 1098 *hpos = position - (*vpos * htotal); 1099 } 1100 1101 return true; 1102 } 1103 1104 int intel_get_crtc_scanline(struct intel_crtc *crtc) 1105 { 1106 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1107 unsigned long irqflags; 1108 int position; 1109 1110 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 1111 position = __intel_get_crtc_scanline(crtc); 1112 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 1113 1114 return position; 1115 } 1116 1117 static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv) 1118 { 1119 u32 busy_up, busy_down, max_avg, min_avg; 1120 u8 new_delay; 1121 1122 spin_lock(&mchdev_lock); 1123 1124 I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS)); 1125 1126 new_delay = dev_priv->ips.cur_delay; 1127 1128 I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG); 1129 busy_up = I915_READ(RCPREVBSYTUPAVG); 1130 busy_down = I915_READ(RCPREVBSYTDNAVG); 1131 max_avg = I915_READ(RCBMAXAVG); 1132 min_avg = I915_READ(RCBMINAVG); 1133 1134 /* Handle RCS change request from hw */ 1135 if (busy_up > max_avg) { 1136 if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay) 1137 new_delay = dev_priv->ips.cur_delay - 1; 1138 if (new_delay < dev_priv->ips.max_delay) 1139 new_delay = dev_priv->ips.max_delay; 1140 } else if (busy_down < min_avg) { 1141 if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay) 1142 new_delay = dev_priv->ips.cur_delay + 1; 1143 if (new_delay > dev_priv->ips.min_delay) 1144 new_delay = dev_priv->ips.min_delay; 1145 } 1146 1147 if (ironlake_set_drps(dev_priv, new_delay)) 1148 dev_priv->ips.cur_delay = new_delay; 1149 1150 spin_unlock(&mchdev_lock); 1151 1152 return; 1153 } 1154 1155 static void notify_ring(struct intel_engine_cs *engine) 1156 { 1157 const u32 seqno = intel_engine_get_seqno(engine); 1158 struct i915_request *rq = NULL; 1159 struct task_struct *tsk = NULL; 1160 struct intel_wait *wait; 1161 1162 if (unlikely(!engine->breadcrumbs.irq_armed)) 1163 return; 1164 1165 rcu_read_lock(); 1166 1167 spin_lock(&engine->breadcrumbs.irq_lock); 1168 wait = engine->breadcrumbs.irq_wait; 1169 if (wait) { 1170 /* 1171 * We use a callback from the dma-fence to submit 1172 * requests after waiting on our own requests. To 1173 * ensure minimum delay in queuing the next request to 1174 * hardware, signal the fence now rather than wait for 1175 * the signaler to be woken up. We still wake up the 1176 * waiter in order to handle the irq-seqno coherency 1177 * issues (we may receive the interrupt before the 1178 * seqno is written, see __i915_request_irq_complete()) 1179 * and to handle coalescing of multiple seqno updates 1180 * and many waiters. 
1181 */ 1182 if (i915_seqno_passed(seqno, wait->seqno)) { 1183 struct i915_request *waiter = wait->request; 1184 1185 if (waiter && 1186 !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, 1187 &waiter->fence.flags) && 1188 intel_wait_check_request(wait, waiter)) 1189 rq = i915_request_get(waiter); 1190 1191 tsk = wait->tsk; 1192 } else { 1193 if (engine->irq_seqno_barrier && 1194 i915_seqno_passed(seqno, wait->seqno - 1)) { 1195 set_bit(ENGINE_IRQ_BREADCRUMB, 1196 &engine->irq_posted); 1197 tsk = wait->tsk; 1198 } 1199 } 1200 1201 engine->breadcrumbs.irq_count++; 1202 } else { 1203 if (engine->breadcrumbs.irq_armed) 1204 __intel_engine_disarm_breadcrumbs(engine); 1205 } 1206 spin_unlock(&engine->breadcrumbs.irq_lock); 1207 1208 if (rq) { 1209 spin_lock(&rq->lock); 1210 dma_fence_signal_locked(&rq->fence); 1211 GEM_BUG_ON(!i915_request_completed(rq)); 1212 spin_unlock(&rq->lock); 1213 1214 i915_request_put(rq); 1215 } 1216 1217 if (tsk && tsk->state & TASK_NORMAL) 1218 wake_up_process(tsk); 1219 1220 rcu_read_unlock(); 1221 1222 trace_intel_engine_notify(engine, wait); 1223 } 1224 1225 static void vlv_c0_read(struct drm_i915_private *dev_priv, 1226 struct intel_rps_ei *ei) 1227 { 1228 ei->ktime = ktime_get_raw(); 1229 ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT); 1230 ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT); 1231 } 1232 1233 void gen6_rps_reset_ei(struct drm_i915_private *dev_priv) 1234 { 1235 memset(&dev_priv->gt_pm.rps.ei, 0, sizeof(dev_priv->gt_pm.rps.ei)); 1236 } 1237 1238 static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir) 1239 { 1240 struct intel_rps *rps = &dev_priv->gt_pm.rps; 1241 const struct intel_rps_ei *prev = &rps->ei; 1242 struct intel_rps_ei now; 1243 u32 events = 0; 1244 1245 if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0) 1246 return 0; 1247 1248 vlv_c0_read(dev_priv, &now); 1249 1250 if (prev->ktime) { 1251 u64 time, c0; 1252 u32 render, media; 1253 1254 time = ktime_us_delta(now.ktime, prev->ktime); 1255 1256 time *= dev_priv->czclk_freq; 1257 1258 /* Workload can be split between render + media, 1259 * e.g. SwapBuffers being blitted in X after being rendered in 1260 * mesa. To account for this we need to combine both engines 1261 * into our activity counter. 1262 */ 1263 render = now.render_c0 - prev->render_c0; 1264 media = now.media_c0 - prev->media_c0; 1265 c0 = max(render, media); 1266 c0 *= 1000 * 100 << 8; /* to usecs and scale to threshold% */ 1267 1268 if (c0 > time * rps->power.up_threshold) 1269 events = GEN6_PM_RP_UP_THRESHOLD; 1270 else if (c0 < time * rps->power.down_threshold) 1271 events = GEN6_PM_RP_DOWN_THRESHOLD; 1272 } 1273 1274 rps->ei = now; 1275 return events; 1276 } 1277 1278 static void gen6_pm_rps_work(struct work_struct *work) 1279 { 1280 struct drm_i915_private *dev_priv = 1281 container_of(work, struct drm_i915_private, gt_pm.rps.work); 1282 struct intel_rps *rps = &dev_priv->gt_pm.rps; 1283 bool client_boost = false; 1284 int new_delay, adj, min, max; 1285 u32 pm_iir = 0; 1286 1287 spin_lock_irq(&dev_priv->irq_lock); 1288 if (rps->interrupts_enabled) { 1289 pm_iir = fetch_and_zero(&rps->pm_iir); 1290 client_boost = atomic_read(&rps->num_waiters); 1291 } 1292 spin_unlock_irq(&dev_priv->irq_lock); 1293 1294 /* Make sure we didn't queue anything we're not going to process. 
*/ 1295 WARN_ON(pm_iir & ~dev_priv->pm_rps_events); 1296 if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost) 1297 goto out; 1298 1299 mutex_lock(&dev_priv->pcu_lock); 1300 1301 pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir); 1302 1303 adj = rps->last_adj; 1304 new_delay = rps->cur_freq; 1305 min = rps->min_freq_softlimit; 1306 max = rps->max_freq_softlimit; 1307 if (client_boost) 1308 max = rps->max_freq; 1309 if (client_boost && new_delay < rps->boost_freq) { 1310 new_delay = rps->boost_freq; 1311 adj = 0; 1312 } else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) { 1313 if (adj > 0) 1314 adj *= 2; 1315 else /* CHV needs even encode values */ 1316 adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1; 1317 1318 if (new_delay >= rps->max_freq_softlimit) 1319 adj = 0; 1320 } else if (client_boost) { 1321 adj = 0; 1322 } else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) { 1323 if (rps->cur_freq > rps->efficient_freq) 1324 new_delay = rps->efficient_freq; 1325 else if (rps->cur_freq > rps->min_freq_softlimit) 1326 new_delay = rps->min_freq_softlimit; 1327 adj = 0; 1328 } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) { 1329 if (adj < 0) 1330 adj *= 2; 1331 else /* CHV needs even encode values */ 1332 adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1; 1333 1334 if (new_delay <= rps->min_freq_softlimit) 1335 adj = 0; 1336 } else { /* unknown event */ 1337 adj = 0; 1338 } 1339 1340 rps->last_adj = adj; 1341 1342 /* sysfs frequency interfaces may have snuck in while servicing the 1343 * interrupt 1344 */ 1345 new_delay += adj; 1346 new_delay = clamp_t(int, new_delay, min, max); 1347 1348 if (intel_set_rps(dev_priv, new_delay)) { 1349 DRM_DEBUG_DRIVER("Failed to set new GPU frequency\n"); 1350 rps->last_adj = 0; 1351 } 1352 1353 mutex_unlock(&dev_priv->pcu_lock); 1354 1355 out: 1356 /* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */ 1357 spin_lock_irq(&dev_priv->irq_lock); 1358 if (rps->interrupts_enabled) 1359 gen6_unmask_pm_irq(dev_priv, dev_priv->pm_rps_events); 1360 spin_unlock_irq(&dev_priv->irq_lock); 1361 } 1362 1363 1364 /** 1365 * ivybridge_parity_work - Workqueue called when a parity error interrupt 1366 * occurred. 1367 * @work: workqueue struct 1368 * 1369 * Doesn't actually do anything except notify userspace. As a consequence of 1370 * this event, userspace should try to remap the bad rows since statistically 1371 * it is likely the same row is more likely to go bad again. 1372 */ 1373 static void ivybridge_parity_work(struct work_struct *work) 1374 { 1375 struct drm_i915_private *dev_priv = 1376 container_of(work, typeof(*dev_priv), l3_parity.error_work); 1377 u32 error_status, row, bank, subbank; 1378 char *parity_event[6]; 1379 uint32_t misccpctl; 1380 uint8_t slice = 0; 1381 1382 /* We must turn off DOP level clock gating to access the L3 registers. 1383 * In order to prevent a get/put style interface, acquire struct mutex 1384 * any time we access those registers. 
1385 */ 1386 mutex_lock(&dev_priv->drm.struct_mutex); 1387 1388 /* If we've screwed up tracking, just let the interrupt fire again */ 1389 if (WARN_ON(!dev_priv->l3_parity.which_slice)) 1390 goto out; 1391 1392 misccpctl = I915_READ(GEN7_MISCCPCTL); 1393 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE); 1394 POSTING_READ(GEN7_MISCCPCTL); 1395 1396 while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) { 1397 i915_reg_t reg; 1398 1399 slice--; 1400 if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv))) 1401 break; 1402 1403 dev_priv->l3_parity.which_slice &= ~(1<<slice); 1404 1405 reg = GEN7_L3CDERRST1(slice); 1406 1407 error_status = I915_READ(reg); 1408 row = GEN7_PARITY_ERROR_ROW(error_status); 1409 bank = GEN7_PARITY_ERROR_BANK(error_status); 1410 subbank = GEN7_PARITY_ERROR_SUBBANK(error_status); 1411 1412 I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE); 1413 POSTING_READ(reg); 1414 1415 parity_event[0] = I915_L3_PARITY_UEVENT "=1"; 1416 parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row); 1417 parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank); 1418 parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank); 1419 parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice); 1420 parity_event[5] = NULL; 1421 1422 kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj, 1423 KOBJ_CHANGE, parity_event); 1424 1425 DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n", 1426 slice, row, bank, subbank); 1427 1428 kfree(parity_event[4]); 1429 kfree(parity_event[3]); 1430 kfree(parity_event[2]); 1431 kfree(parity_event[1]); 1432 } 1433 1434 I915_WRITE(GEN7_MISCCPCTL, misccpctl); 1435 1436 out: 1437 WARN_ON(dev_priv->l3_parity.which_slice); 1438 spin_lock_irq(&dev_priv->irq_lock); 1439 gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv)); 1440 spin_unlock_irq(&dev_priv->irq_lock); 1441 1442 mutex_unlock(&dev_priv->drm.struct_mutex); 1443 } 1444 1445 static void ivybridge_parity_error_irq_handler(struct drm_i915_private *dev_priv, 1446 u32 iir) 1447 { 1448 if (!HAS_L3_DPF(dev_priv)) 1449 return; 1450 1451 spin_lock(&dev_priv->irq_lock); 1452 gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv)); 1453 spin_unlock(&dev_priv->irq_lock); 1454 1455 iir &= GT_PARITY_ERROR(dev_priv); 1456 if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1) 1457 dev_priv->l3_parity.which_slice |= 1 << 1; 1458 1459 if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT) 1460 dev_priv->l3_parity.which_slice |= 1 << 0; 1461 1462 queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work); 1463 } 1464 1465 static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv, 1466 u32 gt_iir) 1467 { 1468 if (gt_iir & GT_RENDER_USER_INTERRUPT) 1469 notify_ring(dev_priv->engine[RCS]); 1470 if (gt_iir & ILK_BSD_USER_INTERRUPT) 1471 notify_ring(dev_priv->engine[VCS]); 1472 } 1473 1474 static void snb_gt_irq_handler(struct drm_i915_private *dev_priv, 1475 u32 gt_iir) 1476 { 1477 if (gt_iir & GT_RENDER_USER_INTERRUPT) 1478 notify_ring(dev_priv->engine[RCS]); 1479 if (gt_iir & GT_BSD_USER_INTERRUPT) 1480 notify_ring(dev_priv->engine[VCS]); 1481 if (gt_iir & GT_BLT_USER_INTERRUPT) 1482 notify_ring(dev_priv->engine[BCS]); 1483 1484 if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT | 1485 GT_BSD_CS_ERROR_INTERRUPT | 1486 GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) 1487 DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir); 1488 1489 if (gt_iir & GT_PARITY_ERROR(dev_priv)) 1490 ivybridge_parity_error_irq_handler(dev_priv, gt_iir); 1491 } 1492 1493 static void 1494 
gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir) 1495 { 1496 bool tasklet = false; 1497 1498 if (iir & GT_CONTEXT_SWITCH_INTERRUPT) 1499 tasklet = true; 1500 1501 if (iir & GT_RENDER_USER_INTERRUPT) { 1502 notify_ring(engine); 1503 tasklet |= USES_GUC_SUBMISSION(engine->i915); 1504 } 1505 1506 if (tasklet) 1507 tasklet_hi_schedule(&engine->execlists.tasklet); 1508 } 1509 1510 static void gen8_gt_irq_ack(struct drm_i915_private *i915, 1511 u32 master_ctl, u32 gt_iir[4]) 1512 { 1513 void __iomem * const regs = i915->regs; 1514 1515 #define GEN8_GT_IRQS (GEN8_GT_RCS_IRQ | \ 1516 GEN8_GT_BCS_IRQ | \ 1517 GEN8_GT_VCS1_IRQ | \ 1518 GEN8_GT_VCS2_IRQ | \ 1519 GEN8_GT_VECS_IRQ | \ 1520 GEN8_GT_PM_IRQ | \ 1521 GEN8_GT_GUC_IRQ) 1522 1523 if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) { 1524 gt_iir[0] = raw_reg_read(regs, GEN8_GT_IIR(0)); 1525 if (likely(gt_iir[0])) 1526 raw_reg_write(regs, GEN8_GT_IIR(0), gt_iir[0]); 1527 } 1528 1529 if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) { 1530 gt_iir[1] = raw_reg_read(regs, GEN8_GT_IIR(1)); 1531 if (likely(gt_iir[1])) 1532 raw_reg_write(regs, GEN8_GT_IIR(1), gt_iir[1]); 1533 } 1534 1535 if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) { 1536 gt_iir[2] = raw_reg_read(regs, GEN8_GT_IIR(2)); 1537 if (likely(gt_iir[2])) 1538 raw_reg_write(regs, GEN8_GT_IIR(2), gt_iir[2]); 1539 } 1540 1541 if (master_ctl & GEN8_GT_VECS_IRQ) { 1542 gt_iir[3] = raw_reg_read(regs, GEN8_GT_IIR(3)); 1543 if (likely(gt_iir[3])) 1544 raw_reg_write(regs, GEN8_GT_IIR(3), gt_iir[3]); 1545 } 1546 } 1547 1548 static void gen8_gt_irq_handler(struct drm_i915_private *i915, 1549 u32 master_ctl, u32 gt_iir[4]) 1550 { 1551 if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) { 1552 gen8_cs_irq_handler(i915->engine[RCS], 1553 gt_iir[0] >> GEN8_RCS_IRQ_SHIFT); 1554 gen8_cs_irq_handler(i915->engine[BCS], 1555 gt_iir[0] >> GEN8_BCS_IRQ_SHIFT); 1556 } 1557 1558 if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) { 1559 gen8_cs_irq_handler(i915->engine[VCS], 1560 gt_iir[1] >> GEN8_VCS1_IRQ_SHIFT); 1561 gen8_cs_irq_handler(i915->engine[VCS2], 1562 gt_iir[1] >> GEN8_VCS2_IRQ_SHIFT); 1563 } 1564 1565 if (master_ctl & GEN8_GT_VECS_IRQ) { 1566 gen8_cs_irq_handler(i915->engine[VECS], 1567 gt_iir[3] >> GEN8_VECS_IRQ_SHIFT); 1568 } 1569 1570 if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) { 1571 gen6_rps_irq_handler(i915, gt_iir[2]); 1572 gen9_guc_irq_handler(i915, gt_iir[2]); 1573 } 1574 } 1575 1576 static bool gen11_port_hotplug_long_detect(enum hpd_pin pin, u32 val) 1577 { 1578 switch (pin) { 1579 case HPD_PORT_C: 1580 return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC1); 1581 case HPD_PORT_D: 1582 return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC2); 1583 case HPD_PORT_E: 1584 return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC3); 1585 case HPD_PORT_F: 1586 return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC4); 1587 default: 1588 return false; 1589 } 1590 } 1591 1592 static bool bxt_port_hotplug_long_detect(enum hpd_pin pin, u32 val) 1593 { 1594 switch (pin) { 1595 case HPD_PORT_A: 1596 return val & PORTA_HOTPLUG_LONG_DETECT; 1597 case HPD_PORT_B: 1598 return val & PORTB_HOTPLUG_LONG_DETECT; 1599 case HPD_PORT_C: 1600 return val & PORTC_HOTPLUG_LONG_DETECT; 1601 default: 1602 return false; 1603 } 1604 } 1605 1606 static bool icp_ddi_port_hotplug_long_detect(enum hpd_pin pin, u32 val) 1607 { 1608 switch (pin) { 1609 case HPD_PORT_A: 1610 return val & ICP_DDIA_HPD_LONG_DETECT; 1611 case HPD_PORT_B: 1612 return val & ICP_DDIB_HPD_LONG_DETECT; 1613 default: 1614 return 
false; 1615 } 1616 } 1617 1618 static bool icp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val) 1619 { 1620 switch (pin) { 1621 case HPD_PORT_C: 1622 return val & ICP_TC_HPD_LONG_DETECT(PORT_TC1); 1623 case HPD_PORT_D: 1624 return val & ICP_TC_HPD_LONG_DETECT(PORT_TC2); 1625 case HPD_PORT_E: 1626 return val & ICP_TC_HPD_LONG_DETECT(PORT_TC3); 1627 case HPD_PORT_F: 1628 return val & ICP_TC_HPD_LONG_DETECT(PORT_TC4); 1629 default: 1630 return false; 1631 } 1632 } 1633 1634 static bool spt_port_hotplug2_long_detect(enum hpd_pin pin, u32 val) 1635 { 1636 switch (pin) { 1637 case HPD_PORT_E: 1638 return val & PORTE_HOTPLUG_LONG_DETECT; 1639 default: 1640 return false; 1641 } 1642 } 1643 1644 static bool spt_port_hotplug_long_detect(enum hpd_pin pin, u32 val) 1645 { 1646 switch (pin) { 1647 case HPD_PORT_A: 1648 return val & PORTA_HOTPLUG_LONG_DETECT; 1649 case HPD_PORT_B: 1650 return val & PORTB_HOTPLUG_LONG_DETECT; 1651 case HPD_PORT_C: 1652 return val & PORTC_HOTPLUG_LONG_DETECT; 1653 case HPD_PORT_D: 1654 return val & PORTD_HOTPLUG_LONG_DETECT; 1655 default: 1656 return false; 1657 } 1658 } 1659 1660 static bool ilk_port_hotplug_long_detect(enum hpd_pin pin, u32 val) 1661 { 1662 switch (pin) { 1663 case HPD_PORT_A: 1664 return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT; 1665 default: 1666 return false; 1667 } 1668 } 1669 1670 static bool pch_port_hotplug_long_detect(enum hpd_pin pin, u32 val) 1671 { 1672 switch (pin) { 1673 case HPD_PORT_B: 1674 return val & PORTB_HOTPLUG_LONG_DETECT; 1675 case HPD_PORT_C: 1676 return val & PORTC_HOTPLUG_LONG_DETECT; 1677 case HPD_PORT_D: 1678 return val & PORTD_HOTPLUG_LONG_DETECT; 1679 default: 1680 return false; 1681 } 1682 } 1683 1684 static bool i9xx_port_hotplug_long_detect(enum hpd_pin pin, u32 val) 1685 { 1686 switch (pin) { 1687 case HPD_PORT_B: 1688 return val & PORTB_HOTPLUG_INT_LONG_PULSE; 1689 case HPD_PORT_C: 1690 return val & PORTC_HOTPLUG_INT_LONG_PULSE; 1691 case HPD_PORT_D: 1692 return val & PORTD_HOTPLUG_INT_LONG_PULSE; 1693 default: 1694 return false; 1695 } 1696 } 1697 1698 /* 1699 * Get a bit mask of pins that have triggered, and which ones may be long. 1700 * This can be called multiple times with the same masks to accumulate 1701 * hotplug detection results from several registers. 1702 * 1703 * Note that the caller is expected to zero out the masks initially. 
1704 */ 1705 static void intel_get_hpd_pins(struct drm_i915_private *dev_priv, 1706 u32 *pin_mask, u32 *long_mask, 1707 u32 hotplug_trigger, u32 dig_hotplug_reg, 1708 const u32 hpd[HPD_NUM_PINS], 1709 bool long_pulse_detect(enum hpd_pin pin, u32 val)) 1710 { 1711 enum hpd_pin pin; 1712 1713 for_each_hpd_pin(pin) { 1714 if ((hpd[pin] & hotplug_trigger) == 0) 1715 continue; 1716 1717 *pin_mask |= BIT(pin); 1718 1719 if (long_pulse_detect(pin, dig_hotplug_reg)) 1720 *long_mask |= BIT(pin); 1721 } 1722 1723 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x, long 0x%08x\n", 1724 hotplug_trigger, dig_hotplug_reg, *pin_mask, *long_mask); 1725 1726 } 1727 1728 static void gmbus_irq_handler(struct drm_i915_private *dev_priv) 1729 { 1730 wake_up_all(&dev_priv->gmbus_wait_queue); 1731 } 1732 1733 static void dp_aux_irq_handler(struct drm_i915_private *dev_priv) 1734 { 1735 wake_up_all(&dev_priv->gmbus_wait_queue); 1736 } 1737 1738 #if defined(CONFIG_DEBUG_FS) 1739 static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 1740 enum pipe pipe, 1741 uint32_t crc0, uint32_t crc1, 1742 uint32_t crc2, uint32_t crc3, 1743 uint32_t crc4) 1744 { 1745 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe]; 1746 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); 1747 uint32_t crcs[5]; 1748 1749 spin_lock(&pipe_crc->lock); 1750 /* 1751 * For some not yet identified reason, the first CRC is 1752 * bonkers. So let's just wait for the next vblank and read 1753 * out the buggy result. 1754 * 1755 * On GEN8+ sometimes the second CRC is bonkers as well, so 1756 * don't trust that one either. 1757 */ 1758 if (pipe_crc->skipped <= 0 || 1759 (INTEL_GEN(dev_priv) >= 8 && pipe_crc->skipped == 1)) { 1760 pipe_crc->skipped++; 1761 spin_unlock(&pipe_crc->lock); 1762 return; 1763 } 1764 spin_unlock(&pipe_crc->lock); 1765 1766 crcs[0] = crc0; 1767 crcs[1] = crc1; 1768 crcs[2] = crc2; 1769 crcs[3] = crc3; 1770 crcs[4] = crc4; 1771 drm_crtc_add_crc_entry(&crtc->base, true, 1772 drm_crtc_accurate_vblank_count(&crtc->base), 1773 crcs); 1774 } 1775 #else 1776 static inline void 1777 display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 1778 enum pipe pipe, 1779 uint32_t crc0, uint32_t crc1, 1780 uint32_t crc2, uint32_t crc3, 1781 uint32_t crc4) {} 1782 #endif 1783 1784 1785 static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 1786 enum pipe pipe) 1787 { 1788 display_pipe_crc_irq_handler(dev_priv, pipe, 1789 I915_READ(PIPE_CRC_RES_1_IVB(pipe)), 1790 0, 0, 0, 0); 1791 } 1792 1793 static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 1794 enum pipe pipe) 1795 { 1796 display_pipe_crc_irq_handler(dev_priv, pipe, 1797 I915_READ(PIPE_CRC_RES_1_IVB(pipe)), 1798 I915_READ(PIPE_CRC_RES_2_IVB(pipe)), 1799 I915_READ(PIPE_CRC_RES_3_IVB(pipe)), 1800 I915_READ(PIPE_CRC_RES_4_IVB(pipe)), 1801 I915_READ(PIPE_CRC_RES_5_IVB(pipe))); 1802 } 1803 1804 static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 1805 enum pipe pipe) 1806 { 1807 uint32_t res1, res2; 1808 1809 if (INTEL_GEN(dev_priv) >= 3) 1810 res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe)); 1811 else 1812 res1 = 0; 1813 1814 if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) 1815 res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe)); 1816 else 1817 res2 = 0; 1818 1819 display_pipe_crc_irq_handler(dev_priv, pipe, 1820 I915_READ(PIPE_CRC_RES_RED(pipe)), 1821 I915_READ(PIPE_CRC_RES_GREEN(pipe)), 1822 I915_READ(PIPE_CRC_RES_BLUE(pipe)), 1823 res1, res2); 1824 } 1825 1826 /* 
The RPS events need forcewake, so we add them to a work queue and mask their
1827 * IMR bits until the work is done. Other interrupts can be processed without
1828 * the work queue. */
1829 static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
1830 {
1831 struct intel_rps *rps = &dev_priv->gt_pm.rps;
1832
1833 if (pm_iir & dev_priv->pm_rps_events) {
1834 spin_lock(&dev_priv->irq_lock);
1835 gen6_mask_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
1836 if (rps->interrupts_enabled) {
1837 rps->pm_iir |= pm_iir & dev_priv->pm_rps_events;
1838 schedule_work(&rps->work);
1839 }
1840 spin_unlock(&dev_priv->irq_lock);
1841 }
1842
1843 if (INTEL_GEN(dev_priv) >= 8)
1844 return;
1845
1846 if (HAS_VEBOX(dev_priv)) {
1847 if (pm_iir & PM_VEBOX_USER_INTERRUPT)
1848 notify_ring(dev_priv->engine[VECS]);
1849
1850 if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
1851 DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
1852 }
1853 }
1854
1855 static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 gt_iir)
1856 {
1857 if (gt_iir & GEN9_GUC_TO_HOST_INT_EVENT)
1858 intel_guc_to_host_event_handler(&dev_priv->guc);
1859 }
1860
1861 static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
1862 {
1863 enum pipe pipe;
1864
1865 for_each_pipe(dev_priv, pipe) {
1866 I915_WRITE(PIPESTAT(pipe),
1867 PIPESTAT_INT_STATUS_MASK |
1868 PIPE_FIFO_UNDERRUN_STATUS);
1869
1870 dev_priv->pipestat_irq_mask[pipe] = 0;
1871 }
1872 }
1873
1874 static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
1875 u32 iir, u32 pipe_stats[I915_MAX_PIPES])
1876 {
1877 int pipe;
1878
1879 spin_lock(&dev_priv->irq_lock);
1880
1881 if (!dev_priv->display_irqs_enabled) {
1882 spin_unlock(&dev_priv->irq_lock);
1883 return;
1884 }
1885
1886 for_each_pipe(dev_priv, pipe) {
1887 i915_reg_t reg;
1888 u32 status_mask, enable_mask, iir_bit = 0;
1889
1890 /*
1891 * PIPESTAT bits get signalled even when the interrupt is
1892 * disabled with the mask bits, and some of the status bits do
1893 * not generate interrupts at all (like the underrun bit). Hence
1894 * we need to be careful that we only handle what we want to
1895 * handle.
1896 */
1897
1898 /* fifo underruns are filtered in the underrun handler. */
1899 status_mask = PIPE_FIFO_UNDERRUN_STATUS;
1900
1901 switch (pipe) {
1902 case PIPE_A:
1903 iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
1904 break;
1905 case PIPE_B:
1906 iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
1907 break;
1908 case PIPE_C:
1909 iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
1910 break;
1911 }
1912 if (iir & iir_bit)
1913 status_mask |= dev_priv->pipestat_irq_mask[pipe];
1914
1915 if (!status_mask)
1916 continue;
1917
1918 reg = PIPESTAT(pipe);
1919 pipe_stats[pipe] = I915_READ(reg) & status_mask;
1920 enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
1921
1922 /*
1923 * Clear the PIPE*STAT regs before the IIR
1924 *
1925 * Toggle the enable bits to make sure we get an
1926 * edge in the ISR pipe event bit if we don't clear
1927 * all the enabled status bits. Otherwise the edge
1928 * triggered IIR on i965/g4x wouldn't notice that
1929 * an interrupt is still pending.
1930 */ 1931 if (pipe_stats[pipe]) { 1932 I915_WRITE(reg, pipe_stats[pipe]); 1933 I915_WRITE(reg, enable_mask); 1934 } 1935 } 1936 spin_unlock(&dev_priv->irq_lock); 1937 } 1938 1939 static void i8xx_pipestat_irq_handler(struct drm_i915_private *dev_priv, 1940 u16 iir, u32 pipe_stats[I915_MAX_PIPES]) 1941 { 1942 enum pipe pipe; 1943 1944 for_each_pipe(dev_priv, pipe) { 1945 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS) 1946 drm_handle_vblank(&dev_priv->drm, pipe); 1947 1948 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 1949 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 1950 1951 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 1952 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 1953 } 1954 } 1955 1956 static void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv, 1957 u32 iir, u32 pipe_stats[I915_MAX_PIPES]) 1958 { 1959 bool blc_event = false; 1960 enum pipe pipe; 1961 1962 for_each_pipe(dev_priv, pipe) { 1963 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS) 1964 drm_handle_vblank(&dev_priv->drm, pipe); 1965 1966 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 1967 blc_event = true; 1968 1969 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 1970 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 1971 1972 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 1973 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 1974 } 1975 1976 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 1977 intel_opregion_asle_intr(dev_priv); 1978 } 1979 1980 static void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv, 1981 u32 iir, u32 pipe_stats[I915_MAX_PIPES]) 1982 { 1983 bool blc_event = false; 1984 enum pipe pipe; 1985 1986 for_each_pipe(dev_priv, pipe) { 1987 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS) 1988 drm_handle_vblank(&dev_priv->drm, pipe); 1989 1990 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 1991 blc_event = true; 1992 1993 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 1994 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 1995 1996 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 1997 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 1998 } 1999 2000 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 2001 intel_opregion_asle_intr(dev_priv); 2002 2003 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 2004 gmbus_irq_handler(dev_priv); 2005 } 2006 2007 static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv, 2008 u32 pipe_stats[I915_MAX_PIPES]) 2009 { 2010 enum pipe pipe; 2011 2012 for_each_pipe(dev_priv, pipe) { 2013 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS) 2014 drm_handle_vblank(&dev_priv->drm, pipe); 2015 2016 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 2017 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 2018 2019 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 2020 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2021 } 2022 2023 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 2024 gmbus_irq_handler(dev_priv); 2025 } 2026 2027 static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv) 2028 { 2029 u32 hotplug_status = 0, hotplug_status_mask; 2030 int i; 2031 2032 if (IS_G4X(dev_priv) || 2033 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 2034 hotplug_status_mask = HOTPLUG_INT_STATUS_G4X | 2035 DP_AUX_CHANNEL_MASK_INT_STATUS_G4X; 2036 else 2037 hotplug_status_mask = HOTPLUG_INT_STATUS_I915; 2038 2039 /* 2040 * We absolutely have to clear all the pending interrupt 2041 * bits in PORT_HOTPLUG_STAT. 
Otherwise the ISR port 2042 * interrupt bit won't have an edge, and the i965/g4x 2043 * edge triggered IIR will not notice that an interrupt 2044 * is still pending. We can't use PORT_HOTPLUG_EN to 2045 * guarantee the edge as the act of toggling the enable 2046 * bits can itself generate a new hotplug interrupt :( 2047 */ 2048 for (i = 0; i < 10; i++) { 2049 u32 tmp = I915_READ(PORT_HOTPLUG_STAT) & hotplug_status_mask; 2050 2051 if (tmp == 0) 2052 return hotplug_status; 2053 2054 hotplug_status |= tmp; 2055 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); 2056 } 2057 2058 WARN_ONCE(1, 2059 "PORT_HOTPLUG_STAT did not clear (0x%08x)\n", 2060 I915_READ(PORT_HOTPLUG_STAT)); 2061 2062 return hotplug_status; 2063 } 2064 2065 static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv, 2066 u32 hotplug_status) 2067 { 2068 u32 pin_mask = 0, long_mask = 0; 2069 2070 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || 2071 IS_CHERRYVIEW(dev_priv)) { 2072 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X; 2073 2074 if (hotplug_trigger) { 2075 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 2076 hotplug_trigger, hotplug_trigger, 2077 hpd_status_g4x, 2078 i9xx_port_hotplug_long_detect); 2079 2080 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2081 } 2082 2083 if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X) 2084 dp_aux_irq_handler(dev_priv); 2085 } else { 2086 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; 2087 2088 if (hotplug_trigger) { 2089 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 2090 hotplug_trigger, hotplug_trigger, 2091 hpd_status_i915, 2092 i9xx_port_hotplug_long_detect); 2093 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2094 } 2095 } 2096 } 2097 2098 static irqreturn_t valleyview_irq_handler(int irq, void *arg) 2099 { 2100 struct drm_device *dev = arg; 2101 struct drm_i915_private *dev_priv = to_i915(dev); 2102 irqreturn_t ret = IRQ_NONE; 2103 2104 if (!intel_irqs_enabled(dev_priv)) 2105 return IRQ_NONE; 2106 2107 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2108 disable_rpm_wakeref_asserts(dev_priv); 2109 2110 do { 2111 u32 iir, gt_iir, pm_iir; 2112 u32 pipe_stats[I915_MAX_PIPES] = {}; 2113 u32 hotplug_status = 0; 2114 u32 ier = 0; 2115 2116 gt_iir = I915_READ(GTIIR); 2117 pm_iir = I915_READ(GEN6_PMIIR); 2118 iir = I915_READ(VLV_IIR); 2119 2120 if (gt_iir == 0 && pm_iir == 0 && iir == 0) 2121 break; 2122 2123 ret = IRQ_HANDLED; 2124 2125 /* 2126 * Theory on interrupt generation, based on empirical evidence: 2127 * 2128 * x = ((VLV_IIR & VLV_IER) || 2129 * (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) && 2130 * (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE))); 2131 * 2132 * A CPU interrupt will only be raised when 'x' has a 0->1 edge. 2133 * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to 2134 * guarantee the CPU interrupt will be raised again even if we 2135 * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR 2136 * bits this time around. 
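 *
 * In short, the code below first clears VLV_MASTER_IER and VLV_IER,
 * then acks GT_IIR/GEN6_PMIIR, acks VLV_IIR last (it reflects the level
 * of PIPESTAT/PORT_HOTPLUG_STAT), and finally restores VLV_IER and the
 * master enable so that any bit still pending produces a fresh 0->1 edge.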
2137 */ 2138 I915_WRITE(VLV_MASTER_IER, 0); 2139 ier = I915_READ(VLV_IER); 2140 I915_WRITE(VLV_IER, 0); 2141 2142 if (gt_iir) 2143 I915_WRITE(GTIIR, gt_iir); 2144 if (pm_iir) 2145 I915_WRITE(GEN6_PMIIR, pm_iir); 2146 2147 if (iir & I915_DISPLAY_PORT_INTERRUPT) 2148 hotplug_status = i9xx_hpd_irq_ack(dev_priv); 2149 2150 /* Call regardless, as some status bits might not be 2151 * signalled in iir */ 2152 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats); 2153 2154 if (iir & (I915_LPE_PIPE_A_INTERRUPT | 2155 I915_LPE_PIPE_B_INTERRUPT)) 2156 intel_lpe_audio_irq_handler(dev_priv); 2157 2158 /* 2159 * VLV_IIR is single buffered, and reflects the level 2160 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last. 2161 */ 2162 if (iir) 2163 I915_WRITE(VLV_IIR, iir); 2164 2165 I915_WRITE(VLV_IER, ier); 2166 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); 2167 2168 if (gt_iir) 2169 snb_gt_irq_handler(dev_priv, gt_iir); 2170 if (pm_iir) 2171 gen6_rps_irq_handler(dev_priv, pm_iir); 2172 2173 if (hotplug_status) 2174 i9xx_hpd_irq_handler(dev_priv, hotplug_status); 2175 2176 valleyview_pipestat_irq_handler(dev_priv, pipe_stats); 2177 } while (0); 2178 2179 enable_rpm_wakeref_asserts(dev_priv); 2180 2181 return ret; 2182 } 2183 2184 static irqreturn_t cherryview_irq_handler(int irq, void *arg) 2185 { 2186 struct drm_device *dev = arg; 2187 struct drm_i915_private *dev_priv = to_i915(dev); 2188 irqreturn_t ret = IRQ_NONE; 2189 2190 if (!intel_irqs_enabled(dev_priv)) 2191 return IRQ_NONE; 2192 2193 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2194 disable_rpm_wakeref_asserts(dev_priv); 2195 2196 do { 2197 u32 master_ctl, iir; 2198 u32 pipe_stats[I915_MAX_PIPES] = {}; 2199 u32 hotplug_status = 0; 2200 u32 gt_iir[4]; 2201 u32 ier = 0; 2202 2203 master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL; 2204 iir = I915_READ(VLV_IIR); 2205 2206 if (master_ctl == 0 && iir == 0) 2207 break; 2208 2209 ret = IRQ_HANDLED; 2210 2211 /* 2212 * Theory on interrupt generation, based on empirical evidence: 2213 * 2214 * x = ((VLV_IIR & VLV_IER) || 2215 * ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) && 2216 * (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL))); 2217 * 2218 * A CPU interrupt will only be raised when 'x' has a 0->1 edge. 2219 * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to 2220 * guarantee the CPU interrupt will be raised again even if we 2221 * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL 2222 * bits this time around. 2223 */ 2224 I915_WRITE(GEN8_MASTER_IRQ, 0); 2225 ier = I915_READ(VLV_IER); 2226 I915_WRITE(VLV_IER, 0); 2227 2228 gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir); 2229 2230 if (iir & I915_DISPLAY_PORT_INTERRUPT) 2231 hotplug_status = i9xx_hpd_irq_ack(dev_priv); 2232 2233 /* Call regardless, as some status bits might not be 2234 * signalled in iir */ 2235 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats); 2236 2237 if (iir & (I915_LPE_PIPE_A_INTERRUPT | 2238 I915_LPE_PIPE_B_INTERRUPT | 2239 I915_LPE_PIPE_C_INTERRUPT)) 2240 intel_lpe_audio_irq_handler(dev_priv); 2241 2242 /* 2243 * VLV_IIR is single buffered, and reflects the level 2244 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last. 
2245 */ 2246 if (iir) 2247 I915_WRITE(VLV_IIR, iir); 2248 2249 I915_WRITE(VLV_IER, ier); 2250 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 2251 2252 gen8_gt_irq_handler(dev_priv, master_ctl, gt_iir); 2253 2254 if (hotplug_status) 2255 i9xx_hpd_irq_handler(dev_priv, hotplug_status); 2256 2257 valleyview_pipestat_irq_handler(dev_priv, pipe_stats); 2258 } while (0); 2259 2260 enable_rpm_wakeref_asserts(dev_priv); 2261 2262 return ret; 2263 } 2264 2265 static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv, 2266 u32 hotplug_trigger, 2267 const u32 hpd[HPD_NUM_PINS]) 2268 { 2269 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; 2270 2271 /* 2272 * Somehow the PCH doesn't seem to really ack the interrupt to the CPU 2273 * unless we touch the hotplug register, even if hotplug_trigger is 2274 * zero. Not acking leads to "The master control interrupt lied (SDE)!" 2275 * errors. 2276 */ 2277 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); 2278 if (!hotplug_trigger) { 2279 u32 mask = PORTA_HOTPLUG_STATUS_MASK | 2280 PORTD_HOTPLUG_STATUS_MASK | 2281 PORTC_HOTPLUG_STATUS_MASK | 2282 PORTB_HOTPLUG_STATUS_MASK; 2283 dig_hotplug_reg &= ~mask; 2284 } 2285 2286 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); 2287 if (!hotplug_trigger) 2288 return; 2289 2290 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger, 2291 dig_hotplug_reg, hpd, 2292 pch_port_hotplug_long_detect); 2293 2294 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2295 } 2296 2297 static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) 2298 { 2299 int pipe; 2300 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK; 2301 2302 ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ibx); 2303 2304 if (pch_iir & SDE_AUDIO_POWER_MASK) { 2305 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >> 2306 SDE_AUDIO_POWER_SHIFT); 2307 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", 2308 port_name(port)); 2309 } 2310 2311 if (pch_iir & SDE_AUX_MASK) 2312 dp_aux_irq_handler(dev_priv); 2313 2314 if (pch_iir & SDE_GMBUS) 2315 gmbus_irq_handler(dev_priv); 2316 2317 if (pch_iir & SDE_AUDIO_HDCP_MASK) 2318 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n"); 2319 2320 if (pch_iir & SDE_AUDIO_TRANS_MASK) 2321 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n"); 2322 2323 if (pch_iir & SDE_POISON) 2324 DRM_ERROR("PCH poison interrupt\n"); 2325 2326 if (pch_iir & SDE_FDI_MASK) 2327 for_each_pipe(dev_priv, pipe) 2328 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 2329 pipe_name(pipe), 2330 I915_READ(FDI_RX_IIR(pipe))); 2331 2332 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE)) 2333 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n"); 2334 2335 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR)) 2336 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n"); 2337 2338 if (pch_iir & SDE_TRANSA_FIFO_UNDER) 2339 intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A); 2340 2341 if (pch_iir & SDE_TRANSB_FIFO_UNDER) 2342 intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_B); 2343 } 2344 2345 static void ivb_err_int_handler(struct drm_i915_private *dev_priv) 2346 { 2347 u32 err_int = I915_READ(GEN7_ERR_INT); 2348 enum pipe pipe; 2349 2350 if (err_int & ERR_INT_POISON) 2351 DRM_ERROR("Poison interrupt\n"); 2352 2353 for_each_pipe(dev_priv, pipe) { 2354 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) 2355 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2356 2357 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) { 2358 if (IS_IVYBRIDGE(dev_priv)) 2359 ivb_pipe_crc_irq_handler(dev_priv, pipe); 2360 else 2361 
hsw_pipe_crc_irq_handler(dev_priv, pipe); 2362 } 2363 } 2364 2365 I915_WRITE(GEN7_ERR_INT, err_int); 2366 } 2367 2368 static void cpt_serr_int_handler(struct drm_i915_private *dev_priv) 2369 { 2370 u32 serr_int = I915_READ(SERR_INT); 2371 enum pipe pipe; 2372 2373 if (serr_int & SERR_INT_POISON) 2374 DRM_ERROR("PCH poison interrupt\n"); 2375 2376 for_each_pipe(dev_priv, pipe) 2377 if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe)) 2378 intel_pch_fifo_underrun_irq_handler(dev_priv, pipe); 2379 2380 I915_WRITE(SERR_INT, serr_int); 2381 } 2382 2383 static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) 2384 { 2385 int pipe; 2386 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; 2387 2388 ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_cpt); 2389 2390 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) { 2391 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> 2392 SDE_AUDIO_POWER_SHIFT_CPT); 2393 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n", 2394 port_name(port)); 2395 } 2396 2397 if (pch_iir & SDE_AUX_MASK_CPT) 2398 dp_aux_irq_handler(dev_priv); 2399 2400 if (pch_iir & SDE_GMBUS_CPT) 2401 gmbus_irq_handler(dev_priv); 2402 2403 if (pch_iir & SDE_AUDIO_CP_REQ_CPT) 2404 DRM_DEBUG_DRIVER("Audio CP request interrupt\n"); 2405 2406 if (pch_iir & SDE_AUDIO_CP_CHG_CPT) 2407 DRM_DEBUG_DRIVER("Audio CP change interrupt\n"); 2408 2409 if (pch_iir & SDE_FDI_MASK_CPT) 2410 for_each_pipe(dev_priv, pipe) 2411 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 2412 pipe_name(pipe), 2413 I915_READ(FDI_RX_IIR(pipe))); 2414 2415 if (pch_iir & SDE_ERROR_CPT) 2416 cpt_serr_int_handler(dev_priv); 2417 } 2418 2419 static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) 2420 { 2421 u32 ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_ICP; 2422 u32 tc_hotplug_trigger = pch_iir & SDE_TC_MASK_ICP; 2423 u32 pin_mask = 0, long_mask = 0; 2424 2425 if (ddi_hotplug_trigger) { 2426 u32 dig_hotplug_reg; 2427 2428 dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_DDI); 2429 I915_WRITE(SHOTPLUG_CTL_DDI, dig_hotplug_reg); 2430 2431 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 2432 ddi_hotplug_trigger, 2433 dig_hotplug_reg, hpd_icp, 2434 icp_ddi_port_hotplug_long_detect); 2435 } 2436 2437 if (tc_hotplug_trigger) { 2438 u32 dig_hotplug_reg; 2439 2440 dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_TC); 2441 I915_WRITE(SHOTPLUG_CTL_TC, dig_hotplug_reg); 2442 2443 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 2444 tc_hotplug_trigger, 2445 dig_hotplug_reg, hpd_icp, 2446 icp_tc_port_hotplug_long_detect); 2447 } 2448 2449 if (pin_mask) 2450 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2451 2452 if (pch_iir & SDE_GMBUS_ICP) 2453 gmbus_irq_handler(dev_priv); 2454 } 2455 2456 static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) 2457 { 2458 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT & 2459 ~SDE_PORTE_HOTPLUG_SPT; 2460 u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT; 2461 u32 pin_mask = 0, long_mask = 0; 2462 2463 if (hotplug_trigger) { 2464 u32 dig_hotplug_reg; 2465 2466 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); 2467 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); 2468 2469 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 2470 hotplug_trigger, dig_hotplug_reg, hpd_spt, 2471 spt_port_hotplug_long_detect); 2472 } 2473 2474 if (hotplug2_trigger) { 2475 u32 dig_hotplug_reg; 2476 2477 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2); 2478 I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg); 2479 2480 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 2481 
hotplug2_trigger, dig_hotplug_reg, hpd_spt, 2482 spt_port_hotplug2_long_detect); 2483 } 2484 2485 if (pin_mask) 2486 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2487 2488 if (pch_iir & SDE_GMBUS_CPT) 2489 gmbus_irq_handler(dev_priv); 2490 } 2491 2492 static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv, 2493 u32 hotplug_trigger, 2494 const u32 hpd[HPD_NUM_PINS]) 2495 { 2496 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; 2497 2498 dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL); 2499 I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg); 2500 2501 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger, 2502 dig_hotplug_reg, hpd, 2503 ilk_port_hotplug_long_detect); 2504 2505 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2506 } 2507 2508 static void ilk_display_irq_handler(struct drm_i915_private *dev_priv, 2509 u32 de_iir) 2510 { 2511 enum pipe pipe; 2512 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG; 2513 2514 if (hotplug_trigger) 2515 ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ilk); 2516 2517 if (de_iir & DE_AUX_CHANNEL_A) 2518 dp_aux_irq_handler(dev_priv); 2519 2520 if (de_iir & DE_GSE) 2521 intel_opregion_asle_intr(dev_priv); 2522 2523 if (de_iir & DE_POISON) 2524 DRM_ERROR("Poison interrupt\n"); 2525 2526 for_each_pipe(dev_priv, pipe) { 2527 if (de_iir & DE_PIPE_VBLANK(pipe)) 2528 drm_handle_vblank(&dev_priv->drm, pipe); 2529 2530 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe)) 2531 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2532 2533 if (de_iir & DE_PIPE_CRC_DONE(pipe)) 2534 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 2535 } 2536 2537 /* check event from PCH */ 2538 if (de_iir & DE_PCH_EVENT) { 2539 u32 pch_iir = I915_READ(SDEIIR); 2540 2541 if (HAS_PCH_CPT(dev_priv)) 2542 cpt_irq_handler(dev_priv, pch_iir); 2543 else 2544 ibx_irq_handler(dev_priv, pch_iir); 2545 2546 /* should clear PCH hotplug event before clear CPU irq */ 2547 I915_WRITE(SDEIIR, pch_iir); 2548 } 2549 2550 if (IS_GEN5(dev_priv) && de_iir & DE_PCU_EVENT) 2551 ironlake_rps_change_irq_handler(dev_priv); 2552 } 2553 2554 static void ivb_display_irq_handler(struct drm_i915_private *dev_priv, 2555 u32 de_iir) 2556 { 2557 enum pipe pipe; 2558 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB; 2559 2560 if (hotplug_trigger) 2561 ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ivb); 2562 2563 if (de_iir & DE_ERR_INT_IVB) 2564 ivb_err_int_handler(dev_priv); 2565 2566 if (de_iir & DE_EDP_PSR_INT_HSW) { 2567 u32 psr_iir = I915_READ(EDP_PSR_IIR); 2568 2569 intel_psr_irq_handler(dev_priv, psr_iir); 2570 I915_WRITE(EDP_PSR_IIR, psr_iir); 2571 } 2572 2573 if (de_iir & DE_AUX_CHANNEL_A_IVB) 2574 dp_aux_irq_handler(dev_priv); 2575 2576 if (de_iir & DE_GSE_IVB) 2577 intel_opregion_asle_intr(dev_priv); 2578 2579 for_each_pipe(dev_priv, pipe) { 2580 if (de_iir & (DE_PIPE_VBLANK_IVB(pipe))) 2581 drm_handle_vblank(&dev_priv->drm, pipe); 2582 } 2583 2584 /* check event from PCH */ 2585 if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) { 2586 u32 pch_iir = I915_READ(SDEIIR); 2587 2588 cpt_irq_handler(dev_priv, pch_iir); 2589 2590 /* clear PCH hotplug event before clear CPU irq */ 2591 I915_WRITE(SDEIIR, pch_iir); 2592 } 2593 } 2594 2595 /* 2596 * To handle irqs with the minimum potential races with fresh interrupts, we: 2597 * 1 - Disable Master Interrupt Control. 2598 * 2 - Find the source(s) of the interrupt. 2599 * 3 - Clear the Interrupt Identity bits (IIR). 2600 * 4 - Process the interrupt(s) that had bits set in the IIRs. 
2601 * 5 - Re-enable Master Interrupt Control.
2602 */
2603 static irqreturn_t ironlake_irq_handler(int irq, void *arg)
2604 {
2605 struct drm_device *dev = arg;
2606 struct drm_i915_private *dev_priv = to_i915(dev);
2607 u32 de_iir, gt_iir, de_ier, sde_ier = 0;
2608 irqreturn_t ret = IRQ_NONE;
2609
2610 if (!intel_irqs_enabled(dev_priv))
2611 return IRQ_NONE;
2612
2613 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
2614 disable_rpm_wakeref_asserts(dev_priv);
2615
2616 /* disable master interrupt before clearing iir */
2617 de_ier = I915_READ(DEIER);
2618 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
2619
2620 /* Disable south interrupts. We'll only write to SDEIIR once, so further
2621 * interrupts will be stored on its back queue, and then we'll be
2622 * able to process them after we restore SDEIER (as soon as we restore
2623 * it, we'll get an interrupt if SDEIIR still has something to process
2624 * due to its back queue). */
2625 if (!HAS_PCH_NOP(dev_priv)) {
2626 sde_ier = I915_READ(SDEIER);
2627 I915_WRITE(SDEIER, 0);
2628 }
2629
2630 /* Find, clear, then process each source of interrupt */
2631
2632 gt_iir = I915_READ(GTIIR);
2633 if (gt_iir) {
2634 I915_WRITE(GTIIR, gt_iir);
2635 ret = IRQ_HANDLED;
2636 if (INTEL_GEN(dev_priv) >= 6)
2637 snb_gt_irq_handler(dev_priv, gt_iir);
2638 else
2639 ilk_gt_irq_handler(dev_priv, gt_iir);
2640 }
2641
2642 de_iir = I915_READ(DEIIR);
2643 if (de_iir) {
2644 I915_WRITE(DEIIR, de_iir);
2645 ret = IRQ_HANDLED;
2646 if (INTEL_GEN(dev_priv) >= 7)
2647 ivb_display_irq_handler(dev_priv, de_iir);
2648 else
2649 ilk_display_irq_handler(dev_priv, de_iir);
2650 }
2651
2652 if (INTEL_GEN(dev_priv) >= 6) {
2653 u32 pm_iir = I915_READ(GEN6_PMIIR);
2654 if (pm_iir) {
2655 I915_WRITE(GEN6_PMIIR, pm_iir);
2656 ret = IRQ_HANDLED;
2657 gen6_rps_irq_handler(dev_priv, pm_iir);
2658 }
2659 }
2660
2661 I915_WRITE(DEIER, de_ier);
2662 if (!HAS_PCH_NOP(dev_priv))
2663 I915_WRITE(SDEIER, sde_ier);
2664
2665 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
2666 enable_rpm_wakeref_asserts(dev_priv);
2667
2668 return ret;
2669 }
2670
2671 static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv,
2672 u32 hotplug_trigger,
2673 const u32 hpd[HPD_NUM_PINS])
2674 {
2675 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2676
2677 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2678 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2679
2680 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger,
2681 dig_hotplug_reg, hpd,
2682 bxt_port_hotplug_long_detect);
2683
2684 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2685 }
2686
2687 static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
2688 {
2689 u32 pin_mask = 0, long_mask = 0;
2690 u32 trigger_tc = iir & GEN11_DE_TC_HOTPLUG_MASK;
2691 u32 trigger_tbt = iir & GEN11_DE_TBT_HOTPLUG_MASK;
2692
2693 if (trigger_tc) {
2694 u32 dig_hotplug_reg;
2695
2696 dig_hotplug_reg = I915_READ(GEN11_TC_HOTPLUG_CTL);
2697 I915_WRITE(GEN11_TC_HOTPLUG_CTL, dig_hotplug_reg);
2698
2699 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tc,
2700 dig_hotplug_reg, hpd_gen11,
2701 gen11_port_hotplug_long_detect);
2702 }
2703
2704 if (trigger_tbt) {
2705 u32 dig_hotplug_reg;
2706
2707 dig_hotplug_reg = I915_READ(GEN11_TBT_HOTPLUG_CTL);
2708 I915_WRITE(GEN11_TBT_HOTPLUG_CTL, dig_hotplug_reg);
2709
2710 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tbt,
2711 dig_hotplug_reg, hpd_gen11,
2712 gen11_port_hotplug_long_detect);
2713 } 2714 2715 if (pin_mask) 2716 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2717 else 2718 DRM_ERROR("Unexpected DE HPD interrupt 0x%08x\n", iir); 2719 } 2720 2721 static irqreturn_t 2722 gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) 2723 { 2724 irqreturn_t ret = IRQ_NONE; 2725 u32 iir; 2726 enum pipe pipe; 2727 2728 if (master_ctl & GEN8_DE_MISC_IRQ) { 2729 iir = I915_READ(GEN8_DE_MISC_IIR); 2730 if (iir) { 2731 bool found = false; 2732 2733 I915_WRITE(GEN8_DE_MISC_IIR, iir); 2734 ret = IRQ_HANDLED; 2735 2736 if (iir & GEN8_DE_MISC_GSE) { 2737 intel_opregion_asle_intr(dev_priv); 2738 found = true; 2739 } 2740 2741 if (iir & GEN8_DE_EDP_PSR) { 2742 u32 psr_iir = I915_READ(EDP_PSR_IIR); 2743 2744 intel_psr_irq_handler(dev_priv, psr_iir); 2745 I915_WRITE(EDP_PSR_IIR, psr_iir); 2746 found = true; 2747 } 2748 2749 if (!found) 2750 DRM_ERROR("Unexpected DE Misc interrupt\n"); 2751 } 2752 else 2753 DRM_ERROR("The master control interrupt lied (DE MISC)!\n"); 2754 } 2755 2756 if (INTEL_GEN(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) { 2757 iir = I915_READ(GEN11_DE_HPD_IIR); 2758 if (iir) { 2759 I915_WRITE(GEN11_DE_HPD_IIR, iir); 2760 ret = IRQ_HANDLED; 2761 gen11_hpd_irq_handler(dev_priv, iir); 2762 } else { 2763 DRM_ERROR("The master control interrupt lied, (DE HPD)!\n"); 2764 } 2765 } 2766 2767 if (master_ctl & GEN8_DE_PORT_IRQ) { 2768 iir = I915_READ(GEN8_DE_PORT_IIR); 2769 if (iir) { 2770 u32 tmp_mask; 2771 bool found = false; 2772 2773 I915_WRITE(GEN8_DE_PORT_IIR, iir); 2774 ret = IRQ_HANDLED; 2775 2776 tmp_mask = GEN8_AUX_CHANNEL_A; 2777 if (INTEL_GEN(dev_priv) >= 9) 2778 tmp_mask |= GEN9_AUX_CHANNEL_B | 2779 GEN9_AUX_CHANNEL_C | 2780 GEN9_AUX_CHANNEL_D; 2781 2782 if (INTEL_GEN(dev_priv) >= 11) 2783 tmp_mask |= ICL_AUX_CHANNEL_E; 2784 2785 if (IS_CNL_WITH_PORT_F(dev_priv) || 2786 INTEL_GEN(dev_priv) >= 11) 2787 tmp_mask |= CNL_AUX_CHANNEL_F; 2788 2789 if (iir & tmp_mask) { 2790 dp_aux_irq_handler(dev_priv); 2791 found = true; 2792 } 2793 2794 if (IS_GEN9_LP(dev_priv)) { 2795 tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK; 2796 if (tmp_mask) { 2797 bxt_hpd_irq_handler(dev_priv, tmp_mask, 2798 hpd_bxt); 2799 found = true; 2800 } 2801 } else if (IS_BROADWELL(dev_priv)) { 2802 tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG; 2803 if (tmp_mask) { 2804 ilk_hpd_irq_handler(dev_priv, 2805 tmp_mask, hpd_bdw); 2806 found = true; 2807 } 2808 } 2809 2810 if (IS_GEN9_LP(dev_priv) && (iir & BXT_DE_PORT_GMBUS)) { 2811 gmbus_irq_handler(dev_priv); 2812 found = true; 2813 } 2814 2815 if (!found) 2816 DRM_ERROR("Unexpected DE Port interrupt\n"); 2817 } 2818 else 2819 DRM_ERROR("The master control interrupt lied (DE PORT)!\n"); 2820 } 2821 2822 for_each_pipe(dev_priv, pipe) { 2823 u32 fault_errors; 2824 2825 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe))) 2826 continue; 2827 2828 iir = I915_READ(GEN8_DE_PIPE_IIR(pipe)); 2829 if (!iir) { 2830 DRM_ERROR("The master control interrupt lied (DE PIPE)!\n"); 2831 continue; 2832 } 2833 2834 ret = IRQ_HANDLED; 2835 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir); 2836 2837 if (iir & GEN8_PIPE_VBLANK) 2838 drm_handle_vblank(&dev_priv->drm, pipe); 2839 2840 if (iir & GEN8_PIPE_CDCLK_CRC_DONE) 2841 hsw_pipe_crc_irq_handler(dev_priv, pipe); 2842 2843 if (iir & GEN8_PIPE_FIFO_UNDERRUN) 2844 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2845 2846 fault_errors = iir; 2847 if (INTEL_GEN(dev_priv) >= 9) 2848 fault_errors &= GEN9_DE_PIPE_IRQ_FAULT_ERRORS; 2849 else 2850 fault_errors &= GEN8_DE_PIPE_IRQ_FAULT_ERRORS; 2851 2852 if (fault_errors) 2853 
DRM_ERROR("Fault errors on pipe %c: 0x%08x\n", 2854 pipe_name(pipe), 2855 fault_errors); 2856 } 2857 2858 if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) && 2859 master_ctl & GEN8_DE_PCH_IRQ) { 2860 /* 2861 * FIXME(BDW): Assume for now that the new interrupt handling 2862 * scheme also closed the SDE interrupt handling race we've seen 2863 * on older pch-split platforms. But this needs testing. 2864 */ 2865 iir = I915_READ(SDEIIR); 2866 if (iir) { 2867 I915_WRITE(SDEIIR, iir); 2868 ret = IRQ_HANDLED; 2869 2870 if (HAS_PCH_ICP(dev_priv)) 2871 icp_irq_handler(dev_priv, iir); 2872 else if (HAS_PCH_SPT(dev_priv) || 2873 HAS_PCH_KBP(dev_priv) || 2874 HAS_PCH_CNP(dev_priv)) 2875 spt_irq_handler(dev_priv, iir); 2876 else 2877 cpt_irq_handler(dev_priv, iir); 2878 } else { 2879 /* 2880 * Like on previous PCH there seems to be something 2881 * fishy going on with forwarding PCH interrupts. 2882 */ 2883 DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n"); 2884 } 2885 } 2886 2887 return ret; 2888 } 2889 2890 static inline u32 gen8_master_intr_disable(void __iomem * const regs) 2891 { 2892 raw_reg_write(regs, GEN8_MASTER_IRQ, 0); 2893 2894 /* 2895 * Now with master disabled, get a sample of level indications 2896 * for this interrupt. Indications will be cleared on related acks. 2897 * New indications can and will light up during processing, 2898 * and will generate new interrupt after enabling master. 2899 */ 2900 return raw_reg_read(regs, GEN8_MASTER_IRQ); 2901 } 2902 2903 static inline void gen8_master_intr_enable(void __iomem * const regs) 2904 { 2905 raw_reg_write(regs, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 2906 } 2907 2908 static irqreturn_t gen8_irq_handler(int irq, void *arg) 2909 { 2910 struct drm_i915_private *dev_priv = to_i915(arg); 2911 void __iomem * const regs = dev_priv->regs; 2912 u32 master_ctl; 2913 u32 gt_iir[4]; 2914 2915 if (!intel_irqs_enabled(dev_priv)) 2916 return IRQ_NONE; 2917 2918 master_ctl = gen8_master_intr_disable(regs); 2919 if (!master_ctl) { 2920 gen8_master_intr_enable(regs); 2921 return IRQ_NONE; 2922 } 2923 2924 /* Find, clear, then process each source of interrupt */ 2925 gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir); 2926 2927 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2928 if (master_ctl & ~GEN8_GT_IRQS) { 2929 disable_rpm_wakeref_asserts(dev_priv); 2930 gen8_de_irq_handler(dev_priv, master_ctl); 2931 enable_rpm_wakeref_asserts(dev_priv); 2932 } 2933 2934 gen8_master_intr_enable(regs); 2935 2936 gen8_gt_irq_handler(dev_priv, master_ctl, gt_iir); 2937 2938 return IRQ_HANDLED; 2939 } 2940 2941 struct wedge_me { 2942 struct delayed_work work; 2943 struct drm_i915_private *i915; 2944 const char *name; 2945 }; 2946 2947 static void wedge_me(struct work_struct *work) 2948 { 2949 struct wedge_me *w = container_of(work, typeof(*w), work.work); 2950 2951 dev_err(w->i915->drm.dev, 2952 "%s timed out, cancelling all in-flight rendering.\n", 2953 w->name); 2954 i915_gem_set_wedged(w->i915); 2955 } 2956 2957 static void __init_wedge(struct wedge_me *w, 2958 struct drm_i915_private *i915, 2959 long timeout, 2960 const char *name) 2961 { 2962 w->i915 = i915; 2963 w->name = name; 2964 2965 INIT_DELAYED_WORK_ONSTACK(&w->work, wedge_me); 2966 schedule_delayed_work(&w->work, timeout); 2967 } 2968 2969 static void __fini_wedge(struct wedge_me *w) 2970 { 2971 cancel_delayed_work_sync(&w->work); 2972 destroy_delayed_work_on_stack(&w->work); 2973 w->i915 = NULL; 2974 } 2975 2976 #define i915_wedge_on_timeout(W, DEV, TIMEOUT) \ 2977 
for (__init_wedge((W), (DEV), (TIMEOUT), __func__); \ 2978 (W)->i915; \ 2979 __fini_wedge((W))) 2980 2981 static u32 2982 gen11_gt_engine_identity(struct drm_i915_private * const i915, 2983 const unsigned int bank, const unsigned int bit) 2984 { 2985 void __iomem * const regs = i915->regs; 2986 u32 timeout_ts; 2987 u32 ident; 2988 2989 lockdep_assert_held(&i915->irq_lock); 2990 2991 raw_reg_write(regs, GEN11_IIR_REG_SELECTOR(bank), BIT(bit)); 2992 2993 /* 2994 * NB: Specs do not specify how long to spin wait, 2995 * so we do ~100us as an educated guess. 2996 */ 2997 timeout_ts = (local_clock() >> 10) + 100; 2998 do { 2999 ident = raw_reg_read(regs, GEN11_INTR_IDENTITY_REG(bank)); 3000 } while (!(ident & GEN11_INTR_DATA_VALID) && 3001 !time_after32(local_clock() >> 10, timeout_ts)); 3002 3003 if (unlikely(!(ident & GEN11_INTR_DATA_VALID))) { 3004 DRM_ERROR("INTR_IDENTITY_REG%u:%u 0x%08x not valid!\n", 3005 bank, bit, ident); 3006 return 0; 3007 } 3008 3009 raw_reg_write(regs, GEN11_INTR_IDENTITY_REG(bank), 3010 GEN11_INTR_DATA_VALID); 3011 3012 return ident; 3013 } 3014 3015 static void 3016 gen11_other_irq_handler(struct drm_i915_private * const i915, 3017 const u8 instance, const u16 iir) 3018 { 3019 if (instance == OTHER_GTPM_INSTANCE) 3020 return gen6_rps_irq_handler(i915, iir); 3021 3022 WARN_ONCE(1, "unhandled other interrupt instance=0x%x, iir=0x%x\n", 3023 instance, iir); 3024 } 3025 3026 static void 3027 gen11_engine_irq_handler(struct drm_i915_private * const i915, 3028 const u8 class, const u8 instance, const u16 iir) 3029 { 3030 struct intel_engine_cs *engine; 3031 3032 if (instance <= MAX_ENGINE_INSTANCE) 3033 engine = i915->engine_class[class][instance]; 3034 else 3035 engine = NULL; 3036 3037 if (likely(engine)) 3038 return gen8_cs_irq_handler(engine, iir); 3039 3040 WARN_ONCE(1, "unhandled engine interrupt class=0x%x, instance=0x%x\n", 3041 class, instance); 3042 } 3043 3044 static void 3045 gen11_gt_identity_handler(struct drm_i915_private * const i915, 3046 const u32 identity) 3047 { 3048 const u8 class = GEN11_INTR_ENGINE_CLASS(identity); 3049 const u8 instance = GEN11_INTR_ENGINE_INSTANCE(identity); 3050 const u16 intr = GEN11_INTR_ENGINE_INTR(identity); 3051 3052 if (unlikely(!intr)) 3053 return; 3054 3055 if (class <= COPY_ENGINE_CLASS) 3056 return gen11_engine_irq_handler(i915, class, instance, intr); 3057 3058 if (class == OTHER_CLASS) 3059 return gen11_other_irq_handler(i915, instance, intr); 3060 3061 WARN_ONCE(1, "unknown interrupt class=0x%x, instance=0x%x, intr=0x%x\n", 3062 class, instance, intr); 3063 } 3064 3065 static void 3066 gen11_gt_bank_handler(struct drm_i915_private * const i915, 3067 const unsigned int bank) 3068 { 3069 void __iomem * const regs = i915->regs; 3070 unsigned long intr_dw; 3071 unsigned int bit; 3072 3073 lockdep_assert_held(&i915->irq_lock); 3074 3075 intr_dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank)); 3076 3077 if (unlikely(!intr_dw)) { 3078 DRM_ERROR("GT_INTR_DW%u blank!\n", bank); 3079 return; 3080 } 3081 3082 for_each_set_bit(bit, &intr_dw, 32) { 3083 const u32 ident = gen11_gt_engine_identity(i915, 3084 bank, bit); 3085 3086 gen11_gt_identity_handler(i915, ident); 3087 } 3088 3089 /* Clear must be after shared has been served for engine */ 3090 raw_reg_write(regs, GEN11_GT_INTR_DW(bank), intr_dw); 3091 } 3092 3093 static void 3094 gen11_gt_irq_handler(struct drm_i915_private * const i915, 3095 const u32 master_ctl) 3096 { 3097 unsigned int bank; 3098 3099 spin_lock(&i915->irq_lock); 3100 3101 for (bank = 0; bank < 2; bank++) { 
3102 if (master_ctl & GEN11_GT_DW_IRQ(bank)) 3103 gen11_gt_bank_handler(i915, bank); 3104 } 3105 3106 spin_unlock(&i915->irq_lock); 3107 } 3108 3109 static u32 3110 gen11_gu_misc_irq_ack(struct drm_i915_private *dev_priv, const u32 master_ctl) 3111 { 3112 void __iomem * const regs = dev_priv->regs; 3113 u32 iir; 3114 3115 if (!(master_ctl & GEN11_GU_MISC_IRQ)) 3116 return 0; 3117 3118 iir = raw_reg_read(regs, GEN11_GU_MISC_IIR); 3119 if (likely(iir)) 3120 raw_reg_write(regs, GEN11_GU_MISC_IIR, iir); 3121 3122 return iir; 3123 } 3124 3125 static void 3126 gen11_gu_misc_irq_handler(struct drm_i915_private *dev_priv, const u32 iir) 3127 { 3128 if (iir & GEN11_GU_MISC_GSE) 3129 intel_opregion_asle_intr(dev_priv); 3130 } 3131 3132 static inline u32 gen11_master_intr_disable(void __iomem * const regs) 3133 { 3134 raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0); 3135 3136 /* 3137 * Now with master disabled, get a sample of level indications 3138 * for this interrupt. Indications will be cleared on related acks. 3139 * New indications can and will light up during processing, 3140 * and will generate new interrupt after enabling master. 3141 */ 3142 return raw_reg_read(regs, GEN11_GFX_MSTR_IRQ); 3143 } 3144 3145 static inline void gen11_master_intr_enable(void __iomem * const regs) 3146 { 3147 raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ); 3148 } 3149 3150 static irqreturn_t gen11_irq_handler(int irq, void *arg) 3151 { 3152 struct drm_i915_private * const i915 = to_i915(arg); 3153 void __iomem * const regs = i915->regs; 3154 u32 master_ctl; 3155 u32 gu_misc_iir; 3156 3157 if (!intel_irqs_enabled(i915)) 3158 return IRQ_NONE; 3159 3160 master_ctl = gen11_master_intr_disable(regs); 3161 if (!master_ctl) { 3162 gen11_master_intr_enable(regs); 3163 return IRQ_NONE; 3164 } 3165 3166 /* Find, clear, then process each source of interrupt. */ 3167 gen11_gt_irq_handler(i915, master_ctl); 3168 3169 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 3170 if (master_ctl & GEN11_DISPLAY_IRQ) { 3171 const u32 disp_ctl = raw_reg_read(regs, GEN11_DISPLAY_INT_CTL); 3172 3173 disable_rpm_wakeref_asserts(i915); 3174 /* 3175 * GEN11_DISPLAY_INT_CTL has same format as GEN8_MASTER_IRQ 3176 * for the display related bits. 
3177 */ 3178 gen8_de_irq_handler(i915, disp_ctl); 3179 enable_rpm_wakeref_asserts(i915); 3180 } 3181 3182 gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl); 3183 3184 gen11_master_intr_enable(regs); 3185 3186 gen11_gu_misc_irq_handler(i915, gu_misc_iir); 3187 3188 return IRQ_HANDLED; 3189 } 3190 3191 static void i915_reset_device(struct drm_i915_private *dev_priv, 3192 u32 engine_mask, 3193 const char *reason) 3194 { 3195 struct i915_gpu_error *error = &dev_priv->gpu_error; 3196 struct kobject *kobj = &dev_priv->drm.primary->kdev->kobj; 3197 char *error_event[] = { I915_ERROR_UEVENT "=1", NULL }; 3198 char *reset_event[] = { I915_RESET_UEVENT "=1", NULL }; 3199 char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL }; 3200 struct wedge_me w; 3201 3202 kobject_uevent_env(kobj, KOBJ_CHANGE, error_event); 3203 3204 DRM_DEBUG_DRIVER("resetting chip\n"); 3205 kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event); 3206 3207 /* Use a watchdog to ensure that our reset completes */ 3208 i915_wedge_on_timeout(&w, dev_priv, 5*HZ) { 3209 intel_prepare_reset(dev_priv); 3210 3211 error->reason = reason; 3212 error->stalled_mask = engine_mask; 3213 3214 /* Signal that locked waiters should reset the GPU */ 3215 smp_mb__before_atomic(); 3216 set_bit(I915_RESET_HANDOFF, &error->flags); 3217 wake_up_all(&error->wait_queue); 3218 3219 /* Wait for anyone holding the lock to wakeup, without 3220 * blocking indefinitely on struct_mutex. 3221 */ 3222 do { 3223 if (mutex_trylock(&dev_priv->drm.struct_mutex)) { 3224 i915_reset(dev_priv, engine_mask, reason); 3225 mutex_unlock(&dev_priv->drm.struct_mutex); 3226 } 3227 } while (wait_on_bit_timeout(&error->flags, 3228 I915_RESET_HANDOFF, 3229 TASK_UNINTERRUPTIBLE, 3230 1)); 3231 3232 error->stalled_mask = 0; 3233 error->reason = NULL; 3234 3235 intel_finish_reset(dev_priv); 3236 } 3237 3238 if (!test_bit(I915_WEDGED, &error->flags)) 3239 kobject_uevent_env(kobj, KOBJ_CHANGE, reset_done_event); 3240 } 3241 3242 void i915_clear_error_registers(struct drm_i915_private *dev_priv) 3243 { 3244 u32 eir; 3245 3246 if (!IS_GEN2(dev_priv)) 3247 I915_WRITE(PGTBL_ER, I915_READ(PGTBL_ER)); 3248 3249 if (INTEL_GEN(dev_priv) < 4) 3250 I915_WRITE(IPEIR, I915_READ(IPEIR)); 3251 else 3252 I915_WRITE(IPEIR_I965, I915_READ(IPEIR_I965)); 3253 3254 I915_WRITE(EIR, I915_READ(EIR)); 3255 eir = I915_READ(EIR); 3256 if (eir) { 3257 /* 3258 * some errors might have become stuck, 3259 * mask them. 3260 */ 3261 DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masking\n", eir); 3262 I915_WRITE(EMR, I915_READ(EMR) | eir); 3263 I915_WRITE(IIR, I915_MASTER_ERROR_INTERRUPT); 3264 } 3265 3266 if (INTEL_GEN(dev_priv) >= 8) { 3267 I915_WRITE(GEN8_RING_FAULT_REG, 3268 I915_READ(GEN8_RING_FAULT_REG) & ~RING_FAULT_VALID); 3269 POSTING_READ(GEN8_RING_FAULT_REG); 3270 } else if (INTEL_GEN(dev_priv) >= 6) { 3271 struct intel_engine_cs *engine; 3272 enum intel_engine_id id; 3273 3274 for_each_engine(engine, dev_priv, id) { 3275 I915_WRITE(RING_FAULT_REG(engine), 3276 I915_READ(RING_FAULT_REG(engine)) & 3277 ~RING_FAULT_VALID); 3278 } 3279 POSTING_READ(RING_FAULT_REG(dev_priv->engine[RCS])); 3280 } 3281 } 3282 3283 /** 3284 * i915_handle_error - handle a gpu error 3285 * @dev_priv: i915 device private 3286 * @engine_mask: mask representing engines that are hung 3287 * @flags: control flags 3288 * @fmt: Error message format string 3289 * 3290 * Do some basic checking of register state at error time and 3291 * dump it to the syslog. 
Also call i915_capture_error_state() to make 3292 * sure we get a record and make it available in debugfs. Fire a uevent 3293 * so userspace knows something bad happened (should trigger collection 3294 * of a ring dump etc.). 3295 */ 3296 void i915_handle_error(struct drm_i915_private *dev_priv, 3297 u32 engine_mask, 3298 unsigned long flags, 3299 const char *fmt, ...) 3300 { 3301 struct intel_engine_cs *engine; 3302 unsigned int tmp; 3303 char error_msg[80]; 3304 char *msg = NULL; 3305 3306 if (fmt) { 3307 va_list args; 3308 3309 va_start(args, fmt); 3310 vscnprintf(error_msg, sizeof(error_msg), fmt, args); 3311 va_end(args); 3312 3313 msg = error_msg; 3314 } 3315 3316 /* 3317 * In most cases it's guaranteed that we get here with an RPM 3318 * reference held, for example because there is a pending GPU 3319 * request that won't finish until the reset is done. This 3320 * isn't the case at least when we get here by doing a 3321 * simulated reset via debugfs, so get an RPM reference. 3322 */ 3323 intel_runtime_pm_get(dev_priv); 3324 3325 engine_mask &= INTEL_INFO(dev_priv)->ring_mask; 3326 3327 if (flags & I915_ERROR_CAPTURE) { 3328 i915_capture_error_state(dev_priv, engine_mask, msg); 3329 i915_clear_error_registers(dev_priv); 3330 } 3331 3332 /* 3333 * Try engine reset when available. We fall back to full reset if 3334 * single reset fails. 3335 */ 3336 if (intel_has_reset_engine(dev_priv) && 3337 !i915_terminally_wedged(&dev_priv->gpu_error)) { 3338 for_each_engine_masked(engine, dev_priv, engine_mask, tmp) { 3339 BUILD_BUG_ON(I915_RESET_MODESET >= I915_RESET_ENGINE); 3340 if (test_and_set_bit(I915_RESET_ENGINE + engine->id, 3341 &dev_priv->gpu_error.flags)) 3342 continue; 3343 3344 if (i915_reset_engine(engine, msg) == 0) 3345 engine_mask &= ~intel_engine_flag(engine); 3346 3347 clear_bit(I915_RESET_ENGINE + engine->id, 3348 &dev_priv->gpu_error.flags); 3349 wake_up_bit(&dev_priv->gpu_error.flags, 3350 I915_RESET_ENGINE + engine->id); 3351 } 3352 } 3353 3354 if (!engine_mask) 3355 goto out; 3356 3357 /* Full reset needs the mutex, stop any other user trying to do so. */ 3358 if (test_and_set_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags)) { 3359 wait_event(dev_priv->gpu_error.reset_queue, 3360 !test_bit(I915_RESET_BACKOFF, 3361 &dev_priv->gpu_error.flags)); 3362 goto out; 3363 } 3364 3365 /* Prevent any other reset-engine attempt. 
*/ 3366 for_each_engine(engine, dev_priv, tmp) { 3367 while (test_and_set_bit(I915_RESET_ENGINE + engine->id, 3368 &dev_priv->gpu_error.flags)) 3369 wait_on_bit(&dev_priv->gpu_error.flags, 3370 I915_RESET_ENGINE + engine->id, 3371 TASK_UNINTERRUPTIBLE); 3372 } 3373 3374 i915_reset_device(dev_priv, engine_mask, msg); 3375 3376 for_each_engine(engine, dev_priv, tmp) { 3377 clear_bit(I915_RESET_ENGINE + engine->id, 3378 &dev_priv->gpu_error.flags); 3379 } 3380 3381 clear_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags); 3382 wake_up_all(&dev_priv->gpu_error.reset_queue); 3383 3384 out: 3385 intel_runtime_pm_put(dev_priv); 3386 } 3387 3388 /* Called from drm generic code, passed 'crtc' which 3389 * we use as a pipe index 3390 */ 3391 static int i8xx_enable_vblank(struct drm_device *dev, unsigned int pipe) 3392 { 3393 struct drm_i915_private *dev_priv = to_i915(dev); 3394 unsigned long irqflags; 3395 3396 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3397 i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS); 3398 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3399 3400 return 0; 3401 } 3402 3403 static int i965_enable_vblank(struct drm_device *dev, unsigned int pipe) 3404 { 3405 struct drm_i915_private *dev_priv = to_i915(dev); 3406 unsigned long irqflags; 3407 3408 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3409 i915_enable_pipestat(dev_priv, pipe, 3410 PIPE_START_VBLANK_INTERRUPT_STATUS); 3411 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3412 3413 return 0; 3414 } 3415 3416 static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe) 3417 { 3418 struct drm_i915_private *dev_priv = to_i915(dev); 3419 unsigned long irqflags; 3420 uint32_t bit = INTEL_GEN(dev_priv) >= 7 ? 3421 DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe); 3422 3423 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3424 ilk_enable_display_irq(dev_priv, bit); 3425 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3426 3427 /* Even though there is no DMC, frame counter can get stuck when 3428 * PSR is active as no frames are generated. 3429 */ 3430 if (HAS_PSR(dev_priv)) 3431 drm_vblank_restore(dev, pipe); 3432 3433 return 0; 3434 } 3435 3436 static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe) 3437 { 3438 struct drm_i915_private *dev_priv = to_i915(dev); 3439 unsigned long irqflags; 3440 3441 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3442 bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK); 3443 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3444 3445 /* Even if there is no DMC, frame counter can get stuck when 3446 * PSR is active as no frames are generated, so check only for PSR. 
3447 */ 3448 if (HAS_PSR(dev_priv)) 3449 drm_vblank_restore(dev, pipe); 3450 3451 return 0; 3452 } 3453 3454 /* Called from drm generic code, passed 'crtc' which 3455 * we use as a pipe index 3456 */ 3457 static void i8xx_disable_vblank(struct drm_device *dev, unsigned int pipe) 3458 { 3459 struct drm_i915_private *dev_priv = to_i915(dev); 3460 unsigned long irqflags; 3461 3462 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3463 i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS); 3464 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3465 } 3466 3467 static void i965_disable_vblank(struct drm_device *dev, unsigned int pipe) 3468 { 3469 struct drm_i915_private *dev_priv = to_i915(dev); 3470 unsigned long irqflags; 3471 3472 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3473 i915_disable_pipestat(dev_priv, pipe, 3474 PIPE_START_VBLANK_INTERRUPT_STATUS); 3475 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3476 } 3477 3478 static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe) 3479 { 3480 struct drm_i915_private *dev_priv = to_i915(dev); 3481 unsigned long irqflags; 3482 uint32_t bit = INTEL_GEN(dev_priv) >= 7 ? 3483 DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe); 3484 3485 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3486 ilk_disable_display_irq(dev_priv, bit); 3487 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3488 } 3489 3490 static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe) 3491 { 3492 struct drm_i915_private *dev_priv = to_i915(dev); 3493 unsigned long irqflags; 3494 3495 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3496 bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK); 3497 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3498 } 3499 3500 static void ibx_irq_reset(struct drm_i915_private *dev_priv) 3501 { 3502 if (HAS_PCH_NOP(dev_priv)) 3503 return; 3504 3505 GEN3_IRQ_RESET(SDE); 3506 3507 if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv)) 3508 I915_WRITE(SERR_INT, 0xffffffff); 3509 } 3510 3511 /* 3512 * SDEIER is also touched by the interrupt handler to work around missed PCH 3513 * interrupts. Hence we can't update it after the interrupt handler is enabled - 3514 * instead we unconditionally enable all PCH interrupt sources here, but then 3515 * only unmask them as needed with SDEIMR. 3516 * 3517 * This function needs to be called before interrupts are enabled. 
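 *
 * As a rough sketch of that split (not additional code to add):
 *
 *   ibx_irq_pre_postinstall():   I915_WRITE(SDEIER, 0xffffffff);
 *   later, per platform:         SDEIMR is updated to unmask only the
 *                                wanted bits (e.g. via
 *                                ibx_display_interrupt_update()).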
3518 */ 3519 static void ibx_irq_pre_postinstall(struct drm_device *dev) 3520 { 3521 struct drm_i915_private *dev_priv = to_i915(dev); 3522 3523 if (HAS_PCH_NOP(dev_priv)) 3524 return; 3525 3526 WARN_ON(I915_READ(SDEIER) != 0); 3527 I915_WRITE(SDEIER, 0xffffffff); 3528 POSTING_READ(SDEIER); 3529 } 3530 3531 static void gen5_gt_irq_reset(struct drm_i915_private *dev_priv) 3532 { 3533 GEN3_IRQ_RESET(GT); 3534 if (INTEL_GEN(dev_priv) >= 6) 3535 GEN3_IRQ_RESET(GEN6_PM); 3536 } 3537 3538 static void vlv_display_irq_reset(struct drm_i915_private *dev_priv) 3539 { 3540 if (IS_CHERRYVIEW(dev_priv)) 3541 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV); 3542 else 3543 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK); 3544 3545 i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0); 3546 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3547 3548 i9xx_pipestat_irq_reset(dev_priv); 3549 3550 GEN3_IRQ_RESET(VLV_); 3551 dev_priv->irq_mask = ~0u; 3552 } 3553 3554 static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv) 3555 { 3556 u32 pipestat_mask; 3557 u32 enable_mask; 3558 enum pipe pipe; 3559 3560 pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS; 3561 3562 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); 3563 for_each_pipe(dev_priv, pipe) 3564 i915_enable_pipestat(dev_priv, pipe, pipestat_mask); 3565 3566 enable_mask = I915_DISPLAY_PORT_INTERRUPT | 3567 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3568 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3569 I915_LPE_PIPE_A_INTERRUPT | 3570 I915_LPE_PIPE_B_INTERRUPT; 3571 3572 if (IS_CHERRYVIEW(dev_priv)) 3573 enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT | 3574 I915_LPE_PIPE_C_INTERRUPT; 3575 3576 WARN_ON(dev_priv->irq_mask != ~0u); 3577 3578 dev_priv->irq_mask = ~enable_mask; 3579 3580 GEN3_IRQ_INIT(VLV_, dev_priv->irq_mask, enable_mask); 3581 } 3582 3583 /* drm_dma.h hooks 3584 */ 3585 static void ironlake_irq_reset(struct drm_device *dev) 3586 { 3587 struct drm_i915_private *dev_priv = to_i915(dev); 3588 3589 if (IS_GEN5(dev_priv)) 3590 I915_WRITE(HWSTAM, 0xffffffff); 3591 3592 GEN3_IRQ_RESET(DE); 3593 if (IS_GEN7(dev_priv)) 3594 I915_WRITE(GEN7_ERR_INT, 0xffffffff); 3595 3596 if (IS_HASWELL(dev_priv)) { 3597 I915_WRITE(EDP_PSR_IMR, 0xffffffff); 3598 I915_WRITE(EDP_PSR_IIR, 0xffffffff); 3599 } 3600 3601 gen5_gt_irq_reset(dev_priv); 3602 3603 ibx_irq_reset(dev_priv); 3604 } 3605 3606 static void valleyview_irq_reset(struct drm_device *dev) 3607 { 3608 struct drm_i915_private *dev_priv = to_i915(dev); 3609 3610 I915_WRITE(VLV_MASTER_IER, 0); 3611 POSTING_READ(VLV_MASTER_IER); 3612 3613 gen5_gt_irq_reset(dev_priv); 3614 3615 spin_lock_irq(&dev_priv->irq_lock); 3616 if (dev_priv->display_irqs_enabled) 3617 vlv_display_irq_reset(dev_priv); 3618 spin_unlock_irq(&dev_priv->irq_lock); 3619 } 3620 3621 static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv) 3622 { 3623 GEN8_IRQ_RESET_NDX(GT, 0); 3624 GEN8_IRQ_RESET_NDX(GT, 1); 3625 GEN8_IRQ_RESET_NDX(GT, 2); 3626 GEN8_IRQ_RESET_NDX(GT, 3); 3627 } 3628 3629 static void gen8_irq_reset(struct drm_device *dev) 3630 { 3631 struct drm_i915_private *dev_priv = to_i915(dev); 3632 int pipe; 3633 3634 gen8_master_intr_disable(dev_priv->regs); 3635 3636 gen8_gt_irq_reset(dev_priv); 3637 3638 I915_WRITE(EDP_PSR_IMR, 0xffffffff); 3639 I915_WRITE(EDP_PSR_IIR, 0xffffffff); 3640 3641 for_each_pipe(dev_priv, pipe) 3642 if (intel_display_power_is_enabled(dev_priv, 3643 POWER_DOMAIN_PIPE(pipe))) 3644 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe); 3645 3646 GEN3_IRQ_RESET(GEN8_DE_PORT_); 3647 
GEN3_IRQ_RESET(GEN8_DE_MISC_); 3648 GEN3_IRQ_RESET(GEN8_PCU_); 3649 3650 if (HAS_PCH_SPLIT(dev_priv)) 3651 ibx_irq_reset(dev_priv); 3652 } 3653 3654 static void gen11_gt_irq_reset(struct drm_i915_private *dev_priv) 3655 { 3656 /* Disable RCS, BCS, VCS and VECS class engines. */ 3657 I915_WRITE(GEN11_RENDER_COPY_INTR_ENABLE, 0); 3658 I915_WRITE(GEN11_VCS_VECS_INTR_ENABLE, 0); 3659 3660 /* Restore masks irqs on RCS, BCS, VCS and VECS engines. */ 3661 I915_WRITE(GEN11_RCS0_RSVD_INTR_MASK, ~0); 3662 I915_WRITE(GEN11_BCS_RSVD_INTR_MASK, ~0); 3663 I915_WRITE(GEN11_VCS0_VCS1_INTR_MASK, ~0); 3664 I915_WRITE(GEN11_VCS2_VCS3_INTR_MASK, ~0); 3665 I915_WRITE(GEN11_VECS0_VECS1_INTR_MASK, ~0); 3666 3667 I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0); 3668 I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_MASK, ~0); 3669 } 3670 3671 static void gen11_irq_reset(struct drm_device *dev) 3672 { 3673 struct drm_i915_private *dev_priv = dev->dev_private; 3674 int pipe; 3675 3676 gen11_master_intr_disable(dev_priv->regs); 3677 3678 gen11_gt_irq_reset(dev_priv); 3679 3680 I915_WRITE(GEN11_DISPLAY_INT_CTL, 0); 3681 3682 I915_WRITE(EDP_PSR_IMR, 0xffffffff); 3683 I915_WRITE(EDP_PSR_IIR, 0xffffffff); 3684 3685 for_each_pipe(dev_priv, pipe) 3686 if (intel_display_power_is_enabled(dev_priv, 3687 POWER_DOMAIN_PIPE(pipe))) 3688 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe); 3689 3690 GEN3_IRQ_RESET(GEN8_DE_PORT_); 3691 GEN3_IRQ_RESET(GEN8_DE_MISC_); 3692 GEN3_IRQ_RESET(GEN11_DE_HPD_); 3693 GEN3_IRQ_RESET(GEN11_GU_MISC_); 3694 GEN3_IRQ_RESET(GEN8_PCU_); 3695 3696 if (HAS_PCH_ICP(dev_priv)) 3697 GEN3_IRQ_RESET(SDE); 3698 } 3699 3700 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv, 3701 u8 pipe_mask) 3702 { 3703 uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN; 3704 enum pipe pipe; 3705 3706 spin_lock_irq(&dev_priv->irq_lock); 3707 3708 if (!intel_irqs_enabled(dev_priv)) { 3709 spin_unlock_irq(&dev_priv->irq_lock); 3710 return; 3711 } 3712 3713 for_each_pipe_masked(dev_priv, pipe, pipe_mask) 3714 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe, 3715 dev_priv->de_irq_mask[pipe], 3716 ~dev_priv->de_irq_mask[pipe] | extra_ier); 3717 3718 spin_unlock_irq(&dev_priv->irq_lock); 3719 } 3720 3721 void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv, 3722 u8 pipe_mask) 3723 { 3724 enum pipe pipe; 3725 3726 spin_lock_irq(&dev_priv->irq_lock); 3727 3728 if (!intel_irqs_enabled(dev_priv)) { 3729 spin_unlock_irq(&dev_priv->irq_lock); 3730 return; 3731 } 3732 3733 for_each_pipe_masked(dev_priv, pipe, pipe_mask) 3734 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe); 3735 3736 spin_unlock_irq(&dev_priv->irq_lock); 3737 3738 /* make sure we're done processing display irqs */ 3739 synchronize_irq(dev_priv->drm.irq); 3740 } 3741 3742 static void cherryview_irq_reset(struct drm_device *dev) 3743 { 3744 struct drm_i915_private *dev_priv = to_i915(dev); 3745 3746 I915_WRITE(GEN8_MASTER_IRQ, 0); 3747 POSTING_READ(GEN8_MASTER_IRQ); 3748 3749 gen8_gt_irq_reset(dev_priv); 3750 3751 GEN3_IRQ_RESET(GEN8_PCU_); 3752 3753 spin_lock_irq(&dev_priv->irq_lock); 3754 if (dev_priv->display_irqs_enabled) 3755 vlv_display_irq_reset(dev_priv); 3756 spin_unlock_irq(&dev_priv->irq_lock); 3757 } 3758 3759 static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv, 3760 const u32 hpd[HPD_NUM_PINS]) 3761 { 3762 struct intel_encoder *encoder; 3763 u32 enabled_irqs = 0; 3764 3765 for_each_intel_encoder(&dev_priv->drm, encoder) 3766 if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED) 3767 enabled_irqs |= hpd[encoder->hpd_pin]; 3768 3769 return 

static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug;

	/*
	 * Enable digital hotplug on the PCH, and configure the DP short pulse
	 * duration to 2ms (which is the minimum in the Display Port spec).
	 * The pulse duration bits are reserved on LPT+.
	 */
	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug &= ~(PORTB_PULSE_DURATION_MASK |
		     PORTC_PULSE_DURATION_MASK |
		     PORTD_PULSE_DURATION_MASK);
	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
	/*
	 * When CPU and PCH are on the same package, port A
	 * HPD must be enabled in both north and south.
	 */
	if (HAS_PCH_LPT_LP(dev_priv))
		hotplug |= PORTA_HOTPLUG_ENABLE;
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}

static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, enabled_irqs;

	if (HAS_PCH_IBX(dev_priv)) {
		hotplug_irqs = SDE_HOTPLUG_MASK;
		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ibx);
	} else {
		hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_cpt);
	}

	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);

	ibx_hpd_detection_setup(dev_priv);
}

static void icp_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug;

	hotplug = I915_READ(SHOTPLUG_CTL_DDI);
	hotplug |= ICP_DDIA_HPD_ENABLE |
		   ICP_DDIB_HPD_ENABLE;
	I915_WRITE(SHOTPLUG_CTL_DDI, hotplug);

	hotplug = I915_READ(SHOTPLUG_CTL_TC);
	hotplug |= ICP_TC_HPD_ENABLE(PORT_TC1) |
		   ICP_TC_HPD_ENABLE(PORT_TC2) |
		   ICP_TC_HPD_ENABLE(PORT_TC3) |
		   ICP_TC_HPD_ENABLE(PORT_TC4);
	I915_WRITE(SHOTPLUG_CTL_TC, hotplug);
}

static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, enabled_irqs;

	hotplug_irqs = SDE_DDI_MASK_ICP | SDE_TC_MASK_ICP;
	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_icp);

	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);

	icp_hpd_detection_setup(dev_priv);
}

static void gen11_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug;

	hotplug = I915_READ(GEN11_TC_HOTPLUG_CTL);
	hotplug |= GEN11_HOTPLUG_CTL_ENABLE(PORT_TC1) |
		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC2) |
		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC3) |
		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4);
	I915_WRITE(GEN11_TC_HOTPLUG_CTL, hotplug);

	hotplug = I915_READ(GEN11_TBT_HOTPLUG_CTL);
	hotplug |= GEN11_HOTPLUG_CTL_ENABLE(PORT_TC1) |
		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC2) |
		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC3) |
		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4);
	I915_WRITE(GEN11_TBT_HOTPLUG_CTL, hotplug);
}

static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, enabled_irqs;
	u32 val;

	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_gen11);
	hotplug_irqs = GEN11_DE_TC_HOTPLUG_MASK | GEN11_DE_TBT_HOTPLUG_MASK;

	val = I915_READ(GEN11_DE_HPD_IMR);
	val &= ~hotplug_irqs;
	I915_WRITE(GEN11_DE_HPD_IMR, val);
	POSTING_READ(GEN11_DE_HPD_IMR);

	gen11_hpd_detection_setup(dev_priv);

	if (HAS_PCH_ICP(dev_priv))
		icp_hpd_irq_setup(dev_priv);
}
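
/*
 * Descriptive note on the gen11 flow above (derived only from the code in
 * this file): the Type-C/Thunderbolt hotplug pins are unmasked in the north
 * display HPD block (GEN11_DE_HPD_*), while the south display (ICP PCH)
 * hotplug is still programmed through the usual SDE path, which is why
 * gen11_hpd_irq_setup() chains into icp_hpd_irq_setup().
 */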

static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	u32 val, hotplug;

	/* Display WA #1179 WaHardHangonHotPlug: cnp */
	if (HAS_PCH_CNP(dev_priv)) {
		val = I915_READ(SOUTH_CHICKEN1);
		val &= ~CHASSIS_CLK_REQ_DURATION_MASK;
		val |= CHASSIS_CLK_REQ_DURATION(0xf);
		I915_WRITE(SOUTH_CHICKEN1, val);
	}

	/* Enable digital hotplug on the PCH */
	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug |= PORTA_HOTPLUG_ENABLE |
		   PORTB_HOTPLUG_ENABLE |
		   PORTC_HOTPLUG_ENABLE |
		   PORTD_HOTPLUG_ENABLE;
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);

	hotplug = I915_READ(PCH_PORT_HOTPLUG2);
	hotplug |= PORTE_HOTPLUG_ENABLE;
	I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
}

static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, enabled_irqs;

	hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_spt);

	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);

	spt_hpd_detection_setup(dev_priv);
}

static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug;

	/*
	 * Enable digital hotplug on the CPU, and configure the DP short pulse
	 * duration to 2ms (which is the minimum in the Display Port spec).
	 * The pulse duration bits are reserved on HSW+.
	 */
	hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
	hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK;
	hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE |
		   DIGITAL_PORTA_PULSE_DURATION_2ms;
	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
}

static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, enabled_irqs;

	if (INTEL_GEN(dev_priv) >= 8) {
		hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG;
		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bdw);

		bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
	} else if (INTEL_GEN(dev_priv) >= 7) {
		hotplug_irqs = DE_DP_A_HOTPLUG_IVB;
		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ivb);

		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
	} else {
		hotplug_irqs = DE_DP_A_HOTPLUG;
		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ilk);

		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
	}

	ilk_hpd_detection_setup(dev_priv);

	ibx_hpd_irq_setup(dev_priv);
}

static void __bxt_hpd_detection_setup(struct drm_i915_private *dev_priv,
				      u32 enabled_irqs)
{
	u32 hotplug;

	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug |= PORTA_HOTPLUG_ENABLE |
		   PORTB_HOTPLUG_ENABLE |
		   PORTC_HOTPLUG_ENABLE;

	DRM_DEBUG_KMS("Invert bit setting: hp_ctl:%x hp_port:%x\n",
		      hotplug, enabled_irqs);
	hotplug &= ~BXT_DDI_HPD_INVERT_MASK;

	/*
	 * For BXT the invert bit has to be set based on the AOB design
	 * of the HPD detection logic, so update it based on the VBT fields.
	 */
	if ((enabled_irqs & BXT_DE_PORT_HP_DDIA) &&
	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_A))
		hotplug |= BXT_DDIA_HPD_INVERT;
	if ((enabled_irqs & BXT_DE_PORT_HP_DDIB) &&
	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_B))
		hotplug |= BXT_DDIB_HPD_INVERT;
	if ((enabled_irqs & BXT_DE_PORT_HP_DDIC) &&
	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_C))
		hotplug |= BXT_DDIC_HPD_INVERT;

	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}

static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	__bxt_hpd_detection_setup(dev_priv, BXT_DE_PORT_HOTPLUG_MASK);
}

static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, enabled_irqs;

	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bxt);
	hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK;

	bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);

	__bxt_hpd_detection_setup(dev_priv, enabled_irqs);
}

static void ibx_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 mask;

	if (HAS_PCH_NOP(dev_priv))
		return;

	if (HAS_PCH_IBX(dev_priv))
		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
	else if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
	else
		mask = SDE_GMBUS_CPT;

	gen3_assert_iir_is_zero(dev_priv, SDEIIR);
	I915_WRITE(SDEIMR, ~mask);

	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
	    HAS_PCH_LPT(dev_priv))
		ibx_hpd_detection_setup(dev_priv);
	else
		spt_hpd_detection_setup(dev_priv);
}

static void gen5_gt_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 pm_irqs, gt_irqs;

	pm_irqs = gt_irqs = 0;

	dev_priv->gt_irq_mask = ~0;
	if (HAS_L3_DPF(dev_priv)) {
		/* L3 parity interrupt is always unmasked. */
		dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev_priv);
		gt_irqs |= GT_PARITY_ERROR(dev_priv);
	}

	gt_irqs |= GT_RENDER_USER_INTERRUPT;
	if (IS_GEN5(dev_priv)) {
		gt_irqs |= ILK_BSD_USER_INTERRUPT;
	} else {
		gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
	}

	GEN3_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);

	if (INTEL_GEN(dev_priv) >= 6) {
		/*
		 * RPS interrupts will get enabled/disabled on demand when RPS
		 * itself is enabled/disabled.
		 */
		if (HAS_VEBOX(dev_priv)) {
			pm_irqs |= PM_VEBOX_USER_INTERRUPT;
			dev_priv->pm_ier |= PM_VEBOX_USER_INTERRUPT;
		}

		dev_priv->pm_imr = 0xffffffff;
		GEN3_IRQ_INIT(GEN6_PM, dev_priv->pm_imr, pm_irqs);
	}
}

static int ironlake_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 display_mask, extra_mask;

	if (INTEL_GEN(dev_priv) >= 7) {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
				DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB);
		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
			      DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
			      DE_DP_A_HOTPLUG_IVB);
	} else {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
				DE_AUX_CHANNEL_A | DE_PIPEB_CRC_DONE |
				DE_PIPEA_CRC_DONE | DE_POISON);
		extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
			      DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
			      DE_DP_A_HOTPLUG);
	}

	if (IS_HASWELL(dev_priv)) {
		gen3_assert_iir_is_zero(dev_priv, EDP_PSR_IIR);
		intel_psr_irq_control(dev_priv, dev_priv->psr.debug);
		display_mask |= DE_EDP_PSR_INT_HSW;
	}

	dev_priv->irq_mask = ~display_mask;

	ibx_irq_pre_postinstall(dev);

	GEN3_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);

	gen5_gt_irq_postinstall(dev);

	ilk_hpd_detection_setup(dev_priv);

	ibx_irq_postinstall(dev);

	if (IS_IRONLAKE_M(dev_priv)) {
		/*
		 * Enable PCU event interrupts
		 *
		 * spinlocking not required here for correctness since interrupt
		 * setup is guaranteed to run in single-threaded context. But we
		 * need it to make the assert_spin_locked happy.
		 */
		spin_lock_irq(&dev_priv->irq_lock);
		ilk_enable_display_irq(dev_priv, DE_PCU_EVENT);
		spin_unlock_irq(&dev_priv->irq_lock);
	}

	return 0;
}

void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
{
	lockdep_assert_held(&dev_priv->irq_lock);

	if (dev_priv->display_irqs_enabled)
		return;

	dev_priv->display_irqs_enabled = true;

	if (intel_irqs_enabled(dev_priv)) {
		vlv_display_irq_reset(dev_priv);
		vlv_display_irq_postinstall(dev_priv);
	}
}

void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
{
	lockdep_assert_held(&dev_priv->irq_lock);

	if (!dev_priv->display_irqs_enabled)
		return;

	dev_priv->display_irqs_enabled = false;

	if (intel_irqs_enabled(dev_priv))
		vlv_display_irq_reset(dev_priv);
}

static int valleyview_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	gen5_gt_irq_postinstall(dev);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_postinstall(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
	POSTING_READ(VLV_MASTER_IER);

	return 0;
}
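
/*
 * Descriptive note for the gen8 GT setup below (derived from the code that
 * follows, not from additional documentation): the GT interrupts are spread
 * over four IIR banks, and gt_interrupts[] is indexed the same way as the
 * GEN8_IRQ_INIT_NDX(GT, n, ...) calls. Bank 0 carries the render and blitter
 * engines, bank 1 the two video decode engines, bank 2 is left zero here
 * because the PM/RPS (and GuC) interrupts in it are programmed on demand,
 * and bank 3 carries the video enhancement engine.
 */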

static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
{
	/* These are interrupts we'll toggle with the ring mask register */
	u32 gt_interrupts[] = {
		GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
			GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
		GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
			GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
		0,
		GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
	};

	if (HAS_L3_DPF(dev_priv))
		gt_interrupts[0] |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;

	dev_priv->pm_ier = 0x0;
	dev_priv->pm_imr = ~dev_priv->pm_ier;
	GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
	GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
	/*
	 * RPS interrupts will get enabled/disabled on demand when RPS itself
	 * is enabled/disabled. Same will be the case for GuC interrupts.
	 */
	GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_imr, dev_priv->pm_ier);
	GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
}

static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
{
	u32 de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
	u32 de_pipe_enables;
	u32 de_port_masked = GEN8_AUX_CHANNEL_A;
	u32 de_port_enables;
	u32 de_misc_masked = GEN8_DE_EDP_PSR;
	enum pipe pipe;

	if (INTEL_GEN(dev_priv) <= 10)
		de_misc_masked |= GEN8_DE_MISC_GSE;

	if (INTEL_GEN(dev_priv) >= 9) {
		de_pipe_masked |= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
		de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
				  GEN9_AUX_CHANNEL_D;
		if (IS_GEN9_LP(dev_priv))
			de_port_masked |= BXT_DE_PORT_GMBUS;
	} else {
		de_pipe_masked |= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
	}

	if (INTEL_GEN(dev_priv) >= 11)
		de_port_masked |= ICL_AUX_CHANNEL_E;

	if (IS_CNL_WITH_PORT_F(dev_priv) || INTEL_GEN(dev_priv) >= 11)
		de_port_masked |= CNL_AUX_CHANNEL_F;

	de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
					   GEN8_PIPE_FIFO_UNDERRUN;

	de_port_enables = de_port_masked;
	if (IS_GEN9_LP(dev_priv))
		de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
	else if (IS_BROADWELL(dev_priv))
		de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;

	gen3_assert_iir_is_zero(dev_priv, EDP_PSR_IIR);
	intel_psr_irq_control(dev_priv, dev_priv->psr.debug);

	for_each_pipe(dev_priv, pipe) {
		dev_priv->de_irq_mask[pipe] = ~de_pipe_masked;

		if (intel_display_power_is_enabled(dev_priv,
						   POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
					  dev_priv->de_irq_mask[pipe],
					  de_pipe_enables);
	}

	GEN3_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
	GEN3_IRQ_INIT(GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);

	if (INTEL_GEN(dev_priv) >= 11) {
		u32 de_hpd_masked = 0;
		u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK |
				     GEN11_DE_TBT_HOTPLUG_MASK;

		GEN3_IRQ_INIT(GEN11_DE_HPD_, ~de_hpd_masked, de_hpd_enables);
		gen11_hpd_detection_setup(dev_priv);
	} else if (IS_GEN9_LP(dev_priv)) {
		bxt_hpd_detection_setup(dev_priv);
	} else if (IS_BROADWELL(dev_priv)) {
		ilk_hpd_detection_setup(dev_priv);
	}
}

static int gen8_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (HAS_PCH_SPLIT(dev_priv))
		ibx_irq_pre_postinstall(dev);

	gen8_gt_irq_postinstall(dev_priv);
	gen8_de_irq_postinstall(dev_priv);

	if (HAS_PCH_SPLIT(dev_priv))
		ibx_irq_postinstall(dev);

	gen8_master_intr_enable(dev_priv->regs);

	return 0;
}

static void gen11_gt_irq_postinstall(struct drm_i915_private *dev_priv)
{
	const u32 irqs = GT_RENDER_USER_INTERRUPT | GT_CONTEXT_SWITCH_INTERRUPT;

	BUILD_BUG_ON(irqs & 0xffff0000);

	/* Enable RCS, BCS, VCS and VECS class interrupts. */
	I915_WRITE(GEN11_RENDER_COPY_INTR_ENABLE, irqs << 16 | irqs);
	I915_WRITE(GEN11_VCS_VECS_INTR_ENABLE, irqs << 16 | irqs);

	/* Unmask irqs on RCS, BCS, VCS and VECS engines. */
	I915_WRITE(GEN11_RCS0_RSVD_INTR_MASK, ~(irqs << 16));
	I915_WRITE(GEN11_BCS_RSVD_INTR_MASK, ~(irqs << 16));
	I915_WRITE(GEN11_VCS0_VCS1_INTR_MASK, ~(irqs | irqs << 16));
	I915_WRITE(GEN11_VCS2_VCS3_INTR_MASK, ~(irqs | irqs << 16));
	I915_WRITE(GEN11_VECS0_VECS1_INTR_MASK, ~(irqs | irqs << 16));

	/*
	 * RPS interrupts will get enabled/disabled on demand when RPS itself
	 * is enabled/disabled.
	 */
	dev_priv->pm_ier = 0x0;
	dev_priv->pm_imr = ~dev_priv->pm_ier;
	I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
	I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_MASK, ~0);
}

static void icp_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 mask = SDE_GMBUS_ICP;

	WARN_ON(I915_READ(SDEIER) != 0);
	I915_WRITE(SDEIER, 0xffffffff);
	POSTING_READ(SDEIER);

	gen3_assert_iir_is_zero(dev_priv, SDEIIR);
	I915_WRITE(SDEIMR, ~mask);

	icp_hpd_detection_setup(dev_priv);
}

static int gen11_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 gu_misc_masked = GEN11_GU_MISC_GSE;

	if (HAS_PCH_ICP(dev_priv))
		icp_irq_postinstall(dev);

	gen11_gt_irq_postinstall(dev_priv);
	gen8_de_irq_postinstall(dev_priv);

	GEN3_IRQ_INIT(GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);

	I915_WRITE(GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);

	gen11_master_intr_enable(dev_priv->regs);

	return 0;
}

static int cherryview_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	gen8_gt_irq_postinstall(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_postinstall(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
	POSTING_READ(GEN8_MASTER_IRQ);

	return 0;
}

static void i8xx_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	i9xx_pipestat_irq_reset(dev_priv);

	I915_WRITE16(HWSTAM, 0xffff);

	GEN2_IRQ_RESET();
}

static int i8xx_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u16 enable_mask;

	I915_WRITE16(EMR, ~(I915_ERROR_PAGE_TABLE |
			    I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_MASTER_ERROR_INTERRUPT);

	enable_mask =
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_MASTER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	GEN2_IRQ_INIT(, dev_priv->irq_mask, enable_mask);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	return 0;
}

static void i8xx_error_irq_ack(struct drm_i915_private *dev_priv,
			       u16 *eir, u16 *eir_stuck)
{
	u16 emr;

	*eir = I915_READ16(EIR);

	if (*eir)
		I915_WRITE16(EIR, *eir);

	*eir_stuck = I915_READ16(EIR);
	if (*eir_stuck == 0)
		return;

	/*
	 * Toggle all EMR bits to make sure we get an edge
	 * in the ISR master error bit if we don't clear
	 * all the EIR bits. Otherwise the edge triggered
	 * IIR on i965/g4x wouldn't notice that an interrupt
	 * is still pending. Also some EIR bits can't be
	 * cleared except by handling the underlying error
	 * (or by a GPU reset) so we mask any bit that
	 * remains set.
	 */
	emr = I915_READ16(EMR);
	I915_WRITE16(EMR, 0xffff);
	I915_WRITE16(EMR, emr | *eir_stuck);
}

static void i8xx_error_irq_handler(struct drm_i915_private *dev_priv,
				   u16 eir, u16 eir_stuck)
{
	DRM_DEBUG("Master Error: EIR 0x%04x\n", eir);

	if (eir_stuck)
		DRM_DEBUG_DRIVER("EIR stuck: 0x%04x, masked\n", eir_stuck);
}

static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv,
			       u32 *eir, u32 *eir_stuck)
{
	u32 emr;

	*eir = I915_READ(EIR);

	I915_WRITE(EIR, *eir);

	*eir_stuck = I915_READ(EIR);
	if (*eir_stuck == 0)
		return;

	/*
	 * Toggle all EMR bits to make sure we get an edge
	 * in the ISR master error bit if we don't clear
	 * all the EIR bits. Otherwise the edge triggered
	 * IIR on i965/g4x wouldn't notice that an interrupt
	 * is still pending. Also some EIR bits can't be
	 * cleared except by handling the underlying error
	 * (or by a GPU reset) so we mask any bit that
	 * remains set.
	 */
	emr = I915_READ(EMR);
	I915_WRITE(EMR, 0xffffffff);
	I915_WRITE(EMR, emr | *eir_stuck);
}

static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv,
				   u32 eir, u32 eir_stuck)
{
	DRM_DEBUG("Master Error, EIR 0x%08x\n", eir);

	if (eir_stuck)
		DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masked\n", eir_stuck);
}

static irqreturn_t i8xx_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = to_i915(dev);
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	do {
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u16 eir = 0, eir_stuck = 0;
		u16 iir;

		iir = I915_READ16(IIR);
		if (iir == 0)
			break;

		ret = IRQ_HANDLED;

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i8xx_error_irq_ack(dev_priv, &eir, &eir_stuck);

		I915_WRITE16(IIR, iir);

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev_priv->engine[RCS]);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i8xx_error_irq_handler(dev_priv, eir, eir_stuck);

		i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats);
	} while (0);

	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}

static void i915_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (I915_HAS_HOTPLUG(dev_priv)) {
		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	i9xx_pipestat_irq_reset(dev_priv);

	I915_WRITE(HWSTAM, 0xffffffff);

	GEN3_IRQ_RESET();
}

static int i915_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 enable_mask;

	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE |
			  I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_MASTER_ERROR_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_MASTER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	if (I915_HAS_HOTPLUG(dev_priv)) {
		/* Enable in IER... */
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
		/* and unmask in IMR */
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
	}

	GEN3_IRQ_INIT(, dev_priv->irq_mask, enable_mask);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	i915_enable_asle_pipestat(dev_priv);

	return 0;
}

static irqreturn_t i915_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = to_i915(dev);
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	do {
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 eir = 0, eir_stuck = 0;
		u32 hotplug_status = 0;
		u32 iir;

		iir = I915_READ(IIR);
		if (iir == 0)
			break;

		ret = IRQ_HANDLED;

		if (I915_HAS_HOTPLUG(dev_priv) &&
		    iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);

		I915_WRITE(IIR, iir);

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev_priv->engine[RCS]);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_handler(dev_priv, eir, eir_stuck);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		i915_pipestat_irq_handler(dev_priv, iir, pipe_stats);
	} while (0);

	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}

static void i965_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	i9xx_pipestat_irq_reset(dev_priv);

	I915_WRITE(HWSTAM, 0xffffffff);

	GEN3_IRQ_RESET();
}

static int i965_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 enable_mask;
	u32 error_mask;

	/*
	 * Enable some error detection, note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev_priv)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	I915_WRITE(EMR, error_mask);

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PORT_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_MASTER_ERROR_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PORT_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_MASTER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	if (IS_G4X(dev_priv))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	GEN3_IRQ_INIT(, dev_priv->irq_mask, enable_mask);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	i915_enable_asle_pipestat(dev_priv);

	return 0;
}

static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_en;

	lockdep_assert_held(&dev_priv->irq_lock);

	/* Note HDMI and DP share hotplug bits */
	/* enable bits are the same for all generations */
	hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915);
	/*
	 * Programming the CRT detection parameters tends to generate a
	 * spurious hotplug event about three seconds later. So just do it
	 * once.
	 */
	if (IS_G4X(dev_priv))
		hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
	hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;

	/* Ignore TV since it's buggy */
	i915_hotplug_interrupt_update_locked(dev_priv,
					     HOTPLUG_INT_EN_MASK |
					     CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
					     CRT_HOTPLUG_ACTIVATION_PERIOD_64,
					     hotplug_en);
}

static irqreturn_t i965_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = to_i915(dev);
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	do {
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 eir = 0, eir_stuck = 0;
		u32 hotplug_status = 0;
		u32 iir;

		iir = I915_READ(IIR);
		if (iir == 0)
			break;

		ret = IRQ_HANDLED;

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);

		I915_WRITE(IIR, iir);

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev_priv->engine[RCS]);

		if (iir & I915_BSD_USER_INTERRUPT)
			notify_ring(dev_priv->engine[VCS]);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_handler(dev_priv, eir, eir_stuck);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		i965_pipestat_irq_handler(dev_priv, iir, pipe_stats);
	} while (0);

	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}

/**
 * intel_irq_init - initializes irq support
 * @dev_priv: i915 device instance
 *
 * This function initializes all the irq support including work items, timers
 * and all the vtables. It does not set up the interrupt itself though.
 */
void intel_irq_init(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	int i;

	intel_hpd_init_work(dev_priv);

	INIT_WORK(&rps->work, gen6_pm_rps_work);

	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
	for (i = 0; i < MAX_L3_SLICES; ++i)
		dev_priv->l3_parity.remap_info[i] = NULL;

	if (HAS_GUC_SCHED(dev_priv))
		dev_priv->pm_guc_events = GEN9_GUC_TO_HOST_INT_EVENT;

	/* Let's track the enabled rps events */
	if (IS_VALLEYVIEW(dev_priv))
		/* WaGsvRC0ResidencyMethod:vlv */
		dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
	else
		dev_priv->pm_rps_events = (GEN6_PM_RP_UP_THRESHOLD |
					   GEN6_PM_RP_DOWN_THRESHOLD |
					   GEN6_PM_RP_DOWN_TIMEOUT);

	rps->pm_intrmsk_mbz = 0;

	/*
	 * SNB, IVB and HSW can, and VLV and CHV may, hard hang on a looping
	 * batchbuffer if GEN6_PM_UP_EI_EXPIRED is masked.
	 *
	 * TODO: verify if this can be reproduced on VLV,CHV.
	 */
	if (INTEL_GEN(dev_priv) <= 7)
		rps->pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED;

	if (INTEL_GEN(dev_priv) >= 8)
		rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;

	if (IS_GEN2(dev_priv)) {
		/* Gen2 doesn't have a hardware frame counter */
		dev->max_vblank_count = 0;
	} else if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
		dev->driver->get_vblank_counter = g4x_get_vblank_counter;
	} else {
		dev->driver->get_vblank_counter = i915_get_vblank_counter;
		dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
	}

	/*
	 * Opt out of the vblank disable timer on everything except gen2.
	 * Gen2 doesn't have a hardware frame counter and so depends on
	 * vblank interrupts to produce sane vblank sequence numbers.
	 */
	if (!IS_GEN2(dev_priv))
		dev->vblank_disable_immediate = true;

	/* Most platforms treat the display irq block as an always-on
	 * power domain. vlv/chv can disable it at runtime and need
	 * special care to avoid writing any of the display block registers
	 * outside of the power domain. We defer setting up the display irqs
	 * in this case to the runtime pm.
	 */
	dev_priv->display_irqs_enabled = true;
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		dev_priv->display_irqs_enabled = false;

	dev_priv->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD;
	/* If we have MST support, we want to avoid doing short HPD IRQ storm
	 * detection, as short HPD storms will occur as a natural part of
	 * sideband messaging with MST.
	 * On older platforms however, IRQ storms can occur with both long and
	 * short pulses, as seen on some G4x systems.
	 */
	dev_priv->hotplug.hpd_short_storm_enabled = !HAS_DP_MST(dev_priv);

	dev->driver->get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos;
	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;

	if (IS_CHERRYVIEW(dev_priv)) {
		dev->driver->irq_handler = cherryview_irq_handler;
		dev->driver->irq_preinstall = cherryview_irq_reset;
		dev->driver->irq_postinstall = cherryview_irq_postinstall;
		dev->driver->irq_uninstall = cherryview_irq_reset;
		dev->driver->enable_vblank = i965_enable_vblank;
		dev->driver->disable_vblank = i965_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		dev->driver->irq_handler = valleyview_irq_handler;
		dev->driver->irq_preinstall = valleyview_irq_reset;
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
		dev->driver->irq_uninstall = valleyview_irq_reset;
		dev->driver->enable_vblank = i965_enable_vblank;
		dev->driver->disable_vblank = i965_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (INTEL_GEN(dev_priv) >= 11) {
		dev->driver->irq_handler = gen11_irq_handler;
		dev->driver->irq_preinstall = gen11_irq_reset;
		dev->driver->irq_postinstall = gen11_irq_postinstall;
		dev->driver->irq_uninstall = gen11_irq_reset;
		dev->driver->enable_vblank = gen8_enable_vblank;
		dev->driver->disable_vblank = gen8_disable_vblank;
		dev_priv->display.hpd_irq_setup = gen11_hpd_irq_setup;
	} else if (INTEL_GEN(dev_priv) >= 8) {
		dev->driver->irq_handler = gen8_irq_handler;
		dev->driver->irq_preinstall = gen8_irq_reset;
		dev->driver->irq_postinstall = gen8_irq_postinstall;
		dev->driver->irq_uninstall = gen8_irq_reset;
		dev->driver->enable_vblank = gen8_enable_vblank;
		dev->driver->disable_vblank = gen8_disable_vblank;
		if (IS_GEN9_LP(dev_priv))
			dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
		else if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv) ||
			 HAS_PCH_CNP(dev_priv))
			dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
		else
			dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		dev->driver->irq_handler = ironlake_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_reset;
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_reset;
		dev->driver->enable_vblank = ironlake_enable_vblank;
		dev->driver->disable_vblank = ironlake_disable_vblank;
		dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
	} else {
		if (IS_GEN2(dev_priv)) {
			dev->driver->irq_preinstall = i8xx_irq_reset;
			dev->driver->irq_postinstall = i8xx_irq_postinstall;
			dev->driver->irq_handler = i8xx_irq_handler;
			dev->driver->irq_uninstall = i8xx_irq_reset;
			dev->driver->enable_vblank = i8xx_enable_vblank;
			dev->driver->disable_vblank = i8xx_disable_vblank;
		} else if (IS_GEN3(dev_priv)) {
			dev->driver->irq_preinstall = i915_irq_reset;
			dev->driver->irq_postinstall = i915_irq_postinstall;
			dev->driver->irq_uninstall = i915_irq_reset;
			dev->driver->irq_handler = i915_irq_handler;
			dev->driver->enable_vblank = i8xx_enable_vblank;
			dev->driver->disable_vblank = i8xx_disable_vblank;
		} else {
			dev->driver->irq_preinstall = i965_irq_reset;
			dev->driver->irq_postinstall = i965_irq_postinstall;
			dev->driver->irq_uninstall = i965_irq_reset;
			dev->driver->irq_handler = i965_irq_handler;
			dev->driver->enable_vblank = i965_enable_vblank;
			dev->driver->disable_vblank = i965_disable_vblank;
		}
		if (I915_HAS_HOTPLUG(dev_priv))
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	}
}

/**
 * intel_irq_fini - deinitializes IRQ support
 * @i915: i915 device instance
 *
 * This function deinitializes all the IRQ support.
 */
void intel_irq_fini(struct drm_i915_private *i915)
{
	int i;

	for (i = 0; i < MAX_L3_SLICES; ++i)
		kfree(i915->l3_parity.remap_info[i]);
}

/**
 * intel_irq_install - enables the hardware interrupt
 * @dev_priv: i915 device instance
 *
 * This function enables the hardware interrupt handling, but leaves the hotplug
 * handling still disabled. It is called after intel_irq_init().
 *
 * In the driver load and resume code we need working interrupts in a few places
 * but don't want to deal with the hassle of concurrent probe and hotplug
 * workers. Hence the split into this two-stage approach.
 */
int intel_irq_install(struct drm_i915_private *dev_priv)
{
	/*
	 * We enable some interrupt sources in our postinstall hooks, so mark
	 * interrupts as enabled _before_ actually enabling them to avoid
	 * special cases in our ordering checks.
	 */
	dev_priv->runtime_pm.irqs_enabled = true;

	return drm_irq_install(&dev_priv->drm, dev_priv->drm.pdev->irq);
}

/**
 * intel_irq_uninstall - finalizes all irq handling
 * @dev_priv: i915 device instance
 *
 * This stops interrupt and hotplug handling and unregisters and frees all
 * resources acquired in the init functions.
 */
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
{
	drm_irq_uninstall(&dev_priv->drm);
	intel_hpd_cancel_work(dev_priv);
	dev_priv->runtime_pm.irqs_enabled = false;
}

/**
 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
 * @dev_priv: i915 device instance
 *
 * This function is used to disable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->drm.driver->irq_uninstall(&dev_priv->drm);
	dev_priv->runtime_pm.irqs_enabled = false;
	synchronize_irq(dev_priv->drm.irq);
}

/**
 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
 * @dev_priv: i915 device instance
 *
 * This function is used to enable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->runtime_pm.irqs_enabled = true;
	dev_priv->drm.driver->irq_preinstall(&dev_priv->drm);
	dev_priv->drm.driver->irq_postinstall(&dev_priv->drm);
}
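
/*
 * Rough lifecycle sketch (illustrative only; the exact call sites live in the
 * driver load/teardown and suspend/resume code outside this file). It follows
 * from the kerneldoc above: vtables and work items are set up first, the
 * interrupt is requested afterwards, and the runtime PM helpers bracket the
 * suspend/resume transitions:
 *
 *	intel_irq_init(dev_priv);			// vtables, work items, masks
 *	ret = intel_irq_install(dev_priv);		// request the IRQ, run postinstall
 *	...
 *	intel_runtime_pm_disable_interrupts(dev_priv);	// (runtime) suspend path
 *	intel_runtime_pm_enable_interrupts(dev_priv);	// (runtime) resume path
 *	...
 *	intel_irq_uninstall(dev_priv);			// stop irq and hotplug handling
 *	intel_irq_fini(dev_priv);			// free remaining allocations
 */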