1 /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*- 2 */ 3 /* 4 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. 5 * All Rights Reserved. 6 * 7 * Permission is hereby granted, free of charge, to any person obtaining a 8 * copy of this software and associated documentation files (the 9 * "Software"), to deal in the Software without restriction, including 10 * without limitation the rights to use, copy, modify, merge, publish, 11 * distribute, sub license, and/or sell copies of the Software, and to 12 * permit persons to whom the Software is furnished to do so, subject to 13 * the following conditions: 14 * 15 * The above copyright notice and this permission notice (including the 16 * next paragraph) shall be included in all copies or substantial portions 17 * of the Software. 18 * 19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 22 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR 23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, 24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 26 * 27 */ 28 29 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 30 31 #include <linux/sysrq.h> 32 #include <linux/slab.h> 33 #include <linux/circ_buf.h> 34 #include <drm/drmP.h> 35 #include <drm/i915_drm.h> 36 #include "i915_drv.h" 37 #include "i915_trace.h" 38 #include "intel_drv.h" 39 40 /** 41 * DOC: interrupt handling 42 * 43 * These functions provide the basic support for enabling and disabling the 44 * interrupt handling support. There's a lot more functionality in i915_irq.c 45 * and related files, but that will be described in separate chapters. 
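 *
 * As a rough, illustrative sketch (not lifted from any particular install
 * path in this file), the lifecycle of one interrupt register bank built on
 * the helper macros defined below looks like:
 *
 *	// preinstall/uninstall: mask everything and flush any stale IIR bits
 *	GEN3_IRQ_RESET(DE);
 *	// postinstall: verify IIR is still clear, then enable the wanted bits
 *	GEN3_IRQ_INIT(DE, ~display_mask, display_mask);
 *
 * where the DE prefix expands to the DEIMR/DEIER/DEIIR triplet and
 * display_mask is a hypothetical enable mask; the real install paths later
 * in this file program several such banks plus the per-platform masks.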
46 */ 47 48 static const u32 hpd_ilk[HPD_NUM_PINS] = { 49 [HPD_PORT_A] = DE_DP_A_HOTPLUG, 50 }; 51 52 static const u32 hpd_ivb[HPD_NUM_PINS] = { 53 [HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB, 54 }; 55 56 static const u32 hpd_bdw[HPD_NUM_PINS] = { 57 [HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG, 58 }; 59 60 static const u32 hpd_ibx[HPD_NUM_PINS] = { 61 [HPD_CRT] = SDE_CRT_HOTPLUG, 62 [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG, 63 [HPD_PORT_B] = SDE_PORTB_HOTPLUG, 64 [HPD_PORT_C] = SDE_PORTC_HOTPLUG, 65 [HPD_PORT_D] = SDE_PORTD_HOTPLUG 66 }; 67 68 static const u32 hpd_cpt[HPD_NUM_PINS] = { 69 [HPD_CRT] = SDE_CRT_HOTPLUG_CPT, 70 [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT, 71 [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT, 72 [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT, 73 [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT 74 }; 75 76 static const u32 hpd_spt[HPD_NUM_PINS] = { 77 [HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT, 78 [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT, 79 [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT, 80 [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT, 81 [HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT 82 }; 83 84 static const u32 hpd_mask_i915[HPD_NUM_PINS] = { 85 [HPD_CRT] = CRT_HOTPLUG_INT_EN, 86 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN, 87 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN, 88 [HPD_PORT_B] = PORTB_HOTPLUG_INT_EN, 89 [HPD_PORT_C] = PORTC_HOTPLUG_INT_EN, 90 [HPD_PORT_D] = PORTD_HOTPLUG_INT_EN 91 }; 92 93 static const u32 hpd_status_g4x[HPD_NUM_PINS] = { 94 [HPD_CRT] = CRT_HOTPLUG_INT_STATUS, 95 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X, 96 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X, 97 [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS, 98 [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS, 99 [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS 100 }; 101 102 static const u32 hpd_status_i915[HPD_NUM_PINS] = { 103 [HPD_CRT] = CRT_HOTPLUG_INT_STATUS, 104 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915, 105 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915, 106 [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS, 107 [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS, 108 [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS 109 }; 110 111 /* BXT hpd list */ 112 static const u32 hpd_bxt[HPD_NUM_PINS] = { 113 [HPD_PORT_A] = BXT_DE_PORT_HP_DDIA, 114 [HPD_PORT_B] = BXT_DE_PORT_HP_DDIB, 115 [HPD_PORT_C] = BXT_DE_PORT_HP_DDIC 116 }; 117 118 /* IIR can theoretically queue up two events. Be paranoid. */ 119 #define GEN8_IRQ_RESET_NDX(type, which) do { \ 120 I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \ 121 POSTING_READ(GEN8_##type##_IMR(which)); \ 122 I915_WRITE(GEN8_##type##_IER(which), 0); \ 123 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \ 124 POSTING_READ(GEN8_##type##_IIR(which)); \ 125 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \ 126 POSTING_READ(GEN8_##type##_IIR(which)); \ 127 } while (0) 128 129 #define GEN3_IRQ_RESET(type) do { \ 130 I915_WRITE(type##IMR, 0xffffffff); \ 131 POSTING_READ(type##IMR); \ 132 I915_WRITE(type##IER, 0); \ 133 I915_WRITE(type##IIR, 0xffffffff); \ 134 POSTING_READ(type##IIR); \ 135 I915_WRITE(type##IIR, 0xffffffff); \ 136 POSTING_READ(type##IIR); \ 137 } while (0) 138 139 #define GEN2_IRQ_RESET(type) do { \ 140 I915_WRITE16(type##IMR, 0xffff); \ 141 POSTING_READ16(type##IMR); \ 142 I915_WRITE16(type##IER, 0); \ 143 I915_WRITE16(type##IIR, 0xffff); \ 144 POSTING_READ16(type##IIR); \ 145 I915_WRITE16(type##IIR, 0xffff); \ 146 POSTING_READ16(type##IIR); \ 147 } while (0) 148 149 /* 150 * We should clear IMR at preinstall/uninstall, and just check at postinstall. 
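 *
 * Schematically (an illustrative sketch of the existing flow, not extra code
 * that needs adding):
 *
 *	// preinstall: the *_IRQ_RESET() macros above mask everything and
 *	// clear IIR twice, since IIR can hold two queued events
 *	// postinstall: the *_IRQ_INIT() macros below first check that IIR
 *	// stayed clear ...
 *	gen3_assert_iir_is_zero(dev_priv, DEIIR);
 *	// ... before IER/IMR are programmed with the wanted bits
 *
 * A non-zero IIR at that point means an event sneaked in while everything
 * was supposedly masked, which the assert helpers below report with a WARN
 * and then try to clear.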
 */
static void gen3_assert_iir_is_zero(struct drm_i915_private *dev_priv,
				    i915_reg_t reg)
{
	u32 val = I915_READ(reg);

	if (val == 0)
		return;

	WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
	     i915_mmio_reg_offset(reg), val);
	I915_WRITE(reg, 0xffffffff);
	POSTING_READ(reg);
	I915_WRITE(reg, 0xffffffff);
	POSTING_READ(reg);
}

static void gen2_assert_iir_is_zero(struct drm_i915_private *dev_priv,
				    i915_reg_t reg)
{
	u16 val = I915_READ16(reg);

	if (val == 0)
		return;

	WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
	     i915_mmio_reg_offset(reg), val);
	I915_WRITE16(reg, 0xffff);
	POSTING_READ16(reg);
	I915_WRITE16(reg, 0xffff);
	POSTING_READ16(reg);
}

#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
	gen3_assert_iir_is_zero(dev_priv, GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
	I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
} while (0)

#define GEN3_IRQ_INIT(type, imr_val, ier_val) do { \
	gen3_assert_iir_is_zero(dev_priv, type##IIR); \
	I915_WRITE(type##IER, (ier_val)); \
	I915_WRITE(type##IMR, (imr_val)); \
	POSTING_READ(type##IMR); \
} while (0)

#define GEN2_IRQ_INIT(type, imr_val, ier_val) do { \
	gen2_assert_iir_is_zero(dev_priv, type##IIR); \
	I915_WRITE16(type##IER, (ier_val)); \
	I915_WRITE16(type##IMR, (imr_val)); \
	POSTING_READ16(type##IMR); \
} while (0)

static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);

/* For display hotplug interrupt */
static inline void
i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
				     uint32_t mask,
				     uint32_t bits)
{
	uint32_t val;

	lockdep_assert_held(&dev_priv->irq_lock);
	WARN_ON(bits & ~mask);

	val = I915_READ(PORT_HOTPLUG_EN);
	val &= ~mask;
	val |= bits;
	I915_WRITE(PORT_HOTPLUG_EN, val);
}

/**
 * i915_hotplug_interrupt_update - update hotplug interrupt enable
 * @dev_priv: driver private
 * @mask: bits to update
 * @bits: bits to enable
 * NOTE: the HPD enable bits are modified both inside and outside
 * of an interrupt context. To avoid that read-modify-write cycles
 * interfere, these bits are protected by a spinlock. Since this
 * function is usually not called from a context where the lock is
 * already held, it acquires the lock itself. A non-locking
 * version is also available.
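 *
 * Example (hypothetical caller, shown only to illustrate the mask/bits
 * convention):
 *
 *	// enable the port B HPD interrupt without touching the other bits
 *	i915_hotplug_interrupt_update(dev_priv,
 *				      PORTB_HOTPLUG_INT_EN,
 *				      PORTB_HOTPLUG_INT_EN);
 *	// disable it again: same mask, no enable bits
 *	i915_hotplug_interrupt_update(dev_priv, PORTB_HOTPLUG_INT_EN, 0);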
236 */ 237 void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv, 238 uint32_t mask, 239 uint32_t bits) 240 { 241 spin_lock_irq(&dev_priv->irq_lock); 242 i915_hotplug_interrupt_update_locked(dev_priv, mask, bits); 243 spin_unlock_irq(&dev_priv->irq_lock); 244 } 245 246 /** 247 * ilk_update_display_irq - update DEIMR 248 * @dev_priv: driver private 249 * @interrupt_mask: mask of interrupt bits to update 250 * @enabled_irq_mask: mask of interrupt bits to enable 251 */ 252 void ilk_update_display_irq(struct drm_i915_private *dev_priv, 253 uint32_t interrupt_mask, 254 uint32_t enabled_irq_mask) 255 { 256 uint32_t new_val; 257 258 lockdep_assert_held(&dev_priv->irq_lock); 259 260 WARN_ON(enabled_irq_mask & ~interrupt_mask); 261 262 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 263 return; 264 265 new_val = dev_priv->irq_mask; 266 new_val &= ~interrupt_mask; 267 new_val |= (~enabled_irq_mask & interrupt_mask); 268 269 if (new_val != dev_priv->irq_mask) { 270 dev_priv->irq_mask = new_val; 271 I915_WRITE(DEIMR, dev_priv->irq_mask); 272 POSTING_READ(DEIMR); 273 } 274 } 275 276 /** 277 * ilk_update_gt_irq - update GTIMR 278 * @dev_priv: driver private 279 * @interrupt_mask: mask of interrupt bits to update 280 * @enabled_irq_mask: mask of interrupt bits to enable 281 */ 282 static void ilk_update_gt_irq(struct drm_i915_private *dev_priv, 283 uint32_t interrupt_mask, 284 uint32_t enabled_irq_mask) 285 { 286 lockdep_assert_held(&dev_priv->irq_lock); 287 288 WARN_ON(enabled_irq_mask & ~interrupt_mask); 289 290 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 291 return; 292 293 dev_priv->gt_irq_mask &= ~interrupt_mask; 294 dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask); 295 I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 296 } 297 298 void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask) 299 { 300 ilk_update_gt_irq(dev_priv, mask, mask); 301 POSTING_READ_FW(GTIMR); 302 } 303 304 void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask) 305 { 306 ilk_update_gt_irq(dev_priv, mask, 0); 307 } 308 309 static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv) 310 { 311 return INTEL_GEN(dev_priv) >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR; 312 } 313 314 static i915_reg_t gen6_pm_imr(struct drm_i915_private *dev_priv) 315 { 316 return INTEL_GEN(dev_priv) >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR; 317 } 318 319 static i915_reg_t gen6_pm_ier(struct drm_i915_private *dev_priv) 320 { 321 return INTEL_GEN(dev_priv) >= 8 ? 
GEN8_GT_IER(2) : GEN6_PMIER; 322 } 323 324 /** 325 * snb_update_pm_irq - update GEN6_PMIMR 326 * @dev_priv: driver private 327 * @interrupt_mask: mask of interrupt bits to update 328 * @enabled_irq_mask: mask of interrupt bits to enable 329 */ 330 static void snb_update_pm_irq(struct drm_i915_private *dev_priv, 331 uint32_t interrupt_mask, 332 uint32_t enabled_irq_mask) 333 { 334 uint32_t new_val; 335 336 WARN_ON(enabled_irq_mask & ~interrupt_mask); 337 338 lockdep_assert_held(&dev_priv->irq_lock); 339 340 new_val = dev_priv->pm_imr; 341 new_val &= ~interrupt_mask; 342 new_val |= (~enabled_irq_mask & interrupt_mask); 343 344 if (new_val != dev_priv->pm_imr) { 345 dev_priv->pm_imr = new_val; 346 I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_imr); 347 POSTING_READ(gen6_pm_imr(dev_priv)); 348 } 349 } 350 351 void gen6_unmask_pm_irq(struct drm_i915_private *dev_priv, u32 mask) 352 { 353 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 354 return; 355 356 snb_update_pm_irq(dev_priv, mask, mask); 357 } 358 359 static void __gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask) 360 { 361 snb_update_pm_irq(dev_priv, mask, 0); 362 } 363 364 void gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask) 365 { 366 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 367 return; 368 369 __gen6_mask_pm_irq(dev_priv, mask); 370 } 371 372 static void gen6_reset_pm_iir(struct drm_i915_private *dev_priv, u32 reset_mask) 373 { 374 i915_reg_t reg = gen6_pm_iir(dev_priv); 375 376 lockdep_assert_held(&dev_priv->irq_lock); 377 378 I915_WRITE(reg, reset_mask); 379 I915_WRITE(reg, reset_mask); 380 POSTING_READ(reg); 381 } 382 383 static void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, u32 enable_mask) 384 { 385 lockdep_assert_held(&dev_priv->irq_lock); 386 387 dev_priv->pm_ier |= enable_mask; 388 I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->pm_ier); 389 gen6_unmask_pm_irq(dev_priv, enable_mask); 390 /* unmask_pm_irq provides an implicit barrier (POSTING_READ) */ 391 } 392 393 static void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, u32 disable_mask) 394 { 395 lockdep_assert_held(&dev_priv->irq_lock); 396 397 dev_priv->pm_ier &= ~disable_mask; 398 __gen6_mask_pm_irq(dev_priv, disable_mask); 399 I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->pm_ier); 400 /* though a barrier is missing here, but don't really need a one */ 401 } 402 403 void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv) 404 { 405 spin_lock_irq(&dev_priv->irq_lock); 406 gen6_reset_pm_iir(dev_priv, dev_priv->pm_rps_events); 407 dev_priv->gt_pm.rps.pm_iir = 0; 408 spin_unlock_irq(&dev_priv->irq_lock); 409 } 410 411 void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv) 412 { 413 struct intel_rps *rps = &dev_priv->gt_pm.rps; 414 415 if (READ_ONCE(rps->interrupts_enabled)) 416 return; 417 418 spin_lock_irq(&dev_priv->irq_lock); 419 WARN_ON_ONCE(rps->pm_iir); 420 WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events); 421 rps->interrupts_enabled = true; 422 gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events); 423 424 spin_unlock_irq(&dev_priv->irq_lock); 425 } 426 427 void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv) 428 { 429 struct intel_rps *rps = &dev_priv->gt_pm.rps; 430 431 if (!READ_ONCE(rps->interrupts_enabled)) 432 return; 433 434 spin_lock_irq(&dev_priv->irq_lock); 435 rps->interrupts_enabled = false; 436 437 I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0u)); 438 439 gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events); 440 441 
spin_unlock_irq(&dev_priv->irq_lock); 442 synchronize_irq(dev_priv->drm.irq); 443 444 /* Now that we will not be generating any more work, flush any 445 * outstanding tasks. As we are called on the RPS idle path, 446 * we will reset the GPU to minimum frequencies, so the current 447 * state of the worker can be discarded. 448 */ 449 cancel_work_sync(&rps->work); 450 gen6_reset_rps_interrupts(dev_priv); 451 } 452 453 void gen9_reset_guc_interrupts(struct drm_i915_private *dev_priv) 454 { 455 spin_lock_irq(&dev_priv->irq_lock); 456 gen6_reset_pm_iir(dev_priv, dev_priv->pm_guc_events); 457 spin_unlock_irq(&dev_priv->irq_lock); 458 } 459 460 void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv) 461 { 462 spin_lock_irq(&dev_priv->irq_lock); 463 if (!dev_priv->guc.interrupts_enabled) { 464 WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & 465 dev_priv->pm_guc_events); 466 dev_priv->guc.interrupts_enabled = true; 467 gen6_enable_pm_irq(dev_priv, dev_priv->pm_guc_events); 468 } 469 spin_unlock_irq(&dev_priv->irq_lock); 470 } 471 472 void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv) 473 { 474 spin_lock_irq(&dev_priv->irq_lock); 475 dev_priv->guc.interrupts_enabled = false; 476 477 gen6_disable_pm_irq(dev_priv, dev_priv->pm_guc_events); 478 479 spin_unlock_irq(&dev_priv->irq_lock); 480 synchronize_irq(dev_priv->drm.irq); 481 482 gen9_reset_guc_interrupts(dev_priv); 483 } 484 485 /** 486 * bdw_update_port_irq - update DE port interrupt 487 * @dev_priv: driver private 488 * @interrupt_mask: mask of interrupt bits to update 489 * @enabled_irq_mask: mask of interrupt bits to enable 490 */ 491 static void bdw_update_port_irq(struct drm_i915_private *dev_priv, 492 uint32_t interrupt_mask, 493 uint32_t enabled_irq_mask) 494 { 495 uint32_t new_val; 496 uint32_t old_val; 497 498 lockdep_assert_held(&dev_priv->irq_lock); 499 500 WARN_ON(enabled_irq_mask & ~interrupt_mask); 501 502 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 503 return; 504 505 old_val = I915_READ(GEN8_DE_PORT_IMR); 506 507 new_val = old_val; 508 new_val &= ~interrupt_mask; 509 new_val |= (~enabled_irq_mask & interrupt_mask); 510 511 if (new_val != old_val) { 512 I915_WRITE(GEN8_DE_PORT_IMR, new_val); 513 POSTING_READ(GEN8_DE_PORT_IMR); 514 } 515 } 516 517 /** 518 * bdw_update_pipe_irq - update DE pipe interrupt 519 * @dev_priv: driver private 520 * @pipe: pipe whose interrupt to update 521 * @interrupt_mask: mask of interrupt bits to update 522 * @enabled_irq_mask: mask of interrupt bits to enable 523 */ 524 void bdw_update_pipe_irq(struct drm_i915_private *dev_priv, 525 enum pipe pipe, 526 uint32_t interrupt_mask, 527 uint32_t enabled_irq_mask) 528 { 529 uint32_t new_val; 530 531 lockdep_assert_held(&dev_priv->irq_lock); 532 533 WARN_ON(enabled_irq_mask & ~interrupt_mask); 534 535 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 536 return; 537 538 new_val = dev_priv->de_irq_mask[pipe]; 539 new_val &= ~interrupt_mask; 540 new_val |= (~enabled_irq_mask & interrupt_mask); 541 542 if (new_val != dev_priv->de_irq_mask[pipe]) { 543 dev_priv->de_irq_mask[pipe] = new_val; 544 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); 545 POSTING_READ(GEN8_DE_PIPE_IMR(pipe)); 546 } 547 } 548 549 /** 550 * ibx_display_interrupt_update - update SDEIMR 551 * @dev_priv: driver private 552 * @interrupt_mask: mask of interrupt bits to update 553 * @enabled_irq_mask: mask of interrupt bits to enable 554 */ 555 void ibx_display_interrupt_update(struct drm_i915_private *dev_priv, 556 uint32_t interrupt_mask, 557 uint32_t 
enabled_irq_mask) 558 { 559 uint32_t sdeimr = I915_READ(SDEIMR); 560 sdeimr &= ~interrupt_mask; 561 sdeimr |= (~enabled_irq_mask & interrupt_mask); 562 563 WARN_ON(enabled_irq_mask & ~interrupt_mask); 564 565 lockdep_assert_held(&dev_priv->irq_lock); 566 567 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 568 return; 569 570 I915_WRITE(SDEIMR, sdeimr); 571 POSTING_READ(SDEIMR); 572 } 573 574 u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv, 575 enum pipe pipe) 576 { 577 u32 status_mask = dev_priv->pipestat_irq_mask[pipe]; 578 u32 enable_mask = status_mask << 16; 579 580 lockdep_assert_held(&dev_priv->irq_lock); 581 582 if (INTEL_GEN(dev_priv) < 5) 583 goto out; 584 585 /* 586 * On pipe A we don't support the PSR interrupt yet, 587 * on pipe B and C the same bit MBZ. 588 */ 589 if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV)) 590 return 0; 591 /* 592 * On pipe B and C we don't support the PSR interrupt yet, on pipe 593 * A the same bit is for perf counters which we don't use either. 594 */ 595 if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV)) 596 return 0; 597 598 enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS | 599 SPRITE0_FLIP_DONE_INT_EN_VLV | 600 SPRITE1_FLIP_DONE_INT_EN_VLV); 601 if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV) 602 enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV; 603 if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV) 604 enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV; 605 606 out: 607 WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK || 608 status_mask & ~PIPESTAT_INT_STATUS_MASK, 609 "pipe %c: enable_mask=0x%x, status_mask=0x%x\n", 610 pipe_name(pipe), enable_mask, status_mask); 611 612 return enable_mask; 613 } 614 615 void i915_enable_pipestat(struct drm_i915_private *dev_priv, 616 enum pipe pipe, u32 status_mask) 617 { 618 i915_reg_t reg = PIPESTAT(pipe); 619 u32 enable_mask; 620 621 WARN_ONCE(status_mask & ~PIPESTAT_INT_STATUS_MASK, 622 "pipe %c: status_mask=0x%x\n", 623 pipe_name(pipe), status_mask); 624 625 lockdep_assert_held(&dev_priv->irq_lock); 626 WARN_ON(!intel_irqs_enabled(dev_priv)); 627 628 if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == status_mask) 629 return; 630 631 dev_priv->pipestat_irq_mask[pipe] |= status_mask; 632 enable_mask = i915_pipestat_enable_mask(dev_priv, pipe); 633 634 I915_WRITE(reg, enable_mask | status_mask); 635 POSTING_READ(reg); 636 } 637 638 void i915_disable_pipestat(struct drm_i915_private *dev_priv, 639 enum pipe pipe, u32 status_mask) 640 { 641 i915_reg_t reg = PIPESTAT(pipe); 642 u32 enable_mask; 643 644 WARN_ONCE(status_mask & ~PIPESTAT_INT_STATUS_MASK, 645 "pipe %c: status_mask=0x%x\n", 646 pipe_name(pipe), status_mask); 647 648 lockdep_assert_held(&dev_priv->irq_lock); 649 WARN_ON(!intel_irqs_enabled(dev_priv)); 650 651 if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == 0) 652 return; 653 654 dev_priv->pipestat_irq_mask[pipe] &= ~status_mask; 655 enable_mask = i915_pipestat_enable_mask(dev_priv, pipe); 656 657 I915_WRITE(reg, enable_mask | status_mask); 658 POSTING_READ(reg); 659 } 660 661 /** 662 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion 663 * @dev_priv: i915 device private 664 */ 665 static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv) 666 { 667 if (!dev_priv->opregion.asle || !IS_MOBILE(dev_priv)) 668 return; 669 670 spin_lock_irq(&dev_priv->irq_lock); 671 672 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS); 673 if (INTEL_GEN(dev_priv) >= 4) 674 i915_enable_pipestat(dev_priv, PIPE_A, 675 PIPE_LEGACY_BLC_EVENT_STATUS); 676 677 
spin_unlock_irq(&dev_priv->irq_lock); 678 } 679 680 /* 681 * This timing diagram depicts the video signal in and 682 * around the vertical blanking period. 683 * 684 * Assumptions about the fictitious mode used in this example: 685 * vblank_start >= 3 686 * vsync_start = vblank_start + 1 687 * vsync_end = vblank_start + 2 688 * vtotal = vblank_start + 3 689 * 690 * start of vblank: 691 * latch double buffered registers 692 * increment frame counter (ctg+) 693 * generate start of vblank interrupt (gen4+) 694 * | 695 * | frame start: 696 * | generate frame start interrupt (aka. vblank interrupt) (gmch) 697 * | may be shifted forward 1-3 extra lines via PIPECONF 698 * | | 699 * | | start of vsync: 700 * | | generate vsync interrupt 701 * | | | 702 * ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx 703 * . \hs/ . \hs/ \hs/ \hs/ . \hs/ 704 * ----va---> <-----------------vb--------------------> <--------va------------- 705 * | | <----vs-----> | 706 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2) 707 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+) 708 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi) 709 * | | | 710 * last visible pixel first visible pixel 711 * | increment frame counter (gen3/4) 712 * pixel counter = vblank_start * htotal pixel counter = 0 (gen3/4) 713 * 714 * x = horizontal active 715 * _ = horizontal blanking 716 * hs = horizontal sync 717 * va = vertical active 718 * vb = vertical blanking 719 * vs = vertical sync 720 * vbs = vblank_start (number) 721 * 722 * Summary: 723 * - most events happen at the start of horizontal sync 724 * - frame start happens at the start of horizontal blank, 1-4 lines 725 * (depending on PIPECONF settings) after the start of vblank 726 * - gen3/4 pixel and frame counter are synchronized with the start 727 * of horizontal active on the first line of vertical active 728 */ 729 730 /* Called from drm generic code, passed a 'crtc', which 731 * we use as a pipe index 732 */ 733 static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe) 734 { 735 struct drm_i915_private *dev_priv = to_i915(dev); 736 i915_reg_t high_frame, low_frame; 737 u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal; 738 const struct drm_display_mode *mode = &dev->vblank[pipe].hwmode; 739 unsigned long irqflags; 740 741 htotal = mode->crtc_htotal; 742 hsync_start = mode->crtc_hsync_start; 743 vbl_start = mode->crtc_vblank_start; 744 if (mode->flags & DRM_MODE_FLAG_INTERLACE) 745 vbl_start = DIV_ROUND_UP(vbl_start, 2); 746 747 /* Convert to pixel count */ 748 vbl_start *= htotal; 749 750 /* Start of vblank event occurs at start of hsync */ 751 vbl_start -= htotal - hsync_start; 752 753 high_frame = PIPEFRAME(pipe); 754 low_frame = PIPEFRAMEPIXEL(pipe); 755 756 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 757 758 /* 759 * High & low register fields aren't synchronized, so make sure 760 * we get a low value that's stable across two reads of the high 761 * register. 
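	 *
	 * Made-up example of the race this avoids: if the frame counter
	 * advances from 0x0000ff to 0x000100 between the two reads, we could
	 * otherwise pair high = 0x0000 with low = 0x00 and report a counter
	 * that appears to have jumped backwards. Re-reading the high dword
	 * until two consecutive reads agree guarantees the low byte we keep
	 * was sampled within a single frame.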
762 */ 763 do { 764 high1 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK; 765 low = I915_READ_FW(low_frame); 766 high2 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK; 767 } while (high1 != high2); 768 769 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 770 771 high1 >>= PIPE_FRAME_HIGH_SHIFT; 772 pixel = low & PIPE_PIXEL_MASK; 773 low >>= PIPE_FRAME_LOW_SHIFT; 774 775 /* 776 * The frame counter increments at beginning of active. 777 * Cook up a vblank counter by also checking the pixel 778 * counter against vblank start. 779 */ 780 return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff; 781 } 782 783 static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe) 784 { 785 struct drm_i915_private *dev_priv = to_i915(dev); 786 787 return I915_READ(PIPE_FRMCOUNT_G4X(pipe)); 788 } 789 790 /* 791 * On certain encoders on certain platforms, pipe 792 * scanline register will not work to get the scanline, 793 * since the timings are driven from the PORT or issues 794 * with scanline register updates. 795 * This function will use Framestamp and current 796 * timestamp registers to calculate the scanline. 797 */ 798 static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc) 799 { 800 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 801 struct drm_vblank_crtc *vblank = 802 &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)]; 803 const struct drm_display_mode *mode = &vblank->hwmode; 804 u32 vblank_start = mode->crtc_vblank_start; 805 u32 vtotal = mode->crtc_vtotal; 806 u32 htotal = mode->crtc_htotal; 807 u32 clock = mode->crtc_clock; 808 u32 scanline, scan_prev_time, scan_curr_time, scan_post_time; 809 810 /* 811 * To avoid the race condition where we might cross into the 812 * next vblank just between the PIPE_FRMTMSTMP and TIMESTAMP_CTR 813 * reads. We make sure we read PIPE_FRMTMSTMP and TIMESTAMP_CTR 814 * during the same frame. 815 */ 816 do { 817 /* 818 * This field provides read back of the display 819 * pipe frame time stamp. The time stamp value 820 * is sampled at every start of vertical blank. 821 */ 822 scan_prev_time = I915_READ_FW(PIPE_FRMTMSTMP(crtc->pipe)); 823 824 /* 825 * The TIMESTAMP_CTR register has the current 826 * time stamp value. 827 */ 828 scan_curr_time = I915_READ_FW(IVB_TIMESTAMP_CTR); 829 830 scan_post_time = I915_READ_FW(PIPE_FRMTMSTMP(crtc->pipe)); 831 } while (scan_post_time != scan_prev_time); 832 833 scanline = div_u64(mul_u32_u32(scan_curr_time - scan_prev_time, 834 clock), 1000 * htotal); 835 scanline = min(scanline, vtotal - 1); 836 scanline = (scanline + vblank_start) % vtotal; 837 838 return scanline; 839 } 840 841 /* I915_READ_FW, only for fast reads of display block, no need for forcewake etc. 
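 *
 * A worked example for __intel_get_crtc_scanline_from_timestamp() above,
 * with made-up numbers and assuming the timestamp delta is expressed in
 * microseconds: for crtc_clock = 148500 (kHz), htotal = 2200 and a delta of
 * 7407 between the PIPE_FRMTMSTMP and TIMESTAMP_CTR samples,
 *
 *	scanline = (7407 * 148500) / (1000 * 2200) ~= 500
 *
 * i.e. roughly 500 lines have gone by since the last start of vblank; the
 * result is then clamped to vtotal - 1 and offset by vblank_start (mod
 * vtotal), as the code above does.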
*/ 842 static int __intel_get_crtc_scanline(struct intel_crtc *crtc) 843 { 844 struct drm_device *dev = crtc->base.dev; 845 struct drm_i915_private *dev_priv = to_i915(dev); 846 const struct drm_display_mode *mode; 847 struct drm_vblank_crtc *vblank; 848 enum pipe pipe = crtc->pipe; 849 int position, vtotal; 850 851 if (!crtc->active) 852 return -1; 853 854 vblank = &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)]; 855 mode = &vblank->hwmode; 856 857 if (mode->private_flags & I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP) 858 return __intel_get_crtc_scanline_from_timestamp(crtc); 859 860 vtotal = mode->crtc_vtotal; 861 if (mode->flags & DRM_MODE_FLAG_INTERLACE) 862 vtotal /= 2; 863 864 if (IS_GEN2(dev_priv)) 865 position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2; 866 else 867 position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3; 868 869 /* 870 * On HSW, the DSL reg (0x70000) appears to return 0 if we 871 * read it just before the start of vblank. So try it again 872 * so we don't accidentally end up spanning a vblank frame 873 * increment, causing the pipe_update_end() code to squak at us. 874 * 875 * The nature of this problem means we can't simply check the ISR 876 * bit and return the vblank start value; nor can we use the scanline 877 * debug register in the transcoder as it appears to have the same 878 * problem. We may need to extend this to include other platforms, 879 * but so far testing only shows the problem on HSW. 880 */ 881 if (HAS_DDI(dev_priv) && !position) { 882 int i, temp; 883 884 for (i = 0; i < 100; i++) { 885 udelay(1); 886 temp = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3; 887 if (temp != position) { 888 position = temp; 889 break; 890 } 891 } 892 } 893 894 /* 895 * See update_scanline_offset() for the details on the 896 * scanline_offset adjustment. 897 */ 898 return (position + crtc->scanline_offset) % vtotal; 899 } 900 901 static bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe, 902 bool in_vblank_irq, int *vpos, int *hpos, 903 ktime_t *stime, ktime_t *etime, 904 const struct drm_display_mode *mode) 905 { 906 struct drm_i915_private *dev_priv = to_i915(dev); 907 struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv, 908 pipe); 909 int position; 910 int vbl_start, vbl_end, hsync_start, htotal, vtotal; 911 unsigned long irqflags; 912 913 if (WARN_ON(!mode->crtc_clock)) { 914 DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled " 915 "pipe %c\n", pipe_name(pipe)); 916 return false; 917 } 918 919 htotal = mode->crtc_htotal; 920 hsync_start = mode->crtc_hsync_start; 921 vtotal = mode->crtc_vtotal; 922 vbl_start = mode->crtc_vblank_start; 923 vbl_end = mode->crtc_vblank_end; 924 925 if (mode->flags & DRM_MODE_FLAG_INTERLACE) { 926 vbl_start = DIV_ROUND_UP(vbl_start, 2); 927 vbl_end /= 2; 928 vtotal /= 2; 929 } 930 931 /* 932 * Lock uncore.lock, as we will do multiple timing critical raw 933 * register reads, potentially with preemption disabled, so the 934 * following code must not block on uncore.lock. 935 */ 936 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 937 938 /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */ 939 940 /* Get optional system timestamp before query. */ 941 if (stime) 942 *stime = ktime_get(); 943 944 if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) { 945 /* No obvious pixelcount register. Only query vertical 946 * scanout position from Display scan line register. 
		 */
		position = __intel_get_crtc_scanline(intel_crtc);
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (I915_READ_FW(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;

		/*
		 * In interlaced modes, the pixel counter counts all pixels,
		 * so one field will have htotal more pixels. In order to avoid
		 * the reported position from jumping backwards when the pixel
		 * counter is beyond the length of the shorter field, just
		 * clamp the position to the length of the shorter field. This
		 * matches how the scanline counter based position works since
		 * the scanline counter doesn't count the two half lines.
		 */
		if (position >= vtotal)
			position = vtotal - 1;

		/*
		 * Start of vblank interrupt is triggered at start of hsync,
		 * just prior to the first active line of vblank. However we
		 * consider lines to start at the leading edge of horizontal
		 * active. So, should we get here before we've crossed into
		 * the horizontal active of the first line in vblank, we would
		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
		 * always add htotal-hsync_start to the current pixel position.
		 */
		position = (position + htotal - hsync_start) % vtotal;
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up from vbl_end.
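	 *
	 * Small worked example (made-up mode, scanline-based path): with
	 * vtotal = 806, vbl_start = 768 and vbl_end = 806, a raw position of
	 * 770 (inside vblank) becomes 770 - 806 = -36, while a raw position
	 * of 100 (inside the active area) stays 100 + 806 - 806 = 100, so the
	 * sign alone tells the caller whether we are in vblank.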
998 */ 999 if (position >= vbl_start) 1000 position -= vbl_end; 1001 else 1002 position += vtotal - vbl_end; 1003 1004 if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) { 1005 *vpos = position; 1006 *hpos = 0; 1007 } else { 1008 *vpos = position / htotal; 1009 *hpos = position - (*vpos * htotal); 1010 } 1011 1012 return true; 1013 } 1014 1015 int intel_get_crtc_scanline(struct intel_crtc *crtc) 1016 { 1017 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1018 unsigned long irqflags; 1019 int position; 1020 1021 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 1022 position = __intel_get_crtc_scanline(crtc); 1023 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 1024 1025 return position; 1026 } 1027 1028 static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv) 1029 { 1030 u32 busy_up, busy_down, max_avg, min_avg; 1031 u8 new_delay; 1032 1033 spin_lock(&mchdev_lock); 1034 1035 I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS)); 1036 1037 new_delay = dev_priv->ips.cur_delay; 1038 1039 I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG); 1040 busy_up = I915_READ(RCPREVBSYTUPAVG); 1041 busy_down = I915_READ(RCPREVBSYTDNAVG); 1042 max_avg = I915_READ(RCBMAXAVG); 1043 min_avg = I915_READ(RCBMINAVG); 1044 1045 /* Handle RCS change request from hw */ 1046 if (busy_up > max_avg) { 1047 if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay) 1048 new_delay = dev_priv->ips.cur_delay - 1; 1049 if (new_delay < dev_priv->ips.max_delay) 1050 new_delay = dev_priv->ips.max_delay; 1051 } else if (busy_down < min_avg) { 1052 if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay) 1053 new_delay = dev_priv->ips.cur_delay + 1; 1054 if (new_delay > dev_priv->ips.min_delay) 1055 new_delay = dev_priv->ips.min_delay; 1056 } 1057 1058 if (ironlake_set_drps(dev_priv, new_delay)) 1059 dev_priv->ips.cur_delay = new_delay; 1060 1061 spin_unlock(&mchdev_lock); 1062 1063 return; 1064 } 1065 1066 static void notify_ring(struct intel_engine_cs *engine) 1067 { 1068 struct drm_i915_gem_request *rq = NULL; 1069 struct intel_wait *wait; 1070 1071 if (!engine->breadcrumbs.irq_armed) 1072 return; 1073 1074 atomic_inc(&engine->irq_count); 1075 set_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted); 1076 1077 spin_lock(&engine->breadcrumbs.irq_lock); 1078 wait = engine->breadcrumbs.irq_wait; 1079 if (wait) { 1080 bool wakeup = engine->irq_seqno_barrier; 1081 1082 /* We use a callback from the dma-fence to submit 1083 * requests after waiting on our own requests. To 1084 * ensure minimum delay in queuing the next request to 1085 * hardware, signal the fence now rather than wait for 1086 * the signaler to be woken up. We still wake up the 1087 * waiter in order to handle the irq-seqno coherency 1088 * issues (we may receive the interrupt before the 1089 * seqno is written, see __i915_request_irq_complete()) 1090 * and to handle coalescing of multiple seqno updates 1091 * and many waiters. 
1092 */ 1093 if (i915_seqno_passed(intel_engine_get_seqno(engine), 1094 wait->seqno)) { 1095 struct drm_i915_gem_request *waiter = wait->request; 1096 1097 wakeup = true; 1098 if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, 1099 &waiter->fence.flags) && 1100 intel_wait_check_request(wait, waiter)) 1101 rq = i915_gem_request_get(waiter); 1102 } 1103 1104 if (wakeup) 1105 wake_up_process(wait->tsk); 1106 } else { 1107 if (engine->breadcrumbs.irq_armed) 1108 __intel_engine_disarm_breadcrumbs(engine); 1109 } 1110 spin_unlock(&engine->breadcrumbs.irq_lock); 1111 1112 if (rq) { 1113 dma_fence_signal(&rq->fence); 1114 i915_gem_request_put(rq); 1115 } 1116 1117 trace_intel_engine_notify(engine, wait); 1118 } 1119 1120 static void vlv_c0_read(struct drm_i915_private *dev_priv, 1121 struct intel_rps_ei *ei) 1122 { 1123 ei->ktime = ktime_get_raw(); 1124 ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT); 1125 ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT); 1126 } 1127 1128 void gen6_rps_reset_ei(struct drm_i915_private *dev_priv) 1129 { 1130 memset(&dev_priv->gt_pm.rps.ei, 0, sizeof(dev_priv->gt_pm.rps.ei)); 1131 } 1132 1133 static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir) 1134 { 1135 struct intel_rps *rps = &dev_priv->gt_pm.rps; 1136 const struct intel_rps_ei *prev = &rps->ei; 1137 struct intel_rps_ei now; 1138 u32 events = 0; 1139 1140 if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0) 1141 return 0; 1142 1143 vlv_c0_read(dev_priv, &now); 1144 1145 if (prev->ktime) { 1146 u64 time, c0; 1147 u32 render, media; 1148 1149 time = ktime_us_delta(now.ktime, prev->ktime); 1150 1151 time *= dev_priv->czclk_freq; 1152 1153 /* Workload can be split between render + media, 1154 * e.g. SwapBuffers being blitted in X after being rendered in 1155 * mesa. To account for this we need to combine both engines 1156 * into our activity counter. 1157 */ 1158 render = now.render_c0 - prev->render_c0; 1159 media = now.media_c0 - prev->media_c0; 1160 c0 = max(render, media); 1161 c0 *= 1000 * 100 << 8; /* to usecs and scale to threshold% */ 1162 1163 if (c0 > time * rps->up_threshold) 1164 events = GEN6_PM_RP_UP_THRESHOLD; 1165 else if (c0 < time * rps->down_threshold) 1166 events = GEN6_PM_RP_DOWN_THRESHOLD; 1167 } 1168 1169 rps->ei = now; 1170 return events; 1171 } 1172 1173 static void gen6_pm_rps_work(struct work_struct *work) 1174 { 1175 struct drm_i915_private *dev_priv = 1176 container_of(work, struct drm_i915_private, gt_pm.rps.work); 1177 struct intel_rps *rps = &dev_priv->gt_pm.rps; 1178 bool client_boost = false; 1179 int new_delay, adj, min, max; 1180 u32 pm_iir = 0; 1181 1182 spin_lock_irq(&dev_priv->irq_lock); 1183 if (rps->interrupts_enabled) { 1184 pm_iir = fetch_and_zero(&rps->pm_iir); 1185 client_boost = atomic_read(&rps->num_waiters); 1186 } 1187 spin_unlock_irq(&dev_priv->irq_lock); 1188 1189 /* Make sure we didn't queue anything we're not going to process. 
*/ 1190 WARN_ON(pm_iir & ~dev_priv->pm_rps_events); 1191 if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost) 1192 goto out; 1193 1194 mutex_lock(&dev_priv->pcu_lock); 1195 1196 pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir); 1197 1198 adj = rps->last_adj; 1199 new_delay = rps->cur_freq; 1200 min = rps->min_freq_softlimit; 1201 max = rps->max_freq_softlimit; 1202 if (client_boost) 1203 max = rps->max_freq; 1204 if (client_boost && new_delay < rps->boost_freq) { 1205 new_delay = rps->boost_freq; 1206 adj = 0; 1207 } else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) { 1208 if (adj > 0) 1209 adj *= 2; 1210 else /* CHV needs even encode values */ 1211 adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1; 1212 1213 if (new_delay >= rps->max_freq_softlimit) 1214 adj = 0; 1215 } else if (client_boost) { 1216 adj = 0; 1217 } else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) { 1218 if (rps->cur_freq > rps->efficient_freq) 1219 new_delay = rps->efficient_freq; 1220 else if (rps->cur_freq > rps->min_freq_softlimit) 1221 new_delay = rps->min_freq_softlimit; 1222 adj = 0; 1223 } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) { 1224 if (adj < 0) 1225 adj *= 2; 1226 else /* CHV needs even encode values */ 1227 adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1; 1228 1229 if (new_delay <= rps->min_freq_softlimit) 1230 adj = 0; 1231 } else { /* unknown event */ 1232 adj = 0; 1233 } 1234 1235 rps->last_adj = adj; 1236 1237 /* sysfs frequency interfaces may have snuck in while servicing the 1238 * interrupt 1239 */ 1240 new_delay += adj; 1241 new_delay = clamp_t(int, new_delay, min, max); 1242 1243 if (intel_set_rps(dev_priv, new_delay)) { 1244 DRM_DEBUG_DRIVER("Failed to set new GPU frequency\n"); 1245 rps->last_adj = 0; 1246 } 1247 1248 mutex_unlock(&dev_priv->pcu_lock); 1249 1250 out: 1251 /* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */ 1252 spin_lock_irq(&dev_priv->irq_lock); 1253 if (rps->interrupts_enabled) 1254 gen6_unmask_pm_irq(dev_priv, dev_priv->pm_rps_events); 1255 spin_unlock_irq(&dev_priv->irq_lock); 1256 } 1257 1258 1259 /** 1260 * ivybridge_parity_work - Workqueue called when a parity error interrupt 1261 * occurred. 1262 * @work: workqueue struct 1263 * 1264 * Doesn't actually do anything except notify userspace. As a consequence of 1265 * this event, userspace should try to remap the bad rows since statistically 1266 * it is likely the same row is more likely to go bad again. 1267 */ 1268 static void ivybridge_parity_work(struct work_struct *work) 1269 { 1270 struct drm_i915_private *dev_priv = 1271 container_of(work, typeof(*dev_priv), l3_parity.error_work); 1272 u32 error_status, row, bank, subbank; 1273 char *parity_event[6]; 1274 uint32_t misccpctl; 1275 uint8_t slice = 0; 1276 1277 /* We must turn off DOP level clock gating to access the L3 registers. 1278 * In order to prevent a get/put style interface, acquire struct mutex 1279 * any time we access those registers. 
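	 *
	 * For reference, the uevent environment assembled below consists of
	 * I915_L3_PARITY_UEVENT "=1" plus ROW=, BANK=, SUBBANK= and SLICE=
	 * entries, e.g. (illustrative values only):
	 *
	 *	ROW=12 BANK=3 SUBBANK=1 SLICE=0
	 *
	 * which gives a userspace listener everything it needs to decide
	 * which L3 row to remap.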
1280 */ 1281 mutex_lock(&dev_priv->drm.struct_mutex); 1282 1283 /* If we've screwed up tracking, just let the interrupt fire again */ 1284 if (WARN_ON(!dev_priv->l3_parity.which_slice)) 1285 goto out; 1286 1287 misccpctl = I915_READ(GEN7_MISCCPCTL); 1288 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE); 1289 POSTING_READ(GEN7_MISCCPCTL); 1290 1291 while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) { 1292 i915_reg_t reg; 1293 1294 slice--; 1295 if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv))) 1296 break; 1297 1298 dev_priv->l3_parity.which_slice &= ~(1<<slice); 1299 1300 reg = GEN7_L3CDERRST1(slice); 1301 1302 error_status = I915_READ(reg); 1303 row = GEN7_PARITY_ERROR_ROW(error_status); 1304 bank = GEN7_PARITY_ERROR_BANK(error_status); 1305 subbank = GEN7_PARITY_ERROR_SUBBANK(error_status); 1306 1307 I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE); 1308 POSTING_READ(reg); 1309 1310 parity_event[0] = I915_L3_PARITY_UEVENT "=1"; 1311 parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row); 1312 parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank); 1313 parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank); 1314 parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice); 1315 parity_event[5] = NULL; 1316 1317 kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj, 1318 KOBJ_CHANGE, parity_event); 1319 1320 DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n", 1321 slice, row, bank, subbank); 1322 1323 kfree(parity_event[4]); 1324 kfree(parity_event[3]); 1325 kfree(parity_event[2]); 1326 kfree(parity_event[1]); 1327 } 1328 1329 I915_WRITE(GEN7_MISCCPCTL, misccpctl); 1330 1331 out: 1332 WARN_ON(dev_priv->l3_parity.which_slice); 1333 spin_lock_irq(&dev_priv->irq_lock); 1334 gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv)); 1335 spin_unlock_irq(&dev_priv->irq_lock); 1336 1337 mutex_unlock(&dev_priv->drm.struct_mutex); 1338 } 1339 1340 static void ivybridge_parity_error_irq_handler(struct drm_i915_private *dev_priv, 1341 u32 iir) 1342 { 1343 if (!HAS_L3_DPF(dev_priv)) 1344 return; 1345 1346 spin_lock(&dev_priv->irq_lock); 1347 gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv)); 1348 spin_unlock(&dev_priv->irq_lock); 1349 1350 iir &= GT_PARITY_ERROR(dev_priv); 1351 if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1) 1352 dev_priv->l3_parity.which_slice |= 1 << 1; 1353 1354 if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT) 1355 dev_priv->l3_parity.which_slice |= 1 << 0; 1356 1357 queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work); 1358 } 1359 1360 static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv, 1361 u32 gt_iir) 1362 { 1363 if (gt_iir & GT_RENDER_USER_INTERRUPT) 1364 notify_ring(dev_priv->engine[RCS]); 1365 if (gt_iir & ILK_BSD_USER_INTERRUPT) 1366 notify_ring(dev_priv->engine[VCS]); 1367 } 1368 1369 static void snb_gt_irq_handler(struct drm_i915_private *dev_priv, 1370 u32 gt_iir) 1371 { 1372 if (gt_iir & GT_RENDER_USER_INTERRUPT) 1373 notify_ring(dev_priv->engine[RCS]); 1374 if (gt_iir & GT_BSD_USER_INTERRUPT) 1375 notify_ring(dev_priv->engine[VCS]); 1376 if (gt_iir & GT_BLT_USER_INTERRUPT) 1377 notify_ring(dev_priv->engine[BCS]); 1378 1379 if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT | 1380 GT_BSD_CS_ERROR_INTERRUPT | 1381 GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) 1382 DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir); 1383 1384 if (gt_iir & GT_PARITY_ERROR(dev_priv)) 1385 ivybridge_parity_error_irq_handler(dev_priv, gt_iir); 1386 } 1387 1388 static void 1389 
gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir, int test_shift) 1390 { 1391 struct intel_engine_execlists * const execlists = &engine->execlists; 1392 bool tasklet = false; 1393 1394 if (iir & (GT_CONTEXT_SWITCH_INTERRUPT << test_shift)) { 1395 if (READ_ONCE(engine->execlists.active)) { 1396 __set_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted); 1397 tasklet = true; 1398 } 1399 } 1400 1401 if (iir & (GT_RENDER_USER_INTERRUPT << test_shift)) { 1402 notify_ring(engine); 1403 tasklet |= USES_GUC_SUBMISSION(engine->i915); 1404 } 1405 1406 if (tasklet) 1407 tasklet_hi_schedule(&execlists->tasklet); 1408 } 1409 1410 static irqreturn_t gen8_gt_irq_ack(struct drm_i915_private *dev_priv, 1411 u32 master_ctl, 1412 u32 gt_iir[4]) 1413 { 1414 irqreturn_t ret = IRQ_NONE; 1415 1416 if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) { 1417 gt_iir[0] = I915_READ_FW(GEN8_GT_IIR(0)); 1418 if (gt_iir[0]) { 1419 I915_WRITE_FW(GEN8_GT_IIR(0), gt_iir[0]); 1420 ret = IRQ_HANDLED; 1421 } else 1422 DRM_ERROR("The master control interrupt lied (GT0)!\n"); 1423 } 1424 1425 if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) { 1426 gt_iir[1] = I915_READ_FW(GEN8_GT_IIR(1)); 1427 if (gt_iir[1]) { 1428 I915_WRITE_FW(GEN8_GT_IIR(1), gt_iir[1]); 1429 ret = IRQ_HANDLED; 1430 } else 1431 DRM_ERROR("The master control interrupt lied (GT1)!\n"); 1432 } 1433 1434 if (master_ctl & GEN8_GT_VECS_IRQ) { 1435 gt_iir[3] = I915_READ_FW(GEN8_GT_IIR(3)); 1436 if (gt_iir[3]) { 1437 I915_WRITE_FW(GEN8_GT_IIR(3), gt_iir[3]); 1438 ret = IRQ_HANDLED; 1439 } else 1440 DRM_ERROR("The master control interrupt lied (GT3)!\n"); 1441 } 1442 1443 if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) { 1444 gt_iir[2] = I915_READ_FW(GEN8_GT_IIR(2)); 1445 if (gt_iir[2] & (dev_priv->pm_rps_events | 1446 dev_priv->pm_guc_events)) { 1447 I915_WRITE_FW(GEN8_GT_IIR(2), 1448 gt_iir[2] & (dev_priv->pm_rps_events | 1449 dev_priv->pm_guc_events)); 1450 ret = IRQ_HANDLED; 1451 } else 1452 DRM_ERROR("The master control interrupt lied (PM)!\n"); 1453 } 1454 1455 return ret; 1456 } 1457 1458 static void gen8_gt_irq_handler(struct drm_i915_private *dev_priv, 1459 u32 gt_iir[4]) 1460 { 1461 if (gt_iir[0]) { 1462 gen8_cs_irq_handler(dev_priv->engine[RCS], 1463 gt_iir[0], GEN8_RCS_IRQ_SHIFT); 1464 gen8_cs_irq_handler(dev_priv->engine[BCS], 1465 gt_iir[0], GEN8_BCS_IRQ_SHIFT); 1466 } 1467 1468 if (gt_iir[1]) { 1469 gen8_cs_irq_handler(dev_priv->engine[VCS], 1470 gt_iir[1], GEN8_VCS1_IRQ_SHIFT); 1471 gen8_cs_irq_handler(dev_priv->engine[VCS2], 1472 gt_iir[1], GEN8_VCS2_IRQ_SHIFT); 1473 } 1474 1475 if (gt_iir[3]) 1476 gen8_cs_irq_handler(dev_priv->engine[VECS], 1477 gt_iir[3], GEN8_VECS_IRQ_SHIFT); 1478 1479 if (gt_iir[2] & dev_priv->pm_rps_events) 1480 gen6_rps_irq_handler(dev_priv, gt_iir[2]); 1481 1482 if (gt_iir[2] & dev_priv->pm_guc_events) 1483 gen9_guc_irq_handler(dev_priv, gt_iir[2]); 1484 } 1485 1486 static bool bxt_port_hotplug_long_detect(enum port port, u32 val) 1487 { 1488 switch (port) { 1489 case PORT_A: 1490 return val & PORTA_HOTPLUG_LONG_DETECT; 1491 case PORT_B: 1492 return val & PORTB_HOTPLUG_LONG_DETECT; 1493 case PORT_C: 1494 return val & PORTC_HOTPLUG_LONG_DETECT; 1495 default: 1496 return false; 1497 } 1498 } 1499 1500 static bool spt_port_hotplug2_long_detect(enum port port, u32 val) 1501 { 1502 switch (port) { 1503 case PORT_E: 1504 return val & PORTE_HOTPLUG_LONG_DETECT; 1505 default: 1506 return false; 1507 } 1508 } 1509 1510 static bool spt_port_hotplug_long_detect(enum port port, u32 val) 1511 { 1512 switch (port) { 1513 
case PORT_A: 1514 return val & PORTA_HOTPLUG_LONG_DETECT; 1515 case PORT_B: 1516 return val & PORTB_HOTPLUG_LONG_DETECT; 1517 case PORT_C: 1518 return val & PORTC_HOTPLUG_LONG_DETECT; 1519 case PORT_D: 1520 return val & PORTD_HOTPLUG_LONG_DETECT; 1521 default: 1522 return false; 1523 } 1524 } 1525 1526 static bool ilk_port_hotplug_long_detect(enum port port, u32 val) 1527 { 1528 switch (port) { 1529 case PORT_A: 1530 return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT; 1531 default: 1532 return false; 1533 } 1534 } 1535 1536 static bool pch_port_hotplug_long_detect(enum port port, u32 val) 1537 { 1538 switch (port) { 1539 case PORT_B: 1540 return val & PORTB_HOTPLUG_LONG_DETECT; 1541 case PORT_C: 1542 return val & PORTC_HOTPLUG_LONG_DETECT; 1543 case PORT_D: 1544 return val & PORTD_HOTPLUG_LONG_DETECT; 1545 default: 1546 return false; 1547 } 1548 } 1549 1550 static bool i9xx_port_hotplug_long_detect(enum port port, u32 val) 1551 { 1552 switch (port) { 1553 case PORT_B: 1554 return val & PORTB_HOTPLUG_INT_LONG_PULSE; 1555 case PORT_C: 1556 return val & PORTC_HOTPLUG_INT_LONG_PULSE; 1557 case PORT_D: 1558 return val & PORTD_HOTPLUG_INT_LONG_PULSE; 1559 default: 1560 return false; 1561 } 1562 } 1563 1564 /* 1565 * Get a bit mask of pins that have triggered, and which ones may be long. 1566 * This can be called multiple times with the same masks to accumulate 1567 * hotplug detection results from several registers. 1568 * 1569 * Note that the caller is expected to zero out the masks initially. 1570 */ 1571 static void intel_get_hpd_pins(u32 *pin_mask, u32 *long_mask, 1572 u32 hotplug_trigger, u32 dig_hotplug_reg, 1573 const u32 hpd[HPD_NUM_PINS], 1574 bool long_pulse_detect(enum port port, u32 val)) 1575 { 1576 enum port port; 1577 int i; 1578 1579 for_each_hpd_pin(i) { 1580 if ((hpd[i] & hotplug_trigger) == 0) 1581 continue; 1582 1583 *pin_mask |= BIT(i); 1584 1585 port = intel_hpd_pin_to_port(i); 1586 if (port == PORT_NONE) 1587 continue; 1588 1589 if (long_pulse_detect(port, dig_hotplug_reg)) 1590 *long_mask |= BIT(i); 1591 } 1592 1593 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x\n", 1594 hotplug_trigger, dig_hotplug_reg, *pin_mask); 1595 1596 } 1597 1598 static void gmbus_irq_handler(struct drm_i915_private *dev_priv) 1599 { 1600 wake_up_all(&dev_priv->gmbus_wait_queue); 1601 } 1602 1603 static void dp_aux_irq_handler(struct drm_i915_private *dev_priv) 1604 { 1605 wake_up_all(&dev_priv->gmbus_wait_queue); 1606 } 1607 1608 #if defined(CONFIG_DEBUG_FS) 1609 static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 1610 enum pipe pipe, 1611 uint32_t crc0, uint32_t crc1, 1612 uint32_t crc2, uint32_t crc3, 1613 uint32_t crc4) 1614 { 1615 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe]; 1616 struct intel_pipe_crc_entry *entry; 1617 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); 1618 struct drm_driver *driver = dev_priv->drm.driver; 1619 uint32_t crcs[5]; 1620 int head, tail; 1621 1622 spin_lock(&pipe_crc->lock); 1623 if (pipe_crc->source) { 1624 if (!pipe_crc->entries) { 1625 spin_unlock(&pipe_crc->lock); 1626 DRM_DEBUG_KMS("spurious interrupt\n"); 1627 return; 1628 } 1629 1630 head = pipe_crc->head; 1631 tail = pipe_crc->tail; 1632 1633 if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) { 1634 spin_unlock(&pipe_crc->lock); 1635 DRM_ERROR("CRC buffer overflowing\n"); 1636 return; 1637 } 1638 1639 entry = &pipe_crc->entries[head]; 1640 1641 entry->frame = driver->get_vblank_counter(&dev_priv->drm, pipe); 1642 
entry->crc[0] = crc0; 1643 entry->crc[1] = crc1; 1644 entry->crc[2] = crc2; 1645 entry->crc[3] = crc3; 1646 entry->crc[4] = crc4; 1647 1648 head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1); 1649 pipe_crc->head = head; 1650 1651 spin_unlock(&pipe_crc->lock); 1652 1653 wake_up_interruptible(&pipe_crc->wq); 1654 } else { 1655 /* 1656 * For some not yet identified reason, the first CRC is 1657 * bonkers. So let's just wait for the next vblank and read 1658 * out the buggy result. 1659 * 1660 * On GEN8+ sometimes the second CRC is bonkers as well, so 1661 * don't trust that one either. 1662 */ 1663 if (pipe_crc->skipped == 0 || 1664 (INTEL_GEN(dev_priv) >= 8 && pipe_crc->skipped == 1)) { 1665 pipe_crc->skipped++; 1666 spin_unlock(&pipe_crc->lock); 1667 return; 1668 } 1669 spin_unlock(&pipe_crc->lock); 1670 crcs[0] = crc0; 1671 crcs[1] = crc1; 1672 crcs[2] = crc2; 1673 crcs[3] = crc3; 1674 crcs[4] = crc4; 1675 drm_crtc_add_crc_entry(&crtc->base, true, 1676 drm_crtc_accurate_vblank_count(&crtc->base), 1677 crcs); 1678 } 1679 } 1680 #else 1681 static inline void 1682 display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 1683 enum pipe pipe, 1684 uint32_t crc0, uint32_t crc1, 1685 uint32_t crc2, uint32_t crc3, 1686 uint32_t crc4) {} 1687 #endif 1688 1689 1690 static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 1691 enum pipe pipe) 1692 { 1693 display_pipe_crc_irq_handler(dev_priv, pipe, 1694 I915_READ(PIPE_CRC_RES_1_IVB(pipe)), 1695 0, 0, 0, 0); 1696 } 1697 1698 static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 1699 enum pipe pipe) 1700 { 1701 display_pipe_crc_irq_handler(dev_priv, pipe, 1702 I915_READ(PIPE_CRC_RES_1_IVB(pipe)), 1703 I915_READ(PIPE_CRC_RES_2_IVB(pipe)), 1704 I915_READ(PIPE_CRC_RES_3_IVB(pipe)), 1705 I915_READ(PIPE_CRC_RES_4_IVB(pipe)), 1706 I915_READ(PIPE_CRC_RES_5_IVB(pipe))); 1707 } 1708 1709 static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 1710 enum pipe pipe) 1711 { 1712 uint32_t res1, res2; 1713 1714 if (INTEL_GEN(dev_priv) >= 3) 1715 res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe)); 1716 else 1717 res1 = 0; 1718 1719 if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) 1720 res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe)); 1721 else 1722 res2 = 0; 1723 1724 display_pipe_crc_irq_handler(dev_priv, pipe, 1725 I915_READ(PIPE_CRC_RES_RED(pipe)), 1726 I915_READ(PIPE_CRC_RES_GREEN(pipe)), 1727 I915_READ(PIPE_CRC_RES_BLUE(pipe)), 1728 res1, res2); 1729 } 1730 1731 /* The RPS events need forcewake, so we add them to a work queue and mask their 1732 * IMR bits until the work is done. Other interrupts can be processed without 1733 * the work queue. 
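 *
 * Schematically, the split implemented below (a sketch of the existing flow,
 * not additional code):
 *
 *	hard irq:  gen6_mask_pm_irq(rps events); rps->pm_iir |= pm_iir;
 *	           schedule_work(&rps->work);
 *	worker:    copy and clear rps->pm_iir, reclock via intel_set_rps()
 *	           with forcewake held as needed, then
 *	           gen6_unmask_pm_irq(rps events);
 *
 * so nothing that needs forcewake or can sleep ever runs in hard irq context.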
 */
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	struct intel_rps *rps = &dev_priv->gt_pm.rps;

	if (pm_iir & dev_priv->pm_rps_events) {
		spin_lock(&dev_priv->irq_lock);
		gen6_mask_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
		if (rps->interrupts_enabled) {
			rps->pm_iir |= pm_iir & dev_priv->pm_rps_events;
			schedule_work(&rps->work);
		}
		spin_unlock(&dev_priv->irq_lock);
	}

	if (INTEL_GEN(dev_priv) >= 8)
		return;

	if (HAS_VEBOX(dev_priv)) {
		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
			notify_ring(dev_priv->engine[VECS]);

		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
	}
}

static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 gt_iir)
{
	if (gt_iir & GEN9_GUC_TO_HOST_INT_EVENT) {
		/* Sample the log buffer flush related bits & clear them out
		 * right away from the message identity register to minimize
		 * the probability of losing a flush interrupt when there are
		 * back-to-back flush interrupts.
		 * There can be a new flush interrupt, for a different log
		 * buffer type (like for ISR), whilst the Host is handling one
		 * (for DPC). Since the same bit is used in the message
		 * register for ISR & DPC, it could happen that the GuC sets
		 * the bit for the 2nd interrupt but the Host clears out the
		 * bit while handling the 1st interrupt.
		 */
		u32 msg, flush;

		msg = I915_READ(SOFT_SCRATCH(15));
		flush = msg & (INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED |
			       INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER);
		if (flush) {
			/* Clear the message bits that are handled */
			I915_WRITE(SOFT_SCRATCH(15), msg & ~flush);

			/* Handle flush interrupt in bottom half */
			queue_work(dev_priv->guc.log.runtime.flush_wq,
				   &dev_priv->guc.log.runtime.flush_work);

			dev_priv->guc.log.flush_interrupt_count++;
		} else {
			/* Leaving the unhandled event bits set here does not
			 * cause the interrupt to re-trigger.
			 */
		}
	}
}

static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		I915_WRITE(PIPESTAT(pipe),
			   PIPESTAT_INT_STATUS_MASK |
			   PIPE_FIFO_UNDERRUN_STATUS);

		dev_priv->pipestat_irq_mask[pipe] = 0;
	}
}

static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
				  u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	int pipe;

	spin_lock(&dev_priv->irq_lock);

	if (!dev_priv->display_irqs_enabled) {
		spin_unlock(&dev_priv->irq_lock);
		return;
	}

	for_each_pipe(dev_priv, pipe) {
		i915_reg_t reg;
		u32 status_mask, enable_mask, iir_bit = 0;

		/*
		 * PIPESTAT bits get signalled even when the interrupt is
		 * disabled with the mask bits, and some of the status bits do
		 * not generate interrupts at all (like the underrun bit). Hence
		 * we need to be careful that we only handle what we want to
		 * handle.
		 */

		/* fifo underruns are filtered in the underrun handler.
*/ 1833 status_mask = PIPE_FIFO_UNDERRUN_STATUS; 1834 1835 switch (pipe) { 1836 case PIPE_A: 1837 iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT; 1838 break; 1839 case PIPE_B: 1840 iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; 1841 break; 1842 case PIPE_C: 1843 iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT; 1844 break; 1845 } 1846 if (iir & iir_bit) 1847 status_mask |= dev_priv->pipestat_irq_mask[pipe]; 1848 1849 if (!status_mask) 1850 continue; 1851 1852 reg = PIPESTAT(pipe); 1853 pipe_stats[pipe] = I915_READ(reg) & status_mask; 1854 enable_mask = i915_pipestat_enable_mask(dev_priv, pipe); 1855 1856 /* 1857 * Clear the PIPE*STAT regs before the IIR 1858 */ 1859 if (pipe_stats[pipe]) 1860 I915_WRITE(reg, enable_mask | pipe_stats[pipe]); 1861 } 1862 spin_unlock(&dev_priv->irq_lock); 1863 } 1864 1865 static void i8xx_pipestat_irq_handler(struct drm_i915_private *dev_priv, 1866 u16 iir, u32 pipe_stats[I915_MAX_PIPES]) 1867 { 1868 enum pipe pipe; 1869 1870 for_each_pipe(dev_priv, pipe) { 1871 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS) 1872 drm_handle_vblank(&dev_priv->drm, pipe); 1873 1874 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 1875 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 1876 1877 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 1878 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 1879 } 1880 } 1881 1882 static void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv, 1883 u32 iir, u32 pipe_stats[I915_MAX_PIPES]) 1884 { 1885 bool blc_event = false; 1886 enum pipe pipe; 1887 1888 for_each_pipe(dev_priv, pipe) { 1889 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS) 1890 drm_handle_vblank(&dev_priv->drm, pipe); 1891 1892 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 1893 blc_event = true; 1894 1895 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 1896 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 1897 1898 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 1899 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 1900 } 1901 1902 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 1903 intel_opregion_asle_intr(dev_priv); 1904 } 1905 1906 static void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv, 1907 u32 iir, u32 pipe_stats[I915_MAX_PIPES]) 1908 { 1909 bool blc_event = false; 1910 enum pipe pipe; 1911 1912 for_each_pipe(dev_priv, pipe) { 1913 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS) 1914 drm_handle_vblank(&dev_priv->drm, pipe); 1915 1916 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 1917 blc_event = true; 1918 1919 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 1920 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 1921 1922 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 1923 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 1924 } 1925 1926 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 1927 intel_opregion_asle_intr(dev_priv); 1928 1929 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 1930 gmbus_irq_handler(dev_priv); 1931 } 1932 1933 static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv, 1934 u32 pipe_stats[I915_MAX_PIPES]) 1935 { 1936 enum pipe pipe; 1937 1938 for_each_pipe(dev_priv, pipe) { 1939 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS) 1940 drm_handle_vblank(&dev_priv->drm, pipe); 1941 1942 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 1943 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 1944 1945 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 1946 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 1947 } 1948 1949 if (pipe_stats[0] & 
PIPE_GMBUS_INTERRUPT_STATUS) 1950 gmbus_irq_handler(dev_priv); 1951 } 1952 1953 static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv) 1954 { 1955 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); 1956 1957 if (hotplug_status) 1958 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); 1959 1960 return hotplug_status; 1961 } 1962 1963 static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv, 1964 u32 hotplug_status) 1965 { 1966 u32 pin_mask = 0, long_mask = 0; 1967 1968 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || 1969 IS_CHERRYVIEW(dev_priv)) { 1970 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X; 1971 1972 if (hotplug_trigger) { 1973 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, 1974 hotplug_trigger, hpd_status_g4x, 1975 i9xx_port_hotplug_long_detect); 1976 1977 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 1978 } 1979 1980 if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X) 1981 dp_aux_irq_handler(dev_priv); 1982 } else { 1983 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; 1984 1985 if (hotplug_trigger) { 1986 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, 1987 hotplug_trigger, hpd_status_i915, 1988 i9xx_port_hotplug_long_detect); 1989 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 1990 } 1991 } 1992 } 1993 1994 static irqreturn_t valleyview_irq_handler(int irq, void *arg) 1995 { 1996 struct drm_device *dev = arg; 1997 struct drm_i915_private *dev_priv = to_i915(dev); 1998 irqreturn_t ret = IRQ_NONE; 1999 2000 if (!intel_irqs_enabled(dev_priv)) 2001 return IRQ_NONE; 2002 2003 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2004 disable_rpm_wakeref_asserts(dev_priv); 2005 2006 do { 2007 u32 iir, gt_iir, pm_iir; 2008 u32 pipe_stats[I915_MAX_PIPES] = {}; 2009 u32 hotplug_status = 0; 2010 u32 ier = 0; 2011 2012 gt_iir = I915_READ(GTIIR); 2013 pm_iir = I915_READ(GEN6_PMIIR); 2014 iir = I915_READ(VLV_IIR); 2015 2016 if (gt_iir == 0 && pm_iir == 0 && iir == 0) 2017 break; 2018 2019 ret = IRQ_HANDLED; 2020 2021 /* 2022 * Theory on interrupt generation, based on empirical evidence: 2023 * 2024 * x = ((VLV_IIR & VLV_IER) || 2025 * (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) && 2026 * (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE))); 2027 * 2028 * A CPU interrupt will only be raised when 'x' has a 0->1 edge. 2029 * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to 2030 * guarantee the CPU interrupt will be raised again even if we 2031 * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR 2032 * bits this time around. 2033 */ 2034 I915_WRITE(VLV_MASTER_IER, 0); 2035 ier = I915_READ(VLV_IER); 2036 I915_WRITE(VLV_IER, 0); 2037 2038 if (gt_iir) 2039 I915_WRITE(GTIIR, gt_iir); 2040 if (pm_iir) 2041 I915_WRITE(GEN6_PMIIR, pm_iir); 2042 2043 if (iir & I915_DISPLAY_PORT_INTERRUPT) 2044 hotplug_status = i9xx_hpd_irq_ack(dev_priv); 2045 2046 /* Call regardless, as some status bits might not be 2047 * signalled in iir */ 2048 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats); 2049 2050 if (iir & (I915_LPE_PIPE_A_INTERRUPT | 2051 I915_LPE_PIPE_B_INTERRUPT)) 2052 intel_lpe_audio_irq_handler(dev_priv); 2053 2054 /* 2055 * VLV_IIR is single buffered, and reflects the level 2056 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last. 
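 * (PIPESTAT and PORT_HOTPLUG_STAT have already been acked above; clearing VLV_IIR before them would just let the still pending level immediately re-assert the bits we cleared.)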
2057 */ 2058 if (iir) 2059 I915_WRITE(VLV_IIR, iir); 2060 2061 I915_WRITE(VLV_IER, ier); 2062 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); 2063 POSTING_READ(VLV_MASTER_IER); 2064 2065 if (gt_iir) 2066 snb_gt_irq_handler(dev_priv, gt_iir); 2067 if (pm_iir) 2068 gen6_rps_irq_handler(dev_priv, pm_iir); 2069 2070 if (hotplug_status) 2071 i9xx_hpd_irq_handler(dev_priv, hotplug_status); 2072 2073 valleyview_pipestat_irq_handler(dev_priv, pipe_stats); 2074 } while (0); 2075 2076 enable_rpm_wakeref_asserts(dev_priv); 2077 2078 return ret; 2079 } 2080 2081 static irqreturn_t cherryview_irq_handler(int irq, void *arg) 2082 { 2083 struct drm_device *dev = arg; 2084 struct drm_i915_private *dev_priv = to_i915(dev); 2085 irqreturn_t ret = IRQ_NONE; 2086 2087 if (!intel_irqs_enabled(dev_priv)) 2088 return IRQ_NONE; 2089 2090 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2091 disable_rpm_wakeref_asserts(dev_priv); 2092 2093 do { 2094 u32 master_ctl, iir; 2095 u32 gt_iir[4] = {}; 2096 u32 pipe_stats[I915_MAX_PIPES] = {}; 2097 u32 hotplug_status = 0; 2098 u32 ier = 0; 2099 2100 master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL; 2101 iir = I915_READ(VLV_IIR); 2102 2103 if (master_ctl == 0 && iir == 0) 2104 break; 2105 2106 ret = IRQ_HANDLED; 2107 2108 /* 2109 * Theory on interrupt generation, based on empirical evidence: 2110 * 2111 * x = ((VLV_IIR & VLV_IER) || 2112 * ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) && 2113 * (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL))); 2114 * 2115 * A CPU interrupt will only be raised when 'x' has a 0->1 edge. 2116 * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to 2117 * guarantee the CPU interrupt will be raised again even if we 2118 * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL 2119 * bits this time around. 2120 */ 2121 I915_WRITE(GEN8_MASTER_IRQ, 0); 2122 ier = I915_READ(VLV_IER); 2123 I915_WRITE(VLV_IER, 0); 2124 2125 gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir); 2126 2127 if (iir & I915_DISPLAY_PORT_INTERRUPT) 2128 hotplug_status = i9xx_hpd_irq_ack(dev_priv); 2129 2130 /* Call regardless, as some status bits might not be 2131 * signalled in iir */ 2132 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats); 2133 2134 if (iir & (I915_LPE_PIPE_A_INTERRUPT | 2135 I915_LPE_PIPE_B_INTERRUPT | 2136 I915_LPE_PIPE_C_INTERRUPT)) 2137 intel_lpe_audio_irq_handler(dev_priv); 2138 2139 /* 2140 * VLV_IIR is single buffered, and reflects the level 2141 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last. 2142 */ 2143 if (iir) 2144 I915_WRITE(VLV_IIR, iir); 2145 2146 I915_WRITE(VLV_IER, ier); 2147 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 2148 POSTING_READ(GEN8_MASTER_IRQ); 2149 2150 gen8_gt_irq_handler(dev_priv, gt_iir); 2151 2152 if (hotplug_status) 2153 i9xx_hpd_irq_handler(dev_priv, hotplug_status); 2154 2155 valleyview_pipestat_irq_handler(dev_priv, pipe_stats); 2156 } while (0); 2157 2158 enable_rpm_wakeref_asserts(dev_priv); 2159 2160 return ret; 2161 } 2162 2163 static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv, 2164 u32 hotplug_trigger, 2165 const u32 hpd[HPD_NUM_PINS]) 2166 { 2167 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; 2168 2169 /* 2170 * Somehow the PCH doesn't seem to really ack the interrupt to the CPU 2171 * unless we touch the hotplug register, even if hotplug_trigger is 2172 * zero. Not acking leads to "The master control interrupt lied (SDE)!" 2173 * errors. 
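 * We therefore always write PCH_PORT_HOTPLUG back below, but drop the stale port status bits from the value when hotplug_trigger is zero, so that we don't ack hotplug events nobody is going to handle.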
2174 */ 2175 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); 2176 if (!hotplug_trigger) { 2177 u32 mask = PORTA_HOTPLUG_STATUS_MASK | 2178 PORTD_HOTPLUG_STATUS_MASK | 2179 PORTC_HOTPLUG_STATUS_MASK | 2180 PORTB_HOTPLUG_STATUS_MASK; 2181 dig_hotplug_reg &= ~mask; 2182 } 2183 2184 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); 2185 if (!hotplug_trigger) 2186 return; 2187 2188 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, 2189 dig_hotplug_reg, hpd, 2190 pch_port_hotplug_long_detect); 2191 2192 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2193 } 2194 2195 static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) 2196 { 2197 int pipe; 2198 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK; 2199 2200 ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ibx); 2201 2202 if (pch_iir & SDE_AUDIO_POWER_MASK) { 2203 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >> 2204 SDE_AUDIO_POWER_SHIFT); 2205 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", 2206 port_name(port)); 2207 } 2208 2209 if (pch_iir & SDE_AUX_MASK) 2210 dp_aux_irq_handler(dev_priv); 2211 2212 if (pch_iir & SDE_GMBUS) 2213 gmbus_irq_handler(dev_priv); 2214 2215 if (pch_iir & SDE_AUDIO_HDCP_MASK) 2216 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n"); 2217 2218 if (pch_iir & SDE_AUDIO_TRANS_MASK) 2219 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n"); 2220 2221 if (pch_iir & SDE_POISON) 2222 DRM_ERROR("PCH poison interrupt\n"); 2223 2224 if (pch_iir & SDE_FDI_MASK) 2225 for_each_pipe(dev_priv, pipe) 2226 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 2227 pipe_name(pipe), 2228 I915_READ(FDI_RX_IIR(pipe))); 2229 2230 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE)) 2231 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n"); 2232 2233 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR)) 2234 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n"); 2235 2236 if (pch_iir & SDE_TRANSA_FIFO_UNDER) 2237 intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A); 2238 2239 if (pch_iir & SDE_TRANSB_FIFO_UNDER) 2240 intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_B); 2241 } 2242 2243 static void ivb_err_int_handler(struct drm_i915_private *dev_priv) 2244 { 2245 u32 err_int = I915_READ(GEN7_ERR_INT); 2246 enum pipe pipe; 2247 2248 if (err_int & ERR_INT_POISON) 2249 DRM_ERROR("Poison interrupt\n"); 2250 2251 for_each_pipe(dev_priv, pipe) { 2252 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) 2253 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2254 2255 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) { 2256 if (IS_IVYBRIDGE(dev_priv)) 2257 ivb_pipe_crc_irq_handler(dev_priv, pipe); 2258 else 2259 hsw_pipe_crc_irq_handler(dev_priv, pipe); 2260 } 2261 } 2262 2263 I915_WRITE(GEN7_ERR_INT, err_int); 2264 } 2265 2266 static void cpt_serr_int_handler(struct drm_i915_private *dev_priv) 2267 { 2268 u32 serr_int = I915_READ(SERR_INT); 2269 enum pipe pipe; 2270 2271 if (serr_int & SERR_INT_POISON) 2272 DRM_ERROR("PCH poison interrupt\n"); 2273 2274 for_each_pipe(dev_priv, pipe) 2275 if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe)) 2276 intel_pch_fifo_underrun_irq_handler(dev_priv, pipe); 2277 2278 I915_WRITE(SERR_INT, serr_int); 2279 } 2280 2281 static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) 2282 { 2283 int pipe; 2284 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; 2285 2286 ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_cpt); 2287 2288 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) { 2289 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> 2290 
SDE_AUDIO_POWER_SHIFT_CPT); 2291 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n", 2292 port_name(port)); 2293 } 2294 2295 if (pch_iir & SDE_AUX_MASK_CPT) 2296 dp_aux_irq_handler(dev_priv); 2297 2298 if (pch_iir & SDE_GMBUS_CPT) 2299 gmbus_irq_handler(dev_priv); 2300 2301 if (pch_iir & SDE_AUDIO_CP_REQ_CPT) 2302 DRM_DEBUG_DRIVER("Audio CP request interrupt\n"); 2303 2304 if (pch_iir & SDE_AUDIO_CP_CHG_CPT) 2305 DRM_DEBUG_DRIVER("Audio CP change interrupt\n"); 2306 2307 if (pch_iir & SDE_FDI_MASK_CPT) 2308 for_each_pipe(dev_priv, pipe) 2309 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 2310 pipe_name(pipe), 2311 I915_READ(FDI_RX_IIR(pipe))); 2312 2313 if (pch_iir & SDE_ERROR_CPT) 2314 cpt_serr_int_handler(dev_priv); 2315 } 2316 2317 static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) 2318 { 2319 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT & 2320 ~SDE_PORTE_HOTPLUG_SPT; 2321 u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT; 2322 u32 pin_mask = 0, long_mask = 0; 2323 2324 if (hotplug_trigger) { 2325 u32 dig_hotplug_reg; 2326 2327 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); 2328 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); 2329 2330 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, 2331 dig_hotplug_reg, hpd_spt, 2332 spt_port_hotplug_long_detect); 2333 } 2334 2335 if (hotplug2_trigger) { 2336 u32 dig_hotplug_reg; 2337 2338 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2); 2339 I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg); 2340 2341 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug2_trigger, 2342 dig_hotplug_reg, hpd_spt, 2343 spt_port_hotplug2_long_detect); 2344 } 2345 2346 if (pin_mask) 2347 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2348 2349 if (pch_iir & SDE_GMBUS_CPT) 2350 gmbus_irq_handler(dev_priv); 2351 } 2352 2353 static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv, 2354 u32 hotplug_trigger, 2355 const u32 hpd[HPD_NUM_PINS]) 2356 { 2357 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; 2358 2359 dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL); 2360 I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg); 2361 2362 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, 2363 dig_hotplug_reg, hpd, 2364 ilk_port_hotplug_long_detect); 2365 2366 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2367 } 2368 2369 static void ilk_display_irq_handler(struct drm_i915_private *dev_priv, 2370 u32 de_iir) 2371 { 2372 enum pipe pipe; 2373 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG; 2374 2375 if (hotplug_trigger) 2376 ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ilk); 2377 2378 if (de_iir & DE_AUX_CHANNEL_A) 2379 dp_aux_irq_handler(dev_priv); 2380 2381 if (de_iir & DE_GSE) 2382 intel_opregion_asle_intr(dev_priv); 2383 2384 if (de_iir & DE_POISON) 2385 DRM_ERROR("Poison interrupt\n"); 2386 2387 for_each_pipe(dev_priv, pipe) { 2388 if (de_iir & DE_PIPE_VBLANK(pipe)) 2389 drm_handle_vblank(&dev_priv->drm, pipe); 2390 2391 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe)) 2392 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2393 2394 if (de_iir & DE_PIPE_CRC_DONE(pipe)) 2395 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 2396 } 2397 2398 /* check event from PCH */ 2399 if (de_iir & DE_PCH_EVENT) { 2400 u32 pch_iir = I915_READ(SDEIIR); 2401 2402 if (HAS_PCH_CPT(dev_priv)) 2403 cpt_irq_handler(dev_priv, pch_iir); 2404 else 2405 ibx_irq_handler(dev_priv, pch_iir); 2406 2407 /* should clear PCH hotplug event before clear CPU irq */ 2408 I915_WRITE(SDEIIR, pch_iir); 2409 } 2410 2411 if (IS_GEN5(dev_priv) && 
de_iir & DE_PCU_EVENT) 2412 ironlake_rps_change_irq_handler(dev_priv); 2413 } 2414 2415 static void ivb_display_irq_handler(struct drm_i915_private *dev_priv, 2416 u32 de_iir) 2417 { 2418 enum pipe pipe; 2419 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB; 2420 2421 if (hotplug_trigger) 2422 ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ivb); 2423 2424 if (de_iir & DE_ERR_INT_IVB) 2425 ivb_err_int_handler(dev_priv); 2426 2427 if (de_iir & DE_AUX_CHANNEL_A_IVB) 2428 dp_aux_irq_handler(dev_priv); 2429 2430 if (de_iir & DE_GSE_IVB) 2431 intel_opregion_asle_intr(dev_priv); 2432 2433 for_each_pipe(dev_priv, pipe) { 2434 if (de_iir & (DE_PIPE_VBLANK_IVB(pipe))) 2435 drm_handle_vblank(&dev_priv->drm, pipe); 2436 } 2437 2438 /* check event from PCH */ 2439 if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) { 2440 u32 pch_iir = I915_READ(SDEIIR); 2441 2442 cpt_irq_handler(dev_priv, pch_iir); 2443 2444 /* clear PCH hotplug event before clearing CPU irq */ 2445 I915_WRITE(SDEIIR, pch_iir); 2446 } 2447 } 2448 2449 /* 2450 * To handle irqs with the minimum potential races with fresh interrupts, we: 2451 * 1 - Disable Master Interrupt Control. 2452 * 2 - Find the source(s) of the interrupt. 2453 * 3 - Clear the Interrupt Identity bits (IIR). 2454 * 4 - Process the interrupt(s) that had bits set in the IIRs. 2455 * 5 - Re-enable Master Interrupt Control. 2456 */ 2457 static irqreturn_t ironlake_irq_handler(int irq, void *arg) 2458 { 2459 struct drm_device *dev = arg; 2460 struct drm_i915_private *dev_priv = to_i915(dev); 2461 u32 de_iir, gt_iir, de_ier, sde_ier = 0; 2462 irqreturn_t ret = IRQ_NONE; 2463 2464 if (!intel_irqs_enabled(dev_priv)) 2465 return IRQ_NONE; 2466 2467 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2468 disable_rpm_wakeref_asserts(dev_priv); 2469 2470 /* disable master interrupt before clearing iir */ 2471 de_ier = I915_READ(DEIER); 2472 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); 2473 POSTING_READ(DEIER); 2474 2475 /* Disable south interrupts. We'll only write to SDEIIR once, so further 2476 * interrupts will be stored on its back queue, and then we'll be 2477 * able to process them after we restore SDEIER (as soon as we restore 2478 * it, we'll get an interrupt if SDEIIR still has something to process 2479 * due to its back queue).
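 * This mirrors the DE_MASTER_IRQ_CONTROL disable above, just for the PCH/SDE side of things.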
*/ 2480 if (!HAS_PCH_NOP(dev_priv)) { 2481 sde_ier = I915_READ(SDEIER); 2482 I915_WRITE(SDEIER, 0); 2483 POSTING_READ(SDEIER); 2484 } 2485 2486 /* Find, clear, then process each source of interrupt */ 2487 2488 gt_iir = I915_READ(GTIIR); 2489 if (gt_iir) { 2490 I915_WRITE(GTIIR, gt_iir); 2491 ret = IRQ_HANDLED; 2492 if (INTEL_GEN(dev_priv) >= 6) 2493 snb_gt_irq_handler(dev_priv, gt_iir); 2494 else 2495 ilk_gt_irq_handler(dev_priv, gt_iir); 2496 } 2497 2498 de_iir = I915_READ(DEIIR); 2499 if (de_iir) { 2500 I915_WRITE(DEIIR, de_iir); 2501 ret = IRQ_HANDLED; 2502 if (INTEL_GEN(dev_priv) >= 7) 2503 ivb_display_irq_handler(dev_priv, de_iir); 2504 else 2505 ilk_display_irq_handler(dev_priv, de_iir); 2506 } 2507 2508 if (INTEL_GEN(dev_priv) >= 6) { 2509 u32 pm_iir = I915_READ(GEN6_PMIIR); 2510 if (pm_iir) { 2511 I915_WRITE(GEN6_PMIIR, pm_iir); 2512 ret = IRQ_HANDLED; 2513 gen6_rps_irq_handler(dev_priv, pm_iir); 2514 } 2515 } 2516 2517 I915_WRITE(DEIER, de_ier); 2518 POSTING_READ(DEIER); 2519 if (!HAS_PCH_NOP(dev_priv)) { 2520 I915_WRITE(SDEIER, sde_ier); 2521 POSTING_READ(SDEIER); 2522 } 2523 2524 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2525 enable_rpm_wakeref_asserts(dev_priv); 2526 2527 return ret; 2528 } 2529 2530 static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv, 2531 u32 hotplug_trigger, 2532 const u32 hpd[HPD_NUM_PINS]) 2533 { 2534 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; 2535 2536 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); 2537 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); 2538 2539 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, 2540 dig_hotplug_reg, hpd, 2541 bxt_port_hotplug_long_detect); 2542 2543 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2544 } 2545 2546 static irqreturn_t 2547 gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) 2548 { 2549 irqreturn_t ret = IRQ_NONE; 2550 u32 iir; 2551 enum pipe pipe; 2552 2553 if (master_ctl & GEN8_DE_MISC_IRQ) { 2554 iir = I915_READ(GEN8_DE_MISC_IIR); 2555 if (iir) { 2556 I915_WRITE(GEN8_DE_MISC_IIR, iir); 2557 ret = IRQ_HANDLED; 2558 if (iir & GEN8_DE_MISC_GSE) 2559 intel_opregion_asle_intr(dev_priv); 2560 else 2561 DRM_ERROR("Unexpected DE Misc interrupt\n"); 2562 } 2563 else 2564 DRM_ERROR("The master control interrupt lied (DE MISC)!\n"); 2565 } 2566 2567 if (master_ctl & GEN8_DE_PORT_IRQ) { 2568 iir = I915_READ(GEN8_DE_PORT_IIR); 2569 if (iir) { 2570 u32 tmp_mask; 2571 bool found = false; 2572 2573 I915_WRITE(GEN8_DE_PORT_IIR, iir); 2574 ret = IRQ_HANDLED; 2575 2576 tmp_mask = GEN8_AUX_CHANNEL_A; 2577 if (INTEL_GEN(dev_priv) >= 9) 2578 tmp_mask |= GEN9_AUX_CHANNEL_B | 2579 GEN9_AUX_CHANNEL_C | 2580 GEN9_AUX_CHANNEL_D; 2581 2582 if (iir & tmp_mask) { 2583 dp_aux_irq_handler(dev_priv); 2584 found = true; 2585 } 2586 2587 if (IS_GEN9_LP(dev_priv)) { 2588 tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK; 2589 if (tmp_mask) { 2590 bxt_hpd_irq_handler(dev_priv, tmp_mask, 2591 hpd_bxt); 2592 found = true; 2593 } 2594 } else if (IS_BROADWELL(dev_priv)) { 2595 tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG; 2596 if (tmp_mask) { 2597 ilk_hpd_irq_handler(dev_priv, 2598 tmp_mask, hpd_bdw); 2599 found = true; 2600 } 2601 } 2602 2603 if (IS_GEN9_LP(dev_priv) && (iir & BXT_DE_PORT_GMBUS)) { 2604 gmbus_irq_handler(dev_priv); 2605 found = true; 2606 } 2607 2608 if (!found) 2609 DRM_ERROR("Unexpected DE Port interrupt\n"); 2610 } 2611 else 2612 DRM_ERROR("The master control interrupt lied (DE PORT)!\n"); 2613 } 2614 2615 for_each_pipe(dev_priv, pipe) { 2616 u32 
fault_errors; 2617 2618 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe))) 2619 continue; 2620 2621 iir = I915_READ(GEN8_DE_PIPE_IIR(pipe)); 2622 if (!iir) { 2623 DRM_ERROR("The master control interrupt lied (DE PIPE)!\n"); 2624 continue; 2625 } 2626 2627 ret = IRQ_HANDLED; 2628 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir); 2629 2630 if (iir & GEN8_PIPE_VBLANK) 2631 drm_handle_vblank(&dev_priv->drm, pipe); 2632 2633 if (iir & GEN8_PIPE_CDCLK_CRC_DONE) 2634 hsw_pipe_crc_irq_handler(dev_priv, pipe); 2635 2636 if (iir & GEN8_PIPE_FIFO_UNDERRUN) 2637 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2638 2639 fault_errors = iir; 2640 if (INTEL_GEN(dev_priv) >= 9) 2641 fault_errors &= GEN9_DE_PIPE_IRQ_FAULT_ERRORS; 2642 else 2643 fault_errors &= GEN8_DE_PIPE_IRQ_FAULT_ERRORS; 2644 2645 if (fault_errors) 2646 DRM_ERROR("Fault errors on pipe %c: 0x%08x\n", 2647 pipe_name(pipe), 2648 fault_errors); 2649 } 2650 2651 if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) && 2652 master_ctl & GEN8_DE_PCH_IRQ) { 2653 /* 2654 * FIXME(BDW): Assume for now that the new interrupt handling 2655 * scheme also closed the SDE interrupt handling race we've seen 2656 * on older pch-split platforms. But this needs testing. 2657 */ 2658 iir = I915_READ(SDEIIR); 2659 if (iir) { 2660 I915_WRITE(SDEIIR, iir); 2661 ret = IRQ_HANDLED; 2662 2663 if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv) || 2664 HAS_PCH_CNP(dev_priv)) 2665 spt_irq_handler(dev_priv, iir); 2666 else 2667 cpt_irq_handler(dev_priv, iir); 2668 } else { 2669 /* 2670 * Like on previous PCH there seems to be something 2671 * fishy going on with forwarding PCH interrupts. 2672 */ 2673 DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n"); 2674 } 2675 } 2676 2677 return ret; 2678 } 2679 2680 static irqreturn_t gen8_irq_handler(int irq, void *arg) 2681 { 2682 struct drm_device *dev = arg; 2683 struct drm_i915_private *dev_priv = to_i915(dev); 2684 u32 master_ctl; 2685 u32 gt_iir[4] = {}; 2686 irqreturn_t ret; 2687 2688 if (!intel_irqs_enabled(dev_priv)) 2689 return IRQ_NONE; 2690 2691 master_ctl = I915_READ_FW(GEN8_MASTER_IRQ); 2692 master_ctl &= ~GEN8_MASTER_IRQ_CONTROL; 2693 if (!master_ctl) 2694 return IRQ_NONE; 2695 2696 I915_WRITE_FW(GEN8_MASTER_IRQ, 0); 2697 2698 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2699 disable_rpm_wakeref_asserts(dev_priv); 2700 2701 /* Find, clear, then process each source of interrupt */ 2702 ret = gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir); 2703 gen8_gt_irq_handler(dev_priv, gt_iir); 2704 ret |= gen8_de_irq_handler(dev_priv, master_ctl); 2705 2706 I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 2707 POSTING_READ_FW(GEN8_MASTER_IRQ); 2708 2709 enable_rpm_wakeref_asserts(dev_priv); 2710 2711 return ret; 2712 } 2713 2714 struct wedge_me { 2715 struct delayed_work work; 2716 struct drm_i915_private *i915; 2717 const char *name; 2718 }; 2719 2720 static void wedge_me(struct work_struct *work) 2721 { 2722 struct wedge_me *w = container_of(work, typeof(*w), work.work); 2723 2724 dev_err(w->i915->drm.dev, 2725 "%s timed out, cancelling all in-flight rendering.\n", 2726 w->name); 2727 i915_gem_set_wedged(w->i915); 2728 } 2729 2730 static void __init_wedge(struct wedge_me *w, 2731 struct drm_i915_private *i915, 2732 long timeout, 2733 const char *name) 2734 { 2735 w->i915 = i915; 2736 w->name = name; 2737 2738 INIT_DELAYED_WORK_ONSTACK(&w->work, wedge_me); 2739 schedule_delayed_work(&w->work, timeout); 2740 } 2741 2742 static void __fini_wedge(struct wedge_me *w) 2743 { 2744 
cancel_delayed_work_sync(&w->work); 2745 destroy_delayed_work_on_stack(&w->work); 2746 w->i915 = NULL; 2747 } 2748 2749 #define i915_wedge_on_timeout(W, DEV, TIMEOUT) \ 2750 for (__init_wedge((W), (DEV), (TIMEOUT), __func__); \ 2751 (W)->i915; \ 2752 __fini_wedge((W))) 2753 2754 /** 2755 * i915_reset_device - do process context error handling work 2756 * @dev_priv: i915 device private 2757 * 2758 * Fire an error uevent so userspace can see that a hang or error 2759 * was detected. 2760 */ 2761 static void i915_reset_device(struct drm_i915_private *dev_priv) 2762 { 2763 struct kobject *kobj = &dev_priv->drm.primary->kdev->kobj; 2764 char *error_event[] = { I915_ERROR_UEVENT "=1", NULL }; 2765 char *reset_event[] = { I915_RESET_UEVENT "=1", NULL }; 2766 char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL }; 2767 struct wedge_me w; 2768 2769 kobject_uevent_env(kobj, KOBJ_CHANGE, error_event); 2770 2771 DRM_DEBUG_DRIVER("resetting chip\n"); 2772 kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event); 2773 2774 /* Use a watchdog to ensure that our reset completes */ 2775 i915_wedge_on_timeout(&w, dev_priv, 5*HZ) { 2776 intel_prepare_reset(dev_priv); 2777 2778 /* Signal that locked waiters should reset the GPU */ 2779 set_bit(I915_RESET_HANDOFF, &dev_priv->gpu_error.flags); 2780 wake_up_all(&dev_priv->gpu_error.wait_queue); 2781 2782 /* Wait for anyone holding the lock to wakeup, without 2783 * blocking indefinitely on struct_mutex. 2784 */ 2785 do { 2786 if (mutex_trylock(&dev_priv->drm.struct_mutex)) { 2787 i915_reset(dev_priv, 0); 2788 mutex_unlock(&dev_priv->drm.struct_mutex); 2789 } 2790 } while (wait_on_bit_timeout(&dev_priv->gpu_error.flags, 2791 I915_RESET_HANDOFF, 2792 TASK_UNINTERRUPTIBLE, 2793 1)); 2794 2795 intel_finish_reset(dev_priv); 2796 } 2797 2798 if (!test_bit(I915_WEDGED, &dev_priv->gpu_error.flags)) 2799 kobject_uevent_env(kobj, 2800 KOBJ_CHANGE, reset_done_event); 2801 } 2802 2803 static void i915_clear_error_registers(struct drm_i915_private *dev_priv) 2804 { 2805 u32 eir; 2806 2807 if (!IS_GEN2(dev_priv)) 2808 I915_WRITE(PGTBL_ER, I915_READ(PGTBL_ER)); 2809 2810 if (INTEL_GEN(dev_priv) < 4) 2811 I915_WRITE(IPEIR, I915_READ(IPEIR)); 2812 else 2813 I915_WRITE(IPEIR_I965, I915_READ(IPEIR_I965)); 2814 2815 I915_WRITE(EIR, I915_READ(EIR)); 2816 eir = I915_READ(EIR); 2817 if (eir) { 2818 /* 2819 * some errors might have become stuck, 2820 * mask them. 2821 */ 2822 DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masking\n", eir); 2823 I915_WRITE(EMR, I915_READ(EMR) | eir); 2824 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 2825 } 2826 } 2827 2828 /** 2829 * i915_handle_error - handle a gpu error 2830 * @dev_priv: i915 device private 2831 * @engine_mask: mask representing engines that are hung 2832 * @fmt: Error message format string 2833 * 2834 * Do some basic checking of register state at error time and 2835 * dump it to the syslog. Also call i915_capture_error_state() to make 2836 * sure we get a record and make it available in debugfs. Fire a uevent 2837 * so userspace knows something bad happened (should trigger collection 2838 * of a ring dump etc.). 2839 */ 2840 void i915_handle_error(struct drm_i915_private *dev_priv, 2841 u32 engine_mask, 2842 const char *fmt, ...) 
2843 { 2844 struct intel_engine_cs *engine; 2845 unsigned int tmp; 2846 va_list args; 2847 char error_msg[80]; 2848 2849 va_start(args, fmt); 2850 vscnprintf(error_msg, sizeof(error_msg), fmt, args); 2851 va_end(args); 2852 2853 /* 2854 * In most cases it's guaranteed that we get here with an RPM 2855 * reference held, for example because there is a pending GPU 2856 * request that won't finish until the reset is done. This 2857 * isn't the case at least when we get here by doing a 2858 * simulated reset via debugfs, so get an RPM reference. 2859 */ 2860 intel_runtime_pm_get(dev_priv); 2861 2862 i915_capture_error_state(dev_priv, engine_mask, error_msg); 2863 i915_clear_error_registers(dev_priv); 2864 2865 /* 2866 * Try engine reset when available. We fall back to full reset if 2867 * single reset fails. 2868 */ 2869 if (intel_has_reset_engine(dev_priv)) { 2870 for_each_engine_masked(engine, dev_priv, engine_mask, tmp) { 2871 BUILD_BUG_ON(I915_RESET_MODESET >= I915_RESET_ENGINE); 2872 if (test_and_set_bit(I915_RESET_ENGINE + engine->id, 2873 &dev_priv->gpu_error.flags)) 2874 continue; 2875 2876 if (i915_reset_engine(engine, 0) == 0) 2877 engine_mask &= ~intel_engine_flag(engine); 2878 2879 clear_bit(I915_RESET_ENGINE + engine->id, 2880 &dev_priv->gpu_error.flags); 2881 wake_up_bit(&dev_priv->gpu_error.flags, 2882 I915_RESET_ENGINE + engine->id); 2883 } 2884 } 2885 2886 if (!engine_mask) 2887 goto out; 2888 2889 /* Full reset needs the mutex, stop any other user trying to do so. */ 2890 if (test_and_set_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags)) { 2891 wait_event(dev_priv->gpu_error.reset_queue, 2892 !test_bit(I915_RESET_BACKOFF, 2893 &dev_priv->gpu_error.flags)); 2894 goto out; 2895 } 2896 2897 /* Prevent any other reset-engine attempt. */ 2898 for_each_engine(engine, dev_priv, tmp) { 2899 while (test_and_set_bit(I915_RESET_ENGINE + engine->id, 2900 &dev_priv->gpu_error.flags)) 2901 wait_on_bit(&dev_priv->gpu_error.flags, 2902 I915_RESET_ENGINE + engine->id, 2903 TASK_UNINTERRUPTIBLE); 2904 } 2905 2906 i915_reset_device(dev_priv); 2907 2908 for_each_engine(engine, dev_priv, tmp) { 2909 clear_bit(I915_RESET_ENGINE + engine->id, 2910 &dev_priv->gpu_error.flags); 2911 } 2912 2913 clear_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags); 2914 wake_up_all(&dev_priv->gpu_error.reset_queue); 2915 2916 out: 2917 intel_runtime_pm_put(dev_priv); 2918 } 2919 2920 /* Called from drm generic code, passed 'crtc' which 2921 * we use as a pipe index 2922 */ 2923 static int i8xx_enable_vblank(struct drm_device *dev, unsigned int pipe) 2924 { 2925 struct drm_i915_private *dev_priv = to_i915(dev); 2926 unsigned long irqflags; 2927 2928 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2929 i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS); 2930 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2931 2932 return 0; 2933 } 2934 2935 static int i965_enable_vblank(struct drm_device *dev, unsigned int pipe) 2936 { 2937 struct drm_i915_private *dev_priv = to_i915(dev); 2938 unsigned long irqflags; 2939 2940 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2941 i915_enable_pipestat(dev_priv, pipe, 2942 PIPE_START_VBLANK_INTERRUPT_STATUS); 2943 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2944 2945 return 0; 2946 } 2947 2948 static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe) 2949 { 2950 struct drm_i915_private *dev_priv = to_i915(dev); 2951 unsigned long irqflags; 2952 uint32_t bit = INTEL_GEN(dev_priv) >= 7 ? 
2953 DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe); 2954 2955 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2956 ilk_enable_display_irq(dev_priv, bit); 2957 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2958 2959 return 0; 2960 } 2961 2962 static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe) 2963 { 2964 struct drm_i915_private *dev_priv = to_i915(dev); 2965 unsigned long irqflags; 2966 2967 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2968 bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK); 2969 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2970 2971 return 0; 2972 } 2973 2974 /* Called from drm generic code, passed 'crtc' which 2975 * we use as a pipe index 2976 */ 2977 static void i8xx_disable_vblank(struct drm_device *dev, unsigned int pipe) 2978 { 2979 struct drm_i915_private *dev_priv = to_i915(dev); 2980 unsigned long irqflags; 2981 2982 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2983 i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS); 2984 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2985 } 2986 2987 static void i965_disable_vblank(struct drm_device *dev, unsigned int pipe) 2988 { 2989 struct drm_i915_private *dev_priv = to_i915(dev); 2990 unsigned long irqflags; 2991 2992 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2993 i915_disable_pipestat(dev_priv, pipe, 2994 PIPE_START_VBLANK_INTERRUPT_STATUS); 2995 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2996 } 2997 2998 static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe) 2999 { 3000 struct drm_i915_private *dev_priv = to_i915(dev); 3001 unsigned long irqflags; 3002 uint32_t bit = INTEL_GEN(dev_priv) >= 7 ? 3003 DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe); 3004 3005 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3006 ilk_disable_display_irq(dev_priv, bit); 3007 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3008 } 3009 3010 static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe) 3011 { 3012 struct drm_i915_private *dev_priv = to_i915(dev); 3013 unsigned long irqflags; 3014 3015 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3016 bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK); 3017 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3018 } 3019 3020 static void ibx_irq_reset(struct drm_i915_private *dev_priv) 3021 { 3022 if (HAS_PCH_NOP(dev_priv)) 3023 return; 3024 3025 GEN3_IRQ_RESET(SDE); 3026 3027 if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv)) 3028 I915_WRITE(SERR_INT, 0xffffffff); 3029 } 3030 3031 /* 3032 * SDEIER is also touched by the interrupt handler to work around missed PCH 3033 * interrupts. Hence we can't update it after the interrupt handler is enabled - 3034 * instead we unconditionally enable all PCH interrupt sources here, but then 3035 * only unmask them as needed with SDEIMR. 3036 * 3037 * This function needs to be called before interrupts are enabled. 
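 * (Both ironlake_irq_postinstall() and gen8_irq_postinstall() below call it before they enable their respective master interrupt control.)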
3038 */ 3039 static void ibx_irq_pre_postinstall(struct drm_device *dev) 3040 { 3041 struct drm_i915_private *dev_priv = to_i915(dev); 3042 3043 if (HAS_PCH_NOP(dev_priv)) 3044 return; 3045 3046 WARN_ON(I915_READ(SDEIER) != 0); 3047 I915_WRITE(SDEIER, 0xffffffff); 3048 POSTING_READ(SDEIER); 3049 } 3050 3051 static void gen5_gt_irq_reset(struct drm_i915_private *dev_priv) 3052 { 3053 GEN3_IRQ_RESET(GT); 3054 if (INTEL_GEN(dev_priv) >= 6) 3055 GEN3_IRQ_RESET(GEN6_PM); 3056 } 3057 3058 static void vlv_display_irq_reset(struct drm_i915_private *dev_priv) 3059 { 3060 if (IS_CHERRYVIEW(dev_priv)) 3061 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV); 3062 else 3063 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK); 3064 3065 i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0); 3066 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3067 3068 i9xx_pipestat_irq_reset(dev_priv); 3069 3070 GEN3_IRQ_RESET(VLV_); 3071 dev_priv->irq_mask = ~0u; 3072 } 3073 3074 static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv) 3075 { 3076 u32 pipestat_mask; 3077 u32 enable_mask; 3078 enum pipe pipe; 3079 3080 pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS; 3081 3082 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); 3083 for_each_pipe(dev_priv, pipe) 3084 i915_enable_pipestat(dev_priv, pipe, pipestat_mask); 3085 3086 enable_mask = I915_DISPLAY_PORT_INTERRUPT | 3087 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3088 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3089 I915_LPE_PIPE_A_INTERRUPT | 3090 I915_LPE_PIPE_B_INTERRUPT; 3091 3092 if (IS_CHERRYVIEW(dev_priv)) 3093 enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT | 3094 I915_LPE_PIPE_C_INTERRUPT; 3095 3096 WARN_ON(dev_priv->irq_mask != ~0u); 3097 3098 dev_priv->irq_mask = ~enable_mask; 3099 3100 GEN3_IRQ_INIT(VLV_, dev_priv->irq_mask, enable_mask); 3101 } 3102 3103 /* drm_dma.h hooks 3104 */ 3105 static void ironlake_irq_reset(struct drm_device *dev) 3106 { 3107 struct drm_i915_private *dev_priv = to_i915(dev); 3108 3109 if (IS_GEN5(dev_priv)) 3110 I915_WRITE(HWSTAM, 0xffffffff); 3111 3112 GEN3_IRQ_RESET(DE); 3113 if (IS_GEN7(dev_priv)) 3114 I915_WRITE(GEN7_ERR_INT, 0xffffffff); 3115 3116 gen5_gt_irq_reset(dev_priv); 3117 3118 ibx_irq_reset(dev_priv); 3119 } 3120 3121 static void valleyview_irq_reset(struct drm_device *dev) 3122 { 3123 struct drm_i915_private *dev_priv = to_i915(dev); 3124 3125 I915_WRITE(VLV_MASTER_IER, 0); 3126 POSTING_READ(VLV_MASTER_IER); 3127 3128 gen5_gt_irq_reset(dev_priv); 3129 3130 spin_lock_irq(&dev_priv->irq_lock); 3131 if (dev_priv->display_irqs_enabled) 3132 vlv_display_irq_reset(dev_priv); 3133 spin_unlock_irq(&dev_priv->irq_lock); 3134 } 3135 3136 static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv) 3137 { 3138 GEN8_IRQ_RESET_NDX(GT, 0); 3139 GEN8_IRQ_RESET_NDX(GT, 1); 3140 GEN8_IRQ_RESET_NDX(GT, 2); 3141 GEN8_IRQ_RESET_NDX(GT, 3); 3142 } 3143 3144 static void gen8_irq_reset(struct drm_device *dev) 3145 { 3146 struct drm_i915_private *dev_priv = to_i915(dev); 3147 int pipe; 3148 3149 I915_WRITE(GEN8_MASTER_IRQ, 0); 3150 POSTING_READ(GEN8_MASTER_IRQ); 3151 3152 gen8_gt_irq_reset(dev_priv); 3153 3154 for_each_pipe(dev_priv, pipe) 3155 if (intel_display_power_is_enabled(dev_priv, 3156 POWER_DOMAIN_PIPE(pipe))) 3157 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe); 3158 3159 GEN3_IRQ_RESET(GEN8_DE_PORT_); 3160 GEN3_IRQ_RESET(GEN8_DE_MISC_); 3161 GEN3_IRQ_RESET(GEN8_PCU_); 3162 3163 if (HAS_PCH_SPLIT(dev_priv)) 3164 ibx_irq_reset(dev_priv); 3165 } 3166 3167 void gen8_irq_power_well_post_enable(struct 
drm_i915_private *dev_priv, 3168 u8 pipe_mask) 3169 { 3170 uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN; 3171 enum pipe pipe; 3172 3173 spin_lock_irq(&dev_priv->irq_lock); 3174 3175 if (!intel_irqs_enabled(dev_priv)) { 3176 spin_unlock_irq(&dev_priv->irq_lock); 3177 return; 3178 } 3179 3180 for_each_pipe_masked(dev_priv, pipe, pipe_mask) 3181 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe, 3182 dev_priv->de_irq_mask[pipe], 3183 ~dev_priv->de_irq_mask[pipe] | extra_ier); 3184 3185 spin_unlock_irq(&dev_priv->irq_lock); 3186 } 3187 3188 void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv, 3189 u8 pipe_mask) 3190 { 3191 enum pipe pipe; 3192 3193 spin_lock_irq(&dev_priv->irq_lock); 3194 3195 if (!intel_irqs_enabled(dev_priv)) { 3196 spin_unlock_irq(&dev_priv->irq_lock); 3197 return; 3198 } 3199 3200 for_each_pipe_masked(dev_priv, pipe, pipe_mask) 3201 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe); 3202 3203 spin_unlock_irq(&dev_priv->irq_lock); 3204 3205 /* make sure we're done processing display irqs */ 3206 synchronize_irq(dev_priv->drm.irq); 3207 } 3208 3209 static void cherryview_irq_reset(struct drm_device *dev) 3210 { 3211 struct drm_i915_private *dev_priv = to_i915(dev); 3212 3213 I915_WRITE(GEN8_MASTER_IRQ, 0); 3214 POSTING_READ(GEN8_MASTER_IRQ); 3215 3216 gen8_gt_irq_reset(dev_priv); 3217 3218 GEN3_IRQ_RESET(GEN8_PCU_); 3219 3220 spin_lock_irq(&dev_priv->irq_lock); 3221 if (dev_priv->display_irqs_enabled) 3222 vlv_display_irq_reset(dev_priv); 3223 spin_unlock_irq(&dev_priv->irq_lock); 3224 } 3225 3226 static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv, 3227 const u32 hpd[HPD_NUM_PINS]) 3228 { 3229 struct intel_encoder *encoder; 3230 u32 enabled_irqs = 0; 3231 3232 for_each_intel_encoder(&dev_priv->drm, encoder) 3233 if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED) 3234 enabled_irqs |= hpd[encoder->hpd_pin]; 3235 3236 return enabled_irqs; 3237 } 3238 3239 static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv) 3240 { 3241 u32 hotplug; 3242 3243 /* 3244 * Enable digital hotplug on the PCH, and configure the DP short pulse 3245 * duration to 2ms (which is the minimum in the Display Port spec). 3246 * The pulse duration bits are reserved on LPT+. 3247 */ 3248 hotplug = I915_READ(PCH_PORT_HOTPLUG); 3249 hotplug &= ~(PORTB_PULSE_DURATION_MASK | 3250 PORTC_PULSE_DURATION_MASK | 3251 PORTD_PULSE_DURATION_MASK); 3252 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms; 3253 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms; 3254 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms; 3255 /* 3256 * When CPU and PCH are on the same package, port A 3257 * HPD must be enabled in both north and south. 
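 * (LPT-LP, checked for below, is presumably such an on-package PCH configuration.)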
3258 */ 3259 if (HAS_PCH_LPT_LP(dev_priv)) 3260 hotplug |= PORTA_HOTPLUG_ENABLE; 3261 I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 3262 } 3263 3264 static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv) 3265 { 3266 u32 hotplug_irqs, enabled_irqs; 3267 3268 if (HAS_PCH_IBX(dev_priv)) { 3269 hotplug_irqs = SDE_HOTPLUG_MASK; 3270 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ibx); 3271 } else { 3272 hotplug_irqs = SDE_HOTPLUG_MASK_CPT; 3273 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_cpt); 3274 } 3275 3276 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 3277 3278 ibx_hpd_detection_setup(dev_priv); 3279 } 3280 3281 static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv) 3282 { 3283 u32 val, hotplug; 3284 3285 /* Display WA #1179 WaHardHangonHotPlug: cnp */ 3286 if (HAS_PCH_CNP(dev_priv)) { 3287 val = I915_READ(SOUTH_CHICKEN1); 3288 val &= ~CHASSIS_CLK_REQ_DURATION_MASK; 3289 val |= CHASSIS_CLK_REQ_DURATION(0xf); 3290 I915_WRITE(SOUTH_CHICKEN1, val); 3291 } 3292 3293 /* Enable digital hotplug on the PCH */ 3294 hotplug = I915_READ(PCH_PORT_HOTPLUG); 3295 hotplug |= PORTA_HOTPLUG_ENABLE | 3296 PORTB_HOTPLUG_ENABLE | 3297 PORTC_HOTPLUG_ENABLE | 3298 PORTD_HOTPLUG_ENABLE; 3299 I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 3300 3301 hotplug = I915_READ(PCH_PORT_HOTPLUG2); 3302 hotplug |= PORTE_HOTPLUG_ENABLE; 3303 I915_WRITE(PCH_PORT_HOTPLUG2, hotplug); 3304 } 3305 3306 static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv) 3307 { 3308 u32 hotplug_irqs, enabled_irqs; 3309 3310 hotplug_irqs = SDE_HOTPLUG_MASK_SPT; 3311 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_spt); 3312 3313 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 3314 3315 spt_hpd_detection_setup(dev_priv); 3316 } 3317 3318 static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv) 3319 { 3320 u32 hotplug; 3321 3322 /* 3323 * Enable digital hotplug on the CPU, and configure the DP short pulse 3324 * duration to 2ms (which is the minimum in the Display Port spec) 3325 * The pulse duration bits are reserved on HSW+. 
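 * On those parts the write below therefore effectively only toggles the enable bit.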
3326 */ 3327 hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL); 3328 hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK; 3329 hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE | 3330 DIGITAL_PORTA_PULSE_DURATION_2ms; 3331 I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug); 3332 } 3333 3334 static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv) 3335 { 3336 u32 hotplug_irqs, enabled_irqs; 3337 3338 if (INTEL_GEN(dev_priv) >= 8) { 3339 hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG; 3340 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bdw); 3341 3342 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs); 3343 } else if (INTEL_GEN(dev_priv) >= 7) { 3344 hotplug_irqs = DE_DP_A_HOTPLUG_IVB; 3345 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ivb); 3346 3347 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs); 3348 } else { 3349 hotplug_irqs = DE_DP_A_HOTPLUG; 3350 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ilk); 3351 3352 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs); 3353 } 3354 3355 ilk_hpd_detection_setup(dev_priv); 3356 3357 ibx_hpd_irq_setup(dev_priv); 3358 } 3359 3360 static void __bxt_hpd_detection_setup(struct drm_i915_private *dev_priv, 3361 u32 enabled_irqs) 3362 { 3363 u32 hotplug; 3364 3365 hotplug = I915_READ(PCH_PORT_HOTPLUG); 3366 hotplug |= PORTA_HOTPLUG_ENABLE | 3367 PORTB_HOTPLUG_ENABLE | 3368 PORTC_HOTPLUG_ENABLE; 3369 3370 DRM_DEBUG_KMS("Invert bit setting: hp_ctl:%x hp_port:%x\n", 3371 hotplug, enabled_irqs); 3372 hotplug &= ~BXT_DDI_HPD_INVERT_MASK; 3373 3374 /* 3375 * For BXT invert bit has to be set based on AOB design 3376 * for HPD detection logic, update it based on VBT fields. 3377 */ 3378 if ((enabled_irqs & BXT_DE_PORT_HP_DDIA) && 3379 intel_bios_is_port_hpd_inverted(dev_priv, PORT_A)) 3380 hotplug |= BXT_DDIA_HPD_INVERT; 3381 if ((enabled_irqs & BXT_DE_PORT_HP_DDIB) && 3382 intel_bios_is_port_hpd_inverted(dev_priv, PORT_B)) 3383 hotplug |= BXT_DDIB_HPD_INVERT; 3384 if ((enabled_irqs & BXT_DE_PORT_HP_DDIC) && 3385 intel_bios_is_port_hpd_inverted(dev_priv, PORT_C)) 3386 hotplug |= BXT_DDIC_HPD_INVERT; 3387 3388 I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 3389 } 3390 3391 static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv) 3392 { 3393 __bxt_hpd_detection_setup(dev_priv, BXT_DE_PORT_HOTPLUG_MASK); 3394 } 3395 3396 static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv) 3397 { 3398 u32 hotplug_irqs, enabled_irqs; 3399 3400 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bxt); 3401 hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK; 3402 3403 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs); 3404 3405 __bxt_hpd_detection_setup(dev_priv, enabled_irqs); 3406 } 3407 3408 static void ibx_irq_postinstall(struct drm_device *dev) 3409 { 3410 struct drm_i915_private *dev_priv = to_i915(dev); 3411 u32 mask; 3412 3413 if (HAS_PCH_NOP(dev_priv)) 3414 return; 3415 3416 if (HAS_PCH_IBX(dev_priv)) 3417 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON; 3418 else if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv)) 3419 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT; 3420 else 3421 mask = SDE_GMBUS_CPT; 3422 3423 gen3_assert_iir_is_zero(dev_priv, SDEIIR); 3424 I915_WRITE(SDEIMR, ~mask); 3425 3426 if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) || 3427 HAS_PCH_LPT(dev_priv)) 3428 ibx_hpd_detection_setup(dev_priv); 3429 else 3430 spt_hpd_detection_setup(dev_priv); 3431 } 3432 3433 static void gen5_gt_irq_postinstall(struct drm_device *dev) 3434 { 3435 struct drm_i915_private *dev_priv = to_i915(dev); 3436 u32 pm_irqs, gt_irqs; 3437 3438 pm_irqs = 
gt_irqs = 0; 3439 3440 dev_priv->gt_irq_mask = ~0; 3441 if (HAS_L3_DPF(dev_priv)) { 3442 /* L3 parity interrupt is always unmasked. */ 3443 dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev_priv); 3444 gt_irqs |= GT_PARITY_ERROR(dev_priv); 3445 } 3446 3447 gt_irqs |= GT_RENDER_USER_INTERRUPT; 3448 if (IS_GEN5(dev_priv)) { 3449 gt_irqs |= ILK_BSD_USER_INTERRUPT; 3450 } else { 3451 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT; 3452 } 3453 3454 GEN3_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs); 3455 3456 if (INTEL_GEN(dev_priv) >= 6) { 3457 /* 3458 * RPS interrupts will get enabled/disabled on demand when RPS 3459 * itself is enabled/disabled. 3460 */ 3461 if (HAS_VEBOX(dev_priv)) { 3462 pm_irqs |= PM_VEBOX_USER_INTERRUPT; 3463 dev_priv->pm_ier |= PM_VEBOX_USER_INTERRUPT; 3464 } 3465 3466 dev_priv->pm_imr = 0xffffffff; 3467 GEN3_IRQ_INIT(GEN6_PM, dev_priv->pm_imr, pm_irqs); 3468 } 3469 } 3470 3471 static int ironlake_irq_postinstall(struct drm_device *dev) 3472 { 3473 struct drm_i915_private *dev_priv = to_i915(dev); 3474 u32 display_mask, extra_mask; 3475 3476 if (INTEL_GEN(dev_priv) >= 7) { 3477 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | 3478 DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB); 3479 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB | 3480 DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB | 3481 DE_DP_A_HOTPLUG_IVB); 3482 } else { 3483 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | 3484 DE_AUX_CHANNEL_A | DE_PIPEB_CRC_DONE | 3485 DE_PIPEA_CRC_DONE | DE_POISON); 3486 extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT | 3487 DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN | 3488 DE_DP_A_HOTPLUG); 3489 } 3490 3491 dev_priv->irq_mask = ~display_mask; 3492 3493 ibx_irq_pre_postinstall(dev); 3494 3495 GEN3_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask); 3496 3497 gen5_gt_irq_postinstall(dev); 3498 3499 ilk_hpd_detection_setup(dev_priv); 3500 3501 ibx_irq_postinstall(dev); 3502 3503 if (IS_IRONLAKE_M(dev_priv)) { 3504 /* Enable PCU event interrupts 3505 * 3506 * spinlocking not required here for correctness since interrupt 3507 * setup is guaranteed to run in single-threaded context. But we 3508 * need it to make the assert_spin_locked happy. 
*/ 3509 spin_lock_irq(&dev_priv->irq_lock); 3510 ilk_enable_display_irq(dev_priv, DE_PCU_EVENT); 3511 spin_unlock_irq(&dev_priv->irq_lock); 3512 } 3513 3514 return 0; 3515 } 3516 3517 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv) 3518 { 3519 lockdep_assert_held(&dev_priv->irq_lock); 3520 3521 if (dev_priv->display_irqs_enabled) 3522 return; 3523 3524 dev_priv->display_irqs_enabled = true; 3525 3526 if (intel_irqs_enabled(dev_priv)) { 3527 vlv_display_irq_reset(dev_priv); 3528 vlv_display_irq_postinstall(dev_priv); 3529 } 3530 } 3531 3532 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv) 3533 { 3534 lockdep_assert_held(&dev_priv->irq_lock); 3535 3536 if (!dev_priv->display_irqs_enabled) 3537 return; 3538 3539 dev_priv->display_irqs_enabled = false; 3540 3541 if (intel_irqs_enabled(dev_priv)) 3542 vlv_display_irq_reset(dev_priv); 3543 } 3544 3545 3546 static int valleyview_irq_postinstall(struct drm_device *dev) 3547 { 3548 struct drm_i915_private *dev_priv = to_i915(dev); 3549 3550 gen5_gt_irq_postinstall(dev); 3551 3552 spin_lock_irq(&dev_priv->irq_lock); 3553 if (dev_priv->display_irqs_enabled) 3554 vlv_display_irq_postinstall(dev_priv); 3555 spin_unlock_irq(&dev_priv->irq_lock); 3556 3557 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); 3558 POSTING_READ(VLV_MASTER_IER); 3559 3560 return 0; 3561 } 3562 3563 static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv) 3564 { 3565 /* These are interrupts we'll toggle with the ring mask register */ 3566 uint32_t gt_interrupts[] = { 3567 GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT | 3568 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT | 3569 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT | 3570 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT, 3571 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT | 3572 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT | 3573 GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT | 3574 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT, 3575 0, 3576 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT | 3577 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT 3578 }; 3579 3580 if (HAS_L3_DPF(dev_priv)) 3581 gt_interrupts[0] |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT; 3582 3583 dev_priv->pm_ier = 0x0; 3584 dev_priv->pm_imr = ~dev_priv->pm_ier; 3585 GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]); 3586 GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]); 3587 /* 3588 * RPS interrupts will get enabled/disabled on demand when RPS itself 3589 * is enabled/disabled. Same will be the case for GuC interrupts.
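 * That is why the GT2 (PM) bank below starts out fully masked: pm_ier is 0 and pm_imr masks every source.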
3590 */ 3591 GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_imr, dev_priv->pm_ier); 3592 GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]); 3593 } 3594 3595 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) 3596 { 3597 uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE; 3598 uint32_t de_pipe_enables; 3599 u32 de_port_masked = GEN8_AUX_CHANNEL_A; 3600 u32 de_port_enables; 3601 u32 de_misc_masked = GEN8_DE_MISC_GSE; 3602 enum pipe pipe; 3603 3604 if (INTEL_GEN(dev_priv) >= 9) { 3605 de_pipe_masked |= GEN9_DE_PIPE_IRQ_FAULT_ERRORS; 3606 de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C | 3607 GEN9_AUX_CHANNEL_D; 3608 if (IS_GEN9_LP(dev_priv)) 3609 de_port_masked |= BXT_DE_PORT_GMBUS; 3610 } else { 3611 de_pipe_masked |= GEN8_DE_PIPE_IRQ_FAULT_ERRORS; 3612 } 3613 3614 de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK | 3615 GEN8_PIPE_FIFO_UNDERRUN; 3616 3617 de_port_enables = de_port_masked; 3618 if (IS_GEN9_LP(dev_priv)) 3619 de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK; 3620 else if (IS_BROADWELL(dev_priv)) 3621 de_port_enables |= GEN8_PORT_DP_A_HOTPLUG; 3622 3623 for_each_pipe(dev_priv, pipe) { 3624 dev_priv->de_irq_mask[pipe] = ~de_pipe_masked; 3625 3626 if (intel_display_power_is_enabled(dev_priv, 3627 POWER_DOMAIN_PIPE(pipe))) 3628 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe, 3629 dev_priv->de_irq_mask[pipe], 3630 de_pipe_enables); 3631 } 3632 3633 GEN3_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables); 3634 GEN3_IRQ_INIT(GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked); 3635 3636 if (IS_GEN9_LP(dev_priv)) 3637 bxt_hpd_detection_setup(dev_priv); 3638 else if (IS_BROADWELL(dev_priv)) 3639 ilk_hpd_detection_setup(dev_priv); 3640 } 3641 3642 static int gen8_irq_postinstall(struct drm_device *dev) 3643 { 3644 struct drm_i915_private *dev_priv = to_i915(dev); 3645 3646 if (HAS_PCH_SPLIT(dev_priv)) 3647 ibx_irq_pre_postinstall(dev); 3648 3649 gen8_gt_irq_postinstall(dev_priv); 3650 gen8_de_irq_postinstall(dev_priv); 3651 3652 if (HAS_PCH_SPLIT(dev_priv)) 3653 ibx_irq_postinstall(dev); 3654 3655 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 3656 POSTING_READ(GEN8_MASTER_IRQ); 3657 3658 return 0; 3659 } 3660 3661 static int cherryview_irq_postinstall(struct drm_device *dev) 3662 { 3663 struct drm_i915_private *dev_priv = to_i915(dev); 3664 3665 gen8_gt_irq_postinstall(dev_priv); 3666 3667 spin_lock_irq(&dev_priv->irq_lock); 3668 if (dev_priv->display_irqs_enabled) 3669 vlv_display_irq_postinstall(dev_priv); 3670 spin_unlock_irq(&dev_priv->irq_lock); 3671 3672 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 3673 POSTING_READ(GEN8_MASTER_IRQ); 3674 3675 return 0; 3676 } 3677 3678 static void i8xx_irq_reset(struct drm_device *dev) 3679 { 3680 struct drm_i915_private *dev_priv = to_i915(dev); 3681 3682 i9xx_pipestat_irq_reset(dev_priv); 3683 3684 I915_WRITE16(HWSTAM, 0xffff); 3685 3686 GEN2_IRQ_RESET(); 3687 } 3688 3689 static int i8xx_irq_postinstall(struct drm_device *dev) 3690 { 3691 struct drm_i915_private *dev_priv = to_i915(dev); 3692 u16 enable_mask; 3693 3694 I915_WRITE16(EMR, ~(I915_ERROR_PAGE_TABLE | 3695 I915_ERROR_MEMORY_REFRESH)); 3696 3697 /* Unmask the interrupts that we always want on. 
*/ 3698 dev_priv->irq_mask = 3699 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3700 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT); 3701 3702 enable_mask = 3703 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3704 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3705 I915_USER_INTERRUPT; 3706 3707 GEN2_IRQ_INIT(, dev_priv->irq_mask, enable_mask); 3708 3709 /* Interrupt setup is already guaranteed to be single-threaded, this is 3710 * just to make the assert_spin_locked check happy. */ 3711 spin_lock_irq(&dev_priv->irq_lock); 3712 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 3713 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 3714 spin_unlock_irq(&dev_priv->irq_lock); 3715 3716 return 0; 3717 } 3718 3719 static irqreturn_t i8xx_irq_handler(int irq, void *arg) 3720 { 3721 struct drm_device *dev = arg; 3722 struct drm_i915_private *dev_priv = to_i915(dev); 3723 irqreturn_t ret = IRQ_NONE; 3724 3725 if (!intel_irqs_enabled(dev_priv)) 3726 return IRQ_NONE; 3727 3728 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 3729 disable_rpm_wakeref_asserts(dev_priv); 3730 3731 do { 3732 u32 pipe_stats[I915_MAX_PIPES] = {}; 3733 u16 iir; 3734 3735 iir = I915_READ16(IIR); 3736 if (iir == 0) 3737 break; 3738 3739 ret = IRQ_HANDLED; 3740 3741 /* Call regardless, as some status bits might not be 3742 * signalled in iir */ 3743 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats); 3744 3745 I915_WRITE16(IIR, iir); 3746 3747 if (iir & I915_USER_INTERRUPT) 3748 notify_ring(dev_priv->engine[RCS]); 3749 3750 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 3751 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir); 3752 3753 i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats); 3754 } while (0); 3755 3756 enable_rpm_wakeref_asserts(dev_priv); 3757 3758 return ret; 3759 } 3760 3761 static void i915_irq_reset(struct drm_device *dev) 3762 { 3763 struct drm_i915_private *dev_priv = to_i915(dev); 3764 3765 if (I915_HAS_HOTPLUG(dev_priv)) { 3766 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); 3767 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3768 } 3769 3770 i9xx_pipestat_irq_reset(dev_priv); 3771 3772 I915_WRITE(HWSTAM, 0xffffffff); 3773 3774 GEN3_IRQ_RESET(); 3775 } 3776 3777 static int i915_irq_postinstall(struct drm_device *dev) 3778 { 3779 struct drm_i915_private *dev_priv = to_i915(dev); 3780 u32 enable_mask; 3781 3782 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | 3783 I915_ERROR_MEMORY_REFRESH)); 3784 3785 /* Unmask the interrupts that we always want on. */ 3786 dev_priv->irq_mask = 3787 ~(I915_ASLE_INTERRUPT | 3788 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3789 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT); 3790 3791 enable_mask = 3792 I915_ASLE_INTERRUPT | 3793 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3794 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3795 I915_USER_INTERRUPT; 3796 3797 if (I915_HAS_HOTPLUG(dev_priv)) { 3798 /* Enable in IER... */ 3799 enable_mask |= I915_DISPLAY_PORT_INTERRUPT; 3800 /* and unmask in IMR */ 3801 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT; 3802 } 3803 3804 GEN3_IRQ_INIT(, dev_priv->irq_mask, enable_mask); 3805 3806 /* Interrupt setup is already guaranteed to be single-threaded, this is 3807 * just to make the assert_spin_locked check happy. 
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	i915_enable_asle_pipestat(dev_priv);

	return 0;
}

static irqreturn_t i915_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = to_i915(dev);
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, so we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	do {
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 hotplug_status = 0;
		u32 iir;

		iir = I915_READ(IIR);
		if (iir == 0)
			break;

		ret = IRQ_HANDLED;

		if (I915_HAS_HOTPLUG(dev_priv) &&
		    iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		I915_WRITE(IIR, iir);

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev_priv->engine[RCS]);

		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		i915_pipestat_irq_handler(dev_priv, iir, pipe_stats);
	} while (0);

	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}

static void i965_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	i9xx_pipestat_irq_reset(dev_priv);

	I915_WRITE(HWSTAM, 0xffffffff);

	GEN3_IRQ_RESET();
}

static int i965_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 enable_mask;
	u32 error_mask;

	/*
	 * Enable some error detection; note that the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev_priv)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	I915_WRITE(EMR, error_mask);

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PORT_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PORT_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	if (IS_G4X(dev_priv))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	GEN3_IRQ_INIT(, dev_priv->irq_mask, enable_mask);

	/* Interrupt setup is already guaranteed to be single-threaded; this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	i915_enable_asle_pipestat(dev_priv);

	return 0;
}

static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_en;

	lockdep_assert_held(&dev_priv->irq_lock);

	/* Note HDMI and DP share hotplug bits */
	/* enable bits are the same for all generations */
	hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915);
	/*
	 * Programming the CRT detection parameters tends to generate a
	 * spurious hotplug event about three seconds later. So just do it
	 * once.
	 */
	if (IS_G4X(dev_priv))
		hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
	hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;

	/* Ignore TV since it's buggy */
	i915_hotplug_interrupt_update_locked(dev_priv,
					     HOTPLUG_INT_EN_MASK |
					     CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
					     CRT_HOTPLUG_ACTIVATION_PERIOD_64,
					     hotplug_en);
}

static irqreturn_t i965_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = to_i915(dev);
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, so we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	do {
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 hotplug_status = 0;
		u32 iir;

		iir = I915_READ(IIR);
		if (iir == 0)
			break;

		ret = IRQ_HANDLED;

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		I915_WRITE(IIR, iir);

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev_priv->engine[RCS]);

		if (iir & I915_BSD_USER_INTERRUPT)
			notify_ring(dev_priv->engine[VCS]);

		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		i965_pipestat_irq_handler(dev_priv, iir, pipe_stats);
	} while (0);

	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}

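/*
 * The i8xx/i915/i965 interrupt handlers above all follow the same shape:
 * read IIR, acknowledge the PIPESTAT (and, where present, hotplug) status
 * registers first, then clear IIR, and only afterwards dispatch to the
 * ring, hotplug and pipestat handlers.
 */
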
/**
 * intel_irq_init - initializes irq support
 * @dev_priv: i915 device instance
 *
 * This function initializes all the irq support including work items, timers
 * and all the vtables. It does not set up the interrupt itself, though.
 */
void intel_irq_init(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	int i;

	intel_hpd_init_work(dev_priv);

	INIT_WORK(&rps->work, gen6_pm_rps_work);

	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
	for (i = 0; i < MAX_L3_SLICES; ++i)
		dev_priv->l3_parity.remap_info[i] = NULL;

	if (HAS_GUC_SCHED(dev_priv))
		dev_priv->pm_guc_events = GEN9_GUC_TO_HOST_INT_EVENT;

	/* Let's track the enabled rps events */
	if (IS_VALLEYVIEW(dev_priv))
		/* WaGsvRC0ResidencyMethod:vlv */
		dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
	else
		dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;

	rps->pm_intrmsk_mbz = 0;

	/*
	 * SNB,IVB,HSW can hard hang, and VLV,CHV may hard hang, on a looping
	 * batchbuffer if GEN6_PM_UP_EI_EXPIRED is masked.
	 *
	 * TODO: verify if this can be reproduced on VLV,CHV.
	 */
	if (INTEL_GEN(dev_priv) <= 7)
		rps->pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED;

	if (INTEL_GEN(dev_priv) >= 8)
		rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;

	if (IS_GEN2(dev_priv)) {
		/* Gen2 doesn't have a hardware frame counter */
		dev->max_vblank_count = 0;
	} else if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
		dev->driver->get_vblank_counter = g4x_get_vblank_counter;
	} else {
		dev->driver->get_vblank_counter = i915_get_vblank_counter;
		dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
	}

	/*
	 * Opt out of the vblank disable timer on everything except gen2.
	 * Gen2 doesn't have a hardware frame counter and so depends on
	 * vblank interrupts to produce sane vblank sequence numbers.
	 */
	if (!IS_GEN2(dev_priv))
		dev->vblank_disable_immediate = true;

	/* Most platforms treat the display irq block as an always-on
	 * power domain. vlv/chv can disable it at runtime and need
	 * special care to avoid writing any of the display block registers
	 * outside of the power domain. We defer setting up the display irqs
	 * in this case to the runtime pm.
	 */
	dev_priv->display_irqs_enabled = true;
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		dev_priv->display_irqs_enabled = false;

	dev_priv->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD;

	dev->driver->get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos;
	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;

	if (IS_CHERRYVIEW(dev_priv)) {
		dev->driver->irq_handler = cherryview_irq_handler;
		dev->driver->irq_preinstall = cherryview_irq_reset;
		dev->driver->irq_postinstall = cherryview_irq_postinstall;
		dev->driver->irq_uninstall = cherryview_irq_reset;
		dev->driver->enable_vblank = i965_enable_vblank;
		dev->driver->disable_vblank = i965_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		dev->driver->irq_handler = valleyview_irq_handler;
		dev->driver->irq_preinstall = valleyview_irq_reset;
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
		dev->driver->irq_uninstall = valleyview_irq_reset;
		dev->driver->enable_vblank = i965_enable_vblank;
		dev->driver->disable_vblank = i965_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (INTEL_GEN(dev_priv) >= 8) {
		dev->driver->irq_handler = gen8_irq_handler;
		dev->driver->irq_preinstall = gen8_irq_reset;
		dev->driver->irq_postinstall = gen8_irq_postinstall;
		dev->driver->irq_uninstall = gen8_irq_reset;
		dev->driver->enable_vblank = gen8_enable_vblank;
		dev->driver->disable_vblank = gen8_disable_vblank;
		if (IS_GEN9_LP(dev_priv))
			dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
		else if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv) ||
			 HAS_PCH_CNP(dev_priv))
			dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
		else
			dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		dev->driver->irq_handler = ironlake_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_reset;
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_reset;
		dev->driver->enable_vblank = ironlake_enable_vblank;
		dev->driver->disable_vblank = ironlake_disable_vblank;
		dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
	} else {
		if (IS_GEN2(dev_priv)) {
			dev->driver->irq_preinstall = i8xx_irq_reset;
			dev->driver->irq_postinstall = i8xx_irq_postinstall;
			dev->driver->irq_handler = i8xx_irq_handler;
			dev->driver->irq_uninstall = i8xx_irq_reset;
			dev->driver->enable_vblank = i8xx_enable_vblank;
			dev->driver->disable_vblank = i8xx_disable_vblank;
		} else if (IS_GEN3(dev_priv)) {
			dev->driver->irq_preinstall = i915_irq_reset;
			dev->driver->irq_postinstall = i915_irq_postinstall;
			dev->driver->irq_uninstall = i915_irq_reset;
			dev->driver->irq_handler = i915_irq_handler;
			dev->driver->enable_vblank = i8xx_enable_vblank;
			dev->driver->disable_vblank = i8xx_disable_vblank;
		} else {
			dev->driver->irq_preinstall = i965_irq_reset;
			dev->driver->irq_postinstall = i965_irq_postinstall;
			dev->driver->irq_uninstall = i965_irq_reset;
			dev->driver->irq_handler = i965_irq_handler;
			dev->driver->enable_vblank = i965_enable_vblank;
			dev->driver->disable_vblank = i965_disable_vblank;
		}
		if (I915_HAS_HOTPLUG(dev_priv))
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	}
}

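/*
 * Note that every platform branch above installs the same *_irq_reset
 * function for both irq_preinstall and irq_uninstall: the reset path is
 * shared between initial setup and teardown.
 */
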
/**
 * intel_irq_fini - deinitializes IRQ support
 * @i915: i915 device instance
 *
 * This function deinitializes all the IRQ support.
 */
void intel_irq_fini(struct drm_i915_private *i915)
{
	int i;

	for (i = 0; i < MAX_L3_SLICES; ++i)
		kfree(i915->l3_parity.remap_info[i]);
}

/**
 * intel_irq_install - enables the hardware interrupt
 * @dev_priv: i915 device instance
 *
 * This function enables the hardware interrupt handling, but leaves hotplug
 * handling disabled. It is called after intel_irq_init().
 *
 * In the driver load and resume code we need working interrupts in a few places
 * but don't want to deal with the hassle of concurrent probe and hotplug
 * workers. Hence the split into this two-stage approach.
 */
int intel_irq_install(struct drm_i915_private *dev_priv)
{
	/*
	 * We enable some interrupt sources in our postinstall hooks, so mark
	 * interrupts as enabled _before_ actually enabling them to avoid
	 * special cases in our ordering checks.
	 */
	dev_priv->runtime_pm.irqs_enabled = true;

	return drm_irq_install(&dev_priv->drm, dev_priv->drm.pdev->irq);
}

/**
 * intel_irq_uninstall - finalizes all irq handling
 * @dev_priv: i915 device instance
 *
 * This stops interrupt and hotplug handling, and unregisters and frees all
 * resources acquired in the init functions.
 */
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
{
	drm_irq_uninstall(&dev_priv->drm);
	intel_hpd_cancel_work(dev_priv);
	dev_priv->runtime_pm.irqs_enabled = false;
}

/**
 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
 * @dev_priv: i915 device instance
 *
 * This function is used to disable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->drm.driver->irq_uninstall(&dev_priv->drm);
	dev_priv->runtime_pm.irqs_enabled = false;
	synchronize_irq(dev_priv->drm.irq);
}

/**
 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
 * @dev_priv: i915 device instance
 *
 * This function is used to enable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->runtime_pm.irqs_enabled = true;
	dev_priv->drm.driver->irq_preinstall(&dev_priv->drm);
	dev_priv->drm.driver->irq_postinstall(&dev_priv->drm);
}
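
/*
 * Illustrative sketch only (not part of the driver): roughly how the entry
 * points above fit together over the device's lifetime, following the
 * kernel-doc comments in this file. The example_* wrapper names and exact
 * call sites in the real load/suspend paths are assumptions, not upstream
 * code, which is why the block is compiled out.
 */
#if 0
static int example_driver_load_irqs(struct drm_i915_private *dev_priv)
{
	int ret;

	/* Stage 1: work items, timers and vtables only; no IRQ requested yet. */
	intel_irq_init(dev_priv);

	/* Stage 2: request the hardware interrupt; hotplug stays disabled. */
	ret = intel_irq_install(dev_priv);
	if (ret)
		return ret;

	return 0;
}

static void example_runtime_suspend_irqs(struct drm_i915_private *dev_priv)
{
	/* Quiesce interrupts around runtime pm / system suspend. */
	intel_runtime_pm_disable_interrupts(dev_priv);
}

static void example_runtime_resume_irqs(struct drm_i915_private *dev_priv)
{
	/* Re-run the preinstall/postinstall hooks on resume. */
	intel_runtime_pm_enable_interrupts(dev_priv);
}

static void example_driver_unload_irqs(struct drm_i915_private *dev_priv)
{
	/* Tear down: free the IRQ, cancel hotplug work, then free state. */
	intel_irq_uninstall(dev_priv);
	intel_irq_fini(dev_priv);
}
#endif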