/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

/**
 * DOC: interrupt handling
 *
 * These functions provide the basic support for enabling and disabling
 * interrupt handling. There's a lot more functionality in i915_irq.c
 * and related files, but that will be described in separate chapters.
46 */ 47 48 static const u32 hpd_ilk[HPD_NUM_PINS] = { 49 [HPD_PORT_A] = DE_DP_A_HOTPLUG, 50 }; 51 52 static const u32 hpd_ivb[HPD_NUM_PINS] = { 53 [HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB, 54 }; 55 56 static const u32 hpd_bdw[HPD_NUM_PINS] = { 57 [HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG, 58 }; 59 60 static const u32 hpd_ibx[HPD_NUM_PINS] = { 61 [HPD_CRT] = SDE_CRT_HOTPLUG, 62 [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG, 63 [HPD_PORT_B] = SDE_PORTB_HOTPLUG, 64 [HPD_PORT_C] = SDE_PORTC_HOTPLUG, 65 [HPD_PORT_D] = SDE_PORTD_HOTPLUG 66 }; 67 68 static const u32 hpd_cpt[HPD_NUM_PINS] = { 69 [HPD_CRT] = SDE_CRT_HOTPLUG_CPT, 70 [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT, 71 [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT, 72 [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT, 73 [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT 74 }; 75 76 static const u32 hpd_spt[HPD_NUM_PINS] = { 77 [HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT, 78 [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT, 79 [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT, 80 [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT, 81 [HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT 82 }; 83 84 static const u32 hpd_mask_i915[HPD_NUM_PINS] = { 85 [HPD_CRT] = CRT_HOTPLUG_INT_EN, 86 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN, 87 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN, 88 [HPD_PORT_B] = PORTB_HOTPLUG_INT_EN, 89 [HPD_PORT_C] = PORTC_HOTPLUG_INT_EN, 90 [HPD_PORT_D] = PORTD_HOTPLUG_INT_EN 91 }; 92 93 static const u32 hpd_status_g4x[HPD_NUM_PINS] = { 94 [HPD_CRT] = CRT_HOTPLUG_INT_STATUS, 95 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X, 96 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X, 97 [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS, 98 [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS, 99 [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS 100 }; 101 102 static const u32 hpd_status_i915[HPD_NUM_PINS] = { 103 [HPD_CRT] = CRT_HOTPLUG_INT_STATUS, 104 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915, 105 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915, 106 [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS, 107 [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS, 108 [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS 109 }; 110 111 /* BXT hpd list */ 112 static const u32 hpd_bxt[HPD_NUM_PINS] = { 113 [HPD_PORT_A] = BXT_DE_PORT_HP_DDIA, 114 [HPD_PORT_B] = BXT_DE_PORT_HP_DDIB, 115 [HPD_PORT_C] = BXT_DE_PORT_HP_DDIC 116 }; 117 118 /* IIR can theoretically queue up two events. Be paranoid. */ 119 #define GEN8_IRQ_RESET_NDX(type, which) do { \ 120 I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \ 121 POSTING_READ(GEN8_##type##_IMR(which)); \ 122 I915_WRITE(GEN8_##type##_IER(which), 0); \ 123 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \ 124 POSTING_READ(GEN8_##type##_IIR(which)); \ 125 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \ 126 POSTING_READ(GEN8_##type##_IIR(which)); \ 127 } while (0) 128 129 #define GEN5_IRQ_RESET(type) do { \ 130 I915_WRITE(type##IMR, 0xffffffff); \ 131 POSTING_READ(type##IMR); \ 132 I915_WRITE(type##IER, 0); \ 133 I915_WRITE(type##IIR, 0xffffffff); \ 134 POSTING_READ(type##IIR); \ 135 I915_WRITE(type##IIR, 0xffffffff); \ 136 POSTING_READ(type##IIR); \ 137 } while (0) 138 139 /* 140 * We should clear IMR at preinstall/uninstall, and just check at postinstall. 
 */
static void gen5_assert_iir_is_zero(struct drm_i915_private *dev_priv,
				    i915_reg_t reg)
{
	u32 val = I915_READ(reg);

	if (val == 0)
		return;

	WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
	     i915_mmio_reg_offset(reg), val);
	I915_WRITE(reg, 0xffffffff);
	POSTING_READ(reg);
	I915_WRITE(reg, 0xffffffff);
	POSTING_READ(reg);
}

#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
	gen5_assert_iir_is_zero(dev_priv, GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
	I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
} while (0)

#define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
	gen5_assert_iir_is_zero(dev_priv, type##IIR); \
	I915_WRITE(type##IER, (ier_val)); \
	I915_WRITE(type##IMR, (imr_val)); \
	POSTING_READ(type##IMR); \
} while (0)

static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);

/* For display hotplug interrupt */
static inline void
i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
				     uint32_t mask,
				     uint32_t bits)
{
	uint32_t val;

	lockdep_assert_held(&dev_priv->irq_lock);
	WARN_ON(bits & ~mask);

	val = I915_READ(PORT_HOTPLUG_EN);
	val &= ~mask;
	val |= bits;
	I915_WRITE(PORT_HOTPLUG_EN, val);
}

/**
 * i915_hotplug_interrupt_update - update hotplug interrupt enable
 * @dev_priv: driver private
 * @mask: bits to update
 * @bits: bits to enable
 * NOTE: the HPD enable bits are modified both inside and outside
 * of an interrupt context. To keep concurrent read-modify-write
 * cycles from interfering, these bits are protected by a spinlock.
 * Since this function is usually not called from a context where
 * the lock is held already, this function acquires the lock itself.
 * A non-locking version is also available.
203 */ 204 void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv, 205 uint32_t mask, 206 uint32_t bits) 207 { 208 spin_lock_irq(&dev_priv->irq_lock); 209 i915_hotplug_interrupt_update_locked(dev_priv, mask, bits); 210 spin_unlock_irq(&dev_priv->irq_lock); 211 } 212 213 /** 214 * ilk_update_display_irq - update DEIMR 215 * @dev_priv: driver private 216 * @interrupt_mask: mask of interrupt bits to update 217 * @enabled_irq_mask: mask of interrupt bits to enable 218 */ 219 void ilk_update_display_irq(struct drm_i915_private *dev_priv, 220 uint32_t interrupt_mask, 221 uint32_t enabled_irq_mask) 222 { 223 uint32_t new_val; 224 225 lockdep_assert_held(&dev_priv->irq_lock); 226 227 WARN_ON(enabled_irq_mask & ~interrupt_mask); 228 229 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 230 return; 231 232 new_val = dev_priv->irq_mask; 233 new_val &= ~interrupt_mask; 234 new_val |= (~enabled_irq_mask & interrupt_mask); 235 236 if (new_val != dev_priv->irq_mask) { 237 dev_priv->irq_mask = new_val; 238 I915_WRITE(DEIMR, dev_priv->irq_mask); 239 POSTING_READ(DEIMR); 240 } 241 } 242 243 /** 244 * ilk_update_gt_irq - update GTIMR 245 * @dev_priv: driver private 246 * @interrupt_mask: mask of interrupt bits to update 247 * @enabled_irq_mask: mask of interrupt bits to enable 248 */ 249 static void ilk_update_gt_irq(struct drm_i915_private *dev_priv, 250 uint32_t interrupt_mask, 251 uint32_t enabled_irq_mask) 252 { 253 lockdep_assert_held(&dev_priv->irq_lock); 254 255 WARN_ON(enabled_irq_mask & ~interrupt_mask); 256 257 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 258 return; 259 260 dev_priv->gt_irq_mask &= ~interrupt_mask; 261 dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask); 262 I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 263 } 264 265 void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask) 266 { 267 ilk_update_gt_irq(dev_priv, mask, mask); 268 POSTING_READ_FW(GTIMR); 269 } 270 271 void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask) 272 { 273 ilk_update_gt_irq(dev_priv, mask, 0); 274 } 275 276 static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv) 277 { 278 return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR; 279 } 280 281 static i915_reg_t gen6_pm_imr(struct drm_i915_private *dev_priv) 282 { 283 return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR; 284 } 285 286 static i915_reg_t gen6_pm_ier(struct drm_i915_private *dev_priv) 287 { 288 return INTEL_INFO(dev_priv)->gen >= 8 ? 
GEN8_GT_IER(2) : GEN6_PMIER; 289 } 290 291 /** 292 * snb_update_pm_irq - update GEN6_PMIMR 293 * @dev_priv: driver private 294 * @interrupt_mask: mask of interrupt bits to update 295 * @enabled_irq_mask: mask of interrupt bits to enable 296 */ 297 static void snb_update_pm_irq(struct drm_i915_private *dev_priv, 298 uint32_t interrupt_mask, 299 uint32_t enabled_irq_mask) 300 { 301 uint32_t new_val; 302 303 WARN_ON(enabled_irq_mask & ~interrupt_mask); 304 305 lockdep_assert_held(&dev_priv->irq_lock); 306 307 new_val = dev_priv->pm_imr; 308 new_val &= ~interrupt_mask; 309 new_val |= (~enabled_irq_mask & interrupt_mask); 310 311 if (new_val != dev_priv->pm_imr) { 312 dev_priv->pm_imr = new_val; 313 I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_imr); 314 POSTING_READ(gen6_pm_imr(dev_priv)); 315 } 316 } 317 318 void gen6_unmask_pm_irq(struct drm_i915_private *dev_priv, u32 mask) 319 { 320 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 321 return; 322 323 snb_update_pm_irq(dev_priv, mask, mask); 324 } 325 326 static void __gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask) 327 { 328 snb_update_pm_irq(dev_priv, mask, 0); 329 } 330 331 void gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask) 332 { 333 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 334 return; 335 336 __gen6_mask_pm_irq(dev_priv, mask); 337 } 338 339 void gen6_reset_pm_iir(struct drm_i915_private *dev_priv, u32 reset_mask) 340 { 341 i915_reg_t reg = gen6_pm_iir(dev_priv); 342 343 lockdep_assert_held(&dev_priv->irq_lock); 344 345 I915_WRITE(reg, reset_mask); 346 I915_WRITE(reg, reset_mask); 347 POSTING_READ(reg); 348 } 349 350 void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, u32 enable_mask) 351 { 352 lockdep_assert_held(&dev_priv->irq_lock); 353 354 dev_priv->pm_ier |= enable_mask; 355 I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->pm_ier); 356 gen6_unmask_pm_irq(dev_priv, enable_mask); 357 /* unmask_pm_irq provides an implicit barrier (POSTING_READ) */ 358 } 359 360 void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, u32 disable_mask) 361 { 362 lockdep_assert_held(&dev_priv->irq_lock); 363 364 dev_priv->pm_ier &= ~disable_mask; 365 __gen6_mask_pm_irq(dev_priv, disable_mask); 366 I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->pm_ier); 367 /* though a barrier is missing here, but don't really need a one */ 368 } 369 370 void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv) 371 { 372 spin_lock_irq(&dev_priv->irq_lock); 373 gen6_reset_pm_iir(dev_priv, dev_priv->pm_rps_events); 374 dev_priv->rps.pm_iir = 0; 375 spin_unlock_irq(&dev_priv->irq_lock); 376 } 377 378 void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv) 379 { 380 if (READ_ONCE(dev_priv->rps.interrupts_enabled)) 381 return; 382 383 spin_lock_irq(&dev_priv->irq_lock); 384 WARN_ON_ONCE(dev_priv->rps.pm_iir); 385 WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events); 386 dev_priv->rps.interrupts_enabled = true; 387 gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events); 388 389 spin_unlock_irq(&dev_priv->irq_lock); 390 } 391 392 void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv) 393 { 394 if (!READ_ONCE(dev_priv->rps.interrupts_enabled)) 395 return; 396 397 spin_lock_irq(&dev_priv->irq_lock); 398 dev_priv->rps.interrupts_enabled = false; 399 400 I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0u)); 401 402 gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events); 403 404 spin_unlock_irq(&dev_priv->irq_lock); 405 synchronize_irq(dev_priv->drm.irq); 406 407 /* Now that we will not 
be generating any more work, flush any
	 * outstanding tasks. As we are called on the RPS idle path,
	 * we will reset the GPU to minimum frequencies, so the current
	 * state of the worker can be discarded.
	 */
	cancel_work_sync(&dev_priv->rps.work);
	gen6_reset_rps_interrupts(dev_priv);
}

void gen9_reset_guc_interrupts(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	gen6_reset_pm_iir(dev_priv, dev_priv->pm_guc_events);
	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	if (!dev_priv->guc.interrupts_enabled) {
		WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) &
			     dev_priv->pm_guc_events);
		dev_priv->guc.interrupts_enabled = true;
		gen6_enable_pm_irq(dev_priv, dev_priv->pm_guc_events);
	}
	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	dev_priv->guc.interrupts_enabled = false;

	gen6_disable_pm_irq(dev_priv, dev_priv->pm_guc_events);

	spin_unlock_irq(&dev_priv->irq_lock);
	synchronize_irq(dev_priv->drm.irq);

	gen9_reset_guc_interrupts(dev_priv);
}

/**
 * bdw_update_port_irq - update DE port interrupt
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
				uint32_t interrupt_mask,
				uint32_t enabled_irq_mask)
{
	uint32_t new_val;
	uint32_t old_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	old_val = I915_READ(GEN8_DE_PORT_IMR);

	new_val = old_val;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != old_val) {
		I915_WRITE(GEN8_DE_PORT_IMR, new_val);
		POSTING_READ(GEN8_DE_PORT_IMR);
	}
}

/**
 * bdw_update_pipe_irq - update DE pipe interrupt
 * @dev_priv: driver private
 * @pipe: pipe whose interrupt to update
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
			 enum pipe pipe,
			 uint32_t interrupt_mask,
			 uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->de_irq_mask[pipe];
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->de_irq_mask[pipe]) {
		dev_priv->de_irq_mask[pipe] = new_val;
		I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
		POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
	}
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
				  uint32_t interrupt_mask,
				  uint32_t enabled_irq_mask)
{
	uint32_t sdeimr = I915_READ(SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |=
(~enabled_irq_mask & interrupt_mask); 525 526 WARN_ON(enabled_irq_mask & ~interrupt_mask); 527 528 lockdep_assert_held(&dev_priv->irq_lock); 529 530 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 531 return; 532 533 I915_WRITE(SDEIMR, sdeimr); 534 POSTING_READ(SDEIMR); 535 } 536 537 static void 538 __i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, 539 u32 enable_mask, u32 status_mask) 540 { 541 i915_reg_t reg = PIPESTAT(pipe); 542 u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK; 543 544 lockdep_assert_held(&dev_priv->irq_lock); 545 WARN_ON(!intel_irqs_enabled(dev_priv)); 546 547 if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK || 548 status_mask & ~PIPESTAT_INT_STATUS_MASK, 549 "pipe %c: enable_mask=0x%x, status_mask=0x%x\n", 550 pipe_name(pipe), enable_mask, status_mask)) 551 return; 552 553 if ((pipestat & enable_mask) == enable_mask) 554 return; 555 556 dev_priv->pipestat_irq_mask[pipe] |= status_mask; 557 558 /* Enable the interrupt, clear any pending status */ 559 pipestat |= enable_mask | status_mask; 560 I915_WRITE(reg, pipestat); 561 POSTING_READ(reg); 562 } 563 564 static void 565 __i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, 566 u32 enable_mask, u32 status_mask) 567 { 568 i915_reg_t reg = PIPESTAT(pipe); 569 u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK; 570 571 lockdep_assert_held(&dev_priv->irq_lock); 572 WARN_ON(!intel_irqs_enabled(dev_priv)); 573 574 if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK || 575 status_mask & ~PIPESTAT_INT_STATUS_MASK, 576 "pipe %c: enable_mask=0x%x, status_mask=0x%x\n", 577 pipe_name(pipe), enable_mask, status_mask)) 578 return; 579 580 if ((pipestat & enable_mask) == 0) 581 return; 582 583 dev_priv->pipestat_irq_mask[pipe] &= ~status_mask; 584 585 pipestat &= ~enable_mask; 586 I915_WRITE(reg, pipestat); 587 POSTING_READ(reg); 588 } 589 590 static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask) 591 { 592 u32 enable_mask = status_mask << 16; 593 594 /* 595 * On pipe A we don't support the PSR interrupt yet, 596 * on pipe B and C the same bit MBZ. 597 */ 598 if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV)) 599 return 0; 600 /* 601 * On pipe B and C we don't support the PSR interrupt yet, on pipe 602 * A the same bit is for perf counters which we don't use either. 
603 */ 604 if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV)) 605 return 0; 606 607 enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS | 608 SPRITE0_FLIP_DONE_INT_EN_VLV | 609 SPRITE1_FLIP_DONE_INT_EN_VLV); 610 if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV) 611 enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV; 612 if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV) 613 enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV; 614 615 return enable_mask; 616 } 617 618 void 619 i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, 620 u32 status_mask) 621 { 622 u32 enable_mask; 623 624 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 625 enable_mask = vlv_get_pipestat_enable_mask(&dev_priv->drm, 626 status_mask); 627 else 628 enable_mask = status_mask << 16; 629 __i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask); 630 } 631 632 void 633 i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, 634 u32 status_mask) 635 { 636 u32 enable_mask; 637 638 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 639 enable_mask = vlv_get_pipestat_enable_mask(&dev_priv->drm, 640 status_mask); 641 else 642 enable_mask = status_mask << 16; 643 __i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask); 644 } 645 646 /** 647 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion 648 * @dev_priv: i915 device private 649 */ 650 static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv) 651 { 652 if (!dev_priv->opregion.asle || !IS_MOBILE(dev_priv)) 653 return; 654 655 spin_lock_irq(&dev_priv->irq_lock); 656 657 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS); 658 if (INTEL_GEN(dev_priv) >= 4) 659 i915_enable_pipestat(dev_priv, PIPE_A, 660 PIPE_LEGACY_BLC_EVENT_STATUS); 661 662 spin_unlock_irq(&dev_priv->irq_lock); 663 } 664 665 /* 666 * This timing diagram depicts the video signal in and 667 * around the vertical blanking period. 668 * 669 * Assumptions about the fictitious mode used in this example: 670 * vblank_start >= 3 671 * vsync_start = vblank_start + 1 672 * vsync_end = vblank_start + 2 673 * vtotal = vblank_start + 3 674 * 675 * start of vblank: 676 * latch double buffered registers 677 * increment frame counter (ctg+) 678 * generate start of vblank interrupt (gen4+) 679 * | 680 * | frame start: 681 * | generate frame start interrupt (aka. vblank interrupt) (gmch) 682 * | may be shifted forward 1-3 extra lines via PIPECONF 683 * | | 684 * | | start of vsync: 685 * | | generate vsync interrupt 686 * | | | 687 * ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx 688 * . \hs/ . \hs/ \hs/ \hs/ . 
\hs/ 689 * ----va---> <-----------------vb--------------------> <--------va------------- 690 * | | <----vs-----> | 691 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2) 692 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+) 693 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi) 694 * | | | 695 * last visible pixel first visible pixel 696 * | increment frame counter (gen3/4) 697 * pixel counter = vblank_start * htotal pixel counter = 0 (gen3/4) 698 * 699 * x = horizontal active 700 * _ = horizontal blanking 701 * hs = horizontal sync 702 * va = vertical active 703 * vb = vertical blanking 704 * vs = vertical sync 705 * vbs = vblank_start (number) 706 * 707 * Summary: 708 * - most events happen at the start of horizontal sync 709 * - frame start happens at the start of horizontal blank, 1-4 lines 710 * (depending on PIPECONF settings) after the start of vblank 711 * - gen3/4 pixel and frame counter are synchronized with the start 712 * of horizontal active on the first line of vertical active 713 */ 714 715 /* Called from drm generic code, passed a 'crtc', which 716 * we use as a pipe index 717 */ 718 static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe) 719 { 720 struct drm_i915_private *dev_priv = to_i915(dev); 721 i915_reg_t high_frame, low_frame; 722 u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal; 723 struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv, 724 pipe); 725 const struct drm_display_mode *mode = &intel_crtc->base.hwmode; 726 unsigned long irqflags; 727 728 htotal = mode->crtc_htotal; 729 hsync_start = mode->crtc_hsync_start; 730 vbl_start = mode->crtc_vblank_start; 731 if (mode->flags & DRM_MODE_FLAG_INTERLACE) 732 vbl_start = DIV_ROUND_UP(vbl_start, 2); 733 734 /* Convert to pixel count */ 735 vbl_start *= htotal; 736 737 /* Start of vblank event occurs at start of hsync */ 738 vbl_start -= htotal - hsync_start; 739 740 high_frame = PIPEFRAME(pipe); 741 low_frame = PIPEFRAMEPIXEL(pipe); 742 743 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 744 745 /* 746 * High & low register fields aren't synchronized, so make sure 747 * we get a low value that's stable across two reads of the high 748 * register. 749 */ 750 do { 751 high1 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK; 752 low = I915_READ_FW(low_frame); 753 high2 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK; 754 } while (high1 != high2); 755 756 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 757 758 high1 >>= PIPE_FRAME_HIGH_SHIFT; 759 pixel = low & PIPE_PIXEL_MASK; 760 low >>= PIPE_FRAME_LOW_SHIFT; 761 762 /* 763 * The frame counter increments at beginning of active. 764 * Cook up a vblank counter by also checking the pixel 765 * counter against vblank start. 766 */ 767 return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff; 768 } 769 770 static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe) 771 { 772 struct drm_i915_private *dev_priv = to_i915(dev); 773 774 return I915_READ(PIPE_FRMCOUNT_G4X(pipe)); 775 } 776 777 /* I915_READ_FW, only for fast reads of display block, no need for forcewake etc. 
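 * The callers of __intel_get_crtc_scanline() below, i915_get_crtc_scanoutpos()
 * and intel_get_crtc_scanline(), hold dev_priv->uncore.lock around these raw
 * PIPEDSL reads.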
 */
static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct drm_display_mode *mode = &crtc->base.hwmode;
	enum pipe pipe = crtc->pipe;
	int position, vtotal;

	if (!crtc->active)
		return -1;

	vtotal = mode->crtc_vtotal;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vtotal /= 2;

	if (IS_GEN2(dev_priv))
		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
	else
		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

	/*
	 * On HSW, the DSL reg (0x70000) appears to return 0 if we
	 * read it just before the start of vblank. So try it again
	 * so we don't accidentally end up spanning a vblank frame
	 * increment, causing the pipe_update_end() code to squawk at us.
	 *
	 * The nature of this problem means we can't simply check the ISR
	 * bit and return the vblank start value; nor can we use the scanline
	 * debug register in the transcoder as it appears to have the same
	 * problem. We may need to extend this to include other platforms,
	 * but so far testing only shows the problem on HSW.
	 */
	if (HAS_DDI(dev_priv) && !position) {
		int i, temp;

		for (i = 0; i < 100; i++) {
			udelay(1);
			temp = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
			if (temp != position) {
				position = temp;
				break;
			}
		}
	}

	/*
	 * See update_scanline_offset() for the details on the
	 * scanline_offset adjustment.
	 */
	return (position + crtc->scanline_offset) % vtotal;
}

static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
				    unsigned int flags, int *vpos, int *hpos,
				    ktime_t *stime, ktime_t *etime,
				    const struct drm_display_mode *mode)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv,
								pipe);
	int position;
	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;
	unsigned long irqflags;

	if (WARN_ON(!mode->crtc_clock)) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

	ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = __intel_get_crtc_scanline(intel_crtc);
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
886 */ 887 position = (I915_READ_FW(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT; 888 889 /* convert to pixel counts */ 890 vbl_start *= htotal; 891 vbl_end *= htotal; 892 vtotal *= htotal; 893 894 /* 895 * In interlaced modes, the pixel counter counts all pixels, 896 * so one field will have htotal more pixels. In order to avoid 897 * the reported position from jumping backwards when the pixel 898 * counter is beyond the length of the shorter field, just 899 * clamp the position the length of the shorter field. This 900 * matches how the scanline counter based position works since 901 * the scanline counter doesn't count the two half lines. 902 */ 903 if (position >= vtotal) 904 position = vtotal - 1; 905 906 /* 907 * Start of vblank interrupt is triggered at start of hsync, 908 * just prior to the first active line of vblank. However we 909 * consider lines to start at the leading edge of horizontal 910 * active. So, should we get here before we've crossed into 911 * the horizontal active of the first line in vblank, we would 912 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that, 913 * always add htotal-hsync_start to the current pixel position. 914 */ 915 position = (position + htotal - hsync_start) % vtotal; 916 } 917 918 /* Get optional system timestamp after query. */ 919 if (etime) 920 *etime = ktime_get(); 921 922 /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */ 923 924 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 925 926 in_vbl = position >= vbl_start && position < vbl_end; 927 928 /* 929 * While in vblank, position will be negative 930 * counting up towards 0 at vbl_end. And outside 931 * vblank, position will be positive counting 932 * up since vbl_end. 933 */ 934 if (position >= vbl_start) 935 position -= vbl_end; 936 else 937 position += vtotal - vbl_end; 938 939 if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) { 940 *vpos = position; 941 *hpos = 0; 942 } else { 943 *vpos = position / htotal; 944 *hpos = position - (*vpos * htotal); 945 } 946 947 /* In vblank? 
*/ 948 if (in_vbl) 949 ret |= DRM_SCANOUTPOS_IN_VBLANK; 950 951 return ret; 952 } 953 954 int intel_get_crtc_scanline(struct intel_crtc *crtc) 955 { 956 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 957 unsigned long irqflags; 958 int position; 959 960 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 961 position = __intel_get_crtc_scanline(crtc); 962 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 963 964 return position; 965 } 966 967 static int i915_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe, 968 int *max_error, 969 struct timeval *vblank_time, 970 unsigned flags) 971 { 972 struct drm_i915_private *dev_priv = to_i915(dev); 973 struct intel_crtc *crtc; 974 975 if (pipe >= INTEL_INFO(dev_priv)->num_pipes) { 976 DRM_ERROR("Invalid crtc %u\n", pipe); 977 return -EINVAL; 978 } 979 980 /* Get drm_crtc to timestamp: */ 981 crtc = intel_get_crtc_for_pipe(dev_priv, pipe); 982 if (crtc == NULL) { 983 DRM_ERROR("Invalid crtc %u\n", pipe); 984 return -EINVAL; 985 } 986 987 if (!crtc->base.hwmode.crtc_clock) { 988 DRM_DEBUG_KMS("crtc %u is disabled\n", pipe); 989 return -EBUSY; 990 } 991 992 /* Helper routine in DRM core does all the work: */ 993 return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error, 994 vblank_time, flags, 995 &crtc->base.hwmode); 996 } 997 998 static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv) 999 { 1000 u32 busy_up, busy_down, max_avg, min_avg; 1001 u8 new_delay; 1002 1003 spin_lock(&mchdev_lock); 1004 1005 I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS)); 1006 1007 new_delay = dev_priv->ips.cur_delay; 1008 1009 I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG); 1010 busy_up = I915_READ(RCPREVBSYTUPAVG); 1011 busy_down = I915_READ(RCPREVBSYTDNAVG); 1012 max_avg = I915_READ(RCBMAXAVG); 1013 min_avg = I915_READ(RCBMINAVG); 1014 1015 /* Handle RCS change request from hw */ 1016 if (busy_up > max_avg) { 1017 if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay) 1018 new_delay = dev_priv->ips.cur_delay - 1; 1019 if (new_delay < dev_priv->ips.max_delay) 1020 new_delay = dev_priv->ips.max_delay; 1021 } else if (busy_down < min_avg) { 1022 if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay) 1023 new_delay = dev_priv->ips.cur_delay + 1; 1024 if (new_delay > dev_priv->ips.min_delay) 1025 new_delay = dev_priv->ips.min_delay; 1026 } 1027 1028 if (ironlake_set_drps(dev_priv, new_delay)) 1029 dev_priv->ips.cur_delay = new_delay; 1030 1031 spin_unlock(&mchdev_lock); 1032 1033 return; 1034 } 1035 1036 static void notify_ring(struct intel_engine_cs *engine) 1037 { 1038 struct drm_i915_gem_request *rq = NULL; 1039 struct intel_wait *wait; 1040 1041 atomic_inc(&engine->irq_count); 1042 set_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted); 1043 1044 spin_lock(&engine->breadcrumbs.irq_lock); 1045 wait = engine->breadcrumbs.irq_wait; 1046 if (wait) { 1047 /* We use a callback from the dma-fence to submit 1048 * requests after waiting on our own requests. To 1049 * ensure minimum delay in queuing the next request to 1050 * hardware, signal the fence now rather than wait for 1051 * the signaler to be woken up. We still wake up the 1052 * waiter in order to handle the irq-seqno coherency 1053 * issues (we may receive the interrupt before the 1054 * seqno is written, see __i915_request_irq_complete()) 1055 * and to handle coalescing of multiple seqno updates 1056 * and many waiters. 
1057 */ 1058 if (i915_seqno_passed(intel_engine_get_seqno(engine), 1059 wait->seqno) && 1060 !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, 1061 &wait->request->fence.flags)) 1062 rq = i915_gem_request_get(wait->request); 1063 1064 wake_up_process(wait->tsk); 1065 } else { 1066 __intel_engine_disarm_breadcrumbs(engine); 1067 } 1068 spin_unlock(&engine->breadcrumbs.irq_lock); 1069 1070 if (rq) { 1071 dma_fence_signal(&rq->fence); 1072 i915_gem_request_put(rq); 1073 } 1074 1075 trace_intel_engine_notify(engine, wait); 1076 } 1077 1078 static void vlv_c0_read(struct drm_i915_private *dev_priv, 1079 struct intel_rps_ei *ei) 1080 { 1081 ei->ktime = ktime_get_raw(); 1082 ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT); 1083 ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT); 1084 } 1085 1086 void gen6_rps_reset_ei(struct drm_i915_private *dev_priv) 1087 { 1088 memset(&dev_priv->rps.ei, 0, sizeof(dev_priv->rps.ei)); 1089 } 1090 1091 static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir) 1092 { 1093 const struct intel_rps_ei *prev = &dev_priv->rps.ei; 1094 struct intel_rps_ei now; 1095 u32 events = 0; 1096 1097 if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0) 1098 return 0; 1099 1100 vlv_c0_read(dev_priv, &now); 1101 1102 if (prev->ktime) { 1103 u64 time, c0; 1104 u32 render, media; 1105 1106 time = ktime_us_delta(now.ktime, prev->ktime); 1107 1108 time *= dev_priv->czclk_freq; 1109 1110 /* Workload can be split between render + media, 1111 * e.g. SwapBuffers being blitted in X after being rendered in 1112 * mesa. To account for this we need to combine both engines 1113 * into our activity counter. 1114 */ 1115 render = now.render_c0 - prev->render_c0; 1116 media = now.media_c0 - prev->media_c0; 1117 c0 = max(render, media); 1118 c0 *= 1000 * 100 << 8; /* to usecs and scale to threshold% */ 1119 1120 if (c0 > time * dev_priv->rps.up_threshold) 1121 events = GEN6_PM_RP_UP_THRESHOLD; 1122 else if (c0 < time * dev_priv->rps.down_threshold) 1123 events = GEN6_PM_RP_DOWN_THRESHOLD; 1124 } 1125 1126 dev_priv->rps.ei = now; 1127 return events; 1128 } 1129 1130 static bool any_waiters(struct drm_i915_private *dev_priv) 1131 { 1132 struct intel_engine_cs *engine; 1133 enum intel_engine_id id; 1134 1135 for_each_engine(engine, dev_priv, id) 1136 if (intel_engine_has_waiter(engine)) 1137 return true; 1138 1139 return false; 1140 } 1141 1142 static void gen6_pm_rps_work(struct work_struct *work) 1143 { 1144 struct drm_i915_private *dev_priv = 1145 container_of(work, struct drm_i915_private, rps.work); 1146 bool client_boost = false; 1147 int new_delay, adj, min, max; 1148 u32 pm_iir = 0; 1149 1150 spin_lock_irq(&dev_priv->irq_lock); 1151 if (dev_priv->rps.interrupts_enabled) { 1152 pm_iir = fetch_and_zero(&dev_priv->rps.pm_iir); 1153 client_boost = fetch_and_zero(&dev_priv->rps.client_boost); 1154 } 1155 spin_unlock_irq(&dev_priv->irq_lock); 1156 1157 /* Make sure we didn't queue anything we're not going to process. 
 */
	WARN_ON(pm_iir & ~dev_priv->pm_rps_events);
	if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
		goto out;

	mutex_lock(&dev_priv->rps.hw_lock);

	pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);

	adj = dev_priv->rps.last_adj;
	new_delay = dev_priv->rps.cur_freq;
	min = dev_priv->rps.min_freq_softlimit;
	max = dev_priv->rps.max_freq_softlimit;
	if (client_boost || any_waiters(dev_priv))
		max = dev_priv->rps.max_freq;
	if (client_boost && new_delay < dev_priv->rps.boost_freq) {
		new_delay = dev_priv->rps.boost_freq;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else /* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;

		if (new_delay >= dev_priv->rps.max_freq_softlimit)
			adj = 0;
	} else if (client_boost || any_waiters(dev_priv)) {
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
		if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;
		else if (dev_priv->rps.cur_freq > dev_priv->rps.min_freq_softlimit)
			new_delay = dev_priv->rps.min_freq_softlimit;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
		if (adj < 0)
			adj *= 2;
		else /* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;

		if (new_delay <= dev_priv->rps.min_freq_softlimit)
			adj = 0;
	} else { /* unknown event */
		adj = 0;
	}

	dev_priv->rps.last_adj = adj;

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	new_delay += adj;
	new_delay = clamp_t(int, new_delay, min, max);

	if (intel_set_rps(dev_priv, new_delay)) {
		DRM_DEBUG_DRIVER("Failed to set new GPU frequency\n");
		dev_priv->rps.last_adj = 0;
	}

	mutex_unlock(&dev_priv->rps.hw_lock);

out:
	/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->rps.interrupts_enabled)
		gen6_unmask_pm_irq(dev_priv, dev_priv->pm_rps_events);
	spin_unlock_irq(&dev_priv->irq_lock);
}


/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * the same row is likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	uint32_t misccpctl;
	uint8_t slice = 0;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
1248 */ 1249 mutex_lock(&dev_priv->drm.struct_mutex); 1250 1251 /* If we've screwed up tracking, just let the interrupt fire again */ 1252 if (WARN_ON(!dev_priv->l3_parity.which_slice)) 1253 goto out; 1254 1255 misccpctl = I915_READ(GEN7_MISCCPCTL); 1256 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE); 1257 POSTING_READ(GEN7_MISCCPCTL); 1258 1259 while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) { 1260 i915_reg_t reg; 1261 1262 slice--; 1263 if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv))) 1264 break; 1265 1266 dev_priv->l3_parity.which_slice &= ~(1<<slice); 1267 1268 reg = GEN7_L3CDERRST1(slice); 1269 1270 error_status = I915_READ(reg); 1271 row = GEN7_PARITY_ERROR_ROW(error_status); 1272 bank = GEN7_PARITY_ERROR_BANK(error_status); 1273 subbank = GEN7_PARITY_ERROR_SUBBANK(error_status); 1274 1275 I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE); 1276 POSTING_READ(reg); 1277 1278 parity_event[0] = I915_L3_PARITY_UEVENT "=1"; 1279 parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row); 1280 parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank); 1281 parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank); 1282 parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice); 1283 parity_event[5] = NULL; 1284 1285 kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj, 1286 KOBJ_CHANGE, parity_event); 1287 1288 DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n", 1289 slice, row, bank, subbank); 1290 1291 kfree(parity_event[4]); 1292 kfree(parity_event[3]); 1293 kfree(parity_event[2]); 1294 kfree(parity_event[1]); 1295 } 1296 1297 I915_WRITE(GEN7_MISCCPCTL, misccpctl); 1298 1299 out: 1300 WARN_ON(dev_priv->l3_parity.which_slice); 1301 spin_lock_irq(&dev_priv->irq_lock); 1302 gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv)); 1303 spin_unlock_irq(&dev_priv->irq_lock); 1304 1305 mutex_unlock(&dev_priv->drm.struct_mutex); 1306 } 1307 1308 static void ivybridge_parity_error_irq_handler(struct drm_i915_private *dev_priv, 1309 u32 iir) 1310 { 1311 if (!HAS_L3_DPF(dev_priv)) 1312 return; 1313 1314 spin_lock(&dev_priv->irq_lock); 1315 gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv)); 1316 spin_unlock(&dev_priv->irq_lock); 1317 1318 iir &= GT_PARITY_ERROR(dev_priv); 1319 if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1) 1320 dev_priv->l3_parity.which_slice |= 1 << 1; 1321 1322 if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT) 1323 dev_priv->l3_parity.which_slice |= 1 << 0; 1324 1325 queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work); 1326 } 1327 1328 static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv, 1329 u32 gt_iir) 1330 { 1331 if (gt_iir & GT_RENDER_USER_INTERRUPT) 1332 notify_ring(dev_priv->engine[RCS]); 1333 if (gt_iir & ILK_BSD_USER_INTERRUPT) 1334 notify_ring(dev_priv->engine[VCS]); 1335 } 1336 1337 static void snb_gt_irq_handler(struct drm_i915_private *dev_priv, 1338 u32 gt_iir) 1339 { 1340 if (gt_iir & GT_RENDER_USER_INTERRUPT) 1341 notify_ring(dev_priv->engine[RCS]); 1342 if (gt_iir & GT_BSD_USER_INTERRUPT) 1343 notify_ring(dev_priv->engine[VCS]); 1344 if (gt_iir & GT_BLT_USER_INTERRUPT) 1345 notify_ring(dev_priv->engine[BCS]); 1346 1347 if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT | 1348 GT_BSD_CS_ERROR_INTERRUPT | 1349 GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) 1350 DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir); 1351 1352 if (gt_iir & GT_PARITY_ERROR(dev_priv)) 1353 ivybridge_parity_error_irq_handler(dev_priv, gt_iir); 1354 } 1355 1356 static __always_inline void 1357 
gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir, int test_shift) 1358 { 1359 bool tasklet = false; 1360 1361 if (iir & (GT_CONTEXT_SWITCH_INTERRUPT << test_shift)) { 1362 set_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted); 1363 tasklet = true; 1364 } 1365 1366 if (iir & (GT_RENDER_USER_INTERRUPT << test_shift)) { 1367 notify_ring(engine); 1368 tasklet |= i915.enable_guc_submission; 1369 } 1370 1371 if (tasklet) 1372 tasklet_hi_schedule(&engine->irq_tasklet); 1373 } 1374 1375 static irqreturn_t gen8_gt_irq_ack(struct drm_i915_private *dev_priv, 1376 u32 master_ctl, 1377 u32 gt_iir[4]) 1378 { 1379 irqreturn_t ret = IRQ_NONE; 1380 1381 if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) { 1382 gt_iir[0] = I915_READ_FW(GEN8_GT_IIR(0)); 1383 if (gt_iir[0]) { 1384 I915_WRITE_FW(GEN8_GT_IIR(0), gt_iir[0]); 1385 ret = IRQ_HANDLED; 1386 } else 1387 DRM_ERROR("The master control interrupt lied (GT0)!\n"); 1388 } 1389 1390 if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) { 1391 gt_iir[1] = I915_READ_FW(GEN8_GT_IIR(1)); 1392 if (gt_iir[1]) { 1393 I915_WRITE_FW(GEN8_GT_IIR(1), gt_iir[1]); 1394 ret = IRQ_HANDLED; 1395 } else 1396 DRM_ERROR("The master control interrupt lied (GT1)!\n"); 1397 } 1398 1399 if (master_ctl & GEN8_GT_VECS_IRQ) { 1400 gt_iir[3] = I915_READ_FW(GEN8_GT_IIR(3)); 1401 if (gt_iir[3]) { 1402 I915_WRITE_FW(GEN8_GT_IIR(3), gt_iir[3]); 1403 ret = IRQ_HANDLED; 1404 } else 1405 DRM_ERROR("The master control interrupt lied (GT3)!\n"); 1406 } 1407 1408 if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) { 1409 gt_iir[2] = I915_READ_FW(GEN8_GT_IIR(2)); 1410 if (gt_iir[2] & (dev_priv->pm_rps_events | 1411 dev_priv->pm_guc_events)) { 1412 I915_WRITE_FW(GEN8_GT_IIR(2), 1413 gt_iir[2] & (dev_priv->pm_rps_events | 1414 dev_priv->pm_guc_events)); 1415 ret = IRQ_HANDLED; 1416 } else 1417 DRM_ERROR("The master control interrupt lied (PM)!\n"); 1418 } 1419 1420 return ret; 1421 } 1422 1423 static void gen8_gt_irq_handler(struct drm_i915_private *dev_priv, 1424 u32 gt_iir[4]) 1425 { 1426 if (gt_iir[0]) { 1427 gen8_cs_irq_handler(dev_priv->engine[RCS], 1428 gt_iir[0], GEN8_RCS_IRQ_SHIFT); 1429 gen8_cs_irq_handler(dev_priv->engine[BCS], 1430 gt_iir[0], GEN8_BCS_IRQ_SHIFT); 1431 } 1432 1433 if (gt_iir[1]) { 1434 gen8_cs_irq_handler(dev_priv->engine[VCS], 1435 gt_iir[1], GEN8_VCS1_IRQ_SHIFT); 1436 gen8_cs_irq_handler(dev_priv->engine[VCS2], 1437 gt_iir[1], GEN8_VCS2_IRQ_SHIFT); 1438 } 1439 1440 if (gt_iir[3]) 1441 gen8_cs_irq_handler(dev_priv->engine[VECS], 1442 gt_iir[3], GEN8_VECS_IRQ_SHIFT); 1443 1444 if (gt_iir[2] & dev_priv->pm_rps_events) 1445 gen6_rps_irq_handler(dev_priv, gt_iir[2]); 1446 1447 if (gt_iir[2] & dev_priv->pm_guc_events) 1448 gen9_guc_irq_handler(dev_priv, gt_iir[2]); 1449 } 1450 1451 static bool bxt_port_hotplug_long_detect(enum port port, u32 val) 1452 { 1453 switch (port) { 1454 case PORT_A: 1455 return val & PORTA_HOTPLUG_LONG_DETECT; 1456 case PORT_B: 1457 return val & PORTB_HOTPLUG_LONG_DETECT; 1458 case PORT_C: 1459 return val & PORTC_HOTPLUG_LONG_DETECT; 1460 default: 1461 return false; 1462 } 1463 } 1464 1465 static bool spt_port_hotplug2_long_detect(enum port port, u32 val) 1466 { 1467 switch (port) { 1468 case PORT_E: 1469 return val & PORTE_HOTPLUG_LONG_DETECT; 1470 default: 1471 return false; 1472 } 1473 } 1474 1475 static bool spt_port_hotplug_long_detect(enum port port, u32 val) 1476 { 1477 switch (port) { 1478 case PORT_A: 1479 return val & PORTA_HOTPLUG_LONG_DETECT; 1480 case PORT_B: 1481 return val & PORTB_HOTPLUG_LONG_DETECT; 1482 case PORT_C: 
1483 return val & PORTC_HOTPLUG_LONG_DETECT; 1484 case PORT_D: 1485 return val & PORTD_HOTPLUG_LONG_DETECT; 1486 default: 1487 return false; 1488 } 1489 } 1490 1491 static bool ilk_port_hotplug_long_detect(enum port port, u32 val) 1492 { 1493 switch (port) { 1494 case PORT_A: 1495 return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT; 1496 default: 1497 return false; 1498 } 1499 } 1500 1501 static bool pch_port_hotplug_long_detect(enum port port, u32 val) 1502 { 1503 switch (port) { 1504 case PORT_B: 1505 return val & PORTB_HOTPLUG_LONG_DETECT; 1506 case PORT_C: 1507 return val & PORTC_HOTPLUG_LONG_DETECT; 1508 case PORT_D: 1509 return val & PORTD_HOTPLUG_LONG_DETECT; 1510 default: 1511 return false; 1512 } 1513 } 1514 1515 static bool i9xx_port_hotplug_long_detect(enum port port, u32 val) 1516 { 1517 switch (port) { 1518 case PORT_B: 1519 return val & PORTB_HOTPLUG_INT_LONG_PULSE; 1520 case PORT_C: 1521 return val & PORTC_HOTPLUG_INT_LONG_PULSE; 1522 case PORT_D: 1523 return val & PORTD_HOTPLUG_INT_LONG_PULSE; 1524 default: 1525 return false; 1526 } 1527 } 1528 1529 /* 1530 * Get a bit mask of pins that have triggered, and which ones may be long. 1531 * This can be called multiple times with the same masks to accumulate 1532 * hotplug detection results from several registers. 1533 * 1534 * Note that the caller is expected to zero out the masks initially. 1535 */ 1536 static void intel_get_hpd_pins(u32 *pin_mask, u32 *long_mask, 1537 u32 hotplug_trigger, u32 dig_hotplug_reg, 1538 const u32 hpd[HPD_NUM_PINS], 1539 bool long_pulse_detect(enum port port, u32 val)) 1540 { 1541 enum port port; 1542 int i; 1543 1544 for_each_hpd_pin(i) { 1545 if ((hpd[i] & hotplug_trigger) == 0) 1546 continue; 1547 1548 *pin_mask |= BIT(i); 1549 1550 if (!intel_hpd_pin_to_port(i, &port)) 1551 continue; 1552 1553 if (long_pulse_detect(port, dig_hotplug_reg)) 1554 *long_mask |= BIT(i); 1555 } 1556 1557 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x\n", 1558 hotplug_trigger, dig_hotplug_reg, *pin_mask); 1559 1560 } 1561 1562 static void gmbus_irq_handler(struct drm_i915_private *dev_priv) 1563 { 1564 wake_up_all(&dev_priv->gmbus_wait_queue); 1565 } 1566 1567 static void dp_aux_irq_handler(struct drm_i915_private *dev_priv) 1568 { 1569 wake_up_all(&dev_priv->gmbus_wait_queue); 1570 } 1571 1572 #if defined(CONFIG_DEBUG_FS) 1573 static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 1574 enum pipe pipe, 1575 uint32_t crc0, uint32_t crc1, 1576 uint32_t crc2, uint32_t crc3, 1577 uint32_t crc4) 1578 { 1579 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe]; 1580 struct intel_pipe_crc_entry *entry; 1581 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); 1582 struct drm_driver *driver = dev_priv->drm.driver; 1583 uint32_t crcs[5]; 1584 int head, tail; 1585 1586 spin_lock(&pipe_crc->lock); 1587 if (pipe_crc->source) { 1588 if (!pipe_crc->entries) { 1589 spin_unlock(&pipe_crc->lock); 1590 DRM_DEBUG_KMS("spurious interrupt\n"); 1591 return; 1592 } 1593 1594 head = pipe_crc->head; 1595 tail = pipe_crc->tail; 1596 1597 if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) { 1598 spin_unlock(&pipe_crc->lock); 1599 DRM_ERROR("CRC buffer overflowing\n"); 1600 return; 1601 } 1602 1603 entry = &pipe_crc->entries[head]; 1604 1605 entry->frame = driver->get_vblank_counter(&dev_priv->drm, pipe); 1606 entry->crc[0] = crc0; 1607 entry->crc[1] = crc1; 1608 entry->crc[2] = crc2; 1609 entry->crc[3] = crc3; 1610 entry->crc[4] = crc4; 1611 1612 head = (head + 1) & 
(INTEL_PIPE_CRC_ENTRIES_NR - 1); 1613 pipe_crc->head = head; 1614 1615 spin_unlock(&pipe_crc->lock); 1616 1617 wake_up_interruptible(&pipe_crc->wq); 1618 } else { 1619 /* 1620 * For some not yet identified reason, the first CRC is 1621 * bonkers. So let's just wait for the next vblank and read 1622 * out the buggy result. 1623 * 1624 * On CHV sometimes the second CRC is bonkers as well, so 1625 * don't trust that one either. 1626 */ 1627 if (pipe_crc->skipped == 0 || 1628 (IS_CHERRYVIEW(dev_priv) && pipe_crc->skipped == 1)) { 1629 pipe_crc->skipped++; 1630 spin_unlock(&pipe_crc->lock); 1631 return; 1632 } 1633 spin_unlock(&pipe_crc->lock); 1634 crcs[0] = crc0; 1635 crcs[1] = crc1; 1636 crcs[2] = crc2; 1637 crcs[3] = crc3; 1638 crcs[4] = crc4; 1639 drm_crtc_add_crc_entry(&crtc->base, true, 1640 drm_accurate_vblank_count(&crtc->base), 1641 crcs); 1642 } 1643 } 1644 #else 1645 static inline void 1646 display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 1647 enum pipe pipe, 1648 uint32_t crc0, uint32_t crc1, 1649 uint32_t crc2, uint32_t crc3, 1650 uint32_t crc4) {} 1651 #endif 1652 1653 1654 static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 1655 enum pipe pipe) 1656 { 1657 display_pipe_crc_irq_handler(dev_priv, pipe, 1658 I915_READ(PIPE_CRC_RES_1_IVB(pipe)), 1659 0, 0, 0, 0); 1660 } 1661 1662 static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 1663 enum pipe pipe) 1664 { 1665 display_pipe_crc_irq_handler(dev_priv, pipe, 1666 I915_READ(PIPE_CRC_RES_1_IVB(pipe)), 1667 I915_READ(PIPE_CRC_RES_2_IVB(pipe)), 1668 I915_READ(PIPE_CRC_RES_3_IVB(pipe)), 1669 I915_READ(PIPE_CRC_RES_4_IVB(pipe)), 1670 I915_READ(PIPE_CRC_RES_5_IVB(pipe))); 1671 } 1672 1673 static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 1674 enum pipe pipe) 1675 { 1676 uint32_t res1, res2; 1677 1678 if (INTEL_GEN(dev_priv) >= 3) 1679 res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe)); 1680 else 1681 res1 = 0; 1682 1683 if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) 1684 res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe)); 1685 else 1686 res2 = 0; 1687 1688 display_pipe_crc_irq_handler(dev_priv, pipe, 1689 I915_READ(PIPE_CRC_RES_RED(pipe)), 1690 I915_READ(PIPE_CRC_RES_GREEN(pipe)), 1691 I915_READ(PIPE_CRC_RES_BLUE(pipe)), 1692 res1, res2); 1693 } 1694 1695 /* The RPS events need forcewake, so we add them to a work queue and mask their 1696 * IMR bits until the work is done. Other interrupts can be processed without 1697 * the work queue. 
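 * gen6_rps_irq_handler() below masks the triggered RPS bits in PMIMR and
 * queues rps.work; gen6_pm_rps_work() above services the request and
 * unmasks the bits again when it is done.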
 */
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	if (pm_iir & dev_priv->pm_rps_events) {
		spin_lock(&dev_priv->irq_lock);
		gen6_mask_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
		if (dev_priv->rps.interrupts_enabled) {
			dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
			schedule_work(&dev_priv->rps.work);
		}
		spin_unlock(&dev_priv->irq_lock);
	}

	if (INTEL_INFO(dev_priv)->gen >= 8)
		return;

	if (HAS_VEBOX(dev_priv)) {
		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
			notify_ring(dev_priv->engine[VECS]);

		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
	}
}

static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 gt_iir)
{
	if (gt_iir & GEN9_GUC_TO_HOST_INT_EVENT) {
		/* Sample the log buffer flush related bits & clear them out now
		 * from the message identity register to minimize the
		 * probability of losing a flush interrupt when there are back
		 * to back flush interrupts.
		 * There can be a new flush interrupt, for a different log buffer
		 * type (like for ISR), whilst the Host is handling one (for DPC).
		 * Since the same bit is used in the message register for ISR & DPC,
		 * it could happen that the GuC sets the bit for the 2nd interrupt
		 * but the Host clears out the bit on handling the 1st interrupt.
		 */
		u32 msg, flush;

		msg = I915_READ(SOFT_SCRATCH(15));
		flush = msg & (INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED |
			       INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER);
		if (flush) {
			/* Clear the message bits that are handled */
			I915_WRITE(SOFT_SCRATCH(15), msg & ~flush);

			/* Handle flush interrupt in bottom half */
			queue_work(dev_priv->guc.log.runtime.flush_wq,
				   &dev_priv->guc.log.runtime.flush_work);

			dev_priv->guc.log.flush_interrupt_count++;
		} else {
			/* Leaving the unhandled event bits set will not cause
			 * the interrupt to be re-triggered.
			 */
		}
	}
}

static bool intel_pipe_handle_vblank(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	bool ret;

	ret = drm_handle_vblank(&dev_priv->drm, pipe);
	if (ret)
		intel_finish_page_flip_mmio(dev_priv, pipe);

	return ret;
}

static void valleyview_pipestat_irq_ack(struct drm_i915_private *dev_priv,
					u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	int pipe;

	spin_lock(&dev_priv->irq_lock);

	if (!dev_priv->display_irqs_enabled) {
		spin_unlock(&dev_priv->irq_lock);
		return;
	}

	for_each_pipe(dev_priv, pipe) {
		i915_reg_t reg;
		u32 mask, iir_bit = 0;

		/*
		 * PIPESTAT bits get signalled even when the interrupt is
		 * disabled with the mask bits, and some of the status bits do
		 * not generate interrupts at all (like the underrun bit). Hence
		 * we need to be careful that we only handle what we want to
		 * handle.
		 */

		/* fifo underruns are filtered in the underrun handler.
*/ 1794 mask = PIPE_FIFO_UNDERRUN_STATUS; 1795 1796 switch (pipe) { 1797 case PIPE_A: 1798 iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT; 1799 break; 1800 case PIPE_B: 1801 iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; 1802 break; 1803 case PIPE_C: 1804 iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT; 1805 break; 1806 } 1807 if (iir & iir_bit) 1808 mask |= dev_priv->pipestat_irq_mask[pipe]; 1809 1810 if (!mask) 1811 continue; 1812 1813 reg = PIPESTAT(pipe); 1814 mask |= PIPESTAT_INT_ENABLE_MASK; 1815 pipe_stats[pipe] = I915_READ(reg) & mask; 1816 1817 /* 1818 * Clear the PIPE*STAT regs before the IIR 1819 */ 1820 if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS | 1821 PIPESTAT_INT_STATUS_MASK)) 1822 I915_WRITE(reg, pipe_stats[pipe]); 1823 } 1824 spin_unlock(&dev_priv->irq_lock); 1825 } 1826 1827 static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv, 1828 u32 pipe_stats[I915_MAX_PIPES]) 1829 { 1830 enum pipe pipe; 1831 1832 for_each_pipe(dev_priv, pipe) { 1833 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS && 1834 intel_pipe_handle_vblank(dev_priv, pipe)) 1835 intel_check_page_flip(dev_priv, pipe); 1836 1837 if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) 1838 intel_finish_page_flip_cs(dev_priv, pipe); 1839 1840 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 1841 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 1842 1843 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 1844 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 1845 } 1846 1847 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 1848 gmbus_irq_handler(dev_priv); 1849 } 1850 1851 static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv) 1852 { 1853 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); 1854 1855 if (hotplug_status) 1856 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); 1857 1858 return hotplug_status; 1859 } 1860 1861 static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv, 1862 u32 hotplug_status) 1863 { 1864 u32 pin_mask = 0, long_mask = 0; 1865 1866 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || 1867 IS_CHERRYVIEW(dev_priv)) { 1868 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X; 1869 1870 if (hotplug_trigger) { 1871 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, 1872 hotplug_trigger, hpd_status_g4x, 1873 i9xx_port_hotplug_long_detect); 1874 1875 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 1876 } 1877 1878 if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X) 1879 dp_aux_irq_handler(dev_priv); 1880 } else { 1881 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; 1882 1883 if (hotplug_trigger) { 1884 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, 1885 hotplug_trigger, hpd_status_i915, 1886 i9xx_port_hotplug_long_detect); 1887 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 1888 } 1889 } 1890 } 1891 1892 static irqreturn_t valleyview_irq_handler(int irq, void *arg) 1893 { 1894 struct drm_device *dev = arg; 1895 struct drm_i915_private *dev_priv = to_i915(dev); 1896 irqreturn_t ret = IRQ_NONE; 1897 1898 if (!intel_irqs_enabled(dev_priv)) 1899 return IRQ_NONE; 1900 1901 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 1902 disable_rpm_wakeref_asserts(dev_priv); 1903 1904 do { 1905 u32 iir, gt_iir, pm_iir; 1906 u32 pipe_stats[I915_MAX_PIPES] = {}; 1907 u32 hotplug_status = 0; 1908 u32 ier = 0; 1909 1910 gt_iir = I915_READ(GTIIR); 1911 pm_iir = I915_READ(GEN6_PMIIR); 1912 iir = I915_READ(VLV_IIR); 1913 1914 if (gt_iir == 0 && pm_iir == 0 && iir == 0) 1915 break; 1916 
1917 ret = IRQ_HANDLED; 1918 1919 /* 1920 * Theory on interrupt generation, based on empirical evidence: 1921 * 1922 * x = ((VLV_IIR & VLV_IER) || 1923 * (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) && 1924 * (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE))); 1925 * 1926 * A CPU interrupt will only be raised when 'x' has a 0->1 edge. 1927 * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to 1928 * guarantee the CPU interrupt will be raised again even if we 1929 * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR 1930 * bits this time around. 1931 */ 1932 I915_WRITE(VLV_MASTER_IER, 0); 1933 ier = I915_READ(VLV_IER); 1934 I915_WRITE(VLV_IER, 0); 1935 1936 if (gt_iir) 1937 I915_WRITE(GTIIR, gt_iir); 1938 if (pm_iir) 1939 I915_WRITE(GEN6_PMIIR, pm_iir); 1940 1941 if (iir & I915_DISPLAY_PORT_INTERRUPT) 1942 hotplug_status = i9xx_hpd_irq_ack(dev_priv); 1943 1944 /* Call regardless, as some status bits might not be 1945 * signalled in iir */ 1946 valleyview_pipestat_irq_ack(dev_priv, iir, pipe_stats); 1947 1948 if (iir & (I915_LPE_PIPE_A_INTERRUPT | 1949 I915_LPE_PIPE_B_INTERRUPT)) 1950 intel_lpe_audio_irq_handler(dev_priv); 1951 1952 /* 1953 * VLV_IIR is single buffered, and reflects the level 1954 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last. 1955 */ 1956 if (iir) 1957 I915_WRITE(VLV_IIR, iir); 1958 1959 I915_WRITE(VLV_IER, ier); 1960 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); 1961 POSTING_READ(VLV_MASTER_IER); 1962 1963 if (gt_iir) 1964 snb_gt_irq_handler(dev_priv, gt_iir); 1965 if (pm_iir) 1966 gen6_rps_irq_handler(dev_priv, pm_iir); 1967 1968 if (hotplug_status) 1969 i9xx_hpd_irq_handler(dev_priv, hotplug_status); 1970 1971 valleyview_pipestat_irq_handler(dev_priv, pipe_stats); 1972 } while (0); 1973 1974 enable_rpm_wakeref_asserts(dev_priv); 1975 1976 return ret; 1977 } 1978 1979 static irqreturn_t cherryview_irq_handler(int irq, void *arg) 1980 { 1981 struct drm_device *dev = arg; 1982 struct drm_i915_private *dev_priv = to_i915(dev); 1983 irqreturn_t ret = IRQ_NONE; 1984 1985 if (!intel_irqs_enabled(dev_priv)) 1986 return IRQ_NONE; 1987 1988 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 1989 disable_rpm_wakeref_asserts(dev_priv); 1990 1991 do { 1992 u32 master_ctl, iir; 1993 u32 gt_iir[4] = {}; 1994 u32 pipe_stats[I915_MAX_PIPES] = {}; 1995 u32 hotplug_status = 0; 1996 u32 ier = 0; 1997 1998 master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL; 1999 iir = I915_READ(VLV_IIR); 2000 2001 if (master_ctl == 0 && iir == 0) 2002 break; 2003 2004 ret = IRQ_HANDLED; 2005 2006 /* 2007 * Theory on interrupt generation, based on empirical evidence: 2008 * 2009 * x = ((VLV_IIR & VLV_IER) || 2010 * ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) && 2011 * (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL))); 2012 * 2013 * A CPU interrupt will only be raised when 'x' has a 0->1 edge. 2014 * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to 2015 * guarantee the CPU interrupt will be raised again even if we 2016 * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL 2017 * bits this time around. 
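* In other words, writing 0 to GEN8_MASTER_IRQ and VLV_IER forces 'x' back to 0, so restoring them at the end of this pass produces a fresh 0->1 edge (and thus a new CPU interrupt) if any IIR bits are still pending.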
2018 */ 2019 I915_WRITE(GEN8_MASTER_IRQ, 0); 2020 ier = I915_READ(VLV_IER); 2021 I915_WRITE(VLV_IER, 0); 2022 2023 gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir); 2024 2025 if (iir & I915_DISPLAY_PORT_INTERRUPT) 2026 hotplug_status = i9xx_hpd_irq_ack(dev_priv); 2027 2028 /* Call regardless, as some status bits might not be 2029 * signalled in iir */ 2030 valleyview_pipestat_irq_ack(dev_priv, iir, pipe_stats); 2031 2032 if (iir & (I915_LPE_PIPE_A_INTERRUPT | 2033 I915_LPE_PIPE_B_INTERRUPT | 2034 I915_LPE_PIPE_C_INTERRUPT)) 2035 intel_lpe_audio_irq_handler(dev_priv); 2036 2037 /* 2038 * VLV_IIR is single buffered, and reflects the level 2039 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last. 2040 */ 2041 if (iir) 2042 I915_WRITE(VLV_IIR, iir); 2043 2044 I915_WRITE(VLV_IER, ier); 2045 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 2046 POSTING_READ(GEN8_MASTER_IRQ); 2047 2048 gen8_gt_irq_handler(dev_priv, gt_iir); 2049 2050 if (hotplug_status) 2051 i9xx_hpd_irq_handler(dev_priv, hotplug_status); 2052 2053 valleyview_pipestat_irq_handler(dev_priv, pipe_stats); 2054 } while (0); 2055 2056 enable_rpm_wakeref_asserts(dev_priv); 2057 2058 return ret; 2059 } 2060 2061 static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv, 2062 u32 hotplug_trigger, 2063 const u32 hpd[HPD_NUM_PINS]) 2064 { 2065 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; 2066 2067 /* 2068 * Somehow the PCH doesn't seem to really ack the interrupt to the CPU 2069 * unless we touch the hotplug register, even if hotplug_trigger is 2070 * zero. Not acking leads to "The master control interrupt lied (SDE)!" 2071 * errors. 2072 */ 2073 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); 2074 if (!hotplug_trigger) { 2075 u32 mask = PORTA_HOTPLUG_STATUS_MASK | 2076 PORTD_HOTPLUG_STATUS_MASK | 2077 PORTC_HOTPLUG_STATUS_MASK | 2078 PORTB_HOTPLUG_STATUS_MASK; 2079 dig_hotplug_reg &= ~mask; 2080 } 2081 2082 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); 2083 if (!hotplug_trigger) 2084 return; 2085 2086 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, 2087 dig_hotplug_reg, hpd, 2088 pch_port_hotplug_long_detect); 2089 2090 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2091 } 2092 2093 static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) 2094 { 2095 int pipe; 2096 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK; 2097 2098 ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ibx); 2099 2100 if (pch_iir & SDE_AUDIO_POWER_MASK) { 2101 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >> 2102 SDE_AUDIO_POWER_SHIFT); 2103 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", 2104 port_name(port)); 2105 } 2106 2107 if (pch_iir & SDE_AUX_MASK) 2108 dp_aux_irq_handler(dev_priv); 2109 2110 if (pch_iir & SDE_GMBUS) 2111 gmbus_irq_handler(dev_priv); 2112 2113 if (pch_iir & SDE_AUDIO_HDCP_MASK) 2114 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n"); 2115 2116 if (pch_iir & SDE_AUDIO_TRANS_MASK) 2117 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n"); 2118 2119 if (pch_iir & SDE_POISON) 2120 DRM_ERROR("PCH poison interrupt\n"); 2121 2122 if (pch_iir & SDE_FDI_MASK) 2123 for_each_pipe(dev_priv, pipe) 2124 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 2125 pipe_name(pipe), 2126 I915_READ(FDI_RX_IIR(pipe))); 2127 2128 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE)) 2129 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n"); 2130 2131 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR)) 2132 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n"); 2133 2134 if (pch_iir & 
SDE_TRANSA_FIFO_UNDER) 2135 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A); 2136 2137 if (pch_iir & SDE_TRANSB_FIFO_UNDER) 2138 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B); 2139 } 2140 2141 static void ivb_err_int_handler(struct drm_i915_private *dev_priv) 2142 { 2143 u32 err_int = I915_READ(GEN7_ERR_INT); 2144 enum pipe pipe; 2145 2146 if (err_int & ERR_INT_POISON) 2147 DRM_ERROR("Poison interrupt\n"); 2148 2149 for_each_pipe(dev_priv, pipe) { 2150 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) 2151 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2152 2153 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) { 2154 if (IS_IVYBRIDGE(dev_priv)) 2155 ivb_pipe_crc_irq_handler(dev_priv, pipe); 2156 else 2157 hsw_pipe_crc_irq_handler(dev_priv, pipe); 2158 } 2159 } 2160 2161 I915_WRITE(GEN7_ERR_INT, err_int); 2162 } 2163 2164 static void cpt_serr_int_handler(struct drm_i915_private *dev_priv) 2165 { 2166 u32 serr_int = I915_READ(SERR_INT); 2167 2168 if (serr_int & SERR_INT_POISON) 2169 DRM_ERROR("PCH poison interrupt\n"); 2170 2171 if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN) 2172 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A); 2173 2174 if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN) 2175 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B); 2176 2177 if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN) 2178 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_C); 2179 2180 I915_WRITE(SERR_INT, serr_int); 2181 } 2182 2183 static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) 2184 { 2185 int pipe; 2186 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; 2187 2188 ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_cpt); 2189 2190 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) { 2191 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> 2192 SDE_AUDIO_POWER_SHIFT_CPT); 2193 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n", 2194 port_name(port)); 2195 } 2196 2197 if (pch_iir & SDE_AUX_MASK_CPT) 2198 dp_aux_irq_handler(dev_priv); 2199 2200 if (pch_iir & SDE_GMBUS_CPT) 2201 gmbus_irq_handler(dev_priv); 2202 2203 if (pch_iir & SDE_AUDIO_CP_REQ_CPT) 2204 DRM_DEBUG_DRIVER("Audio CP request interrupt\n"); 2205 2206 if (pch_iir & SDE_AUDIO_CP_CHG_CPT) 2207 DRM_DEBUG_DRIVER("Audio CP change interrupt\n"); 2208 2209 if (pch_iir & SDE_FDI_MASK_CPT) 2210 for_each_pipe(dev_priv, pipe) 2211 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 2212 pipe_name(pipe), 2213 I915_READ(FDI_RX_IIR(pipe))); 2214 2215 if (pch_iir & SDE_ERROR_CPT) 2216 cpt_serr_int_handler(dev_priv); 2217 } 2218 2219 static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) 2220 { 2221 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT & 2222 ~SDE_PORTE_HOTPLUG_SPT; 2223 u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT; 2224 u32 pin_mask = 0, long_mask = 0; 2225 2226 if (hotplug_trigger) { 2227 u32 dig_hotplug_reg; 2228 2229 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); 2230 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); 2231 2232 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, 2233 dig_hotplug_reg, hpd_spt, 2234 spt_port_hotplug_long_detect); 2235 } 2236 2237 if (hotplug2_trigger) { 2238 u32 dig_hotplug_reg; 2239 2240 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2); 2241 I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg); 2242 2243 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug2_trigger, 2244 dig_hotplug_reg, hpd_spt, 2245 spt_port_hotplug2_long_detect); 2246 } 2247 2248 if (pin_mask) 2249 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 
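/* Note that port A-D hotplug status was acked via PCH_PORT_HOTPLUG above, while port E has its own status register (PCH_PORT_HOTPLUG2) on SPT/KBP. */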
2250 2251 if (pch_iir & SDE_GMBUS_CPT) 2252 gmbus_irq_handler(dev_priv); 2253 } 2254 2255 static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv, 2256 u32 hotplug_trigger, 2257 const u32 hpd[HPD_NUM_PINS]) 2258 { 2259 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; 2260 2261 dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL); 2262 I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg); 2263 2264 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, 2265 dig_hotplug_reg, hpd, 2266 ilk_port_hotplug_long_detect); 2267 2268 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2269 } 2270 2271 static void ilk_display_irq_handler(struct drm_i915_private *dev_priv, 2272 u32 de_iir) 2273 { 2274 enum pipe pipe; 2275 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG; 2276 2277 if (hotplug_trigger) 2278 ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ilk); 2279 2280 if (de_iir & DE_AUX_CHANNEL_A) 2281 dp_aux_irq_handler(dev_priv); 2282 2283 if (de_iir & DE_GSE) 2284 intel_opregion_asle_intr(dev_priv); 2285 2286 if (de_iir & DE_POISON) 2287 DRM_ERROR("Poison interrupt\n"); 2288 2289 for_each_pipe(dev_priv, pipe) { 2290 if (de_iir & DE_PIPE_VBLANK(pipe) && 2291 intel_pipe_handle_vblank(dev_priv, pipe)) 2292 intel_check_page_flip(dev_priv, pipe); 2293 2294 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe)) 2295 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2296 2297 if (de_iir & DE_PIPE_CRC_DONE(pipe)) 2298 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 2299 2300 /* plane/pipes map 1:1 on ilk+ */ 2301 if (de_iir & DE_PLANE_FLIP_DONE(pipe)) 2302 intel_finish_page_flip_cs(dev_priv, pipe); 2303 } 2304 2305 /* check event from PCH */ 2306 if (de_iir & DE_PCH_EVENT) { 2307 u32 pch_iir = I915_READ(SDEIIR); 2308 2309 if (HAS_PCH_CPT(dev_priv)) 2310 cpt_irq_handler(dev_priv, pch_iir); 2311 else 2312 ibx_irq_handler(dev_priv, pch_iir); 2313 2314 /* should clear PCH hotplug event before clear CPU irq */ 2315 I915_WRITE(SDEIIR, pch_iir); 2316 } 2317 2318 if (IS_GEN5(dev_priv) && de_iir & DE_PCU_EVENT) 2319 ironlake_rps_change_irq_handler(dev_priv); 2320 } 2321 2322 static void ivb_display_irq_handler(struct drm_i915_private *dev_priv, 2323 u32 de_iir) 2324 { 2325 enum pipe pipe; 2326 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB; 2327 2328 if (hotplug_trigger) 2329 ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ivb); 2330 2331 if (de_iir & DE_ERR_INT_IVB) 2332 ivb_err_int_handler(dev_priv); 2333 2334 if (de_iir & DE_AUX_CHANNEL_A_IVB) 2335 dp_aux_irq_handler(dev_priv); 2336 2337 if (de_iir & DE_GSE_IVB) 2338 intel_opregion_asle_intr(dev_priv); 2339 2340 for_each_pipe(dev_priv, pipe) { 2341 if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) && 2342 intel_pipe_handle_vblank(dev_priv, pipe)) 2343 intel_check_page_flip(dev_priv, pipe); 2344 2345 /* plane/pipes map 1:1 on ilk+ */ 2346 if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) 2347 intel_finish_page_flip_cs(dev_priv, pipe); 2348 } 2349 2350 /* check event from PCH */ 2351 if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) { 2352 u32 pch_iir = I915_READ(SDEIIR); 2353 2354 cpt_irq_handler(dev_priv, pch_iir); 2355 2356 /* clear PCH hotplug event before clear CPU irq */ 2357 I915_WRITE(SDEIIR, pch_iir); 2358 } 2359 } 2360 2361 /* 2362 * To handle irqs with the minimum potential races with fresh interrupts, we: 2363 * 1 - Disable Master Interrupt Control. 2364 * 2 - Find the source(s) of the interrupt. 2365 * 3 - Clear the Interrupt Identity bits (IIR). 2366 * 4 - Process the interrupt(s) that had bits set in the IIRs. 
2367 * 5 - Re-enable Master Interrupt Control. 2368 */ 2369 static irqreturn_t ironlake_irq_handler(int irq, void *arg) 2370 { 2371 struct drm_device *dev = arg; 2372 struct drm_i915_private *dev_priv = to_i915(dev); 2373 u32 de_iir, gt_iir, de_ier, sde_ier = 0; 2374 irqreturn_t ret = IRQ_NONE; 2375 2376 if (!intel_irqs_enabled(dev_priv)) 2377 return IRQ_NONE; 2378 2379 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2380 disable_rpm_wakeref_asserts(dev_priv); 2381 2382 /* disable master interrupt before clearing iir */ 2383 de_ier = I915_READ(DEIER); 2384 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); 2385 POSTING_READ(DEIER); 2386 2387 /* Disable south interrupts. We'll only write to SDEIIR once, so further 2388 * interrupts will be stored on its back queue, and then we'll be 2389 * able to process them after we restore SDEIER (as soon as we restore 2390 * it, we'll get an interrupt if SDEIIR still has something to process 2391 * due to its back queue). */ 2392 if (!HAS_PCH_NOP(dev_priv)) { 2393 sde_ier = I915_READ(SDEIER); 2394 I915_WRITE(SDEIER, 0); 2395 POSTING_READ(SDEIER); 2396 } 2397 2398 /* Find, clear, then process each source of interrupt */ 2399 2400 gt_iir = I915_READ(GTIIR); 2401 if (gt_iir) { 2402 I915_WRITE(GTIIR, gt_iir); 2403 ret = IRQ_HANDLED; 2404 if (INTEL_GEN(dev_priv) >= 6) 2405 snb_gt_irq_handler(dev_priv, gt_iir); 2406 else 2407 ilk_gt_irq_handler(dev_priv, gt_iir); 2408 } 2409 2410 de_iir = I915_READ(DEIIR); 2411 if (de_iir) { 2412 I915_WRITE(DEIIR, de_iir); 2413 ret = IRQ_HANDLED; 2414 if (INTEL_GEN(dev_priv) >= 7) 2415 ivb_display_irq_handler(dev_priv, de_iir); 2416 else 2417 ilk_display_irq_handler(dev_priv, de_iir); 2418 } 2419 2420 if (INTEL_GEN(dev_priv) >= 6) { 2421 u32 pm_iir = I915_READ(GEN6_PMIIR); 2422 if (pm_iir) { 2423 I915_WRITE(GEN6_PMIIR, pm_iir); 2424 ret = IRQ_HANDLED; 2425 gen6_rps_irq_handler(dev_priv, pm_iir); 2426 } 2427 } 2428 2429 I915_WRITE(DEIER, de_ier); 2430 POSTING_READ(DEIER); 2431 if (!HAS_PCH_NOP(dev_priv)) { 2432 I915_WRITE(SDEIER, sde_ier); 2433 POSTING_READ(SDEIER); 2434 } 2435 2436 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2437 enable_rpm_wakeref_asserts(dev_priv); 2438 2439 return ret; 2440 } 2441 2442 static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv, 2443 u32 hotplug_trigger, 2444 const u32 hpd[HPD_NUM_PINS]) 2445 { 2446 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; 2447 2448 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); 2449 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); 2450 2451 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, 2452 dig_hotplug_reg, hpd, 2453 bxt_port_hotplug_long_detect); 2454 2455 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2456 } 2457 2458 static irqreturn_t 2459 gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) 2460 { 2461 irqreturn_t ret = IRQ_NONE; 2462 u32 iir; 2463 enum pipe pipe; 2464 2465 if (master_ctl & GEN8_DE_MISC_IRQ) { 2466 iir = I915_READ(GEN8_DE_MISC_IIR); 2467 if (iir) { 2468 I915_WRITE(GEN8_DE_MISC_IIR, iir); 2469 ret = IRQ_HANDLED; 2470 if (iir & GEN8_DE_MISC_GSE) 2471 intel_opregion_asle_intr(dev_priv); 2472 else 2473 DRM_ERROR("Unexpected DE Misc interrupt\n"); 2474 } 2475 else 2476 DRM_ERROR("The master control interrupt lied (DE MISC)!\n"); 2477 } 2478 2479 if (master_ctl & GEN8_DE_PORT_IRQ) { 2480 iir = I915_READ(GEN8_DE_PORT_IIR); 2481 if (iir) { 2482 u32 tmp_mask; 2483 bool found = false; 2484 2485 I915_WRITE(GEN8_DE_PORT_IIR, iir); 2486 ret =
IRQ_HANDLED; 2487 2488 tmp_mask = GEN8_AUX_CHANNEL_A; 2489 if (INTEL_INFO(dev_priv)->gen >= 9) 2490 tmp_mask |= GEN9_AUX_CHANNEL_B | 2491 GEN9_AUX_CHANNEL_C | 2492 GEN9_AUX_CHANNEL_D; 2493 2494 if (iir & tmp_mask) { 2495 dp_aux_irq_handler(dev_priv); 2496 found = true; 2497 } 2498 2499 if (IS_GEN9_LP(dev_priv)) { 2500 tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK; 2501 if (tmp_mask) { 2502 bxt_hpd_irq_handler(dev_priv, tmp_mask, 2503 hpd_bxt); 2504 found = true; 2505 } 2506 } else if (IS_BROADWELL(dev_priv)) { 2507 tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG; 2508 if (tmp_mask) { 2509 ilk_hpd_irq_handler(dev_priv, 2510 tmp_mask, hpd_bdw); 2511 found = true; 2512 } 2513 } 2514 2515 if (IS_GEN9_LP(dev_priv) && (iir & BXT_DE_PORT_GMBUS)) { 2516 gmbus_irq_handler(dev_priv); 2517 found = true; 2518 } 2519 2520 if (!found) 2521 DRM_ERROR("Unexpected DE Port interrupt\n"); 2522 } 2523 else 2524 DRM_ERROR("The master control interrupt lied (DE PORT)!\n"); 2525 } 2526 2527 for_each_pipe(dev_priv, pipe) { 2528 u32 flip_done, fault_errors; 2529 2530 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe))) 2531 continue; 2532 2533 iir = I915_READ(GEN8_DE_PIPE_IIR(pipe)); 2534 if (!iir) { 2535 DRM_ERROR("The master control interrupt lied (DE PIPE)!\n"); 2536 continue; 2537 } 2538 2539 ret = IRQ_HANDLED; 2540 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir); 2541 2542 if (iir & GEN8_PIPE_VBLANK && 2543 intel_pipe_handle_vblank(dev_priv, pipe)) 2544 intel_check_page_flip(dev_priv, pipe); 2545 2546 flip_done = iir; 2547 if (INTEL_INFO(dev_priv)->gen >= 9) 2548 flip_done &= GEN9_PIPE_PLANE1_FLIP_DONE; 2549 else 2550 flip_done &= GEN8_PIPE_PRIMARY_FLIP_DONE; 2551 2552 if (flip_done) 2553 intel_finish_page_flip_cs(dev_priv, pipe); 2554 2555 if (iir & GEN8_PIPE_CDCLK_CRC_DONE) 2556 hsw_pipe_crc_irq_handler(dev_priv, pipe); 2557 2558 if (iir & GEN8_PIPE_FIFO_UNDERRUN) 2559 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2560 2561 fault_errors = iir; 2562 if (INTEL_INFO(dev_priv)->gen >= 9) 2563 fault_errors &= GEN9_DE_PIPE_IRQ_FAULT_ERRORS; 2564 else 2565 fault_errors &= GEN8_DE_PIPE_IRQ_FAULT_ERRORS; 2566 2567 if (fault_errors) 2568 DRM_ERROR("Fault errors on pipe %c: 0x%08x\n", 2569 pipe_name(pipe), 2570 fault_errors); 2571 } 2572 2573 if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) && 2574 master_ctl & GEN8_DE_PCH_IRQ) { 2575 /* 2576 * FIXME(BDW): Assume for now that the new interrupt handling 2577 * scheme also closed the SDE interrupt handling race we've seen 2578 * on older pch-split platforms. But this needs testing. 2579 */ 2580 iir = I915_READ(SDEIIR); 2581 if (iir) { 2582 I915_WRITE(SDEIIR, iir); 2583 ret = IRQ_HANDLED; 2584 2585 if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv)) 2586 spt_irq_handler(dev_priv, iir); 2587 else 2588 cpt_irq_handler(dev_priv, iir); 2589 } else { 2590 /* 2591 * Like on previous PCH there seems to be something 2592 * fishy going on with forwarding PCH interrupts. 
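* (the master control register claimed GEN8_DE_PCH_IRQ, yet SDEIIR reads back zero), so all we can do here is log it.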
2593 */ 2594 DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n"); 2595 } 2596 } 2597 2598 return ret; 2599 } 2600 2601 static irqreturn_t gen8_irq_handler(int irq, void *arg) 2602 { 2603 struct drm_device *dev = arg; 2604 struct drm_i915_private *dev_priv = to_i915(dev); 2605 u32 master_ctl; 2606 u32 gt_iir[4] = {}; 2607 irqreturn_t ret; 2608 2609 if (!intel_irqs_enabled(dev_priv)) 2610 return IRQ_NONE; 2611 2612 master_ctl = I915_READ_FW(GEN8_MASTER_IRQ); 2613 master_ctl &= ~GEN8_MASTER_IRQ_CONTROL; 2614 if (!master_ctl) 2615 return IRQ_NONE; 2616 2617 I915_WRITE_FW(GEN8_MASTER_IRQ, 0); 2618 2619 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2620 disable_rpm_wakeref_asserts(dev_priv); 2621 2622 /* Find, clear, then process each source of interrupt */ 2623 ret = gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir); 2624 gen8_gt_irq_handler(dev_priv, gt_iir); 2625 ret |= gen8_de_irq_handler(dev_priv, master_ctl); 2626 2627 I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 2628 POSTING_READ_FW(GEN8_MASTER_IRQ); 2629 2630 enable_rpm_wakeref_asserts(dev_priv); 2631 2632 return ret; 2633 } 2634 2635 /** 2636 * i915_reset_and_wakeup - do process context error handling work 2637 * @dev_priv: i915 device private 2638 * 2639 * Fire an error uevent so userspace can see that a hang or error 2640 * was detected. 2641 */ 2642 static void i915_reset_and_wakeup(struct drm_i915_private *dev_priv) 2643 { 2644 struct kobject *kobj = &dev_priv->drm.primary->kdev->kobj; 2645 char *error_event[] = { I915_ERROR_UEVENT "=1", NULL }; 2646 char *reset_event[] = { I915_RESET_UEVENT "=1", NULL }; 2647 char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL }; 2648 2649 kobject_uevent_env(kobj, KOBJ_CHANGE, error_event); 2650 2651 DRM_DEBUG_DRIVER("resetting chip\n"); 2652 kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event); 2653 2654 intel_prepare_reset(dev_priv); 2655 2656 set_bit(I915_RESET_HANDOFF, &dev_priv->gpu_error.flags); 2657 wake_up_all(&dev_priv->gpu_error.wait_queue); 2658 2659 do { 2660 /* 2661 * All state reset _must_ be completed before we update the 2662 * reset counter, for otherwise waiters might miss the reset 2663 * pending state and not properly drop locks, resulting in 2664 * deadlocks with the reset work. 2665 */ 2666 if (mutex_trylock(&dev_priv->drm.struct_mutex)) { 2667 i915_reset(dev_priv); 2668 mutex_unlock(&dev_priv->drm.struct_mutex); 2669 } 2670 2671 /* We need to wait for anyone holding the lock to wakeup */ 2672 } while (wait_on_bit_timeout(&dev_priv->gpu_error.flags, 2673 I915_RESET_HANDOFF, 2674 TASK_UNINTERRUPTIBLE, 2675 HZ)); 2676 2677 intel_finish_reset(dev_priv); 2678 2679 if (!test_bit(I915_WEDGED, &dev_priv->gpu_error.flags)) 2680 kobject_uevent_env(kobj, 2681 KOBJ_CHANGE, reset_done_event); 2682 2683 /* 2684 * Note: The wake_up also serves as a memory barrier so that 2685 * waiters see the updated value of the dev_priv->gpu_error. 
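* Specifically, the I915_RESET_BACKOFF bit cleared below must be visible to anyone woken from reset_queue before they re-check it.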
2686 */ 2687 clear_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags); 2688 wake_up_all(&dev_priv->gpu_error.reset_queue); 2689 } 2690 2691 static inline void 2692 i915_err_print_instdone(struct drm_i915_private *dev_priv, 2693 struct intel_instdone *instdone) 2694 { 2695 int slice; 2696 int subslice; 2697 2698 pr_err(" INSTDONE: 0x%08x\n", instdone->instdone); 2699 2700 if (INTEL_GEN(dev_priv) <= 3) 2701 return; 2702 2703 pr_err(" SC_INSTDONE: 0x%08x\n", instdone->slice_common); 2704 2705 if (INTEL_GEN(dev_priv) <= 6) 2706 return; 2707 2708 for_each_instdone_slice_subslice(dev_priv, slice, subslice) 2709 pr_err(" SAMPLER_INSTDONE[%d][%d]: 0x%08x\n", 2710 slice, subslice, instdone->sampler[slice][subslice]); 2711 2712 for_each_instdone_slice_subslice(dev_priv, slice, subslice) 2713 pr_err(" ROW_INSTDONE[%d][%d]: 0x%08x\n", 2714 slice, subslice, instdone->row[slice][subslice]); 2715 } 2716 2717 static void i915_clear_error_registers(struct drm_i915_private *dev_priv) 2718 { 2719 u32 eir; 2720 2721 if (!IS_GEN2(dev_priv)) 2722 I915_WRITE(PGTBL_ER, I915_READ(PGTBL_ER)); 2723 2724 if (INTEL_GEN(dev_priv) < 4) 2725 I915_WRITE(IPEIR, I915_READ(IPEIR)); 2726 else 2727 I915_WRITE(IPEIR_I965, I915_READ(IPEIR_I965)); 2728 2729 I915_WRITE(EIR, I915_READ(EIR)); 2730 eir = I915_READ(EIR); 2731 if (eir) { 2732 /* 2733 * some errors might have become stuck, 2734 * mask them. 2735 */ 2736 DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masking\n", eir); 2737 I915_WRITE(EMR, I915_READ(EMR) | eir); 2738 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 2739 } 2740 } 2741 2742 /** 2743 * i915_handle_error - handle a gpu error 2744 * @dev_priv: i915 device private 2745 * @engine_mask: mask representing engines that are hung 2746 * @fmt: Error message format string 2747 * 2748 * Do some basic checking of register state at error time and 2749 * dump it to the syslog. Also call i915_capture_error_state() to make 2750 * sure we get a record and make it available in debugfs. Fire a uevent 2751 * so userspace knows something bad happened (should trigger collection 2752 * of a ring dump etc.). 2753 */ 2754 void i915_handle_error(struct drm_i915_private *dev_priv, 2755 u32 engine_mask, 2756 const char *fmt, ...) 2757 { 2758 va_list args; 2759 char error_msg[80]; 2760 2761 va_start(args, fmt); 2762 vscnprintf(error_msg, sizeof(error_msg), fmt, args); 2763 va_end(args); 2764 2765 /* 2766 * In most cases it's guaranteed that we get here with an RPM 2767 * reference held, for example because there is a pending GPU 2768 * request that won't finish until the reset is done. This 2769 * isn't the case at least when we get here by doing a 2770 * simulated reset via debugfs, so get an RPM reference. 
2771 */ 2772 intel_runtime_pm_get(dev_priv); 2773 2774 i915_capture_error_state(dev_priv, engine_mask, error_msg); 2775 i915_clear_error_registers(dev_priv); 2776 2777 if (!engine_mask) 2778 goto out; 2779 2780 if (test_and_set_bit(I915_RESET_BACKOFF, 2781 &dev_priv->gpu_error.flags)) 2782 goto out; 2783 2784 i915_reset_and_wakeup(dev_priv); 2785 2786 out: 2787 intel_runtime_pm_put(dev_priv); 2788 } 2789 2790 /* Called from drm generic code, passed 'crtc' which 2791 * we use as a pipe index 2792 */ 2793 static int i8xx_enable_vblank(struct drm_device *dev, unsigned int pipe) 2794 { 2795 struct drm_i915_private *dev_priv = to_i915(dev); 2796 unsigned long irqflags; 2797 2798 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2799 i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS); 2800 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2801 2802 return 0; 2803 } 2804 2805 static int i965_enable_vblank(struct drm_device *dev, unsigned int pipe) 2806 { 2807 struct drm_i915_private *dev_priv = to_i915(dev); 2808 unsigned long irqflags; 2809 2810 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2811 i915_enable_pipestat(dev_priv, pipe, 2812 PIPE_START_VBLANK_INTERRUPT_STATUS); 2813 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2814 2815 return 0; 2816 } 2817 2818 static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe) 2819 { 2820 struct drm_i915_private *dev_priv = to_i915(dev); 2821 unsigned long irqflags; 2822 uint32_t bit = INTEL_GEN(dev_priv) >= 7 ? 2823 DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe); 2824 2825 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2826 ilk_enable_display_irq(dev_priv, bit); 2827 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2828 2829 return 0; 2830 } 2831 2832 static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe) 2833 { 2834 struct drm_i915_private *dev_priv = to_i915(dev); 2835 unsigned long irqflags; 2836 2837 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2838 bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK); 2839 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2840 2841 return 0; 2842 } 2843 2844 /* Called from drm generic code, passed 'crtc' which 2845 * we use as a pipe index 2846 */ 2847 static void i8xx_disable_vblank(struct drm_device *dev, unsigned int pipe) 2848 { 2849 struct drm_i915_private *dev_priv = to_i915(dev); 2850 unsigned long irqflags; 2851 2852 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2853 i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS); 2854 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2855 } 2856 2857 static void i965_disable_vblank(struct drm_device *dev, unsigned int pipe) 2858 { 2859 struct drm_i915_private *dev_priv = to_i915(dev); 2860 unsigned long irqflags; 2861 2862 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2863 i915_disable_pipestat(dev_priv, pipe, 2864 PIPE_START_VBLANK_INTERRUPT_STATUS); 2865 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2866 } 2867 2868 static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe) 2869 { 2870 struct drm_i915_private *dev_priv = to_i915(dev); 2871 unsigned long irqflags; 2872 uint32_t bit = INTEL_GEN(dev_priv) >= 7 ? 
2873 DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe); 2874 2875 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2876 ilk_disable_display_irq(dev_priv, bit); 2877 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2878 } 2879 2880 static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe) 2881 { 2882 struct drm_i915_private *dev_priv = to_i915(dev); 2883 unsigned long irqflags; 2884 2885 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2886 bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK); 2887 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2888 } 2889 2890 static void ibx_irq_reset(struct drm_i915_private *dev_priv) 2891 { 2892 if (HAS_PCH_NOP(dev_priv)) 2893 return; 2894 2895 GEN5_IRQ_RESET(SDE); 2896 2897 if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv)) 2898 I915_WRITE(SERR_INT, 0xffffffff); 2899 } 2900 2901 /* 2902 * SDEIER is also touched by the interrupt handler to work around missed PCH 2903 * interrupts. Hence we can't update it after the interrupt handler is enabled - 2904 * instead we unconditionally enable all PCH interrupt sources here, but then 2905 * only unmask them as needed with SDEIMR. 2906 * 2907 * This function needs to be called before interrupts are enabled. 2908 */ 2909 static void ibx_irq_pre_postinstall(struct drm_device *dev) 2910 { 2911 struct drm_i915_private *dev_priv = to_i915(dev); 2912 2913 if (HAS_PCH_NOP(dev_priv)) 2914 return; 2915 2916 WARN_ON(I915_READ(SDEIER) != 0); 2917 I915_WRITE(SDEIER, 0xffffffff); 2918 POSTING_READ(SDEIER); 2919 } 2920 2921 static void gen5_gt_irq_reset(struct drm_i915_private *dev_priv) 2922 { 2923 GEN5_IRQ_RESET(GT); 2924 if (INTEL_GEN(dev_priv) >= 6) 2925 GEN5_IRQ_RESET(GEN6_PM); 2926 } 2927 2928 static void vlv_display_irq_reset(struct drm_i915_private *dev_priv) 2929 { 2930 enum pipe pipe; 2931 2932 if (IS_CHERRYVIEW(dev_priv)) 2933 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV); 2934 else 2935 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK); 2936 2937 i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0); 2938 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 2939 2940 for_each_pipe(dev_priv, pipe) { 2941 I915_WRITE(PIPESTAT(pipe), 2942 PIPE_FIFO_UNDERRUN_STATUS | 2943 PIPESTAT_INT_STATUS_MASK); 2944 dev_priv->pipestat_irq_mask[pipe] = 0; 2945 } 2946 2947 GEN5_IRQ_RESET(VLV_); 2948 dev_priv->irq_mask = ~0; 2949 } 2950 2951 static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv) 2952 { 2953 u32 pipestat_mask; 2954 u32 enable_mask; 2955 enum pipe pipe; 2956 u32 val; 2957 2958 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV | 2959 PIPE_CRC_DONE_INTERRUPT_STATUS; 2960 2961 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); 2962 for_each_pipe(dev_priv, pipe) 2963 i915_enable_pipestat(dev_priv, pipe, pipestat_mask); 2964 2965 enable_mask = I915_DISPLAY_PORT_INTERRUPT | 2966 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 2967 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; 2968 if (IS_CHERRYVIEW(dev_priv)) 2969 enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT; 2970 2971 WARN_ON(dev_priv->irq_mask != ~0); 2972 2973 val = (I915_LPE_PIPE_A_INTERRUPT | 2974 I915_LPE_PIPE_B_INTERRUPT | 2975 I915_LPE_PIPE_C_INTERRUPT); 2976 2977 enable_mask |= val; 2978 2979 dev_priv->irq_mask = ~enable_mask; 2980 2981 GEN5_IRQ_INIT(VLV_, dev_priv->irq_mask, enable_mask); 2982 } 2983 2984 /* drm_dma.h hooks 2985 */ 2986 static void ironlake_irq_reset(struct drm_device *dev) 2987 { 2988 struct drm_i915_private *dev_priv = to_i915(dev); 2989 2990 I915_WRITE(HWSTAM, 0xffffffff); 2991 
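/* HWSTAM is now fully masked; reset the display engine, GT and PCH IRQ register banks below. */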
2992 GEN5_IRQ_RESET(DE); 2993 if (IS_GEN7(dev_priv)) 2994 I915_WRITE(GEN7_ERR_INT, 0xffffffff); 2995 2996 gen5_gt_irq_reset(dev_priv); 2997 2998 ibx_irq_reset(dev_priv); 2999 } 3000 3001 static void valleyview_irq_preinstall(struct drm_device *dev) 3002 { 3003 struct drm_i915_private *dev_priv = to_i915(dev); 3004 3005 I915_WRITE(VLV_MASTER_IER, 0); 3006 POSTING_READ(VLV_MASTER_IER); 3007 3008 gen5_gt_irq_reset(dev_priv); 3009 3010 spin_lock_irq(&dev_priv->irq_lock); 3011 if (dev_priv->display_irqs_enabled) 3012 vlv_display_irq_reset(dev_priv); 3013 spin_unlock_irq(&dev_priv->irq_lock); 3014 } 3015 3016 static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv) 3017 { 3018 GEN8_IRQ_RESET_NDX(GT, 0); 3019 GEN8_IRQ_RESET_NDX(GT, 1); 3020 GEN8_IRQ_RESET_NDX(GT, 2); 3021 GEN8_IRQ_RESET_NDX(GT, 3); 3022 } 3023 3024 static void gen8_irq_reset(struct drm_device *dev) 3025 { 3026 struct drm_i915_private *dev_priv = to_i915(dev); 3027 int pipe; 3028 3029 I915_WRITE(GEN8_MASTER_IRQ, 0); 3030 POSTING_READ(GEN8_MASTER_IRQ); 3031 3032 gen8_gt_irq_reset(dev_priv); 3033 3034 for_each_pipe(dev_priv, pipe) 3035 if (intel_display_power_is_enabled(dev_priv, 3036 POWER_DOMAIN_PIPE(pipe))) 3037 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe); 3038 3039 GEN5_IRQ_RESET(GEN8_DE_PORT_); 3040 GEN5_IRQ_RESET(GEN8_DE_MISC_); 3041 GEN5_IRQ_RESET(GEN8_PCU_); 3042 3043 if (HAS_PCH_SPLIT(dev_priv)) 3044 ibx_irq_reset(dev_priv); 3045 } 3046 3047 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv, 3048 unsigned int pipe_mask) 3049 { 3050 uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN; 3051 enum pipe pipe; 3052 3053 spin_lock_irq(&dev_priv->irq_lock); 3054 for_each_pipe_masked(dev_priv, pipe, pipe_mask) 3055 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe, 3056 dev_priv->de_irq_mask[pipe], 3057 ~dev_priv->de_irq_mask[pipe] | extra_ier); 3058 spin_unlock_irq(&dev_priv->irq_lock); 3059 } 3060 3061 void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv, 3062 unsigned int pipe_mask) 3063 { 3064 enum pipe pipe; 3065 3066 spin_lock_irq(&dev_priv->irq_lock); 3067 for_each_pipe_masked(dev_priv, pipe, pipe_mask) 3068 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe); 3069 spin_unlock_irq(&dev_priv->irq_lock); 3070 3071 /* make sure we're done processing display irqs */ 3072 synchronize_irq(dev_priv->drm.irq); 3073 } 3074 3075 static void cherryview_irq_preinstall(struct drm_device *dev) 3076 { 3077 struct drm_i915_private *dev_priv = to_i915(dev); 3078 3079 I915_WRITE(GEN8_MASTER_IRQ, 0); 3080 POSTING_READ(GEN8_MASTER_IRQ); 3081 3082 gen8_gt_irq_reset(dev_priv); 3083 3084 GEN5_IRQ_RESET(GEN8_PCU_); 3085 3086 spin_lock_irq(&dev_priv->irq_lock); 3087 if (dev_priv->display_irqs_enabled) 3088 vlv_display_irq_reset(dev_priv); 3089 spin_unlock_irq(&dev_priv->irq_lock); 3090 } 3091 3092 static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv, 3093 const u32 hpd[HPD_NUM_PINS]) 3094 { 3095 struct intel_encoder *encoder; 3096 u32 enabled_irqs = 0; 3097 3098 for_each_intel_encoder(&dev_priv->drm, encoder) 3099 if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED) 3100 enabled_irqs |= hpd[encoder->hpd_pin]; 3101 3102 return enabled_irqs; 3103 } 3104 3105 static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv) 3106 { 3107 u32 hotplug; 3108 3109 /* 3110 * Enable digital hotplug on the PCH, and configure the DP short pulse 3111 * duration to 2ms (which is the minimum in the Display Port spec). 3112 * The pulse duration bits are reserved on LPT+. 
3113 */ 3114 hotplug = I915_READ(PCH_PORT_HOTPLUG); 3115 hotplug &= ~(PORTB_PULSE_DURATION_MASK | 3116 PORTC_PULSE_DURATION_MASK | 3117 PORTD_PULSE_DURATION_MASK); 3118 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms; 3119 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms; 3120 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms; 3121 /* 3122 * When CPU and PCH are on the same package, port A 3123 * HPD must be enabled in both north and south. 3124 */ 3125 if (HAS_PCH_LPT_LP(dev_priv)) 3126 hotplug |= PORTA_HOTPLUG_ENABLE; 3127 I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 3128 } 3129 3130 static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv) 3131 { 3132 u32 hotplug_irqs, enabled_irqs; 3133 3134 if (HAS_PCH_IBX(dev_priv)) { 3135 hotplug_irqs = SDE_HOTPLUG_MASK; 3136 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ibx); 3137 } else { 3138 hotplug_irqs = SDE_HOTPLUG_MASK_CPT; 3139 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_cpt); 3140 } 3141 3142 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 3143 3144 ibx_hpd_detection_setup(dev_priv); 3145 } 3146 3147 static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv) 3148 { 3149 u32 hotplug; 3150 3151 /* Enable digital hotplug on the PCH */ 3152 hotplug = I915_READ(PCH_PORT_HOTPLUG); 3153 hotplug |= PORTA_HOTPLUG_ENABLE | 3154 PORTB_HOTPLUG_ENABLE | 3155 PORTC_HOTPLUG_ENABLE | 3156 PORTD_HOTPLUG_ENABLE; 3157 I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 3158 3159 hotplug = I915_READ(PCH_PORT_HOTPLUG2); 3160 hotplug |= PORTE_HOTPLUG_ENABLE; 3161 I915_WRITE(PCH_PORT_HOTPLUG2, hotplug); 3162 } 3163 3164 static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv) 3165 { 3166 u32 hotplug_irqs, enabled_irqs; 3167 3168 hotplug_irqs = SDE_HOTPLUG_MASK_SPT; 3169 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_spt); 3170 3171 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 3172 3173 spt_hpd_detection_setup(dev_priv); 3174 } 3175 3176 static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv) 3177 { 3178 u32 hotplug; 3179 3180 /* 3181 * Enable digital hotplug on the CPU, and configure the DP short pulse 3182 * duration to 2ms (which is the minimum in the Display Port spec) 3183 * The pulse duration bits are reserved on HSW+. 
3184 */ 3185 hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL); 3186 hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK; 3187 hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE | 3188 DIGITAL_PORTA_PULSE_DURATION_2ms; 3189 I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug); 3190 } 3191 3192 static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv) 3193 { 3194 u32 hotplug_irqs, enabled_irqs; 3195 3196 if (INTEL_GEN(dev_priv) >= 8) { 3197 hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG; 3198 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bdw); 3199 3200 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs); 3201 } else if (INTEL_GEN(dev_priv) >= 7) { 3202 hotplug_irqs = DE_DP_A_HOTPLUG_IVB; 3203 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ivb); 3204 3205 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs); 3206 } else { 3207 hotplug_irqs = DE_DP_A_HOTPLUG; 3208 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ilk); 3209 3210 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs); 3211 } 3212 3213 ilk_hpd_detection_setup(dev_priv); 3214 3215 ibx_hpd_irq_setup(dev_priv); 3216 } 3217 3218 static void __bxt_hpd_detection_setup(struct drm_i915_private *dev_priv, 3219 u32 enabled_irqs) 3220 { 3221 u32 hotplug; 3222 3223 hotplug = I915_READ(PCH_PORT_HOTPLUG); 3224 hotplug |= PORTA_HOTPLUG_ENABLE | 3225 PORTB_HOTPLUG_ENABLE | 3226 PORTC_HOTPLUG_ENABLE; 3227 3228 DRM_DEBUG_KMS("Invert bit setting: hp_ctl:%x hp_port:%x\n", 3229 hotplug, enabled_irqs); 3230 hotplug &= ~BXT_DDI_HPD_INVERT_MASK; 3231 3232 /* 3233 * For BXT invert bit has to be set based on AOB design 3234 * for HPD detection logic, update it based on VBT fields. 3235 */ 3236 if ((enabled_irqs & BXT_DE_PORT_HP_DDIA) && 3237 intel_bios_is_port_hpd_inverted(dev_priv, PORT_A)) 3238 hotplug |= BXT_DDIA_HPD_INVERT; 3239 if ((enabled_irqs & BXT_DE_PORT_HP_DDIB) && 3240 intel_bios_is_port_hpd_inverted(dev_priv, PORT_B)) 3241 hotplug |= BXT_DDIB_HPD_INVERT; 3242 if ((enabled_irqs & BXT_DE_PORT_HP_DDIC) && 3243 intel_bios_is_port_hpd_inverted(dev_priv, PORT_C)) 3244 hotplug |= BXT_DDIC_HPD_INVERT; 3245 3246 I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 3247 } 3248 3249 static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv) 3250 { 3251 __bxt_hpd_detection_setup(dev_priv, BXT_DE_PORT_HOTPLUG_MASK); 3252 } 3253 3254 static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv) 3255 { 3256 u32 hotplug_irqs, enabled_irqs; 3257 3258 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bxt); 3259 hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK; 3260 3261 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs); 3262 3263 __bxt_hpd_detection_setup(dev_priv, enabled_irqs); 3264 } 3265 3266 static void ibx_irq_postinstall(struct drm_device *dev) 3267 { 3268 struct drm_i915_private *dev_priv = to_i915(dev); 3269 u32 mask; 3270 3271 if (HAS_PCH_NOP(dev_priv)) 3272 return; 3273 3274 if (HAS_PCH_IBX(dev_priv)) 3275 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON; 3276 else 3277 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT; 3278 3279 gen5_assert_iir_is_zero(dev_priv, SDEIIR); 3280 I915_WRITE(SDEIMR, ~mask); 3281 3282 if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) || 3283 HAS_PCH_LPT(dev_priv)) 3284 ibx_hpd_detection_setup(dev_priv); 3285 else 3286 spt_hpd_detection_setup(dev_priv); 3287 } 3288 3289 static void gen5_gt_irq_postinstall(struct drm_device *dev) 3290 { 3291 struct drm_i915_private *dev_priv = to_i915(dev); 3292 u32 pm_irqs, gt_irqs; 3293 3294 pm_irqs = gt_irqs = 0; 3295 3296 dev_priv->gt_irq_mask = ~0; 3297 if (HAS_L3_DPF(dev_priv)) { 3298 
/* L3 parity interrupt is always unmasked. */ 3299 dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev_priv); 3300 gt_irqs |= GT_PARITY_ERROR(dev_priv); 3301 } 3302 3303 gt_irqs |= GT_RENDER_USER_INTERRUPT; 3304 if (IS_GEN5(dev_priv)) { 3305 gt_irqs |= ILK_BSD_USER_INTERRUPT; 3306 } else { 3307 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT; 3308 } 3309 3310 GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs); 3311 3312 if (INTEL_GEN(dev_priv) >= 6) { 3313 /* 3314 * RPS interrupts will get enabled/disabled on demand when RPS 3315 * itself is enabled/disabled. 3316 */ 3317 if (HAS_VEBOX(dev_priv)) { 3318 pm_irqs |= PM_VEBOX_USER_INTERRUPT; 3319 dev_priv->pm_ier |= PM_VEBOX_USER_INTERRUPT; 3320 } 3321 3322 dev_priv->pm_imr = 0xffffffff; 3323 GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_imr, pm_irqs); 3324 } 3325 } 3326 3327 static int ironlake_irq_postinstall(struct drm_device *dev) 3328 { 3329 struct drm_i915_private *dev_priv = to_i915(dev); 3330 u32 display_mask, extra_mask; 3331 3332 if (INTEL_GEN(dev_priv) >= 7) { 3333 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | 3334 DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB | 3335 DE_PLANEB_FLIP_DONE_IVB | 3336 DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB); 3337 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB | 3338 DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB | 3339 DE_DP_A_HOTPLUG_IVB); 3340 } else { 3341 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | 3342 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE | 3343 DE_AUX_CHANNEL_A | 3344 DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE | 3345 DE_POISON); 3346 extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT | 3347 DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN | 3348 DE_DP_A_HOTPLUG); 3349 } 3350 3351 dev_priv->irq_mask = ~display_mask; 3352 3353 I915_WRITE(HWSTAM, 0xeffe); 3354 3355 ibx_irq_pre_postinstall(dev); 3356 3357 GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask); 3358 3359 gen5_gt_irq_postinstall(dev); 3360 3361 ilk_hpd_detection_setup(dev_priv); 3362 3363 ibx_irq_postinstall(dev); 3364 3365 if (IS_IRONLAKE_M(dev_priv)) { 3366 /* Enable PCU event interrupts 3367 * 3368 * spinlocking not required here for correctness since interrupt 3369 * setup is guaranteed to run in single-threaded context. But we 3370 * need it to make the assert_spin_locked happy. 
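* (the display irq update helpers assert that irq_lock is held).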
*/ 3371 spin_lock_irq(&dev_priv->irq_lock); 3372 ilk_enable_display_irq(dev_priv, DE_PCU_EVENT); 3373 spin_unlock_irq(&dev_priv->irq_lock); 3374 } 3375 3376 return 0; 3377 } 3378 3379 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv) 3380 { 3381 lockdep_assert_held(&dev_priv->irq_lock); 3382 3383 if (dev_priv->display_irqs_enabled) 3384 return; 3385 3386 dev_priv->display_irqs_enabled = true; 3387 3388 if (intel_irqs_enabled(dev_priv)) { 3389 vlv_display_irq_reset(dev_priv); 3390 vlv_display_irq_postinstall(dev_priv); 3391 } 3392 } 3393 3394 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv) 3395 { 3396 lockdep_assert_held(&dev_priv->irq_lock); 3397 3398 if (!dev_priv->display_irqs_enabled) 3399 return; 3400 3401 dev_priv->display_irqs_enabled = false; 3402 3403 if (intel_irqs_enabled(dev_priv)) 3404 vlv_display_irq_reset(dev_priv); 3405 } 3406 3407 3408 static int valleyview_irq_postinstall(struct drm_device *dev) 3409 { 3410 struct drm_i915_private *dev_priv = to_i915(dev); 3411 3412 gen5_gt_irq_postinstall(dev); 3413 3414 spin_lock_irq(&dev_priv->irq_lock); 3415 if (dev_priv->display_irqs_enabled) 3416 vlv_display_irq_postinstall(dev_priv); 3417 spin_unlock_irq(&dev_priv->irq_lock); 3418 3419 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); 3420 POSTING_READ(VLV_MASTER_IER); 3421 3422 return 0; 3423 } 3424 3425 static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv) 3426 { 3427 /* These are interrupts we'll toggle with the ring mask register */ 3428 uint32_t gt_interrupts[] = { 3429 GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT | 3430 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT | 3431 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT | 3432 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT, 3433 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT | 3434 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT | 3435 GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT | 3436 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT, 3437 0, 3438 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT | 3439 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT 3440 }; 3441 3442 if (HAS_L3_DPF(dev_priv)) 3443 gt_interrupts[0] |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT; 3444 3445 dev_priv->pm_ier = 0x0; 3446 dev_priv->pm_imr = ~dev_priv->pm_ier; 3447 GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]); 3448 GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]); 3449 /* 3450 * RPS interrupts will get enabled/disabled on demand when RPS itself 3451 * is enabled/disabled. Same will be the case for GuC interrupts.
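* GT index 2 (the PM bank) therefore starts out fully masked, with pm_imr == ~0 and pm_ier == 0.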
3452 */ 3453 GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_imr, dev_priv->pm_ier); 3454 GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]); 3455 } 3456 3457 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) 3458 { 3459 uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE; 3460 uint32_t de_pipe_enables; 3461 u32 de_port_masked = GEN8_AUX_CHANNEL_A; 3462 u32 de_port_enables; 3463 u32 de_misc_masked = GEN8_DE_MISC_GSE; 3464 enum pipe pipe; 3465 3466 if (INTEL_INFO(dev_priv)->gen >= 9) { 3467 de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE | 3468 GEN9_DE_PIPE_IRQ_FAULT_ERRORS; 3469 de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C | 3470 GEN9_AUX_CHANNEL_D; 3471 if (IS_GEN9_LP(dev_priv)) 3472 de_port_masked |= BXT_DE_PORT_GMBUS; 3473 } else { 3474 de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE | 3475 GEN8_DE_PIPE_IRQ_FAULT_ERRORS; 3476 } 3477 3478 de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK | 3479 GEN8_PIPE_FIFO_UNDERRUN; 3480 3481 de_port_enables = de_port_masked; 3482 if (IS_GEN9_LP(dev_priv)) 3483 de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK; 3484 else if (IS_BROADWELL(dev_priv)) 3485 de_port_enables |= GEN8_PORT_DP_A_HOTPLUG; 3486 3487 dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked; 3488 dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked; 3489 dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked; 3490 3491 for_each_pipe(dev_priv, pipe) 3492 if (intel_display_power_is_enabled(dev_priv, 3493 POWER_DOMAIN_PIPE(pipe))) 3494 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe, 3495 dev_priv->de_irq_mask[pipe], 3496 de_pipe_enables); 3497 3498 GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables); 3499 GEN5_IRQ_INIT(GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked); 3500 3501 if (IS_GEN9_LP(dev_priv)) 3502 bxt_hpd_detection_setup(dev_priv); 3503 else if (IS_BROADWELL(dev_priv)) 3504 ilk_hpd_detection_setup(dev_priv); 3505 } 3506 3507 static int gen8_irq_postinstall(struct drm_device *dev) 3508 { 3509 struct drm_i915_private *dev_priv = to_i915(dev); 3510 3511 if (HAS_PCH_SPLIT(dev_priv)) 3512 ibx_irq_pre_postinstall(dev); 3513 3514 gen8_gt_irq_postinstall(dev_priv); 3515 gen8_de_irq_postinstall(dev_priv); 3516 3517 if (HAS_PCH_SPLIT(dev_priv)) 3518 ibx_irq_postinstall(dev); 3519 3520 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 3521 POSTING_READ(GEN8_MASTER_IRQ); 3522 3523 return 0; 3524 } 3525 3526 static int cherryview_irq_postinstall(struct drm_device *dev) 3527 { 3528 struct drm_i915_private *dev_priv = to_i915(dev); 3529 3530 gen8_gt_irq_postinstall(dev_priv); 3531 3532 spin_lock_irq(&dev_priv->irq_lock); 3533 if (dev_priv->display_irqs_enabled) 3534 vlv_display_irq_postinstall(dev_priv); 3535 spin_unlock_irq(&dev_priv->irq_lock); 3536 3537 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 3538 POSTING_READ(GEN8_MASTER_IRQ); 3539 3540 return 0; 3541 } 3542 3543 static void gen8_irq_uninstall(struct drm_device *dev) 3544 { 3545 struct drm_i915_private *dev_priv = to_i915(dev); 3546 3547 if (!dev_priv) 3548 return; 3549 3550 gen8_irq_reset(dev); 3551 } 3552 3553 static void valleyview_irq_uninstall(struct drm_device *dev) 3554 { 3555 struct drm_i915_private *dev_priv = to_i915(dev); 3556 3557 if (!dev_priv) 3558 return; 3559 3560 I915_WRITE(VLV_MASTER_IER, 0); 3561 POSTING_READ(VLV_MASTER_IER); 3562 3563 gen5_gt_irq_reset(dev_priv); 3564 3565 I915_WRITE(HWSTAM, 0xffffffff); 3566 3567 spin_lock_irq(&dev_priv->irq_lock); 3568 if (dev_priv->display_irqs_enabled) 3569 vlv_display_irq_reset(dev_priv); 3570 spin_unlock_irq(&dev_priv->irq_lock); 3571 } 3572 3573 static void 
cherryview_irq_uninstall(struct drm_device *dev) 3574 { 3575 struct drm_i915_private *dev_priv = to_i915(dev); 3576 3577 if (!dev_priv) 3578 return; 3579 3580 I915_WRITE(GEN8_MASTER_IRQ, 0); 3581 POSTING_READ(GEN8_MASTER_IRQ); 3582 3583 gen8_gt_irq_reset(dev_priv); 3584 3585 GEN5_IRQ_RESET(GEN8_PCU_); 3586 3587 spin_lock_irq(&dev_priv->irq_lock); 3588 if (dev_priv->display_irqs_enabled) 3589 vlv_display_irq_reset(dev_priv); 3590 spin_unlock_irq(&dev_priv->irq_lock); 3591 } 3592 3593 static void ironlake_irq_uninstall(struct drm_device *dev) 3594 { 3595 struct drm_i915_private *dev_priv = to_i915(dev); 3596 3597 if (!dev_priv) 3598 return; 3599 3600 ironlake_irq_reset(dev); 3601 } 3602 3603 static void i8xx_irq_preinstall(struct drm_device * dev) 3604 { 3605 struct drm_i915_private *dev_priv = to_i915(dev); 3606 int pipe; 3607 3608 for_each_pipe(dev_priv, pipe) 3609 I915_WRITE(PIPESTAT(pipe), 0); 3610 I915_WRITE16(IMR, 0xffff); 3611 I915_WRITE16(IER, 0x0); 3612 POSTING_READ16(IER); 3613 } 3614 3615 static int i8xx_irq_postinstall(struct drm_device *dev) 3616 { 3617 struct drm_i915_private *dev_priv = to_i915(dev); 3618 3619 I915_WRITE16(EMR, 3620 ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); 3621 3622 /* Unmask the interrupts that we always want on. */ 3623 dev_priv->irq_mask = 3624 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3625 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3626 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3627 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT); 3628 I915_WRITE16(IMR, dev_priv->irq_mask); 3629 3630 I915_WRITE16(IER, 3631 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3632 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3633 I915_USER_INTERRUPT); 3634 POSTING_READ16(IER); 3635 3636 /* Interrupt setup is already guaranteed to be single-threaded, this is 3637 * just to make the assert_spin_locked check happy. */ 3638 spin_lock_irq(&dev_priv->irq_lock); 3639 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 3640 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 3641 spin_unlock_irq(&dev_priv->irq_lock); 3642 3643 return 0; 3644 } 3645 3646 /* 3647 * Returns true when a page flip has completed. 3648 */ 3649 static bool i8xx_handle_vblank(struct drm_i915_private *dev_priv, 3650 int plane, int pipe, u32 iir) 3651 { 3652 u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane); 3653 3654 if (!intel_pipe_handle_vblank(dev_priv, pipe)) 3655 return false; 3656 3657 if ((iir & flip_pending) == 0) 3658 goto check_page_flip; 3659 3660 /* We detect FlipDone by looking for the change in PendingFlip from '1' 3661 * to '0' on the following vblank, i.e. IIR has the Pendingflip 3662 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence 3663 * the flip is completed (no longer pending). Since this doesn't raise 3664 * an interrupt per se, we watch for the change at vblank. 
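* If ISR below still has the pending bit set, the flip has not completed yet, so we only queue a check via intel_check_page_flip().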
3665 */ 3666 if (I915_READ16(ISR) & flip_pending) 3667 goto check_page_flip; 3668 3669 intel_finish_page_flip_cs(dev_priv, pipe); 3670 return true; 3671 3672 check_page_flip: 3673 intel_check_page_flip(dev_priv, pipe); 3674 return false; 3675 } 3676 3677 static irqreturn_t i8xx_irq_handler(int irq, void *arg) 3678 { 3679 struct drm_device *dev = arg; 3680 struct drm_i915_private *dev_priv = to_i915(dev); 3681 u16 iir, new_iir; 3682 u32 pipe_stats[2]; 3683 int pipe; 3684 u16 flip_mask = 3685 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3686 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 3687 irqreturn_t ret; 3688 3689 if (!intel_irqs_enabled(dev_priv)) 3690 return IRQ_NONE; 3691 3692 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 3693 disable_rpm_wakeref_asserts(dev_priv); 3694 3695 ret = IRQ_NONE; 3696 iir = I915_READ16(IIR); 3697 if (iir == 0) 3698 goto out; 3699 3700 while (iir & ~flip_mask) { 3701 /* Can't rely on pipestat interrupt bit in iir as it might 3702 * have been cleared after the pipestat interrupt was received. 3703 * It doesn't set the bit in iir again, but it still produces 3704 * interrupts (for non-MSI). 3705 */ 3706 spin_lock(&dev_priv->irq_lock); 3707 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 3708 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir); 3709 3710 for_each_pipe(dev_priv, pipe) { 3711 i915_reg_t reg = PIPESTAT(pipe); 3712 pipe_stats[pipe] = I915_READ(reg); 3713 3714 /* 3715 * Clear the PIPE*STAT regs before the IIR 3716 */ 3717 if (pipe_stats[pipe] & 0x8000ffff) 3718 I915_WRITE(reg, pipe_stats[pipe]); 3719 } 3720 spin_unlock(&dev_priv->irq_lock); 3721 3722 I915_WRITE16(IIR, iir & ~flip_mask); 3723 new_iir = I915_READ16(IIR); /* Flush posted writes */ 3724 3725 if (iir & I915_USER_INTERRUPT) 3726 notify_ring(dev_priv->engine[RCS]); 3727 3728 for_each_pipe(dev_priv, pipe) { 3729 int plane = pipe; 3730 if (HAS_FBC(dev_priv)) 3731 plane = !plane; 3732 3733 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS && 3734 i8xx_handle_vblank(dev_priv, plane, pipe, iir)) 3735 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane); 3736 3737 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 3738 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 3739 3740 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 3741 intel_cpu_fifo_underrun_irq_handler(dev_priv, 3742 pipe); 3743 } 3744 3745 iir = new_iir; 3746 } 3747 ret = IRQ_HANDLED; 3748 3749 out: 3750 enable_rpm_wakeref_asserts(dev_priv); 3751 3752 return ret; 3753 } 3754 3755 static void i8xx_irq_uninstall(struct drm_device * dev) 3756 { 3757 struct drm_i915_private *dev_priv = to_i915(dev); 3758 int pipe; 3759 3760 for_each_pipe(dev_priv, pipe) { 3761 /* Clear enable bits; then clear status bits */ 3762 I915_WRITE(PIPESTAT(pipe), 0); 3763 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); 3764 } 3765 I915_WRITE16(IMR, 0xffff); 3766 I915_WRITE16(IER, 0x0); 3767 I915_WRITE16(IIR, I915_READ16(IIR)); 3768 } 3769 3770 static void i915_irq_preinstall(struct drm_device * dev) 3771 { 3772 struct drm_i915_private *dev_priv = to_i915(dev); 3773 int pipe; 3774 3775 if (I915_HAS_HOTPLUG(dev_priv)) { 3776 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); 3777 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3778 } 3779 3780 I915_WRITE16(HWSTAM, 0xeffe); 3781 for_each_pipe(dev_priv, pipe) 3782 I915_WRITE(PIPESTAT(pipe), 0); 3783 I915_WRITE(IMR, 0xffffffff); 3784 I915_WRITE(IER, 0x0); 3785 POSTING_READ(IER); 3786 } 3787 3788 static int i915_irq_postinstall(struct drm_device *dev) 3789 { 
3790 struct drm_i915_private *dev_priv = to_i915(dev); 3791 u32 enable_mask; 3792 3793 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); 3794 3795 /* Unmask the interrupts that we always want on. */ 3796 dev_priv->irq_mask = 3797 ~(I915_ASLE_INTERRUPT | 3798 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3799 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3800 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3801 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT); 3802 3803 enable_mask = 3804 I915_ASLE_INTERRUPT | 3805 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3806 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3807 I915_USER_INTERRUPT; 3808 3809 if (I915_HAS_HOTPLUG(dev_priv)) { 3810 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); 3811 POSTING_READ(PORT_HOTPLUG_EN); 3812 3813 /* Enable in IER... */ 3814 enable_mask |= I915_DISPLAY_PORT_INTERRUPT; 3815 /* and unmask in IMR */ 3816 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT; 3817 } 3818 3819 I915_WRITE(IMR, dev_priv->irq_mask); 3820 I915_WRITE(IER, enable_mask); 3821 POSTING_READ(IER); 3822 3823 i915_enable_asle_pipestat(dev_priv); 3824 3825 /* Interrupt setup is already guaranteed to be single-threaded, this is 3826 * just to make the assert_spin_locked check happy. */ 3827 spin_lock_irq(&dev_priv->irq_lock); 3828 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 3829 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 3830 spin_unlock_irq(&dev_priv->irq_lock); 3831 3832 return 0; 3833 } 3834 3835 /* 3836 * Returns true when a page flip has completed. 3837 */ 3838 static bool i915_handle_vblank(struct drm_i915_private *dev_priv, 3839 int plane, int pipe, u32 iir) 3840 { 3841 u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane); 3842 3843 if (!intel_pipe_handle_vblank(dev_priv, pipe)) 3844 return false; 3845 3846 if ((iir & flip_pending) == 0) 3847 goto check_page_flip; 3848 3849 /* We detect FlipDone by looking for the change in PendingFlip from '1' 3850 * to '0' on the following vblank, i.e. IIR has the Pendingflip 3851 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence 3852 * the flip is completed (no longer pending). Since this doesn't raise 3853 * an interrupt per se, we watch for the change at vblank. 3854 */ 3855 if (I915_READ(ISR) & flip_pending) 3856 goto check_page_flip; 3857 3858 intel_finish_page_flip_cs(dev_priv, pipe); 3859 return true; 3860 3861 check_page_flip: 3862 intel_check_page_flip(dev_priv, pipe); 3863 return false; 3864 } 3865 3866 static irqreturn_t i915_irq_handler(int irq, void *arg) 3867 { 3868 struct drm_device *dev = arg; 3869 struct drm_i915_private *dev_priv = to_i915(dev); 3870 u32 iir, new_iir, pipe_stats[I915_MAX_PIPES]; 3871 u32 flip_mask = 3872 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3873 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 3874 int pipe, ret = IRQ_NONE; 3875 3876 if (!intel_irqs_enabled(dev_priv)) 3877 return IRQ_NONE; 3878 3879 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 3880 disable_rpm_wakeref_asserts(dev_priv); 3881 3882 iir = I915_READ(IIR); 3883 do { 3884 bool irq_received = (iir & ~flip_mask) != 0; 3885 bool blc_event = false; 3886 3887 /* Can't rely on pipestat interrupt bit in iir as it might 3888 * have been cleared after the pipestat interrupt was received. 3889 * It doesn't set the bit in iir again, but it still produces 3890 * interrupts (for non-MSI). 
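 * That is why the loop snapshots and clears the PIPESTAT registers under
 * irq_lock before acking IIR, and treats any pending pipestat bits as a
 * reason to keep going (irq_received).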
3891 */ 3892 spin_lock(&dev_priv->irq_lock); 3893 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 3894 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir); 3895 3896 for_each_pipe(dev_priv, pipe) { 3897 i915_reg_t reg = PIPESTAT(pipe); 3898 pipe_stats[pipe] = I915_READ(reg); 3899 3900 /* Clear the PIPE*STAT regs before the IIR */ 3901 if (pipe_stats[pipe] & 0x8000ffff) { 3902 I915_WRITE(reg, pipe_stats[pipe]); 3903 irq_received = true; 3904 } 3905 } 3906 spin_unlock(&dev_priv->irq_lock); 3907 3908 if (!irq_received) 3909 break; 3910 3911 /* Consume port. Then clear IIR or we'll miss events */ 3912 if (I915_HAS_HOTPLUG(dev_priv) && 3913 iir & I915_DISPLAY_PORT_INTERRUPT) { 3914 u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv); 3915 if (hotplug_status) 3916 i9xx_hpd_irq_handler(dev_priv, hotplug_status); 3917 } 3918 3919 I915_WRITE(IIR, iir & ~flip_mask); 3920 new_iir = I915_READ(IIR); /* Flush posted writes */ 3921 3922 if (iir & I915_USER_INTERRUPT) 3923 notify_ring(dev_priv->engine[RCS]); 3924 3925 for_each_pipe(dev_priv, pipe) { 3926 int plane = pipe; 3927 if (HAS_FBC(dev_priv)) 3928 plane = !plane; 3929 3930 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS && 3931 i915_handle_vblank(dev_priv, plane, pipe, iir)) 3932 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane); 3933 3934 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 3935 blc_event = true; 3936 3937 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 3938 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 3939 3940 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 3941 intel_cpu_fifo_underrun_irq_handler(dev_priv, 3942 pipe); 3943 } 3944 3945 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 3946 intel_opregion_asle_intr(dev_priv); 3947 3948 /* With MSI, interrupts are only generated when iir 3949 * transitions from zero to nonzero. If another bit got 3950 * set while we were handling the existing iir bits, then 3951 * we would never get another interrupt. 3952 * 3953 * This is fine on non-MSI as well, as if we hit this path 3954 * we avoid exiting the interrupt handler only to generate 3955 * another one. 3956 * 3957 * Note that for MSI this could cause a stray interrupt report 3958 * if an interrupt landed in the time between writing IIR and 3959 * the posting read. This should be rare enough to never 3960 * trigger the 99% of 100,000 interrupts test for disabling 3961 * stray interrupts. 
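 *
 * Hence we loop: iir is reloaded from new_iir and the handler goes around
 * again until no unhandled bits remain.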
3962 */ 3963 ret = IRQ_HANDLED; 3964 iir = new_iir; 3965 } while (iir & ~flip_mask); 3966 3967 enable_rpm_wakeref_asserts(dev_priv); 3968 3969 return ret; 3970 } 3971 3972 static void i915_irq_uninstall(struct drm_device * dev) 3973 { 3974 struct drm_i915_private *dev_priv = to_i915(dev); 3975 int pipe; 3976 3977 if (I915_HAS_HOTPLUG(dev_priv)) { 3978 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); 3979 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3980 } 3981 3982 I915_WRITE16(HWSTAM, 0xffff); 3983 for_each_pipe(dev_priv, pipe) { 3984 /* Clear enable bits; then clear status bits */ 3985 I915_WRITE(PIPESTAT(pipe), 0); 3986 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); 3987 } 3988 I915_WRITE(IMR, 0xffffffff); 3989 I915_WRITE(IER, 0x0); 3990 3991 I915_WRITE(IIR, I915_READ(IIR)); 3992 } 3993 3994 static void i965_irq_preinstall(struct drm_device * dev) 3995 { 3996 struct drm_i915_private *dev_priv = to_i915(dev); 3997 int pipe; 3998 3999 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); 4000 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 4001 4002 I915_WRITE(HWSTAM, 0xeffe); 4003 for_each_pipe(dev_priv, pipe) 4004 I915_WRITE(PIPESTAT(pipe), 0); 4005 I915_WRITE(IMR, 0xffffffff); 4006 I915_WRITE(IER, 0x0); 4007 POSTING_READ(IER); 4008 } 4009 4010 static int i965_irq_postinstall(struct drm_device *dev) 4011 { 4012 struct drm_i915_private *dev_priv = to_i915(dev); 4013 u32 enable_mask; 4014 u32 error_mask; 4015 4016 /* Unmask the interrupts that we always want on. */ 4017 dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT | 4018 I915_DISPLAY_PORT_INTERRUPT | 4019 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 4020 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 4021 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 4022 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | 4023 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 4024 4025 enable_mask = ~dev_priv->irq_mask; 4026 enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 4027 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT); 4028 enable_mask |= I915_USER_INTERRUPT; 4029 4030 if (IS_G4X(dev_priv)) 4031 enable_mask |= I915_BSD_USER_INTERRUPT; 4032 4033 /* Interrupt setup is already guaranteed to be single-threaded, this is 4034 * just to make the assert_spin_locked check happy. */ 4035 spin_lock_irq(&dev_priv->irq_lock); 4036 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); 4037 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 4038 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 4039 spin_unlock_irq(&dev_priv->irq_lock); 4040 4041 /* 4042 * Enable some error detection, note the instruction error mask 4043 * bit is reserved, so we leave it masked. 
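 *
 * G4X parts additionally report the GM45 page table and privilege errors;
 * everything else only reports page table and memory refresh errors.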
4044 */ 4045 if (IS_G4X(dev_priv)) { 4046 error_mask = ~(GM45_ERROR_PAGE_TABLE | 4047 GM45_ERROR_MEM_PRIV | 4048 GM45_ERROR_CP_PRIV | 4049 I915_ERROR_MEMORY_REFRESH); 4050 } else { 4051 error_mask = ~(I915_ERROR_PAGE_TABLE | 4052 I915_ERROR_MEMORY_REFRESH); 4053 } 4054 I915_WRITE(EMR, error_mask); 4055 4056 I915_WRITE(IMR, dev_priv->irq_mask); 4057 I915_WRITE(IER, enable_mask); 4058 POSTING_READ(IER); 4059 4060 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); 4061 POSTING_READ(PORT_HOTPLUG_EN); 4062 4063 i915_enable_asle_pipestat(dev_priv); 4064 4065 return 0; 4066 } 4067 4068 static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv) 4069 { 4070 u32 hotplug_en; 4071 4072 lockdep_assert_held(&dev_priv->irq_lock); 4073 4074 /* Note HDMI and DP share hotplug bits */ 4075 /* enable bits are the same for all generations */ 4076 hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915); 4077 /* Programming the CRT detection parameters tends 4078 to generate a spurious hotplug event about three 4079 seconds later. So just do it once. 4080 */ 4081 if (IS_G4X(dev_priv)) 4082 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64; 4083 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; 4084 4085 /* Ignore TV since it's buggy */ 4086 i915_hotplug_interrupt_update_locked(dev_priv, 4087 HOTPLUG_INT_EN_MASK | 4088 CRT_HOTPLUG_VOLTAGE_COMPARE_MASK | 4089 CRT_HOTPLUG_ACTIVATION_PERIOD_64, 4090 hotplug_en); 4091 } 4092 4093 static irqreturn_t i965_irq_handler(int irq, void *arg) 4094 { 4095 struct drm_device *dev = arg; 4096 struct drm_i915_private *dev_priv = to_i915(dev); 4097 u32 iir, new_iir; 4098 u32 pipe_stats[I915_MAX_PIPES]; 4099 int ret = IRQ_NONE, pipe; 4100 u32 flip_mask = 4101 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 4102 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 4103 4104 if (!intel_irqs_enabled(dev_priv)) 4105 return IRQ_NONE; 4106 4107 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 4108 disable_rpm_wakeref_asserts(dev_priv); 4109 4110 iir = I915_READ(IIR); 4111 4112 for (;;) { 4113 bool irq_received = (iir & ~flip_mask) != 0; 4114 bool blc_event = false; 4115 4116 /* Can't rely on pipestat interrupt bit in iir as it might 4117 * have been cleared after the pipestat interrupt was received. 4118 * It doesn't set the bit in iir again, but it still produces 4119 * interrupts (for non-MSI). 4120 */ 4121 spin_lock(&dev_priv->irq_lock); 4122 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 4123 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir); 4124 4125 for_each_pipe(dev_priv, pipe) { 4126 i915_reg_t reg = PIPESTAT(pipe); 4127 pipe_stats[pipe] = I915_READ(reg); 4128 4129 /* 4130 * Clear the PIPE*STAT regs before the IIR 4131 */ 4132 if (pipe_stats[pipe] & 0x8000ffff) { 4133 I915_WRITE(reg, pipe_stats[pipe]); 4134 irq_received = true; 4135 } 4136 } 4137 spin_unlock(&dev_priv->irq_lock); 4138 4139 if (!irq_received) 4140 break; 4141 4142 ret = IRQ_HANDLED; 4143 4144 /* Consume port. 
Then clear IIR or we'll miss events */ 4145 if (iir & I915_DISPLAY_PORT_INTERRUPT) { 4146 u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv); 4147 if (hotplug_status) 4148 i9xx_hpd_irq_handler(dev_priv, hotplug_status); 4149 } 4150 4151 I915_WRITE(IIR, iir & ~flip_mask); 4152 new_iir = I915_READ(IIR); /* Flush posted writes */ 4153 4154 if (iir & I915_USER_INTERRUPT) 4155 notify_ring(dev_priv->engine[RCS]); 4156 if (iir & I915_BSD_USER_INTERRUPT) 4157 notify_ring(dev_priv->engine[VCS]); 4158 4159 for_each_pipe(dev_priv, pipe) { 4160 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS && 4161 i915_handle_vblank(dev_priv, pipe, pipe, iir)) 4162 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe); 4163 4164 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 4165 blc_event = true; 4166 4167 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 4168 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 4169 4170 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 4171 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 4172 } 4173 4174 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 4175 intel_opregion_asle_intr(dev_priv); 4176 4177 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 4178 gmbus_irq_handler(dev_priv); 4179 4180 /* With MSI, interrupts are only generated when iir 4181 * transitions from zero to nonzero. If another bit got 4182 * set while we were handling the existing iir bits, then 4183 * we would never get another interrupt. 4184 * 4185 * This is fine on non-MSI as well, as if we hit this path 4186 * we avoid exiting the interrupt handler only to generate 4187 * another one. 4188 * 4189 * Note that for MSI this could cause a stray interrupt report 4190 * if an interrupt landed in the time between writing IIR and 4191 * the posting read. This should be rare enough to never 4192 * trigger the 99% of 100,000 interrupts test for disabling 4193 * stray interrupts. 4194 */ 4195 iir = new_iir; 4196 } 4197 4198 enable_rpm_wakeref_asserts(dev_priv); 4199 4200 return ret; 4201 } 4202 4203 static void i965_irq_uninstall(struct drm_device * dev) 4204 { 4205 struct drm_i915_private *dev_priv = to_i915(dev); 4206 int pipe; 4207 4208 if (!dev_priv) 4209 return; 4210 4211 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); 4212 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 4213 4214 I915_WRITE(HWSTAM, 0xffffffff); 4215 for_each_pipe(dev_priv, pipe) 4216 I915_WRITE(PIPESTAT(pipe), 0); 4217 I915_WRITE(IMR, 0xffffffff); 4218 I915_WRITE(IER, 0x0); 4219 4220 for_each_pipe(dev_priv, pipe) 4221 I915_WRITE(PIPESTAT(pipe), 4222 I915_READ(PIPESTAT(pipe)) & 0x8000ffff); 4223 I915_WRITE(IIR, I915_READ(IIR)); 4224 } 4225 4226 /** 4227 * intel_irq_init - initializes irq support 4228 * @dev_priv: i915 device instance 4229 * 4230 * This function initializes all the irq support including work items, timers 4231 * and all the vtables. It does not setup the interrupt itself though. 
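 * The interrupt itself is only enabled later, through intel_irq_install().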
4232 */
4233 void intel_irq_init(struct drm_i915_private *dev_priv)
4234 {
4235 struct drm_device *dev = &dev_priv->drm;
4236
4237 intel_hpd_init_work(dev_priv);
4238
4239 INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
4240 INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
4241
4242 if (HAS_GUC_SCHED(dev_priv))
4243 dev_priv->pm_guc_events = GEN9_GUC_TO_HOST_INT_EVENT;
4244
4245 /* Let's track the enabled rps events */
4246 if (IS_VALLEYVIEW(dev_priv))
4247 /* WaGsvRC0ResidencyMethod:vlv */
4248 dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
4249 else
4250 dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
4251
4252 dev_priv->rps.pm_intrmsk_mbz = 0;
4253
4254 /*
4255 * SNB,IVB,HSW can hard hang, and VLV,CHV may hard hang, on a looping
4256 * batchbuffer if GEN6_PM_UP_EI_EXPIRED is masked.
4257 *
4258 * TODO: verify if this can be reproduced on VLV,CHV.
4259 */
4260 if (INTEL_INFO(dev_priv)->gen <= 7)
4261 dev_priv->rps.pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED;
4262
4263 if (INTEL_INFO(dev_priv)->gen >= 8)
4264 dev_priv->rps.pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
4265
4266 if (IS_GEN2(dev_priv)) {
4267 /* Gen2 doesn't have a hardware frame counter */
4268 dev->max_vblank_count = 0;
4269 } else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
4270 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
4271 dev->driver->get_vblank_counter = g4x_get_vblank_counter;
4272 } else {
4273 dev->driver->get_vblank_counter = i915_get_vblank_counter;
4274 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
4275 }
4276
4277 /*
4278 * Opt out of the vblank disable timer on everything except gen2.
4279 * Gen2 doesn't have a hardware frame counter and so depends on
4280 * vblank interrupts to produce sane vblank sequence numbers.
4281 */
4282 if (!IS_GEN2(dev_priv))
4283 dev->vblank_disable_immediate = true;
4284
4285 /* Most platforms treat the display irq block as an always-on
4286 * power domain. vlv/chv can disable it at runtime and need
4287 * special care to avoid writing any of the display block registers
4288 * outside of the power domain. We defer setting up the display irqs
4289 * in this case to the runtime pm.
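 * The display_irqs_enabled flag set just below tracks this: the vlv/chv
 * teardown paths (see cherryview_irq_uninstall() above) only reset the
 * display interrupt state when that flag is set.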
4290 */ 4291 dev_priv->display_irqs_enabled = true; 4292 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 4293 dev_priv->display_irqs_enabled = false; 4294 4295 dev_priv->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD; 4296 4297 dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp; 4298 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos; 4299 4300 if (IS_CHERRYVIEW(dev_priv)) { 4301 dev->driver->irq_handler = cherryview_irq_handler; 4302 dev->driver->irq_preinstall = cherryview_irq_preinstall; 4303 dev->driver->irq_postinstall = cherryview_irq_postinstall; 4304 dev->driver->irq_uninstall = cherryview_irq_uninstall; 4305 dev->driver->enable_vblank = i965_enable_vblank; 4306 dev->driver->disable_vblank = i965_disable_vblank; 4307 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 4308 } else if (IS_VALLEYVIEW(dev_priv)) { 4309 dev->driver->irq_handler = valleyview_irq_handler; 4310 dev->driver->irq_preinstall = valleyview_irq_preinstall; 4311 dev->driver->irq_postinstall = valleyview_irq_postinstall; 4312 dev->driver->irq_uninstall = valleyview_irq_uninstall; 4313 dev->driver->enable_vblank = i965_enable_vblank; 4314 dev->driver->disable_vblank = i965_disable_vblank; 4315 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 4316 } else if (INTEL_INFO(dev_priv)->gen >= 8) { 4317 dev->driver->irq_handler = gen8_irq_handler; 4318 dev->driver->irq_preinstall = gen8_irq_reset; 4319 dev->driver->irq_postinstall = gen8_irq_postinstall; 4320 dev->driver->irq_uninstall = gen8_irq_uninstall; 4321 dev->driver->enable_vblank = gen8_enable_vblank; 4322 dev->driver->disable_vblank = gen8_disable_vblank; 4323 if (IS_GEN9_LP(dev_priv)) 4324 dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup; 4325 else if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv)) 4326 dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup; 4327 else 4328 dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup; 4329 } else if (HAS_PCH_SPLIT(dev_priv)) { 4330 dev->driver->irq_handler = ironlake_irq_handler; 4331 dev->driver->irq_preinstall = ironlake_irq_reset; 4332 dev->driver->irq_postinstall = ironlake_irq_postinstall; 4333 dev->driver->irq_uninstall = ironlake_irq_uninstall; 4334 dev->driver->enable_vblank = ironlake_enable_vblank; 4335 dev->driver->disable_vblank = ironlake_disable_vblank; 4336 dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup; 4337 } else { 4338 if (IS_GEN2(dev_priv)) { 4339 dev->driver->irq_preinstall = i8xx_irq_preinstall; 4340 dev->driver->irq_postinstall = i8xx_irq_postinstall; 4341 dev->driver->irq_handler = i8xx_irq_handler; 4342 dev->driver->irq_uninstall = i8xx_irq_uninstall; 4343 dev->driver->enable_vblank = i8xx_enable_vblank; 4344 dev->driver->disable_vblank = i8xx_disable_vblank; 4345 } else if (IS_GEN3(dev_priv)) { 4346 dev->driver->irq_preinstall = i915_irq_preinstall; 4347 dev->driver->irq_postinstall = i915_irq_postinstall; 4348 dev->driver->irq_uninstall = i915_irq_uninstall; 4349 dev->driver->irq_handler = i915_irq_handler; 4350 dev->driver->enable_vblank = i8xx_enable_vblank; 4351 dev->driver->disable_vblank = i8xx_disable_vblank; 4352 } else { 4353 dev->driver->irq_preinstall = i965_irq_preinstall; 4354 dev->driver->irq_postinstall = i965_irq_postinstall; 4355 dev->driver->irq_uninstall = i965_irq_uninstall; 4356 dev->driver->irq_handler = i965_irq_handler; 4357 dev->driver->enable_vblank = i965_enable_vblank; 4358 dev->driver->disable_vblank = i965_disable_vblank; 4359 } 4360 if (I915_HAS_HOTPLUG(dev_priv)) 4361 dev_priv->display.hpd_irq_setup = 
i915_hpd_irq_setup;
4362 }
4363 }
4364
4365 /**
4366 * intel_irq_install - enables the hardware interrupt
4367 * @dev_priv: i915 device instance
4368 *
4369 * This function enables the hardware interrupt handling, but leaves the hotplug
4370 * handling disabled. It is called after intel_irq_init().
4371 *
4372 * In the driver load and resume code we need working interrupts in a few places
4373 * but don't want to deal with the hassle of concurrent probe and hotplug
4374 * workers. Hence the split into this two-stage approach.
4375 */
4376 int intel_irq_install(struct drm_i915_private *dev_priv)
4377 {
4378 /*
4379 * We enable some interrupt sources in our postinstall hooks, so mark
4380 * interrupts as enabled _before_ actually enabling them to avoid
4381 * special cases in our ordering checks.
4382 */
4383 dev_priv->pm.irqs_enabled = true;
4384
4385 return drm_irq_install(&dev_priv->drm, dev_priv->drm.pdev->irq);
4386 }
4387
4388 /**
4389 * intel_irq_uninstall - finalizes all irq handling
4390 * @dev_priv: i915 device instance
4391 *
4392 * This stops interrupt and hotplug handling, and unregisters and frees all
4393 * resources acquired in the init functions.
4394 */
4395 void intel_irq_uninstall(struct drm_i915_private *dev_priv)
4396 {
4397 drm_irq_uninstall(&dev_priv->drm);
4398 intel_hpd_cancel_work(dev_priv);
4399 dev_priv->pm.irqs_enabled = false;
4400 }
4401
4402 /**
4403 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
4404 * @dev_priv: i915 device instance
4405 *
4406 * This function is used to disable interrupts at runtime, both in the runtime
4407 * pm and the system suspend/resume code.
4408 */
4409 void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
4410 {
4411 dev_priv->drm.driver->irq_uninstall(&dev_priv->drm);
4412 dev_priv->pm.irqs_enabled = false;
4413 synchronize_irq(dev_priv->drm.irq);
4414 }
4415
4416 /**
4417 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
4418 * @dev_priv: i915 device instance
4419 *
4420 * This function is used to enable interrupts at runtime, both in the runtime
4421 * pm and the system suspend/resume code.
4422 */
4423 void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
4424 {
4425 dev_priv->pm.irqs_enabled = true;
4426 dev_priv->drm.driver->irq_preinstall(&dev_priv->drm);
4427 dev_priv->drm.driver->irq_postinstall(&dev_priv->drm);
4428 }
4429
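/*
 * Illustrative sketch (not part of this file): the kernel-doc above says the
 * intel_runtime_pm_disable_interrupts()/intel_runtime_pm_enable_interrupts()
 * pair is used by the runtime pm and system suspend/resume code. A caller is
 * assumed to look roughly like the hypothetical example below; the function
 * names and the surrounding power-management steps are placeholders, not
 * code from this driver.
 *
 *	static int example_suspend(struct drm_i915_private *dev_priv)
 *	{
 *		// Quiesce interrupt handling before powering down.
 *		intel_runtime_pm_disable_interrupts(dev_priv);
 *		// ... save state and power down the device ...
 *		return 0;
 *	}
 *
 *	static int example_resume(struct drm_i915_private *dev_priv)
 *	{
 *		// ... power up the device and restore state ...
 *		// Re-run the preinstall/postinstall hooks to rearm interrupts.
 *		intel_runtime_pm_enable_interrupts(dev_priv);
 *		return 0;
 *	}
 */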