1 /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*- 2 */ 3 /* 4 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. 5 * All Rights Reserved. 6 * 7 * Permission is hereby granted, free of charge, to any person obtaining a 8 * copy of this software and associated documentation files (the 9 * "Software"), to deal in the Software without restriction, including 10 * without limitation the rights to use, copy, modify, merge, publish, 11 * distribute, sub license, and/or sell copies of the Software, and to 12 * permit persons to whom the Software is furnished to do so, subject to 13 * the following conditions: 14 * 15 * The above copyright notice and this permission notice (including the 16 * next paragraph) shall be included in all copies or substantial portions 17 * of the Software. 18 * 19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 22 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR 23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, 24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 26 * 27 */ 28 29 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 30 31 #include <linux/sysrq.h> 32 #include <linux/slab.h> 33 #include <linux/circ_buf.h> 34 #include <drm/drmP.h> 35 #include <drm/i915_drm.h> 36 #include "i915_drv.h" 37 #include "i915_trace.h" 38 #include "intel_drv.h" 39 40 /** 41 * DOC: interrupt handling 42 * 43 * These functions provide the basic support for enabling and disabling the 44 * interrupt handling support. There's a lot more functionality in i915_irq.c 45 * and related files, but that will be described in separate chapters. 
46 */ 47 48 static const u32 hpd_ilk[HPD_NUM_PINS] = { 49 [HPD_PORT_A] = DE_DP_A_HOTPLUG, 50 }; 51 52 static const u32 hpd_ivb[HPD_NUM_PINS] = { 53 [HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB, 54 }; 55 56 static const u32 hpd_bdw[HPD_NUM_PINS] = { 57 [HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG, 58 }; 59 60 static const u32 hpd_ibx[HPD_NUM_PINS] = { 61 [HPD_CRT] = SDE_CRT_HOTPLUG, 62 [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG, 63 [HPD_PORT_B] = SDE_PORTB_HOTPLUG, 64 [HPD_PORT_C] = SDE_PORTC_HOTPLUG, 65 [HPD_PORT_D] = SDE_PORTD_HOTPLUG 66 }; 67 68 static const u32 hpd_cpt[HPD_NUM_PINS] = { 69 [HPD_CRT] = SDE_CRT_HOTPLUG_CPT, 70 [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT, 71 [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT, 72 [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT, 73 [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT 74 }; 75 76 static const u32 hpd_spt[HPD_NUM_PINS] = { 77 [HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT, 78 [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT, 79 [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT, 80 [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT, 81 [HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT 82 }; 83 84 static const u32 hpd_mask_i915[HPD_NUM_PINS] = { 85 [HPD_CRT] = CRT_HOTPLUG_INT_EN, 86 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN, 87 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN, 88 [HPD_PORT_B] = PORTB_HOTPLUG_INT_EN, 89 [HPD_PORT_C] = PORTC_HOTPLUG_INT_EN, 90 [HPD_PORT_D] = PORTD_HOTPLUG_INT_EN 91 }; 92 93 static const u32 hpd_status_g4x[HPD_NUM_PINS] = { 94 [HPD_CRT] = CRT_HOTPLUG_INT_STATUS, 95 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X, 96 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X, 97 [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS, 98 [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS, 99 [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS 100 }; 101 102 static const u32 hpd_status_i915[HPD_NUM_PINS] = { 103 [HPD_CRT] = CRT_HOTPLUG_INT_STATUS, 104 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915, 105 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915, 106 [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS, 107 [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS, 108 [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS 109 }; 110 111 /* BXT hpd list */ 112 static const u32 hpd_bxt[HPD_NUM_PINS] = { 113 [HPD_PORT_A] = BXT_DE_PORT_HP_DDIA, 114 [HPD_PORT_B] = BXT_DE_PORT_HP_DDIB, 115 [HPD_PORT_C] = BXT_DE_PORT_HP_DDIC 116 }; 117 118 /* IIR can theoretically queue up two events. Be paranoid. */ 119 #define GEN8_IRQ_RESET_NDX(type, which) do { \ 120 I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \ 121 POSTING_READ(GEN8_##type##_IMR(which)); \ 122 I915_WRITE(GEN8_##type##_IER(which), 0); \ 123 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \ 124 POSTING_READ(GEN8_##type##_IIR(which)); \ 125 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \ 126 POSTING_READ(GEN8_##type##_IIR(which)); \ 127 } while (0) 128 129 #define GEN5_IRQ_RESET(type) do { \ 130 I915_WRITE(type##IMR, 0xffffffff); \ 131 POSTING_READ(type##IMR); \ 132 I915_WRITE(type##IER, 0); \ 133 I915_WRITE(type##IIR, 0xffffffff); \ 134 POSTING_READ(type##IIR); \ 135 I915_WRITE(type##IIR, 0xffffffff); \ 136 POSTING_READ(type##IIR); \ 137 } while (0) 138 139 /* 140 * We should clear IMR at preinstall/uninstall, and just check at postinstall. 
141 */ 142 static void gen5_assert_iir_is_zero(struct drm_i915_private *dev_priv, 143 i915_reg_t reg) 144 { 145 u32 val = I915_READ(reg); 146 147 if (val == 0) 148 return; 149 150 WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n", 151 i915_mmio_reg_offset(reg), val); 152 I915_WRITE(reg, 0xffffffff); 153 POSTING_READ(reg); 154 I915_WRITE(reg, 0xffffffff); 155 POSTING_READ(reg); 156 } 157 158 #define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \ 159 gen5_assert_iir_is_zero(dev_priv, GEN8_##type##_IIR(which)); \ 160 I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \ 161 I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \ 162 POSTING_READ(GEN8_##type##_IMR(which)); \ 163 } while (0) 164 165 #define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \ 166 gen5_assert_iir_is_zero(dev_priv, type##IIR); \ 167 I915_WRITE(type##IER, (ier_val)); \ 168 I915_WRITE(type##IMR, (imr_val)); \ 169 POSTING_READ(type##IMR); \ 170 } while (0) 171 172 static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir); 173 static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir); 174 175 /* For display hotplug interrupt */ 176 static inline void 177 i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv, 178 uint32_t mask, 179 uint32_t bits) 180 { 181 uint32_t val; 182 183 lockdep_assert_held(&dev_priv->irq_lock); 184 WARN_ON(bits & ~mask); 185 186 val = I915_READ(PORT_HOTPLUG_EN); 187 val &= ~mask; 188 val |= bits; 189 I915_WRITE(PORT_HOTPLUG_EN, val); 190 } 191 192 /** 193 * i915_hotplug_interrupt_update - update hotplug interrupt enable 194 * @dev_priv: driver private 195 * @mask: bits to update 196 * @bits: bits to enable 197 * NOTE: the HPD enable bits are modified both inside and outside 198 * of an interrupt context. To avoid that read-modify-write cycles 199 * interfer, these bits are protected by a spinlock. Since this 200 * function is usually not called from a context where the lock is 201 * held already, this function acquires the lock itself. A non-locking 202 * version is also available. 
203 */ 204 void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv, 205 uint32_t mask, 206 uint32_t bits) 207 { 208 spin_lock_irq(&dev_priv->irq_lock); 209 i915_hotplug_interrupt_update_locked(dev_priv, mask, bits); 210 spin_unlock_irq(&dev_priv->irq_lock); 211 } 212 213 /** 214 * ilk_update_display_irq - update DEIMR 215 * @dev_priv: driver private 216 * @interrupt_mask: mask of interrupt bits to update 217 * @enabled_irq_mask: mask of interrupt bits to enable 218 */ 219 void ilk_update_display_irq(struct drm_i915_private *dev_priv, 220 uint32_t interrupt_mask, 221 uint32_t enabled_irq_mask) 222 { 223 uint32_t new_val; 224 225 lockdep_assert_held(&dev_priv->irq_lock); 226 227 WARN_ON(enabled_irq_mask & ~interrupt_mask); 228 229 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 230 return; 231 232 new_val = dev_priv->irq_mask; 233 new_val &= ~interrupt_mask; 234 new_val |= (~enabled_irq_mask & interrupt_mask); 235 236 if (new_val != dev_priv->irq_mask) { 237 dev_priv->irq_mask = new_val; 238 I915_WRITE(DEIMR, dev_priv->irq_mask); 239 POSTING_READ(DEIMR); 240 } 241 } 242 243 /** 244 * ilk_update_gt_irq - update GTIMR 245 * @dev_priv: driver private 246 * @interrupt_mask: mask of interrupt bits to update 247 * @enabled_irq_mask: mask of interrupt bits to enable 248 */ 249 static void ilk_update_gt_irq(struct drm_i915_private *dev_priv, 250 uint32_t interrupt_mask, 251 uint32_t enabled_irq_mask) 252 { 253 lockdep_assert_held(&dev_priv->irq_lock); 254 255 WARN_ON(enabled_irq_mask & ~interrupt_mask); 256 257 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 258 return; 259 260 dev_priv->gt_irq_mask &= ~interrupt_mask; 261 dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask); 262 I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 263 } 264 265 void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask) 266 { 267 ilk_update_gt_irq(dev_priv, mask, mask); 268 POSTING_READ_FW(GTIMR); 269 } 270 271 void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask) 272 { 273 ilk_update_gt_irq(dev_priv, mask, 0); 274 } 275 276 static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv) 277 { 278 return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR; 279 } 280 281 static i915_reg_t gen6_pm_imr(struct drm_i915_private *dev_priv) 282 { 283 return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR; 284 } 285 286 static i915_reg_t gen6_pm_ier(struct drm_i915_private *dev_priv) 287 { 288 return INTEL_INFO(dev_priv)->gen >= 8 ? 
GEN8_GT_IER(2) : GEN6_PMIER; 289 } 290 291 /** 292 * snb_update_pm_irq - update GEN6_PMIMR 293 * @dev_priv: driver private 294 * @interrupt_mask: mask of interrupt bits to update 295 * @enabled_irq_mask: mask of interrupt bits to enable 296 */ 297 static void snb_update_pm_irq(struct drm_i915_private *dev_priv, 298 uint32_t interrupt_mask, 299 uint32_t enabled_irq_mask) 300 { 301 uint32_t new_val; 302 303 WARN_ON(enabled_irq_mask & ~interrupt_mask); 304 305 lockdep_assert_held(&dev_priv->irq_lock); 306 307 new_val = dev_priv->pm_imr; 308 new_val &= ~interrupt_mask; 309 new_val |= (~enabled_irq_mask & interrupt_mask); 310 311 if (new_val != dev_priv->pm_imr) { 312 dev_priv->pm_imr = new_val; 313 I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_imr); 314 POSTING_READ(gen6_pm_imr(dev_priv)); 315 } 316 } 317 318 void gen6_unmask_pm_irq(struct drm_i915_private *dev_priv, u32 mask) 319 { 320 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 321 return; 322 323 snb_update_pm_irq(dev_priv, mask, mask); 324 } 325 326 static void __gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask) 327 { 328 snb_update_pm_irq(dev_priv, mask, 0); 329 } 330 331 void gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask) 332 { 333 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 334 return; 335 336 __gen6_mask_pm_irq(dev_priv, mask); 337 } 338 339 void gen6_reset_pm_iir(struct drm_i915_private *dev_priv, u32 reset_mask) 340 { 341 i915_reg_t reg = gen6_pm_iir(dev_priv); 342 343 lockdep_assert_held(&dev_priv->irq_lock); 344 345 I915_WRITE(reg, reset_mask); 346 I915_WRITE(reg, reset_mask); 347 POSTING_READ(reg); 348 } 349 350 void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, u32 enable_mask) 351 { 352 lockdep_assert_held(&dev_priv->irq_lock); 353 354 dev_priv->pm_ier |= enable_mask; 355 I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->pm_ier); 356 gen6_unmask_pm_irq(dev_priv, enable_mask); 357 /* unmask_pm_irq provides an implicit barrier (POSTING_READ) */ 358 } 359 360 void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, u32 disable_mask) 361 { 362 lockdep_assert_held(&dev_priv->irq_lock); 363 364 dev_priv->pm_ier &= ~disable_mask; 365 __gen6_mask_pm_irq(dev_priv, disable_mask); 366 I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->pm_ier); 367 /* though a barrier is missing here, but don't really need a one */ 368 } 369 370 void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv) 371 { 372 spin_lock_irq(&dev_priv->irq_lock); 373 gen6_reset_pm_iir(dev_priv, dev_priv->pm_rps_events); 374 dev_priv->rps.pm_iir = 0; 375 spin_unlock_irq(&dev_priv->irq_lock); 376 } 377 378 void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv) 379 { 380 if (READ_ONCE(dev_priv->rps.interrupts_enabled)) 381 return; 382 383 spin_lock_irq(&dev_priv->irq_lock); 384 WARN_ON_ONCE(dev_priv->rps.pm_iir); 385 WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events); 386 dev_priv->rps.interrupts_enabled = true; 387 gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events); 388 389 spin_unlock_irq(&dev_priv->irq_lock); 390 } 391 392 void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv) 393 { 394 if (!READ_ONCE(dev_priv->rps.interrupts_enabled)) 395 return; 396 397 spin_lock_irq(&dev_priv->irq_lock); 398 dev_priv->rps.interrupts_enabled = false; 399 400 I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0u)); 401 402 gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events); 403 404 spin_unlock_irq(&dev_priv->irq_lock); 405 synchronize_irq(dev_priv->drm.irq); 406 407 /* Now that we will not 
be generating any more work, flush any 408 * outsanding tasks. As we are called on the RPS idle path, 409 * we will reset the GPU to minimum frequencies, so the current 410 * state of the worker can be discarded. 411 */ 412 cancel_work_sync(&dev_priv->rps.work); 413 gen6_reset_rps_interrupts(dev_priv); 414 } 415 416 void gen9_reset_guc_interrupts(struct drm_i915_private *dev_priv) 417 { 418 spin_lock_irq(&dev_priv->irq_lock); 419 gen6_reset_pm_iir(dev_priv, dev_priv->pm_guc_events); 420 spin_unlock_irq(&dev_priv->irq_lock); 421 } 422 423 void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv) 424 { 425 spin_lock_irq(&dev_priv->irq_lock); 426 if (!dev_priv->guc.interrupts_enabled) { 427 WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & 428 dev_priv->pm_guc_events); 429 dev_priv->guc.interrupts_enabled = true; 430 gen6_enable_pm_irq(dev_priv, dev_priv->pm_guc_events); 431 } 432 spin_unlock_irq(&dev_priv->irq_lock); 433 } 434 435 void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv) 436 { 437 spin_lock_irq(&dev_priv->irq_lock); 438 dev_priv->guc.interrupts_enabled = false; 439 440 gen6_disable_pm_irq(dev_priv, dev_priv->pm_guc_events); 441 442 spin_unlock_irq(&dev_priv->irq_lock); 443 synchronize_irq(dev_priv->drm.irq); 444 445 gen9_reset_guc_interrupts(dev_priv); 446 } 447 448 /** 449 * bdw_update_port_irq - update DE port interrupt 450 * @dev_priv: driver private 451 * @interrupt_mask: mask of interrupt bits to update 452 * @enabled_irq_mask: mask of interrupt bits to enable 453 */ 454 static void bdw_update_port_irq(struct drm_i915_private *dev_priv, 455 uint32_t interrupt_mask, 456 uint32_t enabled_irq_mask) 457 { 458 uint32_t new_val; 459 uint32_t old_val; 460 461 lockdep_assert_held(&dev_priv->irq_lock); 462 463 WARN_ON(enabled_irq_mask & ~interrupt_mask); 464 465 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 466 return; 467 468 old_val = I915_READ(GEN8_DE_PORT_IMR); 469 470 new_val = old_val; 471 new_val &= ~interrupt_mask; 472 new_val |= (~enabled_irq_mask & interrupt_mask); 473 474 if (new_val != old_val) { 475 I915_WRITE(GEN8_DE_PORT_IMR, new_val); 476 POSTING_READ(GEN8_DE_PORT_IMR); 477 } 478 } 479 480 /** 481 * bdw_update_pipe_irq - update DE pipe interrupt 482 * @dev_priv: driver private 483 * @pipe: pipe whose interrupt to update 484 * @interrupt_mask: mask of interrupt bits to update 485 * @enabled_irq_mask: mask of interrupt bits to enable 486 */ 487 void bdw_update_pipe_irq(struct drm_i915_private *dev_priv, 488 enum pipe pipe, 489 uint32_t interrupt_mask, 490 uint32_t enabled_irq_mask) 491 { 492 uint32_t new_val; 493 494 lockdep_assert_held(&dev_priv->irq_lock); 495 496 WARN_ON(enabled_irq_mask & ~interrupt_mask); 497 498 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 499 return; 500 501 new_val = dev_priv->de_irq_mask[pipe]; 502 new_val &= ~interrupt_mask; 503 new_val |= (~enabled_irq_mask & interrupt_mask); 504 505 if (new_val != dev_priv->de_irq_mask[pipe]) { 506 dev_priv->de_irq_mask[pipe] = new_val; 507 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); 508 POSTING_READ(GEN8_DE_PIPE_IMR(pipe)); 509 } 510 } 511 512 /** 513 * ibx_display_interrupt_update - update SDEIMR 514 * @dev_priv: driver private 515 * @interrupt_mask: mask of interrupt bits to update 516 * @enabled_irq_mask: mask of interrupt bits to enable 517 */ 518 void ibx_display_interrupt_update(struct drm_i915_private *dev_priv, 519 uint32_t interrupt_mask, 520 uint32_t enabled_irq_mask) 521 { 522 uint32_t sdeimr = I915_READ(SDEIMR); 523 sdeimr &= ~interrupt_mask; 524 sdeimr |= 
(~enabled_irq_mask & interrupt_mask); 525 526 WARN_ON(enabled_irq_mask & ~interrupt_mask); 527 528 lockdep_assert_held(&dev_priv->irq_lock); 529 530 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 531 return; 532 533 I915_WRITE(SDEIMR, sdeimr); 534 POSTING_READ(SDEIMR); 535 } 536 537 static void 538 __i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, 539 u32 enable_mask, u32 status_mask) 540 { 541 i915_reg_t reg = PIPESTAT(pipe); 542 u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK; 543 544 lockdep_assert_held(&dev_priv->irq_lock); 545 WARN_ON(!intel_irqs_enabled(dev_priv)); 546 547 if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK || 548 status_mask & ~PIPESTAT_INT_STATUS_MASK, 549 "pipe %c: enable_mask=0x%x, status_mask=0x%x\n", 550 pipe_name(pipe), enable_mask, status_mask)) 551 return; 552 553 if ((pipestat & enable_mask) == enable_mask) 554 return; 555 556 dev_priv->pipestat_irq_mask[pipe] |= status_mask; 557 558 /* Enable the interrupt, clear any pending status */ 559 pipestat |= enable_mask | status_mask; 560 I915_WRITE(reg, pipestat); 561 POSTING_READ(reg); 562 } 563 564 static void 565 __i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, 566 u32 enable_mask, u32 status_mask) 567 { 568 i915_reg_t reg = PIPESTAT(pipe); 569 u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK; 570 571 lockdep_assert_held(&dev_priv->irq_lock); 572 WARN_ON(!intel_irqs_enabled(dev_priv)); 573 574 if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK || 575 status_mask & ~PIPESTAT_INT_STATUS_MASK, 576 "pipe %c: enable_mask=0x%x, status_mask=0x%x\n", 577 pipe_name(pipe), enable_mask, status_mask)) 578 return; 579 580 if ((pipestat & enable_mask) == 0) 581 return; 582 583 dev_priv->pipestat_irq_mask[pipe] &= ~status_mask; 584 585 pipestat &= ~enable_mask; 586 I915_WRITE(reg, pipestat); 587 POSTING_READ(reg); 588 } 589 590 static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask) 591 { 592 u32 enable_mask = status_mask << 16; 593 594 /* 595 * On pipe A we don't support the PSR interrupt yet, 596 * on pipe B and C the same bit MBZ. 597 */ 598 if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV)) 599 return 0; 600 /* 601 * On pipe B and C we don't support the PSR interrupt yet, on pipe 602 * A the same bit is for perf counters which we don't use either. 
603 */ 604 if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV)) 605 return 0; 606 607 enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS | 608 SPRITE0_FLIP_DONE_INT_EN_VLV | 609 SPRITE1_FLIP_DONE_INT_EN_VLV); 610 if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV) 611 enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV; 612 if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV) 613 enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV; 614 615 return enable_mask; 616 } 617 618 void 619 i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, 620 u32 status_mask) 621 { 622 u32 enable_mask; 623 624 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 625 enable_mask = vlv_get_pipestat_enable_mask(&dev_priv->drm, 626 status_mask); 627 else 628 enable_mask = status_mask << 16; 629 __i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask); 630 } 631 632 void 633 i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, 634 u32 status_mask) 635 { 636 u32 enable_mask; 637 638 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 639 enable_mask = vlv_get_pipestat_enable_mask(&dev_priv->drm, 640 status_mask); 641 else 642 enable_mask = status_mask << 16; 643 __i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask); 644 } 645 646 /** 647 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion 648 * @dev_priv: i915 device private 649 */ 650 static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv) 651 { 652 if (!dev_priv->opregion.asle || !IS_MOBILE(dev_priv)) 653 return; 654 655 spin_lock_irq(&dev_priv->irq_lock); 656 657 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS); 658 if (INTEL_GEN(dev_priv) >= 4) 659 i915_enable_pipestat(dev_priv, PIPE_A, 660 PIPE_LEGACY_BLC_EVENT_STATUS); 661 662 spin_unlock_irq(&dev_priv->irq_lock); 663 } 664 665 /* 666 * This timing diagram depicts the video signal in and 667 * around the vertical blanking period. 668 * 669 * Assumptions about the fictitious mode used in this example: 670 * vblank_start >= 3 671 * vsync_start = vblank_start + 1 672 * vsync_end = vblank_start + 2 673 * vtotal = vblank_start + 3 674 * 675 * start of vblank: 676 * latch double buffered registers 677 * increment frame counter (ctg+) 678 * generate start of vblank interrupt (gen4+) 679 * | 680 * | frame start: 681 * | generate frame start interrupt (aka. vblank interrupt) (gmch) 682 * | may be shifted forward 1-3 extra lines via PIPECONF 683 * | | 684 * | | start of vsync: 685 * | | generate vsync interrupt 686 * | | | 687 * ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx 688 * . \hs/ . \hs/ \hs/ \hs/ . 
\hs/ 689 * ----va---> <-----------------vb--------------------> <--------va------------- 690 * | | <----vs-----> | 691 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2) 692 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+) 693 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi) 694 * | | | 695 * last visible pixel first visible pixel 696 * | increment frame counter (gen3/4) 697 * pixel counter = vblank_start * htotal pixel counter = 0 (gen3/4) 698 * 699 * x = horizontal active 700 * _ = horizontal blanking 701 * hs = horizontal sync 702 * va = vertical active 703 * vb = vertical blanking 704 * vs = vertical sync 705 * vbs = vblank_start (number) 706 * 707 * Summary: 708 * - most events happen at the start of horizontal sync 709 * - frame start happens at the start of horizontal blank, 1-4 lines 710 * (depending on PIPECONF settings) after the start of vblank 711 * - gen3/4 pixel and frame counter are synchronized with the start 712 * of horizontal active on the first line of vertical active 713 */ 714 715 /* Called from drm generic code, passed a 'crtc', which 716 * we use as a pipe index 717 */ 718 static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe) 719 { 720 struct drm_i915_private *dev_priv = to_i915(dev); 721 i915_reg_t high_frame, low_frame; 722 u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal; 723 const struct drm_display_mode *mode = &dev->vblank[pipe].hwmode; 724 unsigned long irqflags; 725 726 htotal = mode->crtc_htotal; 727 hsync_start = mode->crtc_hsync_start; 728 vbl_start = mode->crtc_vblank_start; 729 if (mode->flags & DRM_MODE_FLAG_INTERLACE) 730 vbl_start = DIV_ROUND_UP(vbl_start, 2); 731 732 /* Convert to pixel count */ 733 vbl_start *= htotal; 734 735 /* Start of vblank event occurs at start of hsync */ 736 vbl_start -= htotal - hsync_start; 737 738 high_frame = PIPEFRAME(pipe); 739 low_frame = PIPEFRAMEPIXEL(pipe); 740 741 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 742 743 /* 744 * High & low register fields aren't synchronized, so make sure 745 * we get a low value that's stable across two reads of the high 746 * register. 747 */ 748 do { 749 high1 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK; 750 low = I915_READ_FW(low_frame); 751 high2 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK; 752 } while (high1 != high2); 753 754 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 755 756 high1 >>= PIPE_FRAME_HIGH_SHIFT; 757 pixel = low & PIPE_PIXEL_MASK; 758 low >>= PIPE_FRAME_LOW_SHIFT; 759 760 /* 761 * The frame counter increments at beginning of active. 762 * Cook up a vblank counter by also checking the pixel 763 * counter against vblank start. 764 */ 765 return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff; 766 } 767 768 static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe) 769 { 770 struct drm_i915_private *dev_priv = to_i915(dev); 771 772 return I915_READ(PIPE_FRMCOUNT_G4X(pipe)); 773 } 774 775 /* I915_READ_FW, only for fast reads of display block, no need for forcewake etc. 
*/ 776 static int __intel_get_crtc_scanline(struct intel_crtc *crtc) 777 { 778 struct drm_device *dev = crtc->base.dev; 779 struct drm_i915_private *dev_priv = to_i915(dev); 780 const struct drm_display_mode *mode; 781 struct drm_vblank_crtc *vblank; 782 enum pipe pipe = crtc->pipe; 783 int position, vtotal; 784 785 if (!crtc->active) 786 return -1; 787 788 vblank = &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)]; 789 mode = &vblank->hwmode; 790 791 vtotal = mode->crtc_vtotal; 792 if (mode->flags & DRM_MODE_FLAG_INTERLACE) 793 vtotal /= 2; 794 795 if (IS_GEN2(dev_priv)) 796 position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2; 797 else 798 position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3; 799 800 /* 801 * On HSW, the DSL reg (0x70000) appears to return 0 if we 802 * read it just before the start of vblank. So try it again 803 * so we don't accidentally end up spanning a vblank frame 804 * increment, causing the pipe_update_end() code to squak at us. 805 * 806 * The nature of this problem means we can't simply check the ISR 807 * bit and return the vblank start value; nor can we use the scanline 808 * debug register in the transcoder as it appears to have the same 809 * problem. We may need to extend this to include other platforms, 810 * but so far testing only shows the problem on HSW. 811 */ 812 if (HAS_DDI(dev_priv) && !position) { 813 int i, temp; 814 815 for (i = 0; i < 100; i++) { 816 udelay(1); 817 temp = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3; 818 if (temp != position) { 819 position = temp; 820 break; 821 } 822 } 823 } 824 825 /* 826 * See update_scanline_offset() for the details on the 827 * scanline_offset adjustment. 828 */ 829 return (position + crtc->scanline_offset) % vtotal; 830 } 831 832 static bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe, 833 bool in_vblank_irq, int *vpos, int *hpos, 834 ktime_t *stime, ktime_t *etime, 835 const struct drm_display_mode *mode) 836 { 837 struct drm_i915_private *dev_priv = to_i915(dev); 838 struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv, 839 pipe); 840 int position; 841 int vbl_start, vbl_end, hsync_start, htotal, vtotal; 842 bool in_vbl = true; 843 unsigned long irqflags; 844 845 if (WARN_ON(!mode->crtc_clock)) { 846 DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled " 847 "pipe %c\n", pipe_name(pipe)); 848 return false; 849 } 850 851 htotal = mode->crtc_htotal; 852 hsync_start = mode->crtc_hsync_start; 853 vtotal = mode->crtc_vtotal; 854 vbl_start = mode->crtc_vblank_start; 855 vbl_end = mode->crtc_vblank_end; 856 857 if (mode->flags & DRM_MODE_FLAG_INTERLACE) { 858 vbl_start = DIV_ROUND_UP(vbl_start, 2); 859 vbl_end /= 2; 860 vtotal /= 2; 861 } 862 863 /* 864 * Lock uncore.lock, as we will do multiple timing critical raw 865 * register reads, potentially with preemption disabled, so the 866 * following code must not block on uncore.lock. 867 */ 868 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 869 870 /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */ 871 872 /* Get optional system timestamp before query. */ 873 if (stime) 874 *stime = ktime_get(); 875 876 if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) { 877 /* No obvious pixelcount register. Only query vertical 878 * scanout position from Display scan line register. 879 */ 880 position = __intel_get_crtc_scanline(intel_crtc); 881 } else { 882 /* Have access to pixelcount since start of frame. 
883 * We can split this into vertical and horizontal 884 * scanout position. 885 */ 886 position = (I915_READ_FW(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT; 887 888 /* convert to pixel counts */ 889 vbl_start *= htotal; 890 vbl_end *= htotal; 891 vtotal *= htotal; 892 893 /* 894 * In interlaced modes, the pixel counter counts all pixels, 895 * so one field will have htotal more pixels. In order to avoid 896 * the reported position from jumping backwards when the pixel 897 * counter is beyond the length of the shorter field, just 898 * clamp the position the length of the shorter field. This 899 * matches how the scanline counter based position works since 900 * the scanline counter doesn't count the two half lines. 901 */ 902 if (position >= vtotal) 903 position = vtotal - 1; 904 905 /* 906 * Start of vblank interrupt is triggered at start of hsync, 907 * just prior to the first active line of vblank. However we 908 * consider lines to start at the leading edge of horizontal 909 * active. So, should we get here before we've crossed into 910 * the horizontal active of the first line in vblank, we would 911 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that, 912 * always add htotal-hsync_start to the current pixel position. 913 */ 914 position = (position + htotal - hsync_start) % vtotal; 915 } 916 917 /* Get optional system timestamp after query. */ 918 if (etime) 919 *etime = ktime_get(); 920 921 /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */ 922 923 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 924 925 in_vbl = position >= vbl_start && position < vbl_end; 926 927 /* 928 * While in vblank, position will be negative 929 * counting up towards 0 at vbl_end. And outside 930 * vblank, position will be positive counting 931 * up since vbl_end. 
932 */ 933 if (position >= vbl_start) 934 position -= vbl_end; 935 else 936 position += vtotal - vbl_end; 937 938 if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) { 939 *vpos = position; 940 *hpos = 0; 941 } else { 942 *vpos = position / htotal; 943 *hpos = position - (*vpos * htotal); 944 } 945 946 return true; 947 } 948 949 int intel_get_crtc_scanline(struct intel_crtc *crtc) 950 { 951 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 952 unsigned long irqflags; 953 int position; 954 955 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 956 position = __intel_get_crtc_scanline(crtc); 957 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 958 959 return position; 960 } 961 962 static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv) 963 { 964 u32 busy_up, busy_down, max_avg, min_avg; 965 u8 new_delay; 966 967 spin_lock(&mchdev_lock); 968 969 I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS)); 970 971 new_delay = dev_priv->ips.cur_delay; 972 973 I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG); 974 busy_up = I915_READ(RCPREVBSYTUPAVG); 975 busy_down = I915_READ(RCPREVBSYTDNAVG); 976 max_avg = I915_READ(RCBMAXAVG); 977 min_avg = I915_READ(RCBMINAVG); 978 979 /* Handle RCS change request from hw */ 980 if (busy_up > max_avg) { 981 if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay) 982 new_delay = dev_priv->ips.cur_delay - 1; 983 if (new_delay < dev_priv->ips.max_delay) 984 new_delay = dev_priv->ips.max_delay; 985 } else if (busy_down < min_avg) { 986 if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay) 987 new_delay = dev_priv->ips.cur_delay + 1; 988 if (new_delay > dev_priv->ips.min_delay) 989 new_delay = dev_priv->ips.min_delay; 990 } 991 992 if (ironlake_set_drps(dev_priv, new_delay)) 993 dev_priv->ips.cur_delay = new_delay; 994 995 spin_unlock(&mchdev_lock); 996 997 return; 998 } 999 1000 static void notify_ring(struct intel_engine_cs *engine) 1001 { 1002 struct drm_i915_gem_request *rq = NULL; 1003 struct intel_wait *wait; 1004 1005 atomic_inc(&engine->irq_count); 1006 set_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted); 1007 1008 spin_lock(&engine->breadcrumbs.irq_lock); 1009 wait = engine->breadcrumbs.irq_wait; 1010 if (wait) { 1011 /* We use a callback from the dma-fence to submit 1012 * requests after waiting on our own requests. To 1013 * ensure minimum delay in queuing the next request to 1014 * hardware, signal the fence now rather than wait for 1015 * the signaler to be woken up. We still wake up the 1016 * waiter in order to handle the irq-seqno coherency 1017 * issues (we may receive the interrupt before the 1018 * seqno is written, see __i915_request_irq_complete()) 1019 * and to handle coalescing of multiple seqno updates 1020 * and many waiters. 
1021 */ 1022 if (i915_seqno_passed(intel_engine_get_seqno(engine), 1023 wait->seqno) && 1024 !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, 1025 &wait->request->fence.flags)) 1026 rq = i915_gem_request_get(wait->request); 1027 1028 wake_up_process(wait->tsk); 1029 } else { 1030 __intel_engine_disarm_breadcrumbs(engine); 1031 } 1032 spin_unlock(&engine->breadcrumbs.irq_lock); 1033 1034 if (rq) { 1035 dma_fence_signal(&rq->fence); 1036 i915_gem_request_put(rq); 1037 } 1038 1039 trace_intel_engine_notify(engine, wait); 1040 } 1041 1042 static void vlv_c0_read(struct drm_i915_private *dev_priv, 1043 struct intel_rps_ei *ei) 1044 { 1045 ei->ktime = ktime_get_raw(); 1046 ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT); 1047 ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT); 1048 } 1049 1050 void gen6_rps_reset_ei(struct drm_i915_private *dev_priv) 1051 { 1052 memset(&dev_priv->rps.ei, 0, sizeof(dev_priv->rps.ei)); 1053 } 1054 1055 static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir) 1056 { 1057 const struct intel_rps_ei *prev = &dev_priv->rps.ei; 1058 struct intel_rps_ei now; 1059 u32 events = 0; 1060 1061 if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0) 1062 return 0; 1063 1064 vlv_c0_read(dev_priv, &now); 1065 1066 if (prev->ktime) { 1067 u64 time, c0; 1068 u32 render, media; 1069 1070 time = ktime_us_delta(now.ktime, prev->ktime); 1071 1072 time *= dev_priv->czclk_freq; 1073 1074 /* Workload can be split between render + media, 1075 * e.g. SwapBuffers being blitted in X after being rendered in 1076 * mesa. To account for this we need to combine both engines 1077 * into our activity counter. 1078 */ 1079 render = now.render_c0 - prev->render_c0; 1080 media = now.media_c0 - prev->media_c0; 1081 c0 = max(render, media); 1082 c0 *= 1000 * 100 << 8; /* to usecs and scale to threshold% */ 1083 1084 if (c0 > time * dev_priv->rps.up_threshold) 1085 events = GEN6_PM_RP_UP_THRESHOLD; 1086 else if (c0 < time * dev_priv->rps.down_threshold) 1087 events = GEN6_PM_RP_DOWN_THRESHOLD; 1088 } 1089 1090 dev_priv->rps.ei = now; 1091 return events; 1092 } 1093 1094 static bool any_waiters(struct drm_i915_private *dev_priv) 1095 { 1096 struct intel_engine_cs *engine; 1097 enum intel_engine_id id; 1098 1099 for_each_engine(engine, dev_priv, id) 1100 if (intel_engine_has_waiter(engine)) 1101 return true; 1102 1103 return false; 1104 } 1105 1106 static void gen6_pm_rps_work(struct work_struct *work) 1107 { 1108 struct drm_i915_private *dev_priv = 1109 container_of(work, struct drm_i915_private, rps.work); 1110 bool client_boost = false; 1111 int new_delay, adj, min, max; 1112 u32 pm_iir = 0; 1113 1114 spin_lock_irq(&dev_priv->irq_lock); 1115 if (dev_priv->rps.interrupts_enabled) { 1116 pm_iir = fetch_and_zero(&dev_priv->rps.pm_iir); 1117 client_boost = fetch_and_zero(&dev_priv->rps.client_boost); 1118 } 1119 spin_unlock_irq(&dev_priv->irq_lock); 1120 1121 /* Make sure we didn't queue anything we're not going to process. 
*/ 1122 WARN_ON(pm_iir & ~dev_priv->pm_rps_events); 1123 if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost) 1124 goto out; 1125 1126 mutex_lock(&dev_priv->rps.hw_lock); 1127 1128 pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir); 1129 1130 adj = dev_priv->rps.last_adj; 1131 new_delay = dev_priv->rps.cur_freq; 1132 min = dev_priv->rps.min_freq_softlimit; 1133 max = dev_priv->rps.max_freq_softlimit; 1134 if (client_boost || any_waiters(dev_priv)) 1135 max = dev_priv->rps.max_freq; 1136 if (client_boost && new_delay < dev_priv->rps.boost_freq) { 1137 new_delay = dev_priv->rps.boost_freq; 1138 adj = 0; 1139 } else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) { 1140 if (adj > 0) 1141 adj *= 2; 1142 else /* CHV needs even encode values */ 1143 adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1; 1144 1145 if (new_delay >= dev_priv->rps.max_freq_softlimit) 1146 adj = 0; 1147 } else if (client_boost || any_waiters(dev_priv)) { 1148 adj = 0; 1149 } else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) { 1150 if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq) 1151 new_delay = dev_priv->rps.efficient_freq; 1152 else if (dev_priv->rps.cur_freq > dev_priv->rps.min_freq_softlimit) 1153 new_delay = dev_priv->rps.min_freq_softlimit; 1154 adj = 0; 1155 } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) { 1156 if (adj < 0) 1157 adj *= 2; 1158 else /* CHV needs even encode values */ 1159 adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1; 1160 1161 if (new_delay <= dev_priv->rps.min_freq_softlimit) 1162 adj = 0; 1163 } else { /* unknown event */ 1164 adj = 0; 1165 } 1166 1167 dev_priv->rps.last_adj = adj; 1168 1169 /* sysfs frequency interfaces may have snuck in while servicing the 1170 * interrupt 1171 */ 1172 new_delay += adj; 1173 new_delay = clamp_t(int, new_delay, min, max); 1174 1175 if (intel_set_rps(dev_priv, new_delay)) { 1176 DRM_DEBUG_DRIVER("Failed to set new GPU frequency\n"); 1177 dev_priv->rps.last_adj = 0; 1178 } 1179 1180 mutex_unlock(&dev_priv->rps.hw_lock); 1181 1182 out: 1183 /* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */ 1184 spin_lock_irq(&dev_priv->irq_lock); 1185 if (dev_priv->rps.interrupts_enabled) 1186 gen6_unmask_pm_irq(dev_priv, dev_priv->pm_rps_events); 1187 spin_unlock_irq(&dev_priv->irq_lock); 1188 } 1189 1190 1191 /** 1192 * ivybridge_parity_work - Workqueue called when a parity error interrupt 1193 * occurred. 1194 * @work: workqueue struct 1195 * 1196 * Doesn't actually do anything except notify userspace. As a consequence of 1197 * this event, userspace should try to remap the bad rows since statistically 1198 * it is likely the same row is more likely to go bad again. 1199 */ 1200 static void ivybridge_parity_work(struct work_struct *work) 1201 { 1202 struct drm_i915_private *dev_priv = 1203 container_of(work, struct drm_i915_private, l3_parity.error_work); 1204 u32 error_status, row, bank, subbank; 1205 char *parity_event[6]; 1206 uint32_t misccpctl; 1207 uint8_t slice = 0; 1208 1209 /* We must turn off DOP level clock gating to access the L3 registers. 1210 * In order to prevent a get/put style interface, acquire struct mutex 1211 * any time we access those registers. 
1212 */ 1213 mutex_lock(&dev_priv->drm.struct_mutex); 1214 1215 /* If we've screwed up tracking, just let the interrupt fire again */ 1216 if (WARN_ON(!dev_priv->l3_parity.which_slice)) 1217 goto out; 1218 1219 misccpctl = I915_READ(GEN7_MISCCPCTL); 1220 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE); 1221 POSTING_READ(GEN7_MISCCPCTL); 1222 1223 while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) { 1224 i915_reg_t reg; 1225 1226 slice--; 1227 if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv))) 1228 break; 1229 1230 dev_priv->l3_parity.which_slice &= ~(1<<slice); 1231 1232 reg = GEN7_L3CDERRST1(slice); 1233 1234 error_status = I915_READ(reg); 1235 row = GEN7_PARITY_ERROR_ROW(error_status); 1236 bank = GEN7_PARITY_ERROR_BANK(error_status); 1237 subbank = GEN7_PARITY_ERROR_SUBBANK(error_status); 1238 1239 I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE); 1240 POSTING_READ(reg); 1241 1242 parity_event[0] = I915_L3_PARITY_UEVENT "=1"; 1243 parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row); 1244 parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank); 1245 parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank); 1246 parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice); 1247 parity_event[5] = NULL; 1248 1249 kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj, 1250 KOBJ_CHANGE, parity_event); 1251 1252 DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n", 1253 slice, row, bank, subbank); 1254 1255 kfree(parity_event[4]); 1256 kfree(parity_event[3]); 1257 kfree(parity_event[2]); 1258 kfree(parity_event[1]); 1259 } 1260 1261 I915_WRITE(GEN7_MISCCPCTL, misccpctl); 1262 1263 out: 1264 WARN_ON(dev_priv->l3_parity.which_slice); 1265 spin_lock_irq(&dev_priv->irq_lock); 1266 gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv)); 1267 spin_unlock_irq(&dev_priv->irq_lock); 1268 1269 mutex_unlock(&dev_priv->drm.struct_mutex); 1270 } 1271 1272 static void ivybridge_parity_error_irq_handler(struct drm_i915_private *dev_priv, 1273 u32 iir) 1274 { 1275 if (!HAS_L3_DPF(dev_priv)) 1276 return; 1277 1278 spin_lock(&dev_priv->irq_lock); 1279 gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv)); 1280 spin_unlock(&dev_priv->irq_lock); 1281 1282 iir &= GT_PARITY_ERROR(dev_priv); 1283 if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1) 1284 dev_priv->l3_parity.which_slice |= 1 << 1; 1285 1286 if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT) 1287 dev_priv->l3_parity.which_slice |= 1 << 0; 1288 1289 queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work); 1290 } 1291 1292 static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv, 1293 u32 gt_iir) 1294 { 1295 if (gt_iir & GT_RENDER_USER_INTERRUPT) 1296 notify_ring(dev_priv->engine[RCS]); 1297 if (gt_iir & ILK_BSD_USER_INTERRUPT) 1298 notify_ring(dev_priv->engine[VCS]); 1299 } 1300 1301 static void snb_gt_irq_handler(struct drm_i915_private *dev_priv, 1302 u32 gt_iir) 1303 { 1304 if (gt_iir & GT_RENDER_USER_INTERRUPT) 1305 notify_ring(dev_priv->engine[RCS]); 1306 if (gt_iir & GT_BSD_USER_INTERRUPT) 1307 notify_ring(dev_priv->engine[VCS]); 1308 if (gt_iir & GT_BLT_USER_INTERRUPT) 1309 notify_ring(dev_priv->engine[BCS]); 1310 1311 if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT | 1312 GT_BSD_CS_ERROR_INTERRUPT | 1313 GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) 1314 DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir); 1315 1316 if (gt_iir & GT_PARITY_ERROR(dev_priv)) 1317 ivybridge_parity_error_irq_handler(dev_priv, gt_iir); 1318 } 1319 1320 static __always_inline void 1321 
gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir, int test_shift) 1322 { 1323 bool tasklet = false; 1324 1325 if (iir & (GT_CONTEXT_SWITCH_INTERRUPT << test_shift)) { 1326 set_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted); 1327 tasklet = true; 1328 } 1329 1330 if (iir & (GT_RENDER_USER_INTERRUPT << test_shift)) { 1331 notify_ring(engine); 1332 tasklet |= i915.enable_guc_submission; 1333 } 1334 1335 if (tasklet) 1336 tasklet_hi_schedule(&engine->irq_tasklet); 1337 } 1338 1339 static irqreturn_t gen8_gt_irq_ack(struct drm_i915_private *dev_priv, 1340 u32 master_ctl, 1341 u32 gt_iir[4]) 1342 { 1343 irqreturn_t ret = IRQ_NONE; 1344 1345 if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) { 1346 gt_iir[0] = I915_READ_FW(GEN8_GT_IIR(0)); 1347 if (gt_iir[0]) { 1348 I915_WRITE_FW(GEN8_GT_IIR(0), gt_iir[0]); 1349 ret = IRQ_HANDLED; 1350 } else 1351 DRM_ERROR("The master control interrupt lied (GT0)!\n"); 1352 } 1353 1354 if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) { 1355 gt_iir[1] = I915_READ_FW(GEN8_GT_IIR(1)); 1356 if (gt_iir[1]) { 1357 I915_WRITE_FW(GEN8_GT_IIR(1), gt_iir[1]); 1358 ret = IRQ_HANDLED; 1359 } else 1360 DRM_ERROR("The master control interrupt lied (GT1)!\n"); 1361 } 1362 1363 if (master_ctl & GEN8_GT_VECS_IRQ) { 1364 gt_iir[3] = I915_READ_FW(GEN8_GT_IIR(3)); 1365 if (gt_iir[3]) { 1366 I915_WRITE_FW(GEN8_GT_IIR(3), gt_iir[3]); 1367 ret = IRQ_HANDLED; 1368 } else 1369 DRM_ERROR("The master control interrupt lied (GT3)!\n"); 1370 } 1371 1372 if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) { 1373 gt_iir[2] = I915_READ_FW(GEN8_GT_IIR(2)); 1374 if (gt_iir[2] & (dev_priv->pm_rps_events | 1375 dev_priv->pm_guc_events)) { 1376 I915_WRITE_FW(GEN8_GT_IIR(2), 1377 gt_iir[2] & (dev_priv->pm_rps_events | 1378 dev_priv->pm_guc_events)); 1379 ret = IRQ_HANDLED; 1380 } else 1381 DRM_ERROR("The master control interrupt lied (PM)!\n"); 1382 } 1383 1384 return ret; 1385 } 1386 1387 static void gen8_gt_irq_handler(struct drm_i915_private *dev_priv, 1388 u32 gt_iir[4]) 1389 { 1390 if (gt_iir[0]) { 1391 gen8_cs_irq_handler(dev_priv->engine[RCS], 1392 gt_iir[0], GEN8_RCS_IRQ_SHIFT); 1393 gen8_cs_irq_handler(dev_priv->engine[BCS], 1394 gt_iir[0], GEN8_BCS_IRQ_SHIFT); 1395 } 1396 1397 if (gt_iir[1]) { 1398 gen8_cs_irq_handler(dev_priv->engine[VCS], 1399 gt_iir[1], GEN8_VCS1_IRQ_SHIFT); 1400 gen8_cs_irq_handler(dev_priv->engine[VCS2], 1401 gt_iir[1], GEN8_VCS2_IRQ_SHIFT); 1402 } 1403 1404 if (gt_iir[3]) 1405 gen8_cs_irq_handler(dev_priv->engine[VECS], 1406 gt_iir[3], GEN8_VECS_IRQ_SHIFT); 1407 1408 if (gt_iir[2] & dev_priv->pm_rps_events) 1409 gen6_rps_irq_handler(dev_priv, gt_iir[2]); 1410 1411 if (gt_iir[2] & dev_priv->pm_guc_events) 1412 gen9_guc_irq_handler(dev_priv, gt_iir[2]); 1413 } 1414 1415 static bool bxt_port_hotplug_long_detect(enum port port, u32 val) 1416 { 1417 switch (port) { 1418 case PORT_A: 1419 return val & PORTA_HOTPLUG_LONG_DETECT; 1420 case PORT_B: 1421 return val & PORTB_HOTPLUG_LONG_DETECT; 1422 case PORT_C: 1423 return val & PORTC_HOTPLUG_LONG_DETECT; 1424 default: 1425 return false; 1426 } 1427 } 1428 1429 static bool spt_port_hotplug2_long_detect(enum port port, u32 val) 1430 { 1431 switch (port) { 1432 case PORT_E: 1433 return val & PORTE_HOTPLUG_LONG_DETECT; 1434 default: 1435 return false; 1436 } 1437 } 1438 1439 static bool spt_port_hotplug_long_detect(enum port port, u32 val) 1440 { 1441 switch (port) { 1442 case PORT_A: 1443 return val & PORTA_HOTPLUG_LONG_DETECT; 1444 case PORT_B: 1445 return val & PORTB_HOTPLUG_LONG_DETECT; 1446 case PORT_C: 
1447 return val & PORTC_HOTPLUG_LONG_DETECT; 1448 case PORT_D: 1449 return val & PORTD_HOTPLUG_LONG_DETECT; 1450 default: 1451 return false; 1452 } 1453 } 1454 1455 static bool ilk_port_hotplug_long_detect(enum port port, u32 val) 1456 { 1457 switch (port) { 1458 case PORT_A: 1459 return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT; 1460 default: 1461 return false; 1462 } 1463 } 1464 1465 static bool pch_port_hotplug_long_detect(enum port port, u32 val) 1466 { 1467 switch (port) { 1468 case PORT_B: 1469 return val & PORTB_HOTPLUG_LONG_DETECT; 1470 case PORT_C: 1471 return val & PORTC_HOTPLUG_LONG_DETECT; 1472 case PORT_D: 1473 return val & PORTD_HOTPLUG_LONG_DETECT; 1474 default: 1475 return false; 1476 } 1477 } 1478 1479 static bool i9xx_port_hotplug_long_detect(enum port port, u32 val) 1480 { 1481 switch (port) { 1482 case PORT_B: 1483 return val & PORTB_HOTPLUG_INT_LONG_PULSE; 1484 case PORT_C: 1485 return val & PORTC_HOTPLUG_INT_LONG_PULSE; 1486 case PORT_D: 1487 return val & PORTD_HOTPLUG_INT_LONG_PULSE; 1488 default: 1489 return false; 1490 } 1491 } 1492 1493 /* 1494 * Get a bit mask of pins that have triggered, and which ones may be long. 1495 * This can be called multiple times with the same masks to accumulate 1496 * hotplug detection results from several registers. 1497 * 1498 * Note that the caller is expected to zero out the masks initially. 1499 */ 1500 static void intel_get_hpd_pins(u32 *pin_mask, u32 *long_mask, 1501 u32 hotplug_trigger, u32 dig_hotplug_reg, 1502 const u32 hpd[HPD_NUM_PINS], 1503 bool long_pulse_detect(enum port port, u32 val)) 1504 { 1505 enum port port; 1506 int i; 1507 1508 for_each_hpd_pin(i) { 1509 if ((hpd[i] & hotplug_trigger) == 0) 1510 continue; 1511 1512 *pin_mask |= BIT(i); 1513 1514 if (!intel_hpd_pin_to_port(i, &port)) 1515 continue; 1516 1517 if (long_pulse_detect(port, dig_hotplug_reg)) 1518 *long_mask |= BIT(i); 1519 } 1520 1521 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x\n", 1522 hotplug_trigger, dig_hotplug_reg, *pin_mask); 1523 1524 } 1525 1526 static void gmbus_irq_handler(struct drm_i915_private *dev_priv) 1527 { 1528 wake_up_all(&dev_priv->gmbus_wait_queue); 1529 } 1530 1531 static void dp_aux_irq_handler(struct drm_i915_private *dev_priv) 1532 { 1533 wake_up_all(&dev_priv->gmbus_wait_queue); 1534 } 1535 1536 #if defined(CONFIG_DEBUG_FS) 1537 static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 1538 enum pipe pipe, 1539 uint32_t crc0, uint32_t crc1, 1540 uint32_t crc2, uint32_t crc3, 1541 uint32_t crc4) 1542 { 1543 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe]; 1544 struct intel_pipe_crc_entry *entry; 1545 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); 1546 struct drm_driver *driver = dev_priv->drm.driver; 1547 uint32_t crcs[5]; 1548 int head, tail; 1549 1550 spin_lock(&pipe_crc->lock); 1551 if (pipe_crc->source) { 1552 if (!pipe_crc->entries) { 1553 spin_unlock(&pipe_crc->lock); 1554 DRM_DEBUG_KMS("spurious interrupt\n"); 1555 return; 1556 } 1557 1558 head = pipe_crc->head; 1559 tail = pipe_crc->tail; 1560 1561 if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) { 1562 spin_unlock(&pipe_crc->lock); 1563 DRM_ERROR("CRC buffer overflowing\n"); 1564 return; 1565 } 1566 1567 entry = &pipe_crc->entries[head]; 1568 1569 entry->frame = driver->get_vblank_counter(&dev_priv->drm, pipe); 1570 entry->crc[0] = crc0; 1571 entry->crc[1] = crc1; 1572 entry->crc[2] = crc2; 1573 entry->crc[3] = crc3; 1574 entry->crc[4] = crc4; 1575 1576 head = (head + 1) & 
(INTEL_PIPE_CRC_ENTRIES_NR - 1); 1577 pipe_crc->head = head; 1578 1579 spin_unlock(&pipe_crc->lock); 1580 1581 wake_up_interruptible(&pipe_crc->wq); 1582 } else { 1583 /* 1584 * For some not yet identified reason, the first CRC is 1585 * bonkers. So let's just wait for the next vblank and read 1586 * out the buggy result. 1587 * 1588 * On CHV sometimes the second CRC is bonkers as well, so 1589 * don't trust that one either. 1590 */ 1591 if (pipe_crc->skipped == 0 || 1592 (IS_CHERRYVIEW(dev_priv) && pipe_crc->skipped == 1)) { 1593 pipe_crc->skipped++; 1594 spin_unlock(&pipe_crc->lock); 1595 return; 1596 } 1597 spin_unlock(&pipe_crc->lock); 1598 crcs[0] = crc0; 1599 crcs[1] = crc1; 1600 crcs[2] = crc2; 1601 crcs[3] = crc3; 1602 crcs[4] = crc4; 1603 drm_crtc_add_crc_entry(&crtc->base, true, 1604 drm_accurate_vblank_count(&crtc->base), 1605 crcs); 1606 } 1607 } 1608 #else 1609 static inline void 1610 display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 1611 enum pipe pipe, 1612 uint32_t crc0, uint32_t crc1, 1613 uint32_t crc2, uint32_t crc3, 1614 uint32_t crc4) {} 1615 #endif 1616 1617 1618 static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 1619 enum pipe pipe) 1620 { 1621 display_pipe_crc_irq_handler(dev_priv, pipe, 1622 I915_READ(PIPE_CRC_RES_1_IVB(pipe)), 1623 0, 0, 0, 0); 1624 } 1625 1626 static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 1627 enum pipe pipe) 1628 { 1629 display_pipe_crc_irq_handler(dev_priv, pipe, 1630 I915_READ(PIPE_CRC_RES_1_IVB(pipe)), 1631 I915_READ(PIPE_CRC_RES_2_IVB(pipe)), 1632 I915_READ(PIPE_CRC_RES_3_IVB(pipe)), 1633 I915_READ(PIPE_CRC_RES_4_IVB(pipe)), 1634 I915_READ(PIPE_CRC_RES_5_IVB(pipe))); 1635 } 1636 1637 static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 1638 enum pipe pipe) 1639 { 1640 uint32_t res1, res2; 1641 1642 if (INTEL_GEN(dev_priv) >= 3) 1643 res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe)); 1644 else 1645 res1 = 0; 1646 1647 if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) 1648 res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe)); 1649 else 1650 res2 = 0; 1651 1652 display_pipe_crc_irq_handler(dev_priv, pipe, 1653 I915_READ(PIPE_CRC_RES_RED(pipe)), 1654 I915_READ(PIPE_CRC_RES_GREEN(pipe)), 1655 I915_READ(PIPE_CRC_RES_BLUE(pipe)), 1656 res1, res2); 1657 } 1658 1659 /* The RPS events need forcewake, so we add them to a work queue and mask their 1660 * IMR bits until the work is done. Other interrupts can be processed without 1661 * the work queue. 
*/ 1662 static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir) 1663 { 1664 if (pm_iir & dev_priv->pm_rps_events) { 1665 spin_lock(&dev_priv->irq_lock); 1666 gen6_mask_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events); 1667 if (dev_priv->rps.interrupts_enabled) { 1668 dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events; 1669 schedule_work(&dev_priv->rps.work); 1670 } 1671 spin_unlock(&dev_priv->irq_lock); 1672 } 1673 1674 if (INTEL_INFO(dev_priv)->gen >= 8) 1675 return; 1676 1677 if (HAS_VEBOX(dev_priv)) { 1678 if (pm_iir & PM_VEBOX_USER_INTERRUPT) 1679 notify_ring(dev_priv->engine[VECS]); 1680 1681 if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) 1682 DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir); 1683 } 1684 } 1685 1686 static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 gt_iir) 1687 { 1688 if (gt_iir & GEN9_GUC_TO_HOST_INT_EVENT) { 1689 /* Sample the log buffer flush related bits & clear them out now 1690 * itself from the message identity register to minimize the 1691 * probability of losing a flush interrupt, when there are back 1692 * to back flush interrupts. 1693 * There can be a new flush interrupt, for different log buffer 1694 * type (like for ISR), whilst Host is handling one (for DPC). 1695 * Since same bit is used in message register for ISR & DPC, it 1696 * could happen that GuC sets the bit for 2nd interrupt but Host 1697 * clears out the bit on handling the 1st interrupt. 1698 */ 1699 u32 msg, flush; 1700 1701 msg = I915_READ(SOFT_SCRATCH(15)); 1702 flush = msg & (INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED | 1703 INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER); 1704 if (flush) { 1705 /* Clear the message bits that are handled */ 1706 I915_WRITE(SOFT_SCRATCH(15), msg & ~flush); 1707 1708 /* Handle flush interrupt in bottom half */ 1709 queue_work(dev_priv->guc.log.runtime.flush_wq, 1710 &dev_priv->guc.log.runtime.flush_work); 1711 1712 dev_priv->guc.log.flush_interrupt_count++; 1713 } else { 1714 /* Not clearing of unhandled event bits won't result in 1715 * re-triggering of the interrupt. 1716 */ 1717 } 1718 } 1719 } 1720 1721 static bool intel_pipe_handle_vblank(struct drm_i915_private *dev_priv, 1722 enum pipe pipe) 1723 { 1724 bool ret; 1725 1726 ret = drm_handle_vblank(&dev_priv->drm, pipe); 1727 if (ret) 1728 intel_finish_page_flip_mmio(dev_priv, pipe); 1729 1730 return ret; 1731 } 1732 1733 static void valleyview_pipestat_irq_ack(struct drm_i915_private *dev_priv, 1734 u32 iir, u32 pipe_stats[I915_MAX_PIPES]) 1735 { 1736 int pipe; 1737 1738 spin_lock(&dev_priv->irq_lock); 1739 1740 if (!dev_priv->display_irqs_enabled) { 1741 spin_unlock(&dev_priv->irq_lock); 1742 return; 1743 } 1744 1745 for_each_pipe(dev_priv, pipe) { 1746 i915_reg_t reg; 1747 u32 mask, iir_bit = 0; 1748 1749 /* 1750 * PIPESTAT bits get signalled even when the interrupt is 1751 * disabled with the mask bits, and some of the status bits do 1752 * not generate interrupts at all (like the underrun bit). Hence 1753 * we need to be careful that we only handle what we want to 1754 * handle. 1755 */ 1756 1757 /* fifo underruns are filterered in the underrun handler. 
*/ 1758 mask = PIPE_FIFO_UNDERRUN_STATUS; 1759 1760 switch (pipe) { 1761 case PIPE_A: 1762 iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT; 1763 break; 1764 case PIPE_B: 1765 iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; 1766 break; 1767 case PIPE_C: 1768 iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT; 1769 break; 1770 } 1771 if (iir & iir_bit) 1772 mask |= dev_priv->pipestat_irq_mask[pipe]; 1773 1774 if (!mask) 1775 continue; 1776 1777 reg = PIPESTAT(pipe); 1778 mask |= PIPESTAT_INT_ENABLE_MASK; 1779 pipe_stats[pipe] = I915_READ(reg) & mask; 1780 1781 /* 1782 * Clear the PIPE*STAT regs before the IIR 1783 */ 1784 if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS | 1785 PIPESTAT_INT_STATUS_MASK)) 1786 I915_WRITE(reg, pipe_stats[pipe]); 1787 } 1788 spin_unlock(&dev_priv->irq_lock); 1789 } 1790 1791 static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv, 1792 u32 pipe_stats[I915_MAX_PIPES]) 1793 { 1794 enum pipe pipe; 1795 1796 for_each_pipe(dev_priv, pipe) { 1797 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS && 1798 intel_pipe_handle_vblank(dev_priv, pipe)) 1799 intel_check_page_flip(dev_priv, pipe); 1800 1801 if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) 1802 intel_finish_page_flip_cs(dev_priv, pipe); 1803 1804 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 1805 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 1806 1807 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 1808 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 1809 } 1810 1811 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 1812 gmbus_irq_handler(dev_priv); 1813 } 1814 1815 static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv) 1816 { 1817 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); 1818 1819 if (hotplug_status) 1820 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); 1821 1822 return hotplug_status; 1823 } 1824 1825 static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv, 1826 u32 hotplug_status) 1827 { 1828 u32 pin_mask = 0, long_mask = 0; 1829 1830 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || 1831 IS_CHERRYVIEW(dev_priv)) { 1832 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X; 1833 1834 if (hotplug_trigger) { 1835 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, 1836 hotplug_trigger, hpd_status_g4x, 1837 i9xx_port_hotplug_long_detect); 1838 1839 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 1840 } 1841 1842 if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X) 1843 dp_aux_irq_handler(dev_priv); 1844 } else { 1845 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; 1846 1847 if (hotplug_trigger) { 1848 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, 1849 hotplug_trigger, hpd_status_i915, 1850 i9xx_port_hotplug_long_detect); 1851 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 1852 } 1853 } 1854 } 1855 1856 static irqreturn_t valleyview_irq_handler(int irq, void *arg) 1857 { 1858 struct drm_device *dev = arg; 1859 struct drm_i915_private *dev_priv = to_i915(dev); 1860 irqreturn_t ret = IRQ_NONE; 1861 1862 if (!intel_irqs_enabled(dev_priv)) 1863 return IRQ_NONE; 1864 1865 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 1866 disable_rpm_wakeref_asserts(dev_priv); 1867 1868 do { 1869 u32 iir, gt_iir, pm_iir; 1870 u32 pipe_stats[I915_MAX_PIPES] = {}; 1871 u32 hotplug_status = 0; 1872 u32 ier = 0; 1873 1874 gt_iir = I915_READ(GTIIR); 1875 pm_iir = I915_READ(GEN6_PMIIR); 1876 iir = I915_READ(VLV_IIR); 1877 1878 if (gt_iir == 0 && pm_iir == 0 && iir == 0) 1879 break; 1880 
1881 ret = IRQ_HANDLED; 1882 1883 /* 1884 * Theory on interrupt generation, based on empirical evidence: 1885 * 1886 * x = ((VLV_IIR & VLV_IER) || 1887 * (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) && 1888 * (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE))); 1889 * 1890 * A CPU interrupt will only be raised when 'x' has a 0->1 edge. 1891 * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to 1892 * guarantee the CPU interrupt will be raised again even if we 1893 * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR 1894 * bits this time around. 1895 */ 1896 I915_WRITE(VLV_MASTER_IER, 0); 1897 ier = I915_READ(VLV_IER); 1898 I915_WRITE(VLV_IER, 0); 1899 1900 if (gt_iir) 1901 I915_WRITE(GTIIR, gt_iir); 1902 if (pm_iir) 1903 I915_WRITE(GEN6_PMIIR, pm_iir); 1904 1905 if (iir & I915_DISPLAY_PORT_INTERRUPT) 1906 hotplug_status = i9xx_hpd_irq_ack(dev_priv); 1907 1908 /* Call regardless, as some status bits might not be 1909 * signalled in iir */ 1910 valleyview_pipestat_irq_ack(dev_priv, iir, pipe_stats); 1911 1912 if (iir & (I915_LPE_PIPE_A_INTERRUPT | 1913 I915_LPE_PIPE_B_INTERRUPT)) 1914 intel_lpe_audio_irq_handler(dev_priv); 1915 1916 /* 1917 * VLV_IIR is single buffered, and reflects the level 1918 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last. 1919 */ 1920 if (iir) 1921 I915_WRITE(VLV_IIR, iir); 1922 1923 I915_WRITE(VLV_IER, ier); 1924 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); 1925 POSTING_READ(VLV_MASTER_IER); 1926 1927 if (gt_iir) 1928 snb_gt_irq_handler(dev_priv, gt_iir); 1929 if (pm_iir) 1930 gen6_rps_irq_handler(dev_priv, pm_iir); 1931 1932 if (hotplug_status) 1933 i9xx_hpd_irq_handler(dev_priv, hotplug_status); 1934 1935 valleyview_pipestat_irq_handler(dev_priv, pipe_stats); 1936 } while (0); 1937 1938 enable_rpm_wakeref_asserts(dev_priv); 1939 1940 return ret; 1941 } 1942 1943 static irqreturn_t cherryview_irq_handler(int irq, void *arg) 1944 { 1945 struct drm_device *dev = arg; 1946 struct drm_i915_private *dev_priv = to_i915(dev); 1947 irqreturn_t ret = IRQ_NONE; 1948 1949 if (!intel_irqs_enabled(dev_priv)) 1950 return IRQ_NONE; 1951 1952 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 1953 disable_rpm_wakeref_asserts(dev_priv); 1954 1955 do { 1956 u32 master_ctl, iir; 1957 u32 gt_iir[4] = {}; 1958 u32 pipe_stats[I915_MAX_PIPES] = {}; 1959 u32 hotplug_status = 0; 1960 u32 ier = 0; 1961 1962 master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL; 1963 iir = I915_READ(VLV_IIR); 1964 1965 if (master_ctl == 0 && iir == 0) 1966 break; 1967 1968 ret = IRQ_HANDLED; 1969 1970 /* 1971 * Theory on interrupt generation, based on empirical evidence: 1972 * 1973 * x = ((VLV_IIR & VLV_IER) || 1974 * ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) && 1975 * (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL))); 1976 * 1977 * A CPU interrupt will only be raised when 'x' has a 0->1 edge. 1978 * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to 1979 * guarantee the CPU interrupt will be raised again even if we 1980 * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL 1981 * bits this time around. 
1982 */
1983 I915_WRITE(GEN8_MASTER_IRQ, 0);
1984 ier = I915_READ(VLV_IER);
1985 I915_WRITE(VLV_IER, 0);
1986
1987 gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);
1988
1989 if (iir & I915_DISPLAY_PORT_INTERRUPT)
1990 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
1991
1992 /* Call regardless, as some status bits might not be
1993 * signalled in iir */
1994 valleyview_pipestat_irq_ack(dev_priv, iir, pipe_stats);
1995
1996 if (iir & (I915_LPE_PIPE_A_INTERRUPT |
1997 I915_LPE_PIPE_B_INTERRUPT |
1998 I915_LPE_PIPE_C_INTERRUPT))
1999 intel_lpe_audio_irq_handler(dev_priv);
2000
2001 /*
2002 * VLV_IIR is single buffered, and reflects the level
2003 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
2004 */
2005 if (iir)
2006 I915_WRITE(VLV_IIR, iir);
2007
2008 I915_WRITE(VLV_IER, ier);
2009 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
2010 POSTING_READ(GEN8_MASTER_IRQ);
2011
2012 gen8_gt_irq_handler(dev_priv, gt_iir);
2013
2014 if (hotplug_status)
2015 i9xx_hpd_irq_handler(dev_priv, hotplug_status);
2016
2017 valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
2018 } while (0);
2019
2020 enable_rpm_wakeref_asserts(dev_priv);
2021
2022 return ret;
2023 }
2024
2025 static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
2026 u32 hotplug_trigger,
2027 const u32 hpd[HPD_NUM_PINS])
2028 {
2029 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2030
2031 /*
2032 * Somehow the PCH doesn't seem to really ack the interrupt to the CPU
2033 * unless we touch the hotplug register, even if hotplug_trigger is
2034 * zero. Not acking leads to "The master control interrupt lied (SDE)!"
2035 * errors.
2036 */
2037 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2038 if (!hotplug_trigger) {
2039 u32 mask = PORTA_HOTPLUG_STATUS_MASK |
2040 PORTD_HOTPLUG_STATUS_MASK |
2041 PORTC_HOTPLUG_STATUS_MASK |
2042 PORTB_HOTPLUG_STATUS_MASK;
2043 dig_hotplug_reg &= ~mask;
2044 }
2045
2046 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2047 if (!hotplug_trigger)
2048 return;
2049
2050 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
2051 dig_hotplug_reg, hpd,
2052 pch_port_hotplug_long_detect);
2053
2054 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2055 }
2056
2057 static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
2058 {
2059 int pipe;
2060 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
2061
2062 ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ibx);
2063
2064 if (pch_iir & SDE_AUDIO_POWER_MASK) {
2065 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
2066 SDE_AUDIO_POWER_SHIFT);
2067 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
2068 port_name(port));
2069 }
2070
2071 if (pch_iir & SDE_AUX_MASK)
2072 dp_aux_irq_handler(dev_priv);
2073
2074 if (pch_iir & SDE_GMBUS)
2075 gmbus_irq_handler(dev_priv);
2076
2077 if (pch_iir & SDE_AUDIO_HDCP_MASK)
2078 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
2079
2080 if (pch_iir & SDE_AUDIO_TRANS_MASK)
2081 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
2082
2083 if (pch_iir & SDE_POISON)
2084 DRM_ERROR("PCH poison interrupt\n");
2085
2086 if (pch_iir & SDE_FDI_MASK)
2087 for_each_pipe(dev_priv, pipe)
2088 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
2089 pipe_name(pipe),
2090 I915_READ(FDI_RX_IIR(pipe)));
2091
2092 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
2093 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
2094
2095 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
2096 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
2097
2098 if (pch_iir &
SDE_TRANSA_FIFO_UNDER) 2099 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A); 2100 2101 if (pch_iir & SDE_TRANSB_FIFO_UNDER) 2102 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B); 2103 } 2104 2105 static void ivb_err_int_handler(struct drm_i915_private *dev_priv) 2106 { 2107 u32 err_int = I915_READ(GEN7_ERR_INT); 2108 enum pipe pipe; 2109 2110 if (err_int & ERR_INT_POISON) 2111 DRM_ERROR("Poison interrupt\n"); 2112 2113 for_each_pipe(dev_priv, pipe) { 2114 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) 2115 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2116 2117 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) { 2118 if (IS_IVYBRIDGE(dev_priv)) 2119 ivb_pipe_crc_irq_handler(dev_priv, pipe); 2120 else 2121 hsw_pipe_crc_irq_handler(dev_priv, pipe); 2122 } 2123 } 2124 2125 I915_WRITE(GEN7_ERR_INT, err_int); 2126 } 2127 2128 static void cpt_serr_int_handler(struct drm_i915_private *dev_priv) 2129 { 2130 u32 serr_int = I915_READ(SERR_INT); 2131 2132 if (serr_int & SERR_INT_POISON) 2133 DRM_ERROR("PCH poison interrupt\n"); 2134 2135 if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN) 2136 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A); 2137 2138 if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN) 2139 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B); 2140 2141 if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN) 2142 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_C); 2143 2144 I915_WRITE(SERR_INT, serr_int); 2145 } 2146 2147 static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) 2148 { 2149 int pipe; 2150 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; 2151 2152 ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_cpt); 2153 2154 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) { 2155 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> 2156 SDE_AUDIO_POWER_SHIFT_CPT); 2157 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n", 2158 port_name(port)); 2159 } 2160 2161 if (pch_iir & SDE_AUX_MASK_CPT) 2162 dp_aux_irq_handler(dev_priv); 2163 2164 if (pch_iir & SDE_GMBUS_CPT) 2165 gmbus_irq_handler(dev_priv); 2166 2167 if (pch_iir & SDE_AUDIO_CP_REQ_CPT) 2168 DRM_DEBUG_DRIVER("Audio CP request interrupt\n"); 2169 2170 if (pch_iir & SDE_AUDIO_CP_CHG_CPT) 2171 DRM_DEBUG_DRIVER("Audio CP change interrupt\n"); 2172 2173 if (pch_iir & SDE_FDI_MASK_CPT) 2174 for_each_pipe(dev_priv, pipe) 2175 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 2176 pipe_name(pipe), 2177 I915_READ(FDI_RX_IIR(pipe))); 2178 2179 if (pch_iir & SDE_ERROR_CPT) 2180 cpt_serr_int_handler(dev_priv); 2181 } 2182 2183 static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) 2184 { 2185 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT & 2186 ~SDE_PORTE_HOTPLUG_SPT; 2187 u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT; 2188 u32 pin_mask = 0, long_mask = 0; 2189 2190 if (hotplug_trigger) { 2191 u32 dig_hotplug_reg; 2192 2193 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); 2194 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); 2195 2196 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, 2197 dig_hotplug_reg, hpd_spt, 2198 spt_port_hotplug_long_detect); 2199 } 2200 2201 if (hotplug2_trigger) { 2202 u32 dig_hotplug_reg; 2203 2204 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2); 2205 I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg); 2206 2207 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug2_trigger, 2208 dig_hotplug_reg, hpd_spt, 2209 spt_port_hotplug2_long_detect); 2210 } 2211 2212 if (pin_mask) 2213 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 
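/* GMBUS (DDC) completion events on SPT/KBP arrive via the same SDEIIR. */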
2214 2215 if (pch_iir & SDE_GMBUS_CPT) 2216 gmbus_irq_handler(dev_priv); 2217 } 2218 2219 static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv, 2220 u32 hotplug_trigger, 2221 const u32 hpd[HPD_NUM_PINS]) 2222 { 2223 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; 2224 2225 dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL); 2226 I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg); 2227 2228 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, 2229 dig_hotplug_reg, hpd, 2230 ilk_port_hotplug_long_detect); 2231 2232 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2233 } 2234 2235 static void ilk_display_irq_handler(struct drm_i915_private *dev_priv, 2236 u32 de_iir) 2237 { 2238 enum pipe pipe; 2239 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG; 2240 2241 if (hotplug_trigger) 2242 ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ilk); 2243 2244 if (de_iir & DE_AUX_CHANNEL_A) 2245 dp_aux_irq_handler(dev_priv); 2246 2247 if (de_iir & DE_GSE) 2248 intel_opregion_asle_intr(dev_priv); 2249 2250 if (de_iir & DE_POISON) 2251 DRM_ERROR("Poison interrupt\n"); 2252 2253 for_each_pipe(dev_priv, pipe) { 2254 if (de_iir & DE_PIPE_VBLANK(pipe) && 2255 intel_pipe_handle_vblank(dev_priv, pipe)) 2256 intel_check_page_flip(dev_priv, pipe); 2257 2258 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe)) 2259 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2260 2261 if (de_iir & DE_PIPE_CRC_DONE(pipe)) 2262 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 2263 2264 /* plane/pipes map 1:1 on ilk+ */ 2265 if (de_iir & DE_PLANE_FLIP_DONE(pipe)) 2266 intel_finish_page_flip_cs(dev_priv, pipe); 2267 } 2268 2269 /* check event from PCH */ 2270 if (de_iir & DE_PCH_EVENT) { 2271 u32 pch_iir = I915_READ(SDEIIR); 2272 2273 if (HAS_PCH_CPT(dev_priv)) 2274 cpt_irq_handler(dev_priv, pch_iir); 2275 else 2276 ibx_irq_handler(dev_priv, pch_iir); 2277 2278 /* should clear PCH hotplug event before clear CPU irq */ 2279 I915_WRITE(SDEIIR, pch_iir); 2280 } 2281 2282 if (IS_GEN5(dev_priv) && de_iir & DE_PCU_EVENT) 2283 ironlake_rps_change_irq_handler(dev_priv); 2284 } 2285 2286 static void ivb_display_irq_handler(struct drm_i915_private *dev_priv, 2287 u32 de_iir) 2288 { 2289 enum pipe pipe; 2290 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB; 2291 2292 if (hotplug_trigger) 2293 ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ivb); 2294 2295 if (de_iir & DE_ERR_INT_IVB) 2296 ivb_err_int_handler(dev_priv); 2297 2298 if (de_iir & DE_AUX_CHANNEL_A_IVB) 2299 dp_aux_irq_handler(dev_priv); 2300 2301 if (de_iir & DE_GSE_IVB) 2302 intel_opregion_asle_intr(dev_priv); 2303 2304 for_each_pipe(dev_priv, pipe) { 2305 if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) && 2306 intel_pipe_handle_vblank(dev_priv, pipe)) 2307 intel_check_page_flip(dev_priv, pipe); 2308 2309 /* plane/pipes map 1:1 on ilk+ */ 2310 if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) 2311 intel_finish_page_flip_cs(dev_priv, pipe); 2312 } 2313 2314 /* check event from PCH */ 2315 if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) { 2316 u32 pch_iir = I915_READ(SDEIIR); 2317 2318 cpt_irq_handler(dev_priv, pch_iir); 2319 2320 /* clear PCH hotplug event before clear CPU irq */ 2321 I915_WRITE(SDEIIR, pch_iir); 2322 } 2323 } 2324 2325 /* 2326 * To handle irqs with the minimum potential races with fresh interrupts, we: 2327 * 1 - Disable Master Interrupt Control. 2328 * 2 - Find the source(s) of the interrupt. 2329 * 3 - Clear the Interrupt Identity bits (IIR). 2330 * 4 - Process the interrupt(s) that had bits set in the IIRs. 
2331 * 5 - Re-enable Master Interrupt Control.
2332 */
2333 static irqreturn_t ironlake_irq_handler(int irq, void *arg)
2334 {
2335 struct drm_device *dev = arg;
2336 struct drm_i915_private *dev_priv = to_i915(dev);
2337 u32 de_iir, gt_iir, de_ier, sde_ier = 0;
2338 irqreturn_t ret = IRQ_NONE;
2339
2340 if (!intel_irqs_enabled(dev_priv))
2341 return IRQ_NONE;
2342
2343 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
2344 disable_rpm_wakeref_asserts(dev_priv);
2345
2346 /* disable master interrupt before clearing iir */
2347 de_ier = I915_READ(DEIER);
2348 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
2349 POSTING_READ(DEIER);
2350
2351 /* Disable south interrupts. We'll only write to SDEIIR once, so further
2352 * interrupts will be stored on its back queue, and then we'll be
2353 * able to process them after we restore SDEIER (as soon as we restore
2354 * it, we'll get an interrupt if SDEIIR still has something to process
2355 * due to its back queue). */
2356 if (!HAS_PCH_NOP(dev_priv)) {
2357 sde_ier = I915_READ(SDEIER);
2358 I915_WRITE(SDEIER, 0);
2359 POSTING_READ(SDEIER);
2360 }
2361
2362 /* Find, clear, then process each source of interrupt */
2363
2364 gt_iir = I915_READ(GTIIR);
2365 if (gt_iir) {
2366 I915_WRITE(GTIIR, gt_iir);
2367 ret = IRQ_HANDLED;
2368 if (INTEL_GEN(dev_priv) >= 6)
2369 snb_gt_irq_handler(dev_priv, gt_iir);
2370 else
2371 ilk_gt_irq_handler(dev_priv, gt_iir);
2372 }
2373
2374 de_iir = I915_READ(DEIIR);
2375 if (de_iir) {
2376 I915_WRITE(DEIIR, de_iir);
2377 ret = IRQ_HANDLED;
2378 if (INTEL_GEN(dev_priv) >= 7)
2379 ivb_display_irq_handler(dev_priv, de_iir);
2380 else
2381 ilk_display_irq_handler(dev_priv, de_iir);
2382 }
2383
2384 if (INTEL_GEN(dev_priv) >= 6) {
2385 u32 pm_iir = I915_READ(GEN6_PMIIR);
2386 if (pm_iir) {
2387 I915_WRITE(GEN6_PMIIR, pm_iir);
2388 ret = IRQ_HANDLED;
2389 gen6_rps_irq_handler(dev_priv, pm_iir);
2390 }
2391 }
2392
2393 I915_WRITE(DEIER, de_ier);
2394 POSTING_READ(DEIER);
2395 if (!HAS_PCH_NOP(dev_priv)) {
2396 I915_WRITE(SDEIER, sde_ier);
2397 POSTING_READ(SDEIER);
2398 }
2399
2400 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
2401 enable_rpm_wakeref_asserts(dev_priv);
2402
2403 return ret;
2404 }
2405
2406 static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv,
2407 u32 hotplug_trigger,
2408 const u32 hpd[HPD_NUM_PINS])
2409 {
2410 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2411
2412 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2413 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2414
2415 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
2416 dig_hotplug_reg, hpd,
2417 bxt_port_hotplug_long_detect);
2418
2419 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2420 }
2421
2422 static irqreturn_t
2423 gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
2424 {
2425 irqreturn_t ret = IRQ_NONE;
2426 u32 iir;
2427 enum pipe pipe;
2428
2429 if (master_ctl & GEN8_DE_MISC_IRQ) {
2430 iir = I915_READ(GEN8_DE_MISC_IIR);
2431 if (iir) {
2432 I915_WRITE(GEN8_DE_MISC_IIR, iir);
2433 ret = IRQ_HANDLED;
2434 if (iir & GEN8_DE_MISC_GSE)
2435 intel_opregion_asle_intr(dev_priv);
2436 else
2437 DRM_ERROR("Unexpected DE Misc interrupt\n");
2438 }
2439 else
2440 DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
2441 }
2442
2443 if (master_ctl & GEN8_DE_PORT_IRQ) {
2444 iir = I915_READ(GEN8_DE_PORT_IIR);
2445 if (iir) {
2446 u32 tmp_mask;
2447 bool found = false;
2448
2449 I915_WRITE(GEN8_DE_PORT_IIR, iir);
2450 ret =
IRQ_HANDLED; 2451 2452 tmp_mask = GEN8_AUX_CHANNEL_A; 2453 if (INTEL_INFO(dev_priv)->gen >= 9) 2454 tmp_mask |= GEN9_AUX_CHANNEL_B | 2455 GEN9_AUX_CHANNEL_C | 2456 GEN9_AUX_CHANNEL_D; 2457 2458 if (iir & tmp_mask) { 2459 dp_aux_irq_handler(dev_priv); 2460 found = true; 2461 } 2462 2463 if (IS_GEN9_LP(dev_priv)) { 2464 tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK; 2465 if (tmp_mask) { 2466 bxt_hpd_irq_handler(dev_priv, tmp_mask, 2467 hpd_bxt); 2468 found = true; 2469 } 2470 } else if (IS_BROADWELL(dev_priv)) { 2471 tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG; 2472 if (tmp_mask) { 2473 ilk_hpd_irq_handler(dev_priv, 2474 tmp_mask, hpd_bdw); 2475 found = true; 2476 } 2477 } 2478 2479 if (IS_GEN9_LP(dev_priv) && (iir & BXT_DE_PORT_GMBUS)) { 2480 gmbus_irq_handler(dev_priv); 2481 found = true; 2482 } 2483 2484 if (!found) 2485 DRM_ERROR("Unexpected DE Port interrupt\n"); 2486 } 2487 else 2488 DRM_ERROR("The master control interrupt lied (DE PORT)!\n"); 2489 } 2490 2491 for_each_pipe(dev_priv, pipe) { 2492 u32 flip_done, fault_errors; 2493 2494 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe))) 2495 continue; 2496 2497 iir = I915_READ(GEN8_DE_PIPE_IIR(pipe)); 2498 if (!iir) { 2499 DRM_ERROR("The master control interrupt lied (DE PIPE)!\n"); 2500 continue; 2501 } 2502 2503 ret = IRQ_HANDLED; 2504 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir); 2505 2506 if (iir & GEN8_PIPE_VBLANK && 2507 intel_pipe_handle_vblank(dev_priv, pipe)) 2508 intel_check_page_flip(dev_priv, pipe); 2509 2510 flip_done = iir; 2511 if (INTEL_INFO(dev_priv)->gen >= 9) 2512 flip_done &= GEN9_PIPE_PLANE1_FLIP_DONE; 2513 else 2514 flip_done &= GEN8_PIPE_PRIMARY_FLIP_DONE; 2515 2516 if (flip_done) 2517 intel_finish_page_flip_cs(dev_priv, pipe); 2518 2519 if (iir & GEN8_PIPE_CDCLK_CRC_DONE) 2520 hsw_pipe_crc_irq_handler(dev_priv, pipe); 2521 2522 if (iir & GEN8_PIPE_FIFO_UNDERRUN) 2523 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2524 2525 fault_errors = iir; 2526 if (INTEL_INFO(dev_priv)->gen >= 9) 2527 fault_errors &= GEN9_DE_PIPE_IRQ_FAULT_ERRORS; 2528 else 2529 fault_errors &= GEN8_DE_PIPE_IRQ_FAULT_ERRORS; 2530 2531 if (fault_errors) 2532 DRM_ERROR("Fault errors on pipe %c: 0x%08x\n", 2533 pipe_name(pipe), 2534 fault_errors); 2535 } 2536 2537 if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) && 2538 master_ctl & GEN8_DE_PCH_IRQ) { 2539 /* 2540 * FIXME(BDW): Assume for now that the new interrupt handling 2541 * scheme also closed the SDE interrupt handling race we've seen 2542 * on older pch-split platforms. But this needs testing. 2543 */ 2544 iir = I915_READ(SDEIIR); 2545 if (iir) { 2546 I915_WRITE(SDEIIR, iir); 2547 ret = IRQ_HANDLED; 2548 2549 if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv)) 2550 spt_irq_handler(dev_priv, iir); 2551 else 2552 cpt_irq_handler(dev_priv, iir); 2553 } else { 2554 /* 2555 * Like on previous PCH there seems to be something 2556 * fishy going on with forwarding PCH interrupts. 
2557 */ 2558 DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n"); 2559 } 2560 } 2561 2562 return ret; 2563 } 2564 2565 static irqreturn_t gen8_irq_handler(int irq, void *arg) 2566 { 2567 struct drm_device *dev = arg; 2568 struct drm_i915_private *dev_priv = to_i915(dev); 2569 u32 master_ctl; 2570 u32 gt_iir[4] = {}; 2571 irqreturn_t ret; 2572 2573 if (!intel_irqs_enabled(dev_priv)) 2574 return IRQ_NONE; 2575 2576 master_ctl = I915_READ_FW(GEN8_MASTER_IRQ); 2577 master_ctl &= ~GEN8_MASTER_IRQ_CONTROL; 2578 if (!master_ctl) 2579 return IRQ_NONE; 2580 2581 I915_WRITE_FW(GEN8_MASTER_IRQ, 0); 2582 2583 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2584 disable_rpm_wakeref_asserts(dev_priv); 2585 2586 /* Find, clear, then process each source of interrupt */ 2587 ret = gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir); 2588 gen8_gt_irq_handler(dev_priv, gt_iir); 2589 ret |= gen8_de_irq_handler(dev_priv, master_ctl); 2590 2591 I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 2592 POSTING_READ_FW(GEN8_MASTER_IRQ); 2593 2594 enable_rpm_wakeref_asserts(dev_priv); 2595 2596 return ret; 2597 } 2598 2599 /** 2600 * i915_reset_and_wakeup - do process context error handling work 2601 * @dev_priv: i915 device private 2602 * 2603 * Fire an error uevent so userspace can see that a hang or error 2604 * was detected. 2605 */ 2606 static void i915_reset_and_wakeup(struct drm_i915_private *dev_priv) 2607 { 2608 struct kobject *kobj = &dev_priv->drm.primary->kdev->kobj; 2609 char *error_event[] = { I915_ERROR_UEVENT "=1", NULL }; 2610 char *reset_event[] = { I915_RESET_UEVENT "=1", NULL }; 2611 char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL }; 2612 2613 kobject_uevent_env(kobj, KOBJ_CHANGE, error_event); 2614 2615 DRM_DEBUG_DRIVER("resetting chip\n"); 2616 kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event); 2617 2618 intel_prepare_reset(dev_priv); 2619 2620 set_bit(I915_RESET_HANDOFF, &dev_priv->gpu_error.flags); 2621 wake_up_all(&dev_priv->gpu_error.wait_queue); 2622 2623 do { 2624 /* 2625 * All state reset _must_ be completed before we update the 2626 * reset counter, for otherwise waiters might miss the reset 2627 * pending state and not properly drop locks, resulting in 2628 * deadlocks with the reset work. 2629 */ 2630 if (mutex_trylock(&dev_priv->drm.struct_mutex)) { 2631 i915_reset(dev_priv); 2632 mutex_unlock(&dev_priv->drm.struct_mutex); 2633 } 2634 2635 /* We need to wait for anyone holding the lock to wakeup */ 2636 } while (wait_on_bit_timeout(&dev_priv->gpu_error.flags, 2637 I915_RESET_HANDOFF, 2638 TASK_UNINTERRUPTIBLE, 2639 HZ)); 2640 2641 intel_finish_reset(dev_priv); 2642 2643 if (!test_bit(I915_WEDGED, &dev_priv->gpu_error.flags)) 2644 kobject_uevent_env(kobj, 2645 KOBJ_CHANGE, reset_done_event); 2646 2647 /* 2648 * Note: The wake_up also serves as a memory barrier so that 2649 * waiters see the updated value of the dev_priv->gpu_error. 
2650 */ 2651 clear_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags); 2652 wake_up_all(&dev_priv->gpu_error.reset_queue); 2653 } 2654 2655 static inline void 2656 i915_err_print_instdone(struct drm_i915_private *dev_priv, 2657 struct intel_instdone *instdone) 2658 { 2659 int slice; 2660 int subslice; 2661 2662 pr_err(" INSTDONE: 0x%08x\n", instdone->instdone); 2663 2664 if (INTEL_GEN(dev_priv) <= 3) 2665 return; 2666 2667 pr_err(" SC_INSTDONE: 0x%08x\n", instdone->slice_common); 2668 2669 if (INTEL_GEN(dev_priv) <= 6) 2670 return; 2671 2672 for_each_instdone_slice_subslice(dev_priv, slice, subslice) 2673 pr_err(" SAMPLER_INSTDONE[%d][%d]: 0x%08x\n", 2674 slice, subslice, instdone->sampler[slice][subslice]); 2675 2676 for_each_instdone_slice_subslice(dev_priv, slice, subslice) 2677 pr_err(" ROW_INSTDONE[%d][%d]: 0x%08x\n", 2678 slice, subslice, instdone->row[slice][subslice]); 2679 } 2680 2681 static void i915_clear_error_registers(struct drm_i915_private *dev_priv) 2682 { 2683 u32 eir; 2684 2685 if (!IS_GEN2(dev_priv)) 2686 I915_WRITE(PGTBL_ER, I915_READ(PGTBL_ER)); 2687 2688 if (INTEL_GEN(dev_priv) < 4) 2689 I915_WRITE(IPEIR, I915_READ(IPEIR)); 2690 else 2691 I915_WRITE(IPEIR_I965, I915_READ(IPEIR_I965)); 2692 2693 I915_WRITE(EIR, I915_READ(EIR)); 2694 eir = I915_READ(EIR); 2695 if (eir) { 2696 /* 2697 * some errors might have become stuck, 2698 * mask them. 2699 */ 2700 DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masking\n", eir); 2701 I915_WRITE(EMR, I915_READ(EMR) | eir); 2702 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 2703 } 2704 } 2705 2706 /** 2707 * i915_handle_error - handle a gpu error 2708 * @dev_priv: i915 device private 2709 * @engine_mask: mask representing engines that are hung 2710 * @fmt: Error message format string 2711 * 2712 * Do some basic checking of register state at error time and 2713 * dump it to the syslog. Also call i915_capture_error_state() to make 2714 * sure we get a record and make it available in debugfs. Fire a uevent 2715 * so userspace knows something bad happened (should trigger collection 2716 * of a ring dump etc.). 2717 */ 2718 void i915_handle_error(struct drm_i915_private *dev_priv, 2719 u32 engine_mask, 2720 const char *fmt, ...) 2721 { 2722 va_list args; 2723 char error_msg[80]; 2724 2725 va_start(args, fmt); 2726 vscnprintf(error_msg, sizeof(error_msg), fmt, args); 2727 va_end(args); 2728 2729 /* 2730 * In most cases it's guaranteed that we get here with an RPM 2731 * reference held, for example because there is a pending GPU 2732 * request that won't finish until the reset is done. This 2733 * isn't the case at least when we get here by doing a 2734 * simulated reset via debugfs, so get an RPM reference. 
2735 */ 2736 intel_runtime_pm_get(dev_priv); 2737 2738 i915_capture_error_state(dev_priv, engine_mask, error_msg); 2739 i915_clear_error_registers(dev_priv); 2740 2741 if (!engine_mask) 2742 goto out; 2743 2744 if (test_and_set_bit(I915_RESET_BACKOFF, 2745 &dev_priv->gpu_error.flags)) 2746 goto out; 2747 2748 i915_reset_and_wakeup(dev_priv); 2749 2750 out: 2751 intel_runtime_pm_put(dev_priv); 2752 } 2753 2754 /* Called from drm generic code, passed 'crtc' which 2755 * we use as a pipe index 2756 */ 2757 static int i8xx_enable_vblank(struct drm_device *dev, unsigned int pipe) 2758 { 2759 struct drm_i915_private *dev_priv = to_i915(dev); 2760 unsigned long irqflags; 2761 2762 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2763 i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS); 2764 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2765 2766 return 0; 2767 } 2768 2769 static int i965_enable_vblank(struct drm_device *dev, unsigned int pipe) 2770 { 2771 struct drm_i915_private *dev_priv = to_i915(dev); 2772 unsigned long irqflags; 2773 2774 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2775 i915_enable_pipestat(dev_priv, pipe, 2776 PIPE_START_VBLANK_INTERRUPT_STATUS); 2777 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2778 2779 return 0; 2780 } 2781 2782 static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe) 2783 { 2784 struct drm_i915_private *dev_priv = to_i915(dev); 2785 unsigned long irqflags; 2786 uint32_t bit = INTEL_GEN(dev_priv) >= 7 ? 2787 DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe); 2788 2789 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2790 ilk_enable_display_irq(dev_priv, bit); 2791 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2792 2793 return 0; 2794 } 2795 2796 static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe) 2797 { 2798 struct drm_i915_private *dev_priv = to_i915(dev); 2799 unsigned long irqflags; 2800 2801 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2802 bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK); 2803 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2804 2805 return 0; 2806 } 2807 2808 /* Called from drm generic code, passed 'crtc' which 2809 * we use as a pipe index 2810 */ 2811 static void i8xx_disable_vblank(struct drm_device *dev, unsigned int pipe) 2812 { 2813 struct drm_i915_private *dev_priv = to_i915(dev); 2814 unsigned long irqflags; 2815 2816 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2817 i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS); 2818 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2819 } 2820 2821 static void i965_disable_vblank(struct drm_device *dev, unsigned int pipe) 2822 { 2823 struct drm_i915_private *dev_priv = to_i915(dev); 2824 unsigned long irqflags; 2825 2826 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2827 i915_disable_pipestat(dev_priv, pipe, 2828 PIPE_START_VBLANK_INTERRUPT_STATUS); 2829 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2830 } 2831 2832 static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe) 2833 { 2834 struct drm_i915_private *dev_priv = to_i915(dev); 2835 unsigned long irqflags; 2836 uint32_t bit = INTEL_GEN(dev_priv) >= 7 ? 
2837 DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe); 2838 2839 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2840 ilk_disable_display_irq(dev_priv, bit); 2841 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2842 } 2843 2844 static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe) 2845 { 2846 struct drm_i915_private *dev_priv = to_i915(dev); 2847 unsigned long irqflags; 2848 2849 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2850 bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK); 2851 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2852 } 2853 2854 static void ibx_irq_reset(struct drm_i915_private *dev_priv) 2855 { 2856 if (HAS_PCH_NOP(dev_priv)) 2857 return; 2858 2859 GEN5_IRQ_RESET(SDE); 2860 2861 if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv)) 2862 I915_WRITE(SERR_INT, 0xffffffff); 2863 } 2864 2865 /* 2866 * SDEIER is also touched by the interrupt handler to work around missed PCH 2867 * interrupts. Hence we can't update it after the interrupt handler is enabled - 2868 * instead we unconditionally enable all PCH interrupt sources here, but then 2869 * only unmask them as needed with SDEIMR. 2870 * 2871 * This function needs to be called before interrupts are enabled. 2872 */ 2873 static void ibx_irq_pre_postinstall(struct drm_device *dev) 2874 { 2875 struct drm_i915_private *dev_priv = to_i915(dev); 2876 2877 if (HAS_PCH_NOP(dev_priv)) 2878 return; 2879 2880 WARN_ON(I915_READ(SDEIER) != 0); 2881 I915_WRITE(SDEIER, 0xffffffff); 2882 POSTING_READ(SDEIER); 2883 } 2884 2885 static void gen5_gt_irq_reset(struct drm_i915_private *dev_priv) 2886 { 2887 GEN5_IRQ_RESET(GT); 2888 if (INTEL_GEN(dev_priv) >= 6) 2889 GEN5_IRQ_RESET(GEN6_PM); 2890 } 2891 2892 static void vlv_display_irq_reset(struct drm_i915_private *dev_priv) 2893 { 2894 enum pipe pipe; 2895 2896 if (IS_CHERRYVIEW(dev_priv)) 2897 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV); 2898 else 2899 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK); 2900 2901 i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0); 2902 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 2903 2904 for_each_pipe(dev_priv, pipe) { 2905 I915_WRITE(PIPESTAT(pipe), 2906 PIPE_FIFO_UNDERRUN_STATUS | 2907 PIPESTAT_INT_STATUS_MASK); 2908 dev_priv->pipestat_irq_mask[pipe] = 0; 2909 } 2910 2911 GEN5_IRQ_RESET(VLV_); 2912 dev_priv->irq_mask = ~0; 2913 } 2914 2915 static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv) 2916 { 2917 u32 pipestat_mask; 2918 u32 enable_mask; 2919 enum pipe pipe; 2920 u32 val; 2921 2922 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV | 2923 PIPE_CRC_DONE_INTERRUPT_STATUS; 2924 2925 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); 2926 for_each_pipe(dev_priv, pipe) 2927 i915_enable_pipestat(dev_priv, pipe, pipestat_mask); 2928 2929 enable_mask = I915_DISPLAY_PORT_INTERRUPT | 2930 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 2931 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; 2932 if (IS_CHERRYVIEW(dev_priv)) 2933 enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT; 2934 2935 WARN_ON(dev_priv->irq_mask != ~0); 2936 2937 val = (I915_LPE_PIPE_A_INTERRUPT | 2938 I915_LPE_PIPE_B_INTERRUPT | 2939 I915_LPE_PIPE_C_INTERRUPT); 2940 2941 enable_mask |= val; 2942 2943 dev_priv->irq_mask = ~enable_mask; 2944 2945 GEN5_IRQ_INIT(VLV_, dev_priv->irq_mask, enable_mask); 2946 } 2947 2948 /* drm_dma.h hooks 2949 */ 2950 static void ironlake_irq_reset(struct drm_device *dev) 2951 { 2952 struct drm_i915_private *dev_priv = to_i915(dev); 2953 2954 I915_WRITE(HWSTAM, 0xffffffff); 2955 
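/*
 * With HWSTAM fully masked, reset the DE interrupt registers first,
 * then the GT and (unless the PCH is a NOP) the SDE block below.
 */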
2956 GEN5_IRQ_RESET(DE); 2957 if (IS_GEN7(dev_priv)) 2958 I915_WRITE(GEN7_ERR_INT, 0xffffffff); 2959 2960 gen5_gt_irq_reset(dev_priv); 2961 2962 ibx_irq_reset(dev_priv); 2963 } 2964 2965 static void valleyview_irq_preinstall(struct drm_device *dev) 2966 { 2967 struct drm_i915_private *dev_priv = to_i915(dev); 2968 2969 I915_WRITE(VLV_MASTER_IER, 0); 2970 POSTING_READ(VLV_MASTER_IER); 2971 2972 gen5_gt_irq_reset(dev_priv); 2973 2974 spin_lock_irq(&dev_priv->irq_lock); 2975 if (dev_priv->display_irqs_enabled) 2976 vlv_display_irq_reset(dev_priv); 2977 spin_unlock_irq(&dev_priv->irq_lock); 2978 } 2979 2980 static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv) 2981 { 2982 GEN8_IRQ_RESET_NDX(GT, 0); 2983 GEN8_IRQ_RESET_NDX(GT, 1); 2984 GEN8_IRQ_RESET_NDX(GT, 2); 2985 GEN8_IRQ_RESET_NDX(GT, 3); 2986 } 2987 2988 static void gen8_irq_reset(struct drm_device *dev) 2989 { 2990 struct drm_i915_private *dev_priv = to_i915(dev); 2991 int pipe; 2992 2993 I915_WRITE(GEN8_MASTER_IRQ, 0); 2994 POSTING_READ(GEN8_MASTER_IRQ); 2995 2996 gen8_gt_irq_reset(dev_priv); 2997 2998 for_each_pipe(dev_priv, pipe) 2999 if (intel_display_power_is_enabled(dev_priv, 3000 POWER_DOMAIN_PIPE(pipe))) 3001 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe); 3002 3003 GEN5_IRQ_RESET(GEN8_DE_PORT_); 3004 GEN5_IRQ_RESET(GEN8_DE_MISC_); 3005 GEN5_IRQ_RESET(GEN8_PCU_); 3006 3007 if (HAS_PCH_SPLIT(dev_priv)) 3008 ibx_irq_reset(dev_priv); 3009 } 3010 3011 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv, 3012 unsigned int pipe_mask) 3013 { 3014 uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN; 3015 enum pipe pipe; 3016 3017 spin_lock_irq(&dev_priv->irq_lock); 3018 for_each_pipe_masked(dev_priv, pipe, pipe_mask) 3019 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe, 3020 dev_priv->de_irq_mask[pipe], 3021 ~dev_priv->de_irq_mask[pipe] | extra_ier); 3022 spin_unlock_irq(&dev_priv->irq_lock); 3023 } 3024 3025 void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv, 3026 unsigned int pipe_mask) 3027 { 3028 enum pipe pipe; 3029 3030 spin_lock_irq(&dev_priv->irq_lock); 3031 for_each_pipe_masked(dev_priv, pipe, pipe_mask) 3032 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe); 3033 spin_unlock_irq(&dev_priv->irq_lock); 3034 3035 /* make sure we're done processing display irqs */ 3036 synchronize_irq(dev_priv->drm.irq); 3037 } 3038 3039 static void cherryview_irq_preinstall(struct drm_device *dev) 3040 { 3041 struct drm_i915_private *dev_priv = to_i915(dev); 3042 3043 I915_WRITE(GEN8_MASTER_IRQ, 0); 3044 POSTING_READ(GEN8_MASTER_IRQ); 3045 3046 gen8_gt_irq_reset(dev_priv); 3047 3048 GEN5_IRQ_RESET(GEN8_PCU_); 3049 3050 spin_lock_irq(&dev_priv->irq_lock); 3051 if (dev_priv->display_irqs_enabled) 3052 vlv_display_irq_reset(dev_priv); 3053 spin_unlock_irq(&dev_priv->irq_lock); 3054 } 3055 3056 static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv, 3057 const u32 hpd[HPD_NUM_PINS]) 3058 { 3059 struct intel_encoder *encoder; 3060 u32 enabled_irqs = 0; 3061 3062 for_each_intel_encoder(&dev_priv->drm, encoder) 3063 if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED) 3064 enabled_irqs |= hpd[encoder->hpd_pin]; 3065 3066 return enabled_irqs; 3067 } 3068 3069 static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv) 3070 { 3071 u32 hotplug; 3072 3073 /* 3074 * Enable digital hotplug on the PCH, and configure the DP short pulse 3075 * duration to 2ms (which is the minimum in the Display Port spec). 3076 * The pulse duration bits are reserved on LPT+. 
3077 */ 3078 hotplug = I915_READ(PCH_PORT_HOTPLUG); 3079 hotplug &= ~(PORTB_PULSE_DURATION_MASK | 3080 PORTC_PULSE_DURATION_MASK | 3081 PORTD_PULSE_DURATION_MASK); 3082 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms; 3083 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms; 3084 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms; 3085 /* 3086 * When CPU and PCH are on the same package, port A 3087 * HPD must be enabled in both north and south. 3088 */ 3089 if (HAS_PCH_LPT_LP(dev_priv)) 3090 hotplug |= PORTA_HOTPLUG_ENABLE; 3091 I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 3092 } 3093 3094 static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv) 3095 { 3096 u32 hotplug_irqs, enabled_irqs; 3097 3098 if (HAS_PCH_IBX(dev_priv)) { 3099 hotplug_irqs = SDE_HOTPLUG_MASK; 3100 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ibx); 3101 } else { 3102 hotplug_irqs = SDE_HOTPLUG_MASK_CPT; 3103 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_cpt); 3104 } 3105 3106 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 3107 3108 ibx_hpd_detection_setup(dev_priv); 3109 } 3110 3111 static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv) 3112 { 3113 u32 hotplug; 3114 3115 /* Enable digital hotplug on the PCH */ 3116 hotplug = I915_READ(PCH_PORT_HOTPLUG); 3117 hotplug |= PORTA_HOTPLUG_ENABLE | 3118 PORTB_HOTPLUG_ENABLE | 3119 PORTC_HOTPLUG_ENABLE | 3120 PORTD_HOTPLUG_ENABLE; 3121 I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 3122 3123 hotplug = I915_READ(PCH_PORT_HOTPLUG2); 3124 hotplug |= PORTE_HOTPLUG_ENABLE; 3125 I915_WRITE(PCH_PORT_HOTPLUG2, hotplug); 3126 } 3127 3128 static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv) 3129 { 3130 u32 hotplug_irqs, enabled_irqs; 3131 3132 hotplug_irqs = SDE_HOTPLUG_MASK_SPT; 3133 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_spt); 3134 3135 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 3136 3137 spt_hpd_detection_setup(dev_priv); 3138 } 3139 3140 static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv) 3141 { 3142 u32 hotplug; 3143 3144 /* 3145 * Enable digital hotplug on the CPU, and configure the DP short pulse 3146 * duration to 2ms (which is the minimum in the Display Port spec) 3147 * The pulse duration bits are reserved on HSW+. 
3148 */ 3149 hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL); 3150 hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK; 3151 hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE | 3152 DIGITAL_PORTA_PULSE_DURATION_2ms; 3153 I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug); 3154 } 3155 3156 static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv) 3157 { 3158 u32 hotplug_irqs, enabled_irqs; 3159 3160 if (INTEL_GEN(dev_priv) >= 8) { 3161 hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG; 3162 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bdw); 3163 3164 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs); 3165 } else if (INTEL_GEN(dev_priv) >= 7) { 3166 hotplug_irqs = DE_DP_A_HOTPLUG_IVB; 3167 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ivb); 3168 3169 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs); 3170 } else { 3171 hotplug_irqs = DE_DP_A_HOTPLUG; 3172 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ilk); 3173 3174 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs); 3175 } 3176 3177 ilk_hpd_detection_setup(dev_priv); 3178 3179 ibx_hpd_irq_setup(dev_priv); 3180 } 3181 3182 static void __bxt_hpd_detection_setup(struct drm_i915_private *dev_priv, 3183 u32 enabled_irqs) 3184 { 3185 u32 hotplug; 3186 3187 hotplug = I915_READ(PCH_PORT_HOTPLUG); 3188 hotplug |= PORTA_HOTPLUG_ENABLE | 3189 PORTB_HOTPLUG_ENABLE | 3190 PORTC_HOTPLUG_ENABLE; 3191 3192 DRM_DEBUG_KMS("Invert bit setting: hp_ctl:%x hp_port:%x\n", 3193 hotplug, enabled_irqs); 3194 hotplug &= ~BXT_DDI_HPD_INVERT_MASK; 3195 3196 /* 3197 * For BXT invert bit has to be set based on AOB design 3198 * for HPD detection logic, update it based on VBT fields. 3199 */ 3200 if ((enabled_irqs & BXT_DE_PORT_HP_DDIA) && 3201 intel_bios_is_port_hpd_inverted(dev_priv, PORT_A)) 3202 hotplug |= BXT_DDIA_HPD_INVERT; 3203 if ((enabled_irqs & BXT_DE_PORT_HP_DDIB) && 3204 intel_bios_is_port_hpd_inverted(dev_priv, PORT_B)) 3205 hotplug |= BXT_DDIB_HPD_INVERT; 3206 if ((enabled_irqs & BXT_DE_PORT_HP_DDIC) && 3207 intel_bios_is_port_hpd_inverted(dev_priv, PORT_C)) 3208 hotplug |= BXT_DDIC_HPD_INVERT; 3209 3210 I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 3211 } 3212 3213 static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv) 3214 { 3215 __bxt_hpd_detection_setup(dev_priv, BXT_DE_PORT_HOTPLUG_MASK); 3216 } 3217 3218 static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv) 3219 { 3220 u32 hotplug_irqs, enabled_irqs; 3221 3222 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bxt); 3223 hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK; 3224 3225 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs); 3226 3227 __bxt_hpd_detection_setup(dev_priv, enabled_irqs); 3228 } 3229 3230 static void ibx_irq_postinstall(struct drm_device *dev) 3231 { 3232 struct drm_i915_private *dev_priv = to_i915(dev); 3233 u32 mask; 3234 3235 if (HAS_PCH_NOP(dev_priv)) 3236 return; 3237 3238 if (HAS_PCH_IBX(dev_priv)) 3239 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON; 3240 else 3241 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT; 3242 3243 gen5_assert_iir_is_zero(dev_priv, SDEIIR); 3244 I915_WRITE(SDEIMR, ~mask); 3245 3246 if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) || 3247 HAS_PCH_LPT(dev_priv)) 3248 ibx_hpd_detection_setup(dev_priv); 3249 else 3250 spt_hpd_detection_setup(dev_priv); 3251 } 3252 3253 static void gen5_gt_irq_postinstall(struct drm_device *dev) 3254 { 3255 struct drm_i915_private *dev_priv = to_i915(dev); 3256 u32 pm_irqs, gt_irqs; 3257 3258 pm_irqs = gt_irqs = 0; 3259 3260 dev_priv->gt_irq_mask = ~0; 3261 if (HAS_L3_DPF(dev_priv)) { 3262 
/* L3 parity interrupt is always unmasked. */ 3263 dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev_priv); 3264 gt_irqs |= GT_PARITY_ERROR(dev_priv); 3265 } 3266 3267 gt_irqs |= GT_RENDER_USER_INTERRUPT; 3268 if (IS_GEN5(dev_priv)) { 3269 gt_irqs |= ILK_BSD_USER_INTERRUPT; 3270 } else { 3271 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT; 3272 } 3273 3274 GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs); 3275 3276 if (INTEL_GEN(dev_priv) >= 6) { 3277 /* 3278 * RPS interrupts will get enabled/disabled on demand when RPS 3279 * itself is enabled/disabled. 3280 */ 3281 if (HAS_VEBOX(dev_priv)) { 3282 pm_irqs |= PM_VEBOX_USER_INTERRUPT; 3283 dev_priv->pm_ier |= PM_VEBOX_USER_INTERRUPT; 3284 } 3285 3286 dev_priv->pm_imr = 0xffffffff; 3287 GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_imr, pm_irqs); 3288 } 3289 } 3290 3291 static int ironlake_irq_postinstall(struct drm_device *dev) 3292 { 3293 struct drm_i915_private *dev_priv = to_i915(dev); 3294 u32 display_mask, extra_mask; 3295 3296 if (INTEL_GEN(dev_priv) >= 7) { 3297 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | 3298 DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB | 3299 DE_PLANEB_FLIP_DONE_IVB | 3300 DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB); 3301 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB | 3302 DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB | 3303 DE_DP_A_HOTPLUG_IVB); 3304 } else { 3305 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | 3306 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE | 3307 DE_AUX_CHANNEL_A | 3308 DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE | 3309 DE_POISON); 3310 extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT | 3311 DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN | 3312 DE_DP_A_HOTPLUG); 3313 } 3314 3315 dev_priv->irq_mask = ~display_mask; 3316 3317 I915_WRITE(HWSTAM, 0xeffe); 3318 3319 ibx_irq_pre_postinstall(dev); 3320 3321 GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask); 3322 3323 gen5_gt_irq_postinstall(dev); 3324 3325 ilk_hpd_detection_setup(dev_priv); 3326 3327 ibx_irq_postinstall(dev); 3328 3329 if (IS_IRONLAKE_M(dev_priv)) { 3330 /* Enable PCU event interrupts 3331 * 3332 * spinlocking not required here for correctness since interrupt 3333 * setup is guaranteed to run in single-threaded context. But we 3334 * need it to make the assert_spin_locked happy. 
*/
3335 spin_lock_irq(&dev_priv->irq_lock);
3336 ilk_enable_display_irq(dev_priv, DE_PCU_EVENT);
3337 spin_unlock_irq(&dev_priv->irq_lock);
3338 }
3339
3340 return 0;
3341 }
3342
3343 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
3344 {
3345 lockdep_assert_held(&dev_priv->irq_lock);
3346
3347 if (dev_priv->display_irqs_enabled)
3348 return;
3349
3350 dev_priv->display_irqs_enabled = true;
3351
3352 if (intel_irqs_enabled(dev_priv)) {
3353 vlv_display_irq_reset(dev_priv);
3354 vlv_display_irq_postinstall(dev_priv);
3355 }
3356 }
3357
3358 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
3359 {
3360 lockdep_assert_held(&dev_priv->irq_lock);
3361
3362 if (!dev_priv->display_irqs_enabled)
3363 return;
3364
3365 dev_priv->display_irqs_enabled = false;
3366
3367 if (intel_irqs_enabled(dev_priv))
3368 vlv_display_irq_reset(dev_priv);
3369 }
3370
3371
3372 static int valleyview_irq_postinstall(struct drm_device *dev)
3373 {
3374 struct drm_i915_private *dev_priv = to_i915(dev);
3375
3376 gen5_gt_irq_postinstall(dev);
3377
3378 spin_lock_irq(&dev_priv->irq_lock);
3379 if (dev_priv->display_irqs_enabled)
3380 vlv_display_irq_postinstall(dev_priv);
3381 spin_unlock_irq(&dev_priv->irq_lock);
3382
3383 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
3384 POSTING_READ(VLV_MASTER_IER);
3385
3386 return 0;
3387 }
3388
3389 static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
3390 {
3391 /* These are interrupts we'll toggle with the ring mask register */
3392 uint32_t gt_interrupts[] = {
3393 GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
3394 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
3395 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
3396 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
3397 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3398 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3399 GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
3400 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
3401 0,
3402 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
3403 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
3404 };
3405
3406 if (HAS_L3_DPF(dev_priv))
3407 gt_interrupts[0] |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
3408
3409 dev_priv->pm_ier = 0x0;
3410 dev_priv->pm_imr = ~dev_priv->pm_ier;
3411 GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
3412 GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
3413 /*
3414 * RPS interrupts will get enabled/disabled on demand when RPS itself
3415 * is enabled/disabled. Same will be the case for GuC interrupts.
3416 */ 3417 GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_imr, dev_priv->pm_ier); 3418 GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]); 3419 } 3420 3421 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) 3422 { 3423 uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE; 3424 uint32_t de_pipe_enables; 3425 u32 de_port_masked = GEN8_AUX_CHANNEL_A; 3426 u32 de_port_enables; 3427 u32 de_misc_masked = GEN8_DE_MISC_GSE; 3428 enum pipe pipe; 3429 3430 if (INTEL_INFO(dev_priv)->gen >= 9) { 3431 de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE | 3432 GEN9_DE_PIPE_IRQ_FAULT_ERRORS; 3433 de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C | 3434 GEN9_AUX_CHANNEL_D; 3435 if (IS_GEN9_LP(dev_priv)) 3436 de_port_masked |= BXT_DE_PORT_GMBUS; 3437 } else { 3438 de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE | 3439 GEN8_DE_PIPE_IRQ_FAULT_ERRORS; 3440 } 3441 3442 de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK | 3443 GEN8_PIPE_FIFO_UNDERRUN; 3444 3445 de_port_enables = de_port_masked; 3446 if (IS_GEN9_LP(dev_priv)) 3447 de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK; 3448 else if (IS_BROADWELL(dev_priv)) 3449 de_port_enables |= GEN8_PORT_DP_A_HOTPLUG; 3450 3451 dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked; 3452 dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked; 3453 dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked; 3454 3455 for_each_pipe(dev_priv, pipe) 3456 if (intel_display_power_is_enabled(dev_priv, 3457 POWER_DOMAIN_PIPE(pipe))) 3458 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe, 3459 dev_priv->de_irq_mask[pipe], 3460 de_pipe_enables); 3461 3462 GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables); 3463 GEN5_IRQ_INIT(GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked); 3464 3465 if (IS_GEN9_LP(dev_priv)) 3466 bxt_hpd_detection_setup(dev_priv); 3467 else if (IS_BROADWELL(dev_priv)) 3468 ilk_hpd_detection_setup(dev_priv); 3469 } 3470 3471 static int gen8_irq_postinstall(struct drm_device *dev) 3472 { 3473 struct drm_i915_private *dev_priv = to_i915(dev); 3474 3475 if (HAS_PCH_SPLIT(dev_priv)) 3476 ibx_irq_pre_postinstall(dev); 3477 3478 gen8_gt_irq_postinstall(dev_priv); 3479 gen8_de_irq_postinstall(dev_priv); 3480 3481 if (HAS_PCH_SPLIT(dev_priv)) 3482 ibx_irq_postinstall(dev); 3483 3484 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 3485 POSTING_READ(GEN8_MASTER_IRQ); 3486 3487 return 0; 3488 } 3489 3490 static int cherryview_irq_postinstall(struct drm_device *dev) 3491 { 3492 struct drm_i915_private *dev_priv = to_i915(dev); 3493 3494 gen8_gt_irq_postinstall(dev_priv); 3495 3496 spin_lock_irq(&dev_priv->irq_lock); 3497 if (dev_priv->display_irqs_enabled) 3498 vlv_display_irq_postinstall(dev_priv); 3499 spin_unlock_irq(&dev_priv->irq_lock); 3500 3501 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 3502 POSTING_READ(GEN8_MASTER_IRQ); 3503 3504 return 0; 3505 } 3506 3507 static void gen8_irq_uninstall(struct drm_device *dev) 3508 { 3509 struct drm_i915_private *dev_priv = to_i915(dev); 3510 3511 if (!dev_priv) 3512 return; 3513 3514 gen8_irq_reset(dev); 3515 } 3516 3517 static void valleyview_irq_uninstall(struct drm_device *dev) 3518 { 3519 struct drm_i915_private *dev_priv = to_i915(dev); 3520 3521 if (!dev_priv) 3522 return; 3523 3524 I915_WRITE(VLV_MASTER_IER, 0); 3525 POSTING_READ(VLV_MASTER_IER); 3526 3527 gen5_gt_irq_reset(dev_priv); 3528 3529 I915_WRITE(HWSTAM, 0xffffffff); 3530 3531 spin_lock_irq(&dev_priv->irq_lock); 3532 if (dev_priv->display_irqs_enabled) 3533 vlv_display_irq_reset(dev_priv); 3534 spin_unlock_irq(&dev_priv->irq_lock); 3535 } 3536 3537 static void 
cherryview_irq_uninstall(struct drm_device *dev) 3538 { 3539 struct drm_i915_private *dev_priv = to_i915(dev); 3540 3541 if (!dev_priv) 3542 return; 3543 3544 I915_WRITE(GEN8_MASTER_IRQ, 0); 3545 POSTING_READ(GEN8_MASTER_IRQ); 3546 3547 gen8_gt_irq_reset(dev_priv); 3548 3549 GEN5_IRQ_RESET(GEN8_PCU_); 3550 3551 spin_lock_irq(&dev_priv->irq_lock); 3552 if (dev_priv->display_irqs_enabled) 3553 vlv_display_irq_reset(dev_priv); 3554 spin_unlock_irq(&dev_priv->irq_lock); 3555 } 3556 3557 static void ironlake_irq_uninstall(struct drm_device *dev) 3558 { 3559 struct drm_i915_private *dev_priv = to_i915(dev); 3560 3561 if (!dev_priv) 3562 return; 3563 3564 ironlake_irq_reset(dev); 3565 } 3566 3567 static void i8xx_irq_preinstall(struct drm_device * dev) 3568 { 3569 struct drm_i915_private *dev_priv = to_i915(dev); 3570 int pipe; 3571 3572 for_each_pipe(dev_priv, pipe) 3573 I915_WRITE(PIPESTAT(pipe), 0); 3574 I915_WRITE16(IMR, 0xffff); 3575 I915_WRITE16(IER, 0x0); 3576 POSTING_READ16(IER); 3577 } 3578 3579 static int i8xx_irq_postinstall(struct drm_device *dev) 3580 { 3581 struct drm_i915_private *dev_priv = to_i915(dev); 3582 3583 I915_WRITE16(EMR, 3584 ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); 3585 3586 /* Unmask the interrupts that we always want on. */ 3587 dev_priv->irq_mask = 3588 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3589 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3590 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3591 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT); 3592 I915_WRITE16(IMR, dev_priv->irq_mask); 3593 3594 I915_WRITE16(IER, 3595 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3596 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3597 I915_USER_INTERRUPT); 3598 POSTING_READ16(IER); 3599 3600 /* Interrupt setup is already guaranteed to be single-threaded, this is 3601 * just to make the assert_spin_locked check happy. */ 3602 spin_lock_irq(&dev_priv->irq_lock); 3603 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 3604 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 3605 spin_unlock_irq(&dev_priv->irq_lock); 3606 3607 return 0; 3608 } 3609 3610 /* 3611 * Returns true when a page flip has completed. 3612 */ 3613 static bool i8xx_handle_vblank(struct drm_i915_private *dev_priv, 3614 int plane, int pipe, u32 iir) 3615 { 3616 u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane); 3617 3618 if (!intel_pipe_handle_vblank(dev_priv, pipe)) 3619 return false; 3620 3621 if ((iir & flip_pending) == 0) 3622 goto check_page_flip; 3623 3624 /* We detect FlipDone by looking for the change in PendingFlip from '1' 3625 * to '0' on the following vblank, i.e. IIR has the Pendingflip 3626 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence 3627 * the flip is completed (no longer pending). Since this doesn't raise 3628 * an interrupt per se, we watch for the change at vblank. 
3629 */ 3630 if (I915_READ16(ISR) & flip_pending) 3631 goto check_page_flip; 3632 3633 intel_finish_page_flip_cs(dev_priv, pipe); 3634 return true; 3635 3636 check_page_flip: 3637 intel_check_page_flip(dev_priv, pipe); 3638 return false; 3639 } 3640 3641 static irqreturn_t i8xx_irq_handler(int irq, void *arg) 3642 { 3643 struct drm_device *dev = arg; 3644 struct drm_i915_private *dev_priv = to_i915(dev); 3645 u16 iir, new_iir; 3646 u32 pipe_stats[2]; 3647 int pipe; 3648 u16 flip_mask = 3649 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3650 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 3651 irqreturn_t ret; 3652 3653 if (!intel_irqs_enabled(dev_priv)) 3654 return IRQ_NONE; 3655 3656 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 3657 disable_rpm_wakeref_asserts(dev_priv); 3658 3659 ret = IRQ_NONE; 3660 iir = I915_READ16(IIR); 3661 if (iir == 0) 3662 goto out; 3663 3664 while (iir & ~flip_mask) { 3665 /* Can't rely on pipestat interrupt bit in iir as it might 3666 * have been cleared after the pipestat interrupt was received. 3667 * It doesn't set the bit in iir again, but it still produces 3668 * interrupts (for non-MSI). 3669 */ 3670 spin_lock(&dev_priv->irq_lock); 3671 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 3672 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir); 3673 3674 for_each_pipe(dev_priv, pipe) { 3675 i915_reg_t reg = PIPESTAT(pipe); 3676 pipe_stats[pipe] = I915_READ(reg); 3677 3678 /* 3679 * Clear the PIPE*STAT regs before the IIR 3680 */ 3681 if (pipe_stats[pipe] & 0x8000ffff) 3682 I915_WRITE(reg, pipe_stats[pipe]); 3683 } 3684 spin_unlock(&dev_priv->irq_lock); 3685 3686 I915_WRITE16(IIR, iir & ~flip_mask); 3687 new_iir = I915_READ16(IIR); /* Flush posted writes */ 3688 3689 if (iir & I915_USER_INTERRUPT) 3690 notify_ring(dev_priv->engine[RCS]); 3691 3692 for_each_pipe(dev_priv, pipe) { 3693 int plane = pipe; 3694 if (HAS_FBC(dev_priv)) 3695 plane = !plane; 3696 3697 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS && 3698 i8xx_handle_vblank(dev_priv, plane, pipe, iir)) 3699 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane); 3700 3701 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 3702 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 3703 3704 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 3705 intel_cpu_fifo_underrun_irq_handler(dev_priv, 3706 pipe); 3707 } 3708 3709 iir = new_iir; 3710 } 3711 ret = IRQ_HANDLED; 3712 3713 out: 3714 enable_rpm_wakeref_asserts(dev_priv); 3715 3716 return ret; 3717 } 3718 3719 static void i8xx_irq_uninstall(struct drm_device * dev) 3720 { 3721 struct drm_i915_private *dev_priv = to_i915(dev); 3722 int pipe; 3723 3724 for_each_pipe(dev_priv, pipe) { 3725 /* Clear enable bits; then clear status bits */ 3726 I915_WRITE(PIPESTAT(pipe), 0); 3727 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); 3728 } 3729 I915_WRITE16(IMR, 0xffff); 3730 I915_WRITE16(IER, 0x0); 3731 I915_WRITE16(IIR, I915_READ16(IIR)); 3732 } 3733 3734 static void i915_irq_preinstall(struct drm_device * dev) 3735 { 3736 struct drm_i915_private *dev_priv = to_i915(dev); 3737 int pipe; 3738 3739 if (I915_HAS_HOTPLUG(dev_priv)) { 3740 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); 3741 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3742 } 3743 3744 I915_WRITE16(HWSTAM, 0xeffe); 3745 for_each_pipe(dev_priv, pipe) 3746 I915_WRITE(PIPESTAT(pipe), 0); 3747 I915_WRITE(IMR, 0xffffffff); 3748 I915_WRITE(IER, 0x0); 3749 POSTING_READ(IER); 3750 } 3751 3752 static int i915_irq_postinstall(struct drm_device *dev) 3753 { 
        struct drm_i915_private *dev_priv = to_i915(dev);
        u32 enable_mask;

        I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

        /* Unmask the interrupts that we always want on. */
        dev_priv->irq_mask =
                ~(I915_ASLE_INTERRUPT |
                  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
                  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
                  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
                  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);

        enable_mask =
                I915_ASLE_INTERRUPT |
                I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
                I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
                I915_USER_INTERRUPT;

        if (I915_HAS_HOTPLUG(dev_priv)) {
                i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
                POSTING_READ(PORT_HOTPLUG_EN);

                /* Enable in IER... */
                enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
                /* and unmask in IMR */
                dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
        }

        I915_WRITE(IMR, dev_priv->irq_mask);
        I915_WRITE(IER, enable_mask);
        POSTING_READ(IER);

        i915_enable_asle_pipestat(dev_priv);

        /* Interrupt setup is already guaranteed to be single-threaded, this is
         * just to make the assert_spin_locked check happy. */
        spin_lock_irq(&dev_priv->irq_lock);
        i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
        i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
        spin_unlock_irq(&dev_priv->irq_lock);

        return 0;
}

/*
 * Returns true when a page flip has completed.
 */
static bool i915_handle_vblank(struct drm_i915_private *dev_priv,
                               int plane, int pipe, u32 iir)
{
        u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);

        if (!intel_pipe_handle_vblank(dev_priv, pipe))
                return false;

        if ((iir & flip_pending) == 0)
                goto check_page_flip;

        /* We detect FlipDone by looking for the change in PendingFlip from '1'
         * to '0' on the following vblank, i.e. IIR has the PendingFlip
         * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
         * the flip is completed (no longer pending). Since this doesn't raise
         * an interrupt per se, we watch for the change at vblank.
         */
        if (I915_READ(ISR) & flip_pending)
                goto check_page_flip;

        intel_finish_page_flip_cs(dev_priv, pipe);
        return true;

check_page_flip:
        intel_check_page_flip(dev_priv, pipe);
        return false;
}

static irqreturn_t i915_irq_handler(int irq, void *arg)
{
        struct drm_device *dev = arg;
        struct drm_i915_private *dev_priv = to_i915(dev);
        u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
        u32 flip_mask =
                I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
                I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
        int pipe, ret = IRQ_NONE;

        if (!intel_irqs_enabled(dev_priv))
                return IRQ_NONE;

        /* IRQs are synced during runtime_suspend, we don't require a wakeref */
        disable_rpm_wakeref_asserts(dev_priv);

        iir = I915_READ(IIR);
        do {
                bool irq_received = (iir & ~flip_mask) != 0;
                bool blc_event = false;

                /* Can't rely on pipestat interrupt bit in iir as it might
                 * have been cleared after the pipestat interrupt was received.
                 * It doesn't set the bit in iir again, but it still produces
                 * interrupts (for non-MSI).
                 */
                spin_lock(&dev_priv->irq_lock);
                if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
                        DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);

                for_each_pipe(dev_priv, pipe) {
                        i915_reg_t reg = PIPESTAT(pipe);
                        pipe_stats[pipe] = I915_READ(reg);

                        /* Clear the PIPE*STAT regs before the IIR */
                        if (pipe_stats[pipe] & 0x8000ffff) {
                                I915_WRITE(reg, pipe_stats[pipe]);
                                irq_received = true;
                        }
                }
                spin_unlock(&dev_priv->irq_lock);

                if (!irq_received)
                        break;

                /* Consume port. Then clear IIR or we'll miss events */
                if (I915_HAS_HOTPLUG(dev_priv) &&
                    iir & I915_DISPLAY_PORT_INTERRUPT) {
                        u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
                        if (hotplug_status)
                                i9xx_hpd_irq_handler(dev_priv, hotplug_status);
                }

                I915_WRITE(IIR, iir & ~flip_mask);
                new_iir = I915_READ(IIR); /* Flush posted writes */

                if (iir & I915_USER_INTERRUPT)
                        notify_ring(dev_priv->engine[RCS]);

                for_each_pipe(dev_priv, pipe) {
                        int plane = pipe;
                        if (HAS_FBC(dev_priv))
                                plane = !plane;

                        if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
                            i915_handle_vblank(dev_priv, plane, pipe, iir))
                                flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);

                        if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
                                blc_event = true;

                        if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
                                i9xx_pipe_crc_irq_handler(dev_priv, pipe);

                        if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
                                intel_cpu_fifo_underrun_irq_handler(dev_priv,
                                                                    pipe);
                }

                if (blc_event || (iir & I915_ASLE_INTERRUPT))
                        intel_opregion_asle_intr(dev_priv);

                /* With MSI, interrupts are only generated when iir
                 * transitions from zero to nonzero. If another bit got
                 * set while we were handling the existing iir bits, then
                 * we would never get another interrupt.
                 *
                 * This is fine on non-MSI as well, as if we hit this path
                 * we avoid exiting the interrupt handler only to generate
                 * another one.
                 *
                 * Note that for MSI this could cause a stray interrupt report
                 * if an interrupt landed in the time between writing IIR and
                 * the posting read. This should be rare enough to never
                 * trigger the 99% of 100,000 interrupts test for disabling
                 * stray interrupts.
                 */
                ret = IRQ_HANDLED;
                iir = new_iir;
        } while (iir & ~flip_mask);

        enable_rpm_wakeref_asserts(dev_priv);

        return ret;
}

static void i915_irq_uninstall(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        int pipe;

        if (I915_HAS_HOTPLUG(dev_priv)) {
                i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
                I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
        }

        I915_WRITE16(HWSTAM, 0xffff);
        for_each_pipe(dev_priv, pipe) {
                /* Clear enable bits; then clear status bits */
                I915_WRITE(PIPESTAT(pipe), 0);
                I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
        }
        I915_WRITE(IMR, 0xffffffff);
        I915_WRITE(IER, 0x0);

        I915_WRITE(IIR, I915_READ(IIR));
}

static void i965_irq_preinstall(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        int pipe;

        i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
        I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

        I915_WRITE(HWSTAM, 0xeffe);
        for_each_pipe(dev_priv, pipe)
                I915_WRITE(PIPESTAT(pipe), 0);
        I915_WRITE(IMR, 0xffffffff);
        I915_WRITE(IER, 0x0);
        POSTING_READ(IER);
}

static int i965_irq_postinstall(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        u32 enable_mask;
        u32 error_mask;

        /* Unmask the interrupts that we always want on. */
        dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
                               I915_DISPLAY_PORT_INTERRUPT |
                               I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
                               I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
                               I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
                               I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
                               I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

        enable_mask = ~dev_priv->irq_mask;
        enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
                         I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
        enable_mask |= I915_USER_INTERRUPT;

        if (IS_G4X(dev_priv))
                enable_mask |= I915_BSD_USER_INTERRUPT;

        /* Interrupt setup is already guaranteed to be single-threaded, this is
         * just to make the assert_spin_locked check happy. */
        spin_lock_irq(&dev_priv->irq_lock);
        i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
        i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
        i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
        spin_unlock_irq(&dev_priv->irq_lock);

        /*
         * Enable some error detection, note the instruction error mask
         * bit is reserved, so we leave it masked.
         */
        if (IS_G4X(dev_priv)) {
                error_mask = ~(GM45_ERROR_PAGE_TABLE |
                               GM45_ERROR_MEM_PRIV |
                               GM45_ERROR_CP_PRIV |
                               I915_ERROR_MEMORY_REFRESH);
        } else {
                error_mask = ~(I915_ERROR_PAGE_TABLE |
                               I915_ERROR_MEMORY_REFRESH);
        }
        I915_WRITE(EMR, error_mask);

        I915_WRITE(IMR, dev_priv->irq_mask);
        I915_WRITE(IER, enable_mask);
        POSTING_READ(IER);

        i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
        POSTING_READ(PORT_HOTPLUG_EN);

        i915_enable_asle_pipestat(dev_priv);

        return 0;
}

static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
        u32 hotplug_en;

        lockdep_assert_held(&dev_priv->irq_lock);

        /* Note HDMI and DP share hotplug bits */
        /* enable bits are the same for all generations */
        hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915);
        /*
         * Programming the CRT detection parameters tends to generate a
         * spurious hotplug event about three seconds later. So just do it
         * once.
         */
        if (IS_G4X(dev_priv))
                hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
        hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;

        /* Ignore TV since it's buggy */
        i915_hotplug_interrupt_update_locked(dev_priv,
                                             HOTPLUG_INT_EN_MASK |
                                             CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
                                             CRT_HOTPLUG_ACTIVATION_PERIOD_64,
                                             hotplug_en);
}

static irqreturn_t i965_irq_handler(int irq, void *arg)
{
        struct drm_device *dev = arg;
        struct drm_i915_private *dev_priv = to_i915(dev);
        u32 iir, new_iir;
        u32 pipe_stats[I915_MAX_PIPES];
        int ret = IRQ_NONE, pipe;
        u32 flip_mask =
                I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
                I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

        if (!intel_irqs_enabled(dev_priv))
                return IRQ_NONE;

        /* IRQs are synced during runtime_suspend, we don't require a wakeref */
        disable_rpm_wakeref_asserts(dev_priv);

        iir = I915_READ(IIR);

        for (;;) {
                bool irq_received = (iir & ~flip_mask) != 0;
                bool blc_event = false;

                /* Can't rely on pipestat interrupt bit in iir as it might
                 * have been cleared after the pipestat interrupt was received.
                 * It doesn't set the bit in iir again, but it still produces
                 * interrupts (for non-MSI).
                 */
                spin_lock(&dev_priv->irq_lock);
                if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
                        DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);

                for_each_pipe(dev_priv, pipe) {
                        i915_reg_t reg = PIPESTAT(pipe);
                        pipe_stats[pipe] = I915_READ(reg);

                        /*
                         * Clear the PIPE*STAT regs before the IIR
                         */
                        if (pipe_stats[pipe] & 0x8000ffff) {
                                I915_WRITE(reg, pipe_stats[pipe]);
                                irq_received = true;
                        }
                }
                spin_unlock(&dev_priv->irq_lock);

                if (!irq_received)
                        break;

                ret = IRQ_HANDLED;

                /* Consume port. Then clear IIR or we'll miss events */
                if (iir & I915_DISPLAY_PORT_INTERRUPT) {
                        u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
                        if (hotplug_status)
                                i9xx_hpd_irq_handler(dev_priv, hotplug_status);
                }

                I915_WRITE(IIR, iir & ~flip_mask);
                new_iir = I915_READ(IIR); /* Flush posted writes */

                if (iir & I915_USER_INTERRUPT)
                        notify_ring(dev_priv->engine[RCS]);
                if (iir & I915_BSD_USER_INTERRUPT)
                        notify_ring(dev_priv->engine[VCS]);

                for_each_pipe(dev_priv, pipe) {
                        if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
                            i915_handle_vblank(dev_priv, pipe, pipe, iir))
                                flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);

                        if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
                                blc_event = true;

                        if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
                                i9xx_pipe_crc_irq_handler(dev_priv, pipe);

                        if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
                                intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
                }

                if (blc_event || (iir & I915_ASLE_INTERRUPT))
                        intel_opregion_asle_intr(dev_priv);

                if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
                        gmbus_irq_handler(dev_priv);

                /* With MSI, interrupts are only generated when iir
                 * transitions from zero to nonzero. If another bit got
                 * set while we were handling the existing iir bits, then
                 * we would never get another interrupt.
                 *
                 * This is fine on non-MSI as well, as if we hit this path
                 * we avoid exiting the interrupt handler only to generate
                 * another one.
                 *
                 * Note that for MSI this could cause a stray interrupt report
                 * if an interrupt landed in the time between writing IIR and
                 * the posting read. This should be rare enough to never
                 * trigger the 99% of 100,000 interrupts test for disabling
                 * stray interrupts.
                 */
                iir = new_iir;
        }

        enable_rpm_wakeref_asserts(dev_priv);

        return ret;
}

static void i965_irq_uninstall(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        int pipe;

        if (!dev_priv)
                return;

        i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
        I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

        I915_WRITE(HWSTAM, 0xffffffff);
        for_each_pipe(dev_priv, pipe)
                I915_WRITE(PIPESTAT(pipe), 0);
        I915_WRITE(IMR, 0xffffffff);
        I915_WRITE(IER, 0x0);

        for_each_pipe(dev_priv, pipe)
                I915_WRITE(PIPESTAT(pipe),
                           I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
        I915_WRITE(IIR, I915_READ(IIR));
}

/**
 * intel_irq_init - initializes irq support
 * @dev_priv: i915 device instance
 *
 * This function initializes all the irq support including work items, timers
 * and all the vtables. It does not set up the interrupt itself though.
 */
void intel_irq_init(struct drm_i915_private *dev_priv)
{
        struct drm_device *dev = &dev_priv->drm;

        intel_hpd_init_work(dev_priv);

        INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
        INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);

        if (HAS_GUC_SCHED(dev_priv))
                dev_priv->pm_guc_events = GEN9_GUC_TO_HOST_INT_EVENT;

        /* Let's track the enabled rps events */
        if (IS_VALLEYVIEW(dev_priv))
                /* WaGsvRC0ResidencyMethod:vlv */
                dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
        else
                dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;

        dev_priv->rps.pm_intrmsk_mbz = 0;

        /*
         * SNB, IVB and HSW can hard hang on a looping batchbuffer if
         * GEN6_PM_UP_EI_EXPIRED is masked; VLV and CHV may as well.
         *
         * TODO: verify if this can be reproduced on VLV,CHV.
         */
        if (INTEL_INFO(dev_priv)->gen <= 7)
                dev_priv->rps.pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED;

        if (INTEL_INFO(dev_priv)->gen >= 8)
                dev_priv->rps.pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;

        if (IS_GEN2(dev_priv)) {
                /* Gen2 doesn't have a hardware frame counter */
                dev->max_vblank_count = 0;
        } else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
                dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
                dev->driver->get_vblank_counter = g4x_get_vblank_counter;
        } else {
                dev->driver->get_vblank_counter = i915_get_vblank_counter;
                dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
        }

        /*
         * Opt out of the vblank disable timer on everything except gen2.
         * Gen2 doesn't have a hardware frame counter and so depends on
         * vblank interrupts to produce sane vblank sequence numbers.
         */
        if (!IS_GEN2(dev_priv))
                dev->vblank_disable_immediate = true;

        /* Most platforms treat the display irq block as an always-on
         * power domain. vlv/chv can disable it at runtime and need
         * special care to avoid writing any of the display block registers
         * outside of the power domain. We defer setting up the display irqs
         * in this case to the runtime pm.
         */
        dev_priv->display_irqs_enabled = true;
        if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                dev_priv->display_irqs_enabled = false;

        dev_priv->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD;

        dev->driver->get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos;
        dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;

        if (IS_CHERRYVIEW(dev_priv)) {
                dev->driver->irq_handler = cherryview_irq_handler;
                dev->driver->irq_preinstall = cherryview_irq_preinstall;
                dev->driver->irq_postinstall = cherryview_irq_postinstall;
                dev->driver->irq_uninstall = cherryview_irq_uninstall;
                dev->driver->enable_vblank = i965_enable_vblank;
                dev->driver->disable_vblank = i965_disable_vblank;
                dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
        } else if (IS_VALLEYVIEW(dev_priv)) {
                dev->driver->irq_handler = valleyview_irq_handler;
                dev->driver->irq_preinstall = valleyview_irq_preinstall;
                dev->driver->irq_postinstall = valleyview_irq_postinstall;
                dev->driver->irq_uninstall = valleyview_irq_uninstall;
                dev->driver->enable_vblank = i965_enable_vblank;
                dev->driver->disable_vblank = i965_disable_vblank;
                dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
        } else if (INTEL_INFO(dev_priv)->gen >= 8) {
                dev->driver->irq_handler = gen8_irq_handler;
                dev->driver->irq_preinstall = gen8_irq_reset;
                dev->driver->irq_postinstall = gen8_irq_postinstall;
                dev->driver->irq_uninstall = gen8_irq_uninstall;
                dev->driver->enable_vblank = gen8_enable_vblank;
                dev->driver->disable_vblank = gen8_disable_vblank;
                if (IS_GEN9_LP(dev_priv))
                        dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
                else if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv))
                        dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
                else
                        dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
        } else if (HAS_PCH_SPLIT(dev_priv)) {
                dev->driver->irq_handler = ironlake_irq_handler;
                dev->driver->irq_preinstall = ironlake_irq_reset;
                dev->driver->irq_postinstall = ironlake_irq_postinstall;
                dev->driver->irq_uninstall = ironlake_irq_uninstall;
                dev->driver->enable_vblank = ironlake_enable_vblank;
                dev->driver->disable_vblank = ironlake_disable_vblank;
                dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
        } else {
                if (IS_GEN2(dev_priv)) {
                        dev->driver->irq_preinstall = i8xx_irq_preinstall;
                        dev->driver->irq_postinstall = i8xx_irq_postinstall;
                        dev->driver->irq_handler = i8xx_irq_handler;
                        dev->driver->irq_uninstall = i8xx_irq_uninstall;
                        dev->driver->enable_vblank = i8xx_enable_vblank;
                        dev->driver->disable_vblank = i8xx_disable_vblank;
                } else if (IS_GEN3(dev_priv)) {
                        dev->driver->irq_preinstall = i915_irq_preinstall;
                        dev->driver->irq_postinstall = i915_irq_postinstall;
                        dev->driver->irq_uninstall = i915_irq_uninstall;
                        dev->driver->irq_handler = i915_irq_handler;
                        dev->driver->enable_vblank = i8xx_enable_vblank;
                        dev->driver->disable_vblank = i8xx_disable_vblank;
                } else {
                        dev->driver->irq_preinstall = i965_irq_preinstall;
                        dev->driver->irq_postinstall = i965_irq_postinstall;
                        dev->driver->irq_uninstall = i965_irq_uninstall;
                        dev->driver->irq_handler = i965_irq_handler;
                        dev->driver->enable_vblank = i965_enable_vblank;
                        dev->driver->disable_vblank = i965_disable_vblank;
                }
                if (I915_HAS_HOTPLUG(dev_priv))
                        dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
        }
}

/**
 * intel_irq_install - enables the hardware interrupt
 * @dev_priv: i915 device instance
 *
 * This function enables hardware interrupt handling, but leaves hotplug
 * handling disabled. It is called after intel_irq_init().
 *
 * In the driver load and resume code we need working interrupts in a few
 * places but don't want to deal with the hassle of concurrent probe and
 * hotplug workers. Hence the split into this two-stage approach.
 */
int intel_irq_install(struct drm_i915_private *dev_priv)
{
        /*
         * We enable some interrupt sources in our postinstall hooks, so mark
         * interrupts as enabled _before_ actually enabling them to avoid
         * special cases in our ordering checks.
         */
        dev_priv->pm.irqs_enabled = true;

        return drm_irq_install(&dev_priv->drm, dev_priv->drm.pdev->irq);
}

/**
 * intel_irq_uninstall - finalizes all irq handling
 * @dev_priv: i915 device instance
 *
 * This stops interrupt and hotplug handling and unregisters and frees all
 * resources acquired in the init functions.
 */
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
{
        drm_irq_uninstall(&dev_priv->drm);
        intel_hpd_cancel_work(dev_priv);
        dev_priv->pm.irqs_enabled = false;
}

/**
 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
 * @dev_priv: i915 device instance
 *
 * This function is used to disable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
{
        dev_priv->drm.driver->irq_uninstall(&dev_priv->drm);
        dev_priv->pm.irqs_enabled = false;
        synchronize_irq(dev_priv->drm.irq);
}

/**
 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
 * @dev_priv: i915 device instance
 *
 * This function is used to enable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
{
        dev_priv->pm.irqs_enabled = true;
        dev_priv->drm.driver->irq_preinstall(&dev_priv->drm);
        dev_priv->drm.driver->irq_postinstall(&dev_priv->drm);
}
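
/*
 * Illustrative sketch only, not code called from this file: assuming the
 * usual i915 driver load and runtime-pm paths, the entry points documented
 * above are expected to be invoked in roughly this order:
 *
 *      intel_irq_init(dev_priv);                       driver load: vtables, work items
 *      intel_irq_install(dev_priv);                    request and enable the interrupt
 *      ...
 *      intel_runtime_pm_disable_interrupts(dev_priv);  runtime/system suspend
 *      intel_runtime_pm_enable_interrupts(dev_priv);   runtime/system resume
 *      ...
 *      intel_irq_uninstall(dev_priv);                  driver unload
 */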