/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

/**
 * DOC: interrupt handling
 *
 * These functions provide the basic support for enabling and disabling
 * interrupt handling. There's a lot more functionality in i915_irq.c
 * and related files, but that will be described in separate chapters.
 */
static const u32 hpd_ilk[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG,
};

static const u32 hpd_ivb[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
};

static const u32 hpd_bdw[HPD_NUM_PINS] = {
	[HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG,
};

static const u32 hpd_ibx[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_spt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
	[HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT
};

static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

/* BXT hpd list */
static const u32 hpd_bxt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = BXT_DE_PORT_HP_DDIA,
	[HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
	[HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
};

/* IIR can theoretically queue up two events. Be paranoid. */
#define GEN8_IRQ_RESET_NDX(type, which) do { \
	I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), 0); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
} while (0)

#define GEN5_IRQ_RESET(type) do { \
	I915_WRITE(type##IMR, 0xffffffff); \
	POSTING_READ(type##IMR); \
	I915_WRITE(type##IER, 0); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
} while (0)
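/*
 * Illustration (not part of the original driver): GEN5_IRQ_RESET(GT), as the
 * macro above is used elsewhere in this file, expands to roughly the sequence
 * below. IIR is cleared twice because, as noted above, the hardware can queue
 * a second event behind the one currently being acked:
 *
 *	I915_WRITE(GTIMR, 0xffffffff);	// mask every source
 *	POSTING_READ(GTIMR);
 *	I915_WRITE(GTIER, 0);		// disable delivery altogether
 *	I915_WRITE(GTIIR, 0xffffffff);	// ack the first pending event
 *	POSTING_READ(GTIIR);
 *	I915_WRITE(GTIIR, 0xffffffff);	// ack a possibly queued second event
 *	POSTING_READ(GTIIR);
 */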
/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
static void gen5_assert_iir_is_zero(struct drm_i915_private *dev_priv,
				    i915_reg_t reg)
{
	u32 val = I915_READ(reg);

	if (val == 0)
		return;

	WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
	     i915_mmio_reg_offset(reg), val);
	I915_WRITE(reg, 0xffffffff);
	POSTING_READ(reg);
	I915_WRITE(reg, 0xffffffff);
	POSTING_READ(reg);
}

#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
	gen5_assert_iir_is_zero(dev_priv, GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
	I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
} while (0)

#define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
	gen5_assert_iir_is_zero(dev_priv, type##IIR); \
	I915_WRITE(type##IER, (ier_val)); \
	I915_WRITE(type##IMR, (imr_val)); \
	POSTING_READ(type##IMR); \
} while (0)

static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);

/* For display hotplug interrupt */
static inline void
i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
				     uint32_t mask,
				     uint32_t bits)
{
	uint32_t val;

	lockdep_assert_held(&dev_priv->irq_lock);
	WARN_ON(bits & ~mask);

	val = I915_READ(PORT_HOTPLUG_EN);
	val &= ~mask;
	val |= bits;
	I915_WRITE(PORT_HOTPLUG_EN, val);
}

/**
 * i915_hotplug_interrupt_update - update hotplug interrupt enable
 * @dev_priv: driver private
 * @mask: bits to update
 * @bits: bits to enable
 * NOTE: the HPD enable bits are modified both inside and outside
 * of an interrupt context. To avoid read-modify-write cycles from
 * interfering, these bits are protected by a spinlock. Since this
 * function is usually not called from a context where the lock is
 * held already, this function acquires the lock itself. A non-locking
 * version is also available.
 */
void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
				   uint32_t mask,
				   uint32_t bits)
{
	spin_lock_irq(&dev_priv->irq_lock);
	i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
	spin_unlock_irq(&dev_priv->irq_lock);
}

/**
 * ilk_update_display_irq - update DEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ilk_update_display_irq(struct drm_i915_private *dev_priv,
			    uint32_t interrupt_mask,
			    uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->irq_mask) {
		dev_priv->irq_mask = new_val;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	lockdep_assert_held(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	dev_priv->gt_irq_mask &= ~interrupt_mask;
	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
}

void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, mask);
	POSTING_READ_FW(GTIMR);
}

void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, 0);
}

static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
}

static i915_reg_t gen6_pm_imr(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR;
}

static i915_reg_t gen6_pm_ier(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ?
		GEN8_GT_IER(2) : GEN6_PMIER;
}
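/*
 * Illustration (not part of the original driver) of the masking convention
 * used by ilk_update_display_irq()/ilk_update_gt_irq() above and
 * snb_update_pm_irq() below: @interrupt_mask selects which bits to touch,
 * @enabled_irq_mask says which of those end up unmasked, and a set IMR bit
 * means "masked off". Starting from irq_mask = 0xf0:
 *
 *	ilk_update_display_irq(dev_priv, 0x0c, 0x04);
 *		new_val  = 0xf0 & ~0x0c;	// 0xf0, untouched bits kept
 *		new_val |= ~0x04 & 0x0c;	// |= 0x08, bit 3 stays masked
 *		// DEIMR is written with 0xf8: bit 2 is now unmasked
 */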
/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	lockdep_assert_held(&dev_priv->irq_lock);

	new_val = dev_priv->pm_imr;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_imr) {
		dev_priv->pm_imr = new_val;
		I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_imr);
		POSTING_READ(gen6_pm_imr(dev_priv));
	}
}

void gen6_unmask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	snb_update_pm_irq(dev_priv, mask, mask);
}

static void __gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	snb_update_pm_irq(dev_priv, mask, 0);
}

void gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	__gen6_mask_pm_irq(dev_priv, mask);
}

void gen6_reset_pm_iir(struct drm_i915_private *dev_priv, u32 reset_mask)
{
	i915_reg_t reg = gen6_pm_iir(dev_priv);

	lockdep_assert_held(&dev_priv->irq_lock);

	I915_WRITE(reg, reset_mask);
	I915_WRITE(reg, reset_mask);
	POSTING_READ(reg);
}

void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, u32 enable_mask)
{
	lockdep_assert_held(&dev_priv->irq_lock);

	dev_priv->pm_ier |= enable_mask;
	I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->pm_ier);
	gen6_unmask_pm_irq(dev_priv, enable_mask);
	/* unmask_pm_irq provides an implicit barrier (POSTING_READ) */
}

void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, u32 disable_mask)
{
	lockdep_assert_held(&dev_priv->irq_lock);

	dev_priv->pm_ier &= ~disable_mask;
	__gen6_mask_pm_irq(dev_priv, disable_mask);
	I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->pm_ier);
	/* A barrier is missing here, but we don't really need one. */
}

void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	gen6_reset_pm_iir(dev_priv, dev_priv->pm_rps_events);
	dev_priv->rps.pm_iir = 0;
	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
{
	if (READ_ONCE(dev_priv->rps.interrupts_enabled))
		return;

	spin_lock_irq(&dev_priv->irq_lock);
	WARN_ON_ONCE(dev_priv->rps.pm_iir);
	WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
	dev_priv->rps.interrupts_enabled = true;
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);

	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
{
	if (!READ_ONCE(dev_priv->rps.interrupts_enabled))
		return;

	spin_lock_irq(&dev_priv->irq_lock);
	dev_priv->rps.interrupts_enabled = false;

	I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0u));

	gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);

	spin_unlock_irq(&dev_priv->irq_lock);
	synchronize_irq(dev_priv->drm.irq);
	/*
	 * Now that we will not be generating any more work, flush any
	 * outstanding tasks. As we are called on the RPS idle path,
	 * we will reset the GPU to minimum frequencies, so the current
	 * state of the worker can be discarded.
	 */
	cancel_work_sync(&dev_priv->rps.work);
	gen6_reset_rps_interrupts(dev_priv);
}

void gen9_reset_guc_interrupts(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	gen6_reset_pm_iir(dev_priv, dev_priv->pm_guc_events);
	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	if (!dev_priv->guc.interrupts_enabled) {
		WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) &
			     dev_priv->pm_guc_events);
		dev_priv->guc.interrupts_enabled = true;
		gen6_enable_pm_irq(dev_priv, dev_priv->pm_guc_events);
	}
	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	dev_priv->guc.interrupts_enabled = false;

	gen6_disable_pm_irq(dev_priv, dev_priv->pm_guc_events);

	spin_unlock_irq(&dev_priv->irq_lock);
	synchronize_irq(dev_priv->drm.irq);

	gen9_reset_guc_interrupts(dev_priv);
}

/**
 * bdw_update_port_irq - update DE port interrupt
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
				uint32_t interrupt_mask,
				uint32_t enabled_irq_mask)
{
	uint32_t new_val;
	uint32_t old_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	old_val = I915_READ(GEN8_DE_PORT_IMR);

	new_val = old_val;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != old_val) {
		I915_WRITE(GEN8_DE_PORT_IMR, new_val);
		POSTING_READ(GEN8_DE_PORT_IMR);
	}
}

/**
 * bdw_update_pipe_irq - update DE pipe interrupt
 * @dev_priv: driver private
 * @pipe: pipe whose interrupt to update
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
			 enum pipe pipe,
			 uint32_t interrupt_mask,
			 uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->de_irq_mask[pipe];
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->de_irq_mask[pipe]) {
		dev_priv->de_irq_mask[pipe] = new_val;
		I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
		POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
	}
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
				  uint32_t interrupt_mask,
				  uint32_t enabled_irq_mask)
{
	uint32_t sdeimr = I915_READ(SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	lockdep_assert_held(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}

static void
__i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		       u32 enable_mask, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	lockdep_assert_held(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == enable_mask)
		return;

	dev_priv->pipestat_irq_mask[pipe] |= status_mask;

	/* Enable the interrupt, clear any pending status */
	pipestat |= enable_mask | status_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

static void
__i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
			u32 enable_mask, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	lockdep_assert_held(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == 0)
		return;

	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;

	pipestat &= ~enable_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
{
	u32 enable_mask = status_mask << 16;

	/*
	 * On pipe A we don't support the PSR interrupt yet,
	 * on pipe B and C the same bit MBZ.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
		return 0;
	/*
	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
	 * A the same bit is for perf counters which we don't use either.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
		return 0;

	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

	return enable_mask;
}

void
i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		     u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		enable_mask = vlv_get_pipestat_enable_mask(&dev_priv->drm,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

void
i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		      u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		enable_mask = vlv_get_pipestat_enable_mask(&dev_priv->drm,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 * @dev_priv: i915 device private
 */
static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
{
	if (!dev_priv->opregion.asle || !IS_MOBILE(dev_priv))
		return;

	spin_lock_irq(&dev_priv->irq_lock);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
	if (INTEL_GEN(dev_priv) >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_STATUS);

	spin_unlock_irq(&dev_priv->irq_lock);
}
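/*
 * Illustration (not part of the original driver): PIPESTAT keeps the enable
 * bits in the upper half of the register and the matching status bits in the
 * lower half, which is why the helpers above generally derive enable_mask
 * from status_mask << 16 (with the VLV/CHV exceptions handled in
 * vlv_get_pipestat_enable_mask()). A typical caller therefore looks like:
 *
 *	spin_lock_irq(&dev_priv->irq_lock);
 *	i915_enable_pipestat(dev_priv, pipe,
 *			     PIPE_START_VBLANK_INTERRUPT_STATUS);
 *	spin_unlock_irq(&dev_priv->irq_lock);
 */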
/*
 * This timing diagram depicts the video signal in and
 * around the vertical blanking period.
 *
 * Assumptions about the fictitious mode used in this example:
 *  vblank_start >= 3
 *  vsync_start = vblank_start + 1
 *  vsync_end = vblank_start + 2
 *  vtotal = vblank_start + 3
 *
 *           start of vblank:
 *           latch double buffered registers
 *           increment frame counter (ctg+)
 *           generate start of vblank interrupt (gen4+)
 *           |
 *           |          frame start:
 *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
 *           |          may be shifted forward 1-3 extra lines via PIPECONF
 *           |          |
 *           |          |  start of vsync:
 *           |          |  generate vsync interrupt
 *           |          |  |
 * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
 *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
 * ----va---> <-----------------vb--------------------> <--------va-------------
 *       |          |       <----vs----->                     |
 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
 *       |          |                                         |
 *       last visible pixel                                   first visible pixel
 *                  |                                         increment frame counter (gen3/4)
 *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
 *
 * x  = horizontal active
 * _  = horizontal blanking
 * hs = horizontal sync
 * va = vertical active
 * vb = vertical blanking
 * vs = vertical sync
 * vbs = vblank_start (number)
 *
 * Summary:
 * - most events happen at the start of horizontal sync
 * - frame start happens at the start of horizontal blank, 1-4 lines
 *   (depending on PIPECONF settings) after the start of vblank
 * - gen3/4 pixel and frame counter are synchronized with the start
 *   of horizontal active on the first line of vertical active
 */

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	i915_reg_t high_frame, low_frame;
	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
	struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv,
								pipe);
	const struct drm_display_mode *mode = &intel_crtc->base.hwmode;
	unsigned long irqflags;

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vbl_start = mode->crtc_vblank_start;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vbl_start = DIV_ROUND_UP(vbl_start, 2);

	/* Convert to pixel count */
	vbl_start *= htotal;

	/* Start of vblank event occurs at start of hsync */
	vbl_start -= htotal - hsync_start;

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK;
		low   = I915_READ_FW(low_frame);
		high2 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * The frame counter increments at beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}

static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
}
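/*
 * Worked example (not part of the original driver) for the "cooked" counter
 * returned by i915_get_vblank_counter() above, using a made-up mode with
 * htotal = 100, hsync_start = 90 and vblank_start = 8, i.e.
 * vbl_start = 8 * 100 - (100 - 90) = 790 pixels:
 *
 *	frame counter = 5, pixel counter = 100  ->  5 + 0 = 5
 *	frame counter = 5, pixel counter = 795  ->  5 + 1 = 6
 *
 * Once the pixel counter passes the start-of-vblank position the returned
 * value already accounts for the upcoming frame, even though the hardware
 * frame counter only increments at the start of active video.
 */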
/* I915_READ_FW, only for fast reads of display block, no need for forcewake etc. */
static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct drm_display_mode *mode = &crtc->base.hwmode;
	enum pipe pipe = crtc->pipe;
	int position, vtotal;

	if (!crtc->active)
		return -1;

	vtotal = mode->crtc_vtotal;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vtotal /= 2;

	if (IS_GEN2(dev_priv))
		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
	else
		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

	/*
	 * On HSW, the DSL reg (0x70000) appears to return 0 if we
	 * read it just before the start of vblank. So try it again
	 * so we don't accidentally end up spanning a vblank frame
	 * increment, causing the pipe_update_end() code to squawk at us.
	 *
	 * The nature of this problem means we can't simply check the ISR
	 * bit and return the vblank start value; nor can we use the scanline
	 * debug register in the transcoder as it appears to have the same
	 * problem. We may need to extend this to include other platforms,
	 * but so far testing only shows the problem on HSW.
	 */
	if (HAS_DDI(dev_priv) && !position) {
		int i, temp;

		for (i = 0; i < 100; i++) {
			udelay(1);
			temp = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
			if (temp != position) {
				position = temp;
				break;
			}
		}
	}

	/*
	 * See update_scanline_offset() for the details on the
	 * scanline_offset adjustment.
	 */
	return (position + crtc->scanline_offset) % vtotal;
}

static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
				    unsigned int flags, int *vpos, int *hpos,
				    ktime_t *stime, ktime_t *etime,
				    const struct drm_display_mode *mode)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv,
								pipe);
	int position;
	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;
	unsigned long irqflags;

	if (WARN_ON(!mode->crtc_clock)) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

	ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = __intel_get_crtc_scanline(intel_crtc);
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (I915_READ_FW(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;

		/*
		 * In interlaced modes, the pixel counter counts all pixels,
		 * so one field will have htotal more pixels. In order to avoid
		 * the reported position from jumping backwards when the pixel
		 * counter is beyond the length of the shorter field, just
		 * clamp the position to the length of the shorter field. This
		 * matches how the scanline counter based position works since
		 * the scanline counter doesn't count the two half lines.
		 */
		if (position >= vtotal)
			position = vtotal - 1;

		/*
		 * Start of vblank interrupt is triggered at start of hsync,
		 * just prior to the first active line of vblank. However we
		 * consider lines to start at the leading edge of horizontal
		 * active. So, should we get here before we've crossed into
		 * the horizontal active of the first line in vblank, we would
		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
		 * always add htotal-hsync_start to the current pixel position.
		 */
		position = (position + htotal - hsync_start) % vtotal;
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	in_vbl = position >= vbl_start && position < vbl_end;

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;

	if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
		*vpos = position;
		*hpos = 0;
	} else {
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_IN_VBLANK;

	return ret;
}

int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	unsigned long irqflags;
	int position;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	position = __intel_get_crtc_scanline(crtc);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	return position;
}

static int i915_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,
				     int *max_error,
				     struct timeval *vblank_time,
				     unsigned flags)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *crtc;

	if (pipe >= INTEL_INFO(dev_priv)->num_pipes) {
		DRM_ERROR("Invalid crtc %u\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %u\n", pipe);
		return -EINVAL;
	}

	if (!crtc->base.hwmode.crtc_clock) {
		DRM_DEBUG_KMS("crtc %u is disabled\n", pipe);
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags,
						     &crtc->base.hwmode);
}

static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv)
{
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;

	spin_lock(&mchdev_lock);

	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev_priv, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	spin_unlock(&mchdev_lock);

	return;
}

static void notify_ring(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_request *rq = NULL;
	struct intel_wait *wait;

	atomic_inc(&engine->irq_count);
	set_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);

	spin_lock(&engine->breadcrumbs.irq_lock);
	wait = engine->breadcrumbs.irq_wait;
	if (wait) {
		/* We use a callback from the dma-fence to submit
		 * requests after waiting on our own requests. To
		 * ensure minimum delay in queuing the next request to
		 * hardware, signal the fence now rather than wait for
		 * the signaler to be woken up. We still wake up the
		 * waiter in order to handle the irq-seqno coherency
		 * issues (we may receive the interrupt before the
		 * seqno is written, see __i915_request_irq_complete())
		 * and to handle coalescing of multiple seqno updates
		 * and many waiters.
		 */
		if (i915_seqno_passed(intel_engine_get_seqno(engine),
				      wait->seqno) &&
		    !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
			      &wait->request->fence.flags))
			rq = i915_gem_request_get(wait->request);

		wake_up_process(wait->tsk);
	} else {
		__intel_engine_disarm_breadcrumbs(engine);
	}
	spin_unlock(&engine->breadcrumbs.irq_lock);

	if (rq) {
		dma_fence_signal(&rq->fence);
		i915_gem_request_put(rq);
	}

	trace_intel_engine_notify(engine, wait);
}

static void vlv_c0_read(struct drm_i915_private *dev_priv,
			struct intel_rps_ei *ei)
{
	ei->ktime = ktime_get_raw();
	ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT);
	ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
}

void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
{
	memset(&dev_priv->rps.ei, 0, sizeof(dev_priv->rps.ei));
}

static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	const struct intel_rps_ei *prev = &dev_priv->rps.ei;
	struct intel_rps_ei now;
	u32 events = 0;

	if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0)
		return 0;

	vlv_c0_read(dev_priv, &now);

	if (prev->ktime) {
		u64 time, c0;
		u32 render, media;

		time = ktime_us_delta(now.ktime, prev->ktime);

		time *= dev_priv->czclk_freq;

		/* Workload can be split between render + media,
		 * e.g. SwapBuffers being blitted in X after being rendered in
		 * mesa. To account for this we need to combine both engines
		 * into our activity counter.
		 */
		render = now.render_c0 - prev->render_c0;
		media = now.media_c0 - prev->media_c0;
		c0 = max(render, media);
		c0 *= 1000 * 100 << 8; /* to usecs and scale to threshold% */

		if (c0 > time * dev_priv->rps.up_threshold)
			events = GEN6_PM_RP_UP_THRESHOLD;
		else if (c0 < time * dev_priv->rps.down_threshold)
			events = GEN6_PM_RP_DOWN_THRESHOLD;
	}

	dev_priv->rps.ei = now;
	return events;
}

static bool any_waiters(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, dev_priv, id)
		if (intel_engine_has_waiter(engine))
			return true;

	return false;
}
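/*
 * Illustration (not part of the original driver): vlv_wa_c0_ei() above avoids
 * a division by cross-multiplying. With the thresholds expressed in percent,
 *
 *	if (c0 > time * dev_priv->rps.up_threshold)
 *
 * plays the role of "busyness / interval > up_threshold", i.e. the GPU spent
 * a larger fraction of the evaluation interval in C0 than the up threshold
 * allows, so an artificial GEN6_PM_RP_UP_THRESHOLD event is fed to the RPS
 * worker; the symmetric check against down_threshold yields
 * GEN6_PM_RP_DOWN_THRESHOLD.
 */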
static void gen6_pm_rps_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, rps.work);
	bool client_boost = false;
	int new_delay, adj, min, max;
	u32 pm_iir = 0;

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->rps.interrupts_enabled) {
		pm_iir = fetch_and_zero(&dev_priv->rps.pm_iir);
		client_boost = fetch_and_zero(&dev_priv->rps.client_boost);
	}
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Make sure we didn't queue anything we're not going to process. */
	WARN_ON(pm_iir & ~dev_priv->pm_rps_events);
	if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
		goto out;

	mutex_lock(&dev_priv->rps.hw_lock);

	pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);

	adj = dev_priv->rps.last_adj;
	new_delay = dev_priv->rps.cur_freq;
	min = dev_priv->rps.min_freq_softlimit;
	max = dev_priv->rps.max_freq_softlimit;
	if (client_boost || any_waiters(dev_priv))
		max = dev_priv->rps.max_freq;
	if (client_boost && new_delay < dev_priv->rps.boost_freq) {
		new_delay = dev_priv->rps.boost_freq;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else /* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;

		if (new_delay >= dev_priv->rps.max_freq_softlimit)
			adj = 0;
	} else if (client_boost || any_waiters(dev_priv)) {
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
		if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;
		else if (dev_priv->rps.cur_freq > dev_priv->rps.min_freq_softlimit)
			new_delay = dev_priv->rps.min_freq_softlimit;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
		if (adj < 0)
			adj *= 2;
		else /* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;

		if (new_delay <= dev_priv->rps.min_freq_softlimit)
			adj = 0;
	} else { /* unknown event */
		adj = 0;
	}

	dev_priv->rps.last_adj = adj;

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	new_delay += adj;
	new_delay = clamp_t(int, new_delay, min, max);

	if (intel_set_rps(dev_priv, new_delay)) {
		DRM_DEBUG_DRIVER("Failed to set new GPU frequency\n");
		dev_priv->rps.last_adj = 0;
	}

	mutex_unlock(&dev_priv->rps.hw_lock);

out:
	/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->rps.interrupts_enabled)
		gen6_unmask_pm_irq(dev_priv, dev_priv->pm_rps_events);
	spin_unlock_irq(&dev_priv->irq_lock);
}


/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * the same row is more likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	uint32_t misccpctl;
	uint8_t slice = 0;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->drm.struct_mutex);

	/* If we've screwed up tracking, just let the interrupt fire again */
	if (WARN_ON(!dev_priv->l3_parity.which_slice))
		goto out;

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		i915_reg_t reg;

		slice--;
		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1<<slice);

		reg = GEN7_L3CDERRST1(slice);

		error_status = I915_READ(reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		POSTING_READ(reg);

		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

		kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj,
				   KOBJ_CHANGE, parity_event);

		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			  slice, row, bank, subbank);

		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
	}

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

out:
	WARN_ON(dev_priv->l3_parity.which_slice);
	spin_lock_irq(&dev_priv->irq_lock);
	gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
	spin_unlock_irq(&dev_priv->irq_lock);

	mutex_unlock(&dev_priv->drm.struct_mutex);
}

static void ivybridge_parity_error_irq_handler(struct drm_i915_private *dev_priv,
					       u32 iir)
{
	if (!HAS_L3_DPF(dev_priv))
		return;

	spin_lock(&dev_priv->irq_lock);
	gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
	spin_unlock(&dev_priv->irq_lock);

	iir &= GT_PARITY_ERROR(dev_priv);
	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
		dev_priv->l3_parity.which_slice |= 1 << 1;

	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
		dev_priv->l3_parity.which_slice |= 1 << 0;

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}

static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir & GT_RENDER_USER_INTERRUPT)
		notify_ring(dev_priv->engine[RCS]);
	if (gt_iir & ILK_BSD_USER_INTERRUPT)
		notify_ring(dev_priv->engine[VCS]);
}

static void snb_gt_irq_handler(struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir & GT_RENDER_USER_INTERRUPT)
		notify_ring(dev_priv->engine[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(dev_priv->engine[VCS]);
	if (gt_iir & GT_BLT_USER_INTERRUPT)
		notify_ring(dev_priv->engine[BCS]);

	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
		      GT_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
		DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);

	if (gt_iir & GT_PARITY_ERROR(dev_priv))
		ivybridge_parity_error_irq_handler(dev_priv, gt_iir);
}
static __always_inline void
gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir, int test_shift)
{
	bool tasklet = false;

	if (iir & (GT_CONTEXT_SWITCH_INTERRUPT << test_shift)) {
		set_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
		tasklet = true;
	}

	if (iir & (GT_RENDER_USER_INTERRUPT << test_shift)) {
		notify_ring(engine);
		tasklet |= i915.enable_guc_submission;
	}

	if (tasklet)
		tasklet_hi_schedule(&engine->irq_tasklet);
}

static irqreturn_t gen8_gt_irq_ack(struct drm_i915_private *dev_priv,
				   u32 master_ctl,
				   u32 gt_iir[4])
{
	irqreturn_t ret = IRQ_NONE;

	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
		gt_iir[0] = I915_READ_FW(GEN8_GT_IIR(0));
		if (gt_iir[0]) {
			I915_WRITE_FW(GEN8_GT_IIR(0), gt_iir[0]);
			ret = IRQ_HANDLED;
		} else
			DRM_ERROR("The master control interrupt lied (GT0)!\n");
	}

	if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
		gt_iir[1] = I915_READ_FW(GEN8_GT_IIR(1));
		if (gt_iir[1]) {
			I915_WRITE_FW(GEN8_GT_IIR(1), gt_iir[1]);
			ret = IRQ_HANDLED;
		} else
			DRM_ERROR("The master control interrupt lied (GT1)!\n");
	}

	if (master_ctl & GEN8_GT_VECS_IRQ) {
		gt_iir[3] = I915_READ_FW(GEN8_GT_IIR(3));
		if (gt_iir[3]) {
			I915_WRITE_FW(GEN8_GT_IIR(3), gt_iir[3]);
			ret = IRQ_HANDLED;
		} else
			DRM_ERROR("The master control interrupt lied (GT3)!\n");
	}

	if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
		gt_iir[2] = I915_READ_FW(GEN8_GT_IIR(2));
		if (gt_iir[2] & (dev_priv->pm_rps_events |
				 dev_priv->pm_guc_events)) {
			I915_WRITE_FW(GEN8_GT_IIR(2),
				      gt_iir[2] & (dev_priv->pm_rps_events |
						   dev_priv->pm_guc_events));
			ret = IRQ_HANDLED;
		} else
			DRM_ERROR("The master control interrupt lied (PM)!\n");
	}

	return ret;
}

static void gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
				u32 gt_iir[4])
{
	if (gt_iir[0]) {
		gen8_cs_irq_handler(dev_priv->engine[RCS],
				    gt_iir[0], GEN8_RCS_IRQ_SHIFT);
		gen8_cs_irq_handler(dev_priv->engine[BCS],
				    gt_iir[0], GEN8_BCS_IRQ_SHIFT);
	}

	if (gt_iir[1]) {
		gen8_cs_irq_handler(dev_priv->engine[VCS],
				    gt_iir[1], GEN8_VCS1_IRQ_SHIFT);
		gen8_cs_irq_handler(dev_priv->engine[VCS2],
				    gt_iir[1], GEN8_VCS2_IRQ_SHIFT);
	}

	if (gt_iir[3])
		gen8_cs_irq_handler(dev_priv->engine[VECS],
				    gt_iir[3], GEN8_VECS_IRQ_SHIFT);

	if (gt_iir[2] & dev_priv->pm_rps_events)
		gen6_rps_irq_handler(dev_priv, gt_iir[2]);

	if (gt_iir[2] & dev_priv->pm_guc_events)
		gen9_guc_irq_handler(dev_priv, gt_iir[2]);
}
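/*
 * Illustration (not part of the original driver): each GEN8_GT_IIR register
 * carries the bits of two engines side by side, which is why
 * gen8_cs_irq_handler() above shifts the common bit definitions by a
 * per-engine amount before testing them. For GEN8_GT_IIR(1), for instance,
 * the VCS2 user interrupt is tested as
 *
 *	gt_iir[1] & (GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT)
 *
 * while the VCS instance of the same event uses GEN8_VCS1_IRQ_SHIFT.
 */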
static bool bxt_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_A:
		return val & PORTA_HOTPLUG_LONG_DETECT;
	case PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool spt_port_hotplug2_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_E:
		return val & PORTE_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool spt_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_A:
		return val & PORTA_HOTPLUG_LONG_DETECT;
	case PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	case PORT_D:
		return val & PORTD_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool ilk_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_A:
		return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool pch_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	case PORT_D:
		return val & PORTD_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool i9xx_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_B:
		return val & PORTB_HOTPLUG_INT_LONG_PULSE;
	case PORT_C:
		return val & PORTC_HOTPLUG_INT_LONG_PULSE;
	case PORT_D:
		return val & PORTD_HOTPLUG_INT_LONG_PULSE;
	default:
		return false;
	}
}

/*
 * Get a bit mask of pins that have triggered, and which ones may be long.
 * This can be called multiple times with the same masks to accumulate
 * hotplug detection results from several registers.
 *
 * Note that the caller is expected to zero out the masks initially.
 */
static void intel_get_hpd_pins(u32 *pin_mask, u32 *long_mask,
			       u32 hotplug_trigger, u32 dig_hotplug_reg,
			       const u32 hpd[HPD_NUM_PINS],
			       bool long_pulse_detect(enum port port, u32 val))
{
	enum port port;
	int i;

	for_each_hpd_pin(i) {
		if ((hpd[i] & hotplug_trigger) == 0)
			continue;

		*pin_mask |= BIT(i);

		if (!intel_hpd_pin_to_port(i, &port))
			continue;

		if (long_pulse_detect(port, dig_hotplug_reg))
			*long_mask |= BIT(i);
	}

	DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x\n",
			 hotplug_trigger, dig_hotplug_reg, *pin_mask);

}

static void gmbus_irq_handler(struct drm_i915_private *dev_priv)
{
	wake_up_all(&dev_priv->gmbus_wait_queue);
}

static void dp_aux_irq_handler(struct drm_i915_private *dev_priv)
{
	wake_up_all(&dev_priv->gmbus_wait_queue);
}
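/*
 * Illustration (not part of the original driver) of the accumulating use of
 * intel_get_hpd_pins() described above: the caller zeroes the masks once and
 * may then feed in triggers from several hotplug registers before reporting,
 * roughly the way the SPT hotplug handler elsewhere in this file does
 * (trigger/register names here are placeholders):
 *
 *	u32 pin_mask = 0, long_mask = 0;
 *
 *	intel_get_hpd_pins(&pin_mask, &long_mask, trigger, dig_reg,
 *			   hpd_spt, spt_port_hotplug_long_detect);
 *	intel_get_hpd_pins(&pin_mask, &long_mask, trigger2, dig_reg2,
 *			   hpd_spt, spt_port_hotplug2_long_detect);
 *	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
 */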
#if defined(CONFIG_DEBUG_FS)
static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
					 enum pipe pipe,
					 uint32_t crc0, uint32_t crc1,
					 uint32_t crc2, uint32_t crc3,
					 uint32_t crc4)
{
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
	struct intel_pipe_crc_entry *entry;
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
	struct drm_driver *driver = dev_priv->drm.driver;
	uint32_t crcs[5];
	int head, tail;

	spin_lock(&pipe_crc->lock);
	if (pipe_crc->source) {
		if (!pipe_crc->entries) {
			spin_unlock(&pipe_crc->lock);
			DRM_DEBUG_KMS("spurious interrupt\n");
			return;
		}

		head = pipe_crc->head;
		tail = pipe_crc->tail;

		if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
			spin_unlock(&pipe_crc->lock);
			DRM_ERROR("CRC buffer overflowing\n");
			return;
		}

		entry = &pipe_crc->entries[head];

		entry->frame = driver->get_vblank_counter(&dev_priv->drm, pipe);
		entry->crc[0] = crc0;
		entry->crc[1] = crc1;
		entry->crc[2] = crc2;
		entry->crc[3] = crc3;
		entry->crc[4] = crc4;

		head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
		pipe_crc->head = head;

		spin_unlock(&pipe_crc->lock);

		wake_up_interruptible(&pipe_crc->wq);
	} else {
		/*
		 * For some not yet identified reason, the first CRC is
		 * bonkers. So let's just wait for the next vblank and read
		 * out the buggy result.
		 *
		 * On CHV sometimes the second CRC is bonkers as well, so
		 * don't trust that one either.
		 */
		if (pipe_crc->skipped == 0 ||
		    (IS_CHERRYVIEW(dev_priv) && pipe_crc->skipped == 1)) {
			pipe_crc->skipped++;
			spin_unlock(&pipe_crc->lock);
			return;
		}
		spin_unlock(&pipe_crc->lock);
		crcs[0] = crc0;
		crcs[1] = crc1;
		crcs[2] = crc2;
		crcs[3] = crc3;
		crcs[4] = crc4;
		drm_crtc_add_crc_entry(&crtc->base, true,
				       drm_accurate_vblank_count(&crtc->base),
				       crcs);
	}
}
#else
static inline void
display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
			     enum pipe pipe,
			     uint32_t crc0, uint32_t crc1,
			     uint32_t crc2, uint32_t crc3,
			     uint32_t crc4) {}
#endif


static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	display_pipe_crc_irq_handler(dev_priv, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     0, 0, 0, 0);
}

static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	display_pipe_crc_irq_handler(dev_priv, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
}

static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	uint32_t res1, res2;

	if (INTEL_GEN(dev_priv) >= 3)
		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
	else
		res1 = 0;

	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
	else
		res2 = 0;

	display_pipe_crc_irq_handler(dev_priv, pipe,
				     I915_READ(PIPE_CRC_RES_RED(pipe)),
				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
				     res1, res2);
}

/* The RPS events need forcewake, so we add them to a work queue and mask their
 * IMR bits until the work is done. Other interrupts can be processed without
 * the work queue. */
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	if (pm_iir & dev_priv->pm_rps_events) {
		spin_lock(&dev_priv->irq_lock);
		gen6_mask_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
		if (dev_priv->rps.interrupts_enabled) {
			dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
			schedule_work(&dev_priv->rps.work);
		}
		spin_unlock(&dev_priv->irq_lock);
	}

	if (INTEL_INFO(dev_priv)->gen >= 8)
		return;

	if (HAS_VEBOX(dev_priv)) {
		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
			notify_ring(dev_priv->engine[VECS]);

		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
	}
}

static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 gt_iir)
{
	if (gt_iir & GEN9_GUC_TO_HOST_INT_EVENT) {
		/* Sample the log buffer flush related bits & clear them out now
		 * itself from the message identity register to minimize the
		 * probability of losing a flush interrupt, when there are back
		 * to back flush interrupts.
		 * There can be a new flush interrupt, for different log buffer
		 * type (like for ISR), whilst Host is handling one (for DPC).
		 * Since same bit is used in message register for ISR & DPC, it
		 * could happen that GuC sets the bit for 2nd interrupt but Host
		 * clears out the bit on handling the 1st interrupt.
		 */
		u32 msg, flush;

		msg = I915_READ(SOFT_SCRATCH(15));
		flush = msg & (INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED |
			       INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER);
		if (flush) {
			/* Clear the message bits that are handled */
			I915_WRITE(SOFT_SCRATCH(15), msg & ~flush);

			/* Handle flush interrupt in bottom half */
			queue_work(dev_priv->guc.log.runtime.flush_wq,
				   &dev_priv->guc.log.runtime.flush_work);

			dev_priv->guc.log.flush_interrupt_count++;
		} else {
			/* Leaving the unhandled event bits set will not cause
			 * the interrupt to be re-triggered.
			 */
		}
	}
}

static bool intel_pipe_handle_vblank(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	bool ret;

	ret = drm_handle_vblank(&dev_priv->drm, pipe);
	if (ret)
		intel_finish_page_flip_mmio(dev_priv, pipe);

	return ret;
}

static void valleyview_pipestat_irq_ack(struct drm_i915_private *dev_priv,
					u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	int pipe;

	spin_lock(&dev_priv->irq_lock);

	if (!dev_priv->display_irqs_enabled) {
		spin_unlock(&dev_priv->irq_lock);
		return;
	}

	for_each_pipe(dev_priv, pipe) {
		i915_reg_t reg;
		u32 mask, iir_bit = 0;

		/*
		 * PIPESTAT bits get signalled even when the interrupt is
		 * disabled with the mask bits, and some of the status bits do
		 * not generate interrupts at all (like the underrun bit). Hence
		 * we need to be careful that we only handle what we want to
		 * handle.
		 */

		/* fifo underruns are filtered in the underrun handler. */
		mask = PIPE_FIFO_UNDERRUN_STATUS;

		switch (pipe) {
		case PIPE_A:
			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
			break;
		case PIPE_B:
			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
			break;
		case PIPE_C:
			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
			break;
		}
		if (iir & iir_bit)
			mask |= dev_priv->pipestat_irq_mask[pipe];

		if (!mask)
			continue;

		reg = PIPESTAT(pipe);
		mask |= PIPESTAT_INT_ENABLE_MASK;
		pipe_stats[pipe] = I915_READ(reg) & mask;

		/*
		 * Clear the PIPE*STAT regs before the IIR
		 */
		if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS |
					PIPESTAT_INT_STATUS_MASK))
			I915_WRITE(reg, pipe_stats[pipe]);
	}
	spin_unlock(&dev_priv->irq_lock);
}

static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
					    u32 pipe_stats[I915_MAX_PIPES])
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
		    intel_pipe_handle_vblank(dev_priv, pipe))
			intel_check_page_flip(dev_priv, pipe);

		if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV)
			intel_finish_page_flip_cs(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		gmbus_irq_handler(dev_priv);
}

static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
{
	u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

	if (hotplug_status)
		I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);

	return hotplug_status;
}

static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv,
				 u32 hotplug_status)
{
	u32 pin_mask = 0, long_mask = 0;

	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;

		if (hotplug_trigger) {
			intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
					   hotplug_trigger, hpd_status_g4x,
					   i9xx_port_hotplug_long_detect);

			intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
		}

		if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
			dp_aux_irq_handler(dev_priv);
	} else {
		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

		if (hotplug_trigger) {
			intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
					   hotplug_trigger, hpd_status_i915,
					   i9xx_port_hotplug_long_detect);
			intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
		}
	}
}

static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = to_i915(dev);
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	do {
		u32 iir, gt_iir, pm_iir;
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 hotplug_status = 0;
		u32 ier = 0;

		gt_iir = I915_READ(GTIIR);
		pm_iir = I915_READ(GEN6_PMIIR);
		iir = I915_READ(VLV_IIR);

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			break;

1917 ret = IRQ_HANDLED; 1918 1919 /* 1920 * Theory on interrupt generation, based on empirical evidence: 1921 * 1922 * x = ((VLV_IIR & VLV_IER) || 1923 * (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) && 1924 * (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE))); 1925 * 1926 * A CPU interrupt will only be raised when 'x' has a 0->1 edge. 1927 * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to 1928 * guarantee the CPU interrupt will be raised again even if we 1929 * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR 1930 * bits this time around. 1931 */ 1932 I915_WRITE(VLV_MASTER_IER, 0); 1933 ier = I915_READ(VLV_IER); 1934 I915_WRITE(VLV_IER, 0); 1935 1936 if (gt_iir) 1937 I915_WRITE(GTIIR, gt_iir); 1938 if (pm_iir) 1939 I915_WRITE(GEN6_PMIIR, pm_iir); 1940 1941 if (iir & I915_DISPLAY_PORT_INTERRUPT) 1942 hotplug_status = i9xx_hpd_irq_ack(dev_priv); 1943 1944 /* Call regardless, as some status bits might not be 1945 * signalled in iir */ 1946 valleyview_pipestat_irq_ack(dev_priv, iir, pipe_stats); 1947 1948 if (iir & (I915_LPE_PIPE_A_INTERRUPT | 1949 I915_LPE_PIPE_B_INTERRUPT)) 1950 intel_lpe_audio_irq_handler(dev_priv); 1951 1952 /* 1953 * VLV_IIR is single buffered, and reflects the level 1954 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last. 1955 */ 1956 if (iir) 1957 I915_WRITE(VLV_IIR, iir); 1958 1959 I915_WRITE(VLV_IER, ier); 1960 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); 1961 POSTING_READ(VLV_MASTER_IER); 1962 1963 if (gt_iir) 1964 snb_gt_irq_handler(dev_priv, gt_iir); 1965 if (pm_iir) 1966 gen6_rps_irq_handler(dev_priv, pm_iir); 1967 1968 if (hotplug_status) 1969 i9xx_hpd_irq_handler(dev_priv, hotplug_status); 1970 1971 valleyview_pipestat_irq_handler(dev_priv, pipe_stats); 1972 } while (0); 1973 1974 enable_rpm_wakeref_asserts(dev_priv); 1975 1976 return ret; 1977 } 1978 1979 static irqreturn_t cherryview_irq_handler(int irq, void *arg) 1980 { 1981 struct drm_device *dev = arg; 1982 struct drm_i915_private *dev_priv = to_i915(dev); 1983 irqreturn_t ret = IRQ_NONE; 1984 1985 if (!intel_irqs_enabled(dev_priv)) 1986 return IRQ_NONE; 1987 1988 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 1989 disable_rpm_wakeref_asserts(dev_priv); 1990 1991 do { 1992 u32 master_ctl, iir; 1993 u32 gt_iir[4] = {}; 1994 u32 pipe_stats[I915_MAX_PIPES] = {}; 1995 u32 hotplug_status = 0; 1996 u32 ier = 0; 1997 1998 master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL; 1999 iir = I915_READ(VLV_IIR); 2000 2001 if (master_ctl == 0 && iir == 0) 2002 break; 2003 2004 ret = IRQ_HANDLED; 2005 2006 /* 2007 * Theory on interrupt generation, based on empirical evidence: 2008 * 2009 * x = ((VLV_IIR & VLV_IER) || 2010 * ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) && 2011 * (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL))); 2012 * 2013 * A CPU interrupt will only be raised when 'x' has a 0->1 edge. 2014 * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to 2015 * guarantee the CPU interrupt will be raised again even if we 2016 * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL 2017 * bits this time around. 
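 *
 * (This mirrors the VLV handler above, with GEN8_MASTER_IRQ and
 * GEN8_MASTER_IRQ_CONTROL taking the place of VLV_MASTER_IER and
 * MASTER_INTERRUPT_ENABLE as the top-level enable.)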
2018 */
2019 I915_WRITE(GEN8_MASTER_IRQ, 0);
2020 ier = I915_READ(VLV_IER);
2021 I915_WRITE(VLV_IER, 0);
2022 
2023 gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);
2024 
2025 if (iir & I915_DISPLAY_PORT_INTERRUPT)
2026 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
2027 
2028 /* Call regardless, as some status bits might not be
2029 signalled in iir */
2030 valleyview_pipestat_irq_ack(dev_priv, iir, pipe_stats);
2031 
2032 if (iir & (I915_LPE_PIPE_A_INTERRUPT |
2033 I915_LPE_PIPE_B_INTERRUPT |
2034 I915_LPE_PIPE_C_INTERRUPT))
2035 intel_lpe_audio_irq_handler(dev_priv);
2036 
2037 /*
2038 * VLV_IIR is single buffered, and reflects the level
2039 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
2040 */
2041 if (iir)
2042 I915_WRITE(VLV_IIR, iir);
2043 
2044 I915_WRITE(VLV_IER, ier);
2045 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
2046 POSTING_READ(GEN8_MASTER_IRQ);
2047 
2048 gen8_gt_irq_handler(dev_priv, gt_iir);
2049 
2050 if (hotplug_status)
2051 i9xx_hpd_irq_handler(dev_priv, hotplug_status);
2052 
2053 valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
2054 } while (0);
2055 
2056 enable_rpm_wakeref_asserts(dev_priv);
2057 
2058 return ret;
2059 }
2060 
2061 static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
2062 u32 hotplug_trigger,
2063 const u32 hpd[HPD_NUM_PINS])
2064 {
2065 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2066 
2067 /*
2068 * Somehow the PCH doesn't seem to really ack the interrupt to the CPU
2069 * unless we touch the hotplug register, even if hotplug_trigger is
2070 * zero. Not acking leads to "The master control interrupt lied (SDE)!"
2071 * errors.
2072 */
2073 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2074 if (!hotplug_trigger) {
2075 u32 mask = PORTA_HOTPLUG_STATUS_MASK |
2076 PORTD_HOTPLUG_STATUS_MASK |
2077 PORTC_HOTPLUG_STATUS_MASK |
2078 PORTB_HOTPLUG_STATUS_MASK;
2079 dig_hotplug_reg &= ~mask;
2080 }
2081 
2082 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2083 if (!hotplug_trigger)
2084 return;
2085 
2086 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
2087 dig_hotplug_reg, hpd,
2088 pch_port_hotplug_long_detect);
2089 
2090 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2091 }
2092 
2093 static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
2094 {
2095 int pipe;
2096 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
2097 
2098 ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ibx);
2099 
2100 if (pch_iir & SDE_AUDIO_POWER_MASK) {
2101 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
2102 SDE_AUDIO_POWER_SHIFT);
2103 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
2104 port_name(port));
2105 }
2106 
2107 if (pch_iir & SDE_AUX_MASK)
2108 dp_aux_irq_handler(dev_priv);
2109 
2110 if (pch_iir & SDE_GMBUS)
2111 gmbus_irq_handler(dev_priv);
2112 
2113 if (pch_iir & SDE_AUDIO_HDCP_MASK)
2114 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
2115 
2116 if (pch_iir & SDE_AUDIO_TRANS_MASK)
2117 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
2118 
2119 if (pch_iir & SDE_POISON)
2120 DRM_ERROR("PCH poison interrupt\n");
2121 
2122 if (pch_iir & SDE_FDI_MASK)
2123 for_each_pipe(dev_priv, pipe)
2124 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
2125 pipe_name(pipe),
2126 I915_READ(FDI_RX_IIR(pipe)));
2127 
2128 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
2129 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
2130 
2131 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
2132 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
2133 
2134 if (pch_iir &
SDE_TRANSA_FIFO_UNDER) 2135 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A); 2136 2137 if (pch_iir & SDE_TRANSB_FIFO_UNDER) 2138 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B); 2139 } 2140 2141 static void ivb_err_int_handler(struct drm_i915_private *dev_priv) 2142 { 2143 u32 err_int = I915_READ(GEN7_ERR_INT); 2144 enum pipe pipe; 2145 2146 if (err_int & ERR_INT_POISON) 2147 DRM_ERROR("Poison interrupt\n"); 2148 2149 for_each_pipe(dev_priv, pipe) { 2150 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) 2151 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2152 2153 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) { 2154 if (IS_IVYBRIDGE(dev_priv)) 2155 ivb_pipe_crc_irq_handler(dev_priv, pipe); 2156 else 2157 hsw_pipe_crc_irq_handler(dev_priv, pipe); 2158 } 2159 } 2160 2161 I915_WRITE(GEN7_ERR_INT, err_int); 2162 } 2163 2164 static void cpt_serr_int_handler(struct drm_i915_private *dev_priv) 2165 { 2166 u32 serr_int = I915_READ(SERR_INT); 2167 2168 if (serr_int & SERR_INT_POISON) 2169 DRM_ERROR("PCH poison interrupt\n"); 2170 2171 if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN) 2172 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A); 2173 2174 if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN) 2175 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B); 2176 2177 if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN) 2178 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_C); 2179 2180 I915_WRITE(SERR_INT, serr_int); 2181 } 2182 2183 static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) 2184 { 2185 int pipe; 2186 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; 2187 2188 ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_cpt); 2189 2190 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) { 2191 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> 2192 SDE_AUDIO_POWER_SHIFT_CPT); 2193 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n", 2194 port_name(port)); 2195 } 2196 2197 if (pch_iir & SDE_AUX_MASK_CPT) 2198 dp_aux_irq_handler(dev_priv); 2199 2200 if (pch_iir & SDE_GMBUS_CPT) 2201 gmbus_irq_handler(dev_priv); 2202 2203 if (pch_iir & SDE_AUDIO_CP_REQ_CPT) 2204 DRM_DEBUG_DRIVER("Audio CP request interrupt\n"); 2205 2206 if (pch_iir & SDE_AUDIO_CP_CHG_CPT) 2207 DRM_DEBUG_DRIVER("Audio CP change interrupt\n"); 2208 2209 if (pch_iir & SDE_FDI_MASK_CPT) 2210 for_each_pipe(dev_priv, pipe) 2211 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 2212 pipe_name(pipe), 2213 I915_READ(FDI_RX_IIR(pipe))); 2214 2215 if (pch_iir & SDE_ERROR_CPT) 2216 cpt_serr_int_handler(dev_priv); 2217 } 2218 2219 static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) 2220 { 2221 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT & 2222 ~SDE_PORTE_HOTPLUG_SPT; 2223 u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT; 2224 u32 pin_mask = 0, long_mask = 0; 2225 2226 if (hotplug_trigger) { 2227 u32 dig_hotplug_reg; 2228 2229 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); 2230 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); 2231 2232 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, 2233 dig_hotplug_reg, hpd_spt, 2234 spt_port_hotplug_long_detect); 2235 } 2236 2237 if (hotplug2_trigger) { 2238 u32 dig_hotplug_reg; 2239 2240 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2); 2241 I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg); 2242 2243 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug2_trigger, 2244 dig_hotplug_reg, hpd_spt, 2245 spt_port_hotplug2_long_detect); 2246 } 2247 2248 if (pin_mask) 2249 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 
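/* GMBUS (i2c) events on SPT are reported via the same SDE_GMBUS_CPT bit
 * that the CPT PCH uses. */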
2250 
2251 if (pch_iir & SDE_GMBUS_CPT)
2252 gmbus_irq_handler(dev_priv);
2253 }
2254 
2255 static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv,
2256 u32 hotplug_trigger,
2257 const u32 hpd[HPD_NUM_PINS])
2258 {
2259 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2260 
2261 dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
2262 I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);
2263 
2264 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
2265 dig_hotplug_reg, hpd,
2266 ilk_port_hotplug_long_detect);
2267 
2268 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2269 }
2270 
2271 static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
2272 u32 de_iir)
2273 {
2274 enum pipe pipe;
2275 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;
2276 
2277 if (hotplug_trigger)
2278 ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ilk);
2279 
2280 if (de_iir & DE_AUX_CHANNEL_A)
2281 dp_aux_irq_handler(dev_priv);
2282 
2283 if (de_iir & DE_GSE)
2284 intel_opregion_asle_intr(dev_priv);
2285 
2286 if (de_iir & DE_POISON)
2287 DRM_ERROR("Poison interrupt\n");
2288 
2289 for_each_pipe(dev_priv, pipe) {
2290 if (de_iir & DE_PIPE_VBLANK(pipe) &&
2291 intel_pipe_handle_vblank(dev_priv, pipe))
2292 intel_check_page_flip(dev_priv, pipe);
2293 
2294 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
2295 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2296 
2297 if (de_iir & DE_PIPE_CRC_DONE(pipe))
2298 i9xx_pipe_crc_irq_handler(dev_priv, pipe);
2299 
2300 /* plane/pipes map 1:1 on ilk+ */
2301 if (de_iir & DE_PLANE_FLIP_DONE(pipe))
2302 intel_finish_page_flip_cs(dev_priv, pipe);
2303 }
2304 
2305 /* check event from PCH */
2306 if (de_iir & DE_PCH_EVENT) {
2307 u32 pch_iir = I915_READ(SDEIIR);
2308 
2309 if (HAS_PCH_CPT(dev_priv))
2310 cpt_irq_handler(dev_priv, pch_iir);
2311 else
2312 ibx_irq_handler(dev_priv, pch_iir);
2313 
2314 /* should clear the PCH hotplug event before clearing the CPU irq */
2315 I915_WRITE(SDEIIR, pch_iir);
2316 }
2317 
2318 if (IS_GEN5(dev_priv) && de_iir & DE_PCU_EVENT)
2319 ironlake_rps_change_irq_handler(dev_priv);
2320 }
2321 
2322 static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
2323 u32 de_iir)
2324 {
2325 enum pipe pipe;
2326 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;
2327 
2328 if (hotplug_trigger)
2329 ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ivb);
2330 
2331 if (de_iir & DE_ERR_INT_IVB)
2332 ivb_err_int_handler(dev_priv);
2333 
2334 if (de_iir & DE_AUX_CHANNEL_A_IVB)
2335 dp_aux_irq_handler(dev_priv);
2336 
2337 if (de_iir & DE_GSE_IVB)
2338 intel_opregion_asle_intr(dev_priv);
2339 
2340 for_each_pipe(dev_priv, pipe) {
2341 if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) &&
2342 intel_pipe_handle_vblank(dev_priv, pipe))
2343 intel_check_page_flip(dev_priv, pipe);
2344 
2345 /* plane/pipes map 1:1 on ilk+ */
2346 if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe))
2347 intel_finish_page_flip_cs(dev_priv, pipe);
2348 }
2349 
2350 /* check event from PCH */
2351 if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) {
2352 u32 pch_iir = I915_READ(SDEIIR);
2353 
2354 cpt_irq_handler(dev_priv, pch_iir);
2355 
2356 /* clear the PCH hotplug event before clearing the CPU irq */
2357 I915_WRITE(SDEIIR, pch_iir);
2358 }
2359 }
2360 
2361 /*
2362 * To handle irqs with the minimum potential races with fresh interrupts, we:
2363 * 1 - Disable Master Interrupt Control.
2364 * 2 - Find the source(s) of the interrupt.
2365 * 3 - Clear the Interrupt Identity bits (IIR).
2366 * 4 - Process the interrupt(s) that had bits set in the IIRs.
2367 * 5 - Re-enable Master Interrupt Control.
2368 */
2369 static irqreturn_t ironlake_irq_handler(int irq, void *arg)
2370 {
2371 struct drm_device *dev = arg;
2372 struct drm_i915_private *dev_priv = to_i915(dev);
2373 u32 de_iir, gt_iir, de_ier, sde_ier = 0;
2374 irqreturn_t ret = IRQ_NONE;
2375 
2376 if (!intel_irqs_enabled(dev_priv))
2377 return IRQ_NONE;
2378 
2379 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
2380 disable_rpm_wakeref_asserts(dev_priv);
2381 
2382 /* disable master interrupt before clearing iir */
2383 de_ier = I915_READ(DEIER);
2384 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
2385 POSTING_READ(DEIER);
2386 
2387 /* Disable south interrupts. We'll only write to SDEIIR once, so further
2388 * interrupts will be stored on its back queue, and then we'll be
2389 * able to process them after we restore SDEIER (as soon as we restore
2390 * it, we'll get an interrupt if SDEIIR still has something to process
2391 * due to its back queue). */
2392 if (!HAS_PCH_NOP(dev_priv)) {
2393 sde_ier = I915_READ(SDEIER);
2394 I915_WRITE(SDEIER, 0);
2395 POSTING_READ(SDEIER);
2396 }
2397 
2398 /* Find, clear, then process each source of interrupt */
2399 
2400 gt_iir = I915_READ(GTIIR);
2401 if (gt_iir) {
2402 I915_WRITE(GTIIR, gt_iir);
2403 ret = IRQ_HANDLED;
2404 if (INTEL_GEN(dev_priv) >= 6)
2405 snb_gt_irq_handler(dev_priv, gt_iir);
2406 else
2407 ilk_gt_irq_handler(dev_priv, gt_iir);
2408 }
2409 
2410 de_iir = I915_READ(DEIIR);
2411 if (de_iir) {
2412 I915_WRITE(DEIIR, de_iir);
2413 ret = IRQ_HANDLED;
2414 if (INTEL_GEN(dev_priv) >= 7)
2415 ivb_display_irq_handler(dev_priv, de_iir);
2416 else
2417 ilk_display_irq_handler(dev_priv, de_iir);
2418 }
2419 
2420 if (INTEL_GEN(dev_priv) >= 6) {
2421 u32 pm_iir = I915_READ(GEN6_PMIIR);
2422 if (pm_iir) {
2423 I915_WRITE(GEN6_PMIIR, pm_iir);
2424 ret = IRQ_HANDLED;
2425 gen6_rps_irq_handler(dev_priv, pm_iir);
2426 }
2427 }
2428 
2429 I915_WRITE(DEIER, de_ier);
2430 POSTING_READ(DEIER);
2431 if (!HAS_PCH_NOP(dev_priv)) {
2432 I915_WRITE(SDEIER, sde_ier);
2433 POSTING_READ(SDEIER);
2434 }
2435 
2436 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
2437 enable_rpm_wakeref_asserts(dev_priv);
2438 
2439 return ret;
2440 }
2441 
2442 static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv,
2443 u32 hotplug_trigger,
2444 const u32 hpd[HPD_NUM_PINS])
2445 {
2446 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2447 
2448 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2449 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2450 
2451 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
2452 dig_hotplug_reg, hpd,
2453 bxt_port_hotplug_long_detect);
2454 
2455 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2456 }
2457 
2458 static irqreturn_t
2459 gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
2460 {
2461 irqreturn_t ret = IRQ_NONE;
2462 u32 iir;
2463 enum pipe pipe;
2464 
2465 if (master_ctl & GEN8_DE_MISC_IRQ) {
2466 iir = I915_READ(GEN8_DE_MISC_IIR);
2467 if (iir) {
2468 I915_WRITE(GEN8_DE_MISC_IIR, iir);
2469 ret = IRQ_HANDLED;
2470 if (iir & GEN8_DE_MISC_GSE)
2471 intel_opregion_asle_intr(dev_priv);
2472 else
2473 DRM_ERROR("Unexpected DE Misc interrupt\n");
2474 }
2475 else
2476 DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
2477 }
2478 
2479 if (master_ctl & GEN8_DE_PORT_IRQ) {
2480 iir = I915_READ(GEN8_DE_PORT_IIR);
2481 if (iir) {
2482 u32 tmp_mask;
2483 bool found = false;
2484 
2485 I915_WRITE(GEN8_DE_PORT_IIR, iir);
2486 ret =
IRQ_HANDLED; 2487 2488 tmp_mask = GEN8_AUX_CHANNEL_A; 2489 if (INTEL_INFO(dev_priv)->gen >= 9) 2490 tmp_mask |= GEN9_AUX_CHANNEL_B | 2491 GEN9_AUX_CHANNEL_C | 2492 GEN9_AUX_CHANNEL_D; 2493 2494 if (iir & tmp_mask) { 2495 dp_aux_irq_handler(dev_priv); 2496 found = true; 2497 } 2498 2499 if (IS_GEN9_LP(dev_priv)) { 2500 tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK; 2501 if (tmp_mask) { 2502 bxt_hpd_irq_handler(dev_priv, tmp_mask, 2503 hpd_bxt); 2504 found = true; 2505 } 2506 } else if (IS_BROADWELL(dev_priv)) { 2507 tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG; 2508 if (tmp_mask) { 2509 ilk_hpd_irq_handler(dev_priv, 2510 tmp_mask, hpd_bdw); 2511 found = true; 2512 } 2513 } 2514 2515 if (IS_GEN9_LP(dev_priv) && (iir & BXT_DE_PORT_GMBUS)) { 2516 gmbus_irq_handler(dev_priv); 2517 found = true; 2518 } 2519 2520 if (!found) 2521 DRM_ERROR("Unexpected DE Port interrupt\n"); 2522 } 2523 else 2524 DRM_ERROR("The master control interrupt lied (DE PORT)!\n"); 2525 } 2526 2527 for_each_pipe(dev_priv, pipe) { 2528 u32 flip_done, fault_errors; 2529 2530 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe))) 2531 continue; 2532 2533 iir = I915_READ(GEN8_DE_PIPE_IIR(pipe)); 2534 if (!iir) { 2535 DRM_ERROR("The master control interrupt lied (DE PIPE)!\n"); 2536 continue; 2537 } 2538 2539 ret = IRQ_HANDLED; 2540 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir); 2541 2542 if (iir & GEN8_PIPE_VBLANK && 2543 intel_pipe_handle_vblank(dev_priv, pipe)) 2544 intel_check_page_flip(dev_priv, pipe); 2545 2546 flip_done = iir; 2547 if (INTEL_INFO(dev_priv)->gen >= 9) 2548 flip_done &= GEN9_PIPE_PLANE1_FLIP_DONE; 2549 else 2550 flip_done &= GEN8_PIPE_PRIMARY_FLIP_DONE; 2551 2552 if (flip_done) 2553 intel_finish_page_flip_cs(dev_priv, pipe); 2554 2555 if (iir & GEN8_PIPE_CDCLK_CRC_DONE) 2556 hsw_pipe_crc_irq_handler(dev_priv, pipe); 2557 2558 if (iir & GEN8_PIPE_FIFO_UNDERRUN) 2559 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2560 2561 fault_errors = iir; 2562 if (INTEL_INFO(dev_priv)->gen >= 9) 2563 fault_errors &= GEN9_DE_PIPE_IRQ_FAULT_ERRORS; 2564 else 2565 fault_errors &= GEN8_DE_PIPE_IRQ_FAULT_ERRORS; 2566 2567 if (fault_errors) 2568 DRM_ERROR("Fault errors on pipe %c: 0x%08x\n", 2569 pipe_name(pipe), 2570 fault_errors); 2571 } 2572 2573 if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) && 2574 master_ctl & GEN8_DE_PCH_IRQ) { 2575 /* 2576 * FIXME(BDW): Assume for now that the new interrupt handling 2577 * scheme also closed the SDE interrupt handling race we've seen 2578 * on older pch-split platforms. But this needs testing. 2579 */ 2580 iir = I915_READ(SDEIIR); 2581 if (iir) { 2582 I915_WRITE(SDEIIR, iir); 2583 ret = IRQ_HANDLED; 2584 2585 if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv)) 2586 spt_irq_handler(dev_priv, iir); 2587 else 2588 cpt_irq_handler(dev_priv, iir); 2589 } else { 2590 /* 2591 * Like on previous PCH there seems to be something 2592 * fishy going on with forwarding PCH interrupts. 
2593 */ 2594 DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n"); 2595 } 2596 } 2597 2598 return ret; 2599 } 2600 2601 static irqreturn_t gen8_irq_handler(int irq, void *arg) 2602 { 2603 struct drm_device *dev = arg; 2604 struct drm_i915_private *dev_priv = to_i915(dev); 2605 u32 master_ctl; 2606 u32 gt_iir[4] = {}; 2607 irqreturn_t ret; 2608 2609 if (!intel_irqs_enabled(dev_priv)) 2610 return IRQ_NONE; 2611 2612 master_ctl = I915_READ_FW(GEN8_MASTER_IRQ); 2613 master_ctl &= ~GEN8_MASTER_IRQ_CONTROL; 2614 if (!master_ctl) 2615 return IRQ_NONE; 2616 2617 I915_WRITE_FW(GEN8_MASTER_IRQ, 0); 2618 2619 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2620 disable_rpm_wakeref_asserts(dev_priv); 2621 2622 /* Find, clear, then process each source of interrupt */ 2623 ret = gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir); 2624 gen8_gt_irq_handler(dev_priv, gt_iir); 2625 ret |= gen8_de_irq_handler(dev_priv, master_ctl); 2626 2627 I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 2628 POSTING_READ_FW(GEN8_MASTER_IRQ); 2629 2630 enable_rpm_wakeref_asserts(dev_priv); 2631 2632 return ret; 2633 } 2634 2635 /** 2636 * i915_reset_and_wakeup - do process context error handling work 2637 * @dev_priv: i915 device private 2638 * 2639 * Fire an error uevent so userspace can see that a hang or error 2640 * was detected. 2641 */ 2642 static void i915_reset_and_wakeup(struct drm_i915_private *dev_priv) 2643 { 2644 struct kobject *kobj = &dev_priv->drm.primary->kdev->kobj; 2645 char *error_event[] = { I915_ERROR_UEVENT "=1", NULL }; 2646 char *reset_event[] = { I915_RESET_UEVENT "=1", NULL }; 2647 char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL }; 2648 2649 kobject_uevent_env(kobj, KOBJ_CHANGE, error_event); 2650 2651 DRM_DEBUG_DRIVER("resetting chip\n"); 2652 kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event); 2653 2654 intel_prepare_reset(dev_priv); 2655 2656 set_bit(I915_RESET_HANDOFF, &dev_priv->gpu_error.flags); 2657 wake_up_all(&dev_priv->gpu_error.wait_queue); 2658 2659 do { 2660 /* 2661 * All state reset _must_ be completed before we update the 2662 * reset counter, for otherwise waiters might miss the reset 2663 * pending state and not properly drop locks, resulting in 2664 * deadlocks with the reset work. 2665 */ 2666 if (mutex_trylock(&dev_priv->drm.struct_mutex)) { 2667 i915_reset(dev_priv); 2668 mutex_unlock(&dev_priv->drm.struct_mutex); 2669 } 2670 2671 /* We need to wait for anyone holding the lock to wakeup */ 2672 } while (wait_on_bit_timeout(&dev_priv->gpu_error.flags, 2673 I915_RESET_HANDOFF, 2674 TASK_UNINTERRUPTIBLE, 2675 HZ)); 2676 2677 intel_finish_reset(dev_priv); 2678 2679 if (!test_bit(I915_WEDGED, &dev_priv->gpu_error.flags)) 2680 kobject_uevent_env(kobj, 2681 KOBJ_CHANGE, reset_done_event); 2682 2683 /* 2684 * Note: The wake_up also serves as a memory barrier so that 2685 * waiters see the updated value of the dev_priv->gpu_error. 
2686 */ 2687 clear_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags); 2688 wake_up_all(&dev_priv->gpu_error.reset_queue); 2689 } 2690 2691 static inline void 2692 i915_err_print_instdone(struct drm_i915_private *dev_priv, 2693 struct intel_instdone *instdone) 2694 { 2695 int slice; 2696 int subslice; 2697 2698 pr_err(" INSTDONE: 0x%08x\n", instdone->instdone); 2699 2700 if (INTEL_GEN(dev_priv) <= 3) 2701 return; 2702 2703 pr_err(" SC_INSTDONE: 0x%08x\n", instdone->slice_common); 2704 2705 if (INTEL_GEN(dev_priv) <= 6) 2706 return; 2707 2708 for_each_instdone_slice_subslice(dev_priv, slice, subslice) 2709 pr_err(" SAMPLER_INSTDONE[%d][%d]: 0x%08x\n", 2710 slice, subslice, instdone->sampler[slice][subslice]); 2711 2712 for_each_instdone_slice_subslice(dev_priv, slice, subslice) 2713 pr_err(" ROW_INSTDONE[%d][%d]: 0x%08x\n", 2714 slice, subslice, instdone->row[slice][subslice]); 2715 } 2716 2717 static void i915_clear_error_registers(struct drm_i915_private *dev_priv) 2718 { 2719 u32 eir; 2720 2721 if (!IS_GEN2(dev_priv)) 2722 I915_WRITE(PGTBL_ER, I915_READ(PGTBL_ER)); 2723 2724 if (INTEL_GEN(dev_priv) < 4) 2725 I915_WRITE(IPEIR, I915_READ(IPEIR)); 2726 else 2727 I915_WRITE(IPEIR_I965, I915_READ(IPEIR_I965)); 2728 2729 I915_WRITE(EIR, I915_READ(EIR)); 2730 eir = I915_READ(EIR); 2731 if (eir) { 2732 /* 2733 * some errors might have become stuck, 2734 * mask them. 2735 */ 2736 DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masking\n", eir); 2737 I915_WRITE(EMR, I915_READ(EMR) | eir); 2738 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 2739 } 2740 } 2741 2742 /** 2743 * i915_handle_error - handle a gpu error 2744 * @dev_priv: i915 device private 2745 * @engine_mask: mask representing engines that are hung 2746 * @fmt: Error message format string 2747 * 2748 * Do some basic checking of register state at error time and 2749 * dump it to the syslog. Also call i915_capture_error_state() to make 2750 * sure we get a record and make it available in debugfs. Fire a uevent 2751 * so userspace knows something bad happened (should trigger collection 2752 * of a ring dump etc.). 2753 */ 2754 void i915_handle_error(struct drm_i915_private *dev_priv, 2755 u32 engine_mask, 2756 const char *fmt, ...) 2757 { 2758 va_list args; 2759 char error_msg[80]; 2760 2761 va_start(args, fmt); 2762 vscnprintf(error_msg, sizeof(error_msg), fmt, args); 2763 va_end(args); 2764 2765 /* 2766 * In most cases it's guaranteed that we get here with an RPM 2767 * reference held, for example because there is a pending GPU 2768 * request that won't finish until the reset is done. This 2769 * isn't the case at least when we get here by doing a 2770 * simulated reset via debugfs, so get an RPM reference. 
2771 */ 2772 intel_runtime_pm_get(dev_priv); 2773 2774 i915_capture_error_state(dev_priv, engine_mask, error_msg); 2775 i915_clear_error_registers(dev_priv); 2776 2777 if (!engine_mask) 2778 goto out; 2779 2780 if (test_and_set_bit(I915_RESET_BACKOFF, 2781 &dev_priv->gpu_error.flags)) 2782 goto out; 2783 2784 i915_reset_and_wakeup(dev_priv); 2785 2786 out: 2787 intel_runtime_pm_put(dev_priv); 2788 } 2789 2790 /* Called from drm generic code, passed 'crtc' which 2791 * we use as a pipe index 2792 */ 2793 static int i8xx_enable_vblank(struct drm_device *dev, unsigned int pipe) 2794 { 2795 struct drm_i915_private *dev_priv = to_i915(dev); 2796 unsigned long irqflags; 2797 2798 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2799 i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS); 2800 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2801 2802 return 0; 2803 } 2804 2805 static int i965_enable_vblank(struct drm_device *dev, unsigned int pipe) 2806 { 2807 struct drm_i915_private *dev_priv = to_i915(dev); 2808 unsigned long irqflags; 2809 2810 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2811 i915_enable_pipestat(dev_priv, pipe, 2812 PIPE_START_VBLANK_INTERRUPT_STATUS); 2813 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2814 2815 return 0; 2816 } 2817 2818 static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe) 2819 { 2820 struct drm_i915_private *dev_priv = to_i915(dev); 2821 unsigned long irqflags; 2822 uint32_t bit = INTEL_GEN(dev_priv) >= 7 ? 2823 DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe); 2824 2825 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2826 ilk_enable_display_irq(dev_priv, bit); 2827 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2828 2829 return 0; 2830 } 2831 2832 static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe) 2833 { 2834 struct drm_i915_private *dev_priv = to_i915(dev); 2835 unsigned long irqflags; 2836 2837 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2838 bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK); 2839 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2840 2841 return 0; 2842 } 2843 2844 /* Called from drm generic code, passed 'crtc' which 2845 * we use as a pipe index 2846 */ 2847 static void i8xx_disable_vblank(struct drm_device *dev, unsigned int pipe) 2848 { 2849 struct drm_i915_private *dev_priv = to_i915(dev); 2850 unsigned long irqflags; 2851 2852 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2853 i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS); 2854 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2855 } 2856 2857 static void i965_disable_vblank(struct drm_device *dev, unsigned int pipe) 2858 { 2859 struct drm_i915_private *dev_priv = to_i915(dev); 2860 unsigned long irqflags; 2861 2862 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2863 i915_disable_pipestat(dev_priv, pipe, 2864 PIPE_START_VBLANK_INTERRUPT_STATUS); 2865 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2866 } 2867 2868 static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe) 2869 { 2870 struct drm_i915_private *dev_priv = to_i915(dev); 2871 unsigned long irqflags; 2872 uint32_t bit = INTEL_GEN(dev_priv) >= 7 ? 
2873 DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe); 2874 2875 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2876 ilk_disable_display_irq(dev_priv, bit); 2877 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2878 } 2879 2880 static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe) 2881 { 2882 struct drm_i915_private *dev_priv = to_i915(dev); 2883 unsigned long irqflags; 2884 2885 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2886 bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK); 2887 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2888 } 2889 2890 static void ibx_irq_reset(struct drm_i915_private *dev_priv) 2891 { 2892 if (HAS_PCH_NOP(dev_priv)) 2893 return; 2894 2895 GEN5_IRQ_RESET(SDE); 2896 2897 if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv)) 2898 I915_WRITE(SERR_INT, 0xffffffff); 2899 } 2900 2901 /* 2902 * SDEIER is also touched by the interrupt handler to work around missed PCH 2903 * interrupts. Hence we can't update it after the interrupt handler is enabled - 2904 * instead we unconditionally enable all PCH interrupt sources here, but then 2905 * only unmask them as needed with SDEIMR. 2906 * 2907 * This function needs to be called before interrupts are enabled. 2908 */ 2909 static void ibx_irq_pre_postinstall(struct drm_device *dev) 2910 { 2911 struct drm_i915_private *dev_priv = to_i915(dev); 2912 2913 if (HAS_PCH_NOP(dev_priv)) 2914 return; 2915 2916 WARN_ON(I915_READ(SDEIER) != 0); 2917 I915_WRITE(SDEIER, 0xffffffff); 2918 POSTING_READ(SDEIER); 2919 } 2920 2921 static void gen5_gt_irq_reset(struct drm_i915_private *dev_priv) 2922 { 2923 GEN5_IRQ_RESET(GT); 2924 if (INTEL_GEN(dev_priv) >= 6) 2925 GEN5_IRQ_RESET(GEN6_PM); 2926 } 2927 2928 static void vlv_display_irq_reset(struct drm_i915_private *dev_priv) 2929 { 2930 enum pipe pipe; 2931 2932 if (IS_CHERRYVIEW(dev_priv)) 2933 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV); 2934 else 2935 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK); 2936 2937 i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0); 2938 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 2939 2940 for_each_pipe(dev_priv, pipe) { 2941 I915_WRITE(PIPESTAT(pipe), 2942 PIPE_FIFO_UNDERRUN_STATUS | 2943 PIPESTAT_INT_STATUS_MASK); 2944 dev_priv->pipestat_irq_mask[pipe] = 0; 2945 } 2946 2947 GEN5_IRQ_RESET(VLV_); 2948 dev_priv->irq_mask = ~0; 2949 } 2950 2951 static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv) 2952 { 2953 u32 pipestat_mask; 2954 u32 enable_mask; 2955 enum pipe pipe; 2956 2957 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV | 2958 PIPE_CRC_DONE_INTERRUPT_STATUS; 2959 2960 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); 2961 for_each_pipe(dev_priv, pipe) 2962 i915_enable_pipestat(dev_priv, pipe, pipestat_mask); 2963 2964 enable_mask = I915_DISPLAY_PORT_INTERRUPT | 2965 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 2966 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 2967 I915_LPE_PIPE_A_INTERRUPT | 2968 I915_LPE_PIPE_B_INTERRUPT; 2969 2970 if (IS_CHERRYVIEW(dev_priv)) 2971 enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT | 2972 I915_LPE_PIPE_C_INTERRUPT; 2973 2974 WARN_ON(dev_priv->irq_mask != ~0); 2975 2976 dev_priv->irq_mask = ~enable_mask; 2977 2978 GEN5_IRQ_INIT(VLV_, dev_priv->irq_mask, enable_mask); 2979 } 2980 2981 /* drm_dma.h hooks 2982 */ 2983 static void ironlake_irq_reset(struct drm_device *dev) 2984 { 2985 struct drm_i915_private *dev_priv = to_i915(dev); 2986 2987 I915_WRITE(HWSTAM, 0xffffffff); 2988 2989 GEN5_IRQ_RESET(DE); 2990 if (IS_GEN7(dev_priv)) 
2991 I915_WRITE(GEN7_ERR_INT, 0xffffffff); 2992 2993 gen5_gt_irq_reset(dev_priv); 2994 2995 ibx_irq_reset(dev_priv); 2996 } 2997 2998 static void valleyview_irq_preinstall(struct drm_device *dev) 2999 { 3000 struct drm_i915_private *dev_priv = to_i915(dev); 3001 3002 I915_WRITE(VLV_MASTER_IER, 0); 3003 POSTING_READ(VLV_MASTER_IER); 3004 3005 gen5_gt_irq_reset(dev_priv); 3006 3007 spin_lock_irq(&dev_priv->irq_lock); 3008 if (dev_priv->display_irqs_enabled) 3009 vlv_display_irq_reset(dev_priv); 3010 spin_unlock_irq(&dev_priv->irq_lock); 3011 } 3012 3013 static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv) 3014 { 3015 GEN8_IRQ_RESET_NDX(GT, 0); 3016 GEN8_IRQ_RESET_NDX(GT, 1); 3017 GEN8_IRQ_RESET_NDX(GT, 2); 3018 GEN8_IRQ_RESET_NDX(GT, 3); 3019 } 3020 3021 static void gen8_irq_reset(struct drm_device *dev) 3022 { 3023 struct drm_i915_private *dev_priv = to_i915(dev); 3024 int pipe; 3025 3026 I915_WRITE(GEN8_MASTER_IRQ, 0); 3027 POSTING_READ(GEN8_MASTER_IRQ); 3028 3029 gen8_gt_irq_reset(dev_priv); 3030 3031 for_each_pipe(dev_priv, pipe) 3032 if (intel_display_power_is_enabled(dev_priv, 3033 POWER_DOMAIN_PIPE(pipe))) 3034 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe); 3035 3036 GEN5_IRQ_RESET(GEN8_DE_PORT_); 3037 GEN5_IRQ_RESET(GEN8_DE_MISC_); 3038 GEN5_IRQ_RESET(GEN8_PCU_); 3039 3040 if (HAS_PCH_SPLIT(dev_priv)) 3041 ibx_irq_reset(dev_priv); 3042 } 3043 3044 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv, 3045 unsigned int pipe_mask) 3046 { 3047 uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN; 3048 enum pipe pipe; 3049 3050 spin_lock_irq(&dev_priv->irq_lock); 3051 for_each_pipe_masked(dev_priv, pipe, pipe_mask) 3052 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe, 3053 dev_priv->de_irq_mask[pipe], 3054 ~dev_priv->de_irq_mask[pipe] | extra_ier); 3055 spin_unlock_irq(&dev_priv->irq_lock); 3056 } 3057 3058 void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv, 3059 unsigned int pipe_mask) 3060 { 3061 enum pipe pipe; 3062 3063 spin_lock_irq(&dev_priv->irq_lock); 3064 for_each_pipe_masked(dev_priv, pipe, pipe_mask) 3065 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe); 3066 spin_unlock_irq(&dev_priv->irq_lock); 3067 3068 /* make sure we're done processing display irqs */ 3069 synchronize_irq(dev_priv->drm.irq); 3070 } 3071 3072 static void cherryview_irq_preinstall(struct drm_device *dev) 3073 { 3074 struct drm_i915_private *dev_priv = to_i915(dev); 3075 3076 I915_WRITE(GEN8_MASTER_IRQ, 0); 3077 POSTING_READ(GEN8_MASTER_IRQ); 3078 3079 gen8_gt_irq_reset(dev_priv); 3080 3081 GEN5_IRQ_RESET(GEN8_PCU_); 3082 3083 spin_lock_irq(&dev_priv->irq_lock); 3084 if (dev_priv->display_irqs_enabled) 3085 vlv_display_irq_reset(dev_priv); 3086 spin_unlock_irq(&dev_priv->irq_lock); 3087 } 3088 3089 static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv, 3090 const u32 hpd[HPD_NUM_PINS]) 3091 { 3092 struct intel_encoder *encoder; 3093 u32 enabled_irqs = 0; 3094 3095 for_each_intel_encoder(&dev_priv->drm, encoder) 3096 if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED) 3097 enabled_irqs |= hpd[encoder->hpd_pin]; 3098 3099 return enabled_irqs; 3100 } 3101 3102 static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv) 3103 { 3104 u32 hotplug; 3105 3106 /* 3107 * Enable digital hotplug on the PCH, and configure the DP short pulse 3108 * duration to 2ms (which is the minimum in the Display Port spec). 3109 * The pulse duration bits are reserved on LPT+. 
3110 */ 3111 hotplug = I915_READ(PCH_PORT_HOTPLUG); 3112 hotplug &= ~(PORTB_PULSE_DURATION_MASK | 3113 PORTC_PULSE_DURATION_MASK | 3114 PORTD_PULSE_DURATION_MASK); 3115 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms; 3116 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms; 3117 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms; 3118 /* 3119 * When CPU and PCH are on the same package, port A 3120 * HPD must be enabled in both north and south. 3121 */ 3122 if (HAS_PCH_LPT_LP(dev_priv)) 3123 hotplug |= PORTA_HOTPLUG_ENABLE; 3124 I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 3125 } 3126 3127 static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv) 3128 { 3129 u32 hotplug_irqs, enabled_irqs; 3130 3131 if (HAS_PCH_IBX(dev_priv)) { 3132 hotplug_irqs = SDE_HOTPLUG_MASK; 3133 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ibx); 3134 } else { 3135 hotplug_irqs = SDE_HOTPLUG_MASK_CPT; 3136 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_cpt); 3137 } 3138 3139 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 3140 3141 ibx_hpd_detection_setup(dev_priv); 3142 } 3143 3144 static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv) 3145 { 3146 u32 hotplug; 3147 3148 /* Enable digital hotplug on the PCH */ 3149 hotplug = I915_READ(PCH_PORT_HOTPLUG); 3150 hotplug |= PORTA_HOTPLUG_ENABLE | 3151 PORTB_HOTPLUG_ENABLE | 3152 PORTC_HOTPLUG_ENABLE | 3153 PORTD_HOTPLUG_ENABLE; 3154 I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 3155 3156 hotplug = I915_READ(PCH_PORT_HOTPLUG2); 3157 hotplug |= PORTE_HOTPLUG_ENABLE; 3158 I915_WRITE(PCH_PORT_HOTPLUG2, hotplug); 3159 } 3160 3161 static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv) 3162 { 3163 u32 hotplug_irqs, enabled_irqs; 3164 3165 hotplug_irqs = SDE_HOTPLUG_MASK_SPT; 3166 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_spt); 3167 3168 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 3169 3170 spt_hpd_detection_setup(dev_priv); 3171 } 3172 3173 static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv) 3174 { 3175 u32 hotplug; 3176 3177 /* 3178 * Enable digital hotplug on the CPU, and configure the DP short pulse 3179 * duration to 2ms (which is the minimum in the Display Port spec) 3180 * The pulse duration bits are reserved on HSW+. 
3181 */ 3182 hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL); 3183 hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK; 3184 hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE | 3185 DIGITAL_PORTA_PULSE_DURATION_2ms; 3186 I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug); 3187 } 3188 3189 static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv) 3190 { 3191 u32 hotplug_irqs, enabled_irqs; 3192 3193 if (INTEL_GEN(dev_priv) >= 8) { 3194 hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG; 3195 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bdw); 3196 3197 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs); 3198 } else if (INTEL_GEN(dev_priv) >= 7) { 3199 hotplug_irqs = DE_DP_A_HOTPLUG_IVB; 3200 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ivb); 3201 3202 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs); 3203 } else { 3204 hotplug_irqs = DE_DP_A_HOTPLUG; 3205 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ilk); 3206 3207 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs); 3208 } 3209 3210 ilk_hpd_detection_setup(dev_priv); 3211 3212 ibx_hpd_irq_setup(dev_priv); 3213 } 3214 3215 static void __bxt_hpd_detection_setup(struct drm_i915_private *dev_priv, 3216 u32 enabled_irqs) 3217 { 3218 u32 hotplug; 3219 3220 hotplug = I915_READ(PCH_PORT_HOTPLUG); 3221 hotplug |= PORTA_HOTPLUG_ENABLE | 3222 PORTB_HOTPLUG_ENABLE | 3223 PORTC_HOTPLUG_ENABLE; 3224 3225 DRM_DEBUG_KMS("Invert bit setting: hp_ctl:%x hp_port:%x\n", 3226 hotplug, enabled_irqs); 3227 hotplug &= ~BXT_DDI_HPD_INVERT_MASK; 3228 3229 /* 3230 * For BXT invert bit has to be set based on AOB design 3231 * for HPD detection logic, update it based on VBT fields. 3232 */ 3233 if ((enabled_irqs & BXT_DE_PORT_HP_DDIA) && 3234 intel_bios_is_port_hpd_inverted(dev_priv, PORT_A)) 3235 hotplug |= BXT_DDIA_HPD_INVERT; 3236 if ((enabled_irqs & BXT_DE_PORT_HP_DDIB) && 3237 intel_bios_is_port_hpd_inverted(dev_priv, PORT_B)) 3238 hotplug |= BXT_DDIB_HPD_INVERT; 3239 if ((enabled_irqs & BXT_DE_PORT_HP_DDIC) && 3240 intel_bios_is_port_hpd_inverted(dev_priv, PORT_C)) 3241 hotplug |= BXT_DDIC_HPD_INVERT; 3242 3243 I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 3244 } 3245 3246 static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv) 3247 { 3248 __bxt_hpd_detection_setup(dev_priv, BXT_DE_PORT_HOTPLUG_MASK); 3249 } 3250 3251 static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv) 3252 { 3253 u32 hotplug_irqs, enabled_irqs; 3254 3255 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bxt); 3256 hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK; 3257 3258 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs); 3259 3260 __bxt_hpd_detection_setup(dev_priv, enabled_irqs); 3261 } 3262 3263 static void ibx_irq_postinstall(struct drm_device *dev) 3264 { 3265 struct drm_i915_private *dev_priv = to_i915(dev); 3266 u32 mask; 3267 3268 if (HAS_PCH_NOP(dev_priv)) 3269 return; 3270 3271 if (HAS_PCH_IBX(dev_priv)) 3272 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON; 3273 else 3274 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT; 3275 3276 gen5_assert_iir_is_zero(dev_priv, SDEIIR); 3277 I915_WRITE(SDEIMR, ~mask); 3278 3279 if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) || 3280 HAS_PCH_LPT(dev_priv)) 3281 ibx_hpd_detection_setup(dev_priv); 3282 else 3283 spt_hpd_detection_setup(dev_priv); 3284 } 3285 3286 static void gen5_gt_irq_postinstall(struct drm_device *dev) 3287 { 3288 struct drm_i915_private *dev_priv = to_i915(dev); 3289 u32 pm_irqs, gt_irqs; 3290 3291 pm_irqs = gt_irqs = 0; 3292 3293 dev_priv->gt_irq_mask = ~0; 3294 if (HAS_L3_DPF(dev_priv)) { 3295 
/* L3 parity interrupt is always unmasked. */ 3296 dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev_priv); 3297 gt_irqs |= GT_PARITY_ERROR(dev_priv); 3298 } 3299 3300 gt_irqs |= GT_RENDER_USER_INTERRUPT; 3301 if (IS_GEN5(dev_priv)) { 3302 gt_irqs |= ILK_BSD_USER_INTERRUPT; 3303 } else { 3304 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT; 3305 } 3306 3307 GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs); 3308 3309 if (INTEL_GEN(dev_priv) >= 6) { 3310 /* 3311 * RPS interrupts will get enabled/disabled on demand when RPS 3312 * itself is enabled/disabled. 3313 */ 3314 if (HAS_VEBOX(dev_priv)) { 3315 pm_irqs |= PM_VEBOX_USER_INTERRUPT; 3316 dev_priv->pm_ier |= PM_VEBOX_USER_INTERRUPT; 3317 } 3318 3319 dev_priv->pm_imr = 0xffffffff; 3320 GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_imr, pm_irqs); 3321 } 3322 } 3323 3324 static int ironlake_irq_postinstall(struct drm_device *dev) 3325 { 3326 struct drm_i915_private *dev_priv = to_i915(dev); 3327 u32 display_mask, extra_mask; 3328 3329 if (INTEL_GEN(dev_priv) >= 7) { 3330 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | 3331 DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB | 3332 DE_PLANEB_FLIP_DONE_IVB | 3333 DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB); 3334 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB | 3335 DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB | 3336 DE_DP_A_HOTPLUG_IVB); 3337 } else { 3338 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | 3339 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE | 3340 DE_AUX_CHANNEL_A | 3341 DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE | 3342 DE_POISON); 3343 extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT | 3344 DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN | 3345 DE_DP_A_HOTPLUG); 3346 } 3347 3348 dev_priv->irq_mask = ~display_mask; 3349 3350 I915_WRITE(HWSTAM, 0xeffe); 3351 3352 ibx_irq_pre_postinstall(dev); 3353 3354 GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask); 3355 3356 gen5_gt_irq_postinstall(dev); 3357 3358 ilk_hpd_detection_setup(dev_priv); 3359 3360 ibx_irq_postinstall(dev); 3361 3362 if (IS_IRONLAKE_M(dev_priv)) { 3363 /* Enable PCU event interrupts 3364 * 3365 * spinlocking not required here for correctness since interrupt 3366 * setup is guaranteed to run in single-threaded context. But we 3367 * need it to make the assert_spin_locked happy. 
*/
3368 spin_lock_irq(&dev_priv->irq_lock);
3369 ilk_enable_display_irq(dev_priv, DE_PCU_EVENT);
3370 spin_unlock_irq(&dev_priv->irq_lock);
3371 }
3372 
3373 return 0;
3374 }
3375 
3376 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
3377 {
3378 lockdep_assert_held(&dev_priv->irq_lock);
3379 
3380 if (dev_priv->display_irqs_enabled)
3381 return;
3382 
3383 dev_priv->display_irqs_enabled = true;
3384 
3385 if (intel_irqs_enabled(dev_priv)) {
3386 vlv_display_irq_reset(dev_priv);
3387 vlv_display_irq_postinstall(dev_priv);
3388 }
3389 }
3390 
3391 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
3392 {
3393 lockdep_assert_held(&dev_priv->irq_lock);
3394 
3395 if (!dev_priv->display_irqs_enabled)
3396 return;
3397 
3398 dev_priv->display_irqs_enabled = false;
3399 
3400 if (intel_irqs_enabled(dev_priv))
3401 vlv_display_irq_reset(dev_priv);
3402 }
3403 
3404 
3405 static int valleyview_irq_postinstall(struct drm_device *dev)
3406 {
3407 struct drm_i915_private *dev_priv = to_i915(dev);
3408 
3409 gen5_gt_irq_postinstall(dev);
3410 
3411 spin_lock_irq(&dev_priv->irq_lock);
3412 if (dev_priv->display_irqs_enabled)
3413 vlv_display_irq_postinstall(dev_priv);
3414 spin_unlock_irq(&dev_priv->irq_lock);
3415 
3416 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
3417 POSTING_READ(VLV_MASTER_IER);
3418 
3419 return 0;
3420 }
3421 
3422 static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
3423 {
3424 /* These are interrupts we'll toggle with the ring mask register */
3425 uint32_t gt_interrupts[] = {
3426 GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
3427 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
3428 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
3429 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
3430 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3431 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3432 GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
3433 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
3434 0,
3435 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
3436 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
3437 };
3438 
3439 if (HAS_L3_DPF(dev_priv))
3440 gt_interrupts[0] |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
3441 
3442 dev_priv->pm_ier = 0x0;
3443 dev_priv->pm_imr = ~dev_priv->pm_ier;
3444 GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
3445 GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
3446 /*
3447 * RPS interrupts will get enabled/disabled on demand when RPS itself
3448 * is enabled/disabled. Same will be the case for GuC interrupts.
3449 */ 3450 GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_imr, dev_priv->pm_ier); 3451 GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]); 3452 } 3453 3454 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) 3455 { 3456 uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE; 3457 uint32_t de_pipe_enables; 3458 u32 de_port_masked = GEN8_AUX_CHANNEL_A; 3459 u32 de_port_enables; 3460 u32 de_misc_masked = GEN8_DE_MISC_GSE; 3461 enum pipe pipe; 3462 3463 if (INTEL_INFO(dev_priv)->gen >= 9) { 3464 de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE | 3465 GEN9_DE_PIPE_IRQ_FAULT_ERRORS; 3466 de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C | 3467 GEN9_AUX_CHANNEL_D; 3468 if (IS_GEN9_LP(dev_priv)) 3469 de_port_masked |= BXT_DE_PORT_GMBUS; 3470 } else { 3471 de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE | 3472 GEN8_DE_PIPE_IRQ_FAULT_ERRORS; 3473 } 3474 3475 de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK | 3476 GEN8_PIPE_FIFO_UNDERRUN; 3477 3478 de_port_enables = de_port_masked; 3479 if (IS_GEN9_LP(dev_priv)) 3480 de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK; 3481 else if (IS_BROADWELL(dev_priv)) 3482 de_port_enables |= GEN8_PORT_DP_A_HOTPLUG; 3483 3484 dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked; 3485 dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked; 3486 dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked; 3487 3488 for_each_pipe(dev_priv, pipe) 3489 if (intel_display_power_is_enabled(dev_priv, 3490 POWER_DOMAIN_PIPE(pipe))) 3491 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe, 3492 dev_priv->de_irq_mask[pipe], 3493 de_pipe_enables); 3494 3495 GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables); 3496 GEN5_IRQ_INIT(GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked); 3497 3498 if (IS_GEN9_LP(dev_priv)) 3499 bxt_hpd_detection_setup(dev_priv); 3500 else if (IS_BROADWELL(dev_priv)) 3501 ilk_hpd_detection_setup(dev_priv); 3502 } 3503 3504 static int gen8_irq_postinstall(struct drm_device *dev) 3505 { 3506 struct drm_i915_private *dev_priv = to_i915(dev); 3507 3508 if (HAS_PCH_SPLIT(dev_priv)) 3509 ibx_irq_pre_postinstall(dev); 3510 3511 gen8_gt_irq_postinstall(dev_priv); 3512 gen8_de_irq_postinstall(dev_priv); 3513 3514 if (HAS_PCH_SPLIT(dev_priv)) 3515 ibx_irq_postinstall(dev); 3516 3517 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 3518 POSTING_READ(GEN8_MASTER_IRQ); 3519 3520 return 0; 3521 } 3522 3523 static int cherryview_irq_postinstall(struct drm_device *dev) 3524 { 3525 struct drm_i915_private *dev_priv = to_i915(dev); 3526 3527 gen8_gt_irq_postinstall(dev_priv); 3528 3529 spin_lock_irq(&dev_priv->irq_lock); 3530 if (dev_priv->display_irqs_enabled) 3531 vlv_display_irq_postinstall(dev_priv); 3532 spin_unlock_irq(&dev_priv->irq_lock); 3533 3534 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 3535 POSTING_READ(GEN8_MASTER_IRQ); 3536 3537 return 0; 3538 } 3539 3540 static void gen8_irq_uninstall(struct drm_device *dev) 3541 { 3542 struct drm_i915_private *dev_priv = to_i915(dev); 3543 3544 if (!dev_priv) 3545 return; 3546 3547 gen8_irq_reset(dev); 3548 } 3549 3550 static void valleyview_irq_uninstall(struct drm_device *dev) 3551 { 3552 struct drm_i915_private *dev_priv = to_i915(dev); 3553 3554 if (!dev_priv) 3555 return; 3556 3557 I915_WRITE(VLV_MASTER_IER, 0); 3558 POSTING_READ(VLV_MASTER_IER); 3559 3560 gen5_gt_irq_reset(dev_priv); 3561 3562 I915_WRITE(HWSTAM, 0xffffffff); 3563 3564 spin_lock_irq(&dev_priv->irq_lock); 3565 if (dev_priv->display_irqs_enabled) 3566 vlv_display_irq_reset(dev_priv); 3567 spin_unlock_irq(&dev_priv->irq_lock); 3568 } 3569 3570 static void 
cherryview_irq_uninstall(struct drm_device *dev) 3571 { 3572 struct drm_i915_private *dev_priv = to_i915(dev); 3573 3574 if (!dev_priv) 3575 return; 3576 3577 I915_WRITE(GEN8_MASTER_IRQ, 0); 3578 POSTING_READ(GEN8_MASTER_IRQ); 3579 3580 gen8_gt_irq_reset(dev_priv); 3581 3582 GEN5_IRQ_RESET(GEN8_PCU_); 3583 3584 spin_lock_irq(&dev_priv->irq_lock); 3585 if (dev_priv->display_irqs_enabled) 3586 vlv_display_irq_reset(dev_priv); 3587 spin_unlock_irq(&dev_priv->irq_lock); 3588 } 3589 3590 static void ironlake_irq_uninstall(struct drm_device *dev) 3591 { 3592 struct drm_i915_private *dev_priv = to_i915(dev); 3593 3594 if (!dev_priv) 3595 return; 3596 3597 ironlake_irq_reset(dev); 3598 } 3599 3600 static void i8xx_irq_preinstall(struct drm_device * dev) 3601 { 3602 struct drm_i915_private *dev_priv = to_i915(dev); 3603 int pipe; 3604 3605 for_each_pipe(dev_priv, pipe) 3606 I915_WRITE(PIPESTAT(pipe), 0); 3607 I915_WRITE16(IMR, 0xffff); 3608 I915_WRITE16(IER, 0x0); 3609 POSTING_READ16(IER); 3610 } 3611 3612 static int i8xx_irq_postinstall(struct drm_device *dev) 3613 { 3614 struct drm_i915_private *dev_priv = to_i915(dev); 3615 3616 I915_WRITE16(EMR, 3617 ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); 3618 3619 /* Unmask the interrupts that we always want on. */ 3620 dev_priv->irq_mask = 3621 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3622 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3623 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3624 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT); 3625 I915_WRITE16(IMR, dev_priv->irq_mask); 3626 3627 I915_WRITE16(IER, 3628 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3629 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3630 I915_USER_INTERRUPT); 3631 POSTING_READ16(IER); 3632 3633 /* Interrupt setup is already guaranteed to be single-threaded, this is 3634 * just to make the assert_spin_locked check happy. */ 3635 spin_lock_irq(&dev_priv->irq_lock); 3636 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 3637 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 3638 spin_unlock_irq(&dev_priv->irq_lock); 3639 3640 return 0; 3641 } 3642 3643 /* 3644 * Returns true when a page flip has completed. 3645 */ 3646 static bool i8xx_handle_vblank(struct drm_i915_private *dev_priv, 3647 int plane, int pipe, u32 iir) 3648 { 3649 u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane); 3650 3651 if (!intel_pipe_handle_vblank(dev_priv, pipe)) 3652 return false; 3653 3654 if ((iir & flip_pending) == 0) 3655 goto check_page_flip; 3656 3657 /* We detect FlipDone by looking for the change in PendingFlip from '1' 3658 * to '0' on the following vblank, i.e. IIR has the Pendingflip 3659 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence 3660 * the flip is completed (no longer pending). Since this doesn't raise 3661 * an interrupt per se, we watch for the change at vblank. 
3662 */ 3663 if (I915_READ16(ISR) & flip_pending) 3664 goto check_page_flip; 3665 3666 intel_finish_page_flip_cs(dev_priv, pipe); 3667 return true; 3668 3669 check_page_flip: 3670 intel_check_page_flip(dev_priv, pipe); 3671 return false; 3672 } 3673 3674 static irqreturn_t i8xx_irq_handler(int irq, void *arg) 3675 { 3676 struct drm_device *dev = arg; 3677 struct drm_i915_private *dev_priv = to_i915(dev); 3678 u16 iir, new_iir; 3679 u32 pipe_stats[2]; 3680 int pipe; 3681 u16 flip_mask = 3682 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3683 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 3684 irqreturn_t ret; 3685 3686 if (!intel_irqs_enabled(dev_priv)) 3687 return IRQ_NONE; 3688 3689 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 3690 disable_rpm_wakeref_asserts(dev_priv); 3691 3692 ret = IRQ_NONE; 3693 iir = I915_READ16(IIR); 3694 if (iir == 0) 3695 goto out; 3696 3697 while (iir & ~flip_mask) { 3698 /* Can't rely on pipestat interrupt bit in iir as it might 3699 * have been cleared after the pipestat interrupt was received. 3700 * It doesn't set the bit in iir again, but it still produces 3701 * interrupts (for non-MSI). 3702 */ 3703 spin_lock(&dev_priv->irq_lock); 3704 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 3705 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir); 3706 3707 for_each_pipe(dev_priv, pipe) { 3708 i915_reg_t reg = PIPESTAT(pipe); 3709 pipe_stats[pipe] = I915_READ(reg); 3710 3711 /* 3712 * Clear the PIPE*STAT regs before the IIR 3713 */ 3714 if (pipe_stats[pipe] & 0x8000ffff) 3715 I915_WRITE(reg, pipe_stats[pipe]); 3716 } 3717 spin_unlock(&dev_priv->irq_lock); 3718 3719 I915_WRITE16(IIR, iir & ~flip_mask); 3720 new_iir = I915_READ16(IIR); /* Flush posted writes */ 3721 3722 if (iir & I915_USER_INTERRUPT) 3723 notify_ring(dev_priv->engine[RCS]); 3724 3725 for_each_pipe(dev_priv, pipe) { 3726 int plane = pipe; 3727 if (HAS_FBC(dev_priv)) 3728 plane = !plane; 3729 3730 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS && 3731 i8xx_handle_vblank(dev_priv, plane, pipe, iir)) 3732 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane); 3733 3734 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 3735 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 3736 3737 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 3738 intel_cpu_fifo_underrun_irq_handler(dev_priv, 3739 pipe); 3740 } 3741 3742 iir = new_iir; 3743 } 3744 ret = IRQ_HANDLED; 3745 3746 out: 3747 enable_rpm_wakeref_asserts(dev_priv); 3748 3749 return ret; 3750 } 3751 3752 static void i8xx_irq_uninstall(struct drm_device * dev) 3753 { 3754 struct drm_i915_private *dev_priv = to_i915(dev); 3755 int pipe; 3756 3757 for_each_pipe(dev_priv, pipe) { 3758 /* Clear enable bits; then clear status bits */ 3759 I915_WRITE(PIPESTAT(pipe), 0); 3760 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); 3761 } 3762 I915_WRITE16(IMR, 0xffff); 3763 I915_WRITE16(IER, 0x0); 3764 I915_WRITE16(IIR, I915_READ16(IIR)); 3765 } 3766 3767 static void i915_irq_preinstall(struct drm_device * dev) 3768 { 3769 struct drm_i915_private *dev_priv = to_i915(dev); 3770 int pipe; 3771 3772 if (I915_HAS_HOTPLUG(dev_priv)) { 3773 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); 3774 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3775 } 3776 3777 I915_WRITE16(HWSTAM, 0xeffe); 3778 for_each_pipe(dev_priv, pipe) 3779 I915_WRITE(PIPESTAT(pipe), 0); 3780 I915_WRITE(IMR, 0xffffffff); 3781 I915_WRITE(IER, 0x0); 3782 POSTING_READ(IER); 3783 } 3784 3785 static int i915_irq_postinstall(struct drm_device *dev) 3786 { 
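/* Set EMR so that only page table and memory refresh errors are reported,
 * unmask the always-wanted display events in IMR, enable them (plus the
 * render user interrupt) in IER, and add the port hotplug interrupt only
 * when the platform has hotplug support. */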
3787 struct drm_i915_private *dev_priv = to_i915(dev); 3788 u32 enable_mask; 3789 3790 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); 3791 3792 /* Unmask the interrupts that we always want on. */ 3793 dev_priv->irq_mask = 3794 ~(I915_ASLE_INTERRUPT | 3795 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3796 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3797 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3798 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT); 3799 3800 enable_mask = 3801 I915_ASLE_INTERRUPT | 3802 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3803 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3804 I915_USER_INTERRUPT; 3805 3806 if (I915_HAS_HOTPLUG(dev_priv)) { 3807 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); 3808 POSTING_READ(PORT_HOTPLUG_EN); 3809 3810 /* Enable in IER... */ 3811 enable_mask |= I915_DISPLAY_PORT_INTERRUPT; 3812 /* and unmask in IMR */ 3813 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT; 3814 } 3815 3816 I915_WRITE(IMR, dev_priv->irq_mask); 3817 I915_WRITE(IER, enable_mask); 3818 POSTING_READ(IER); 3819 3820 i915_enable_asle_pipestat(dev_priv); 3821 3822 /* Interrupt setup is already guaranteed to be single-threaded, this is 3823 * just to make the assert_spin_locked check happy. */ 3824 spin_lock_irq(&dev_priv->irq_lock); 3825 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 3826 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 3827 spin_unlock_irq(&dev_priv->irq_lock); 3828 3829 return 0; 3830 } 3831 3832 /* 3833 * Returns true when a page flip has completed. 3834 */ 3835 static bool i915_handle_vblank(struct drm_i915_private *dev_priv, 3836 int plane, int pipe, u32 iir) 3837 { 3838 u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane); 3839 3840 if (!intel_pipe_handle_vblank(dev_priv, pipe)) 3841 return false; 3842 3843 if ((iir & flip_pending) == 0) 3844 goto check_page_flip; 3845 3846 /* We detect FlipDone by looking for the change in PendingFlip from '1' 3847 * to '0' on the following vblank, i.e. IIR has the Pendingflip 3848 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence 3849 * the flip is completed (no longer pending). Since this doesn't raise 3850 * an interrupt per se, we watch for the change at vblank. 3851 */ 3852 if (I915_READ(ISR) & flip_pending) 3853 goto check_page_flip; 3854 3855 intel_finish_page_flip_cs(dev_priv, pipe); 3856 return true; 3857 3858 check_page_flip: 3859 intel_check_page_flip(dev_priv, pipe); 3860 return false; 3861 } 3862 3863 static irqreturn_t i915_irq_handler(int irq, void *arg) 3864 { 3865 struct drm_device *dev = arg; 3866 struct drm_i915_private *dev_priv = to_i915(dev); 3867 u32 iir, new_iir, pipe_stats[I915_MAX_PIPES]; 3868 u32 flip_mask = 3869 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3870 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 3871 int pipe, ret = IRQ_NONE; 3872 3873 if (!intel_irqs_enabled(dev_priv)) 3874 return IRQ_NONE; 3875 3876 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 3877 disable_rpm_wakeref_asserts(dev_priv); 3878 3879 iir = I915_READ(IIR); 3880 do { 3881 bool irq_received = (iir & ~flip_mask) != 0; 3882 bool blc_event = false; 3883 3884 /* Can't rely on pipestat interrupt bit in iir as it might 3885 * have been cleared after the pipestat interrupt was received. 3886 * It doesn't set the bit in iir again, but it still produces 3887 * interrupts (for non-MSI). 
static irqreturn_t i915_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
	int pipe, ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	iir = I915_READ(IIR);
	do {
		bool irq_received = (iir & ~flip_mask) != 0;
		bool blc_event = false;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock(&dev_priv->irq_lock);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);

		for_each_pipe(dev_priv, pipe) {
			i915_reg_t reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/* Clear the PIPE*STAT regs before the IIR */
			if (pipe_stats[pipe] & 0x8000ffff) {
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		spin_unlock(&dev_priv->irq_lock);

		if (!irq_received)
			break;

		/* Consume port. Then clear IIR or we'll miss events */
		if (I915_HAS_HOTPLUG(dev_priv) &&
		    iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
			if (hotplug_status)
				i9xx_hpd_irq_handler(dev_priv, hotplug_status);
		}

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev_priv->engine[RCS]);

		for_each_pipe(dev_priv, pipe) {
			int plane = pipe;
			if (HAS_FBC(dev_priv))
				plane = !plane;

			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev_priv, plane, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev_priv, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
				intel_cpu_fifo_underrun_irq_handler(dev_priv,
								    pipe);
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev_priv);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero. If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read. This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		ret = IRQ_HANDLED;
		iir = new_iir;
	} while (iir & ~flip_mask);

	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}

static void i915_irq_uninstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe;

	if (I915_HAS_HOTPLUG(dev_priv)) {
		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xffff);
	for_each_pipe(dev_priv, pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	I915_WRITE(IIR, I915_READ(IIR));
}

static void i965_irq_preinstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe;

	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xeffe);
	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}

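/*
 * Gen4/G4X (i965) postinstall: like the gen3 version, but it also enables
 * the GMBUS pipestat event, programs per-platform error detection in EMR,
 * and, on G4X, the BSD ring user interrupt.
 */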
static int i965_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 enable_mask;
	u32 error_mask;

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
			       I915_DISPLAY_PORT_INTERRUPT |
			       I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
			       I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
			       I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			       I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
			       I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask = ~dev_priv->irq_mask;
	enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
	enable_mask |= I915_USER_INTERRUPT;

	if (IS_G4X(dev_priv))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	/*
	 * Enable some error detection, note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev_priv)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	I915_WRITE(EMR, error_mask);

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
	POSTING_READ(PORT_HOTPLUG_EN);

	i915_enable_asle_pipestat(dev_priv);

	return 0;
}

static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_en;

	lockdep_assert_held(&dev_priv->irq_lock);

	/* Note HDMI and DP share hotplug bits */
	/* enable bits are the same for all generations */
	hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915);
	/*
	 * Programming the CRT detection parameters tends to generate a
	 * spurious hotplug event about three seconds later. So just do it
	 * once.
	 */
	if (IS_G4X(dev_priv))
		hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
	hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;

	/* Ignore TV since it's buggy */
	i915_hotplug_interrupt_update_locked(dev_priv,
					     HOTPLUG_INT_EN_MASK |
					     CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
					     CRT_HOTPLUG_ACTIVATION_PERIOD_64,
					     hotplug_en);
}

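/*
 * Gen4/G4X (i965) interrupt handler. Same basic structure as the gen3
 * handler, but it also services the BSD ring user interrupt (G4X) and GMBUS
 * events, keys vblank handling off PIPE_START_VBLANK_INTERRUPT_STATUS, and
 * no longer needs the FBC plane/pipe swap.
 */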
static irqreturn_t i965_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 iir, new_iir;
	u32 pipe_stats[I915_MAX_PIPES];
	int ret = IRQ_NONE, pipe;
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	iir = I915_READ(IIR);

	for (;;) {
		bool irq_received = (iir & ~flip_mask) != 0;
		bool blc_event = false;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock(&dev_priv->irq_lock);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);

		for_each_pipe(dev_priv, pipe) {
			i915_reg_t reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		spin_unlock(&dev_priv->irq_lock);

		if (!irq_received)
			break;

		ret = IRQ_HANDLED;

		/* Consume port. Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
			if (hotplug_status)
				i9xx_hpd_irq_handler(dev_priv, hotplug_status);
		}

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev_priv->engine[RCS]);
		if (iir & I915_BSD_USER_INTERRUPT)
			notify_ring(dev_priv->engine[VCS]);

		for_each_pipe(dev_priv, pipe) {
			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev_priv, pipe, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev_priv, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
				intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev_priv);

		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
			gmbus_irq_handler(dev_priv);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero. If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read. This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		iir = new_iir;
	}

	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}

static void i965_irq_uninstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe;

	if (!dev_priv)
		return;

	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xffffffff);
	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe),
			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
	I915_WRITE(IIR, I915_READ(IIR));
}

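/*
 * Rough lifecycle of the interrupt code below (a sketch of the intended
 * ordering only, not the literal i915 load/teardown paths):
 *
 *	intel_irq_init(dev_priv);			// vtables, work items, timers
 *	intel_irq_install(dev_priv);			// request + enable the interrupt
 *	...
 *	intel_runtime_pm_disable_interrupts(dev_priv);	// runtime/system suspend
 *	intel_runtime_pm_enable_interrupts(dev_priv);	// runtime/system resume
 *	...
 *	intel_irq_uninstall(dev_priv);			// unregister and free everything
 */
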
/**
 * intel_irq_init - initializes irq support
 * @dev_priv: i915 device instance
 *
 * This function initializes all the irq support including work items, timers
 * and all the vtables. It does not set up the interrupt itself though.
 */
void intel_irq_init(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;

	intel_hpd_init_work(dev_priv);

	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);

	if (HAS_GUC_SCHED(dev_priv))
		dev_priv->pm_guc_events = GEN9_GUC_TO_HOST_INT_EVENT;

	/* Let's track the enabled rps events */
	if (IS_VALLEYVIEW(dev_priv))
		/* WaGsvRC0ResidencyMethod:vlv */
		dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
	else
		dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;

	dev_priv->rps.pm_intrmsk_mbz = 0;

	/*
	 * SNB, IVB and HSW can hard hang, while VLV and CHV may hard hang,
	 * on a looping batchbuffer if GEN6_PM_UP_EI_EXPIRED is masked.
	 *
	 * TODO: verify if this can be reproduced on VLV,CHV.
	 */
	if (INTEL_INFO(dev_priv)->gen <= 7)
		dev_priv->rps.pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED;

	if (INTEL_INFO(dev_priv)->gen >= 8)
		dev_priv->rps.pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;

	if (IS_GEN2(dev_priv)) {
		/* Gen2 doesn't have a hardware frame counter */
		dev->max_vblank_count = 0;
	} else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
		dev->driver->get_vblank_counter = g4x_get_vblank_counter;
	} else {
		dev->driver->get_vblank_counter = i915_get_vblank_counter;
		dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
	}

	/*
	 * Opt out of the vblank disable timer on everything except gen2.
	 * Gen2 doesn't have a hardware frame counter and so depends on
	 * vblank interrupts to produce sane vblank sequence numbers.
	 */
	if (!IS_GEN2(dev_priv))
		dev->vblank_disable_immediate = true;

	/* Most platforms treat the display irq block as an always-on
	 * power domain. vlv/chv can disable it at runtime and need
	 * special care to avoid writing any of the display block registers
	 * outside of the power domain. We defer setting up the display irqs
	 * in this case to the runtime pm.
	 */
	dev_priv->display_irqs_enabled = true;
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		dev_priv->display_irqs_enabled = false;

	dev_priv->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD;

	dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;

	if (IS_CHERRYVIEW(dev_priv)) {
		dev->driver->irq_handler = cherryview_irq_handler;
		dev->driver->irq_preinstall = cherryview_irq_preinstall;
		dev->driver->irq_postinstall = cherryview_irq_postinstall;
		dev->driver->irq_uninstall = cherryview_irq_uninstall;
		dev->driver->enable_vblank = i965_enable_vblank;
		dev->driver->disable_vblank = i965_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		dev->driver->irq_handler = valleyview_irq_handler;
		dev->driver->irq_preinstall = valleyview_irq_preinstall;
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
		dev->driver->irq_uninstall = valleyview_irq_uninstall;
		dev->driver->enable_vblank = i965_enable_vblank;
		dev->driver->disable_vblank = i965_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (INTEL_INFO(dev_priv)->gen >= 8) {
		dev->driver->irq_handler = gen8_irq_handler;
		dev->driver->irq_preinstall = gen8_irq_reset;
		dev->driver->irq_postinstall = gen8_irq_postinstall;
		dev->driver->irq_uninstall = gen8_irq_uninstall;
		dev->driver->enable_vblank = gen8_enable_vblank;
		dev->driver->disable_vblank = gen8_disable_vblank;
		if (IS_GEN9_LP(dev_priv))
			dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
		else if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv))
			dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
		else
			dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		dev->driver->irq_handler = ironlake_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_reset;
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ironlake_enable_vblank;
		dev->driver->disable_vblank = ironlake_disable_vblank;
		dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
	} else {
		if (IS_GEN2(dev_priv)) {
			dev->driver->irq_preinstall = i8xx_irq_preinstall;
			dev->driver->irq_postinstall = i8xx_irq_postinstall;
			dev->driver->irq_handler = i8xx_irq_handler;
			dev->driver->irq_uninstall = i8xx_irq_uninstall;
			dev->driver->enable_vblank = i8xx_enable_vblank;
			dev->driver->disable_vblank = i8xx_disable_vblank;
		} else if (IS_GEN3(dev_priv)) {
			dev->driver->irq_preinstall = i915_irq_preinstall;
			dev->driver->irq_postinstall = i915_irq_postinstall;
			dev->driver->irq_uninstall = i915_irq_uninstall;
			dev->driver->irq_handler = i915_irq_handler;
			dev->driver->enable_vblank = i8xx_enable_vblank;
			dev->driver->disable_vblank = i8xx_disable_vblank;
		} else {
			dev->driver->irq_preinstall = i965_irq_preinstall;
			dev->driver->irq_postinstall = i965_irq_postinstall;
			dev->driver->irq_uninstall = i965_irq_uninstall;
			dev->driver->irq_handler = i965_irq_handler;
			dev->driver->enable_vblank = i965_enable_vblank;
			dev->driver->disable_vblank = i965_disable_vblank;
		}
		if (I915_HAS_HOTPLUG(dev_priv))
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	}
}

/**
 * intel_irq_install - enables the hardware interrupt
 * @dev_priv: i915 device instance
 *
 * This function enables the hardware interrupt handling, but leaves hotplug
 * handling disabled. It is called after intel_irq_init().
 *
 * In the driver load and resume code we need working interrupts in a few
 * places but don't want to deal with the hassle of concurrent probe and
 * hotplug workers. Hence the split into this two-stage approach.
 */
int intel_irq_install(struct drm_i915_private *dev_priv)
{
	/*
	 * We enable some interrupt sources in our postinstall hooks, so mark
	 * interrupts as enabled _before_ actually enabling them to avoid
	 * special cases in our ordering checks.
	 */
	dev_priv->pm.irqs_enabled = true;

	return drm_irq_install(&dev_priv->drm, dev_priv->drm.pdev->irq);
}

/**
 * intel_irq_uninstall - finalizes all irq handling
 * @dev_priv: i915 device instance
 *
 * This stops interrupt and hotplug handling and unregisters and frees all
 * resources acquired in the init functions.
 */
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
{
	drm_irq_uninstall(&dev_priv->drm);
	intel_hpd_cancel_work(dev_priv);
	dev_priv->pm.irqs_enabled = false;
}

/**
 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
 * @dev_priv: i915 device instance
 *
 * This function is used to disable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->drm.driver->irq_uninstall(&dev_priv->drm);
	dev_priv->pm.irqs_enabled = false;
	synchronize_irq(dev_priv->drm.irq);
}

/**
 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
 * @dev_priv: i915 device instance
 *
 * This function is used to enable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->pm.irqs_enabled = true;
	dev_priv->drm.driver->irq_preinstall(&dev_priv->drm);
	dev_priv->drm.driver->irq_postinstall(&dev_priv->drm);
}