/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

/**
 * DOC: interrupt handling
 *
 * These functions provide the basic support for enabling and disabling the
 * interrupt handling. There's a lot more functionality in i915_irq.c
 * and related files, but that will be described in separate chapters.
 */

static const u32 hpd_ilk[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG,
};

static const u32 hpd_ivb[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
};

static const u32 hpd_bdw[HPD_NUM_PINS] = {
	[HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG,
};

static const u32 hpd_ibx[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_spt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
	[HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT
};

static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

/* BXT hpd list */
static const u32 hpd_bxt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = BXT_DE_PORT_HP_DDIA,
	[HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
	[HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
};

/* IIR can theoretically queue up two events. Be paranoid. */
#define GEN8_IRQ_RESET_NDX(type, which) do { \
	I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), 0); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
} while (0)

#define GEN5_IRQ_RESET(type) do { \
	I915_WRITE(type##IMR, 0xffffffff); \
	POSTING_READ(type##IMR); \
	I915_WRITE(type##IER, 0); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
} while (0)

/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
static void gen5_assert_iir_is_zero(struct drm_i915_private *dev_priv,
				    i915_reg_t reg)
{
	u32 val = I915_READ(reg);

	if (val == 0)
		return;

	WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
	     i915_mmio_reg_offset(reg), val);
	I915_WRITE(reg, 0xffffffff);
	POSTING_READ(reg);
	I915_WRITE(reg, 0xffffffff);
	POSTING_READ(reg);
}

#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
	gen5_assert_iir_is_zero(dev_priv, GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
	I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
} while (0)

#define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
	gen5_assert_iir_is_zero(dev_priv, type##IIR); \
	I915_WRITE(type##IER, (ier_val)); \
	I915_WRITE(type##IMR, (imr_val)); \
	POSTING_READ(type##IMR); \
} while (0)

static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);

/* For display hotplug interrupt */
static inline void
i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
				     uint32_t mask,
				     uint32_t bits)
{
	uint32_t val;

	lockdep_assert_held(&dev_priv->irq_lock);
	WARN_ON(bits & ~mask);

	val = I915_READ(PORT_HOTPLUG_EN);
	val &= ~mask;
	val |= bits;
	I915_WRITE(PORT_HOTPLUG_EN, val);
}

/**
 * i915_hotplug_interrupt_update - update hotplug interrupt enable
 * @dev_priv: driver private
 * @mask: bits to update
 * @bits: bits to enable
 * NOTE: the HPD enable bits are modified both inside and outside
 * of an interrupt context. To avoid that read-modify-write cycles
 * interfere, these bits are protected by a spinlock. Since this
 * function is usually not called from a context where the lock is
 * held already, this function acquires the lock itself. A non-locking
 * version is also available.
 */
void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
				   uint32_t mask,
				   uint32_t bits)
{
	spin_lock_irq(&dev_priv->irq_lock);
	i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
	spin_unlock_irq(&dev_priv->irq_lock);
}

/**
 * ilk_update_display_irq - update DEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ilk_update_display_irq(struct drm_i915_private *dev_priv,
			    uint32_t interrupt_mask,
			    uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->irq_mask) {
		dev_priv->irq_mask = new_val;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	lockdep_assert_held(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	dev_priv->gt_irq_mask &= ~interrupt_mask;
	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
}

void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, mask);
	POSTING_READ_FW(GTIMR);
}

void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, 0);
}
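
/*
 * On gen8+ the PM interrupts are handled through the second GT interrupt
 * register bank (GEN8_GT_IIR/IMR/IER(2)) rather than the dedicated
 * GEN6_PMIIR/PMIMR/PMIER registers, so the helpers below return the right
 * register for the running platform.
 */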

static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
}

static i915_reg_t gen6_pm_imr(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR;
}

static i915_reg_t gen6_pm_ier(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER;
}

/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	lockdep_assert_held(&dev_priv->irq_lock);

	new_val = dev_priv->pm_imr;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_imr) {
		dev_priv->pm_imr = new_val;
		I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_imr);
		POSTING_READ(gen6_pm_imr(dev_priv));
	}
}

void gen6_unmask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	snb_update_pm_irq(dev_priv, mask, mask);
}

static void __gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	snb_update_pm_irq(dev_priv, mask, 0);
}

void gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	__gen6_mask_pm_irq(dev_priv, mask);
}

void gen6_reset_pm_iir(struct drm_i915_private *dev_priv, u32 reset_mask)
{
	i915_reg_t reg = gen6_pm_iir(dev_priv);

	lockdep_assert_held(&dev_priv->irq_lock);

	I915_WRITE(reg, reset_mask);
	I915_WRITE(reg, reset_mask);
	POSTING_READ(reg);
}

void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, u32 enable_mask)
{
	lockdep_assert_held(&dev_priv->irq_lock);

	dev_priv->pm_ier |= enable_mask;
	I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->pm_ier);
	gen6_unmask_pm_irq(dev_priv, enable_mask);
	/* unmask_pm_irq provides an implicit barrier (POSTING_READ) */
}

void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, u32 disable_mask)
{
	lockdep_assert_held(&dev_priv->irq_lock);

	dev_priv->pm_ier &= ~disable_mask;
	__gen6_mask_pm_irq(dev_priv, disable_mask);
	I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->pm_ier);
	/* though a barrier is missing here, we don't really need one */
}

void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	gen6_reset_pm_iir(dev_priv, dev_priv->pm_rps_events);
	dev_priv->rps.pm_iir = 0;
	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
{
	if (READ_ONCE(dev_priv->rps.interrupts_enabled))
		return;

	spin_lock_irq(&dev_priv->irq_lock);
	WARN_ON_ONCE(dev_priv->rps.pm_iir);
	WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
	dev_priv->rps.interrupts_enabled = true;
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);

	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
{
	if (!READ_ONCE(dev_priv->rps.interrupts_enabled))
		return;

	spin_lock_irq(&dev_priv->irq_lock);
	dev_priv->rps.interrupts_enabled = false;

	I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0u));

	gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);

	spin_unlock_irq(&dev_priv->irq_lock);
	synchronize_irq(dev_priv->drm.irq);

	/* Now that we will not be generating any more work, flush any
	 * outstanding tasks. As we are called on the RPS idle path,
	 * we will reset the GPU to minimum frequencies, so the current
	 * state of the worker can be discarded.
	 */
	cancel_work_sync(&dev_priv->rps.work);
	gen6_reset_rps_interrupts(dev_priv);
}

void gen9_reset_guc_interrupts(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	gen6_reset_pm_iir(dev_priv, dev_priv->pm_guc_events);
	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	if (!dev_priv->guc.interrupts_enabled) {
		WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) &
			     dev_priv->pm_guc_events);
		dev_priv->guc.interrupts_enabled = true;
		gen6_enable_pm_irq(dev_priv, dev_priv->pm_guc_events);
	}
	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	dev_priv->guc.interrupts_enabled = false;

	gen6_disable_pm_irq(dev_priv, dev_priv->pm_guc_events);

	spin_unlock_irq(&dev_priv->irq_lock);
	synchronize_irq(dev_priv->drm.irq);

	gen9_reset_guc_interrupts(dev_priv);
}

/**
 * bdw_update_port_irq - update DE port interrupt
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
				uint32_t interrupt_mask,
				uint32_t enabled_irq_mask)
{
	uint32_t new_val;
	uint32_t old_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	old_val = I915_READ(GEN8_DE_PORT_IMR);

	new_val = old_val;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != old_val) {
		I915_WRITE(GEN8_DE_PORT_IMR, new_val);
		POSTING_READ(GEN8_DE_PORT_IMR);
	}
}

/**
 * bdw_update_pipe_irq - update DE pipe interrupt
 * @dev_priv: driver private
 * @pipe: pipe whose interrupt to update
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
			 enum pipe pipe,
			 uint32_t interrupt_mask,
			 uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->de_irq_mask[pipe];
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->de_irq_mask[pipe]) {
		dev_priv->de_irq_mask[pipe] = new_val;
		I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
		POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
	}
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
				  uint32_t interrupt_mask,
				  uint32_t enabled_irq_mask)
{
	uint32_t sdeimr = I915_READ(SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	lockdep_assert_held(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}

static void
__i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		       u32 enable_mask, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	lockdep_assert_held(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == enable_mask)
		return;

	dev_priv->pipestat_irq_mask[pipe] |= status_mask;

	/* Enable the interrupt, clear any pending status */
	pipestat |= enable_mask | status_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

static void
__i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
			u32 enable_mask, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	lockdep_assert_held(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == 0)
		return;

	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;

	pipestat &= ~enable_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}
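
/*
 * On VLV/CHV the PIPESTAT enable bits are not a uniform status << 16 copy of
 * the status bits: the sprite flip done and FIFO underrun bits need special
 * treatment, so the helper below derives the enable mask explicitly from the
 * status mask instead of just shifting it.
 */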

static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
{
	u32 enable_mask = status_mask << 16;

	/*
	 * On pipe A we don't support the PSR interrupt yet,
	 * on pipe B and C the same bit MBZ.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
		return 0;
	/*
	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
	 * A the same bit is for perf counters which we don't use either.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
		return 0;

	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

	return enable_mask;
}

void
i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		     u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		enable_mask = vlv_get_pipestat_enable_mask(&dev_priv->drm,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

void
i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		      u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		enable_mask = vlv_get_pipestat_enable_mask(&dev_priv->drm,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 * @dev_priv: i915 device private
 */
static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
{
	if (!dev_priv->opregion.asle || !IS_MOBILE(dev_priv))
		return;

	spin_lock_irq(&dev_priv->irq_lock);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
	if (INTEL_GEN(dev_priv) >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_STATUS);

	spin_unlock_irq(&dev_priv->irq_lock);
}

/*
 * This timing diagram depicts the video signal in and
 * around the vertical blanking period.
 *
 * Assumptions about the fictitious mode used in this example:
 *  vblank_start >= 3
 *  vsync_start = vblank_start + 1
 *  vsync_end = vblank_start + 2
 *  vtotal = vblank_start + 3
 *
 *           start of vblank:
 *           latch double buffered registers
 *           increment frame counter (ctg+)
 *           generate start of vblank interrupt (gen4+)
 *           |
 *           |          frame start:
 *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
 *           |          may be shifted forward 1-3 extra lines via PIPECONF
 *           |          |
 *           |          |  start of vsync:
 *           |          |  generate vsync interrupt
 *           |          |  |
 * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
 *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
 * ----va---> <-----------------vb--------------------> <--------va-------------
 *       |          |       <----vs----->                     |
 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
 *       |          |                                         |
 *       last visible pixel                                   first visible pixel
 *                  |                                         increment frame counter (gen3/4)
 *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
 *
 * x  = horizontal active
 * _  = horizontal blanking
 * hs = horizontal sync
 * va = vertical active
 * vb = vertical blanking
 * vs = vertical sync
 * vbs = vblank_start (number)
 *
 * Summary:
 * - most events happen at the start of horizontal sync
 * - frame start happens at the start of horizontal blank, 1-4 lines
 *   (depending on PIPECONF settings) after the start of vblank
 * - gen3/4 pixel and frame counter are synchronized with the start
 *   of horizontal active on the first line of vertical active
 */

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	i915_reg_t high_frame, low_frame;
	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
	const struct drm_display_mode *mode = &dev->vblank[pipe].hwmode;
	unsigned long irqflags;

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vbl_start = mode->crtc_vblank_start;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vbl_start = DIV_ROUND_UP(vbl_start, 2);

	/* Convert to pixel count */
	vbl_start *= htotal;

	/* Start of vblank event occurs at start of hsync */
	vbl_start -= htotal - hsync_start;

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK;
		low   = I915_READ_FW(low_frame);
		high2 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * The frame counter increments at beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}

static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
}

/* I915_READ_FW, only for fast reads of display block, no need for forcewake etc.
 */
static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct drm_display_mode *mode;
	struct drm_vblank_crtc *vblank;
	enum pipe pipe = crtc->pipe;
	int position, vtotal;

	if (!crtc->active)
		return -1;

	vblank = &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
	mode = &vblank->hwmode;

	vtotal = mode->crtc_vtotal;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vtotal /= 2;

	if (IS_GEN2(dev_priv))
		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
	else
		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

	/*
	 * On HSW, the DSL reg (0x70000) appears to return 0 if we
	 * read it just before the start of vblank. So try it again
	 * so we don't accidentally end up spanning a vblank frame
	 * increment, causing the pipe_update_end() code to squawk at us.
	 *
	 * The nature of this problem means we can't simply check the ISR
	 * bit and return the vblank start value; nor can we use the scanline
	 * debug register in the transcoder as it appears to have the same
	 * problem. We may need to extend this to include other platforms,
	 * but so far testing only shows the problem on HSW.
	 */
	if (HAS_DDI(dev_priv) && !position) {
		int i, temp;

		for (i = 0; i < 100; i++) {
			udelay(1);
			temp = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
			if (temp != position) {
				position = temp;
				break;
			}
		}
	}

	/*
	 * See update_scanline_offset() for the details on the
	 * scanline_offset adjustment.
	 */
	return (position + crtc->scanline_offset) % vtotal;
}

static bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
				     bool in_vblank_irq, int *vpos, int *hpos,
				     ktime_t *stime, ktime_t *etime,
				     const struct drm_display_mode *mode)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv,
								pipe);
	int position;
	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
	bool in_vbl = true;
	unsigned long irqflags;

	if (WARN_ON(!mode->crtc_clock)) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return false;
	}

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = __intel_get_crtc_scanline(intel_crtc);
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (I915_READ_FW(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;

		/*
		 * In interlaced modes, the pixel counter counts all pixels,
		 * so one field will have htotal more pixels. In order to avoid
		 * the reported position from jumping backwards when the pixel
		 * counter is beyond the length of the shorter field, just
		 * clamp the position to the length of the shorter field. This
		 * matches how the scanline counter based position works since
		 * the scanline counter doesn't count the two half lines.
		 */
		if (position >= vtotal)
			position = vtotal - 1;

		/*
		 * Start of vblank interrupt is triggered at start of hsync,
		 * just prior to the first active line of vblank. However we
		 * consider lines to start at the leading edge of horizontal
		 * active. So, should we get here before we've crossed into
		 * the horizontal active of the first line in vblank, we would
		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
		 * always add htotal-hsync_start to the current pixel position.
		 */
		position = (position + htotal - hsync_start) % vtotal;
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	in_vbl = position >= vbl_start && position < vbl_end;

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;

	if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
		*vpos = position;
		*hpos = 0;
	} else {
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	return true;
}

int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	unsigned long irqflags;
	int position;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	position = __intel_get_crtc_scanline(crtc);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	return position;
}
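
/*
 * Handle the ILK MEMINT_EVAL_CHG (DRPS) interrupt: compare the hardware busy
 * up/down averages against the configured min/max thresholds and step the
 * DRPS delay by one in the appropriate direction.
 */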

static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv)
{
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;

	spin_lock(&mchdev_lock);

	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev_priv, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	spin_unlock(&mchdev_lock);

	return;
}

static void notify_ring(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_request *rq = NULL;
	struct intel_wait *wait;

	atomic_inc(&engine->irq_count);
	set_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);

	spin_lock(&engine->breadcrumbs.irq_lock);
	wait = engine->breadcrumbs.irq_wait;
	if (wait) {
		/* We use a callback from the dma-fence to submit
		 * requests after waiting on our own requests. To
		 * ensure minimum delay in queuing the next request to
		 * hardware, signal the fence now rather than wait for
		 * the signaler to be woken up. We still wake up the
		 * waiter in order to handle the irq-seqno coherency
		 * issues (we may receive the interrupt before the
		 * seqno is written, see __i915_request_irq_complete())
		 * and to handle coalescing of multiple seqno updates
		 * and many waiters.
		 */
		if (i915_seqno_passed(intel_engine_get_seqno(engine),
				      wait->seqno) &&
		    !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
			      &wait->request->fence.flags))
			rq = i915_gem_request_get(wait->request);

		wake_up_process(wait->tsk);
	} else {
		__intel_engine_disarm_breadcrumbs(engine);
	}
	spin_unlock(&engine->breadcrumbs.irq_lock);

	if (rq) {
		dma_fence_signal(&rq->fence);
		i915_gem_request_put(rq);
	}

	trace_intel_engine_notify(engine, wait);
}

static void vlv_c0_read(struct drm_i915_private *dev_priv,
			struct intel_rps_ei *ei)
{
	ei->ktime = ktime_get_raw();
	ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT);
	ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
}

void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
{
	memset(&dev_priv->rps.ei, 0, sizeof(dev_priv->rps.ei));
}

static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	const struct intel_rps_ei *prev = &dev_priv->rps.ei;
	struct intel_rps_ei now;
	u32 events = 0;

	if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0)
		return 0;

	vlv_c0_read(dev_priv, &now);

	if (prev->ktime) {
		u64 time, c0;
		u32 render, media;

		time = ktime_us_delta(now.ktime, prev->ktime);

		time *= dev_priv->czclk_freq;

		/* Workload can be split between render + media,
		 * e.g. SwapBuffers being blitted in X after being rendered in
		 * mesa. To account for this we need to combine both engines
		 * into our activity counter.
		 */
		render = now.render_c0 - prev->render_c0;
		media = now.media_c0 - prev->media_c0;
		c0 = max(render, media);
		c0 *= 1000 * 100 << 8; /* to usecs and scale to threshold% */

		if (c0 > time * dev_priv->rps.up_threshold)
			events = GEN6_PM_RP_UP_THRESHOLD;
		else if (c0 < time * dev_priv->rps.down_threshold)
			events = GEN6_PM_RP_DOWN_THRESHOLD;
	}

	dev_priv->rps.ei = now;
	return events;
}

static bool any_waiters(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, dev_priv, id)
		if (intel_engine_has_waiter(engine))
			return true;

	return false;
}
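
/*
 * Bottom half for the RPS interrupts: pick up the PM IIR bits accumulated by
 * gen6_rps_irq_handler() (plus any client boost request), turn up/down
 * threshold events into a frequency adjustment and program the new frequency
 * under rps.hw_lock.
 */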

static void gen6_pm_rps_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, rps.work);
	bool client_boost = false;
	int new_delay, adj, min, max;
	u32 pm_iir = 0;

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->rps.interrupts_enabled) {
		pm_iir = fetch_and_zero(&dev_priv->rps.pm_iir);
		client_boost = fetch_and_zero(&dev_priv->rps.client_boost);
	}
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Make sure we didn't queue anything we're not going to process. */
	WARN_ON(pm_iir & ~dev_priv->pm_rps_events);
	if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
		goto out;

	mutex_lock(&dev_priv->rps.hw_lock);

	pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);

	adj = dev_priv->rps.last_adj;
	new_delay = dev_priv->rps.cur_freq;
	min = dev_priv->rps.min_freq_softlimit;
	max = dev_priv->rps.max_freq_softlimit;
	if (client_boost || any_waiters(dev_priv))
		max = dev_priv->rps.max_freq;
	if (client_boost && new_delay < dev_priv->rps.boost_freq) {
		new_delay = dev_priv->rps.boost_freq;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else /* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;

		if (new_delay >= dev_priv->rps.max_freq_softlimit)
			adj = 0;
	} else if (client_boost || any_waiters(dev_priv)) {
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
		if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;
		else if (dev_priv->rps.cur_freq > dev_priv->rps.min_freq_softlimit)
			new_delay = dev_priv->rps.min_freq_softlimit;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
		if (adj < 0)
			adj *= 2;
		else /* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;

		if (new_delay <= dev_priv->rps.min_freq_softlimit)
			adj = 0;
	} else { /* unknown event */
		adj = 0;
	}

	dev_priv->rps.last_adj = adj;

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	new_delay += adj;
	new_delay = clamp_t(int, new_delay, min, max);

	if (intel_set_rps(dev_priv, new_delay)) {
		DRM_DEBUG_DRIVER("Failed to set new GPU frequency\n");
		dev_priv->rps.last_adj = 0;
	}

	mutex_unlock(&dev_priv->rps.hw_lock);

out:
	/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->rps.interrupts_enabled)
		gen6_unmask_pm_irq(dev_priv, dev_priv->pm_rps_events);
	spin_unlock_irq(&dev_priv->irq_lock);
}


/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * the same row is more likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	uint32_t misccpctl;
	uint8_t slice = 0;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->drm.struct_mutex);

	/* If we've screwed up tracking, just let the interrupt fire again */
	if (WARN_ON(!dev_priv->l3_parity.which_slice))
		goto out;

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		i915_reg_t reg;

		slice--;
		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1<<slice);

		reg = GEN7_L3CDERRST1(slice);

		error_status = I915_READ(reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		POSTING_READ(reg);

		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

		kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj,
				   KOBJ_CHANGE, parity_event);

		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			  slice, row, bank, subbank);

		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
	}

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

out:
	WARN_ON(dev_priv->l3_parity.which_slice);
	spin_lock_irq(&dev_priv->irq_lock);
	gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
	spin_unlock_irq(&dev_priv->irq_lock);

	mutex_unlock(&dev_priv->drm.struct_mutex);
}

static void ivybridge_parity_error_irq_handler(struct drm_i915_private *dev_priv,
					       u32 iir)
{
	if (!HAS_L3_DPF(dev_priv))
		return;

	spin_lock(&dev_priv->irq_lock);
	gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
	spin_unlock(&dev_priv->irq_lock);

	iir &= GT_PARITY_ERROR(dev_priv);
	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
		dev_priv->l3_parity.which_slice |= 1 << 1;

	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
		dev_priv->l3_parity.which_slice |= 1 << 0;

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}

static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir & GT_RENDER_USER_INTERRUPT)
		notify_ring(dev_priv->engine[RCS]);
	if (gt_iir & ILK_BSD_USER_INTERRUPT)
		notify_ring(dev_priv->engine[VCS]);
}

static void snb_gt_irq_handler(struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir & GT_RENDER_USER_INTERRUPT)
		notify_ring(dev_priv->engine[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(dev_priv->engine[VCS]);
	if (gt_iir & GT_BLT_USER_INTERRUPT)
		notify_ring(dev_priv->engine[BCS]);

	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
		      GT_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
		DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);

	if (gt_iir & GT_PARITY_ERROR(dev_priv))
		ivybridge_parity_error_irq_handler(dev_priv, gt_iir);
}

static void
gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir, int test_shift)
{
	bool tasklet = false;

	if (iir & (GT_CONTEXT_SWITCH_INTERRUPT << test_shift)) {
		if (port_count(&engine->execlist_port[0])) {
			__set_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
			tasklet = true;
		}
	}

	if (iir & (GT_RENDER_USER_INTERRUPT << test_shift)) {
		notify_ring(engine);
		tasklet |= i915.enable_guc_submission;
	}

	if (tasklet)
		tasklet_hi_schedule(&engine->irq_tasklet);
}

static irqreturn_t gen8_gt_irq_ack(struct drm_i915_private *dev_priv,
				   u32 master_ctl,
				   u32 gt_iir[4])
{
	irqreturn_t ret = IRQ_NONE;

	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
		gt_iir[0] = I915_READ_FW(GEN8_GT_IIR(0));
		if (gt_iir[0]) {
			I915_WRITE_FW(GEN8_GT_IIR(0), gt_iir[0]);
			ret = IRQ_HANDLED;
		} else
			DRM_ERROR("The master control interrupt lied (GT0)!\n");
	}

	if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
		gt_iir[1] = I915_READ_FW(GEN8_GT_IIR(1));
		if (gt_iir[1]) {
			I915_WRITE_FW(GEN8_GT_IIR(1), gt_iir[1]);
			ret = IRQ_HANDLED;
		} else
			DRM_ERROR("The master control interrupt lied (GT1)!\n");
	}

	if (master_ctl & GEN8_GT_VECS_IRQ) {
		gt_iir[3] = I915_READ_FW(GEN8_GT_IIR(3));
		if (gt_iir[3]) {
			I915_WRITE_FW(GEN8_GT_IIR(3), gt_iir[3]);
			ret = IRQ_HANDLED;
		} else
			DRM_ERROR("The master control interrupt lied (GT3)!\n");
	}

	if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
		gt_iir[2] = I915_READ_FW(GEN8_GT_IIR(2));
		if (gt_iir[2] & (dev_priv->pm_rps_events |
				 dev_priv->pm_guc_events)) {
			I915_WRITE_FW(GEN8_GT_IIR(2),
				      gt_iir[2] & (dev_priv->pm_rps_events |
						   dev_priv->pm_guc_events));
			ret = IRQ_HANDLED;
		} else
			DRM_ERROR("The master control interrupt lied (PM)!\n");
	}

	return ret;
}

static void gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
				u32 gt_iir[4])
{
	if (gt_iir[0]) {
		gen8_cs_irq_handler(dev_priv->engine[RCS],
				    gt_iir[0], GEN8_RCS_IRQ_SHIFT);
		gen8_cs_irq_handler(dev_priv->engine[BCS],
				    gt_iir[0], GEN8_BCS_IRQ_SHIFT);
	}

	if (gt_iir[1]) {
		gen8_cs_irq_handler(dev_priv->engine[VCS],
				    gt_iir[1], GEN8_VCS1_IRQ_SHIFT);
		gen8_cs_irq_handler(dev_priv->engine[VCS2],
				    gt_iir[1], GEN8_VCS2_IRQ_SHIFT);
	}

	if (gt_iir[3])
		gen8_cs_irq_handler(dev_priv->engine[VECS],
				    gt_iir[3], GEN8_VECS_IRQ_SHIFT);

	if (gt_iir[2] & dev_priv->pm_rps_events)
		gen6_rps_irq_handler(dev_priv, gt_iir[2]);

	if (gt_iir[2] & dev_priv->pm_guc_events)
		gen9_guc_irq_handler(dev_priv, gt_iir[2]);
}
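
/*
 * The *_port_hotplug_long_detect() helpers below decode, per platform, whether
 * a given port's hotplug trigger was a long pulse or a short one, based on the
 * second (dig_hotplug_reg) value passed to intel_get_hpd_pins().
 */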

static bool bxt_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_A:
		return val & PORTA_HOTPLUG_LONG_DETECT;
	case PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool spt_port_hotplug2_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_E:
		return val & PORTE_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool spt_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_A:
		return val & PORTA_HOTPLUG_LONG_DETECT;
	case PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	case PORT_D:
		return val & PORTD_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool ilk_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_A:
		return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool pch_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	case PORT_D:
		return val & PORTD_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool i9xx_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_B:
		return val & PORTB_HOTPLUG_INT_LONG_PULSE;
	case PORT_C:
		return val & PORTC_HOTPLUG_INT_LONG_PULSE;
	case PORT_D:
		return val & PORTD_HOTPLUG_INT_LONG_PULSE;
	default:
		return false;
	}
}

/*
 * Get a bit mask of pins that have triggered, and which ones may be long.
 * This can be called multiple times with the same masks to accumulate
 * hotplug detection results from several registers.
 *
 * Note that the caller is expected to zero out the masks initially.
 */
static void intel_get_hpd_pins(u32 *pin_mask, u32 *long_mask,
			       u32 hotplug_trigger, u32 dig_hotplug_reg,
			       const u32 hpd[HPD_NUM_PINS],
			       bool long_pulse_detect(enum port port, u32 val))
{
	enum port port;
	int i;

	for_each_hpd_pin(i) {
		if ((hpd[i] & hotplug_trigger) == 0)
			continue;

		*pin_mask |= BIT(i);

		if (!intel_hpd_pin_to_port(i, &port))
			continue;

		if (long_pulse_detect(port, dig_hotplug_reg))
			*long_mask |= BIT(i);
	}

	DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x\n",
			 hotplug_trigger, dig_hotplug_reg, *pin_mask);

}

static void gmbus_irq_handler(struct drm_i915_private *dev_priv)
{
	wake_up_all(&dev_priv->gmbus_wait_queue);
}

static void dp_aux_irq_handler(struct drm_i915_private *dev_priv)
{
	wake_up_all(&dev_priv->gmbus_wait_queue);
}

#if defined(CONFIG_DEBUG_FS)
static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
					 enum pipe pipe,
					 uint32_t crc0, uint32_t crc1,
					 uint32_t crc2, uint32_t crc3,
					 uint32_t crc4)
{
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
	struct intel_pipe_crc_entry *entry;
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
	struct drm_driver *driver = dev_priv->drm.driver;
	uint32_t crcs[5];
	int head, tail;

	spin_lock(&pipe_crc->lock);
	if (pipe_crc->source) {
		if (!pipe_crc->entries) {
			spin_unlock(&pipe_crc->lock);
			DRM_DEBUG_KMS("spurious interrupt\n");
			return;
		}

		head = pipe_crc->head;
		tail = pipe_crc->tail;

		if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
			spin_unlock(&pipe_crc->lock);
			DRM_ERROR("CRC buffer overflowing\n");
			return;
		}

		entry = &pipe_crc->entries[head];

		entry->frame = driver->get_vblank_counter(&dev_priv->drm, pipe);
		entry->crc[0] = crc0;
		entry->crc[1] = crc1;
		entry->crc[2] = crc2;
		entry->crc[3] = crc3;
		entry->crc[4] = crc4;

		head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
		pipe_crc->head = head;

		spin_unlock(&pipe_crc->lock);

		wake_up_interruptible(&pipe_crc->wq);
	} else {
		/*
		 * For some not yet identified reason, the first CRC is
		 * bonkers. So let's just wait for the next vblank and read
		 * out the buggy result.
		 *
		 * On CHV sometimes the second CRC is bonkers as well, so
		 * don't trust that one either.
		 */
		if (pipe_crc->skipped == 0 ||
		    (IS_CHERRYVIEW(dev_priv) && pipe_crc->skipped == 1)) {
			pipe_crc->skipped++;
			spin_unlock(&pipe_crc->lock);
			return;
		}
		spin_unlock(&pipe_crc->lock);
		crcs[0] = crc0;
		crcs[1] = crc1;
		crcs[2] = crc2;
		crcs[3] = crc3;
		crcs[4] = crc4;
		drm_crtc_add_crc_entry(&crtc->base, true,
				       drm_accurate_vblank_count(&crtc->base),
				       crcs);
	}
}
#else
static inline void
display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
			     enum pipe pipe,
			     uint32_t crc0, uint32_t crc1,
			     uint32_t crc2, uint32_t crc3,
			     uint32_t crc4) {}
#endif


static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	display_pipe_crc_irq_handler(dev_priv, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     0, 0, 0, 0);
}

static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	display_pipe_crc_irq_handler(dev_priv, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
}

static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	uint32_t res1, res2;

	if (INTEL_GEN(dev_priv) >= 3)
		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
	else
		res1 = 0;

	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
	else
		res2 = 0;

	display_pipe_crc_irq_handler(dev_priv, pipe,
				     I915_READ(PIPE_CRC_RES_RED(pipe)),
				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
				     res1, res2);
}

/* The RPS events need forcewake, so we add them to a work queue and mask their
 * IMR bits until the work is done. Other interrupts can be processed without
 * the work queue.
 */
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	if (pm_iir & dev_priv->pm_rps_events) {
		spin_lock(&dev_priv->irq_lock);
		gen6_mask_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
		if (dev_priv->rps.interrupts_enabled) {
			dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
			schedule_work(&dev_priv->rps.work);
		}
		spin_unlock(&dev_priv->irq_lock);
	}

	if (INTEL_INFO(dev_priv)->gen >= 8)
		return;

	if (HAS_VEBOX(dev_priv)) {
		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
			notify_ring(dev_priv->engine[VECS]);

		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
	}
}

static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 gt_iir)
{
	if (gt_iir & GEN9_GUC_TO_HOST_INT_EVENT) {
		/* Sample the log buffer flush related bits & clear them out now
		 * itself from the message identity register to minimize the
		 * probability of losing a flush interrupt, when there are back
		 * to back flush interrupts.
		 * There can be a new flush interrupt, for different log buffer
		 * type (like for ISR), whilst Host is handling one (for DPC).
		 * Since same bit is used in message register for ISR & DPC, it
		 * could happen that GuC sets the bit for 2nd interrupt but Host
		 * clears out the bit on handling the 1st interrupt.
		 */
		u32 msg, flush;

		msg = I915_READ(SOFT_SCRATCH(15));
		flush = msg & (INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED |
			       INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER);
		if (flush) {
			/* Clear the message bits that are handled */
			I915_WRITE(SOFT_SCRATCH(15), msg & ~flush);

			/* Handle flush interrupt in bottom half */
			queue_work(dev_priv->guc.log.runtime.flush_wq,
				   &dev_priv->guc.log.runtime.flush_work);

			dev_priv->guc.log.flush_interrupt_count++;
		} else {
			/* Not clearing unhandled event bits won't result in
			 * re-triggering of the interrupt.
			 */
		}
	}
}

static bool intel_pipe_handle_vblank(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	bool ret;

	ret = drm_handle_vblank(&dev_priv->drm, pipe);
	if (ret)
		intel_finish_page_flip_mmio(dev_priv, pipe);

	return ret;
}

static void valleyview_pipestat_irq_ack(struct drm_i915_private *dev_priv,
					u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	int pipe;

	spin_lock(&dev_priv->irq_lock);

	if (!dev_priv->display_irqs_enabled) {
		spin_unlock(&dev_priv->irq_lock);
		return;
	}

	for_each_pipe(dev_priv, pipe) {
		i915_reg_t reg;
		u32 mask, iir_bit = 0;

		/*
		 * PIPESTAT bits get signalled even when the interrupt is
		 * disabled with the mask bits, and some of the status bits do
		 * not generate interrupts at all (like the underrun bit). Hence
		 * we need to be careful that we only handle what we want to
		 * handle.
		 */

		/* fifo underruns are filtered in the underrun handler.
		 */
		mask = PIPE_FIFO_UNDERRUN_STATUS;

		switch (pipe) {
		case PIPE_A:
			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
			break;
		case PIPE_B:
			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
			break;
		case PIPE_C:
			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
			break;
		}
		if (iir & iir_bit)
			mask |= dev_priv->pipestat_irq_mask[pipe];

		if (!mask)
			continue;

		reg = PIPESTAT(pipe);
		mask |= PIPESTAT_INT_ENABLE_MASK;
		pipe_stats[pipe] = I915_READ(reg) & mask;

		/*
		 * Clear the PIPE*STAT regs before the IIR
		 */
		if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS |
					PIPESTAT_INT_STATUS_MASK))
			I915_WRITE(reg, pipe_stats[pipe]);
	}
	spin_unlock(&dev_priv->irq_lock);
}

static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
					    u32 pipe_stats[I915_MAX_PIPES])
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
		    intel_pipe_handle_vblank(dev_priv, pipe))
			intel_check_page_flip(dev_priv, pipe);

		if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV)
			intel_finish_page_flip_cs(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		gmbus_irq_handler(dev_priv);
}

static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
{
	u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

	if (hotplug_status)
		I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);

	return hotplug_status;
}

static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv,
				 u32 hotplug_status)
{
	u32 pin_mask = 0, long_mask = 0;

	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;

		if (hotplug_trigger) {
			intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
					   hotplug_trigger, hpd_status_g4x,
					   i9xx_port_hotplug_long_detect);

			intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
		}

		if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
			dp_aux_irq_handler(dev_priv);
	} else {
		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

		if (hotplug_trigger) {
			intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
					   hotplug_trigger, hpd_status_i915,
					   i9xx_port_hotplug_long_detect);
			intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
		}
	}
}

static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = to_i915(dev);
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	do {
		u32 iir, gt_iir, pm_iir;
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 hotplug_status = 0;
		u32 ier = 0;

		gt_iir = I915_READ(GTIIR);
		pm_iir = I915_READ(GEN6_PMIIR);
		iir = I915_READ(VLV_IIR);

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			break;

1883 ret = IRQ_HANDLED; 1884 1885 /* 1886 * Theory on interrupt generation, based on empirical evidence: 1887 * 1888 * x = ((VLV_IIR & VLV_IER) || 1889 * (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) && 1890 * (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE))); 1891 * 1892 * A CPU interrupt will only be raised when 'x' has a 0->1 edge. 1893 * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to 1894 * guarantee the CPU interrupt will be raised again even if we 1895 * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR 1896 * bits this time around. 1897 */ 1898 I915_WRITE(VLV_MASTER_IER, 0); 1899 ier = I915_READ(VLV_IER); 1900 I915_WRITE(VLV_IER, 0); 1901 1902 if (gt_iir) 1903 I915_WRITE(GTIIR, gt_iir); 1904 if (pm_iir) 1905 I915_WRITE(GEN6_PMIIR, pm_iir); 1906 1907 if (iir & I915_DISPLAY_PORT_INTERRUPT) 1908 hotplug_status = i9xx_hpd_irq_ack(dev_priv); 1909 1910 /* Call regardless, as some status bits might not be 1911 * signalled in iir */ 1912 valleyview_pipestat_irq_ack(dev_priv, iir, pipe_stats); 1913 1914 if (iir & (I915_LPE_PIPE_A_INTERRUPT | 1915 I915_LPE_PIPE_B_INTERRUPT)) 1916 intel_lpe_audio_irq_handler(dev_priv); 1917 1918 /* 1919 * VLV_IIR is single buffered, and reflects the level 1920 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last. 1921 */ 1922 if (iir) 1923 I915_WRITE(VLV_IIR, iir); 1924 1925 I915_WRITE(VLV_IER, ier); 1926 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); 1927 POSTING_READ(VLV_MASTER_IER); 1928 1929 if (gt_iir) 1930 snb_gt_irq_handler(dev_priv, gt_iir); 1931 if (pm_iir) 1932 gen6_rps_irq_handler(dev_priv, pm_iir); 1933 1934 if (hotplug_status) 1935 i9xx_hpd_irq_handler(dev_priv, hotplug_status); 1936 1937 valleyview_pipestat_irq_handler(dev_priv, pipe_stats); 1938 } while (0); 1939 1940 enable_rpm_wakeref_asserts(dev_priv); 1941 1942 return ret; 1943 } 1944 1945 static irqreturn_t cherryview_irq_handler(int irq, void *arg) 1946 { 1947 struct drm_device *dev = arg; 1948 struct drm_i915_private *dev_priv = to_i915(dev); 1949 irqreturn_t ret = IRQ_NONE; 1950 1951 if (!intel_irqs_enabled(dev_priv)) 1952 return IRQ_NONE; 1953 1954 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 1955 disable_rpm_wakeref_asserts(dev_priv); 1956 1957 do { 1958 u32 master_ctl, iir; 1959 u32 gt_iir[4] = {}; 1960 u32 pipe_stats[I915_MAX_PIPES] = {}; 1961 u32 hotplug_status = 0; 1962 u32 ier = 0; 1963 1964 master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL; 1965 iir = I915_READ(VLV_IIR); 1966 1967 if (master_ctl == 0 && iir == 0) 1968 break; 1969 1970 ret = IRQ_HANDLED; 1971 1972 /* 1973 * Theory on interrupt generation, based on empirical evidence: 1974 * 1975 * x = ((VLV_IIR & VLV_IER) || 1976 * ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) && 1977 * (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL))); 1978 * 1979 * A CPU interrupt will only be raised when 'x' has a 0->1 edge. 1980 * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to 1981 * guarantee the CPU interrupt will be raised again even if we 1982 * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL 1983 * bits this time around. 
1984 */ 1985 I915_WRITE(GEN8_MASTER_IRQ, 0); 1986 ier = I915_READ(VLV_IER); 1987 I915_WRITE(VLV_IER, 0); 1988 1989 gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir); 1990 1991 if (iir & I915_DISPLAY_PORT_INTERRUPT) 1992 hotplug_status = i9xx_hpd_irq_ack(dev_priv); 1993 1994 /* Call regardless, as some status bits might not be 1995 * signalled in iir */ 1996 valleyview_pipestat_irq_ack(dev_priv, iir, pipe_stats); 1997 1998 if (iir & (I915_LPE_PIPE_A_INTERRUPT | 1999 I915_LPE_PIPE_B_INTERRUPT | 2000 I915_LPE_PIPE_C_INTERRUPT)) 2001 intel_lpe_audio_irq_handler(dev_priv); 2002 2003 /* 2004 * VLV_IIR is single buffered, and reflects the level 2005 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last. 2006 */ 2007 if (iir) 2008 I915_WRITE(VLV_IIR, iir); 2009 2010 I915_WRITE(VLV_IER, ier); 2011 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 2012 POSTING_READ(GEN8_MASTER_IRQ); 2013 2014 gen8_gt_irq_handler(dev_priv, gt_iir); 2015 2016 if (hotplug_status) 2017 i9xx_hpd_irq_handler(dev_priv, hotplug_status); 2018 2019 valleyview_pipestat_irq_handler(dev_priv, pipe_stats); 2020 } while (0); 2021 2022 enable_rpm_wakeref_asserts(dev_priv); 2023 2024 return ret; 2025 } 2026 2027 static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv, 2028 u32 hotplug_trigger, 2029 const u32 hpd[HPD_NUM_PINS]) 2030 { 2031 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; 2032 2033 /* 2034 * Somehow the PCH doesn't seem to really ack the interrupt to the CPU 2035 * unless we touch the hotplug register, even if hotplug_trigger is 2036 * zero. Not acking leads to "The master control interrupt lied (SDE)!" 2037 * errors. 2038 */ 2039 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); 2040 if (!hotplug_trigger) { 2041 u32 mask = PORTA_HOTPLUG_STATUS_MASK | 2042 PORTD_HOTPLUG_STATUS_MASK | 2043 PORTC_HOTPLUG_STATUS_MASK | 2044 PORTB_HOTPLUG_STATUS_MASK; 2045 dig_hotplug_reg &= ~mask; 2046 } 2047 2048 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); 2049 if (!hotplug_trigger) 2050 return; 2051 2052 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, 2053 dig_hotplug_reg, hpd, 2054 pch_port_hotplug_long_detect); 2055 2056 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2057 } 2058 2059 static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) 2060 { 2061 int pipe; 2062 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK; 2063 2064 ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ibx); 2065 2066 if (pch_iir & SDE_AUDIO_POWER_MASK) { 2067 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >> 2068 SDE_AUDIO_POWER_SHIFT); 2069 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", 2070 port_name(port)); 2071 } 2072 2073 if (pch_iir & SDE_AUX_MASK) 2074 dp_aux_irq_handler(dev_priv); 2075 2076 if (pch_iir & SDE_GMBUS) 2077 gmbus_irq_handler(dev_priv); 2078 2079 if (pch_iir & SDE_AUDIO_HDCP_MASK) 2080 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n"); 2081 2082 if (pch_iir & SDE_AUDIO_TRANS_MASK) 2083 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n"); 2084 2085 if (pch_iir & SDE_POISON) 2086 DRM_ERROR("PCH poison interrupt\n"); 2087 2088 if (pch_iir & SDE_FDI_MASK) 2089 for_each_pipe(dev_priv, pipe) 2090 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 2091 pipe_name(pipe), 2092 I915_READ(FDI_RX_IIR(pipe))); 2093 2094 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE)) 2095 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n"); 2096 2097 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR)) 2098 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n"); 2099 2100 if (pch_iir & 
SDE_TRANSA_FIFO_UNDER) 2101 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A); 2102 2103 if (pch_iir & SDE_TRANSB_FIFO_UNDER) 2104 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B); 2105 } 2106 2107 static void ivb_err_int_handler(struct drm_i915_private *dev_priv) 2108 { 2109 u32 err_int = I915_READ(GEN7_ERR_INT); 2110 enum pipe pipe; 2111 2112 if (err_int & ERR_INT_POISON) 2113 DRM_ERROR("Poison interrupt\n"); 2114 2115 for_each_pipe(dev_priv, pipe) { 2116 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) 2117 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2118 2119 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) { 2120 if (IS_IVYBRIDGE(dev_priv)) 2121 ivb_pipe_crc_irq_handler(dev_priv, pipe); 2122 else 2123 hsw_pipe_crc_irq_handler(dev_priv, pipe); 2124 } 2125 } 2126 2127 I915_WRITE(GEN7_ERR_INT, err_int); 2128 } 2129 2130 static void cpt_serr_int_handler(struct drm_i915_private *dev_priv) 2131 { 2132 u32 serr_int = I915_READ(SERR_INT); 2133 2134 if (serr_int & SERR_INT_POISON) 2135 DRM_ERROR("PCH poison interrupt\n"); 2136 2137 if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN) 2138 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A); 2139 2140 if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN) 2141 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B); 2142 2143 if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN) 2144 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_C); 2145 2146 I915_WRITE(SERR_INT, serr_int); 2147 } 2148 2149 static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) 2150 { 2151 int pipe; 2152 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; 2153 2154 ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_cpt); 2155 2156 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) { 2157 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> 2158 SDE_AUDIO_POWER_SHIFT_CPT); 2159 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n", 2160 port_name(port)); 2161 } 2162 2163 if (pch_iir & SDE_AUX_MASK_CPT) 2164 dp_aux_irq_handler(dev_priv); 2165 2166 if (pch_iir & SDE_GMBUS_CPT) 2167 gmbus_irq_handler(dev_priv); 2168 2169 if (pch_iir & SDE_AUDIO_CP_REQ_CPT) 2170 DRM_DEBUG_DRIVER("Audio CP request interrupt\n"); 2171 2172 if (pch_iir & SDE_AUDIO_CP_CHG_CPT) 2173 DRM_DEBUG_DRIVER("Audio CP change interrupt\n"); 2174 2175 if (pch_iir & SDE_FDI_MASK_CPT) 2176 for_each_pipe(dev_priv, pipe) 2177 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 2178 pipe_name(pipe), 2179 I915_READ(FDI_RX_IIR(pipe))); 2180 2181 if (pch_iir & SDE_ERROR_CPT) 2182 cpt_serr_int_handler(dev_priv); 2183 } 2184 2185 static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) 2186 { 2187 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT & 2188 ~SDE_PORTE_HOTPLUG_SPT; 2189 u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT; 2190 u32 pin_mask = 0, long_mask = 0; 2191 2192 if (hotplug_trigger) { 2193 u32 dig_hotplug_reg; 2194 2195 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); 2196 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); 2197 2198 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, 2199 dig_hotplug_reg, hpd_spt, 2200 spt_port_hotplug_long_detect); 2201 } 2202 2203 if (hotplug2_trigger) { 2204 u32 dig_hotplug_reg; 2205 2206 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2); 2207 I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg); 2208 2209 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug2_trigger, 2210 dig_hotplug_reg, hpd_spt, 2211 spt_port_hotplug2_long_detect); 2212 } 2213 2214 if (pin_mask) 2215 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 
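/* GMBUS (GPIO/I2C) completion is reported through the same south display IIR, independent of any hotplug trigger. */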
2216 2217 if (pch_iir & SDE_GMBUS_CPT) 2218 gmbus_irq_handler(dev_priv); 2219 } 2220 2221 static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv, 2222 u32 hotplug_trigger, 2223 const u32 hpd[HPD_NUM_PINS]) 2224 { 2225 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; 2226 2227 dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL); 2228 I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg); 2229 2230 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, 2231 dig_hotplug_reg, hpd, 2232 ilk_port_hotplug_long_detect); 2233 2234 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2235 } 2236 2237 static void ilk_display_irq_handler(struct drm_i915_private *dev_priv, 2238 u32 de_iir) 2239 { 2240 enum pipe pipe; 2241 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG; 2242 2243 if (hotplug_trigger) 2244 ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ilk); 2245 2246 if (de_iir & DE_AUX_CHANNEL_A) 2247 dp_aux_irq_handler(dev_priv); 2248 2249 if (de_iir & DE_GSE) 2250 intel_opregion_asle_intr(dev_priv); 2251 2252 if (de_iir & DE_POISON) 2253 DRM_ERROR("Poison interrupt\n"); 2254 2255 for_each_pipe(dev_priv, pipe) { 2256 if (de_iir & DE_PIPE_VBLANK(pipe) && 2257 intel_pipe_handle_vblank(dev_priv, pipe)) 2258 intel_check_page_flip(dev_priv, pipe); 2259 2260 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe)) 2261 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2262 2263 if (de_iir & DE_PIPE_CRC_DONE(pipe)) 2264 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 2265 2266 /* plane/pipes map 1:1 on ilk+ */ 2267 if (de_iir & DE_PLANE_FLIP_DONE(pipe)) 2268 intel_finish_page_flip_cs(dev_priv, pipe); 2269 } 2270 2271 /* check event from PCH */ 2272 if (de_iir & DE_PCH_EVENT) { 2273 u32 pch_iir = I915_READ(SDEIIR); 2274 2275 if (HAS_PCH_CPT(dev_priv)) 2276 cpt_irq_handler(dev_priv, pch_iir); 2277 else 2278 ibx_irq_handler(dev_priv, pch_iir); 2279 2280 /* should clear PCH hotplug event before clear CPU irq */ 2281 I915_WRITE(SDEIIR, pch_iir); 2282 } 2283 2284 if (IS_GEN5(dev_priv) && de_iir & DE_PCU_EVENT) 2285 ironlake_rps_change_irq_handler(dev_priv); 2286 } 2287 2288 static void ivb_display_irq_handler(struct drm_i915_private *dev_priv, 2289 u32 de_iir) 2290 { 2291 enum pipe pipe; 2292 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB; 2293 2294 if (hotplug_trigger) 2295 ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ivb); 2296 2297 if (de_iir & DE_ERR_INT_IVB) 2298 ivb_err_int_handler(dev_priv); 2299 2300 if (de_iir & DE_AUX_CHANNEL_A_IVB) 2301 dp_aux_irq_handler(dev_priv); 2302 2303 if (de_iir & DE_GSE_IVB) 2304 intel_opregion_asle_intr(dev_priv); 2305 2306 for_each_pipe(dev_priv, pipe) { 2307 if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) && 2308 intel_pipe_handle_vblank(dev_priv, pipe)) 2309 intel_check_page_flip(dev_priv, pipe); 2310 2311 /* plane/pipes map 1:1 on ilk+ */ 2312 if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) 2313 intel_finish_page_flip_cs(dev_priv, pipe); 2314 } 2315 2316 /* check event from PCH */ 2317 if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) { 2318 u32 pch_iir = I915_READ(SDEIIR); 2319 2320 cpt_irq_handler(dev_priv, pch_iir); 2321 2322 /* clear PCH hotplug event before clear CPU irq */ 2323 I915_WRITE(SDEIIR, pch_iir); 2324 } 2325 } 2326 2327 /* 2328 * To handle irqs with the minimum potential races with fresh interrupts, we: 2329 * 1 - Disable Master Interrupt Control. 2330 * 2 - Find the source(s) of the interrupt. 2331 * 3 - Clear the Interrupt Identity bits (IIR). 2332 * 4 - Process the interrupt(s) that had bits set in the IIRs. 
2333 * 5 - Re-enable Master Interrupt Control. 2334 */ 2335 static irqreturn_t ironlake_irq_handler(int irq, void *arg) 2336 { 2337 struct drm_device *dev = arg; 2338 struct drm_i915_private *dev_priv = to_i915(dev); 2339 u32 de_iir, gt_iir, de_ier, sde_ier = 0; 2340 irqreturn_t ret = IRQ_NONE; 2341 2342 if (!intel_irqs_enabled(dev_priv)) 2343 return IRQ_NONE; 2344 2345 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2346 disable_rpm_wakeref_asserts(dev_priv); 2347 2348 /* disable master interrupt before clearing iir */ 2349 de_ier = I915_READ(DEIER); 2350 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); 2351 POSTING_READ(DEIER); 2352 2353 /* Disable south interrupts. We'll only write to SDEIIR once, so further 2354 * interrupts will be stored on its back queue, and then we'll be 2355 * able to process them after we restore SDEIER (as soon as we restore 2356 * it, we'll get an interrupt if SDEIIR still has something to process 2357 * due to its back queue). */ 2358 if (!HAS_PCH_NOP(dev_priv)) { 2359 sde_ier = I915_READ(SDEIER); 2360 I915_WRITE(SDEIER, 0); 2361 POSTING_READ(SDEIER); 2362 } 2363 2364 /* Find, clear, then process each source of interrupt */ 2365 2366 gt_iir = I915_READ(GTIIR); 2367 if (gt_iir) { 2368 I915_WRITE(GTIIR, gt_iir); 2369 ret = IRQ_HANDLED; 2370 if (INTEL_GEN(dev_priv) >= 6) 2371 snb_gt_irq_handler(dev_priv, gt_iir); 2372 else 2373 ilk_gt_irq_handler(dev_priv, gt_iir); 2374 } 2375 2376 de_iir = I915_READ(DEIIR); 2377 if (de_iir) { 2378 I915_WRITE(DEIIR, de_iir); 2379 ret = IRQ_HANDLED; 2380 if (INTEL_GEN(dev_priv) >= 7) 2381 ivb_display_irq_handler(dev_priv, de_iir); 2382 else 2383 ilk_display_irq_handler(dev_priv, de_iir); 2384 } 2385 2386 if (INTEL_GEN(dev_priv) >= 6) { 2387 u32 pm_iir = I915_READ(GEN6_PMIIR); 2388 if (pm_iir) { 2389 I915_WRITE(GEN6_PMIIR, pm_iir); 2390 ret = IRQ_HANDLED; 2391 gen6_rps_irq_handler(dev_priv, pm_iir); 2392 } 2393 } 2394 2395 I915_WRITE(DEIER, de_ier); 2396 POSTING_READ(DEIER); 2397 if (!HAS_PCH_NOP(dev_priv)) { 2398 I915_WRITE(SDEIER, sde_ier); 2399 POSTING_READ(SDEIER); 2400 } 2401 2402 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2403 enable_rpm_wakeref_asserts(dev_priv); 2404 2405 return ret; 2406 } 2407 2408 static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv, 2409 u32 hotplug_trigger, 2410 const u32 hpd[HPD_NUM_PINS]) 2411 { 2412 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; 2413 2414 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); 2415 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); 2416 2417 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, 2418 dig_hotplug_reg, hpd, 2419 bxt_port_hotplug_long_detect); 2420 2421 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2422 } 2423 2424 static irqreturn_t 2425 gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) 2426 { 2427 irqreturn_t ret = IRQ_NONE; 2428 u32 iir; 2429 enum pipe pipe; 2430 2431 if (master_ctl & GEN8_DE_MISC_IRQ) { 2432 iir = I915_READ(GEN8_DE_MISC_IIR); 2433 if (iir) { 2434 I915_WRITE(GEN8_DE_MISC_IIR, iir); 2435 ret = IRQ_HANDLED; 2436 if (iir & GEN8_DE_MISC_GSE) 2437 intel_opregion_asle_intr(dev_priv); 2438 else 2439 DRM_ERROR("Unexpected DE Misc interrupt\n"); 2440 } 2441 else 2442 DRM_ERROR("The master control interrupt lied (DE MISC)!\n"); 2443 } 2444 2445 if (master_ctl & GEN8_DE_PORT_IRQ) { 2446 iir = I915_READ(GEN8_DE_PORT_IIR); 2447 if (iir) { 2448 u32 tmp_mask; 2449 bool found = false; 2450 2451 I915_WRITE(GEN8_DE_PORT_IIR, iir); 2452 ret =
IRQ_HANDLED; 2453 2454 tmp_mask = GEN8_AUX_CHANNEL_A; 2455 if (INTEL_INFO(dev_priv)->gen >= 9) 2456 tmp_mask |= GEN9_AUX_CHANNEL_B | 2457 GEN9_AUX_CHANNEL_C | 2458 GEN9_AUX_CHANNEL_D; 2459 2460 if (iir & tmp_mask) { 2461 dp_aux_irq_handler(dev_priv); 2462 found = true; 2463 } 2464 2465 if (IS_GEN9_LP(dev_priv)) { 2466 tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK; 2467 if (tmp_mask) { 2468 bxt_hpd_irq_handler(dev_priv, tmp_mask, 2469 hpd_bxt); 2470 found = true; 2471 } 2472 } else if (IS_BROADWELL(dev_priv)) { 2473 tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG; 2474 if (tmp_mask) { 2475 ilk_hpd_irq_handler(dev_priv, 2476 tmp_mask, hpd_bdw); 2477 found = true; 2478 } 2479 } 2480 2481 if (IS_GEN9_LP(dev_priv) && (iir & BXT_DE_PORT_GMBUS)) { 2482 gmbus_irq_handler(dev_priv); 2483 found = true; 2484 } 2485 2486 if (!found) 2487 DRM_ERROR("Unexpected DE Port interrupt\n"); 2488 } 2489 else 2490 DRM_ERROR("The master control interrupt lied (DE PORT)!\n"); 2491 } 2492 2493 for_each_pipe(dev_priv, pipe) { 2494 u32 flip_done, fault_errors; 2495 2496 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe))) 2497 continue; 2498 2499 iir = I915_READ(GEN8_DE_PIPE_IIR(pipe)); 2500 if (!iir) { 2501 DRM_ERROR("The master control interrupt lied (DE PIPE)!\n"); 2502 continue; 2503 } 2504 2505 ret = IRQ_HANDLED; 2506 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir); 2507 2508 if (iir & GEN8_PIPE_VBLANK && 2509 intel_pipe_handle_vblank(dev_priv, pipe)) 2510 intel_check_page_flip(dev_priv, pipe); 2511 2512 flip_done = iir; 2513 if (INTEL_INFO(dev_priv)->gen >= 9) 2514 flip_done &= GEN9_PIPE_PLANE1_FLIP_DONE; 2515 else 2516 flip_done &= GEN8_PIPE_PRIMARY_FLIP_DONE; 2517 2518 if (flip_done) 2519 intel_finish_page_flip_cs(dev_priv, pipe); 2520 2521 if (iir & GEN8_PIPE_CDCLK_CRC_DONE) 2522 hsw_pipe_crc_irq_handler(dev_priv, pipe); 2523 2524 if (iir & GEN8_PIPE_FIFO_UNDERRUN) 2525 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2526 2527 fault_errors = iir; 2528 if (INTEL_INFO(dev_priv)->gen >= 9) 2529 fault_errors &= GEN9_DE_PIPE_IRQ_FAULT_ERRORS; 2530 else 2531 fault_errors &= GEN8_DE_PIPE_IRQ_FAULT_ERRORS; 2532 2533 if (fault_errors) 2534 DRM_ERROR("Fault errors on pipe %c: 0x%08x\n", 2535 pipe_name(pipe), 2536 fault_errors); 2537 } 2538 2539 if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) && 2540 master_ctl & GEN8_DE_PCH_IRQ) { 2541 /* 2542 * FIXME(BDW): Assume for now that the new interrupt handling 2543 * scheme also closed the SDE interrupt handling race we've seen 2544 * on older pch-split platforms. But this needs testing. 2545 */ 2546 iir = I915_READ(SDEIIR); 2547 if (iir) { 2548 I915_WRITE(SDEIIR, iir); 2549 ret = IRQ_HANDLED; 2550 2551 if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv) || 2552 HAS_PCH_CNP(dev_priv)) 2553 spt_irq_handler(dev_priv, iir); 2554 else 2555 cpt_irq_handler(dev_priv, iir); 2556 } else { 2557 /* 2558 * Like on previous PCH there seems to be something 2559 * fishy going on with forwarding PCH interrupts. 
2560 */ 2561 DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n"); 2562 } 2563 } 2564 2565 return ret; 2566 } 2567 2568 static irqreturn_t gen8_irq_handler(int irq, void *arg) 2569 { 2570 struct drm_device *dev = arg; 2571 struct drm_i915_private *dev_priv = to_i915(dev); 2572 u32 master_ctl; 2573 u32 gt_iir[4] = {}; 2574 irqreturn_t ret; 2575 2576 if (!intel_irqs_enabled(dev_priv)) 2577 return IRQ_NONE; 2578 2579 master_ctl = I915_READ_FW(GEN8_MASTER_IRQ); 2580 master_ctl &= ~GEN8_MASTER_IRQ_CONTROL; 2581 if (!master_ctl) 2582 return IRQ_NONE; 2583 2584 I915_WRITE_FW(GEN8_MASTER_IRQ, 0); 2585 2586 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2587 disable_rpm_wakeref_asserts(dev_priv); 2588 2589 /* Find, clear, then process each source of interrupt */ 2590 ret = gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir); 2591 gen8_gt_irq_handler(dev_priv, gt_iir); 2592 ret |= gen8_de_irq_handler(dev_priv, master_ctl); 2593 2594 I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 2595 POSTING_READ_FW(GEN8_MASTER_IRQ); 2596 2597 enable_rpm_wakeref_asserts(dev_priv); 2598 2599 return ret; 2600 } 2601 2602 /** 2603 * i915_reset_and_wakeup - do process context error handling work 2604 * @dev_priv: i915 device private 2605 * 2606 * Fire an error uevent so userspace can see that a hang or error 2607 * was detected. 2608 */ 2609 static void i915_reset_and_wakeup(struct drm_i915_private *dev_priv) 2610 { 2611 struct kobject *kobj = &dev_priv->drm.primary->kdev->kobj; 2612 char *error_event[] = { I915_ERROR_UEVENT "=1", NULL }; 2613 char *reset_event[] = { I915_RESET_UEVENT "=1", NULL }; 2614 char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL }; 2615 2616 kobject_uevent_env(kobj, KOBJ_CHANGE, error_event); 2617 2618 DRM_DEBUG_DRIVER("resetting chip\n"); 2619 kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event); 2620 2621 intel_prepare_reset(dev_priv); 2622 2623 set_bit(I915_RESET_HANDOFF, &dev_priv->gpu_error.flags); 2624 wake_up_all(&dev_priv->gpu_error.wait_queue); 2625 2626 do { 2627 /* 2628 * All state reset _must_ be completed before we update the 2629 * reset counter, for otherwise waiters might miss the reset 2630 * pending state and not properly drop locks, resulting in 2631 * deadlocks with the reset work. 2632 */ 2633 if (mutex_trylock(&dev_priv->drm.struct_mutex)) { 2634 i915_reset(dev_priv); 2635 mutex_unlock(&dev_priv->drm.struct_mutex); 2636 } 2637 2638 /* We need to wait for anyone holding the lock to wakeup */ 2639 } while (wait_on_bit_timeout(&dev_priv->gpu_error.flags, 2640 I915_RESET_HANDOFF, 2641 TASK_UNINTERRUPTIBLE, 2642 HZ)); 2643 2644 intel_finish_reset(dev_priv); 2645 2646 if (!test_bit(I915_WEDGED, &dev_priv->gpu_error.flags)) 2647 kobject_uevent_env(kobj, 2648 KOBJ_CHANGE, reset_done_event); 2649 2650 /* 2651 * Note: The wake_up also serves as a memory barrier so that 2652 * waiters see the updated value of the dev_priv->gpu_error. 
2653 */ 2654 clear_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags); 2655 wake_up_all(&dev_priv->gpu_error.reset_queue); 2656 } 2657 2658 static inline void 2659 i915_err_print_instdone(struct drm_i915_private *dev_priv, 2660 struct intel_instdone *instdone) 2661 { 2662 int slice; 2663 int subslice; 2664 2665 pr_err(" INSTDONE: 0x%08x\n", instdone->instdone); 2666 2667 if (INTEL_GEN(dev_priv) <= 3) 2668 return; 2669 2670 pr_err(" SC_INSTDONE: 0x%08x\n", instdone->slice_common); 2671 2672 if (INTEL_GEN(dev_priv) <= 6) 2673 return; 2674 2675 for_each_instdone_slice_subslice(dev_priv, slice, subslice) 2676 pr_err(" SAMPLER_INSTDONE[%d][%d]: 0x%08x\n", 2677 slice, subslice, instdone->sampler[slice][subslice]); 2678 2679 for_each_instdone_slice_subslice(dev_priv, slice, subslice) 2680 pr_err(" ROW_INSTDONE[%d][%d]: 0x%08x\n", 2681 slice, subslice, instdone->row[slice][subslice]); 2682 } 2683 2684 static void i915_clear_error_registers(struct drm_i915_private *dev_priv) 2685 { 2686 u32 eir; 2687 2688 if (!IS_GEN2(dev_priv)) 2689 I915_WRITE(PGTBL_ER, I915_READ(PGTBL_ER)); 2690 2691 if (INTEL_GEN(dev_priv) < 4) 2692 I915_WRITE(IPEIR, I915_READ(IPEIR)); 2693 else 2694 I915_WRITE(IPEIR_I965, I915_READ(IPEIR_I965)); 2695 2696 I915_WRITE(EIR, I915_READ(EIR)); 2697 eir = I915_READ(EIR); 2698 if (eir) { 2699 /* 2700 * some errors might have become stuck, 2701 * mask them. 2702 */ 2703 DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masking\n", eir); 2704 I915_WRITE(EMR, I915_READ(EMR) | eir); 2705 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 2706 } 2707 } 2708 2709 /** 2710 * i915_handle_error - handle a gpu error 2711 * @dev_priv: i915 device private 2712 * @engine_mask: mask representing engines that are hung 2713 * @fmt: Error message format string 2714 * 2715 * Do some basic checking of register state at error time and 2716 * dump it to the syslog. Also call i915_capture_error_state() to make 2717 * sure we get a record and make it available in debugfs. Fire a uevent 2718 * so userspace knows something bad happened (should trigger collection 2719 * of a ring dump etc.). 2720 */ 2721 void i915_handle_error(struct drm_i915_private *dev_priv, 2722 u32 engine_mask, 2723 const char *fmt, ...) 2724 { 2725 va_list args; 2726 char error_msg[80]; 2727 2728 va_start(args, fmt); 2729 vscnprintf(error_msg, sizeof(error_msg), fmt, args); 2730 va_end(args); 2731 2732 /* 2733 * In most cases it's guaranteed that we get here with an RPM 2734 * reference held, for example because there is a pending GPU 2735 * request that won't finish until the reset is done. This 2736 * isn't the case at least when we get here by doing a 2737 * simulated reset via debugfs, so get an RPM reference. 
2738 */ 2739 intel_runtime_pm_get(dev_priv); 2740 2741 i915_capture_error_state(dev_priv, engine_mask, error_msg); 2742 i915_clear_error_registers(dev_priv); 2743 2744 if (!engine_mask) 2745 goto out; 2746 2747 if (test_and_set_bit(I915_RESET_BACKOFF, 2748 &dev_priv->gpu_error.flags)) 2749 goto out; 2750 2751 i915_reset_and_wakeup(dev_priv); 2752 2753 out: 2754 intel_runtime_pm_put(dev_priv); 2755 } 2756 2757 /* Called from drm generic code, passed 'crtc' which 2758 * we use as a pipe index 2759 */ 2760 static int i8xx_enable_vblank(struct drm_device *dev, unsigned int pipe) 2761 { 2762 struct drm_i915_private *dev_priv = to_i915(dev); 2763 unsigned long irqflags; 2764 2765 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2766 i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS); 2767 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2768 2769 return 0; 2770 } 2771 2772 static int i965_enable_vblank(struct drm_device *dev, unsigned int pipe) 2773 { 2774 struct drm_i915_private *dev_priv = to_i915(dev); 2775 unsigned long irqflags; 2776 2777 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2778 i915_enable_pipestat(dev_priv, pipe, 2779 PIPE_START_VBLANK_INTERRUPT_STATUS); 2780 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2781 2782 return 0; 2783 } 2784 2785 static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe) 2786 { 2787 struct drm_i915_private *dev_priv = to_i915(dev); 2788 unsigned long irqflags; 2789 uint32_t bit = INTEL_GEN(dev_priv) >= 7 ? 2790 DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe); 2791 2792 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2793 ilk_enable_display_irq(dev_priv, bit); 2794 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2795 2796 return 0; 2797 } 2798 2799 static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe) 2800 { 2801 struct drm_i915_private *dev_priv = to_i915(dev); 2802 unsigned long irqflags; 2803 2804 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2805 bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK); 2806 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2807 2808 return 0; 2809 } 2810 2811 /* Called from drm generic code, passed 'crtc' which 2812 * we use as a pipe index 2813 */ 2814 static void i8xx_disable_vblank(struct drm_device *dev, unsigned int pipe) 2815 { 2816 struct drm_i915_private *dev_priv = to_i915(dev); 2817 unsigned long irqflags; 2818 2819 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2820 i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS); 2821 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2822 } 2823 2824 static void i965_disable_vblank(struct drm_device *dev, unsigned int pipe) 2825 { 2826 struct drm_i915_private *dev_priv = to_i915(dev); 2827 unsigned long irqflags; 2828 2829 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2830 i915_disable_pipestat(dev_priv, pipe, 2831 PIPE_START_VBLANK_INTERRUPT_STATUS); 2832 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2833 } 2834 2835 static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe) 2836 { 2837 struct drm_i915_private *dev_priv = to_i915(dev); 2838 unsigned long irqflags; 2839 uint32_t bit = INTEL_GEN(dev_priv) >= 7 ? 
2840 DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe); 2841 2842 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2843 ilk_disable_display_irq(dev_priv, bit); 2844 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2845 } 2846 2847 static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe) 2848 { 2849 struct drm_i915_private *dev_priv = to_i915(dev); 2850 unsigned long irqflags; 2851 2852 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2853 bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK); 2854 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2855 } 2856 2857 static void ibx_irq_reset(struct drm_i915_private *dev_priv) 2858 { 2859 if (HAS_PCH_NOP(dev_priv)) 2860 return; 2861 2862 GEN5_IRQ_RESET(SDE); 2863 2864 if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv)) 2865 I915_WRITE(SERR_INT, 0xffffffff); 2866 } 2867 2868 /* 2869 * SDEIER is also touched by the interrupt handler to work around missed PCH 2870 * interrupts. Hence we can't update it after the interrupt handler is enabled - 2871 * instead we unconditionally enable all PCH interrupt sources here, but then 2872 * only unmask them as needed with SDEIMR. 2873 * 2874 * This function needs to be called before interrupts are enabled. 2875 */ 2876 static void ibx_irq_pre_postinstall(struct drm_device *dev) 2877 { 2878 struct drm_i915_private *dev_priv = to_i915(dev); 2879 2880 if (HAS_PCH_NOP(dev_priv)) 2881 return; 2882 2883 WARN_ON(I915_READ(SDEIER) != 0); 2884 I915_WRITE(SDEIER, 0xffffffff); 2885 POSTING_READ(SDEIER); 2886 } 2887 2888 static void gen5_gt_irq_reset(struct drm_i915_private *dev_priv) 2889 { 2890 GEN5_IRQ_RESET(GT); 2891 if (INTEL_GEN(dev_priv) >= 6) 2892 GEN5_IRQ_RESET(GEN6_PM); 2893 } 2894 2895 static void vlv_display_irq_reset(struct drm_i915_private *dev_priv) 2896 { 2897 enum pipe pipe; 2898 2899 if (IS_CHERRYVIEW(dev_priv)) 2900 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV); 2901 else 2902 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK); 2903 2904 i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0); 2905 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 2906 2907 for_each_pipe(dev_priv, pipe) { 2908 I915_WRITE(PIPESTAT(pipe), 2909 PIPE_FIFO_UNDERRUN_STATUS | 2910 PIPESTAT_INT_STATUS_MASK); 2911 dev_priv->pipestat_irq_mask[pipe] = 0; 2912 } 2913 2914 GEN5_IRQ_RESET(VLV_); 2915 dev_priv->irq_mask = ~0; 2916 } 2917 2918 static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv) 2919 { 2920 u32 pipestat_mask; 2921 u32 enable_mask; 2922 enum pipe pipe; 2923 2924 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV | 2925 PIPE_CRC_DONE_INTERRUPT_STATUS; 2926 2927 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); 2928 for_each_pipe(dev_priv, pipe) 2929 i915_enable_pipestat(dev_priv, pipe, pipestat_mask); 2930 2931 enable_mask = I915_DISPLAY_PORT_INTERRUPT | 2932 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 2933 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 2934 I915_LPE_PIPE_A_INTERRUPT | 2935 I915_LPE_PIPE_B_INTERRUPT; 2936 2937 if (IS_CHERRYVIEW(dev_priv)) 2938 enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT | 2939 I915_LPE_PIPE_C_INTERRUPT; 2940 2941 WARN_ON(dev_priv->irq_mask != ~0); 2942 2943 dev_priv->irq_mask = ~enable_mask; 2944 2945 GEN5_IRQ_INIT(VLV_, dev_priv->irq_mask, enable_mask); 2946 } 2947 2948 /* drm_dma.h hooks 2949 */ 2950 static void ironlake_irq_reset(struct drm_device *dev) 2951 { 2952 struct drm_i915_private *dev_priv = to_i915(dev); 2953 2954 I915_WRITE(HWSTAM, 0xffffffff); 2955 2956 GEN5_IRQ_RESET(DE); 2957 if (IS_GEN7(dev_priv)) 
2958 I915_WRITE(GEN7_ERR_INT, 0xffffffff); 2959 2960 gen5_gt_irq_reset(dev_priv); 2961 2962 ibx_irq_reset(dev_priv); 2963 } 2964 2965 static void valleyview_irq_preinstall(struct drm_device *dev) 2966 { 2967 struct drm_i915_private *dev_priv = to_i915(dev); 2968 2969 I915_WRITE(VLV_MASTER_IER, 0); 2970 POSTING_READ(VLV_MASTER_IER); 2971 2972 gen5_gt_irq_reset(dev_priv); 2973 2974 spin_lock_irq(&dev_priv->irq_lock); 2975 if (dev_priv->display_irqs_enabled) 2976 vlv_display_irq_reset(dev_priv); 2977 spin_unlock_irq(&dev_priv->irq_lock); 2978 } 2979 2980 static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv) 2981 { 2982 GEN8_IRQ_RESET_NDX(GT, 0); 2983 GEN8_IRQ_RESET_NDX(GT, 1); 2984 GEN8_IRQ_RESET_NDX(GT, 2); 2985 GEN8_IRQ_RESET_NDX(GT, 3); 2986 } 2987 2988 static void gen8_irq_reset(struct drm_device *dev) 2989 { 2990 struct drm_i915_private *dev_priv = to_i915(dev); 2991 int pipe; 2992 2993 I915_WRITE(GEN8_MASTER_IRQ, 0); 2994 POSTING_READ(GEN8_MASTER_IRQ); 2995 2996 gen8_gt_irq_reset(dev_priv); 2997 2998 for_each_pipe(dev_priv, pipe) 2999 if (intel_display_power_is_enabled(dev_priv, 3000 POWER_DOMAIN_PIPE(pipe))) 3001 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe); 3002 3003 GEN5_IRQ_RESET(GEN8_DE_PORT_); 3004 GEN5_IRQ_RESET(GEN8_DE_MISC_); 3005 GEN5_IRQ_RESET(GEN8_PCU_); 3006 3007 if (HAS_PCH_SPLIT(dev_priv)) 3008 ibx_irq_reset(dev_priv); 3009 } 3010 3011 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv, 3012 unsigned int pipe_mask) 3013 { 3014 uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN; 3015 enum pipe pipe; 3016 3017 spin_lock_irq(&dev_priv->irq_lock); 3018 for_each_pipe_masked(dev_priv, pipe, pipe_mask) 3019 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe, 3020 dev_priv->de_irq_mask[pipe], 3021 ~dev_priv->de_irq_mask[pipe] | extra_ier); 3022 spin_unlock_irq(&dev_priv->irq_lock); 3023 } 3024 3025 void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv, 3026 unsigned int pipe_mask) 3027 { 3028 enum pipe pipe; 3029 3030 spin_lock_irq(&dev_priv->irq_lock); 3031 for_each_pipe_masked(dev_priv, pipe, pipe_mask) 3032 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe); 3033 spin_unlock_irq(&dev_priv->irq_lock); 3034 3035 /* make sure we're done processing display irqs */ 3036 synchronize_irq(dev_priv->drm.irq); 3037 } 3038 3039 static void cherryview_irq_preinstall(struct drm_device *dev) 3040 { 3041 struct drm_i915_private *dev_priv = to_i915(dev); 3042 3043 I915_WRITE(GEN8_MASTER_IRQ, 0); 3044 POSTING_READ(GEN8_MASTER_IRQ); 3045 3046 gen8_gt_irq_reset(dev_priv); 3047 3048 GEN5_IRQ_RESET(GEN8_PCU_); 3049 3050 spin_lock_irq(&dev_priv->irq_lock); 3051 if (dev_priv->display_irqs_enabled) 3052 vlv_display_irq_reset(dev_priv); 3053 spin_unlock_irq(&dev_priv->irq_lock); 3054 } 3055 3056 static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv, 3057 const u32 hpd[HPD_NUM_PINS]) 3058 { 3059 struct intel_encoder *encoder; 3060 u32 enabled_irqs = 0; 3061 3062 for_each_intel_encoder(&dev_priv->drm, encoder) 3063 if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED) 3064 enabled_irqs |= hpd[encoder->hpd_pin]; 3065 3066 return enabled_irqs; 3067 } 3068 3069 static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv) 3070 { 3071 u32 hotplug; 3072 3073 /* 3074 * Enable digital hotplug on the PCH, and configure the DP short pulse 3075 * duration to 2ms (which is the minimum in the Display Port spec). 3076 * The pulse duration bits are reserved on LPT+. 
3077 */ 3078 hotplug = I915_READ(PCH_PORT_HOTPLUG); 3079 hotplug &= ~(PORTB_PULSE_DURATION_MASK | 3080 PORTC_PULSE_DURATION_MASK | 3081 PORTD_PULSE_DURATION_MASK); 3082 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms; 3083 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms; 3084 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms; 3085 /* 3086 * When CPU and PCH are on the same package, port A 3087 * HPD must be enabled in both north and south. 3088 */ 3089 if (HAS_PCH_LPT_LP(dev_priv)) 3090 hotplug |= PORTA_HOTPLUG_ENABLE; 3091 I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 3092 } 3093 3094 static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv) 3095 { 3096 u32 hotplug_irqs, enabled_irqs; 3097 3098 if (HAS_PCH_IBX(dev_priv)) { 3099 hotplug_irqs = SDE_HOTPLUG_MASK; 3100 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ibx); 3101 } else { 3102 hotplug_irqs = SDE_HOTPLUG_MASK_CPT; 3103 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_cpt); 3104 } 3105 3106 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 3107 3108 ibx_hpd_detection_setup(dev_priv); 3109 } 3110 3111 static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv) 3112 { 3113 u32 hotplug; 3114 3115 /* Enable digital hotplug on the PCH */ 3116 hotplug = I915_READ(PCH_PORT_HOTPLUG); 3117 hotplug |= PORTA_HOTPLUG_ENABLE | 3118 PORTB_HOTPLUG_ENABLE | 3119 PORTC_HOTPLUG_ENABLE | 3120 PORTD_HOTPLUG_ENABLE; 3121 I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 3122 3123 hotplug = I915_READ(PCH_PORT_HOTPLUG2); 3124 hotplug |= PORTE_HOTPLUG_ENABLE; 3125 I915_WRITE(PCH_PORT_HOTPLUG2, hotplug); 3126 } 3127 3128 static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv) 3129 { 3130 u32 hotplug_irqs, enabled_irqs; 3131 3132 hotplug_irqs = SDE_HOTPLUG_MASK_SPT; 3133 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_spt); 3134 3135 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 3136 3137 spt_hpd_detection_setup(dev_priv); 3138 } 3139 3140 static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv) 3141 { 3142 u32 hotplug; 3143 3144 /* 3145 * Enable digital hotplug on the CPU, and configure the DP short pulse 3146 * duration to 2ms (which is the minimum in the Display Port spec) 3147 * The pulse duration bits are reserved on HSW+. 
3148 */ 3149 hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL); 3150 hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK; 3151 hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE | 3152 DIGITAL_PORTA_PULSE_DURATION_2ms; 3153 I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug); 3154 } 3155 3156 static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv) 3157 { 3158 u32 hotplug_irqs, enabled_irqs; 3159 3160 if (INTEL_GEN(dev_priv) >= 8) { 3161 hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG; 3162 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bdw); 3163 3164 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs); 3165 } else if (INTEL_GEN(dev_priv) >= 7) { 3166 hotplug_irqs = DE_DP_A_HOTPLUG_IVB; 3167 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ivb); 3168 3169 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs); 3170 } else { 3171 hotplug_irqs = DE_DP_A_HOTPLUG; 3172 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ilk); 3173 3174 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs); 3175 } 3176 3177 ilk_hpd_detection_setup(dev_priv); 3178 3179 ibx_hpd_irq_setup(dev_priv); 3180 } 3181 3182 static void __bxt_hpd_detection_setup(struct drm_i915_private *dev_priv, 3183 u32 enabled_irqs) 3184 { 3185 u32 hotplug; 3186 3187 hotplug = I915_READ(PCH_PORT_HOTPLUG); 3188 hotplug |= PORTA_HOTPLUG_ENABLE | 3189 PORTB_HOTPLUG_ENABLE | 3190 PORTC_HOTPLUG_ENABLE; 3191 3192 DRM_DEBUG_KMS("Invert bit setting: hp_ctl:%x hp_port:%x\n", 3193 hotplug, enabled_irqs); 3194 hotplug &= ~BXT_DDI_HPD_INVERT_MASK; 3195 3196 /* 3197 * For BXT invert bit has to be set based on AOB design 3198 * for HPD detection logic, update it based on VBT fields. 3199 */ 3200 if ((enabled_irqs & BXT_DE_PORT_HP_DDIA) && 3201 intel_bios_is_port_hpd_inverted(dev_priv, PORT_A)) 3202 hotplug |= BXT_DDIA_HPD_INVERT; 3203 if ((enabled_irqs & BXT_DE_PORT_HP_DDIB) && 3204 intel_bios_is_port_hpd_inverted(dev_priv, PORT_B)) 3205 hotplug |= BXT_DDIB_HPD_INVERT; 3206 if ((enabled_irqs & BXT_DE_PORT_HP_DDIC) && 3207 intel_bios_is_port_hpd_inverted(dev_priv, PORT_C)) 3208 hotplug |= BXT_DDIC_HPD_INVERT; 3209 3210 I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 3211 } 3212 3213 static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv) 3214 { 3215 __bxt_hpd_detection_setup(dev_priv, BXT_DE_PORT_HOTPLUG_MASK); 3216 } 3217 3218 static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv) 3219 { 3220 u32 hotplug_irqs, enabled_irqs; 3221 3222 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bxt); 3223 hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK; 3224 3225 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs); 3226 3227 __bxt_hpd_detection_setup(dev_priv, enabled_irqs); 3228 } 3229 3230 static void ibx_irq_postinstall(struct drm_device *dev) 3231 { 3232 struct drm_i915_private *dev_priv = to_i915(dev); 3233 u32 mask; 3234 3235 if (HAS_PCH_NOP(dev_priv)) 3236 return; 3237 3238 if (HAS_PCH_IBX(dev_priv)) 3239 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON; 3240 else 3241 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT; 3242 3243 gen5_assert_iir_is_zero(dev_priv, SDEIIR); 3244 I915_WRITE(SDEIMR, ~mask); 3245 3246 if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) || 3247 HAS_PCH_LPT(dev_priv)) 3248 ibx_hpd_detection_setup(dev_priv); 3249 else 3250 spt_hpd_detection_setup(dev_priv); 3251 } 3252 3253 static void gen5_gt_irq_postinstall(struct drm_device *dev) 3254 { 3255 struct drm_i915_private *dev_priv = to_i915(dev); 3256 u32 pm_irqs, gt_irqs; 3257 3258 pm_irqs = gt_irqs = 0; 3259 3260 dev_priv->gt_irq_mask = ~0; 3261 if (HAS_L3_DPF(dev_priv)) { 3262 
/* L3 parity interrupt is always unmasked. */ 3263 dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev_priv); 3264 gt_irqs |= GT_PARITY_ERROR(dev_priv); 3265 } 3266 3267 gt_irqs |= GT_RENDER_USER_INTERRUPT; 3268 if (IS_GEN5(dev_priv)) { 3269 gt_irqs |= ILK_BSD_USER_INTERRUPT; 3270 } else { 3271 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT; 3272 } 3273 3274 GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs); 3275 3276 if (INTEL_GEN(dev_priv) >= 6) { 3277 /* 3278 * RPS interrupts will get enabled/disabled on demand when RPS 3279 * itself is enabled/disabled. 3280 */ 3281 if (HAS_VEBOX(dev_priv)) { 3282 pm_irqs |= PM_VEBOX_USER_INTERRUPT; 3283 dev_priv->pm_ier |= PM_VEBOX_USER_INTERRUPT; 3284 } 3285 3286 dev_priv->pm_imr = 0xffffffff; 3287 GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_imr, pm_irqs); 3288 } 3289 } 3290 3291 static int ironlake_irq_postinstall(struct drm_device *dev) 3292 { 3293 struct drm_i915_private *dev_priv = to_i915(dev); 3294 u32 display_mask, extra_mask; 3295 3296 if (INTEL_GEN(dev_priv) >= 7) { 3297 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | 3298 DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB | 3299 DE_PLANEB_FLIP_DONE_IVB | 3300 DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB); 3301 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB | 3302 DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB | 3303 DE_DP_A_HOTPLUG_IVB); 3304 } else { 3305 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | 3306 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE | 3307 DE_AUX_CHANNEL_A | 3308 DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE | 3309 DE_POISON); 3310 extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT | 3311 DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN | 3312 DE_DP_A_HOTPLUG); 3313 } 3314 3315 dev_priv->irq_mask = ~display_mask; 3316 3317 I915_WRITE(HWSTAM, 0xeffe); 3318 3319 ibx_irq_pre_postinstall(dev); 3320 3321 GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask); 3322 3323 gen5_gt_irq_postinstall(dev); 3324 3325 ilk_hpd_detection_setup(dev_priv); 3326 3327 ibx_irq_postinstall(dev); 3328 3329 if (IS_IRONLAKE_M(dev_priv)) { 3330 /* Enable PCU event interrupts 3331 * 3332 * spinlocking not required here for correctness since interrupt 3333 * setup is guaranteed to run in single-threaded context. But we 3334 * need it to make the assert_spin_locked happy. 
*/ 3335 spin_lock_irq(&dev_priv->irq_lock); 3336 ilk_enable_display_irq(dev_priv, DE_PCU_EVENT); 3337 spin_unlock_irq(&dev_priv->irq_lock); 3338 } 3339 3340 return 0; 3341 } 3342 3343 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv) 3344 { 3345 lockdep_assert_held(&dev_priv->irq_lock); 3346 3347 if (dev_priv->display_irqs_enabled) 3348 return; 3349 3350 dev_priv->display_irqs_enabled = true; 3351 3352 if (intel_irqs_enabled(dev_priv)) { 3353 vlv_display_irq_reset(dev_priv); 3354 vlv_display_irq_postinstall(dev_priv); 3355 } 3356 } 3357 3358 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv) 3359 { 3360 lockdep_assert_held(&dev_priv->irq_lock); 3361 3362 if (!dev_priv->display_irqs_enabled) 3363 return; 3364 3365 dev_priv->display_irqs_enabled = false; 3366 3367 if (intel_irqs_enabled(dev_priv)) 3368 vlv_display_irq_reset(dev_priv); 3369 } 3370 3371 3372 static int valleyview_irq_postinstall(struct drm_device *dev) 3373 { 3374 struct drm_i915_private *dev_priv = to_i915(dev); 3375 3376 gen5_gt_irq_postinstall(dev); 3377 3378 spin_lock_irq(&dev_priv->irq_lock); 3379 if (dev_priv->display_irqs_enabled) 3380 vlv_display_irq_postinstall(dev_priv); 3381 spin_unlock_irq(&dev_priv->irq_lock); 3382 3383 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); 3384 POSTING_READ(VLV_MASTER_IER); 3385 3386 return 0; 3387 } 3388 3389 static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv) 3390 { 3391 /* These are interrupts we'll toggle with the ring mask register */ 3392 uint32_t gt_interrupts[] = { 3393 GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT | 3394 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT | 3395 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT | 3396 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT, 3397 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT | 3398 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT | 3399 GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT | 3400 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT, 3401 0, 3402 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT | 3403 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT 3404 }; 3405 3406 if (HAS_L3_DPF(dev_priv)) 3407 gt_interrupts[0] |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT; 3408 3409 dev_priv->pm_ier = 0x0; 3410 dev_priv->pm_imr = ~dev_priv->pm_ier; 3411 GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]); 3412 GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]); 3413 /* 3414 * RPS interrupts will get enabled/disabled on demand when RPS itself 3415 * is enabled/disabled. Same will be the case for GuC interrupts.
3416 */ 3417 GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_imr, dev_priv->pm_ier); 3418 GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]); 3419 } 3420 3421 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) 3422 { 3423 uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE; 3424 uint32_t de_pipe_enables; 3425 u32 de_port_masked = GEN8_AUX_CHANNEL_A; 3426 u32 de_port_enables; 3427 u32 de_misc_masked = GEN8_DE_MISC_GSE; 3428 enum pipe pipe; 3429 3430 if (INTEL_INFO(dev_priv)->gen >= 9) { 3431 de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE | 3432 GEN9_DE_PIPE_IRQ_FAULT_ERRORS; 3433 de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C | 3434 GEN9_AUX_CHANNEL_D; 3435 if (IS_GEN9_LP(dev_priv)) 3436 de_port_masked |= BXT_DE_PORT_GMBUS; 3437 } else { 3438 de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE | 3439 GEN8_DE_PIPE_IRQ_FAULT_ERRORS; 3440 } 3441 3442 de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK | 3443 GEN8_PIPE_FIFO_UNDERRUN; 3444 3445 de_port_enables = de_port_masked; 3446 if (IS_GEN9_LP(dev_priv)) 3447 de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK; 3448 else if (IS_BROADWELL(dev_priv)) 3449 de_port_enables |= GEN8_PORT_DP_A_HOTPLUG; 3450 3451 dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked; 3452 dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked; 3453 dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked; 3454 3455 for_each_pipe(dev_priv, pipe) 3456 if (intel_display_power_is_enabled(dev_priv, 3457 POWER_DOMAIN_PIPE(pipe))) 3458 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe, 3459 dev_priv->de_irq_mask[pipe], 3460 de_pipe_enables); 3461 3462 GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables); 3463 GEN5_IRQ_INIT(GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked); 3464 3465 if (IS_GEN9_LP(dev_priv)) 3466 bxt_hpd_detection_setup(dev_priv); 3467 else if (IS_BROADWELL(dev_priv)) 3468 ilk_hpd_detection_setup(dev_priv); 3469 } 3470 3471 static int gen8_irq_postinstall(struct drm_device *dev) 3472 { 3473 struct drm_i915_private *dev_priv = to_i915(dev); 3474 3475 if (HAS_PCH_SPLIT(dev_priv)) 3476 ibx_irq_pre_postinstall(dev); 3477 3478 gen8_gt_irq_postinstall(dev_priv); 3479 gen8_de_irq_postinstall(dev_priv); 3480 3481 if (HAS_PCH_SPLIT(dev_priv)) 3482 ibx_irq_postinstall(dev); 3483 3484 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 3485 POSTING_READ(GEN8_MASTER_IRQ); 3486 3487 return 0; 3488 } 3489 3490 static int cherryview_irq_postinstall(struct drm_device *dev) 3491 { 3492 struct drm_i915_private *dev_priv = to_i915(dev); 3493 3494 gen8_gt_irq_postinstall(dev_priv); 3495 3496 spin_lock_irq(&dev_priv->irq_lock); 3497 if (dev_priv->display_irqs_enabled) 3498 vlv_display_irq_postinstall(dev_priv); 3499 spin_unlock_irq(&dev_priv->irq_lock); 3500 3501 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 3502 POSTING_READ(GEN8_MASTER_IRQ); 3503 3504 return 0; 3505 } 3506 3507 static void gen8_irq_uninstall(struct drm_device *dev) 3508 { 3509 struct drm_i915_private *dev_priv = to_i915(dev); 3510 3511 if (!dev_priv) 3512 return; 3513 3514 gen8_irq_reset(dev); 3515 } 3516 3517 static void valleyview_irq_uninstall(struct drm_device *dev) 3518 { 3519 struct drm_i915_private *dev_priv = to_i915(dev); 3520 3521 if (!dev_priv) 3522 return; 3523 3524 I915_WRITE(VLV_MASTER_IER, 0); 3525 POSTING_READ(VLV_MASTER_IER); 3526 3527 gen5_gt_irq_reset(dev_priv); 3528 3529 I915_WRITE(HWSTAM, 0xffffffff); 3530 3531 spin_lock_irq(&dev_priv->irq_lock); 3532 if (dev_priv->display_irqs_enabled) 3533 vlv_display_irq_reset(dev_priv); 3534 spin_unlock_irq(&dev_priv->irq_lock); 3535 } 3536 3537 static void 
cherryview_irq_uninstall(struct drm_device *dev) 3538 { 3539 struct drm_i915_private *dev_priv = to_i915(dev); 3540 3541 if (!dev_priv) 3542 return; 3543 3544 I915_WRITE(GEN8_MASTER_IRQ, 0); 3545 POSTING_READ(GEN8_MASTER_IRQ); 3546 3547 gen8_gt_irq_reset(dev_priv); 3548 3549 GEN5_IRQ_RESET(GEN8_PCU_); 3550 3551 spin_lock_irq(&dev_priv->irq_lock); 3552 if (dev_priv->display_irqs_enabled) 3553 vlv_display_irq_reset(dev_priv); 3554 spin_unlock_irq(&dev_priv->irq_lock); 3555 } 3556 3557 static void ironlake_irq_uninstall(struct drm_device *dev) 3558 { 3559 struct drm_i915_private *dev_priv = to_i915(dev); 3560 3561 if (!dev_priv) 3562 return; 3563 3564 ironlake_irq_reset(dev); 3565 } 3566 3567 static void i8xx_irq_preinstall(struct drm_device * dev) 3568 { 3569 struct drm_i915_private *dev_priv = to_i915(dev); 3570 int pipe; 3571 3572 for_each_pipe(dev_priv, pipe) 3573 I915_WRITE(PIPESTAT(pipe), 0); 3574 I915_WRITE16(IMR, 0xffff); 3575 I915_WRITE16(IER, 0x0); 3576 POSTING_READ16(IER); 3577 } 3578 3579 static int i8xx_irq_postinstall(struct drm_device *dev) 3580 { 3581 struct drm_i915_private *dev_priv = to_i915(dev); 3582 3583 I915_WRITE16(EMR, 3584 ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); 3585 3586 /* Unmask the interrupts that we always want on. */ 3587 dev_priv->irq_mask = 3588 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3589 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3590 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3591 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT); 3592 I915_WRITE16(IMR, dev_priv->irq_mask); 3593 3594 I915_WRITE16(IER, 3595 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3596 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3597 I915_USER_INTERRUPT); 3598 POSTING_READ16(IER); 3599 3600 /* Interrupt setup is already guaranteed to be single-threaded, this is 3601 * just to make the assert_spin_locked check happy. */ 3602 spin_lock_irq(&dev_priv->irq_lock); 3603 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 3604 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 3605 spin_unlock_irq(&dev_priv->irq_lock); 3606 3607 return 0; 3608 } 3609 3610 /* 3611 * Returns true when a page flip has completed. 3612 */ 3613 static bool i8xx_handle_vblank(struct drm_i915_private *dev_priv, 3614 int plane, int pipe, u32 iir) 3615 { 3616 u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane); 3617 3618 if (!intel_pipe_handle_vblank(dev_priv, pipe)) 3619 return false; 3620 3621 if ((iir & flip_pending) == 0) 3622 goto check_page_flip; 3623 3624 /* We detect FlipDone by looking for the change in PendingFlip from '1' 3625 * to '0' on the following vblank, i.e. IIR has the Pendingflip 3626 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence 3627 * the flip is completed (no longer pending). Since this doesn't raise 3628 * an interrupt per se, we watch for the change at vblank. 
3629 */ 3630 if (I915_READ16(ISR) & flip_pending) 3631 goto check_page_flip; 3632 3633 intel_finish_page_flip_cs(dev_priv, pipe); 3634 return true; 3635 3636 check_page_flip: 3637 intel_check_page_flip(dev_priv, pipe); 3638 return false; 3639 } 3640 3641 static irqreturn_t i8xx_irq_handler(int irq, void *arg) 3642 { 3643 struct drm_device *dev = arg; 3644 struct drm_i915_private *dev_priv = to_i915(dev); 3645 u16 iir, new_iir; 3646 u32 pipe_stats[2]; 3647 int pipe; 3648 u16 flip_mask = 3649 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3650 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 3651 irqreturn_t ret; 3652 3653 if (!intel_irqs_enabled(dev_priv)) 3654 return IRQ_NONE; 3655 3656 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 3657 disable_rpm_wakeref_asserts(dev_priv); 3658 3659 ret = IRQ_NONE; 3660 iir = I915_READ16(IIR); 3661 if (iir == 0) 3662 goto out; 3663 3664 while (iir & ~flip_mask) { 3665 /* Can't rely on pipestat interrupt bit in iir as it might 3666 * have been cleared after the pipestat interrupt was received. 3667 * It doesn't set the bit in iir again, but it still produces 3668 * interrupts (for non-MSI). 3669 */ 3670 spin_lock(&dev_priv->irq_lock); 3671 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 3672 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir); 3673 3674 for_each_pipe(dev_priv, pipe) { 3675 i915_reg_t reg = PIPESTAT(pipe); 3676 pipe_stats[pipe] = I915_READ(reg); 3677 3678 /* 3679 * Clear the PIPE*STAT regs before the IIR 3680 */ 3681 if (pipe_stats[pipe] & 0x8000ffff) 3682 I915_WRITE(reg, pipe_stats[pipe]); 3683 } 3684 spin_unlock(&dev_priv->irq_lock); 3685 3686 I915_WRITE16(IIR, iir & ~flip_mask); 3687 new_iir = I915_READ16(IIR); /* Flush posted writes */ 3688 3689 if (iir & I915_USER_INTERRUPT) 3690 notify_ring(dev_priv->engine[RCS]); 3691 3692 for_each_pipe(dev_priv, pipe) { 3693 int plane = pipe; 3694 if (HAS_FBC(dev_priv)) 3695 plane = !plane; 3696 3697 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS && 3698 i8xx_handle_vblank(dev_priv, plane, pipe, iir)) 3699 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane); 3700 3701 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 3702 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 3703 3704 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 3705 intel_cpu_fifo_underrun_irq_handler(dev_priv, 3706 pipe); 3707 } 3708 3709 iir = new_iir; 3710 } 3711 ret = IRQ_HANDLED; 3712 3713 out: 3714 enable_rpm_wakeref_asserts(dev_priv); 3715 3716 return ret; 3717 } 3718 3719 static void i8xx_irq_uninstall(struct drm_device * dev) 3720 { 3721 struct drm_i915_private *dev_priv = to_i915(dev); 3722 int pipe; 3723 3724 for_each_pipe(dev_priv, pipe) { 3725 /* Clear enable bits; then clear status bits */ 3726 I915_WRITE(PIPESTAT(pipe), 0); 3727 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); 3728 } 3729 I915_WRITE16(IMR, 0xffff); 3730 I915_WRITE16(IER, 0x0); 3731 I915_WRITE16(IIR, I915_READ16(IIR)); 3732 } 3733 3734 static void i915_irq_preinstall(struct drm_device * dev) 3735 { 3736 struct drm_i915_private *dev_priv = to_i915(dev); 3737 int pipe; 3738 3739 if (I915_HAS_HOTPLUG(dev_priv)) { 3740 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); 3741 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3742 } 3743 3744 I915_WRITE16(HWSTAM, 0xeffe); 3745 for_each_pipe(dev_priv, pipe) 3746 I915_WRITE(PIPESTAT(pipe), 0); 3747 I915_WRITE(IMR, 0xffffffff); 3748 I915_WRITE(IER, 0x0); 3749 POSTING_READ(IER); 3750 } 3751 3752 static int i915_irq_postinstall(struct drm_device *dev) 3753 { 
3754 struct drm_i915_private *dev_priv = to_i915(dev); 3755 u32 enable_mask; 3756 3757 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); 3758 3759 /* Unmask the interrupts that we always want on. */ 3760 dev_priv->irq_mask = 3761 ~(I915_ASLE_INTERRUPT | 3762 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3763 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3764 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3765 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT); 3766 3767 enable_mask = 3768 I915_ASLE_INTERRUPT | 3769 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3770 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3771 I915_USER_INTERRUPT; 3772 3773 if (I915_HAS_HOTPLUG(dev_priv)) { 3774 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); 3775 POSTING_READ(PORT_HOTPLUG_EN); 3776 3777 /* Enable in IER... */ 3778 enable_mask |= I915_DISPLAY_PORT_INTERRUPT; 3779 /* and unmask in IMR */ 3780 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT; 3781 } 3782 3783 I915_WRITE(IMR, dev_priv->irq_mask); 3784 I915_WRITE(IER, enable_mask); 3785 POSTING_READ(IER); 3786 3787 i915_enable_asle_pipestat(dev_priv); 3788 3789 /* Interrupt setup is already guaranteed to be single-threaded, this is 3790 * just to make the assert_spin_locked check happy. */ 3791 spin_lock_irq(&dev_priv->irq_lock); 3792 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 3793 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 3794 spin_unlock_irq(&dev_priv->irq_lock); 3795 3796 return 0; 3797 } 3798 3799 /* 3800 * Returns true when a page flip has completed. 3801 */ 3802 static bool i915_handle_vblank(struct drm_i915_private *dev_priv, 3803 int plane, int pipe, u32 iir) 3804 { 3805 u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane); 3806 3807 if (!intel_pipe_handle_vblank(dev_priv, pipe)) 3808 return false; 3809 3810 if ((iir & flip_pending) == 0) 3811 goto check_page_flip; 3812 3813 /* We detect FlipDone by looking for the change in PendingFlip from '1' 3814 * to '0' on the following vblank, i.e. IIR has the Pendingflip 3815 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence 3816 * the flip is completed (no longer pending). Since this doesn't raise 3817 * an interrupt per se, we watch for the change at vblank. 3818 */ 3819 if (I915_READ(ISR) & flip_pending) 3820 goto check_page_flip; 3821 3822 intel_finish_page_flip_cs(dev_priv, pipe); 3823 return true; 3824 3825 check_page_flip: 3826 intel_check_page_flip(dev_priv, pipe); 3827 return false; 3828 } 3829 3830 static irqreturn_t i915_irq_handler(int irq, void *arg) 3831 { 3832 struct drm_device *dev = arg; 3833 struct drm_i915_private *dev_priv = to_i915(dev); 3834 u32 iir, new_iir, pipe_stats[I915_MAX_PIPES]; 3835 u32 flip_mask = 3836 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3837 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 3838 int pipe, ret = IRQ_NONE; 3839 3840 if (!intel_irqs_enabled(dev_priv)) 3841 return IRQ_NONE; 3842 3843 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 3844 disable_rpm_wakeref_asserts(dev_priv); 3845 3846 iir = I915_READ(IIR); 3847 do { 3848 bool irq_received = (iir & ~flip_mask) != 0; 3849 bool blc_event = false; 3850 3851 /* Can't rely on pipestat interrupt bit in iir as it might 3852 * have been cleared after the pipestat interrupt was received. 3853 * It doesn't set the bit in iir again, but it still produces 3854 * interrupts (for non-MSI). 
3855 */ 3856 spin_lock(&dev_priv->irq_lock); 3857 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 3858 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir); 3859 3860 for_each_pipe(dev_priv, pipe) { 3861 i915_reg_t reg = PIPESTAT(pipe); 3862 pipe_stats[pipe] = I915_READ(reg); 3863 3864 /* Clear the PIPE*STAT regs before the IIR */ 3865 if (pipe_stats[pipe] & 0x8000ffff) { 3866 I915_WRITE(reg, pipe_stats[pipe]); 3867 irq_received = true; 3868 } 3869 } 3870 spin_unlock(&dev_priv->irq_lock); 3871 3872 if (!irq_received) 3873 break; 3874 3875 /* Consume port. Then clear IIR or we'll miss events */ 3876 if (I915_HAS_HOTPLUG(dev_priv) && 3877 iir & I915_DISPLAY_PORT_INTERRUPT) { 3878 u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv); 3879 if (hotplug_status) 3880 i9xx_hpd_irq_handler(dev_priv, hotplug_status); 3881 } 3882 3883 I915_WRITE(IIR, iir & ~flip_mask); 3884 new_iir = I915_READ(IIR); /* Flush posted writes */ 3885 3886 if (iir & I915_USER_INTERRUPT) 3887 notify_ring(dev_priv->engine[RCS]); 3888 3889 for_each_pipe(dev_priv, pipe) { 3890 int plane = pipe; 3891 if (HAS_FBC(dev_priv)) 3892 plane = !plane; 3893 3894 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS && 3895 i915_handle_vblank(dev_priv, plane, pipe, iir)) 3896 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane); 3897 3898 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 3899 blc_event = true; 3900 3901 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 3902 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 3903 3904 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 3905 intel_cpu_fifo_underrun_irq_handler(dev_priv, 3906 pipe); 3907 } 3908 3909 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 3910 intel_opregion_asle_intr(dev_priv); 3911 3912 /* With MSI, interrupts are only generated when iir 3913 * transitions from zero to nonzero. If another bit got 3914 * set while we were handling the existing iir bits, then 3915 * we would never get another interrupt. 3916 * 3917 * This is fine on non-MSI as well, as if we hit this path 3918 * we avoid exiting the interrupt handler only to generate 3919 * another one. 3920 * 3921 * Note that for MSI this could cause a stray interrupt report 3922 * if an interrupt landed in the time between writing IIR and 3923 * the posting read. This should be rare enough to never 3924 * trigger the 99% of 100,000 interrupts test for disabling 3925 * stray interrupts. 
3926 */ 3927 ret = IRQ_HANDLED; 3928 iir = new_iir; 3929 } while (iir & ~flip_mask); 3930 3931 enable_rpm_wakeref_asserts(dev_priv); 3932 3933 return ret; 3934 } 3935 3936 static void i915_irq_uninstall(struct drm_device * dev) 3937 { 3938 struct drm_i915_private *dev_priv = to_i915(dev); 3939 int pipe; 3940 3941 if (I915_HAS_HOTPLUG(dev_priv)) { 3942 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); 3943 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3944 } 3945 3946 I915_WRITE16(HWSTAM, 0xffff); 3947 for_each_pipe(dev_priv, pipe) { 3948 /* Clear enable bits; then clear status bits */ 3949 I915_WRITE(PIPESTAT(pipe), 0); 3950 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); 3951 } 3952 I915_WRITE(IMR, 0xffffffff); 3953 I915_WRITE(IER, 0x0); 3954 3955 I915_WRITE(IIR, I915_READ(IIR)); 3956 } 3957 3958 static void i965_irq_preinstall(struct drm_device * dev) 3959 { 3960 struct drm_i915_private *dev_priv = to_i915(dev); 3961 int pipe; 3962 3963 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); 3964 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3965 3966 I915_WRITE(HWSTAM, 0xeffe); 3967 for_each_pipe(dev_priv, pipe) 3968 I915_WRITE(PIPESTAT(pipe), 0); 3969 I915_WRITE(IMR, 0xffffffff); 3970 I915_WRITE(IER, 0x0); 3971 POSTING_READ(IER); 3972 } 3973 3974 static int i965_irq_postinstall(struct drm_device *dev) 3975 { 3976 struct drm_i915_private *dev_priv = to_i915(dev); 3977 u32 enable_mask; 3978 u32 error_mask; 3979 3980 /* Unmask the interrupts that we always want on. */ 3981 dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT | 3982 I915_DISPLAY_PORT_INTERRUPT | 3983 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3984 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3985 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3986 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | 3987 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 3988 3989 enable_mask = ~dev_priv->irq_mask; 3990 enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3991 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT); 3992 enable_mask |= I915_USER_INTERRUPT; 3993 3994 if (IS_G4X(dev_priv)) 3995 enable_mask |= I915_BSD_USER_INTERRUPT; 3996 3997 /* Interrupt setup is already guaranteed to be single-threaded, this is 3998 * just to make the assert_spin_locked check happy. */ 3999 spin_lock_irq(&dev_priv->irq_lock); 4000 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); 4001 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 4002 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 4003 spin_unlock_irq(&dev_priv->irq_lock); 4004 4005 /* 4006 * Enable some error detection, note the instruction error mask 4007 * bit is reserved, so we leave it masked. 
4008 */ 4009 if (IS_G4X(dev_priv)) { 4010 error_mask = ~(GM45_ERROR_PAGE_TABLE | 4011 GM45_ERROR_MEM_PRIV | 4012 GM45_ERROR_CP_PRIV | 4013 I915_ERROR_MEMORY_REFRESH); 4014 } else { 4015 error_mask = ~(I915_ERROR_PAGE_TABLE | 4016 I915_ERROR_MEMORY_REFRESH); 4017 } 4018 I915_WRITE(EMR, error_mask); 4019 4020 I915_WRITE(IMR, dev_priv->irq_mask); 4021 I915_WRITE(IER, enable_mask); 4022 POSTING_READ(IER); 4023 4024 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); 4025 POSTING_READ(PORT_HOTPLUG_EN); 4026 4027 i915_enable_asle_pipestat(dev_priv); 4028 4029 return 0; 4030 } 4031 4032 static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv) 4033 { 4034 u32 hotplug_en; 4035 4036 lockdep_assert_held(&dev_priv->irq_lock); 4037 4038 /* Note HDMI and DP share hotplug bits */ 4039 /* enable bits are the same for all generations */ 4040 hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915); 4041 /* Programming the CRT detection parameters tends 4042 to generate a spurious hotplug event about three 4043 seconds later. So just do it once. 4044 */ 4045 if (IS_G4X(dev_priv)) 4046 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64; 4047 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; 4048 4049 /* Ignore TV since it's buggy */ 4050 i915_hotplug_interrupt_update_locked(dev_priv, 4051 HOTPLUG_INT_EN_MASK | 4052 CRT_HOTPLUG_VOLTAGE_COMPARE_MASK | 4053 CRT_HOTPLUG_ACTIVATION_PERIOD_64, 4054 hotplug_en); 4055 } 4056 4057 static irqreturn_t i965_irq_handler(int irq, void *arg) 4058 { 4059 struct drm_device *dev = arg; 4060 struct drm_i915_private *dev_priv = to_i915(dev); 4061 u32 iir, new_iir; 4062 u32 pipe_stats[I915_MAX_PIPES]; 4063 int ret = IRQ_NONE, pipe; 4064 u32 flip_mask = 4065 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 4066 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 4067 4068 if (!intel_irqs_enabled(dev_priv)) 4069 return IRQ_NONE; 4070 4071 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 4072 disable_rpm_wakeref_asserts(dev_priv); 4073 4074 iir = I915_READ(IIR); 4075 4076 for (;;) { 4077 bool irq_received = (iir & ~flip_mask) != 0; 4078 bool blc_event = false; 4079 4080 /* Can't rely on pipestat interrupt bit in iir as it might 4081 * have been cleared after the pipestat interrupt was received. 4082 * It doesn't set the bit in iir again, but it still produces 4083 * interrupts (for non-MSI). 4084 */ 4085 spin_lock(&dev_priv->irq_lock); 4086 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 4087 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir); 4088 4089 for_each_pipe(dev_priv, pipe) { 4090 i915_reg_t reg = PIPESTAT(pipe); 4091 pipe_stats[pipe] = I915_READ(reg); 4092 4093 /* 4094 * Clear the PIPE*STAT regs before the IIR 4095 */ 4096 if (pipe_stats[pipe] & 0x8000ffff) { 4097 I915_WRITE(reg, pipe_stats[pipe]); 4098 irq_received = true; 4099 } 4100 } 4101 spin_unlock(&dev_priv->irq_lock); 4102 4103 if (!irq_received) 4104 break; 4105 4106 ret = IRQ_HANDLED; 4107 4108 /* Consume port. 
Then clear IIR or we'll miss events */ 4109 if (iir & I915_DISPLAY_PORT_INTERRUPT) { 4110 u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv); 4111 if (hotplug_status) 4112 i9xx_hpd_irq_handler(dev_priv, hotplug_status); 4113 } 4114 4115 I915_WRITE(IIR, iir & ~flip_mask); 4116 new_iir = I915_READ(IIR); /* Flush posted writes */ 4117 4118 if (iir & I915_USER_INTERRUPT) 4119 notify_ring(dev_priv->engine[RCS]); 4120 if (iir & I915_BSD_USER_INTERRUPT) 4121 notify_ring(dev_priv->engine[VCS]); 4122 4123 for_each_pipe(dev_priv, pipe) { 4124 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS && 4125 i915_handle_vblank(dev_priv, pipe, pipe, iir)) 4126 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe); 4127 4128 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 4129 blc_event = true; 4130 4131 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 4132 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 4133 4134 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 4135 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 4136 } 4137 4138 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 4139 intel_opregion_asle_intr(dev_priv); 4140 4141 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 4142 gmbus_irq_handler(dev_priv); 4143 4144 /* With MSI, interrupts are only generated when iir 4145 * transitions from zero to nonzero. If another bit got 4146 * set while we were handling the existing iir bits, then 4147 * we would never get another interrupt. 4148 * 4149 * This is fine on non-MSI as well, as if we hit this path 4150 * we avoid exiting the interrupt handler only to generate 4151 * another one. 4152 * 4153 * Note that for MSI this could cause a stray interrupt report 4154 * if an interrupt landed in the time between writing IIR and 4155 * the posting read. This should be rare enough to never 4156 * trigger the 99% of 100,000 interrupts test for disabling 4157 * stray interrupts. 4158 */ 4159 iir = new_iir; 4160 } 4161 4162 enable_rpm_wakeref_asserts(dev_priv); 4163 4164 return ret; 4165 } 4166 4167 static void i965_irq_uninstall(struct drm_device * dev) 4168 { 4169 struct drm_i915_private *dev_priv = to_i915(dev); 4170 int pipe; 4171 4172 if (!dev_priv) 4173 return; 4174 4175 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); 4176 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 4177 4178 I915_WRITE(HWSTAM, 0xffffffff); 4179 for_each_pipe(dev_priv, pipe) 4180 I915_WRITE(PIPESTAT(pipe), 0); 4181 I915_WRITE(IMR, 0xffffffff); 4182 I915_WRITE(IER, 0x0); 4183 4184 for_each_pipe(dev_priv, pipe) 4185 I915_WRITE(PIPESTAT(pipe), 4186 I915_READ(PIPESTAT(pipe)) & 0x8000ffff); 4187 I915_WRITE(IIR, I915_READ(IIR)); 4188 } 4189 4190 /** 4191 * intel_irq_init - initializes irq support 4192 * @dev_priv: i915 device instance 4193 * 4194 * This function initializes all the irq support including work items, timers 4195 * and all the vtables. It does not setup the interrupt itself though. 
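 * The hardware interrupt itself is enabled later via intel_irq_install().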
4196 */ 4197 void intel_irq_init(struct drm_i915_private *dev_priv) 4198 { 4199 struct drm_device *dev = &dev_priv->drm; 4200 int i; 4201 4202 intel_hpd_init_work(dev_priv); 4203 4204 INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work); 4205 4206 INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work); 4207 for (i = 0; i < MAX_L3_SLICES; ++i) 4208 dev_priv->l3_parity.remap_info[i] = NULL; 4209 4210 if (HAS_GUC_SCHED(dev_priv)) 4211 dev_priv->pm_guc_events = GEN9_GUC_TO_HOST_INT_EVENT; 4212 4213 /* Let's track the enabled rps events */ 4214 if (IS_VALLEYVIEW(dev_priv)) 4215 /* WaGsvRC0ResidencyMethod:vlv */ 4216 dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED; 4217 else 4218 dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS; 4219 4220 dev_priv->rps.pm_intrmsk_mbz = 0; 4221 4222 /* 4223 * SNB, IVB and HSW can hard hang, and VLV and CHV may hard hang, on a 4224 * looping batchbuffer if GEN6_PM_UP_EI_EXPIRED is masked. 4225 * 4226 * TODO: verify if this can be reproduced on VLV,CHV. 4227 */ 4228 if (INTEL_INFO(dev_priv)->gen <= 7) 4229 dev_priv->rps.pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED; 4230 4231 if (INTEL_INFO(dev_priv)->gen >= 8) 4232 dev_priv->rps.pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC; 4233 4234 if (IS_GEN2(dev_priv)) { 4235 /* Gen2 doesn't have a hardware frame counter */ 4236 dev->max_vblank_count = 0; 4237 } else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) { 4238 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */ 4239 dev->driver->get_vblank_counter = g4x_get_vblank_counter; 4240 } else { 4241 dev->driver->get_vblank_counter = i915_get_vblank_counter; 4242 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ 4243 } 4244 4245 /* 4246 * Opt out of the vblank disable timer on everything except gen2. 4247 * Gen2 doesn't have a hardware frame counter and so depends on 4248 * vblank interrupts to produce sane vblank sequence numbers. 4249 */ 4250 if (!IS_GEN2(dev_priv)) 4251 dev->vblank_disable_immediate = true; 4252 4253 /* Most platforms treat the display irq block as an always-on 4254 * power domain. vlv/chv can disable it at runtime and need 4255 * special care to avoid writing any of the display block registers 4256 * outside of the power domain. We defer setting up the display irqs 4257 * in this case to the runtime pm.
4258 */ 4259 dev_priv->display_irqs_enabled = true; 4260 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 4261 dev_priv->display_irqs_enabled = false; 4262 4263 dev_priv->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD; 4264 4265 dev->driver->get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos; 4266 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos; 4267 4268 if (IS_CHERRYVIEW(dev_priv)) { 4269 dev->driver->irq_handler = cherryview_irq_handler; 4270 dev->driver->irq_preinstall = cherryview_irq_preinstall; 4271 dev->driver->irq_postinstall = cherryview_irq_postinstall; 4272 dev->driver->irq_uninstall = cherryview_irq_uninstall; 4273 dev->driver->enable_vblank = i965_enable_vblank; 4274 dev->driver->disable_vblank = i965_disable_vblank; 4275 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 4276 } else if (IS_VALLEYVIEW(dev_priv)) { 4277 dev->driver->irq_handler = valleyview_irq_handler; 4278 dev->driver->irq_preinstall = valleyview_irq_preinstall; 4279 dev->driver->irq_postinstall = valleyview_irq_postinstall; 4280 dev->driver->irq_uninstall = valleyview_irq_uninstall; 4281 dev->driver->enable_vblank = i965_enable_vblank; 4282 dev->driver->disable_vblank = i965_disable_vblank; 4283 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 4284 } else if (INTEL_INFO(dev_priv)->gen >= 8) { 4285 dev->driver->irq_handler = gen8_irq_handler; 4286 dev->driver->irq_preinstall = gen8_irq_reset; 4287 dev->driver->irq_postinstall = gen8_irq_postinstall; 4288 dev->driver->irq_uninstall = gen8_irq_uninstall; 4289 dev->driver->enable_vblank = gen8_enable_vblank; 4290 dev->driver->disable_vblank = gen8_disable_vblank; 4291 if (IS_GEN9_LP(dev_priv)) 4292 dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup; 4293 else if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv) || 4294 HAS_PCH_CNP(dev_priv)) 4295 dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup; 4296 else 4297 dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup; 4298 } else if (HAS_PCH_SPLIT(dev_priv)) { 4299 dev->driver->irq_handler = ironlake_irq_handler; 4300 dev->driver->irq_preinstall = ironlake_irq_reset; 4301 dev->driver->irq_postinstall = ironlake_irq_postinstall; 4302 dev->driver->irq_uninstall = ironlake_irq_uninstall; 4303 dev->driver->enable_vblank = ironlake_enable_vblank; 4304 dev->driver->disable_vblank = ironlake_disable_vblank; 4305 dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup; 4306 } else { 4307 if (IS_GEN2(dev_priv)) { 4308 dev->driver->irq_preinstall = i8xx_irq_preinstall; 4309 dev->driver->irq_postinstall = i8xx_irq_postinstall; 4310 dev->driver->irq_handler = i8xx_irq_handler; 4311 dev->driver->irq_uninstall = i8xx_irq_uninstall; 4312 dev->driver->enable_vblank = i8xx_enable_vblank; 4313 dev->driver->disable_vblank = i8xx_disable_vblank; 4314 } else if (IS_GEN3(dev_priv)) { 4315 dev->driver->irq_preinstall = i915_irq_preinstall; 4316 dev->driver->irq_postinstall = i915_irq_postinstall; 4317 dev->driver->irq_uninstall = i915_irq_uninstall; 4318 dev->driver->irq_handler = i915_irq_handler; 4319 dev->driver->enable_vblank = i8xx_enable_vblank; 4320 dev->driver->disable_vblank = i8xx_disable_vblank; 4321 } else { 4322 dev->driver->irq_preinstall = i965_irq_preinstall; 4323 dev->driver->irq_postinstall = i965_irq_postinstall; 4324 dev->driver->irq_uninstall = i965_irq_uninstall; 4325 dev->driver->irq_handler = i965_irq_handler; 4326 dev->driver->enable_vblank = i965_enable_vblank; 4327 dev->driver->disable_vblank = i965_disable_vblank; 4328 } 4329 if (I915_HAS_HOTPLUG(dev_priv)) 4330 
dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 4331 } 4332 } 4333 4334 /** 4335 * intel_irq_fini - deinitializes IRQ support 4336 * @i915: i915 device instance 4337 * 4338 * This function deinitializes all the IRQ support. 4339 */ 4340 void intel_irq_fini(struct drm_i915_private *i915) 4341 { 4342 int i; 4343 4344 for (i = 0; i < MAX_L3_SLICES; ++i) 4345 kfree(i915->l3_parity.remap_info[i]); 4346 } 4347 4348 /** 4349 * intel_irq_install - enables the hardware interrupt 4350 * @dev_priv: i915 device instance 4351 * 4352 * This function enables the hardware interrupt handling, but leaves hotplug 4353 * handling disabled. It is called after intel_irq_init(). 4354 * 4355 * In the driver load and resume code we need working interrupts in a few places 4356 * but don't want to deal with the hassle of concurrent probe and hotplug 4357 * workers. Hence the split into this two-stage approach. 4358 */ 4359 int intel_irq_install(struct drm_i915_private *dev_priv) 4360 { 4361 /* 4362 * We enable some interrupt sources in our postinstall hooks, so mark 4363 * interrupts as enabled _before_ actually enabling them to avoid 4364 * special cases in our ordering checks. 4365 */ 4366 dev_priv->pm.irqs_enabled = true; 4367 4368 return drm_irq_install(&dev_priv->drm, dev_priv->drm.pdev->irq); 4369 } 4370 4371 /** 4372 * intel_irq_uninstall - finalizes all irq handling 4373 * @dev_priv: i915 device instance 4374 * 4375 * This stops interrupt and hotplug handling, and unregisters and frees all 4376 * resources acquired in the init functions. 4377 */ 4378 void intel_irq_uninstall(struct drm_i915_private *dev_priv) 4379 { 4380 drm_irq_uninstall(&dev_priv->drm); 4381 intel_hpd_cancel_work(dev_priv); 4382 dev_priv->pm.irqs_enabled = false; 4383 } 4384 4385 /** 4386 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling 4387 * @dev_priv: i915 device instance 4388 * 4389 * This function is used to disable interrupts at runtime, both in the runtime 4390 * pm and the system suspend/resume code. 4391 */ 4392 void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv) 4393 { 4394 dev_priv->drm.driver->irq_uninstall(&dev_priv->drm); 4395 dev_priv->pm.irqs_enabled = false; 4396 synchronize_irq(dev_priv->drm.irq); 4397 } 4398 4399 /** 4400 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling 4401 * @dev_priv: i915 device instance 4402 * 4403 * This function is used to enable interrupts at runtime, both in the runtime 4404 * pm and the system suspend/resume code. 4405 */ 4406 void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv) 4407 { 4408 dev_priv->pm.irqs_enabled = true; 4409 dev_priv->drm.driver->irq_preinstall(&dev_priv->drm); 4410 dev_priv->drm.driver->irq_postinstall(&dev_priv->drm); 4411 } 4412
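/*
 * Illustrative sketch only, not part of the driver: one possible ordering of
 * the entry points above during driver load/unload and runtime suspend/resume.
 * The example_*() wrappers are hypothetical and exist purely to show the
 * sequencing; only the intel_irq_*() and intel_runtime_pm_*_interrupts()
 * calls are real and are defined in this file.
 */
#if 0
static int example_driver_load(struct drm_i915_private *dev_priv)
{
	int ret;

	/* Stage 1: work items, timers and the IRQ vtables; no IRQ yet. */
	intel_irq_init(dev_priv);

	/* Stage 2: request the hardware interrupt; hotplug stays disabled. */
	ret = intel_irq_install(dev_priv);
	if (ret)
		return ret;

	return 0;
}

static void example_runtime_suspend(struct drm_i915_private *dev_priv)
{
	/* Quiesce and synchronize interrupt delivery before powering down. */
	intel_runtime_pm_disable_interrupts(dev_priv);
}

static void example_runtime_resume(struct drm_i915_private *dev_priv)
{
	/* Re-run the preinstall/postinstall hooks on the way back up. */
	intel_runtime_pm_enable_interrupts(dev_priv);
}

static void example_driver_unload(struct drm_i915_private *dev_priv)
{
	/* Stop interrupt and hotplug handling and release the IRQ... */
	intel_irq_uninstall(dev_priv);

	/* ...then deinitialize the remaining IRQ support. */
	intel_irq_fini(dev_priv);
}
#endif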