/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

/**
 * DOC: interrupt handling
 *
 * These functions provide the basic support for enabling and disabling the
 * interrupt handling. There's a lot more functionality in i915_irq.c
 * and related files, but that will be described in separate chapters.
46 */ 47 48 static const u32 hpd_ilk[HPD_NUM_PINS] = { 49 [HPD_PORT_A] = DE_DP_A_HOTPLUG, 50 }; 51 52 static const u32 hpd_ivb[HPD_NUM_PINS] = { 53 [HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB, 54 }; 55 56 static const u32 hpd_bdw[HPD_NUM_PINS] = { 57 [HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG, 58 }; 59 60 static const u32 hpd_ibx[HPD_NUM_PINS] = { 61 [HPD_CRT] = SDE_CRT_HOTPLUG, 62 [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG, 63 [HPD_PORT_B] = SDE_PORTB_HOTPLUG, 64 [HPD_PORT_C] = SDE_PORTC_HOTPLUG, 65 [HPD_PORT_D] = SDE_PORTD_HOTPLUG 66 }; 67 68 static const u32 hpd_cpt[HPD_NUM_PINS] = { 69 [HPD_CRT] = SDE_CRT_HOTPLUG_CPT, 70 [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT, 71 [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT, 72 [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT, 73 [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT 74 }; 75 76 static const u32 hpd_spt[HPD_NUM_PINS] = { 77 [HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT, 78 [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT, 79 [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT, 80 [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT, 81 [HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT 82 }; 83 84 static const u32 hpd_mask_i915[HPD_NUM_PINS] = { 85 [HPD_CRT] = CRT_HOTPLUG_INT_EN, 86 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN, 87 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN, 88 [HPD_PORT_B] = PORTB_HOTPLUG_INT_EN, 89 [HPD_PORT_C] = PORTC_HOTPLUG_INT_EN, 90 [HPD_PORT_D] = PORTD_HOTPLUG_INT_EN 91 }; 92 93 static const u32 hpd_status_g4x[HPD_NUM_PINS] = { 94 [HPD_CRT] = CRT_HOTPLUG_INT_STATUS, 95 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X, 96 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X, 97 [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS, 98 [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS, 99 [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS 100 }; 101 102 static const u32 hpd_status_i915[HPD_NUM_PINS] = { 103 [HPD_CRT] = CRT_HOTPLUG_INT_STATUS, 104 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915, 105 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915, 106 [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS, 107 [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS, 108 [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS 109 }; 110 111 /* BXT hpd list */ 112 static const u32 hpd_bxt[HPD_NUM_PINS] = { 113 [HPD_PORT_A] = BXT_DE_PORT_HP_DDIA, 114 [HPD_PORT_B] = BXT_DE_PORT_HP_DDIB, 115 [HPD_PORT_C] = BXT_DE_PORT_HP_DDIC 116 }; 117 118 /* IIR can theoretically queue up two events. Be paranoid. */ 119 #define GEN8_IRQ_RESET_NDX(type, which) do { \ 120 I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \ 121 POSTING_READ(GEN8_##type##_IMR(which)); \ 122 I915_WRITE(GEN8_##type##_IER(which), 0); \ 123 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \ 124 POSTING_READ(GEN8_##type##_IIR(which)); \ 125 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \ 126 POSTING_READ(GEN8_##type##_IIR(which)); \ 127 } while (0) 128 129 #define GEN5_IRQ_RESET(type) do { \ 130 I915_WRITE(type##IMR, 0xffffffff); \ 131 POSTING_READ(type##IMR); \ 132 I915_WRITE(type##IER, 0); \ 133 I915_WRITE(type##IIR, 0xffffffff); \ 134 POSTING_READ(type##IIR); \ 135 I915_WRITE(type##IIR, 0xffffffff); \ 136 POSTING_READ(type##IIR); \ 137 } while (0) 138 139 /* 140 * We should clear IMR at preinstall/uninstall, and just check at postinstall. 
 */
static void gen5_assert_iir_is_zero(struct drm_i915_private *dev_priv,
				    i915_reg_t reg)
{
	u32 val = I915_READ(reg);

	if (val == 0)
		return;

	WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
	     i915_mmio_reg_offset(reg), val);
	I915_WRITE(reg, 0xffffffff);
	POSTING_READ(reg);
	I915_WRITE(reg, 0xffffffff);
	POSTING_READ(reg);
}

#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
	gen5_assert_iir_is_zero(dev_priv, GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
	I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
} while (0)

#define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
	gen5_assert_iir_is_zero(dev_priv, type##IIR); \
	I915_WRITE(type##IER, (ier_val)); \
	I915_WRITE(type##IMR, (imr_val)); \
	POSTING_READ(type##IMR); \
} while (0)

static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);

/* For display hotplug interrupt */
static inline void
i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
				     uint32_t mask,
				     uint32_t bits)
{
	uint32_t val;

	lockdep_assert_held(&dev_priv->irq_lock);
	WARN_ON(bits & ~mask);

	val = I915_READ(PORT_HOTPLUG_EN);
	val &= ~mask;
	val |= bits;
	I915_WRITE(PORT_HOTPLUG_EN, val);
}

/**
 * i915_hotplug_interrupt_update - update hotplug interrupt enable
 * @dev_priv: driver private
 * @mask: bits to update
 * @bits: bits to enable
 * NOTE: the HPD enable bits are modified both inside and outside
 * of an interrupt context. To prevent the read-modify-write cycles
 * from interfering with each other, these bits are protected by a
 * spinlock. Since this function is usually not called from a context
 * where the lock is held already, this function acquires the lock
 * itself. A non-locking version is also available.
203 */ 204 void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv, 205 uint32_t mask, 206 uint32_t bits) 207 { 208 spin_lock_irq(&dev_priv->irq_lock); 209 i915_hotplug_interrupt_update_locked(dev_priv, mask, bits); 210 spin_unlock_irq(&dev_priv->irq_lock); 211 } 212 213 /** 214 * ilk_update_display_irq - update DEIMR 215 * @dev_priv: driver private 216 * @interrupt_mask: mask of interrupt bits to update 217 * @enabled_irq_mask: mask of interrupt bits to enable 218 */ 219 void ilk_update_display_irq(struct drm_i915_private *dev_priv, 220 uint32_t interrupt_mask, 221 uint32_t enabled_irq_mask) 222 { 223 uint32_t new_val; 224 225 lockdep_assert_held(&dev_priv->irq_lock); 226 227 WARN_ON(enabled_irq_mask & ~interrupt_mask); 228 229 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 230 return; 231 232 new_val = dev_priv->irq_mask; 233 new_val &= ~interrupt_mask; 234 new_val |= (~enabled_irq_mask & interrupt_mask); 235 236 if (new_val != dev_priv->irq_mask) { 237 dev_priv->irq_mask = new_val; 238 I915_WRITE(DEIMR, dev_priv->irq_mask); 239 POSTING_READ(DEIMR); 240 } 241 } 242 243 /** 244 * ilk_update_gt_irq - update GTIMR 245 * @dev_priv: driver private 246 * @interrupt_mask: mask of interrupt bits to update 247 * @enabled_irq_mask: mask of interrupt bits to enable 248 */ 249 static void ilk_update_gt_irq(struct drm_i915_private *dev_priv, 250 uint32_t interrupt_mask, 251 uint32_t enabled_irq_mask) 252 { 253 lockdep_assert_held(&dev_priv->irq_lock); 254 255 WARN_ON(enabled_irq_mask & ~interrupt_mask); 256 257 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 258 return; 259 260 dev_priv->gt_irq_mask &= ~interrupt_mask; 261 dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask); 262 I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 263 } 264 265 void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask) 266 { 267 ilk_update_gt_irq(dev_priv, mask, mask); 268 POSTING_READ_FW(GTIMR); 269 } 270 271 void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask) 272 { 273 ilk_update_gt_irq(dev_priv, mask, 0); 274 } 275 276 static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv) 277 { 278 return INTEL_GEN(dev_priv) >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR; 279 } 280 281 static i915_reg_t gen6_pm_imr(struct drm_i915_private *dev_priv) 282 { 283 return INTEL_GEN(dev_priv) >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR; 284 } 285 286 static i915_reg_t gen6_pm_ier(struct drm_i915_private *dev_priv) 287 { 288 return INTEL_GEN(dev_priv) >= 8 ? 
		GEN8_GT_IER(2) : GEN6_PMIER;
}

/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	lockdep_assert_held(&dev_priv->irq_lock);

	new_val = dev_priv->pm_imr;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_imr) {
		dev_priv->pm_imr = new_val;
		I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_imr);
		POSTING_READ(gen6_pm_imr(dev_priv));
	}
}

void gen6_unmask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	snb_update_pm_irq(dev_priv, mask, mask);
}

static void __gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	snb_update_pm_irq(dev_priv, mask, 0);
}

void gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	__gen6_mask_pm_irq(dev_priv, mask);
}

void gen6_reset_pm_iir(struct drm_i915_private *dev_priv, u32 reset_mask)
{
	i915_reg_t reg = gen6_pm_iir(dev_priv);

	lockdep_assert_held(&dev_priv->irq_lock);

	I915_WRITE(reg, reset_mask);
	I915_WRITE(reg, reset_mask);
	POSTING_READ(reg);
}

void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, u32 enable_mask)
{
	lockdep_assert_held(&dev_priv->irq_lock);

	dev_priv->pm_ier |= enable_mask;
	I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->pm_ier);
	gen6_unmask_pm_irq(dev_priv, enable_mask);
	/* unmask_pm_irq provides an implicit barrier (POSTING_READ) */
}

void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, u32 disable_mask)
{
	lockdep_assert_held(&dev_priv->irq_lock);

	dev_priv->pm_ier &= ~disable_mask;
	__gen6_mask_pm_irq(dev_priv, disable_mask);
	I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->pm_ier);
	/* Though a barrier is missing here, we don't really need one. */
}

void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	gen6_reset_pm_iir(dev_priv, dev_priv->pm_rps_events);
	dev_priv->rps.pm_iir = 0;
	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
{
	if (READ_ONCE(dev_priv->rps.interrupts_enabled))
		return;

	spin_lock_irq(&dev_priv->irq_lock);
	WARN_ON_ONCE(dev_priv->rps.pm_iir);
	WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
	dev_priv->rps.interrupts_enabled = true;
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);

	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
{
	if (!READ_ONCE(dev_priv->rps.interrupts_enabled))
		return;

	spin_lock_irq(&dev_priv->irq_lock);
	dev_priv->rps.interrupts_enabled = false;

	I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0u));

	gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);

	spin_unlock_irq(&dev_priv->irq_lock);
	synchronize_irq(dev_priv->drm.irq);

	/* Now that we will not be generating any more work, flush any
	 * outstanding tasks. As we are called on the RPS idle path,
	 * we will reset the GPU to minimum frequencies, so the current
	 * state of the worker can be discarded.
	 */
	cancel_work_sync(&dev_priv->rps.work);
	gen6_reset_rps_interrupts(dev_priv);
}

void gen9_reset_guc_interrupts(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	gen6_reset_pm_iir(dev_priv, dev_priv->pm_guc_events);
	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	if (!dev_priv->guc.interrupts_enabled) {
		WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) &
			     dev_priv->pm_guc_events);
		dev_priv->guc.interrupts_enabled = true;
		gen6_enable_pm_irq(dev_priv, dev_priv->pm_guc_events);
	}
	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	dev_priv->guc.interrupts_enabled = false;

	gen6_disable_pm_irq(dev_priv, dev_priv->pm_guc_events);

	spin_unlock_irq(&dev_priv->irq_lock);
	synchronize_irq(dev_priv->drm.irq);

	gen9_reset_guc_interrupts(dev_priv);
}

/**
 * bdw_update_port_irq - update DE port interrupt
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
				uint32_t interrupt_mask,
				uint32_t enabled_irq_mask)
{
	uint32_t new_val;
	uint32_t old_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	old_val = I915_READ(GEN8_DE_PORT_IMR);

	new_val = old_val;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != old_val) {
		I915_WRITE(GEN8_DE_PORT_IMR, new_val);
		POSTING_READ(GEN8_DE_PORT_IMR);
	}
}

/**
 * bdw_update_pipe_irq - update DE pipe interrupt
 * @dev_priv: driver private
 * @pipe: pipe whose interrupt to update
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
			 enum pipe pipe,
			 uint32_t interrupt_mask,
			 uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->de_irq_mask[pipe];
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->de_irq_mask[pipe]) {
		dev_priv->de_irq_mask[pipe] = new_val;
		I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
		POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
	}
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
				  uint32_t interrupt_mask,
				  uint32_t enabled_irq_mask)
{
	uint32_t sdeimr = I915_READ(SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |=
(~enabled_irq_mask & interrupt_mask); 525 526 WARN_ON(enabled_irq_mask & ~interrupt_mask); 527 528 lockdep_assert_held(&dev_priv->irq_lock); 529 530 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 531 return; 532 533 I915_WRITE(SDEIMR, sdeimr); 534 POSTING_READ(SDEIMR); 535 } 536 537 static void 538 __i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, 539 u32 enable_mask, u32 status_mask) 540 { 541 i915_reg_t reg = PIPESTAT(pipe); 542 u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK; 543 544 lockdep_assert_held(&dev_priv->irq_lock); 545 WARN_ON(!intel_irqs_enabled(dev_priv)); 546 547 if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK || 548 status_mask & ~PIPESTAT_INT_STATUS_MASK, 549 "pipe %c: enable_mask=0x%x, status_mask=0x%x\n", 550 pipe_name(pipe), enable_mask, status_mask)) 551 return; 552 553 if ((pipestat & enable_mask) == enable_mask) 554 return; 555 556 dev_priv->pipestat_irq_mask[pipe] |= status_mask; 557 558 /* Enable the interrupt, clear any pending status */ 559 pipestat |= enable_mask | status_mask; 560 I915_WRITE(reg, pipestat); 561 POSTING_READ(reg); 562 } 563 564 static void 565 __i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, 566 u32 enable_mask, u32 status_mask) 567 { 568 i915_reg_t reg = PIPESTAT(pipe); 569 u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK; 570 571 lockdep_assert_held(&dev_priv->irq_lock); 572 WARN_ON(!intel_irqs_enabled(dev_priv)); 573 574 if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK || 575 status_mask & ~PIPESTAT_INT_STATUS_MASK, 576 "pipe %c: enable_mask=0x%x, status_mask=0x%x\n", 577 pipe_name(pipe), enable_mask, status_mask)) 578 return; 579 580 if ((pipestat & enable_mask) == 0) 581 return; 582 583 dev_priv->pipestat_irq_mask[pipe] &= ~status_mask; 584 585 pipestat &= ~enable_mask; 586 I915_WRITE(reg, pipestat); 587 POSTING_READ(reg); 588 } 589 590 static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask) 591 { 592 u32 enable_mask = status_mask << 16; 593 594 /* 595 * On pipe A we don't support the PSR interrupt yet, 596 * on pipe B and C the same bit MBZ. 597 */ 598 if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV)) 599 return 0; 600 /* 601 * On pipe B and C we don't support the PSR interrupt yet, on pipe 602 * A the same bit is for perf counters which we don't use either. 
603 */ 604 if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV)) 605 return 0; 606 607 enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS | 608 SPRITE0_FLIP_DONE_INT_EN_VLV | 609 SPRITE1_FLIP_DONE_INT_EN_VLV); 610 if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV) 611 enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV; 612 if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV) 613 enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV; 614 615 return enable_mask; 616 } 617 618 void 619 i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, 620 u32 status_mask) 621 { 622 u32 enable_mask; 623 624 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 625 enable_mask = vlv_get_pipestat_enable_mask(&dev_priv->drm, 626 status_mask); 627 else 628 enable_mask = status_mask << 16; 629 __i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask); 630 } 631 632 void 633 i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, 634 u32 status_mask) 635 { 636 u32 enable_mask; 637 638 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 639 enable_mask = vlv_get_pipestat_enable_mask(&dev_priv->drm, 640 status_mask); 641 else 642 enable_mask = status_mask << 16; 643 __i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask); 644 } 645 646 /** 647 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion 648 * @dev_priv: i915 device private 649 */ 650 static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv) 651 { 652 if (!dev_priv->opregion.asle || !IS_MOBILE(dev_priv)) 653 return; 654 655 spin_lock_irq(&dev_priv->irq_lock); 656 657 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS); 658 if (INTEL_GEN(dev_priv) >= 4) 659 i915_enable_pipestat(dev_priv, PIPE_A, 660 PIPE_LEGACY_BLC_EVENT_STATUS); 661 662 spin_unlock_irq(&dev_priv->irq_lock); 663 } 664 665 /* 666 * This timing diagram depicts the video signal in and 667 * around the vertical blanking period. 668 * 669 * Assumptions about the fictitious mode used in this example: 670 * vblank_start >= 3 671 * vsync_start = vblank_start + 1 672 * vsync_end = vblank_start + 2 673 * vtotal = vblank_start + 3 674 * 675 * start of vblank: 676 * latch double buffered registers 677 * increment frame counter (ctg+) 678 * generate start of vblank interrupt (gen4+) 679 * | 680 * | frame start: 681 * | generate frame start interrupt (aka. vblank interrupt) (gmch) 682 * | may be shifted forward 1-3 extra lines via PIPECONF 683 * | | 684 * | | start of vsync: 685 * | | generate vsync interrupt 686 * | | | 687 * ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx 688 * . \hs/ . \hs/ \hs/ \hs/ . 
\hs/ 689 * ----va---> <-----------------vb--------------------> <--------va------------- 690 * | | <----vs-----> | 691 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2) 692 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+) 693 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi) 694 * | | | 695 * last visible pixel first visible pixel 696 * | increment frame counter (gen3/4) 697 * pixel counter = vblank_start * htotal pixel counter = 0 (gen3/4) 698 * 699 * x = horizontal active 700 * _ = horizontal blanking 701 * hs = horizontal sync 702 * va = vertical active 703 * vb = vertical blanking 704 * vs = vertical sync 705 * vbs = vblank_start (number) 706 * 707 * Summary: 708 * - most events happen at the start of horizontal sync 709 * - frame start happens at the start of horizontal blank, 1-4 lines 710 * (depending on PIPECONF settings) after the start of vblank 711 * - gen3/4 pixel and frame counter are synchronized with the start 712 * of horizontal active on the first line of vertical active 713 */ 714 715 /* Called from drm generic code, passed a 'crtc', which 716 * we use as a pipe index 717 */ 718 static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe) 719 { 720 struct drm_i915_private *dev_priv = to_i915(dev); 721 i915_reg_t high_frame, low_frame; 722 u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal; 723 const struct drm_display_mode *mode = &dev->vblank[pipe].hwmode; 724 unsigned long irqflags; 725 726 htotal = mode->crtc_htotal; 727 hsync_start = mode->crtc_hsync_start; 728 vbl_start = mode->crtc_vblank_start; 729 if (mode->flags & DRM_MODE_FLAG_INTERLACE) 730 vbl_start = DIV_ROUND_UP(vbl_start, 2); 731 732 /* Convert to pixel count */ 733 vbl_start *= htotal; 734 735 /* Start of vblank event occurs at start of hsync */ 736 vbl_start -= htotal - hsync_start; 737 738 high_frame = PIPEFRAME(pipe); 739 low_frame = PIPEFRAMEPIXEL(pipe); 740 741 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 742 743 /* 744 * High & low register fields aren't synchronized, so make sure 745 * we get a low value that's stable across two reads of the high 746 * register. 747 */ 748 do { 749 high1 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK; 750 low = I915_READ_FW(low_frame); 751 high2 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK; 752 } while (high1 != high2); 753 754 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 755 756 high1 >>= PIPE_FRAME_HIGH_SHIFT; 757 pixel = low & PIPE_PIXEL_MASK; 758 low >>= PIPE_FRAME_LOW_SHIFT; 759 760 /* 761 * The frame counter increments at beginning of active. 762 * Cook up a vblank counter by also checking the pixel 763 * counter against vblank start. 764 */ 765 return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff; 766 } 767 768 static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe) 769 { 770 struct drm_i915_private *dev_priv = to_i915(dev); 771 772 return I915_READ(PIPE_FRMCOUNT_G4X(pipe)); 773 } 774 775 /* I915_READ_FW, only for fast reads of display block, no need for forcewake etc. 
*/ 776 static int __intel_get_crtc_scanline(struct intel_crtc *crtc) 777 { 778 struct drm_device *dev = crtc->base.dev; 779 struct drm_i915_private *dev_priv = to_i915(dev); 780 const struct drm_display_mode *mode; 781 struct drm_vblank_crtc *vblank; 782 enum pipe pipe = crtc->pipe; 783 int position, vtotal; 784 785 if (!crtc->active) 786 return -1; 787 788 vblank = &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)]; 789 mode = &vblank->hwmode; 790 791 vtotal = mode->crtc_vtotal; 792 if (mode->flags & DRM_MODE_FLAG_INTERLACE) 793 vtotal /= 2; 794 795 if (IS_GEN2(dev_priv)) 796 position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2; 797 else 798 position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3; 799 800 /* 801 * On HSW, the DSL reg (0x70000) appears to return 0 if we 802 * read it just before the start of vblank. So try it again 803 * so we don't accidentally end up spanning a vblank frame 804 * increment, causing the pipe_update_end() code to squak at us. 805 * 806 * The nature of this problem means we can't simply check the ISR 807 * bit and return the vblank start value; nor can we use the scanline 808 * debug register in the transcoder as it appears to have the same 809 * problem. We may need to extend this to include other platforms, 810 * but so far testing only shows the problem on HSW. 811 */ 812 if (HAS_DDI(dev_priv) && !position) { 813 int i, temp; 814 815 for (i = 0; i < 100; i++) { 816 udelay(1); 817 temp = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3; 818 if (temp != position) { 819 position = temp; 820 break; 821 } 822 } 823 } 824 825 /* 826 * See update_scanline_offset() for the details on the 827 * scanline_offset adjustment. 828 */ 829 return (position + crtc->scanline_offset) % vtotal; 830 } 831 832 static bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe, 833 bool in_vblank_irq, int *vpos, int *hpos, 834 ktime_t *stime, ktime_t *etime, 835 const struct drm_display_mode *mode) 836 { 837 struct drm_i915_private *dev_priv = to_i915(dev); 838 struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv, 839 pipe); 840 int position; 841 int vbl_start, vbl_end, hsync_start, htotal, vtotal; 842 bool in_vbl = true; 843 unsigned long irqflags; 844 845 if (WARN_ON(!mode->crtc_clock)) { 846 DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled " 847 "pipe %c\n", pipe_name(pipe)); 848 return false; 849 } 850 851 htotal = mode->crtc_htotal; 852 hsync_start = mode->crtc_hsync_start; 853 vtotal = mode->crtc_vtotal; 854 vbl_start = mode->crtc_vblank_start; 855 vbl_end = mode->crtc_vblank_end; 856 857 if (mode->flags & DRM_MODE_FLAG_INTERLACE) { 858 vbl_start = DIV_ROUND_UP(vbl_start, 2); 859 vbl_end /= 2; 860 vtotal /= 2; 861 } 862 863 /* 864 * Lock uncore.lock, as we will do multiple timing critical raw 865 * register reads, potentially with preemption disabled, so the 866 * following code must not block on uncore.lock. 867 */ 868 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 869 870 /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */ 871 872 /* Get optional system timestamp before query. */ 873 if (stime) 874 *stime = ktime_get(); 875 876 if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) { 877 /* No obvious pixelcount register. Only query vertical 878 * scanout position from Display scan line register. 879 */ 880 position = __intel_get_crtc_scanline(intel_crtc); 881 } else { 882 /* Have access to pixelcount since start of frame. 
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (I915_READ_FW(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;

		/*
		 * In interlaced modes, the pixel counter counts all pixels,
		 * so one field will have htotal more pixels. To keep the
		 * reported position from jumping backwards when the pixel
		 * counter is beyond the length of the shorter field, just
		 * clamp the position to the length of the shorter field. This
		 * matches how the scanline counter based position works since
		 * the scanline counter doesn't count the two half lines.
		 */
		if (position >= vtotal)
			position = vtotal - 1;

		/*
		 * Start of vblank interrupt is triggered at start of hsync,
		 * just prior to the first active line of vblank. However we
		 * consider lines to start at the leading edge of horizontal
		 * active. So, should we get here before we've crossed into
		 * the horizontal active of the first line in vblank, we would
		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
		 * always add htotal-hsync_start to the current pixel position.
		 */
		position = (position + htotal - hsync_start) % vtotal;
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	in_vbl = position >= vbl_start && position < vbl_end;

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up from vbl_end.
932 */ 933 if (position >= vbl_start) 934 position -= vbl_end; 935 else 936 position += vtotal - vbl_end; 937 938 if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) { 939 *vpos = position; 940 *hpos = 0; 941 } else { 942 *vpos = position / htotal; 943 *hpos = position - (*vpos * htotal); 944 } 945 946 return true; 947 } 948 949 int intel_get_crtc_scanline(struct intel_crtc *crtc) 950 { 951 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 952 unsigned long irqflags; 953 int position; 954 955 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 956 position = __intel_get_crtc_scanline(crtc); 957 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 958 959 return position; 960 } 961 962 static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv) 963 { 964 u32 busy_up, busy_down, max_avg, min_avg; 965 u8 new_delay; 966 967 spin_lock(&mchdev_lock); 968 969 I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS)); 970 971 new_delay = dev_priv->ips.cur_delay; 972 973 I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG); 974 busy_up = I915_READ(RCPREVBSYTUPAVG); 975 busy_down = I915_READ(RCPREVBSYTDNAVG); 976 max_avg = I915_READ(RCBMAXAVG); 977 min_avg = I915_READ(RCBMINAVG); 978 979 /* Handle RCS change request from hw */ 980 if (busy_up > max_avg) { 981 if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay) 982 new_delay = dev_priv->ips.cur_delay - 1; 983 if (new_delay < dev_priv->ips.max_delay) 984 new_delay = dev_priv->ips.max_delay; 985 } else if (busy_down < min_avg) { 986 if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay) 987 new_delay = dev_priv->ips.cur_delay + 1; 988 if (new_delay > dev_priv->ips.min_delay) 989 new_delay = dev_priv->ips.min_delay; 990 } 991 992 if (ironlake_set_drps(dev_priv, new_delay)) 993 dev_priv->ips.cur_delay = new_delay; 994 995 spin_unlock(&mchdev_lock); 996 997 return; 998 } 999 1000 static void notify_ring(struct intel_engine_cs *engine) 1001 { 1002 struct drm_i915_gem_request *rq = NULL; 1003 struct intel_wait *wait; 1004 1005 atomic_inc(&engine->irq_count); 1006 set_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted); 1007 1008 spin_lock(&engine->breadcrumbs.irq_lock); 1009 wait = engine->breadcrumbs.irq_wait; 1010 if (wait) { 1011 /* We use a callback from the dma-fence to submit 1012 * requests after waiting on our own requests. To 1013 * ensure minimum delay in queuing the next request to 1014 * hardware, signal the fence now rather than wait for 1015 * the signaler to be woken up. We still wake up the 1016 * waiter in order to handle the irq-seqno coherency 1017 * issues (we may receive the interrupt before the 1018 * seqno is written, see __i915_request_irq_complete()) 1019 * and to handle coalescing of multiple seqno updates 1020 * and many waiters. 
1021 */ 1022 if (i915_seqno_passed(intel_engine_get_seqno(engine), 1023 wait->seqno) && 1024 !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, 1025 &wait->request->fence.flags)) 1026 rq = i915_gem_request_get(wait->request); 1027 1028 wake_up_process(wait->tsk); 1029 } else { 1030 __intel_engine_disarm_breadcrumbs(engine); 1031 } 1032 spin_unlock(&engine->breadcrumbs.irq_lock); 1033 1034 if (rq) { 1035 dma_fence_signal(&rq->fence); 1036 i915_gem_request_put(rq); 1037 } 1038 1039 trace_intel_engine_notify(engine, wait); 1040 } 1041 1042 static void vlv_c0_read(struct drm_i915_private *dev_priv, 1043 struct intel_rps_ei *ei) 1044 { 1045 ei->ktime = ktime_get_raw(); 1046 ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT); 1047 ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT); 1048 } 1049 1050 void gen6_rps_reset_ei(struct drm_i915_private *dev_priv) 1051 { 1052 memset(&dev_priv->rps.ei, 0, sizeof(dev_priv->rps.ei)); 1053 } 1054 1055 static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir) 1056 { 1057 const struct intel_rps_ei *prev = &dev_priv->rps.ei; 1058 struct intel_rps_ei now; 1059 u32 events = 0; 1060 1061 if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0) 1062 return 0; 1063 1064 vlv_c0_read(dev_priv, &now); 1065 1066 if (prev->ktime) { 1067 u64 time, c0; 1068 u32 render, media; 1069 1070 time = ktime_us_delta(now.ktime, prev->ktime); 1071 1072 time *= dev_priv->czclk_freq; 1073 1074 /* Workload can be split between render + media, 1075 * e.g. SwapBuffers being blitted in X after being rendered in 1076 * mesa. To account for this we need to combine both engines 1077 * into our activity counter. 1078 */ 1079 render = now.render_c0 - prev->render_c0; 1080 media = now.media_c0 - prev->media_c0; 1081 c0 = max(render, media); 1082 c0 *= 1000 * 100 << 8; /* to usecs and scale to threshold% */ 1083 1084 if (c0 > time * dev_priv->rps.up_threshold) 1085 events = GEN6_PM_RP_UP_THRESHOLD; 1086 else if (c0 < time * dev_priv->rps.down_threshold) 1087 events = GEN6_PM_RP_DOWN_THRESHOLD; 1088 } 1089 1090 dev_priv->rps.ei = now; 1091 return events; 1092 } 1093 1094 static void gen6_pm_rps_work(struct work_struct *work) 1095 { 1096 struct drm_i915_private *dev_priv = 1097 container_of(work, struct drm_i915_private, rps.work); 1098 bool client_boost = false; 1099 int new_delay, adj, min, max; 1100 u32 pm_iir = 0; 1101 1102 spin_lock_irq(&dev_priv->irq_lock); 1103 if (dev_priv->rps.interrupts_enabled) { 1104 pm_iir = fetch_and_zero(&dev_priv->rps.pm_iir); 1105 client_boost = atomic_read(&dev_priv->rps.num_waiters); 1106 } 1107 spin_unlock_irq(&dev_priv->irq_lock); 1108 1109 /* Make sure we didn't queue anything we're not going to process. */ 1110 WARN_ON(pm_iir & ~dev_priv->pm_rps_events); 1111 if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost) 1112 goto out; 1113 1114 mutex_lock(&dev_priv->rps.hw_lock); 1115 1116 pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir); 1117 1118 adj = dev_priv->rps.last_adj; 1119 new_delay = dev_priv->rps.cur_freq; 1120 min = dev_priv->rps.min_freq_softlimit; 1121 max = dev_priv->rps.max_freq_softlimit; 1122 if (client_boost) 1123 max = dev_priv->rps.max_freq; 1124 if (client_boost && new_delay < dev_priv->rps.boost_freq) { 1125 new_delay = dev_priv->rps.boost_freq; 1126 adj = 0; 1127 } else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) { 1128 if (adj > 0) 1129 adj *= 2; 1130 else /* CHV needs even encode values */ 1131 adj = IS_CHERRYVIEW(dev_priv) ? 
						2 : 1;

		if (new_delay >= dev_priv->rps.max_freq_softlimit)
			adj = 0;
	} else if (client_boost) {
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
		if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;
		else if (dev_priv->rps.cur_freq > dev_priv->rps.min_freq_softlimit)
			new_delay = dev_priv->rps.min_freq_softlimit;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
		if (adj < 0)
			adj *= 2;
		else /* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;

		if (new_delay <= dev_priv->rps.min_freq_softlimit)
			adj = 0;
	} else { /* unknown event */
		adj = 0;
	}

	dev_priv->rps.last_adj = adj;

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	new_delay += adj;
	new_delay = clamp_t(int, new_delay, min, max);

	if (intel_set_rps(dev_priv, new_delay)) {
		DRM_DEBUG_DRIVER("Failed to set new GPU frequency\n");
		dev_priv->rps.last_adj = 0;
	}

	mutex_unlock(&dev_priv->rps.hw_lock);

out:
	/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->rps.interrupts_enabled)
		gen6_unmask_pm_irq(dev_priv, dev_priv->pm_rps_events);
	spin_unlock_irq(&dev_priv->irq_lock);
}

/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * the same row is more likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	uint32_t misccpctl;
	uint8_t slice = 0;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
1200 */ 1201 mutex_lock(&dev_priv->drm.struct_mutex); 1202 1203 /* If we've screwed up tracking, just let the interrupt fire again */ 1204 if (WARN_ON(!dev_priv->l3_parity.which_slice)) 1205 goto out; 1206 1207 misccpctl = I915_READ(GEN7_MISCCPCTL); 1208 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE); 1209 POSTING_READ(GEN7_MISCCPCTL); 1210 1211 while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) { 1212 i915_reg_t reg; 1213 1214 slice--; 1215 if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv))) 1216 break; 1217 1218 dev_priv->l3_parity.which_slice &= ~(1<<slice); 1219 1220 reg = GEN7_L3CDERRST1(slice); 1221 1222 error_status = I915_READ(reg); 1223 row = GEN7_PARITY_ERROR_ROW(error_status); 1224 bank = GEN7_PARITY_ERROR_BANK(error_status); 1225 subbank = GEN7_PARITY_ERROR_SUBBANK(error_status); 1226 1227 I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE); 1228 POSTING_READ(reg); 1229 1230 parity_event[0] = I915_L3_PARITY_UEVENT "=1"; 1231 parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row); 1232 parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank); 1233 parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank); 1234 parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice); 1235 parity_event[5] = NULL; 1236 1237 kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj, 1238 KOBJ_CHANGE, parity_event); 1239 1240 DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n", 1241 slice, row, bank, subbank); 1242 1243 kfree(parity_event[4]); 1244 kfree(parity_event[3]); 1245 kfree(parity_event[2]); 1246 kfree(parity_event[1]); 1247 } 1248 1249 I915_WRITE(GEN7_MISCCPCTL, misccpctl); 1250 1251 out: 1252 WARN_ON(dev_priv->l3_parity.which_slice); 1253 spin_lock_irq(&dev_priv->irq_lock); 1254 gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv)); 1255 spin_unlock_irq(&dev_priv->irq_lock); 1256 1257 mutex_unlock(&dev_priv->drm.struct_mutex); 1258 } 1259 1260 static void ivybridge_parity_error_irq_handler(struct drm_i915_private *dev_priv, 1261 u32 iir) 1262 { 1263 if (!HAS_L3_DPF(dev_priv)) 1264 return; 1265 1266 spin_lock(&dev_priv->irq_lock); 1267 gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv)); 1268 spin_unlock(&dev_priv->irq_lock); 1269 1270 iir &= GT_PARITY_ERROR(dev_priv); 1271 if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1) 1272 dev_priv->l3_parity.which_slice |= 1 << 1; 1273 1274 if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT) 1275 dev_priv->l3_parity.which_slice |= 1 << 0; 1276 1277 queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work); 1278 } 1279 1280 static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv, 1281 u32 gt_iir) 1282 { 1283 if (gt_iir & GT_RENDER_USER_INTERRUPT) 1284 notify_ring(dev_priv->engine[RCS]); 1285 if (gt_iir & ILK_BSD_USER_INTERRUPT) 1286 notify_ring(dev_priv->engine[VCS]); 1287 } 1288 1289 static void snb_gt_irq_handler(struct drm_i915_private *dev_priv, 1290 u32 gt_iir) 1291 { 1292 if (gt_iir & GT_RENDER_USER_INTERRUPT) 1293 notify_ring(dev_priv->engine[RCS]); 1294 if (gt_iir & GT_BSD_USER_INTERRUPT) 1295 notify_ring(dev_priv->engine[VCS]); 1296 if (gt_iir & GT_BLT_USER_INTERRUPT) 1297 notify_ring(dev_priv->engine[BCS]); 1298 1299 if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT | 1300 GT_BSD_CS_ERROR_INTERRUPT | 1301 GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) 1302 DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir); 1303 1304 if (gt_iir & GT_PARITY_ERROR(dev_priv)) 1305 ivybridge_parity_error_irq_handler(dev_priv, gt_iir); 1306 } 1307 1308 static void 1309 
gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir, int test_shift) 1310 { 1311 bool tasklet = false; 1312 1313 if (iir & (GT_CONTEXT_SWITCH_INTERRUPT << test_shift)) { 1314 if (port_count(&engine->execlist_port[0])) { 1315 __set_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted); 1316 tasklet = true; 1317 } 1318 } 1319 1320 if (iir & (GT_RENDER_USER_INTERRUPT << test_shift)) { 1321 notify_ring(engine); 1322 tasklet |= i915.enable_guc_submission; 1323 } 1324 1325 if (tasklet) 1326 tasklet_hi_schedule(&engine->irq_tasklet); 1327 } 1328 1329 static irqreturn_t gen8_gt_irq_ack(struct drm_i915_private *dev_priv, 1330 u32 master_ctl, 1331 u32 gt_iir[4]) 1332 { 1333 irqreturn_t ret = IRQ_NONE; 1334 1335 if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) { 1336 gt_iir[0] = I915_READ_FW(GEN8_GT_IIR(0)); 1337 if (gt_iir[0]) { 1338 I915_WRITE_FW(GEN8_GT_IIR(0), gt_iir[0]); 1339 ret = IRQ_HANDLED; 1340 } else 1341 DRM_ERROR("The master control interrupt lied (GT0)!\n"); 1342 } 1343 1344 if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) { 1345 gt_iir[1] = I915_READ_FW(GEN8_GT_IIR(1)); 1346 if (gt_iir[1]) { 1347 I915_WRITE_FW(GEN8_GT_IIR(1), gt_iir[1]); 1348 ret = IRQ_HANDLED; 1349 } else 1350 DRM_ERROR("The master control interrupt lied (GT1)!\n"); 1351 } 1352 1353 if (master_ctl & GEN8_GT_VECS_IRQ) { 1354 gt_iir[3] = I915_READ_FW(GEN8_GT_IIR(3)); 1355 if (gt_iir[3]) { 1356 I915_WRITE_FW(GEN8_GT_IIR(3), gt_iir[3]); 1357 ret = IRQ_HANDLED; 1358 } else 1359 DRM_ERROR("The master control interrupt lied (GT3)!\n"); 1360 } 1361 1362 if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) { 1363 gt_iir[2] = I915_READ_FW(GEN8_GT_IIR(2)); 1364 if (gt_iir[2] & (dev_priv->pm_rps_events | 1365 dev_priv->pm_guc_events)) { 1366 I915_WRITE_FW(GEN8_GT_IIR(2), 1367 gt_iir[2] & (dev_priv->pm_rps_events | 1368 dev_priv->pm_guc_events)); 1369 ret = IRQ_HANDLED; 1370 } else 1371 DRM_ERROR("The master control interrupt lied (PM)!\n"); 1372 } 1373 1374 return ret; 1375 } 1376 1377 static void gen8_gt_irq_handler(struct drm_i915_private *dev_priv, 1378 u32 gt_iir[4]) 1379 { 1380 if (gt_iir[0]) { 1381 gen8_cs_irq_handler(dev_priv->engine[RCS], 1382 gt_iir[0], GEN8_RCS_IRQ_SHIFT); 1383 gen8_cs_irq_handler(dev_priv->engine[BCS], 1384 gt_iir[0], GEN8_BCS_IRQ_SHIFT); 1385 } 1386 1387 if (gt_iir[1]) { 1388 gen8_cs_irq_handler(dev_priv->engine[VCS], 1389 gt_iir[1], GEN8_VCS1_IRQ_SHIFT); 1390 gen8_cs_irq_handler(dev_priv->engine[VCS2], 1391 gt_iir[1], GEN8_VCS2_IRQ_SHIFT); 1392 } 1393 1394 if (gt_iir[3]) 1395 gen8_cs_irq_handler(dev_priv->engine[VECS], 1396 gt_iir[3], GEN8_VECS_IRQ_SHIFT); 1397 1398 if (gt_iir[2] & dev_priv->pm_rps_events) 1399 gen6_rps_irq_handler(dev_priv, gt_iir[2]); 1400 1401 if (gt_iir[2] & dev_priv->pm_guc_events) 1402 gen9_guc_irq_handler(dev_priv, gt_iir[2]); 1403 } 1404 1405 static bool bxt_port_hotplug_long_detect(enum port port, u32 val) 1406 { 1407 switch (port) { 1408 case PORT_A: 1409 return val & PORTA_HOTPLUG_LONG_DETECT; 1410 case PORT_B: 1411 return val & PORTB_HOTPLUG_LONG_DETECT; 1412 case PORT_C: 1413 return val & PORTC_HOTPLUG_LONG_DETECT; 1414 default: 1415 return false; 1416 } 1417 } 1418 1419 static bool spt_port_hotplug2_long_detect(enum port port, u32 val) 1420 { 1421 switch (port) { 1422 case PORT_E: 1423 return val & PORTE_HOTPLUG_LONG_DETECT; 1424 default: 1425 return false; 1426 } 1427 } 1428 1429 static bool spt_port_hotplug_long_detect(enum port port, u32 val) 1430 { 1431 switch (port) { 1432 case PORT_A: 1433 return val & PORTA_HOTPLUG_LONG_DETECT; 1434 case PORT_B: 1435 
return val & PORTB_HOTPLUG_LONG_DETECT; 1436 case PORT_C: 1437 return val & PORTC_HOTPLUG_LONG_DETECT; 1438 case PORT_D: 1439 return val & PORTD_HOTPLUG_LONG_DETECT; 1440 default: 1441 return false; 1442 } 1443 } 1444 1445 static bool ilk_port_hotplug_long_detect(enum port port, u32 val) 1446 { 1447 switch (port) { 1448 case PORT_A: 1449 return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT; 1450 default: 1451 return false; 1452 } 1453 } 1454 1455 static bool pch_port_hotplug_long_detect(enum port port, u32 val) 1456 { 1457 switch (port) { 1458 case PORT_B: 1459 return val & PORTB_HOTPLUG_LONG_DETECT; 1460 case PORT_C: 1461 return val & PORTC_HOTPLUG_LONG_DETECT; 1462 case PORT_D: 1463 return val & PORTD_HOTPLUG_LONG_DETECT; 1464 default: 1465 return false; 1466 } 1467 } 1468 1469 static bool i9xx_port_hotplug_long_detect(enum port port, u32 val) 1470 { 1471 switch (port) { 1472 case PORT_B: 1473 return val & PORTB_HOTPLUG_INT_LONG_PULSE; 1474 case PORT_C: 1475 return val & PORTC_HOTPLUG_INT_LONG_PULSE; 1476 case PORT_D: 1477 return val & PORTD_HOTPLUG_INT_LONG_PULSE; 1478 default: 1479 return false; 1480 } 1481 } 1482 1483 /* 1484 * Get a bit mask of pins that have triggered, and which ones may be long. 1485 * This can be called multiple times with the same masks to accumulate 1486 * hotplug detection results from several registers. 1487 * 1488 * Note that the caller is expected to zero out the masks initially. 1489 */ 1490 static void intel_get_hpd_pins(u32 *pin_mask, u32 *long_mask, 1491 u32 hotplug_trigger, u32 dig_hotplug_reg, 1492 const u32 hpd[HPD_NUM_PINS], 1493 bool long_pulse_detect(enum port port, u32 val)) 1494 { 1495 enum port port; 1496 int i; 1497 1498 for_each_hpd_pin(i) { 1499 if ((hpd[i] & hotplug_trigger) == 0) 1500 continue; 1501 1502 *pin_mask |= BIT(i); 1503 1504 port = intel_hpd_pin_to_port(i); 1505 if (port == PORT_NONE) 1506 continue; 1507 1508 if (long_pulse_detect(port, dig_hotplug_reg)) 1509 *long_mask |= BIT(i); 1510 } 1511 1512 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x\n", 1513 hotplug_trigger, dig_hotplug_reg, *pin_mask); 1514 1515 } 1516 1517 static void gmbus_irq_handler(struct drm_i915_private *dev_priv) 1518 { 1519 wake_up_all(&dev_priv->gmbus_wait_queue); 1520 } 1521 1522 static void dp_aux_irq_handler(struct drm_i915_private *dev_priv) 1523 { 1524 wake_up_all(&dev_priv->gmbus_wait_queue); 1525 } 1526 1527 #if defined(CONFIG_DEBUG_FS) 1528 static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 1529 enum pipe pipe, 1530 uint32_t crc0, uint32_t crc1, 1531 uint32_t crc2, uint32_t crc3, 1532 uint32_t crc4) 1533 { 1534 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe]; 1535 struct intel_pipe_crc_entry *entry; 1536 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); 1537 struct drm_driver *driver = dev_priv->drm.driver; 1538 uint32_t crcs[5]; 1539 int head, tail; 1540 1541 spin_lock(&pipe_crc->lock); 1542 if (pipe_crc->source) { 1543 if (!pipe_crc->entries) { 1544 spin_unlock(&pipe_crc->lock); 1545 DRM_DEBUG_KMS("spurious interrupt\n"); 1546 return; 1547 } 1548 1549 head = pipe_crc->head; 1550 tail = pipe_crc->tail; 1551 1552 if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) { 1553 spin_unlock(&pipe_crc->lock); 1554 DRM_ERROR("CRC buffer overflowing\n"); 1555 return; 1556 } 1557 1558 entry = &pipe_crc->entries[head]; 1559 1560 entry->frame = driver->get_vblank_counter(&dev_priv->drm, pipe); 1561 entry->crc[0] = crc0; 1562 entry->crc[1] = crc1; 1563 entry->crc[2] = crc2; 1564 
entry->crc[3] = crc3; 1565 entry->crc[4] = crc4; 1566 1567 head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1); 1568 pipe_crc->head = head; 1569 1570 spin_unlock(&pipe_crc->lock); 1571 1572 wake_up_interruptible(&pipe_crc->wq); 1573 } else { 1574 /* 1575 * For some not yet identified reason, the first CRC is 1576 * bonkers. So let's just wait for the next vblank and read 1577 * out the buggy result. 1578 * 1579 * On CHV sometimes the second CRC is bonkers as well, so 1580 * don't trust that one either. 1581 */ 1582 if (pipe_crc->skipped == 0 || 1583 (IS_CHERRYVIEW(dev_priv) && pipe_crc->skipped == 1)) { 1584 pipe_crc->skipped++; 1585 spin_unlock(&pipe_crc->lock); 1586 return; 1587 } 1588 spin_unlock(&pipe_crc->lock); 1589 crcs[0] = crc0; 1590 crcs[1] = crc1; 1591 crcs[2] = crc2; 1592 crcs[3] = crc3; 1593 crcs[4] = crc4; 1594 drm_crtc_add_crc_entry(&crtc->base, true, 1595 drm_crtc_accurate_vblank_count(&crtc->base), 1596 crcs); 1597 } 1598 } 1599 #else 1600 static inline void 1601 display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 1602 enum pipe pipe, 1603 uint32_t crc0, uint32_t crc1, 1604 uint32_t crc2, uint32_t crc3, 1605 uint32_t crc4) {} 1606 #endif 1607 1608 1609 static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 1610 enum pipe pipe) 1611 { 1612 display_pipe_crc_irq_handler(dev_priv, pipe, 1613 I915_READ(PIPE_CRC_RES_1_IVB(pipe)), 1614 0, 0, 0, 0); 1615 } 1616 1617 static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 1618 enum pipe pipe) 1619 { 1620 display_pipe_crc_irq_handler(dev_priv, pipe, 1621 I915_READ(PIPE_CRC_RES_1_IVB(pipe)), 1622 I915_READ(PIPE_CRC_RES_2_IVB(pipe)), 1623 I915_READ(PIPE_CRC_RES_3_IVB(pipe)), 1624 I915_READ(PIPE_CRC_RES_4_IVB(pipe)), 1625 I915_READ(PIPE_CRC_RES_5_IVB(pipe))); 1626 } 1627 1628 static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 1629 enum pipe pipe) 1630 { 1631 uint32_t res1, res2; 1632 1633 if (INTEL_GEN(dev_priv) >= 3) 1634 res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe)); 1635 else 1636 res1 = 0; 1637 1638 if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) 1639 res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe)); 1640 else 1641 res2 = 0; 1642 1643 display_pipe_crc_irq_handler(dev_priv, pipe, 1644 I915_READ(PIPE_CRC_RES_RED(pipe)), 1645 I915_READ(PIPE_CRC_RES_GREEN(pipe)), 1646 I915_READ(PIPE_CRC_RES_BLUE(pipe)), 1647 res1, res2); 1648 } 1649 1650 /* The RPS events need forcewake, so we add them to a work queue and mask their 1651 * IMR bits until the work is done. Other interrupts can be processed without 1652 * the work queue. 
 */
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	if (pm_iir & dev_priv->pm_rps_events) {
		spin_lock(&dev_priv->irq_lock);
		gen6_mask_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
		if (dev_priv->rps.interrupts_enabled) {
			dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
			schedule_work(&dev_priv->rps.work);
		}
		spin_unlock(&dev_priv->irq_lock);
	}

	if (INTEL_GEN(dev_priv) >= 8)
		return;

	if (HAS_VEBOX(dev_priv)) {
		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
			notify_ring(dev_priv->engine[VECS]);

		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
	}
}

static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 gt_iir)
{
	if (gt_iir & GEN9_GUC_TO_HOST_INT_EVENT) {
		/* Sample the log buffer flush related bits and clear them out
		 * right away from the message identity register to minimize
		 * the probability of losing a flush interrupt when there are
		 * back to back flush interrupts.
		 * There can be a new flush interrupt, for a different log
		 * buffer type (like for ISR), whilst the host is handling one
		 * (for DPC). Since the same bit is used in the message
		 * register for ISR and DPC, it can happen that the GuC sets
		 * the bit for a 2nd interrupt, but the host clears it while
		 * still handling the 1st interrupt.
		 */
		u32 msg, flush;

		msg = I915_READ(SOFT_SCRATCH(15));
		flush = msg & (INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED |
			       INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER);
		if (flush) {
			/* Clear the message bits that are handled */
			I915_WRITE(SOFT_SCRATCH(15), msg & ~flush);

			/* Handle flush interrupt in bottom half */
			queue_work(dev_priv->guc.log.runtime.flush_wq,
				   &dev_priv->guc.log.runtime.flush_work);

			dev_priv->guc.log.flush_interrupt_count++;
		} else {
			/* Leaving the unhandled event bits set will not
			 * re-trigger the interrupt.
			 */
		}
	}
}

static void valleyview_pipestat_irq_ack(struct drm_i915_private *dev_priv,
					u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	int pipe;

	spin_lock(&dev_priv->irq_lock);

	if (!dev_priv->display_irqs_enabled) {
		spin_unlock(&dev_priv->irq_lock);
		return;
	}

	for_each_pipe(dev_priv, pipe) {
		i915_reg_t reg;
		u32 mask, iir_bit = 0;

		/*
		 * PIPESTAT bits get signalled even when the interrupt is
		 * disabled with the mask bits, and some of the status bits do
		 * not generate interrupts at all (like the underrun bit). Hence
		 * we need to be careful that we only handle what we want to
		 * handle.
		 */

		/* fifo underruns are filtered in the underrun handler.
*/ 1737 mask = PIPE_FIFO_UNDERRUN_STATUS; 1738 1739 switch (pipe) { 1740 case PIPE_A: 1741 iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT; 1742 break; 1743 case PIPE_B: 1744 iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; 1745 break; 1746 case PIPE_C: 1747 iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT; 1748 break; 1749 } 1750 if (iir & iir_bit) 1751 mask |= dev_priv->pipestat_irq_mask[pipe]; 1752 1753 if (!mask) 1754 continue; 1755 1756 reg = PIPESTAT(pipe); 1757 mask |= PIPESTAT_INT_ENABLE_MASK; 1758 pipe_stats[pipe] = I915_READ(reg) & mask; 1759 1760 /* 1761 * Clear the PIPE*STAT regs before the IIR 1762 */ 1763 if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS | 1764 PIPESTAT_INT_STATUS_MASK)) 1765 I915_WRITE(reg, pipe_stats[pipe]); 1766 } 1767 spin_unlock(&dev_priv->irq_lock); 1768 } 1769 1770 static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv, 1771 u32 pipe_stats[I915_MAX_PIPES]) 1772 { 1773 enum pipe pipe; 1774 1775 for_each_pipe(dev_priv, pipe) { 1776 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS) 1777 drm_handle_vblank(&dev_priv->drm, pipe); 1778 1779 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 1780 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 1781 1782 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 1783 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 1784 } 1785 1786 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 1787 gmbus_irq_handler(dev_priv); 1788 } 1789 1790 static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv) 1791 { 1792 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); 1793 1794 if (hotplug_status) 1795 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); 1796 1797 return hotplug_status; 1798 } 1799 1800 static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv, 1801 u32 hotplug_status) 1802 { 1803 u32 pin_mask = 0, long_mask = 0; 1804 1805 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || 1806 IS_CHERRYVIEW(dev_priv)) { 1807 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X; 1808 1809 if (hotplug_trigger) { 1810 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, 1811 hotplug_trigger, hpd_status_g4x, 1812 i9xx_port_hotplug_long_detect); 1813 1814 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 1815 } 1816 1817 if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X) 1818 dp_aux_irq_handler(dev_priv); 1819 } else { 1820 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; 1821 1822 if (hotplug_trigger) { 1823 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, 1824 hotplug_trigger, hpd_status_i915, 1825 i9xx_port_hotplug_long_detect); 1826 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 1827 } 1828 } 1829 } 1830 1831 static irqreturn_t valleyview_irq_handler(int irq, void *arg) 1832 { 1833 struct drm_device *dev = arg; 1834 struct drm_i915_private *dev_priv = to_i915(dev); 1835 irqreturn_t ret = IRQ_NONE; 1836 1837 if (!intel_irqs_enabled(dev_priv)) 1838 return IRQ_NONE; 1839 1840 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 1841 disable_rpm_wakeref_asserts(dev_priv); 1842 1843 do { 1844 u32 iir, gt_iir, pm_iir; 1845 u32 pipe_stats[I915_MAX_PIPES] = {}; 1846 u32 hotplug_status = 0; 1847 u32 ier = 0; 1848 1849 gt_iir = I915_READ(GTIIR); 1850 pm_iir = I915_READ(GEN6_PMIIR); 1851 iir = I915_READ(VLV_IIR); 1852 1853 if (gt_iir == 0 && pm_iir == 0 && iir == 0) 1854 break; 1855 1856 ret = IRQ_HANDLED; 1857 1858 /* 1859 * Theory on interrupt generation, based on empirical evidence: 1860 * 1861 * x = ((VLV_IIR & VLV_IER) || 1862 * 
(((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) && 1863 * (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE))); 1864 * 1865 * A CPU interrupt will only be raised when 'x' has a 0->1 edge. 1866 * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to 1867 * guarantee the CPU interrupt will be raised again even if we 1868 * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR 1869 * bits this time around. 1870 */ 1871 I915_WRITE(VLV_MASTER_IER, 0); 1872 ier = I915_READ(VLV_IER); 1873 I915_WRITE(VLV_IER, 0); 1874 1875 if (gt_iir) 1876 I915_WRITE(GTIIR, gt_iir); 1877 if (pm_iir) 1878 I915_WRITE(GEN6_PMIIR, pm_iir); 1879 1880 if (iir & I915_DISPLAY_PORT_INTERRUPT) 1881 hotplug_status = i9xx_hpd_irq_ack(dev_priv); 1882 1883 /* Call regardless, as some status bits might not be 1884 * signalled in iir */ 1885 valleyview_pipestat_irq_ack(dev_priv, iir, pipe_stats); 1886 1887 if (iir & (I915_LPE_PIPE_A_INTERRUPT | 1888 I915_LPE_PIPE_B_INTERRUPT)) 1889 intel_lpe_audio_irq_handler(dev_priv); 1890 1891 /* 1892 * VLV_IIR is single buffered, and reflects the level 1893 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last. 1894 */ 1895 if (iir) 1896 I915_WRITE(VLV_IIR, iir); 1897 1898 I915_WRITE(VLV_IER, ier); 1899 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); 1900 POSTING_READ(VLV_MASTER_IER); 1901 1902 if (gt_iir) 1903 snb_gt_irq_handler(dev_priv, gt_iir); 1904 if (pm_iir) 1905 gen6_rps_irq_handler(dev_priv, pm_iir); 1906 1907 if (hotplug_status) 1908 i9xx_hpd_irq_handler(dev_priv, hotplug_status); 1909 1910 valleyview_pipestat_irq_handler(dev_priv, pipe_stats); 1911 } while (0); 1912 1913 enable_rpm_wakeref_asserts(dev_priv); 1914 1915 return ret; 1916 } 1917 1918 static irqreturn_t cherryview_irq_handler(int irq, void *arg) 1919 { 1920 struct drm_device *dev = arg; 1921 struct drm_i915_private *dev_priv = to_i915(dev); 1922 irqreturn_t ret = IRQ_NONE; 1923 1924 if (!intel_irqs_enabled(dev_priv)) 1925 return IRQ_NONE; 1926 1927 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 1928 disable_rpm_wakeref_asserts(dev_priv); 1929 1930 do { 1931 u32 master_ctl, iir; 1932 u32 gt_iir[4] = {}; 1933 u32 pipe_stats[I915_MAX_PIPES] = {}; 1934 u32 hotplug_status = 0; 1935 u32 ier = 0; 1936 1937 master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL; 1938 iir = I915_READ(VLV_IIR); 1939 1940 if (master_ctl == 0 && iir == 0) 1941 break; 1942 1943 ret = IRQ_HANDLED; 1944 1945 /* 1946 * Theory on interrupt generation, based on empirical evidence: 1947 * 1948 * x = ((VLV_IIR & VLV_IER) || 1949 * ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) && 1950 * (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL))); 1951 * 1952 * A CPU interrupt will only be raised when 'x' has a 0->1 edge. 1953 * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to 1954 * guarantee the CPU interrupt will be raised again even if we 1955 * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL 1956 * bits this time around. 
1957 */ 1958 I915_WRITE(GEN8_MASTER_IRQ, 0); 1959 ier = I915_READ(VLV_IER); 1960 I915_WRITE(VLV_IER, 0); 1961 1962 gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir); 1963 1964 if (iir & I915_DISPLAY_PORT_INTERRUPT) 1965 hotplug_status = i9xx_hpd_irq_ack(dev_priv); 1966 1967 /* Call regardless, as some status bits might not be 1968 * signalled in iir */ 1969 valleyview_pipestat_irq_ack(dev_priv, iir, pipe_stats); 1970 1971 if (iir & (I915_LPE_PIPE_A_INTERRUPT | 1972 I915_LPE_PIPE_B_INTERRUPT | 1973 I915_LPE_PIPE_C_INTERRUPT)) 1974 intel_lpe_audio_irq_handler(dev_priv); 1975 1976 /* 1977 * VLV_IIR is single buffered, and reflects the level 1978 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last. 1979 */ 1980 if (iir) 1981 I915_WRITE(VLV_IIR, iir); 1982 1983 I915_WRITE(VLV_IER, ier); 1984 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 1985 POSTING_READ(GEN8_MASTER_IRQ); 1986 1987 gen8_gt_irq_handler(dev_priv, gt_iir); 1988 1989 if (hotplug_status) 1990 i9xx_hpd_irq_handler(dev_priv, hotplug_status); 1991 1992 valleyview_pipestat_irq_handler(dev_priv, pipe_stats); 1993 } while (0); 1994 1995 enable_rpm_wakeref_asserts(dev_priv); 1996 1997 return ret; 1998 } 1999 2000 static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv, 2001 u32 hotplug_trigger, 2002 const u32 hpd[HPD_NUM_PINS]) 2003 { 2004 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; 2005 2006 /* 2007 * Somehow the PCH doesn't seem to really ack the interrupt to the CPU 2008 * unless we touch the hotplug register, even if hotplug_trigger is 2009 * zero. Not acking leads to "The master control interrupt lied (SDE)!" 2010 * errors. 2011 */ 2012 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); 2013 if (!hotplug_trigger) { 2014 u32 mask = PORTA_HOTPLUG_STATUS_MASK | 2015 PORTD_HOTPLUG_STATUS_MASK | 2016 PORTC_HOTPLUG_STATUS_MASK | 2017 PORTB_HOTPLUG_STATUS_MASK; 2018 dig_hotplug_reg &= ~mask; 2019 } 2020 2021 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); 2022 if (!hotplug_trigger) 2023 return; 2024 2025 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, 2026 dig_hotplug_reg, hpd, 2027 pch_port_hotplug_long_detect); 2028 2029 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2030 } 2031 2032 static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) 2033 { 2034 int pipe; 2035 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK; 2036 2037 ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ibx); 2038 2039 if (pch_iir & SDE_AUDIO_POWER_MASK) { 2040 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >> 2041 SDE_AUDIO_POWER_SHIFT); 2042 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", 2043 port_name(port)); 2044 } 2045 2046 if (pch_iir & SDE_AUX_MASK) 2047 dp_aux_irq_handler(dev_priv); 2048 2049 if (pch_iir & SDE_GMBUS) 2050 gmbus_irq_handler(dev_priv); 2051 2052 if (pch_iir & SDE_AUDIO_HDCP_MASK) 2053 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n"); 2054 2055 if (pch_iir & SDE_AUDIO_TRANS_MASK) 2056 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n"); 2057 2058 if (pch_iir & SDE_POISON) 2059 DRM_ERROR("PCH poison interrupt\n"); 2060 2061 if (pch_iir & SDE_FDI_MASK) 2062 for_each_pipe(dev_priv, pipe) 2063 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 2064 pipe_name(pipe), 2065 I915_READ(FDI_RX_IIR(pipe))); 2066 2067 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE)) 2068 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n"); 2069 2070 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR)) 2071 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n"); 2072 2073 if (pch_iir & 
SDE_TRANSA_FIFO_UNDER) 2074 intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A); 2075 2076 if (pch_iir & SDE_TRANSB_FIFO_UNDER) 2077 intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_B); 2078 } 2079 2080 static void ivb_err_int_handler(struct drm_i915_private *dev_priv) 2081 { 2082 u32 err_int = I915_READ(GEN7_ERR_INT); 2083 enum pipe pipe; 2084 2085 if (err_int & ERR_INT_POISON) 2086 DRM_ERROR("Poison interrupt\n"); 2087 2088 for_each_pipe(dev_priv, pipe) { 2089 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) 2090 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2091 2092 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) { 2093 if (IS_IVYBRIDGE(dev_priv)) 2094 ivb_pipe_crc_irq_handler(dev_priv, pipe); 2095 else 2096 hsw_pipe_crc_irq_handler(dev_priv, pipe); 2097 } 2098 } 2099 2100 I915_WRITE(GEN7_ERR_INT, err_int); 2101 } 2102 2103 static void cpt_serr_int_handler(struct drm_i915_private *dev_priv) 2104 { 2105 u32 serr_int = I915_READ(SERR_INT); 2106 2107 if (serr_int & SERR_INT_POISON) 2108 DRM_ERROR("PCH poison interrupt\n"); 2109 2110 if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN) 2111 intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A); 2112 2113 if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN) 2114 intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_B); 2115 2116 if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN) 2117 intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_C); 2118 2119 I915_WRITE(SERR_INT, serr_int); 2120 } 2121 2122 static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) 2123 { 2124 int pipe; 2125 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; 2126 2127 ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_cpt); 2128 2129 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) { 2130 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> 2131 SDE_AUDIO_POWER_SHIFT_CPT); 2132 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n", 2133 port_name(port)); 2134 } 2135 2136 if (pch_iir & SDE_AUX_MASK_CPT) 2137 dp_aux_irq_handler(dev_priv); 2138 2139 if (pch_iir & SDE_GMBUS_CPT) 2140 gmbus_irq_handler(dev_priv); 2141 2142 if (pch_iir & SDE_AUDIO_CP_REQ_CPT) 2143 DRM_DEBUG_DRIVER("Audio CP request interrupt\n"); 2144 2145 if (pch_iir & SDE_AUDIO_CP_CHG_CPT) 2146 DRM_DEBUG_DRIVER("Audio CP change interrupt\n"); 2147 2148 if (pch_iir & SDE_FDI_MASK_CPT) 2149 for_each_pipe(dev_priv, pipe) 2150 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 2151 pipe_name(pipe), 2152 I915_READ(FDI_RX_IIR(pipe))); 2153 2154 if (pch_iir & SDE_ERROR_CPT) 2155 cpt_serr_int_handler(dev_priv); 2156 } 2157 2158 static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) 2159 { 2160 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT & 2161 ~SDE_PORTE_HOTPLUG_SPT; 2162 u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT; 2163 u32 pin_mask = 0, long_mask = 0; 2164 2165 if (hotplug_trigger) { 2166 u32 dig_hotplug_reg; 2167 2168 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); 2169 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); 2170 2171 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, 2172 dig_hotplug_reg, hpd_spt, 2173 spt_port_hotplug_long_detect); 2174 } 2175 2176 if (hotplug2_trigger) { 2177 u32 dig_hotplug_reg; 2178 2179 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2); 2180 I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg); 2181 2182 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug2_trigger, 2183 dig_hotplug_reg, hpd_spt, 2184 spt_port_hotplug2_long_detect); 2185 } 2186 2187 if (pin_mask) 2188 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2189 2190 if (pch_iir & 
SDE_GMBUS_CPT) 2191 gmbus_irq_handler(dev_priv); 2192 } 2193 2194 static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv, 2195 u32 hotplug_trigger, 2196 const u32 hpd[HPD_NUM_PINS]) 2197 { 2198 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; 2199 2200 dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL); 2201 I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg); 2202 2203 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, 2204 dig_hotplug_reg, hpd, 2205 ilk_port_hotplug_long_detect); 2206 2207 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2208 } 2209 2210 static void ilk_display_irq_handler(struct drm_i915_private *dev_priv, 2211 u32 de_iir) 2212 { 2213 enum pipe pipe; 2214 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG; 2215 2216 if (hotplug_trigger) 2217 ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ilk); 2218 2219 if (de_iir & DE_AUX_CHANNEL_A) 2220 dp_aux_irq_handler(dev_priv); 2221 2222 if (de_iir & DE_GSE) 2223 intel_opregion_asle_intr(dev_priv); 2224 2225 if (de_iir & DE_POISON) 2226 DRM_ERROR("Poison interrupt\n"); 2227 2228 for_each_pipe(dev_priv, pipe) { 2229 if (de_iir & DE_PIPE_VBLANK(pipe)) 2230 drm_handle_vblank(&dev_priv->drm, pipe); 2231 2232 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe)) 2233 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2234 2235 if (de_iir & DE_PIPE_CRC_DONE(pipe)) 2236 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 2237 } 2238 2239 /* check event from PCH */ 2240 if (de_iir & DE_PCH_EVENT) { 2241 u32 pch_iir = I915_READ(SDEIIR); 2242 2243 if (HAS_PCH_CPT(dev_priv)) 2244 cpt_irq_handler(dev_priv, pch_iir); 2245 else 2246 ibx_irq_handler(dev_priv, pch_iir); 2247 2248 /* should clear PCH hotplug event before clear CPU irq */ 2249 I915_WRITE(SDEIIR, pch_iir); 2250 } 2251 2252 if (IS_GEN5(dev_priv) && de_iir & DE_PCU_EVENT) 2253 ironlake_rps_change_irq_handler(dev_priv); 2254 } 2255 2256 static void ivb_display_irq_handler(struct drm_i915_private *dev_priv, 2257 u32 de_iir) 2258 { 2259 enum pipe pipe; 2260 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB; 2261 2262 if (hotplug_trigger) 2263 ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ivb); 2264 2265 if (de_iir & DE_ERR_INT_IVB) 2266 ivb_err_int_handler(dev_priv); 2267 2268 if (de_iir & DE_AUX_CHANNEL_A_IVB) 2269 dp_aux_irq_handler(dev_priv); 2270 2271 if (de_iir & DE_GSE_IVB) 2272 intel_opregion_asle_intr(dev_priv); 2273 2274 for_each_pipe(dev_priv, pipe) { 2275 if (de_iir & (DE_PIPE_VBLANK_IVB(pipe))) 2276 drm_handle_vblank(&dev_priv->drm, pipe); 2277 } 2278 2279 /* check event from PCH */ 2280 if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) { 2281 u32 pch_iir = I915_READ(SDEIIR); 2282 2283 cpt_irq_handler(dev_priv, pch_iir); 2284 2285 /* clear PCH hotplug event before clear CPU irq */ 2286 I915_WRITE(SDEIIR, pch_iir); 2287 } 2288 } 2289 2290 /* 2291 * To handle irqs with the minimum potential races with fresh interrupts, we: 2292 * 1 - Disable Master Interrupt Control. 2293 * 2 - Find the source(s) of the interrupt. 2294 * 3 - Clear the Interrupt Identity bits (IIR). 2295 * 4 - Process the interrupt(s) that had bits set in the IIRs. 2296 * 5 - Re-enable Master Interrupt Control. 
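 * The same find/clear/process order is applied per source below (GTIIR,
 * DEIIR and, on gen6+, GEN6_PMIIR); SDEIIR is read, handled and cleared
 * from within the display handlers once a PCH event bit shows up in DEIIR.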
 */
static irqreturn_t ironlake_irq_handler(int irq, void *arg)
{
        struct drm_device *dev = arg;
        struct drm_i915_private *dev_priv = to_i915(dev);
        u32 de_iir, gt_iir, de_ier, sde_ier = 0;
        irqreturn_t ret = IRQ_NONE;

        if (!intel_irqs_enabled(dev_priv))
                return IRQ_NONE;

        /* IRQs are synced during runtime_suspend, we don't require a wakeref */
        disable_rpm_wakeref_asserts(dev_priv);

        /* disable master interrupt before clearing iir  */
        de_ier = I915_READ(DEIER);
        I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
        POSTING_READ(DEIER);

        /* Disable south interrupts. We'll only write to SDEIIR once, so further
         * interrupts will be stored on its back queue, and then we'll be
         * able to process them after we restore SDEIER (as soon as we restore
         * it, we'll get an interrupt if SDEIIR still has something to process
         * due to its back queue). */
        if (!HAS_PCH_NOP(dev_priv)) {
                sde_ier = I915_READ(SDEIER);
                I915_WRITE(SDEIER, 0);
                POSTING_READ(SDEIER);
        }

        /* Find, clear, then process each source of interrupt */

        gt_iir = I915_READ(GTIIR);
        if (gt_iir) {
                I915_WRITE(GTIIR, gt_iir);
                ret = IRQ_HANDLED;
                if (INTEL_GEN(dev_priv) >= 6)
                        snb_gt_irq_handler(dev_priv, gt_iir);
                else
                        ilk_gt_irq_handler(dev_priv, gt_iir);
        }

        de_iir = I915_READ(DEIIR);
        if (de_iir) {
                I915_WRITE(DEIIR, de_iir);
                ret = IRQ_HANDLED;
                if (INTEL_GEN(dev_priv) >= 7)
                        ivb_display_irq_handler(dev_priv, de_iir);
                else
                        ilk_display_irq_handler(dev_priv, de_iir);
        }

        if (INTEL_GEN(dev_priv) >= 6) {
                u32 pm_iir = I915_READ(GEN6_PMIIR);
                if (pm_iir) {
                        I915_WRITE(GEN6_PMIIR, pm_iir);
                        ret = IRQ_HANDLED;
                        gen6_rps_irq_handler(dev_priv, pm_iir);
                }
        }

        I915_WRITE(DEIER, de_ier);
        POSTING_READ(DEIER);
        if (!HAS_PCH_NOP(dev_priv)) {
                I915_WRITE(SDEIER, sde_ier);
                POSTING_READ(SDEIER);
        }

        /* IRQs are synced during runtime_suspend, we don't require a wakeref */
        enable_rpm_wakeref_asserts(dev_priv);

        return ret;
}

static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv,
                                u32 hotplug_trigger,
                                const u32 hpd[HPD_NUM_PINS])
{
        u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;

        dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
        I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);

        intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
                           dig_hotplug_reg, hpd,
                           bxt_port_hotplug_long_detect);

        intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
}

static irqreturn_t
gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
{
        irqreturn_t ret = IRQ_NONE;
        u32 iir;
        enum pipe pipe;

        if (master_ctl & GEN8_DE_MISC_IRQ) {
                iir = I915_READ(GEN8_DE_MISC_IIR);
                if (iir) {
                        I915_WRITE(GEN8_DE_MISC_IIR, iir);
                        ret = IRQ_HANDLED;
                        if (iir & GEN8_DE_MISC_GSE)
                                intel_opregion_asle_intr(dev_priv);
                        else
                                DRM_ERROR("Unexpected DE Misc interrupt\n");
                }
                else
                        DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
        }

        if (master_ctl & GEN8_DE_PORT_IRQ) {
                iir = I915_READ(GEN8_DE_PORT_IIR);
                if (iir) {
                        u32 tmp_mask;
                        bool found = false;

                        I915_WRITE(GEN8_DE_PORT_IIR, iir);
                        ret = IRQ_HANDLED;

                        tmp_mask = GEN8_AUX_CHANNEL_A;
2418 if (INTEL_GEN(dev_priv) >= 9) 2419 tmp_mask |= GEN9_AUX_CHANNEL_B | 2420 GEN9_AUX_CHANNEL_C | 2421 GEN9_AUX_CHANNEL_D; 2422 2423 if (iir & tmp_mask) { 2424 dp_aux_irq_handler(dev_priv); 2425 found = true; 2426 } 2427 2428 if (IS_GEN9_LP(dev_priv)) { 2429 tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK; 2430 if (tmp_mask) { 2431 bxt_hpd_irq_handler(dev_priv, tmp_mask, 2432 hpd_bxt); 2433 found = true; 2434 } 2435 } else if (IS_BROADWELL(dev_priv)) { 2436 tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG; 2437 if (tmp_mask) { 2438 ilk_hpd_irq_handler(dev_priv, 2439 tmp_mask, hpd_bdw); 2440 found = true; 2441 } 2442 } 2443 2444 if (IS_GEN9_LP(dev_priv) && (iir & BXT_DE_PORT_GMBUS)) { 2445 gmbus_irq_handler(dev_priv); 2446 found = true; 2447 } 2448 2449 if (!found) 2450 DRM_ERROR("Unexpected DE Port interrupt\n"); 2451 } 2452 else 2453 DRM_ERROR("The master control interrupt lied (DE PORT)!\n"); 2454 } 2455 2456 for_each_pipe(dev_priv, pipe) { 2457 u32 fault_errors; 2458 2459 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe))) 2460 continue; 2461 2462 iir = I915_READ(GEN8_DE_PIPE_IIR(pipe)); 2463 if (!iir) { 2464 DRM_ERROR("The master control interrupt lied (DE PIPE)!\n"); 2465 continue; 2466 } 2467 2468 ret = IRQ_HANDLED; 2469 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir); 2470 2471 if (iir & GEN8_PIPE_VBLANK) 2472 drm_handle_vblank(&dev_priv->drm, pipe); 2473 2474 if (iir & GEN8_PIPE_CDCLK_CRC_DONE) 2475 hsw_pipe_crc_irq_handler(dev_priv, pipe); 2476 2477 if (iir & GEN8_PIPE_FIFO_UNDERRUN) 2478 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2479 2480 fault_errors = iir; 2481 if (INTEL_GEN(dev_priv) >= 9) 2482 fault_errors &= GEN9_DE_PIPE_IRQ_FAULT_ERRORS; 2483 else 2484 fault_errors &= GEN8_DE_PIPE_IRQ_FAULT_ERRORS; 2485 2486 if (fault_errors) 2487 DRM_ERROR("Fault errors on pipe %c: 0x%08x\n", 2488 pipe_name(pipe), 2489 fault_errors); 2490 } 2491 2492 if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) && 2493 master_ctl & GEN8_DE_PCH_IRQ) { 2494 /* 2495 * FIXME(BDW): Assume for now that the new interrupt handling 2496 * scheme also closed the SDE interrupt handling race we've seen 2497 * on older pch-split platforms. But this needs testing. 2498 */ 2499 iir = I915_READ(SDEIIR); 2500 if (iir) { 2501 I915_WRITE(SDEIIR, iir); 2502 ret = IRQ_HANDLED; 2503 2504 if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv) || 2505 HAS_PCH_CNP(dev_priv)) 2506 spt_irq_handler(dev_priv, iir); 2507 else 2508 cpt_irq_handler(dev_priv, iir); 2509 } else { 2510 /* 2511 * Like on previous PCH there seems to be something 2512 * fishy going on with forwarding PCH interrupts. 
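                         * ibx_hpd_irq_handler() appears to paper over one
                         * variant of this by always writing PCH_PORT_HOTPLUG,
                         * even when the hotplug trigger bits are zero; if we
                         * still land here, SDEIIR simply read back as zero.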
2513 */ 2514 DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n"); 2515 } 2516 } 2517 2518 return ret; 2519 } 2520 2521 static irqreturn_t gen8_irq_handler(int irq, void *arg) 2522 { 2523 struct drm_device *dev = arg; 2524 struct drm_i915_private *dev_priv = to_i915(dev); 2525 u32 master_ctl; 2526 u32 gt_iir[4] = {}; 2527 irqreturn_t ret; 2528 2529 if (!intel_irqs_enabled(dev_priv)) 2530 return IRQ_NONE; 2531 2532 master_ctl = I915_READ_FW(GEN8_MASTER_IRQ); 2533 master_ctl &= ~GEN8_MASTER_IRQ_CONTROL; 2534 if (!master_ctl) 2535 return IRQ_NONE; 2536 2537 I915_WRITE_FW(GEN8_MASTER_IRQ, 0); 2538 2539 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2540 disable_rpm_wakeref_asserts(dev_priv); 2541 2542 /* Find, clear, then process each source of interrupt */ 2543 ret = gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir); 2544 gen8_gt_irq_handler(dev_priv, gt_iir); 2545 ret |= gen8_de_irq_handler(dev_priv, master_ctl); 2546 2547 I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 2548 POSTING_READ_FW(GEN8_MASTER_IRQ); 2549 2550 enable_rpm_wakeref_asserts(dev_priv); 2551 2552 return ret; 2553 } 2554 2555 struct wedge_me { 2556 struct delayed_work work; 2557 struct drm_i915_private *i915; 2558 const char *name; 2559 }; 2560 2561 static void wedge_me(struct work_struct *work) 2562 { 2563 struct wedge_me *w = container_of(work, typeof(*w), work.work); 2564 2565 dev_err(w->i915->drm.dev, 2566 "%s timed out, cancelling all in-flight rendering.\n", 2567 w->name); 2568 i915_gem_set_wedged(w->i915); 2569 } 2570 2571 static void __init_wedge(struct wedge_me *w, 2572 struct drm_i915_private *i915, 2573 long timeout, 2574 const char *name) 2575 { 2576 w->i915 = i915; 2577 w->name = name; 2578 2579 INIT_DELAYED_WORK_ONSTACK(&w->work, wedge_me); 2580 schedule_delayed_work(&w->work, timeout); 2581 } 2582 2583 static void __fini_wedge(struct wedge_me *w) 2584 { 2585 cancel_delayed_work_sync(&w->work); 2586 destroy_delayed_work_on_stack(&w->work); 2587 w->i915 = NULL; 2588 } 2589 2590 #define i915_wedge_on_timeout(W, DEV, TIMEOUT) \ 2591 for (__init_wedge((W), (DEV), (TIMEOUT), __func__); \ 2592 (W)->i915; \ 2593 __fini_wedge((W))) 2594 2595 /** 2596 * i915_reset_device - do process context error handling work 2597 * @dev_priv: i915 device private 2598 * 2599 * Fire an error uevent so userspace can see that a hang or error 2600 * was detected. 2601 */ 2602 static void i915_reset_device(struct drm_i915_private *dev_priv) 2603 { 2604 struct kobject *kobj = &dev_priv->drm.primary->kdev->kobj; 2605 char *error_event[] = { I915_ERROR_UEVENT "=1", NULL }; 2606 char *reset_event[] = { I915_RESET_UEVENT "=1", NULL }; 2607 char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL }; 2608 struct wedge_me w; 2609 2610 kobject_uevent_env(kobj, KOBJ_CHANGE, error_event); 2611 2612 DRM_DEBUG_DRIVER("resetting chip\n"); 2613 kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event); 2614 2615 /* Use a watchdog to ensure that our reset completes */ 2616 i915_wedge_on_timeout(&w, dev_priv, 5*HZ) { 2617 intel_prepare_reset(dev_priv); 2618 2619 /* Signal that locked waiters should reset the GPU */ 2620 set_bit(I915_RESET_HANDOFF, &dev_priv->gpu_error.flags); 2621 wake_up_all(&dev_priv->gpu_error.wait_queue); 2622 2623 /* Wait for anyone holding the lock to wakeup, without 2624 * blocking indefinitely on struct_mutex. 
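                 * The loop below keeps retrying mutex_trylock() and re-checks
                 * I915_RESET_HANDOFF with a one jiffy timeout, so it exits as
                 * soon as the handoff bit is cleared, whether the reset ran
                 * here or in a waiter that already held struct_mutex.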
2625 */ 2626 do { 2627 if (mutex_trylock(&dev_priv->drm.struct_mutex)) { 2628 i915_reset(dev_priv, 0); 2629 mutex_unlock(&dev_priv->drm.struct_mutex); 2630 } 2631 } while (wait_on_bit_timeout(&dev_priv->gpu_error.flags, 2632 I915_RESET_HANDOFF, 2633 TASK_UNINTERRUPTIBLE, 2634 1)); 2635 2636 intel_finish_reset(dev_priv); 2637 } 2638 2639 if (!test_bit(I915_WEDGED, &dev_priv->gpu_error.flags)) 2640 kobject_uevent_env(kobj, 2641 KOBJ_CHANGE, reset_done_event); 2642 } 2643 2644 static void i915_clear_error_registers(struct drm_i915_private *dev_priv) 2645 { 2646 u32 eir; 2647 2648 if (!IS_GEN2(dev_priv)) 2649 I915_WRITE(PGTBL_ER, I915_READ(PGTBL_ER)); 2650 2651 if (INTEL_GEN(dev_priv) < 4) 2652 I915_WRITE(IPEIR, I915_READ(IPEIR)); 2653 else 2654 I915_WRITE(IPEIR_I965, I915_READ(IPEIR_I965)); 2655 2656 I915_WRITE(EIR, I915_READ(EIR)); 2657 eir = I915_READ(EIR); 2658 if (eir) { 2659 /* 2660 * some errors might have become stuck, 2661 * mask them. 2662 */ 2663 DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masking\n", eir); 2664 I915_WRITE(EMR, I915_READ(EMR) | eir); 2665 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 2666 } 2667 } 2668 2669 /** 2670 * i915_handle_error - handle a gpu error 2671 * @dev_priv: i915 device private 2672 * @engine_mask: mask representing engines that are hung 2673 * @fmt: Error message format string 2674 * 2675 * Do some basic checking of register state at error time and 2676 * dump it to the syslog. Also call i915_capture_error_state() to make 2677 * sure we get a record and make it available in debugfs. Fire a uevent 2678 * so userspace knows something bad happened (should trigger collection 2679 * of a ring dump etc.). 2680 */ 2681 void i915_handle_error(struct drm_i915_private *dev_priv, 2682 u32 engine_mask, 2683 const char *fmt, ...) 2684 { 2685 struct intel_engine_cs *engine; 2686 unsigned int tmp; 2687 va_list args; 2688 char error_msg[80]; 2689 2690 va_start(args, fmt); 2691 vscnprintf(error_msg, sizeof(error_msg), fmt, args); 2692 va_end(args); 2693 2694 /* 2695 * In most cases it's guaranteed that we get here with an RPM 2696 * reference held, for example because there is a pending GPU 2697 * request that won't finish until the reset is done. This 2698 * isn't the case at least when we get here by doing a 2699 * simulated reset via debugfs, so get an RPM reference. 2700 */ 2701 intel_runtime_pm_get(dev_priv); 2702 2703 i915_capture_error_state(dev_priv, engine_mask, error_msg); 2704 i915_clear_error_registers(dev_priv); 2705 2706 /* 2707 * Try engine reset when available. We fall back to full reset if 2708 * single reset fails. 2709 */ 2710 if (intel_has_reset_engine(dev_priv)) { 2711 for_each_engine_masked(engine, dev_priv, engine_mask, tmp) { 2712 BUILD_BUG_ON(I915_RESET_MODESET >= I915_RESET_ENGINE); 2713 if (test_and_set_bit(I915_RESET_ENGINE + engine->id, 2714 &dev_priv->gpu_error.flags)) 2715 continue; 2716 2717 if (i915_reset_engine(engine, 0) == 0) 2718 engine_mask &= ~intel_engine_flag(engine); 2719 2720 clear_bit(I915_RESET_ENGINE + engine->id, 2721 &dev_priv->gpu_error.flags); 2722 wake_up_bit(&dev_priv->gpu_error.flags, 2723 I915_RESET_ENGINE + engine->id); 2724 } 2725 } 2726 2727 if (!engine_mask) 2728 goto out; 2729 2730 /* Full reset needs the mutex, stop any other user trying to do so. 
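         * I915_RESET_BACKOFF is the arbiter here: the first caller to set it
         * owns the device reset, everyone else waits on reset_queue until the
         * bit is cleared again at the end of this function.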
*/ 2731 if (test_and_set_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags)) { 2732 wait_event(dev_priv->gpu_error.reset_queue, 2733 !test_bit(I915_RESET_BACKOFF, 2734 &dev_priv->gpu_error.flags)); 2735 goto out; 2736 } 2737 2738 /* Prevent any other reset-engine attempt. */ 2739 for_each_engine(engine, dev_priv, tmp) { 2740 while (test_and_set_bit(I915_RESET_ENGINE + engine->id, 2741 &dev_priv->gpu_error.flags)) 2742 wait_on_bit(&dev_priv->gpu_error.flags, 2743 I915_RESET_ENGINE + engine->id, 2744 TASK_UNINTERRUPTIBLE); 2745 } 2746 2747 i915_reset_device(dev_priv); 2748 2749 for_each_engine(engine, dev_priv, tmp) { 2750 clear_bit(I915_RESET_ENGINE + engine->id, 2751 &dev_priv->gpu_error.flags); 2752 } 2753 2754 clear_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags); 2755 wake_up_all(&dev_priv->gpu_error.reset_queue); 2756 2757 out: 2758 intel_runtime_pm_put(dev_priv); 2759 } 2760 2761 /* Called from drm generic code, passed 'crtc' which 2762 * we use as a pipe index 2763 */ 2764 static int i8xx_enable_vblank(struct drm_device *dev, unsigned int pipe) 2765 { 2766 struct drm_i915_private *dev_priv = to_i915(dev); 2767 unsigned long irqflags; 2768 2769 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2770 i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS); 2771 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2772 2773 return 0; 2774 } 2775 2776 static int i965_enable_vblank(struct drm_device *dev, unsigned int pipe) 2777 { 2778 struct drm_i915_private *dev_priv = to_i915(dev); 2779 unsigned long irqflags; 2780 2781 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2782 i915_enable_pipestat(dev_priv, pipe, 2783 PIPE_START_VBLANK_INTERRUPT_STATUS); 2784 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2785 2786 return 0; 2787 } 2788 2789 static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe) 2790 { 2791 struct drm_i915_private *dev_priv = to_i915(dev); 2792 unsigned long irqflags; 2793 uint32_t bit = INTEL_GEN(dev_priv) >= 7 ? 
2794 DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe); 2795 2796 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2797 ilk_enable_display_irq(dev_priv, bit); 2798 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2799 2800 return 0; 2801 } 2802 2803 static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe) 2804 { 2805 struct drm_i915_private *dev_priv = to_i915(dev); 2806 unsigned long irqflags; 2807 2808 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2809 bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK); 2810 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2811 2812 return 0; 2813 } 2814 2815 /* Called from drm generic code, passed 'crtc' which 2816 * we use as a pipe index 2817 */ 2818 static void i8xx_disable_vblank(struct drm_device *dev, unsigned int pipe) 2819 { 2820 struct drm_i915_private *dev_priv = to_i915(dev); 2821 unsigned long irqflags; 2822 2823 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2824 i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS); 2825 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2826 } 2827 2828 static void i965_disable_vblank(struct drm_device *dev, unsigned int pipe) 2829 { 2830 struct drm_i915_private *dev_priv = to_i915(dev); 2831 unsigned long irqflags; 2832 2833 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2834 i915_disable_pipestat(dev_priv, pipe, 2835 PIPE_START_VBLANK_INTERRUPT_STATUS); 2836 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2837 } 2838 2839 static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe) 2840 { 2841 struct drm_i915_private *dev_priv = to_i915(dev); 2842 unsigned long irqflags; 2843 uint32_t bit = INTEL_GEN(dev_priv) >= 7 ? 2844 DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe); 2845 2846 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2847 ilk_disable_display_irq(dev_priv, bit); 2848 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2849 } 2850 2851 static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe) 2852 { 2853 struct drm_i915_private *dev_priv = to_i915(dev); 2854 unsigned long irqflags; 2855 2856 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2857 bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK); 2858 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2859 } 2860 2861 static void ibx_irq_reset(struct drm_i915_private *dev_priv) 2862 { 2863 if (HAS_PCH_NOP(dev_priv)) 2864 return; 2865 2866 GEN5_IRQ_RESET(SDE); 2867 2868 if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv)) 2869 I915_WRITE(SERR_INT, 0xffffffff); 2870 } 2871 2872 /* 2873 * SDEIER is also touched by the interrupt handler to work around missed PCH 2874 * interrupts. Hence we can't update it after the interrupt handler is enabled - 2875 * instead we unconditionally enable all PCH interrupt sources here, but then 2876 * only unmask them as needed with SDEIMR. 2877 * 2878 * This function needs to be called before interrupts are enabled. 
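 * The WARN_ON(I915_READ(SDEIER) != 0) below documents that expectation;
 * once this has run, per-source masking is done purely via SDEIMR (see
 * ibx_irq_postinstall()).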
2879 */ 2880 static void ibx_irq_pre_postinstall(struct drm_device *dev) 2881 { 2882 struct drm_i915_private *dev_priv = to_i915(dev); 2883 2884 if (HAS_PCH_NOP(dev_priv)) 2885 return; 2886 2887 WARN_ON(I915_READ(SDEIER) != 0); 2888 I915_WRITE(SDEIER, 0xffffffff); 2889 POSTING_READ(SDEIER); 2890 } 2891 2892 static void gen5_gt_irq_reset(struct drm_i915_private *dev_priv) 2893 { 2894 GEN5_IRQ_RESET(GT); 2895 if (INTEL_GEN(dev_priv) >= 6) 2896 GEN5_IRQ_RESET(GEN6_PM); 2897 } 2898 2899 static void vlv_display_irq_reset(struct drm_i915_private *dev_priv) 2900 { 2901 enum pipe pipe; 2902 2903 if (IS_CHERRYVIEW(dev_priv)) 2904 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV); 2905 else 2906 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK); 2907 2908 i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0); 2909 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 2910 2911 for_each_pipe(dev_priv, pipe) { 2912 I915_WRITE(PIPESTAT(pipe), 2913 PIPE_FIFO_UNDERRUN_STATUS | 2914 PIPESTAT_INT_STATUS_MASK); 2915 dev_priv->pipestat_irq_mask[pipe] = 0; 2916 } 2917 2918 GEN5_IRQ_RESET(VLV_); 2919 dev_priv->irq_mask = ~0; 2920 } 2921 2922 static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv) 2923 { 2924 u32 pipestat_mask; 2925 u32 enable_mask; 2926 enum pipe pipe; 2927 2928 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV | 2929 PIPE_CRC_DONE_INTERRUPT_STATUS; 2930 2931 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); 2932 for_each_pipe(dev_priv, pipe) 2933 i915_enable_pipestat(dev_priv, pipe, pipestat_mask); 2934 2935 enable_mask = I915_DISPLAY_PORT_INTERRUPT | 2936 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 2937 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 2938 I915_LPE_PIPE_A_INTERRUPT | 2939 I915_LPE_PIPE_B_INTERRUPT; 2940 2941 if (IS_CHERRYVIEW(dev_priv)) 2942 enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT | 2943 I915_LPE_PIPE_C_INTERRUPT; 2944 2945 WARN_ON(dev_priv->irq_mask != ~0); 2946 2947 dev_priv->irq_mask = ~enable_mask; 2948 2949 GEN5_IRQ_INIT(VLV_, dev_priv->irq_mask, enable_mask); 2950 } 2951 2952 /* drm_dma.h hooks 2953 */ 2954 static void ironlake_irq_reset(struct drm_device *dev) 2955 { 2956 struct drm_i915_private *dev_priv = to_i915(dev); 2957 2958 I915_WRITE(HWSTAM, 0xffffffff); 2959 2960 GEN5_IRQ_RESET(DE); 2961 if (IS_GEN7(dev_priv)) 2962 I915_WRITE(GEN7_ERR_INT, 0xffffffff); 2963 2964 gen5_gt_irq_reset(dev_priv); 2965 2966 ibx_irq_reset(dev_priv); 2967 } 2968 2969 static void valleyview_irq_preinstall(struct drm_device *dev) 2970 { 2971 struct drm_i915_private *dev_priv = to_i915(dev); 2972 2973 I915_WRITE(VLV_MASTER_IER, 0); 2974 POSTING_READ(VLV_MASTER_IER); 2975 2976 gen5_gt_irq_reset(dev_priv); 2977 2978 spin_lock_irq(&dev_priv->irq_lock); 2979 if (dev_priv->display_irqs_enabled) 2980 vlv_display_irq_reset(dev_priv); 2981 spin_unlock_irq(&dev_priv->irq_lock); 2982 } 2983 2984 static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv) 2985 { 2986 GEN8_IRQ_RESET_NDX(GT, 0); 2987 GEN8_IRQ_RESET_NDX(GT, 1); 2988 GEN8_IRQ_RESET_NDX(GT, 2); 2989 GEN8_IRQ_RESET_NDX(GT, 3); 2990 } 2991 2992 static void gen8_irq_reset(struct drm_device *dev) 2993 { 2994 struct drm_i915_private *dev_priv = to_i915(dev); 2995 int pipe; 2996 2997 I915_WRITE(GEN8_MASTER_IRQ, 0); 2998 POSTING_READ(GEN8_MASTER_IRQ); 2999 3000 gen8_gt_irq_reset(dev_priv); 3001 3002 for_each_pipe(dev_priv, pipe) 3003 if (intel_display_power_is_enabled(dev_priv, 3004 POWER_DOMAIN_PIPE(pipe))) 3005 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe); 3006 3007 GEN5_IRQ_RESET(GEN8_DE_PORT_); 3008 
GEN5_IRQ_RESET(GEN8_DE_MISC_); 3009 GEN5_IRQ_RESET(GEN8_PCU_); 3010 3011 if (HAS_PCH_SPLIT(dev_priv)) 3012 ibx_irq_reset(dev_priv); 3013 } 3014 3015 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv, 3016 u8 pipe_mask) 3017 { 3018 uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN; 3019 enum pipe pipe; 3020 3021 spin_lock_irq(&dev_priv->irq_lock); 3022 for_each_pipe_masked(dev_priv, pipe, pipe_mask) 3023 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe, 3024 dev_priv->de_irq_mask[pipe], 3025 ~dev_priv->de_irq_mask[pipe] | extra_ier); 3026 spin_unlock_irq(&dev_priv->irq_lock); 3027 } 3028 3029 void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv, 3030 u8 pipe_mask) 3031 { 3032 enum pipe pipe; 3033 3034 spin_lock_irq(&dev_priv->irq_lock); 3035 for_each_pipe_masked(dev_priv, pipe, pipe_mask) 3036 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe); 3037 spin_unlock_irq(&dev_priv->irq_lock); 3038 3039 /* make sure we're done processing display irqs */ 3040 synchronize_irq(dev_priv->drm.irq); 3041 } 3042 3043 static void cherryview_irq_preinstall(struct drm_device *dev) 3044 { 3045 struct drm_i915_private *dev_priv = to_i915(dev); 3046 3047 I915_WRITE(GEN8_MASTER_IRQ, 0); 3048 POSTING_READ(GEN8_MASTER_IRQ); 3049 3050 gen8_gt_irq_reset(dev_priv); 3051 3052 GEN5_IRQ_RESET(GEN8_PCU_); 3053 3054 spin_lock_irq(&dev_priv->irq_lock); 3055 if (dev_priv->display_irqs_enabled) 3056 vlv_display_irq_reset(dev_priv); 3057 spin_unlock_irq(&dev_priv->irq_lock); 3058 } 3059 3060 static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv, 3061 const u32 hpd[HPD_NUM_PINS]) 3062 { 3063 struct intel_encoder *encoder; 3064 u32 enabled_irqs = 0; 3065 3066 for_each_intel_encoder(&dev_priv->drm, encoder) 3067 if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED) 3068 enabled_irqs |= hpd[encoder->hpd_pin]; 3069 3070 return enabled_irqs; 3071 } 3072 3073 static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv) 3074 { 3075 u32 hotplug; 3076 3077 /* 3078 * Enable digital hotplug on the PCH, and configure the DP short pulse 3079 * duration to 2ms (which is the minimum in the Display Port spec). 3080 * The pulse duration bits are reserved on LPT+. 3081 */ 3082 hotplug = I915_READ(PCH_PORT_HOTPLUG); 3083 hotplug &= ~(PORTB_PULSE_DURATION_MASK | 3084 PORTC_PULSE_DURATION_MASK | 3085 PORTD_PULSE_DURATION_MASK); 3086 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms; 3087 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms; 3088 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms; 3089 /* 3090 * When CPU and PCH are on the same package, port A 3091 * HPD must be enabled in both north and south. 
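         * That is why the LPT-LP (on-package PCH) case below also sets
         * PORTA_HOTPLUG_ENABLE in the south register, in addition to the
         * CPU-side (north) enable done in ilk_hpd_detection_setup().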
3092 */ 3093 if (HAS_PCH_LPT_LP(dev_priv)) 3094 hotplug |= PORTA_HOTPLUG_ENABLE; 3095 I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 3096 } 3097 3098 static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv) 3099 { 3100 u32 hotplug_irqs, enabled_irqs; 3101 3102 if (HAS_PCH_IBX(dev_priv)) { 3103 hotplug_irqs = SDE_HOTPLUG_MASK; 3104 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ibx); 3105 } else { 3106 hotplug_irqs = SDE_HOTPLUG_MASK_CPT; 3107 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_cpt); 3108 } 3109 3110 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 3111 3112 ibx_hpd_detection_setup(dev_priv); 3113 } 3114 3115 static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv) 3116 { 3117 u32 hotplug; 3118 3119 /* Enable digital hotplug on the PCH */ 3120 hotplug = I915_READ(PCH_PORT_HOTPLUG); 3121 hotplug |= PORTA_HOTPLUG_ENABLE | 3122 PORTB_HOTPLUG_ENABLE | 3123 PORTC_HOTPLUG_ENABLE | 3124 PORTD_HOTPLUG_ENABLE; 3125 I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 3126 3127 hotplug = I915_READ(PCH_PORT_HOTPLUG2); 3128 hotplug |= PORTE_HOTPLUG_ENABLE; 3129 I915_WRITE(PCH_PORT_HOTPLUG2, hotplug); 3130 } 3131 3132 static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv) 3133 { 3134 u32 hotplug_irqs, enabled_irqs; 3135 3136 hotplug_irqs = SDE_HOTPLUG_MASK_SPT; 3137 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_spt); 3138 3139 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 3140 3141 spt_hpd_detection_setup(dev_priv); 3142 } 3143 3144 static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv) 3145 { 3146 u32 hotplug; 3147 3148 /* 3149 * Enable digital hotplug on the CPU, and configure the DP short pulse 3150 * duration to 2ms (which is the minimum in the Display Port spec) 3151 * The pulse duration bits are reserved on HSW+. 3152 */ 3153 hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL); 3154 hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK; 3155 hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE | 3156 DIGITAL_PORTA_PULSE_DURATION_2ms; 3157 I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug); 3158 } 3159 3160 static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv) 3161 { 3162 u32 hotplug_irqs, enabled_irqs; 3163 3164 if (INTEL_GEN(dev_priv) >= 8) { 3165 hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG; 3166 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bdw); 3167 3168 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs); 3169 } else if (INTEL_GEN(dev_priv) >= 7) { 3170 hotplug_irqs = DE_DP_A_HOTPLUG_IVB; 3171 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ivb); 3172 3173 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs); 3174 } else { 3175 hotplug_irqs = DE_DP_A_HOTPLUG; 3176 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ilk); 3177 3178 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs); 3179 } 3180 3181 ilk_hpd_detection_setup(dev_priv); 3182 3183 ibx_hpd_irq_setup(dev_priv); 3184 } 3185 3186 static void __bxt_hpd_detection_setup(struct drm_i915_private *dev_priv, 3187 u32 enabled_irqs) 3188 { 3189 u32 hotplug; 3190 3191 hotplug = I915_READ(PCH_PORT_HOTPLUG); 3192 hotplug |= PORTA_HOTPLUG_ENABLE | 3193 PORTB_HOTPLUG_ENABLE | 3194 PORTC_HOTPLUG_ENABLE; 3195 3196 DRM_DEBUG_KMS("Invert bit setting: hp_ctl:%x hp_port:%x\n", 3197 hotplug, enabled_irqs); 3198 hotplug &= ~BXT_DDI_HPD_INVERT_MASK; 3199 3200 /* 3201 * For BXT invert bit has to be set based on AOB design 3202 * for HPD detection logic, update it based on VBT fields. 
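         * Each DDI's invert bit is set below only when that port's hotplug
         * irq is enabled here and intel_bios_is_port_hpd_inverted() reports
         * the port as inverted in the VBT for this board.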
3203 */ 3204 if ((enabled_irqs & BXT_DE_PORT_HP_DDIA) && 3205 intel_bios_is_port_hpd_inverted(dev_priv, PORT_A)) 3206 hotplug |= BXT_DDIA_HPD_INVERT; 3207 if ((enabled_irqs & BXT_DE_PORT_HP_DDIB) && 3208 intel_bios_is_port_hpd_inverted(dev_priv, PORT_B)) 3209 hotplug |= BXT_DDIB_HPD_INVERT; 3210 if ((enabled_irqs & BXT_DE_PORT_HP_DDIC) && 3211 intel_bios_is_port_hpd_inverted(dev_priv, PORT_C)) 3212 hotplug |= BXT_DDIC_HPD_INVERT; 3213 3214 I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 3215 } 3216 3217 static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv) 3218 { 3219 __bxt_hpd_detection_setup(dev_priv, BXT_DE_PORT_HOTPLUG_MASK); 3220 } 3221 3222 static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv) 3223 { 3224 u32 hotplug_irqs, enabled_irqs; 3225 3226 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bxt); 3227 hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK; 3228 3229 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs); 3230 3231 __bxt_hpd_detection_setup(dev_priv, enabled_irqs); 3232 } 3233 3234 static void ibx_irq_postinstall(struct drm_device *dev) 3235 { 3236 struct drm_i915_private *dev_priv = to_i915(dev); 3237 u32 mask; 3238 3239 if (HAS_PCH_NOP(dev_priv)) 3240 return; 3241 3242 if (HAS_PCH_IBX(dev_priv)) 3243 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON; 3244 else 3245 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT; 3246 3247 gen5_assert_iir_is_zero(dev_priv, SDEIIR); 3248 I915_WRITE(SDEIMR, ~mask); 3249 3250 if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) || 3251 HAS_PCH_LPT(dev_priv)) 3252 ibx_hpd_detection_setup(dev_priv); 3253 else 3254 spt_hpd_detection_setup(dev_priv); 3255 } 3256 3257 static void gen5_gt_irq_postinstall(struct drm_device *dev) 3258 { 3259 struct drm_i915_private *dev_priv = to_i915(dev); 3260 u32 pm_irqs, gt_irqs; 3261 3262 pm_irqs = gt_irqs = 0; 3263 3264 dev_priv->gt_irq_mask = ~0; 3265 if (HAS_L3_DPF(dev_priv)) { 3266 /* L3 parity interrupt is always unmasked. */ 3267 dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev_priv); 3268 gt_irqs |= GT_PARITY_ERROR(dev_priv); 3269 } 3270 3271 gt_irqs |= GT_RENDER_USER_INTERRUPT; 3272 if (IS_GEN5(dev_priv)) { 3273 gt_irqs |= ILK_BSD_USER_INTERRUPT; 3274 } else { 3275 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT; 3276 } 3277 3278 GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs); 3279 3280 if (INTEL_GEN(dev_priv) >= 6) { 3281 /* 3282 * RPS interrupts will get enabled/disabled on demand when RPS 3283 * itself is enabled/disabled. 
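                 * Hence pm_imr starts out fully masked below; only the VEBOX
                 * user interrupt (when present) is seeded into pm_ier and the
                 * IER value written here.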
3284 */ 3285 if (HAS_VEBOX(dev_priv)) { 3286 pm_irqs |= PM_VEBOX_USER_INTERRUPT; 3287 dev_priv->pm_ier |= PM_VEBOX_USER_INTERRUPT; 3288 } 3289 3290 dev_priv->pm_imr = 0xffffffff; 3291 GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_imr, pm_irqs); 3292 } 3293 } 3294 3295 static int ironlake_irq_postinstall(struct drm_device *dev) 3296 { 3297 struct drm_i915_private *dev_priv = to_i915(dev); 3298 u32 display_mask, extra_mask; 3299 3300 if (INTEL_GEN(dev_priv) >= 7) { 3301 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | 3302 DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB | 3303 DE_PLANEB_FLIP_DONE_IVB | 3304 DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB); 3305 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB | 3306 DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB | 3307 DE_DP_A_HOTPLUG_IVB); 3308 } else { 3309 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | 3310 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE | 3311 DE_AUX_CHANNEL_A | 3312 DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE | 3313 DE_POISON); 3314 extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT | 3315 DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN | 3316 DE_DP_A_HOTPLUG); 3317 } 3318 3319 dev_priv->irq_mask = ~display_mask; 3320 3321 I915_WRITE(HWSTAM, 0xeffe); 3322 3323 ibx_irq_pre_postinstall(dev); 3324 3325 GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask); 3326 3327 gen5_gt_irq_postinstall(dev); 3328 3329 ilk_hpd_detection_setup(dev_priv); 3330 3331 ibx_irq_postinstall(dev); 3332 3333 if (IS_IRONLAKE_M(dev_priv)) { 3334 /* Enable PCU event interrupts 3335 * 3336 * spinlocking not required here for correctness since interrupt 3337 * setup is guaranteed to run in single-threaded context. But we 3338 * need it to make the assert_spin_locked happy. */ 3339 spin_lock_irq(&dev_priv->irq_lock); 3340 ilk_enable_display_irq(dev_priv, DE_PCU_EVENT); 3341 spin_unlock_irq(&dev_priv->irq_lock); 3342 } 3343 3344 return 0; 3345 } 3346 3347 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv) 3348 { 3349 lockdep_assert_held(&dev_priv->irq_lock); 3350 3351 if (dev_priv->display_irqs_enabled) 3352 return; 3353 3354 dev_priv->display_irqs_enabled = true; 3355 3356 if (intel_irqs_enabled(dev_priv)) { 3357 vlv_display_irq_reset(dev_priv); 3358 vlv_display_irq_postinstall(dev_priv); 3359 } 3360 } 3361 3362 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv) 3363 { 3364 lockdep_assert_held(&dev_priv->irq_lock); 3365 3366 if (!dev_priv->display_irqs_enabled) 3367 return; 3368 3369 dev_priv->display_irqs_enabled = false; 3370 3371 if (intel_irqs_enabled(dev_priv)) 3372 vlv_display_irq_reset(dev_priv); 3373 } 3374 3375 3376 static int valleyview_irq_postinstall(struct drm_device *dev) 3377 { 3378 struct drm_i915_private *dev_priv = to_i915(dev); 3379 3380 gen5_gt_irq_postinstall(dev); 3381 3382 spin_lock_irq(&dev_priv->irq_lock); 3383 if (dev_priv->display_irqs_enabled) 3384 vlv_display_irq_postinstall(dev_priv); 3385 spin_unlock_irq(&dev_priv->irq_lock); 3386 3387 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); 3388 POSTING_READ(VLV_MASTER_IER); 3389 3390 return 0; 3391 } 3392 3393 static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv) 3394 { 3395 /* These are interrupts we'll toggle with the ring mask register */ 3396 uint32_t gt_interrupts[] = { 3397 GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT | 3398 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT | 3399 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT | 3400 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT, 3401 
GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT | 3402 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT | 3403 GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT | 3404 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT, 3405 0, 3406 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT | 3407 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT 3408 }; 3409 3410 if (HAS_L3_DPF(dev_priv)) 3411 gt_interrupts[0] |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT; 3412 3413 dev_priv->pm_ier = 0x0; 3414 dev_priv->pm_imr = ~dev_priv->pm_ier; 3415 GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]); 3416 GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]); 3417 /* 3418 * RPS interrupts will get enabled/disabled on demand when RPS itself 3419 * is enabled/disabled. Same wil be the case for GuC interrupts. 3420 */ 3421 GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_imr, dev_priv->pm_ier); 3422 GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]); 3423 } 3424 3425 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) 3426 { 3427 uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE; 3428 uint32_t de_pipe_enables; 3429 u32 de_port_masked = GEN8_AUX_CHANNEL_A; 3430 u32 de_port_enables; 3431 u32 de_misc_masked = GEN8_DE_MISC_GSE; 3432 enum pipe pipe; 3433 3434 if (INTEL_GEN(dev_priv) >= 9) { 3435 de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE | 3436 GEN9_DE_PIPE_IRQ_FAULT_ERRORS; 3437 de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C | 3438 GEN9_AUX_CHANNEL_D; 3439 if (IS_GEN9_LP(dev_priv)) 3440 de_port_masked |= BXT_DE_PORT_GMBUS; 3441 } else { 3442 de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE | 3443 GEN8_DE_PIPE_IRQ_FAULT_ERRORS; 3444 } 3445 3446 de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK | 3447 GEN8_PIPE_FIFO_UNDERRUN; 3448 3449 de_port_enables = de_port_masked; 3450 if (IS_GEN9_LP(dev_priv)) 3451 de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK; 3452 else if (IS_BROADWELL(dev_priv)) 3453 de_port_enables |= GEN8_PORT_DP_A_HOTPLUG; 3454 3455 dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked; 3456 dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked; 3457 dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked; 3458 3459 for_each_pipe(dev_priv, pipe) 3460 if (intel_display_power_is_enabled(dev_priv, 3461 POWER_DOMAIN_PIPE(pipe))) 3462 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe, 3463 dev_priv->de_irq_mask[pipe], 3464 de_pipe_enables); 3465 3466 GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables); 3467 GEN5_IRQ_INIT(GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked); 3468 3469 if (IS_GEN9_LP(dev_priv)) 3470 bxt_hpd_detection_setup(dev_priv); 3471 else if (IS_BROADWELL(dev_priv)) 3472 ilk_hpd_detection_setup(dev_priv); 3473 } 3474 3475 static int gen8_irq_postinstall(struct drm_device *dev) 3476 { 3477 struct drm_i915_private *dev_priv = to_i915(dev); 3478 3479 if (HAS_PCH_SPLIT(dev_priv)) 3480 ibx_irq_pre_postinstall(dev); 3481 3482 gen8_gt_irq_postinstall(dev_priv); 3483 gen8_de_irq_postinstall(dev_priv); 3484 3485 if (HAS_PCH_SPLIT(dev_priv)) 3486 ibx_irq_postinstall(dev); 3487 3488 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 3489 POSTING_READ(GEN8_MASTER_IRQ); 3490 3491 return 0; 3492 } 3493 3494 static int cherryview_irq_postinstall(struct drm_device *dev) 3495 { 3496 struct drm_i915_private *dev_priv = to_i915(dev); 3497 3498 gen8_gt_irq_postinstall(dev_priv); 3499 3500 spin_lock_irq(&dev_priv->irq_lock); 3501 if (dev_priv->display_irqs_enabled) 3502 vlv_display_irq_postinstall(dev_priv); 3503 spin_unlock_irq(&dev_priv->irq_lock); 3504 3505 I915_WRITE(GEN8_MASTER_IRQ, 
GEN8_MASTER_IRQ_CONTROL); 3506 POSTING_READ(GEN8_MASTER_IRQ); 3507 3508 return 0; 3509 } 3510 3511 static void gen8_irq_uninstall(struct drm_device *dev) 3512 { 3513 struct drm_i915_private *dev_priv = to_i915(dev); 3514 3515 if (!dev_priv) 3516 return; 3517 3518 gen8_irq_reset(dev); 3519 } 3520 3521 static void valleyview_irq_uninstall(struct drm_device *dev) 3522 { 3523 struct drm_i915_private *dev_priv = to_i915(dev); 3524 3525 if (!dev_priv) 3526 return; 3527 3528 I915_WRITE(VLV_MASTER_IER, 0); 3529 POSTING_READ(VLV_MASTER_IER); 3530 3531 gen5_gt_irq_reset(dev_priv); 3532 3533 I915_WRITE(HWSTAM, 0xffffffff); 3534 3535 spin_lock_irq(&dev_priv->irq_lock); 3536 if (dev_priv->display_irqs_enabled) 3537 vlv_display_irq_reset(dev_priv); 3538 spin_unlock_irq(&dev_priv->irq_lock); 3539 } 3540 3541 static void cherryview_irq_uninstall(struct drm_device *dev) 3542 { 3543 struct drm_i915_private *dev_priv = to_i915(dev); 3544 3545 if (!dev_priv) 3546 return; 3547 3548 I915_WRITE(GEN8_MASTER_IRQ, 0); 3549 POSTING_READ(GEN8_MASTER_IRQ); 3550 3551 gen8_gt_irq_reset(dev_priv); 3552 3553 GEN5_IRQ_RESET(GEN8_PCU_); 3554 3555 spin_lock_irq(&dev_priv->irq_lock); 3556 if (dev_priv->display_irqs_enabled) 3557 vlv_display_irq_reset(dev_priv); 3558 spin_unlock_irq(&dev_priv->irq_lock); 3559 } 3560 3561 static void ironlake_irq_uninstall(struct drm_device *dev) 3562 { 3563 struct drm_i915_private *dev_priv = to_i915(dev); 3564 3565 if (!dev_priv) 3566 return; 3567 3568 ironlake_irq_reset(dev); 3569 } 3570 3571 static void i8xx_irq_preinstall(struct drm_device * dev) 3572 { 3573 struct drm_i915_private *dev_priv = to_i915(dev); 3574 int pipe; 3575 3576 for_each_pipe(dev_priv, pipe) 3577 I915_WRITE(PIPESTAT(pipe), 0); 3578 I915_WRITE16(IMR, 0xffff); 3579 I915_WRITE16(IER, 0x0); 3580 POSTING_READ16(IER); 3581 } 3582 3583 static int i8xx_irq_postinstall(struct drm_device *dev) 3584 { 3585 struct drm_i915_private *dev_priv = to_i915(dev); 3586 3587 I915_WRITE16(EMR, 3588 ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); 3589 3590 /* Unmask the interrupts that we always want on. */ 3591 dev_priv->irq_mask = 3592 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3593 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3594 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3595 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT); 3596 I915_WRITE16(IMR, dev_priv->irq_mask); 3597 3598 I915_WRITE16(IER, 3599 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3600 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3601 I915_USER_INTERRUPT); 3602 POSTING_READ16(IER); 3603 3604 /* Interrupt setup is already guaranteed to be single-threaded, this is 3605 * just to make the assert_spin_locked check happy. */ 3606 spin_lock_irq(&dev_priv->irq_lock); 3607 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 3608 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 3609 spin_unlock_irq(&dev_priv->irq_lock); 3610 3611 return 0; 3612 } 3613 3614 /* 3615 * Returns true when a page flip has completed. 
3616 */ 3617 static irqreturn_t i8xx_irq_handler(int irq, void *arg) 3618 { 3619 struct drm_device *dev = arg; 3620 struct drm_i915_private *dev_priv = to_i915(dev); 3621 u16 iir, new_iir; 3622 u32 pipe_stats[2]; 3623 int pipe; 3624 irqreturn_t ret; 3625 3626 if (!intel_irqs_enabled(dev_priv)) 3627 return IRQ_NONE; 3628 3629 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 3630 disable_rpm_wakeref_asserts(dev_priv); 3631 3632 ret = IRQ_NONE; 3633 iir = I915_READ16(IIR); 3634 if (iir == 0) 3635 goto out; 3636 3637 while (iir) { 3638 /* Can't rely on pipestat interrupt bit in iir as it might 3639 * have been cleared after the pipestat interrupt was received. 3640 * It doesn't set the bit in iir again, but it still produces 3641 * interrupts (for non-MSI). 3642 */ 3643 spin_lock(&dev_priv->irq_lock); 3644 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 3645 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir); 3646 3647 for_each_pipe(dev_priv, pipe) { 3648 i915_reg_t reg = PIPESTAT(pipe); 3649 pipe_stats[pipe] = I915_READ(reg); 3650 3651 /* 3652 * Clear the PIPE*STAT regs before the IIR 3653 */ 3654 if (pipe_stats[pipe] & 0x8000ffff) 3655 I915_WRITE(reg, pipe_stats[pipe]); 3656 } 3657 spin_unlock(&dev_priv->irq_lock); 3658 3659 I915_WRITE16(IIR, iir); 3660 new_iir = I915_READ16(IIR); /* Flush posted writes */ 3661 3662 if (iir & I915_USER_INTERRUPT) 3663 notify_ring(dev_priv->engine[RCS]); 3664 3665 for_each_pipe(dev_priv, pipe) { 3666 int plane = pipe; 3667 if (HAS_FBC(dev_priv)) 3668 plane = !plane; 3669 3670 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS) 3671 drm_handle_vblank(&dev_priv->drm, pipe); 3672 3673 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 3674 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 3675 3676 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 3677 intel_cpu_fifo_underrun_irq_handler(dev_priv, 3678 pipe); 3679 } 3680 3681 iir = new_iir; 3682 } 3683 ret = IRQ_HANDLED; 3684 3685 out: 3686 enable_rpm_wakeref_asserts(dev_priv); 3687 3688 return ret; 3689 } 3690 3691 static void i8xx_irq_uninstall(struct drm_device * dev) 3692 { 3693 struct drm_i915_private *dev_priv = to_i915(dev); 3694 int pipe; 3695 3696 for_each_pipe(dev_priv, pipe) { 3697 /* Clear enable bits; then clear status bits */ 3698 I915_WRITE(PIPESTAT(pipe), 0); 3699 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); 3700 } 3701 I915_WRITE16(IMR, 0xffff); 3702 I915_WRITE16(IER, 0x0); 3703 I915_WRITE16(IIR, I915_READ16(IIR)); 3704 } 3705 3706 static void i915_irq_preinstall(struct drm_device * dev) 3707 { 3708 struct drm_i915_private *dev_priv = to_i915(dev); 3709 int pipe; 3710 3711 if (I915_HAS_HOTPLUG(dev_priv)) { 3712 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); 3713 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3714 } 3715 3716 I915_WRITE16(HWSTAM, 0xeffe); 3717 for_each_pipe(dev_priv, pipe) 3718 I915_WRITE(PIPESTAT(pipe), 0); 3719 I915_WRITE(IMR, 0xffffffff); 3720 I915_WRITE(IER, 0x0); 3721 POSTING_READ(IER); 3722 } 3723 3724 static int i915_irq_postinstall(struct drm_device *dev) 3725 { 3726 struct drm_i915_private *dev_priv = to_i915(dev); 3727 u32 enable_mask; 3728 3729 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); 3730 3731 /* Unmask the interrupts that we always want on. 
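         * Note that the plane flip pending bits are unmasked in IMR here but
         * deliberately left out of enable_mask/IER below, presumably so their
         * state can be observed in IIR without generating an interrupt of
         * their own.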
*/ 3732 dev_priv->irq_mask = 3733 ~(I915_ASLE_INTERRUPT | 3734 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3735 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3736 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3737 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT); 3738 3739 enable_mask = 3740 I915_ASLE_INTERRUPT | 3741 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3742 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3743 I915_USER_INTERRUPT; 3744 3745 if (I915_HAS_HOTPLUG(dev_priv)) { 3746 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); 3747 POSTING_READ(PORT_HOTPLUG_EN); 3748 3749 /* Enable in IER... */ 3750 enable_mask |= I915_DISPLAY_PORT_INTERRUPT; 3751 /* and unmask in IMR */ 3752 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT; 3753 } 3754 3755 I915_WRITE(IMR, dev_priv->irq_mask); 3756 I915_WRITE(IER, enable_mask); 3757 POSTING_READ(IER); 3758 3759 i915_enable_asle_pipestat(dev_priv); 3760 3761 /* Interrupt setup is already guaranteed to be single-threaded, this is 3762 * just to make the assert_spin_locked check happy. */ 3763 spin_lock_irq(&dev_priv->irq_lock); 3764 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 3765 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 3766 spin_unlock_irq(&dev_priv->irq_lock); 3767 3768 return 0; 3769 } 3770 3771 static irqreturn_t i915_irq_handler(int irq, void *arg) 3772 { 3773 struct drm_device *dev = arg; 3774 struct drm_i915_private *dev_priv = to_i915(dev); 3775 u32 iir, new_iir, pipe_stats[I915_MAX_PIPES]; 3776 int pipe, ret = IRQ_NONE; 3777 3778 if (!intel_irqs_enabled(dev_priv)) 3779 return IRQ_NONE; 3780 3781 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 3782 disable_rpm_wakeref_asserts(dev_priv); 3783 3784 iir = I915_READ(IIR); 3785 do { 3786 bool irq_received = (iir) != 0; 3787 bool blc_event = false; 3788 3789 /* Can't rely on pipestat interrupt bit in iir as it might 3790 * have been cleared after the pipestat interrupt was received. 3791 * It doesn't set the bit in iir again, but it still produces 3792 * interrupts (for non-MSI). 3793 */ 3794 spin_lock(&dev_priv->irq_lock); 3795 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 3796 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir); 3797 3798 for_each_pipe(dev_priv, pipe) { 3799 i915_reg_t reg = PIPESTAT(pipe); 3800 pipe_stats[pipe] = I915_READ(reg); 3801 3802 /* Clear the PIPE*STAT regs before the IIR */ 3803 if (pipe_stats[pipe] & 0x8000ffff) { 3804 I915_WRITE(reg, pipe_stats[pipe]); 3805 irq_received = true; 3806 } 3807 } 3808 spin_unlock(&dev_priv->irq_lock); 3809 3810 if (!irq_received) 3811 break; 3812 3813 /* Consume port. 
Then clear IIR or we'll miss events */ 3814 if (I915_HAS_HOTPLUG(dev_priv) && 3815 iir & I915_DISPLAY_PORT_INTERRUPT) { 3816 u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv); 3817 if (hotplug_status) 3818 i9xx_hpd_irq_handler(dev_priv, hotplug_status); 3819 } 3820 3821 I915_WRITE(IIR, iir); 3822 new_iir = I915_READ(IIR); /* Flush posted writes */ 3823 3824 if (iir & I915_USER_INTERRUPT) 3825 notify_ring(dev_priv->engine[RCS]); 3826 3827 for_each_pipe(dev_priv, pipe) { 3828 int plane = pipe; 3829 if (HAS_FBC(dev_priv)) 3830 plane = !plane; 3831 3832 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS) 3833 drm_handle_vblank(&dev_priv->drm, pipe); 3834 3835 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 3836 blc_event = true; 3837 3838 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 3839 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 3840 3841 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 3842 intel_cpu_fifo_underrun_irq_handler(dev_priv, 3843 pipe); 3844 } 3845 3846 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 3847 intel_opregion_asle_intr(dev_priv); 3848 3849 /* With MSI, interrupts are only generated when iir 3850 * transitions from zero to nonzero. If another bit got 3851 * set while we were handling the existing iir bits, then 3852 * we would never get another interrupt. 3853 * 3854 * This is fine on non-MSI as well, as if we hit this path 3855 * we avoid exiting the interrupt handler only to generate 3856 * another one. 3857 * 3858 * Note that for MSI this could cause a stray interrupt report 3859 * if an interrupt landed in the time between writing IIR and 3860 * the posting read. This should be rare enough to never 3861 * trigger the 99% of 100,000 interrupts test for disabling 3862 * stray interrupts. 3863 */ 3864 ret = IRQ_HANDLED; 3865 iir = new_iir; 3866 } while (iir); 3867 3868 enable_rpm_wakeref_asserts(dev_priv); 3869 3870 return ret; 3871 } 3872 3873 static void i915_irq_uninstall(struct drm_device * dev) 3874 { 3875 struct drm_i915_private *dev_priv = to_i915(dev); 3876 int pipe; 3877 3878 if (I915_HAS_HOTPLUG(dev_priv)) { 3879 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); 3880 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3881 } 3882 3883 I915_WRITE16(HWSTAM, 0xffff); 3884 for_each_pipe(dev_priv, pipe) { 3885 /* Clear enable bits; then clear status bits */ 3886 I915_WRITE(PIPESTAT(pipe), 0); 3887 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); 3888 } 3889 I915_WRITE(IMR, 0xffffffff); 3890 I915_WRITE(IER, 0x0); 3891 3892 I915_WRITE(IIR, I915_READ(IIR)); 3893 } 3894 3895 static void i965_irq_preinstall(struct drm_device * dev) 3896 { 3897 struct drm_i915_private *dev_priv = to_i915(dev); 3898 int pipe; 3899 3900 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); 3901 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3902 3903 I915_WRITE(HWSTAM, 0xeffe); 3904 for_each_pipe(dev_priv, pipe) 3905 I915_WRITE(PIPESTAT(pipe), 0); 3906 I915_WRITE(IMR, 0xffffffff); 3907 I915_WRITE(IER, 0x0); 3908 POSTING_READ(IER); 3909 } 3910 3911 static int i965_irq_postinstall(struct drm_device *dev) 3912 { 3913 struct drm_i915_private *dev_priv = to_i915(dev); 3914 u32 enable_mask; 3915 u32 error_mask; 3916 3917 /* Unmask the interrupts that we always want on. 
*/ 3918 dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT | 3919 I915_DISPLAY_PORT_INTERRUPT | 3920 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3921 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3922 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3923 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | 3924 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 3925 3926 enable_mask = ~dev_priv->irq_mask; 3927 enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3928 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT); 3929 enable_mask |= I915_USER_INTERRUPT; 3930 3931 if (IS_G4X(dev_priv)) 3932 enable_mask |= I915_BSD_USER_INTERRUPT; 3933 3934 /* Interrupt setup is already guaranteed to be single-threaded, this is 3935 * just to make the assert_spin_locked check happy. */ 3936 spin_lock_irq(&dev_priv->irq_lock); 3937 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); 3938 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 3939 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 3940 spin_unlock_irq(&dev_priv->irq_lock); 3941 3942 /* 3943 * Enable some error detection, note the instruction error mask 3944 * bit is reserved, so we leave it masked. 3945 */ 3946 if (IS_G4X(dev_priv)) { 3947 error_mask = ~(GM45_ERROR_PAGE_TABLE | 3948 GM45_ERROR_MEM_PRIV | 3949 GM45_ERROR_CP_PRIV | 3950 I915_ERROR_MEMORY_REFRESH); 3951 } else { 3952 error_mask = ~(I915_ERROR_PAGE_TABLE | 3953 I915_ERROR_MEMORY_REFRESH); 3954 } 3955 I915_WRITE(EMR, error_mask); 3956 3957 I915_WRITE(IMR, dev_priv->irq_mask); 3958 I915_WRITE(IER, enable_mask); 3959 POSTING_READ(IER); 3960 3961 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); 3962 POSTING_READ(PORT_HOTPLUG_EN); 3963 3964 i915_enable_asle_pipestat(dev_priv); 3965 3966 return 0; 3967 } 3968 3969 static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv) 3970 { 3971 u32 hotplug_en; 3972 3973 lockdep_assert_held(&dev_priv->irq_lock); 3974 3975 /* Note HDMI and DP share hotplug bits */ 3976 /* enable bits are the same for all generations */ 3977 hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915); 3978 /* Programming the CRT detection parameters tends 3979 to generate a spurious hotplug event about three 3980 seconds later. So just do it once. 3981 */ 3982 if (IS_G4X(dev_priv)) 3983 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64; 3984 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; 3985 3986 /* Ignore TV since it's buggy */ 3987 i915_hotplug_interrupt_update_locked(dev_priv, 3988 HOTPLUG_INT_EN_MASK | 3989 CRT_HOTPLUG_VOLTAGE_COMPARE_MASK | 3990 CRT_HOTPLUG_ACTIVATION_PERIOD_64, 3991 hotplug_en); 3992 } 3993 3994 static irqreturn_t i965_irq_handler(int irq, void *arg) 3995 { 3996 struct drm_device *dev = arg; 3997 struct drm_i915_private *dev_priv = to_i915(dev); 3998 u32 iir, new_iir; 3999 u32 pipe_stats[I915_MAX_PIPES]; 4000 int ret = IRQ_NONE, pipe; 4001 4002 if (!intel_irqs_enabled(dev_priv)) 4003 return IRQ_NONE; 4004 4005 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 4006 disable_rpm_wakeref_asserts(dev_priv); 4007 4008 iir = I915_READ(IIR); 4009 4010 for (;;) { 4011 bool irq_received = (iir) != 0; 4012 bool blc_event = false; 4013 4014 /* Can't rely on pipestat interrupt bit in iir as it might 4015 * have been cleared after the pipestat interrupt was received. 4016 * It doesn't set the bit in iir again, but it still produces 4017 * interrupts (for non-MSI). 
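 *
 * In short, each pass of the loop below acks events in this order (a
 * distilled, illustrative sketch of the surrounding code, not a
 * substitute for it):
 *
 *	pipe_stats[pipe] = I915_READ(PIPESTAT(pipe));	read pipe events
 *	I915_WRITE(PIPESTAT(pipe), pipe_stats[pipe]);	ack them before IIR
 *	I915_WRITE(IIR, iir);				ack the IIR bits
 *	new_iir = I915_READ(IIR);			pick up late arrivals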
4018 */ 4019 spin_lock(&dev_priv->irq_lock); 4020 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 4021 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir); 4022 4023 for_each_pipe(dev_priv, pipe) { 4024 i915_reg_t reg = PIPESTAT(pipe); 4025 pipe_stats[pipe] = I915_READ(reg); 4026 4027 /* 4028 * Clear the PIPE*STAT regs before the IIR 4029 */ 4030 if (pipe_stats[pipe] & 0x8000ffff) { 4031 I915_WRITE(reg, pipe_stats[pipe]); 4032 irq_received = true; 4033 } 4034 } 4035 spin_unlock(&dev_priv->irq_lock); 4036 4037 if (!irq_received) 4038 break; 4039 4040 ret = IRQ_HANDLED; 4041 4042 /* Consume port. Then clear IIR or we'll miss events */ 4043 if (iir & I915_DISPLAY_PORT_INTERRUPT) { 4044 u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv); 4045 if (hotplug_status) 4046 i9xx_hpd_irq_handler(dev_priv, hotplug_status); 4047 } 4048 4049 I915_WRITE(IIR, iir); 4050 new_iir = I915_READ(IIR); /* Flush posted writes */ 4051 4052 if (iir & I915_USER_INTERRUPT) 4053 notify_ring(dev_priv->engine[RCS]); 4054 if (iir & I915_BSD_USER_INTERRUPT) 4055 notify_ring(dev_priv->engine[VCS]); 4056 4057 for_each_pipe(dev_priv, pipe) { 4058 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS) 4059 drm_handle_vblank(&dev_priv->drm, pipe); 4060 4061 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 4062 blc_event = true; 4063 4064 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 4065 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 4066 4067 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 4068 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 4069 } 4070 4071 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 4072 intel_opregion_asle_intr(dev_priv); 4073 4074 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 4075 gmbus_irq_handler(dev_priv); 4076 4077 /* With MSI, interrupts are only generated when iir 4078 * transitions from zero to nonzero. If another bit got 4079 * set while we were handling the existing iir bits, then 4080 * we would never get another interrupt. 4081 * 4082 * This is fine on non-MSI as well, as if we hit this path 4083 * we avoid exiting the interrupt handler only to generate 4084 * another one. 4085 * 4086 * Note that for MSI this could cause a stray interrupt report 4087 * if an interrupt landed in the time between writing IIR and 4088 * the posting read. This should be rare enough to never 4089 * trigger the 99% of 100,000 interrupts test for disabling 4090 * stray interrupts. 4091 */ 4092 iir = new_iir; 4093 } 4094 4095 enable_rpm_wakeref_asserts(dev_priv); 4096 4097 return ret; 4098 } 4099 4100 static void i965_irq_uninstall(struct drm_device * dev) 4101 { 4102 struct drm_i915_private *dev_priv = to_i915(dev); 4103 int pipe; 4104 4105 if (!dev_priv) 4106 return; 4107 4108 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); 4109 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 4110 4111 I915_WRITE(HWSTAM, 0xffffffff); 4112 for_each_pipe(dev_priv, pipe) 4113 I915_WRITE(PIPESTAT(pipe), 0); 4114 I915_WRITE(IMR, 0xffffffff); 4115 I915_WRITE(IER, 0x0); 4116 4117 for_each_pipe(dev_priv, pipe) 4118 I915_WRITE(PIPESTAT(pipe), 4119 I915_READ(PIPESTAT(pipe)) & 0x8000ffff); 4120 I915_WRITE(IIR, I915_READ(IIR)); 4121 } 4122 4123 /** 4124 * intel_irq_init - initializes irq support 4125 * @dev_priv: i915 device instance 4126 * 4127 * This function initializes all the irq support including work items, timers 4128 * and all the vtables. It does not set up the interrupt itself though.
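 *
 * As a rough illustration of the intended ordering during driver load (a
 * sketch of the call sequence only, not a verbatim excerpt from the load
 * path):
 *
 *	intel_irq_init(dev_priv);
 *	ret = intel_irq_install(dev_priv);
 *
 * with intel_irq_uninstall() and intel_irq_fini() undoing those two steps
 * in reverse order on teardown.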
4129 */ 4130 void intel_irq_init(struct drm_i915_private *dev_priv) 4131 { 4132 struct drm_device *dev = &dev_priv->drm; 4133 int i; 4134 4135 intel_hpd_init_work(dev_priv); 4136 4137 INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work); 4138 4139 INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work); 4140 for (i = 0; i < MAX_L3_SLICES; ++i) 4141 dev_priv->l3_parity.remap_info[i] = NULL; 4142 4143 if (HAS_GUC_SCHED(dev_priv)) 4144 dev_priv->pm_guc_events = GEN9_GUC_TO_HOST_INT_EVENT; 4145 4146 /* Let's track the enabled rps events */ 4147 if (IS_VALLEYVIEW(dev_priv)) 4148 /* WaGsvRC0ResidencyMethod:vlv */ 4149 dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED; 4150 else 4151 dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS; 4152 4153 dev_priv->rps.pm_intrmsk_mbz = 0; 4154 4155 /* 4156 * SNB, IVB and HSW can hard hang on a looping batchbuffer if 4157 * GEN6_PM_UP_EI_EXPIRED is masked; VLV and CHV may as well. 4158 * 4159 * TODO: verify if this can be reproduced on VLV,CHV. 4160 */ 4161 if (INTEL_GEN(dev_priv) <= 7) 4162 dev_priv->rps.pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED; 4163 4164 if (INTEL_GEN(dev_priv) >= 8) 4165 dev_priv->rps.pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC; 4166 4167 if (IS_GEN2(dev_priv)) { 4168 /* Gen2 doesn't have a hardware frame counter */ 4169 dev->max_vblank_count = 0; 4170 } else if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) { 4171 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */ 4172 dev->driver->get_vblank_counter = g4x_get_vblank_counter; 4173 } else { 4174 dev->driver->get_vblank_counter = i915_get_vblank_counter; 4175 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ 4176 } 4177 4178 /* 4179 * Opt out of the vblank disable timer on everything except gen2. 4180 * Gen2 doesn't have a hardware frame counter and so depends on 4181 * vblank interrupts to produce sane vblank sequence numbers. 4182 */ 4183 if (!IS_GEN2(dev_priv)) 4184 dev->vblank_disable_immediate = true; 4185 4186 /* Most platforms treat the display irq block as an always-on 4187 * power domain. vlv/chv can disable it at runtime and need 4188 * special care to avoid writing any of the display block registers 4189 * outside of the power domain. We defer setting up the display irqs 4190 * in this case to the runtime pm.
4191 */ 4192 dev_priv->display_irqs_enabled = true; 4193 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 4194 dev_priv->display_irqs_enabled = false; 4195 4196 dev_priv->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD; 4197 4198 dev->driver->get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos; 4199 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos; 4200 4201 if (IS_CHERRYVIEW(dev_priv)) { 4202 dev->driver->irq_handler = cherryview_irq_handler; 4203 dev->driver->irq_preinstall = cherryview_irq_preinstall; 4204 dev->driver->irq_postinstall = cherryview_irq_postinstall; 4205 dev->driver->irq_uninstall = cherryview_irq_uninstall; 4206 dev->driver->enable_vblank = i965_enable_vblank; 4207 dev->driver->disable_vblank = i965_disable_vblank; 4208 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 4209 } else if (IS_VALLEYVIEW(dev_priv)) { 4210 dev->driver->irq_handler = valleyview_irq_handler; 4211 dev->driver->irq_preinstall = valleyview_irq_preinstall; 4212 dev->driver->irq_postinstall = valleyview_irq_postinstall; 4213 dev->driver->irq_uninstall = valleyview_irq_uninstall; 4214 dev->driver->enable_vblank = i965_enable_vblank; 4215 dev->driver->disable_vblank = i965_disable_vblank; 4216 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 4217 } else if (INTEL_GEN(dev_priv) >= 8) { 4218 dev->driver->irq_handler = gen8_irq_handler; 4219 dev->driver->irq_preinstall = gen8_irq_reset; 4220 dev->driver->irq_postinstall = gen8_irq_postinstall; 4221 dev->driver->irq_uninstall = gen8_irq_uninstall; 4222 dev->driver->enable_vblank = gen8_enable_vblank; 4223 dev->driver->disable_vblank = gen8_disable_vblank; 4224 if (IS_GEN9_LP(dev_priv)) 4225 dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup; 4226 else if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv) || 4227 HAS_PCH_CNP(dev_priv)) 4228 dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup; 4229 else 4230 dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup; 4231 } else if (HAS_PCH_SPLIT(dev_priv)) { 4232 dev->driver->irq_handler = ironlake_irq_handler; 4233 dev->driver->irq_preinstall = ironlake_irq_reset; 4234 dev->driver->irq_postinstall = ironlake_irq_postinstall; 4235 dev->driver->irq_uninstall = ironlake_irq_uninstall; 4236 dev->driver->enable_vblank = ironlake_enable_vblank; 4237 dev->driver->disable_vblank = ironlake_disable_vblank; 4238 dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup; 4239 } else { 4240 if (IS_GEN2(dev_priv)) { 4241 dev->driver->irq_preinstall = i8xx_irq_preinstall; 4242 dev->driver->irq_postinstall = i8xx_irq_postinstall; 4243 dev->driver->irq_handler = i8xx_irq_handler; 4244 dev->driver->irq_uninstall = i8xx_irq_uninstall; 4245 dev->driver->enable_vblank = i8xx_enable_vblank; 4246 dev->driver->disable_vblank = i8xx_disable_vblank; 4247 } else if (IS_GEN3(dev_priv)) { 4248 dev->driver->irq_preinstall = i915_irq_preinstall; 4249 dev->driver->irq_postinstall = i915_irq_postinstall; 4250 dev->driver->irq_uninstall = i915_irq_uninstall; 4251 dev->driver->irq_handler = i915_irq_handler; 4252 dev->driver->enable_vblank = i8xx_enable_vblank; 4253 dev->driver->disable_vblank = i8xx_disable_vblank; 4254 } else { 4255 dev->driver->irq_preinstall = i965_irq_preinstall; 4256 dev->driver->irq_postinstall = i965_irq_postinstall; 4257 dev->driver->irq_uninstall = i965_irq_uninstall; 4258 dev->driver->irq_handler = i965_irq_handler; 4259 dev->driver->enable_vblank = i965_enable_vblank; 4260 dev->driver->disable_vblank = i965_disable_vblank; 4261 } 4262 if (I915_HAS_HOTPLUG(dev_priv)) 4263 
dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 4264 } 4265 } 4266 4267 /** 4268 * intel_irq_fini - deinitializes IRQ support 4269 * @i915: i915 device instance 4270 * 4271 * This function deinitializes all the IRQ support. 4272 */ 4273 void intel_irq_fini(struct drm_i915_private *i915) 4274 { 4275 int i; 4276 4277 for (i = 0; i < MAX_L3_SLICES; ++i) 4278 kfree(i915->l3_parity.remap_info[i]); 4279 } 4280 4281 /** 4282 * intel_irq_install - enables the hardware interrupt 4283 * @dev_priv: i915 device instance 4284 * 4285 * This function enables the hardware interrupt handling, but leaves the hotplug 4286 * handling disabled. Must be called after intel_irq_init(). 4287 * 4288 * In the driver load and resume code we need working interrupts in a few places 4289 * but don't want to deal with the hassle of concurrent probe and hotplug 4290 * workers. Hence the split into this two-stage approach. 4291 */ 4292 int intel_irq_install(struct drm_i915_private *dev_priv) 4293 { 4294 /* 4295 * We enable some interrupt sources in our postinstall hooks, so mark 4296 * interrupts as enabled _before_ actually enabling them to avoid 4297 * special cases in our ordering checks. 4298 */ 4299 dev_priv->pm.irqs_enabled = true; 4300 4301 return drm_irq_install(&dev_priv->drm, dev_priv->drm.pdev->irq); 4302 } 4303 4304 /** 4305 * intel_irq_uninstall - finalizes all irq handling 4306 * @dev_priv: i915 device instance 4307 * 4308 * This stops interrupt and hotplug handling and unregisters and frees all 4309 * resources acquired in the init functions. 4310 */ 4311 void intel_irq_uninstall(struct drm_i915_private *dev_priv) 4312 { 4313 drm_irq_uninstall(&dev_priv->drm); 4314 intel_hpd_cancel_work(dev_priv); 4315 dev_priv->pm.irqs_enabled = false; 4316 } 4317 4318 /** 4319 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling 4320 * @dev_priv: i915 device instance 4321 * 4322 * This function is used to disable interrupts at runtime, both in the runtime 4323 * pm and the system suspend/resume code. 4324 */ 4325 void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv) 4326 { 4327 dev_priv->drm.driver->irq_uninstall(&dev_priv->drm); 4328 dev_priv->pm.irqs_enabled = false; 4329 synchronize_irq(dev_priv->drm.irq); 4330 } 4331 4332 /** 4333 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling 4334 * @dev_priv: i915 device instance 4335 * 4336 * This function is used to enable interrupts at runtime, both in the runtime 4337 * pm and the system suspend/resume code. 4338 */ 4339 void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv) 4340 { 4341 dev_priv->pm.irqs_enabled = true; 4342 dev_priv->drm.driver->irq_preinstall(&dev_priv->drm); 4343 dev_priv->drm.driver->irq_postinstall(&dev_priv->drm); 4344 } 4345
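/*
 * Illustrative sketch only: how the runtime pm helpers above are meant to
 * bracket a suspend/resume cycle. The example_* functions below are
 * hypothetical and not part of the driver; the real callers live in the
 * i915 runtime pm and system suspend/resume code.
 */
#if 0	/* example, not compiled */
static int example_runtime_suspend(struct drm_i915_private *dev_priv)
{
	/* Quiesce and disable interrupt handling before powering down. */
	intel_runtime_pm_disable_interrupts(dev_priv);

	/* ... save hardware state and drop power here ... */

	return 0;
}

static int example_runtime_resume(struct drm_i915_private *dev_priv)
{
	/* ... restore power and hardware state here ... */

	/* Re-run the preinstall/postinstall hooks and re-enable handling. */
	intel_runtime_pm_enable_interrupts(dev_priv);

	return 0;
}
#endif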