1 /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*- 2 */ 3 /* 4 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. 5 * All Rights Reserved. 6 * 7 * Permission is hereby granted, free of charge, to any person obtaining a 8 * copy of this software and associated documentation files (the 9 * "Software"), to deal in the Software without restriction, including 10 * without limitation the rights to use, copy, modify, merge, publish, 11 * distribute, sub license, and/or sell copies of the Software, and to 12 * permit persons to whom the Software is furnished to do so, subject to 13 * the following conditions: 14 * 15 * The above copyright notice and this permission notice (including the 16 * next paragraph) shall be included in all copies or substantial portions 17 * of the Software. 18 * 19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 22 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR 23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, 24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 26 * 27 */ 28 29 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 30 31 #include <linux/sysrq.h> 32 #include <linux/slab.h> 33 #include <linux/circ_buf.h> 34 #include <drm/drmP.h> 35 #include <drm/i915_drm.h> 36 #include "i915_drv.h" 37 #include "i915_trace.h" 38 #include "intel_drv.h" 39 40 /** 41 * DOC: interrupt handling 42 * 43 * These functions provide the basic support for enabling and disabling the 44 * interrupt handling support. There's a lot more functionality in i915_irq.c 45 * and related files, but that will be described in separate chapters. 
46 */ 47 48 static const u32 hpd_ilk[HPD_NUM_PINS] = { 49 [HPD_PORT_A] = DE_DP_A_HOTPLUG, 50 }; 51 52 static const u32 hpd_ivb[HPD_NUM_PINS] = { 53 [HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB, 54 }; 55 56 static const u32 hpd_bdw[HPD_NUM_PINS] = { 57 [HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG, 58 }; 59 60 static const u32 hpd_ibx[HPD_NUM_PINS] = { 61 [HPD_CRT] = SDE_CRT_HOTPLUG, 62 [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG, 63 [HPD_PORT_B] = SDE_PORTB_HOTPLUG, 64 [HPD_PORT_C] = SDE_PORTC_HOTPLUG, 65 [HPD_PORT_D] = SDE_PORTD_HOTPLUG 66 }; 67 68 static const u32 hpd_cpt[HPD_NUM_PINS] = { 69 [HPD_CRT] = SDE_CRT_HOTPLUG_CPT, 70 [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT, 71 [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT, 72 [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT, 73 [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT 74 }; 75 76 static const u32 hpd_spt[HPD_NUM_PINS] = { 77 [HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT, 78 [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT, 79 [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT, 80 [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT, 81 [HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT 82 }; 83 84 static const u32 hpd_mask_i915[HPD_NUM_PINS] = { 85 [HPD_CRT] = CRT_HOTPLUG_INT_EN, 86 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN, 87 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN, 88 [HPD_PORT_B] = PORTB_HOTPLUG_INT_EN, 89 [HPD_PORT_C] = PORTC_HOTPLUG_INT_EN, 90 [HPD_PORT_D] = PORTD_HOTPLUG_INT_EN 91 }; 92 93 static const u32 hpd_status_g4x[HPD_NUM_PINS] = { 94 [HPD_CRT] = CRT_HOTPLUG_INT_STATUS, 95 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X, 96 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X, 97 [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS, 98 [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS, 99 [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS 100 }; 101 102 static const u32 hpd_status_i915[HPD_NUM_PINS] = { 103 [HPD_CRT] = CRT_HOTPLUG_INT_STATUS, 104 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915, 105 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915, 106 [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS, 107 [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS, 108 [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS 109 }; 110 111 /* BXT hpd list */ 112 static const u32 hpd_bxt[HPD_NUM_PINS] = { 113 [HPD_PORT_A] = BXT_DE_PORT_HP_DDIA, 114 [HPD_PORT_B] = BXT_DE_PORT_HP_DDIB, 115 [HPD_PORT_C] = BXT_DE_PORT_HP_DDIC 116 }; 117 118 /* IIR can theoretically queue up two events. Be paranoid. */ 119 #define GEN8_IRQ_RESET_NDX(type, which) do { \ 120 I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \ 121 POSTING_READ(GEN8_##type##_IMR(which)); \ 122 I915_WRITE(GEN8_##type##_IER(which), 0); \ 123 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \ 124 POSTING_READ(GEN8_##type##_IIR(which)); \ 125 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \ 126 POSTING_READ(GEN8_##type##_IIR(which)); \ 127 } while (0) 128 129 #define GEN5_IRQ_RESET(type) do { \ 130 I915_WRITE(type##IMR, 0xffffffff); \ 131 POSTING_READ(type##IMR); \ 132 I915_WRITE(type##IER, 0); \ 133 I915_WRITE(type##IIR, 0xffffffff); \ 134 POSTING_READ(type##IIR); \ 135 I915_WRITE(type##IIR, 0xffffffff); \ 136 POSTING_READ(type##IIR); \ 137 } while (0) 138 139 /* 140 * We should clear IMR at preinstall/uninstall, and just check at postinstall. 
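 *
 * The RESET/INIT helpers above and below write IIR twice because, as
 * noted next to GEN8_IRQ_RESET_NDX(), the IIR can latch a second event
 * behind the one currently being cleared; a single write could leave
 * that second event pending. A postinstall for a block then looks
 * roughly like (illustrative sketch, not a literal call site):
 *
 *	GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
 *
 * which first asserts that IIR is already zero and only then programs
 * IER/IMR.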
 */
static void gen5_assert_iir_is_zero(struct drm_i915_private *dev_priv,
				    i915_reg_t reg)
{
	u32 val = I915_READ(reg);

	if (val == 0)
		return;

	WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
	     i915_mmio_reg_offset(reg), val);
	I915_WRITE(reg, 0xffffffff);
	POSTING_READ(reg);
	I915_WRITE(reg, 0xffffffff);
	POSTING_READ(reg);
}

#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
	gen5_assert_iir_is_zero(dev_priv, GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
	I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
} while (0)

#define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
	gen5_assert_iir_is_zero(dev_priv, type##IIR); \
	I915_WRITE(type##IER, (ier_val)); \
	I915_WRITE(type##IMR, (imr_val)); \
	POSTING_READ(type##IMR); \
} while (0)

static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);

/* For display hotplug interrupt */
static inline void
i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
				     uint32_t mask,
				     uint32_t bits)
{
	uint32_t val;

	assert_spin_locked(&dev_priv->irq_lock);
	WARN_ON(bits & ~mask);

	val = I915_READ(PORT_HOTPLUG_EN);
	val &= ~mask;
	val |= bits;
	I915_WRITE(PORT_HOTPLUG_EN, val);
}

/**
 * i915_hotplug_interrupt_update - update hotplug interrupt enable
 * @dev_priv: driver private
 * @mask: bits to update
 * @bits: bits to enable
 * NOTE: the HPD enable bits are modified both inside and outside
 * of an interrupt context. So that concurrent read-modify-write cycles
 * do not interfere, these bits are protected by a spinlock. Since this
 * function is usually not called from a context where the lock is
 * held already, this function acquires the lock itself. A non-locking
 * version is also available.
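 *
 * For example, enabling just the port B hotplug interrupt on a
 * gmch-style platform would look something like
 *
 *	i915_hotplug_interrupt_update(dev_priv, PORTB_HOTPLUG_INT_EN,
 *				      PORTB_HOTPLUG_INT_EN);
 *
 * and passing bits == 0 with the same mask disables it again.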
203 */ 204 void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv, 205 uint32_t mask, 206 uint32_t bits) 207 { 208 spin_lock_irq(&dev_priv->irq_lock); 209 i915_hotplug_interrupt_update_locked(dev_priv, mask, bits); 210 spin_unlock_irq(&dev_priv->irq_lock); 211 } 212 213 /** 214 * ilk_update_display_irq - update DEIMR 215 * @dev_priv: driver private 216 * @interrupt_mask: mask of interrupt bits to update 217 * @enabled_irq_mask: mask of interrupt bits to enable 218 */ 219 void ilk_update_display_irq(struct drm_i915_private *dev_priv, 220 uint32_t interrupt_mask, 221 uint32_t enabled_irq_mask) 222 { 223 uint32_t new_val; 224 225 assert_spin_locked(&dev_priv->irq_lock); 226 227 WARN_ON(enabled_irq_mask & ~interrupt_mask); 228 229 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 230 return; 231 232 new_val = dev_priv->irq_mask; 233 new_val &= ~interrupt_mask; 234 new_val |= (~enabled_irq_mask & interrupt_mask); 235 236 if (new_val != dev_priv->irq_mask) { 237 dev_priv->irq_mask = new_val; 238 I915_WRITE(DEIMR, dev_priv->irq_mask); 239 POSTING_READ(DEIMR); 240 } 241 } 242 243 /** 244 * ilk_update_gt_irq - update GTIMR 245 * @dev_priv: driver private 246 * @interrupt_mask: mask of interrupt bits to update 247 * @enabled_irq_mask: mask of interrupt bits to enable 248 */ 249 static void ilk_update_gt_irq(struct drm_i915_private *dev_priv, 250 uint32_t interrupt_mask, 251 uint32_t enabled_irq_mask) 252 { 253 assert_spin_locked(&dev_priv->irq_lock); 254 255 WARN_ON(enabled_irq_mask & ~interrupt_mask); 256 257 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 258 return; 259 260 dev_priv->gt_irq_mask &= ~interrupt_mask; 261 dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask); 262 I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 263 } 264 265 void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask) 266 { 267 ilk_update_gt_irq(dev_priv, mask, mask); 268 POSTING_READ_FW(GTIMR); 269 } 270 271 void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask) 272 { 273 ilk_update_gt_irq(dev_priv, mask, 0); 274 } 275 276 static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv) 277 { 278 return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR; 279 } 280 281 static i915_reg_t gen6_pm_imr(struct drm_i915_private *dev_priv) 282 { 283 return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR; 284 } 285 286 static i915_reg_t gen6_pm_ier(struct drm_i915_private *dev_priv) 287 { 288 return INTEL_INFO(dev_priv)->gen >= 8 ? 
GEN8_GT_IER(2) : GEN6_PMIER; 289 } 290 291 /** 292 * snb_update_pm_irq - update GEN6_PMIMR 293 * @dev_priv: driver private 294 * @interrupt_mask: mask of interrupt bits to update 295 * @enabled_irq_mask: mask of interrupt bits to enable 296 */ 297 static void snb_update_pm_irq(struct drm_i915_private *dev_priv, 298 uint32_t interrupt_mask, 299 uint32_t enabled_irq_mask) 300 { 301 uint32_t new_val; 302 303 WARN_ON(enabled_irq_mask & ~interrupt_mask); 304 305 assert_spin_locked(&dev_priv->irq_lock); 306 307 new_val = dev_priv->pm_imr; 308 new_val &= ~interrupt_mask; 309 new_val |= (~enabled_irq_mask & interrupt_mask); 310 311 if (new_val != dev_priv->pm_imr) { 312 dev_priv->pm_imr = new_val; 313 I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_imr); 314 POSTING_READ(gen6_pm_imr(dev_priv)); 315 } 316 } 317 318 void gen6_unmask_pm_irq(struct drm_i915_private *dev_priv, u32 mask) 319 { 320 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 321 return; 322 323 snb_update_pm_irq(dev_priv, mask, mask); 324 } 325 326 static void __gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask) 327 { 328 snb_update_pm_irq(dev_priv, mask, 0); 329 } 330 331 void gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask) 332 { 333 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 334 return; 335 336 __gen6_mask_pm_irq(dev_priv, mask); 337 } 338 339 void gen6_reset_pm_iir(struct drm_i915_private *dev_priv, u32 reset_mask) 340 { 341 i915_reg_t reg = gen6_pm_iir(dev_priv); 342 343 assert_spin_locked(&dev_priv->irq_lock); 344 345 I915_WRITE(reg, reset_mask); 346 I915_WRITE(reg, reset_mask); 347 POSTING_READ(reg); 348 } 349 350 void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, u32 enable_mask) 351 { 352 assert_spin_locked(&dev_priv->irq_lock); 353 354 dev_priv->pm_ier |= enable_mask; 355 I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->pm_ier); 356 gen6_unmask_pm_irq(dev_priv, enable_mask); 357 /* unmask_pm_irq provides an implicit barrier (POSTING_READ) */ 358 } 359 360 void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, u32 disable_mask) 361 { 362 assert_spin_locked(&dev_priv->irq_lock); 363 364 dev_priv->pm_ier &= ~disable_mask; 365 __gen6_mask_pm_irq(dev_priv, disable_mask); 366 I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->pm_ier); 367 /* though a barrier is missing here, but don't really need a one */ 368 } 369 370 void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv) 371 { 372 spin_lock_irq(&dev_priv->irq_lock); 373 gen6_reset_pm_iir(dev_priv, dev_priv->pm_rps_events); 374 dev_priv->rps.pm_iir = 0; 375 spin_unlock_irq(&dev_priv->irq_lock); 376 } 377 378 void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv) 379 { 380 if (READ_ONCE(dev_priv->rps.interrupts_enabled)) 381 return; 382 383 spin_lock_irq(&dev_priv->irq_lock); 384 WARN_ON_ONCE(dev_priv->rps.pm_iir); 385 WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events); 386 dev_priv->rps.interrupts_enabled = true; 387 gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events); 388 389 spin_unlock_irq(&dev_priv->irq_lock); 390 } 391 392 u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask) 393 { 394 return (mask & ~dev_priv->rps.pm_intr_keep); 395 } 396 397 void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv) 398 { 399 if (!READ_ONCE(dev_priv->rps.interrupts_enabled)) 400 return; 401 402 spin_lock_irq(&dev_priv->irq_lock); 403 dev_priv->rps.interrupts_enabled = false; 404 405 I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0u)); 406 407 gen6_disable_pm_irq(dev_priv, 
			   dev_priv->pm_rps_events);

	spin_unlock_irq(&dev_priv->irq_lock);
	synchronize_irq(dev_priv->drm.irq);

	/* Now that we will not be generating any more work, flush any
	 * outstanding tasks. As we are called on the RPS idle path,
	 * we will reset the GPU to minimum frequencies, so the current
	 * state of the worker can be discarded.
	 */
	cancel_work_sync(&dev_priv->rps.work);
	gen6_reset_rps_interrupts(dev_priv);
}

void gen9_reset_guc_interrupts(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	gen6_reset_pm_iir(dev_priv, dev_priv->pm_guc_events);
	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	if (!dev_priv->guc.interrupts_enabled) {
		WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) &
			     dev_priv->pm_guc_events);
		dev_priv->guc.interrupts_enabled = true;
		gen6_enable_pm_irq(dev_priv, dev_priv->pm_guc_events);
	}
	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	dev_priv->guc.interrupts_enabled = false;

	gen6_disable_pm_irq(dev_priv, dev_priv->pm_guc_events);

	spin_unlock_irq(&dev_priv->irq_lock);
	synchronize_irq(dev_priv->drm.irq);

	gen9_reset_guc_interrupts(dev_priv);
}

/**
 * bdw_update_port_irq - update DE port interrupt
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
				uint32_t interrupt_mask,
				uint32_t enabled_irq_mask)
{
	uint32_t new_val;
	uint32_t old_val;

	assert_spin_locked(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	old_val = I915_READ(GEN8_DE_PORT_IMR);

	new_val = old_val;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != old_val) {
		I915_WRITE(GEN8_DE_PORT_IMR, new_val);
		POSTING_READ(GEN8_DE_PORT_IMR);
	}
}

/**
 * bdw_update_pipe_irq - update DE pipe interrupt
 * @dev_priv: driver private
 * @pipe: pipe whose interrupt to update
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
			 enum pipe pipe,
			 uint32_t interrupt_mask,
			 uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	assert_spin_locked(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->de_irq_mask[pipe];
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->de_irq_mask[pipe]) {
		dev_priv->de_irq_mask[pipe] = new_val;
		I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
		POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
	}
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
uint32_t interrupt_mask, 525 uint32_t enabled_irq_mask) 526 { 527 uint32_t sdeimr = I915_READ(SDEIMR); 528 sdeimr &= ~interrupt_mask; 529 sdeimr |= (~enabled_irq_mask & interrupt_mask); 530 531 WARN_ON(enabled_irq_mask & ~interrupt_mask); 532 533 assert_spin_locked(&dev_priv->irq_lock); 534 535 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 536 return; 537 538 I915_WRITE(SDEIMR, sdeimr); 539 POSTING_READ(SDEIMR); 540 } 541 542 static void 543 __i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, 544 u32 enable_mask, u32 status_mask) 545 { 546 i915_reg_t reg = PIPESTAT(pipe); 547 u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK; 548 549 assert_spin_locked(&dev_priv->irq_lock); 550 WARN_ON(!intel_irqs_enabled(dev_priv)); 551 552 if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK || 553 status_mask & ~PIPESTAT_INT_STATUS_MASK, 554 "pipe %c: enable_mask=0x%x, status_mask=0x%x\n", 555 pipe_name(pipe), enable_mask, status_mask)) 556 return; 557 558 if ((pipestat & enable_mask) == enable_mask) 559 return; 560 561 dev_priv->pipestat_irq_mask[pipe] |= status_mask; 562 563 /* Enable the interrupt, clear any pending status */ 564 pipestat |= enable_mask | status_mask; 565 I915_WRITE(reg, pipestat); 566 POSTING_READ(reg); 567 } 568 569 static void 570 __i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, 571 u32 enable_mask, u32 status_mask) 572 { 573 i915_reg_t reg = PIPESTAT(pipe); 574 u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK; 575 576 assert_spin_locked(&dev_priv->irq_lock); 577 WARN_ON(!intel_irqs_enabled(dev_priv)); 578 579 if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK || 580 status_mask & ~PIPESTAT_INT_STATUS_MASK, 581 "pipe %c: enable_mask=0x%x, status_mask=0x%x\n", 582 pipe_name(pipe), enable_mask, status_mask)) 583 return; 584 585 if ((pipestat & enable_mask) == 0) 586 return; 587 588 dev_priv->pipestat_irq_mask[pipe] &= ~status_mask; 589 590 pipestat &= ~enable_mask; 591 I915_WRITE(reg, pipestat); 592 POSTING_READ(reg); 593 } 594 595 static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask) 596 { 597 u32 enable_mask = status_mask << 16; 598 599 /* 600 * On pipe A we don't support the PSR interrupt yet, 601 * on pipe B and C the same bit MBZ. 602 */ 603 if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV)) 604 return 0; 605 /* 606 * On pipe B and C we don't support the PSR interrupt yet, on pipe 607 * A the same bit is for perf counters which we don't use either. 
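	 *
	 * Other than these PSR bits (and the sprite flip done bits handled
	 * below), the enable bit for a given event simply sits 16 bits above
	 * its status bit, e.g. PIPE_START_VBLANK_INTERRUPT_STATUS << 16 is
	 * the corresponding enable bit; that is what the status_mask << 16
	 * above relies on.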
608 */ 609 if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV)) 610 return 0; 611 612 enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS | 613 SPRITE0_FLIP_DONE_INT_EN_VLV | 614 SPRITE1_FLIP_DONE_INT_EN_VLV); 615 if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV) 616 enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV; 617 if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV) 618 enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV; 619 620 return enable_mask; 621 } 622 623 void 624 i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, 625 u32 status_mask) 626 { 627 u32 enable_mask; 628 629 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 630 enable_mask = vlv_get_pipestat_enable_mask(&dev_priv->drm, 631 status_mask); 632 else 633 enable_mask = status_mask << 16; 634 __i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask); 635 } 636 637 void 638 i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, 639 u32 status_mask) 640 { 641 u32 enable_mask; 642 643 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 644 enable_mask = vlv_get_pipestat_enable_mask(&dev_priv->drm, 645 status_mask); 646 else 647 enable_mask = status_mask << 16; 648 __i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask); 649 } 650 651 /** 652 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion 653 * @dev_priv: i915 device private 654 */ 655 static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv) 656 { 657 if (!dev_priv->opregion.asle || !IS_MOBILE(dev_priv)) 658 return; 659 660 spin_lock_irq(&dev_priv->irq_lock); 661 662 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS); 663 if (INTEL_GEN(dev_priv) >= 4) 664 i915_enable_pipestat(dev_priv, PIPE_A, 665 PIPE_LEGACY_BLC_EVENT_STATUS); 666 667 spin_unlock_irq(&dev_priv->irq_lock); 668 } 669 670 /* 671 * This timing diagram depicts the video signal in and 672 * around the vertical blanking period. 673 * 674 * Assumptions about the fictitious mode used in this example: 675 * vblank_start >= 3 676 * vsync_start = vblank_start + 1 677 * vsync_end = vblank_start + 2 678 * vtotal = vblank_start + 3 679 * 680 * start of vblank: 681 * latch double buffered registers 682 * increment frame counter (ctg+) 683 * generate start of vblank interrupt (gen4+) 684 * | 685 * | frame start: 686 * | generate frame start interrupt (aka. vblank interrupt) (gmch) 687 * | may be shifted forward 1-3 extra lines via PIPECONF 688 * | | 689 * | | start of vsync: 690 * | | generate vsync interrupt 691 * | | | 692 * ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx 693 * . \hs/ . \hs/ \hs/ \hs/ . 
\hs/ 694 * ----va---> <-----------------vb--------------------> <--------va------------- 695 * | | <----vs-----> | 696 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2) 697 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+) 698 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi) 699 * | | | 700 * last visible pixel first visible pixel 701 * | increment frame counter (gen3/4) 702 * pixel counter = vblank_start * htotal pixel counter = 0 (gen3/4) 703 * 704 * x = horizontal active 705 * _ = horizontal blanking 706 * hs = horizontal sync 707 * va = vertical active 708 * vb = vertical blanking 709 * vs = vertical sync 710 * vbs = vblank_start (number) 711 * 712 * Summary: 713 * - most events happen at the start of horizontal sync 714 * - frame start happens at the start of horizontal blank, 1-4 lines 715 * (depending on PIPECONF settings) after the start of vblank 716 * - gen3/4 pixel and frame counter are synchronized with the start 717 * of horizontal active on the first line of vertical active 718 */ 719 720 /* Called from drm generic code, passed a 'crtc', which 721 * we use as a pipe index 722 */ 723 static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe) 724 { 725 struct drm_i915_private *dev_priv = to_i915(dev); 726 i915_reg_t high_frame, low_frame; 727 u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal; 728 struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv, 729 pipe); 730 const struct drm_display_mode *mode = &intel_crtc->base.hwmode; 731 732 htotal = mode->crtc_htotal; 733 hsync_start = mode->crtc_hsync_start; 734 vbl_start = mode->crtc_vblank_start; 735 if (mode->flags & DRM_MODE_FLAG_INTERLACE) 736 vbl_start = DIV_ROUND_UP(vbl_start, 2); 737 738 /* Convert to pixel count */ 739 vbl_start *= htotal; 740 741 /* Start of vblank event occurs at start of hsync */ 742 vbl_start -= htotal - hsync_start; 743 744 high_frame = PIPEFRAME(pipe); 745 low_frame = PIPEFRAMEPIXEL(pipe); 746 747 /* 748 * High & low register fields aren't synchronized, so make sure 749 * we get a low value that's stable across two reads of the high 750 * register. 751 */ 752 do { 753 high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK; 754 low = I915_READ(low_frame); 755 high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK; 756 } while (high1 != high2); 757 758 high1 >>= PIPE_FRAME_HIGH_SHIFT; 759 pixel = low & PIPE_PIXEL_MASK; 760 low >>= PIPE_FRAME_LOW_SHIFT; 761 762 /* 763 * The frame counter increments at beginning of active. 764 * Cook up a vblank counter by also checking the pixel 765 * counter against vblank start. 766 */ 767 return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff; 768 } 769 770 static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe) 771 { 772 struct drm_i915_private *dev_priv = to_i915(dev); 773 774 return I915_READ(PIPE_FRMCOUNT_G4X(pipe)); 775 } 776 777 /* I915_READ_FW, only for fast reads of display block, no need for forcewake etc. 
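 * (The callers below hold uncore.lock with interrupts disabled while
 * sampling these registers, so the raw accessors keep this
 * timing-critical path as short as possible.)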
 */
static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct drm_display_mode *mode = &crtc->base.hwmode;
	enum pipe pipe = crtc->pipe;
	int position, vtotal;

	vtotal = mode->crtc_vtotal;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vtotal /= 2;

	if (IS_GEN2(dev_priv))
		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
	else
		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

	/*
	 * On HSW, the DSL reg (0x70000) appears to return 0 if we
	 * read it just before the start of vblank. So try it again
	 * so we don't accidentally end up spanning a vblank frame
	 * increment, causing the pipe_update_end() code to squawk at us.
	 *
	 * The nature of this problem means we can't simply check the ISR
	 * bit and return the vblank start value; nor can we use the scanline
	 * debug register in the transcoder as it appears to have the same
	 * problem. We may need to extend this to include other platforms,
	 * but so far testing only shows the problem on HSW.
	 */
	if (HAS_DDI(dev_priv) && !position) {
		int i, temp;

		for (i = 0; i < 100; i++) {
			udelay(1);
			temp = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) &
				DSL_LINEMASK_GEN3;
			if (temp != position) {
				position = temp;
				break;
			}
		}
	}

	/*
	 * See update_scanline_offset() for the details on the
	 * scanline_offset adjustment.
	 */
	return (position + crtc->scanline_offset) % vtotal;
}

static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
				    unsigned int flags, int *vpos, int *hpos,
				    ktime_t *stime, ktime_t *etime,
				    const struct drm_display_mode *mode)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv,
								pipe);
	int position;
	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;
	unsigned long irqflags;

	if (WARN_ON(!mode->crtc_clock)) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

	ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = __intel_get_crtc_scanline(intel_crtc);
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
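		 * (Once adjusted, the split further down is simply
		 *  vpos = position / htotal with hpos as the remainder;
		 *  the scanline-register path above only ever reports a
		 *  vertical position.)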
		 */
		position = (I915_READ_FW(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;

		/*
		 * In interlaced modes, the pixel counter counts all pixels,
		 * so one field will have htotal more pixels. In order to keep
		 * the reported position from jumping backwards when the pixel
		 * counter is beyond the length of the shorter field, just
		 * clamp the position to the length of the shorter field. This
		 * matches how the scanline counter based position works since
		 * the scanline counter doesn't count the two half lines.
		 */
		if (position >= vtotal)
			position = vtotal - 1;

		/*
		 * Start of vblank interrupt is triggered at start of hsync,
		 * just prior to the first active line of vblank. However we
		 * consider lines to start at the leading edge of horizontal
		 * active. So, should we get here before we've crossed into
		 * the horizontal active of the first line in vblank, we would
		 * not set the DRM_SCANOUTPOS_IN_VBLANK flag. In order to fix
		 * that, always add htotal-hsync_start to the current pixel
		 * position.
		 */
		position = (position + htotal - hsync_start) % vtotal;
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	in_vbl = position >= vbl_start && position < vbl_end;

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;

	if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
		*vpos = position;
		*hpos = 0;
	} else {
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* In vblank?
*/ 946 if (in_vbl) 947 ret |= DRM_SCANOUTPOS_IN_VBLANK; 948 949 return ret; 950 } 951 952 int intel_get_crtc_scanline(struct intel_crtc *crtc) 953 { 954 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 955 unsigned long irqflags; 956 int position; 957 958 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 959 position = __intel_get_crtc_scanline(crtc); 960 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 961 962 return position; 963 } 964 965 static int i915_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe, 966 int *max_error, 967 struct timeval *vblank_time, 968 unsigned flags) 969 { 970 struct drm_i915_private *dev_priv = to_i915(dev); 971 struct intel_crtc *crtc; 972 973 if (pipe >= INTEL_INFO(dev_priv)->num_pipes) { 974 DRM_ERROR("Invalid crtc %u\n", pipe); 975 return -EINVAL; 976 } 977 978 /* Get drm_crtc to timestamp: */ 979 crtc = intel_get_crtc_for_pipe(dev_priv, pipe); 980 if (crtc == NULL) { 981 DRM_ERROR("Invalid crtc %u\n", pipe); 982 return -EINVAL; 983 } 984 985 if (!crtc->base.hwmode.crtc_clock) { 986 DRM_DEBUG_KMS("crtc %u is disabled\n", pipe); 987 return -EBUSY; 988 } 989 990 /* Helper routine in DRM core does all the work: */ 991 return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error, 992 vblank_time, flags, 993 &crtc->base.hwmode); 994 } 995 996 static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv) 997 { 998 u32 busy_up, busy_down, max_avg, min_avg; 999 u8 new_delay; 1000 1001 spin_lock(&mchdev_lock); 1002 1003 I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS)); 1004 1005 new_delay = dev_priv->ips.cur_delay; 1006 1007 I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG); 1008 busy_up = I915_READ(RCPREVBSYTUPAVG); 1009 busy_down = I915_READ(RCPREVBSYTDNAVG); 1010 max_avg = I915_READ(RCBMAXAVG); 1011 min_avg = I915_READ(RCBMINAVG); 1012 1013 /* Handle RCS change request from hw */ 1014 if (busy_up > max_avg) { 1015 if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay) 1016 new_delay = dev_priv->ips.cur_delay - 1; 1017 if (new_delay < dev_priv->ips.max_delay) 1018 new_delay = dev_priv->ips.max_delay; 1019 } else if (busy_down < min_avg) { 1020 if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay) 1021 new_delay = dev_priv->ips.cur_delay + 1; 1022 if (new_delay > dev_priv->ips.min_delay) 1023 new_delay = dev_priv->ips.min_delay; 1024 } 1025 1026 if (ironlake_set_drps(dev_priv, new_delay)) 1027 dev_priv->ips.cur_delay = new_delay; 1028 1029 spin_unlock(&mchdev_lock); 1030 1031 return; 1032 } 1033 1034 static void notify_ring(struct intel_engine_cs *engine) 1035 { 1036 smp_store_mb(engine->breadcrumbs.irq_posted, true); 1037 if (intel_engine_wakeup(engine)) 1038 trace_i915_gem_request_notify(engine); 1039 } 1040 1041 static void vlv_c0_read(struct drm_i915_private *dev_priv, 1042 struct intel_rps_ei *ei) 1043 { 1044 ei->cz_clock = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP); 1045 ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT); 1046 ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT); 1047 } 1048 1049 void gen6_rps_reset_ei(struct drm_i915_private *dev_priv) 1050 { 1051 memset(&dev_priv->rps.ei, 0, sizeof(dev_priv->rps.ei)); 1052 } 1053 1054 static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir) 1055 { 1056 const struct intel_rps_ei *prev = &dev_priv->rps.ei; 1057 struct intel_rps_ei now; 1058 u32 events = 0; 1059 1060 if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0) 1061 return 0; 1062 1063 vlv_c0_read(dev_priv, &now); 1064 if (now.cz_clock == 0) 1065 return 0; 1066 1067 if (prev->cz_clock) { 1068 u64 
time, c0; 1069 unsigned int mul; 1070 1071 mul = VLV_CZ_CLOCK_TO_MILLI_SEC * 100; /* scale to threshold% */ 1072 if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH) 1073 mul <<= 8; 1074 1075 time = now.cz_clock - prev->cz_clock; 1076 time *= dev_priv->czclk_freq; 1077 1078 /* Workload can be split between render + media, 1079 * e.g. SwapBuffers being blitted in X after being rendered in 1080 * mesa. To account for this we need to combine both engines 1081 * into our activity counter. 1082 */ 1083 c0 = now.render_c0 - prev->render_c0; 1084 c0 += now.media_c0 - prev->media_c0; 1085 c0 *= mul; 1086 1087 if (c0 > time * dev_priv->rps.up_threshold) 1088 events = GEN6_PM_RP_UP_THRESHOLD; 1089 else if (c0 < time * dev_priv->rps.down_threshold) 1090 events = GEN6_PM_RP_DOWN_THRESHOLD; 1091 } 1092 1093 dev_priv->rps.ei = now; 1094 return events; 1095 } 1096 1097 static bool any_waiters(struct drm_i915_private *dev_priv) 1098 { 1099 struct intel_engine_cs *engine; 1100 enum intel_engine_id id; 1101 1102 for_each_engine(engine, dev_priv, id) 1103 if (intel_engine_has_waiter(engine)) 1104 return true; 1105 1106 return false; 1107 } 1108 1109 static void gen6_pm_rps_work(struct work_struct *work) 1110 { 1111 struct drm_i915_private *dev_priv = 1112 container_of(work, struct drm_i915_private, rps.work); 1113 bool client_boost; 1114 int new_delay, adj, min, max; 1115 u32 pm_iir; 1116 1117 spin_lock_irq(&dev_priv->irq_lock); 1118 /* Speed up work cancelation during disabling rps interrupts. */ 1119 if (!dev_priv->rps.interrupts_enabled) { 1120 spin_unlock_irq(&dev_priv->irq_lock); 1121 return; 1122 } 1123 1124 pm_iir = dev_priv->rps.pm_iir; 1125 dev_priv->rps.pm_iir = 0; 1126 /* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */ 1127 gen6_unmask_pm_irq(dev_priv, dev_priv->pm_rps_events); 1128 client_boost = dev_priv->rps.client_boost; 1129 dev_priv->rps.client_boost = false; 1130 spin_unlock_irq(&dev_priv->irq_lock); 1131 1132 /* Make sure we didn't queue anything we're not going to process. */ 1133 WARN_ON(pm_iir & ~dev_priv->pm_rps_events); 1134 1135 if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost) 1136 return; 1137 1138 mutex_lock(&dev_priv->rps.hw_lock); 1139 1140 pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir); 1141 1142 adj = dev_priv->rps.last_adj; 1143 new_delay = dev_priv->rps.cur_freq; 1144 min = dev_priv->rps.min_freq_softlimit; 1145 max = dev_priv->rps.max_freq_softlimit; 1146 if (client_boost || any_waiters(dev_priv)) 1147 max = dev_priv->rps.max_freq; 1148 if (client_boost && new_delay < dev_priv->rps.boost_freq) { 1149 new_delay = dev_priv->rps.boost_freq; 1150 adj = 0; 1151 } else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) { 1152 if (adj > 0) 1153 adj *= 2; 1154 else /* CHV needs even encode values */ 1155 adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1; 1156 1157 if (new_delay >= dev_priv->rps.max_freq_softlimit) 1158 adj = 0; 1159 /* 1160 * For better performance, jump directly 1161 * to RPe if we're below it. 
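		 * ("RPe" is the hardware's efficient frequency, tracked as
		 *  dev_priv->rps.efficient_freq below; e.g. when ramping up
		 *  from well below it we snap straight to it rather than
		 *  stepping through every intermediate bin.)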
		 */
		if (new_delay < dev_priv->rps.efficient_freq - adj) {
			new_delay = dev_priv->rps.efficient_freq;
			adj = 0;
		}
	} else if (client_boost || any_waiters(dev_priv)) {
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
		if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;
		else
			new_delay = dev_priv->rps.min_freq_softlimit;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
		if (adj < 0)
			adj *= 2;
		else /* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;

		if (new_delay <= dev_priv->rps.min_freq_softlimit)
			adj = 0;
	} else { /* unknown event */
		adj = 0;
	}

	dev_priv->rps.last_adj = adj;

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	new_delay += adj;
	new_delay = clamp_t(int, new_delay, min, max);

	intel_set_rps(dev_priv, new_delay);

	mutex_unlock(&dev_priv->rps.hw_lock);
}


/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * the same row is more likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	uint32_t misccpctl;
	uint8_t slice = 0;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
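	 *
	 * Each affected slice results in a uevent on the drm device with a
	 * payload of roughly the following shape (see parity_event below):
	 *
	 *	I915_L3_PARITY_UEVENT=1 ROW=<n> BANK=<n> SUBBANK=<n> SLICE=<n>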
1222 */ 1223 mutex_lock(&dev_priv->drm.struct_mutex); 1224 1225 /* If we've screwed up tracking, just let the interrupt fire again */ 1226 if (WARN_ON(!dev_priv->l3_parity.which_slice)) 1227 goto out; 1228 1229 misccpctl = I915_READ(GEN7_MISCCPCTL); 1230 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE); 1231 POSTING_READ(GEN7_MISCCPCTL); 1232 1233 while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) { 1234 i915_reg_t reg; 1235 1236 slice--; 1237 if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv))) 1238 break; 1239 1240 dev_priv->l3_parity.which_slice &= ~(1<<slice); 1241 1242 reg = GEN7_L3CDERRST1(slice); 1243 1244 error_status = I915_READ(reg); 1245 row = GEN7_PARITY_ERROR_ROW(error_status); 1246 bank = GEN7_PARITY_ERROR_BANK(error_status); 1247 subbank = GEN7_PARITY_ERROR_SUBBANK(error_status); 1248 1249 I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE); 1250 POSTING_READ(reg); 1251 1252 parity_event[0] = I915_L3_PARITY_UEVENT "=1"; 1253 parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row); 1254 parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank); 1255 parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank); 1256 parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice); 1257 parity_event[5] = NULL; 1258 1259 kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj, 1260 KOBJ_CHANGE, parity_event); 1261 1262 DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n", 1263 slice, row, bank, subbank); 1264 1265 kfree(parity_event[4]); 1266 kfree(parity_event[3]); 1267 kfree(parity_event[2]); 1268 kfree(parity_event[1]); 1269 } 1270 1271 I915_WRITE(GEN7_MISCCPCTL, misccpctl); 1272 1273 out: 1274 WARN_ON(dev_priv->l3_parity.which_slice); 1275 spin_lock_irq(&dev_priv->irq_lock); 1276 gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv)); 1277 spin_unlock_irq(&dev_priv->irq_lock); 1278 1279 mutex_unlock(&dev_priv->drm.struct_mutex); 1280 } 1281 1282 static void ivybridge_parity_error_irq_handler(struct drm_i915_private *dev_priv, 1283 u32 iir) 1284 { 1285 if (!HAS_L3_DPF(dev_priv)) 1286 return; 1287 1288 spin_lock(&dev_priv->irq_lock); 1289 gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv)); 1290 spin_unlock(&dev_priv->irq_lock); 1291 1292 iir &= GT_PARITY_ERROR(dev_priv); 1293 if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1) 1294 dev_priv->l3_parity.which_slice |= 1 << 1; 1295 1296 if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT) 1297 dev_priv->l3_parity.which_slice |= 1 << 0; 1298 1299 queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work); 1300 } 1301 1302 static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv, 1303 u32 gt_iir) 1304 { 1305 if (gt_iir & GT_RENDER_USER_INTERRUPT) 1306 notify_ring(dev_priv->engine[RCS]); 1307 if (gt_iir & ILK_BSD_USER_INTERRUPT) 1308 notify_ring(dev_priv->engine[VCS]); 1309 } 1310 1311 static void snb_gt_irq_handler(struct drm_i915_private *dev_priv, 1312 u32 gt_iir) 1313 { 1314 if (gt_iir & GT_RENDER_USER_INTERRUPT) 1315 notify_ring(dev_priv->engine[RCS]); 1316 if (gt_iir & GT_BSD_USER_INTERRUPT) 1317 notify_ring(dev_priv->engine[VCS]); 1318 if (gt_iir & GT_BLT_USER_INTERRUPT) 1319 notify_ring(dev_priv->engine[BCS]); 1320 1321 if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT | 1322 GT_BSD_CS_ERROR_INTERRUPT | 1323 GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) 1324 DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir); 1325 1326 if (gt_iir & GT_PARITY_ERROR(dev_priv)) 1327 ivybridge_parity_error_irq_handler(dev_priv, gt_iir); 1328 } 1329 1330 static __always_inline void 1331 
gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir, int test_shift) 1332 { 1333 if (iir & (GT_RENDER_USER_INTERRUPT << test_shift)) 1334 notify_ring(engine); 1335 if (iir & (GT_CONTEXT_SWITCH_INTERRUPT << test_shift)) 1336 tasklet_schedule(&engine->irq_tasklet); 1337 } 1338 1339 static irqreturn_t gen8_gt_irq_ack(struct drm_i915_private *dev_priv, 1340 u32 master_ctl, 1341 u32 gt_iir[4]) 1342 { 1343 irqreturn_t ret = IRQ_NONE; 1344 1345 if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) { 1346 gt_iir[0] = I915_READ_FW(GEN8_GT_IIR(0)); 1347 if (gt_iir[0]) { 1348 I915_WRITE_FW(GEN8_GT_IIR(0), gt_iir[0]); 1349 ret = IRQ_HANDLED; 1350 } else 1351 DRM_ERROR("The master control interrupt lied (GT0)!\n"); 1352 } 1353 1354 if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) { 1355 gt_iir[1] = I915_READ_FW(GEN8_GT_IIR(1)); 1356 if (gt_iir[1]) { 1357 I915_WRITE_FW(GEN8_GT_IIR(1), gt_iir[1]); 1358 ret = IRQ_HANDLED; 1359 } else 1360 DRM_ERROR("The master control interrupt lied (GT1)!\n"); 1361 } 1362 1363 if (master_ctl & GEN8_GT_VECS_IRQ) { 1364 gt_iir[3] = I915_READ_FW(GEN8_GT_IIR(3)); 1365 if (gt_iir[3]) { 1366 I915_WRITE_FW(GEN8_GT_IIR(3), gt_iir[3]); 1367 ret = IRQ_HANDLED; 1368 } else 1369 DRM_ERROR("The master control interrupt lied (GT3)!\n"); 1370 } 1371 1372 if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) { 1373 gt_iir[2] = I915_READ_FW(GEN8_GT_IIR(2)); 1374 if (gt_iir[2] & (dev_priv->pm_rps_events | 1375 dev_priv->pm_guc_events)) { 1376 I915_WRITE_FW(GEN8_GT_IIR(2), 1377 gt_iir[2] & (dev_priv->pm_rps_events | 1378 dev_priv->pm_guc_events)); 1379 ret = IRQ_HANDLED; 1380 } else 1381 DRM_ERROR("The master control interrupt lied (PM)!\n"); 1382 } 1383 1384 return ret; 1385 } 1386 1387 static void gen8_gt_irq_handler(struct drm_i915_private *dev_priv, 1388 u32 gt_iir[4]) 1389 { 1390 if (gt_iir[0]) { 1391 gen8_cs_irq_handler(dev_priv->engine[RCS], 1392 gt_iir[0], GEN8_RCS_IRQ_SHIFT); 1393 gen8_cs_irq_handler(dev_priv->engine[BCS], 1394 gt_iir[0], GEN8_BCS_IRQ_SHIFT); 1395 } 1396 1397 if (gt_iir[1]) { 1398 gen8_cs_irq_handler(dev_priv->engine[VCS], 1399 gt_iir[1], GEN8_VCS1_IRQ_SHIFT); 1400 gen8_cs_irq_handler(dev_priv->engine[VCS2], 1401 gt_iir[1], GEN8_VCS2_IRQ_SHIFT); 1402 } 1403 1404 if (gt_iir[3]) 1405 gen8_cs_irq_handler(dev_priv->engine[VECS], 1406 gt_iir[3], GEN8_VECS_IRQ_SHIFT); 1407 1408 if (gt_iir[2] & dev_priv->pm_rps_events) 1409 gen6_rps_irq_handler(dev_priv, gt_iir[2]); 1410 1411 if (gt_iir[2] & dev_priv->pm_guc_events) 1412 gen9_guc_irq_handler(dev_priv, gt_iir[2]); 1413 } 1414 1415 static bool bxt_port_hotplug_long_detect(enum port port, u32 val) 1416 { 1417 switch (port) { 1418 case PORT_A: 1419 return val & PORTA_HOTPLUG_LONG_DETECT; 1420 case PORT_B: 1421 return val & PORTB_HOTPLUG_LONG_DETECT; 1422 case PORT_C: 1423 return val & PORTC_HOTPLUG_LONG_DETECT; 1424 default: 1425 return false; 1426 } 1427 } 1428 1429 static bool spt_port_hotplug2_long_detect(enum port port, u32 val) 1430 { 1431 switch (port) { 1432 case PORT_E: 1433 return val & PORTE_HOTPLUG_LONG_DETECT; 1434 default: 1435 return false; 1436 } 1437 } 1438 1439 static bool spt_port_hotplug_long_detect(enum port port, u32 val) 1440 { 1441 switch (port) { 1442 case PORT_A: 1443 return val & PORTA_HOTPLUG_LONG_DETECT; 1444 case PORT_B: 1445 return val & PORTB_HOTPLUG_LONG_DETECT; 1446 case PORT_C: 1447 return val & PORTC_HOTPLUG_LONG_DETECT; 1448 case PORT_D: 1449 return val & PORTD_HOTPLUG_LONG_DETECT; 1450 default: 1451 return false; 1452 } 1453 } 1454 1455 static bool 
ilk_port_hotplug_long_detect(enum port port, u32 val) 1456 { 1457 switch (port) { 1458 case PORT_A: 1459 return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT; 1460 default: 1461 return false; 1462 } 1463 } 1464 1465 static bool pch_port_hotplug_long_detect(enum port port, u32 val) 1466 { 1467 switch (port) { 1468 case PORT_B: 1469 return val & PORTB_HOTPLUG_LONG_DETECT; 1470 case PORT_C: 1471 return val & PORTC_HOTPLUG_LONG_DETECT; 1472 case PORT_D: 1473 return val & PORTD_HOTPLUG_LONG_DETECT; 1474 default: 1475 return false; 1476 } 1477 } 1478 1479 static bool i9xx_port_hotplug_long_detect(enum port port, u32 val) 1480 { 1481 switch (port) { 1482 case PORT_B: 1483 return val & PORTB_HOTPLUG_INT_LONG_PULSE; 1484 case PORT_C: 1485 return val & PORTC_HOTPLUG_INT_LONG_PULSE; 1486 case PORT_D: 1487 return val & PORTD_HOTPLUG_INT_LONG_PULSE; 1488 default: 1489 return false; 1490 } 1491 } 1492 1493 /* 1494 * Get a bit mask of pins that have triggered, and which ones may be long. 1495 * This can be called multiple times with the same masks to accumulate 1496 * hotplug detection results from several registers. 1497 * 1498 * Note that the caller is expected to zero out the masks initially. 1499 */ 1500 static void intel_get_hpd_pins(u32 *pin_mask, u32 *long_mask, 1501 u32 hotplug_trigger, u32 dig_hotplug_reg, 1502 const u32 hpd[HPD_NUM_PINS], 1503 bool long_pulse_detect(enum port port, u32 val)) 1504 { 1505 enum port port; 1506 int i; 1507 1508 for_each_hpd_pin(i) { 1509 if ((hpd[i] & hotplug_trigger) == 0) 1510 continue; 1511 1512 *pin_mask |= BIT(i); 1513 1514 if (!intel_hpd_pin_to_port(i, &port)) 1515 continue; 1516 1517 if (long_pulse_detect(port, dig_hotplug_reg)) 1518 *long_mask |= BIT(i); 1519 } 1520 1521 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x\n", 1522 hotplug_trigger, dig_hotplug_reg, *pin_mask); 1523 1524 } 1525 1526 static void gmbus_irq_handler(struct drm_i915_private *dev_priv) 1527 { 1528 wake_up_all(&dev_priv->gmbus_wait_queue); 1529 } 1530 1531 static void dp_aux_irq_handler(struct drm_i915_private *dev_priv) 1532 { 1533 wake_up_all(&dev_priv->gmbus_wait_queue); 1534 } 1535 1536 #if defined(CONFIG_DEBUG_FS) 1537 static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 1538 enum pipe pipe, 1539 uint32_t crc0, uint32_t crc1, 1540 uint32_t crc2, uint32_t crc3, 1541 uint32_t crc4) 1542 { 1543 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe]; 1544 struct intel_pipe_crc_entry *entry; 1545 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); 1546 struct drm_driver *driver = dev_priv->drm.driver; 1547 uint32_t crcs[5]; 1548 int head, tail; 1549 1550 spin_lock(&pipe_crc->lock); 1551 if (pipe_crc->source) { 1552 if (!pipe_crc->entries) { 1553 spin_unlock(&pipe_crc->lock); 1554 DRM_DEBUG_KMS("spurious interrupt\n"); 1555 return; 1556 } 1557 1558 head = pipe_crc->head; 1559 tail = pipe_crc->tail; 1560 1561 if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) { 1562 spin_unlock(&pipe_crc->lock); 1563 DRM_ERROR("CRC buffer overflowing\n"); 1564 return; 1565 } 1566 1567 entry = &pipe_crc->entries[head]; 1568 1569 entry->frame = driver->get_vblank_counter(&dev_priv->drm, pipe); 1570 entry->crc[0] = crc0; 1571 entry->crc[1] = crc1; 1572 entry->crc[2] = crc2; 1573 entry->crc[3] = crc3; 1574 entry->crc[4] = crc4; 1575 1576 head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1); 1577 pipe_crc->head = head; 1578 1579 spin_unlock(&pipe_crc->lock); 1580 1581 wake_up_interruptible(&pipe_crc->wq); 1582 } else { 1583 /* 1584 * 
For some not yet identified reason, the first CRC is 1585 * bonkers. So let's just wait for the next vblank and read 1586 * out the buggy result. 1587 * 1588 * On CHV sometimes the second CRC is bonkers as well, so 1589 * don't trust that one either. 1590 */ 1591 if (pipe_crc->skipped == 0 || 1592 (IS_CHERRYVIEW(dev_priv) && pipe_crc->skipped == 1)) { 1593 pipe_crc->skipped++; 1594 spin_unlock(&pipe_crc->lock); 1595 return; 1596 } 1597 spin_unlock(&pipe_crc->lock); 1598 crcs[0] = crc0; 1599 crcs[1] = crc1; 1600 crcs[2] = crc2; 1601 crcs[3] = crc3; 1602 crcs[4] = crc4; 1603 drm_crtc_add_crc_entry(&crtc->base, true, 1604 drm_accurate_vblank_count(&crtc->base), 1605 crcs); 1606 } 1607 } 1608 #else 1609 static inline void 1610 display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 1611 enum pipe pipe, 1612 uint32_t crc0, uint32_t crc1, 1613 uint32_t crc2, uint32_t crc3, 1614 uint32_t crc4) {} 1615 #endif 1616 1617 1618 static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 1619 enum pipe pipe) 1620 { 1621 display_pipe_crc_irq_handler(dev_priv, pipe, 1622 I915_READ(PIPE_CRC_RES_1_IVB(pipe)), 1623 0, 0, 0, 0); 1624 } 1625 1626 static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 1627 enum pipe pipe) 1628 { 1629 display_pipe_crc_irq_handler(dev_priv, pipe, 1630 I915_READ(PIPE_CRC_RES_1_IVB(pipe)), 1631 I915_READ(PIPE_CRC_RES_2_IVB(pipe)), 1632 I915_READ(PIPE_CRC_RES_3_IVB(pipe)), 1633 I915_READ(PIPE_CRC_RES_4_IVB(pipe)), 1634 I915_READ(PIPE_CRC_RES_5_IVB(pipe))); 1635 } 1636 1637 static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 1638 enum pipe pipe) 1639 { 1640 uint32_t res1, res2; 1641 1642 if (INTEL_GEN(dev_priv) >= 3) 1643 res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe)); 1644 else 1645 res1 = 0; 1646 1647 if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) 1648 res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe)); 1649 else 1650 res2 = 0; 1651 1652 display_pipe_crc_irq_handler(dev_priv, pipe, 1653 I915_READ(PIPE_CRC_RES_RED(pipe)), 1654 I915_READ(PIPE_CRC_RES_GREEN(pipe)), 1655 I915_READ(PIPE_CRC_RES_BLUE(pipe)), 1656 res1, res2); 1657 } 1658 1659 /* The RPS events need forcewake, so we add them to a work queue and mask their 1660 * IMR bits until the work is done. Other interrupts can be processed without 1661 * the work queue. */ 1662 static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir) 1663 { 1664 if (pm_iir & dev_priv->pm_rps_events) { 1665 spin_lock(&dev_priv->irq_lock); 1666 gen6_mask_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events); 1667 if (dev_priv->rps.interrupts_enabled) { 1668 dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events; 1669 schedule_work(&dev_priv->rps.work); 1670 } 1671 spin_unlock(&dev_priv->irq_lock); 1672 } 1673 1674 if (INTEL_INFO(dev_priv)->gen >= 8) 1675 return; 1676 1677 if (HAS_VEBOX(dev_priv)) { 1678 if (pm_iir & PM_VEBOX_USER_INTERRUPT) 1679 notify_ring(dev_priv->engine[VECS]); 1680 1681 if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) 1682 DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir); 1683 } 1684 } 1685 1686 static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 gt_iir) 1687 { 1688 if (gt_iir & GEN9_GUC_TO_HOST_INT_EVENT) { 1689 /* Sample the log buffer flush related bits & clear them out now 1690 * itself from the message identity register to minimize the 1691 * probability of losing a flush interrupt, when there are back 1692 * to back flush interrupts. 
		 * There can be a new flush interrupt, for a different log buffer
		 * type (like for ISR), whilst Host is handling one (for DPC).
		 * Since the same bit is used in the message register for ISR &
		 * DPC, it could happen that GuC sets the bit for the 2nd
		 * interrupt but Host clears out the bit on handling the 1st
		 * interrupt.
		 */
		u32 msg, flush;

		msg = I915_READ(SOFT_SCRATCH(15));
		flush = msg & (INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED |
			       INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER);
		if (flush) {
			/* Clear the message bits that are handled */
			I915_WRITE(SOFT_SCRATCH(15), msg & ~flush);

			/* Handle flush interrupt in bottom half */
			queue_work(dev_priv->guc.log.flush_wq,
				   &dev_priv->guc.log.flush_work);

			dev_priv->guc.log.flush_interrupt_count++;
		} else {
			/* Leaving the unhandled event bits uncleared does not
			 * cause the interrupt to be re-triggered.
			 */
		}
	}
}

static bool intel_pipe_handle_vblank(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	bool ret;

	ret = drm_handle_vblank(&dev_priv->drm, pipe);
	if (ret)
		intel_finish_page_flip_mmio(dev_priv, pipe);

	return ret;
}

static void valleyview_pipestat_irq_ack(struct drm_i915_private *dev_priv,
					u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	int pipe;

	spin_lock(&dev_priv->irq_lock);

	if (!dev_priv->display_irqs_enabled) {
		spin_unlock(&dev_priv->irq_lock);
		return;
	}

	for_each_pipe(dev_priv, pipe) {
		i915_reg_t reg;
		u32 mask, iir_bit = 0;

		/*
		 * PIPESTAT bits get signalled even when the interrupt is
		 * disabled with the mask bits, and some of the status bits do
		 * not generate interrupts at all (like the underrun bit). Hence
		 * we need to be careful that we only handle what we want to
		 * handle.
		 */

		/* fifo underruns are filtered in the underrun handler.
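		 * The underrun bit is nevertheless always left in the mask so
		 * that the latched status reaches the underrun handling in
		 * valleyview_pipestat_irq_handler() even though the bit never
		 * raises an interrupt by itself.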
*/ 1758 mask = PIPE_FIFO_UNDERRUN_STATUS; 1759 1760 switch (pipe) { 1761 case PIPE_A: 1762 iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT; 1763 break; 1764 case PIPE_B: 1765 iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; 1766 break; 1767 case PIPE_C: 1768 iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT; 1769 break; 1770 } 1771 if (iir & iir_bit) 1772 mask |= dev_priv->pipestat_irq_mask[pipe]; 1773 1774 if (!mask) 1775 continue; 1776 1777 reg = PIPESTAT(pipe); 1778 mask |= PIPESTAT_INT_ENABLE_MASK; 1779 pipe_stats[pipe] = I915_READ(reg) & mask; 1780 1781 /* 1782 * Clear the PIPE*STAT regs before the IIR 1783 */ 1784 if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS | 1785 PIPESTAT_INT_STATUS_MASK)) 1786 I915_WRITE(reg, pipe_stats[pipe]); 1787 } 1788 spin_unlock(&dev_priv->irq_lock); 1789 } 1790 1791 static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv, 1792 u32 pipe_stats[I915_MAX_PIPES]) 1793 { 1794 enum pipe pipe; 1795 1796 for_each_pipe(dev_priv, pipe) { 1797 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS && 1798 intel_pipe_handle_vblank(dev_priv, pipe)) 1799 intel_check_page_flip(dev_priv, pipe); 1800 1801 if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) 1802 intel_finish_page_flip_cs(dev_priv, pipe); 1803 1804 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 1805 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 1806 1807 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 1808 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 1809 } 1810 1811 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 1812 gmbus_irq_handler(dev_priv); 1813 } 1814 1815 static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv) 1816 { 1817 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); 1818 1819 if (hotplug_status) 1820 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); 1821 1822 return hotplug_status; 1823 } 1824 1825 static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv, 1826 u32 hotplug_status) 1827 { 1828 u32 pin_mask = 0, long_mask = 0; 1829 1830 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || 1831 IS_CHERRYVIEW(dev_priv)) { 1832 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X; 1833 1834 if (hotplug_trigger) { 1835 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, 1836 hotplug_trigger, hpd_status_g4x, 1837 i9xx_port_hotplug_long_detect); 1838 1839 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 1840 } 1841 1842 if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X) 1843 dp_aux_irq_handler(dev_priv); 1844 } else { 1845 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; 1846 1847 if (hotplug_trigger) { 1848 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, 1849 hotplug_trigger, hpd_status_i915, 1850 i9xx_port_hotplug_long_detect); 1851 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 1852 } 1853 } 1854 } 1855 1856 static irqreturn_t valleyview_irq_handler(int irq, void *arg) 1857 { 1858 struct drm_device *dev = arg; 1859 struct drm_i915_private *dev_priv = to_i915(dev); 1860 irqreturn_t ret = IRQ_NONE; 1861 1862 if (!intel_irqs_enabled(dev_priv)) 1863 return IRQ_NONE; 1864 1865 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 1866 disable_rpm_wakeref_asserts(dev_priv); 1867 1868 do { 1869 u32 iir, gt_iir, pm_iir; 1870 u32 pipe_stats[I915_MAX_PIPES] = {}; 1871 u32 hotplug_status = 0; 1872 u32 ier = 0; 1873 1874 gt_iir = I915_READ(GTIIR); 1875 pm_iir = I915_READ(GEN6_PMIIR); 1876 iir = I915_READ(VLV_IIR); 1877 1878 if (gt_iir == 0 && pm_iir == 0 && iir == 0) 1879 break; 1880 
1881 ret = IRQ_HANDLED; 1882 1883 /* 1884 * Theory on interrupt generation, based on empirical evidence: 1885 * 1886 * x = ((VLV_IIR & VLV_IER) || 1887 * (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) && 1888 * (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE))); 1889 * 1890 * A CPU interrupt will only be raised when 'x' has a 0->1 edge. 1891 * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to 1892 * guarantee the CPU interrupt will be raised again even if we 1893 * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR 1894 * bits this time around. 1895 */ 1896 I915_WRITE(VLV_MASTER_IER, 0); 1897 ier = I915_READ(VLV_IER); 1898 I915_WRITE(VLV_IER, 0); 1899 1900 if (gt_iir) 1901 I915_WRITE(GTIIR, gt_iir); 1902 if (pm_iir) 1903 I915_WRITE(GEN6_PMIIR, pm_iir); 1904 1905 if (iir & I915_DISPLAY_PORT_INTERRUPT) 1906 hotplug_status = i9xx_hpd_irq_ack(dev_priv); 1907 1908 /* Call regardless, as some status bits might not be 1909 * signalled in iir */ 1910 valleyview_pipestat_irq_ack(dev_priv, iir, pipe_stats); 1911 1912 if (iir & (I915_LPE_PIPE_A_INTERRUPT | 1913 I915_LPE_PIPE_B_INTERRUPT)) 1914 intel_lpe_audio_irq_handler(dev_priv); 1915 1916 /* 1917 * VLV_IIR is single buffered, and reflects the level 1918 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last. 1919 */ 1920 if (iir) 1921 I915_WRITE(VLV_IIR, iir); 1922 1923 I915_WRITE(VLV_IER, ier); 1924 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); 1925 POSTING_READ(VLV_MASTER_IER); 1926 1927 if (gt_iir) 1928 snb_gt_irq_handler(dev_priv, gt_iir); 1929 if (pm_iir) 1930 gen6_rps_irq_handler(dev_priv, pm_iir); 1931 1932 if (hotplug_status) 1933 i9xx_hpd_irq_handler(dev_priv, hotplug_status); 1934 1935 valleyview_pipestat_irq_handler(dev_priv, pipe_stats); 1936 } while (0); 1937 1938 enable_rpm_wakeref_asserts(dev_priv); 1939 1940 return ret; 1941 } 1942 1943 static irqreturn_t cherryview_irq_handler(int irq, void *arg) 1944 { 1945 struct drm_device *dev = arg; 1946 struct drm_i915_private *dev_priv = to_i915(dev); 1947 irqreturn_t ret = IRQ_NONE; 1948 1949 if (!intel_irqs_enabled(dev_priv)) 1950 return IRQ_NONE; 1951 1952 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 1953 disable_rpm_wakeref_asserts(dev_priv); 1954 1955 do { 1956 u32 master_ctl, iir; 1957 u32 gt_iir[4] = {}; 1958 u32 pipe_stats[I915_MAX_PIPES] = {}; 1959 u32 hotplug_status = 0; 1960 u32 ier = 0; 1961 1962 master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL; 1963 iir = I915_READ(VLV_IIR); 1964 1965 if (master_ctl == 0 && iir == 0) 1966 break; 1967 1968 ret = IRQ_HANDLED; 1969 1970 /* 1971 * Theory on interrupt generation, based on empirical evidence: 1972 * 1973 * x = ((VLV_IIR & VLV_IER) || 1974 * ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) && 1975 * (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL))); 1976 * 1977 * A CPU interrupt will only be raised when 'x' has a 0->1 edge. 1978 * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to 1979 * guarantee the CPU interrupt will be raised again even if we 1980 * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL 1981 * bits this time around. 
1982 */ 1983 I915_WRITE(GEN8_MASTER_IRQ, 0); 1984 ier = I915_READ(VLV_IER); 1985 I915_WRITE(VLV_IER, 0); 1986 1987 gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir); 1988 1989 if (iir & I915_DISPLAY_PORT_INTERRUPT) 1990 hotplug_status = i9xx_hpd_irq_ack(dev_priv); 1991 1992 /* Call regardless, as some status bits might not be 1993 * signalled in iir */ 1994 valleyview_pipestat_irq_ack(dev_priv, iir, pipe_stats); 1995 1996 if (iir & (I915_LPE_PIPE_A_INTERRUPT | 1997 I915_LPE_PIPE_B_INTERRUPT | 1998 I915_LPE_PIPE_C_INTERRUPT)) 1999 intel_lpe_audio_irq_handler(dev_priv); 2000 2001 /* 2002 * VLV_IIR is single buffered, and reflects the level 2003 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last. 2004 */ 2005 if (iir) 2006 I915_WRITE(VLV_IIR, iir); 2007 2008 I915_WRITE(VLV_IER, ier); 2009 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 2010 POSTING_READ(GEN8_MASTER_IRQ); 2011 2012 gen8_gt_irq_handler(dev_priv, gt_iir); 2013 2014 if (hotplug_status) 2015 i9xx_hpd_irq_handler(dev_priv, hotplug_status); 2016 2017 valleyview_pipestat_irq_handler(dev_priv, pipe_stats); 2018 } while (0); 2019 2020 enable_rpm_wakeref_asserts(dev_priv); 2021 2022 return ret; 2023 } 2024 2025 static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv, 2026 u32 hotplug_trigger, 2027 const u32 hpd[HPD_NUM_PINS]) 2028 { 2029 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; 2030 2031 /* 2032 * Somehow the PCH doesn't seem to really ack the interrupt to the CPU 2033 * unless we touch the hotplug register, even if hotplug_trigger is 2034 * zero. Not acking leads to "The master control interrupt lied (SDE)!" 2035 * errors. 2036 */ 2037 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); 2038 if (!hotplug_trigger) { 2039 u32 mask = PORTA_HOTPLUG_STATUS_MASK | 2040 PORTD_HOTPLUG_STATUS_MASK | 2041 PORTC_HOTPLUG_STATUS_MASK | 2042 PORTB_HOTPLUG_STATUS_MASK; 2043 dig_hotplug_reg &= ~mask; 2044 } 2045 2046 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); 2047 if (!hotplug_trigger) 2048 return; 2049 2050 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, 2051 dig_hotplug_reg, hpd, 2052 pch_port_hotplug_long_detect); 2053 2054 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2055 } 2056 2057 static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) 2058 { 2059 int pipe; 2060 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK; 2061 2062 ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ibx); 2063 2064 if (pch_iir & SDE_AUDIO_POWER_MASK) { 2065 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >> 2066 SDE_AUDIO_POWER_SHIFT); 2067 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", 2068 port_name(port)); 2069 } 2070 2071 if (pch_iir & SDE_AUX_MASK) 2072 dp_aux_irq_handler(dev_priv); 2073 2074 if (pch_iir & SDE_GMBUS) 2075 gmbus_irq_handler(dev_priv); 2076 2077 if (pch_iir & SDE_AUDIO_HDCP_MASK) 2078 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n"); 2079 2080 if (pch_iir & SDE_AUDIO_TRANS_MASK) 2081 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n"); 2082 2083 if (pch_iir & SDE_POISON) 2084 DRM_ERROR("PCH poison interrupt\n"); 2085 2086 if (pch_iir & SDE_FDI_MASK) 2087 for_each_pipe(dev_priv, pipe) 2088 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 2089 pipe_name(pipe), 2090 I915_READ(FDI_RX_IIR(pipe))); 2091 2092 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE)) 2093 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n"); 2094 2095 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR)) 2096 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n"); 2097 2098 if (pch_iir & 
SDE_TRANSA_FIFO_UNDER) 2099 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A); 2100 2101 if (pch_iir & SDE_TRANSB_FIFO_UNDER) 2102 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B); 2103 } 2104 2105 static void ivb_err_int_handler(struct drm_i915_private *dev_priv) 2106 { 2107 u32 err_int = I915_READ(GEN7_ERR_INT); 2108 enum pipe pipe; 2109 2110 if (err_int & ERR_INT_POISON) 2111 DRM_ERROR("Poison interrupt\n"); 2112 2113 for_each_pipe(dev_priv, pipe) { 2114 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) 2115 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2116 2117 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) { 2118 if (IS_IVYBRIDGE(dev_priv)) 2119 ivb_pipe_crc_irq_handler(dev_priv, pipe); 2120 else 2121 hsw_pipe_crc_irq_handler(dev_priv, pipe); 2122 } 2123 } 2124 2125 I915_WRITE(GEN7_ERR_INT, err_int); 2126 } 2127 2128 static void cpt_serr_int_handler(struct drm_i915_private *dev_priv) 2129 { 2130 u32 serr_int = I915_READ(SERR_INT); 2131 2132 if (serr_int & SERR_INT_POISON) 2133 DRM_ERROR("PCH poison interrupt\n"); 2134 2135 if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN) 2136 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A); 2137 2138 if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN) 2139 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B); 2140 2141 if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN) 2142 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_C); 2143 2144 I915_WRITE(SERR_INT, serr_int); 2145 } 2146 2147 static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) 2148 { 2149 int pipe; 2150 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; 2151 2152 ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_cpt); 2153 2154 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) { 2155 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> 2156 SDE_AUDIO_POWER_SHIFT_CPT); 2157 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n", 2158 port_name(port)); 2159 } 2160 2161 if (pch_iir & SDE_AUX_MASK_CPT) 2162 dp_aux_irq_handler(dev_priv); 2163 2164 if (pch_iir & SDE_GMBUS_CPT) 2165 gmbus_irq_handler(dev_priv); 2166 2167 if (pch_iir & SDE_AUDIO_CP_REQ_CPT) 2168 DRM_DEBUG_DRIVER("Audio CP request interrupt\n"); 2169 2170 if (pch_iir & SDE_AUDIO_CP_CHG_CPT) 2171 DRM_DEBUG_DRIVER("Audio CP change interrupt\n"); 2172 2173 if (pch_iir & SDE_FDI_MASK_CPT) 2174 for_each_pipe(dev_priv, pipe) 2175 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 2176 pipe_name(pipe), 2177 I915_READ(FDI_RX_IIR(pipe))); 2178 2179 if (pch_iir & SDE_ERROR_CPT) 2180 cpt_serr_int_handler(dev_priv); 2181 } 2182 2183 static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) 2184 { 2185 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT & 2186 ~SDE_PORTE_HOTPLUG_SPT; 2187 u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT; 2188 u32 pin_mask = 0, long_mask = 0; 2189 2190 if (hotplug_trigger) { 2191 u32 dig_hotplug_reg; 2192 2193 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); 2194 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); 2195 2196 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, 2197 dig_hotplug_reg, hpd_spt, 2198 spt_port_hotplug_long_detect); 2199 } 2200 2201 if (hotplug2_trigger) { 2202 u32 dig_hotplug_reg; 2203 2204 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2); 2205 I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg); 2206 2207 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug2_trigger, 2208 dig_hotplug_reg, hpd_spt, 2209 spt_port_hotplug2_long_detect); 2210 } 2211 2212 if (pin_mask) 2213 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 
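/*
 * Note that SPT/KBP split the hotplug status across two registers:
 * PCH_PORT_HOTPLUG covers ports A-D while port E lives in
 * PCH_PORT_HOTPLUG2, which is why both triggers above are folded into a
 * single pin_mask/long_mask pair before the one call to
 * intel_hpd_irq_handler().
 */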
2214 2215 if (pch_iir & SDE_GMBUS_CPT) 2216 gmbus_irq_handler(dev_priv); 2217 } 2218 2219 static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv, 2220 u32 hotplug_trigger, 2221 const u32 hpd[HPD_NUM_PINS]) 2222 { 2223 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; 2224 2225 dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL); 2226 I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg); 2227 2228 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, 2229 dig_hotplug_reg, hpd, 2230 ilk_port_hotplug_long_detect); 2231 2232 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2233 } 2234 2235 static void ilk_display_irq_handler(struct drm_i915_private *dev_priv, 2236 u32 de_iir) 2237 { 2238 enum pipe pipe; 2239 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG; 2240 2241 if (hotplug_trigger) 2242 ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ilk); 2243 2244 if (de_iir & DE_AUX_CHANNEL_A) 2245 dp_aux_irq_handler(dev_priv); 2246 2247 if (de_iir & DE_GSE) 2248 intel_opregion_asle_intr(dev_priv); 2249 2250 if (de_iir & DE_POISON) 2251 DRM_ERROR("Poison interrupt\n"); 2252 2253 for_each_pipe(dev_priv, pipe) { 2254 if (de_iir & DE_PIPE_VBLANK(pipe) && 2255 intel_pipe_handle_vblank(dev_priv, pipe)) 2256 intel_check_page_flip(dev_priv, pipe); 2257 2258 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe)) 2259 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2260 2261 if (de_iir & DE_PIPE_CRC_DONE(pipe)) 2262 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 2263 2264 /* plane/pipes map 1:1 on ilk+ */ 2265 if (de_iir & DE_PLANE_FLIP_DONE(pipe)) 2266 intel_finish_page_flip_cs(dev_priv, pipe); 2267 } 2268 2269 /* check event from PCH */ 2270 if (de_iir & DE_PCH_EVENT) { 2271 u32 pch_iir = I915_READ(SDEIIR); 2272 2273 if (HAS_PCH_CPT(dev_priv)) 2274 cpt_irq_handler(dev_priv, pch_iir); 2275 else 2276 ibx_irq_handler(dev_priv, pch_iir); 2277 2278 /* should clear PCH hotplug event before clear CPU irq */ 2279 I915_WRITE(SDEIIR, pch_iir); 2280 } 2281 2282 if (IS_GEN5(dev_priv) && de_iir & DE_PCU_EVENT) 2283 ironlake_rps_change_irq_handler(dev_priv); 2284 } 2285 2286 static void ivb_display_irq_handler(struct drm_i915_private *dev_priv, 2287 u32 de_iir) 2288 { 2289 enum pipe pipe; 2290 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB; 2291 2292 if (hotplug_trigger) 2293 ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ivb); 2294 2295 if (de_iir & DE_ERR_INT_IVB) 2296 ivb_err_int_handler(dev_priv); 2297 2298 if (de_iir & DE_AUX_CHANNEL_A_IVB) 2299 dp_aux_irq_handler(dev_priv); 2300 2301 if (de_iir & DE_GSE_IVB) 2302 intel_opregion_asle_intr(dev_priv); 2303 2304 for_each_pipe(dev_priv, pipe) { 2305 if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) && 2306 intel_pipe_handle_vblank(dev_priv, pipe)) 2307 intel_check_page_flip(dev_priv, pipe); 2308 2309 /* plane/pipes map 1:1 on ilk+ */ 2310 if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) 2311 intel_finish_page_flip_cs(dev_priv, pipe); 2312 } 2313 2314 /* check event from PCH */ 2315 if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) { 2316 u32 pch_iir = I915_READ(SDEIIR); 2317 2318 cpt_irq_handler(dev_priv, pch_iir); 2319 2320 /* clear PCH hotplug event before clear CPU irq */ 2321 I915_WRITE(SDEIIR, pch_iir); 2322 } 2323 } 2324 2325 /* 2326 * To handle irqs with the minimum potential races with fresh interrupts, we: 2327 * 1 - Disable Master Interrupt Control. 2328 * 2 - Find the source(s) of the interrupt. 2329 * 3 - Clear the Interrupt Identity bits (IIR). 2330 * 4 - Process the interrupt(s) that had bits set in the IIRs. 
2331 * 5 - Re-enable Master Interrupt Control. 2332 */ 2333 static irqreturn_t ironlake_irq_handler(int irq, void *arg) 2334 { 2335 struct drm_device *dev = arg; 2336 struct drm_i915_private *dev_priv = to_i915(dev); 2337 u32 de_iir, gt_iir, de_ier, sde_ier = 0; 2338 irqreturn_t ret = IRQ_NONE; 2339 2340 if (!intel_irqs_enabled(dev_priv)) 2341 return IRQ_NONE; 2342 2343 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2344 disable_rpm_wakeref_asserts(dev_priv); 2345 2346 /* disable master interrupt before clearing iir */ 2347 de_ier = I915_READ(DEIER); 2348 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); 2349 POSTING_READ(DEIER); 2350 2351 /* Disable south interrupts. We'll only write to SDEIIR once, so further 2352 * interrupts will be stored on its back queue, and then we'll be 2353 * able to process them after we restore SDEIER (as soon as we restore 2354 * it, we'll get an interrupt if SDEIIR still has something to process 2355 * due to its back queue). */ 2356 if (!HAS_PCH_NOP(dev_priv)) { 2357 sde_ier = I915_READ(SDEIER); 2358 I915_WRITE(SDEIER, 0); 2359 POSTING_READ(SDEIER); 2360 } 2361 2362 /* Find, clear, then process each source of interrupt */ 2363 2364 gt_iir = I915_READ(GTIIR); 2365 if (gt_iir) { 2366 I915_WRITE(GTIIR, gt_iir); 2367 ret = IRQ_HANDLED; 2368 if (INTEL_GEN(dev_priv) >= 6) 2369 snb_gt_irq_handler(dev_priv, gt_iir); 2370 else 2371 ilk_gt_irq_handler(dev_priv, gt_iir); 2372 } 2373 2374 de_iir = I915_READ(DEIIR); 2375 if (de_iir) { 2376 I915_WRITE(DEIIR, de_iir); 2377 ret = IRQ_HANDLED; 2378 if (INTEL_GEN(dev_priv) >= 7) 2379 ivb_display_irq_handler(dev_priv, de_iir); 2380 else 2381 ilk_display_irq_handler(dev_priv, de_iir); 2382 } 2383 2384 if (INTEL_GEN(dev_priv) >= 6) { 2385 u32 pm_iir = I915_READ(GEN6_PMIIR); 2386 if (pm_iir) { 2387 I915_WRITE(GEN6_PMIIR, pm_iir); 2388 ret = IRQ_HANDLED; 2389 gen6_rps_irq_handler(dev_priv, pm_iir); 2390 } 2391 } 2392 2393 I915_WRITE(DEIER, de_ier); 2394 POSTING_READ(DEIER); 2395 if (!HAS_PCH_NOP(dev_priv)) { 2396 I915_WRITE(SDEIER, sde_ier); 2397 POSTING_READ(SDEIER); 2398 } 2399 2400 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2401 enable_rpm_wakeref_asserts(dev_priv); 2402 2403 return ret; 2404 } 2405 2406 static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv, 2407 u32 hotplug_trigger, 2408 const u32 hpd[HPD_NUM_PINS]) 2409 { 2410 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; 2411 2412 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); 2413 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); 2414 2415 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, 2416 dig_hotplug_reg, hpd, 2417 bxt_port_hotplug_long_detect); 2418 2419 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2420 } 2421 2422 static irqreturn_t 2423 gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) 2424 { 2425 irqreturn_t ret = IRQ_NONE; 2426 u32 iir; 2427 enum pipe pipe; 2428 2429 if (master_ctl & GEN8_DE_MISC_IRQ) { 2430 iir = I915_READ(GEN8_DE_MISC_IIR); 2431 if (iir) { 2432 I915_WRITE(GEN8_DE_MISC_IIR, iir); 2433 ret = IRQ_HANDLED; 2434 if (iir & GEN8_DE_MISC_GSE) 2435 intel_opregion_asle_intr(dev_priv); 2436 else 2437 DRM_ERROR("Unexpected DE Misc interrupt\n"); 2438 } 2439 else 2440 DRM_ERROR("The master control interrupt lied (DE MISC)!\n"); 2441 } 2442 2443 if (master_ctl & GEN8_DE_PORT_IRQ) { 2444 iir = I915_READ(GEN8_DE_PORT_IIR); 2445 if (iir) { 2446 u32 tmp_mask; 2447 bool found = false; 2448 2449 I915_WRITE(GEN8_DE_PORT_IIR, iir); 2450 ret =
IRQ_HANDLED; 2451 2452 tmp_mask = GEN8_AUX_CHANNEL_A; 2453 if (INTEL_INFO(dev_priv)->gen >= 9) 2454 tmp_mask |= GEN9_AUX_CHANNEL_B | 2455 GEN9_AUX_CHANNEL_C | 2456 GEN9_AUX_CHANNEL_D; 2457 2458 if (iir & tmp_mask) { 2459 dp_aux_irq_handler(dev_priv); 2460 found = true; 2461 } 2462 2463 if (IS_GEN9_LP(dev_priv)) { 2464 tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK; 2465 if (tmp_mask) { 2466 bxt_hpd_irq_handler(dev_priv, tmp_mask, 2467 hpd_bxt); 2468 found = true; 2469 } 2470 } else if (IS_BROADWELL(dev_priv)) { 2471 tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG; 2472 if (tmp_mask) { 2473 ilk_hpd_irq_handler(dev_priv, 2474 tmp_mask, hpd_bdw); 2475 found = true; 2476 } 2477 } 2478 2479 if (IS_GEN9_LP(dev_priv) && (iir & BXT_DE_PORT_GMBUS)) { 2480 gmbus_irq_handler(dev_priv); 2481 found = true; 2482 } 2483 2484 if (!found) 2485 DRM_ERROR("Unexpected DE Port interrupt\n"); 2486 } 2487 else 2488 DRM_ERROR("The master control interrupt lied (DE PORT)!\n"); 2489 } 2490 2491 for_each_pipe(dev_priv, pipe) { 2492 u32 flip_done, fault_errors; 2493 2494 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe))) 2495 continue; 2496 2497 iir = I915_READ(GEN8_DE_PIPE_IIR(pipe)); 2498 if (!iir) { 2499 DRM_ERROR("The master control interrupt lied (DE PIPE)!\n"); 2500 continue; 2501 } 2502 2503 ret = IRQ_HANDLED; 2504 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir); 2505 2506 if (iir & GEN8_PIPE_VBLANK && 2507 intel_pipe_handle_vblank(dev_priv, pipe)) 2508 intel_check_page_flip(dev_priv, pipe); 2509 2510 flip_done = iir; 2511 if (INTEL_INFO(dev_priv)->gen >= 9) 2512 flip_done &= GEN9_PIPE_PLANE1_FLIP_DONE; 2513 else 2514 flip_done &= GEN8_PIPE_PRIMARY_FLIP_DONE; 2515 2516 if (flip_done) 2517 intel_finish_page_flip_cs(dev_priv, pipe); 2518 2519 if (iir & GEN8_PIPE_CDCLK_CRC_DONE) 2520 hsw_pipe_crc_irq_handler(dev_priv, pipe); 2521 2522 if (iir & GEN8_PIPE_FIFO_UNDERRUN) 2523 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2524 2525 fault_errors = iir; 2526 if (INTEL_INFO(dev_priv)->gen >= 9) 2527 fault_errors &= GEN9_DE_PIPE_IRQ_FAULT_ERRORS; 2528 else 2529 fault_errors &= GEN8_DE_PIPE_IRQ_FAULT_ERRORS; 2530 2531 if (fault_errors) 2532 DRM_ERROR("Fault errors on pipe %c: 0x%08x\n", 2533 pipe_name(pipe), 2534 fault_errors); 2535 } 2536 2537 if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) && 2538 master_ctl & GEN8_DE_PCH_IRQ) { 2539 /* 2540 * FIXME(BDW): Assume for now that the new interrupt handling 2541 * scheme also closed the SDE interrupt handling race we've seen 2542 * on older pch-split platforms. But this needs testing. 2543 */ 2544 iir = I915_READ(SDEIIR); 2545 if (iir) { 2546 I915_WRITE(SDEIIR, iir); 2547 ret = IRQ_HANDLED; 2548 2549 if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv)) 2550 spt_irq_handler(dev_priv, iir); 2551 else 2552 cpt_irq_handler(dev_priv, iir); 2553 } else { 2554 /* 2555 * Like on previous PCH there seems to be something 2556 * fishy going on with forwarding PCH interrupts. 
2557 */ 2558 DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n"); 2559 } 2560 } 2561 2562 return ret; 2563 } 2564 2565 static irqreturn_t gen8_irq_handler(int irq, void *arg) 2566 { 2567 struct drm_device *dev = arg; 2568 struct drm_i915_private *dev_priv = to_i915(dev); 2569 u32 master_ctl; 2570 u32 gt_iir[4] = {}; 2571 irqreturn_t ret; 2572 2573 if (!intel_irqs_enabled(dev_priv)) 2574 return IRQ_NONE; 2575 2576 master_ctl = I915_READ_FW(GEN8_MASTER_IRQ); 2577 master_ctl &= ~GEN8_MASTER_IRQ_CONTROL; 2578 if (!master_ctl) 2579 return IRQ_NONE; 2580 2581 I915_WRITE_FW(GEN8_MASTER_IRQ, 0); 2582 2583 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2584 disable_rpm_wakeref_asserts(dev_priv); 2585 2586 /* Find, clear, then process each source of interrupt */ 2587 ret = gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir); 2588 gen8_gt_irq_handler(dev_priv, gt_iir); 2589 ret |= gen8_de_irq_handler(dev_priv, master_ctl); 2590 2591 I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 2592 POSTING_READ_FW(GEN8_MASTER_IRQ); 2593 2594 enable_rpm_wakeref_asserts(dev_priv); 2595 2596 return ret; 2597 } 2598 2599 static void i915_error_wake_up(struct drm_i915_private *dev_priv) 2600 { 2601 /* 2602 * Notify all waiters for GPU completion events that reset state has 2603 * been changed, and that they need to restart their wait after 2604 * checking for potential errors (and bail out to drop locks if there is 2605 * a gpu reset pending so that i915_error_work_func can acquire them). 2606 */ 2607 2608 /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */ 2609 wake_up_all(&dev_priv->gpu_error.wait_queue); 2610 2611 /* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */ 2612 wake_up_all(&dev_priv->pending_flip_queue); 2613 } 2614 2615 /** 2616 * i915_reset_and_wakeup - do process context error handling work 2617 * @dev_priv: i915 device private 2618 * 2619 * Fire an error uevent so userspace can see that a hang or error 2620 * was detected. 2621 */ 2622 static void i915_reset_and_wakeup(struct drm_i915_private *dev_priv) 2623 { 2624 struct kobject *kobj = &dev_priv->drm.primary->kdev->kobj; 2625 char *error_event[] = { I915_ERROR_UEVENT "=1", NULL }; 2626 char *reset_event[] = { I915_RESET_UEVENT "=1", NULL }; 2627 char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL }; 2628 2629 kobject_uevent_env(kobj, KOBJ_CHANGE, error_event); 2630 2631 DRM_DEBUG_DRIVER("resetting chip\n"); 2632 kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event); 2633 2634 /* 2635 * In most cases it's guaranteed that we get here with an RPM 2636 * reference held, for example because there is a pending GPU 2637 * request that won't finish until the reset is done. This 2638 * isn't the case at least when we get here by doing a 2639 * simulated reset via debugfs, so get an RPM reference. 2640 */ 2641 intel_runtime_pm_get(dev_priv); 2642 intel_prepare_reset(dev_priv); 2643 2644 do { 2645 /* 2646 * All state reset _must_ be completed before we update the 2647 * reset counter, for otherwise waiters might miss the reset 2648 * pending state and not properly drop locks, resulting in 2649 * deadlocks with the reset work.
2650 */ 2651 if (mutex_trylock(&dev_priv->drm.struct_mutex)) { 2652 i915_reset(dev_priv); 2653 mutex_unlock(&dev_priv->drm.struct_mutex); 2654 } 2655 2656 /* We need to wait for anyone holding the lock to wakeup */ 2657 } while (wait_on_bit_timeout(&dev_priv->gpu_error.flags, 2658 I915_RESET_IN_PROGRESS, 2659 TASK_UNINTERRUPTIBLE, 2660 HZ)); 2661 2662 intel_finish_reset(dev_priv); 2663 intel_runtime_pm_put(dev_priv); 2664 2665 if (!test_bit(I915_WEDGED, &dev_priv->gpu_error.flags)) 2666 kobject_uevent_env(kobj, 2667 KOBJ_CHANGE, reset_done_event); 2668 2669 /* 2670 * Note: The wake_up also serves as a memory barrier so that 2671 * waiters see the updated value of the dev_priv->gpu_error. 2672 */ 2673 wake_up_all(&dev_priv->gpu_error.reset_queue); 2674 } 2675 2676 static inline void 2677 i915_err_print_instdone(struct drm_i915_private *dev_priv, 2678 struct intel_instdone *instdone) 2679 { 2680 int slice; 2681 int subslice; 2682 2683 pr_err(" INSTDONE: 0x%08x\n", instdone->instdone); 2684 2685 if (INTEL_GEN(dev_priv) <= 3) 2686 return; 2687 2688 pr_err(" SC_INSTDONE: 0x%08x\n", instdone->slice_common); 2689 2690 if (INTEL_GEN(dev_priv) <= 6) 2691 return; 2692 2693 for_each_instdone_slice_subslice(dev_priv, slice, subslice) 2694 pr_err(" SAMPLER_INSTDONE[%d][%d]: 0x%08x\n", 2695 slice, subslice, instdone->sampler[slice][subslice]); 2696 2697 for_each_instdone_slice_subslice(dev_priv, slice, subslice) 2698 pr_err(" ROW_INSTDONE[%d][%d]: 0x%08x\n", 2699 slice, subslice, instdone->row[slice][subslice]); 2700 } 2701 2702 static void i915_clear_error_registers(struct drm_i915_private *dev_priv) 2703 { 2704 u32 eir; 2705 2706 if (!IS_GEN2(dev_priv)) 2707 I915_WRITE(PGTBL_ER, I915_READ(PGTBL_ER)); 2708 2709 if (INTEL_GEN(dev_priv) < 4) 2710 I915_WRITE(IPEIR, I915_READ(IPEIR)); 2711 else 2712 I915_WRITE(IPEIR_I965, I915_READ(IPEIR_I965)); 2713 2714 I915_WRITE(EIR, I915_READ(EIR)); 2715 eir = I915_READ(EIR); 2716 if (eir) { 2717 /* 2718 * some errors might have become stuck, 2719 * mask them. 2720 */ 2721 DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masking\n", eir); 2722 I915_WRITE(EMR, I915_READ(EMR) | eir); 2723 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 2724 } 2725 } 2726 2727 /** 2728 * i915_handle_error - handle a gpu error 2729 * @dev_priv: i915 device private 2730 * @engine_mask: mask representing engines that are hung 2731 * @fmt: Error message format string 2732 * 2733 * Do some basic checking of register state at error time and 2734 * dump it to the syslog. Also call i915_capture_error_state() to make 2735 * sure we get a record and make it available in debugfs. Fire a uevent 2736 * so userspace knows something bad happened (should trigger collection 2737 * of a ring dump etc.). 2738 */ 2739 void i915_handle_error(struct drm_i915_private *dev_priv, 2740 u32 engine_mask, 2741 const char *fmt, ...) 2742 { 2743 va_list args; 2744 char error_msg[80]; 2745 2746 va_start(args, fmt); 2747 vscnprintf(error_msg, sizeof(error_msg), fmt, args); 2748 va_end(args); 2749 2750 i915_capture_error_state(dev_priv, engine_mask, error_msg); 2751 i915_clear_error_registers(dev_priv); 2752 2753 if (!engine_mask) 2754 return; 2755 2756 if (test_and_set_bit(I915_RESET_IN_PROGRESS, 2757 &dev_priv->gpu_error.flags)) 2758 return; 2759 2760 /* 2761 * Wakeup waiting processes so that the reset function 2762 * i915_reset_and_wakeup doesn't deadlock trying to grab 2763 * various locks. 
By bumping the reset counter first, the woken 2764 * processes will see a reset in progress and back off, 2765 * releasing their locks and then wait for the reset completion. 2766 * We must do this for _all_ gpu waiters that might hold locks 2767 * that the reset work needs to acquire. 2768 * 2769 * Note: The wake_up also provides a memory barrier to ensure that the 2770 * waiters see the updated value of the reset flags. 2771 */ 2772 i915_error_wake_up(dev_priv); 2773 2774 i915_reset_and_wakeup(dev_priv); 2775 } 2776 2777 /* Called from drm generic code, passed 'crtc' which 2778 * we use as a pipe index 2779 */ 2780 static int i8xx_enable_vblank(struct drm_device *dev, unsigned int pipe) 2781 { 2782 struct drm_i915_private *dev_priv = to_i915(dev); 2783 unsigned long irqflags; 2784 2785 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2786 i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS); 2787 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2788 2789 return 0; 2790 } 2791 2792 static int i965_enable_vblank(struct drm_device *dev, unsigned int pipe) 2793 { 2794 struct drm_i915_private *dev_priv = to_i915(dev); 2795 unsigned long irqflags; 2796 2797 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2798 i915_enable_pipestat(dev_priv, pipe, 2799 PIPE_START_VBLANK_INTERRUPT_STATUS); 2800 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2801 2802 return 0; 2803 } 2804 2805 static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe) 2806 { 2807 struct drm_i915_private *dev_priv = to_i915(dev); 2808 unsigned long irqflags; 2809 uint32_t bit = INTEL_GEN(dev_priv) >= 7 ? 2810 DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe); 2811 2812 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2813 ilk_enable_display_irq(dev_priv, bit); 2814 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2815 2816 return 0; 2817 } 2818 2819 static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe) 2820 { 2821 struct drm_i915_private *dev_priv = to_i915(dev); 2822 unsigned long irqflags; 2823 2824 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2825 bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK); 2826 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2827 2828 return 0; 2829 } 2830 2831 /* Called from drm generic code, passed 'crtc' which 2832 * we use as a pipe index 2833 */ 2834 static void i8xx_disable_vblank(struct drm_device *dev, unsigned int pipe) 2835 { 2836 struct drm_i915_private *dev_priv = to_i915(dev); 2837 unsigned long irqflags; 2838 2839 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2840 i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS); 2841 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2842 } 2843 2844 static void i965_disable_vblank(struct drm_device *dev, unsigned int pipe) 2845 { 2846 struct drm_i915_private *dev_priv = to_i915(dev); 2847 unsigned long irqflags; 2848 2849 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2850 i915_disable_pipestat(dev_priv, pipe, 2851 PIPE_START_VBLANK_INTERRUPT_STATUS); 2852 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2853 } 2854 2855 static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe) 2856 { 2857 struct drm_i915_private *dev_priv = to_i915(dev); 2858 unsigned long irqflags; 2859 uint32_t bit = INTEL_GEN(dev_priv) >= 7 ? 
2860 DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe); 2861 2862 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2863 ilk_disable_display_irq(dev_priv, bit); 2864 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2865 } 2866 2867 static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe) 2868 { 2869 struct drm_i915_private *dev_priv = to_i915(dev); 2870 unsigned long irqflags; 2871 2872 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2873 bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK); 2874 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2875 } 2876 2877 static void ibx_irq_reset(struct drm_i915_private *dev_priv) 2878 { 2879 if (HAS_PCH_NOP(dev_priv)) 2880 return; 2881 2882 GEN5_IRQ_RESET(SDE); 2883 2884 if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv)) 2885 I915_WRITE(SERR_INT, 0xffffffff); 2886 } 2887 2888 /* 2889 * SDEIER is also touched by the interrupt handler to work around missed PCH 2890 * interrupts. Hence we can't update it after the interrupt handler is enabled - 2891 * instead we unconditionally enable all PCH interrupt sources here, but then 2892 * only unmask them as needed with SDEIMR. 2893 * 2894 * This function needs to be called before interrupts are enabled. 2895 */ 2896 static void ibx_irq_pre_postinstall(struct drm_device *dev) 2897 { 2898 struct drm_i915_private *dev_priv = to_i915(dev); 2899 2900 if (HAS_PCH_NOP(dev_priv)) 2901 return; 2902 2903 WARN_ON(I915_READ(SDEIER) != 0); 2904 I915_WRITE(SDEIER, 0xffffffff); 2905 POSTING_READ(SDEIER); 2906 } 2907 2908 static void gen5_gt_irq_reset(struct drm_i915_private *dev_priv) 2909 { 2910 GEN5_IRQ_RESET(GT); 2911 if (INTEL_GEN(dev_priv) >= 6) 2912 GEN5_IRQ_RESET(GEN6_PM); 2913 } 2914 2915 static void vlv_display_irq_reset(struct drm_i915_private *dev_priv) 2916 { 2917 enum pipe pipe; 2918 2919 if (IS_CHERRYVIEW(dev_priv)) 2920 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV); 2921 else 2922 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK); 2923 2924 i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0); 2925 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 2926 2927 for_each_pipe(dev_priv, pipe) { 2928 I915_WRITE(PIPESTAT(pipe), 2929 PIPE_FIFO_UNDERRUN_STATUS | 2930 PIPESTAT_INT_STATUS_MASK); 2931 dev_priv->pipestat_irq_mask[pipe] = 0; 2932 } 2933 2934 GEN5_IRQ_RESET(VLV_); 2935 dev_priv->irq_mask = ~0; 2936 } 2937 2938 static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv) 2939 { 2940 u32 pipestat_mask; 2941 u32 enable_mask; 2942 enum pipe pipe; 2943 u32 val; 2944 2945 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV | 2946 PIPE_CRC_DONE_INTERRUPT_STATUS; 2947 2948 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); 2949 for_each_pipe(dev_priv, pipe) 2950 i915_enable_pipestat(dev_priv, pipe, pipestat_mask); 2951 2952 enable_mask = I915_DISPLAY_PORT_INTERRUPT | 2953 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 2954 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; 2955 if (IS_CHERRYVIEW(dev_priv)) 2956 enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT; 2957 2958 WARN_ON(dev_priv->irq_mask != ~0); 2959 2960 val = (I915_LPE_PIPE_A_INTERRUPT | 2961 I915_LPE_PIPE_B_INTERRUPT | 2962 I915_LPE_PIPE_C_INTERRUPT); 2963 2964 enable_mask |= val; 2965 2966 dev_priv->irq_mask = ~enable_mask; 2967 2968 GEN5_IRQ_INIT(VLV_, dev_priv->irq_mask, enable_mask); 2969 } 2970 2971 /* drm_dma.h hooks 2972 */ 2973 static void ironlake_irq_reset(struct drm_device *dev) 2974 { 2975 struct drm_i915_private *dev_priv = to_i915(dev); 2976 2977 I915_WRITE(HWSTAM, 0xffffffff); 2978 
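/*
 * HWSTAM written with all ones above should mask every interrupt status
 * bit from being mirrored into the hardware status page while the DE,
 * GT and PCH register blocks below are being reset.
 */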
2979 GEN5_IRQ_RESET(DE); 2980 if (IS_GEN7(dev_priv)) 2981 I915_WRITE(GEN7_ERR_INT, 0xffffffff); 2982 2983 gen5_gt_irq_reset(dev_priv); 2984 2985 ibx_irq_reset(dev_priv); 2986 } 2987 2988 static void valleyview_irq_preinstall(struct drm_device *dev) 2989 { 2990 struct drm_i915_private *dev_priv = to_i915(dev); 2991 2992 I915_WRITE(VLV_MASTER_IER, 0); 2993 POSTING_READ(VLV_MASTER_IER); 2994 2995 gen5_gt_irq_reset(dev_priv); 2996 2997 spin_lock_irq(&dev_priv->irq_lock); 2998 if (dev_priv->display_irqs_enabled) 2999 vlv_display_irq_reset(dev_priv); 3000 spin_unlock_irq(&dev_priv->irq_lock); 3001 } 3002 3003 static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv) 3004 { 3005 GEN8_IRQ_RESET_NDX(GT, 0); 3006 GEN8_IRQ_RESET_NDX(GT, 1); 3007 GEN8_IRQ_RESET_NDX(GT, 2); 3008 GEN8_IRQ_RESET_NDX(GT, 3); 3009 } 3010 3011 static void gen8_irq_reset(struct drm_device *dev) 3012 { 3013 struct drm_i915_private *dev_priv = to_i915(dev); 3014 int pipe; 3015 3016 I915_WRITE(GEN8_MASTER_IRQ, 0); 3017 POSTING_READ(GEN8_MASTER_IRQ); 3018 3019 gen8_gt_irq_reset(dev_priv); 3020 3021 for_each_pipe(dev_priv, pipe) 3022 if (intel_display_power_is_enabled(dev_priv, 3023 POWER_DOMAIN_PIPE(pipe))) 3024 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe); 3025 3026 GEN5_IRQ_RESET(GEN8_DE_PORT_); 3027 GEN5_IRQ_RESET(GEN8_DE_MISC_); 3028 GEN5_IRQ_RESET(GEN8_PCU_); 3029 3030 if (HAS_PCH_SPLIT(dev_priv)) 3031 ibx_irq_reset(dev_priv); 3032 } 3033 3034 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv, 3035 unsigned int pipe_mask) 3036 { 3037 uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN; 3038 enum pipe pipe; 3039 3040 spin_lock_irq(&dev_priv->irq_lock); 3041 for_each_pipe_masked(dev_priv, pipe, pipe_mask) 3042 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe, 3043 dev_priv->de_irq_mask[pipe], 3044 ~dev_priv->de_irq_mask[pipe] | extra_ier); 3045 spin_unlock_irq(&dev_priv->irq_lock); 3046 } 3047 3048 void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv, 3049 unsigned int pipe_mask) 3050 { 3051 enum pipe pipe; 3052 3053 spin_lock_irq(&dev_priv->irq_lock); 3054 for_each_pipe_masked(dev_priv, pipe, pipe_mask) 3055 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe); 3056 spin_unlock_irq(&dev_priv->irq_lock); 3057 3058 /* make sure we're done processing display irqs */ 3059 synchronize_irq(dev_priv->drm.irq); 3060 } 3061 3062 static void cherryview_irq_preinstall(struct drm_device *dev) 3063 { 3064 struct drm_i915_private *dev_priv = to_i915(dev); 3065 3066 I915_WRITE(GEN8_MASTER_IRQ, 0); 3067 POSTING_READ(GEN8_MASTER_IRQ); 3068 3069 gen8_gt_irq_reset(dev_priv); 3070 3071 GEN5_IRQ_RESET(GEN8_PCU_); 3072 3073 spin_lock_irq(&dev_priv->irq_lock); 3074 if (dev_priv->display_irqs_enabled) 3075 vlv_display_irq_reset(dev_priv); 3076 spin_unlock_irq(&dev_priv->irq_lock); 3077 } 3078 3079 static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv, 3080 const u32 hpd[HPD_NUM_PINS]) 3081 { 3082 struct intel_encoder *encoder; 3083 u32 enabled_irqs = 0; 3084 3085 for_each_intel_encoder(&dev_priv->drm, encoder) 3086 if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED) 3087 enabled_irqs |= hpd[encoder->hpd_pin]; 3088 3089 return enabled_irqs; 3090 } 3091 3092 static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv) 3093 { 3094 u32 hotplug_irqs, hotplug, enabled_irqs; 3095 3096 if (HAS_PCH_IBX(dev_priv)) { 3097 hotplug_irqs = SDE_HOTPLUG_MASK; 3098 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ibx); 3099 } else { 3100 hotplug_irqs = SDE_HOTPLUG_MASK_CPT; 3101 enabled_irqs = 
intel_hpd_enabled_irqs(dev_priv, hpd_cpt); 3102 } 3103 3104 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 3105 3106 /* 3107 * Enable digital hotplug on the PCH, and configure the DP short pulse 3108 * duration to 2ms (which is the minimum in the Display Port spec). 3109 * The pulse duration bits are reserved on LPT+. 3110 */ 3111 hotplug = I915_READ(PCH_PORT_HOTPLUG); 3112 hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK); 3113 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms; 3114 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms; 3115 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms; 3116 /* 3117 * When CPU and PCH are on the same package, port A 3118 * HPD must be enabled in both north and south. 3119 */ 3120 if (HAS_PCH_LPT_LP(dev_priv)) 3121 hotplug |= PORTA_HOTPLUG_ENABLE; 3122 I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 3123 } 3124 3125 static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv) 3126 { 3127 u32 hotplug; 3128 3129 /* Enable digital hotplug on the PCH */ 3130 hotplug = I915_READ(PCH_PORT_HOTPLUG); 3131 hotplug |= PORTA_HOTPLUG_ENABLE | 3132 PORTB_HOTPLUG_ENABLE | 3133 PORTC_HOTPLUG_ENABLE | 3134 PORTD_HOTPLUG_ENABLE; 3135 I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 3136 3137 hotplug = I915_READ(PCH_PORT_HOTPLUG2); 3138 hotplug |= PORTE_HOTPLUG_ENABLE; 3139 I915_WRITE(PCH_PORT_HOTPLUG2, hotplug); 3140 } 3141 3142 static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv) 3143 { 3144 u32 hotplug_irqs, enabled_irqs; 3145 3146 hotplug_irqs = SDE_HOTPLUG_MASK_SPT; 3147 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_spt); 3148 3149 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 3150 3151 spt_hpd_detection_setup(dev_priv); 3152 } 3153 3154 static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv) 3155 { 3156 u32 hotplug_irqs, hotplug, enabled_irqs; 3157 3158 if (INTEL_GEN(dev_priv) >= 8) { 3159 hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG; 3160 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bdw); 3161 3162 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs); 3163 } else if (INTEL_GEN(dev_priv) >= 7) { 3164 hotplug_irqs = DE_DP_A_HOTPLUG_IVB; 3165 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ivb); 3166 3167 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs); 3168 } else { 3169 hotplug_irqs = DE_DP_A_HOTPLUG; 3170 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ilk); 3171 3172 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs); 3173 } 3174 3175 /* 3176 * Enable digital hotplug on the CPU, and configure the DP short pulse 3177 * duration to 2ms (which is the minimum in the Display Port spec) 3178 * The pulse duration bits are reserved on HSW+. 
3179 */ 3180 hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL); 3181 hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK; 3182 hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE | DIGITAL_PORTA_PULSE_DURATION_2ms; 3183 I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug); 3184 3185 ibx_hpd_irq_setup(dev_priv); 3186 } 3187 3188 static void __bxt_hpd_detection_setup(struct drm_i915_private *dev_priv, 3189 u32 enabled_irqs) 3190 { 3191 u32 hotplug; 3192 3193 hotplug = I915_READ(PCH_PORT_HOTPLUG); 3194 hotplug |= PORTA_HOTPLUG_ENABLE | 3195 PORTB_HOTPLUG_ENABLE | 3196 PORTC_HOTPLUG_ENABLE; 3197 3198 DRM_DEBUG_KMS("Invert bit setting: hp_ctl:%x hp_port:%x\n", 3199 hotplug, enabled_irqs); 3200 hotplug &= ~BXT_DDI_HPD_INVERT_MASK; 3201 3202 /* 3203 * For BXT invert bit has to be set based on AOB design 3204 * for HPD detection logic, update it based on VBT fields. 3205 */ 3206 if ((enabled_irqs & BXT_DE_PORT_HP_DDIA) && 3207 intel_bios_is_port_hpd_inverted(dev_priv, PORT_A)) 3208 hotplug |= BXT_DDIA_HPD_INVERT; 3209 if ((enabled_irqs & BXT_DE_PORT_HP_DDIB) && 3210 intel_bios_is_port_hpd_inverted(dev_priv, PORT_B)) 3211 hotplug |= BXT_DDIB_HPD_INVERT; 3212 if ((enabled_irqs & BXT_DE_PORT_HP_DDIC) && 3213 intel_bios_is_port_hpd_inverted(dev_priv, PORT_C)) 3214 hotplug |= BXT_DDIC_HPD_INVERT; 3215 3216 I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 3217 } 3218 3219 static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv) 3220 { 3221 __bxt_hpd_detection_setup(dev_priv, BXT_DE_PORT_HOTPLUG_MASK); 3222 } 3223 3224 static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv) 3225 { 3226 u32 hotplug_irqs, enabled_irqs; 3227 3228 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bxt); 3229 hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK; 3230 3231 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs); 3232 3233 __bxt_hpd_detection_setup(dev_priv, enabled_irqs); 3234 } 3235 3236 static void ibx_irq_postinstall(struct drm_device *dev) 3237 { 3238 struct drm_i915_private *dev_priv = to_i915(dev); 3239 u32 mask; 3240 3241 if (HAS_PCH_NOP(dev_priv)) 3242 return; 3243 3244 if (HAS_PCH_IBX(dev_priv)) 3245 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON; 3246 else 3247 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT; 3248 3249 gen5_assert_iir_is_zero(dev_priv, SDEIIR); 3250 I915_WRITE(SDEIMR, ~mask); 3251 3252 if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) || 3253 HAS_PCH_LPT(dev_priv)) 3254 ; /* TODO: Enable HPD detection on older PCH platforms too */ 3255 else 3256 spt_hpd_detection_setup(dev_priv); 3257 } 3258 3259 static void gen5_gt_irq_postinstall(struct drm_device *dev) 3260 { 3261 struct drm_i915_private *dev_priv = to_i915(dev); 3262 u32 pm_irqs, gt_irqs; 3263 3264 pm_irqs = gt_irqs = 0; 3265 3266 dev_priv->gt_irq_mask = ~0; 3267 if (HAS_L3_DPF(dev_priv)) { 3268 /* L3 parity interrupt is always unmasked. */ 3269 dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev_priv); 3270 gt_irqs |= GT_PARITY_ERROR(dev_priv); 3271 } 3272 3273 gt_irqs |= GT_RENDER_USER_INTERRUPT; 3274 if (IS_GEN5(dev_priv)) { 3275 gt_irqs |= ILK_BSD_USER_INTERRUPT; 3276 } else { 3277 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT; 3278 } 3279 3280 GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs); 3281 3282 if (INTEL_GEN(dev_priv) >= 6) { 3283 /* 3284 * RPS interrupts will get enabled/disabled on demand when RPS 3285 * itself is enabled/disabled. 
3286 */ 3287 if (HAS_VEBOX(dev_priv)) { 3288 pm_irqs |= PM_VEBOX_USER_INTERRUPT; 3289 dev_priv->pm_ier |= PM_VEBOX_USER_INTERRUPT; 3290 } 3291 3292 dev_priv->pm_imr = 0xffffffff; 3293 GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_imr, pm_irqs); 3294 } 3295 } 3296 3297 static int ironlake_irq_postinstall(struct drm_device *dev) 3298 { 3299 struct drm_i915_private *dev_priv = to_i915(dev); 3300 u32 display_mask, extra_mask; 3301 3302 if (INTEL_GEN(dev_priv) >= 7) { 3303 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | 3304 DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB | 3305 DE_PLANEB_FLIP_DONE_IVB | 3306 DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB); 3307 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB | 3308 DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB | 3309 DE_DP_A_HOTPLUG_IVB); 3310 } else { 3311 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | 3312 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE | 3313 DE_AUX_CHANNEL_A | 3314 DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE | 3315 DE_POISON); 3316 extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT | 3317 DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN | 3318 DE_DP_A_HOTPLUG); 3319 } 3320 3321 dev_priv->irq_mask = ~display_mask; 3322 3323 I915_WRITE(HWSTAM, 0xeffe); 3324 3325 ibx_irq_pre_postinstall(dev); 3326 3327 GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask); 3328 3329 gen5_gt_irq_postinstall(dev); 3330 3331 ibx_irq_postinstall(dev); 3332 3333 if (IS_IRONLAKE_M(dev_priv)) { 3334 /* Enable PCU event interrupts 3335 * 3336 * spinlocking not required here for correctness since interrupt 3337 * setup is guaranteed to run in single-threaded context. But we 3338 * need it to make the assert_spin_locked happy. */ 3339 spin_lock_irq(&dev_priv->irq_lock); 3340 ilk_enable_display_irq(dev_priv, DE_PCU_EVENT); 3341 spin_unlock_irq(&dev_priv->irq_lock); 3342 } 3343 3344 return 0; 3345 } 3346 3347 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv) 3348 { 3349 assert_spin_locked(&dev_priv->irq_lock); 3350 3351 if (dev_priv->display_irqs_enabled) 3352 return; 3353 3354 dev_priv->display_irqs_enabled = true; 3355 3356 if (intel_irqs_enabled(dev_priv)) { 3357 vlv_display_irq_reset(dev_priv); 3358 vlv_display_irq_postinstall(dev_priv); 3359 } 3360 } 3361 3362 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv) 3363 { 3364 assert_spin_locked(&dev_priv->irq_lock); 3365 3366 if (!dev_priv->display_irqs_enabled) 3367 return; 3368 3369 dev_priv->display_irqs_enabled = false; 3370 3371 if (intel_irqs_enabled(dev_priv)) 3372 vlv_display_irq_reset(dev_priv); 3373 } 3374 3375 3376 static int valleyview_irq_postinstall(struct drm_device *dev) 3377 { 3378 struct drm_i915_private *dev_priv = to_i915(dev); 3379 3380 gen5_gt_irq_postinstall(dev); 3381 3382 spin_lock_irq(&dev_priv->irq_lock); 3383 if (dev_priv->display_irqs_enabled) 3384 vlv_display_irq_postinstall(dev_priv); 3385 spin_unlock_irq(&dev_priv->irq_lock); 3386 3387 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); 3388 POSTING_READ(VLV_MASTER_IER); 3389 3390 return 0; 3391 } 3392 3393 static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv) 3394 { 3395 /* These are interrupts we'll toggle with the ring mask register */ 3396 uint32_t gt_interrupts[] = { 3397 GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT | 3398 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT | 3399 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT | 3400 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT, 3401 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT | 3402 
GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT | 3403 GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT | 3404 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT, 3405 0, 3406 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT | 3407 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT 3408 }; 3409 3410 if (HAS_L3_DPF(dev_priv)) 3411 gt_interrupts[0] |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT; 3412 3413 dev_priv->pm_ier = 0x0; 3414 dev_priv->pm_imr = ~dev_priv->pm_ier; 3415 GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]); 3416 GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]); 3417 /* 3418 * RPS interrupts will get enabled/disabled on demand when RPS itself 3419 * is enabled/disabled. Same will be the case for GuC interrupts. 3420 */ 3421 GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_imr, dev_priv->pm_ier); 3422 GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]); 3423 } 3424 3425 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) 3426 { 3427 uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE; 3428 uint32_t de_pipe_enables; 3429 u32 de_port_masked = GEN8_AUX_CHANNEL_A; 3430 u32 de_port_enables; 3431 u32 de_misc_masked = GEN8_DE_MISC_GSE; 3432 enum pipe pipe; 3433 3434 if (INTEL_INFO(dev_priv)->gen >= 9) { 3435 de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE | 3436 GEN9_DE_PIPE_IRQ_FAULT_ERRORS; 3437 de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C | 3438 GEN9_AUX_CHANNEL_D; 3439 if (IS_GEN9_LP(dev_priv)) 3440 de_port_masked |= BXT_DE_PORT_GMBUS; 3441 } else { 3442 de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE | 3443 GEN8_DE_PIPE_IRQ_FAULT_ERRORS; 3444 } 3445 3446 de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK | 3447 GEN8_PIPE_FIFO_UNDERRUN; 3448 3449 de_port_enables = de_port_masked; 3450 if (IS_GEN9_LP(dev_priv)) 3451 de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK; 3452 else if (IS_BROADWELL(dev_priv)) 3453 de_port_enables |= GEN8_PORT_DP_A_HOTPLUG; 3454 3455 dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked; 3456 dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked; 3457 dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked; 3458 3459 for_each_pipe(dev_priv, pipe) 3460 if (intel_display_power_is_enabled(dev_priv, 3461 POWER_DOMAIN_PIPE(pipe))) 3462 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe, 3463 dev_priv->de_irq_mask[pipe], 3464 de_pipe_enables); 3465 3466 GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables); 3467 GEN5_IRQ_INIT(GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked); 3468 3469 if (IS_GEN9_LP(dev_priv)) 3470 bxt_hpd_detection_setup(dev_priv); 3471 } 3472 3473 static int gen8_irq_postinstall(struct drm_device *dev) 3474 { 3475 struct drm_i915_private *dev_priv = to_i915(dev); 3476 3477 if (HAS_PCH_SPLIT(dev_priv)) 3478 ibx_irq_pre_postinstall(dev); 3479 3480 gen8_gt_irq_postinstall(dev_priv); 3481 gen8_de_irq_postinstall(dev_priv); 3482 3483 if (HAS_PCH_SPLIT(dev_priv)) 3484 ibx_irq_postinstall(dev); 3485 3486 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 3487 POSTING_READ(GEN8_MASTER_IRQ); 3488 3489 return 0; 3490 } 3491 3492 static int cherryview_irq_postinstall(struct drm_device *dev) 3493 { 3494 struct drm_i915_private *dev_priv = to_i915(dev); 3495 3496 gen8_gt_irq_postinstall(dev_priv); 3497 3498 spin_lock_irq(&dev_priv->irq_lock); 3499 if (dev_priv->display_irqs_enabled) 3500 vlv_display_irq_postinstall(dev_priv); 3501 spin_unlock_irq(&dev_priv->irq_lock); 3502 3503 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 3504 POSTING_READ(GEN8_MASTER_IRQ); 3505 3506 return 0; 3507 } 3508 3509 static void gen8_irq_uninstall(struct
drm_device *dev) 3510 { 3511 struct drm_i915_private *dev_priv = to_i915(dev); 3512 3513 if (!dev_priv) 3514 return; 3515 3516 gen8_irq_reset(dev); 3517 } 3518 3519 static void valleyview_irq_uninstall(struct drm_device *dev) 3520 { 3521 struct drm_i915_private *dev_priv = to_i915(dev); 3522 3523 if (!dev_priv) 3524 return; 3525 3526 I915_WRITE(VLV_MASTER_IER, 0); 3527 POSTING_READ(VLV_MASTER_IER); 3528 3529 gen5_gt_irq_reset(dev_priv); 3530 3531 I915_WRITE(HWSTAM, 0xffffffff); 3532 3533 spin_lock_irq(&dev_priv->irq_lock); 3534 if (dev_priv->display_irqs_enabled) 3535 vlv_display_irq_reset(dev_priv); 3536 spin_unlock_irq(&dev_priv->irq_lock); 3537 } 3538 3539 static void cherryview_irq_uninstall(struct drm_device *dev) 3540 { 3541 struct drm_i915_private *dev_priv = to_i915(dev); 3542 3543 if (!dev_priv) 3544 return; 3545 3546 I915_WRITE(GEN8_MASTER_IRQ, 0); 3547 POSTING_READ(GEN8_MASTER_IRQ); 3548 3549 gen8_gt_irq_reset(dev_priv); 3550 3551 GEN5_IRQ_RESET(GEN8_PCU_); 3552 3553 spin_lock_irq(&dev_priv->irq_lock); 3554 if (dev_priv->display_irqs_enabled) 3555 vlv_display_irq_reset(dev_priv); 3556 spin_unlock_irq(&dev_priv->irq_lock); 3557 } 3558 3559 static void ironlake_irq_uninstall(struct drm_device *dev) 3560 { 3561 struct drm_i915_private *dev_priv = to_i915(dev); 3562 3563 if (!dev_priv) 3564 return; 3565 3566 ironlake_irq_reset(dev); 3567 } 3568 3569 static void i8xx_irq_preinstall(struct drm_device * dev) 3570 { 3571 struct drm_i915_private *dev_priv = to_i915(dev); 3572 int pipe; 3573 3574 for_each_pipe(dev_priv, pipe) 3575 I915_WRITE(PIPESTAT(pipe), 0); 3576 I915_WRITE16(IMR, 0xffff); 3577 I915_WRITE16(IER, 0x0); 3578 POSTING_READ16(IER); 3579 } 3580 3581 static int i8xx_irq_postinstall(struct drm_device *dev) 3582 { 3583 struct drm_i915_private *dev_priv = to_i915(dev); 3584 3585 I915_WRITE16(EMR, 3586 ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); 3587 3588 /* Unmask the interrupts that we always want on. */ 3589 dev_priv->irq_mask = 3590 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3591 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3592 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3593 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT); 3594 I915_WRITE16(IMR, dev_priv->irq_mask); 3595 3596 I915_WRITE16(IER, 3597 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3598 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3599 I915_USER_INTERRUPT); 3600 POSTING_READ16(IER); 3601 3602 /* Interrupt setup is already guaranteed to be single-threaded, this is 3603 * just to make the assert_spin_locked check happy. */ 3604 spin_lock_irq(&dev_priv->irq_lock); 3605 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 3606 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 3607 spin_unlock_irq(&dev_priv->irq_lock); 3608 3609 return 0; 3610 } 3611 3612 /* 3613 * Returns true when a page flip has completed. 3614 */ 3615 static bool i8xx_handle_vblank(struct drm_i915_private *dev_priv, 3616 int plane, int pipe, u32 iir) 3617 { 3618 u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane); 3619 3620 if (!intel_pipe_handle_vblank(dev_priv, pipe)) 3621 return false; 3622 3623 if ((iir & flip_pending) == 0) 3624 goto check_page_flip; 3625 3626 /* We detect FlipDone by looking for the change in PendingFlip from '1' 3627 * to '0' on the following vblank, i.e. IIR has the Pendingflip 3628 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence 3629 * the flip is completed (no longer pending). Since this doesn't raise 3630 * an interrupt per se, we watch for the change at vblank. 
3631 */ 3632 if (I915_READ16(ISR) & flip_pending) 3633 goto check_page_flip; 3634 3635 intel_finish_page_flip_cs(dev_priv, pipe); 3636 return true; 3637 3638 check_page_flip: 3639 intel_check_page_flip(dev_priv, pipe); 3640 return false; 3641 } 3642 3643 static irqreturn_t i8xx_irq_handler(int irq, void *arg) 3644 { 3645 struct drm_device *dev = arg; 3646 struct drm_i915_private *dev_priv = to_i915(dev); 3647 u16 iir, new_iir; 3648 u32 pipe_stats[2]; 3649 int pipe; 3650 u16 flip_mask = 3651 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3652 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 3653 irqreturn_t ret; 3654 3655 if (!intel_irqs_enabled(dev_priv)) 3656 return IRQ_NONE; 3657 3658 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 3659 disable_rpm_wakeref_asserts(dev_priv); 3660 3661 ret = IRQ_NONE; 3662 iir = I915_READ16(IIR); 3663 if (iir == 0) 3664 goto out; 3665 3666 while (iir & ~flip_mask) { 3667 /* Can't rely on pipestat interrupt bit in iir as it might 3668 * have been cleared after the pipestat interrupt was received. 3669 * It doesn't set the bit in iir again, but it still produces 3670 * interrupts (for non-MSI). 3671 */ 3672 spin_lock(&dev_priv->irq_lock); 3673 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 3674 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir); 3675 3676 for_each_pipe(dev_priv, pipe) { 3677 i915_reg_t reg = PIPESTAT(pipe); 3678 pipe_stats[pipe] = I915_READ(reg); 3679 3680 /* 3681 * Clear the PIPE*STAT regs before the IIR 3682 */ 3683 if (pipe_stats[pipe] & 0x8000ffff) 3684 I915_WRITE(reg, pipe_stats[pipe]); 3685 } 3686 spin_unlock(&dev_priv->irq_lock); 3687 3688 I915_WRITE16(IIR, iir & ~flip_mask); 3689 new_iir = I915_READ16(IIR); /* Flush posted writes */ 3690 3691 if (iir & I915_USER_INTERRUPT) 3692 notify_ring(dev_priv->engine[RCS]); 3693 3694 for_each_pipe(dev_priv, pipe) { 3695 int plane = pipe; 3696 if (HAS_FBC(dev_priv)) 3697 plane = !plane; 3698 3699 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS && 3700 i8xx_handle_vblank(dev_priv, plane, pipe, iir)) 3701 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane); 3702 3703 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 3704 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 3705 3706 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 3707 intel_cpu_fifo_underrun_irq_handler(dev_priv, 3708 pipe); 3709 } 3710 3711 iir = new_iir; 3712 } 3713 ret = IRQ_HANDLED; 3714 3715 out: 3716 enable_rpm_wakeref_asserts(dev_priv); 3717 3718 return ret; 3719 } 3720 3721 static void i8xx_irq_uninstall(struct drm_device * dev) 3722 { 3723 struct drm_i915_private *dev_priv = to_i915(dev); 3724 int pipe; 3725 3726 for_each_pipe(dev_priv, pipe) { 3727 /* Clear enable bits; then clear status bits */ 3728 I915_WRITE(PIPESTAT(pipe), 0); 3729 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); 3730 } 3731 I915_WRITE16(IMR, 0xffff); 3732 I915_WRITE16(IER, 0x0); 3733 I915_WRITE16(IIR, I915_READ16(IIR)); 3734 } 3735 3736 static void i915_irq_preinstall(struct drm_device * dev) 3737 { 3738 struct drm_i915_private *dev_priv = to_i915(dev); 3739 int pipe; 3740 3741 if (I915_HAS_HOTPLUG(dev_priv)) { 3742 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); 3743 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3744 } 3745 3746 I915_WRITE16(HWSTAM, 0xeffe); 3747 for_each_pipe(dev_priv, pipe) 3748 I915_WRITE(PIPESTAT(pipe), 0); 3749 I915_WRITE(IMR, 0xffffffff); 3750 I915_WRITE(IER, 0x0); 3751 POSTING_READ(IER); 3752 } 3753 3754 static int i915_irq_postinstall(struct drm_device *dev) 3755 { 
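/*
 * Rough interrupt plumbing on these parts: events latch into ISR, IMR
 * selects which of them propagate into IIR, and IER selects which IIR
 * bits actually assert the CPU interrupt. EMR, programmed first below,
 * masks which error conditions are reported in EIR and can therefore
 * surface as a render command parser error interrupt.
 */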
3756 struct drm_i915_private *dev_priv = to_i915(dev); 3757 u32 enable_mask; 3758 3759 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); 3760 3761 /* Unmask the interrupts that we always want on. */ 3762 dev_priv->irq_mask = 3763 ~(I915_ASLE_INTERRUPT | 3764 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3765 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3766 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3767 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT); 3768 3769 enable_mask = 3770 I915_ASLE_INTERRUPT | 3771 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3772 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3773 I915_USER_INTERRUPT; 3774 3775 if (I915_HAS_HOTPLUG(dev_priv)) { 3776 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); 3777 POSTING_READ(PORT_HOTPLUG_EN); 3778 3779 /* Enable in IER... */ 3780 enable_mask |= I915_DISPLAY_PORT_INTERRUPT; 3781 /* and unmask in IMR */ 3782 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT; 3783 } 3784 3785 I915_WRITE(IMR, dev_priv->irq_mask); 3786 I915_WRITE(IER, enable_mask); 3787 POSTING_READ(IER); 3788 3789 i915_enable_asle_pipestat(dev_priv); 3790 3791 /* Interrupt setup is already guaranteed to be single-threaded, this is 3792 * just to make the assert_spin_locked check happy. */ 3793 spin_lock_irq(&dev_priv->irq_lock); 3794 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 3795 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 3796 spin_unlock_irq(&dev_priv->irq_lock); 3797 3798 return 0; 3799 } 3800 3801 /* 3802 * Returns true when a page flip has completed. 3803 */ 3804 static bool i915_handle_vblank(struct drm_i915_private *dev_priv, 3805 int plane, int pipe, u32 iir) 3806 { 3807 u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane); 3808 3809 if (!intel_pipe_handle_vblank(dev_priv, pipe)) 3810 return false; 3811 3812 if ((iir & flip_pending) == 0) 3813 goto check_page_flip; 3814 3815 /* We detect FlipDone by looking for the change in PendingFlip from '1' 3816 * to '0' on the following vblank, i.e. IIR has the Pendingflip 3817 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence 3818 * the flip is completed (no longer pending). Since this doesn't raise 3819 * an interrupt per se, we watch for the change at vblank. 3820 */ 3821 if (I915_READ(ISR) & flip_pending) 3822 goto check_page_flip; 3823 3824 intel_finish_page_flip_cs(dev_priv, pipe); 3825 return true; 3826 3827 check_page_flip: 3828 intel_check_page_flip(dev_priv, pipe); 3829 return false; 3830 } 3831 3832 static irqreturn_t i915_irq_handler(int irq, void *arg) 3833 { 3834 struct drm_device *dev = arg; 3835 struct drm_i915_private *dev_priv = to_i915(dev); 3836 u32 iir, new_iir, pipe_stats[I915_MAX_PIPES]; 3837 u32 flip_mask = 3838 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3839 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 3840 int pipe, ret = IRQ_NONE; 3841 3842 if (!intel_irqs_enabled(dev_priv)) 3843 return IRQ_NONE; 3844 3845 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 3846 disable_rpm_wakeref_asserts(dev_priv); 3847 3848 iir = I915_READ(IIR); 3849 do { 3850 bool irq_received = (iir & ~flip_mask) != 0; 3851 bool blc_event = false; 3852 3853 /* Can't rely on pipestat interrupt bit in iir as it might 3854 * have been cleared after the pipestat interrupt was received. 3855 * It doesn't set the bit in iir again, but it still produces 3856 * interrupts (for non-MSI). 
3857 */ 3858 spin_lock(&dev_priv->irq_lock); 3859 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 3860 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir); 3861 3862 for_each_pipe(dev_priv, pipe) { 3863 i915_reg_t reg = PIPESTAT(pipe); 3864 pipe_stats[pipe] = I915_READ(reg); 3865 3866 /* Clear the PIPE*STAT regs before the IIR */ 3867 if (pipe_stats[pipe] & 0x8000ffff) { 3868 I915_WRITE(reg, pipe_stats[pipe]); 3869 irq_received = true; 3870 } 3871 } 3872 spin_unlock(&dev_priv->irq_lock); 3873 3874 if (!irq_received) 3875 break; 3876 3877 /* Consume port. Then clear IIR or we'll miss events */ 3878 if (I915_HAS_HOTPLUG(dev_priv) && 3879 iir & I915_DISPLAY_PORT_INTERRUPT) { 3880 u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv); 3881 if (hotplug_status) 3882 i9xx_hpd_irq_handler(dev_priv, hotplug_status); 3883 } 3884 3885 I915_WRITE(IIR, iir & ~flip_mask); 3886 new_iir = I915_READ(IIR); /* Flush posted writes */ 3887 3888 if (iir & I915_USER_INTERRUPT) 3889 notify_ring(dev_priv->engine[RCS]); 3890 3891 for_each_pipe(dev_priv, pipe) { 3892 int plane = pipe; 3893 if (HAS_FBC(dev_priv)) 3894 plane = !plane; 3895 3896 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS && 3897 i915_handle_vblank(dev_priv, plane, pipe, iir)) 3898 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane); 3899 3900 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 3901 blc_event = true; 3902 3903 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 3904 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 3905 3906 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 3907 intel_cpu_fifo_underrun_irq_handler(dev_priv, 3908 pipe); 3909 } 3910 3911 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 3912 intel_opregion_asle_intr(dev_priv); 3913 3914 /* With MSI, interrupts are only generated when iir 3915 * transitions from zero to nonzero. If another bit got 3916 * set while we were handling the existing iir bits, then 3917 * we would never get another interrupt. 3918 * 3919 * This is fine on non-MSI as well, as if we hit this path 3920 * we avoid exiting the interrupt handler only to generate 3921 * another one. 3922 * 3923 * Note that for MSI this could cause a stray interrupt report 3924 * if an interrupt landed in the time between writing IIR and 3925 * the posting read. This should be rare enough to never 3926 * trigger the 99% of 100,000 interrupts test for disabling 3927 * stray interrupts. 
3928 */ 3929 ret = IRQ_HANDLED; 3930 iir = new_iir; 3931 } while (iir & ~flip_mask); 3932 3933 enable_rpm_wakeref_asserts(dev_priv); 3934 3935 return ret; 3936 } 3937 3938 static void i915_irq_uninstall(struct drm_device * dev) 3939 { 3940 struct drm_i915_private *dev_priv = to_i915(dev); 3941 int pipe; 3942 3943 if (I915_HAS_HOTPLUG(dev_priv)) { 3944 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); 3945 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3946 } 3947 3948 I915_WRITE16(HWSTAM, 0xffff); 3949 for_each_pipe(dev_priv, pipe) { 3950 /* Clear enable bits; then clear status bits */ 3951 I915_WRITE(PIPESTAT(pipe), 0); 3952 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); 3953 } 3954 I915_WRITE(IMR, 0xffffffff); 3955 I915_WRITE(IER, 0x0); 3956 3957 I915_WRITE(IIR, I915_READ(IIR)); 3958 } 3959 3960 static void i965_irq_preinstall(struct drm_device * dev) 3961 { 3962 struct drm_i915_private *dev_priv = to_i915(dev); 3963 int pipe; 3964 3965 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); 3966 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3967 3968 I915_WRITE(HWSTAM, 0xeffe); 3969 for_each_pipe(dev_priv, pipe) 3970 I915_WRITE(PIPESTAT(pipe), 0); 3971 I915_WRITE(IMR, 0xffffffff); 3972 I915_WRITE(IER, 0x0); 3973 POSTING_READ(IER); 3974 } 3975 3976 static int i965_irq_postinstall(struct drm_device *dev) 3977 { 3978 struct drm_i915_private *dev_priv = to_i915(dev); 3979 u32 enable_mask; 3980 u32 error_mask; 3981 3982 /* Unmask the interrupts that we always want on. */ 3983 dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT | 3984 I915_DISPLAY_PORT_INTERRUPT | 3985 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3986 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3987 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3988 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | 3989 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 3990 3991 enable_mask = ~dev_priv->irq_mask; 3992 enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3993 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT); 3994 enable_mask |= I915_USER_INTERRUPT; 3995 3996 if (IS_G4X(dev_priv)) 3997 enable_mask |= I915_BSD_USER_INTERRUPT; 3998 3999 /* Interrupt setup is already guaranteed to be single-threaded, this is 4000 * just to make the assert_spin_locked check happy. */ 4001 spin_lock_irq(&dev_priv->irq_lock); 4002 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); 4003 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 4004 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 4005 spin_unlock_irq(&dev_priv->irq_lock); 4006 4007 /* 4008 * Enable some error detection, note the instruction error mask 4009 * bit is reserved, so we leave it masked. 
4010 */ 4011 if (IS_G4X(dev_priv)) { 4012 error_mask = ~(GM45_ERROR_PAGE_TABLE | 4013 GM45_ERROR_MEM_PRIV | 4014 GM45_ERROR_CP_PRIV | 4015 I915_ERROR_MEMORY_REFRESH); 4016 } else { 4017 error_mask = ~(I915_ERROR_PAGE_TABLE | 4018 I915_ERROR_MEMORY_REFRESH); 4019 } 4020 I915_WRITE(EMR, error_mask); 4021 4022 I915_WRITE(IMR, dev_priv->irq_mask); 4023 I915_WRITE(IER, enable_mask); 4024 POSTING_READ(IER); 4025 4026 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); 4027 POSTING_READ(PORT_HOTPLUG_EN); 4028 4029 i915_enable_asle_pipestat(dev_priv); 4030 4031 return 0; 4032 } 4033 4034 static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv) 4035 { 4036 u32 hotplug_en; 4037 4038 assert_spin_locked(&dev_priv->irq_lock); 4039 4040 /* Note HDMI and DP share hotplug bits */ 4041 /* enable bits are the same for all generations */ 4042 hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915); 4043 /* Programming the CRT detection parameters tends 4044 to generate a spurious hotplug event about three 4045 seconds later. So just do it once. 4046 */ 4047 if (IS_G4X(dev_priv)) 4048 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64; 4049 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; 4050 4051 /* Ignore TV since it's buggy */ 4052 i915_hotplug_interrupt_update_locked(dev_priv, 4053 HOTPLUG_INT_EN_MASK | 4054 CRT_HOTPLUG_VOLTAGE_COMPARE_MASK | 4055 CRT_HOTPLUG_ACTIVATION_PERIOD_64, 4056 hotplug_en); 4057 } 4058 4059 static irqreturn_t i965_irq_handler(int irq, void *arg) 4060 { 4061 struct drm_device *dev = arg; 4062 struct drm_i915_private *dev_priv = to_i915(dev); 4063 u32 iir, new_iir; 4064 u32 pipe_stats[I915_MAX_PIPES]; 4065 int ret = IRQ_NONE, pipe; 4066 u32 flip_mask = 4067 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 4068 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 4069 4070 if (!intel_irqs_enabled(dev_priv)) 4071 return IRQ_NONE; 4072 4073 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 4074 disable_rpm_wakeref_asserts(dev_priv); 4075 4076 iir = I915_READ(IIR); 4077 4078 for (;;) { 4079 bool irq_received = (iir & ~flip_mask) != 0; 4080 bool blc_event = false; 4081 4082 /* Can't rely on pipestat interrupt bit in iir as it might 4083 * have been cleared after the pipestat interrupt was received. 4084 * It doesn't set the bit in iir again, but it still produces 4085 * interrupts (for non-MSI). 4086 */ 4087 spin_lock(&dev_priv->irq_lock); 4088 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 4089 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir); 4090 4091 for_each_pipe(dev_priv, pipe) { 4092 i915_reg_t reg = PIPESTAT(pipe); 4093 pipe_stats[pipe] = I915_READ(reg); 4094 4095 /* 4096 * Clear the PIPE*STAT regs before the IIR 4097 */ 4098 if (pipe_stats[pipe] & 0x8000ffff) { 4099 I915_WRITE(reg, pipe_stats[pipe]); 4100 irq_received = true; 4101 } 4102 } 4103 spin_unlock(&dev_priv->irq_lock); 4104 4105 if (!irq_received) 4106 break; 4107 4108 ret = IRQ_HANDLED; 4109 4110 /* Consume port. 
Then clear IIR or we'll miss events */ 4111 if (iir & I915_DISPLAY_PORT_INTERRUPT) { 4112 u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv); 4113 if (hotplug_status) 4114 i9xx_hpd_irq_handler(dev_priv, hotplug_status); 4115 } 4116 4117 I915_WRITE(IIR, iir & ~flip_mask); 4118 new_iir = I915_READ(IIR); /* Flush posted writes */ 4119 4120 if (iir & I915_USER_INTERRUPT) 4121 notify_ring(dev_priv->engine[RCS]); 4122 if (iir & I915_BSD_USER_INTERRUPT) 4123 notify_ring(dev_priv->engine[VCS]); 4124 4125 for_each_pipe(dev_priv, pipe) { 4126 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS && 4127 i915_handle_vblank(dev_priv, pipe, pipe, iir)) 4128 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe); 4129 4130 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 4131 blc_event = true; 4132 4133 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 4134 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 4135 4136 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 4137 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 4138 } 4139 4140 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 4141 intel_opregion_asle_intr(dev_priv); 4142 4143 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 4144 gmbus_irq_handler(dev_priv); 4145 4146 /* With MSI, interrupts are only generated when iir 4147 * transitions from zero to nonzero. If another bit got 4148 * set while we were handling the existing iir bits, then 4149 * we would never get another interrupt. 4150 * 4151 * This is fine on non-MSI as well, as if we hit this path 4152 * we avoid exiting the interrupt handler only to generate 4153 * another one. 4154 * 4155 * Note that for MSI this could cause a stray interrupt report 4156 * if an interrupt landed in the time between writing IIR and 4157 * the posting read. This should be rare enough to never 4158 * trigger the 99% of 100,000 interrupts test for disabling 4159 * stray interrupts. 4160 */ 4161 iir = new_iir; 4162 } 4163 4164 enable_rpm_wakeref_asserts(dev_priv); 4165 4166 return ret; 4167 } 4168 4169 static void i965_irq_uninstall(struct drm_device * dev) 4170 { 4171 struct drm_i915_private *dev_priv = to_i915(dev); 4172 int pipe; 4173 4174 if (!dev_priv) 4175 return; 4176 4177 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); 4178 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 4179 4180 I915_WRITE(HWSTAM, 0xffffffff); 4181 for_each_pipe(dev_priv, pipe) 4182 I915_WRITE(PIPESTAT(pipe), 0); 4183 I915_WRITE(IMR, 0xffffffff); 4184 I915_WRITE(IER, 0x0); 4185 4186 for_each_pipe(dev_priv, pipe) 4187 I915_WRITE(PIPESTAT(pipe), 4188 I915_READ(PIPESTAT(pipe)) & 0x8000ffff); 4189 I915_WRITE(IIR, I915_READ(IIR)); 4190 } 4191 4192 /** 4193 * intel_irq_init - initializes irq support 4194 * @dev_priv: i915 device instance 4195 * 4196 * This function initializes all the irq support including work items, timers 4197 * and all the vtables. It does not setup the interrupt itself though. 
4198 */
4199 void intel_irq_init(struct drm_i915_private *dev_priv)
4200 {
4201 struct drm_device *dev = &dev_priv->drm;
4202
4203 intel_hpd_init_work(dev_priv);
4204
4205 INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
4206 INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
4207
4208 if (HAS_GUC_SCHED(dev_priv))
4209 dev_priv->pm_guc_events = GEN9_GUC_TO_HOST_INT_EVENT;
4210
4211 /* Let's track the enabled rps events */
4212 if (IS_VALLEYVIEW(dev_priv))
4213 /* WaGsvRC0ResidencyMethod:vlv */
4214 dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
4215 else
4216 dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
4217
4218 dev_priv->rps.pm_intr_keep = 0;
4219
4220 /*
4221 * SNB and IVB can hard hang, and VLV/CHV may hard hang, on a looping
4222 * batchbuffer if GEN6_PM_UP_EI_EXPIRED is masked.
4223 *
4224 * TODO: verify if this can be reproduced on VLV,CHV.
4225 */
4226 if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv))
4227 dev_priv->rps.pm_intr_keep |= GEN6_PM_RP_UP_EI_EXPIRED;
4228
4229 if (INTEL_INFO(dev_priv)->gen >= 8)
4230 dev_priv->rps.pm_intr_keep |= GEN8_PMINTR_REDIRECT_TO_GUC;
4231
4232 if (IS_GEN2(dev_priv)) {
4233 /* Gen2 doesn't have a hardware frame counter */
4234 dev->max_vblank_count = 0;
4235 dev->driver->get_vblank_counter = drm_vblank_no_hw_counter;
4236 } else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
4237 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
4238 dev->driver->get_vblank_counter = g4x_get_vblank_counter;
4239 } else {
4240 dev->driver->get_vblank_counter = i915_get_vblank_counter;
4241 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
4242 }
4243
4244 /*
4245 * Opt out of the vblank disable timer on everything except gen2.
4246 * Gen2 doesn't have a hardware frame counter and so depends on
4247 * vblank interrupts to produce sane vblank sequence numbers.
4248 */
4249 if (!IS_GEN2(dev_priv))
4250 dev->vblank_disable_immediate = true;
4251
4252 /* Most platforms treat the display irq block as an always-on
4253 * power domain. vlv/chv can disable it at runtime and need
4254 * special care to avoid writing any of the display block registers
4255 * outside of the power domain. We defer setting up the display irqs
4256 * in this case to the runtime pm.
4257 */ 4258 dev_priv->display_irqs_enabled = true; 4259 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 4260 dev_priv->display_irqs_enabled = false; 4261 4262 dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp; 4263 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos; 4264 4265 if (IS_CHERRYVIEW(dev_priv)) { 4266 dev->driver->irq_handler = cherryview_irq_handler; 4267 dev->driver->irq_preinstall = cherryview_irq_preinstall; 4268 dev->driver->irq_postinstall = cherryview_irq_postinstall; 4269 dev->driver->irq_uninstall = cherryview_irq_uninstall; 4270 dev->driver->enable_vblank = i965_enable_vblank; 4271 dev->driver->disable_vblank = i965_disable_vblank; 4272 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 4273 } else if (IS_VALLEYVIEW(dev_priv)) { 4274 dev->driver->irq_handler = valleyview_irq_handler; 4275 dev->driver->irq_preinstall = valleyview_irq_preinstall; 4276 dev->driver->irq_postinstall = valleyview_irq_postinstall; 4277 dev->driver->irq_uninstall = valleyview_irq_uninstall; 4278 dev->driver->enable_vblank = i965_enable_vblank; 4279 dev->driver->disable_vblank = i965_disable_vblank; 4280 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 4281 } else if (INTEL_INFO(dev_priv)->gen >= 8) { 4282 dev->driver->irq_handler = gen8_irq_handler; 4283 dev->driver->irq_preinstall = gen8_irq_reset; 4284 dev->driver->irq_postinstall = gen8_irq_postinstall; 4285 dev->driver->irq_uninstall = gen8_irq_uninstall; 4286 dev->driver->enable_vblank = gen8_enable_vblank; 4287 dev->driver->disable_vblank = gen8_disable_vblank; 4288 if (IS_GEN9_LP(dev_priv)) 4289 dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup; 4290 else if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv)) 4291 dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup; 4292 else 4293 dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup; 4294 } else if (HAS_PCH_SPLIT(dev_priv)) { 4295 dev->driver->irq_handler = ironlake_irq_handler; 4296 dev->driver->irq_preinstall = ironlake_irq_reset; 4297 dev->driver->irq_postinstall = ironlake_irq_postinstall; 4298 dev->driver->irq_uninstall = ironlake_irq_uninstall; 4299 dev->driver->enable_vblank = ironlake_enable_vblank; 4300 dev->driver->disable_vblank = ironlake_disable_vblank; 4301 dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup; 4302 } else { 4303 if (IS_GEN2(dev_priv)) { 4304 dev->driver->irq_preinstall = i8xx_irq_preinstall; 4305 dev->driver->irq_postinstall = i8xx_irq_postinstall; 4306 dev->driver->irq_handler = i8xx_irq_handler; 4307 dev->driver->irq_uninstall = i8xx_irq_uninstall; 4308 dev->driver->enable_vblank = i8xx_enable_vblank; 4309 dev->driver->disable_vblank = i8xx_disable_vblank; 4310 } else if (IS_GEN3(dev_priv)) { 4311 dev->driver->irq_preinstall = i915_irq_preinstall; 4312 dev->driver->irq_postinstall = i915_irq_postinstall; 4313 dev->driver->irq_uninstall = i915_irq_uninstall; 4314 dev->driver->irq_handler = i915_irq_handler; 4315 dev->driver->enable_vblank = i8xx_enable_vblank; 4316 dev->driver->disable_vblank = i8xx_disable_vblank; 4317 } else { 4318 dev->driver->irq_preinstall = i965_irq_preinstall; 4319 dev->driver->irq_postinstall = i965_irq_postinstall; 4320 dev->driver->irq_uninstall = i965_irq_uninstall; 4321 dev->driver->irq_handler = i965_irq_handler; 4322 dev->driver->enable_vblank = i965_enable_vblank; 4323 dev->driver->disable_vblank = i965_disable_vblank; 4324 } 4325 if (I915_HAS_HOTPLUG(dev_priv)) 4326 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 4327 } 4328 } 4329 4330 /** 4331 * intel_irq_install - enables the 
hardware interrupt
4332 * @dev_priv: i915 device instance
4333 *
4334 * This function enables the hardware interrupt handling, but leaves the hotplug
4335 * handling still disabled. It is called after intel_irq_init().
4336 *
4337 * In the driver load and resume code we need working interrupts in a few places
4338 * but don't want to deal with the hassle of concurrent probe and hotplug
4339 * workers. Hence the split into this two-stage approach.
4340 */
4341 int intel_irq_install(struct drm_i915_private *dev_priv)
4342 {
4343 /*
4344 * We enable some interrupt sources in our postinstall hooks, so mark
4345 * interrupts as enabled _before_ actually enabling them to avoid
4346 * special cases in our ordering checks.
4347 */
4348 dev_priv->pm.irqs_enabled = true;
4349
4350 return drm_irq_install(&dev_priv->drm, dev_priv->drm.pdev->irq);
4351 }
4352
4353 /**
4354 * intel_irq_uninstall - finalizes all irq handling
4355 * @dev_priv: i915 device instance
4356 *
4357 * This stops interrupt and hotplug handling and unregisters and frees all
4358 * resources acquired in the init functions.
4359 */
4360 void intel_irq_uninstall(struct drm_i915_private *dev_priv)
4361 {
4362 drm_irq_uninstall(&dev_priv->drm);
4363 intel_hpd_cancel_work(dev_priv);
4364 dev_priv->pm.irqs_enabled = false;
4365 }
4366
4367 /**
4368 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
4369 * @dev_priv: i915 device instance
4370 *
4371 * This function is used to disable interrupts at runtime, both in the runtime
4372 * pm and the system suspend/resume code.
4373 */
4374 void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
4375 {
4376 dev_priv->drm.driver->irq_uninstall(&dev_priv->drm);
4377 dev_priv->pm.irqs_enabled = false;
4378 synchronize_irq(dev_priv->drm.irq);
4379 }
4380
4381 /**
4382 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
4383 * @dev_priv: i915 device instance
4384 *
4385 * This function is used to enable interrupts at runtime, both in the runtime
4386 * pm and the system suspend/resume code.
4387 */
4388 void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
4389 {
4390 dev_priv->pm.irqs_enabled = true;
4391 dev_priv->drm.driver->irq_preinstall(&dev_priv->drm);
4392 dev_priv->drm.driver->irq_postinstall(&dev_priv->drm);
4393 }
4394
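/*
 * Illustrative sketch only, not part of the driver: a minimal outline of
 * how a caller is expected to pair the helpers above during driver load
 * and runtime pm transitions. The example_* functions are hypothetical;
 * the real callers live in the load and runtime pm paths elsewhere in
 * i915. Guarded by "#if 0" so it is never built.
 */
#if 0
static int example_load(struct drm_i915_private *dev_priv)
{
	int ret;

	/* Stage 1: work items, timers and the irq vtable, no hw touched. */
	intel_irq_init(dev_priv);

	/* Stage 2: request the irq line and run the postinstall hook. */
	ret = intel_irq_install(dev_priv);
	if (ret)
		return ret;

	/* Hotplug handling is still disabled here and is enabled later. */
	return 0;
}

static void example_runtime_suspend(struct drm_i915_private *dev_priv)
{
	/* Mask everything and wait for in-flight handlers to complete. */
	intel_runtime_pm_disable_interrupts(dev_priv);
}

static void example_runtime_resume(struct drm_i915_private *dev_priv)
{
	/* Re-run the preinstall/postinstall hooks to rearm the interrupts. */
	intel_runtime_pm_enable_interrupts(dev_priv);
}
#endif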