/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

/**
 * DOC: interrupt handling
 *
 * These functions provide the basic support for enabling and disabling the
 * interrupt handling support. There's a lot more functionality in i915_irq.c
 * and related files, but that will be described in separate chapters.
 */

static const u32 hpd_ilk[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG,
};

static const u32 hpd_ivb[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
};

static const u32 hpd_bdw[HPD_NUM_PINS] = {
	[HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG,
};

static const u32 hpd_ibx[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_spt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
	[HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT
};

static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

/* BXT hpd list */
static const u32 hpd_bxt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = BXT_DE_PORT_HP_DDIA,
	[HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
	[HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
};

/* IIR can theoretically queue up two events. Be paranoid. */
#define GEN8_IRQ_RESET_NDX(type, which) do { \
	I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), 0); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
} while (0)

#define GEN5_IRQ_RESET(type) do { \
	I915_WRITE(type##IMR, 0xffffffff); \
	POSTING_READ(type##IMR); \
	I915_WRITE(type##IER, 0); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
} while (0)

/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
static void gen5_assert_iir_is_zero(struct drm_i915_private *dev_priv,
				    i915_reg_t reg)
{
	u32 val = I915_READ(reg);

	if (val == 0)
		return;

	WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
	     i915_mmio_reg_offset(reg), val);
	I915_WRITE(reg, 0xffffffff);
	POSTING_READ(reg);
	I915_WRITE(reg, 0xffffffff);
	POSTING_READ(reg);
}

#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
	gen5_assert_iir_is_zero(dev_priv, GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
	I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
} while (0)

#define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
	gen5_assert_iir_is_zero(dev_priv, type##IIR); \
	I915_WRITE(type##IER, (ier_val)); \
	I915_WRITE(type##IMR, (imr_val)); \
	POSTING_READ(type##IMR); \
} while (0)

static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);

/* For display hotplug interrupt */
static inline void
i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
				     uint32_t mask,
				     uint32_t bits)
{
	uint32_t val;

	assert_spin_locked(&dev_priv->irq_lock);
	WARN_ON(bits & ~mask);

	val = I915_READ(PORT_HOTPLUG_EN);
	val &= ~mask;
	val |= bits;
	I915_WRITE(PORT_HOTPLUG_EN, val);
}

/**
 * i915_hotplug_interrupt_update - update hotplug interrupt enable
 * @dev_priv: driver private
 * @mask: bits to update
 * @bits: bits to enable
 * NOTE: the HPD enable bits are modified both inside and outside
 * of an interrupt context. To avoid that read-modify-write cycles
 * interfere, these bits are protected by a spinlock. Since this
 * function is usually not called from a context where the lock is
 * held already, this function acquires the lock itself. A non-locking
 * version is also available.
 */
void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
				   uint32_t mask,
				   uint32_t bits)
{
	spin_lock_irq(&dev_priv->irq_lock);
	i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
	spin_unlock_irq(&dev_priv->irq_lock);
}

/**
 * ilk_update_display_irq - update DEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ilk_update_display_irq(struct drm_i915_private *dev_priv,
			    uint32_t interrupt_mask,
			    uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	assert_spin_locked(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->irq_mask) {
		dev_priv->irq_mask = new_val;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	dev_priv->gt_irq_mask &= ~interrupt_mask;
	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	POSTING_READ(GTIMR);
}

void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, mask);
}

void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, 0);
}

static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
}

static i915_reg_t gen6_pm_imr(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR;
}

static i915_reg_t gen6_pm_ier(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ?
		GEN8_GT_IER(2) : GEN6_PMIER;
}

/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	new_val = dev_priv->pm_irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_irq_mask) {
		dev_priv->pm_irq_mask = new_val;
		I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_irq_mask);
		POSTING_READ(gen6_pm_imr(dev_priv));
	}
}

void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	snb_update_pm_irq(dev_priv, mask, mask);
}

static void __gen6_disable_pm_irq(struct drm_i915_private *dev_priv,
				  uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, 0);
}

void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	__gen6_disable_pm_irq(dev_priv, mask);
}

void gen6_reset_rps_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	i915_reg_t reg = gen6_pm_iir(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	I915_WRITE(reg, dev_priv->pm_rps_events);
	I915_WRITE(reg, dev_priv->pm_rps_events);
	POSTING_READ(reg);
	dev_priv->rps.pm_iir = 0;
	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen6_enable_rps_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	spin_lock_irq(&dev_priv->irq_lock);

	WARN_ON(dev_priv->rps.pm_iir);
	WARN_ON(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
	dev_priv->rps.interrupts_enabled = true;
	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) |
		   dev_priv->pm_rps_events);
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);

	spin_unlock_irq(&dev_priv->irq_lock);
}

u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask)
{
	/*
	 * SNB and IVB can (and VLV and CHV may) hard hang on a looping
	 * batchbuffer if GEN6_PM_UP_EI_EXPIRED is masked.
	 *
	 * TODO: verify if this can be reproduced on VLV,CHV.
	 */
	if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv))
		mask &= ~GEN6_PM_RP_UP_EI_EXPIRED;

	if (INTEL_INFO(dev_priv)->gen >= 8)
		mask &= ~GEN8_PMINTR_REDIRECT_TO_NON_DISP;

	return mask;
}

void gen6_disable_rps_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	spin_lock_irq(&dev_priv->irq_lock);
	dev_priv->rps.interrupts_enabled = false;
	spin_unlock_irq(&dev_priv->irq_lock);

	cancel_work_sync(&dev_priv->rps.work);

	spin_lock_irq(&dev_priv->irq_lock);

	I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0));

	__gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) &
		   ~dev_priv->pm_rps_events);

	spin_unlock_irq(&dev_priv->irq_lock);

	synchronize_irq(dev->irq);
}

/**
 * bdw_update_port_irq - update DE port interrupt
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
				uint32_t interrupt_mask,
				uint32_t enabled_irq_mask)
{
	uint32_t new_val;
	uint32_t old_val;

	assert_spin_locked(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	old_val = I915_READ(GEN8_DE_PORT_IMR);

	new_val = old_val;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != old_val) {
		I915_WRITE(GEN8_DE_PORT_IMR, new_val);
		POSTING_READ(GEN8_DE_PORT_IMR);
	}
}

/**
 * bdw_update_pipe_irq - update DE pipe interrupt
 * @dev_priv: driver private
 * @pipe: pipe whose interrupt to update
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
			 enum pipe pipe,
			 uint32_t interrupt_mask,
			 uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	assert_spin_locked(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->de_irq_mask[pipe];
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->de_irq_mask[pipe]) {
		dev_priv->de_irq_mask[pipe] = new_val;
		I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
		POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
	}
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
				  uint32_t interrupt_mask,
				  uint32_t enabled_irq_mask)
{
	uint32_t sdeimr = I915_READ(SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}

static void
__i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		       u32 enable_mask, u32
		       status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == enable_mask)
		return;

	dev_priv->pipestat_irq_mask[pipe] |= status_mask;

	/* Enable the interrupt, clear any pending status */
	pipestat |= enable_mask | status_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

static void
__i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
			u32 enable_mask, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == 0)
		return;

	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;

	pipestat &= ~enable_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
{
	u32 enable_mask = status_mask << 16;

	/*
	 * On pipe A we don't support the PSR interrupt yet,
	 * on pipe B and C the same bit MBZ.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
		return 0;
	/*
	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
	 * A the same bit is for perf counters which we don't use either.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
		return 0;

	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

	return enable_mask;
}

void
i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		     u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

void
i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		      u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 * @dev: drm device
 */
static void i915_enable_asle_pipestat(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
		return;

	spin_lock_irq(&dev_priv->irq_lock);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_STATUS);

	spin_unlock_irq(&dev_priv->irq_lock);
}

/*
 * This timing diagram depicts the video signal in and
 * around the vertical blanking period.
 *
 * Assumptions about the fictitious mode used in this example:
 *	vblank_start >= 3
 *	vsync_start = vblank_start + 1
 *	vsync_end = vblank_start + 2
 *	vtotal = vblank_start + 3
 *
 *           start of vblank:
 *           latch double buffered registers
 *           increment frame counter (ctg+)
 *           generate start of vblank interrupt (gen4+)
 *           |
 *           |          frame start:
 *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
 *           |          may be shifted forward 1-3 extra lines via PIPECONF
 *           |          |
 *           |          |  start of vsync:
 *           |          |  generate vsync interrupt
 *           |          |  |
 * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
 *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
 * ----va---> <-----------------vb--------------------> <--------va-------------
 *       |          |       <----vs----->                     |
 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
 *       |          |                                         |
 *       last visible pixel                                   first visible pixel
 *                  |                                         increment frame counter (gen3/4)
 *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
 *
 * x  = horizontal active
 * _  = horizontal blanking
 * hs = horizontal sync
 * va = vertical active
 * vb = vertical blanking
 * vs = vertical sync
 * vbs = vblank_start (number)
 *
 * Summary:
 * - most events happen at the start of horizontal sync
 * - frame start happens at the start of horizontal blank, 1-4 lines
 *   (depending on PIPECONF settings) after the start of vblank
 * - gen3/4 pixel and frame counter are synchronized with the start
 *   of horizontal active on the first line of vertical active
 */

static u32 i8xx_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
	/* Gen2 doesn't have a hardware frame counter */
	return 0;
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	i915_reg_t high_frame, low_frame;
	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
	const struct drm_display_mode *mode = &intel_crtc->base.hwmode;

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vbl_start = mode->crtc_vblank_start;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vbl_start = DIV_ROUND_UP(vbl_start, 2);

	/* Convert to pixel count */
	vbl_start *= htotal;

	/* Start of vblank event occurs at start of hsync */
	vbl_start -= htotal - hsync_start;

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low   = I915_READ(low_frame);
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * The frame counter increments at beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}

static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
}

/* I915_READ_FW, only for fast reads of display block, no need for forcewake etc.
 */
static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct drm_display_mode *mode = &crtc->base.hwmode;
	enum pipe pipe = crtc->pipe;
	int position, vtotal;

	vtotal = mode->crtc_vtotal;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vtotal /= 2;

	if (IS_GEN2(dev))
		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
	else
		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

	/*
	 * On HSW, the DSL reg (0x70000) appears to return 0 if we
	 * read it just before the start of vblank. So try it again
	 * so we don't accidentally end up spanning a vblank frame
	 * increment, causing the pipe_update_end() code to squawk at us.
	 *
	 * The nature of this problem means we can't simply check the ISR
	 * bit and return the vblank start value; nor can we use the scanline
	 * debug register in the transcoder as it appears to have the same
	 * problem. We may need to extend this to include other platforms,
	 * but so far testing only shows the problem on HSW.
	 */
	if (HAS_DDI(dev) && !position) {
		int i, temp;

		for (i = 0; i < 100; i++) {
			udelay(1);
			temp = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) &
				DSL_LINEMASK_GEN3;
			if (temp != position) {
				position = temp;
				break;
			}
		}
	}

	/*
	 * See update_scanline_offset() for the details on the
	 * scanline_offset adjustment.
	 */
	return (position + crtc->scanline_offset) % vtotal;
}

static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
				    unsigned int flags, int *vpos, int *hpos,
				    ktime_t *stime, ktime_t *etime,
				    const struct drm_display_mode *mode)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int position;
	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;
	unsigned long irqflags;

	if (WARN_ON(!mode->crtc_clock)) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

	ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = __intel_get_crtc_scanline(intel_crtc);
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (I915_READ_FW(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;

		/*
		 * In interlaced modes, the pixel counter counts all pixels,
		 * so one field will have htotal more pixels. In order to avoid
		 * the reported position from jumping backwards when the pixel
		 * counter is beyond the length of the shorter field, just
		 * clamp the position to the length of the shorter field. This
		 * matches how the scanline counter based position works since
		 * the scanline counter doesn't count the two half lines.
		 */
		if (position >= vtotal)
			position = vtotal - 1;

		/*
		 * Start of vblank interrupt is triggered at start of hsync,
		 * just prior to the first active line of vblank. However we
		 * consider lines to start at the leading edge of horizontal
		 * active. So, should we get here before we've crossed into
		 * the horizontal active of the first line in vblank, we would
		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
		 * always add htotal-hsync_start to the current pixel position.
		 */
		position = (position + htotal - hsync_start) % vtotal;
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	in_vbl = position >= vbl_start && position < vbl_end;

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		*vpos = position;
		*hpos = 0;
	} else {
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* In vblank?
	 */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_IN_VBLANK;

	return ret;
}

int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	unsigned long irqflags;
	int position;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	position = __intel_get_crtc_scanline(crtc);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	return position;
}

static int i915_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,
				     int *max_error,
				     struct timeval *vblank_time,
				     unsigned flags)
{
	struct drm_crtc *crtc;

	if (pipe >= INTEL_INFO(dev)->num_pipes) {
		DRM_ERROR("Invalid crtc %u\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %u\n", pipe);
		return -EINVAL;
	}

	if (!crtc->hwmode.crtc_clock) {
		DRM_DEBUG_KMS("crtc %u is disabled\n", pipe);
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags,
						     &crtc->hwmode);
}

static void ironlake_rps_change_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;

	spin_lock(&mchdev_lock);

	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	spin_unlock(&mchdev_lock);

	return;
}

static void notify_ring(struct intel_engine_cs *engine)
{
	if (!intel_engine_initialized(engine))
		return;

	trace_i915_gem_request_notify(engine);
	engine->user_interrupts++;

	wake_up_all(&engine->irq_queue);
}

static void vlv_c0_read(struct drm_i915_private *dev_priv,
			struct intel_rps_ei *ei)
{
	ei->cz_clock = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
	ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT);
	ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
}

static bool vlv_c0_above(struct drm_i915_private *dev_priv,
			 const struct intel_rps_ei *old,
			 const struct intel_rps_ei *now,
			 int threshold)
{
	u64 time, c0;
	unsigned int mul = 100;

	if (old->cz_clock == 0)
		return false;

	if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
		mul <<= 8;

	time = now->cz_clock - old->cz_clock;
	time *= threshold * dev_priv->czclk_freq;

	/* Workload can be split between render + media, e.g.
	 * SwapBuffers being blitted in X after being rendered in mesa. To
	 * account for this we need to combine both engines into our activity
	 * counter.
	 */
	c0 = now->render_c0 - old->render_c0;
	c0 += now->media_c0 - old->media_c0;
	c0 *= mul * VLV_CZ_CLOCK_TO_MILLI_SEC;

	return c0 >= time;
}

void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
{
	vlv_c0_read(dev_priv, &dev_priv->rps.down_ei);
	dev_priv->rps.up_ei = dev_priv->rps.down_ei;
}

static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	struct intel_rps_ei now;
	u32 events = 0;

	if ((pm_iir & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED)) == 0)
		return 0;

	vlv_c0_read(dev_priv, &now);
	if (now.cz_clock == 0)
		return 0;

	if (pm_iir & GEN6_PM_RP_DOWN_EI_EXPIRED) {
		if (!vlv_c0_above(dev_priv,
				  &dev_priv->rps.down_ei, &now,
				  dev_priv->rps.down_threshold))
			events |= GEN6_PM_RP_DOWN_THRESHOLD;
		dev_priv->rps.down_ei = now;
	}

	if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
		if (vlv_c0_above(dev_priv,
				 &dev_priv->rps.up_ei, &now,
				 dev_priv->rps.up_threshold))
			events |= GEN6_PM_RP_UP_THRESHOLD;
		dev_priv->rps.up_ei = now;
	}

	return events;
}

static bool any_waiters(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;

	for_each_engine(engine, dev_priv)
		if (engine->irq_refcount)
			return true;

	return false;
}

static void gen6_pm_rps_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, rps.work);
	bool client_boost;
	int new_delay, adj, min, max;
	u32 pm_iir;

	spin_lock_irq(&dev_priv->irq_lock);
	/* Speed up work cancelation during disabling rps interrupts. */
	if (!dev_priv->rps.interrupts_enabled) {
		spin_unlock_irq(&dev_priv->irq_lock);
		return;
	}

	/*
	 * The RPS work is synced during runtime suspend, we don't require a
	 * wakeref. TODO: instead of disabling the asserts make sure that we
	 * always hold an RPM reference while the work is running.
	 */
	DISABLE_RPM_WAKEREF_ASSERTS(dev_priv);

	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	client_boost = dev_priv->rps.client_boost;
	dev_priv->rps.client_boost = false;
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Make sure we didn't queue anything we're not going to process. */
	WARN_ON(pm_iir & ~dev_priv->pm_rps_events);

	if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
		goto out;

	mutex_lock(&dev_priv->rps.hw_lock);

	pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);

	adj = dev_priv->rps.last_adj;
	new_delay = dev_priv->rps.cur_freq;
	min = dev_priv->rps.min_freq_softlimit;
	max = dev_priv->rps.max_freq_softlimit;

	if (client_boost) {
		new_delay = dev_priv->rps.max_freq_softlimit;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else /* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;
		/*
		 * For better performance, jump directly
		 * to RPe if we're below it.
		 */
		if (new_delay < dev_priv->rps.efficient_freq - adj) {
			new_delay = dev_priv->rps.efficient_freq;
			adj = 0;
		}
	} else if (any_waiters(dev_priv)) {
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
		if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;
		else
			new_delay = dev_priv->rps.min_freq_softlimit;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
		if (adj < 0)
			adj *= 2;
		else /* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;
	} else { /* unknown event */
		adj = 0;
	}

	dev_priv->rps.last_adj = adj;

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	new_delay += adj;
	new_delay = clamp_t(int, new_delay, min, max);

	intel_set_rps(dev_priv->dev, new_delay);

	mutex_unlock(&dev_priv->rps.hw_lock);
out:
	ENABLE_RPM_WAKEREF_ASSERTS(dev_priv);
}


/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * the same row is more likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	uint32_t misccpctl;
	uint8_t slice = 0;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
1207 */ 1208 mutex_lock(&dev_priv->dev->struct_mutex); 1209 1210 /* If we've screwed up tracking, just let the interrupt fire again */ 1211 if (WARN_ON(!dev_priv->l3_parity.which_slice)) 1212 goto out; 1213 1214 misccpctl = I915_READ(GEN7_MISCCPCTL); 1215 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE); 1216 POSTING_READ(GEN7_MISCCPCTL); 1217 1218 while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) { 1219 i915_reg_t reg; 1220 1221 slice--; 1222 if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv))) 1223 break; 1224 1225 dev_priv->l3_parity.which_slice &= ~(1<<slice); 1226 1227 reg = GEN7_L3CDERRST1(slice); 1228 1229 error_status = I915_READ(reg); 1230 row = GEN7_PARITY_ERROR_ROW(error_status); 1231 bank = GEN7_PARITY_ERROR_BANK(error_status); 1232 subbank = GEN7_PARITY_ERROR_SUBBANK(error_status); 1233 1234 I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE); 1235 POSTING_READ(reg); 1236 1237 parity_event[0] = I915_L3_PARITY_UEVENT "=1"; 1238 parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row); 1239 parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank); 1240 parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank); 1241 parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice); 1242 parity_event[5] = NULL; 1243 1244 kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj, 1245 KOBJ_CHANGE, parity_event); 1246 1247 DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n", 1248 slice, row, bank, subbank); 1249 1250 kfree(parity_event[4]); 1251 kfree(parity_event[3]); 1252 kfree(parity_event[2]); 1253 kfree(parity_event[1]); 1254 } 1255 1256 I915_WRITE(GEN7_MISCCPCTL, misccpctl); 1257 1258 out: 1259 WARN_ON(dev_priv->l3_parity.which_slice); 1260 spin_lock_irq(&dev_priv->irq_lock); 1261 gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv)); 1262 spin_unlock_irq(&dev_priv->irq_lock); 1263 1264 mutex_unlock(&dev_priv->dev->struct_mutex); 1265 } 1266 1267 static void ivybridge_parity_error_irq_handler(struct drm_i915_private *dev_priv, 1268 u32 iir) 1269 { 1270 if (!HAS_L3_DPF(dev_priv)) 1271 return; 1272 1273 spin_lock(&dev_priv->irq_lock); 1274 gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv)); 1275 spin_unlock(&dev_priv->irq_lock); 1276 1277 iir &= GT_PARITY_ERROR(dev_priv); 1278 if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1) 1279 dev_priv->l3_parity.which_slice |= 1 << 1; 1280 1281 if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT) 1282 dev_priv->l3_parity.which_slice |= 1 << 0; 1283 1284 queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work); 1285 } 1286 1287 static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv, 1288 u32 gt_iir) 1289 { 1290 if (gt_iir & 1291 (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT)) 1292 notify_ring(&dev_priv->engine[RCS]); 1293 if (gt_iir & ILK_BSD_USER_INTERRUPT) 1294 notify_ring(&dev_priv->engine[VCS]); 1295 } 1296 1297 static void snb_gt_irq_handler(struct drm_i915_private *dev_priv, 1298 u32 gt_iir) 1299 { 1300 1301 if (gt_iir & 1302 (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT)) 1303 notify_ring(&dev_priv->engine[RCS]); 1304 if (gt_iir & GT_BSD_USER_INTERRUPT) 1305 notify_ring(&dev_priv->engine[VCS]); 1306 if (gt_iir & GT_BLT_USER_INTERRUPT) 1307 notify_ring(&dev_priv->engine[BCS]); 1308 1309 if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT | 1310 GT_BSD_CS_ERROR_INTERRUPT | 1311 GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) 1312 DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir); 1313 1314 if (gt_iir & GT_PARITY_ERROR(dev_priv)) 1315 
		ivybridge_parity_error_irq_handler(dev_priv, gt_iir);
}

static __always_inline void
gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir, int test_shift)
{
	if (iir & (GT_RENDER_USER_INTERRUPT << test_shift))
		notify_ring(engine);
	if (iir & (GT_CONTEXT_SWITCH_INTERRUPT << test_shift))
		tasklet_schedule(&engine->irq_tasklet);
}

static irqreturn_t gen8_gt_irq_ack(struct drm_i915_private *dev_priv,
				   u32 master_ctl,
				   u32 gt_iir[4])
{
	irqreturn_t ret = IRQ_NONE;

	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
		gt_iir[0] = I915_READ_FW(GEN8_GT_IIR(0));
		if (gt_iir[0]) {
			I915_WRITE_FW(GEN8_GT_IIR(0), gt_iir[0]);
			ret = IRQ_HANDLED;
		} else
			DRM_ERROR("The master control interrupt lied (GT0)!\n");
	}

	if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
		gt_iir[1] = I915_READ_FW(GEN8_GT_IIR(1));
		if (gt_iir[1]) {
			I915_WRITE_FW(GEN8_GT_IIR(1), gt_iir[1]);
			ret = IRQ_HANDLED;
		} else
			DRM_ERROR("The master control interrupt lied (GT1)!\n");
	}

	if (master_ctl & GEN8_GT_VECS_IRQ) {
		gt_iir[3] = I915_READ_FW(GEN8_GT_IIR(3));
		if (gt_iir[3]) {
			I915_WRITE_FW(GEN8_GT_IIR(3), gt_iir[3]);
			ret = IRQ_HANDLED;
		} else
			DRM_ERROR("The master control interrupt lied (GT3)!\n");
	}

	if (master_ctl & GEN8_GT_PM_IRQ) {
		gt_iir[2] = I915_READ_FW(GEN8_GT_IIR(2));
		if (gt_iir[2] & dev_priv->pm_rps_events) {
			I915_WRITE_FW(GEN8_GT_IIR(2),
				      gt_iir[2] & dev_priv->pm_rps_events);
			ret = IRQ_HANDLED;
		} else
			DRM_ERROR("The master control interrupt lied (PM)!\n");
	}

	return ret;
}

static void gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
				u32 gt_iir[4])
{
	if (gt_iir[0]) {
		gen8_cs_irq_handler(&dev_priv->engine[RCS],
				    gt_iir[0], GEN8_RCS_IRQ_SHIFT);
		gen8_cs_irq_handler(&dev_priv->engine[BCS],
				    gt_iir[0], GEN8_BCS_IRQ_SHIFT);
	}

	if (gt_iir[1]) {
		gen8_cs_irq_handler(&dev_priv->engine[VCS],
				    gt_iir[1], GEN8_VCS1_IRQ_SHIFT);
		gen8_cs_irq_handler(&dev_priv->engine[VCS2],
				    gt_iir[1], GEN8_VCS2_IRQ_SHIFT);
	}

	if (gt_iir[3])
		gen8_cs_irq_handler(&dev_priv->engine[VECS],
				    gt_iir[3], GEN8_VECS_IRQ_SHIFT);

	if (gt_iir[2] & dev_priv->pm_rps_events)
		gen6_rps_irq_handler(dev_priv, gt_iir[2]);
}

static bool bxt_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_A:
		return val & PORTA_HOTPLUG_LONG_DETECT;
	case PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool spt_port_hotplug2_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_E:
		return val & PORTE_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool spt_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_A:
		return val & PORTA_HOTPLUG_LONG_DETECT;
	case PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	case PORT_D:
		return val & PORTD_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool ilk_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_A:
		return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool pch_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	case PORT_D:
		return val & PORTD_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool i9xx_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_B:
		return val & PORTB_HOTPLUG_INT_LONG_PULSE;
	case PORT_C:
		return val & PORTC_HOTPLUG_INT_LONG_PULSE;
	case PORT_D:
		return val & PORTD_HOTPLUG_INT_LONG_PULSE;
	default:
		return false;
	}
}

/*
 * Get a bit mask of pins that have triggered, and which ones may be long.
 * This can be called multiple times with the same masks to accumulate
 * hotplug detection results from several registers.
 *
 * Note that the caller is expected to zero out the masks initially.
 */
static void intel_get_hpd_pins(u32 *pin_mask, u32 *long_mask,
			       u32 hotplug_trigger, u32 dig_hotplug_reg,
			       const u32 hpd[HPD_NUM_PINS],
			       bool long_pulse_detect(enum port port, u32 val))
{
	enum port port;
	int i;

	for_each_hpd_pin(i) {
		if ((hpd[i] & hotplug_trigger) == 0)
			continue;

		*pin_mask |= BIT(i);

		if (!intel_hpd_pin_to_port(i, &port))
			continue;

		if (long_pulse_detect(port, dig_hotplug_reg))
			*long_mask |= BIT(i);
	}

	DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x\n",
			 hotplug_trigger, dig_hotplug_reg, *pin_mask);

}

static void gmbus_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

static void dp_aux_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

#if defined(CONFIG_DEBUG_FS)
static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
					 uint32_t crc0, uint32_t crc1,
					 uint32_t crc2, uint32_t crc3,
					 uint32_t crc4)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
	struct intel_pipe_crc_entry *entry;
	int head, tail;

	spin_lock(&pipe_crc->lock);

	if (!pipe_crc->entries) {
		spin_unlock(&pipe_crc->lock);
		DRM_DEBUG_KMS("spurious interrupt\n");
		return;
	}

	head = pipe_crc->head;
	tail = pipe_crc->tail;

	if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
		spin_unlock(&pipe_crc->lock);
		DRM_ERROR("CRC buffer overflowing\n");
		return;
	}

	entry = &pipe_crc->entries[head];

	entry->frame = dev->driver->get_vblank_counter(dev, pipe);
	entry->crc[0] = crc0;
	entry->crc[1] = crc1;
	entry->crc[2] = crc2;
	entry->crc[3] = crc3;
	entry->crc[4] = crc4;

	head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
	pipe_crc->head = head;

	spin_unlock(&pipe_crc->lock);

	wake_up_interruptible(&pipe_crc->wq);
}
#else
static inline void
display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
			     uint32_t crc0, uint32_t crc1,
			     uint32_t crc2, uint32_t crc3,
			     uint32_t crc4) {}
#endif


static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     0, 0, 0, 0);
}

static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
}

static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t res1, res2;

	if (INTEL_INFO(dev)->gen >= 3)
		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
	else
		res1 = 0;

	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
	else
		res2 = 0;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_RED(pipe)),
				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
				     res1, res2);
}

/* The RPS events need forcewake, so we add them to a work queue and mask their
 * IMR bits until the work is done. Other interrupts can be processed without
 * the work queue. */
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	if (pm_iir & dev_priv->pm_rps_events) {
		spin_lock(&dev_priv->irq_lock);
		gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
		if (dev_priv->rps.interrupts_enabled) {
			dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
			queue_work(dev_priv->wq, &dev_priv->rps.work);
		}
		spin_unlock(&dev_priv->irq_lock);
	}

	if (INTEL_INFO(dev_priv)->gen >= 8)
		return;

	if (HAS_VEBOX(dev_priv)) {
		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
			notify_ring(&dev_priv->engine[VECS]);

		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
	}
}

static bool intel_pipe_handle_vblank(struct drm_device *dev, enum pipe pipe)
{
	if (!drm_handle_vblank(dev, pipe))
		return false;

	return true;
}

static void valleyview_pipestat_irq_ack(struct drm_device *dev, u32 iir,
					u32 pipe_stats[I915_MAX_PIPES])
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	spin_lock(&dev_priv->irq_lock);

	if (!dev_priv->display_irqs_enabled) {
		spin_unlock(&dev_priv->irq_lock);
		return;
	}

	for_each_pipe(dev_priv, pipe) {
		i915_reg_t reg;
		u32 mask, iir_bit = 0;

		/*
		 * PIPESTAT bits get signalled even when the interrupt is
		 * disabled with the mask bits, and some of the status bits do
		 * not generate interrupts at all (like the underrun bit). Hence
		 * we need to be careful that we only handle what we want to
		 * handle.
		 */

		/* fifo underruns are filtered in the underrun handler.
		 */
		mask = PIPE_FIFO_UNDERRUN_STATUS;

		switch (pipe) {
		case PIPE_A:
			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
			break;
		case PIPE_B:
			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
			break;
		case PIPE_C:
			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
			break;
		}
		if (iir & iir_bit)
			mask |= dev_priv->pipestat_irq_mask[pipe];

		if (!mask)
			continue;

		reg = PIPESTAT(pipe);
		mask |= PIPESTAT_INT_ENABLE_MASK;
		pipe_stats[pipe] = I915_READ(reg) & mask;

		/*
		 * Clear the PIPE*STAT regs before the IIR
		 */
		if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS |
					PIPESTAT_INT_STATUS_MASK))
			I915_WRITE(reg, pipe_stats[pipe]);
	}
	spin_unlock(&dev_priv->irq_lock);
}

static void valleyview_pipestat_irq_handler(struct drm_device *dev,
					    u32 pipe_stats[I915_MAX_PIPES])
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
		    intel_pipe_handle_vblank(dev, pipe))
			intel_check_page_flip(dev, pipe);

		if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) {
			intel_prepare_page_flip(dev, pipe);
			intel_finish_page_flip(dev, pipe);
		}

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		gmbus_irq_handler(dev);
}

static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
{
	u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

	if (hotplug_status)
		I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);

	return hotplug_status;
}

static void i9xx_hpd_irq_handler(struct drm_device *dev,
				 u32 hotplug_status)
{
	u32 pin_mask = 0, long_mask = 0;

	if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;

		if (hotplug_trigger) {
			intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
					   hotplug_trigger, hpd_status_g4x,
					   i9xx_port_hotplug_long_detect);

			intel_hpd_irq_handler(dev, pin_mask, long_mask);
		}

		if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
			dp_aux_irq_handler(dev);
	} else {
		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

		if (hotplug_trigger) {
			intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
					   hotplug_trigger, hpd_status_i915,
					   i9xx_port_hotplug_long_detect);
			intel_hpd_irq_handler(dev, pin_mask, long_mask);
		}
	}
}

static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	do {
		u32 iir, gt_iir, pm_iir;
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 hotplug_status = 0;
		u32 ier = 0;

		gt_iir = I915_READ(GTIIR);
		pm_iir = I915_READ(GEN6_PMIIR);
		iir = I915_READ(VLV_IIR);

		if (gt_iir == 0 && pm_iir == 0 &&
		    iir == 0)
			break;

		ret = IRQ_HANDLED;

		/*
		 * Theory on interrupt generation, based on empirical evidence:
		 *
		 * x = ((VLV_IIR & VLV_IER) ||
		 *      (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) &&
		 *       (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE)));
		 *
		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
		 * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to
		 * guarantee the CPU interrupt will be raised again even if we
		 * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR
		 * bits this time around.
		 */
		I915_WRITE(VLV_MASTER_IER, 0);
		ier = I915_READ(VLV_IER);
		I915_WRITE(VLV_IER, 0);

		if (gt_iir)
			I915_WRITE(GTIIR, gt_iir);
		if (pm_iir)
			I915_WRITE(GEN6_PMIIR, pm_iir);

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		valleyview_pipestat_irq_ack(dev, iir, pipe_stats);

		/*
		 * VLV_IIR is single buffered, and reflects the level
		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
		 */
		if (iir)
			I915_WRITE(VLV_IIR, iir);

		I915_WRITE(VLV_IER, ier);
		I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
		POSTING_READ(VLV_MASTER_IER);

		if (gt_iir)
			snb_gt_irq_handler(dev_priv, gt_iir);
		if (pm_iir)
			gen6_rps_irq_handler(dev_priv, pm_iir);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev, hotplug_status);

		valleyview_pipestat_irq_handler(dev, pipe_stats);
	} while (0);

	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}

static irqreturn_t cherryview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	do {
		u32 master_ctl, iir;
		u32 gt_iir[4] = {};
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 hotplug_status = 0;
		u32 ier = 0;

		master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
		iir = I915_READ(VLV_IIR);

		if (master_ctl == 0 && iir == 0)
			break;

		ret = IRQ_HANDLED;

		/*
		 * Theory on interrupt generation, based on empirical evidence:
		 *
		 * x = ((VLV_IIR & VLV_IER) ||
		 *      ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) &&
		 *       (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL)));
		 *
		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
		 * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to
		 * guarantee the CPU interrupt will be raised again even if we
		 * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL
		 * bits this time around.
		 */
		I915_WRITE(GEN8_MASTER_IRQ, 0);
		ier = I915_READ(VLV_IER);
		I915_WRITE(VLV_IER, 0);

		gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		valleyview_pipestat_irq_ack(dev, iir, pipe_stats);

		/*
		 * VLV_IIR is single buffered, and reflects the level
		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
1919 */ 1920 if (iir) 1921 I915_WRITE(VLV_IIR, iir); 1922 1923 I915_WRITE(VLV_IER, ier); 1924 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 1925 POSTING_READ(GEN8_MASTER_IRQ); 1926 1927 gen8_gt_irq_handler(dev_priv, gt_iir); 1928 1929 if (hotplug_status) 1930 i9xx_hpd_irq_handler(dev, hotplug_status); 1931 1932 valleyview_pipestat_irq_handler(dev, pipe_stats); 1933 } while (0); 1934 1935 enable_rpm_wakeref_asserts(dev_priv); 1936 1937 return ret; 1938 } 1939 1940 static void ibx_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger, 1941 const u32 hpd[HPD_NUM_PINS]) 1942 { 1943 struct drm_i915_private *dev_priv = to_i915(dev); 1944 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; 1945 1946 /* 1947 * Somehow the PCH doesn't seem to really ack the interrupt to the CPU 1948 * unless we touch the hotplug register, even if hotplug_trigger is 1949 * zero. Not acking leads to "The master control interrupt lied (SDE)!" 1950 * errors. 1951 */ 1952 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); 1953 if (!hotplug_trigger) { 1954 u32 mask = PORTA_HOTPLUG_STATUS_MASK | 1955 PORTD_HOTPLUG_STATUS_MASK | 1956 PORTC_HOTPLUG_STATUS_MASK | 1957 PORTB_HOTPLUG_STATUS_MASK; 1958 dig_hotplug_reg &= ~mask; 1959 } 1960 1961 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); 1962 if (!hotplug_trigger) 1963 return; 1964 1965 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, 1966 dig_hotplug_reg, hpd, 1967 pch_port_hotplug_long_detect); 1968 1969 intel_hpd_irq_handler(dev, pin_mask, long_mask); 1970 } 1971 1972 static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir) 1973 { 1974 struct drm_i915_private *dev_priv = dev->dev_private; 1975 int pipe; 1976 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK; 1977 1978 ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx); 1979 1980 if (pch_iir & SDE_AUDIO_POWER_MASK) { 1981 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >> 1982 SDE_AUDIO_POWER_SHIFT); 1983 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", 1984 port_name(port)); 1985 } 1986 1987 if (pch_iir & SDE_AUX_MASK) 1988 dp_aux_irq_handler(dev); 1989 1990 if (pch_iir & SDE_GMBUS) 1991 gmbus_irq_handler(dev); 1992 1993 if (pch_iir & SDE_AUDIO_HDCP_MASK) 1994 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n"); 1995 1996 if (pch_iir & SDE_AUDIO_TRANS_MASK) 1997 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n"); 1998 1999 if (pch_iir & SDE_POISON) 2000 DRM_ERROR("PCH poison interrupt\n"); 2001 2002 if (pch_iir & SDE_FDI_MASK) 2003 for_each_pipe(dev_priv, pipe) 2004 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 2005 pipe_name(pipe), 2006 I915_READ(FDI_RX_IIR(pipe))); 2007 2008 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE)) 2009 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n"); 2010 2011 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR)) 2012 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n"); 2013 2014 if (pch_iir & SDE_TRANSA_FIFO_UNDER) 2015 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A); 2016 2017 if (pch_iir & SDE_TRANSB_FIFO_UNDER) 2018 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B); 2019 } 2020 2021 static void ivb_err_int_handler(struct drm_device *dev) 2022 { 2023 struct drm_i915_private *dev_priv = dev->dev_private; 2024 u32 err_int = I915_READ(GEN7_ERR_INT); 2025 enum pipe pipe; 2026 2027 if (err_int & ERR_INT_POISON) 2028 DRM_ERROR("Poison interrupt\n"); 2029 2030 for_each_pipe(dev_priv, pipe) { 2031 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) 2032 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2033 2034 if (err_int & 
ERR_INT_PIPE_CRC_DONE(pipe)) { 2035 if (IS_IVYBRIDGE(dev)) 2036 ivb_pipe_crc_irq_handler(dev, pipe); 2037 else 2038 hsw_pipe_crc_irq_handler(dev, pipe); 2039 } 2040 } 2041 2042 I915_WRITE(GEN7_ERR_INT, err_int); 2043 } 2044 2045 static void cpt_serr_int_handler(struct drm_device *dev) 2046 { 2047 struct drm_i915_private *dev_priv = dev->dev_private; 2048 u32 serr_int = I915_READ(SERR_INT); 2049 2050 if (serr_int & SERR_INT_POISON) 2051 DRM_ERROR("PCH poison interrupt\n"); 2052 2053 if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN) 2054 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A); 2055 2056 if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN) 2057 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B); 2058 2059 if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN) 2060 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_C); 2061 2062 I915_WRITE(SERR_INT, serr_int); 2063 } 2064 2065 static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir) 2066 { 2067 struct drm_i915_private *dev_priv = dev->dev_private; 2068 int pipe; 2069 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; 2070 2071 ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt); 2072 2073 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) { 2074 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> 2075 SDE_AUDIO_POWER_SHIFT_CPT); 2076 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n", 2077 port_name(port)); 2078 } 2079 2080 if (pch_iir & SDE_AUX_MASK_CPT) 2081 dp_aux_irq_handler(dev); 2082 2083 if (pch_iir & SDE_GMBUS_CPT) 2084 gmbus_irq_handler(dev); 2085 2086 if (pch_iir & SDE_AUDIO_CP_REQ_CPT) 2087 DRM_DEBUG_DRIVER("Audio CP request interrupt\n"); 2088 2089 if (pch_iir & SDE_AUDIO_CP_CHG_CPT) 2090 DRM_DEBUG_DRIVER("Audio CP change interrupt\n"); 2091 2092 if (pch_iir & SDE_FDI_MASK_CPT) 2093 for_each_pipe(dev_priv, pipe) 2094 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 2095 pipe_name(pipe), 2096 I915_READ(FDI_RX_IIR(pipe))); 2097 2098 if (pch_iir & SDE_ERROR_CPT) 2099 cpt_serr_int_handler(dev); 2100 } 2101 2102 static void spt_irq_handler(struct drm_device *dev, u32 pch_iir) 2103 { 2104 struct drm_i915_private *dev_priv = dev->dev_private; 2105 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT & 2106 ~SDE_PORTE_HOTPLUG_SPT; 2107 u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT; 2108 u32 pin_mask = 0, long_mask = 0; 2109 2110 if (hotplug_trigger) { 2111 u32 dig_hotplug_reg; 2112 2113 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); 2114 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); 2115 2116 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, 2117 dig_hotplug_reg, hpd_spt, 2118 spt_port_hotplug_long_detect); 2119 } 2120 2121 if (hotplug2_trigger) { 2122 u32 dig_hotplug_reg; 2123 2124 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2); 2125 I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg); 2126 2127 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug2_trigger, 2128 dig_hotplug_reg, hpd_spt, 2129 spt_port_hotplug2_long_detect); 2130 } 2131 2132 if (pin_mask) 2133 intel_hpd_irq_handler(dev, pin_mask, long_mask); 2134 2135 if (pch_iir & SDE_GMBUS_CPT) 2136 gmbus_irq_handler(dev); 2137 } 2138 2139 static void ilk_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger, 2140 const u32 hpd[HPD_NUM_PINS]) 2141 { 2142 struct drm_i915_private *dev_priv = to_i915(dev); 2143 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; 2144 2145 dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL); 2146 I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg); 2147 2148 intel_get_hpd_pins(&pin_mask, &long_mask, 
hotplug_trigger, 2149 dig_hotplug_reg, hpd, 2150 ilk_port_hotplug_long_detect); 2151 2152 intel_hpd_irq_handler(dev, pin_mask, long_mask); 2153 } 2154 2155 static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir) 2156 { 2157 struct drm_i915_private *dev_priv = dev->dev_private; 2158 enum pipe pipe; 2159 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG; 2160 2161 if (hotplug_trigger) 2162 ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_ilk); 2163 2164 if (de_iir & DE_AUX_CHANNEL_A) 2165 dp_aux_irq_handler(dev); 2166 2167 if (de_iir & DE_GSE) 2168 intel_opregion_asle_intr(dev); 2169 2170 if (de_iir & DE_POISON) 2171 DRM_ERROR("Poison interrupt\n"); 2172 2173 for_each_pipe(dev_priv, pipe) { 2174 if (de_iir & DE_PIPE_VBLANK(pipe) && 2175 intel_pipe_handle_vblank(dev, pipe)) 2176 intel_check_page_flip(dev, pipe); 2177 2178 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe)) 2179 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2180 2181 if (de_iir & DE_PIPE_CRC_DONE(pipe)) 2182 i9xx_pipe_crc_irq_handler(dev, pipe); 2183 2184 /* plane/pipes map 1:1 on ilk+ */ 2185 if (de_iir & DE_PLANE_FLIP_DONE(pipe)) { 2186 intel_prepare_page_flip(dev, pipe); 2187 intel_finish_page_flip_plane(dev, pipe); 2188 } 2189 } 2190 2191 /* check event from PCH */ 2192 if (de_iir & DE_PCH_EVENT) { 2193 u32 pch_iir = I915_READ(SDEIIR); 2194 2195 if (HAS_PCH_CPT(dev)) 2196 cpt_irq_handler(dev, pch_iir); 2197 else 2198 ibx_irq_handler(dev, pch_iir); 2199 2200 /* should clear PCH hotplug event before clear CPU irq */ 2201 I915_WRITE(SDEIIR, pch_iir); 2202 } 2203 2204 if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT) 2205 ironlake_rps_change_irq_handler(dev); 2206 } 2207 2208 static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir) 2209 { 2210 struct drm_i915_private *dev_priv = dev->dev_private; 2211 enum pipe pipe; 2212 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB; 2213 2214 if (hotplug_trigger) 2215 ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_ivb); 2216 2217 if (de_iir & DE_ERR_INT_IVB) 2218 ivb_err_int_handler(dev); 2219 2220 if (de_iir & DE_AUX_CHANNEL_A_IVB) 2221 dp_aux_irq_handler(dev); 2222 2223 if (de_iir & DE_GSE_IVB) 2224 intel_opregion_asle_intr(dev); 2225 2226 for_each_pipe(dev_priv, pipe) { 2227 if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) && 2228 intel_pipe_handle_vblank(dev, pipe)) 2229 intel_check_page_flip(dev, pipe); 2230 2231 /* plane/pipes map 1:1 on ilk+ */ 2232 if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) { 2233 intel_prepare_page_flip(dev, pipe); 2234 intel_finish_page_flip_plane(dev, pipe); 2235 } 2236 } 2237 2238 /* check event from PCH */ 2239 if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) { 2240 u32 pch_iir = I915_READ(SDEIIR); 2241 2242 cpt_irq_handler(dev, pch_iir); 2243 2244 /* clear PCH hotplug event before clear CPU irq */ 2245 I915_WRITE(SDEIIR, pch_iir); 2246 } 2247 } 2248 2249 /* 2250 * To handle irqs with the minimum potential races with fresh interrupts, we: 2251 * 1 - Disable Master Interrupt Control. 2252 * 2 - Find the source(s) of the interrupt. 2253 * 3 - Clear the Interrupt Identity bits (IIR). 2254 * 4 - Process the interrupt(s) that had bits set in the IIRs. 2255 * 5 - Re-enable Master Interrupt Control. 
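 *
 * This is the sequence ironlake_irq_handler() below follows for the CPU
 * interrupt registers (GTIIR, DEIIR, GEN6_PMIIR); the PCH (SDEIIR) bits are
 * picked up from the DE_PCH_EVENT path inside the display irq handlers.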
2256 */ 2257 static irqreturn_t ironlake_irq_handler(int irq, void *arg) 2258 { 2259 struct drm_device *dev = arg; 2260 struct drm_i915_private *dev_priv = dev->dev_private; 2261 u32 de_iir, gt_iir, de_ier, sde_ier = 0; 2262 irqreturn_t ret = IRQ_NONE; 2263 2264 if (!intel_irqs_enabled(dev_priv)) 2265 return IRQ_NONE; 2266 2267 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2268 disable_rpm_wakeref_asserts(dev_priv); 2269 2270 /* disable master interrupt before clearing iir */ 2271 de_ier = I915_READ(DEIER); 2272 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); 2273 POSTING_READ(DEIER); 2274 2275 /* Disable south interrupts. We'll only write to SDEIIR once, so further 2276 * interrupts will will be stored on its back queue, and then we'll be 2277 * able to process them after we restore SDEIER (as soon as we restore 2278 * it, we'll get an interrupt if SDEIIR still has something to process 2279 * due to its back queue). */ 2280 if (!HAS_PCH_NOP(dev)) { 2281 sde_ier = I915_READ(SDEIER); 2282 I915_WRITE(SDEIER, 0); 2283 POSTING_READ(SDEIER); 2284 } 2285 2286 /* Find, clear, then process each source of interrupt */ 2287 2288 gt_iir = I915_READ(GTIIR); 2289 if (gt_iir) { 2290 I915_WRITE(GTIIR, gt_iir); 2291 ret = IRQ_HANDLED; 2292 if (INTEL_INFO(dev)->gen >= 6) 2293 snb_gt_irq_handler(dev_priv, gt_iir); 2294 else 2295 ilk_gt_irq_handler(dev_priv, gt_iir); 2296 } 2297 2298 de_iir = I915_READ(DEIIR); 2299 if (de_iir) { 2300 I915_WRITE(DEIIR, de_iir); 2301 ret = IRQ_HANDLED; 2302 if (INTEL_INFO(dev)->gen >= 7) 2303 ivb_display_irq_handler(dev, de_iir); 2304 else 2305 ilk_display_irq_handler(dev, de_iir); 2306 } 2307 2308 if (INTEL_INFO(dev)->gen >= 6) { 2309 u32 pm_iir = I915_READ(GEN6_PMIIR); 2310 if (pm_iir) { 2311 I915_WRITE(GEN6_PMIIR, pm_iir); 2312 ret = IRQ_HANDLED; 2313 gen6_rps_irq_handler(dev_priv, pm_iir); 2314 } 2315 } 2316 2317 I915_WRITE(DEIER, de_ier); 2318 POSTING_READ(DEIER); 2319 if (!HAS_PCH_NOP(dev)) { 2320 I915_WRITE(SDEIER, sde_ier); 2321 POSTING_READ(SDEIER); 2322 } 2323 2324 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2325 enable_rpm_wakeref_asserts(dev_priv); 2326 2327 return ret; 2328 } 2329 2330 static void bxt_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger, 2331 const u32 hpd[HPD_NUM_PINS]) 2332 { 2333 struct drm_i915_private *dev_priv = to_i915(dev); 2334 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; 2335 2336 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); 2337 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); 2338 2339 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, 2340 dig_hotplug_reg, hpd, 2341 bxt_port_hotplug_long_detect); 2342 2343 intel_hpd_irq_handler(dev, pin_mask, long_mask); 2344 } 2345 2346 static irqreturn_t 2347 gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) 2348 { 2349 struct drm_device *dev = dev_priv->dev; 2350 irqreturn_t ret = IRQ_NONE; 2351 u32 iir; 2352 enum pipe pipe; 2353 2354 if (master_ctl & GEN8_DE_MISC_IRQ) { 2355 iir = I915_READ(GEN8_DE_MISC_IIR); 2356 if (iir) { 2357 I915_WRITE(GEN8_DE_MISC_IIR, iir); 2358 ret = IRQ_HANDLED; 2359 if (iir & GEN8_DE_MISC_GSE) 2360 intel_opregion_asle_intr(dev); 2361 else 2362 DRM_ERROR("Unexpected DE Misc interrupt\n"); 2363 } 2364 else 2365 DRM_ERROR("The master control interrupt lied (DE MISC)!\n"); 2366 } 2367 2368 if (master_ctl & GEN8_DE_PORT_IRQ) { 2369 iir = I915_READ(GEN8_DE_PORT_IIR); 2370 if (iir) { 2371 u32 tmp_mask; 2372 bool found = false; 2373 2374 I915_WRITE(GEN8_DE_PORT_IIR, iir); 2375 
ret = IRQ_HANDLED;
2376
2377 tmp_mask = GEN8_AUX_CHANNEL_A;
2378 if (INTEL_INFO(dev_priv)->gen >= 9)
2379 tmp_mask |= GEN9_AUX_CHANNEL_B |
2380 GEN9_AUX_CHANNEL_C |
2381 GEN9_AUX_CHANNEL_D;
2382
2383 if (iir & tmp_mask) {
2384 dp_aux_irq_handler(dev);
2385 found = true;
2386 }
2387
2388 if (IS_BROXTON(dev_priv)) {
2389 tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK;
2390 if (tmp_mask) {
2391 bxt_hpd_irq_handler(dev, tmp_mask, hpd_bxt);
2392 found = true;
2393 }
2394 } else if (IS_BROADWELL(dev_priv)) {
2395 tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG;
2396 if (tmp_mask) {
2397 ilk_hpd_irq_handler(dev, tmp_mask, hpd_bdw);
2398 found = true;
2399 }
2400 }
2401
2402 if (IS_BROXTON(dev) && (iir & BXT_DE_PORT_GMBUS)) {
2403 gmbus_irq_handler(dev);
2404 found = true;
2405 }
2406
2407 if (!found)
2408 DRM_ERROR("Unexpected DE Port interrupt\n");
2409 }
2410 else
2411 DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
2412 }
2413
2414 for_each_pipe(dev_priv, pipe) {
2415 u32 flip_done, fault_errors;
2416
2417 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
2418 continue;
2419
2420 iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
2421 if (!iir) {
2422 DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
2423 continue;
2424 }
2425
2426 ret = IRQ_HANDLED;
2427 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir);
2428
2429 if (iir & GEN8_PIPE_VBLANK &&
2430 intel_pipe_handle_vblank(dev, pipe))
2431 intel_check_page_flip(dev, pipe);
2432
2433 flip_done = iir;
2434 if (INTEL_INFO(dev_priv)->gen >= 9)
2435 flip_done &= GEN9_PIPE_PLANE1_FLIP_DONE;
2436 else
2437 flip_done &= GEN8_PIPE_PRIMARY_FLIP_DONE;
2438
2439 if (flip_done) {
2440 intel_prepare_page_flip(dev, pipe);
2441 intel_finish_page_flip_plane(dev, pipe);
2442 }
2443
2444 if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
2445 hsw_pipe_crc_irq_handler(dev, pipe);
2446
2447 if (iir & GEN8_PIPE_FIFO_UNDERRUN)
2448 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2449
2450 fault_errors = iir;
2451 if (INTEL_INFO(dev_priv)->gen >= 9)
2452 fault_errors &= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
2453 else
2454 fault_errors &= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
2455
2456 if (fault_errors)
2457 DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
2458 pipe_name(pipe),
2459 fault_errors);
2460 }
2461
2462 if (HAS_PCH_SPLIT(dev) && !HAS_PCH_NOP(dev) &&
2463 master_ctl & GEN8_DE_PCH_IRQ) {
2464 /*
2465 * FIXME(BDW): Assume for now that the new interrupt handling
2466 * scheme also closed the SDE interrupt handling race we've seen
2467 * on older pch-split platforms. But this needs testing.
2468 */
2469 iir = I915_READ(SDEIIR);
2470 if (iir) {
2471 I915_WRITE(SDEIIR, iir);
2472 ret = IRQ_HANDLED;
2473
2474 if (HAS_PCH_SPT(dev_priv))
2475 spt_irq_handler(dev, iir);
2476 else
2477 cpt_irq_handler(dev, iir);
2478 } else {
2479 /*
2480 * Like on previous PCH there seems to be something
2481 * fishy going on with forwarding PCH interrupts.
2482 */ 2483 DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n"); 2484 } 2485 } 2486 2487 return ret; 2488 } 2489 2490 static irqreturn_t gen8_irq_handler(int irq, void *arg) 2491 { 2492 struct drm_device *dev = arg; 2493 struct drm_i915_private *dev_priv = dev->dev_private; 2494 u32 master_ctl; 2495 u32 gt_iir[4] = {}; 2496 irqreturn_t ret; 2497 2498 if (!intel_irqs_enabled(dev_priv)) 2499 return IRQ_NONE; 2500 2501 master_ctl = I915_READ_FW(GEN8_MASTER_IRQ); 2502 master_ctl &= ~GEN8_MASTER_IRQ_CONTROL; 2503 if (!master_ctl) 2504 return IRQ_NONE; 2505 2506 I915_WRITE_FW(GEN8_MASTER_IRQ, 0); 2507 2508 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2509 disable_rpm_wakeref_asserts(dev_priv); 2510 2511 /* Find, clear, then process each source of interrupt */ 2512 ret = gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir); 2513 gen8_gt_irq_handler(dev_priv, gt_iir); 2514 ret |= gen8_de_irq_handler(dev_priv, master_ctl); 2515 2516 I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 2517 POSTING_READ_FW(GEN8_MASTER_IRQ); 2518 2519 enable_rpm_wakeref_asserts(dev_priv); 2520 2521 return ret; 2522 } 2523 2524 static void i915_error_wake_up(struct drm_i915_private *dev_priv, 2525 bool reset_completed) 2526 { 2527 struct intel_engine_cs *engine; 2528 2529 /* 2530 * Notify all waiters for GPU completion events that reset state has 2531 * been changed, and that they need to restart their wait after 2532 * checking for potential errors (and bail out to drop locks if there is 2533 * a gpu reset pending so that i915_error_work_func can acquire them). 2534 */ 2535 2536 /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */ 2537 for_each_engine(engine, dev_priv) 2538 wake_up_all(&engine->irq_queue); 2539 2540 /* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */ 2541 wake_up_all(&dev_priv->pending_flip_queue); 2542 2543 /* 2544 * Signal tasks blocked in i915_gem_wait_for_error that the pending 2545 * reset state is cleared. 2546 */ 2547 if (reset_completed) 2548 wake_up_all(&dev_priv->gpu_error.reset_queue); 2549 } 2550 2551 /** 2552 * i915_reset_and_wakeup - do process context error handling work 2553 * @dev: drm device 2554 * 2555 * Fire an error uevent so userspace can see that a hang or error 2556 * was detected. 2557 */ 2558 static void i915_reset_and_wakeup(struct drm_device *dev) 2559 { 2560 struct drm_i915_private *dev_priv = to_i915(dev); 2561 char *error_event[] = { I915_ERROR_UEVENT "=1", NULL }; 2562 char *reset_event[] = { I915_RESET_UEVENT "=1", NULL }; 2563 char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL }; 2564 int ret; 2565 2566 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event); 2567 2568 /* 2569 * Note that there's only one work item which does gpu resets, so we 2570 * need not worry about concurrent gpu resets potentially incrementing 2571 * error->reset_counter twice. We only need to take care of another 2572 * racing irq/hangcheck declaring the gpu dead for a second time. A 2573 * quick check for that is good enough: schedule_work ensures the 2574 * correct ordering between hang detection and this work item, and since 2575 * the reset in-progress bit is only ever set by code outside of this 2576 * work we don't need to worry about any other races. 
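 *
 * When a reset really is pending, the code below sends I915_RESET_UEVENT,
 * performs the reset via i915_reset() (bracketed by intel_prepare_reset()
 * and intel_finish_reset(), with an RPM reference held), sends a final
 * uevent on success and then wakes all waiters via i915_error_wake_up().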
2577 */ 2578 if (i915_reset_in_progress(&dev_priv->gpu_error)) { 2579 DRM_DEBUG_DRIVER("resetting chip\n"); 2580 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, 2581 reset_event); 2582 2583 /* 2584 * In most cases it's guaranteed that we get here with an RPM 2585 * reference held, for example because there is a pending GPU 2586 * request that won't finish until the reset is done. This 2587 * isn't the case at least when we get here by doing a 2588 * simulated reset via debugs, so get an RPM reference. 2589 */ 2590 intel_runtime_pm_get(dev_priv); 2591 2592 intel_prepare_reset(dev); 2593 2594 /* 2595 * All state reset _must_ be completed before we update the 2596 * reset counter, for otherwise waiters might miss the reset 2597 * pending state and not properly drop locks, resulting in 2598 * deadlocks with the reset work. 2599 */ 2600 ret = i915_reset(dev); 2601 2602 intel_finish_reset(dev); 2603 2604 intel_runtime_pm_put(dev_priv); 2605 2606 if (ret == 0) 2607 kobject_uevent_env(&dev->primary->kdev->kobj, 2608 KOBJ_CHANGE, reset_done_event); 2609 2610 /* 2611 * Note: The wake_up also serves as a memory barrier so that 2612 * waiters see the update value of the reset counter atomic_t. 2613 */ 2614 i915_error_wake_up(dev_priv, true); 2615 } 2616 } 2617 2618 static void i915_report_and_clear_eir(struct drm_device *dev) 2619 { 2620 struct drm_i915_private *dev_priv = dev->dev_private; 2621 uint32_t instdone[I915_NUM_INSTDONE_REG]; 2622 u32 eir = I915_READ(EIR); 2623 int pipe, i; 2624 2625 if (!eir) 2626 return; 2627 2628 pr_err("render error detected, EIR: 0x%08x\n", eir); 2629 2630 i915_get_extra_instdone(dev, instdone); 2631 2632 if (IS_G4X(dev)) { 2633 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) { 2634 u32 ipeir = I915_READ(IPEIR_I965); 2635 2636 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); 2637 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); 2638 for (i = 0; i < ARRAY_SIZE(instdone); i++) 2639 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); 2640 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); 2641 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); 2642 I915_WRITE(IPEIR_I965, ipeir); 2643 POSTING_READ(IPEIR_I965); 2644 } 2645 if (eir & GM45_ERROR_PAGE_TABLE) { 2646 u32 pgtbl_err = I915_READ(PGTBL_ER); 2647 pr_err("page table error\n"); 2648 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err); 2649 I915_WRITE(PGTBL_ER, pgtbl_err); 2650 POSTING_READ(PGTBL_ER); 2651 } 2652 } 2653 2654 if (!IS_GEN2(dev)) { 2655 if (eir & I915_ERROR_PAGE_TABLE) { 2656 u32 pgtbl_err = I915_READ(PGTBL_ER); 2657 pr_err("page table error\n"); 2658 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err); 2659 I915_WRITE(PGTBL_ER, pgtbl_err); 2660 POSTING_READ(PGTBL_ER); 2661 } 2662 } 2663 2664 if (eir & I915_ERROR_MEMORY_REFRESH) { 2665 pr_err("memory refresh error:\n"); 2666 for_each_pipe(dev_priv, pipe) 2667 pr_err("pipe %c stat: 0x%08x\n", 2668 pipe_name(pipe), I915_READ(PIPESTAT(pipe))); 2669 /* pipestat has already been acked */ 2670 } 2671 if (eir & I915_ERROR_INSTRUCTION) { 2672 pr_err("instruction error\n"); 2673 pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM)); 2674 for (i = 0; i < ARRAY_SIZE(instdone); i++) 2675 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); 2676 if (INTEL_INFO(dev)->gen < 4) { 2677 u32 ipeir = I915_READ(IPEIR); 2678 2679 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR)); 2680 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR)); 2681 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD)); 2682 I915_WRITE(IPEIR, ipeir); 2683 POSTING_READ(IPEIR); 2684 } else { 2685 u32 ipeir = I915_READ(IPEIR_I965); 2686 2687 
pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); 2688 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); 2689 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); 2690 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); 2691 I915_WRITE(IPEIR_I965, ipeir); 2692 POSTING_READ(IPEIR_I965); 2693 } 2694 } 2695 2696 I915_WRITE(EIR, eir); 2697 POSTING_READ(EIR); 2698 eir = I915_READ(EIR); 2699 if (eir) { 2700 /* 2701 * some errors might have become stuck, 2702 * mask them. 2703 */ 2704 DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir); 2705 I915_WRITE(EMR, I915_READ(EMR) | eir); 2706 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 2707 } 2708 } 2709 2710 /** 2711 * i915_handle_error - handle a gpu error 2712 * @dev: drm device 2713 * @engine_mask: mask representing engines that are hung 2714 * Do some basic checking of register state at error time and 2715 * dump it to the syslog. Also call i915_capture_error_state() to make 2716 * sure we get a record and make it available in debugfs. Fire a uevent 2717 * so userspace knows something bad happened (should trigger collection 2718 * of a ring dump etc.). 2719 */ 2720 void i915_handle_error(struct drm_device *dev, u32 engine_mask, 2721 const char *fmt, ...) 2722 { 2723 struct drm_i915_private *dev_priv = dev->dev_private; 2724 va_list args; 2725 char error_msg[80]; 2726 2727 va_start(args, fmt); 2728 vscnprintf(error_msg, sizeof(error_msg), fmt, args); 2729 va_end(args); 2730 2731 i915_capture_error_state(dev, engine_mask, error_msg); 2732 i915_report_and_clear_eir(dev); 2733 2734 if (engine_mask) { 2735 atomic_or(I915_RESET_IN_PROGRESS_FLAG, 2736 &dev_priv->gpu_error.reset_counter); 2737 2738 /* 2739 * Wakeup waiting processes so that the reset function 2740 * i915_reset_and_wakeup doesn't deadlock trying to grab 2741 * various locks. By bumping the reset counter first, the woken 2742 * processes will see a reset in progress and back off, 2743 * releasing their locks and then wait for the reset completion. 2744 * We must do this for _all_ gpu waiters that might hold locks 2745 * that the reset work needs to acquire. 2746 * 2747 * Note: The wake_up serves as the required memory barrier to 2748 * ensure that the waiters see the updated value of the reset 2749 * counter atomic_t. 2750 */ 2751 i915_error_wake_up(dev_priv, false); 2752 } 2753 2754 i915_reset_and_wakeup(dev); 2755 } 2756 2757 /* Called from drm generic code, passed 'crtc' which 2758 * we use as a pipe index 2759 */ 2760 static int i915_enable_vblank(struct drm_device *dev, unsigned int pipe) 2761 { 2762 struct drm_i915_private *dev_priv = dev->dev_private; 2763 unsigned long irqflags; 2764 2765 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2766 if (INTEL_INFO(dev)->gen >= 4) 2767 i915_enable_pipestat(dev_priv, pipe, 2768 PIPE_START_VBLANK_INTERRUPT_STATUS); 2769 else 2770 i915_enable_pipestat(dev_priv, pipe, 2771 PIPE_VBLANK_INTERRUPT_STATUS); 2772 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2773 2774 return 0; 2775 } 2776 2777 static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe) 2778 { 2779 struct drm_i915_private *dev_priv = dev->dev_private; 2780 unsigned long irqflags; 2781 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? 
DE_PIPE_VBLANK_IVB(pipe) : 2782 DE_PIPE_VBLANK(pipe); 2783 2784 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2785 ilk_enable_display_irq(dev_priv, bit); 2786 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2787 2788 return 0; 2789 } 2790 2791 static int valleyview_enable_vblank(struct drm_device *dev, unsigned int pipe) 2792 { 2793 struct drm_i915_private *dev_priv = dev->dev_private; 2794 unsigned long irqflags; 2795 2796 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2797 i915_enable_pipestat(dev_priv, pipe, 2798 PIPE_START_VBLANK_INTERRUPT_STATUS); 2799 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2800 2801 return 0; 2802 } 2803 2804 static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe) 2805 { 2806 struct drm_i915_private *dev_priv = dev->dev_private; 2807 unsigned long irqflags; 2808 2809 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2810 bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK); 2811 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2812 2813 return 0; 2814 } 2815 2816 /* Called from drm generic code, passed 'crtc' which 2817 * we use as a pipe index 2818 */ 2819 static void i915_disable_vblank(struct drm_device *dev, unsigned int pipe) 2820 { 2821 struct drm_i915_private *dev_priv = dev->dev_private; 2822 unsigned long irqflags; 2823 2824 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2825 i915_disable_pipestat(dev_priv, pipe, 2826 PIPE_VBLANK_INTERRUPT_STATUS | 2827 PIPE_START_VBLANK_INTERRUPT_STATUS); 2828 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2829 } 2830 2831 static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe) 2832 { 2833 struct drm_i915_private *dev_priv = dev->dev_private; 2834 unsigned long irqflags; 2835 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? 
DE_PIPE_VBLANK_IVB(pipe) : 2836 DE_PIPE_VBLANK(pipe); 2837 2838 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2839 ilk_disable_display_irq(dev_priv, bit); 2840 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2841 } 2842 2843 static void valleyview_disable_vblank(struct drm_device *dev, unsigned int pipe) 2844 { 2845 struct drm_i915_private *dev_priv = dev->dev_private; 2846 unsigned long irqflags; 2847 2848 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2849 i915_disable_pipestat(dev_priv, pipe, 2850 PIPE_START_VBLANK_INTERRUPT_STATUS); 2851 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2852 } 2853 2854 static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe) 2855 { 2856 struct drm_i915_private *dev_priv = dev->dev_private; 2857 unsigned long irqflags; 2858 2859 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2860 bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK); 2861 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2862 } 2863 2864 static bool 2865 ring_idle(struct intel_engine_cs *engine, u32 seqno) 2866 { 2867 return i915_seqno_passed(seqno, 2868 READ_ONCE(engine->last_submitted_seqno)); 2869 } 2870 2871 static bool 2872 ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr) 2873 { 2874 if (INTEL_INFO(dev)->gen >= 8) { 2875 return (ipehr >> 23) == 0x1c; 2876 } else { 2877 ipehr &= ~MI_SEMAPHORE_SYNC_MASK; 2878 return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | 2879 MI_SEMAPHORE_REGISTER); 2880 } 2881 } 2882 2883 static struct intel_engine_cs * 2884 semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr, 2885 u64 offset) 2886 { 2887 struct drm_i915_private *dev_priv = engine->dev->dev_private; 2888 struct intel_engine_cs *signaller; 2889 2890 if (INTEL_INFO(dev_priv)->gen >= 8) { 2891 for_each_engine(signaller, dev_priv) { 2892 if (engine == signaller) 2893 continue; 2894 2895 if (offset == signaller->semaphore.signal_ggtt[engine->id]) 2896 return signaller; 2897 } 2898 } else { 2899 u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK; 2900 2901 for_each_engine(signaller, dev_priv) { 2902 if(engine == signaller) 2903 continue; 2904 2905 if (sync_bits == signaller->semaphore.mbox.wait[engine->id]) 2906 return signaller; 2907 } 2908 } 2909 2910 DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n", 2911 engine->id, ipehr, offset); 2912 2913 return NULL; 2914 } 2915 2916 static struct intel_engine_cs * 2917 semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno) 2918 { 2919 struct drm_i915_private *dev_priv = engine->dev->dev_private; 2920 u32 cmd, ipehr, head; 2921 u64 offset = 0; 2922 int i, backwards; 2923 2924 /* 2925 * This function does not support execlist mode - any attempt to 2926 * proceed further into this function will result in a kernel panic 2927 * when dereferencing ring->buffer, which is not set up in execlist 2928 * mode. 2929 * 2930 * The correct way of doing it would be to derive the currently 2931 * executing ring buffer from the current context, which is derived 2932 * from the currently running request. Unfortunately, to get the 2933 * current request we would have to grab the struct_mutex before doing 2934 * anything else, which would be ill-advised since some other thread 2935 * might have grabbed it already and managed to hang itself, causing 2936 * the hang checker to deadlock. 2937 * 2938 * Therefore, this function does not support execlist mode in its 2939 * current form. Just return NULL and move on. 
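 *
 * (engine->buffer is only set up for legacy ringbuffer submission, which is
 * why the NULL check below doubles as the execlist-mode bail-out.)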
2940 */
2941 if (engine->buffer == NULL)
2942 return NULL;
2943
2944 ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
2945 if (!ipehr_is_semaphore_wait(engine->dev, ipehr))
2946 return NULL;
2947
2948 /*
2949 * HEAD is likely pointing to the dword after the actual command,
2950 * so scan backwards until we find the MBOX. But limit it to just 3
2951 * or 4 dwords depending on the semaphore wait command size.
2952 * Note that we don't care about ACTHD here since that might
2953 * point at a batch, and semaphores are always emitted into the
2954 * ringbuffer itself.
2955 */
2956 head = I915_READ_HEAD(engine) & HEAD_ADDR;
2957 backwards = (INTEL_INFO(engine->dev)->gen >= 8) ? 5 : 4;
2958
2959 for (i = backwards; i; --i) {
2960 /*
2961 * Be paranoid and presume the hw has gone off into the wild -
2962 * our ring is smaller than what the hardware (and hence
2963 * HEAD_ADDR) allows. Also handles wrap-around.
2964 */
2965 head &= engine->buffer->size - 1;
2966
2967 /* This here seems to blow up */
2968 cmd = ioread32(engine->buffer->virtual_start + head);
2969 if (cmd == ipehr)
2970 break;
2971
2972 head -= 4;
2973 }
2974
2975 if (!i)
2976 return NULL;
2977
2978 *seqno = ioread32(engine->buffer->virtual_start + head + 4) + 1;
2979 if (INTEL_INFO(engine->dev)->gen >= 8) {
2980 offset = ioread32(engine->buffer->virtual_start + head + 12);
2981 offset <<= 32;
2982 offset |= ioread32(engine->buffer->virtual_start + head + 8);
2983 }
2984 return semaphore_wait_to_signaller_ring(engine, ipehr, offset);
2985 }
2986
2987 static int semaphore_passed(struct intel_engine_cs *engine)
2988 {
2989 struct drm_i915_private *dev_priv = engine->dev->dev_private;
2990 struct intel_engine_cs *signaller;
2991 u32 seqno;
2992
2993 engine->hangcheck.deadlock++;
2994
2995 signaller = semaphore_waits_for(engine, &seqno);
2996 if (signaller == NULL)
2997 return -1;
2998
2999 /* Prevent pathological recursion due to driver bugs */
3000 if (signaller->hangcheck.deadlock >= I915_NUM_ENGINES)
3001 return -1;
3002
3003 if (i915_seqno_passed(signaller->get_seqno(signaller), seqno))
3004 return 1;
3005
3006 /* cursory check for an unkickable deadlock */
3007 if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
3008 semaphore_passed(signaller) < 0)
3009 return -1;
3010
3011 return 0;
3012 }
3013
3014 static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
3015 {
3016 struct intel_engine_cs *engine;
3017
3018 for_each_engine(engine, dev_priv)
3019 engine->hangcheck.deadlock = 0;
3020 }
3021
3022 static bool subunits_stuck(struct intel_engine_cs *engine)
3023 {
3024 u32 instdone[I915_NUM_INSTDONE_REG];
3025 bool stuck;
3026 int i;
3027
3028 if (engine->id != RCS)
3029 return true;
3030
3031 i915_get_extra_instdone(engine->dev, instdone);
3032
3033 /* There might be unstable subunit states even when
3034 * actual head is not moving. Filter out the unstable ones by
3035 * accumulating the undone -> done transitions and only
3036 * considering those as progress.
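 *
 * Concretely: hangcheck.instdone accumulates every INSTDONE bit observed so
 * far; if the current sample contains a bit that has not been accumulated
 * yet, some subunit made progress and the engine is not considered stuck.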
3037 */ 3038 stuck = true; 3039 for (i = 0; i < I915_NUM_INSTDONE_REG; i++) { 3040 const u32 tmp = instdone[i] | engine->hangcheck.instdone[i]; 3041 3042 if (tmp != engine->hangcheck.instdone[i]) 3043 stuck = false; 3044 3045 engine->hangcheck.instdone[i] |= tmp; 3046 } 3047 3048 return stuck; 3049 } 3050 3051 static enum intel_ring_hangcheck_action 3052 head_stuck(struct intel_engine_cs *engine, u64 acthd) 3053 { 3054 if (acthd != engine->hangcheck.acthd) { 3055 3056 /* Clear subunit states on head movement */ 3057 memset(engine->hangcheck.instdone, 0, 3058 sizeof(engine->hangcheck.instdone)); 3059 3060 return HANGCHECK_ACTIVE; 3061 } 3062 3063 if (!subunits_stuck(engine)) 3064 return HANGCHECK_ACTIVE; 3065 3066 return HANGCHECK_HUNG; 3067 } 3068 3069 static enum intel_ring_hangcheck_action 3070 ring_stuck(struct intel_engine_cs *engine, u64 acthd) 3071 { 3072 struct drm_device *dev = engine->dev; 3073 struct drm_i915_private *dev_priv = dev->dev_private; 3074 enum intel_ring_hangcheck_action ha; 3075 u32 tmp; 3076 3077 ha = head_stuck(engine, acthd); 3078 if (ha != HANGCHECK_HUNG) 3079 return ha; 3080 3081 if (IS_GEN2(dev)) 3082 return HANGCHECK_HUNG; 3083 3084 /* Is the chip hanging on a WAIT_FOR_EVENT? 3085 * If so we can simply poke the RB_WAIT bit 3086 * and break the hang. This should work on 3087 * all but the second generation chipsets. 3088 */ 3089 tmp = I915_READ_CTL(engine); 3090 if (tmp & RING_WAIT) { 3091 i915_handle_error(dev, 0, 3092 "Kicking stuck wait on %s", 3093 engine->name); 3094 I915_WRITE_CTL(engine, tmp); 3095 return HANGCHECK_KICK; 3096 } 3097 3098 if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) { 3099 switch (semaphore_passed(engine)) { 3100 default: 3101 return HANGCHECK_HUNG; 3102 case 1: 3103 i915_handle_error(dev, 0, 3104 "Kicking stuck semaphore on %s", 3105 engine->name); 3106 I915_WRITE_CTL(engine, tmp); 3107 return HANGCHECK_KICK; 3108 case 0: 3109 return HANGCHECK_WAIT; 3110 } 3111 } 3112 3113 return HANGCHECK_HUNG; 3114 } 3115 3116 static unsigned kick_waiters(struct intel_engine_cs *engine) 3117 { 3118 struct drm_i915_private *i915 = to_i915(engine->dev); 3119 unsigned user_interrupts = READ_ONCE(engine->user_interrupts); 3120 3121 if (engine->hangcheck.user_interrupts == user_interrupts && 3122 !test_and_set_bit(engine->id, &i915->gpu_error.missed_irq_rings)) { 3123 if (!(i915->gpu_error.test_irq_rings & intel_engine_flag(engine))) 3124 DRM_ERROR("Hangcheck timer elapsed... %s idle\n", 3125 engine->name); 3126 else 3127 DRM_INFO("Fake missed irq on %s\n", 3128 engine->name); 3129 wake_up_all(&engine->irq_queue); 3130 } 3131 3132 return user_interrupts; 3133 } 3134 /* 3135 * This is called when the chip hasn't reported back with completed 3136 * batchbuffers in a long time. We keep track per ring seqno progress and 3137 * if there are no progress, hangcheck score for that ring is increased. 3138 * Further, acthd is inspected to see if the ring is stuck. On stuck case 3139 * we kick the ring. If we see no progress on three subsequent calls 3140 * we assume chip is wedged and try to fix it by resetting the chip. 
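 *
 * The per-engine score is bumped by BUSY/KICK/HUNG (defined below) while no
 * progress is seen, decays by ACTIVE_DECAY once the seqno advances again,
 * and an engine is reported hung when it reaches HANGCHECK_SCORE_RING_HUNG.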
3141 */
3142 static void i915_hangcheck_elapsed(struct work_struct *work)
3143 {
3144 struct drm_i915_private *dev_priv =
3145 container_of(work, typeof(*dev_priv),
3146 gpu_error.hangcheck_work.work);
3147 struct drm_device *dev = dev_priv->dev;
3148 struct intel_engine_cs *engine;
3149 enum intel_engine_id id;
3150 int busy_count = 0, rings_hung = 0;
3151 bool stuck[I915_NUM_ENGINES] = { 0 };
3152 #define BUSY 1
3153 #define KICK 5
3154 #define HUNG 20
3155 #define ACTIVE_DECAY 15
3156
3157 if (!i915.enable_hangcheck)
3158 return;
3159
3160 /*
3161 * The hangcheck work is synced during runtime suspend, so we don't
3162 * require a wakeref. TODO: instead of disabling the asserts make
3163 * sure that we hold a reference when this work is running.
3164 */
3165 DISABLE_RPM_WAKEREF_ASSERTS(dev_priv);
3166
3167 /* As enabling the GPU requires fairly extensive mmio access,
3168 * periodically arm the mmio checker to see if we are triggering
3169 * any invalid access.
3170 */
3171 intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
3172
3173 for_each_engine_id(engine, dev_priv, id) {
3174 u64 acthd;
3175 u32 seqno;
3176 unsigned user_interrupts;
3177 bool busy = true;
3178
3179 semaphore_clear_deadlocks(dev_priv);
3180
3181 /* We don't strictly need an irq-barrier here, as we are not
3182 * serving an interrupt request, but be paranoid in case the
3183 * barrier has side-effects (such as preventing a broken
3184 * cacheline snoop) and so be sure that we can see the seqno
3185 * advance. If the seqno should stick, due to a stale
3186 * cacheline, we would erroneously declare the GPU hung.
3187 */
3188 if (engine->irq_seqno_barrier)
3189 engine->irq_seqno_barrier(engine);
3190
3191 acthd = intel_ring_get_active_head(engine);
3192 seqno = engine->get_seqno(engine);
3193
3194 /* Reset stuck interrupts between batch advances */
3195 user_interrupts = 0;
3196
3197 if (engine->hangcheck.seqno == seqno) {
3198 if (ring_idle(engine, seqno)) {
3199 engine->hangcheck.action = HANGCHECK_IDLE;
3200 if (waitqueue_active(&engine->irq_queue)) {
3201 /* Safeguard against driver failure */
3202 user_interrupts = kick_waiters(engine);
3203 engine->hangcheck.score += BUSY;
3204 } else
3205 busy = false;
3206 } else {
3207 /* We always increment the hangcheck score
3208 * if the ring is busy and still processing
3209 * the same request, so that no single request
3210 * can run indefinitely (such as a chain of
3211 * batches). The only time we do not increment
3212 * the hangcheck score on this ring is when this
3213 * ring is in a legitimate wait for another
3214 * ring. In that case the waiting ring is a
3215 * victim and we want to be sure we catch the
3216 * right culprit. Then every time we do kick
3217 * the ring, add a small increment to the
3218 * score so that we can catch a batch that is
3219 * being repeatedly kicked and so responsible
3220 * for stalling the machine.
3221 */
3222 engine->hangcheck.action = ring_stuck(engine,
3223 acthd);
3224
3225 switch (engine->hangcheck.action) {
3226 case HANGCHECK_IDLE:
3227 case HANGCHECK_WAIT:
3228 break;
3229 case HANGCHECK_ACTIVE:
3230 engine->hangcheck.score += BUSY;
3231 break;
3232 case HANGCHECK_KICK:
3233 engine->hangcheck.score += KICK;
3234 break;
3235 case HANGCHECK_HUNG:
3236 engine->hangcheck.score += HUNG;
3237 stuck[id] = true;
3238 break;
3239 }
3240 }
3241 } else {
3242 engine->hangcheck.action = HANGCHECK_ACTIVE;
3243
3244 /* Gradually reduce the count so that we catch DoS
3245 * attempts across multiple batches.
3246 */
3247 if (engine->hangcheck.score > 0)
3248 engine->hangcheck.score -= ACTIVE_DECAY;
3249 if (engine->hangcheck.score < 0)
3250 engine->hangcheck.score = 0;
3251
3252 /* Clear head and subunit states on seqno movement */
3253 acthd = 0;
3254
3255 memset(engine->hangcheck.instdone, 0,
3256 sizeof(engine->hangcheck.instdone));
3257 }
3258
3259 engine->hangcheck.seqno = seqno;
3260 engine->hangcheck.acthd = acthd;
3261 engine->hangcheck.user_interrupts = user_interrupts;
3262 busy_count += busy;
3263 }
3264
3265 for_each_engine_id(engine, dev_priv, id) {
3266 if (engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
3267 DRM_INFO("%s on %s\n",
3268 stuck[id] ? "stuck" : "no progress",
3269 engine->name);
3270 rings_hung |= intel_engine_flag(engine);
3271 }
3272 }
3273
3274 if (rings_hung) {
3275 i915_handle_error(dev, rings_hung, "Engine(s) hung");
3276 goto out;
3277 }
3278
3279 if (busy_count)
3280 /* Reset timer in case the chip hangs without another request
3281 * being added */
3282 i915_queue_hangcheck(dev);
3283
3284 out:
3285 ENABLE_RPM_WAKEREF_ASSERTS(dev_priv);
3286 }
3287
3288 void i915_queue_hangcheck(struct drm_device *dev)
3289 {
3290 struct i915_gpu_error *e = &to_i915(dev)->gpu_error;
3291
3292 if (!i915.enable_hangcheck)
3293 return;
3294
3295 /* Don't continually defer the hangcheck so that it is always run at
3296 * least once after work has been scheduled on any ring. Otherwise,
3297 * we will ignore a hung ring if a second ring is kept busy.
3298 */
3299
3300 queue_delayed_work(e->hangcheck_wq, &e->hangcheck_work,
3301 round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES));
3302 }
3303
3304 static void ibx_irq_reset(struct drm_device *dev)
3305 {
3306 struct drm_i915_private *dev_priv = dev->dev_private;
3307
3308 if (HAS_PCH_NOP(dev))
3309 return;
3310
3311 GEN5_IRQ_RESET(SDE);
3312
3313 if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
3314 I915_WRITE(SERR_INT, 0xffffffff);
3315 }
3316
3317 /*
3318 * SDEIER is also touched by the interrupt handler to work around missed PCH
3319 * interrupts. Hence we can't update it after the interrupt handler is enabled -
3320 * instead we unconditionally enable all PCH interrupt sources here, but then
3321 * only unmask them as needed with SDEIMR.
3322 *
3323 * This function needs to be called before interrupts are enabled.
3324 */ 3325 static void ibx_irq_pre_postinstall(struct drm_device *dev) 3326 { 3327 struct drm_i915_private *dev_priv = dev->dev_private; 3328 3329 if (HAS_PCH_NOP(dev)) 3330 return; 3331 3332 WARN_ON(I915_READ(SDEIER) != 0); 3333 I915_WRITE(SDEIER, 0xffffffff); 3334 POSTING_READ(SDEIER); 3335 } 3336 3337 static void gen5_gt_irq_reset(struct drm_device *dev) 3338 { 3339 struct drm_i915_private *dev_priv = dev->dev_private; 3340 3341 GEN5_IRQ_RESET(GT); 3342 if (INTEL_INFO(dev)->gen >= 6) 3343 GEN5_IRQ_RESET(GEN6_PM); 3344 } 3345 3346 static void vlv_display_irq_reset(struct drm_i915_private *dev_priv) 3347 { 3348 enum pipe pipe; 3349 3350 if (IS_CHERRYVIEW(dev_priv)) 3351 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV); 3352 else 3353 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK); 3354 3355 i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0); 3356 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3357 3358 for_each_pipe(dev_priv, pipe) { 3359 I915_WRITE(PIPESTAT(pipe), 3360 PIPE_FIFO_UNDERRUN_STATUS | 3361 PIPESTAT_INT_STATUS_MASK); 3362 dev_priv->pipestat_irq_mask[pipe] = 0; 3363 } 3364 3365 GEN5_IRQ_RESET(VLV_); 3366 dev_priv->irq_mask = ~0; 3367 } 3368 3369 static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv) 3370 { 3371 u32 pipestat_mask; 3372 u32 enable_mask; 3373 enum pipe pipe; 3374 3375 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV | 3376 PIPE_CRC_DONE_INTERRUPT_STATUS; 3377 3378 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); 3379 for_each_pipe(dev_priv, pipe) 3380 i915_enable_pipestat(dev_priv, pipe, pipestat_mask); 3381 3382 enable_mask = I915_DISPLAY_PORT_INTERRUPT | 3383 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3384 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; 3385 if (IS_CHERRYVIEW(dev_priv)) 3386 enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT; 3387 3388 WARN_ON(dev_priv->irq_mask != ~0); 3389 3390 dev_priv->irq_mask = ~enable_mask; 3391 3392 GEN5_IRQ_INIT(VLV_, dev_priv->irq_mask, enable_mask); 3393 } 3394 3395 /* drm_dma.h hooks 3396 */ 3397 static void ironlake_irq_reset(struct drm_device *dev) 3398 { 3399 struct drm_i915_private *dev_priv = dev->dev_private; 3400 3401 I915_WRITE(HWSTAM, 0xffffffff); 3402 3403 GEN5_IRQ_RESET(DE); 3404 if (IS_GEN7(dev)) 3405 I915_WRITE(GEN7_ERR_INT, 0xffffffff); 3406 3407 gen5_gt_irq_reset(dev); 3408 3409 ibx_irq_reset(dev); 3410 } 3411 3412 static void valleyview_irq_preinstall(struct drm_device *dev) 3413 { 3414 struct drm_i915_private *dev_priv = dev->dev_private; 3415 3416 I915_WRITE(VLV_MASTER_IER, 0); 3417 POSTING_READ(VLV_MASTER_IER); 3418 3419 gen5_gt_irq_reset(dev); 3420 3421 spin_lock_irq(&dev_priv->irq_lock); 3422 if (dev_priv->display_irqs_enabled) 3423 vlv_display_irq_reset(dev_priv); 3424 spin_unlock_irq(&dev_priv->irq_lock); 3425 } 3426 3427 static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv) 3428 { 3429 GEN8_IRQ_RESET_NDX(GT, 0); 3430 GEN8_IRQ_RESET_NDX(GT, 1); 3431 GEN8_IRQ_RESET_NDX(GT, 2); 3432 GEN8_IRQ_RESET_NDX(GT, 3); 3433 } 3434 3435 static void gen8_irq_reset(struct drm_device *dev) 3436 { 3437 struct drm_i915_private *dev_priv = dev->dev_private; 3438 int pipe; 3439 3440 I915_WRITE(GEN8_MASTER_IRQ, 0); 3441 POSTING_READ(GEN8_MASTER_IRQ); 3442 3443 gen8_gt_irq_reset(dev_priv); 3444 3445 for_each_pipe(dev_priv, pipe) 3446 if (intel_display_power_is_enabled(dev_priv, 3447 POWER_DOMAIN_PIPE(pipe))) 3448 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe); 3449 3450 GEN5_IRQ_RESET(GEN8_DE_PORT_); 3451 GEN5_IRQ_RESET(GEN8_DE_MISC_); 3452 GEN5_IRQ_RESET(GEN8_PCU_); 3453 
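/* South display engine (PCH) interrupts only exist on PCH-split platforms, hence the guard below. */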
3454 if (HAS_PCH_SPLIT(dev)) 3455 ibx_irq_reset(dev); 3456 } 3457 3458 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv, 3459 unsigned int pipe_mask) 3460 { 3461 uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN; 3462 enum pipe pipe; 3463 3464 spin_lock_irq(&dev_priv->irq_lock); 3465 for_each_pipe_masked(dev_priv, pipe, pipe_mask) 3466 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe, 3467 dev_priv->de_irq_mask[pipe], 3468 ~dev_priv->de_irq_mask[pipe] | extra_ier); 3469 spin_unlock_irq(&dev_priv->irq_lock); 3470 } 3471 3472 void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv, 3473 unsigned int pipe_mask) 3474 { 3475 enum pipe pipe; 3476 3477 spin_lock_irq(&dev_priv->irq_lock); 3478 for_each_pipe_masked(dev_priv, pipe, pipe_mask) 3479 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe); 3480 spin_unlock_irq(&dev_priv->irq_lock); 3481 3482 /* make sure we're done processing display irqs */ 3483 synchronize_irq(dev_priv->dev->irq); 3484 } 3485 3486 static void cherryview_irq_preinstall(struct drm_device *dev) 3487 { 3488 struct drm_i915_private *dev_priv = dev->dev_private; 3489 3490 I915_WRITE(GEN8_MASTER_IRQ, 0); 3491 POSTING_READ(GEN8_MASTER_IRQ); 3492 3493 gen8_gt_irq_reset(dev_priv); 3494 3495 GEN5_IRQ_RESET(GEN8_PCU_); 3496 3497 spin_lock_irq(&dev_priv->irq_lock); 3498 if (dev_priv->display_irqs_enabled) 3499 vlv_display_irq_reset(dev_priv); 3500 spin_unlock_irq(&dev_priv->irq_lock); 3501 } 3502 3503 static u32 intel_hpd_enabled_irqs(struct drm_device *dev, 3504 const u32 hpd[HPD_NUM_PINS]) 3505 { 3506 struct drm_i915_private *dev_priv = to_i915(dev); 3507 struct intel_encoder *encoder; 3508 u32 enabled_irqs = 0; 3509 3510 for_each_intel_encoder(dev, encoder) 3511 if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED) 3512 enabled_irqs |= hpd[encoder->hpd_pin]; 3513 3514 return enabled_irqs; 3515 } 3516 3517 static void ibx_hpd_irq_setup(struct drm_device *dev) 3518 { 3519 struct drm_i915_private *dev_priv = dev->dev_private; 3520 u32 hotplug_irqs, hotplug, enabled_irqs; 3521 3522 if (HAS_PCH_IBX(dev)) { 3523 hotplug_irqs = SDE_HOTPLUG_MASK; 3524 enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ibx); 3525 } else { 3526 hotplug_irqs = SDE_HOTPLUG_MASK_CPT; 3527 enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_cpt); 3528 } 3529 3530 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 3531 3532 /* 3533 * Enable digital hotplug on the PCH, and configure the DP short pulse 3534 * duration to 2ms (which is the minimum in the Display Port spec). 3535 * The pulse duration bits are reserved on LPT+. 3536 */ 3537 hotplug = I915_READ(PCH_PORT_HOTPLUG); 3538 hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK); 3539 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms; 3540 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms; 3541 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms; 3542 /* 3543 * When CPU and PCH are on the same package, port A 3544 * HPD must be enabled in both north and south. 
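 *
 * For LPT-LP the south (PCH) port A enable is set below; the matching north
 * (CPU) enable lives in DIGITAL_PORT_HOTPLUG_CNTRL and is programmed by
 * ilk_hpd_irq_setup(), which also calls into this function.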
3545 */ 3546 if (HAS_PCH_LPT_LP(dev)) 3547 hotplug |= PORTA_HOTPLUG_ENABLE; 3548 I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 3549 } 3550 3551 static void spt_hpd_irq_setup(struct drm_device *dev) 3552 { 3553 struct drm_i915_private *dev_priv = dev->dev_private; 3554 u32 hotplug_irqs, hotplug, enabled_irqs; 3555 3556 hotplug_irqs = SDE_HOTPLUG_MASK_SPT; 3557 enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_spt); 3558 3559 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 3560 3561 /* Enable digital hotplug on the PCH */ 3562 hotplug = I915_READ(PCH_PORT_HOTPLUG); 3563 hotplug |= PORTD_HOTPLUG_ENABLE | PORTC_HOTPLUG_ENABLE | 3564 PORTB_HOTPLUG_ENABLE | PORTA_HOTPLUG_ENABLE; 3565 I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 3566 3567 hotplug = I915_READ(PCH_PORT_HOTPLUG2); 3568 hotplug |= PORTE_HOTPLUG_ENABLE; 3569 I915_WRITE(PCH_PORT_HOTPLUG2, hotplug); 3570 } 3571 3572 static void ilk_hpd_irq_setup(struct drm_device *dev) 3573 { 3574 struct drm_i915_private *dev_priv = dev->dev_private; 3575 u32 hotplug_irqs, hotplug, enabled_irqs; 3576 3577 if (INTEL_INFO(dev)->gen >= 8) { 3578 hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG; 3579 enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_bdw); 3580 3581 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs); 3582 } else if (INTEL_INFO(dev)->gen >= 7) { 3583 hotplug_irqs = DE_DP_A_HOTPLUG_IVB; 3584 enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ivb); 3585 3586 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs); 3587 } else { 3588 hotplug_irqs = DE_DP_A_HOTPLUG; 3589 enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ilk); 3590 3591 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs); 3592 } 3593 3594 /* 3595 * Enable digital hotplug on the CPU, and configure the DP short pulse 3596 * duration to 2ms (which is the minimum in the Display Port spec) 3597 * The pulse duration bits are reserved on HSW+. 3598 */ 3599 hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL); 3600 hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK; 3601 hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE | DIGITAL_PORTA_PULSE_DURATION_2ms; 3602 I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug); 3603 3604 ibx_hpd_irq_setup(dev); 3605 } 3606 3607 static void bxt_hpd_irq_setup(struct drm_device *dev) 3608 { 3609 struct drm_i915_private *dev_priv = dev->dev_private; 3610 u32 hotplug_irqs, hotplug, enabled_irqs; 3611 3612 enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_bxt); 3613 hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK; 3614 3615 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs); 3616 3617 hotplug = I915_READ(PCH_PORT_HOTPLUG); 3618 hotplug |= PORTC_HOTPLUG_ENABLE | PORTB_HOTPLUG_ENABLE | 3619 PORTA_HOTPLUG_ENABLE; 3620 3621 DRM_DEBUG_KMS("Invert bit setting: hp_ctl:%x hp_port:%x\n", 3622 hotplug, enabled_irqs); 3623 hotplug &= ~BXT_DDI_HPD_INVERT_MASK; 3624 3625 /* 3626 * For BXT invert bit has to be set based on AOB design 3627 * for HPD detection logic, update it based on VBT fields. 
3628 */ 3629 3630 if ((enabled_irqs & BXT_DE_PORT_HP_DDIA) && 3631 intel_bios_is_port_hpd_inverted(dev_priv, PORT_A)) 3632 hotplug |= BXT_DDIA_HPD_INVERT; 3633 if ((enabled_irqs & BXT_DE_PORT_HP_DDIB) && 3634 intel_bios_is_port_hpd_inverted(dev_priv, PORT_B)) 3635 hotplug |= BXT_DDIB_HPD_INVERT; 3636 if ((enabled_irqs & BXT_DE_PORT_HP_DDIC) && 3637 intel_bios_is_port_hpd_inverted(dev_priv, PORT_C)) 3638 hotplug |= BXT_DDIC_HPD_INVERT; 3639 3640 I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 3641 } 3642 3643 static void ibx_irq_postinstall(struct drm_device *dev) 3644 { 3645 struct drm_i915_private *dev_priv = dev->dev_private; 3646 u32 mask; 3647 3648 if (HAS_PCH_NOP(dev)) 3649 return; 3650 3651 if (HAS_PCH_IBX(dev)) 3652 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON; 3653 else 3654 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT; 3655 3656 gen5_assert_iir_is_zero(dev_priv, SDEIIR); 3657 I915_WRITE(SDEIMR, ~mask); 3658 } 3659 3660 static void gen5_gt_irq_postinstall(struct drm_device *dev) 3661 { 3662 struct drm_i915_private *dev_priv = dev->dev_private; 3663 u32 pm_irqs, gt_irqs; 3664 3665 pm_irqs = gt_irqs = 0; 3666 3667 dev_priv->gt_irq_mask = ~0; 3668 if (HAS_L3_DPF(dev)) { 3669 /* L3 parity interrupt is always unmasked. */ 3670 dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev); 3671 gt_irqs |= GT_PARITY_ERROR(dev); 3672 } 3673 3674 gt_irqs |= GT_RENDER_USER_INTERRUPT; 3675 if (IS_GEN5(dev)) { 3676 gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT | 3677 ILK_BSD_USER_INTERRUPT; 3678 } else { 3679 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT; 3680 } 3681 3682 GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs); 3683 3684 if (INTEL_INFO(dev)->gen >= 6) { 3685 /* 3686 * RPS interrupts will get enabled/disabled on demand when RPS 3687 * itself is enabled/disabled. 3688 */ 3689 if (HAS_VEBOX(dev)) 3690 pm_irqs |= PM_VEBOX_USER_INTERRUPT; 3691 3692 dev_priv->pm_irq_mask = 0xffffffff; 3693 GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs); 3694 } 3695 } 3696 3697 static int ironlake_irq_postinstall(struct drm_device *dev) 3698 { 3699 struct drm_i915_private *dev_priv = dev->dev_private; 3700 u32 display_mask, extra_mask; 3701 3702 if (INTEL_INFO(dev)->gen >= 7) { 3703 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | 3704 DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB | 3705 DE_PLANEB_FLIP_DONE_IVB | 3706 DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB); 3707 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB | 3708 DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB | 3709 DE_DP_A_HOTPLUG_IVB); 3710 } else { 3711 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | 3712 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE | 3713 DE_AUX_CHANNEL_A | 3714 DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE | 3715 DE_POISON); 3716 extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT | 3717 DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN | 3718 DE_DP_A_HOTPLUG); 3719 } 3720 3721 dev_priv->irq_mask = ~display_mask; 3722 3723 I915_WRITE(HWSTAM, 0xeffe); 3724 3725 ibx_irq_pre_postinstall(dev); 3726 3727 GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask); 3728 3729 gen5_gt_irq_postinstall(dev); 3730 3731 ibx_irq_postinstall(dev); 3732 3733 if (IS_IRONLAKE_M(dev)) { 3734 /* Enable PCU event interrupts 3735 * 3736 * spinlocking not required here for correctness since interrupt 3737 * setup is guaranteed to run in single-threaded context. But we 3738 * need it to make the assert_spin_locked happy. 
void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->display_irqs_enabled)
		return;

	dev_priv->display_irqs_enabled = true;

	if (intel_irqs_enabled(dev_priv)) {
		vlv_display_irq_reset(dev_priv);
		vlv_display_irq_postinstall(dev_priv);
	}
}

void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (!dev_priv->display_irqs_enabled)
		return;

	dev_priv->display_irqs_enabled = false;

	if (intel_irqs_enabled(dev_priv))
		vlv_display_irq_reset(dev_priv);
}


static int valleyview_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	gen5_gt_irq_postinstall(dev);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_postinstall(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
	POSTING_READ(VLV_MASTER_IER);

	return 0;
}

static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
{
	/* These are interrupts we'll toggle with the ring mask register */
	uint32_t gt_interrupts[] = {
		GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
			GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
		GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
			GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
		0,
		GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
		};

	if (HAS_L3_DPF(dev_priv))
		gt_interrupts[0] |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;

	dev_priv->pm_irq_mask = 0xffffffff;
	GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
	GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
	/*
	 * RPS interrupts will get enabled/disabled on demand when RPS itself
	 * is enabled/disabled.
	 */
	GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, 0);
	GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
}

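/*
 * Display engine (DE) postinstall for gen8+: unmask the pipe and port
 * interrupts we always want. Pipe interrupt registers are only touched
 * here for pipes whose power domain is currently enabled.
 */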
static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
{
	uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
	uint32_t de_pipe_enables;
	u32 de_port_masked = GEN8_AUX_CHANNEL_A;
	u32 de_port_enables;
	enum pipe pipe;

	if (INTEL_INFO(dev_priv)->gen >= 9) {
		de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE |
				  GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
		de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
				  GEN9_AUX_CHANNEL_D;
		if (IS_BROXTON(dev_priv))
			de_port_masked |= BXT_DE_PORT_GMBUS;
	} else {
		de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE |
				  GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
	}

	de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
					   GEN8_PIPE_FIFO_UNDERRUN;

	de_port_enables = de_port_masked;
	if (IS_BROXTON(dev_priv))
		de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
	else if (IS_BROADWELL(dev_priv))
		de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;

	dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
	dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
	dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;

	for_each_pipe(dev_priv, pipe)
		if (intel_display_power_is_enabled(dev_priv,
				POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
					  dev_priv->de_irq_mask[pipe],
					  de_pipe_enables);

	GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
}

static int gen8_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_PCH_SPLIT(dev))
		ibx_irq_pre_postinstall(dev);

	gen8_gt_irq_postinstall(dev_priv);
	gen8_de_irq_postinstall(dev_priv);

	if (HAS_PCH_SPLIT(dev))
		ibx_irq_postinstall(dev);

	I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
	POSTING_READ(GEN8_MASTER_IRQ);

	return 0;
}

static int cherryview_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	gen8_gt_irq_postinstall(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_postinstall(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
	POSTING_READ(GEN8_MASTER_IRQ);

	return 0;
}

static void gen8_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv)
		return;

	gen8_irq_reset(dev);
}

static void valleyview_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv)
		return;

	I915_WRITE(VLV_MASTER_IER, 0);
	POSTING_READ(VLV_MASTER_IER);

	gen5_gt_irq_reset(dev);

	I915_WRITE(HWSTAM, 0xffffffff);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_reset(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);
}

static void cherryview_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv)
		return;

	I915_WRITE(GEN8_MASTER_IRQ, 0);
	POSTING_READ(GEN8_MASTER_IRQ);

	gen8_gt_irq_reset(dev_priv);

	GEN5_IRQ_RESET(GEN8_PCU_);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_reset(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);
}

static void ironlake_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv)
		return;

	ironlake_irq_reset(dev);
}

static void i8xx_irq_preinstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	POSTING_READ16(IER);
}

static int i8xx_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE16(EMR,
		     ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
	I915_WRITE16(IMR, dev_priv->irq_mask);

	I915_WRITE16(IER,
		     I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		     I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		     I915_USER_INTERRUPT);
	POSTING_READ16(IER);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	return 0;
}

/*
 * Returns true when a page flip has completed.
 */
static bool i8xx_handle_vblank(struct drm_device *dev,
			       int plane, int pipe, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);

	if (!intel_pipe_handle_vblank(dev, pipe))
		return false;

	if ((iir & flip_pending) == 0)
		goto check_page_flip;

	/* We detect FlipDone by looking for the change in PendingFlip from '1'
	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
	 * the flip is completed (no longer pending). Since this doesn't raise
	 * an interrupt per se, we watch for the change at vblank.
	 */
	if (I915_READ16(ISR) & flip_pending)
		goto check_page_flip;

	intel_prepare_page_flip(dev, plane);
	intel_finish_page_flip(dev, pipe);
	return true;

check_page_flip:
	intel_check_page_flip(dev, pipe);
	return false;
}

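/*
 * Gen2 interrupt handler. IIR is only 16 bits wide here; we keep looping
 * until no bits other than the (polled) flip-pending ones remain, reading
 * and clearing PIPESTAT under irq_lock since the pipe event bits in IIR
 * alone can't be relied upon.
 */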
static irqreturn_t i8xx_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 iir, new_iir;
	u32 pipe_stats[2];
	int pipe;
	u16 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
	irqreturn_t ret;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	ret = IRQ_NONE;
	iir = I915_READ16(IIR);
	if (iir == 0)
		goto out;

	while (iir & ~flip_mask) {
		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock(&dev_priv->irq_lock);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);

		for_each_pipe(dev_priv, pipe) {
			i915_reg_t reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff)
				I915_WRITE(reg, pipe_stats[pipe]);
		}
		spin_unlock(&dev_priv->irq_lock);

		I915_WRITE16(IIR, iir & ~flip_mask);
		new_iir = I915_READ16(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(&dev_priv->engine[RCS]);

		for_each_pipe(dev_priv, pipe) {
			int plane = pipe;
			if (HAS_FBC(dev))
				plane = !plane;

			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
			    i8xx_handle_vblank(dev, plane, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
				intel_cpu_fifo_underrun_irq_handler(dev_priv,
								    pipe);
		}

		iir = new_iir;
	}
	ret = IRQ_HANDLED;

out:
	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}

static void i8xx_irq_uninstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	for_each_pipe(dev_priv, pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	I915_WRITE16(IIR, I915_READ16(IIR));
}

static void i915_irq_preinstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	if (I915_HAS_HOTPLUG(dev)) {
		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xeffe);
	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}

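/*
 * Gen3 postinstall: unmask the display/render interrupts we always want
 * and, when the platform has hotplug support, the display port interrupt
 * as well. ASLE and pipe CRC events are then enabled via PIPESTAT.
 */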
static int i915_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 enable_mask;

	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_USER_INTERRUPT;

	if (I915_HAS_HOTPLUG(dev)) {
		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
		POSTING_READ(PORT_HOTPLUG_EN);

		/* Enable in IER... */
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
		/* and unmask in IMR */
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
	}

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	i915_enable_asle_pipestat(dev);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	return 0;
}

/*
 * Returns true when a page flip has completed.
 */
static bool i915_handle_vblank(struct drm_device *dev,
			       int plane, int pipe, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);

	if (!intel_pipe_handle_vblank(dev, pipe))
		return false;

	if ((iir & flip_pending) == 0)
		goto check_page_flip;

	/* We detect FlipDone by looking for the change in PendingFlip from '1'
	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
	 * the flip is completed (no longer pending). Since this doesn't raise
	 * an interrupt per se, we watch for the change at vblank.
	 */
	if (I915_READ(ISR) & flip_pending)
		goto check_page_flip;

	intel_prepare_page_flip(dev, plane);
	intel_finish_page_flip(dev, pipe);
	return true;

check_page_flip:
	intel_check_page_flip(dev, pipe);
	return false;
}

static irqreturn_t i915_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
	int pipe, ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	iir = I915_READ(IIR);
	do {
		bool irq_received = (iir & ~flip_mask) != 0;
		bool blc_event = false;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock(&dev_priv->irq_lock);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);

		for_each_pipe(dev_priv, pipe) {
			i915_reg_t reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/* Clear the PIPE*STAT regs before the IIR */
			if (pipe_stats[pipe] & 0x8000ffff) {
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		spin_unlock(&dev_priv->irq_lock);

		if (!irq_received)
			break;

		/* Consume port.  Then clear IIR or we'll miss events */
		if (I915_HAS_HOTPLUG(dev) &&
		    iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
			if (hotplug_status)
				i9xx_hpd_irq_handler(dev, hotplug_status);
		}

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(&dev_priv->engine[RCS]);

		for_each_pipe(dev_priv, pipe) {
			int plane = pipe;
			if (HAS_FBC(dev))
				plane = !plane;

			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev, plane, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
				intel_cpu_fifo_underrun_irq_handler(dev_priv,
								    pipe);
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero.  If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read.  This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		ret = IRQ_HANDLED;
		iir = new_iir;
	} while (iir & ~flip_mask);

	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}

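/*
 * Gen3 teardown: disable the hotplug, pipe and render interrupt sources
 * first, then clear whatever status bits are still pending so nothing
 * fires once the handler is gone.
 */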
static void i915_irq_uninstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	if (I915_HAS_HOTPLUG(dev)) {
		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xffff);
	for_each_pipe(dev_priv, pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	I915_WRITE(IIR, I915_READ(IIR));
}

static void i965_irq_preinstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xeffe);
	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}

static int i965_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 enable_mask;
	u32 error_mask;

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
			       I915_DISPLAY_PORT_INTERRUPT |
			       I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
			       I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
			       I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			       I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
			       I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask = ~dev_priv->irq_mask;
	enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
	enable_mask |= I915_USER_INTERRUPT;

	if (IS_G4X(dev))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	/*
	 * Enable some error detection, note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	I915_WRITE(EMR, error_mask);

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
	POSTING_READ(PORT_HOTPLUG_EN);

	i915_enable_asle_pipestat(dev);

	return 0;
}

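/*
 * Hotplug setup hook used by VLV/CHV and the older pre-ILK platforms that
 * have hotplug support; called via dev_priv->display.hpd_irq_setup with
 * dev_priv->irq_lock already held.
 */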
static void i915_hpd_irq_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 hotplug_en;

	assert_spin_locked(&dev_priv->irq_lock);

	/* Note HDMI and DP share hotplug bits */
	/* enable bits are the same for all generations */
	hotplug_en = intel_hpd_enabled_irqs(dev, hpd_mask_i915);
	/*
	 * Programming the CRT detection parameters tends to generate a
	 * spurious hotplug event about three seconds later. So just do it
	 * once.
	 */
	if (IS_G4X(dev))
		hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
	hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;

	/* Ignore TV since it's buggy */
	i915_hotplug_interrupt_update_locked(dev_priv,
					     HOTPLUG_INT_EN_MASK |
					     CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
					     CRT_HOTPLUG_ACTIVATION_PERIOD_64,
					     hotplug_en);
}

static irqreturn_t i965_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 iir, new_iir;
	u32 pipe_stats[I915_MAX_PIPES];
	int ret = IRQ_NONE, pipe;
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	iir = I915_READ(IIR);

	for (;;) {
		bool irq_received = (iir & ~flip_mask) != 0;
		bool blc_event = false;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock(&dev_priv->irq_lock);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);

		for_each_pipe(dev_priv, pipe) {
			i915_reg_t reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		spin_unlock(&dev_priv->irq_lock);

		if (!irq_received)
			break;

		ret = IRQ_HANDLED;

		/* Consume port.  Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
			if (hotplug_status)
				i9xx_hpd_irq_handler(dev, hotplug_status);
		}

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(&dev_priv->engine[RCS]);
		if (iir & I915_BSD_USER_INTERRUPT)
			notify_ring(&dev_priv->engine[VCS]);

		for_each_pipe(dev_priv, pipe) {
			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev, pipe, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
				intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
			gmbus_irq_handler(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero.  If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read.  This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		iir = new_iir;
	}

	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}

static void i965_irq_uninstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xffffffff);
	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe),
			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
	I915_WRITE(IIR, I915_READ(IIR));
}

/**
 * intel_irq_init - initializes irq support
 * @dev_priv: i915 device instance
 *
 * This function initializes all the irq support including work items, timers
 * and all the vtables. It does not set up the interrupt itself though.
 */
void intel_irq_init(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	intel_hpd_init_work(dev_priv);

	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);

	/* Let's track the enabled rps events */
	if (IS_VALLEYVIEW(dev_priv))
		/* WaGsvRC0ResidencyMethod:vlv */
		dev_priv->pm_rps_events = GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED;
	else
		dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;

	INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work,
			  i915_hangcheck_elapsed);

	if (IS_GEN2(dev_priv)) {
		dev->max_vblank_count = 0;
		dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
	} else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
		dev->driver->get_vblank_counter = g4x_get_vblank_counter;
	} else {
		dev->driver->get_vblank_counter = i915_get_vblank_counter;
		dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
	}

	/*
	 * Opt out of the vblank disable timer on everything except gen2.
	 * Gen2 doesn't have a hardware frame counter and so depends on
	 * vblank interrupts to produce sane vblank sequence numbers.
	 */
	if (!IS_GEN2(dev_priv))
		dev->vblank_disable_immediate = true;

	dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;

	if (IS_CHERRYVIEW(dev_priv)) {
		dev->driver->irq_handler = cherryview_irq_handler;
		dev->driver->irq_preinstall = cherryview_irq_preinstall;
		dev->driver->irq_postinstall = cherryview_irq_postinstall;
		dev->driver->irq_uninstall = cherryview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		dev->driver->irq_handler = valleyview_irq_handler;
		dev->driver->irq_preinstall = valleyview_irq_preinstall;
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
		dev->driver->irq_uninstall = valleyview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (INTEL_INFO(dev_priv)->gen >= 8) {
		dev->driver->irq_handler = gen8_irq_handler;
		dev->driver->irq_preinstall = gen8_irq_reset;
		dev->driver->irq_postinstall = gen8_irq_postinstall;
		dev->driver->irq_uninstall = gen8_irq_uninstall;
		dev->driver->enable_vblank = gen8_enable_vblank;
		dev->driver->disable_vblank = gen8_disable_vblank;
		if (IS_BROXTON(dev))
			dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
		else if (HAS_PCH_SPT(dev))
			dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
		else
			dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
	} else if (HAS_PCH_SPLIT(dev)) {
		dev->driver->irq_handler = ironlake_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_reset;
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ironlake_enable_vblank;
		dev->driver->disable_vblank = ironlake_disable_vblank;
		dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
	} else {
		if (INTEL_INFO(dev_priv)->gen == 2) {
			dev->driver->irq_preinstall = i8xx_irq_preinstall;
			dev->driver->irq_postinstall = i8xx_irq_postinstall;
			dev->driver->irq_handler = i8xx_irq_handler;
			dev->driver->irq_uninstall = i8xx_irq_uninstall;
		} else if (INTEL_INFO(dev_priv)->gen == 3) {
			dev->driver->irq_preinstall = i915_irq_preinstall;
			dev->driver->irq_postinstall = i915_irq_postinstall;
			dev->driver->irq_uninstall = i915_irq_uninstall;
			dev->driver->irq_handler = i915_irq_handler;
		} else {
			dev->driver->irq_preinstall = i965_irq_preinstall;
			dev->driver->irq_postinstall = i965_irq_postinstall;
			dev->driver->irq_uninstall = i965_irq_uninstall;
			dev->driver->irq_handler = i965_irq_handler;
		}
		if (I915_HAS_HOTPLUG(dev_priv))
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		dev->driver->enable_vblank = i915_enable_vblank;
		dev->driver->disable_vblank = i915_disable_vblank;
	}
}

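/*
 * Rough driver load ordering for the functions below (illustration only,
 * the actual call sites live in the core driver code):
 *
 *	intel_irq_init(dev_priv);
 *	...
 *	ret = intel_irq_install(dev_priv);
 *	...
 *	intel_irq_uninstall(dev_priv);
 */
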
/**
 * intel_irq_install - enables the hardware interrupt
 * @dev_priv: i915 device instance
 *
 * This function enables the hardware interrupt handling, but leaves the hotplug
 * handling still disabled. It is called after intel_irq_init().
 *
 * In the driver load and resume code we need working interrupts in a few places
 * but don't want to deal with the hassle of concurrent probe and hotplug
 * workers. Hence the split into this two-stage approach.
 */
int intel_irq_install(struct drm_i915_private *dev_priv)
{
	/*
	 * We enable some interrupt sources in our postinstall hooks, so mark
	 * interrupts as enabled _before_ actually enabling them to avoid
	 * special cases in our ordering checks.
	 */
	dev_priv->pm.irqs_enabled = true;

	return drm_irq_install(dev_priv->dev, dev_priv->dev->pdev->irq);
}

/**
 * intel_irq_uninstall - finalizes all irq handling
 * @dev_priv: i915 device instance
 *
 * This stops interrupt and hotplug handling and unregisters and frees all
 * resources acquired in the init functions.
 */
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
{
	drm_irq_uninstall(dev_priv->dev);
	intel_hpd_cancel_work(dev_priv);
	dev_priv->pm.irqs_enabled = false;
}

/**
 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
 * @dev_priv: i915 device instance
 *
 * This function is used to disable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->dev->driver->irq_uninstall(dev_priv->dev);
	dev_priv->pm.irqs_enabled = false;
	synchronize_irq(dev_priv->dev->irq);
}

/**
 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
 * @dev_priv: i915 device instance
 *
 * This function is used to enable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->pm.irqs_enabled = true;
	dev_priv->dev->driver->irq_preinstall(dev_priv->dev);
	dev_priv->dev->driver->irq_postinstall(dev_priv->dev);
}
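
/*
 * The two runtime-PM helpers above are meant to be used as a pair around a
 * suspended period, roughly (illustration only):
 *
 *	intel_runtime_pm_disable_interrupts(dev_priv);
 *	... device is suspended, no interrupts expected ...
 *	intel_runtime_pm_enable_interrupts(dev_priv);
 */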