1 /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*- 2 */ 3 /* 4 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. 5 * All Rights Reserved. 6 * 7 * Permission is hereby granted, free of charge, to any person obtaining a 8 * copy of this software and associated documentation files (the 9 * "Software"), to deal in the Software without restriction, including 10 * without limitation the rights to use, copy, modify, merge, publish, 11 * distribute, sub license, and/or sell copies of the Software, and to 12 * permit persons to whom the Software is furnished to do so, subject to 13 * the following conditions: 14 * 15 * The above copyright notice and this permission notice (including the 16 * next paragraph) shall be included in all copies or substantial portions 17 * of the Software. 18 * 19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 22 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR 23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, 24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 26 * 27 */ 28 29 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 30 31 #include <linux/sysrq.h> 32 #include <linux/slab.h> 33 #include <linux/circ_buf.h> 34 #include <drm/drmP.h> 35 #include <drm/i915_drm.h> 36 #include "i915_drv.h" 37 #include "i915_trace.h" 38 #include "intel_drv.h" 39 40 /** 41 * DOC: interrupt handling 42 * 43 * These functions provide the basic support for enabling and disabling the 44 * interrupt handling support. There's a lot more functionality in i915_irq.c 45 * and related files, but that will be described in separate chapters. 46 */ 47 48 static const u32 hpd_ilk[HPD_NUM_PINS] = { 49 [HPD_PORT_A] = DE_DP_A_HOTPLUG, 50 }; 51 52 static const u32 hpd_ivb[HPD_NUM_PINS] = { 53 [HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB, 54 }; 55 56 static const u32 hpd_bdw[HPD_NUM_PINS] = { 57 [HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG, 58 }; 59 60 static const u32 hpd_ibx[HPD_NUM_PINS] = { 61 [HPD_CRT] = SDE_CRT_HOTPLUG, 62 [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG, 63 [HPD_PORT_B] = SDE_PORTB_HOTPLUG, 64 [HPD_PORT_C] = SDE_PORTC_HOTPLUG, 65 [HPD_PORT_D] = SDE_PORTD_HOTPLUG 66 }; 67 68 static const u32 hpd_cpt[HPD_NUM_PINS] = { 69 [HPD_CRT] = SDE_CRT_HOTPLUG_CPT, 70 [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT, 71 [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT, 72 [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT, 73 [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT 74 }; 75 76 static const u32 hpd_spt[HPD_NUM_PINS] = { 77 [HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT, 78 [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT, 79 [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT, 80 [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT, 81 [HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT 82 }; 83 84 static const u32 hpd_mask_i915[HPD_NUM_PINS] = { 85 [HPD_CRT] = CRT_HOTPLUG_INT_EN, 86 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN, 87 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN, 88 [HPD_PORT_B] = PORTB_HOTPLUG_INT_EN, 89 [HPD_PORT_C] = PORTC_HOTPLUG_INT_EN, 90 [HPD_PORT_D] = PORTD_HOTPLUG_INT_EN 91 }; 92 93 static const u32 hpd_status_g4x[HPD_NUM_PINS] = { 94 [HPD_CRT] = CRT_HOTPLUG_INT_STATUS, 95 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X, 96 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X, 97 [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS, 98 [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS, 99 [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS 100 }; 101 102 static const u32 hpd_status_i915[HPD_NUM_PINS] = { 103 [HPD_CRT] = CRT_HOTPLUG_INT_STATUS, 104 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915, 105 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915, 106 [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS, 107 [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS, 108 [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS 109 }; 110 111 /* BXT hpd list */ 112 static const u32 hpd_bxt[HPD_NUM_PINS] = { 113 [HPD_PORT_A] = BXT_DE_PORT_HP_DDIA, 114 [HPD_PORT_B] = BXT_DE_PORT_HP_DDIB, 115 [HPD_PORT_C] = BXT_DE_PORT_HP_DDIC 116 }; 117 118 /* IIR can theoretically queue up two events. Be paranoid. */ 119 #define GEN8_IRQ_RESET_NDX(type, which) do { \ 120 I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \ 121 POSTING_READ(GEN8_##type##_IMR(which)); \ 122 I915_WRITE(GEN8_##type##_IER(which), 0); \ 123 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \ 124 POSTING_READ(GEN8_##type##_IIR(which)); \ 125 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \ 126 POSTING_READ(GEN8_##type##_IIR(which)); \ 127 } while (0) 128 129 #define GEN5_IRQ_RESET(type) do { \ 130 I915_WRITE(type##IMR, 0xffffffff); \ 131 POSTING_READ(type##IMR); \ 132 I915_WRITE(type##IER, 0); \ 133 I915_WRITE(type##IIR, 0xffffffff); \ 134 POSTING_READ(type##IIR); \ 135 I915_WRITE(type##IIR, 0xffffffff); \ 136 POSTING_READ(type##IIR); \ 137 } while (0) 138 139 /* 140 * We should clear IMR at preinstall/uninstall, and just check at postinstall. 141 */ 142 static void gen5_assert_iir_is_zero(struct drm_i915_private *dev_priv, 143 i915_reg_t reg) 144 { 145 u32 val = I915_READ(reg); 146 147 if (val == 0) 148 return; 149 150 WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n", 151 i915_mmio_reg_offset(reg), val); 152 I915_WRITE(reg, 0xffffffff); 153 POSTING_READ(reg); 154 I915_WRITE(reg, 0xffffffff); 155 POSTING_READ(reg); 156 } 157 158 #define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \ 159 gen5_assert_iir_is_zero(dev_priv, GEN8_##type##_IIR(which)); \ 160 I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \ 161 I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \ 162 POSTING_READ(GEN8_##type##_IMR(which)); \ 163 } while (0) 164 165 #define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \ 166 gen5_assert_iir_is_zero(dev_priv, type##IIR); \ 167 I915_WRITE(type##IER, (ier_val)); \ 168 I915_WRITE(type##IMR, (imr_val)); \ 169 POSTING_READ(type##IMR); \ 170 } while (0) 171 172 static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir); 173 174 /* For display hotplug interrupt */ 175 static inline void 176 i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv, 177 uint32_t mask, 178 uint32_t bits) 179 { 180 uint32_t val; 181 182 assert_spin_locked(&dev_priv->irq_lock); 183 WARN_ON(bits & ~mask); 184 185 val = I915_READ(PORT_HOTPLUG_EN); 186 val &= ~mask; 187 val |= bits; 188 I915_WRITE(PORT_HOTPLUG_EN, val); 189 } 190 191 /** 192 * i915_hotplug_interrupt_update - update hotplug interrupt enable 193 * @dev_priv: driver private 194 * @mask: bits to update 195 * @bits: bits to enable 196 * NOTE: the HPD enable bits are modified both inside and outside 197 * of an interrupt context. To avoid that read-modify-write cycles 198 * interfer, these bits are protected by a spinlock. Since this 199 * function is usually not called from a context where the lock is 200 * held already, this function acquires the lock itself. A non-locking 201 * version is also available. 202 */ 203 void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv, 204 uint32_t mask, 205 uint32_t bits) 206 { 207 spin_lock_irq(&dev_priv->irq_lock); 208 i915_hotplug_interrupt_update_locked(dev_priv, mask, bits); 209 spin_unlock_irq(&dev_priv->irq_lock); 210 } 211 212 /** 213 * ilk_update_display_irq - update DEIMR 214 * @dev_priv: driver private 215 * @interrupt_mask: mask of interrupt bits to update 216 * @enabled_irq_mask: mask of interrupt bits to enable 217 */ 218 void ilk_update_display_irq(struct drm_i915_private *dev_priv, 219 uint32_t interrupt_mask, 220 uint32_t enabled_irq_mask) 221 { 222 uint32_t new_val; 223 224 assert_spin_locked(&dev_priv->irq_lock); 225 226 WARN_ON(enabled_irq_mask & ~interrupt_mask); 227 228 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 229 return; 230 231 new_val = dev_priv->irq_mask; 232 new_val &= ~interrupt_mask; 233 new_val |= (~enabled_irq_mask & interrupt_mask); 234 235 if (new_val != dev_priv->irq_mask) { 236 dev_priv->irq_mask = new_val; 237 I915_WRITE(DEIMR, dev_priv->irq_mask); 238 POSTING_READ(DEIMR); 239 } 240 } 241 242 /** 243 * ilk_update_gt_irq - update GTIMR 244 * @dev_priv: driver private 245 * @interrupt_mask: mask of interrupt bits to update 246 * @enabled_irq_mask: mask of interrupt bits to enable 247 */ 248 static void ilk_update_gt_irq(struct drm_i915_private *dev_priv, 249 uint32_t interrupt_mask, 250 uint32_t enabled_irq_mask) 251 { 252 assert_spin_locked(&dev_priv->irq_lock); 253 254 WARN_ON(enabled_irq_mask & ~interrupt_mask); 255 256 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 257 return; 258 259 dev_priv->gt_irq_mask &= ~interrupt_mask; 260 dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask); 261 I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 262 POSTING_READ(GTIMR); 263 } 264 265 void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask) 266 { 267 ilk_update_gt_irq(dev_priv, mask, mask); 268 } 269 270 void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask) 271 { 272 ilk_update_gt_irq(dev_priv, mask, 0); 273 } 274 275 static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv) 276 { 277 return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR; 278 } 279 280 static i915_reg_t gen6_pm_imr(struct drm_i915_private *dev_priv) 281 { 282 return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR; 283 } 284 285 static i915_reg_t gen6_pm_ier(struct drm_i915_private *dev_priv) 286 { 287 return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER; 288 } 289 290 /** 291 * snb_update_pm_irq - update GEN6_PMIMR 292 * @dev_priv: driver private 293 * @interrupt_mask: mask of interrupt bits to update 294 * @enabled_irq_mask: mask of interrupt bits to enable 295 */ 296 static void snb_update_pm_irq(struct drm_i915_private *dev_priv, 297 uint32_t interrupt_mask, 298 uint32_t enabled_irq_mask) 299 { 300 uint32_t new_val; 301 302 WARN_ON(enabled_irq_mask & ~interrupt_mask); 303 304 assert_spin_locked(&dev_priv->irq_lock); 305 306 new_val = dev_priv->pm_irq_mask; 307 new_val &= ~interrupt_mask; 308 new_val |= (~enabled_irq_mask & interrupt_mask); 309 310 if (new_val != dev_priv->pm_irq_mask) { 311 dev_priv->pm_irq_mask = new_val; 312 I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_irq_mask); 313 POSTING_READ(gen6_pm_imr(dev_priv)); 314 } 315 } 316 317 void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask) 318 { 319 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 320 return; 321 322 snb_update_pm_irq(dev_priv, mask, mask); 323 } 324 325 static void __gen6_disable_pm_irq(struct drm_i915_private *dev_priv, 326 uint32_t mask) 327 { 328 snb_update_pm_irq(dev_priv, mask, 0); 329 } 330 331 void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask) 332 { 333 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 334 return; 335 336 __gen6_disable_pm_irq(dev_priv, mask); 337 } 338 339 void gen6_reset_rps_interrupts(struct drm_device *dev) 340 { 341 struct drm_i915_private *dev_priv = dev->dev_private; 342 i915_reg_t reg = gen6_pm_iir(dev_priv); 343 344 spin_lock_irq(&dev_priv->irq_lock); 345 I915_WRITE(reg, dev_priv->pm_rps_events); 346 I915_WRITE(reg, dev_priv->pm_rps_events); 347 POSTING_READ(reg); 348 dev_priv->rps.pm_iir = 0; 349 spin_unlock_irq(&dev_priv->irq_lock); 350 } 351 352 void gen6_enable_rps_interrupts(struct drm_device *dev) 353 { 354 struct drm_i915_private *dev_priv = dev->dev_private; 355 356 spin_lock_irq(&dev_priv->irq_lock); 357 358 WARN_ON(dev_priv->rps.pm_iir); 359 WARN_ON(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events); 360 dev_priv->rps.interrupts_enabled = true; 361 I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) | 362 dev_priv->pm_rps_events); 363 gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events); 364 365 spin_unlock_irq(&dev_priv->irq_lock); 366 } 367 368 u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask) 369 { 370 /* 371 * SNB,IVB can while VLV,CHV may hard hang on looping batchbuffer 372 * if GEN6_PM_UP_EI_EXPIRED is masked. 373 * 374 * TODO: verify if this can be reproduced on VLV,CHV. 375 */ 376 if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv)) 377 mask &= ~GEN6_PM_RP_UP_EI_EXPIRED; 378 379 if (INTEL_INFO(dev_priv)->gen >= 8) 380 mask &= ~GEN8_PMINTR_REDIRECT_TO_NON_DISP; 381 382 return mask; 383 } 384 385 void gen6_disable_rps_interrupts(struct drm_device *dev) 386 { 387 struct drm_i915_private *dev_priv = dev->dev_private; 388 389 spin_lock_irq(&dev_priv->irq_lock); 390 dev_priv->rps.interrupts_enabled = false; 391 spin_unlock_irq(&dev_priv->irq_lock); 392 393 cancel_work_sync(&dev_priv->rps.work); 394 395 spin_lock_irq(&dev_priv->irq_lock); 396 397 I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0)); 398 399 __gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events); 400 I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) & 401 ~dev_priv->pm_rps_events); 402 403 spin_unlock_irq(&dev_priv->irq_lock); 404 405 synchronize_irq(dev->irq); 406 } 407 408 /** 409 * bdw_update_port_irq - update DE port interrupt 410 * @dev_priv: driver private 411 * @interrupt_mask: mask of interrupt bits to update 412 * @enabled_irq_mask: mask of interrupt bits to enable 413 */ 414 static void bdw_update_port_irq(struct drm_i915_private *dev_priv, 415 uint32_t interrupt_mask, 416 uint32_t enabled_irq_mask) 417 { 418 uint32_t new_val; 419 uint32_t old_val; 420 421 assert_spin_locked(&dev_priv->irq_lock); 422 423 WARN_ON(enabled_irq_mask & ~interrupt_mask); 424 425 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 426 return; 427 428 old_val = I915_READ(GEN8_DE_PORT_IMR); 429 430 new_val = old_val; 431 new_val &= ~interrupt_mask; 432 new_val |= (~enabled_irq_mask & interrupt_mask); 433 434 if (new_val != old_val) { 435 I915_WRITE(GEN8_DE_PORT_IMR, new_val); 436 POSTING_READ(GEN8_DE_PORT_IMR); 437 } 438 } 439 440 /** 441 * bdw_update_pipe_irq - update DE pipe interrupt 442 * @dev_priv: driver private 443 * @pipe: pipe whose interrupt to update 444 * @interrupt_mask: mask of interrupt bits to update 445 * @enabled_irq_mask: mask of interrupt bits to enable 446 */ 447 void bdw_update_pipe_irq(struct drm_i915_private *dev_priv, 448 enum pipe pipe, 449 uint32_t interrupt_mask, 450 uint32_t enabled_irq_mask) 451 { 452 uint32_t new_val; 453 454 assert_spin_locked(&dev_priv->irq_lock); 455 456 WARN_ON(enabled_irq_mask & ~interrupt_mask); 457 458 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 459 return; 460 461 new_val = dev_priv->de_irq_mask[pipe]; 462 new_val &= ~interrupt_mask; 463 new_val |= (~enabled_irq_mask & interrupt_mask); 464 465 if (new_val != dev_priv->de_irq_mask[pipe]) { 466 dev_priv->de_irq_mask[pipe] = new_val; 467 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); 468 POSTING_READ(GEN8_DE_PIPE_IMR(pipe)); 469 } 470 } 471 472 /** 473 * ibx_display_interrupt_update - update SDEIMR 474 * @dev_priv: driver private 475 * @interrupt_mask: mask of interrupt bits to update 476 * @enabled_irq_mask: mask of interrupt bits to enable 477 */ 478 void ibx_display_interrupt_update(struct drm_i915_private *dev_priv, 479 uint32_t interrupt_mask, 480 uint32_t enabled_irq_mask) 481 { 482 uint32_t sdeimr = I915_READ(SDEIMR); 483 sdeimr &= ~interrupt_mask; 484 sdeimr |= (~enabled_irq_mask & interrupt_mask); 485 486 WARN_ON(enabled_irq_mask & ~interrupt_mask); 487 488 assert_spin_locked(&dev_priv->irq_lock); 489 490 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 491 return; 492 493 I915_WRITE(SDEIMR, sdeimr); 494 POSTING_READ(SDEIMR); 495 } 496 497 static void 498 __i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, 499 u32 enable_mask, u32 status_mask) 500 { 501 i915_reg_t reg = PIPESTAT(pipe); 502 u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK; 503 504 assert_spin_locked(&dev_priv->irq_lock); 505 WARN_ON(!intel_irqs_enabled(dev_priv)); 506 507 if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK || 508 status_mask & ~PIPESTAT_INT_STATUS_MASK, 509 "pipe %c: enable_mask=0x%x, status_mask=0x%x\n", 510 pipe_name(pipe), enable_mask, status_mask)) 511 return; 512 513 if ((pipestat & enable_mask) == enable_mask) 514 return; 515 516 dev_priv->pipestat_irq_mask[pipe] |= status_mask; 517 518 /* Enable the interrupt, clear any pending status */ 519 pipestat |= enable_mask | status_mask; 520 I915_WRITE(reg, pipestat); 521 POSTING_READ(reg); 522 } 523 524 static void 525 __i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, 526 u32 enable_mask, u32 status_mask) 527 { 528 i915_reg_t reg = PIPESTAT(pipe); 529 u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK; 530 531 assert_spin_locked(&dev_priv->irq_lock); 532 WARN_ON(!intel_irqs_enabled(dev_priv)); 533 534 if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK || 535 status_mask & ~PIPESTAT_INT_STATUS_MASK, 536 "pipe %c: enable_mask=0x%x, status_mask=0x%x\n", 537 pipe_name(pipe), enable_mask, status_mask)) 538 return; 539 540 if ((pipestat & enable_mask) == 0) 541 return; 542 543 dev_priv->pipestat_irq_mask[pipe] &= ~status_mask; 544 545 pipestat &= ~enable_mask; 546 I915_WRITE(reg, pipestat); 547 POSTING_READ(reg); 548 } 549 550 static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask) 551 { 552 u32 enable_mask = status_mask << 16; 553 554 /* 555 * On pipe A we don't support the PSR interrupt yet, 556 * on pipe B and C the same bit MBZ. 557 */ 558 if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV)) 559 return 0; 560 /* 561 * On pipe B and C we don't support the PSR interrupt yet, on pipe 562 * A the same bit is for perf counters which we don't use either. 563 */ 564 if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV)) 565 return 0; 566 567 enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS | 568 SPRITE0_FLIP_DONE_INT_EN_VLV | 569 SPRITE1_FLIP_DONE_INT_EN_VLV); 570 if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV) 571 enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV; 572 if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV) 573 enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV; 574 575 return enable_mask; 576 } 577 578 void 579 i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, 580 u32 status_mask) 581 { 582 u32 enable_mask; 583 584 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 585 enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev, 586 status_mask); 587 else 588 enable_mask = status_mask << 16; 589 __i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask); 590 } 591 592 void 593 i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, 594 u32 status_mask) 595 { 596 u32 enable_mask; 597 598 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 599 enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev, 600 status_mask); 601 else 602 enable_mask = status_mask << 16; 603 __i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask); 604 } 605 606 /** 607 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion 608 * @dev: drm device 609 */ 610 static void i915_enable_asle_pipestat(struct drm_device *dev) 611 { 612 struct drm_i915_private *dev_priv = dev->dev_private; 613 614 if (!dev_priv->opregion.asle || !IS_MOBILE(dev)) 615 return; 616 617 spin_lock_irq(&dev_priv->irq_lock); 618 619 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS); 620 if (INTEL_INFO(dev)->gen >= 4) 621 i915_enable_pipestat(dev_priv, PIPE_A, 622 PIPE_LEGACY_BLC_EVENT_STATUS); 623 624 spin_unlock_irq(&dev_priv->irq_lock); 625 } 626 627 /* 628 * This timing diagram depicts the video signal in and 629 * around the vertical blanking period. 630 * 631 * Assumptions about the fictitious mode used in this example: 632 * vblank_start >= 3 633 * vsync_start = vblank_start + 1 634 * vsync_end = vblank_start + 2 635 * vtotal = vblank_start + 3 636 * 637 * start of vblank: 638 * latch double buffered registers 639 * increment frame counter (ctg+) 640 * generate start of vblank interrupt (gen4+) 641 * | 642 * | frame start: 643 * | generate frame start interrupt (aka. vblank interrupt) (gmch) 644 * | may be shifted forward 1-3 extra lines via PIPECONF 645 * | | 646 * | | start of vsync: 647 * | | generate vsync interrupt 648 * | | | 649 * ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx 650 * . \hs/ . \hs/ \hs/ \hs/ . \hs/ 651 * ----va---> <-----------------vb--------------------> <--------va------------- 652 * | | <----vs-----> | 653 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2) 654 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+) 655 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi) 656 * | | | 657 * last visible pixel first visible pixel 658 * | increment frame counter (gen3/4) 659 * pixel counter = vblank_start * htotal pixel counter = 0 (gen3/4) 660 * 661 * x = horizontal active 662 * _ = horizontal blanking 663 * hs = horizontal sync 664 * va = vertical active 665 * vb = vertical blanking 666 * vs = vertical sync 667 * vbs = vblank_start (number) 668 * 669 * Summary: 670 * - most events happen at the start of horizontal sync 671 * - frame start happens at the start of horizontal blank, 1-4 lines 672 * (depending on PIPECONF settings) after the start of vblank 673 * - gen3/4 pixel and frame counter are synchronized with the start 674 * of horizontal active on the first line of vertical active 675 */ 676 677 static u32 i8xx_get_vblank_counter(struct drm_device *dev, unsigned int pipe) 678 { 679 /* Gen2 doesn't have a hardware frame counter */ 680 return 0; 681 } 682 683 /* Called from drm generic code, passed a 'crtc', which 684 * we use as a pipe index 685 */ 686 static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe) 687 { 688 struct drm_i915_private *dev_priv = dev->dev_private; 689 i915_reg_t high_frame, low_frame; 690 u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal; 691 struct intel_crtc *intel_crtc = 692 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); 693 const struct drm_display_mode *mode = &intel_crtc->base.hwmode; 694 695 htotal = mode->crtc_htotal; 696 hsync_start = mode->crtc_hsync_start; 697 vbl_start = mode->crtc_vblank_start; 698 if (mode->flags & DRM_MODE_FLAG_INTERLACE) 699 vbl_start = DIV_ROUND_UP(vbl_start, 2); 700 701 /* Convert to pixel count */ 702 vbl_start *= htotal; 703 704 /* Start of vblank event occurs at start of hsync */ 705 vbl_start -= htotal - hsync_start; 706 707 high_frame = PIPEFRAME(pipe); 708 low_frame = PIPEFRAMEPIXEL(pipe); 709 710 /* 711 * High & low register fields aren't synchronized, so make sure 712 * we get a low value that's stable across two reads of the high 713 * register. 714 */ 715 do { 716 high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK; 717 low = I915_READ(low_frame); 718 high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK; 719 } while (high1 != high2); 720 721 high1 >>= PIPE_FRAME_HIGH_SHIFT; 722 pixel = low & PIPE_PIXEL_MASK; 723 low >>= PIPE_FRAME_LOW_SHIFT; 724 725 /* 726 * The frame counter increments at beginning of active. 727 * Cook up a vblank counter by also checking the pixel 728 * counter against vblank start. 729 */ 730 return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff; 731 } 732 733 static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe) 734 { 735 struct drm_i915_private *dev_priv = dev->dev_private; 736 737 return I915_READ(PIPE_FRMCOUNT_G4X(pipe)); 738 } 739 740 /* I915_READ_FW, only for fast reads of display block, no need for forcewake etc. */ 741 static int __intel_get_crtc_scanline(struct intel_crtc *crtc) 742 { 743 struct drm_device *dev = crtc->base.dev; 744 struct drm_i915_private *dev_priv = dev->dev_private; 745 const struct drm_display_mode *mode = &crtc->base.hwmode; 746 enum pipe pipe = crtc->pipe; 747 int position, vtotal; 748 749 vtotal = mode->crtc_vtotal; 750 if (mode->flags & DRM_MODE_FLAG_INTERLACE) 751 vtotal /= 2; 752 753 if (IS_GEN2(dev)) 754 position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2; 755 else 756 position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3; 757 758 /* 759 * On HSW, the DSL reg (0x70000) appears to return 0 if we 760 * read it just before the start of vblank. So try it again 761 * so we don't accidentally end up spanning a vblank frame 762 * increment, causing the pipe_update_end() code to squak at us. 763 * 764 * The nature of this problem means we can't simply check the ISR 765 * bit and return the vblank start value; nor can we use the scanline 766 * debug register in the transcoder as it appears to have the same 767 * problem. We may need to extend this to include other platforms, 768 * but so far testing only shows the problem on HSW. 769 */ 770 if (HAS_DDI(dev) && !position) { 771 int i, temp; 772 773 for (i = 0; i < 100; i++) { 774 udelay(1); 775 temp = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & 776 DSL_LINEMASK_GEN3; 777 if (temp != position) { 778 position = temp; 779 break; 780 } 781 } 782 } 783 784 /* 785 * See update_scanline_offset() for the details on the 786 * scanline_offset adjustment. 787 */ 788 return (position + crtc->scanline_offset) % vtotal; 789 } 790 791 static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe, 792 unsigned int flags, int *vpos, int *hpos, 793 ktime_t *stime, ktime_t *etime, 794 const struct drm_display_mode *mode) 795 { 796 struct drm_i915_private *dev_priv = dev->dev_private; 797 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 798 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 799 int position; 800 int vbl_start, vbl_end, hsync_start, htotal, vtotal; 801 bool in_vbl = true; 802 int ret = 0; 803 unsigned long irqflags; 804 805 if (WARN_ON(!mode->crtc_clock)) { 806 DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled " 807 "pipe %c\n", pipe_name(pipe)); 808 return 0; 809 } 810 811 htotal = mode->crtc_htotal; 812 hsync_start = mode->crtc_hsync_start; 813 vtotal = mode->crtc_vtotal; 814 vbl_start = mode->crtc_vblank_start; 815 vbl_end = mode->crtc_vblank_end; 816 817 if (mode->flags & DRM_MODE_FLAG_INTERLACE) { 818 vbl_start = DIV_ROUND_UP(vbl_start, 2); 819 vbl_end /= 2; 820 vtotal /= 2; 821 } 822 823 ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE; 824 825 /* 826 * Lock uncore.lock, as we will do multiple timing critical raw 827 * register reads, potentially with preemption disabled, so the 828 * following code must not block on uncore.lock. 829 */ 830 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 831 832 /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */ 833 834 /* Get optional system timestamp before query. */ 835 if (stime) 836 *stime = ktime_get(); 837 838 if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) { 839 /* No obvious pixelcount register. Only query vertical 840 * scanout position from Display scan line register. 841 */ 842 position = __intel_get_crtc_scanline(intel_crtc); 843 } else { 844 /* Have access to pixelcount since start of frame. 845 * We can split this into vertical and horizontal 846 * scanout position. 847 */ 848 position = (I915_READ_FW(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT; 849 850 /* convert to pixel counts */ 851 vbl_start *= htotal; 852 vbl_end *= htotal; 853 vtotal *= htotal; 854 855 /* 856 * In interlaced modes, the pixel counter counts all pixels, 857 * so one field will have htotal more pixels. In order to avoid 858 * the reported position from jumping backwards when the pixel 859 * counter is beyond the length of the shorter field, just 860 * clamp the position the length of the shorter field. This 861 * matches how the scanline counter based position works since 862 * the scanline counter doesn't count the two half lines. 863 */ 864 if (position >= vtotal) 865 position = vtotal - 1; 866 867 /* 868 * Start of vblank interrupt is triggered at start of hsync, 869 * just prior to the first active line of vblank. However we 870 * consider lines to start at the leading edge of horizontal 871 * active. So, should we get here before we've crossed into 872 * the horizontal active of the first line in vblank, we would 873 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that, 874 * always add htotal-hsync_start to the current pixel position. 875 */ 876 position = (position + htotal - hsync_start) % vtotal; 877 } 878 879 /* Get optional system timestamp after query. */ 880 if (etime) 881 *etime = ktime_get(); 882 883 /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */ 884 885 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 886 887 in_vbl = position >= vbl_start && position < vbl_end; 888 889 /* 890 * While in vblank, position will be negative 891 * counting up towards 0 at vbl_end. And outside 892 * vblank, position will be positive counting 893 * up since vbl_end. 894 */ 895 if (position >= vbl_start) 896 position -= vbl_end; 897 else 898 position += vtotal - vbl_end; 899 900 if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) { 901 *vpos = position; 902 *hpos = 0; 903 } else { 904 *vpos = position / htotal; 905 *hpos = position - (*vpos * htotal); 906 } 907 908 /* In vblank? */ 909 if (in_vbl) 910 ret |= DRM_SCANOUTPOS_IN_VBLANK; 911 912 return ret; 913 } 914 915 int intel_get_crtc_scanline(struct intel_crtc *crtc) 916 { 917 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; 918 unsigned long irqflags; 919 int position; 920 921 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 922 position = __intel_get_crtc_scanline(crtc); 923 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 924 925 return position; 926 } 927 928 static int i915_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe, 929 int *max_error, 930 struct timeval *vblank_time, 931 unsigned flags) 932 { 933 struct drm_crtc *crtc; 934 935 if (pipe >= INTEL_INFO(dev)->num_pipes) { 936 DRM_ERROR("Invalid crtc %u\n", pipe); 937 return -EINVAL; 938 } 939 940 /* Get drm_crtc to timestamp: */ 941 crtc = intel_get_crtc_for_pipe(dev, pipe); 942 if (crtc == NULL) { 943 DRM_ERROR("Invalid crtc %u\n", pipe); 944 return -EINVAL; 945 } 946 947 if (!crtc->hwmode.crtc_clock) { 948 DRM_DEBUG_KMS("crtc %u is disabled\n", pipe); 949 return -EBUSY; 950 } 951 952 /* Helper routine in DRM core does all the work: */ 953 return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error, 954 vblank_time, flags, 955 &crtc->hwmode); 956 } 957 958 static void ironlake_rps_change_irq_handler(struct drm_device *dev) 959 { 960 struct drm_i915_private *dev_priv = dev->dev_private; 961 u32 busy_up, busy_down, max_avg, min_avg; 962 u8 new_delay; 963 964 spin_lock(&mchdev_lock); 965 966 I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS)); 967 968 new_delay = dev_priv->ips.cur_delay; 969 970 I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG); 971 busy_up = I915_READ(RCPREVBSYTUPAVG); 972 busy_down = I915_READ(RCPREVBSYTDNAVG); 973 max_avg = I915_READ(RCBMAXAVG); 974 min_avg = I915_READ(RCBMINAVG); 975 976 /* Handle RCS change request from hw */ 977 if (busy_up > max_avg) { 978 if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay) 979 new_delay = dev_priv->ips.cur_delay - 1; 980 if (new_delay < dev_priv->ips.max_delay) 981 new_delay = dev_priv->ips.max_delay; 982 } else if (busy_down < min_avg) { 983 if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay) 984 new_delay = dev_priv->ips.cur_delay + 1; 985 if (new_delay > dev_priv->ips.min_delay) 986 new_delay = dev_priv->ips.min_delay; 987 } 988 989 if (ironlake_set_drps(dev, new_delay)) 990 dev_priv->ips.cur_delay = new_delay; 991 992 spin_unlock(&mchdev_lock); 993 994 return; 995 } 996 997 static void notify_ring(struct intel_engine_cs *ring) 998 { 999 if (!intel_ring_initialized(ring)) 1000 return; 1001 1002 trace_i915_gem_request_notify(ring); 1003 1004 wake_up_all(&ring->irq_queue); 1005 } 1006 1007 static void vlv_c0_read(struct drm_i915_private *dev_priv, 1008 struct intel_rps_ei *ei) 1009 { 1010 ei->cz_clock = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP); 1011 ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT); 1012 ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT); 1013 } 1014 1015 static bool vlv_c0_above(struct drm_i915_private *dev_priv, 1016 const struct intel_rps_ei *old, 1017 const struct intel_rps_ei *now, 1018 int threshold) 1019 { 1020 u64 time, c0; 1021 unsigned int mul = 100; 1022 1023 if (old->cz_clock == 0) 1024 return false; 1025 1026 if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH) 1027 mul <<= 8; 1028 1029 time = now->cz_clock - old->cz_clock; 1030 time *= threshold * dev_priv->czclk_freq; 1031 1032 /* Workload can be split between render + media, e.g. SwapBuffers 1033 * being blitted in X after being rendered in mesa. To account for 1034 * this we need to combine both engines into our activity counter. 1035 */ 1036 c0 = now->render_c0 - old->render_c0; 1037 c0 += now->media_c0 - old->media_c0; 1038 c0 *= mul * VLV_CZ_CLOCK_TO_MILLI_SEC; 1039 1040 return c0 >= time; 1041 } 1042 1043 void gen6_rps_reset_ei(struct drm_i915_private *dev_priv) 1044 { 1045 vlv_c0_read(dev_priv, &dev_priv->rps.down_ei); 1046 dev_priv->rps.up_ei = dev_priv->rps.down_ei; 1047 } 1048 1049 static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir) 1050 { 1051 struct intel_rps_ei now; 1052 u32 events = 0; 1053 1054 if ((pm_iir & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED)) == 0) 1055 return 0; 1056 1057 vlv_c0_read(dev_priv, &now); 1058 if (now.cz_clock == 0) 1059 return 0; 1060 1061 if (pm_iir & GEN6_PM_RP_DOWN_EI_EXPIRED) { 1062 if (!vlv_c0_above(dev_priv, 1063 &dev_priv->rps.down_ei, &now, 1064 dev_priv->rps.down_threshold)) 1065 events |= GEN6_PM_RP_DOWN_THRESHOLD; 1066 dev_priv->rps.down_ei = now; 1067 } 1068 1069 if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) { 1070 if (vlv_c0_above(dev_priv, 1071 &dev_priv->rps.up_ei, &now, 1072 dev_priv->rps.up_threshold)) 1073 events |= GEN6_PM_RP_UP_THRESHOLD; 1074 dev_priv->rps.up_ei = now; 1075 } 1076 1077 return events; 1078 } 1079 1080 static bool any_waiters(struct drm_i915_private *dev_priv) 1081 { 1082 struct intel_engine_cs *ring; 1083 int i; 1084 1085 for_each_ring(ring, dev_priv, i) 1086 if (ring->irq_refcount) 1087 return true; 1088 1089 return false; 1090 } 1091 1092 static void gen6_pm_rps_work(struct work_struct *work) 1093 { 1094 struct drm_i915_private *dev_priv = 1095 container_of(work, struct drm_i915_private, rps.work); 1096 bool client_boost; 1097 int new_delay, adj, min, max; 1098 u32 pm_iir; 1099 1100 spin_lock_irq(&dev_priv->irq_lock); 1101 /* Speed up work cancelation during disabling rps interrupts. */ 1102 if (!dev_priv->rps.interrupts_enabled) { 1103 spin_unlock_irq(&dev_priv->irq_lock); 1104 return; 1105 } 1106 1107 /* 1108 * The RPS work is synced during runtime suspend, we don't require a 1109 * wakeref. TODO: instead of disabling the asserts make sure that we 1110 * always hold an RPM reference while the work is running. 1111 */ 1112 DISABLE_RPM_WAKEREF_ASSERTS(dev_priv); 1113 1114 pm_iir = dev_priv->rps.pm_iir; 1115 dev_priv->rps.pm_iir = 0; 1116 /* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */ 1117 gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events); 1118 client_boost = dev_priv->rps.client_boost; 1119 dev_priv->rps.client_boost = false; 1120 spin_unlock_irq(&dev_priv->irq_lock); 1121 1122 /* Make sure we didn't queue anything we're not going to process. */ 1123 WARN_ON(pm_iir & ~dev_priv->pm_rps_events); 1124 1125 if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost) 1126 goto out; 1127 1128 mutex_lock(&dev_priv->rps.hw_lock); 1129 1130 pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir); 1131 1132 adj = dev_priv->rps.last_adj; 1133 new_delay = dev_priv->rps.cur_freq; 1134 min = dev_priv->rps.min_freq_softlimit; 1135 max = dev_priv->rps.max_freq_softlimit; 1136 1137 if (client_boost) { 1138 new_delay = dev_priv->rps.max_freq_softlimit; 1139 adj = 0; 1140 } else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) { 1141 if (adj > 0) 1142 adj *= 2; 1143 else /* CHV needs even encode values */ 1144 adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1; 1145 /* 1146 * For better performance, jump directly 1147 * to RPe if we're below it. 1148 */ 1149 if (new_delay < dev_priv->rps.efficient_freq - adj) { 1150 new_delay = dev_priv->rps.efficient_freq; 1151 adj = 0; 1152 } 1153 } else if (any_waiters(dev_priv)) { 1154 adj = 0; 1155 } else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) { 1156 if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq) 1157 new_delay = dev_priv->rps.efficient_freq; 1158 else 1159 new_delay = dev_priv->rps.min_freq_softlimit; 1160 adj = 0; 1161 } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) { 1162 if (adj < 0) 1163 adj *= 2; 1164 else /* CHV needs even encode values */ 1165 adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1; 1166 } else { /* unknown event */ 1167 adj = 0; 1168 } 1169 1170 dev_priv->rps.last_adj = adj; 1171 1172 /* sysfs frequency interfaces may have snuck in while servicing the 1173 * interrupt 1174 */ 1175 new_delay += adj; 1176 new_delay = clamp_t(int, new_delay, min, max); 1177 1178 intel_set_rps(dev_priv->dev, new_delay); 1179 1180 mutex_unlock(&dev_priv->rps.hw_lock); 1181 out: 1182 ENABLE_RPM_WAKEREF_ASSERTS(dev_priv); 1183 } 1184 1185 1186 /** 1187 * ivybridge_parity_work - Workqueue called when a parity error interrupt 1188 * occurred. 1189 * @work: workqueue struct 1190 * 1191 * Doesn't actually do anything except notify userspace. As a consequence of 1192 * this event, userspace should try to remap the bad rows since statistically 1193 * it is likely the same row is more likely to go bad again. 1194 */ 1195 static void ivybridge_parity_work(struct work_struct *work) 1196 { 1197 struct drm_i915_private *dev_priv = 1198 container_of(work, struct drm_i915_private, l3_parity.error_work); 1199 u32 error_status, row, bank, subbank; 1200 char *parity_event[6]; 1201 uint32_t misccpctl; 1202 uint8_t slice = 0; 1203 1204 /* We must turn off DOP level clock gating to access the L3 registers. 1205 * In order to prevent a get/put style interface, acquire struct mutex 1206 * any time we access those registers. 1207 */ 1208 mutex_lock(&dev_priv->dev->struct_mutex); 1209 1210 /* If we've screwed up tracking, just let the interrupt fire again */ 1211 if (WARN_ON(!dev_priv->l3_parity.which_slice)) 1212 goto out; 1213 1214 misccpctl = I915_READ(GEN7_MISCCPCTL); 1215 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE); 1216 POSTING_READ(GEN7_MISCCPCTL); 1217 1218 while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) { 1219 i915_reg_t reg; 1220 1221 slice--; 1222 if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev))) 1223 break; 1224 1225 dev_priv->l3_parity.which_slice &= ~(1<<slice); 1226 1227 reg = GEN7_L3CDERRST1(slice); 1228 1229 error_status = I915_READ(reg); 1230 row = GEN7_PARITY_ERROR_ROW(error_status); 1231 bank = GEN7_PARITY_ERROR_BANK(error_status); 1232 subbank = GEN7_PARITY_ERROR_SUBBANK(error_status); 1233 1234 I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE); 1235 POSTING_READ(reg); 1236 1237 parity_event[0] = I915_L3_PARITY_UEVENT "=1"; 1238 parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row); 1239 parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank); 1240 parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank); 1241 parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice); 1242 parity_event[5] = NULL; 1243 1244 kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj, 1245 KOBJ_CHANGE, parity_event); 1246 1247 DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n", 1248 slice, row, bank, subbank); 1249 1250 kfree(parity_event[4]); 1251 kfree(parity_event[3]); 1252 kfree(parity_event[2]); 1253 kfree(parity_event[1]); 1254 } 1255 1256 I915_WRITE(GEN7_MISCCPCTL, misccpctl); 1257 1258 out: 1259 WARN_ON(dev_priv->l3_parity.which_slice); 1260 spin_lock_irq(&dev_priv->irq_lock); 1261 gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev)); 1262 spin_unlock_irq(&dev_priv->irq_lock); 1263 1264 mutex_unlock(&dev_priv->dev->struct_mutex); 1265 } 1266 1267 static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir) 1268 { 1269 struct drm_i915_private *dev_priv = dev->dev_private; 1270 1271 if (!HAS_L3_DPF(dev)) 1272 return; 1273 1274 spin_lock(&dev_priv->irq_lock); 1275 gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev)); 1276 spin_unlock(&dev_priv->irq_lock); 1277 1278 iir &= GT_PARITY_ERROR(dev); 1279 if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1) 1280 dev_priv->l3_parity.which_slice |= 1 << 1; 1281 1282 if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT) 1283 dev_priv->l3_parity.which_slice |= 1 << 0; 1284 1285 queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work); 1286 } 1287 1288 static void ilk_gt_irq_handler(struct drm_device *dev, 1289 struct drm_i915_private *dev_priv, 1290 u32 gt_iir) 1291 { 1292 if (gt_iir & 1293 (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT)) 1294 notify_ring(&dev_priv->ring[RCS]); 1295 if (gt_iir & ILK_BSD_USER_INTERRUPT) 1296 notify_ring(&dev_priv->ring[VCS]); 1297 } 1298 1299 static void snb_gt_irq_handler(struct drm_device *dev, 1300 struct drm_i915_private *dev_priv, 1301 u32 gt_iir) 1302 { 1303 1304 if (gt_iir & 1305 (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT)) 1306 notify_ring(&dev_priv->ring[RCS]); 1307 if (gt_iir & GT_BSD_USER_INTERRUPT) 1308 notify_ring(&dev_priv->ring[VCS]); 1309 if (gt_iir & GT_BLT_USER_INTERRUPT) 1310 notify_ring(&dev_priv->ring[BCS]); 1311 1312 if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT | 1313 GT_BSD_CS_ERROR_INTERRUPT | 1314 GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) 1315 DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir); 1316 1317 if (gt_iir & GT_PARITY_ERROR(dev)) 1318 ivybridge_parity_error_irq_handler(dev, gt_iir); 1319 } 1320 1321 static __always_inline void 1322 gen8_cs_irq_handler(struct intel_engine_cs *ring, u32 iir, int test_shift) 1323 { 1324 if (iir & (GT_RENDER_USER_INTERRUPT << test_shift)) 1325 notify_ring(ring); 1326 if (iir & (GT_CONTEXT_SWITCH_INTERRUPT << test_shift)) 1327 intel_lrc_irq_handler(ring); 1328 } 1329 1330 static irqreturn_t gen8_gt_irq_handler(struct drm_i915_private *dev_priv, 1331 u32 master_ctl) 1332 { 1333 irqreturn_t ret = IRQ_NONE; 1334 1335 if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) { 1336 u32 iir = I915_READ_FW(GEN8_GT_IIR(0)); 1337 if (iir) { 1338 I915_WRITE_FW(GEN8_GT_IIR(0), iir); 1339 ret = IRQ_HANDLED; 1340 1341 gen8_cs_irq_handler(&dev_priv->ring[RCS], 1342 iir, GEN8_RCS_IRQ_SHIFT); 1343 1344 gen8_cs_irq_handler(&dev_priv->ring[BCS], 1345 iir, GEN8_BCS_IRQ_SHIFT); 1346 } else 1347 DRM_ERROR("The master control interrupt lied (GT0)!\n"); 1348 } 1349 1350 if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) { 1351 u32 iir = I915_READ_FW(GEN8_GT_IIR(1)); 1352 if (iir) { 1353 I915_WRITE_FW(GEN8_GT_IIR(1), iir); 1354 ret = IRQ_HANDLED; 1355 1356 gen8_cs_irq_handler(&dev_priv->ring[VCS], 1357 iir, GEN8_VCS1_IRQ_SHIFT); 1358 1359 gen8_cs_irq_handler(&dev_priv->ring[VCS2], 1360 iir, GEN8_VCS2_IRQ_SHIFT); 1361 } else 1362 DRM_ERROR("The master control interrupt lied (GT1)!\n"); 1363 } 1364 1365 if (master_ctl & GEN8_GT_VECS_IRQ) { 1366 u32 iir = I915_READ_FW(GEN8_GT_IIR(3)); 1367 if (iir) { 1368 I915_WRITE_FW(GEN8_GT_IIR(3), iir); 1369 ret = IRQ_HANDLED; 1370 1371 gen8_cs_irq_handler(&dev_priv->ring[VECS], 1372 iir, GEN8_VECS_IRQ_SHIFT); 1373 } else 1374 DRM_ERROR("The master control interrupt lied (GT3)!\n"); 1375 } 1376 1377 if (master_ctl & GEN8_GT_PM_IRQ) { 1378 u32 iir = I915_READ_FW(GEN8_GT_IIR(2)); 1379 if (iir & dev_priv->pm_rps_events) { 1380 I915_WRITE_FW(GEN8_GT_IIR(2), 1381 iir & dev_priv->pm_rps_events); 1382 ret = IRQ_HANDLED; 1383 gen6_rps_irq_handler(dev_priv, iir); 1384 } else 1385 DRM_ERROR("The master control interrupt lied (PM)!\n"); 1386 } 1387 1388 return ret; 1389 } 1390 1391 static bool bxt_port_hotplug_long_detect(enum port port, u32 val) 1392 { 1393 switch (port) { 1394 case PORT_A: 1395 return val & PORTA_HOTPLUG_LONG_DETECT; 1396 case PORT_B: 1397 return val & PORTB_HOTPLUG_LONG_DETECT; 1398 case PORT_C: 1399 return val & PORTC_HOTPLUG_LONG_DETECT; 1400 default: 1401 return false; 1402 } 1403 } 1404 1405 static bool spt_port_hotplug2_long_detect(enum port port, u32 val) 1406 { 1407 switch (port) { 1408 case PORT_E: 1409 return val & PORTE_HOTPLUG_LONG_DETECT; 1410 default: 1411 return false; 1412 } 1413 } 1414 1415 static bool spt_port_hotplug_long_detect(enum port port, u32 val) 1416 { 1417 switch (port) { 1418 case PORT_A: 1419 return val & PORTA_HOTPLUG_LONG_DETECT; 1420 case PORT_B: 1421 return val & PORTB_HOTPLUG_LONG_DETECT; 1422 case PORT_C: 1423 return val & PORTC_HOTPLUG_LONG_DETECT; 1424 case PORT_D: 1425 return val & PORTD_HOTPLUG_LONG_DETECT; 1426 default: 1427 return false; 1428 } 1429 } 1430 1431 static bool ilk_port_hotplug_long_detect(enum port port, u32 val) 1432 { 1433 switch (port) { 1434 case PORT_A: 1435 return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT; 1436 default: 1437 return false; 1438 } 1439 } 1440 1441 static bool pch_port_hotplug_long_detect(enum port port, u32 val) 1442 { 1443 switch (port) { 1444 case PORT_B: 1445 return val & PORTB_HOTPLUG_LONG_DETECT; 1446 case PORT_C: 1447 return val & PORTC_HOTPLUG_LONG_DETECT; 1448 case PORT_D: 1449 return val & PORTD_HOTPLUG_LONG_DETECT; 1450 default: 1451 return false; 1452 } 1453 } 1454 1455 static bool i9xx_port_hotplug_long_detect(enum port port, u32 val) 1456 { 1457 switch (port) { 1458 case PORT_B: 1459 return val & PORTB_HOTPLUG_INT_LONG_PULSE; 1460 case PORT_C: 1461 return val & PORTC_HOTPLUG_INT_LONG_PULSE; 1462 case PORT_D: 1463 return val & PORTD_HOTPLUG_INT_LONG_PULSE; 1464 default: 1465 return false; 1466 } 1467 } 1468 1469 /* 1470 * Get a bit mask of pins that have triggered, and which ones may be long. 1471 * This can be called multiple times with the same masks to accumulate 1472 * hotplug detection results from several registers. 1473 * 1474 * Note that the caller is expected to zero out the masks initially. 1475 */ 1476 static void intel_get_hpd_pins(u32 *pin_mask, u32 *long_mask, 1477 u32 hotplug_trigger, u32 dig_hotplug_reg, 1478 const u32 hpd[HPD_NUM_PINS], 1479 bool long_pulse_detect(enum port port, u32 val)) 1480 { 1481 enum port port; 1482 int i; 1483 1484 for_each_hpd_pin(i) { 1485 if ((hpd[i] & hotplug_trigger) == 0) 1486 continue; 1487 1488 *pin_mask |= BIT(i); 1489 1490 if (!intel_hpd_pin_to_port(i, &port)) 1491 continue; 1492 1493 if (long_pulse_detect(port, dig_hotplug_reg)) 1494 *long_mask |= BIT(i); 1495 } 1496 1497 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x\n", 1498 hotplug_trigger, dig_hotplug_reg, *pin_mask); 1499 1500 } 1501 1502 static void gmbus_irq_handler(struct drm_device *dev) 1503 { 1504 struct drm_i915_private *dev_priv = dev->dev_private; 1505 1506 wake_up_all(&dev_priv->gmbus_wait_queue); 1507 } 1508 1509 static void dp_aux_irq_handler(struct drm_device *dev) 1510 { 1511 struct drm_i915_private *dev_priv = dev->dev_private; 1512 1513 wake_up_all(&dev_priv->gmbus_wait_queue); 1514 } 1515 1516 #if defined(CONFIG_DEBUG_FS) 1517 static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe, 1518 uint32_t crc0, uint32_t crc1, 1519 uint32_t crc2, uint32_t crc3, 1520 uint32_t crc4) 1521 { 1522 struct drm_i915_private *dev_priv = dev->dev_private; 1523 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe]; 1524 struct intel_pipe_crc_entry *entry; 1525 int head, tail; 1526 1527 spin_lock(&pipe_crc->lock); 1528 1529 if (!pipe_crc->entries) { 1530 spin_unlock(&pipe_crc->lock); 1531 DRM_DEBUG_KMS("spurious interrupt\n"); 1532 return; 1533 } 1534 1535 head = pipe_crc->head; 1536 tail = pipe_crc->tail; 1537 1538 if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) { 1539 spin_unlock(&pipe_crc->lock); 1540 DRM_ERROR("CRC buffer overflowing\n"); 1541 return; 1542 } 1543 1544 entry = &pipe_crc->entries[head]; 1545 1546 entry->frame = dev->driver->get_vblank_counter(dev, pipe); 1547 entry->crc[0] = crc0; 1548 entry->crc[1] = crc1; 1549 entry->crc[2] = crc2; 1550 entry->crc[3] = crc3; 1551 entry->crc[4] = crc4; 1552 1553 head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1); 1554 pipe_crc->head = head; 1555 1556 spin_unlock(&pipe_crc->lock); 1557 1558 wake_up_interruptible(&pipe_crc->wq); 1559 } 1560 #else 1561 static inline void 1562 display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe, 1563 uint32_t crc0, uint32_t crc1, 1564 uint32_t crc2, uint32_t crc3, 1565 uint32_t crc4) {} 1566 #endif 1567 1568 1569 static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe) 1570 { 1571 struct drm_i915_private *dev_priv = dev->dev_private; 1572 1573 display_pipe_crc_irq_handler(dev, pipe, 1574 I915_READ(PIPE_CRC_RES_1_IVB(pipe)), 1575 0, 0, 0, 0); 1576 } 1577 1578 static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe) 1579 { 1580 struct drm_i915_private *dev_priv = dev->dev_private; 1581 1582 display_pipe_crc_irq_handler(dev, pipe, 1583 I915_READ(PIPE_CRC_RES_1_IVB(pipe)), 1584 I915_READ(PIPE_CRC_RES_2_IVB(pipe)), 1585 I915_READ(PIPE_CRC_RES_3_IVB(pipe)), 1586 I915_READ(PIPE_CRC_RES_4_IVB(pipe)), 1587 I915_READ(PIPE_CRC_RES_5_IVB(pipe))); 1588 } 1589 1590 static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe) 1591 { 1592 struct drm_i915_private *dev_priv = dev->dev_private; 1593 uint32_t res1, res2; 1594 1595 if (INTEL_INFO(dev)->gen >= 3) 1596 res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe)); 1597 else 1598 res1 = 0; 1599 1600 if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) 1601 res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe)); 1602 else 1603 res2 = 0; 1604 1605 display_pipe_crc_irq_handler(dev, pipe, 1606 I915_READ(PIPE_CRC_RES_RED(pipe)), 1607 I915_READ(PIPE_CRC_RES_GREEN(pipe)), 1608 I915_READ(PIPE_CRC_RES_BLUE(pipe)), 1609 res1, res2); 1610 } 1611 1612 /* The RPS events need forcewake, so we add them to a work queue and mask their 1613 * IMR bits until the work is done. Other interrupts can be processed without 1614 * the work queue. */ 1615 static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir) 1616 { 1617 if (pm_iir & dev_priv->pm_rps_events) { 1618 spin_lock(&dev_priv->irq_lock); 1619 gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events); 1620 if (dev_priv->rps.interrupts_enabled) { 1621 dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events; 1622 queue_work(dev_priv->wq, &dev_priv->rps.work); 1623 } 1624 spin_unlock(&dev_priv->irq_lock); 1625 } 1626 1627 if (INTEL_INFO(dev_priv)->gen >= 8) 1628 return; 1629 1630 if (HAS_VEBOX(dev_priv->dev)) { 1631 if (pm_iir & PM_VEBOX_USER_INTERRUPT) 1632 notify_ring(&dev_priv->ring[VECS]); 1633 1634 if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) 1635 DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir); 1636 } 1637 } 1638 1639 static bool intel_pipe_handle_vblank(struct drm_device *dev, enum pipe pipe) 1640 { 1641 if (!drm_handle_vblank(dev, pipe)) 1642 return false; 1643 1644 return true; 1645 } 1646 1647 static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir) 1648 { 1649 struct drm_i915_private *dev_priv = dev->dev_private; 1650 u32 pipe_stats[I915_MAX_PIPES] = { }; 1651 int pipe; 1652 1653 spin_lock(&dev_priv->irq_lock); 1654 1655 if (!dev_priv->display_irqs_enabled) { 1656 spin_unlock(&dev_priv->irq_lock); 1657 return; 1658 } 1659 1660 for_each_pipe(dev_priv, pipe) { 1661 i915_reg_t reg; 1662 u32 mask, iir_bit = 0; 1663 1664 /* 1665 * PIPESTAT bits get signalled even when the interrupt is 1666 * disabled with the mask bits, and some of the status bits do 1667 * not generate interrupts at all (like the underrun bit). Hence 1668 * we need to be careful that we only handle what we want to 1669 * handle. 1670 */ 1671 1672 /* fifo underruns are filterered in the underrun handler. */ 1673 mask = PIPE_FIFO_UNDERRUN_STATUS; 1674 1675 switch (pipe) { 1676 case PIPE_A: 1677 iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT; 1678 break; 1679 case PIPE_B: 1680 iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; 1681 break; 1682 case PIPE_C: 1683 iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT; 1684 break; 1685 } 1686 if (iir & iir_bit) 1687 mask |= dev_priv->pipestat_irq_mask[pipe]; 1688 1689 if (!mask) 1690 continue; 1691 1692 reg = PIPESTAT(pipe); 1693 mask |= PIPESTAT_INT_ENABLE_MASK; 1694 pipe_stats[pipe] = I915_READ(reg) & mask; 1695 1696 /* 1697 * Clear the PIPE*STAT regs before the IIR 1698 */ 1699 if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS | 1700 PIPESTAT_INT_STATUS_MASK)) 1701 I915_WRITE(reg, pipe_stats[pipe]); 1702 } 1703 spin_unlock(&dev_priv->irq_lock); 1704 1705 for_each_pipe(dev_priv, pipe) { 1706 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS && 1707 intel_pipe_handle_vblank(dev, pipe)) 1708 intel_check_page_flip(dev, pipe); 1709 1710 if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) { 1711 intel_prepare_page_flip(dev, pipe); 1712 intel_finish_page_flip(dev, pipe); 1713 } 1714 1715 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 1716 i9xx_pipe_crc_irq_handler(dev, pipe); 1717 1718 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 1719 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 1720 } 1721 1722 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 1723 gmbus_irq_handler(dev); 1724 } 1725 1726 static void i9xx_hpd_irq_handler(struct drm_device *dev) 1727 { 1728 struct drm_i915_private *dev_priv = dev->dev_private; 1729 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); 1730 u32 pin_mask = 0, long_mask = 0; 1731 1732 if (!hotplug_status) 1733 return; 1734 1735 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); 1736 /* 1737 * Make sure hotplug status is cleared before we clear IIR, or else we 1738 * may miss hotplug events. 1739 */ 1740 POSTING_READ(PORT_HOTPLUG_STAT); 1741 1742 if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { 1743 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X; 1744 1745 if (hotplug_trigger) { 1746 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, 1747 hotplug_trigger, hpd_status_g4x, 1748 i9xx_port_hotplug_long_detect); 1749 1750 intel_hpd_irq_handler(dev, pin_mask, long_mask); 1751 } 1752 1753 if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X) 1754 dp_aux_irq_handler(dev); 1755 } else { 1756 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; 1757 1758 if (hotplug_trigger) { 1759 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, 1760 hotplug_trigger, hpd_status_i915, 1761 i9xx_port_hotplug_long_detect); 1762 intel_hpd_irq_handler(dev, pin_mask, long_mask); 1763 } 1764 } 1765 } 1766 1767 static irqreturn_t valleyview_irq_handler(int irq, void *arg) 1768 { 1769 struct drm_device *dev = arg; 1770 struct drm_i915_private *dev_priv = dev->dev_private; 1771 u32 iir, gt_iir, pm_iir; 1772 irqreturn_t ret = IRQ_NONE; 1773 1774 if (!intel_irqs_enabled(dev_priv)) 1775 return IRQ_NONE; 1776 1777 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 1778 disable_rpm_wakeref_asserts(dev_priv); 1779 1780 while (true) { 1781 /* Find, clear, then process each source of interrupt */ 1782 1783 gt_iir = I915_READ(GTIIR); 1784 if (gt_iir) 1785 I915_WRITE(GTIIR, gt_iir); 1786 1787 pm_iir = I915_READ(GEN6_PMIIR); 1788 if (pm_iir) 1789 I915_WRITE(GEN6_PMIIR, pm_iir); 1790 1791 iir = I915_READ(VLV_IIR); 1792 if (iir) { 1793 /* Consume port before clearing IIR or we'll miss events */ 1794 if (iir & I915_DISPLAY_PORT_INTERRUPT) 1795 i9xx_hpd_irq_handler(dev); 1796 I915_WRITE(VLV_IIR, iir); 1797 } 1798 1799 if (gt_iir == 0 && pm_iir == 0 && iir == 0) 1800 goto out; 1801 1802 ret = IRQ_HANDLED; 1803 1804 if (gt_iir) 1805 snb_gt_irq_handler(dev, dev_priv, gt_iir); 1806 if (pm_iir) 1807 gen6_rps_irq_handler(dev_priv, pm_iir); 1808 /* Call regardless, as some status bits might not be 1809 * signalled in iir */ 1810 valleyview_pipestat_irq_handler(dev, iir); 1811 } 1812 1813 out: 1814 enable_rpm_wakeref_asserts(dev_priv); 1815 1816 return ret; 1817 } 1818 1819 static irqreturn_t cherryview_irq_handler(int irq, void *arg) 1820 { 1821 struct drm_device *dev = arg; 1822 struct drm_i915_private *dev_priv = dev->dev_private; 1823 u32 master_ctl, iir; 1824 irqreturn_t ret = IRQ_NONE; 1825 1826 if (!intel_irqs_enabled(dev_priv)) 1827 return IRQ_NONE; 1828 1829 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 1830 disable_rpm_wakeref_asserts(dev_priv); 1831 1832 for (;;) { 1833 master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL; 1834 iir = I915_READ(VLV_IIR); 1835 1836 if (master_ctl == 0 && iir == 0) 1837 break; 1838 1839 ret = IRQ_HANDLED; 1840 1841 I915_WRITE(GEN8_MASTER_IRQ, 0); 1842 1843 /* Find, clear, then process each source of interrupt */ 1844 1845 if (iir) { 1846 /* Consume port before clearing IIR or we'll miss events */ 1847 if (iir & I915_DISPLAY_PORT_INTERRUPT) 1848 i9xx_hpd_irq_handler(dev); 1849 I915_WRITE(VLV_IIR, iir); 1850 } 1851 1852 gen8_gt_irq_handler(dev_priv, master_ctl); 1853 1854 /* Call regardless, as some status bits might not be 1855 * signalled in iir */ 1856 valleyview_pipestat_irq_handler(dev, iir); 1857 1858 I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL); 1859 POSTING_READ(GEN8_MASTER_IRQ); 1860 } 1861 1862 enable_rpm_wakeref_asserts(dev_priv); 1863 1864 return ret; 1865 } 1866 1867 static void ibx_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger, 1868 const u32 hpd[HPD_NUM_PINS]) 1869 { 1870 struct drm_i915_private *dev_priv = to_i915(dev); 1871 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; 1872 1873 /* 1874 * Somehow the PCH doesn't seem to really ack the interrupt to the CPU 1875 * unless we touch the hotplug register, even if hotplug_trigger is 1876 * zero. Not acking leads to "The master control interrupt lied (SDE)!" 1877 * errors. 1878 */ 1879 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); 1880 if (!hotplug_trigger) { 1881 u32 mask = PORTA_HOTPLUG_STATUS_MASK | 1882 PORTD_HOTPLUG_STATUS_MASK | 1883 PORTC_HOTPLUG_STATUS_MASK | 1884 PORTB_HOTPLUG_STATUS_MASK; 1885 dig_hotplug_reg &= ~mask; 1886 } 1887 1888 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); 1889 if (!hotplug_trigger) 1890 return; 1891 1892 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, 1893 dig_hotplug_reg, hpd, 1894 pch_port_hotplug_long_detect); 1895 1896 intel_hpd_irq_handler(dev, pin_mask, long_mask); 1897 } 1898 1899 static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir) 1900 { 1901 struct drm_i915_private *dev_priv = dev->dev_private; 1902 int pipe; 1903 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK; 1904 1905 ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx); 1906 1907 if (pch_iir & SDE_AUDIO_POWER_MASK) { 1908 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >> 1909 SDE_AUDIO_POWER_SHIFT); 1910 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", 1911 port_name(port)); 1912 } 1913 1914 if (pch_iir & SDE_AUX_MASK) 1915 dp_aux_irq_handler(dev); 1916 1917 if (pch_iir & SDE_GMBUS) 1918 gmbus_irq_handler(dev); 1919 1920 if (pch_iir & SDE_AUDIO_HDCP_MASK) 1921 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n"); 1922 1923 if (pch_iir & SDE_AUDIO_TRANS_MASK) 1924 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n"); 1925 1926 if (pch_iir & SDE_POISON) 1927 DRM_ERROR("PCH poison interrupt\n"); 1928 1929 if (pch_iir & SDE_FDI_MASK) 1930 for_each_pipe(dev_priv, pipe) 1931 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 1932 pipe_name(pipe), 1933 I915_READ(FDI_RX_IIR(pipe))); 1934 1935 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE)) 1936 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n"); 1937 1938 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR)) 1939 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n"); 1940 1941 if (pch_iir & SDE_TRANSA_FIFO_UNDER) 1942 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A); 1943 1944 if (pch_iir & SDE_TRANSB_FIFO_UNDER) 1945 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B); 1946 } 1947 1948 static void ivb_err_int_handler(struct drm_device *dev) 1949 { 1950 struct drm_i915_private *dev_priv = dev->dev_private; 1951 u32 err_int = I915_READ(GEN7_ERR_INT); 1952 enum pipe pipe; 1953 1954 if (err_int & ERR_INT_POISON) 1955 DRM_ERROR("Poison interrupt\n"); 1956 1957 for_each_pipe(dev_priv, pipe) { 1958 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) 1959 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 1960 1961 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) { 1962 if (IS_IVYBRIDGE(dev)) 1963 ivb_pipe_crc_irq_handler(dev, pipe); 1964 else 1965 hsw_pipe_crc_irq_handler(dev, pipe); 1966 } 1967 } 1968 1969 I915_WRITE(GEN7_ERR_INT, err_int); 1970 } 1971 1972 static void cpt_serr_int_handler(struct drm_device *dev) 1973 { 1974 struct drm_i915_private *dev_priv = dev->dev_private; 1975 u32 serr_int = I915_READ(SERR_INT); 1976 1977 if (serr_int & SERR_INT_POISON) 1978 DRM_ERROR("PCH poison interrupt\n"); 1979 1980 if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN) 1981 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A); 1982 1983 if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN) 1984 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B); 1985 1986 if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN) 1987 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_C); 1988 1989 I915_WRITE(SERR_INT, serr_int); 1990 } 1991 1992 static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir) 1993 { 1994 struct drm_i915_private *dev_priv = dev->dev_private; 1995 int pipe; 1996 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; 1997 1998 ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt); 1999 2000 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) { 2001 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> 2002 SDE_AUDIO_POWER_SHIFT_CPT); 2003 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n", 2004 port_name(port)); 2005 } 2006 2007 if (pch_iir & SDE_AUX_MASK_CPT) 2008 dp_aux_irq_handler(dev); 2009 2010 if (pch_iir & SDE_GMBUS_CPT) 2011 gmbus_irq_handler(dev); 2012 2013 if (pch_iir & SDE_AUDIO_CP_REQ_CPT) 2014 DRM_DEBUG_DRIVER("Audio CP request interrupt\n"); 2015 2016 if (pch_iir & SDE_AUDIO_CP_CHG_CPT) 2017 DRM_DEBUG_DRIVER("Audio CP change interrupt\n"); 2018 2019 if (pch_iir & SDE_FDI_MASK_CPT) 2020 for_each_pipe(dev_priv, pipe) 2021 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 2022 pipe_name(pipe), 2023 I915_READ(FDI_RX_IIR(pipe))); 2024 2025 if (pch_iir & SDE_ERROR_CPT) 2026 cpt_serr_int_handler(dev); 2027 } 2028 2029 static void spt_irq_handler(struct drm_device *dev, u32 pch_iir) 2030 { 2031 struct drm_i915_private *dev_priv = dev->dev_private; 2032 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT & 2033 ~SDE_PORTE_HOTPLUG_SPT; 2034 u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT; 2035 u32 pin_mask = 0, long_mask = 0; 2036 2037 if (hotplug_trigger) { 2038 u32 dig_hotplug_reg; 2039 2040 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); 2041 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); 2042 2043 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, 2044 dig_hotplug_reg, hpd_spt, 2045 spt_port_hotplug_long_detect); 2046 } 2047 2048 if (hotplug2_trigger) { 2049 u32 dig_hotplug_reg; 2050 2051 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2); 2052 I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg); 2053 2054 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug2_trigger, 2055 dig_hotplug_reg, hpd_spt, 2056 spt_port_hotplug2_long_detect); 2057 } 2058 2059 if (pin_mask) 2060 intel_hpd_irq_handler(dev, pin_mask, long_mask); 2061 2062 if (pch_iir & SDE_GMBUS_CPT) 2063 gmbus_irq_handler(dev); 2064 } 2065 2066 static void ilk_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger, 2067 const u32 hpd[HPD_NUM_PINS]) 2068 { 2069 struct drm_i915_private *dev_priv = to_i915(dev); 2070 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; 2071 2072 dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL); 2073 I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg); 2074 2075 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, 2076 dig_hotplug_reg, hpd, 2077 ilk_port_hotplug_long_detect); 2078 2079 intel_hpd_irq_handler(dev, pin_mask, long_mask); 2080 } 2081 2082 static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir) 2083 { 2084 struct drm_i915_private *dev_priv = dev->dev_private; 2085 enum pipe pipe; 2086 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG; 2087 2088 if (hotplug_trigger) 2089 ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_ilk); 2090 2091 if (de_iir & DE_AUX_CHANNEL_A) 2092 dp_aux_irq_handler(dev); 2093 2094 if (de_iir & DE_GSE) 2095 intel_opregion_asle_intr(dev); 2096 2097 if (de_iir & DE_POISON) 2098 DRM_ERROR("Poison interrupt\n"); 2099 2100 for_each_pipe(dev_priv, pipe) { 2101 if (de_iir & DE_PIPE_VBLANK(pipe) && 2102 intel_pipe_handle_vblank(dev, pipe)) 2103 intel_check_page_flip(dev, pipe); 2104 2105 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe)) 2106 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2107 2108 if (de_iir & DE_PIPE_CRC_DONE(pipe)) 2109 i9xx_pipe_crc_irq_handler(dev, pipe); 2110 2111 /* plane/pipes map 1:1 on ilk+ */ 2112 if (de_iir & DE_PLANE_FLIP_DONE(pipe)) { 2113 intel_prepare_page_flip(dev, pipe); 2114 intel_finish_page_flip_plane(dev, pipe); 2115 } 2116 } 2117 2118 /* check event from PCH */ 2119 if (de_iir & DE_PCH_EVENT) { 2120 u32 pch_iir = I915_READ(SDEIIR); 2121 2122 if (HAS_PCH_CPT(dev)) 2123 cpt_irq_handler(dev, pch_iir); 2124 else 2125 ibx_irq_handler(dev, pch_iir); 2126 2127 /* should clear PCH hotplug event before clear CPU irq */ 2128 I915_WRITE(SDEIIR, pch_iir); 2129 } 2130 2131 if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT) 2132 ironlake_rps_change_irq_handler(dev); 2133 } 2134 2135 static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir) 2136 { 2137 struct drm_i915_private *dev_priv = dev->dev_private; 2138 enum pipe pipe; 2139 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB; 2140 2141 if (hotplug_trigger) 2142 ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_ivb); 2143 2144 if (de_iir & DE_ERR_INT_IVB) 2145 ivb_err_int_handler(dev); 2146 2147 if (de_iir & DE_AUX_CHANNEL_A_IVB) 2148 dp_aux_irq_handler(dev); 2149 2150 if (de_iir & DE_GSE_IVB) 2151 intel_opregion_asle_intr(dev); 2152 2153 for_each_pipe(dev_priv, pipe) { 2154 if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) && 2155 intel_pipe_handle_vblank(dev, pipe)) 2156 intel_check_page_flip(dev, pipe); 2157 2158 /* plane/pipes map 1:1 on ilk+ */ 2159 if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) { 2160 intel_prepare_page_flip(dev, pipe); 2161 intel_finish_page_flip_plane(dev, pipe); 2162 } 2163 } 2164 2165 /* check event from PCH */ 2166 if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) { 2167 u32 pch_iir = I915_READ(SDEIIR); 2168 2169 cpt_irq_handler(dev, pch_iir); 2170 2171 /* clear PCH hotplug event before clear CPU irq */ 2172 I915_WRITE(SDEIIR, pch_iir); 2173 } 2174 } 2175 2176 /* 2177 * To handle irqs with the minimum potential races with fresh interrupts, we: 2178 * 1 - Disable Master Interrupt Control. 2179 * 2 - Find the source(s) of the interrupt. 2180 * 3 - Clear the Interrupt Identity bits (IIR). 2181 * 4 - Process the interrupt(s) that had bits set in the IIRs. 2182 * 5 - Re-enable Master Interrupt Control. 2183 */ 2184 static irqreturn_t ironlake_irq_handler(int irq, void *arg) 2185 { 2186 struct drm_device *dev = arg; 2187 struct drm_i915_private *dev_priv = dev->dev_private; 2188 u32 de_iir, gt_iir, de_ier, sde_ier = 0; 2189 irqreturn_t ret = IRQ_NONE; 2190 2191 if (!intel_irqs_enabled(dev_priv)) 2192 return IRQ_NONE; 2193 2194 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2195 disable_rpm_wakeref_asserts(dev_priv); 2196 2197 /* disable master interrupt before clearing iir */ 2198 de_ier = I915_READ(DEIER); 2199 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); 2200 POSTING_READ(DEIER); 2201 2202 /* Disable south interrupts. We'll only write to SDEIIR once, so further 2203 * interrupts will will be stored on its back queue, and then we'll be 2204 * able to process them after we restore SDEIER (as soon as we restore 2205 * it, we'll get an interrupt if SDEIIR still has something to process 2206 * due to its back queue). */ 2207 if (!HAS_PCH_NOP(dev)) { 2208 sde_ier = I915_READ(SDEIER); 2209 I915_WRITE(SDEIER, 0); 2210 POSTING_READ(SDEIER); 2211 } 2212 2213 /* Find, clear, then process each source of interrupt */ 2214 2215 gt_iir = I915_READ(GTIIR); 2216 if (gt_iir) { 2217 I915_WRITE(GTIIR, gt_iir); 2218 ret = IRQ_HANDLED; 2219 if (INTEL_INFO(dev)->gen >= 6) 2220 snb_gt_irq_handler(dev, dev_priv, gt_iir); 2221 else 2222 ilk_gt_irq_handler(dev, dev_priv, gt_iir); 2223 } 2224 2225 de_iir = I915_READ(DEIIR); 2226 if (de_iir) { 2227 I915_WRITE(DEIIR, de_iir); 2228 ret = IRQ_HANDLED; 2229 if (INTEL_INFO(dev)->gen >= 7) 2230 ivb_display_irq_handler(dev, de_iir); 2231 else 2232 ilk_display_irq_handler(dev, de_iir); 2233 } 2234 2235 if (INTEL_INFO(dev)->gen >= 6) { 2236 u32 pm_iir = I915_READ(GEN6_PMIIR); 2237 if (pm_iir) { 2238 I915_WRITE(GEN6_PMIIR, pm_iir); 2239 ret = IRQ_HANDLED; 2240 gen6_rps_irq_handler(dev_priv, pm_iir); 2241 } 2242 } 2243 2244 I915_WRITE(DEIER, de_ier); 2245 POSTING_READ(DEIER); 2246 if (!HAS_PCH_NOP(dev)) { 2247 I915_WRITE(SDEIER, sde_ier); 2248 POSTING_READ(SDEIER); 2249 } 2250 2251 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2252 enable_rpm_wakeref_asserts(dev_priv); 2253 2254 return ret; 2255 } 2256 2257 static void bxt_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger, 2258 const u32 hpd[HPD_NUM_PINS]) 2259 { 2260 struct drm_i915_private *dev_priv = to_i915(dev); 2261 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; 2262 2263 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); 2264 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); 2265 2266 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, 2267 dig_hotplug_reg, hpd, 2268 bxt_port_hotplug_long_detect); 2269 2270 intel_hpd_irq_handler(dev, pin_mask, long_mask); 2271 } 2272 2273 static irqreturn_t 2274 gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) 2275 { 2276 struct drm_device *dev = dev_priv->dev; 2277 irqreturn_t ret = IRQ_NONE; 2278 u32 iir; 2279 enum pipe pipe; 2280 2281 if (master_ctl & GEN8_DE_MISC_IRQ) { 2282 iir = I915_READ(GEN8_DE_MISC_IIR); 2283 if (iir) { 2284 I915_WRITE(GEN8_DE_MISC_IIR, iir); 2285 ret = IRQ_HANDLED; 2286 if (iir & GEN8_DE_MISC_GSE) 2287 intel_opregion_asle_intr(dev); 2288 else 2289 DRM_ERROR("Unexpected DE Misc interrupt\n"); 2290 } 2291 else 2292 DRM_ERROR("The master control interrupt lied (DE MISC)!\n"); 2293 } 2294 2295 if (master_ctl & GEN8_DE_PORT_IRQ) { 2296 iir = I915_READ(GEN8_DE_PORT_IIR); 2297 if (iir) { 2298 u32 tmp_mask; 2299 bool found = false; 2300 2301 I915_WRITE(GEN8_DE_PORT_IIR, iir); 2302 ret = IRQ_HANDLED; 2303 2304 tmp_mask = GEN8_AUX_CHANNEL_A; 2305 if (INTEL_INFO(dev_priv)->gen >= 9) 2306 tmp_mask |= GEN9_AUX_CHANNEL_B | 2307 GEN9_AUX_CHANNEL_C | 2308 GEN9_AUX_CHANNEL_D; 2309 2310 if (iir & tmp_mask) { 2311 dp_aux_irq_handler(dev); 2312 found = true; 2313 } 2314 2315 if (IS_BROXTON(dev_priv)) { 2316 tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK; 2317 if (tmp_mask) { 2318 bxt_hpd_irq_handler(dev, tmp_mask, hpd_bxt); 2319 found = true; 2320 } 2321 } else if (IS_BROADWELL(dev_priv)) { 2322 tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG; 2323 if (tmp_mask) { 2324 ilk_hpd_irq_handler(dev, tmp_mask, hpd_bdw); 2325 found = true; 2326 } 2327 } 2328 2329 if (IS_BROXTON(dev) && (iir & BXT_DE_PORT_GMBUS)) { 2330 gmbus_irq_handler(dev); 2331 found = true; 2332 } 2333 2334 if (!found) 2335 DRM_ERROR("Unexpected DE Port interrupt\n"); 2336 } 2337 else 2338 DRM_ERROR("The master control interrupt lied (DE PORT)!\n"); 2339 } 2340 2341 for_each_pipe(dev_priv, pipe) { 2342 u32 flip_done, fault_errors; 2343 2344 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe))) 2345 continue; 2346 2347 iir = I915_READ(GEN8_DE_PIPE_IIR(pipe)); 2348 if (!iir) { 2349 DRM_ERROR("The master control interrupt lied (DE PIPE)!\n"); 2350 continue; 2351 } 2352 2353 ret = IRQ_HANDLED; 2354 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir); 2355 2356 if (iir & GEN8_PIPE_VBLANK && 2357 intel_pipe_handle_vblank(dev, pipe)) 2358 intel_check_page_flip(dev, pipe); 2359 2360 flip_done = iir; 2361 if (INTEL_INFO(dev_priv)->gen >= 9) 2362 flip_done &= GEN9_PIPE_PLANE1_FLIP_DONE; 2363 else 2364 flip_done &= GEN8_PIPE_PRIMARY_FLIP_DONE; 2365 2366 if (flip_done) { 2367 intel_prepare_page_flip(dev, pipe); 2368 intel_finish_page_flip_plane(dev, pipe); 2369 } 2370 2371 if (iir & GEN8_PIPE_CDCLK_CRC_DONE) 2372 hsw_pipe_crc_irq_handler(dev, pipe); 2373 2374 if (iir & GEN8_PIPE_FIFO_UNDERRUN) 2375 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2376 2377 fault_errors = iir; 2378 if (INTEL_INFO(dev_priv)->gen >= 9) 2379 fault_errors &= GEN9_DE_PIPE_IRQ_FAULT_ERRORS; 2380 else 2381 fault_errors &= GEN8_DE_PIPE_IRQ_FAULT_ERRORS; 2382 2383 if (fault_errors) 2384 DRM_ERROR("Fault errors on pipe %c\n: 0x%08x", 2385 pipe_name(pipe), 2386 fault_errors); 2387 } 2388 2389 if (HAS_PCH_SPLIT(dev) && !HAS_PCH_NOP(dev) && 2390 master_ctl & GEN8_DE_PCH_IRQ) { 2391 /* 2392 * FIXME(BDW): Assume for now that the new interrupt handling 2393 * scheme also closed the SDE interrupt handling race we've seen 2394 * on older pch-split platforms. But this needs testing. 2395 */ 2396 iir = I915_READ(SDEIIR); 2397 if (iir) { 2398 I915_WRITE(SDEIIR, iir); 2399 ret = IRQ_HANDLED; 2400 2401 if (HAS_PCH_SPT(dev_priv)) 2402 spt_irq_handler(dev, iir); 2403 else 2404 cpt_irq_handler(dev, iir); 2405 } else { 2406 /* 2407 * Like on previous PCH there seems to be something 2408 * fishy going on with forwarding PCH interrupts. 2409 */ 2410 DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n"); 2411 } 2412 } 2413 2414 return ret; 2415 } 2416 2417 static irqreturn_t gen8_irq_handler(int irq, void *arg) 2418 { 2419 struct drm_device *dev = arg; 2420 struct drm_i915_private *dev_priv = dev->dev_private; 2421 u32 master_ctl; 2422 irqreturn_t ret; 2423 2424 if (!intel_irqs_enabled(dev_priv)) 2425 return IRQ_NONE; 2426 2427 master_ctl = I915_READ_FW(GEN8_MASTER_IRQ); 2428 master_ctl &= ~GEN8_MASTER_IRQ_CONTROL; 2429 if (!master_ctl) 2430 return IRQ_NONE; 2431 2432 I915_WRITE_FW(GEN8_MASTER_IRQ, 0); 2433 2434 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2435 disable_rpm_wakeref_asserts(dev_priv); 2436 2437 /* Find, clear, then process each source of interrupt */ 2438 ret = gen8_gt_irq_handler(dev_priv, master_ctl); 2439 ret |= gen8_de_irq_handler(dev_priv, master_ctl); 2440 2441 I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 2442 POSTING_READ_FW(GEN8_MASTER_IRQ); 2443 2444 enable_rpm_wakeref_asserts(dev_priv); 2445 2446 return ret; 2447 } 2448 2449 static void i915_error_wake_up(struct drm_i915_private *dev_priv, 2450 bool reset_completed) 2451 { 2452 struct intel_engine_cs *ring; 2453 int i; 2454 2455 /* 2456 * Notify all waiters for GPU completion events that reset state has 2457 * been changed, and that they need to restart their wait after 2458 * checking for potential errors (and bail out to drop locks if there is 2459 * a gpu reset pending so that i915_error_work_func can acquire them). 2460 */ 2461 2462 /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */ 2463 for_each_ring(ring, dev_priv, i) 2464 wake_up_all(&ring->irq_queue); 2465 2466 /* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */ 2467 wake_up_all(&dev_priv->pending_flip_queue); 2468 2469 /* 2470 * Signal tasks blocked in i915_gem_wait_for_error that the pending 2471 * reset state is cleared. 2472 */ 2473 if (reset_completed) 2474 wake_up_all(&dev_priv->gpu_error.reset_queue); 2475 } 2476 2477 /** 2478 * i915_reset_and_wakeup - do process context error handling work 2479 * @dev: drm device 2480 * 2481 * Fire an error uevent so userspace can see that a hang or error 2482 * was detected. 2483 */ 2484 static void i915_reset_and_wakeup(struct drm_device *dev) 2485 { 2486 struct drm_i915_private *dev_priv = to_i915(dev); 2487 struct i915_gpu_error *error = &dev_priv->gpu_error; 2488 char *error_event[] = { I915_ERROR_UEVENT "=1", NULL }; 2489 char *reset_event[] = { I915_RESET_UEVENT "=1", NULL }; 2490 char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL }; 2491 int ret; 2492 2493 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event); 2494 2495 /* 2496 * Note that there's only one work item which does gpu resets, so we 2497 * need not worry about concurrent gpu resets potentially incrementing 2498 * error->reset_counter twice. We only need to take care of another 2499 * racing irq/hangcheck declaring the gpu dead for a second time. A 2500 * quick check for that is good enough: schedule_work ensures the 2501 * correct ordering between hang detection and this work item, and since 2502 * the reset in-progress bit is only ever set by code outside of this 2503 * work we don't need to worry about any other races. 2504 */ 2505 if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) { 2506 DRM_DEBUG_DRIVER("resetting chip\n"); 2507 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, 2508 reset_event); 2509 2510 /* 2511 * In most cases it's guaranteed that we get here with an RPM 2512 * reference held, for example because there is a pending GPU 2513 * request that won't finish until the reset is done. This 2514 * isn't the case at least when we get here by doing a 2515 * simulated reset via debugs, so get an RPM reference. 2516 */ 2517 intel_runtime_pm_get(dev_priv); 2518 2519 intel_prepare_reset(dev); 2520 2521 /* 2522 * All state reset _must_ be completed before we update the 2523 * reset counter, for otherwise waiters might miss the reset 2524 * pending state and not properly drop locks, resulting in 2525 * deadlocks with the reset work. 2526 */ 2527 ret = i915_reset(dev); 2528 2529 intel_finish_reset(dev); 2530 2531 intel_runtime_pm_put(dev_priv); 2532 2533 if (ret == 0) { 2534 /* 2535 * After all the gem state is reset, increment the reset 2536 * counter and wake up everyone waiting for the reset to 2537 * complete. 2538 * 2539 * Since unlock operations are a one-sided barrier only, 2540 * we need to insert a barrier here to order any seqno 2541 * updates before 2542 * the counter increment. 2543 */ 2544 smp_mb__before_atomic(); 2545 atomic_inc(&dev_priv->gpu_error.reset_counter); 2546 2547 kobject_uevent_env(&dev->primary->kdev->kobj, 2548 KOBJ_CHANGE, reset_done_event); 2549 } else { 2550 atomic_or(I915_WEDGED, &error->reset_counter); 2551 } 2552 2553 /* 2554 * Note: The wake_up also serves as a memory barrier so that 2555 * waiters see the update value of the reset counter atomic_t. 2556 */ 2557 i915_error_wake_up(dev_priv, true); 2558 } 2559 } 2560 2561 static void i915_report_and_clear_eir(struct drm_device *dev) 2562 { 2563 struct drm_i915_private *dev_priv = dev->dev_private; 2564 uint32_t instdone[I915_NUM_INSTDONE_REG]; 2565 u32 eir = I915_READ(EIR); 2566 int pipe, i; 2567 2568 if (!eir) 2569 return; 2570 2571 pr_err("render error detected, EIR: 0x%08x\n", eir); 2572 2573 i915_get_extra_instdone(dev, instdone); 2574 2575 if (IS_G4X(dev)) { 2576 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) { 2577 u32 ipeir = I915_READ(IPEIR_I965); 2578 2579 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); 2580 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); 2581 for (i = 0; i < ARRAY_SIZE(instdone); i++) 2582 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); 2583 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); 2584 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); 2585 I915_WRITE(IPEIR_I965, ipeir); 2586 POSTING_READ(IPEIR_I965); 2587 } 2588 if (eir & GM45_ERROR_PAGE_TABLE) { 2589 u32 pgtbl_err = I915_READ(PGTBL_ER); 2590 pr_err("page table error\n"); 2591 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err); 2592 I915_WRITE(PGTBL_ER, pgtbl_err); 2593 POSTING_READ(PGTBL_ER); 2594 } 2595 } 2596 2597 if (!IS_GEN2(dev)) { 2598 if (eir & I915_ERROR_PAGE_TABLE) { 2599 u32 pgtbl_err = I915_READ(PGTBL_ER); 2600 pr_err("page table error\n"); 2601 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err); 2602 I915_WRITE(PGTBL_ER, pgtbl_err); 2603 POSTING_READ(PGTBL_ER); 2604 } 2605 } 2606 2607 if (eir & I915_ERROR_MEMORY_REFRESH) { 2608 pr_err("memory refresh error:\n"); 2609 for_each_pipe(dev_priv, pipe) 2610 pr_err("pipe %c stat: 0x%08x\n", 2611 pipe_name(pipe), I915_READ(PIPESTAT(pipe))); 2612 /* pipestat has already been acked */ 2613 } 2614 if (eir & I915_ERROR_INSTRUCTION) { 2615 pr_err("instruction error\n"); 2616 pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM)); 2617 for (i = 0; i < ARRAY_SIZE(instdone); i++) 2618 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); 2619 if (INTEL_INFO(dev)->gen < 4) { 2620 u32 ipeir = I915_READ(IPEIR); 2621 2622 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR)); 2623 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR)); 2624 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD)); 2625 I915_WRITE(IPEIR, ipeir); 2626 POSTING_READ(IPEIR); 2627 } else { 2628 u32 ipeir = I915_READ(IPEIR_I965); 2629 2630 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); 2631 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); 2632 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); 2633 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); 2634 I915_WRITE(IPEIR_I965, ipeir); 2635 POSTING_READ(IPEIR_I965); 2636 } 2637 } 2638 2639 I915_WRITE(EIR, eir); 2640 POSTING_READ(EIR); 2641 eir = I915_READ(EIR); 2642 if (eir) { 2643 /* 2644 * some errors might have become stuck, 2645 * mask them. 2646 */ 2647 DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir); 2648 I915_WRITE(EMR, I915_READ(EMR) | eir); 2649 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 2650 } 2651 } 2652 2653 /** 2654 * i915_handle_error - handle a gpu error 2655 * @dev: drm device 2656 * 2657 * Do some basic checking of register state at error time and 2658 * dump it to the syslog. Also call i915_capture_error_state() to make 2659 * sure we get a record and make it available in debugfs. Fire a uevent 2660 * so userspace knows something bad happened (should trigger collection 2661 * of a ring dump etc.). 2662 */ 2663 void i915_handle_error(struct drm_device *dev, bool wedged, 2664 const char *fmt, ...) 2665 { 2666 struct drm_i915_private *dev_priv = dev->dev_private; 2667 va_list args; 2668 char error_msg[80]; 2669 2670 va_start(args, fmt); 2671 vscnprintf(error_msg, sizeof(error_msg), fmt, args); 2672 va_end(args); 2673 2674 i915_capture_error_state(dev, wedged, error_msg); 2675 i915_report_and_clear_eir(dev); 2676 2677 if (wedged) { 2678 atomic_or(I915_RESET_IN_PROGRESS_FLAG, 2679 &dev_priv->gpu_error.reset_counter); 2680 2681 /* 2682 * Wakeup waiting processes so that the reset function 2683 * i915_reset_and_wakeup doesn't deadlock trying to grab 2684 * various locks. By bumping the reset counter first, the woken 2685 * processes will see a reset in progress and back off, 2686 * releasing their locks and then wait for the reset completion. 2687 * We must do this for _all_ gpu waiters that might hold locks 2688 * that the reset work needs to acquire. 2689 * 2690 * Note: The wake_up serves as the required memory barrier to 2691 * ensure that the waiters see the updated value of the reset 2692 * counter atomic_t. 2693 */ 2694 i915_error_wake_up(dev_priv, false); 2695 } 2696 2697 i915_reset_and_wakeup(dev); 2698 } 2699 2700 /* Called from drm generic code, passed 'crtc' which 2701 * we use as a pipe index 2702 */ 2703 static int i915_enable_vblank(struct drm_device *dev, unsigned int pipe) 2704 { 2705 struct drm_i915_private *dev_priv = dev->dev_private; 2706 unsigned long irqflags; 2707 2708 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2709 if (INTEL_INFO(dev)->gen >= 4) 2710 i915_enable_pipestat(dev_priv, pipe, 2711 PIPE_START_VBLANK_INTERRUPT_STATUS); 2712 else 2713 i915_enable_pipestat(dev_priv, pipe, 2714 PIPE_VBLANK_INTERRUPT_STATUS); 2715 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2716 2717 return 0; 2718 } 2719 2720 static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe) 2721 { 2722 struct drm_i915_private *dev_priv = dev->dev_private; 2723 unsigned long irqflags; 2724 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) : 2725 DE_PIPE_VBLANK(pipe); 2726 2727 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2728 ilk_enable_display_irq(dev_priv, bit); 2729 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2730 2731 return 0; 2732 } 2733 2734 static int valleyview_enable_vblank(struct drm_device *dev, unsigned int pipe) 2735 { 2736 struct drm_i915_private *dev_priv = dev->dev_private; 2737 unsigned long irqflags; 2738 2739 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2740 i915_enable_pipestat(dev_priv, pipe, 2741 PIPE_START_VBLANK_INTERRUPT_STATUS); 2742 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2743 2744 return 0; 2745 } 2746 2747 static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe) 2748 { 2749 struct drm_i915_private *dev_priv = dev->dev_private; 2750 unsigned long irqflags; 2751 2752 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2753 bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK); 2754 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2755 2756 return 0; 2757 } 2758 2759 /* Called from drm generic code, passed 'crtc' which 2760 * we use as a pipe index 2761 */ 2762 static void i915_disable_vblank(struct drm_device *dev, unsigned int pipe) 2763 { 2764 struct drm_i915_private *dev_priv = dev->dev_private; 2765 unsigned long irqflags; 2766 2767 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2768 i915_disable_pipestat(dev_priv, pipe, 2769 PIPE_VBLANK_INTERRUPT_STATUS | 2770 PIPE_START_VBLANK_INTERRUPT_STATUS); 2771 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2772 } 2773 2774 static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe) 2775 { 2776 struct drm_i915_private *dev_priv = dev->dev_private; 2777 unsigned long irqflags; 2778 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) : 2779 DE_PIPE_VBLANK(pipe); 2780 2781 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2782 ilk_disable_display_irq(dev_priv, bit); 2783 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2784 } 2785 2786 static void valleyview_disable_vblank(struct drm_device *dev, unsigned int pipe) 2787 { 2788 struct drm_i915_private *dev_priv = dev->dev_private; 2789 unsigned long irqflags; 2790 2791 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2792 i915_disable_pipestat(dev_priv, pipe, 2793 PIPE_START_VBLANK_INTERRUPT_STATUS); 2794 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2795 } 2796 2797 static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe) 2798 { 2799 struct drm_i915_private *dev_priv = dev->dev_private; 2800 unsigned long irqflags; 2801 2802 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2803 bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK); 2804 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2805 } 2806 2807 static bool 2808 ring_idle(struct intel_engine_cs *ring, u32 seqno) 2809 { 2810 return (list_empty(&ring->request_list) || 2811 i915_seqno_passed(seqno, ring->last_submitted_seqno)); 2812 } 2813 2814 static bool 2815 ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr) 2816 { 2817 if (INTEL_INFO(dev)->gen >= 8) { 2818 return (ipehr >> 23) == 0x1c; 2819 } else { 2820 ipehr &= ~MI_SEMAPHORE_SYNC_MASK; 2821 return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | 2822 MI_SEMAPHORE_REGISTER); 2823 } 2824 } 2825 2826 static struct intel_engine_cs * 2827 semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr, u64 offset) 2828 { 2829 struct drm_i915_private *dev_priv = ring->dev->dev_private; 2830 struct intel_engine_cs *signaller; 2831 int i; 2832 2833 if (INTEL_INFO(dev_priv->dev)->gen >= 8) { 2834 for_each_ring(signaller, dev_priv, i) { 2835 if (ring == signaller) 2836 continue; 2837 2838 if (offset == signaller->semaphore.signal_ggtt[ring->id]) 2839 return signaller; 2840 } 2841 } else { 2842 u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK; 2843 2844 for_each_ring(signaller, dev_priv, i) { 2845 if(ring == signaller) 2846 continue; 2847 2848 if (sync_bits == signaller->semaphore.mbox.wait[ring->id]) 2849 return signaller; 2850 } 2851 } 2852 2853 DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n", 2854 ring->id, ipehr, offset); 2855 2856 return NULL; 2857 } 2858 2859 static struct intel_engine_cs * 2860 semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno) 2861 { 2862 struct drm_i915_private *dev_priv = ring->dev->dev_private; 2863 u32 cmd, ipehr, head; 2864 u64 offset = 0; 2865 int i, backwards; 2866 2867 /* 2868 * This function does not support execlist mode - any attempt to 2869 * proceed further into this function will result in a kernel panic 2870 * when dereferencing ring->buffer, which is not set up in execlist 2871 * mode. 2872 * 2873 * The correct way of doing it would be to derive the currently 2874 * executing ring buffer from the current context, which is derived 2875 * from the currently running request. Unfortunately, to get the 2876 * current request we would have to grab the struct_mutex before doing 2877 * anything else, which would be ill-advised since some other thread 2878 * might have grabbed it already and managed to hang itself, causing 2879 * the hang checker to deadlock. 2880 * 2881 * Therefore, this function does not support execlist mode in its 2882 * current form. Just return NULL and move on. 2883 */ 2884 if (ring->buffer == NULL) 2885 return NULL; 2886 2887 ipehr = I915_READ(RING_IPEHR(ring->mmio_base)); 2888 if (!ipehr_is_semaphore_wait(ring->dev, ipehr)) 2889 return NULL; 2890 2891 /* 2892 * HEAD is likely pointing to the dword after the actual command, 2893 * so scan backwards until we find the MBOX. But limit it to just 3 2894 * or 4 dwords depending on the semaphore wait command size. 2895 * Note that we don't care about ACTHD here since that might 2896 * point at at batch, and semaphores are always emitted into the 2897 * ringbuffer itself. 2898 */ 2899 head = I915_READ_HEAD(ring) & HEAD_ADDR; 2900 backwards = (INTEL_INFO(ring->dev)->gen >= 8) ? 5 : 4; 2901 2902 for (i = backwards; i; --i) { 2903 /* 2904 * Be paranoid and presume the hw has gone off into the wild - 2905 * our ring is smaller than what the hardware (and hence 2906 * HEAD_ADDR) allows. Also handles wrap-around. 2907 */ 2908 head &= ring->buffer->size - 1; 2909 2910 /* This here seems to blow up */ 2911 cmd = ioread32(ring->buffer->virtual_start + head); 2912 if (cmd == ipehr) 2913 break; 2914 2915 head -= 4; 2916 } 2917 2918 if (!i) 2919 return NULL; 2920 2921 *seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1; 2922 if (INTEL_INFO(ring->dev)->gen >= 8) { 2923 offset = ioread32(ring->buffer->virtual_start + head + 12); 2924 offset <<= 32; 2925 offset = ioread32(ring->buffer->virtual_start + head + 8); 2926 } 2927 return semaphore_wait_to_signaller_ring(ring, ipehr, offset); 2928 } 2929 2930 static int semaphore_passed(struct intel_engine_cs *ring) 2931 { 2932 struct drm_i915_private *dev_priv = ring->dev->dev_private; 2933 struct intel_engine_cs *signaller; 2934 u32 seqno; 2935 2936 ring->hangcheck.deadlock++; 2937 2938 signaller = semaphore_waits_for(ring, &seqno); 2939 if (signaller == NULL) 2940 return -1; 2941 2942 /* Prevent pathological recursion due to driver bugs */ 2943 if (signaller->hangcheck.deadlock >= I915_NUM_RINGS) 2944 return -1; 2945 2946 if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno)) 2947 return 1; 2948 2949 /* cursory check for an unkickable deadlock */ 2950 if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE && 2951 semaphore_passed(signaller) < 0) 2952 return -1; 2953 2954 return 0; 2955 } 2956 2957 static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv) 2958 { 2959 struct intel_engine_cs *ring; 2960 int i; 2961 2962 for_each_ring(ring, dev_priv, i) 2963 ring->hangcheck.deadlock = 0; 2964 } 2965 2966 static bool subunits_stuck(struct intel_engine_cs *ring) 2967 { 2968 u32 instdone[I915_NUM_INSTDONE_REG]; 2969 bool stuck; 2970 int i; 2971 2972 if (ring->id != RCS) 2973 return true; 2974 2975 i915_get_extra_instdone(ring->dev, instdone); 2976 2977 /* There might be unstable subunit states even when 2978 * actual head is not moving. Filter out the unstable ones by 2979 * accumulating the undone -> done transitions and only 2980 * consider those as progress. 2981 */ 2982 stuck = true; 2983 for (i = 0; i < I915_NUM_INSTDONE_REG; i++) { 2984 const u32 tmp = instdone[i] | ring->hangcheck.instdone[i]; 2985 2986 if (tmp != ring->hangcheck.instdone[i]) 2987 stuck = false; 2988 2989 ring->hangcheck.instdone[i] |= tmp; 2990 } 2991 2992 return stuck; 2993 } 2994 2995 static enum intel_ring_hangcheck_action 2996 head_stuck(struct intel_engine_cs *ring, u64 acthd) 2997 { 2998 if (acthd != ring->hangcheck.acthd) { 2999 3000 /* Clear subunit states on head movement */ 3001 memset(ring->hangcheck.instdone, 0, 3002 sizeof(ring->hangcheck.instdone)); 3003 3004 if (acthd > ring->hangcheck.max_acthd) { 3005 ring->hangcheck.max_acthd = acthd; 3006 return HANGCHECK_ACTIVE; 3007 } 3008 3009 return HANGCHECK_ACTIVE_LOOP; 3010 } 3011 3012 if (!subunits_stuck(ring)) 3013 return HANGCHECK_ACTIVE; 3014 3015 return HANGCHECK_HUNG; 3016 } 3017 3018 static enum intel_ring_hangcheck_action 3019 ring_stuck(struct intel_engine_cs *ring, u64 acthd) 3020 { 3021 struct drm_device *dev = ring->dev; 3022 struct drm_i915_private *dev_priv = dev->dev_private; 3023 enum intel_ring_hangcheck_action ha; 3024 u32 tmp; 3025 3026 ha = head_stuck(ring, acthd); 3027 if (ha != HANGCHECK_HUNG) 3028 return ha; 3029 3030 if (IS_GEN2(dev)) 3031 return HANGCHECK_HUNG; 3032 3033 /* Is the chip hanging on a WAIT_FOR_EVENT? 3034 * If so we can simply poke the RB_WAIT bit 3035 * and break the hang. This should work on 3036 * all but the second generation chipsets. 3037 */ 3038 tmp = I915_READ_CTL(ring); 3039 if (tmp & RING_WAIT) { 3040 i915_handle_error(dev, false, 3041 "Kicking stuck wait on %s", 3042 ring->name); 3043 I915_WRITE_CTL(ring, tmp); 3044 return HANGCHECK_KICK; 3045 } 3046 3047 if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) { 3048 switch (semaphore_passed(ring)) { 3049 default: 3050 return HANGCHECK_HUNG; 3051 case 1: 3052 i915_handle_error(dev, false, 3053 "Kicking stuck semaphore on %s", 3054 ring->name); 3055 I915_WRITE_CTL(ring, tmp); 3056 return HANGCHECK_KICK; 3057 case 0: 3058 return HANGCHECK_WAIT; 3059 } 3060 } 3061 3062 return HANGCHECK_HUNG; 3063 } 3064 3065 /* 3066 * This is called when the chip hasn't reported back with completed 3067 * batchbuffers in a long time. We keep track per ring seqno progress and 3068 * if there are no progress, hangcheck score for that ring is increased. 3069 * Further, acthd is inspected to see if the ring is stuck. On stuck case 3070 * we kick the ring. If we see no progress on three subsequent calls 3071 * we assume chip is wedged and try to fix it by resetting the chip. 3072 */ 3073 static void i915_hangcheck_elapsed(struct work_struct *work) 3074 { 3075 struct drm_i915_private *dev_priv = 3076 container_of(work, typeof(*dev_priv), 3077 gpu_error.hangcheck_work.work); 3078 struct drm_device *dev = dev_priv->dev; 3079 struct intel_engine_cs *ring; 3080 int i; 3081 int busy_count = 0, rings_hung = 0; 3082 bool stuck[I915_NUM_RINGS] = { 0 }; 3083 #define BUSY 1 3084 #define KICK 5 3085 #define HUNG 20 3086 3087 if (!i915.enable_hangcheck) 3088 return; 3089 3090 /* 3091 * The hangcheck work is synced during runtime suspend, we don't 3092 * require a wakeref. TODO: instead of disabling the asserts make 3093 * sure that we hold a reference when this work is running. 3094 */ 3095 DISABLE_RPM_WAKEREF_ASSERTS(dev_priv); 3096 3097 /* As enabling the GPU requires fairly extensive mmio access, 3098 * periodically arm the mmio checker to see if we are triggering 3099 * any invalid access. 3100 */ 3101 intel_uncore_arm_unclaimed_mmio_detection(dev_priv); 3102 3103 for_each_ring(ring, dev_priv, i) { 3104 u64 acthd; 3105 u32 seqno; 3106 bool busy = true; 3107 3108 semaphore_clear_deadlocks(dev_priv); 3109 3110 seqno = ring->get_seqno(ring, false); 3111 acthd = intel_ring_get_active_head(ring); 3112 3113 if (ring->hangcheck.seqno == seqno) { 3114 if (ring_idle(ring, seqno)) { 3115 ring->hangcheck.action = HANGCHECK_IDLE; 3116 3117 if (waitqueue_active(&ring->irq_queue)) { 3118 /* Issue a wake-up to catch stuck h/w. */ 3119 if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) { 3120 if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring))) 3121 DRM_ERROR("Hangcheck timer elapsed... %s idle\n", 3122 ring->name); 3123 else 3124 DRM_INFO("Fake missed irq on %s\n", 3125 ring->name); 3126 wake_up_all(&ring->irq_queue); 3127 } 3128 /* Safeguard against driver failure */ 3129 ring->hangcheck.score += BUSY; 3130 } else 3131 busy = false; 3132 } else { 3133 /* We always increment the hangcheck score 3134 * if the ring is busy and still processing 3135 * the same request, so that no single request 3136 * can run indefinitely (such as a chain of 3137 * batches). The only time we do not increment 3138 * the hangcheck score on this ring, if this 3139 * ring is in a legitimate wait for another 3140 * ring. In that case the waiting ring is a 3141 * victim and we want to be sure we catch the 3142 * right culprit. Then every time we do kick 3143 * the ring, add a small increment to the 3144 * score so that we can catch a batch that is 3145 * being repeatedly kicked and so responsible 3146 * for stalling the machine. 3147 */ 3148 ring->hangcheck.action = ring_stuck(ring, 3149 acthd); 3150 3151 switch (ring->hangcheck.action) { 3152 case HANGCHECK_IDLE: 3153 case HANGCHECK_WAIT: 3154 case HANGCHECK_ACTIVE: 3155 break; 3156 case HANGCHECK_ACTIVE_LOOP: 3157 ring->hangcheck.score += BUSY; 3158 break; 3159 case HANGCHECK_KICK: 3160 ring->hangcheck.score += KICK; 3161 break; 3162 case HANGCHECK_HUNG: 3163 ring->hangcheck.score += HUNG; 3164 stuck[i] = true; 3165 break; 3166 } 3167 } 3168 } else { 3169 ring->hangcheck.action = HANGCHECK_ACTIVE; 3170 3171 /* Gradually reduce the count so that we catch DoS 3172 * attempts across multiple batches. 3173 */ 3174 if (ring->hangcheck.score > 0) 3175 ring->hangcheck.score--; 3176 3177 /* Clear head and subunit states on seqno movement */ 3178 ring->hangcheck.acthd = ring->hangcheck.max_acthd = 0; 3179 3180 memset(ring->hangcheck.instdone, 0, 3181 sizeof(ring->hangcheck.instdone)); 3182 } 3183 3184 ring->hangcheck.seqno = seqno; 3185 ring->hangcheck.acthd = acthd; 3186 busy_count += busy; 3187 } 3188 3189 for_each_ring(ring, dev_priv, i) { 3190 if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) { 3191 DRM_INFO("%s on %s\n", 3192 stuck[i] ? "stuck" : "no progress", 3193 ring->name); 3194 rings_hung++; 3195 } 3196 } 3197 3198 if (rings_hung) { 3199 i915_handle_error(dev, true, "Ring hung"); 3200 goto out; 3201 } 3202 3203 if (busy_count) 3204 /* Reset timer case chip hangs without another request 3205 * being added */ 3206 i915_queue_hangcheck(dev); 3207 3208 out: 3209 ENABLE_RPM_WAKEREF_ASSERTS(dev_priv); 3210 } 3211 3212 void i915_queue_hangcheck(struct drm_device *dev) 3213 { 3214 struct i915_gpu_error *e = &to_i915(dev)->gpu_error; 3215 3216 if (!i915.enable_hangcheck) 3217 return; 3218 3219 /* Don't continually defer the hangcheck so that it is always run at 3220 * least once after work has been scheduled on any ring. Otherwise, 3221 * we will ignore a hung ring if a second ring is kept busy. 3222 */ 3223 3224 queue_delayed_work(e->hangcheck_wq, &e->hangcheck_work, 3225 round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES)); 3226 } 3227 3228 static void ibx_irq_reset(struct drm_device *dev) 3229 { 3230 struct drm_i915_private *dev_priv = dev->dev_private; 3231 3232 if (HAS_PCH_NOP(dev)) 3233 return; 3234 3235 GEN5_IRQ_RESET(SDE); 3236 3237 if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev)) 3238 I915_WRITE(SERR_INT, 0xffffffff); 3239 } 3240 3241 /* 3242 * SDEIER is also touched by the interrupt handler to work around missed PCH 3243 * interrupts. Hence we can't update it after the interrupt handler is enabled - 3244 * instead we unconditionally enable all PCH interrupt sources here, but then 3245 * only unmask them as needed with SDEIMR. 3246 * 3247 * This function needs to be called before interrupts are enabled. 3248 */ 3249 static void ibx_irq_pre_postinstall(struct drm_device *dev) 3250 { 3251 struct drm_i915_private *dev_priv = dev->dev_private; 3252 3253 if (HAS_PCH_NOP(dev)) 3254 return; 3255 3256 WARN_ON(I915_READ(SDEIER) != 0); 3257 I915_WRITE(SDEIER, 0xffffffff); 3258 POSTING_READ(SDEIER); 3259 } 3260 3261 static void gen5_gt_irq_reset(struct drm_device *dev) 3262 { 3263 struct drm_i915_private *dev_priv = dev->dev_private; 3264 3265 GEN5_IRQ_RESET(GT); 3266 if (INTEL_INFO(dev)->gen >= 6) 3267 GEN5_IRQ_RESET(GEN6_PM); 3268 } 3269 3270 /* drm_dma.h hooks 3271 */ 3272 static void ironlake_irq_reset(struct drm_device *dev) 3273 { 3274 struct drm_i915_private *dev_priv = dev->dev_private; 3275 3276 I915_WRITE(HWSTAM, 0xffffffff); 3277 3278 GEN5_IRQ_RESET(DE); 3279 if (IS_GEN7(dev)) 3280 I915_WRITE(GEN7_ERR_INT, 0xffffffff); 3281 3282 gen5_gt_irq_reset(dev); 3283 3284 ibx_irq_reset(dev); 3285 } 3286 3287 static void vlv_display_irq_reset(struct drm_i915_private *dev_priv) 3288 { 3289 enum pipe pipe; 3290 3291 i915_hotplug_interrupt_update(dev_priv, 0xFFFFFFFF, 0); 3292 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3293 3294 for_each_pipe(dev_priv, pipe) 3295 I915_WRITE(PIPESTAT(pipe), 0xffff); 3296 3297 GEN5_IRQ_RESET(VLV_); 3298 } 3299 3300 static void valleyview_irq_preinstall(struct drm_device *dev) 3301 { 3302 struct drm_i915_private *dev_priv = dev->dev_private; 3303 3304 /* VLV magic */ 3305 I915_WRITE(VLV_IMR, 0); 3306 I915_WRITE(RING_IMR(RENDER_RING_BASE), 0); 3307 I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0); 3308 I915_WRITE(RING_IMR(BLT_RING_BASE), 0); 3309 3310 gen5_gt_irq_reset(dev); 3311 3312 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK); 3313 3314 vlv_display_irq_reset(dev_priv); 3315 } 3316 3317 static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv) 3318 { 3319 GEN8_IRQ_RESET_NDX(GT, 0); 3320 GEN8_IRQ_RESET_NDX(GT, 1); 3321 GEN8_IRQ_RESET_NDX(GT, 2); 3322 GEN8_IRQ_RESET_NDX(GT, 3); 3323 } 3324 3325 static void gen8_irq_reset(struct drm_device *dev) 3326 { 3327 struct drm_i915_private *dev_priv = dev->dev_private; 3328 int pipe; 3329 3330 I915_WRITE(GEN8_MASTER_IRQ, 0); 3331 POSTING_READ(GEN8_MASTER_IRQ); 3332 3333 gen8_gt_irq_reset(dev_priv); 3334 3335 for_each_pipe(dev_priv, pipe) 3336 if (intel_display_power_is_enabled(dev_priv, 3337 POWER_DOMAIN_PIPE(pipe))) 3338 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe); 3339 3340 GEN5_IRQ_RESET(GEN8_DE_PORT_); 3341 GEN5_IRQ_RESET(GEN8_DE_MISC_); 3342 GEN5_IRQ_RESET(GEN8_PCU_); 3343 3344 if (HAS_PCH_SPLIT(dev)) 3345 ibx_irq_reset(dev); 3346 } 3347 3348 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv, 3349 unsigned int pipe_mask) 3350 { 3351 uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN; 3352 enum pipe pipe; 3353 3354 spin_lock_irq(&dev_priv->irq_lock); 3355 for_each_pipe_masked(dev_priv, pipe, pipe_mask) 3356 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe, 3357 dev_priv->de_irq_mask[pipe], 3358 ~dev_priv->de_irq_mask[pipe] | extra_ier); 3359 spin_unlock_irq(&dev_priv->irq_lock); 3360 } 3361 3362 void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv, 3363 unsigned int pipe_mask) 3364 { 3365 enum pipe pipe; 3366 3367 spin_lock_irq(&dev_priv->irq_lock); 3368 for_each_pipe_masked(dev_priv, pipe, pipe_mask) 3369 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe); 3370 spin_unlock_irq(&dev_priv->irq_lock); 3371 3372 /* make sure we're done processing display irqs */ 3373 synchronize_irq(dev_priv->dev->irq); 3374 } 3375 3376 static void cherryview_irq_preinstall(struct drm_device *dev) 3377 { 3378 struct drm_i915_private *dev_priv = dev->dev_private; 3379 3380 I915_WRITE(GEN8_MASTER_IRQ, 0); 3381 POSTING_READ(GEN8_MASTER_IRQ); 3382 3383 gen8_gt_irq_reset(dev_priv); 3384 3385 GEN5_IRQ_RESET(GEN8_PCU_); 3386 3387 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV); 3388 3389 vlv_display_irq_reset(dev_priv); 3390 } 3391 3392 static u32 intel_hpd_enabled_irqs(struct drm_device *dev, 3393 const u32 hpd[HPD_NUM_PINS]) 3394 { 3395 struct drm_i915_private *dev_priv = to_i915(dev); 3396 struct intel_encoder *encoder; 3397 u32 enabled_irqs = 0; 3398 3399 for_each_intel_encoder(dev, encoder) 3400 if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED) 3401 enabled_irqs |= hpd[encoder->hpd_pin]; 3402 3403 return enabled_irqs; 3404 } 3405 3406 static void ibx_hpd_irq_setup(struct drm_device *dev) 3407 { 3408 struct drm_i915_private *dev_priv = dev->dev_private; 3409 u32 hotplug_irqs, hotplug, enabled_irqs; 3410 3411 if (HAS_PCH_IBX(dev)) { 3412 hotplug_irqs = SDE_HOTPLUG_MASK; 3413 enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ibx); 3414 } else { 3415 hotplug_irqs = SDE_HOTPLUG_MASK_CPT; 3416 enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_cpt); 3417 } 3418 3419 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 3420 3421 /* 3422 * Enable digital hotplug on the PCH, and configure the DP short pulse 3423 * duration to 2ms (which is the minimum in the Display Port spec). 3424 * The pulse duration bits are reserved on LPT+. 3425 */ 3426 hotplug = I915_READ(PCH_PORT_HOTPLUG); 3427 hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK); 3428 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms; 3429 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms; 3430 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms; 3431 /* 3432 * When CPU and PCH are on the same package, port A 3433 * HPD must be enabled in both north and south. 3434 */ 3435 if (HAS_PCH_LPT_LP(dev)) 3436 hotplug |= PORTA_HOTPLUG_ENABLE; 3437 I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 3438 } 3439 3440 static void spt_hpd_irq_setup(struct drm_device *dev) 3441 { 3442 struct drm_i915_private *dev_priv = dev->dev_private; 3443 u32 hotplug_irqs, hotplug, enabled_irqs; 3444 3445 hotplug_irqs = SDE_HOTPLUG_MASK_SPT; 3446 enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_spt); 3447 3448 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 3449 3450 /* Enable digital hotplug on the PCH */ 3451 hotplug = I915_READ(PCH_PORT_HOTPLUG); 3452 hotplug |= PORTD_HOTPLUG_ENABLE | PORTC_HOTPLUG_ENABLE | 3453 PORTB_HOTPLUG_ENABLE | PORTA_HOTPLUG_ENABLE; 3454 I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 3455 3456 hotplug = I915_READ(PCH_PORT_HOTPLUG2); 3457 hotplug |= PORTE_HOTPLUG_ENABLE; 3458 I915_WRITE(PCH_PORT_HOTPLUG2, hotplug); 3459 } 3460 3461 static void ilk_hpd_irq_setup(struct drm_device *dev) 3462 { 3463 struct drm_i915_private *dev_priv = dev->dev_private; 3464 u32 hotplug_irqs, hotplug, enabled_irqs; 3465 3466 if (INTEL_INFO(dev)->gen >= 8) { 3467 hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG; 3468 enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_bdw); 3469 3470 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs); 3471 } else if (INTEL_INFO(dev)->gen >= 7) { 3472 hotplug_irqs = DE_DP_A_HOTPLUG_IVB; 3473 enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ivb); 3474 3475 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs); 3476 } else { 3477 hotplug_irqs = DE_DP_A_HOTPLUG; 3478 enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ilk); 3479 3480 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs); 3481 } 3482 3483 /* 3484 * Enable digital hotplug on the CPU, and configure the DP short pulse 3485 * duration to 2ms (which is the minimum in the Display Port spec) 3486 * The pulse duration bits are reserved on HSW+. 3487 */ 3488 hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL); 3489 hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK; 3490 hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE | DIGITAL_PORTA_PULSE_DURATION_2ms; 3491 I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug); 3492 3493 ibx_hpd_irq_setup(dev); 3494 } 3495 3496 static void bxt_hpd_irq_setup(struct drm_device *dev) 3497 { 3498 struct drm_i915_private *dev_priv = dev->dev_private; 3499 u32 hotplug_irqs, hotplug, enabled_irqs; 3500 3501 enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_bxt); 3502 hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK; 3503 3504 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs); 3505 3506 hotplug = I915_READ(PCH_PORT_HOTPLUG); 3507 hotplug |= PORTC_HOTPLUG_ENABLE | PORTB_HOTPLUG_ENABLE | 3508 PORTA_HOTPLUG_ENABLE; 3509 I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 3510 } 3511 3512 static void ibx_irq_postinstall(struct drm_device *dev) 3513 { 3514 struct drm_i915_private *dev_priv = dev->dev_private; 3515 u32 mask; 3516 3517 if (HAS_PCH_NOP(dev)) 3518 return; 3519 3520 if (HAS_PCH_IBX(dev)) 3521 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON; 3522 else 3523 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT; 3524 3525 gen5_assert_iir_is_zero(dev_priv, SDEIIR); 3526 I915_WRITE(SDEIMR, ~mask); 3527 } 3528 3529 static void gen5_gt_irq_postinstall(struct drm_device *dev) 3530 { 3531 struct drm_i915_private *dev_priv = dev->dev_private; 3532 u32 pm_irqs, gt_irqs; 3533 3534 pm_irqs = gt_irqs = 0; 3535 3536 dev_priv->gt_irq_mask = ~0; 3537 if (HAS_L3_DPF(dev)) { 3538 /* L3 parity interrupt is always unmasked. */ 3539 dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev); 3540 gt_irqs |= GT_PARITY_ERROR(dev); 3541 } 3542 3543 gt_irqs |= GT_RENDER_USER_INTERRUPT; 3544 if (IS_GEN5(dev)) { 3545 gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT | 3546 ILK_BSD_USER_INTERRUPT; 3547 } else { 3548 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT; 3549 } 3550 3551 GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs); 3552 3553 if (INTEL_INFO(dev)->gen >= 6) { 3554 /* 3555 * RPS interrupts will get enabled/disabled on demand when RPS 3556 * itself is enabled/disabled. 3557 */ 3558 if (HAS_VEBOX(dev)) 3559 pm_irqs |= PM_VEBOX_USER_INTERRUPT; 3560 3561 dev_priv->pm_irq_mask = 0xffffffff; 3562 GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs); 3563 } 3564 } 3565 3566 static int ironlake_irq_postinstall(struct drm_device *dev) 3567 { 3568 struct drm_i915_private *dev_priv = dev->dev_private; 3569 u32 display_mask, extra_mask; 3570 3571 if (INTEL_INFO(dev)->gen >= 7) { 3572 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | 3573 DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB | 3574 DE_PLANEB_FLIP_DONE_IVB | 3575 DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB); 3576 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB | 3577 DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB | 3578 DE_DP_A_HOTPLUG_IVB); 3579 } else { 3580 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | 3581 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE | 3582 DE_AUX_CHANNEL_A | 3583 DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE | 3584 DE_POISON); 3585 extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT | 3586 DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN | 3587 DE_DP_A_HOTPLUG); 3588 } 3589 3590 dev_priv->irq_mask = ~display_mask; 3591 3592 I915_WRITE(HWSTAM, 0xeffe); 3593 3594 ibx_irq_pre_postinstall(dev); 3595 3596 GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask); 3597 3598 gen5_gt_irq_postinstall(dev); 3599 3600 ibx_irq_postinstall(dev); 3601 3602 if (IS_IRONLAKE_M(dev)) { 3603 /* Enable PCU event interrupts 3604 * 3605 * spinlocking not required here for correctness since interrupt 3606 * setup is guaranteed to run in single-threaded context. But we 3607 * need it to make the assert_spin_locked happy. */ 3608 spin_lock_irq(&dev_priv->irq_lock); 3609 ilk_enable_display_irq(dev_priv, DE_PCU_EVENT); 3610 spin_unlock_irq(&dev_priv->irq_lock); 3611 } 3612 3613 return 0; 3614 } 3615 3616 static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv) 3617 { 3618 u32 pipestat_mask; 3619 u32 iir_mask; 3620 enum pipe pipe; 3621 3622 pipestat_mask = PIPESTAT_INT_STATUS_MASK | 3623 PIPE_FIFO_UNDERRUN_STATUS; 3624 3625 for_each_pipe(dev_priv, pipe) 3626 I915_WRITE(PIPESTAT(pipe), pipestat_mask); 3627 POSTING_READ(PIPESTAT(PIPE_A)); 3628 3629 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV | 3630 PIPE_CRC_DONE_INTERRUPT_STATUS; 3631 3632 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); 3633 for_each_pipe(dev_priv, pipe) 3634 i915_enable_pipestat(dev_priv, pipe, pipestat_mask); 3635 3636 iir_mask = I915_DISPLAY_PORT_INTERRUPT | 3637 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3638 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; 3639 if (IS_CHERRYVIEW(dev_priv)) 3640 iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT; 3641 dev_priv->irq_mask &= ~iir_mask; 3642 3643 I915_WRITE(VLV_IIR, iir_mask); 3644 I915_WRITE(VLV_IIR, iir_mask); 3645 I915_WRITE(VLV_IER, ~dev_priv->irq_mask); 3646 I915_WRITE(VLV_IMR, dev_priv->irq_mask); 3647 POSTING_READ(VLV_IMR); 3648 } 3649 3650 static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv) 3651 { 3652 u32 pipestat_mask; 3653 u32 iir_mask; 3654 enum pipe pipe; 3655 3656 iir_mask = I915_DISPLAY_PORT_INTERRUPT | 3657 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3658 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; 3659 if (IS_CHERRYVIEW(dev_priv)) 3660 iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT; 3661 3662 dev_priv->irq_mask |= iir_mask; 3663 I915_WRITE(VLV_IMR, dev_priv->irq_mask); 3664 I915_WRITE(VLV_IER, ~dev_priv->irq_mask); 3665 I915_WRITE(VLV_IIR, iir_mask); 3666 I915_WRITE(VLV_IIR, iir_mask); 3667 POSTING_READ(VLV_IIR); 3668 3669 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV | 3670 PIPE_CRC_DONE_INTERRUPT_STATUS; 3671 3672 i915_disable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); 3673 for_each_pipe(dev_priv, pipe) 3674 i915_disable_pipestat(dev_priv, pipe, pipestat_mask); 3675 3676 pipestat_mask = PIPESTAT_INT_STATUS_MASK | 3677 PIPE_FIFO_UNDERRUN_STATUS; 3678 3679 for_each_pipe(dev_priv, pipe) 3680 I915_WRITE(PIPESTAT(pipe), pipestat_mask); 3681 POSTING_READ(PIPESTAT(PIPE_A)); 3682 } 3683 3684 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv) 3685 { 3686 assert_spin_locked(&dev_priv->irq_lock); 3687 3688 if (dev_priv->display_irqs_enabled) 3689 return; 3690 3691 dev_priv->display_irqs_enabled = true; 3692 3693 if (intel_irqs_enabled(dev_priv)) 3694 valleyview_display_irqs_install(dev_priv); 3695 } 3696 3697 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv) 3698 { 3699 assert_spin_locked(&dev_priv->irq_lock); 3700 3701 if (!dev_priv->display_irqs_enabled) 3702 return; 3703 3704 dev_priv->display_irqs_enabled = false; 3705 3706 if (intel_irqs_enabled(dev_priv)) 3707 valleyview_display_irqs_uninstall(dev_priv); 3708 } 3709 3710 static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv) 3711 { 3712 dev_priv->irq_mask = ~0; 3713 3714 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); 3715 POSTING_READ(PORT_HOTPLUG_EN); 3716 3717 I915_WRITE(VLV_IIR, 0xffffffff); 3718 I915_WRITE(VLV_IIR, 0xffffffff); 3719 I915_WRITE(VLV_IER, ~dev_priv->irq_mask); 3720 I915_WRITE(VLV_IMR, dev_priv->irq_mask); 3721 POSTING_READ(VLV_IMR); 3722 3723 /* Interrupt setup is already guaranteed to be single-threaded, this is 3724 * just to make the assert_spin_locked check happy. */ 3725 spin_lock_irq(&dev_priv->irq_lock); 3726 if (dev_priv->display_irqs_enabled) 3727 valleyview_display_irqs_install(dev_priv); 3728 spin_unlock_irq(&dev_priv->irq_lock); 3729 } 3730 3731 static int valleyview_irq_postinstall(struct drm_device *dev) 3732 { 3733 struct drm_i915_private *dev_priv = dev->dev_private; 3734 3735 vlv_display_irq_postinstall(dev_priv); 3736 3737 gen5_gt_irq_postinstall(dev); 3738 3739 /* ack & enable invalid PTE error interrupts */ 3740 #if 0 /* FIXME: add support to irq handler for checking these bits */ 3741 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK); 3742 I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK); 3743 #endif 3744 3745 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); 3746 3747 return 0; 3748 } 3749 3750 static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv) 3751 { 3752 /* These are interrupts we'll toggle with the ring mask register */ 3753 uint32_t gt_interrupts[] = { 3754 GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT | 3755 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT | 3756 GT_RENDER_L3_PARITY_ERROR_INTERRUPT | 3757 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT | 3758 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT, 3759 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT | 3760 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT | 3761 GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT | 3762 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT, 3763 0, 3764 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT | 3765 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT 3766 }; 3767 3768 dev_priv->pm_irq_mask = 0xffffffff; 3769 GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]); 3770 GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]); 3771 /* 3772 * RPS interrupts will get enabled/disabled on demand when RPS itself 3773 * is enabled/disabled. 3774 */ 3775 GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, 0); 3776 GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]); 3777 } 3778 3779 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) 3780 { 3781 uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE; 3782 uint32_t de_pipe_enables; 3783 u32 de_port_masked = GEN8_AUX_CHANNEL_A; 3784 u32 de_port_enables; 3785 enum pipe pipe; 3786 3787 if (INTEL_INFO(dev_priv)->gen >= 9) { 3788 de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE | 3789 GEN9_DE_PIPE_IRQ_FAULT_ERRORS; 3790 de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C | 3791 GEN9_AUX_CHANNEL_D; 3792 if (IS_BROXTON(dev_priv)) 3793 de_port_masked |= BXT_DE_PORT_GMBUS; 3794 } else { 3795 de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE | 3796 GEN8_DE_PIPE_IRQ_FAULT_ERRORS; 3797 } 3798 3799 de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK | 3800 GEN8_PIPE_FIFO_UNDERRUN; 3801 3802 de_port_enables = de_port_masked; 3803 if (IS_BROXTON(dev_priv)) 3804 de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK; 3805 else if (IS_BROADWELL(dev_priv)) 3806 de_port_enables |= GEN8_PORT_DP_A_HOTPLUG; 3807 3808 dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked; 3809 dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked; 3810 dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked; 3811 3812 for_each_pipe(dev_priv, pipe) 3813 if (intel_display_power_is_enabled(dev_priv, 3814 POWER_DOMAIN_PIPE(pipe))) 3815 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe, 3816 dev_priv->de_irq_mask[pipe], 3817 de_pipe_enables); 3818 3819 GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables); 3820 } 3821 3822 static int gen8_irq_postinstall(struct drm_device *dev) 3823 { 3824 struct drm_i915_private *dev_priv = dev->dev_private; 3825 3826 if (HAS_PCH_SPLIT(dev)) 3827 ibx_irq_pre_postinstall(dev); 3828 3829 gen8_gt_irq_postinstall(dev_priv); 3830 gen8_de_irq_postinstall(dev_priv); 3831 3832 if (HAS_PCH_SPLIT(dev)) 3833 ibx_irq_postinstall(dev); 3834 3835 I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL); 3836 POSTING_READ(GEN8_MASTER_IRQ); 3837 3838 return 0; 3839 } 3840 3841 static int cherryview_irq_postinstall(struct drm_device *dev) 3842 { 3843 struct drm_i915_private *dev_priv = dev->dev_private; 3844 3845 vlv_display_irq_postinstall(dev_priv); 3846 3847 gen8_gt_irq_postinstall(dev_priv); 3848 3849 I915_WRITE(GEN8_MASTER_IRQ, MASTER_INTERRUPT_ENABLE); 3850 POSTING_READ(GEN8_MASTER_IRQ); 3851 3852 return 0; 3853 } 3854 3855 static void gen8_irq_uninstall(struct drm_device *dev) 3856 { 3857 struct drm_i915_private *dev_priv = dev->dev_private; 3858 3859 if (!dev_priv) 3860 return; 3861 3862 gen8_irq_reset(dev); 3863 } 3864 3865 static void vlv_display_irq_uninstall(struct drm_i915_private *dev_priv) 3866 { 3867 /* Interrupt setup is already guaranteed to be single-threaded, this is 3868 * just to make the assert_spin_locked check happy. */ 3869 spin_lock_irq(&dev_priv->irq_lock); 3870 if (dev_priv->display_irqs_enabled) 3871 valleyview_display_irqs_uninstall(dev_priv); 3872 spin_unlock_irq(&dev_priv->irq_lock); 3873 3874 vlv_display_irq_reset(dev_priv); 3875 3876 dev_priv->irq_mask = ~0; 3877 } 3878 3879 static void valleyview_irq_uninstall(struct drm_device *dev) 3880 { 3881 struct drm_i915_private *dev_priv = dev->dev_private; 3882 3883 if (!dev_priv) 3884 return; 3885 3886 I915_WRITE(VLV_MASTER_IER, 0); 3887 3888 gen5_gt_irq_reset(dev); 3889 3890 I915_WRITE(HWSTAM, 0xffffffff); 3891 3892 vlv_display_irq_uninstall(dev_priv); 3893 } 3894 3895 static void cherryview_irq_uninstall(struct drm_device *dev) 3896 { 3897 struct drm_i915_private *dev_priv = dev->dev_private; 3898 3899 if (!dev_priv) 3900 return; 3901 3902 I915_WRITE(GEN8_MASTER_IRQ, 0); 3903 POSTING_READ(GEN8_MASTER_IRQ); 3904 3905 gen8_gt_irq_reset(dev_priv); 3906 3907 GEN5_IRQ_RESET(GEN8_PCU_); 3908 3909 vlv_display_irq_uninstall(dev_priv); 3910 } 3911 3912 static void ironlake_irq_uninstall(struct drm_device *dev) 3913 { 3914 struct drm_i915_private *dev_priv = dev->dev_private; 3915 3916 if (!dev_priv) 3917 return; 3918 3919 ironlake_irq_reset(dev); 3920 } 3921 3922 static void i8xx_irq_preinstall(struct drm_device * dev) 3923 { 3924 struct drm_i915_private *dev_priv = dev->dev_private; 3925 int pipe; 3926 3927 for_each_pipe(dev_priv, pipe) 3928 I915_WRITE(PIPESTAT(pipe), 0); 3929 I915_WRITE16(IMR, 0xffff); 3930 I915_WRITE16(IER, 0x0); 3931 POSTING_READ16(IER); 3932 } 3933 3934 static int i8xx_irq_postinstall(struct drm_device *dev) 3935 { 3936 struct drm_i915_private *dev_priv = dev->dev_private; 3937 3938 I915_WRITE16(EMR, 3939 ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); 3940 3941 /* Unmask the interrupts that we always want on. */ 3942 dev_priv->irq_mask = 3943 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3944 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3945 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3946 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT); 3947 I915_WRITE16(IMR, dev_priv->irq_mask); 3948 3949 I915_WRITE16(IER, 3950 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3951 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3952 I915_USER_INTERRUPT); 3953 POSTING_READ16(IER); 3954 3955 /* Interrupt setup is already guaranteed to be single-threaded, this is 3956 * just to make the assert_spin_locked check happy. */ 3957 spin_lock_irq(&dev_priv->irq_lock); 3958 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 3959 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 3960 spin_unlock_irq(&dev_priv->irq_lock); 3961 3962 return 0; 3963 } 3964 3965 /* 3966 * Returns true when a page flip has completed. 3967 */ 3968 static bool i8xx_handle_vblank(struct drm_device *dev, 3969 int plane, int pipe, u32 iir) 3970 { 3971 struct drm_i915_private *dev_priv = dev->dev_private; 3972 u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane); 3973 3974 if (!intel_pipe_handle_vblank(dev, pipe)) 3975 return false; 3976 3977 if ((iir & flip_pending) == 0) 3978 goto check_page_flip; 3979 3980 /* We detect FlipDone by looking for the change in PendingFlip from '1' 3981 * to '0' on the following vblank, i.e. IIR has the Pendingflip 3982 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence 3983 * the flip is completed (no longer pending). Since this doesn't raise 3984 * an interrupt per se, we watch for the change at vblank. 3985 */ 3986 if (I915_READ16(ISR) & flip_pending) 3987 goto check_page_flip; 3988 3989 intel_prepare_page_flip(dev, plane); 3990 intel_finish_page_flip(dev, pipe); 3991 return true; 3992 3993 check_page_flip: 3994 intel_check_page_flip(dev, pipe); 3995 return false; 3996 } 3997 3998 static irqreturn_t i8xx_irq_handler(int irq, void *arg) 3999 { 4000 struct drm_device *dev = arg; 4001 struct drm_i915_private *dev_priv = dev->dev_private; 4002 u16 iir, new_iir; 4003 u32 pipe_stats[2]; 4004 int pipe; 4005 u16 flip_mask = 4006 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 4007 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 4008 irqreturn_t ret; 4009 4010 if (!intel_irqs_enabled(dev_priv)) 4011 return IRQ_NONE; 4012 4013 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 4014 disable_rpm_wakeref_asserts(dev_priv); 4015 4016 ret = IRQ_NONE; 4017 iir = I915_READ16(IIR); 4018 if (iir == 0) 4019 goto out; 4020 4021 while (iir & ~flip_mask) { 4022 /* Can't rely on pipestat interrupt bit in iir as it might 4023 * have been cleared after the pipestat interrupt was received. 4024 * It doesn't set the bit in iir again, but it still produces 4025 * interrupts (for non-MSI). 4026 */ 4027 spin_lock(&dev_priv->irq_lock); 4028 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 4029 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir); 4030 4031 for_each_pipe(dev_priv, pipe) { 4032 i915_reg_t reg = PIPESTAT(pipe); 4033 pipe_stats[pipe] = I915_READ(reg); 4034 4035 /* 4036 * Clear the PIPE*STAT regs before the IIR 4037 */ 4038 if (pipe_stats[pipe] & 0x8000ffff) 4039 I915_WRITE(reg, pipe_stats[pipe]); 4040 } 4041 spin_unlock(&dev_priv->irq_lock); 4042 4043 I915_WRITE16(IIR, iir & ~flip_mask); 4044 new_iir = I915_READ16(IIR); /* Flush posted writes */ 4045 4046 if (iir & I915_USER_INTERRUPT) 4047 notify_ring(&dev_priv->ring[RCS]); 4048 4049 for_each_pipe(dev_priv, pipe) { 4050 int plane = pipe; 4051 if (HAS_FBC(dev)) 4052 plane = !plane; 4053 4054 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS && 4055 i8xx_handle_vblank(dev, plane, pipe, iir)) 4056 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane); 4057 4058 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 4059 i9xx_pipe_crc_irq_handler(dev, pipe); 4060 4061 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 4062 intel_cpu_fifo_underrun_irq_handler(dev_priv, 4063 pipe); 4064 } 4065 4066 iir = new_iir; 4067 } 4068 ret = IRQ_HANDLED; 4069 4070 out: 4071 enable_rpm_wakeref_asserts(dev_priv); 4072 4073 return ret; 4074 } 4075 4076 static void i8xx_irq_uninstall(struct drm_device * dev) 4077 { 4078 struct drm_i915_private *dev_priv = dev->dev_private; 4079 int pipe; 4080 4081 for_each_pipe(dev_priv, pipe) { 4082 /* Clear enable bits; then clear status bits */ 4083 I915_WRITE(PIPESTAT(pipe), 0); 4084 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); 4085 } 4086 I915_WRITE16(IMR, 0xffff); 4087 I915_WRITE16(IER, 0x0); 4088 I915_WRITE16(IIR, I915_READ16(IIR)); 4089 } 4090 4091 static void i915_irq_preinstall(struct drm_device * dev) 4092 { 4093 struct drm_i915_private *dev_priv = dev->dev_private; 4094 int pipe; 4095 4096 if (I915_HAS_HOTPLUG(dev)) { 4097 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); 4098 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 4099 } 4100 4101 I915_WRITE16(HWSTAM, 0xeffe); 4102 for_each_pipe(dev_priv, pipe) 4103 I915_WRITE(PIPESTAT(pipe), 0); 4104 I915_WRITE(IMR, 0xffffffff); 4105 I915_WRITE(IER, 0x0); 4106 POSTING_READ(IER); 4107 } 4108 4109 static int i915_irq_postinstall(struct drm_device *dev) 4110 { 4111 struct drm_i915_private *dev_priv = dev->dev_private; 4112 u32 enable_mask; 4113 4114 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); 4115 4116 /* Unmask the interrupts that we always want on. */ 4117 dev_priv->irq_mask = 4118 ~(I915_ASLE_INTERRUPT | 4119 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 4120 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 4121 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 4122 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT); 4123 4124 enable_mask = 4125 I915_ASLE_INTERRUPT | 4126 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 4127 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 4128 I915_USER_INTERRUPT; 4129 4130 if (I915_HAS_HOTPLUG(dev)) { 4131 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); 4132 POSTING_READ(PORT_HOTPLUG_EN); 4133 4134 /* Enable in IER... */ 4135 enable_mask |= I915_DISPLAY_PORT_INTERRUPT; 4136 /* and unmask in IMR */ 4137 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT; 4138 } 4139 4140 I915_WRITE(IMR, dev_priv->irq_mask); 4141 I915_WRITE(IER, enable_mask); 4142 POSTING_READ(IER); 4143 4144 i915_enable_asle_pipestat(dev); 4145 4146 /* Interrupt setup is already guaranteed to be single-threaded, this is 4147 * just to make the assert_spin_locked check happy. */ 4148 spin_lock_irq(&dev_priv->irq_lock); 4149 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 4150 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 4151 spin_unlock_irq(&dev_priv->irq_lock); 4152 4153 return 0; 4154 } 4155 4156 /* 4157 * Returns true when a page flip has completed. 4158 */ 4159 static bool i915_handle_vblank(struct drm_device *dev, 4160 int plane, int pipe, u32 iir) 4161 { 4162 struct drm_i915_private *dev_priv = dev->dev_private; 4163 u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane); 4164 4165 if (!intel_pipe_handle_vblank(dev, pipe)) 4166 return false; 4167 4168 if ((iir & flip_pending) == 0) 4169 goto check_page_flip; 4170 4171 /* We detect FlipDone by looking for the change in PendingFlip from '1' 4172 * to '0' on the following vblank, i.e. IIR has the Pendingflip 4173 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence 4174 * the flip is completed (no longer pending). Since this doesn't raise 4175 * an interrupt per se, we watch for the change at vblank. 4176 */ 4177 if (I915_READ(ISR) & flip_pending) 4178 goto check_page_flip; 4179 4180 intel_prepare_page_flip(dev, plane); 4181 intel_finish_page_flip(dev, pipe); 4182 return true; 4183 4184 check_page_flip: 4185 intel_check_page_flip(dev, pipe); 4186 return false; 4187 } 4188 4189 static irqreturn_t i915_irq_handler(int irq, void *arg) 4190 { 4191 struct drm_device *dev = arg; 4192 struct drm_i915_private *dev_priv = dev->dev_private; 4193 u32 iir, new_iir, pipe_stats[I915_MAX_PIPES]; 4194 u32 flip_mask = 4195 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 4196 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 4197 int pipe, ret = IRQ_NONE; 4198 4199 if (!intel_irqs_enabled(dev_priv)) 4200 return IRQ_NONE; 4201 4202 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 4203 disable_rpm_wakeref_asserts(dev_priv); 4204 4205 iir = I915_READ(IIR); 4206 do { 4207 bool irq_received = (iir & ~flip_mask) != 0; 4208 bool blc_event = false; 4209 4210 /* Can't rely on pipestat interrupt bit in iir as it might 4211 * have been cleared after the pipestat interrupt was received. 4212 * It doesn't set the bit in iir again, but it still produces 4213 * interrupts (for non-MSI). 4214 */ 4215 spin_lock(&dev_priv->irq_lock); 4216 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 4217 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir); 4218 4219 for_each_pipe(dev_priv, pipe) { 4220 i915_reg_t reg = PIPESTAT(pipe); 4221 pipe_stats[pipe] = I915_READ(reg); 4222 4223 /* Clear the PIPE*STAT regs before the IIR */ 4224 if (pipe_stats[pipe] & 0x8000ffff) { 4225 I915_WRITE(reg, pipe_stats[pipe]); 4226 irq_received = true; 4227 } 4228 } 4229 spin_unlock(&dev_priv->irq_lock); 4230 4231 if (!irq_received) 4232 break; 4233 4234 /* Consume port. Then clear IIR or we'll miss events */ 4235 if (I915_HAS_HOTPLUG(dev) && 4236 iir & I915_DISPLAY_PORT_INTERRUPT) 4237 i9xx_hpd_irq_handler(dev); 4238 4239 I915_WRITE(IIR, iir & ~flip_mask); 4240 new_iir = I915_READ(IIR); /* Flush posted writes */ 4241 4242 if (iir & I915_USER_INTERRUPT) 4243 notify_ring(&dev_priv->ring[RCS]); 4244 4245 for_each_pipe(dev_priv, pipe) { 4246 int plane = pipe; 4247 if (HAS_FBC(dev)) 4248 plane = !plane; 4249 4250 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS && 4251 i915_handle_vblank(dev, plane, pipe, iir)) 4252 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane); 4253 4254 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 4255 blc_event = true; 4256 4257 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 4258 i9xx_pipe_crc_irq_handler(dev, pipe); 4259 4260 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 4261 intel_cpu_fifo_underrun_irq_handler(dev_priv, 4262 pipe); 4263 } 4264 4265 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 4266 intel_opregion_asle_intr(dev); 4267 4268 /* With MSI, interrupts are only generated when iir 4269 * transitions from zero to nonzero. If another bit got 4270 * set while we were handling the existing iir bits, then 4271 * we would never get another interrupt. 4272 * 4273 * This is fine on non-MSI as well, as if we hit this path 4274 * we avoid exiting the interrupt handler only to generate 4275 * another one. 4276 * 4277 * Note that for MSI this could cause a stray interrupt report 4278 * if an interrupt landed in the time between writing IIR and 4279 * the posting read. This should be rare enough to never 4280 * trigger the 99% of 100,000 interrupts test for disabling 4281 * stray interrupts. 4282 */ 4283 ret = IRQ_HANDLED; 4284 iir = new_iir; 4285 } while (iir & ~flip_mask); 4286 4287 enable_rpm_wakeref_asserts(dev_priv); 4288 4289 return ret; 4290 } 4291 4292 static void i915_irq_uninstall(struct drm_device * dev) 4293 { 4294 struct drm_i915_private *dev_priv = dev->dev_private; 4295 int pipe; 4296 4297 if (I915_HAS_HOTPLUG(dev)) { 4298 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); 4299 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 4300 } 4301 4302 I915_WRITE16(HWSTAM, 0xffff); 4303 for_each_pipe(dev_priv, pipe) { 4304 /* Clear enable bits; then clear status bits */ 4305 I915_WRITE(PIPESTAT(pipe), 0); 4306 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); 4307 } 4308 I915_WRITE(IMR, 0xffffffff); 4309 I915_WRITE(IER, 0x0); 4310 4311 I915_WRITE(IIR, I915_READ(IIR)); 4312 } 4313 4314 static void i965_irq_preinstall(struct drm_device * dev) 4315 { 4316 struct drm_i915_private *dev_priv = dev->dev_private; 4317 int pipe; 4318 4319 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); 4320 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 4321 4322 I915_WRITE(HWSTAM, 0xeffe); 4323 for_each_pipe(dev_priv, pipe) 4324 I915_WRITE(PIPESTAT(pipe), 0); 4325 I915_WRITE(IMR, 0xffffffff); 4326 I915_WRITE(IER, 0x0); 4327 POSTING_READ(IER); 4328 } 4329 4330 static int i965_irq_postinstall(struct drm_device *dev) 4331 { 4332 struct drm_i915_private *dev_priv = dev->dev_private; 4333 u32 enable_mask; 4334 u32 error_mask; 4335 4336 /* Unmask the interrupts that we always want on. */ 4337 dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT | 4338 I915_DISPLAY_PORT_INTERRUPT | 4339 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 4340 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 4341 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 4342 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | 4343 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 4344 4345 enable_mask = ~dev_priv->irq_mask; 4346 enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 4347 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT); 4348 enable_mask |= I915_USER_INTERRUPT; 4349 4350 if (IS_G4X(dev)) 4351 enable_mask |= I915_BSD_USER_INTERRUPT; 4352 4353 /* Interrupt setup is already guaranteed to be single-threaded, this is 4354 * just to make the assert_spin_locked check happy. */ 4355 spin_lock_irq(&dev_priv->irq_lock); 4356 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); 4357 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 4358 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 4359 spin_unlock_irq(&dev_priv->irq_lock); 4360 4361 /* 4362 * Enable some error detection, note the instruction error mask 4363 * bit is reserved, so we leave it masked. 4364 */ 4365 if (IS_G4X(dev)) { 4366 error_mask = ~(GM45_ERROR_PAGE_TABLE | 4367 GM45_ERROR_MEM_PRIV | 4368 GM45_ERROR_CP_PRIV | 4369 I915_ERROR_MEMORY_REFRESH); 4370 } else { 4371 error_mask = ~(I915_ERROR_PAGE_TABLE | 4372 I915_ERROR_MEMORY_REFRESH); 4373 } 4374 I915_WRITE(EMR, error_mask); 4375 4376 I915_WRITE(IMR, dev_priv->irq_mask); 4377 I915_WRITE(IER, enable_mask); 4378 POSTING_READ(IER); 4379 4380 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); 4381 POSTING_READ(PORT_HOTPLUG_EN); 4382 4383 i915_enable_asle_pipestat(dev); 4384 4385 return 0; 4386 } 4387 4388 static void i915_hpd_irq_setup(struct drm_device *dev) 4389 { 4390 struct drm_i915_private *dev_priv = dev->dev_private; 4391 u32 hotplug_en; 4392 4393 assert_spin_locked(&dev_priv->irq_lock); 4394 4395 /* Note HDMI and DP share hotplug bits */ 4396 /* enable bits are the same for all generations */ 4397 hotplug_en = intel_hpd_enabled_irqs(dev, hpd_mask_i915); 4398 /* Programming the CRT detection parameters tends 4399 to generate a spurious hotplug event about three 4400 seconds later. So just do it once. 4401 */ 4402 if (IS_G4X(dev)) 4403 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64; 4404 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; 4405 4406 /* Ignore TV since it's buggy */ 4407 i915_hotplug_interrupt_update_locked(dev_priv, 4408 HOTPLUG_INT_EN_MASK | 4409 CRT_HOTPLUG_VOLTAGE_COMPARE_MASK | 4410 CRT_HOTPLUG_ACTIVATION_PERIOD_64, 4411 hotplug_en); 4412 } 4413 4414 static irqreturn_t i965_irq_handler(int irq, void *arg) 4415 { 4416 struct drm_device *dev = arg; 4417 struct drm_i915_private *dev_priv = dev->dev_private; 4418 u32 iir, new_iir; 4419 u32 pipe_stats[I915_MAX_PIPES]; 4420 int ret = IRQ_NONE, pipe; 4421 u32 flip_mask = 4422 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 4423 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 4424 4425 if (!intel_irqs_enabled(dev_priv)) 4426 return IRQ_NONE; 4427 4428 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 4429 disable_rpm_wakeref_asserts(dev_priv); 4430 4431 iir = I915_READ(IIR); 4432 4433 for (;;) { 4434 bool irq_received = (iir & ~flip_mask) != 0; 4435 bool blc_event = false; 4436 4437 /* Can't rely on pipestat interrupt bit in iir as it might 4438 * have been cleared after the pipestat interrupt was received. 4439 * It doesn't set the bit in iir again, but it still produces 4440 * interrupts (for non-MSI). 4441 */ 4442 spin_lock(&dev_priv->irq_lock); 4443 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 4444 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir); 4445 4446 for_each_pipe(dev_priv, pipe) { 4447 i915_reg_t reg = PIPESTAT(pipe); 4448 pipe_stats[pipe] = I915_READ(reg); 4449 4450 /* 4451 * Clear the PIPE*STAT regs before the IIR 4452 */ 4453 if (pipe_stats[pipe] & 0x8000ffff) { 4454 I915_WRITE(reg, pipe_stats[pipe]); 4455 irq_received = true; 4456 } 4457 } 4458 spin_unlock(&dev_priv->irq_lock); 4459 4460 if (!irq_received) 4461 break; 4462 4463 ret = IRQ_HANDLED; 4464 4465 /* Consume port. Then clear IIR or we'll miss events */ 4466 if (iir & I915_DISPLAY_PORT_INTERRUPT) 4467 i9xx_hpd_irq_handler(dev); 4468 4469 I915_WRITE(IIR, iir & ~flip_mask); 4470 new_iir = I915_READ(IIR); /* Flush posted writes */ 4471 4472 if (iir & I915_USER_INTERRUPT) 4473 notify_ring(&dev_priv->ring[RCS]); 4474 if (iir & I915_BSD_USER_INTERRUPT) 4475 notify_ring(&dev_priv->ring[VCS]); 4476 4477 for_each_pipe(dev_priv, pipe) { 4478 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS && 4479 i915_handle_vblank(dev, pipe, pipe, iir)) 4480 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe); 4481 4482 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 4483 blc_event = true; 4484 4485 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 4486 i9xx_pipe_crc_irq_handler(dev, pipe); 4487 4488 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 4489 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 4490 } 4491 4492 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 4493 intel_opregion_asle_intr(dev); 4494 4495 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 4496 gmbus_irq_handler(dev); 4497 4498 /* With MSI, interrupts are only generated when iir 4499 * transitions from zero to nonzero. If another bit got 4500 * set while we were handling the existing iir bits, then 4501 * we would never get another interrupt. 4502 * 4503 * This is fine on non-MSI as well, as if we hit this path 4504 * we avoid exiting the interrupt handler only to generate 4505 * another one. 4506 * 4507 * Note that for MSI this could cause a stray interrupt report 4508 * if an interrupt landed in the time between writing IIR and 4509 * the posting read. This should be rare enough to never 4510 * trigger the 99% of 100,000 interrupts test for disabling 4511 * stray interrupts. 4512 */ 4513 iir = new_iir; 4514 } 4515 4516 enable_rpm_wakeref_asserts(dev_priv); 4517 4518 return ret; 4519 } 4520 4521 static void i965_irq_uninstall(struct drm_device * dev) 4522 { 4523 struct drm_i915_private *dev_priv = dev->dev_private; 4524 int pipe; 4525 4526 if (!dev_priv) 4527 return; 4528 4529 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); 4530 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 4531 4532 I915_WRITE(HWSTAM, 0xffffffff); 4533 for_each_pipe(dev_priv, pipe) 4534 I915_WRITE(PIPESTAT(pipe), 0); 4535 I915_WRITE(IMR, 0xffffffff); 4536 I915_WRITE(IER, 0x0); 4537 4538 for_each_pipe(dev_priv, pipe) 4539 I915_WRITE(PIPESTAT(pipe), 4540 I915_READ(PIPESTAT(pipe)) & 0x8000ffff); 4541 I915_WRITE(IIR, I915_READ(IIR)); 4542 } 4543 4544 /** 4545 * intel_irq_init - initializes irq support 4546 * @dev_priv: i915 device instance 4547 * 4548 * This function initializes all the irq support including work items, timers 4549 * and all the vtables. It does not setup the interrupt itself though. 4550 */ 4551 void intel_irq_init(struct drm_i915_private *dev_priv) 4552 { 4553 struct drm_device *dev = dev_priv->dev; 4554 4555 intel_hpd_init_work(dev_priv); 4556 4557 INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work); 4558 INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work); 4559 4560 /* Let's track the enabled rps events */ 4561 if (IS_VALLEYVIEW(dev_priv)) 4562 /* WaGsvRC0ResidencyMethod:vlv */ 4563 dev_priv->pm_rps_events = GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED; 4564 else 4565 dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS; 4566 4567 INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work, 4568 i915_hangcheck_elapsed); 4569 4570 pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE); 4571 4572 if (IS_GEN2(dev_priv)) { 4573 dev->max_vblank_count = 0; 4574 dev->driver->get_vblank_counter = i8xx_get_vblank_counter; 4575 } else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) { 4576 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */ 4577 dev->driver->get_vblank_counter = g4x_get_vblank_counter; 4578 } else { 4579 dev->driver->get_vblank_counter = i915_get_vblank_counter; 4580 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ 4581 } 4582 4583 /* 4584 * Opt out of the vblank disable timer on everything except gen2. 4585 * Gen2 doesn't have a hardware frame counter and so depends on 4586 * vblank interrupts to produce sane vblank seuquence numbers. 4587 */ 4588 if (!IS_GEN2(dev_priv)) 4589 dev->vblank_disable_immediate = true; 4590 4591 dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp; 4592 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos; 4593 4594 if (IS_CHERRYVIEW(dev_priv)) { 4595 dev->driver->irq_handler = cherryview_irq_handler; 4596 dev->driver->irq_preinstall = cherryview_irq_preinstall; 4597 dev->driver->irq_postinstall = cherryview_irq_postinstall; 4598 dev->driver->irq_uninstall = cherryview_irq_uninstall; 4599 dev->driver->enable_vblank = valleyview_enable_vblank; 4600 dev->driver->disable_vblank = valleyview_disable_vblank; 4601 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 4602 } else if (IS_VALLEYVIEW(dev_priv)) { 4603 dev->driver->irq_handler = valleyview_irq_handler; 4604 dev->driver->irq_preinstall = valleyview_irq_preinstall; 4605 dev->driver->irq_postinstall = valleyview_irq_postinstall; 4606 dev->driver->irq_uninstall = valleyview_irq_uninstall; 4607 dev->driver->enable_vblank = valleyview_enable_vblank; 4608 dev->driver->disable_vblank = valleyview_disable_vblank; 4609 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 4610 } else if (INTEL_INFO(dev_priv)->gen >= 8) { 4611 dev->driver->irq_handler = gen8_irq_handler; 4612 dev->driver->irq_preinstall = gen8_irq_reset; 4613 dev->driver->irq_postinstall = gen8_irq_postinstall; 4614 dev->driver->irq_uninstall = gen8_irq_uninstall; 4615 dev->driver->enable_vblank = gen8_enable_vblank; 4616 dev->driver->disable_vblank = gen8_disable_vblank; 4617 if (IS_BROXTON(dev)) 4618 dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup; 4619 else if (HAS_PCH_SPT(dev)) 4620 dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup; 4621 else 4622 dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup; 4623 } else if (HAS_PCH_SPLIT(dev)) { 4624 dev->driver->irq_handler = ironlake_irq_handler; 4625 dev->driver->irq_preinstall = ironlake_irq_reset; 4626 dev->driver->irq_postinstall = ironlake_irq_postinstall; 4627 dev->driver->irq_uninstall = ironlake_irq_uninstall; 4628 dev->driver->enable_vblank = ironlake_enable_vblank; 4629 dev->driver->disable_vblank = ironlake_disable_vblank; 4630 dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup; 4631 } else { 4632 if (INTEL_INFO(dev_priv)->gen == 2) { 4633 dev->driver->irq_preinstall = i8xx_irq_preinstall; 4634 dev->driver->irq_postinstall = i8xx_irq_postinstall; 4635 dev->driver->irq_handler = i8xx_irq_handler; 4636 dev->driver->irq_uninstall = i8xx_irq_uninstall; 4637 } else if (INTEL_INFO(dev_priv)->gen == 3) { 4638 dev->driver->irq_preinstall = i915_irq_preinstall; 4639 dev->driver->irq_postinstall = i915_irq_postinstall; 4640 dev->driver->irq_uninstall = i915_irq_uninstall; 4641 dev->driver->irq_handler = i915_irq_handler; 4642 } else { 4643 dev->driver->irq_preinstall = i965_irq_preinstall; 4644 dev->driver->irq_postinstall = i965_irq_postinstall; 4645 dev->driver->irq_uninstall = i965_irq_uninstall; 4646 dev->driver->irq_handler = i965_irq_handler; 4647 } 4648 if (I915_HAS_HOTPLUG(dev_priv)) 4649 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 4650 dev->driver->enable_vblank = i915_enable_vblank; 4651 dev->driver->disable_vblank = i915_disable_vblank; 4652 } 4653 } 4654 4655 /** 4656 * intel_irq_install - enables the hardware interrupt 4657 * @dev_priv: i915 device instance 4658 * 4659 * This function enables the hardware interrupt handling, but leaves the hotplug 4660 * handling still disabled. It is called after intel_irq_init(). 4661 * 4662 * In the driver load and resume code we need working interrupts in a few places 4663 * but don't want to deal with the hassle of concurrent probe and hotplug 4664 * workers. Hence the split into this two-stage approach. 4665 */ 4666 int intel_irq_install(struct drm_i915_private *dev_priv) 4667 { 4668 /* 4669 * We enable some interrupt sources in our postinstall hooks, so mark 4670 * interrupts as enabled _before_ actually enabling them to avoid 4671 * special cases in our ordering checks. 4672 */ 4673 dev_priv->pm.irqs_enabled = true; 4674 4675 return drm_irq_install(dev_priv->dev, dev_priv->dev->pdev->irq); 4676 } 4677 4678 /** 4679 * intel_irq_uninstall - finilizes all irq handling 4680 * @dev_priv: i915 device instance 4681 * 4682 * This stops interrupt and hotplug handling and unregisters and frees all 4683 * resources acquired in the init functions. 4684 */ 4685 void intel_irq_uninstall(struct drm_i915_private *dev_priv) 4686 { 4687 drm_irq_uninstall(dev_priv->dev); 4688 intel_hpd_cancel_work(dev_priv); 4689 dev_priv->pm.irqs_enabled = false; 4690 } 4691 4692 /** 4693 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling 4694 * @dev_priv: i915 device instance 4695 * 4696 * This function is used to disable interrupts at runtime, both in the runtime 4697 * pm and the system suspend/resume code. 4698 */ 4699 void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv) 4700 { 4701 dev_priv->dev->driver->irq_uninstall(dev_priv->dev); 4702 dev_priv->pm.irqs_enabled = false; 4703 synchronize_irq(dev_priv->dev->irq); 4704 } 4705 4706 /** 4707 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling 4708 * @dev_priv: i915 device instance 4709 * 4710 * This function is used to enable interrupts at runtime, both in the runtime 4711 * pm and the system suspend/resume code. 4712 */ 4713 void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv) 4714 { 4715 dev_priv->pm.irqs_enabled = true; 4716 dev_priv->dev->driver->irq_preinstall(dev_priv->dev); 4717 dev_priv->dev->driver->irq_postinstall(dev_priv->dev); 4718 } 4719