/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

/**
 * DOC: interrupt handling
 *
 * These functions provide the basic support for enabling and disabling
 * interrupt handling. There's a lot more functionality in i915_irq.c
 * and related files, but that will be described in separate chapters.
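 *
 * The hpd_* tables just below map each HPD pin to the platform-specific
 * hotplug interrupt bit used by that platform's hotplug registers.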
46 */ 47 48 static const u32 hpd_ilk[HPD_NUM_PINS] = { 49 [HPD_PORT_A] = DE_DP_A_HOTPLUG, 50 }; 51 52 static const u32 hpd_ivb[HPD_NUM_PINS] = { 53 [HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB, 54 }; 55 56 static const u32 hpd_bdw[HPD_NUM_PINS] = { 57 [HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG, 58 }; 59 60 static const u32 hpd_ibx[HPD_NUM_PINS] = { 61 [HPD_CRT] = SDE_CRT_HOTPLUG, 62 [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG, 63 [HPD_PORT_B] = SDE_PORTB_HOTPLUG, 64 [HPD_PORT_C] = SDE_PORTC_HOTPLUG, 65 [HPD_PORT_D] = SDE_PORTD_HOTPLUG 66 }; 67 68 static const u32 hpd_cpt[HPD_NUM_PINS] = { 69 [HPD_CRT] = SDE_CRT_HOTPLUG_CPT, 70 [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT, 71 [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT, 72 [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT, 73 [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT 74 }; 75 76 static const u32 hpd_spt[HPD_NUM_PINS] = { 77 [HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT, 78 [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT, 79 [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT, 80 [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT, 81 [HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT 82 }; 83 84 static const u32 hpd_mask_i915[HPD_NUM_PINS] = { 85 [HPD_CRT] = CRT_HOTPLUG_INT_EN, 86 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN, 87 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN, 88 [HPD_PORT_B] = PORTB_HOTPLUG_INT_EN, 89 [HPD_PORT_C] = PORTC_HOTPLUG_INT_EN, 90 [HPD_PORT_D] = PORTD_HOTPLUG_INT_EN 91 }; 92 93 static const u32 hpd_status_g4x[HPD_NUM_PINS] = { 94 [HPD_CRT] = CRT_HOTPLUG_INT_STATUS, 95 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X, 96 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X, 97 [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS, 98 [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS, 99 [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS 100 }; 101 102 static const u32 hpd_status_i915[HPD_NUM_PINS] = { 103 [HPD_CRT] = CRT_HOTPLUG_INT_STATUS, 104 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915, 105 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915, 106 [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS, 107 [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS, 108 [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS 109 }; 110 111 /* BXT hpd list */ 112 static const u32 hpd_bxt[HPD_NUM_PINS] = { 113 [HPD_PORT_A] = BXT_DE_PORT_HP_DDIA, 114 [HPD_PORT_B] = BXT_DE_PORT_HP_DDIB, 115 [HPD_PORT_C] = BXT_DE_PORT_HP_DDIC 116 }; 117 118 /* IIR can theoretically queue up two events. Be paranoid. */ 119 #define GEN8_IRQ_RESET_NDX(type, which) do { \ 120 I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \ 121 POSTING_READ(GEN8_##type##_IMR(which)); \ 122 I915_WRITE(GEN8_##type##_IER(which), 0); \ 123 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \ 124 POSTING_READ(GEN8_##type##_IIR(which)); \ 125 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \ 126 POSTING_READ(GEN8_##type##_IIR(which)); \ 127 } while (0) 128 129 #define GEN5_IRQ_RESET(type) do { \ 130 I915_WRITE(type##IMR, 0xffffffff); \ 131 POSTING_READ(type##IMR); \ 132 I915_WRITE(type##IER, 0); \ 133 I915_WRITE(type##IIR, 0xffffffff); \ 134 POSTING_READ(type##IIR); \ 135 I915_WRITE(type##IIR, 0xffffffff); \ 136 POSTING_READ(type##IIR); \ 137 } while (0) 138 139 /* 140 * We should clear IMR at preinstall/uninstall, and just check at postinstall. 
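 * gen5_assert_iir_is_zero() below is that check: the GEN5_IRQ_INIT() and
 * GEN8_IRQ_INIT_NDX() macros call it before writing IER/IMR, and it warns
 * (and force-clears IIR) if anything is still pending.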
 */
static void gen5_assert_iir_is_zero(struct drm_i915_private *dev_priv,
				    i915_reg_t reg)
{
	u32 val = I915_READ(reg);

	if (val == 0)
		return;

	WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
	     i915_mmio_reg_offset(reg), val);
	I915_WRITE(reg, 0xffffffff);
	POSTING_READ(reg);
	I915_WRITE(reg, 0xffffffff);
	POSTING_READ(reg);
}

#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
	gen5_assert_iir_is_zero(dev_priv, GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
	I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
} while (0)

#define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
	gen5_assert_iir_is_zero(dev_priv, type##IIR); \
	I915_WRITE(type##IER, (ier_val)); \
	I915_WRITE(type##IMR, (imr_val)); \
	POSTING_READ(type##IMR); \
} while (0)

static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);

/* For display hotplug interrupt */
static inline void
i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
				     uint32_t mask,
				     uint32_t bits)
{
	uint32_t val;

	assert_spin_locked(&dev_priv->irq_lock);
	WARN_ON(bits & ~mask);

	val = I915_READ(PORT_HOTPLUG_EN);
	val &= ~mask;
	val |= bits;
	I915_WRITE(PORT_HOTPLUG_EN, val);
}

/**
 * i915_hotplug_interrupt_update - update hotplug interrupt enable
 * @dev_priv: driver private
 * @mask: bits to update
 * @bits: bits to enable
 * NOTE: the HPD enable bits are modified both inside and outside
 * of an interrupt context. To keep concurrent read-modify-write cycles
 * from interfering, these bits are protected by a spinlock. Since this
 * function is usually not called from a context where the lock is
 * held already, this function acquires the lock itself. A non-locking
 * version is also available.
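 * (The non-locking version is i915_hotplug_interrupt_update_locked() above,
 * for callers that already hold dev_priv->irq_lock.)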
202 */ 203 void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv, 204 uint32_t mask, 205 uint32_t bits) 206 { 207 spin_lock_irq(&dev_priv->irq_lock); 208 i915_hotplug_interrupt_update_locked(dev_priv, mask, bits); 209 spin_unlock_irq(&dev_priv->irq_lock); 210 } 211 212 /** 213 * ilk_update_display_irq - update DEIMR 214 * @dev_priv: driver private 215 * @interrupt_mask: mask of interrupt bits to update 216 * @enabled_irq_mask: mask of interrupt bits to enable 217 */ 218 void ilk_update_display_irq(struct drm_i915_private *dev_priv, 219 uint32_t interrupt_mask, 220 uint32_t enabled_irq_mask) 221 { 222 uint32_t new_val; 223 224 assert_spin_locked(&dev_priv->irq_lock); 225 226 WARN_ON(enabled_irq_mask & ~interrupt_mask); 227 228 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 229 return; 230 231 new_val = dev_priv->irq_mask; 232 new_val &= ~interrupt_mask; 233 new_val |= (~enabled_irq_mask & interrupt_mask); 234 235 if (new_val != dev_priv->irq_mask) { 236 dev_priv->irq_mask = new_val; 237 I915_WRITE(DEIMR, dev_priv->irq_mask); 238 POSTING_READ(DEIMR); 239 } 240 } 241 242 /** 243 * ilk_update_gt_irq - update GTIMR 244 * @dev_priv: driver private 245 * @interrupt_mask: mask of interrupt bits to update 246 * @enabled_irq_mask: mask of interrupt bits to enable 247 */ 248 static void ilk_update_gt_irq(struct drm_i915_private *dev_priv, 249 uint32_t interrupt_mask, 250 uint32_t enabled_irq_mask) 251 { 252 assert_spin_locked(&dev_priv->irq_lock); 253 254 WARN_ON(enabled_irq_mask & ~interrupt_mask); 255 256 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 257 return; 258 259 dev_priv->gt_irq_mask &= ~interrupt_mask; 260 dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask); 261 I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 262 } 263 264 void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask) 265 { 266 ilk_update_gt_irq(dev_priv, mask, mask); 267 POSTING_READ_FW(GTIMR); 268 } 269 270 void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask) 271 { 272 ilk_update_gt_irq(dev_priv, mask, 0); 273 } 274 275 static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv) 276 { 277 return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR; 278 } 279 280 static i915_reg_t gen6_pm_imr(struct drm_i915_private *dev_priv) 281 { 282 return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR; 283 } 284 285 static i915_reg_t gen6_pm_ier(struct drm_i915_private *dev_priv) 286 { 287 return INTEL_INFO(dev_priv)->gen >= 8 ? 
		GEN8_GT_IER(2) : GEN6_PMIER;
}

/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	new_val = dev_priv->pm_irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_irq_mask) {
		dev_priv->pm_irq_mask = new_val;
		I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_irq_mask);
		POSTING_READ(gen6_pm_imr(dev_priv));
	}
}

void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	snb_update_pm_irq(dev_priv, mask, mask);
}

static void __gen6_disable_pm_irq(struct drm_i915_private *dev_priv,
				  uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, 0);
}

void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	__gen6_disable_pm_irq(dev_priv, mask);
}

void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
{
	i915_reg_t reg = gen6_pm_iir(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	I915_WRITE(reg, dev_priv->pm_rps_events);
	I915_WRITE(reg, dev_priv->pm_rps_events);
	POSTING_READ(reg);
	dev_priv->rps.pm_iir = 0;
	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	WARN_ON_ONCE(dev_priv->rps.pm_iir);
	WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
	dev_priv->rps.interrupts_enabled = true;
	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) |
				dev_priv->pm_rps_events);
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);

	spin_unlock_irq(&dev_priv->irq_lock);
}

u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask)
{
	return (mask & ~dev_priv->rps.pm_intr_keep);
}

void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	dev_priv->rps.interrupts_enabled = false;

	I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0));

	__gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) &
				~dev_priv->pm_rps_events);

	spin_unlock_irq(&dev_priv->irq_lock);
	synchronize_irq(dev_priv->drm.irq);

	/* Now that we will not be generating any more work, flush any
	 * outstanding tasks. As we are called on the RPS idle path,
	 * we will reset the GPU to minimum frequencies, so the current
	 * state of the worker can be discarded.
387 */ 388 cancel_work_sync(&dev_priv->rps.work); 389 gen6_reset_rps_interrupts(dev_priv); 390 } 391 392 /** 393 * bdw_update_port_irq - update DE port interrupt 394 * @dev_priv: driver private 395 * @interrupt_mask: mask of interrupt bits to update 396 * @enabled_irq_mask: mask of interrupt bits to enable 397 */ 398 static void bdw_update_port_irq(struct drm_i915_private *dev_priv, 399 uint32_t interrupt_mask, 400 uint32_t enabled_irq_mask) 401 { 402 uint32_t new_val; 403 uint32_t old_val; 404 405 assert_spin_locked(&dev_priv->irq_lock); 406 407 WARN_ON(enabled_irq_mask & ~interrupt_mask); 408 409 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 410 return; 411 412 old_val = I915_READ(GEN8_DE_PORT_IMR); 413 414 new_val = old_val; 415 new_val &= ~interrupt_mask; 416 new_val |= (~enabled_irq_mask & interrupt_mask); 417 418 if (new_val != old_val) { 419 I915_WRITE(GEN8_DE_PORT_IMR, new_val); 420 POSTING_READ(GEN8_DE_PORT_IMR); 421 } 422 } 423 424 /** 425 * bdw_update_pipe_irq - update DE pipe interrupt 426 * @dev_priv: driver private 427 * @pipe: pipe whose interrupt to update 428 * @interrupt_mask: mask of interrupt bits to update 429 * @enabled_irq_mask: mask of interrupt bits to enable 430 */ 431 void bdw_update_pipe_irq(struct drm_i915_private *dev_priv, 432 enum pipe pipe, 433 uint32_t interrupt_mask, 434 uint32_t enabled_irq_mask) 435 { 436 uint32_t new_val; 437 438 assert_spin_locked(&dev_priv->irq_lock); 439 440 WARN_ON(enabled_irq_mask & ~interrupt_mask); 441 442 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 443 return; 444 445 new_val = dev_priv->de_irq_mask[pipe]; 446 new_val &= ~interrupt_mask; 447 new_val |= (~enabled_irq_mask & interrupt_mask); 448 449 if (new_val != dev_priv->de_irq_mask[pipe]) { 450 dev_priv->de_irq_mask[pipe] = new_val; 451 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); 452 POSTING_READ(GEN8_DE_PIPE_IMR(pipe)); 453 } 454 } 455 456 /** 457 * ibx_display_interrupt_update - update SDEIMR 458 * @dev_priv: driver private 459 * @interrupt_mask: mask of interrupt bits to update 460 * @enabled_irq_mask: mask of interrupt bits to enable 461 */ 462 void ibx_display_interrupt_update(struct drm_i915_private *dev_priv, 463 uint32_t interrupt_mask, 464 uint32_t enabled_irq_mask) 465 { 466 uint32_t sdeimr = I915_READ(SDEIMR); 467 sdeimr &= ~interrupt_mask; 468 sdeimr |= (~enabled_irq_mask & interrupt_mask); 469 470 WARN_ON(enabled_irq_mask & ~interrupt_mask); 471 472 assert_spin_locked(&dev_priv->irq_lock); 473 474 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 475 return; 476 477 I915_WRITE(SDEIMR, sdeimr); 478 POSTING_READ(SDEIMR); 479 } 480 481 static void 482 __i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, 483 u32 enable_mask, u32 status_mask) 484 { 485 i915_reg_t reg = PIPESTAT(pipe); 486 u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK; 487 488 assert_spin_locked(&dev_priv->irq_lock); 489 WARN_ON(!intel_irqs_enabled(dev_priv)); 490 491 if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK || 492 status_mask & ~PIPESTAT_INT_STATUS_MASK, 493 "pipe %c: enable_mask=0x%x, status_mask=0x%x\n", 494 pipe_name(pipe), enable_mask, status_mask)) 495 return; 496 497 if ((pipestat & enable_mask) == enable_mask) 498 return; 499 500 dev_priv->pipestat_irq_mask[pipe] |= status_mask; 501 502 /* Enable the interrupt, clear any pending status */ 503 pipestat |= enable_mask | status_mask; 504 I915_WRITE(reg, pipestat); 505 POSTING_READ(reg); 506 } 507 508 static void 509 __i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, 
510 u32 enable_mask, u32 status_mask) 511 { 512 i915_reg_t reg = PIPESTAT(pipe); 513 u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK; 514 515 assert_spin_locked(&dev_priv->irq_lock); 516 WARN_ON(!intel_irqs_enabled(dev_priv)); 517 518 if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK || 519 status_mask & ~PIPESTAT_INT_STATUS_MASK, 520 "pipe %c: enable_mask=0x%x, status_mask=0x%x\n", 521 pipe_name(pipe), enable_mask, status_mask)) 522 return; 523 524 if ((pipestat & enable_mask) == 0) 525 return; 526 527 dev_priv->pipestat_irq_mask[pipe] &= ~status_mask; 528 529 pipestat &= ~enable_mask; 530 I915_WRITE(reg, pipestat); 531 POSTING_READ(reg); 532 } 533 534 static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask) 535 { 536 u32 enable_mask = status_mask << 16; 537 538 /* 539 * On pipe A we don't support the PSR interrupt yet, 540 * on pipe B and C the same bit MBZ. 541 */ 542 if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV)) 543 return 0; 544 /* 545 * On pipe B and C we don't support the PSR interrupt yet, on pipe 546 * A the same bit is for perf counters which we don't use either. 547 */ 548 if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV)) 549 return 0; 550 551 enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS | 552 SPRITE0_FLIP_DONE_INT_EN_VLV | 553 SPRITE1_FLIP_DONE_INT_EN_VLV); 554 if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV) 555 enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV; 556 if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV) 557 enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV; 558 559 return enable_mask; 560 } 561 562 void 563 i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, 564 u32 status_mask) 565 { 566 u32 enable_mask; 567 568 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 569 enable_mask = vlv_get_pipestat_enable_mask(&dev_priv->drm, 570 status_mask); 571 else 572 enable_mask = status_mask << 16; 573 __i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask); 574 } 575 576 void 577 i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, 578 u32 status_mask) 579 { 580 u32 enable_mask; 581 582 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 583 enable_mask = vlv_get_pipestat_enable_mask(&dev_priv->drm, 584 status_mask); 585 else 586 enable_mask = status_mask << 16; 587 __i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask); 588 } 589 590 /** 591 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion 592 * @dev_priv: i915 device private 593 */ 594 static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv) 595 { 596 if (!dev_priv->opregion.asle || !IS_MOBILE(dev_priv)) 597 return; 598 599 spin_lock_irq(&dev_priv->irq_lock); 600 601 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS); 602 if (INTEL_GEN(dev_priv) >= 4) 603 i915_enable_pipestat(dev_priv, PIPE_A, 604 PIPE_LEGACY_BLC_EVENT_STATUS); 605 606 spin_unlock_irq(&dev_priv->irq_lock); 607 } 608 609 /* 610 * This timing diagram depicts the video signal in and 611 * around the vertical blanking period. 612 * 613 * Assumptions about the fictitious mode used in this example: 614 * vblank_start >= 3 615 * vsync_start = vblank_start + 1 616 * vsync_end = vblank_start + 2 617 * vtotal = vblank_start + 3 618 * 619 * start of vblank: 620 * latch double buffered registers 621 * increment frame counter (ctg+) 622 * generate start of vblank interrupt (gen4+) 623 * | 624 * | frame start: 625 * | generate frame start interrupt (aka. 
vblank interrupt) (gmch) 626 * | may be shifted forward 1-3 extra lines via PIPECONF 627 * | | 628 * | | start of vsync: 629 * | | generate vsync interrupt 630 * | | | 631 * ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx 632 * . \hs/ . \hs/ \hs/ \hs/ . \hs/ 633 * ----va---> <-----------------vb--------------------> <--------va------------- 634 * | | <----vs-----> | 635 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2) 636 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+) 637 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi) 638 * | | | 639 * last visible pixel first visible pixel 640 * | increment frame counter (gen3/4) 641 * pixel counter = vblank_start * htotal pixel counter = 0 (gen3/4) 642 * 643 * x = horizontal active 644 * _ = horizontal blanking 645 * hs = horizontal sync 646 * va = vertical active 647 * vb = vertical blanking 648 * vs = vertical sync 649 * vbs = vblank_start (number) 650 * 651 * Summary: 652 * - most events happen at the start of horizontal sync 653 * - frame start happens at the start of horizontal blank, 1-4 lines 654 * (depending on PIPECONF settings) after the start of vblank 655 * - gen3/4 pixel and frame counter are synchronized with the start 656 * of horizontal active on the first line of vertical active 657 */ 658 659 static u32 i8xx_get_vblank_counter(struct drm_device *dev, unsigned int pipe) 660 { 661 /* Gen2 doesn't have a hardware frame counter */ 662 return 0; 663 } 664 665 /* Called from drm generic code, passed a 'crtc', which 666 * we use as a pipe index 667 */ 668 static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe) 669 { 670 struct drm_i915_private *dev_priv = to_i915(dev); 671 i915_reg_t high_frame, low_frame; 672 u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal; 673 struct intel_crtc *intel_crtc = 674 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); 675 const struct drm_display_mode *mode = &intel_crtc->base.hwmode; 676 677 htotal = mode->crtc_htotal; 678 hsync_start = mode->crtc_hsync_start; 679 vbl_start = mode->crtc_vblank_start; 680 if (mode->flags & DRM_MODE_FLAG_INTERLACE) 681 vbl_start = DIV_ROUND_UP(vbl_start, 2); 682 683 /* Convert to pixel count */ 684 vbl_start *= htotal; 685 686 /* Start of vblank event occurs at start of hsync */ 687 vbl_start -= htotal - hsync_start; 688 689 high_frame = PIPEFRAME(pipe); 690 low_frame = PIPEFRAMEPIXEL(pipe); 691 692 /* 693 * High & low register fields aren't synchronized, so make sure 694 * we get a low value that's stable across two reads of the high 695 * register. 696 */ 697 do { 698 high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK; 699 low = I915_READ(low_frame); 700 high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK; 701 } while (high1 != high2); 702 703 high1 >>= PIPE_FRAME_HIGH_SHIFT; 704 pixel = low & PIPE_PIXEL_MASK; 705 low >>= PIPE_FRAME_LOW_SHIFT; 706 707 /* 708 * The frame counter increments at beginning of active. 709 * Cook up a vblank counter by also checking the pixel 710 * counter against vblank start. 
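	 * The (pixel >= vbl_start) term below bumps the result by one
	 * between the start of vblank and the frame counter increment at
	 * frame start, so the cooked-up value effectively increments at
	 * the start of vblank instead.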
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}

static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
}

/* I915_READ_FW, only for fast reads of display block, no need for forcewake etc. */
static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct drm_display_mode *mode = &crtc->base.hwmode;
	enum pipe pipe = crtc->pipe;
	int position, vtotal;

	vtotal = mode->crtc_vtotal;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vtotal /= 2;

	if (IS_GEN2(dev_priv))
		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
	else
		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

	/*
	 * On HSW, the DSL reg (0x70000) appears to return 0 if we
	 * read it just before the start of vblank. So try it again
	 * so we don't accidentally end up spanning a vblank frame
	 * increment, causing the pipe_update_end() code to squawk at us.
	 *
	 * The nature of this problem means we can't simply check the ISR
	 * bit and return the vblank start value; nor can we use the scanline
	 * debug register in the transcoder as it appears to have the same
	 * problem. We may need to extend this to include other platforms,
	 * but so far testing only shows the problem on HSW.
	 */
	if (HAS_DDI(dev_priv) && !position) {
		int i, temp;

		for (i = 0; i < 100; i++) {
			udelay(1);
			temp = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) &
				DSL_LINEMASK_GEN3;
			if (temp != position) {
				position = temp;
				break;
			}
		}
	}

	/*
	 * See update_scanline_offset() for the details on the
	 * scanline_offset adjustment.
	 */
	return (position + crtc->scanline_offset) % vtotal;
}

static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
				    unsigned int flags, int *vpos, int *hpos,
				    ktime_t *stime, ktime_t *etime,
				    const struct drm_display_mode *mode)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int position;
	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;
	unsigned long irqflags;

	if (WARN_ON(!mode->crtc_clock)) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

	ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query.
*/ 817 if (stime) 818 *stime = ktime_get(); 819 820 if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) { 821 /* No obvious pixelcount register. Only query vertical 822 * scanout position from Display scan line register. 823 */ 824 position = __intel_get_crtc_scanline(intel_crtc); 825 } else { 826 /* Have access to pixelcount since start of frame. 827 * We can split this into vertical and horizontal 828 * scanout position. 829 */ 830 position = (I915_READ_FW(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT; 831 832 /* convert to pixel counts */ 833 vbl_start *= htotal; 834 vbl_end *= htotal; 835 vtotal *= htotal; 836 837 /* 838 * In interlaced modes, the pixel counter counts all pixels, 839 * so one field will have htotal more pixels. In order to avoid 840 * the reported position from jumping backwards when the pixel 841 * counter is beyond the length of the shorter field, just 842 * clamp the position the length of the shorter field. This 843 * matches how the scanline counter based position works since 844 * the scanline counter doesn't count the two half lines. 845 */ 846 if (position >= vtotal) 847 position = vtotal - 1; 848 849 /* 850 * Start of vblank interrupt is triggered at start of hsync, 851 * just prior to the first active line of vblank. However we 852 * consider lines to start at the leading edge of horizontal 853 * active. So, should we get here before we've crossed into 854 * the horizontal active of the first line in vblank, we would 855 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that, 856 * always add htotal-hsync_start to the current pixel position. 857 */ 858 position = (position + htotal - hsync_start) % vtotal; 859 } 860 861 /* Get optional system timestamp after query. */ 862 if (etime) 863 *etime = ktime_get(); 864 865 /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */ 866 867 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 868 869 in_vbl = position >= vbl_start && position < vbl_end; 870 871 /* 872 * While in vblank, position will be negative 873 * counting up towards 0 at vbl_end. And outside 874 * vblank, position will be positive counting 875 * up since vbl_end. 876 */ 877 if (position >= vbl_start) 878 position -= vbl_end; 879 else 880 position += vtotal - vbl_end; 881 882 if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) { 883 *vpos = position; 884 *hpos = 0; 885 } else { 886 *vpos = position / htotal; 887 *hpos = position - (*vpos * htotal); 888 } 889 890 /* In vblank? 
*/ 891 if (in_vbl) 892 ret |= DRM_SCANOUTPOS_IN_VBLANK; 893 894 return ret; 895 } 896 897 int intel_get_crtc_scanline(struct intel_crtc *crtc) 898 { 899 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 900 unsigned long irqflags; 901 int position; 902 903 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 904 position = __intel_get_crtc_scanline(crtc); 905 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 906 907 return position; 908 } 909 910 static int i915_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe, 911 int *max_error, 912 struct timeval *vblank_time, 913 unsigned flags) 914 { 915 struct drm_crtc *crtc; 916 917 if (pipe >= INTEL_INFO(dev)->num_pipes) { 918 DRM_ERROR("Invalid crtc %u\n", pipe); 919 return -EINVAL; 920 } 921 922 /* Get drm_crtc to timestamp: */ 923 crtc = intel_get_crtc_for_pipe(dev, pipe); 924 if (crtc == NULL) { 925 DRM_ERROR("Invalid crtc %u\n", pipe); 926 return -EINVAL; 927 } 928 929 if (!crtc->hwmode.crtc_clock) { 930 DRM_DEBUG_KMS("crtc %u is disabled\n", pipe); 931 return -EBUSY; 932 } 933 934 /* Helper routine in DRM core does all the work: */ 935 return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error, 936 vblank_time, flags, 937 &crtc->hwmode); 938 } 939 940 static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv) 941 { 942 u32 busy_up, busy_down, max_avg, min_avg; 943 u8 new_delay; 944 945 spin_lock(&mchdev_lock); 946 947 I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS)); 948 949 new_delay = dev_priv->ips.cur_delay; 950 951 I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG); 952 busy_up = I915_READ(RCPREVBSYTUPAVG); 953 busy_down = I915_READ(RCPREVBSYTDNAVG); 954 max_avg = I915_READ(RCBMAXAVG); 955 min_avg = I915_READ(RCBMINAVG); 956 957 /* Handle RCS change request from hw */ 958 if (busy_up > max_avg) { 959 if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay) 960 new_delay = dev_priv->ips.cur_delay - 1; 961 if (new_delay < dev_priv->ips.max_delay) 962 new_delay = dev_priv->ips.max_delay; 963 } else if (busy_down < min_avg) { 964 if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay) 965 new_delay = dev_priv->ips.cur_delay + 1; 966 if (new_delay > dev_priv->ips.min_delay) 967 new_delay = dev_priv->ips.min_delay; 968 } 969 970 if (ironlake_set_drps(dev_priv, new_delay)) 971 dev_priv->ips.cur_delay = new_delay; 972 973 spin_unlock(&mchdev_lock); 974 975 return; 976 } 977 978 static void notify_ring(struct intel_engine_cs *engine) 979 { 980 smp_store_mb(engine->breadcrumbs.irq_posted, true); 981 if (intel_engine_wakeup(engine)) { 982 trace_i915_gem_request_notify(engine); 983 engine->breadcrumbs.irq_wakeups++; 984 } 985 } 986 987 static void vlv_c0_read(struct drm_i915_private *dev_priv, 988 struct intel_rps_ei *ei) 989 { 990 ei->cz_clock = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP); 991 ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT); 992 ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT); 993 } 994 995 static bool vlv_c0_above(struct drm_i915_private *dev_priv, 996 const struct intel_rps_ei *old, 997 const struct intel_rps_ei *now, 998 int threshold) 999 { 1000 u64 time, c0; 1001 unsigned int mul = 100; 1002 1003 if (old->cz_clock == 0) 1004 return false; 1005 1006 if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH) 1007 mul <<= 8; 1008 1009 time = now->cz_clock - old->cz_clock; 1010 time *= threshold * dev_priv->czclk_freq; 1011 1012 /* Workload can be split between render + media, e.g. SwapBuffers 1013 * being blitted in X after being rendered in mesa. 
To account for 1014 * this we need to combine both engines into our activity counter. 1015 */ 1016 c0 = now->render_c0 - old->render_c0; 1017 c0 += now->media_c0 - old->media_c0; 1018 c0 *= mul * VLV_CZ_CLOCK_TO_MILLI_SEC; 1019 1020 return c0 >= time; 1021 } 1022 1023 void gen6_rps_reset_ei(struct drm_i915_private *dev_priv) 1024 { 1025 vlv_c0_read(dev_priv, &dev_priv->rps.down_ei); 1026 dev_priv->rps.up_ei = dev_priv->rps.down_ei; 1027 } 1028 1029 static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir) 1030 { 1031 struct intel_rps_ei now; 1032 u32 events = 0; 1033 1034 if ((pm_iir & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED)) == 0) 1035 return 0; 1036 1037 vlv_c0_read(dev_priv, &now); 1038 if (now.cz_clock == 0) 1039 return 0; 1040 1041 if (pm_iir & GEN6_PM_RP_DOWN_EI_EXPIRED) { 1042 if (!vlv_c0_above(dev_priv, 1043 &dev_priv->rps.down_ei, &now, 1044 dev_priv->rps.down_threshold)) 1045 events |= GEN6_PM_RP_DOWN_THRESHOLD; 1046 dev_priv->rps.down_ei = now; 1047 } 1048 1049 if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) { 1050 if (vlv_c0_above(dev_priv, 1051 &dev_priv->rps.up_ei, &now, 1052 dev_priv->rps.up_threshold)) 1053 events |= GEN6_PM_RP_UP_THRESHOLD; 1054 dev_priv->rps.up_ei = now; 1055 } 1056 1057 return events; 1058 } 1059 1060 static bool any_waiters(struct drm_i915_private *dev_priv) 1061 { 1062 struct intel_engine_cs *engine; 1063 1064 for_each_engine(engine, dev_priv) 1065 if (intel_engine_has_waiter(engine)) 1066 return true; 1067 1068 return false; 1069 } 1070 1071 static void gen6_pm_rps_work(struct work_struct *work) 1072 { 1073 struct drm_i915_private *dev_priv = 1074 container_of(work, struct drm_i915_private, rps.work); 1075 bool client_boost; 1076 int new_delay, adj, min, max; 1077 u32 pm_iir; 1078 1079 spin_lock_irq(&dev_priv->irq_lock); 1080 /* Speed up work cancelation during disabling rps interrupts. */ 1081 if (!dev_priv->rps.interrupts_enabled) { 1082 spin_unlock_irq(&dev_priv->irq_lock); 1083 return; 1084 } 1085 1086 pm_iir = dev_priv->rps.pm_iir; 1087 dev_priv->rps.pm_iir = 0; 1088 /* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */ 1089 gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events); 1090 client_boost = dev_priv->rps.client_boost; 1091 dev_priv->rps.client_boost = false; 1092 spin_unlock_irq(&dev_priv->irq_lock); 1093 1094 /* Make sure we didn't queue anything we're not going to process. */ 1095 WARN_ON(pm_iir & ~dev_priv->pm_rps_events); 1096 1097 if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost) 1098 return; 1099 1100 mutex_lock(&dev_priv->rps.hw_lock); 1101 1102 pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir); 1103 1104 adj = dev_priv->rps.last_adj; 1105 new_delay = dev_priv->rps.cur_freq; 1106 min = dev_priv->rps.min_freq_softlimit; 1107 max = dev_priv->rps.max_freq_softlimit; 1108 1109 if (client_boost) { 1110 new_delay = dev_priv->rps.max_freq_softlimit; 1111 adj = 0; 1112 } else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) { 1113 if (adj > 0) 1114 adj *= 2; 1115 else /* CHV needs even encode values */ 1116 adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1; 1117 /* 1118 * For better performance, jump directly 1119 * to RPe if we're below it. 
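		 * (RPe here is dev_priv->rps.efficient_freq.)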
		 */
		if (new_delay < dev_priv->rps.efficient_freq - adj) {
			new_delay = dev_priv->rps.efficient_freq;
			adj = 0;
		}
	} else if (any_waiters(dev_priv)) {
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
		if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;
		else
			new_delay = dev_priv->rps.min_freq_softlimit;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
		if (adj < 0)
			adj *= 2;
		else /* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;
	} else { /* unknown event */
		adj = 0;
	}

	dev_priv->rps.last_adj = adj;

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	new_delay += adj;
	new_delay = clamp_t(int, new_delay, min, max);

	intel_set_rps(dev_priv, new_delay);

	mutex_unlock(&dev_priv->rps.hw_lock);
}


/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * the same row is likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	uint32_t misccpctl;
	uint8_t slice = 0;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
1177 */ 1178 mutex_lock(&dev_priv->drm.struct_mutex); 1179 1180 /* If we've screwed up tracking, just let the interrupt fire again */ 1181 if (WARN_ON(!dev_priv->l3_parity.which_slice)) 1182 goto out; 1183 1184 misccpctl = I915_READ(GEN7_MISCCPCTL); 1185 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE); 1186 POSTING_READ(GEN7_MISCCPCTL); 1187 1188 while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) { 1189 i915_reg_t reg; 1190 1191 slice--; 1192 if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv))) 1193 break; 1194 1195 dev_priv->l3_parity.which_slice &= ~(1<<slice); 1196 1197 reg = GEN7_L3CDERRST1(slice); 1198 1199 error_status = I915_READ(reg); 1200 row = GEN7_PARITY_ERROR_ROW(error_status); 1201 bank = GEN7_PARITY_ERROR_BANK(error_status); 1202 subbank = GEN7_PARITY_ERROR_SUBBANK(error_status); 1203 1204 I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE); 1205 POSTING_READ(reg); 1206 1207 parity_event[0] = I915_L3_PARITY_UEVENT "=1"; 1208 parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row); 1209 parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank); 1210 parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank); 1211 parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice); 1212 parity_event[5] = NULL; 1213 1214 kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj, 1215 KOBJ_CHANGE, parity_event); 1216 1217 DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n", 1218 slice, row, bank, subbank); 1219 1220 kfree(parity_event[4]); 1221 kfree(parity_event[3]); 1222 kfree(parity_event[2]); 1223 kfree(parity_event[1]); 1224 } 1225 1226 I915_WRITE(GEN7_MISCCPCTL, misccpctl); 1227 1228 out: 1229 WARN_ON(dev_priv->l3_parity.which_slice); 1230 spin_lock_irq(&dev_priv->irq_lock); 1231 gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv)); 1232 spin_unlock_irq(&dev_priv->irq_lock); 1233 1234 mutex_unlock(&dev_priv->drm.struct_mutex); 1235 } 1236 1237 static void ivybridge_parity_error_irq_handler(struct drm_i915_private *dev_priv, 1238 u32 iir) 1239 { 1240 if (!HAS_L3_DPF(dev_priv)) 1241 return; 1242 1243 spin_lock(&dev_priv->irq_lock); 1244 gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv)); 1245 spin_unlock(&dev_priv->irq_lock); 1246 1247 iir &= GT_PARITY_ERROR(dev_priv); 1248 if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1) 1249 dev_priv->l3_parity.which_slice |= 1 << 1; 1250 1251 if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT) 1252 dev_priv->l3_parity.which_slice |= 1 << 0; 1253 1254 queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work); 1255 } 1256 1257 static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv, 1258 u32 gt_iir) 1259 { 1260 if (gt_iir & GT_RENDER_USER_INTERRUPT) 1261 notify_ring(&dev_priv->engine[RCS]); 1262 if (gt_iir & ILK_BSD_USER_INTERRUPT) 1263 notify_ring(&dev_priv->engine[VCS]); 1264 } 1265 1266 static void snb_gt_irq_handler(struct drm_i915_private *dev_priv, 1267 u32 gt_iir) 1268 { 1269 if (gt_iir & GT_RENDER_USER_INTERRUPT) 1270 notify_ring(&dev_priv->engine[RCS]); 1271 if (gt_iir & GT_BSD_USER_INTERRUPT) 1272 notify_ring(&dev_priv->engine[VCS]); 1273 if (gt_iir & GT_BLT_USER_INTERRUPT) 1274 notify_ring(&dev_priv->engine[BCS]); 1275 1276 if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT | 1277 GT_BSD_CS_ERROR_INTERRUPT | 1278 GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) 1279 DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir); 1280 1281 if (gt_iir & GT_PARITY_ERROR(dev_priv)) 1282 ivybridge_parity_error_irq_handler(dev_priv, gt_iir); 1283 } 1284 1285 static __always_inline void 1286 
gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir, int test_shift) 1287 { 1288 if (iir & (GT_RENDER_USER_INTERRUPT << test_shift)) 1289 notify_ring(engine); 1290 if (iir & (GT_CONTEXT_SWITCH_INTERRUPT << test_shift)) 1291 tasklet_schedule(&engine->irq_tasklet); 1292 } 1293 1294 static irqreturn_t gen8_gt_irq_ack(struct drm_i915_private *dev_priv, 1295 u32 master_ctl, 1296 u32 gt_iir[4]) 1297 { 1298 irqreturn_t ret = IRQ_NONE; 1299 1300 if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) { 1301 gt_iir[0] = I915_READ_FW(GEN8_GT_IIR(0)); 1302 if (gt_iir[0]) { 1303 I915_WRITE_FW(GEN8_GT_IIR(0), gt_iir[0]); 1304 ret = IRQ_HANDLED; 1305 } else 1306 DRM_ERROR("The master control interrupt lied (GT0)!\n"); 1307 } 1308 1309 if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) { 1310 gt_iir[1] = I915_READ_FW(GEN8_GT_IIR(1)); 1311 if (gt_iir[1]) { 1312 I915_WRITE_FW(GEN8_GT_IIR(1), gt_iir[1]); 1313 ret = IRQ_HANDLED; 1314 } else 1315 DRM_ERROR("The master control interrupt lied (GT1)!\n"); 1316 } 1317 1318 if (master_ctl & GEN8_GT_VECS_IRQ) { 1319 gt_iir[3] = I915_READ_FW(GEN8_GT_IIR(3)); 1320 if (gt_iir[3]) { 1321 I915_WRITE_FW(GEN8_GT_IIR(3), gt_iir[3]); 1322 ret = IRQ_HANDLED; 1323 } else 1324 DRM_ERROR("The master control interrupt lied (GT3)!\n"); 1325 } 1326 1327 if (master_ctl & GEN8_GT_PM_IRQ) { 1328 gt_iir[2] = I915_READ_FW(GEN8_GT_IIR(2)); 1329 if (gt_iir[2] & dev_priv->pm_rps_events) { 1330 I915_WRITE_FW(GEN8_GT_IIR(2), 1331 gt_iir[2] & dev_priv->pm_rps_events); 1332 ret = IRQ_HANDLED; 1333 } else 1334 DRM_ERROR("The master control interrupt lied (PM)!\n"); 1335 } 1336 1337 return ret; 1338 } 1339 1340 static void gen8_gt_irq_handler(struct drm_i915_private *dev_priv, 1341 u32 gt_iir[4]) 1342 { 1343 if (gt_iir[0]) { 1344 gen8_cs_irq_handler(&dev_priv->engine[RCS], 1345 gt_iir[0], GEN8_RCS_IRQ_SHIFT); 1346 gen8_cs_irq_handler(&dev_priv->engine[BCS], 1347 gt_iir[0], GEN8_BCS_IRQ_SHIFT); 1348 } 1349 1350 if (gt_iir[1]) { 1351 gen8_cs_irq_handler(&dev_priv->engine[VCS], 1352 gt_iir[1], GEN8_VCS1_IRQ_SHIFT); 1353 gen8_cs_irq_handler(&dev_priv->engine[VCS2], 1354 gt_iir[1], GEN8_VCS2_IRQ_SHIFT); 1355 } 1356 1357 if (gt_iir[3]) 1358 gen8_cs_irq_handler(&dev_priv->engine[VECS], 1359 gt_iir[3], GEN8_VECS_IRQ_SHIFT); 1360 1361 if (gt_iir[2] & dev_priv->pm_rps_events) 1362 gen6_rps_irq_handler(dev_priv, gt_iir[2]); 1363 } 1364 1365 static bool bxt_port_hotplug_long_detect(enum port port, u32 val) 1366 { 1367 switch (port) { 1368 case PORT_A: 1369 return val & PORTA_HOTPLUG_LONG_DETECT; 1370 case PORT_B: 1371 return val & PORTB_HOTPLUG_LONG_DETECT; 1372 case PORT_C: 1373 return val & PORTC_HOTPLUG_LONG_DETECT; 1374 default: 1375 return false; 1376 } 1377 } 1378 1379 static bool spt_port_hotplug2_long_detect(enum port port, u32 val) 1380 { 1381 switch (port) { 1382 case PORT_E: 1383 return val & PORTE_HOTPLUG_LONG_DETECT; 1384 default: 1385 return false; 1386 } 1387 } 1388 1389 static bool spt_port_hotplug_long_detect(enum port port, u32 val) 1390 { 1391 switch (port) { 1392 case PORT_A: 1393 return val & PORTA_HOTPLUG_LONG_DETECT; 1394 case PORT_B: 1395 return val & PORTB_HOTPLUG_LONG_DETECT; 1396 case PORT_C: 1397 return val & PORTC_HOTPLUG_LONG_DETECT; 1398 case PORT_D: 1399 return val & PORTD_HOTPLUG_LONG_DETECT; 1400 default: 1401 return false; 1402 } 1403 } 1404 1405 static bool ilk_port_hotplug_long_detect(enum port port, u32 val) 1406 { 1407 switch (port) { 1408 case PORT_A: 1409 return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT; 1410 default: 1411 return false; 1412 } 1413 } 1414 
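/*
 * The *_port_hotplug_long_detect() helpers above and below map a port to
 * the "long pulse" bit in that platform's digital port hotplug register;
 * intel_get_hpd_pins() uses them to sort triggered pins into short and
 * long pulses.
 */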
1415 static bool pch_port_hotplug_long_detect(enum port port, u32 val) 1416 { 1417 switch (port) { 1418 case PORT_B: 1419 return val & PORTB_HOTPLUG_LONG_DETECT; 1420 case PORT_C: 1421 return val & PORTC_HOTPLUG_LONG_DETECT; 1422 case PORT_D: 1423 return val & PORTD_HOTPLUG_LONG_DETECT; 1424 default: 1425 return false; 1426 } 1427 } 1428 1429 static bool i9xx_port_hotplug_long_detect(enum port port, u32 val) 1430 { 1431 switch (port) { 1432 case PORT_B: 1433 return val & PORTB_HOTPLUG_INT_LONG_PULSE; 1434 case PORT_C: 1435 return val & PORTC_HOTPLUG_INT_LONG_PULSE; 1436 case PORT_D: 1437 return val & PORTD_HOTPLUG_INT_LONG_PULSE; 1438 default: 1439 return false; 1440 } 1441 } 1442 1443 /* 1444 * Get a bit mask of pins that have triggered, and which ones may be long. 1445 * This can be called multiple times with the same masks to accumulate 1446 * hotplug detection results from several registers. 1447 * 1448 * Note that the caller is expected to zero out the masks initially. 1449 */ 1450 static void intel_get_hpd_pins(u32 *pin_mask, u32 *long_mask, 1451 u32 hotplug_trigger, u32 dig_hotplug_reg, 1452 const u32 hpd[HPD_NUM_PINS], 1453 bool long_pulse_detect(enum port port, u32 val)) 1454 { 1455 enum port port; 1456 int i; 1457 1458 for_each_hpd_pin(i) { 1459 if ((hpd[i] & hotplug_trigger) == 0) 1460 continue; 1461 1462 *pin_mask |= BIT(i); 1463 1464 if (!intel_hpd_pin_to_port(i, &port)) 1465 continue; 1466 1467 if (long_pulse_detect(port, dig_hotplug_reg)) 1468 *long_mask |= BIT(i); 1469 } 1470 1471 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x\n", 1472 hotplug_trigger, dig_hotplug_reg, *pin_mask); 1473 1474 } 1475 1476 static void gmbus_irq_handler(struct drm_i915_private *dev_priv) 1477 { 1478 wake_up_all(&dev_priv->gmbus_wait_queue); 1479 } 1480 1481 static void dp_aux_irq_handler(struct drm_i915_private *dev_priv) 1482 { 1483 wake_up_all(&dev_priv->gmbus_wait_queue); 1484 } 1485 1486 #if defined(CONFIG_DEBUG_FS) 1487 static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 1488 enum pipe pipe, 1489 uint32_t crc0, uint32_t crc1, 1490 uint32_t crc2, uint32_t crc3, 1491 uint32_t crc4) 1492 { 1493 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe]; 1494 struct intel_pipe_crc_entry *entry; 1495 int head, tail; 1496 1497 spin_lock(&pipe_crc->lock); 1498 1499 if (!pipe_crc->entries) { 1500 spin_unlock(&pipe_crc->lock); 1501 DRM_DEBUG_KMS("spurious interrupt\n"); 1502 return; 1503 } 1504 1505 head = pipe_crc->head; 1506 tail = pipe_crc->tail; 1507 1508 if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) { 1509 spin_unlock(&pipe_crc->lock); 1510 DRM_ERROR("CRC buffer overflowing\n"); 1511 return; 1512 } 1513 1514 entry = &pipe_crc->entries[head]; 1515 1516 entry->frame = dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm, 1517 pipe); 1518 entry->crc[0] = crc0; 1519 entry->crc[1] = crc1; 1520 entry->crc[2] = crc2; 1521 entry->crc[3] = crc3; 1522 entry->crc[4] = crc4; 1523 1524 head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1); 1525 pipe_crc->head = head; 1526 1527 spin_unlock(&pipe_crc->lock); 1528 1529 wake_up_interruptible(&pipe_crc->wq); 1530 } 1531 #else 1532 static inline void 1533 display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 1534 enum pipe pipe, 1535 uint32_t crc0, uint32_t crc1, 1536 uint32_t crc2, uint32_t crc3, 1537 uint32_t crc4) {} 1538 #endif 1539 1540 1541 static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 1542 enum pipe pipe) 1543 { 1544 
	display_pipe_crc_irq_handler(dev_priv, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     0, 0, 0, 0);
}

static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	display_pipe_crc_irq_handler(dev_priv, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
}

static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	uint32_t res1, res2;

	if (INTEL_GEN(dev_priv) >= 3)
		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
	else
		res1 = 0;

	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
	else
		res2 = 0;

	display_pipe_crc_irq_handler(dev_priv, pipe,
				     I915_READ(PIPE_CRC_RES_RED(pipe)),
				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
				     res1, res2);
}

/* The RPS events need forcewake, so we add them to a work queue and mask their
 * IMR bits until the work is done. Other interrupts can be processed without
 * the work queue. */
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	if (pm_iir & dev_priv->pm_rps_events) {
		spin_lock(&dev_priv->irq_lock);
		gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
		if (dev_priv->rps.interrupts_enabled) {
			dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
			schedule_work(&dev_priv->rps.work);
		}
		spin_unlock(&dev_priv->irq_lock);
	}

	if (INTEL_INFO(dev_priv)->gen >= 8)
		return;

	if (HAS_VEBOX(dev_priv)) {
		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
			notify_ring(&dev_priv->engine[VECS]);

		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
	}
}

static bool intel_pipe_handle_vblank(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	bool ret;

	ret = drm_handle_vblank(&dev_priv->drm, pipe);
	if (ret)
		intel_finish_page_flip_mmio(dev_priv, pipe);

	return ret;
}

static void valleyview_pipestat_irq_ack(struct drm_i915_private *dev_priv,
					u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	int pipe;

	spin_lock(&dev_priv->irq_lock);

	if (!dev_priv->display_irqs_enabled) {
		spin_unlock(&dev_priv->irq_lock);
		return;
	}

	for_each_pipe(dev_priv, pipe) {
		i915_reg_t reg;
		u32 mask, iir_bit = 0;

		/*
		 * PIPESTAT bits get signalled even when the interrupt is
		 * disabled with the mask bits, and some of the status bits do
		 * not generate interrupts at all (like the underrun bit). Hence
		 * we need to be careful that we only handle what we want to
		 * handle.
		 */

		/* fifo underruns are filtered in the underrun handler.
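		 * PIPE_FIFO_UNDERRUN_STATUS is therefore always included in
		 * the ack mask below, so an underrun is picked up and cleared
		 * even when the pipe's IIR event bit is not set.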
*/ 1646 mask = PIPE_FIFO_UNDERRUN_STATUS; 1647 1648 switch (pipe) { 1649 case PIPE_A: 1650 iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT; 1651 break; 1652 case PIPE_B: 1653 iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; 1654 break; 1655 case PIPE_C: 1656 iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT; 1657 break; 1658 } 1659 if (iir & iir_bit) 1660 mask |= dev_priv->pipestat_irq_mask[pipe]; 1661 1662 if (!mask) 1663 continue; 1664 1665 reg = PIPESTAT(pipe); 1666 mask |= PIPESTAT_INT_ENABLE_MASK; 1667 pipe_stats[pipe] = I915_READ(reg) & mask; 1668 1669 /* 1670 * Clear the PIPE*STAT regs before the IIR 1671 */ 1672 if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS | 1673 PIPESTAT_INT_STATUS_MASK)) 1674 I915_WRITE(reg, pipe_stats[pipe]); 1675 } 1676 spin_unlock(&dev_priv->irq_lock); 1677 } 1678 1679 static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv, 1680 u32 pipe_stats[I915_MAX_PIPES]) 1681 { 1682 enum pipe pipe; 1683 1684 for_each_pipe(dev_priv, pipe) { 1685 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS && 1686 intel_pipe_handle_vblank(dev_priv, pipe)) 1687 intel_check_page_flip(dev_priv, pipe); 1688 1689 if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) 1690 intel_finish_page_flip_cs(dev_priv, pipe); 1691 1692 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 1693 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 1694 1695 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 1696 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 1697 } 1698 1699 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 1700 gmbus_irq_handler(dev_priv); 1701 } 1702 1703 static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv) 1704 { 1705 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); 1706 1707 if (hotplug_status) 1708 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); 1709 1710 return hotplug_status; 1711 } 1712 1713 static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv, 1714 u32 hotplug_status) 1715 { 1716 u32 pin_mask = 0, long_mask = 0; 1717 1718 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || 1719 IS_CHERRYVIEW(dev_priv)) { 1720 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X; 1721 1722 if (hotplug_trigger) { 1723 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, 1724 hotplug_trigger, hpd_status_g4x, 1725 i9xx_port_hotplug_long_detect); 1726 1727 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 1728 } 1729 1730 if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X) 1731 dp_aux_irq_handler(dev_priv); 1732 } else { 1733 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; 1734 1735 if (hotplug_trigger) { 1736 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, 1737 hotplug_trigger, hpd_status_i915, 1738 i9xx_port_hotplug_long_detect); 1739 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 1740 } 1741 } 1742 } 1743 1744 static irqreturn_t valleyview_irq_handler(int irq, void *arg) 1745 { 1746 struct drm_device *dev = arg; 1747 struct drm_i915_private *dev_priv = to_i915(dev); 1748 irqreturn_t ret = IRQ_NONE; 1749 1750 if (!intel_irqs_enabled(dev_priv)) 1751 return IRQ_NONE; 1752 1753 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 1754 disable_rpm_wakeref_asserts(dev_priv); 1755 1756 do { 1757 u32 iir, gt_iir, pm_iir; 1758 u32 pipe_stats[I915_MAX_PIPES] = {}; 1759 u32 hotplug_status = 0; 1760 u32 ier = 0; 1761 1762 gt_iir = I915_READ(GTIIR); 1763 pm_iir = I915_READ(GEN6_PMIIR); 1764 iir = I915_READ(VLV_IIR); 1765 1766 if (gt_iir == 0 && pm_iir == 0 && iir == 0) 1767 break; 1768 
1769 ret = IRQ_HANDLED; 1770 1771 /* 1772 * Theory on interrupt generation, based on empirical evidence: 1773 * 1774 * x = ((VLV_IIR & VLV_IER) || 1775 * (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) && 1776 * (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE))); 1777 * 1778 * A CPU interrupt will only be raised when 'x' has a 0->1 edge. 1779 * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to 1780 * guarantee the CPU interrupt will be raised again even if we 1781 * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR 1782 * bits this time around. 1783 */ 1784 I915_WRITE(VLV_MASTER_IER, 0); 1785 ier = I915_READ(VLV_IER); 1786 I915_WRITE(VLV_IER, 0); 1787 1788 if (gt_iir) 1789 I915_WRITE(GTIIR, gt_iir); 1790 if (pm_iir) 1791 I915_WRITE(GEN6_PMIIR, pm_iir); 1792 1793 if (iir & I915_DISPLAY_PORT_INTERRUPT) 1794 hotplug_status = i9xx_hpd_irq_ack(dev_priv); 1795 1796 /* Call regardless, as some status bits might not be 1797 * signalled in iir */ 1798 valleyview_pipestat_irq_ack(dev_priv, iir, pipe_stats); 1799 1800 /* 1801 * VLV_IIR is single buffered, and reflects the level 1802 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last. 1803 */ 1804 if (iir) 1805 I915_WRITE(VLV_IIR, iir); 1806 1807 I915_WRITE(VLV_IER, ier); 1808 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); 1809 POSTING_READ(VLV_MASTER_IER); 1810 1811 if (gt_iir) 1812 snb_gt_irq_handler(dev_priv, gt_iir); 1813 if (pm_iir) 1814 gen6_rps_irq_handler(dev_priv, pm_iir); 1815 1816 if (hotplug_status) 1817 i9xx_hpd_irq_handler(dev_priv, hotplug_status); 1818 1819 valleyview_pipestat_irq_handler(dev_priv, pipe_stats); 1820 } while (0); 1821 1822 enable_rpm_wakeref_asserts(dev_priv); 1823 1824 return ret; 1825 } 1826 1827 static irqreturn_t cherryview_irq_handler(int irq, void *arg) 1828 { 1829 struct drm_device *dev = arg; 1830 struct drm_i915_private *dev_priv = to_i915(dev); 1831 irqreturn_t ret = IRQ_NONE; 1832 1833 if (!intel_irqs_enabled(dev_priv)) 1834 return IRQ_NONE; 1835 1836 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 1837 disable_rpm_wakeref_asserts(dev_priv); 1838 1839 do { 1840 u32 master_ctl, iir; 1841 u32 gt_iir[4] = {}; 1842 u32 pipe_stats[I915_MAX_PIPES] = {}; 1843 u32 hotplug_status = 0; 1844 u32 ier = 0; 1845 1846 master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL; 1847 iir = I915_READ(VLV_IIR); 1848 1849 if (master_ctl == 0 && iir == 0) 1850 break; 1851 1852 ret = IRQ_HANDLED; 1853 1854 /* 1855 * Theory on interrupt generation, based on empirical evidence: 1856 * 1857 * x = ((VLV_IIR & VLV_IER) || 1858 * ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) && 1859 * (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL))); 1860 * 1861 * A CPU interrupt will only be raised when 'x' has a 0->1 edge. 1862 * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to 1863 * guarantee the CPU interrupt will be raised again even if we 1864 * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL 1865 * bits this time around. 1866 */ 1867 I915_WRITE(GEN8_MASTER_IRQ, 0); 1868 ier = I915_READ(VLV_IER); 1869 I915_WRITE(VLV_IER, 0); 1870 1871 gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir); 1872 1873 if (iir & I915_DISPLAY_PORT_INTERRUPT) 1874 hotplug_status = i9xx_hpd_irq_ack(dev_priv); 1875 1876 /* Call regardless, as some status bits might not be 1877 * signalled in iir */ 1878 valleyview_pipestat_irq_ack(dev_priv, iir, pipe_stats); 1879 1880 /* 1881 * VLV_IIR is single buffered, and reflects the level 1882 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last. 
1883 */ 1884 if (iir) 1885 I915_WRITE(VLV_IIR, iir); 1886 1887 I915_WRITE(VLV_IER, ier); 1888 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 1889 POSTING_READ(GEN8_MASTER_IRQ); 1890 1891 gen8_gt_irq_handler(dev_priv, gt_iir); 1892 1893 if (hotplug_status) 1894 i9xx_hpd_irq_handler(dev_priv, hotplug_status); 1895 1896 valleyview_pipestat_irq_handler(dev_priv, pipe_stats); 1897 } while (0); 1898 1899 enable_rpm_wakeref_asserts(dev_priv); 1900 1901 return ret; 1902 } 1903 1904 static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv, 1905 u32 hotplug_trigger, 1906 const u32 hpd[HPD_NUM_PINS]) 1907 { 1908 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; 1909 1910 /* 1911 * Somehow the PCH doesn't seem to really ack the interrupt to the CPU 1912 * unless we touch the hotplug register, even if hotplug_trigger is 1913 * zero. Not acking leads to "The master control interrupt lied (SDE)!" 1914 * errors. 1915 */ 1916 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); 1917 if (!hotplug_trigger) { 1918 u32 mask = PORTA_HOTPLUG_STATUS_MASK | 1919 PORTD_HOTPLUG_STATUS_MASK | 1920 PORTC_HOTPLUG_STATUS_MASK | 1921 PORTB_HOTPLUG_STATUS_MASK; 1922 dig_hotplug_reg &= ~mask; 1923 } 1924 1925 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); 1926 if (!hotplug_trigger) 1927 return; 1928 1929 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, 1930 dig_hotplug_reg, hpd, 1931 pch_port_hotplug_long_detect); 1932 1933 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 1934 } 1935 1936 static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) 1937 { 1938 int pipe; 1939 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK; 1940 1941 ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ibx); 1942 1943 if (pch_iir & SDE_AUDIO_POWER_MASK) { 1944 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >> 1945 SDE_AUDIO_POWER_SHIFT); 1946 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", 1947 port_name(port)); 1948 } 1949 1950 if (pch_iir & SDE_AUX_MASK) 1951 dp_aux_irq_handler(dev_priv); 1952 1953 if (pch_iir & SDE_GMBUS) 1954 gmbus_irq_handler(dev_priv); 1955 1956 if (pch_iir & SDE_AUDIO_HDCP_MASK) 1957 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n"); 1958 1959 if (pch_iir & SDE_AUDIO_TRANS_MASK) 1960 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n"); 1961 1962 if (pch_iir & SDE_POISON) 1963 DRM_ERROR("PCH poison interrupt\n"); 1964 1965 if (pch_iir & SDE_FDI_MASK) 1966 for_each_pipe(dev_priv, pipe) 1967 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 1968 pipe_name(pipe), 1969 I915_READ(FDI_RX_IIR(pipe))); 1970 1971 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE)) 1972 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n"); 1973 1974 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR)) 1975 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n"); 1976 1977 if (pch_iir & SDE_TRANSA_FIFO_UNDER) 1978 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A); 1979 1980 if (pch_iir & SDE_TRANSB_FIFO_UNDER) 1981 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B); 1982 } 1983 1984 static void ivb_err_int_handler(struct drm_i915_private *dev_priv) 1985 { 1986 u32 err_int = I915_READ(GEN7_ERR_INT); 1987 enum pipe pipe; 1988 1989 if (err_int & ERR_INT_POISON) 1990 DRM_ERROR("Poison interrupt\n"); 1991 1992 for_each_pipe(dev_priv, pipe) { 1993 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) 1994 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 1995 1996 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) { 1997 if (IS_IVYBRIDGE(dev_priv)) 1998 ivb_pipe_crc_irq_handler(dev_priv, 
pipe); 1999 else 2000 hsw_pipe_crc_irq_handler(dev_priv, pipe); 2001 } 2002 } 2003 2004 I915_WRITE(GEN7_ERR_INT, err_int); 2005 } 2006 2007 static void cpt_serr_int_handler(struct drm_i915_private *dev_priv) 2008 { 2009 u32 serr_int = I915_READ(SERR_INT); 2010 2011 if (serr_int & SERR_INT_POISON) 2012 DRM_ERROR("PCH poison interrupt\n"); 2013 2014 if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN) 2015 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A); 2016 2017 if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN) 2018 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B); 2019 2020 if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN) 2021 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_C); 2022 2023 I915_WRITE(SERR_INT, serr_int); 2024 } 2025 2026 static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) 2027 { 2028 int pipe; 2029 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; 2030 2031 ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_cpt); 2032 2033 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) { 2034 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> 2035 SDE_AUDIO_POWER_SHIFT_CPT); 2036 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n", 2037 port_name(port)); 2038 } 2039 2040 if (pch_iir & SDE_AUX_MASK_CPT) 2041 dp_aux_irq_handler(dev_priv); 2042 2043 if (pch_iir & SDE_GMBUS_CPT) 2044 gmbus_irq_handler(dev_priv); 2045 2046 if (pch_iir & SDE_AUDIO_CP_REQ_CPT) 2047 DRM_DEBUG_DRIVER("Audio CP request interrupt\n"); 2048 2049 if (pch_iir & SDE_AUDIO_CP_CHG_CPT) 2050 DRM_DEBUG_DRIVER("Audio CP change interrupt\n"); 2051 2052 if (pch_iir & SDE_FDI_MASK_CPT) 2053 for_each_pipe(dev_priv, pipe) 2054 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 2055 pipe_name(pipe), 2056 I915_READ(FDI_RX_IIR(pipe))); 2057 2058 if (pch_iir & SDE_ERROR_CPT) 2059 cpt_serr_int_handler(dev_priv); 2060 } 2061 2062 static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) 2063 { 2064 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT & 2065 ~SDE_PORTE_HOTPLUG_SPT; 2066 u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT; 2067 u32 pin_mask = 0, long_mask = 0; 2068 2069 if (hotplug_trigger) { 2070 u32 dig_hotplug_reg; 2071 2072 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); 2073 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); 2074 2075 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, 2076 dig_hotplug_reg, hpd_spt, 2077 spt_port_hotplug_long_detect); 2078 } 2079 2080 if (hotplug2_trigger) { 2081 u32 dig_hotplug_reg; 2082 2083 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2); 2084 I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg); 2085 2086 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug2_trigger, 2087 dig_hotplug_reg, hpd_spt, 2088 spt_port_hotplug2_long_detect); 2089 } 2090 2091 if (pin_mask) 2092 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2093 2094 if (pch_iir & SDE_GMBUS_CPT) 2095 gmbus_irq_handler(dev_priv); 2096 } 2097 2098 static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv, 2099 u32 hotplug_trigger, 2100 const u32 hpd[HPD_NUM_PINS]) 2101 { 2102 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; 2103 2104 dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL); 2105 I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg); 2106 2107 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, 2108 dig_hotplug_reg, hpd, 2109 ilk_port_hotplug_long_detect); 2110 2111 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2112 } 2113 2114 static void ilk_display_irq_handler(struct drm_i915_private *dev_priv, 2115 u32 de_iir) 2116 { 
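	/* ILK/SNB display interrupts: port A hotplug, AUX channel A, GSE/ASLE, poison, per-pipe vblank/FIFO underrun/CRC/flip-done, any chained PCH (SDE) event, and the ILK PCU event. */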
2117 enum pipe pipe; 2118 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG; 2119 2120 if (hotplug_trigger) 2121 ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ilk); 2122 2123 if (de_iir & DE_AUX_CHANNEL_A) 2124 dp_aux_irq_handler(dev_priv); 2125 2126 if (de_iir & DE_GSE) 2127 intel_opregion_asle_intr(dev_priv); 2128 2129 if (de_iir & DE_POISON) 2130 DRM_ERROR("Poison interrupt\n"); 2131 2132 for_each_pipe(dev_priv, pipe) { 2133 if (de_iir & DE_PIPE_VBLANK(pipe) && 2134 intel_pipe_handle_vblank(dev_priv, pipe)) 2135 intel_check_page_flip(dev_priv, pipe); 2136 2137 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe)) 2138 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2139 2140 if (de_iir & DE_PIPE_CRC_DONE(pipe)) 2141 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 2142 2143 /* plane/pipes map 1:1 on ilk+ */ 2144 if (de_iir & DE_PLANE_FLIP_DONE(pipe)) 2145 intel_finish_page_flip_cs(dev_priv, pipe); 2146 } 2147 2148 /* check event from PCH */ 2149 if (de_iir & DE_PCH_EVENT) { 2150 u32 pch_iir = I915_READ(SDEIIR); 2151 2152 if (HAS_PCH_CPT(dev_priv)) 2153 cpt_irq_handler(dev_priv, pch_iir); 2154 else 2155 ibx_irq_handler(dev_priv, pch_iir); 2156 2157 /* should clear PCH hotplug event before clear CPU irq */ 2158 I915_WRITE(SDEIIR, pch_iir); 2159 } 2160 2161 if (IS_GEN5(dev_priv) && de_iir & DE_PCU_EVENT) 2162 ironlake_rps_change_irq_handler(dev_priv); 2163 } 2164 2165 static void ivb_display_irq_handler(struct drm_i915_private *dev_priv, 2166 u32 de_iir) 2167 { 2168 enum pipe pipe; 2169 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB; 2170 2171 if (hotplug_trigger) 2172 ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ivb); 2173 2174 if (de_iir & DE_ERR_INT_IVB) 2175 ivb_err_int_handler(dev_priv); 2176 2177 if (de_iir & DE_AUX_CHANNEL_A_IVB) 2178 dp_aux_irq_handler(dev_priv); 2179 2180 if (de_iir & DE_GSE_IVB) 2181 intel_opregion_asle_intr(dev_priv); 2182 2183 for_each_pipe(dev_priv, pipe) { 2184 if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) && 2185 intel_pipe_handle_vblank(dev_priv, pipe)) 2186 intel_check_page_flip(dev_priv, pipe); 2187 2188 /* plane/pipes map 1:1 on ilk+ */ 2189 if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) 2190 intel_finish_page_flip_cs(dev_priv, pipe); 2191 } 2192 2193 /* check event from PCH */ 2194 if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) { 2195 u32 pch_iir = I915_READ(SDEIIR); 2196 2197 cpt_irq_handler(dev_priv, pch_iir); 2198 2199 /* clear PCH hotplug event before clear CPU irq */ 2200 I915_WRITE(SDEIIR, pch_iir); 2201 } 2202 } 2203 2204 /* 2205 * To handle irqs with the minimum potential races with fresh interrupts, we: 2206 * 1 - Disable Master Interrupt Control. 2207 * 2 - Find the source(s) of the interrupt. 2208 * 3 - Clear the Interrupt Identity bits (IIR). 2209 * 4 - Process the interrupt(s) that had bits set in the IIRs. 2210 * 5 - Re-enable Master Interrupt Control. 2211 */ 2212 static irqreturn_t ironlake_irq_handler(int irq, void *arg) 2213 { 2214 struct drm_device *dev = arg; 2215 struct drm_i915_private *dev_priv = to_i915(dev); 2216 u32 de_iir, gt_iir, de_ier, sde_ier = 0; 2217 irqreturn_t ret = IRQ_NONE; 2218 2219 if (!intel_irqs_enabled(dev_priv)) 2220 return IRQ_NONE; 2221 2222 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2223 disable_rpm_wakeref_asserts(dev_priv); 2224 2225 /* disable master interrupt before clearing iir */ 2226 de_ier = I915_READ(DEIER); 2227 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); 2228 POSTING_READ(DEIER); 2229 2230 /* Disable south interrupts. 
We'll only write to SDEIIR once, so further 2231 * interrupts will will be stored on its back queue, and then we'll be 2232 * able to process them after we restore SDEIER (as soon as we restore 2233 * it, we'll get an interrupt if SDEIIR still has something to process 2234 * due to its back queue). */ 2235 if (!HAS_PCH_NOP(dev_priv)) { 2236 sde_ier = I915_READ(SDEIER); 2237 I915_WRITE(SDEIER, 0); 2238 POSTING_READ(SDEIER); 2239 } 2240 2241 /* Find, clear, then process each source of interrupt */ 2242 2243 gt_iir = I915_READ(GTIIR); 2244 if (gt_iir) { 2245 I915_WRITE(GTIIR, gt_iir); 2246 ret = IRQ_HANDLED; 2247 if (INTEL_GEN(dev_priv) >= 6) 2248 snb_gt_irq_handler(dev_priv, gt_iir); 2249 else 2250 ilk_gt_irq_handler(dev_priv, gt_iir); 2251 } 2252 2253 de_iir = I915_READ(DEIIR); 2254 if (de_iir) { 2255 I915_WRITE(DEIIR, de_iir); 2256 ret = IRQ_HANDLED; 2257 if (INTEL_GEN(dev_priv) >= 7) 2258 ivb_display_irq_handler(dev_priv, de_iir); 2259 else 2260 ilk_display_irq_handler(dev_priv, de_iir); 2261 } 2262 2263 if (INTEL_GEN(dev_priv) >= 6) { 2264 u32 pm_iir = I915_READ(GEN6_PMIIR); 2265 if (pm_iir) { 2266 I915_WRITE(GEN6_PMIIR, pm_iir); 2267 ret = IRQ_HANDLED; 2268 gen6_rps_irq_handler(dev_priv, pm_iir); 2269 } 2270 } 2271 2272 I915_WRITE(DEIER, de_ier); 2273 POSTING_READ(DEIER); 2274 if (!HAS_PCH_NOP(dev_priv)) { 2275 I915_WRITE(SDEIER, sde_ier); 2276 POSTING_READ(SDEIER); 2277 } 2278 2279 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2280 enable_rpm_wakeref_asserts(dev_priv); 2281 2282 return ret; 2283 } 2284 2285 static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv, 2286 u32 hotplug_trigger, 2287 const u32 hpd[HPD_NUM_PINS]) 2288 { 2289 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; 2290 2291 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); 2292 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); 2293 2294 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, 2295 dig_hotplug_reg, hpd, 2296 bxt_port_hotplug_long_detect); 2297 2298 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2299 } 2300 2301 static irqreturn_t 2302 gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) 2303 { 2304 irqreturn_t ret = IRQ_NONE; 2305 u32 iir; 2306 enum pipe pipe; 2307 2308 if (master_ctl & GEN8_DE_MISC_IRQ) { 2309 iir = I915_READ(GEN8_DE_MISC_IIR); 2310 if (iir) { 2311 I915_WRITE(GEN8_DE_MISC_IIR, iir); 2312 ret = IRQ_HANDLED; 2313 if (iir & GEN8_DE_MISC_GSE) 2314 intel_opregion_asle_intr(dev_priv); 2315 else 2316 DRM_ERROR("Unexpected DE Misc interrupt\n"); 2317 } 2318 else 2319 DRM_ERROR("The master control interrupt lied (DE MISC)!\n"); 2320 } 2321 2322 if (master_ctl & GEN8_DE_PORT_IRQ) { 2323 iir = I915_READ(GEN8_DE_PORT_IIR); 2324 if (iir) { 2325 u32 tmp_mask; 2326 bool found = false; 2327 2328 I915_WRITE(GEN8_DE_PORT_IIR, iir); 2329 ret = IRQ_HANDLED; 2330 2331 tmp_mask = GEN8_AUX_CHANNEL_A; 2332 if (INTEL_INFO(dev_priv)->gen >= 9) 2333 tmp_mask |= GEN9_AUX_CHANNEL_B | 2334 GEN9_AUX_CHANNEL_C | 2335 GEN9_AUX_CHANNEL_D; 2336 2337 if (iir & tmp_mask) { 2338 dp_aux_irq_handler(dev_priv); 2339 found = true; 2340 } 2341 2342 if (IS_BROXTON(dev_priv)) { 2343 tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK; 2344 if (tmp_mask) { 2345 bxt_hpd_irq_handler(dev_priv, tmp_mask, 2346 hpd_bxt); 2347 found = true; 2348 } 2349 } else if (IS_BROADWELL(dev_priv)) { 2350 tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG; 2351 if (tmp_mask) { 2352 ilk_hpd_irq_handler(dev_priv, 2353 tmp_mask, hpd_bdw); 2354 found = true; 2355 } 2356 } 2357 2358 if (IS_BROXTON(dev_priv) && (iir 
& BXT_DE_PORT_GMBUS)) { 2359 gmbus_irq_handler(dev_priv); 2360 found = true; 2361 } 2362 2363 if (!found) 2364 DRM_ERROR("Unexpected DE Port interrupt\n"); 2365 } 2366 else 2367 DRM_ERROR("The master control interrupt lied (DE PORT)!\n"); 2368 } 2369 2370 for_each_pipe(dev_priv, pipe) { 2371 u32 flip_done, fault_errors; 2372 2373 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe))) 2374 continue; 2375 2376 iir = I915_READ(GEN8_DE_PIPE_IIR(pipe)); 2377 if (!iir) { 2378 DRM_ERROR("The master control interrupt lied (DE PIPE)!\n"); 2379 continue; 2380 } 2381 2382 ret = IRQ_HANDLED; 2383 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir); 2384 2385 if (iir & GEN8_PIPE_VBLANK && 2386 intel_pipe_handle_vblank(dev_priv, pipe)) 2387 intel_check_page_flip(dev_priv, pipe); 2388 2389 flip_done = iir; 2390 if (INTEL_INFO(dev_priv)->gen >= 9) 2391 flip_done &= GEN9_PIPE_PLANE1_FLIP_DONE; 2392 else 2393 flip_done &= GEN8_PIPE_PRIMARY_FLIP_DONE; 2394 2395 if (flip_done) 2396 intel_finish_page_flip_cs(dev_priv, pipe); 2397 2398 if (iir & GEN8_PIPE_CDCLK_CRC_DONE) 2399 hsw_pipe_crc_irq_handler(dev_priv, pipe); 2400 2401 if (iir & GEN8_PIPE_FIFO_UNDERRUN) 2402 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2403 2404 fault_errors = iir; 2405 if (INTEL_INFO(dev_priv)->gen >= 9) 2406 fault_errors &= GEN9_DE_PIPE_IRQ_FAULT_ERRORS; 2407 else 2408 fault_errors &= GEN8_DE_PIPE_IRQ_FAULT_ERRORS; 2409 2410 if (fault_errors) 2411 DRM_ERROR("Fault errors on pipe %c: 0x%08x\n", 2412 pipe_name(pipe), 2413 fault_errors); 2414 } 2415 2416 if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) && 2417 master_ctl & GEN8_DE_PCH_IRQ) { 2418 /* 2419 * FIXME(BDW): Assume for now that the new interrupt handling 2420 * scheme also closed the SDE interrupt handling race we've seen 2421 * on older pch-split platforms. But this needs testing. 2422 */ 2423 iir = I915_READ(SDEIIR); 2424 if (iir) { 2425 I915_WRITE(SDEIIR, iir); 2426 ret = IRQ_HANDLED; 2427 2428 if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv)) 2429 spt_irq_handler(dev_priv, iir); 2430 else 2431 cpt_irq_handler(dev_priv, iir); 2432 } else { 2433 /* 2434 * Like on previous PCH there seems to be something 2435 * fishy going on with forwarding PCH interrupts.
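			 * SDEIIR was empty even though the master control bit
			 * was set; there is nothing to ack here, so just note
			 * it and carry on.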
2436 */ 2437 DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n"); 2438 } 2439 } 2440 2441 return ret; 2442 } 2443 2444 static irqreturn_t gen8_irq_handler(int irq, void *arg) 2445 { 2446 struct drm_device *dev = arg; 2447 struct drm_i915_private *dev_priv = to_i915(dev); 2448 u32 master_ctl; 2449 u32 gt_iir[4] = {}; 2450 irqreturn_t ret; 2451 2452 if (!intel_irqs_enabled(dev_priv)) 2453 return IRQ_NONE; 2454 2455 master_ctl = I915_READ_FW(GEN8_MASTER_IRQ); 2456 master_ctl &= ~GEN8_MASTER_IRQ_CONTROL; 2457 if (!master_ctl) 2458 return IRQ_NONE; 2459 2460 I915_WRITE_FW(GEN8_MASTER_IRQ, 0); 2461 2462 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2463 disable_rpm_wakeref_asserts(dev_priv); 2464 2465 /* Find, clear, then process each source of interrupt */ 2466 ret = gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir); 2467 gen8_gt_irq_handler(dev_priv, gt_iir); 2468 ret |= gen8_de_irq_handler(dev_priv, master_ctl); 2469 2470 I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 2471 POSTING_READ_FW(GEN8_MASTER_IRQ); 2472 2473 enable_rpm_wakeref_asserts(dev_priv); 2474 2475 return ret; 2476 } 2477 2478 static void i915_error_wake_up(struct drm_i915_private *dev_priv) 2479 { 2480 /* 2481 * Notify all waiters for GPU completion events that reset state has 2482 * been changed, and that they need to restart their wait after 2483 * checking for potential errors (and bail out to drop locks if there is 2484 * a gpu reset pending so that i915_error_work_func can acquire them). 2485 */ 2486 2487 /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */ 2488 wake_up_all(&dev_priv->gpu_error.wait_queue); 2489 2490 /* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */ 2491 wake_up_all(&dev_priv->pending_flip_queue); 2492 } 2493 2494 /** 2495 * i915_reset_and_wakeup - do process context error handling work 2496 * @dev_priv: i915 device private 2497 * 2498 * Fire an error uevent so userspace can see that a hang or error 2499 * was detected. 2500 */ 2501 static void i915_reset_and_wakeup(struct drm_i915_private *dev_priv) 2502 { 2503 struct kobject *kobj = &dev_priv->drm.primary->kdev->kobj; 2504 char *error_event[] = { I915_ERROR_UEVENT "=1", NULL }; 2505 char *reset_event[] = { I915_RESET_UEVENT "=1", NULL }; 2506 char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL }; 2507 int ret; 2508 2509 kobject_uevent_env(kobj, KOBJ_CHANGE, error_event); 2510 2511 /* 2512 * Note that there's only one work item which does gpu resets, so we 2513 * need not worry about concurrent gpu resets potentially incrementing 2514 * error->reset_counter twice. We only need to take care of another 2515 * racing irq/hangcheck declaring the gpu dead for a second time. A 2516 * quick check for that is good enough: schedule_work ensures the 2517 * correct ordering between hang detection and this work item, and since 2518 * the reset in-progress bit is only ever set by code outside of this 2519 * work we don't need to worry about any other races. 2520 */ 2521 if (i915_reset_in_progress(&dev_priv->gpu_error)) { 2522 DRM_DEBUG_DRIVER("resetting chip\n"); 2523 kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event); 2524 2525 /* 2526 * In most cases it's guaranteed that we get here with an RPM 2527 * reference held, for example because there is a pending GPU 2528 * request that won't finish until the reset is done. This 2529 * isn't the case at least when we get here by doing a 2530 * simulated reset via debugs, so get an RPM reference. 
2531 */ 2532 intel_runtime_pm_get(dev_priv); 2533 2534 intel_prepare_reset(dev_priv); 2535 2536 /* 2537 * All state reset _must_ be completed before we update the 2538 * reset counter, for otherwise waiters might miss the reset 2539 * pending state and not properly drop locks, resulting in 2540 * deadlocks with the reset work. 2541 */ 2542 ret = i915_reset(dev_priv); 2543 2544 intel_finish_reset(dev_priv); 2545 2546 intel_runtime_pm_put(dev_priv); 2547 2548 if (ret == 0) 2549 kobject_uevent_env(kobj, 2550 KOBJ_CHANGE, reset_done_event); 2551 2552 /* 2553 * Note: The wake_up also serves as a memory barrier so that 2554 * waiters see the update value of the reset counter atomic_t. 2555 */ 2556 wake_up_all(&dev_priv->gpu_error.reset_queue); 2557 } 2558 } 2559 2560 static void i915_report_and_clear_eir(struct drm_i915_private *dev_priv) 2561 { 2562 uint32_t instdone[I915_NUM_INSTDONE_REG]; 2563 u32 eir = I915_READ(EIR); 2564 int pipe, i; 2565 2566 if (!eir) 2567 return; 2568 2569 pr_err("render error detected, EIR: 0x%08x\n", eir); 2570 2571 i915_get_extra_instdone(dev_priv, instdone); 2572 2573 if (IS_G4X(dev_priv)) { 2574 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) { 2575 u32 ipeir = I915_READ(IPEIR_I965); 2576 2577 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); 2578 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); 2579 for (i = 0; i < ARRAY_SIZE(instdone); i++) 2580 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); 2581 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); 2582 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); 2583 I915_WRITE(IPEIR_I965, ipeir); 2584 POSTING_READ(IPEIR_I965); 2585 } 2586 if (eir & GM45_ERROR_PAGE_TABLE) { 2587 u32 pgtbl_err = I915_READ(PGTBL_ER); 2588 pr_err("page table error\n"); 2589 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err); 2590 I915_WRITE(PGTBL_ER, pgtbl_err); 2591 POSTING_READ(PGTBL_ER); 2592 } 2593 } 2594 2595 if (!IS_GEN2(dev_priv)) { 2596 if (eir & I915_ERROR_PAGE_TABLE) { 2597 u32 pgtbl_err = I915_READ(PGTBL_ER); 2598 pr_err("page table error\n"); 2599 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err); 2600 I915_WRITE(PGTBL_ER, pgtbl_err); 2601 POSTING_READ(PGTBL_ER); 2602 } 2603 } 2604 2605 if (eir & I915_ERROR_MEMORY_REFRESH) { 2606 pr_err("memory refresh error:\n"); 2607 for_each_pipe(dev_priv, pipe) 2608 pr_err("pipe %c stat: 0x%08x\n", 2609 pipe_name(pipe), I915_READ(PIPESTAT(pipe))); 2610 /* pipestat has already been acked */ 2611 } 2612 if (eir & I915_ERROR_INSTRUCTION) { 2613 pr_err("instruction error\n"); 2614 pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM)); 2615 for (i = 0; i < ARRAY_SIZE(instdone); i++) 2616 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); 2617 if (INTEL_GEN(dev_priv) < 4) { 2618 u32 ipeir = I915_READ(IPEIR); 2619 2620 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR)); 2621 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR)); 2622 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD)); 2623 I915_WRITE(IPEIR, ipeir); 2624 POSTING_READ(IPEIR); 2625 } else { 2626 u32 ipeir = I915_READ(IPEIR_I965); 2627 2628 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); 2629 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); 2630 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); 2631 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); 2632 I915_WRITE(IPEIR_I965, ipeir); 2633 POSTING_READ(IPEIR_I965); 2634 } 2635 } 2636 2637 I915_WRITE(EIR, eir); 2638 POSTING_READ(EIR); 2639 eir = I915_READ(EIR); 2640 if (eir) { 2641 /* 2642 * some errors might have become stuck, 2643 * mask them. 
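		 * Setting the stuck bits in EMR masks those error sources so
		 * they stop re-asserting EIR, and the IIR write below acks the
		 * command parser error interrupt itself.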
2644 */ 2645 DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir); 2646 I915_WRITE(EMR, I915_READ(EMR) | eir); 2647 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 2648 } 2649 } 2650 2651 /** 2652 * i915_handle_error - handle a gpu error 2653 * @dev_priv: i915 device private 2654 * @engine_mask: mask representing engines that are hung 2655 * Do some basic checking of register state at error time and 2656 * dump it to the syslog. Also call i915_capture_error_state() to make 2657 * sure we get a record and make it available in debugfs. Fire a uevent 2658 * so userspace knows something bad happened (should trigger collection 2659 * of a ring dump etc.). 2660 * @fmt: Error message format string 2661 */ 2662 void i915_handle_error(struct drm_i915_private *dev_priv, 2663 u32 engine_mask, 2664 const char *fmt, ...) 2665 { 2666 va_list args; 2667 char error_msg[80]; 2668 2669 va_start(args, fmt); 2670 vscnprintf(error_msg, sizeof(error_msg), fmt, args); 2671 va_end(args); 2672 2673 i915_capture_error_state(dev_priv, engine_mask, error_msg); 2674 i915_report_and_clear_eir(dev_priv); 2675 2676 if (engine_mask) { 2677 atomic_or(I915_RESET_IN_PROGRESS_FLAG, 2678 &dev_priv->gpu_error.reset_counter); 2679 2680 /* 2681 * Wakeup waiting processes so that the reset function 2682 * i915_reset_and_wakeup doesn't deadlock trying to grab 2683 * various locks. By bumping the reset counter first, the woken 2684 * processes will see a reset in progress and back off, 2685 * releasing their locks and then wait for the reset completion. 2686 * We must do this for _all_ gpu waiters that might hold locks 2687 * that the reset work needs to acquire. 2688 * 2689 * Note: The wake_up serves as the required memory barrier to 2690 * ensure that the waiters see the updated value of the reset 2691 * counter atomic_t. 2692 */ 2693 i915_error_wake_up(dev_priv); 2694 } 2695 2696 i915_reset_and_wakeup(dev_priv); 2697 } 2698 2699 /* Called from drm generic code, passed 'crtc' which 2700 * we use as a pipe index 2701 */ 2702 static int i915_enable_vblank(struct drm_device *dev, unsigned int pipe) 2703 { 2704 struct drm_i915_private *dev_priv = to_i915(dev); 2705 unsigned long irqflags; 2706 2707 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2708 if (INTEL_INFO(dev)->gen >= 4) 2709 i915_enable_pipestat(dev_priv, pipe, 2710 PIPE_START_VBLANK_INTERRUPT_STATUS); 2711 else 2712 i915_enable_pipestat(dev_priv, pipe, 2713 PIPE_VBLANK_INTERRUPT_STATUS); 2714 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2715 2716 return 0; 2717 } 2718 2719 static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe) 2720 { 2721 struct drm_i915_private *dev_priv = to_i915(dev); 2722 unsigned long irqflags; 2723 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? 
DE_PIPE_VBLANK_IVB(pipe) : 2724 DE_PIPE_VBLANK(pipe); 2725 2726 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2727 ilk_enable_display_irq(dev_priv, bit); 2728 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2729 2730 return 0; 2731 } 2732 2733 static int valleyview_enable_vblank(struct drm_device *dev, unsigned int pipe) 2734 { 2735 struct drm_i915_private *dev_priv = to_i915(dev); 2736 unsigned long irqflags; 2737 2738 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2739 i915_enable_pipestat(dev_priv, pipe, 2740 PIPE_START_VBLANK_INTERRUPT_STATUS); 2741 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2742 2743 return 0; 2744 } 2745 2746 static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe) 2747 { 2748 struct drm_i915_private *dev_priv = to_i915(dev); 2749 unsigned long irqflags; 2750 2751 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2752 bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK); 2753 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2754 2755 return 0; 2756 } 2757 2758 /* Called from drm generic code, passed 'crtc' which 2759 * we use as a pipe index 2760 */ 2761 static void i915_disable_vblank(struct drm_device *dev, unsigned int pipe) 2762 { 2763 struct drm_i915_private *dev_priv = to_i915(dev); 2764 unsigned long irqflags; 2765 2766 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2767 i915_disable_pipestat(dev_priv, pipe, 2768 PIPE_VBLANK_INTERRUPT_STATUS | 2769 PIPE_START_VBLANK_INTERRUPT_STATUS); 2770 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2771 } 2772 2773 static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe) 2774 { 2775 struct drm_i915_private *dev_priv = to_i915(dev); 2776 unsigned long irqflags; 2777 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? 
DE_PIPE_VBLANK_IVB(pipe) : 2778 DE_PIPE_VBLANK(pipe); 2779 2780 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2781 ilk_disable_display_irq(dev_priv, bit); 2782 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2783 } 2784 2785 static void valleyview_disable_vblank(struct drm_device *dev, unsigned int pipe) 2786 { 2787 struct drm_i915_private *dev_priv = to_i915(dev); 2788 unsigned long irqflags; 2789 2790 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2791 i915_disable_pipestat(dev_priv, pipe, 2792 PIPE_START_VBLANK_INTERRUPT_STATUS); 2793 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2794 } 2795 2796 static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe) 2797 { 2798 struct drm_i915_private *dev_priv = to_i915(dev); 2799 unsigned long irqflags; 2800 2801 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2802 bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK); 2803 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2804 } 2805 2806 static bool 2807 ring_idle(struct intel_engine_cs *engine, u32 seqno) 2808 { 2809 return i915_seqno_passed(seqno, 2810 READ_ONCE(engine->last_submitted_seqno)); 2811 } 2812 2813 static bool 2814 ipehr_is_semaphore_wait(struct intel_engine_cs *engine, u32 ipehr) 2815 { 2816 if (INTEL_GEN(engine->i915) >= 8) { 2817 return (ipehr >> 23) == 0x1c; 2818 } else { 2819 ipehr &= ~MI_SEMAPHORE_SYNC_MASK; 2820 return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | 2821 MI_SEMAPHORE_REGISTER); 2822 } 2823 } 2824 2825 static struct intel_engine_cs * 2826 semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr, 2827 u64 offset) 2828 { 2829 struct drm_i915_private *dev_priv = engine->i915; 2830 struct intel_engine_cs *signaller; 2831 2832 if (INTEL_GEN(dev_priv) >= 8) { 2833 for_each_engine(signaller, dev_priv) { 2834 if (engine == signaller) 2835 continue; 2836 2837 if (offset == signaller->semaphore.signal_ggtt[engine->id]) 2838 return signaller; 2839 } 2840 } else { 2841 u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK; 2842 2843 for_each_engine(signaller, dev_priv) { 2844 if(engine == signaller) 2845 continue; 2846 2847 if (sync_bits == signaller->semaphore.mbox.wait[engine->id]) 2848 return signaller; 2849 } 2850 } 2851 2852 DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n", 2853 engine->id, ipehr, offset); 2854 2855 return NULL; 2856 } 2857 2858 static struct intel_engine_cs * 2859 semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno) 2860 { 2861 struct drm_i915_private *dev_priv = engine->i915; 2862 u32 cmd, ipehr, head; 2863 u64 offset = 0; 2864 int i, backwards; 2865 2866 /* 2867 * This function does not support execlist mode - any attempt to 2868 * proceed further into this function will result in a kernel panic 2869 * when dereferencing ring->buffer, which is not set up in execlist 2870 * mode. 2871 * 2872 * The correct way of doing it would be to derive the currently 2873 * executing ring buffer from the current context, which is derived 2874 * from the currently running request. Unfortunately, to get the 2875 * current request we would have to grab the struct_mutex before doing 2876 * anything else, which would be ill-advised since some other thread 2877 * might have grabbed it already and managed to hang itself, causing 2878 * the hang checker to deadlock. 2879 * 2880 * Therefore, this function does not support execlist mode in its 2881 * current form. Just return NULL and move on. 
2882 */ 2883 if (engine->buffer == NULL) 2884 return NULL; 2885 2886 ipehr = I915_READ(RING_IPEHR(engine->mmio_base)); 2887 if (!ipehr_is_semaphore_wait(engine, ipehr)) 2888 return NULL; 2889 2890 /* 2891 * HEAD is likely pointing to the dword after the actual command, 2892 * so scan backwards until we find the MBOX. But limit it to just 3 2893 * or 4 dwords depending on the semaphore wait command size. 2894 * Note that we don't care about ACTHD here since that might 2895 * point at a batch, and semaphores are always emitted into the 2896 * ringbuffer itself. 2897 */ 2898 head = I915_READ_HEAD(engine) & HEAD_ADDR; 2899 backwards = (INTEL_GEN(dev_priv) >= 8) ? 5 : 4; 2900 2901 for (i = backwards; i; --i) { 2902 /* 2903 * Be paranoid and presume the hw has gone off into the wild - 2904 * our ring is smaller than what the hardware (and hence 2905 * HEAD_ADDR) allows. Also handles wrap-around. 2906 */ 2907 head &= engine->buffer->size - 1; 2908 2909 /* This here seems to blow up */ 2910 cmd = ioread32(engine->buffer->virtual_start + head); 2911 if (cmd == ipehr) 2912 break; 2913 2914 head -= 4; 2915 } 2916 2917 if (!i) 2918 return NULL; 2919 2920 *seqno = ioread32(engine->buffer->virtual_start + head + 4) + 1; 2921 if (INTEL_GEN(dev_priv) >= 8) { 2922 offset = ioread32(engine->buffer->virtual_start + head + 12); 2923 offset <<= 32; 2924 offset |= ioread32(engine->buffer->virtual_start + head + 8); 2925 } 2926 return semaphore_wait_to_signaller_ring(engine, ipehr, offset); 2927 } 2928 2929 static int semaphore_passed(struct intel_engine_cs *engine) 2930 { 2931 struct drm_i915_private *dev_priv = engine->i915; 2932 struct intel_engine_cs *signaller; 2933 u32 seqno; 2934 2935 engine->hangcheck.deadlock++; 2936 2937 signaller = semaphore_waits_for(engine, &seqno); 2938 if (signaller == NULL) 2939 return -1; 2940 2941 /* Prevent pathological recursion due to driver bugs */ 2942 if (signaller->hangcheck.deadlock >= I915_NUM_ENGINES) 2943 return -1; 2944 2945 if (i915_seqno_passed(intel_engine_get_seqno(signaller), seqno)) 2946 return 1; 2947 2948 /* cursory check for an unkickable deadlock */ 2949 if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE && 2950 semaphore_passed(signaller) < 0) 2951 return -1; 2952 2953 return 0; 2954 } 2955 2956 static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv) 2957 { 2958 struct intel_engine_cs *engine; 2959 2960 for_each_engine(engine, dev_priv) 2961 engine->hangcheck.deadlock = 0; 2962 } 2963 2964 static bool subunits_stuck(struct intel_engine_cs *engine) 2965 { 2966 u32 instdone[I915_NUM_INSTDONE_REG]; 2967 bool stuck; 2968 int i; 2969 2970 if (engine->id != RCS) 2971 return true; 2972 2973 i915_get_extra_instdone(engine->i915, instdone); 2974 2975 /* There might be unstable subunit states even when 2976 * actual head is not moving. Filter out the unstable ones by 2977 * accumulating the undone -> done transitions and only 2978 * consider those as progress.
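	 * Each INSTDONE register is OR-accumulated into hangcheck.instdone;
	 * a new sample only counts as progress if it sets a bit we have not
	 * seen before, otherwise the render engine is treated as stuck.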
2979 */ 2980 stuck = true; 2981 for (i = 0; i < I915_NUM_INSTDONE_REG; i++) { 2982 const u32 tmp = instdone[i] | engine->hangcheck.instdone[i]; 2983 2984 if (tmp != engine->hangcheck.instdone[i]) 2985 stuck = false; 2986 2987 engine->hangcheck.instdone[i] |= tmp; 2988 } 2989 2990 return stuck; 2991 } 2992 2993 static enum intel_ring_hangcheck_action 2994 head_stuck(struct intel_engine_cs *engine, u64 acthd) 2995 { 2996 if (acthd != engine->hangcheck.acthd) { 2997 2998 /* Clear subunit states on head movement */ 2999 memset(engine->hangcheck.instdone, 0, 3000 sizeof(engine->hangcheck.instdone)); 3001 3002 return HANGCHECK_ACTIVE; 3003 } 3004 3005 if (!subunits_stuck(engine)) 3006 return HANGCHECK_ACTIVE; 3007 3008 return HANGCHECK_HUNG; 3009 } 3010 3011 static enum intel_ring_hangcheck_action 3012 ring_stuck(struct intel_engine_cs *engine, u64 acthd) 3013 { 3014 struct drm_i915_private *dev_priv = engine->i915; 3015 enum intel_ring_hangcheck_action ha; 3016 u32 tmp; 3017 3018 ha = head_stuck(engine, acthd); 3019 if (ha != HANGCHECK_HUNG) 3020 return ha; 3021 3022 if (IS_GEN2(dev_priv)) 3023 return HANGCHECK_HUNG; 3024 3025 /* Is the chip hanging on a WAIT_FOR_EVENT? 3026 * If so we can simply poke the RB_WAIT bit 3027 * and break the hang. This should work on 3028 * all but the second generation chipsets. 3029 */ 3030 tmp = I915_READ_CTL(engine); 3031 if (tmp & RING_WAIT) { 3032 i915_handle_error(dev_priv, 0, 3033 "Kicking stuck wait on %s", 3034 engine->name); 3035 I915_WRITE_CTL(engine, tmp); 3036 return HANGCHECK_KICK; 3037 } 3038 3039 if (INTEL_GEN(dev_priv) >= 6 && tmp & RING_WAIT_SEMAPHORE) { 3040 switch (semaphore_passed(engine)) { 3041 default: 3042 return HANGCHECK_HUNG; 3043 case 1: 3044 i915_handle_error(dev_priv, 0, 3045 "Kicking stuck semaphore on %s", 3046 engine->name); 3047 I915_WRITE_CTL(engine, tmp); 3048 return HANGCHECK_KICK; 3049 case 0: 3050 return HANGCHECK_WAIT; 3051 } 3052 } 3053 3054 return HANGCHECK_HUNG; 3055 } 3056 3057 static unsigned long kick_waiters(struct intel_engine_cs *engine) 3058 { 3059 struct drm_i915_private *i915 = engine->i915; 3060 unsigned long irq_count = READ_ONCE(engine->breadcrumbs.irq_wakeups); 3061 3062 if (engine->hangcheck.user_interrupts == irq_count && 3063 !test_and_set_bit(engine->id, &i915->gpu_error.missed_irq_rings)) { 3064 if (!test_bit(engine->id, &i915->gpu_error.test_irq_rings)) 3065 DRM_ERROR("Hangcheck timer elapsed... %s idle\n", 3066 engine->name); 3067 3068 intel_engine_enable_fake_irq(engine); 3069 } 3070 3071 return irq_count; 3072 } 3073 /* 3074 * This is called when the chip hasn't reported back with completed 3075 * batchbuffers in a long time. We keep track per ring seqno progress and 3076 * if there are no progress, hangcheck score for that ring is increased. 3077 * Further, acthd is inspected to see if the ring is stuck. On stuck case 3078 * we kick the ring. If we see no progress on three subsequent calls 3079 * we assume chip is wedged and try to fix it by resetting the chip. 
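 *
 * Rough illustration of the scoring used below: on every hangcheck pass a
 * ring still stuck on the same seqno gains HUNG (20) when neither its head
 * nor its subunits are making progress, KICK (5) when a stuck wait or
 * semaphore had to be kicked, or BUSY (1) when it is otherwise still busy,
 * while a ring whose seqno did advance decays its score by ACTIVE_DECAY
 * (15); the ring is reported hung once its score reaches
 * HANGCHECK_SCORE_RING_HUNG.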
3080 */ 3081 static void i915_hangcheck_elapsed(struct work_struct *work) 3082 { 3083 struct drm_i915_private *dev_priv = 3084 container_of(work, typeof(*dev_priv), 3085 gpu_error.hangcheck_work.work); 3086 struct intel_engine_cs *engine; 3087 unsigned int hung = 0, stuck = 0; 3088 int busy_count = 0; 3089 #define BUSY 1 3090 #define KICK 5 3091 #define HUNG 20 3092 #define ACTIVE_DECAY 15 3093 3094 if (!i915.enable_hangcheck) 3095 return; 3096 3097 if (!READ_ONCE(dev_priv->gt.awake)) 3098 return; 3099 3100 /* As enabling the GPU requires fairly extensive mmio access, 3101 * periodically arm the mmio checker to see if we are triggering 3102 * any invalid access. 3103 */ 3104 intel_uncore_arm_unclaimed_mmio_detection(dev_priv); 3105 3106 for_each_engine(engine, dev_priv) { 3107 bool busy = intel_engine_has_waiter(engine); 3108 u64 acthd; 3109 u32 seqno; 3110 unsigned user_interrupts; 3111 3112 semaphore_clear_deadlocks(dev_priv); 3113 3114 /* We don't strictly need an irq-barrier here, as we are not 3115 * serving an interrupt request, be paranoid in case the 3116 * barrier has side-effects (such as preventing a broken 3117 * cacheline snoop) and so be sure that we can see the seqno 3118 * advance. If the seqno should stick, due to a stale 3119 * cacheline, we would erroneously declare the GPU hung. 3120 */ 3121 if (engine->irq_seqno_barrier) 3122 engine->irq_seqno_barrier(engine); 3123 3124 acthd = intel_ring_get_active_head(engine); 3125 seqno = intel_engine_get_seqno(engine); 3126 3127 /* Reset stuck interrupts between batch advances */ 3128 user_interrupts = 0; 3129 3130 if (engine->hangcheck.seqno == seqno) { 3131 if (ring_idle(engine, seqno)) { 3132 engine->hangcheck.action = HANGCHECK_IDLE; 3133 if (busy) { 3134 /* Safeguard against driver failure */ 3135 user_interrupts = kick_waiters(engine); 3136 engine->hangcheck.score += BUSY; 3137 } 3138 } else { 3139 /* We always increment the hangcheck score 3140 * if the ring is busy and still processing 3141 * the same request, so that no single request 3142 * can run indefinitely (such as a chain of 3143 * batches). The only time we do not increment 3144 * the hangcheck score on this ring, if this 3145 * ring is in a legitimate wait for another 3146 * ring. In that case the waiting ring is a 3147 * victim and we want to be sure we catch the 3148 * right culprit. Then every time we do kick 3149 * the ring, add a small increment to the 3150 * score so that we can catch a batch that is 3151 * being repeatedly kicked and so responsible 3152 * for stalling the machine. 3153 */ 3154 engine->hangcheck.action = ring_stuck(engine, 3155 acthd); 3156 3157 switch (engine->hangcheck.action) { 3158 case HANGCHECK_IDLE: 3159 case HANGCHECK_WAIT: 3160 break; 3161 case HANGCHECK_ACTIVE: 3162 engine->hangcheck.score += BUSY; 3163 break; 3164 case HANGCHECK_KICK: 3165 engine->hangcheck.score += KICK; 3166 break; 3167 case HANGCHECK_HUNG: 3168 engine->hangcheck.score += HUNG; 3169 break; 3170 } 3171 } 3172 3173 if (engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) { 3174 hung |= intel_engine_flag(engine); 3175 if (engine->hangcheck.action != HANGCHECK_HUNG) 3176 stuck |= intel_engine_flag(engine); 3177 } 3178 } else { 3179 engine->hangcheck.action = HANGCHECK_ACTIVE; 3180 3181 /* Gradually reduce the count so that we catch DoS 3182 * attempts across multiple batches. 
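			 * The decay step (ACTIVE_DECAY) is larger than the
			 * per-pass BUSY increment, so a ring that keeps making
			 * progress quickly drops back to a zero score.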
3183 */ 3184 if (engine->hangcheck.score > 0) 3185 engine->hangcheck.score -= ACTIVE_DECAY; 3186 if (engine->hangcheck.score < 0) 3187 engine->hangcheck.score = 0; 3188 3189 /* Clear head and subunit states on seqno movement */ 3190 acthd = 0; 3191 3192 memset(engine->hangcheck.instdone, 0, 3193 sizeof(engine->hangcheck.instdone)); 3194 } 3195 3196 engine->hangcheck.seqno = seqno; 3197 engine->hangcheck.acthd = acthd; 3198 engine->hangcheck.user_interrupts = user_interrupts; 3199 busy_count += busy; 3200 } 3201 3202 if (hung) { 3203 char msg[80]; 3204 int len; 3205 3206 /* If some rings hung but others were still busy, only 3207 * blame the hanging rings in the synopsis. 3208 */ 3209 if (stuck != hung) 3210 hung &= ~stuck; 3211 len = scnprintf(msg, sizeof(msg), 3212 "%s on ", stuck == hung ? "No progress" : "Hang"); 3213 for_each_engine_masked(engine, dev_priv, hung) 3214 len += scnprintf(msg + len, sizeof(msg) - len, 3215 "%s, ", engine->name); 3216 msg[len-2] = '\0'; 3217 3218 return i915_handle_error(dev_priv, hung, msg); 3219 } 3220 3221 /* Reset timer in case GPU hangs without another request being added */ 3222 if (busy_count) 3223 i915_queue_hangcheck(dev_priv); 3224 } 3225 3226 static void ibx_irq_reset(struct drm_device *dev) 3227 { 3228 struct drm_i915_private *dev_priv = to_i915(dev); 3229 3230 if (HAS_PCH_NOP(dev)) 3231 return; 3232 3233 GEN5_IRQ_RESET(SDE); 3234 3235 if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev)) 3236 I915_WRITE(SERR_INT, 0xffffffff); 3237 } 3238 3239 /* 3240 * SDEIER is also touched by the interrupt handler to work around missed PCH 3241 * interrupts. Hence we can't update it after the interrupt handler is enabled - 3242 * instead we unconditionally enable all PCH interrupt sources here, but then 3243 * only unmask them as needed with SDEIMR. 3244 * 3245 * This function needs to be called before interrupts are enabled. 
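 * (In practice that means from the *_irq_postinstall paths, before the
 * master interrupt control is switched back on.)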
3246 */ 3247 static void ibx_irq_pre_postinstall(struct drm_device *dev) 3248 { 3249 struct drm_i915_private *dev_priv = to_i915(dev); 3250 3251 if (HAS_PCH_NOP(dev)) 3252 return; 3253 3254 WARN_ON(I915_READ(SDEIER) != 0); 3255 I915_WRITE(SDEIER, 0xffffffff); 3256 POSTING_READ(SDEIER); 3257 } 3258 3259 static void gen5_gt_irq_reset(struct drm_device *dev) 3260 { 3261 struct drm_i915_private *dev_priv = to_i915(dev); 3262 3263 GEN5_IRQ_RESET(GT); 3264 if (INTEL_INFO(dev)->gen >= 6) 3265 GEN5_IRQ_RESET(GEN6_PM); 3266 } 3267 3268 static void vlv_display_irq_reset(struct drm_i915_private *dev_priv) 3269 { 3270 enum pipe pipe; 3271 3272 if (IS_CHERRYVIEW(dev_priv)) 3273 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV); 3274 else 3275 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK); 3276 3277 i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0); 3278 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3279 3280 for_each_pipe(dev_priv, pipe) { 3281 I915_WRITE(PIPESTAT(pipe), 3282 PIPE_FIFO_UNDERRUN_STATUS | 3283 PIPESTAT_INT_STATUS_MASK); 3284 dev_priv->pipestat_irq_mask[pipe] = 0; 3285 } 3286 3287 GEN5_IRQ_RESET(VLV_); 3288 dev_priv->irq_mask = ~0; 3289 } 3290 3291 static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv) 3292 { 3293 u32 pipestat_mask; 3294 u32 enable_mask; 3295 enum pipe pipe; 3296 3297 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV | 3298 PIPE_CRC_DONE_INTERRUPT_STATUS; 3299 3300 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); 3301 for_each_pipe(dev_priv, pipe) 3302 i915_enable_pipestat(dev_priv, pipe, pipestat_mask); 3303 3304 enable_mask = I915_DISPLAY_PORT_INTERRUPT | 3305 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3306 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; 3307 if (IS_CHERRYVIEW(dev_priv)) 3308 enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT; 3309 3310 WARN_ON(dev_priv->irq_mask != ~0); 3311 3312 dev_priv->irq_mask = ~enable_mask; 3313 3314 GEN5_IRQ_INIT(VLV_, dev_priv->irq_mask, enable_mask); 3315 } 3316 3317 /* drm_dma.h hooks 3318 */ 3319 static void ironlake_irq_reset(struct drm_device *dev) 3320 { 3321 struct drm_i915_private *dev_priv = to_i915(dev); 3322 3323 I915_WRITE(HWSTAM, 0xffffffff); 3324 3325 GEN5_IRQ_RESET(DE); 3326 if (IS_GEN7(dev)) 3327 I915_WRITE(GEN7_ERR_INT, 0xffffffff); 3328 3329 gen5_gt_irq_reset(dev); 3330 3331 ibx_irq_reset(dev); 3332 } 3333 3334 static void valleyview_irq_preinstall(struct drm_device *dev) 3335 { 3336 struct drm_i915_private *dev_priv = to_i915(dev); 3337 3338 I915_WRITE(VLV_MASTER_IER, 0); 3339 POSTING_READ(VLV_MASTER_IER); 3340 3341 gen5_gt_irq_reset(dev); 3342 3343 spin_lock_irq(&dev_priv->irq_lock); 3344 if (dev_priv->display_irqs_enabled) 3345 vlv_display_irq_reset(dev_priv); 3346 spin_unlock_irq(&dev_priv->irq_lock); 3347 } 3348 3349 static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv) 3350 { 3351 GEN8_IRQ_RESET_NDX(GT, 0); 3352 GEN8_IRQ_RESET_NDX(GT, 1); 3353 GEN8_IRQ_RESET_NDX(GT, 2); 3354 GEN8_IRQ_RESET_NDX(GT, 3); 3355 } 3356 3357 static void gen8_irq_reset(struct drm_device *dev) 3358 { 3359 struct drm_i915_private *dev_priv = to_i915(dev); 3360 int pipe; 3361 3362 I915_WRITE(GEN8_MASTER_IRQ, 0); 3363 POSTING_READ(GEN8_MASTER_IRQ); 3364 3365 gen8_gt_irq_reset(dev_priv); 3366 3367 for_each_pipe(dev_priv, pipe) 3368 if (intel_display_power_is_enabled(dev_priv, 3369 POWER_DOMAIN_PIPE(pipe))) 3370 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe); 3371 3372 GEN5_IRQ_RESET(GEN8_DE_PORT_); 3373 GEN5_IRQ_RESET(GEN8_DE_MISC_); 3374 GEN5_IRQ_RESET(GEN8_PCU_); 3375 3376 if 
(HAS_PCH_SPLIT(dev)) 3377 ibx_irq_reset(dev); 3378 } 3379 3380 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv, 3381 unsigned int pipe_mask) 3382 { 3383 uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN; 3384 enum pipe pipe; 3385 3386 spin_lock_irq(&dev_priv->irq_lock); 3387 for_each_pipe_masked(dev_priv, pipe, pipe_mask) 3388 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe, 3389 dev_priv->de_irq_mask[pipe], 3390 ~dev_priv->de_irq_mask[pipe] | extra_ier); 3391 spin_unlock_irq(&dev_priv->irq_lock); 3392 } 3393 3394 void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv, 3395 unsigned int pipe_mask) 3396 { 3397 enum pipe pipe; 3398 3399 spin_lock_irq(&dev_priv->irq_lock); 3400 for_each_pipe_masked(dev_priv, pipe, pipe_mask) 3401 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe); 3402 spin_unlock_irq(&dev_priv->irq_lock); 3403 3404 /* make sure we're done processing display irqs */ 3405 synchronize_irq(dev_priv->drm.irq); 3406 } 3407 3408 static void cherryview_irq_preinstall(struct drm_device *dev) 3409 { 3410 struct drm_i915_private *dev_priv = to_i915(dev); 3411 3412 I915_WRITE(GEN8_MASTER_IRQ, 0); 3413 POSTING_READ(GEN8_MASTER_IRQ); 3414 3415 gen8_gt_irq_reset(dev_priv); 3416 3417 GEN5_IRQ_RESET(GEN8_PCU_); 3418 3419 spin_lock_irq(&dev_priv->irq_lock); 3420 if (dev_priv->display_irqs_enabled) 3421 vlv_display_irq_reset(dev_priv); 3422 spin_unlock_irq(&dev_priv->irq_lock); 3423 } 3424 3425 static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv, 3426 const u32 hpd[HPD_NUM_PINS]) 3427 { 3428 struct intel_encoder *encoder; 3429 u32 enabled_irqs = 0; 3430 3431 for_each_intel_encoder(&dev_priv->drm, encoder) 3432 if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED) 3433 enabled_irqs |= hpd[encoder->hpd_pin]; 3434 3435 return enabled_irqs; 3436 } 3437 3438 static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv) 3439 { 3440 u32 hotplug_irqs, hotplug, enabled_irqs; 3441 3442 if (HAS_PCH_IBX(dev_priv)) { 3443 hotplug_irqs = SDE_HOTPLUG_MASK; 3444 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ibx); 3445 } else { 3446 hotplug_irqs = SDE_HOTPLUG_MASK_CPT; 3447 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_cpt); 3448 } 3449 3450 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 3451 3452 /* 3453 * Enable digital hotplug on the PCH, and configure the DP short pulse 3454 * duration to 2ms (which is the minimum in the Display Port spec). 3455 * The pulse duration bits are reserved on LPT+. 3456 */ 3457 hotplug = I915_READ(PCH_PORT_HOTPLUG); 3458 hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK); 3459 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms; 3460 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms; 3461 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms; 3462 /* 3463 * When CPU and PCH are on the same package, port A 3464 * HPD must be enabled in both north and south. 
3465 */ 3466 if (HAS_PCH_LPT_LP(dev_priv)) 3467 hotplug |= PORTA_HOTPLUG_ENABLE; 3468 I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 3469 } 3470 3471 static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv) 3472 { 3473 u32 hotplug_irqs, hotplug, enabled_irqs; 3474 3475 hotplug_irqs = SDE_HOTPLUG_MASK_SPT; 3476 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_spt); 3477 3478 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 3479 3480 /* Enable digital hotplug on the PCH */ 3481 hotplug = I915_READ(PCH_PORT_HOTPLUG); 3482 hotplug |= PORTD_HOTPLUG_ENABLE | PORTC_HOTPLUG_ENABLE | 3483 PORTB_HOTPLUG_ENABLE | PORTA_HOTPLUG_ENABLE; 3484 I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 3485 3486 hotplug = I915_READ(PCH_PORT_HOTPLUG2); 3487 hotplug |= PORTE_HOTPLUG_ENABLE; 3488 I915_WRITE(PCH_PORT_HOTPLUG2, hotplug); 3489 } 3490 3491 static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv) 3492 { 3493 u32 hotplug_irqs, hotplug, enabled_irqs; 3494 3495 if (INTEL_GEN(dev_priv) >= 8) { 3496 hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG; 3497 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bdw); 3498 3499 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs); 3500 } else if (INTEL_GEN(dev_priv) >= 7) { 3501 hotplug_irqs = DE_DP_A_HOTPLUG_IVB; 3502 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ivb); 3503 3504 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs); 3505 } else { 3506 hotplug_irqs = DE_DP_A_HOTPLUG; 3507 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ilk); 3508 3509 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs); 3510 } 3511 3512 /* 3513 * Enable digital hotplug on the CPU, and configure the DP short pulse 3514 * duration to 2ms (which is the minimum in the Display Port spec) 3515 * The pulse duration bits are reserved on HSW+. 3516 */ 3517 hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL); 3518 hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK; 3519 hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE | DIGITAL_PORTA_PULSE_DURATION_2ms; 3520 I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug); 3521 3522 ibx_hpd_irq_setup(dev_priv); 3523 } 3524 3525 static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv) 3526 { 3527 u32 hotplug_irqs, hotplug, enabled_irqs; 3528 3529 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bxt); 3530 hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK; 3531 3532 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs); 3533 3534 hotplug = I915_READ(PCH_PORT_HOTPLUG); 3535 hotplug |= PORTC_HOTPLUG_ENABLE | PORTB_HOTPLUG_ENABLE | 3536 PORTA_HOTPLUG_ENABLE; 3537 3538 DRM_DEBUG_KMS("Invert bit setting: hp_ctl:%x hp_port:%x\n", 3539 hotplug, enabled_irqs); 3540 hotplug &= ~BXT_DDI_HPD_INVERT_MASK; 3541 3542 /* 3543 * For BXT invert bit has to be set based on AOB design 3544 * for HPD detection logic, update it based on VBT fields. 
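 * Each per-DDI invert bit below is set only when that pin's hotplug
 * interrupt is enabled and the VBT marks the port as inverted.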
3545 */ 3546 3547 if ((enabled_irqs & BXT_DE_PORT_HP_DDIA) && 3548 intel_bios_is_port_hpd_inverted(dev_priv, PORT_A)) 3549 hotplug |= BXT_DDIA_HPD_INVERT; 3550 if ((enabled_irqs & BXT_DE_PORT_HP_DDIB) && 3551 intel_bios_is_port_hpd_inverted(dev_priv, PORT_B)) 3552 hotplug |= BXT_DDIB_HPD_INVERT; 3553 if ((enabled_irqs & BXT_DE_PORT_HP_DDIC) && 3554 intel_bios_is_port_hpd_inverted(dev_priv, PORT_C)) 3555 hotplug |= BXT_DDIC_HPD_INVERT; 3556 3557 I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 3558 } 3559 3560 static void ibx_irq_postinstall(struct drm_device *dev) 3561 { 3562 struct drm_i915_private *dev_priv = to_i915(dev); 3563 u32 mask; 3564 3565 if (HAS_PCH_NOP(dev)) 3566 return; 3567 3568 if (HAS_PCH_IBX(dev)) 3569 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON; 3570 else 3571 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT; 3572 3573 gen5_assert_iir_is_zero(dev_priv, SDEIIR); 3574 I915_WRITE(SDEIMR, ~mask); 3575 } 3576 3577 static void gen5_gt_irq_postinstall(struct drm_device *dev) 3578 { 3579 struct drm_i915_private *dev_priv = to_i915(dev); 3580 u32 pm_irqs, gt_irqs; 3581 3582 pm_irqs = gt_irqs = 0; 3583 3584 dev_priv->gt_irq_mask = ~0; 3585 if (HAS_L3_DPF(dev)) { 3586 /* L3 parity interrupt is always unmasked. */ 3587 dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev); 3588 gt_irqs |= GT_PARITY_ERROR(dev); 3589 } 3590 3591 gt_irqs |= GT_RENDER_USER_INTERRUPT; 3592 if (IS_GEN5(dev)) { 3593 gt_irqs |= ILK_BSD_USER_INTERRUPT; 3594 } else { 3595 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT; 3596 } 3597 3598 GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs); 3599 3600 if (INTEL_INFO(dev)->gen >= 6) { 3601 /* 3602 * RPS interrupts will get enabled/disabled on demand when RPS 3603 * itself is enabled/disabled. 3604 */ 3605 if (HAS_VEBOX(dev)) 3606 pm_irqs |= PM_VEBOX_USER_INTERRUPT; 3607 3608 dev_priv->pm_irq_mask = 0xffffffff; 3609 GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs); 3610 } 3611 } 3612 3613 static int ironlake_irq_postinstall(struct drm_device *dev) 3614 { 3615 struct drm_i915_private *dev_priv = to_i915(dev); 3616 u32 display_mask, extra_mask; 3617 3618 if (INTEL_INFO(dev)->gen >= 7) { 3619 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | 3620 DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB | 3621 DE_PLANEB_FLIP_DONE_IVB | 3622 DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB); 3623 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB | 3624 DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB | 3625 DE_DP_A_HOTPLUG_IVB); 3626 } else { 3627 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | 3628 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE | 3629 DE_AUX_CHANNEL_A | 3630 DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE | 3631 DE_POISON); 3632 extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT | 3633 DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN | 3634 DE_DP_A_HOTPLUG); 3635 } 3636 3637 dev_priv->irq_mask = ~display_mask; 3638 3639 I915_WRITE(HWSTAM, 0xeffe); 3640 3641 ibx_irq_pre_postinstall(dev); 3642 3643 GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask); 3644 3645 gen5_gt_irq_postinstall(dev); 3646 3647 ibx_irq_postinstall(dev); 3648 3649 if (IS_IRONLAKE_M(dev)) { 3650 /* Enable PCU event interrupts 3651 * 3652 * spinlocking not required here for correctness since interrupt 3653 * setup is guaranteed to run in single-threaded context. But we 3654 * need it to make the assert_spin_locked happy. 
*/ 3655 spin_lock_irq(&dev_priv->irq_lock); 3656 ilk_enable_display_irq(dev_priv, DE_PCU_EVENT); 3657 spin_unlock_irq(&dev_priv->irq_lock); 3658 } 3659 3660 return 0; 3661 } 3662 3663 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv) 3664 { 3665 assert_spin_locked(&dev_priv->irq_lock); 3666 3667 if (dev_priv->display_irqs_enabled) 3668 return; 3669 3670 dev_priv->display_irqs_enabled = true; 3671 3672 if (intel_irqs_enabled(dev_priv)) { 3673 vlv_display_irq_reset(dev_priv); 3674 vlv_display_irq_postinstall(dev_priv); 3675 } 3676 } 3677 3678 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv) 3679 { 3680 assert_spin_locked(&dev_priv->irq_lock); 3681 3682 if (!dev_priv->display_irqs_enabled) 3683 return; 3684 3685 dev_priv->display_irqs_enabled = false; 3686 3687 if (intel_irqs_enabled(dev_priv)) 3688 vlv_display_irq_reset(dev_priv); 3689 } 3690 3691 3692 static int valleyview_irq_postinstall(struct drm_device *dev) 3693 { 3694 struct drm_i915_private *dev_priv = to_i915(dev); 3695 3696 gen5_gt_irq_postinstall(dev); 3697 3698 spin_lock_irq(&dev_priv->irq_lock); 3699 if (dev_priv->display_irqs_enabled) 3700 vlv_display_irq_postinstall(dev_priv); 3701 spin_unlock_irq(&dev_priv->irq_lock); 3702 3703 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); 3704 POSTING_READ(VLV_MASTER_IER); 3705 3706 return 0; 3707 } 3708 3709 static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv) 3710 { 3711 /* These are interrupts we'll toggle with the ring mask register */ 3712 uint32_t gt_interrupts[] = { 3713 GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT | 3714 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT | 3715 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT | 3716 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT, 3717 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT | 3718 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT | 3719 GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT | 3720 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT, 3721 0, 3722 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT | 3723 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT 3724 }; 3725 3726 if (HAS_L3_DPF(dev_priv)) 3727 gt_interrupts[0] |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT; 3728 3729 dev_priv->pm_irq_mask = 0xffffffff; 3730 GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]); 3731 GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]); 3732 /* 3733 * RPS interrupts will get enabled/disabled on demand when RPS itself 3734 * is enabled/disabled. 
3735 */ 3736 GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, 0); 3737 GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]); 3738 } 3739 3740 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) 3741 { 3742 uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE; 3743 uint32_t de_pipe_enables; 3744 u32 de_port_masked = GEN8_AUX_CHANNEL_A; 3745 u32 de_port_enables; 3746 u32 de_misc_masked = GEN8_DE_MISC_GSE; 3747 enum pipe pipe; 3748 3749 if (INTEL_INFO(dev_priv)->gen >= 9) { 3750 de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE | 3751 GEN9_DE_PIPE_IRQ_FAULT_ERRORS; 3752 de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C | 3753 GEN9_AUX_CHANNEL_D; 3754 if (IS_BROXTON(dev_priv)) 3755 de_port_masked |= BXT_DE_PORT_GMBUS; 3756 } else { 3757 de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE | 3758 GEN8_DE_PIPE_IRQ_FAULT_ERRORS; 3759 } 3760 3761 de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK | 3762 GEN8_PIPE_FIFO_UNDERRUN; 3763 3764 de_port_enables = de_port_masked; 3765 if (IS_BROXTON(dev_priv)) 3766 de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK; 3767 else if (IS_BROADWELL(dev_priv)) 3768 de_port_enables |= GEN8_PORT_DP_A_HOTPLUG; 3769 3770 dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked; 3771 dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked; 3772 dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked; 3773 3774 for_each_pipe(dev_priv, pipe) 3775 if (intel_display_power_is_enabled(dev_priv, 3776 POWER_DOMAIN_PIPE(pipe))) 3777 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe, 3778 dev_priv->de_irq_mask[pipe], 3779 de_pipe_enables); 3780 3781 GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables); 3782 GEN5_IRQ_INIT(GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked); 3783 } 3784 3785 static int gen8_irq_postinstall(struct drm_device *dev) 3786 { 3787 struct drm_i915_private *dev_priv = to_i915(dev); 3788 3789 if (HAS_PCH_SPLIT(dev)) 3790 ibx_irq_pre_postinstall(dev); 3791 3792 gen8_gt_irq_postinstall(dev_priv); 3793 gen8_de_irq_postinstall(dev_priv); 3794 3795 if (HAS_PCH_SPLIT(dev)) 3796 ibx_irq_postinstall(dev); 3797 3798 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 3799 POSTING_READ(GEN8_MASTER_IRQ); 3800 3801 return 0; 3802 } 3803 3804 static int cherryview_irq_postinstall(struct drm_device *dev) 3805 { 3806 struct drm_i915_private *dev_priv = to_i915(dev); 3807 3808 gen8_gt_irq_postinstall(dev_priv); 3809 3810 spin_lock_irq(&dev_priv->irq_lock); 3811 if (dev_priv->display_irqs_enabled) 3812 vlv_display_irq_postinstall(dev_priv); 3813 spin_unlock_irq(&dev_priv->irq_lock); 3814 3815 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 3816 POSTING_READ(GEN8_MASTER_IRQ); 3817 3818 return 0; 3819 } 3820 3821 static void gen8_irq_uninstall(struct drm_device *dev) 3822 { 3823 struct drm_i915_private *dev_priv = to_i915(dev); 3824 3825 if (!dev_priv) 3826 return; 3827 3828 gen8_irq_reset(dev); 3829 } 3830 3831 static void valleyview_irq_uninstall(struct drm_device *dev) 3832 { 3833 struct drm_i915_private *dev_priv = to_i915(dev); 3834 3835 if (!dev_priv) 3836 return; 3837 3838 I915_WRITE(VLV_MASTER_IER, 0); 3839 POSTING_READ(VLV_MASTER_IER); 3840 3841 gen5_gt_irq_reset(dev); 3842 3843 I915_WRITE(HWSTAM, 0xffffffff); 3844 3845 spin_lock_irq(&dev_priv->irq_lock); 3846 if (dev_priv->display_irqs_enabled) 3847 vlv_display_irq_reset(dev_priv); 3848 spin_unlock_irq(&dev_priv->irq_lock); 3849 } 3850 3851 static void cherryview_irq_uninstall(struct drm_device *dev) 3852 { 3853 struct drm_i915_private *dev_priv = to_i915(dev); 3854 3855 if (!dev_priv) 3856 return; 3857 3858 
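/* Disable the master interrupt first so nothing new is serviced while the GT, PCU and display interrupt state is torn down below. */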
I915_WRITE(GEN8_MASTER_IRQ, 0); 3859 POSTING_READ(GEN8_MASTER_IRQ); 3860 3861 gen8_gt_irq_reset(dev_priv); 3862 3863 GEN5_IRQ_RESET(GEN8_PCU_); 3864 3865 spin_lock_irq(&dev_priv->irq_lock); 3866 if (dev_priv->display_irqs_enabled) 3867 vlv_display_irq_reset(dev_priv); 3868 spin_unlock_irq(&dev_priv->irq_lock); 3869 } 3870 3871 static void ironlake_irq_uninstall(struct drm_device *dev) 3872 { 3873 struct drm_i915_private *dev_priv = to_i915(dev); 3874 3875 if (!dev_priv) 3876 return; 3877 3878 ironlake_irq_reset(dev); 3879 } 3880 3881 static void i8xx_irq_preinstall(struct drm_device * dev) 3882 { 3883 struct drm_i915_private *dev_priv = to_i915(dev); 3884 int pipe; 3885 3886 for_each_pipe(dev_priv, pipe) 3887 I915_WRITE(PIPESTAT(pipe), 0); 3888 I915_WRITE16(IMR, 0xffff); 3889 I915_WRITE16(IER, 0x0); 3890 POSTING_READ16(IER); 3891 } 3892 3893 static int i8xx_irq_postinstall(struct drm_device *dev) 3894 { 3895 struct drm_i915_private *dev_priv = to_i915(dev); 3896 3897 I915_WRITE16(EMR, 3898 ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); 3899 3900 /* Unmask the interrupts that we always want on. */ 3901 dev_priv->irq_mask = 3902 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3903 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3904 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3905 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT); 3906 I915_WRITE16(IMR, dev_priv->irq_mask); 3907 3908 I915_WRITE16(IER, 3909 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3910 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3911 I915_USER_INTERRUPT); 3912 POSTING_READ16(IER); 3913 3914 /* Interrupt setup is already guaranteed to be single-threaded, this is 3915 * just to make the assert_spin_locked check happy. */ 3916 spin_lock_irq(&dev_priv->irq_lock); 3917 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 3918 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 3919 spin_unlock_irq(&dev_priv->irq_lock); 3920 3921 return 0; 3922 } 3923 3924 /* 3925 * Returns true when a page flip has completed. 3926 */ 3927 static bool i8xx_handle_vblank(struct drm_i915_private *dev_priv, 3928 int plane, int pipe, u32 iir) 3929 { 3930 u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane); 3931 3932 if (!intel_pipe_handle_vblank(dev_priv, pipe)) 3933 return false; 3934 3935 if ((iir & flip_pending) == 0) 3936 goto check_page_flip; 3937 3938 /* We detect FlipDone by looking for the change in PendingFlip from '1' 3939 * to '0' on the following vblank, i.e. IIR has the Pendingflip 3940 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence 3941 * the flip is completed (no longer pending). Since this doesn't raise 3942 * an interrupt per se, we watch for the change at vblank. 
3943 */ 3944 if (I915_READ16(ISR) & flip_pending) 3945 goto check_page_flip; 3946 3947 intel_finish_page_flip_cs(dev_priv, pipe); 3948 return true; 3949 3950 check_page_flip: 3951 intel_check_page_flip(dev_priv, pipe); 3952 return false; 3953 } 3954 3955 static irqreturn_t i8xx_irq_handler(int irq, void *arg) 3956 { 3957 struct drm_device *dev = arg; 3958 struct drm_i915_private *dev_priv = to_i915(dev); 3959 u16 iir, new_iir; 3960 u32 pipe_stats[2]; 3961 int pipe; 3962 u16 flip_mask = 3963 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3964 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 3965 irqreturn_t ret; 3966 3967 if (!intel_irqs_enabled(dev_priv)) 3968 return IRQ_NONE; 3969 3970 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 3971 disable_rpm_wakeref_asserts(dev_priv); 3972 3973 ret = IRQ_NONE; 3974 iir = I915_READ16(IIR); 3975 if (iir == 0) 3976 goto out; 3977 3978 while (iir & ~flip_mask) { 3979 /* Can't rely on pipestat interrupt bit in iir as it might 3980 * have been cleared after the pipestat interrupt was received. 3981 * It doesn't set the bit in iir again, but it still produces 3982 * interrupts (for non-MSI). 3983 */ 3984 spin_lock(&dev_priv->irq_lock); 3985 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 3986 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir); 3987 3988 for_each_pipe(dev_priv, pipe) { 3989 i915_reg_t reg = PIPESTAT(pipe); 3990 pipe_stats[pipe] = I915_READ(reg); 3991 3992 /* 3993 * Clear the PIPE*STAT regs before the IIR 3994 */ 3995 if (pipe_stats[pipe] & 0x8000ffff) 3996 I915_WRITE(reg, pipe_stats[pipe]); 3997 } 3998 spin_unlock(&dev_priv->irq_lock); 3999 4000 I915_WRITE16(IIR, iir & ~flip_mask); 4001 new_iir = I915_READ16(IIR); /* Flush posted writes */ 4002 4003 if (iir & I915_USER_INTERRUPT) 4004 notify_ring(&dev_priv->engine[RCS]); 4005 4006 for_each_pipe(dev_priv, pipe) { 4007 int plane = pipe; 4008 if (HAS_FBC(dev_priv)) 4009 plane = !plane; 4010 4011 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS && 4012 i8xx_handle_vblank(dev_priv, plane, pipe, iir)) 4013 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane); 4014 4015 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 4016 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 4017 4018 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 4019 intel_cpu_fifo_underrun_irq_handler(dev_priv, 4020 pipe); 4021 } 4022 4023 iir = new_iir; 4024 } 4025 ret = IRQ_HANDLED; 4026 4027 out: 4028 enable_rpm_wakeref_asserts(dev_priv); 4029 4030 return ret; 4031 } 4032 4033 static void i8xx_irq_uninstall(struct drm_device * dev) 4034 { 4035 struct drm_i915_private *dev_priv = to_i915(dev); 4036 int pipe; 4037 4038 for_each_pipe(dev_priv, pipe) { 4039 /* Clear enable bits; then clear status bits */ 4040 I915_WRITE(PIPESTAT(pipe), 0); 4041 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); 4042 } 4043 I915_WRITE16(IMR, 0xffff); 4044 I915_WRITE16(IER, 0x0); 4045 I915_WRITE16(IIR, I915_READ16(IIR)); 4046 } 4047 4048 static void i915_irq_preinstall(struct drm_device * dev) 4049 { 4050 struct drm_i915_private *dev_priv = to_i915(dev); 4051 int pipe; 4052 4053 if (I915_HAS_HOTPLUG(dev)) { 4054 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); 4055 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 4056 } 4057 4058 I915_WRITE16(HWSTAM, 0xeffe); 4059 for_each_pipe(dev_priv, pipe) 4060 I915_WRITE(PIPESTAT(pipe), 0); 4061 I915_WRITE(IMR, 0xffffffff); 4062 I915_WRITE(IER, 0x0); 4063 POSTING_READ(IER); 4064 } 4065 4066 static int i915_irq_postinstall(struct drm_device *dev) 4067 { 4068 
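/* Gen3 postinstall: keep page table and memory refresh errors reported in EMR, unmask the always-on display and render interrupts, and enable hotplug (where present), ASLE and pipe CRC events. */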
struct drm_i915_private *dev_priv = to_i915(dev); 4069 u32 enable_mask; 4070 4071 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); 4072 4073 /* Unmask the interrupts that we always want on. */ 4074 dev_priv->irq_mask = 4075 ~(I915_ASLE_INTERRUPT | 4076 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 4077 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 4078 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 4079 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT); 4080 4081 enable_mask = 4082 I915_ASLE_INTERRUPT | 4083 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 4084 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 4085 I915_USER_INTERRUPT; 4086 4087 if (I915_HAS_HOTPLUG(dev)) { 4088 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); 4089 POSTING_READ(PORT_HOTPLUG_EN); 4090 4091 /* Enable in IER... */ 4092 enable_mask |= I915_DISPLAY_PORT_INTERRUPT; 4093 /* and unmask in IMR */ 4094 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT; 4095 } 4096 4097 I915_WRITE(IMR, dev_priv->irq_mask); 4098 I915_WRITE(IER, enable_mask); 4099 POSTING_READ(IER); 4100 4101 i915_enable_asle_pipestat(dev_priv); 4102 4103 /* Interrupt setup is already guaranteed to be single-threaded, this is 4104 * just to make the assert_spin_locked check happy. */ 4105 spin_lock_irq(&dev_priv->irq_lock); 4106 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 4107 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 4108 spin_unlock_irq(&dev_priv->irq_lock); 4109 4110 return 0; 4111 } 4112 4113 /* 4114 * Returns true when a page flip has completed. 4115 */ 4116 static bool i915_handle_vblank(struct drm_i915_private *dev_priv, 4117 int plane, int pipe, u32 iir) 4118 { 4119 u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane); 4120 4121 if (!intel_pipe_handle_vblank(dev_priv, pipe)) 4122 return false; 4123 4124 if ((iir & flip_pending) == 0) 4125 goto check_page_flip; 4126 4127 /* We detect FlipDone by looking for the change in PendingFlip from '1' 4128 * to '0' on the following vblank, i.e. IIR has the Pendingflip 4129 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence 4130 * the flip is completed (no longer pending). Since this doesn't raise 4131 * an interrupt per se, we watch for the change at vblank. 4132 */ 4133 if (I915_READ(ISR) & flip_pending) 4134 goto check_page_flip; 4135 4136 intel_finish_page_flip_cs(dev_priv, pipe); 4137 return true; 4138 4139 check_page_flip: 4140 intel_check_page_flip(dev_priv, pipe); 4141 return false; 4142 } 4143 4144 static irqreturn_t i915_irq_handler(int irq, void *arg) 4145 { 4146 struct drm_device *dev = arg; 4147 struct drm_i915_private *dev_priv = to_i915(dev); 4148 u32 iir, new_iir, pipe_stats[I915_MAX_PIPES]; 4149 u32 flip_mask = 4150 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 4151 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 4152 int pipe, ret = IRQ_NONE; 4153 4154 if (!intel_irqs_enabled(dev_priv)) 4155 return IRQ_NONE; 4156 4157 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 4158 disable_rpm_wakeref_asserts(dev_priv); 4159 4160 iir = I915_READ(IIR); 4161 do { 4162 bool irq_received = (iir & ~flip_mask) != 0; 4163 bool blc_event = false; 4164 4165 /* Can't rely on pipestat interrupt bit in iir as it might 4166 * have been cleared after the pipestat interrupt was received. 4167 * It doesn't set the bit in iir again, but it still produces 4168 * interrupts (for non-MSI). 
4169 */ 4170 spin_lock(&dev_priv->irq_lock); 4171 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 4172 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir); 4173 4174 for_each_pipe(dev_priv, pipe) { 4175 i915_reg_t reg = PIPESTAT(pipe); 4176 pipe_stats[pipe] = I915_READ(reg); 4177 4178 /* Clear the PIPE*STAT regs before the IIR */ 4179 if (pipe_stats[pipe] & 0x8000ffff) { 4180 I915_WRITE(reg, pipe_stats[pipe]); 4181 irq_received = true; 4182 } 4183 } 4184 spin_unlock(&dev_priv->irq_lock); 4185 4186 if (!irq_received) 4187 break; 4188 4189 /* Consume port. Then clear IIR or we'll miss events */ 4190 if (I915_HAS_HOTPLUG(dev_priv) && 4191 iir & I915_DISPLAY_PORT_INTERRUPT) { 4192 u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv); 4193 if (hotplug_status) 4194 i9xx_hpd_irq_handler(dev_priv, hotplug_status); 4195 } 4196 4197 I915_WRITE(IIR, iir & ~flip_mask); 4198 new_iir = I915_READ(IIR); /* Flush posted writes */ 4199 4200 if (iir & I915_USER_INTERRUPT) 4201 notify_ring(&dev_priv->engine[RCS]); 4202 4203 for_each_pipe(dev_priv, pipe) { 4204 int plane = pipe; 4205 if (HAS_FBC(dev_priv)) 4206 plane = !plane; 4207 4208 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS && 4209 i915_handle_vblank(dev_priv, plane, pipe, iir)) 4210 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane); 4211 4212 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 4213 blc_event = true; 4214 4215 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 4216 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 4217 4218 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 4219 intel_cpu_fifo_underrun_irq_handler(dev_priv, 4220 pipe); 4221 } 4222 4223 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 4224 intel_opregion_asle_intr(dev_priv); 4225 4226 /* With MSI, interrupts are only generated when iir 4227 * transitions from zero to nonzero. If another bit got 4228 * set while we were handling the existing iir bits, then 4229 * we would never get another interrupt. 4230 * 4231 * This is fine on non-MSI as well, as if we hit this path 4232 * we avoid exiting the interrupt handler only to generate 4233 * another one. 4234 * 4235 * Note that for MSI this could cause a stray interrupt report 4236 * if an interrupt landed in the time between writing IIR and 4237 * the posting read. This should be rare enough to never 4238 * trigger the 99% of 100,000 interrupts test for disabling 4239 * stray interrupts. 
4240 */ 4241 ret = IRQ_HANDLED; 4242 iir = new_iir; 4243 } while (iir & ~flip_mask); 4244 4245 enable_rpm_wakeref_asserts(dev_priv); 4246 4247 return ret; 4248 } 4249 4250 static void i915_irq_uninstall(struct drm_device * dev) 4251 { 4252 struct drm_i915_private *dev_priv = to_i915(dev); 4253 int pipe; 4254 4255 if (I915_HAS_HOTPLUG(dev)) { 4256 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); 4257 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 4258 } 4259 4260 I915_WRITE16(HWSTAM, 0xffff); 4261 for_each_pipe(dev_priv, pipe) { 4262 /* Clear enable bits; then clear status bits */ 4263 I915_WRITE(PIPESTAT(pipe), 0); 4264 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); 4265 } 4266 I915_WRITE(IMR, 0xffffffff); 4267 I915_WRITE(IER, 0x0); 4268 4269 I915_WRITE(IIR, I915_READ(IIR)); 4270 } 4271 4272 static void i965_irq_preinstall(struct drm_device * dev) 4273 { 4274 struct drm_i915_private *dev_priv = to_i915(dev); 4275 int pipe; 4276 4277 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); 4278 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 4279 4280 I915_WRITE(HWSTAM, 0xeffe); 4281 for_each_pipe(dev_priv, pipe) 4282 I915_WRITE(PIPESTAT(pipe), 0); 4283 I915_WRITE(IMR, 0xffffffff); 4284 I915_WRITE(IER, 0x0); 4285 POSTING_READ(IER); 4286 } 4287 4288 static int i965_irq_postinstall(struct drm_device *dev) 4289 { 4290 struct drm_i915_private *dev_priv = to_i915(dev); 4291 u32 enable_mask; 4292 u32 error_mask; 4293 4294 /* Unmask the interrupts that we always want on. */ 4295 dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT | 4296 I915_DISPLAY_PORT_INTERRUPT | 4297 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 4298 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 4299 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 4300 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | 4301 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 4302 4303 enable_mask = ~dev_priv->irq_mask; 4304 enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 4305 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT); 4306 enable_mask |= I915_USER_INTERRUPT; 4307 4308 if (IS_G4X(dev_priv)) 4309 enable_mask |= I915_BSD_USER_INTERRUPT; 4310 4311 /* Interrupt setup is already guaranteed to be single-threaded, this is 4312 * just to make the assert_spin_locked check happy. */ 4313 spin_lock_irq(&dev_priv->irq_lock); 4314 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); 4315 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 4316 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 4317 spin_unlock_irq(&dev_priv->irq_lock); 4318 4319 /* 4320 * Enable some error detection, note the instruction error mask 4321 * bit is reserved, so we leave it masked. 
4322 */ 4323 if (IS_G4X(dev_priv)) { 4324 error_mask = ~(GM45_ERROR_PAGE_TABLE | 4325 GM45_ERROR_MEM_PRIV | 4326 GM45_ERROR_CP_PRIV | 4327 I915_ERROR_MEMORY_REFRESH); 4328 } else { 4329 error_mask = ~(I915_ERROR_PAGE_TABLE | 4330 I915_ERROR_MEMORY_REFRESH); 4331 } 4332 I915_WRITE(EMR, error_mask); 4333 4334 I915_WRITE(IMR, dev_priv->irq_mask); 4335 I915_WRITE(IER, enable_mask); 4336 POSTING_READ(IER); 4337 4338 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); 4339 POSTING_READ(PORT_HOTPLUG_EN); 4340 4341 i915_enable_asle_pipestat(dev_priv); 4342 4343 return 0; 4344 } 4345 4346 static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv) 4347 { 4348 u32 hotplug_en; 4349 4350 assert_spin_locked(&dev_priv->irq_lock); 4351 4352 /* Note HDMI and DP share hotplug bits */ 4353 /* enable bits are the same for all generations */ 4354 hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915); 4355 /* Programming the CRT detection parameters tends 4356 to generate a spurious hotplug event about three 4357 seconds later. So just do it once. 4358 */ 4359 if (IS_G4X(dev_priv)) 4360 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64; 4361 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; 4362 4363 /* Ignore TV since it's buggy */ 4364 i915_hotplug_interrupt_update_locked(dev_priv, 4365 HOTPLUG_INT_EN_MASK | 4366 CRT_HOTPLUG_VOLTAGE_COMPARE_MASK | 4367 CRT_HOTPLUG_ACTIVATION_PERIOD_64, 4368 hotplug_en); 4369 } 4370 4371 static irqreturn_t i965_irq_handler(int irq, void *arg) 4372 { 4373 struct drm_device *dev = arg; 4374 struct drm_i915_private *dev_priv = to_i915(dev); 4375 u32 iir, new_iir; 4376 u32 pipe_stats[I915_MAX_PIPES]; 4377 int ret = IRQ_NONE, pipe; 4378 u32 flip_mask = 4379 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 4380 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 4381 4382 if (!intel_irqs_enabled(dev_priv)) 4383 return IRQ_NONE; 4384 4385 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 4386 disable_rpm_wakeref_asserts(dev_priv); 4387 4388 iir = I915_READ(IIR); 4389 4390 for (;;) { 4391 bool irq_received = (iir & ~flip_mask) != 0; 4392 bool blc_event = false; 4393 4394 /* Can't rely on pipestat interrupt bit in iir as it might 4395 * have been cleared after the pipestat interrupt was received. 4396 * It doesn't set the bit in iir again, but it still produces 4397 * interrupts (for non-MSI). 4398 */ 4399 spin_lock(&dev_priv->irq_lock); 4400 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 4401 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir); 4402 4403 for_each_pipe(dev_priv, pipe) { 4404 i915_reg_t reg = PIPESTAT(pipe); 4405 pipe_stats[pipe] = I915_READ(reg); 4406 4407 /* 4408 * Clear the PIPE*STAT regs before the IIR 4409 */ 4410 if (pipe_stats[pipe] & 0x8000ffff) { 4411 I915_WRITE(reg, pipe_stats[pipe]); 4412 irq_received = true; 4413 } 4414 } 4415 spin_unlock(&dev_priv->irq_lock); 4416 4417 if (!irq_received) 4418 break; 4419 4420 ret = IRQ_HANDLED; 4421 4422 /* Consume port. 
Then clear IIR or we'll miss events */ 4423 if (iir & I915_DISPLAY_PORT_INTERRUPT) { 4424 u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv); 4425 if (hotplug_status) 4426 i9xx_hpd_irq_handler(dev_priv, hotplug_status); 4427 } 4428 4429 I915_WRITE(IIR, iir & ~flip_mask); 4430 new_iir = I915_READ(IIR); /* Flush posted writes */ 4431 4432 if (iir & I915_USER_INTERRUPT) 4433 notify_ring(&dev_priv->engine[RCS]); 4434 if (iir & I915_BSD_USER_INTERRUPT) 4435 notify_ring(&dev_priv->engine[VCS]); 4436 4437 for_each_pipe(dev_priv, pipe) { 4438 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS && 4439 i915_handle_vblank(dev_priv, pipe, pipe, iir)) 4440 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe); 4441 4442 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 4443 blc_event = true; 4444 4445 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 4446 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 4447 4448 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 4449 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 4450 } 4451 4452 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 4453 intel_opregion_asle_intr(dev_priv); 4454 4455 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 4456 gmbus_irq_handler(dev_priv); 4457 4458 /* With MSI, interrupts are only generated when iir 4459 * transitions from zero to nonzero. If another bit got 4460 * set while we were handling the existing iir bits, then 4461 * we would never get another interrupt. 4462 * 4463 * This is fine on non-MSI as well, as if we hit this path 4464 * we avoid exiting the interrupt handler only to generate 4465 * another one. 4466 * 4467 * Note that for MSI this could cause a stray interrupt report 4468 * if an interrupt landed in the time between writing IIR and 4469 * the posting read. This should be rare enough to never 4470 * trigger the 99% of 100,000 interrupts test for disabling 4471 * stray interrupts. 4472 */ 4473 iir = new_iir; 4474 } 4475 4476 enable_rpm_wakeref_asserts(dev_priv); 4477 4478 return ret; 4479 } 4480 4481 static void i965_irq_uninstall(struct drm_device * dev) 4482 { 4483 struct drm_i915_private *dev_priv = to_i915(dev); 4484 int pipe; 4485 4486 if (!dev_priv) 4487 return; 4488 4489 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); 4490 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 4491 4492 I915_WRITE(HWSTAM, 0xffffffff); 4493 for_each_pipe(dev_priv, pipe) 4494 I915_WRITE(PIPESTAT(pipe), 0); 4495 I915_WRITE(IMR, 0xffffffff); 4496 I915_WRITE(IER, 0x0); 4497 4498 for_each_pipe(dev_priv, pipe) 4499 I915_WRITE(PIPESTAT(pipe), 4500 I915_READ(PIPESTAT(pipe)) & 0x8000ffff); 4501 I915_WRITE(IIR, I915_READ(IIR)); 4502 } 4503 4504 /** 4505 * intel_irq_init - initializes irq support 4506 * @dev_priv: i915 device instance 4507 * 4508 * This function initializes all the irq support including work items, timers 4509 * and all the vtables. It does not setup the interrupt itself though. 
4510 */
4511 void intel_irq_init(struct drm_i915_private *dev_priv)
4512 {
4513 struct drm_device *dev = &dev_priv->drm;
4514
4515 intel_hpd_init_work(dev_priv);
4516
4517 INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
4518 INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
4519
4520 /* Let's track the enabled rps events */
4521 if (IS_VALLEYVIEW(dev_priv))
4522 /* WaGsvRC0ResidencyMethod:vlv */
4523 dev_priv->pm_rps_events = GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED;
4524 else
4525 dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
4526
4527 dev_priv->rps.pm_intr_keep = 0;
4528
4529 /*
4530 * SNB and IVB can hard hang, while VLV and CHV may hard hang, on a looping
4531 * batchbuffer if GEN6_PM_UP_EI_EXPIRED is masked.
4532 *
4533 * TODO: verify if this can be reproduced on VLV,CHV.
4534 */
4535 if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv))
4536 dev_priv->rps.pm_intr_keep |= GEN6_PM_RP_UP_EI_EXPIRED;
4537
4538 if (INTEL_INFO(dev_priv)->gen >= 8)
4539 dev_priv->rps.pm_intr_keep |= GEN8_PMINTR_REDIRECT_TO_NON_DISP;
4540
4541 INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work,
4542 i915_hangcheck_elapsed);
4543
4544 if (IS_GEN2(dev_priv)) {
4545 dev->max_vblank_count = 0;
4546 dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
4547 } else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
4548 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
4549 dev->driver->get_vblank_counter = g4x_get_vblank_counter;
4550 } else {
4551 dev->driver->get_vblank_counter = i915_get_vblank_counter;
4552 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
4553 }
4554
4555 /*
4556 * Opt out of the vblank disable timer on everything except gen2.
4557 * Gen2 doesn't have a hardware frame counter and so depends on
4558 * vblank interrupts to produce sane vblank sequence numbers.
4559 */ 4560 if (!IS_GEN2(dev_priv)) 4561 dev->vblank_disable_immediate = true; 4562 4563 dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp; 4564 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos; 4565 4566 if (IS_CHERRYVIEW(dev_priv)) { 4567 dev->driver->irq_handler = cherryview_irq_handler; 4568 dev->driver->irq_preinstall = cherryview_irq_preinstall; 4569 dev->driver->irq_postinstall = cherryview_irq_postinstall; 4570 dev->driver->irq_uninstall = cherryview_irq_uninstall; 4571 dev->driver->enable_vblank = valleyview_enable_vblank; 4572 dev->driver->disable_vblank = valleyview_disable_vblank; 4573 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 4574 } else if (IS_VALLEYVIEW(dev_priv)) { 4575 dev->driver->irq_handler = valleyview_irq_handler; 4576 dev->driver->irq_preinstall = valleyview_irq_preinstall; 4577 dev->driver->irq_postinstall = valleyview_irq_postinstall; 4578 dev->driver->irq_uninstall = valleyview_irq_uninstall; 4579 dev->driver->enable_vblank = valleyview_enable_vblank; 4580 dev->driver->disable_vblank = valleyview_disable_vblank; 4581 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 4582 } else if (INTEL_INFO(dev_priv)->gen >= 8) { 4583 dev->driver->irq_handler = gen8_irq_handler; 4584 dev->driver->irq_preinstall = gen8_irq_reset; 4585 dev->driver->irq_postinstall = gen8_irq_postinstall; 4586 dev->driver->irq_uninstall = gen8_irq_uninstall; 4587 dev->driver->enable_vblank = gen8_enable_vblank; 4588 dev->driver->disable_vblank = gen8_disable_vblank; 4589 if (IS_BROXTON(dev)) 4590 dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup; 4591 else if (HAS_PCH_SPT(dev) || HAS_PCH_KBP(dev)) 4592 dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup; 4593 else 4594 dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup; 4595 } else if (HAS_PCH_SPLIT(dev)) { 4596 dev->driver->irq_handler = ironlake_irq_handler; 4597 dev->driver->irq_preinstall = ironlake_irq_reset; 4598 dev->driver->irq_postinstall = ironlake_irq_postinstall; 4599 dev->driver->irq_uninstall = ironlake_irq_uninstall; 4600 dev->driver->enable_vblank = ironlake_enable_vblank; 4601 dev->driver->disable_vblank = ironlake_disable_vblank; 4602 dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup; 4603 } else { 4604 if (IS_GEN2(dev_priv)) { 4605 dev->driver->irq_preinstall = i8xx_irq_preinstall; 4606 dev->driver->irq_postinstall = i8xx_irq_postinstall; 4607 dev->driver->irq_handler = i8xx_irq_handler; 4608 dev->driver->irq_uninstall = i8xx_irq_uninstall; 4609 } else if (IS_GEN3(dev_priv)) { 4610 dev->driver->irq_preinstall = i915_irq_preinstall; 4611 dev->driver->irq_postinstall = i915_irq_postinstall; 4612 dev->driver->irq_uninstall = i915_irq_uninstall; 4613 dev->driver->irq_handler = i915_irq_handler; 4614 } else { 4615 dev->driver->irq_preinstall = i965_irq_preinstall; 4616 dev->driver->irq_postinstall = i965_irq_postinstall; 4617 dev->driver->irq_uninstall = i965_irq_uninstall; 4618 dev->driver->irq_handler = i965_irq_handler; 4619 } 4620 if (I915_HAS_HOTPLUG(dev_priv)) 4621 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 4622 dev->driver->enable_vblank = i915_enable_vblank; 4623 dev->driver->disable_vblank = i915_disable_vblank; 4624 } 4625 } 4626 4627 /** 4628 * intel_irq_install - enables the hardware interrupt 4629 * @dev_priv: i915 device instance 4630 * 4631 * This function enables the hardware interrupt handling, but leaves the hotplug 4632 * handling still disabled. It is called after intel_irq_init(). 
4633 * 4634 * In the driver load and resume code we need working interrupts in a few places 4635 * but don't want to deal with the hassle of concurrent probe and hotplug 4636 * workers. Hence the split into this two-stage approach. 4637 */ 4638 int intel_irq_install(struct drm_i915_private *dev_priv) 4639 { 4640 /* 4641 * We enable some interrupt sources in our postinstall hooks, so mark 4642 * interrupts as enabled _before_ actually enabling them to avoid 4643 * special cases in our ordering checks. 4644 */ 4645 dev_priv->pm.irqs_enabled = true; 4646 4647 return drm_irq_install(&dev_priv->drm, dev_priv->drm.pdev->irq); 4648 } 4649 4650 /** 4651 * intel_irq_uninstall - finilizes all irq handling 4652 * @dev_priv: i915 device instance 4653 * 4654 * This stops interrupt and hotplug handling and unregisters and frees all 4655 * resources acquired in the init functions. 4656 */ 4657 void intel_irq_uninstall(struct drm_i915_private *dev_priv) 4658 { 4659 drm_irq_uninstall(&dev_priv->drm); 4660 intel_hpd_cancel_work(dev_priv); 4661 dev_priv->pm.irqs_enabled = false; 4662 } 4663 4664 /** 4665 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling 4666 * @dev_priv: i915 device instance 4667 * 4668 * This function is used to disable interrupts at runtime, both in the runtime 4669 * pm and the system suspend/resume code. 4670 */ 4671 void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv) 4672 { 4673 dev_priv->drm.driver->irq_uninstall(&dev_priv->drm); 4674 dev_priv->pm.irqs_enabled = false; 4675 synchronize_irq(dev_priv->drm.irq); 4676 } 4677 4678 /** 4679 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling 4680 * @dev_priv: i915 device instance 4681 * 4682 * This function is used to enable interrupts at runtime, both in the runtime 4683 * pm and the system suspend/resume code. 4684 */ 4685 void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv) 4686 { 4687 dev_priv->pm.irqs_enabled = true; 4688 dev_priv->drm.driver->irq_preinstall(&dev_priv->drm); 4689 dev_priv->drm.driver->irq_postinstall(&dev_priv->drm); 4690 } 4691