/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "intel_drv.h"

#define MAX_NOPID ((u32)~0)

/**
 * Interrupts that are always left unmasked.
 *
 * Since pipe events are edge-triggered from the PIPESTAT register to IIR,
 * we leave them always unmasked in IMR and then control enabling them through
 * PIPESTAT alone.
 */
#define I915_INTERRUPT_ENABLE_FIX (I915_ASLE_INTERRUPT |                \
                                   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | \
                                   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT)

/** Interrupts that we mask and unmask at runtime. */
#define I915_INTERRUPT_ENABLE_VAR (I915_USER_INTERRUPT)

/** These are all of the interrupts used by the driver */
#define I915_INTERRUPT_ENABLE_MASK (I915_INTERRUPT_ENABLE_FIX | \
                                    I915_INTERRUPT_ENABLE_VAR)

#define I915_PIPE_VBLANK_STATUS (PIPE_START_VBLANK_INTERRUPT_STATUS |\
                                 PIPE_VBLANK_INTERRUPT_STATUS)

#define I915_PIPE_VBLANK_ENABLE (PIPE_START_VBLANK_INTERRUPT_ENABLE |\
                                 PIPE_VBLANK_INTERRUPT_ENABLE)

#define DRM_I915_VBLANK_PIPE_ALL (DRM_I915_VBLANK_PIPE_A | \
                                  DRM_I915_VBLANK_PIPE_B)

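/*
 * Editor's note (rough map, not authoritative -- see the code below for
 * the actual behaviour): IIR reports pending interrupt sources and is
 * acknowledged by writing the set bits back; IMR masks sources from being
 * latched into IIR and is what i915_enable_irq()/i915_disable_irq() toggle
 * at runtime; IER is the top-level enable programmed once at postinstall
 * time.  Per-pipe display events are handled separately through
 * PIPEASTAT/PIPEBSTAT, as described above.
 */
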
void
i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
        if ((dev_priv->irq_mask_reg & mask) != 0) {
                dev_priv->irq_mask_reg &= ~mask;
                I915_WRITE(IMR, dev_priv->irq_mask_reg);
                (void) I915_READ(IMR);
        }
}

static inline void
i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
        if ((dev_priv->irq_mask_reg & mask) != mask) {
                dev_priv->irq_mask_reg |= mask;
                I915_WRITE(IMR, dev_priv->irq_mask_reg);
                (void) I915_READ(IMR);
        }
}

static inline u32
i915_pipestat(int pipe)
{
        if (pipe == 0)
                return PIPEASTAT;
        if (pipe == 1)
                return PIPEBSTAT;
        BUG();
}

void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
        if ((dev_priv->pipestat[pipe] & mask) != mask) {
                u32 reg = i915_pipestat(pipe);

                dev_priv->pipestat[pipe] |= mask;
                /* Enable the interrupt, clear any pending status */
                I915_WRITE(reg, dev_priv->pipestat[pipe] | (mask >> 16));
                (void) I915_READ(reg);
        }
}

void
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
        if ((dev_priv->pipestat[pipe] & mask) != 0) {
                u32 reg = i915_pipestat(pipe);

                dev_priv->pipestat[pipe] &= ~mask;
                I915_WRITE(reg, dev_priv->pipestat[pipe]);
                (void) I915_READ(reg);
        }
}

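/*
 * Note on the two helpers above: the PIPE(A|B)STAT registers keep the
 * interrupt enable bits in their high half and the corresponding status
 * bits (write 1 to clear) in their low half, which is why
 * i915_enable_pipestat() also writes (mask >> 16) -- it clears any status
 * already pending for the event being enabled.  The 0x8000ffff masks used
 * elsewhere in this file cover the low-half status bits plus the one
 * status bit that lives at bit 31.
 */
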
/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        unsigned long pipeconf = pipe ? PIPEBCONF : PIPEACONF;

        if (I915_READ(pipeconf) & PIPEACONF_ENABLE)
                return 1;

        return 0;
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        unsigned long high_frame;
        unsigned long low_frame;
        u32 high1, high2, low, count;

        high_frame = pipe ? PIPEBFRAMEHIGH : PIPEAFRAMEHIGH;
        low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL;

        if (!i915_pipe_enabled(dev, pipe)) {
                DRM_ERROR("trying to get vblank count for disabled pipe %d\n", pipe);
                return 0;
        }

        /*
         * High & low register fields aren't synchronized, so make sure
         * we get a low value that's stable across two reads of the high
         * register.
         */
        do {
                high1 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
                         PIPE_FRAME_HIGH_SHIFT);
                low = ((I915_READ(low_frame) & PIPE_FRAME_LOW_MASK) >>
                       PIPE_FRAME_LOW_SHIFT);
                high2 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
                         PIPE_FRAME_HIGH_SHIFT);
        } while (high1 != high2);

        count = (high1 << 8) | low;

        return count;
}

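/*
 * The frame counter read above is split across two registers: the high
 * bits in PIPE{A,B}FRAMEHIGH and the low byte in the top of
 * PIPE{A,B}FRAMEPIXEL, for 24 bits in total (which is also why
 * postinstall sets dev->max_vblank_count to 0xffffff).  G4X-class
 * hardware exposes the whole counter in a single register, read below.
 */
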
u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        int reg = pipe ? PIPEB_FRMCOUNT_GM45 : PIPEA_FRMCOUNT_GM45;

        if (!i915_pipe_enabled(dev, pipe)) {
                DRM_ERROR("trying to get vblank count for disabled pipe %d\n", pipe);
                return 0;
        }

        return I915_READ(reg);
}

irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
{
        struct drm_device *dev = (struct drm_device *) arg;
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        struct drm_i915_master_private *master_priv;
        u32 iir, new_iir;
        u32 pipea_stats, pipeb_stats;
        u32 vblank_status;
        u32 vblank_enable;
        int vblank = 0;
        unsigned long irqflags;
        int irq_received;
        int ret = IRQ_NONE;

        atomic_inc(&dev_priv->irq_received);

        iir = I915_READ(IIR);

        if (IS_I965G(dev)) {
                vblank_status = I915_START_VBLANK_INTERRUPT_STATUS;
                vblank_enable = PIPE_START_VBLANK_INTERRUPT_ENABLE;
        } else {
                vblank_status = I915_VBLANK_INTERRUPT_STATUS;
                vblank_enable = I915_VBLANK_INTERRUPT_ENABLE;
        }

        for (;;) {
                irq_received = iir != 0;

                /* Can't rely on pipestat interrupt bit in iir as it might
                 * have been cleared after the pipestat interrupt was received.
                 * It doesn't set the bit in iir again, but it still produces
                 * interrupts (for non-MSI).
                 */
                spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
                pipea_stats = I915_READ(PIPEASTAT);
                pipeb_stats = I915_READ(PIPEBSTAT);

                /*
                 * Clear the PIPE(A|B)STAT regs before the IIR
                 */
                if (pipea_stats & 0x8000ffff) {
                        I915_WRITE(PIPEASTAT, pipea_stats);
                        irq_received = 1;
                }

                if (pipeb_stats & 0x8000ffff) {
                        I915_WRITE(PIPEBSTAT, pipeb_stats);
                        irq_received = 1;
                }
                spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);

                if (!irq_received)
                        break;

                ret = IRQ_HANDLED;

                I915_WRITE(IIR, iir);
                new_iir = I915_READ(IIR); /* Flush posted writes */

                if (dev->primary->master) {
                        master_priv = dev->primary->master->driver_priv;
                        if (master_priv->sarea_priv)
                                master_priv->sarea_priv->last_dispatch =
                                        READ_BREADCRUMB(dev_priv);
                }

                if (iir & I915_USER_INTERRUPT) {
                        dev_priv->mm.irq_gem_seqno = i915_get_gem_seqno(dev);
                        DRM_WAKEUP(&dev_priv->irq_queue);
                }

                if (pipea_stats & vblank_status) {
                        vblank++;
                        drm_handle_vblank(dev, 0);
                }

                if (pipeb_stats & vblank_status) {
                        vblank++;
                        drm_handle_vblank(dev, 1);
                }

                if ((pipeb_stats & I915_LEGACY_BLC_EVENT_STATUS) ||
                    (iir & I915_ASLE_INTERRUPT))
                        opregion_asle_intr(dev);

                /* With MSI, interrupts are only generated when iir
                 * transitions from zero to nonzero.  If another bit got
                 * set while we were handling the existing iir bits, then
                 * we would never get another interrupt.
                 *
                 * This is fine on non-MSI as well, as if we hit this path
                 * we avoid exiting the interrupt handler only to generate
                 * another one.
                 *
                 * Note that for MSI this could cause a stray interrupt report
                 * if an interrupt landed in the time between writing IIR and
                 * the posting read.  This should be rare enough to never
                 * trigger the 99% of 100,000 interrupts test for disabling
                 * stray interrupts.
                 */
                iir = new_iir;
        }

        return ret;
}

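/*
 * The functions below implement the legacy (DRI1-era) breadcrumb scheme:
 * i915_emit_irq() emits ring commands that store an incrementing counter
 * into the hardware status page (MI_STORE_DWORD_INDEX at
 * I915_BREADCRUMB_INDEX) followed by MI_USER_INTERRUPT, and
 * i915_wait_irq() sleeps on irq_queue until READ_BREADCRUMB() shows the
 * ring has passed the requested sequence number.
 */
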
static int i915_emit_irq(struct drm_device * dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
        RING_LOCALS;

        i915_kernel_lost_context(dev);

        DRM_DEBUG("\n");

        dev_priv->counter++;
        if (dev_priv->counter > 0x7FFFFFFFUL)
                dev_priv->counter = 1;
        if (master_priv->sarea_priv)
                master_priv->sarea_priv->last_enqueue = dev_priv->counter;

        BEGIN_LP_RING(4);
        OUT_RING(MI_STORE_DWORD_INDEX);
        OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
        OUT_RING(dev_priv->counter);
        OUT_RING(MI_USER_INTERRUPT);
        ADVANCE_LP_RING();

        return dev_priv->counter;
}

void i915_user_irq_get(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        unsigned long irqflags;

        spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
        if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1))
                i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
        spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
}

void i915_user_irq_put(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        unsigned long irqflags;

        spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
        BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0);
        if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0))
                i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
        spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
}

static int i915_wait_irq(struct drm_device * dev, int irq_nr)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
        int ret = 0;

        DRM_DEBUG("irq_nr=%d breadcrumb=%d\n", irq_nr,
                  READ_BREADCRUMB(dev_priv));

        if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
                if (master_priv->sarea_priv)
                        master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
                return 0;
        }

        if (master_priv->sarea_priv)
                master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;

        i915_user_irq_get(dev);
        DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ,
                    READ_BREADCRUMB(dev_priv) >= irq_nr);
        i915_user_irq_put(dev);

        if (ret == -EBUSY) {
                DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
                          READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
        }

        return ret;
}

/* Needs the lock as it touches the ring.
 */
int i915_irq_emit(struct drm_device *dev, void *data,
                  struct drm_file *file_priv)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        drm_i915_irq_emit_t *emit = data;
        int result;

        if (!dev_priv) {
                DRM_ERROR("called with no initialization\n");
                return -EINVAL;
        }

        RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

        mutex_lock(&dev->struct_mutex);
        result = i915_emit_irq(dev);
        mutex_unlock(&dev->struct_mutex);

        if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
                DRM_ERROR("copy_to_user\n");
                return -EFAULT;
        }

        return 0;
}

/* Doesn't need the hardware lock.
 */
int i915_irq_wait(struct drm_device *dev, void *data,
                  struct drm_file *file_priv)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        drm_i915_irq_wait_t *irqwait = data;

        if (!dev_priv) {
                DRM_ERROR("called with no initialization\n");
                return -EINVAL;
        }

        return i915_wait_irq(dev, irqwait->irq_seq);
}

/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
int i915_enable_vblank(struct drm_device *dev, int pipe)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        unsigned long irqflags;
        int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
        u32 pipeconf;

        pipeconf = I915_READ(pipeconf_reg);
        if (!(pipeconf & PIPEACONF_ENABLE))
                return -EINVAL;

        spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
        if (IS_I965G(dev))
                i915_enable_pipestat(dev_priv, pipe,
                                     PIPE_START_VBLANK_INTERRUPT_ENABLE);
        else
                i915_enable_pipestat(dev_priv, pipe,
                                     PIPE_VBLANK_INTERRUPT_ENABLE);
        spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
        return 0;
}

/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
void i915_disable_vblank(struct drm_device *dev, int pipe)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        unsigned long irqflags;

        spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
        i915_disable_pipestat(dev_priv, pipe,
                              PIPE_VBLANK_INTERRUPT_ENABLE |
                              PIPE_START_VBLANK_INTERRUPT_ENABLE);
        spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
}

void i915_enable_interrupt (struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        opregion_enable_asle(dev);
        dev_priv->irq_enabled = 1;
}


/* Set the vblank monitor pipe
 */
int i915_vblank_pipe_set(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
{
        drm_i915_private_t *dev_priv = dev->dev_private;

        if (!dev_priv) {
                DRM_ERROR("called with no initialization\n");
                return -EINVAL;
        }

        return 0;
}

int i915_vblank_pipe_get(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        drm_i915_vblank_pipe_t *pipe = data;

        if (!dev_priv) {
                DRM_ERROR("called with no initialization\n");
                return -EINVAL;
        }

        pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;

        return 0;
}

/**
 * Schedule buffer swap at given vertical blank.
 */
int i915_vblank_swap(struct drm_device *dev, void *data,
                     struct drm_file *file_priv)
{
        /* The delayed swap mechanism was fundamentally racy, and has been
         * removed.  The model was that the client requested a delayed flip/swap
         * from the kernel, then waited for vblank before continuing to perform
         * rendering.  The problem was that the kernel might wake the client
         * up before it dispatched the vblank swap (since the lock has to be
         * held while touching the ringbuffer), in which case the client would
         * clear and start the next frame before the swap occurred, and
         * flicker would occur in addition to likely missing the vblank.
         *
         * In the absence of this ioctl, userland falls back to a correct path
         * of waiting for a vblank, then dispatching the swap on its own.
         * Context switching to userland and back is plenty fast enough for
         * meeting the requirements of vblank swapping.
         */
        return -EINVAL;
}

/* drm_dma.h hooks
 */
void i915_driver_irq_preinstall(struct drm_device * dev)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

        atomic_set(&dev_priv->irq_received, 0);

        I915_WRITE(HWSTAM, 0xeffe);
        I915_WRITE(PIPEASTAT, 0);
        I915_WRITE(PIPEBSTAT, 0);
        I915_WRITE(IMR, 0xffffffff);
        I915_WRITE(IER, 0x0);
        (void) I915_READ(IER);
}

int i915_driver_irq_postinstall(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

        dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;

        dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */

        /* Unmask the interrupts that we always want on. */
        dev_priv->irq_mask_reg = ~I915_INTERRUPT_ENABLE_FIX;

        dev_priv->pipestat[0] = 0;
        dev_priv->pipestat[1] = 0;

        /* Disable pipe interrupt enables, clear pending pipe status */
        I915_WRITE(PIPEASTAT, I915_READ(PIPEASTAT) & 0x8000ffff);
        I915_WRITE(PIPEBSTAT, I915_READ(PIPEBSTAT) & 0x8000ffff);
        /* Clear pending interrupt status */
        I915_WRITE(IIR, I915_READ(IIR));

        I915_WRITE(IER, I915_INTERRUPT_ENABLE_MASK);
        I915_WRITE(IMR, dev_priv->irq_mask_reg);
        (void) I915_READ(IER);

        opregion_enable_asle(dev);
        DRM_INIT_WAITQUEUE(&dev_priv->irq_queue);

        return 0;
}

void i915_driver_irq_uninstall(struct drm_device * dev)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

        if (!dev_priv)
                return;

        dev_priv->vblank_pipe = 0;

        I915_WRITE(HWSTAM, 0xffffffff);
        I915_WRITE(PIPEASTAT, 0);
        I915_WRITE(PIPEBSTAT, 0);
        I915_WRITE(IMR, 0xffffffff);
        I915_WRITE(IER, 0x0);

        I915_WRITE(PIPEASTAT, I915_READ(PIPEASTAT) & 0x8000ffff);
        I915_WRITE(PIPEBSTAT, I915_READ(PIPEBSTAT) & 0x8000ffff);
        I915_WRITE(IIR, I915_READ(IIR));
}