/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/sysrq.h>
#include <linux/slab.h>
#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

#define MAX_NOPID ((u32)~0)

/**
 * Interrupts that are always left unmasked.
 *
 * Since pipe events are edge-triggered from the PIPESTAT register to IIR,
 * we leave them always unmasked in IMR and then control enabling them through
 * PIPESTAT alone.
 */
#define I915_INTERRUPT_ENABLE_FIX \
	(I915_ASLE_INTERRUPT | \
	 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | \
	 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | \
	 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | \
	 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | \
	 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)

/** Interrupts that we mask and unmask at runtime. */
#define I915_INTERRUPT_ENABLE_VAR (I915_USER_INTERRUPT | I915_BSD_USER_INTERRUPT)

#define I915_PIPE_VBLANK_STATUS	(PIPE_START_VBLANK_INTERRUPT_STATUS |\
				 PIPE_VBLANK_INTERRUPT_STATUS)

#define I915_PIPE_VBLANK_ENABLE	(PIPE_START_VBLANK_INTERRUPT_ENABLE |\
				 PIPE_VBLANK_INTERRUPT_ENABLE)

#define DRM_I915_VBLANK_PIPE_ALL	(DRM_I915_VBLANK_PIPE_A | \
					 DRM_I915_VBLANK_PIPE_B)

/* For display hotplug interrupt */
static void
ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	if ((dev_priv->irq_mask & mask) != 0) {
		dev_priv->irq_mask &= ~mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

static inline void
ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	if ((dev_priv->irq_mask & mask) != mask) {
		dev_priv->irq_mask |= mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	if ((dev_priv->pipestat[pipe] & mask) != mask) {
		u32 reg = PIPESTAT(pipe);

		dev_priv->pipestat[pipe] |= mask;
		/* Enable the interrupt, clear any pending status */
		I915_WRITE(reg, dev_priv->pipestat[pipe] | (mask >> 16));
		POSTING_READ(reg);
	}
}

void
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	if ((dev_priv->pipestat[pipe] & mask) != 0) {
		u32 reg = PIPESTAT(pipe);

		dev_priv->pipestat[pipe] &= ~mask;
		I915_WRITE(reg, dev_priv->pipestat[pipe]);
		POSTING_READ(reg);
	}
}
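/*
 * PIPESTAT keeps the interrupt-enable bits in its high half and the
 * corresponding write-1-to-clear status bits in its low half, which is
 * why i915_enable_pipestat() can ack stale status with (mask >> 16).
 * Illustrative sketch (not driver code) of a caller enabling vblank
 * events on pipe 0 -- pipestat updates must be made under irq_lock:
 *
 *	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
 *	i915_enable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE);
 *	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 */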
/**
 * intel_enable_asle - enable ASLE interrupt for OpRegion
 */
void intel_enable_asle(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	if (HAS_PCH_SPLIT(dev))
		ironlake_enable_display_irq(dev_priv, DE_GSE);
	else {
		i915_enable_pipestat(dev_priv, 1,
				     PIPE_LEGACY_BLC_EVENT_ENABLE);
		if (INTEL_INFO(dev)->gen >= 4)
			i915_enable_pipestat(dev_priv, 0,
					     PIPE_LEGACY_BLC_EVENT_ENABLE);
	}

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low = I915_READ(low_frame) & PIPE_FRAME_LOW_MASK;
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	low >>= PIPE_FRAME_LOW_SHIFT;
	return (high1 << 8) | low;
}

u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int reg = PIPE_FRMCOUNT_GM45(pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	return I915_READ(reg);
}

int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
			     int *vpos, int *hpos)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 vbl = 0, position = 0;
	int vbl_start, vbl_end, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	/* Get vtotal. */
	vtotal = 1 + ((I915_READ(VTOTAL(pipe)) >> 16) & 0x1fff);

	if (INTEL_INFO(dev)->gen >= 4) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = I915_READ(PIPEDSL(pipe));

		/* Decode into vertical scanout position. Don't have
		 * horizontal scanout position.
		 */
		*vpos = position & 0x1fff;
		*hpos = 0;
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		htotal = 1 + ((I915_READ(HTOTAL(pipe)) >> 16) & 0x1fff);
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* Query vblank area. */
	vbl = I915_READ(VBLANK(pipe));

	/* Test position against vblank region. */
	vbl_start = vbl & 0x1fff;
	vbl_end = (vbl >> 16) & 0x1fff;

	if ((*vpos < vbl_start) || (*vpos > vbl_end))
		in_vbl = false;

	/* Inside "upper part" of vblank area? Apply corrective offset: */
	if (in_vbl && (*vpos >= vbl_start))
		*vpos = *vpos - vtotal;

	/* Readouts valid? */
	if (vbl > 0)
		ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_INVBL;

	return ret;
}
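/*
 * Worked example for the pre-gen4 pixelcount decode above (illustrative,
 * with made-up numbers): on a mode with htotal = 1056 total pixels per
 * scanline, a pixelcount of 264000 decodes to
 *
 *	vpos = 264000 / 1056 = 250
 *	hpos = 264000 - 250 * 1056 = 0
 *
 * i.e. the beam is at the start of scanline 250.  Gen4+ only exposes the
 * scanline via PIPEDSL, so hpos is reported as 0 there.
 */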
int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
			      int *max_error,
			      struct timeval *vblank_time,
			      unsigned flags)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;

	if (pipe < 0 || pipe >= dev_priv->num_pipe) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	if (!crtc->enabled) {
		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags,
						     crtc);
}

/*
 * Handle hotplug events outside the interrupt handler proper.
 */
static void i915_hotplug_work_func(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    hotplug_work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *encoder;

	DRM_DEBUG_KMS("running encoder hotplug functions\n");

	list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
		if (encoder->hot_plug)
			encoder->hot_plug(encoder);

	/* Just fire off a uevent and let userspace tell us what to do */
	drm_helper_hpd_irq_event(dev);
}

static void i915_handle_rps_change(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay = dev_priv->cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->cur_delay != dev_priv->max_delay)
			new_delay = dev_priv->cur_delay - 1;
		if (new_delay < dev_priv->max_delay)
			new_delay = dev_priv->max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->cur_delay != dev_priv->min_delay)
			new_delay = dev_priv->cur_delay + 1;
		if (new_delay > dev_priv->min_delay)
			new_delay = dev_priv->min_delay;
	}

	if (ironlake_set_drps(dev, new_delay))
		dev_priv->cur_delay = new_delay;

	return;
}
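/*
 * Note on the clamps above: on Ironlake DRPS a smaller delay value
 * corresponds to a higher GPU frequency, so max_delay is numerically the
 * *lower* bound and min_delay the upper one.  A busy GPU therefore
 * decrements the delay toward max_delay, and an idle one increments it
 * toward min_delay.
 */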
static void notify_ring(struct drm_device *dev,
			struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 seqno;

	if (ring->obj == NULL)
		return;

	seqno = ring->get_seqno(ring);
	trace_i915_gem_request_complete(ring, seqno);

	ring->irq_seqno = seqno;
	wake_up_all(&ring->irq_queue);

	dev_priv->hangcheck_count = 0;
	mod_timer(&dev_priv->hangcheck_timer,
		  jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
}

static void gen6_pm_irq_handler(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u8 new_delay = dev_priv->cur_delay;
	u32 pm_iir;

	pm_iir = I915_READ(GEN6_PMIIR);
	if (!pm_iir)
		return;

	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		if (dev_priv->cur_delay != dev_priv->max_delay)
			new_delay = dev_priv->cur_delay + 1;
		if (new_delay > dev_priv->max_delay)
			new_delay = dev_priv->max_delay;
	} else if (pm_iir & (GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT)) {
		if (dev_priv->cur_delay != dev_priv->min_delay)
			new_delay = dev_priv->cur_delay - 1;
		if (new_delay < dev_priv->min_delay) {
			new_delay = dev_priv->min_delay;
			I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
				   I915_READ(GEN6_RP_INTERRUPT_LIMITS) |
				   ((new_delay << 16) & 0x3f0000));
		} else {
			/* Make sure we continue to get down interrupts
			 * until we hit the minimum frequency */
			I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
				   I915_READ(GEN6_RP_INTERRUPT_LIMITS) & ~0x3f0000);
		}
	}

	gen6_set_rps(dev, new_delay);
	dev_priv->cur_delay = new_delay;

	I915_WRITE(GEN6_PMIIR, pm_iir);
}

static void pch_irq_handler(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 pch_iir;
	int pipe;

	pch_iir = I915_READ(SDEIIR);

	if (pch_iir & SDE_AUDIO_POWER_MASK)
		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
				 (pch_iir & SDE_AUDIO_POWER_MASK) >>
				 SDE_AUDIO_POWER_SHIFT);

	if (pch_iir & SDE_GMBUS)
		DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n");

	if (pch_iir & SDE_AUDIO_HDCP_MASK)
		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");

	if (pch_iir & SDE_AUDIO_TRANS_MASK)
		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");

	if (pch_iir & SDE_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (pch_iir & SDE_FDI_MASK)
		for_each_pipe(pipe)
			DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");

	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");

	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
		DRM_DEBUG_DRIVER("PCH transcoder B underrun interrupt\n");
	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
		DRM_DEBUG_DRIVER("PCH transcoder A underrun interrupt\n");
}

static irqreturn_t ironlake_irq_handler(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int ret = IRQ_NONE;
	u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
	u32 hotplug_mask;
	struct drm_i915_master_private *master_priv;
	u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;

	if (IS_GEN6(dev))
		bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT;

	/* disable master interrupt before clearing iir */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
	POSTING_READ(DEIER);

	de_iir = I915_READ(DEIIR);
	gt_iir = I915_READ(GTIIR);
	pch_iir = I915_READ(SDEIIR);
	pm_iir = I915_READ(GEN6_PMIIR);

	if (de_iir == 0 && gt_iir == 0 && pch_iir == 0 &&
	    (!IS_GEN6(dev) || pm_iir == 0))
		goto done;

	if (HAS_PCH_CPT(dev))
		hotplug_mask = SDE_HOTPLUG_MASK_CPT;
	else
		hotplug_mask = SDE_HOTPLUG_MASK;

	ret = IRQ_HANDLED;

	if (dev->primary->master) {
		master_priv = dev->primary->master->driver_priv;
		if (master_priv->sarea_priv)
			master_priv->sarea_priv->last_dispatch =
				READ_BREADCRUMB(dev_priv);
	}

	if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & bsd_usr_interrupt)
		notify_ring(dev, &dev_priv->ring[VCS]);
	if (gt_iir & GT_BLT_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[BCS]);

	if (de_iir & DE_GSE)
		intel_opregion_gse_intr(dev);

	if (de_iir & DE_PLANEA_FLIP_DONE) {
		intel_prepare_page_flip(dev, 0);
		intel_finish_page_flip_plane(dev, 0);
	}

	if (de_iir & DE_PLANEB_FLIP_DONE) {
		intel_prepare_page_flip(dev, 1);
		intel_finish_page_flip_plane(dev, 1);
	}

	if (de_iir & DE_PIPEA_VBLANK)
		drm_handle_vblank(dev, 0);

	if (de_iir & DE_PIPEB_VBLANK)
		drm_handle_vblank(dev, 1);

	/* check event from PCH */
	if (de_iir & DE_PCH_EVENT) {
		if (pch_iir & hotplug_mask)
			queue_work(dev_priv->wq, &dev_priv->hotplug_work);
		pch_irq_handler(dev);
	}

	if (de_iir & DE_PCU_EVENT) {
		I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
		i915_handle_rps_change(dev);
	}

	if (IS_GEN6(dev))
		gen6_pm_irq_handler(dev);

	/* should clear PCH hotplug event before clearing CPU irq */
	I915_WRITE(SDEIIR, pch_iir);
	I915_WRITE(GTIIR, gt_iir);
	I915_WRITE(DEIIR, de_iir);

done:
	I915_WRITE(DEIER, de_ier);
	POSTING_READ(DEIER);

	return ret;
}
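/*
 * PCH (south display) interrupts cascade into the CPU's display engine:
 * a bit latched in SDEIIR raises DE_PCH_EVENT in DEIIR, which in turn
 * feeds the master interrupt.  That is why the handler above acks the
 * registers innermost-first -- sketched, the required ordering is:
 *
 *	I915_WRITE(SDEIIR, pch_iir);	// clear the cascaded source first
 *	I915_WRITE(GTIIR, gt_iir);
 *	I915_WRITE(DEIIR, de_iir);	// then the aggregating CPU bits
 *
 * Acking DEIIR first could leave DE_PCH_EVENT immediately re-asserted by
 * the still-pending SDEIIR bit and spuriously retrigger the handler.
 */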
/**
 * i915_error_work_func - do process context error handling work
 * @work: work struct
 *
 * Fire an error uevent so userspace can see that a hang or error
 * was detected.
 */
static void i915_error_work_func(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    error_work);
	struct drm_device *dev = dev_priv->dev;
	char *error_event[] = { "ERROR=1", NULL };
	char *reset_event[] = { "RESET=1", NULL };
	char *reset_done_event[] = { "ERROR=0", NULL };

	kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);

	if (atomic_read(&dev_priv->mm.wedged)) {
		DRM_DEBUG_DRIVER("resetting chip\n");
		kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event);
		if (!i915_reset(dev, GRDOM_RENDER)) {
			atomic_set(&dev_priv->mm.wedged, 0);
			kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event);
		}
		complete_all(&dev_priv->error_completion);
	}
}

#ifdef CONFIG_DEBUG_FS
static struct drm_i915_error_object *
i915_error_object_create(struct drm_i915_private *dev_priv,
			 struct drm_i915_gem_object *src)
{
	struct drm_i915_error_object *dst;
	int page, page_count;
	u32 reloc_offset;

	if (src == NULL || src->pages == NULL)
		return NULL;

	page_count = src->base.size / PAGE_SIZE;

	dst = kmalloc(sizeof(*dst) + page_count * sizeof (u32 *), GFP_ATOMIC);
	if (dst == NULL)
		return NULL;

	reloc_offset = src->gtt_offset;
	for (page = 0; page < page_count; page++) {
		unsigned long flags;
		void __iomem *s;
		void *d;

		d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
		if (d == NULL)
			goto unwind;

		local_irq_save(flags);
		s = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
					     reloc_offset);
		memcpy_fromio(d, s, PAGE_SIZE);
		io_mapping_unmap_atomic(s);
		local_irq_restore(flags);

		dst->pages[page] = d;

		reloc_offset += PAGE_SIZE;
	}
	dst->page_count = page_count;
	dst->gtt_offset = src->gtt_offset;

	return dst;

unwind:
	while (page--)
		kfree(dst->pages[page]);
	kfree(dst);
	return NULL;
}
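/*
 * i915_error_object_create() can run in interrupt context, hence the
 * GFP_ATOMIC allocations and the atomic GTT mapping: each page of the
 * source object is copied out through a short-lived WC mapping taken
 * with local interrupts disabled, so nothing can reuse the per-CPU
 * mapping slot mid-copy.  The single kmalloc sizes the header together
 * with its trailing array of page pointers, e.g. for an object of n
 * pages:
 *
 *	kmalloc(sizeof(*dst) + n * sizeof(u32 *), GFP_ATOMIC);
 */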
static void
i915_error_object_free(struct drm_i915_error_object *obj)
{
	int page;

	if (obj == NULL)
		return;

	for (page = 0; page < obj->page_count; page++)
		kfree(obj->pages[page]);

	kfree(obj);
}

static void
i915_error_state_free(struct drm_device *dev,
		      struct drm_i915_error_state *error)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(error->batchbuffer); i++)
		i915_error_object_free(error->batchbuffer[i]);

	for (i = 0; i < ARRAY_SIZE(error->ringbuffer); i++)
		i915_error_object_free(error->ringbuffer[i]);

	kfree(error->active_bo);
	kfree(error->overlay);
	kfree(error);
}

static u32 capture_bo_list(struct drm_i915_error_buffer *err,
			   int count,
			   struct list_head *head)
{
	struct drm_i915_gem_object *obj;
	int i = 0;

	list_for_each_entry(obj, head, mm_list) {
		err->size = obj->base.size;
		err->name = obj->base.name;
		err->seqno = obj->last_rendering_seqno;
		err->gtt_offset = obj->gtt_offset;
		err->read_domains = obj->base.read_domains;
		err->write_domain = obj->base.write_domain;
		err->fence_reg = obj->fence_reg;
		err->pinned = 0;
		if (obj->pin_count > 0)
			err->pinned = 1;
		if (obj->user_pin_count > 0)
			err->pinned = -1;
		err->tiling = obj->tiling_mode;
		err->dirty = obj->dirty;
		err->purgeable = obj->madv != I915_MADV_WILLNEED;
		err->ring = obj->ring ? obj->ring->id : 0;
		err->agp_type = obj->agp_type == AGP_USER_CACHED_MEMORY;

		if (++i == count)
			break;

		err++;
	}

	return i;
}

static void i915_gem_record_fences(struct drm_device *dev,
				   struct drm_i915_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	/* Fences */
	switch (INTEL_INFO(dev)->gen) {
	case 6:
		for (i = 0; i < 16; i++)
			error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
		break;
	case 5:
	case 4:
		for (i = 0; i < 16; i++)
			error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
		break;
	case 3:
		if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
			for (i = 0; i < 8; i++)
				error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
		/* fall through to record the first eight fences as well */
	case 2:
		for (i = 0; i < 8; i++)
			error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
		break;
	}
}

static struct drm_i915_error_object *
i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
			     struct intel_ring_buffer *ring)
{
	struct drm_i915_gem_object *obj;
	u32 seqno;

	if (!ring->get_seqno)
		return NULL;

	seqno = ring->get_seqno(ring);
	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
		if (obj->ring != ring)
			continue;

		if (i915_seqno_passed(seqno, obj->last_rendering_seqno))
			continue;

		if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
			continue;

		/* We need to copy these to an anonymous buffer as the simplest
		 * method to avoid being overwritten by userspace.
		 */
		return i915_error_object_create(dev_priv, obj);
	}

	return NULL;
}

/**
 * i915_capture_error_state - capture an error record for later analysis
 * @dev: drm device
 *
 * Should be called when an error is detected (either a hang or an error
 * interrupt) to capture error state from the time of the error.  Fills
 * out a structure which becomes available in debugfs for user level tools
 * to pick up.
 */
static void i915_capture_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	struct drm_i915_error_state *error;
	unsigned long flags;
	int i, pipe;

	spin_lock_irqsave(&dev_priv->error_lock, flags);
	error = dev_priv->first_error;
	spin_unlock_irqrestore(&dev_priv->error_lock, flags);
	if (error)
		return;

	/* Account for pipe specific data like PIPE*STAT */
	error = kmalloc(sizeof(*error), GFP_ATOMIC);
	if (!error) {
		DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
		return;
	}

	DRM_INFO("capturing error event; look for more information in /debug/dri/%d/i915_error_state\n",
		 dev->primary->index);

	error->seqno = dev_priv->ring[RCS].get_seqno(&dev_priv->ring[RCS]);
	error->eir = I915_READ(EIR);
	error->pgtbl_er = I915_READ(PGTBL_ER);
	for_each_pipe(pipe)
		error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));
	error->instpm = I915_READ(INSTPM);
	error->error = 0;
	if (INTEL_INFO(dev)->gen >= 6) {
		error->error = I915_READ(ERROR_GEN6);

		error->bcs_acthd = I915_READ(BCS_ACTHD);
		error->bcs_ipehr = I915_READ(BCS_IPEHR);
		error->bcs_ipeir = I915_READ(BCS_IPEIR);
		error->bcs_instdone = I915_READ(BCS_INSTDONE);
		error->bcs_seqno = 0;
		if (dev_priv->ring[BCS].get_seqno)
			error->bcs_seqno = dev_priv->ring[BCS].get_seqno(&dev_priv->ring[BCS]);

		error->vcs_acthd = I915_READ(VCS_ACTHD);
		error->vcs_ipehr = I915_READ(VCS_IPEHR);
		error->vcs_ipeir = I915_READ(VCS_IPEIR);
		error->vcs_instdone = I915_READ(VCS_INSTDONE);
		error->vcs_seqno = 0;
		if (dev_priv->ring[VCS].get_seqno)
			error->vcs_seqno = dev_priv->ring[VCS].get_seqno(&dev_priv->ring[VCS]);
	}
	if (INTEL_INFO(dev)->gen >= 4) {
		error->ipeir = I915_READ(IPEIR_I965);
		error->ipehr = I915_READ(IPEHR_I965);
		error->instdone = I915_READ(INSTDONE_I965);
		error->instps = I915_READ(INSTPS);
		error->instdone1 = I915_READ(INSTDONE1);
		error->acthd = I915_READ(ACTHD_I965);
		error->bbaddr = I915_READ64(BB_ADDR);
	} else {
		error->ipeir = I915_READ(IPEIR);
		error->ipehr = I915_READ(IPEHR);
		error->instdone = I915_READ(INSTDONE);
		error->acthd = I915_READ(ACTHD);
		error->bbaddr = 0;
	}
	i915_gem_record_fences(dev, error);

	/* Record the active batch and ring buffers */
	for (i = 0; i < I915_NUM_RINGS; i++) {
		error->batchbuffer[i] =
			i915_error_first_batchbuffer(dev_priv,
						     &dev_priv->ring[i]);

		error->ringbuffer[i] =
			i915_error_object_create(dev_priv,
						 dev_priv->ring[i].obj);
	}

	/* Record buffers on the active and pinned lists. */
	error->active_bo = NULL;
	error->pinned_bo = NULL;

	i = 0;
	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)
		i++;
	error->active_bo_count = i;
	list_for_each_entry(obj, &dev_priv->mm.pinned_list, mm_list)
		i++;
	error->pinned_bo_count = i - error->active_bo_count;

	if (i) {
		error->active_bo = kmalloc(sizeof(*error->active_bo)*i,
					   GFP_ATOMIC);
		if (error->active_bo)
			error->pinned_bo =
				error->active_bo + error->active_bo_count;
	}

	if (error->active_bo)
		error->active_bo_count =
			capture_bo_list(error->active_bo,
					error->active_bo_count,
					&dev_priv->mm.active_list);

	if (error->pinned_bo)
		error->pinned_bo_count =
			capture_bo_list(error->pinned_bo,
					error->pinned_bo_count,
					&dev_priv->mm.pinned_list);

	do_gettimeofday(&error->time);

	error->overlay = intel_overlay_capture_error_state(dev);
	error->display = intel_display_capture_error_state(dev);

	spin_lock_irqsave(&dev_priv->error_lock, flags);
	if (dev_priv->first_error == NULL) {
		dev_priv->first_error = error;
		error = NULL;
	}
	spin_unlock_irqrestore(&dev_priv->error_lock, flags);

	if (error)
		i915_error_state_free(dev, error);
}

void i915_destroy_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error;

	spin_lock(&dev_priv->error_lock);
	error = dev_priv->first_error;
	dev_priv->first_error = NULL;
	spin_unlock(&dev_priv->error_lock);

	if (error)
		i915_error_state_free(dev, error);
}
#else
#define i915_capture_error_state(x)
#endif

static void i915_report_and_clear_eir(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 eir = I915_READ(EIR);
	int pipe;

	if (!eir)
		return;

	printk(KERN_ERR "render error detected, EIR: 0x%08x\n",
	       eir);

	if (IS_G4X(dev)) {
		if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
			u32 ipeir = I915_READ(IPEIR_I965);

			printk(KERN_ERR " IPEIR: 0x%08x\n",
			       I915_READ(IPEIR_I965));
			printk(KERN_ERR " IPEHR: 0x%08x\n",
			       I915_READ(IPEHR_I965));
			printk(KERN_ERR " INSTDONE: 0x%08x\n",
			       I915_READ(INSTDONE_I965));
			printk(KERN_ERR " INSTPS: 0x%08x\n",
			       I915_READ(INSTPS));
			printk(KERN_ERR " INSTDONE1: 0x%08x\n",
			       I915_READ(INSTDONE1));
			printk(KERN_ERR " ACTHD: 0x%08x\n",
			       I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
		if (eir & GM45_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			printk(KERN_ERR "page table error\n");
			printk(KERN_ERR " PGTBL_ER: 0x%08x\n",
			       pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (!IS_GEN2(dev)) {
		if (eir & I915_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			printk(KERN_ERR "page table error\n");
			printk(KERN_ERR " PGTBL_ER: 0x%08x\n",
			       pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (eir & I915_ERROR_MEMORY_REFRESH) {
		printk(KERN_ERR "memory refresh error:\n");
		for_each_pipe(pipe)
			printk(KERN_ERR "pipe %c stat: 0x%08x\n",
			       pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
		/* pipestat has already been acked */
	}
	if (eir & I915_ERROR_INSTRUCTION) {
		printk(KERN_ERR "instruction error\n");
error\n"); 964 printk(KERN_ERR " INSTPM: 0x%08x\n", 965 I915_READ(INSTPM)); 966 if (INTEL_INFO(dev)->gen < 4) { 967 u32 ipeir = I915_READ(IPEIR); 968 969 printk(KERN_ERR " IPEIR: 0x%08x\n", 970 I915_READ(IPEIR)); 971 printk(KERN_ERR " IPEHR: 0x%08x\n", 972 I915_READ(IPEHR)); 973 printk(KERN_ERR " INSTDONE: 0x%08x\n", 974 I915_READ(INSTDONE)); 975 printk(KERN_ERR " ACTHD: 0x%08x\n", 976 I915_READ(ACTHD)); 977 I915_WRITE(IPEIR, ipeir); 978 POSTING_READ(IPEIR); 979 } else { 980 u32 ipeir = I915_READ(IPEIR_I965); 981 982 printk(KERN_ERR " IPEIR: 0x%08x\n", 983 I915_READ(IPEIR_I965)); 984 printk(KERN_ERR " IPEHR: 0x%08x\n", 985 I915_READ(IPEHR_I965)); 986 printk(KERN_ERR " INSTDONE: 0x%08x\n", 987 I915_READ(INSTDONE_I965)); 988 printk(KERN_ERR " INSTPS: 0x%08x\n", 989 I915_READ(INSTPS)); 990 printk(KERN_ERR " INSTDONE1: 0x%08x\n", 991 I915_READ(INSTDONE1)); 992 printk(KERN_ERR " ACTHD: 0x%08x\n", 993 I915_READ(ACTHD_I965)); 994 I915_WRITE(IPEIR_I965, ipeir); 995 POSTING_READ(IPEIR_I965); 996 } 997 } 998 999 I915_WRITE(EIR, eir); 1000 POSTING_READ(EIR); 1001 eir = I915_READ(EIR); 1002 if (eir) { 1003 /* 1004 * some errors might have become stuck, 1005 * mask them. 1006 */ 1007 DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir); 1008 I915_WRITE(EMR, I915_READ(EMR) | eir); 1009 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 1010 } 1011 } 1012 1013 /** 1014 * i915_handle_error - handle an error interrupt 1015 * @dev: drm device 1016 * 1017 * Do some basic checking of regsiter state at error interrupt time and 1018 * dump it to the syslog. Also call i915_capture_error_state() to make 1019 * sure we get a record and make it available in debugfs. Fire a uevent 1020 * so userspace knows something bad happened (should trigger collection 1021 * of a ring dump etc.). 1022 */ 1023 void i915_handle_error(struct drm_device *dev, bool wedged) 1024 { 1025 struct drm_i915_private *dev_priv = dev->dev_private; 1026 1027 i915_capture_error_state(dev); 1028 i915_report_and_clear_eir(dev); 1029 1030 if (wedged) { 1031 INIT_COMPLETION(dev_priv->error_completion); 1032 atomic_set(&dev_priv->mm.wedged, 1); 1033 1034 /* 1035 * Wakeup waiting processes so they don't hang 1036 */ 1037 wake_up_all(&dev_priv->ring[RCS].irq_queue); 1038 if (HAS_BSD(dev)) 1039 wake_up_all(&dev_priv->ring[VCS].irq_queue); 1040 if (HAS_BLT(dev)) 1041 wake_up_all(&dev_priv->ring[BCS].irq_queue); 1042 } 1043 1044 queue_work(dev_priv->wq, &dev_priv->error_work); 1045 } 1046 1047 static void i915_pageflip_stall_check(struct drm_device *dev, int pipe) 1048 { 1049 drm_i915_private_t *dev_priv = dev->dev_private; 1050 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 1051 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1052 struct drm_i915_gem_object *obj; 1053 struct intel_unpin_work *work; 1054 unsigned long flags; 1055 bool stall_detected; 1056 1057 /* Ignore early vblank irqs */ 1058 if (intel_crtc == NULL) 1059 return; 1060 1061 spin_lock_irqsave(&dev->event_lock, flags); 1062 work = intel_crtc->unpin_work; 1063 1064 if (work == NULL || work->pending || !work->enable_stall_check) { 1065 /* Either the pending flip IRQ arrived, or we're too early. 
		spin_unlock_irqrestore(&dev->event_lock, flags);
		return;
	}

	/* Potential stall - if we see that the flip has happened, assume a missed interrupt */
	obj = work->pending_flip_obj;
	if (INTEL_INFO(dev)->gen >= 4) {
		int dspsurf = DSPSURF(intel_crtc->plane);
		stall_detected = I915_READ(dspsurf) == obj->gtt_offset;
	} else {
		int dspaddr = DSPADDR(intel_crtc->plane);
		stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
							crtc->y * crtc->fb->pitch +
							crtc->x * crtc->fb->bits_per_pixel/8);
	}

	spin_unlock_irqrestore(&dev->event_lock, flags);

	if (stall_detected) {
		DRM_DEBUG_DRIVER("Pageflip stall detected\n");
		intel_prepare_page_flip(dev, intel_crtc->plane);
	}
}

irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_i915_master_private *master_priv;
	u32 iir, new_iir;
	u32 pipe_stats[I915_MAX_PIPES];
	u32 vblank_status;
	int vblank = 0;
	unsigned long irqflags;
	int irq_received;
	int ret = IRQ_NONE, pipe;
	bool blc_event = false;

	atomic_inc(&dev_priv->irq_received);

	if (HAS_PCH_SPLIT(dev))
		return ironlake_irq_handler(dev);

	iir = I915_READ(IIR);

	if (INTEL_INFO(dev)->gen >= 4)
		vblank_status = PIPE_START_VBLANK_INTERRUPT_STATUS;
	else
		vblank_status = PIPE_VBLANK_INTERRUPT_STATUS;

	for (;;) {
		irq_received = iir != 0;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = 1;
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		if (!irq_received)
			break;

		ret = IRQ_HANDLED;

		/* Consume port.  Then clear IIR or we'll miss events */
		if ((I915_HAS_HOTPLUG(dev)) &&
		    (iir & I915_DISPLAY_PORT_INTERRUPT)) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);
			if (hotplug_status & dev_priv->hotplug_supported_mask)
				queue_work(dev_priv->wq,
					   &dev_priv->hotplug_work);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			I915_READ(PORT_HOTPLUG_STAT);
		}

		I915_WRITE(IIR, iir);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (dev->primary->master) {
			master_priv = dev->primary->master->driver_priv;
			if (master_priv->sarea_priv)
				master_priv->sarea_priv->last_dispatch =
					READ_BREADCRUMB(dev_priv);
		}

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);
		if (iir & I915_BSD_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[VCS]);

		if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) {
			intel_prepare_page_flip(dev, 0);
			if (dev_priv->flip_pending_is_done)
				intel_finish_page_flip_plane(dev, 0);
		}

		if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT) {
			intel_prepare_page_flip(dev, 1);
			if (dev_priv->flip_pending_is_done)
				intel_finish_page_flip_plane(dev, 1);
		}

		for_each_pipe(pipe) {
			if (pipe_stats[pipe] & vblank_status &&
			    drm_handle_vblank(dev, pipe)) {
				vblank++;
				if (!dev_priv->flip_pending_is_done) {
					i915_pageflip_stall_check(dev, pipe);
					intel_finish_page_flip(dev, pipe);
				}
			}

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero.  If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read.  This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		iir = new_iir;
	}

	return ret;
}

static int i915_emit_irq(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;

	i915_kernel_lost_context(dev);

	DRM_DEBUG_DRIVER("\n");

	dev_priv->counter++;
	if (dev_priv->counter > 0x7FFFFFFFUL)
		dev_priv->counter = 1;
	if (master_priv->sarea_priv)
		master_priv->sarea_priv->last_enqueue = dev_priv->counter;

	if (BEGIN_LP_RING(4) == 0) {
		OUT_RING(MI_STORE_DWORD_INDEX);
		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
		OUT_RING(dev_priv->counter);
		OUT_RING(MI_USER_INTERRUPT);
		ADVANCE_LP_RING();
	}

	return dev_priv->counter;
}

static int i915_wait_irq(struct drm_device * dev, int irq_nr)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	int ret = 0;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr,
			 READ_BREADCRUMB(dev_priv));

	if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
		if (master_priv->sarea_priv)
			master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
		return 0;
	}

	if (master_priv->sarea_priv)
		master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;

	if (ring->irq_get(ring)) {
		DRM_WAIT_ON(ret, ring->irq_queue, 3 * DRM_HZ,
			    READ_BREADCRUMB(dev_priv) >= irq_nr);
		ring->irq_put(ring);
	} else if (wait_for(READ_BREADCRUMB(dev_priv) >= irq_nr, 3000))
		ret = -EBUSY;

	if (ret == -EBUSY) {
		DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
			  READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
	}

	return ret;
}

/* Needs the lock as it touches the ring.
 */
int i915_irq_emit(struct drm_device *dev, void *data,
		  struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_irq_emit_t *emit = data;
	int result;

	if (!dev_priv || !LP_RING(dev_priv)->virtual_start) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	mutex_lock(&dev->struct_mutex);
	result = i915_emit_irq(dev);
	mutex_unlock(&dev->struct_mutex);

	if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
		DRM_ERROR("copy_to_user\n");
		return -EFAULT;
	}

	return 0;
}
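/*
 * The breadcrumb emitted above is a four-dword packet: MI_STORE_DWORD_INDEX
 * writes the new counter value into the hardware status page at
 * I915_BREADCRUMB_INDEX, and MI_USER_INTERRUPT raises an interrupt once it
 * executes; READ_BREADCRUMB() simply reads that status-page slot back.  A
 * legacy client pairs the two ioctls roughly like this (illustrative
 * userspace-style sketch using libdrm, error handling omitted):
 *
 *	int seq;
 *	drm_i915_irq_emit_t emit = { .irq_seq = &seq };
 *	drmCommandWriteRead(fd, DRM_I915_IRQ_EMIT, &emit, sizeof(emit));
 *	drm_i915_irq_wait_t wait = { .irq_seq = seq };
 *	drmCommandWrite(fd, DRM_I915_IRQ_WAIT, &wait, sizeof(wait));
 */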
/* Doesn't need the hardware lock.
 */
int i915_irq_wait(struct drm_device *dev, void *data,
		  struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_irq_wait_t *irqwait = data;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	return i915_wait_irq(dev, irqwait->irq_seq);
}

/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
int i915_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (HAS_PCH_SPLIT(dev))
		ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
					    DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
	else if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_START_VBLANK_INTERRUPT_ENABLE);
	else
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_VBLANK_INTERRUPT_ENABLE);

	/* maintain vblank delivery even in deep C-states */
	if (dev_priv->info->gen == 3)
		I915_WRITE(INSTPM, INSTPM_AGPBUSY_DIS << 16);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
void i915_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (dev_priv->info->gen == 3)
		I915_WRITE(INSTPM,
			   INSTPM_AGPBUSY_DIS << 16 | INSTPM_AGPBUSY_DIS);

	if (HAS_PCH_SPLIT(dev))
		ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
					     DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
	else
		i915_disable_pipestat(dev_priv, pipe,
				      PIPE_VBLANK_INTERRUPT_ENABLE |
				      PIPE_START_VBLANK_INTERRUPT_ENABLE);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

/* Set the vblank monitor pipe
 */
int i915_vblank_pipe_set(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	return 0;
}

int i915_vblank_pipe_get(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_vblank_pipe_t *pipe = data;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;

	return 0;
}

/**
 * Schedule buffer swap at given vertical blank.
 */
int i915_vblank_swap(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	/* The delayed swap mechanism was fundamentally racy, and has been
	 * removed.  The model was that the client requested a delayed flip/swap
	 * from the kernel, then waited for vblank before continuing to perform
	 * rendering.
	 * The problem was that the kernel might wake the client
	 * up before it dispatched the vblank swap (since the lock has to be
	 * held while touching the ringbuffer), in which case the client would
	 * clear and start the next frame before the swap occurred, and
	 * flicker would occur in addition to likely missing the vblank.
	 *
	 * In the absence of this ioctl, userland falls back to a correct path
	 * of waiting for a vblank, then dispatching the swap on its own.
	 * Context switching to userland and back is plenty fast enough for
	 * meeting the requirements of vblank swapping.
	 */
	return -EINVAL;
}

static u32
ring_last_seqno(struct intel_ring_buffer *ring)
{
	return list_entry(ring->request_list.prev,
			  struct drm_i915_gem_request, list)->seqno;
}

static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err)
{
	if (list_empty(&ring->request_list) ||
	    i915_seqno_passed(ring->get_seqno(ring), ring_last_seqno(ring))) {
		/* Issue a wake-up to catch stuck h/w. */
		if (ring->waiting_seqno && waitqueue_active(&ring->irq_queue)) {
			DRM_ERROR("Hangcheck timer elapsed... %s idle [waiting on %d, at %d], missed IRQ?\n",
				  ring->name,
				  ring->waiting_seqno,
				  ring->get_seqno(ring));
			wake_up_all(&ring->irq_queue);
			*err = true;
		}
		return true;
	}
	return false;
}
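/*
 * Seqnos are compared with i915_seqno_passed() rather than a plain ">="
 * so that the 32-bit counter can wrap safely.  The comparison (defined
 * elsewhere in the driver) is effectively
 *
 *	(s32)(seq1 - seq2) >= 0
 *
 * so, e.g., seqno 0x00000002 is correctly treated as "after" 0xfffffffe
 * even though it is numerically smaller.
 */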
static bool kick_ring(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 tmp = I915_READ_CTL(ring);
	if (tmp & RING_WAIT) {
		DRM_ERROR("Kicking stuck wait on %s\n",
			  ring->name);
		I915_WRITE_CTL(ring, tmp);
		return true;
	}
	if (IS_GEN6(dev) &&
	    (tmp & RING_WAIT_SEMAPHORE)) {
		DRM_ERROR("Kicking stuck semaphore on %s\n",
			  ring->name);
		I915_WRITE_CTL(ring, tmp);
		return true;
	}
	return false;
}

/**
 * This is called when the chip hasn't reported back with completed
 * batchbuffers in a long time.  The first time this is called we simply record
 * ACTHD.  If ACTHD hasn't changed by the time the hangcheck timer elapses
 * again, we assume the chip is wedged and try to fix it.
 */
void i915_hangcheck_elapsed(unsigned long data)
{
	struct drm_device *dev = (struct drm_device *)data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t acthd, instdone, instdone1;
	bool err = false;

	/* If all work is done then ACTHD clearly hasn't advanced. */
	if (i915_hangcheck_ring_idle(&dev_priv->ring[RCS], &err) &&
	    i915_hangcheck_ring_idle(&dev_priv->ring[VCS], &err) &&
	    i915_hangcheck_ring_idle(&dev_priv->ring[BCS], &err)) {
		dev_priv->hangcheck_count = 0;
		if (err)
			goto repeat;
		return;
	}

	if (INTEL_INFO(dev)->gen < 4) {
		acthd = I915_READ(ACTHD);
		instdone = I915_READ(INSTDONE);
		instdone1 = 0;
	} else {
		acthd = I915_READ(ACTHD_I965);
		instdone = I915_READ(INSTDONE_I965);
		instdone1 = I915_READ(INSTDONE1);
	}

	if (dev_priv->last_acthd == acthd &&
	    dev_priv->last_instdone == instdone &&
	    dev_priv->last_instdone1 == instdone1) {
		if (dev_priv->hangcheck_count++ > 1) {
			DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");

			if (!IS_GEN2(dev)) {
				/* Is the chip hanging on a WAIT_FOR_EVENT?
				 * If so we can simply poke the RB_WAIT bit
				 * and break the hang.  This should work on
				 * all but the second generation chipsets.
				 */

				if (kick_ring(&dev_priv->ring[RCS]))
					goto repeat;

				if (HAS_BSD(dev) &&
				    kick_ring(&dev_priv->ring[VCS]))
					goto repeat;

				if (HAS_BLT(dev) &&
				    kick_ring(&dev_priv->ring[BCS]))
					goto repeat;
			}

			i915_handle_error(dev, true);
			return;
		}
	} else {
		dev_priv->hangcheck_count = 0;

		dev_priv->last_acthd = acthd;
		dev_priv->last_instdone = instdone;
		dev_priv->last_instdone1 = instdone1;
	}

repeat:
	/* Reset timer in case chip hangs without another request being added */
	mod_timer(&dev_priv->hangcheck_timer,
		  jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
}

/* drm_dma.h hooks
 */
static void ironlake_irq_preinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	I915_WRITE(HWSTAM, 0xeffe);

	/* XXX hotplug from PCH */

	I915_WRITE(DEIMR, 0xffffffff);
	I915_WRITE(DEIER, 0x0);
	POSTING_READ(DEIER);

	/* and GT */
	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	POSTING_READ(GTIER);

	/* south display irq */
	I915_WRITE(SDEIMR, 0xffffffff);
	I915_WRITE(SDEIER, 0x0);
	POSTING_READ(SDEIER);
}

static int ironlake_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	/* interrupts we want left enabled at all times */
	u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
			   DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
	u32 render_irqs;
	u32 hotplug_mask;

	dev_priv->irq_mask = ~display_mask;

	/* should always be able to generate irqs */
	I915_WRITE(DEIIR, I915_READ(DEIIR));
	I915_WRITE(DEIMR, dev_priv->irq_mask);
	I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK);
	POSTING_READ(DEIER);

	dev_priv->gt_irq_mask = ~0;

	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);

	if (IS_GEN6(dev))
		render_irqs =
			GT_USER_INTERRUPT |
			GT_GEN6_BSD_USER_INTERRUPT |
			GT_BLT_USER_INTERRUPT;
	else
		render_irqs =
			GT_USER_INTERRUPT |
			GT_PIPE_NOTIFY |
			GT_BSD_USER_INTERRUPT;
	I915_WRITE(GTIER, render_irqs);
	POSTING_READ(GTIER);

	if (HAS_PCH_CPT(dev)) {
		hotplug_mask = (SDE_CRT_HOTPLUG_CPT |
				SDE_PORTB_HOTPLUG_CPT |
				SDE_PORTC_HOTPLUG_CPT |
				SDE_PORTD_HOTPLUG_CPT);
	} else {
		hotplug_mask = (SDE_CRT_HOTPLUG |
				SDE_PORTB_HOTPLUG |
				SDE_PORTC_HOTPLUG |
				SDE_PORTD_HOTPLUG |
				SDE_AUX_MASK);
	}

	dev_priv->pch_irq_mask = ~hotplug_mask;

	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
	I915_WRITE(SDEIMR, dev_priv->pch_irq_mask);
	I915_WRITE(SDEIER, hotplug_mask);
	POSTING_READ(SDEIER);

	if (IS_IRONLAKE_M(dev)) {
		/* Clear & enable PCU event interrupts */
		I915_WRITE(DEIIR, DE_PCU_EVENT);
		I915_WRITE(DEIER, I915_READ(DEIER) | DE_PCU_EVENT);
		ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
	}

	return 0;
}
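/*
 * Each interrupt unit programmed above follows the same register scheme:
 * IER enables a source, IMR masks it out of IIR, IIR latches the sources
 * that have fired (write 1 to clear), and a posting read flushes the
 * register write.  Illustrative sketch (not driver code) of unmasking one
 * display source at runtime once a block has been set up this way:
 *
 *	dev_priv->irq_mask &= ~DE_GSE;
 *	I915_WRITE(DEIMR, dev_priv->irq_mask);
 *	POSTING_READ(DEIMR);
 */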
void i915_driver_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
	INIT_WORK(&dev_priv->error_work, i915_error_work_func);

	if (HAS_PCH_SPLIT(dev)) {
		ironlake_irq_preinstall(dev);
		return;
	}

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE(HWSTAM, 0xeffe);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}

/*
 * Must be called after intel_modeset_init or hotplug interrupts won't be
 * enabled correctly.
 */
int i915_driver_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 enable_mask = I915_INTERRUPT_ENABLE_FIX | I915_INTERRUPT_ENABLE_VAR;
	u32 error_mask;

	DRM_INIT_WAITQUEUE(&dev_priv->ring[RCS].irq_queue);
	if (HAS_BSD(dev))
		DRM_INIT_WAITQUEUE(&dev_priv->ring[VCS].irq_queue);
	if (HAS_BLT(dev))
		DRM_INIT_WAITQUEUE(&dev_priv->ring[BCS].irq_queue);

	dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;

	if (HAS_PCH_SPLIT(dev))
		return ironlake_irq_postinstall(dev);

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask = ~I915_INTERRUPT_ENABLE_FIX;

	dev_priv->pipestat[0] = 0;
	dev_priv->pipestat[1] = 0;

	if (I915_HAS_HOTPLUG(dev)) {
		/* Enable in IER... */
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
		/* and unmask in IMR */
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
	}

	/*
	 * Enable some error detection, note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	I915_WRITE(EMR, error_mask);

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	if (I915_HAS_HOTPLUG(dev)) {
		u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);

		/* Note HDMI and DP share bits */
		if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
			hotplug_en |= HDMIB_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
			hotplug_en |= HDMIC_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
			hotplug_en |= HDMID_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS)
			hotplug_en |= SDVOC_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS)
			hotplug_en |= SDVOB_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
			hotplug_en |= CRT_HOTPLUG_INT_EN;

			/* Programming the CRT detection parameters tends
			   to generate a spurious hotplug event about three
			   seconds later.  So just do it once.
			*/
			if (IS_G4X(dev))
				hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
			hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
		}

		/* Ignore TV since it's buggy */

		I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
	}

	intel_opregion_enable_asle(dev);

	return 0;
}

static void ironlake_irq_uninstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	I915_WRITE(HWSTAM, 0xffffffff);

	I915_WRITE(DEIMR, 0xffffffff);
	I915_WRITE(DEIER, 0x0);
	I915_WRITE(DEIIR, I915_READ(DEIIR));

	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	I915_WRITE(GTIIR, I915_READ(GTIIR));
}

void i915_driver_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	dev_priv->vblank_pipe = 0;

	if (HAS_PCH_SPLIT(dev)) {
		ironlake_irq_uninstall(dev);
		return;
	}

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE(HWSTAM, 0xffffffff);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe),
			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
	I915_WRITE(IIR, I915_READ(IIR));
}