1 /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*- 2 */ 3 /* 4 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. 5 * All Rights Reserved. 6 * 7 * Permission is hereby granted, free of charge, to any person obtaining a 8 * copy of this software and associated documentation files (the 9 * "Software"), to deal in the Software without restriction, including 10 * without limitation the rights to use, copy, modify, merge, publish, 11 * distribute, sub license, and/or sell copies of the Software, and to 12 * permit persons to whom the Software is furnished to do so, subject to 13 * the following conditions: 14 * 15 * The above copyright notice and this permission notice (including the 16 * next paragraph) shall be included in all copies or substantial portions 17 * of the Software. 18 * 19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 22 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR 23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, 24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 26 * 27 */ 28 29 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 30 31 #include <linux/sysrq.h> 32 #include <linux/slab.h> 33 #include <drm/drmP.h> 34 #include <drm/i915_drm.h> 35 #include "i915_drv.h" 36 #include "i915_trace.h" 37 #include "intel_drv.h" 38 39 /* For display hotplug interrupt */ 40 static void 41 ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask) 42 { 43 if ((dev_priv->irq_mask & mask) != 0) { 44 dev_priv->irq_mask &= ~mask; 45 I915_WRITE(DEIMR, dev_priv->irq_mask); 46 POSTING_READ(DEIMR); 47 } 48 } 49 50 static inline void 51 ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask) 52 { 53 if ((dev_priv->irq_mask & mask) != mask) { 54 dev_priv->irq_mask |= mask; 55 I915_WRITE(DEIMR, dev_priv->irq_mask); 56 POSTING_READ(DEIMR); 57 } 58 } 59 60 void 61 i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask) 62 { 63 if ((dev_priv->pipestat[pipe] & mask) != mask) { 64 u32 reg = PIPESTAT(pipe); 65 66 dev_priv->pipestat[pipe] |= mask; 67 /* Enable the interrupt, clear any pending status */ 68 I915_WRITE(reg, dev_priv->pipestat[pipe] | (mask >> 16)); 69 POSTING_READ(reg); 70 } 71 } 72 73 void 74 i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask) 75 { 76 if ((dev_priv->pipestat[pipe] & mask) != 0) { 77 u32 reg = PIPESTAT(pipe); 78 79 dev_priv->pipestat[pipe] &= ~mask; 80 I915_WRITE(reg, dev_priv->pipestat[pipe]); 81 POSTING_READ(reg); 82 } 83 } 84 85 /** 86 * intel_enable_asle - enable ASLE interrupt for OpRegion 87 */ 88 void intel_enable_asle(struct drm_device *dev) 89 { 90 drm_i915_private_t *dev_priv = dev->dev_private; 91 unsigned long irqflags; 92 93 /* FIXME: opregion/asle for VLV */ 94 if (IS_VALLEYVIEW(dev)) 95 return; 96 97 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 98 99 if (HAS_PCH_SPLIT(dev)) 100 ironlake_enable_display_irq(dev_priv, DE_GSE); 101 else { 102 i915_enable_pipestat(dev_priv, 1, 103 PIPE_LEGACY_BLC_EVENT_ENABLE); 104 if (INTEL_INFO(dev)->gen >= 4) 105 i915_enable_pipestat(dev_priv, 0, 106 PIPE_LEGACY_BLC_EVENT_ENABLE); 107 } 108 109 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 110 } 111 112 /** 113 * i915_pipe_enabled - check if a pipe is enabled 114 * @dev: DRM device 115 * @pipe: pipe to check 116 * 117 * Reading certain registers when the pipe is disabled can hang the chip. 118 * Use this routine to make sure the PLL is running and the pipe is active 119 * before reading such registers if unsure. 120 */ 121 static int 122 i915_pipe_enabled(struct drm_device *dev, int pipe) 123 { 124 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 125 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, 126 pipe); 127 128 return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_ENABLE; 129 } 130 131 /* Called from drm generic code, passed a 'crtc', which 132 * we use as a pipe index 133 */ 134 static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe) 135 { 136 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 137 unsigned long high_frame; 138 unsigned long low_frame; 139 u32 high1, high2, low; 140 141 if (!i915_pipe_enabled(dev, pipe)) { 142 DRM_DEBUG_DRIVER("trying to get vblank count for disabled " 143 "pipe %c\n", pipe_name(pipe)); 144 return 0; 145 } 146 147 high_frame = PIPEFRAME(pipe); 148 low_frame = PIPEFRAMEPIXEL(pipe); 149 150 /* 151 * High & low register fields aren't synchronized, so make sure 152 * we get a low value that's stable across two reads of the high 153 * register. 154 */ 155 do { 156 high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK; 157 low = I915_READ(low_frame) & PIPE_FRAME_LOW_MASK; 158 high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK; 159 } while (high1 != high2); 160 161 high1 >>= PIPE_FRAME_HIGH_SHIFT; 162 low >>= PIPE_FRAME_LOW_SHIFT; 163 return (high1 << 8) | low; 164 } 165 166 static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe) 167 { 168 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 169 int reg = PIPE_FRMCOUNT_GM45(pipe); 170 171 if (!i915_pipe_enabled(dev, pipe)) { 172 DRM_DEBUG_DRIVER("trying to get vblank count for disabled " 173 "pipe %c\n", pipe_name(pipe)); 174 return 0; 175 } 176 177 return I915_READ(reg); 178 } 179 180 static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe, 181 int *vpos, int *hpos) 182 { 183 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 184 u32 vbl = 0, position = 0; 185 int vbl_start, vbl_end, htotal, vtotal; 186 bool in_vbl = true; 187 int ret = 0; 188 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, 189 pipe); 190 191 if (!i915_pipe_enabled(dev, pipe)) { 192 DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled " 193 "pipe %c\n", pipe_name(pipe)); 194 return 0; 195 } 196 197 /* Get vtotal. */ 198 vtotal = 1 + ((I915_READ(VTOTAL(cpu_transcoder)) >> 16) & 0x1fff); 199 200 if (INTEL_INFO(dev)->gen >= 4) { 201 /* No obvious pixelcount register. Only query vertical 202 * scanout position from Display scan line register. 203 */ 204 position = I915_READ(PIPEDSL(pipe)); 205 206 /* Decode into vertical scanout position. Don't have 207 * horizontal scanout position. 208 */ 209 *vpos = position & 0x1fff; 210 *hpos = 0; 211 } else { 212 /* Have access to pixelcount since start of frame. 213 * We can split this into vertical and horizontal 214 * scanout position. 215 */ 216 position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT; 217 218 htotal = 1 + ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff); 219 *vpos = position / htotal; 220 *hpos = position - (*vpos * htotal); 221 } 222 223 /* Query vblank area. */ 224 vbl = I915_READ(VBLANK(cpu_transcoder)); 225 226 /* Test position against vblank region. */ 227 vbl_start = vbl & 0x1fff; 228 vbl_end = (vbl >> 16) & 0x1fff; 229 230 if ((*vpos < vbl_start) || (*vpos > vbl_end)) 231 in_vbl = false; 232 233 /* Inside "upper part" of vblank area? Apply corrective offset: */ 234 if (in_vbl && (*vpos >= vbl_start)) 235 *vpos = *vpos - vtotal; 236 237 /* Readouts valid? */ 238 if (vbl > 0) 239 ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE; 240 241 /* In vblank? */ 242 if (in_vbl) 243 ret |= DRM_SCANOUTPOS_INVBL; 244 245 return ret; 246 } 247 248 static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe, 249 int *max_error, 250 struct timeval *vblank_time, 251 unsigned flags) 252 { 253 struct drm_i915_private *dev_priv = dev->dev_private; 254 struct drm_crtc *crtc; 255 256 if (pipe < 0 || pipe >= dev_priv->num_pipe) { 257 DRM_ERROR("Invalid crtc %d\n", pipe); 258 return -EINVAL; 259 } 260 261 /* Get drm_crtc to timestamp: */ 262 crtc = intel_get_crtc_for_pipe(dev, pipe); 263 if (crtc == NULL) { 264 DRM_ERROR("Invalid crtc %d\n", pipe); 265 return -EINVAL; 266 } 267 268 if (!crtc->enabled) { 269 DRM_DEBUG_KMS("crtc %d is disabled\n", pipe); 270 return -EBUSY; 271 } 272 273 /* Helper routine in DRM core does all the work: */ 274 return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error, 275 vblank_time, flags, 276 crtc); 277 } 278 279 /* 280 * Handle hotplug events outside the interrupt handler proper. 281 */ 282 static void i915_hotplug_work_func(struct work_struct *work) 283 { 284 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, 285 hotplug_work); 286 struct drm_device *dev = dev_priv->dev; 287 struct drm_mode_config *mode_config = &dev->mode_config; 288 struct intel_encoder *encoder; 289 290 mutex_lock(&mode_config->mutex); 291 DRM_DEBUG_KMS("running encoder hotplug functions\n"); 292 293 list_for_each_entry(encoder, &mode_config->encoder_list, base.head) 294 if (encoder->hot_plug) 295 encoder->hot_plug(encoder); 296 297 mutex_unlock(&mode_config->mutex); 298 299 /* Just fire off a uevent and let userspace tell us what to do */ 300 drm_helper_hpd_irq_event(dev); 301 } 302 303 /* defined intel_pm.c */ 304 extern spinlock_t mchdev_lock; 305 306 static void ironlake_handle_rps_change(struct drm_device *dev) 307 { 308 drm_i915_private_t *dev_priv = dev->dev_private; 309 u32 busy_up, busy_down, max_avg, min_avg; 310 u8 new_delay; 311 unsigned long flags; 312 313 spin_lock_irqsave(&mchdev_lock, flags); 314 315 I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS)); 316 317 new_delay = dev_priv->ips.cur_delay; 318 319 I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG); 320 busy_up = I915_READ(RCPREVBSYTUPAVG); 321 busy_down = I915_READ(RCPREVBSYTDNAVG); 322 max_avg = I915_READ(RCBMAXAVG); 323 min_avg = I915_READ(RCBMINAVG); 324 325 /* Handle RCS change request from hw */ 326 if (busy_up > max_avg) { 327 if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay) 328 new_delay = dev_priv->ips.cur_delay - 1; 329 if (new_delay < dev_priv->ips.max_delay) 330 new_delay = dev_priv->ips.max_delay; 331 } else if (busy_down < min_avg) { 332 if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay) 333 new_delay = dev_priv->ips.cur_delay + 1; 334 if (new_delay > dev_priv->ips.min_delay) 335 new_delay = dev_priv->ips.min_delay; 336 } 337 338 if (ironlake_set_drps(dev, new_delay)) 339 dev_priv->ips.cur_delay = new_delay; 340 341 spin_unlock_irqrestore(&mchdev_lock, flags); 342 343 return; 344 } 345 346 static void notify_ring(struct drm_device *dev, 347 struct intel_ring_buffer *ring) 348 { 349 struct drm_i915_private *dev_priv = dev->dev_private; 350 351 if (ring->obj == NULL) 352 return; 353 354 trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false)); 355 356 wake_up_all(&ring->irq_queue); 357 if (i915_enable_hangcheck) { 358 dev_priv->hangcheck_count = 0; 359 mod_timer(&dev_priv->hangcheck_timer, 360 round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES)); 361 } 362 } 363 364 static void gen6_pm_rps_work(struct work_struct *work) 365 { 366 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, 367 rps.work); 368 u32 pm_iir, pm_imr; 369 u8 new_delay; 370 371 spin_lock_irq(&dev_priv->rps.lock); 372 pm_iir = dev_priv->rps.pm_iir; 373 dev_priv->rps.pm_iir = 0; 374 pm_imr = I915_READ(GEN6_PMIMR); 375 I915_WRITE(GEN6_PMIMR, 0); 376 spin_unlock_irq(&dev_priv->rps.lock); 377 378 if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0) 379 return; 380 381 mutex_lock(&dev_priv->rps.hw_lock); 382 383 if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) 384 new_delay = dev_priv->rps.cur_delay + 1; 385 else 386 new_delay = dev_priv->rps.cur_delay - 1; 387 388 /* sysfs frequency interfaces may have snuck in while servicing the 389 * interrupt 390 */ 391 if (!(new_delay > dev_priv->rps.max_delay || 392 new_delay < dev_priv->rps.min_delay)) { 393 gen6_set_rps(dev_priv->dev, new_delay); 394 } 395 396 mutex_unlock(&dev_priv->rps.hw_lock); 397 } 398 399 400 /** 401 * ivybridge_parity_work - Workqueue called when a parity error interrupt 402 * occurred. 403 * @work: workqueue struct 404 * 405 * Doesn't actually do anything except notify userspace. As a consequence of 406 * this event, userspace should try to remap the bad rows since statistically 407 * it is likely the same row is more likely to go bad again. 408 */ 409 static void ivybridge_parity_work(struct work_struct *work) 410 { 411 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, 412 l3_parity.error_work); 413 u32 error_status, row, bank, subbank; 414 char *parity_event[5]; 415 uint32_t misccpctl; 416 unsigned long flags; 417 418 /* We must turn off DOP level clock gating to access the L3 registers. 419 * In order to prevent a get/put style interface, acquire struct mutex 420 * any time we access those registers. 421 */ 422 mutex_lock(&dev_priv->dev->struct_mutex); 423 424 misccpctl = I915_READ(GEN7_MISCCPCTL); 425 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE); 426 POSTING_READ(GEN7_MISCCPCTL); 427 428 error_status = I915_READ(GEN7_L3CDERRST1); 429 row = GEN7_PARITY_ERROR_ROW(error_status); 430 bank = GEN7_PARITY_ERROR_BANK(error_status); 431 subbank = GEN7_PARITY_ERROR_SUBBANK(error_status); 432 433 I915_WRITE(GEN7_L3CDERRST1, GEN7_PARITY_ERROR_VALID | 434 GEN7_L3CDERRST1_ENABLE); 435 POSTING_READ(GEN7_L3CDERRST1); 436 437 I915_WRITE(GEN7_MISCCPCTL, misccpctl); 438 439 spin_lock_irqsave(&dev_priv->irq_lock, flags); 440 dev_priv->gt_irq_mask &= ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT; 441 I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 442 spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 443 444 mutex_unlock(&dev_priv->dev->struct_mutex); 445 446 parity_event[0] = "L3_PARITY_ERROR=1"; 447 parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row); 448 parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank); 449 parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank); 450 parity_event[4] = NULL; 451 452 kobject_uevent_env(&dev_priv->dev->primary->kdev.kobj, 453 KOBJ_CHANGE, parity_event); 454 455 DRM_DEBUG("Parity error: Row = %d, Bank = %d, Sub bank = %d.\n", 456 row, bank, subbank); 457 458 kfree(parity_event[3]); 459 kfree(parity_event[2]); 460 kfree(parity_event[1]); 461 } 462 463 static void ivybridge_handle_parity_error(struct drm_device *dev) 464 { 465 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 466 unsigned long flags; 467 468 if (!HAS_L3_GPU_CACHE(dev)) 469 return; 470 471 spin_lock_irqsave(&dev_priv->irq_lock, flags); 472 dev_priv->gt_irq_mask |= GT_GEN7_L3_PARITY_ERROR_INTERRUPT; 473 I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 474 spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 475 476 queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work); 477 } 478 479 static void snb_gt_irq_handler(struct drm_device *dev, 480 struct drm_i915_private *dev_priv, 481 u32 gt_iir) 482 { 483 484 if (gt_iir & (GEN6_RENDER_USER_INTERRUPT | 485 GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT)) 486 notify_ring(dev, &dev_priv->ring[RCS]); 487 if (gt_iir & GEN6_BSD_USER_INTERRUPT) 488 notify_ring(dev, &dev_priv->ring[VCS]); 489 if (gt_iir & GEN6_BLITTER_USER_INTERRUPT) 490 notify_ring(dev, &dev_priv->ring[BCS]); 491 492 if (gt_iir & (GT_GEN6_BLT_CS_ERROR_INTERRUPT | 493 GT_GEN6_BSD_CS_ERROR_INTERRUPT | 494 GT_RENDER_CS_ERROR_INTERRUPT)) { 495 DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir); 496 i915_handle_error(dev, false); 497 } 498 499 if (gt_iir & GT_GEN7_L3_PARITY_ERROR_INTERRUPT) 500 ivybridge_handle_parity_error(dev); 501 } 502 503 static void gen6_queue_rps_work(struct drm_i915_private *dev_priv, 504 u32 pm_iir) 505 { 506 unsigned long flags; 507 508 /* 509 * IIR bits should never already be set because IMR should 510 * prevent an interrupt from being shown in IIR. The warning 511 * displays a case where we've unsafely cleared 512 * dev_priv->rps.pm_iir. Although missing an interrupt of the same 513 * type is not a problem, it displays a problem in the logic. 514 * 515 * The mask bit in IMR is cleared by dev_priv->rps.work. 516 */ 517 518 spin_lock_irqsave(&dev_priv->rps.lock, flags); 519 dev_priv->rps.pm_iir |= pm_iir; 520 I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir); 521 POSTING_READ(GEN6_PMIMR); 522 spin_unlock_irqrestore(&dev_priv->rps.lock, flags); 523 524 queue_work(dev_priv->wq, &dev_priv->rps.work); 525 } 526 527 static irqreturn_t valleyview_irq_handler(int irq, void *arg) 528 { 529 struct drm_device *dev = (struct drm_device *) arg; 530 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 531 u32 iir, gt_iir, pm_iir; 532 irqreturn_t ret = IRQ_NONE; 533 unsigned long irqflags; 534 int pipe; 535 u32 pipe_stats[I915_MAX_PIPES]; 536 bool blc_event; 537 538 atomic_inc(&dev_priv->irq_received); 539 540 while (true) { 541 iir = I915_READ(VLV_IIR); 542 gt_iir = I915_READ(GTIIR); 543 pm_iir = I915_READ(GEN6_PMIIR); 544 545 if (gt_iir == 0 && pm_iir == 0 && iir == 0) 546 goto out; 547 548 ret = IRQ_HANDLED; 549 550 snb_gt_irq_handler(dev, dev_priv, gt_iir); 551 552 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 553 for_each_pipe(pipe) { 554 int reg = PIPESTAT(pipe); 555 pipe_stats[pipe] = I915_READ(reg); 556 557 /* 558 * Clear the PIPE*STAT regs before the IIR 559 */ 560 if (pipe_stats[pipe] & 0x8000ffff) { 561 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 562 DRM_DEBUG_DRIVER("pipe %c underrun\n", 563 pipe_name(pipe)); 564 I915_WRITE(reg, pipe_stats[pipe]); 565 } 566 } 567 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 568 569 for_each_pipe(pipe) { 570 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS) 571 drm_handle_vblank(dev, pipe); 572 573 if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) { 574 intel_prepare_page_flip(dev, pipe); 575 intel_finish_page_flip(dev, pipe); 576 } 577 } 578 579 /* Consume port. Then clear IIR or we'll miss events */ 580 if (iir & I915_DISPLAY_PORT_INTERRUPT) { 581 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); 582 583 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", 584 hotplug_status); 585 if (hotplug_status & dev_priv->hotplug_supported_mask) 586 queue_work(dev_priv->wq, 587 &dev_priv->hotplug_work); 588 589 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); 590 I915_READ(PORT_HOTPLUG_STAT); 591 } 592 593 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 594 blc_event = true; 595 596 if (pm_iir & GEN6_PM_DEFERRED_EVENTS) 597 gen6_queue_rps_work(dev_priv, pm_iir); 598 599 I915_WRITE(GTIIR, gt_iir); 600 I915_WRITE(GEN6_PMIIR, pm_iir); 601 I915_WRITE(VLV_IIR, iir); 602 } 603 604 out: 605 return ret; 606 } 607 608 static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir) 609 { 610 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 611 int pipe; 612 613 if (pch_iir & SDE_HOTPLUG_MASK) 614 queue_work(dev_priv->wq, &dev_priv->hotplug_work); 615 616 if (pch_iir & SDE_AUDIO_POWER_MASK) 617 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", 618 (pch_iir & SDE_AUDIO_POWER_MASK) >> 619 SDE_AUDIO_POWER_SHIFT); 620 621 if (pch_iir & SDE_GMBUS) 622 DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n"); 623 624 if (pch_iir & SDE_AUDIO_HDCP_MASK) 625 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n"); 626 627 if (pch_iir & SDE_AUDIO_TRANS_MASK) 628 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n"); 629 630 if (pch_iir & SDE_POISON) 631 DRM_ERROR("PCH poison interrupt\n"); 632 633 if (pch_iir & SDE_FDI_MASK) 634 for_each_pipe(pipe) 635 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 636 pipe_name(pipe), 637 I915_READ(FDI_RX_IIR(pipe))); 638 639 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE)) 640 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n"); 641 642 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR)) 643 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n"); 644 645 if (pch_iir & SDE_TRANSB_FIFO_UNDER) 646 DRM_DEBUG_DRIVER("PCH transcoder B underrun interrupt\n"); 647 if (pch_iir & SDE_TRANSA_FIFO_UNDER) 648 DRM_DEBUG_DRIVER("PCH transcoder A underrun interrupt\n"); 649 } 650 651 static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir) 652 { 653 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 654 int pipe; 655 656 if (pch_iir & SDE_HOTPLUG_MASK_CPT) 657 queue_work(dev_priv->wq, &dev_priv->hotplug_work); 658 659 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) 660 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", 661 (pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> 662 SDE_AUDIO_POWER_SHIFT_CPT); 663 664 if (pch_iir & SDE_AUX_MASK_CPT) 665 DRM_DEBUG_DRIVER("AUX channel interrupt\n"); 666 667 if (pch_iir & SDE_GMBUS_CPT) 668 DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n"); 669 670 if (pch_iir & SDE_AUDIO_CP_REQ_CPT) 671 DRM_DEBUG_DRIVER("Audio CP request interrupt\n"); 672 673 if (pch_iir & SDE_AUDIO_CP_CHG_CPT) 674 DRM_DEBUG_DRIVER("Audio CP change interrupt\n"); 675 676 if (pch_iir & SDE_FDI_MASK_CPT) 677 for_each_pipe(pipe) 678 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 679 pipe_name(pipe), 680 I915_READ(FDI_RX_IIR(pipe))); 681 } 682 683 static irqreturn_t ivybridge_irq_handler(int irq, void *arg) 684 { 685 struct drm_device *dev = (struct drm_device *) arg; 686 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 687 u32 de_iir, gt_iir, de_ier, pm_iir; 688 irqreturn_t ret = IRQ_NONE; 689 int i; 690 691 atomic_inc(&dev_priv->irq_received); 692 693 /* disable master interrupt before clearing iir */ 694 de_ier = I915_READ(DEIER); 695 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); 696 697 gt_iir = I915_READ(GTIIR); 698 if (gt_iir) { 699 snb_gt_irq_handler(dev, dev_priv, gt_iir); 700 I915_WRITE(GTIIR, gt_iir); 701 ret = IRQ_HANDLED; 702 } 703 704 de_iir = I915_READ(DEIIR); 705 if (de_iir) { 706 if (de_iir & DE_GSE_IVB) 707 intel_opregion_gse_intr(dev); 708 709 for (i = 0; i < 3; i++) { 710 if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i))) 711 drm_handle_vblank(dev, i); 712 if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) { 713 intel_prepare_page_flip(dev, i); 714 intel_finish_page_flip_plane(dev, i); 715 } 716 } 717 718 /* check event from PCH */ 719 if (de_iir & DE_PCH_EVENT_IVB) { 720 u32 pch_iir = I915_READ(SDEIIR); 721 722 cpt_irq_handler(dev, pch_iir); 723 724 /* clear PCH hotplug event before clear CPU irq */ 725 I915_WRITE(SDEIIR, pch_iir); 726 } 727 728 I915_WRITE(DEIIR, de_iir); 729 ret = IRQ_HANDLED; 730 } 731 732 pm_iir = I915_READ(GEN6_PMIIR); 733 if (pm_iir) { 734 if (pm_iir & GEN6_PM_DEFERRED_EVENTS) 735 gen6_queue_rps_work(dev_priv, pm_iir); 736 I915_WRITE(GEN6_PMIIR, pm_iir); 737 ret = IRQ_HANDLED; 738 } 739 740 I915_WRITE(DEIER, de_ier); 741 POSTING_READ(DEIER); 742 743 return ret; 744 } 745 746 static void ilk_gt_irq_handler(struct drm_device *dev, 747 struct drm_i915_private *dev_priv, 748 u32 gt_iir) 749 { 750 if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY)) 751 notify_ring(dev, &dev_priv->ring[RCS]); 752 if (gt_iir & GT_BSD_USER_INTERRUPT) 753 notify_ring(dev, &dev_priv->ring[VCS]); 754 } 755 756 static irqreturn_t ironlake_irq_handler(int irq, void *arg) 757 { 758 struct drm_device *dev = (struct drm_device *) arg; 759 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 760 int ret = IRQ_NONE; 761 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir; 762 763 atomic_inc(&dev_priv->irq_received); 764 765 /* disable master interrupt before clearing iir */ 766 de_ier = I915_READ(DEIER); 767 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); 768 POSTING_READ(DEIER); 769 770 de_iir = I915_READ(DEIIR); 771 gt_iir = I915_READ(GTIIR); 772 pch_iir = I915_READ(SDEIIR); 773 pm_iir = I915_READ(GEN6_PMIIR); 774 775 if (de_iir == 0 && gt_iir == 0 && pch_iir == 0 && 776 (!IS_GEN6(dev) || pm_iir == 0)) 777 goto done; 778 779 ret = IRQ_HANDLED; 780 781 if (IS_GEN5(dev)) 782 ilk_gt_irq_handler(dev, dev_priv, gt_iir); 783 else 784 snb_gt_irq_handler(dev, dev_priv, gt_iir); 785 786 if (de_iir & DE_GSE) 787 intel_opregion_gse_intr(dev); 788 789 if (de_iir & DE_PIPEA_VBLANK) 790 drm_handle_vblank(dev, 0); 791 792 if (de_iir & DE_PIPEB_VBLANK) 793 drm_handle_vblank(dev, 1); 794 795 if (de_iir & DE_PLANEA_FLIP_DONE) { 796 intel_prepare_page_flip(dev, 0); 797 intel_finish_page_flip_plane(dev, 0); 798 } 799 800 if (de_iir & DE_PLANEB_FLIP_DONE) { 801 intel_prepare_page_flip(dev, 1); 802 intel_finish_page_flip_plane(dev, 1); 803 } 804 805 /* check event from PCH */ 806 if (de_iir & DE_PCH_EVENT) { 807 if (HAS_PCH_CPT(dev)) 808 cpt_irq_handler(dev, pch_iir); 809 else 810 ibx_irq_handler(dev, pch_iir); 811 } 812 813 if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT) 814 ironlake_handle_rps_change(dev); 815 816 if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS) 817 gen6_queue_rps_work(dev_priv, pm_iir); 818 819 /* should clear PCH hotplug event before clear CPU irq */ 820 I915_WRITE(SDEIIR, pch_iir); 821 I915_WRITE(GTIIR, gt_iir); 822 I915_WRITE(DEIIR, de_iir); 823 I915_WRITE(GEN6_PMIIR, pm_iir); 824 825 done: 826 I915_WRITE(DEIER, de_ier); 827 POSTING_READ(DEIER); 828 829 return ret; 830 } 831 832 /** 833 * i915_error_work_func - do process context error handling work 834 * @work: work struct 835 * 836 * Fire an error uevent so userspace can see that a hang or error 837 * was detected. 838 */ 839 static void i915_error_work_func(struct work_struct *work) 840 { 841 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, 842 error_work); 843 struct drm_device *dev = dev_priv->dev; 844 char *error_event[] = { "ERROR=1", NULL }; 845 char *reset_event[] = { "RESET=1", NULL }; 846 char *reset_done_event[] = { "ERROR=0", NULL }; 847 848 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event); 849 850 if (atomic_read(&dev_priv->mm.wedged)) { 851 DRM_DEBUG_DRIVER("resetting chip\n"); 852 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event); 853 if (!i915_reset(dev)) { 854 atomic_set(&dev_priv->mm.wedged, 0); 855 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event); 856 } 857 complete_all(&dev_priv->error_completion); 858 } 859 } 860 861 /* NB: please notice the memset */ 862 static void i915_get_extra_instdone(struct drm_device *dev, 863 uint32_t *instdone) 864 { 865 struct drm_i915_private *dev_priv = dev->dev_private; 866 memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG); 867 868 switch(INTEL_INFO(dev)->gen) { 869 case 2: 870 case 3: 871 instdone[0] = I915_READ(INSTDONE); 872 break; 873 case 4: 874 case 5: 875 case 6: 876 instdone[0] = I915_READ(INSTDONE_I965); 877 instdone[1] = I915_READ(INSTDONE1); 878 break; 879 default: 880 WARN_ONCE(1, "Unsupported platform\n"); 881 case 7: 882 instdone[0] = I915_READ(GEN7_INSTDONE_1); 883 instdone[1] = I915_READ(GEN7_SC_INSTDONE); 884 instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE); 885 instdone[3] = I915_READ(GEN7_ROW_INSTDONE); 886 break; 887 } 888 } 889 890 #ifdef CONFIG_DEBUG_FS 891 static struct drm_i915_error_object * 892 i915_error_object_create(struct drm_i915_private *dev_priv, 893 struct drm_i915_gem_object *src) 894 { 895 struct drm_i915_error_object *dst; 896 int i, count; 897 u32 reloc_offset; 898 899 if (src == NULL || src->pages == NULL) 900 return NULL; 901 902 count = src->base.size / PAGE_SIZE; 903 904 dst = kmalloc(sizeof(*dst) + count * sizeof(u32 *), GFP_ATOMIC); 905 if (dst == NULL) 906 return NULL; 907 908 reloc_offset = src->gtt_offset; 909 for (i = 0; i < count; i++) { 910 unsigned long flags; 911 void *d; 912 913 d = kmalloc(PAGE_SIZE, GFP_ATOMIC); 914 if (d == NULL) 915 goto unwind; 916 917 local_irq_save(flags); 918 if (reloc_offset < dev_priv->mm.gtt_mappable_end && 919 src->has_global_gtt_mapping) { 920 void __iomem *s; 921 922 /* Simply ignore tiling or any overlapping fence. 923 * It's part of the error state, and this hopefully 924 * captures what the GPU read. 925 */ 926 927 s = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping, 928 reloc_offset); 929 memcpy_fromio(d, s, PAGE_SIZE); 930 io_mapping_unmap_atomic(s); 931 } else { 932 struct page *page; 933 void *s; 934 935 page = i915_gem_object_get_page(src, i); 936 937 drm_clflush_pages(&page, 1); 938 939 s = kmap_atomic(page); 940 memcpy(d, s, PAGE_SIZE); 941 kunmap_atomic(s); 942 943 drm_clflush_pages(&page, 1); 944 } 945 local_irq_restore(flags); 946 947 dst->pages[i] = d; 948 949 reloc_offset += PAGE_SIZE; 950 } 951 dst->page_count = count; 952 dst->gtt_offset = src->gtt_offset; 953 954 return dst; 955 956 unwind: 957 while (i--) 958 kfree(dst->pages[i]); 959 kfree(dst); 960 return NULL; 961 } 962 963 static void 964 i915_error_object_free(struct drm_i915_error_object *obj) 965 { 966 int page; 967 968 if (obj == NULL) 969 return; 970 971 for (page = 0; page < obj->page_count; page++) 972 kfree(obj->pages[page]); 973 974 kfree(obj); 975 } 976 977 void 978 i915_error_state_free(struct kref *error_ref) 979 { 980 struct drm_i915_error_state *error = container_of(error_ref, 981 typeof(*error), ref); 982 int i; 983 984 for (i = 0; i < ARRAY_SIZE(error->ring); i++) { 985 i915_error_object_free(error->ring[i].batchbuffer); 986 i915_error_object_free(error->ring[i].ringbuffer); 987 kfree(error->ring[i].requests); 988 } 989 990 kfree(error->active_bo); 991 kfree(error->overlay); 992 kfree(error); 993 } 994 static void capture_bo(struct drm_i915_error_buffer *err, 995 struct drm_i915_gem_object *obj) 996 { 997 err->size = obj->base.size; 998 err->name = obj->base.name; 999 err->rseqno = obj->last_read_seqno; 1000 err->wseqno = obj->last_write_seqno; 1001 err->gtt_offset = obj->gtt_offset; 1002 err->read_domains = obj->base.read_domains; 1003 err->write_domain = obj->base.write_domain; 1004 err->fence_reg = obj->fence_reg; 1005 err->pinned = 0; 1006 if (obj->pin_count > 0) 1007 err->pinned = 1; 1008 if (obj->user_pin_count > 0) 1009 err->pinned = -1; 1010 err->tiling = obj->tiling_mode; 1011 err->dirty = obj->dirty; 1012 err->purgeable = obj->madv != I915_MADV_WILLNEED; 1013 err->ring = obj->ring ? obj->ring->id : -1; 1014 err->cache_level = obj->cache_level; 1015 } 1016 1017 static u32 capture_active_bo(struct drm_i915_error_buffer *err, 1018 int count, struct list_head *head) 1019 { 1020 struct drm_i915_gem_object *obj; 1021 int i = 0; 1022 1023 list_for_each_entry(obj, head, mm_list) { 1024 capture_bo(err++, obj); 1025 if (++i == count) 1026 break; 1027 } 1028 1029 return i; 1030 } 1031 1032 static u32 capture_pinned_bo(struct drm_i915_error_buffer *err, 1033 int count, struct list_head *head) 1034 { 1035 struct drm_i915_gem_object *obj; 1036 int i = 0; 1037 1038 list_for_each_entry(obj, head, gtt_list) { 1039 if (obj->pin_count == 0) 1040 continue; 1041 1042 capture_bo(err++, obj); 1043 if (++i == count) 1044 break; 1045 } 1046 1047 return i; 1048 } 1049 1050 static void i915_gem_record_fences(struct drm_device *dev, 1051 struct drm_i915_error_state *error) 1052 { 1053 struct drm_i915_private *dev_priv = dev->dev_private; 1054 int i; 1055 1056 /* Fences */ 1057 switch (INTEL_INFO(dev)->gen) { 1058 case 7: 1059 case 6: 1060 for (i = 0; i < 16; i++) 1061 error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8)); 1062 break; 1063 case 5: 1064 case 4: 1065 for (i = 0; i < 16; i++) 1066 error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8)); 1067 break; 1068 case 3: 1069 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) 1070 for (i = 0; i < 8; i++) 1071 error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4)); 1072 case 2: 1073 for (i = 0; i < 8; i++) 1074 error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4)); 1075 break; 1076 1077 } 1078 } 1079 1080 static struct drm_i915_error_object * 1081 i915_error_first_batchbuffer(struct drm_i915_private *dev_priv, 1082 struct intel_ring_buffer *ring) 1083 { 1084 struct drm_i915_gem_object *obj; 1085 u32 seqno; 1086 1087 if (!ring->get_seqno) 1088 return NULL; 1089 1090 if (HAS_BROKEN_CS_TLB(dev_priv->dev)) { 1091 u32 acthd = I915_READ(ACTHD); 1092 1093 if (WARN_ON(ring->id != RCS)) 1094 return NULL; 1095 1096 obj = ring->private; 1097 if (acthd >= obj->gtt_offset && 1098 acthd < obj->gtt_offset + obj->base.size) 1099 return i915_error_object_create(dev_priv, obj); 1100 } 1101 1102 seqno = ring->get_seqno(ring, false); 1103 list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) { 1104 if (obj->ring != ring) 1105 continue; 1106 1107 if (i915_seqno_passed(seqno, obj->last_read_seqno)) 1108 continue; 1109 1110 if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0) 1111 continue; 1112 1113 /* We need to copy these to an anonymous buffer as the simplest 1114 * method to avoid being overwritten by userspace. 1115 */ 1116 return i915_error_object_create(dev_priv, obj); 1117 } 1118 1119 return NULL; 1120 } 1121 1122 static void i915_record_ring_state(struct drm_device *dev, 1123 struct drm_i915_error_state *error, 1124 struct intel_ring_buffer *ring) 1125 { 1126 struct drm_i915_private *dev_priv = dev->dev_private; 1127 1128 if (INTEL_INFO(dev)->gen >= 6) { 1129 error->rc_psmi[ring->id] = I915_READ(ring->mmio_base + 0x50); 1130 error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring)); 1131 error->semaphore_mboxes[ring->id][0] 1132 = I915_READ(RING_SYNC_0(ring->mmio_base)); 1133 error->semaphore_mboxes[ring->id][1] 1134 = I915_READ(RING_SYNC_1(ring->mmio_base)); 1135 error->semaphore_seqno[ring->id][0] = ring->sync_seqno[0]; 1136 error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1]; 1137 } 1138 1139 if (INTEL_INFO(dev)->gen >= 4) { 1140 error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base)); 1141 error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base)); 1142 error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base)); 1143 error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base)); 1144 error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base)); 1145 if (ring->id == RCS) 1146 error->bbaddr = I915_READ64(BB_ADDR); 1147 } else { 1148 error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX); 1149 error->ipeir[ring->id] = I915_READ(IPEIR); 1150 error->ipehr[ring->id] = I915_READ(IPEHR); 1151 error->instdone[ring->id] = I915_READ(INSTDONE); 1152 } 1153 1154 error->waiting[ring->id] = waitqueue_active(&ring->irq_queue); 1155 error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base)); 1156 error->seqno[ring->id] = ring->get_seqno(ring, false); 1157 error->acthd[ring->id] = intel_ring_get_active_head(ring); 1158 error->head[ring->id] = I915_READ_HEAD(ring); 1159 error->tail[ring->id] = I915_READ_TAIL(ring); 1160 error->ctl[ring->id] = I915_READ_CTL(ring); 1161 1162 error->cpu_ring_head[ring->id] = ring->head; 1163 error->cpu_ring_tail[ring->id] = ring->tail; 1164 } 1165 1166 static void i915_gem_record_rings(struct drm_device *dev, 1167 struct drm_i915_error_state *error) 1168 { 1169 struct drm_i915_private *dev_priv = dev->dev_private; 1170 struct intel_ring_buffer *ring; 1171 struct drm_i915_gem_request *request; 1172 int i, count; 1173 1174 for_each_ring(ring, dev_priv, i) { 1175 i915_record_ring_state(dev, error, ring); 1176 1177 error->ring[i].batchbuffer = 1178 i915_error_first_batchbuffer(dev_priv, ring); 1179 1180 error->ring[i].ringbuffer = 1181 i915_error_object_create(dev_priv, ring->obj); 1182 1183 count = 0; 1184 list_for_each_entry(request, &ring->request_list, list) 1185 count++; 1186 1187 error->ring[i].num_requests = count; 1188 error->ring[i].requests = 1189 kmalloc(count*sizeof(struct drm_i915_error_request), 1190 GFP_ATOMIC); 1191 if (error->ring[i].requests == NULL) { 1192 error->ring[i].num_requests = 0; 1193 continue; 1194 } 1195 1196 count = 0; 1197 list_for_each_entry(request, &ring->request_list, list) { 1198 struct drm_i915_error_request *erq; 1199 1200 erq = &error->ring[i].requests[count++]; 1201 erq->seqno = request->seqno; 1202 erq->jiffies = request->emitted_jiffies; 1203 erq->tail = request->tail; 1204 } 1205 } 1206 } 1207 1208 /** 1209 * i915_capture_error_state - capture an error record for later analysis 1210 * @dev: drm device 1211 * 1212 * Should be called when an error is detected (either a hang or an error 1213 * interrupt) to capture error state from the time of the error. Fills 1214 * out a structure which becomes available in debugfs for user level tools 1215 * to pick up. 1216 */ 1217 static void i915_capture_error_state(struct drm_device *dev) 1218 { 1219 struct drm_i915_private *dev_priv = dev->dev_private; 1220 struct drm_i915_gem_object *obj; 1221 struct drm_i915_error_state *error; 1222 unsigned long flags; 1223 int i, pipe; 1224 1225 spin_lock_irqsave(&dev_priv->error_lock, flags); 1226 error = dev_priv->first_error; 1227 spin_unlock_irqrestore(&dev_priv->error_lock, flags); 1228 if (error) 1229 return; 1230 1231 /* Account for pipe specific data like PIPE*STAT */ 1232 error = kzalloc(sizeof(*error), GFP_ATOMIC); 1233 if (!error) { 1234 DRM_DEBUG_DRIVER("out of memory, not capturing error state\n"); 1235 return; 1236 } 1237 1238 DRM_INFO("capturing error event; look for more information in /debug/dri/%d/i915_error_state\n", 1239 dev->primary->index); 1240 1241 kref_init(&error->ref); 1242 error->eir = I915_READ(EIR); 1243 error->pgtbl_er = I915_READ(PGTBL_ER); 1244 error->ccid = I915_READ(CCID); 1245 1246 if (HAS_PCH_SPLIT(dev)) 1247 error->ier = I915_READ(DEIER) | I915_READ(GTIER); 1248 else if (IS_VALLEYVIEW(dev)) 1249 error->ier = I915_READ(GTIER) | I915_READ(VLV_IER); 1250 else if (IS_GEN2(dev)) 1251 error->ier = I915_READ16(IER); 1252 else 1253 error->ier = I915_READ(IER); 1254 1255 if (INTEL_INFO(dev)->gen >= 6) 1256 error->derrmr = I915_READ(DERRMR); 1257 1258 if (IS_VALLEYVIEW(dev)) 1259 error->forcewake = I915_READ(FORCEWAKE_VLV); 1260 else if (INTEL_INFO(dev)->gen >= 7) 1261 error->forcewake = I915_READ(FORCEWAKE_MT); 1262 else if (INTEL_INFO(dev)->gen == 6) 1263 error->forcewake = I915_READ(FORCEWAKE); 1264 1265 for_each_pipe(pipe) 1266 error->pipestat[pipe] = I915_READ(PIPESTAT(pipe)); 1267 1268 if (INTEL_INFO(dev)->gen >= 6) { 1269 error->error = I915_READ(ERROR_GEN6); 1270 error->done_reg = I915_READ(DONE_REG); 1271 } 1272 1273 if (INTEL_INFO(dev)->gen == 7) 1274 error->err_int = I915_READ(GEN7_ERR_INT); 1275 1276 i915_get_extra_instdone(dev, error->extra_instdone); 1277 1278 i915_gem_record_fences(dev, error); 1279 i915_gem_record_rings(dev, error); 1280 1281 /* Record buffers on the active and pinned lists. */ 1282 error->active_bo = NULL; 1283 error->pinned_bo = NULL; 1284 1285 i = 0; 1286 list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) 1287 i++; 1288 error->active_bo_count = i; 1289 list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) 1290 if (obj->pin_count) 1291 i++; 1292 error->pinned_bo_count = i - error->active_bo_count; 1293 1294 error->active_bo = NULL; 1295 error->pinned_bo = NULL; 1296 if (i) { 1297 error->active_bo = kmalloc(sizeof(*error->active_bo)*i, 1298 GFP_ATOMIC); 1299 if (error->active_bo) 1300 error->pinned_bo = 1301 error->active_bo + error->active_bo_count; 1302 } 1303 1304 if (error->active_bo) 1305 error->active_bo_count = 1306 capture_active_bo(error->active_bo, 1307 error->active_bo_count, 1308 &dev_priv->mm.active_list); 1309 1310 if (error->pinned_bo) 1311 error->pinned_bo_count = 1312 capture_pinned_bo(error->pinned_bo, 1313 error->pinned_bo_count, 1314 &dev_priv->mm.bound_list); 1315 1316 do_gettimeofday(&error->time); 1317 1318 error->overlay = intel_overlay_capture_error_state(dev); 1319 error->display = intel_display_capture_error_state(dev); 1320 1321 spin_lock_irqsave(&dev_priv->error_lock, flags); 1322 if (dev_priv->first_error == NULL) { 1323 dev_priv->first_error = error; 1324 error = NULL; 1325 } 1326 spin_unlock_irqrestore(&dev_priv->error_lock, flags); 1327 1328 if (error) 1329 i915_error_state_free(&error->ref); 1330 } 1331 1332 void i915_destroy_error_state(struct drm_device *dev) 1333 { 1334 struct drm_i915_private *dev_priv = dev->dev_private; 1335 struct drm_i915_error_state *error; 1336 unsigned long flags; 1337 1338 spin_lock_irqsave(&dev_priv->error_lock, flags); 1339 error = dev_priv->first_error; 1340 dev_priv->first_error = NULL; 1341 spin_unlock_irqrestore(&dev_priv->error_lock, flags); 1342 1343 if (error) 1344 kref_put(&error->ref, i915_error_state_free); 1345 } 1346 #else 1347 #define i915_capture_error_state(x) 1348 #endif 1349 1350 static void i915_report_and_clear_eir(struct drm_device *dev) 1351 { 1352 struct drm_i915_private *dev_priv = dev->dev_private; 1353 uint32_t instdone[I915_NUM_INSTDONE_REG]; 1354 u32 eir = I915_READ(EIR); 1355 int pipe, i; 1356 1357 if (!eir) 1358 return; 1359 1360 pr_err("render error detected, EIR: 0x%08x\n", eir); 1361 1362 i915_get_extra_instdone(dev, instdone); 1363 1364 if (IS_G4X(dev)) { 1365 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) { 1366 u32 ipeir = I915_READ(IPEIR_I965); 1367 1368 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); 1369 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); 1370 for (i = 0; i < ARRAY_SIZE(instdone); i++) 1371 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); 1372 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); 1373 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); 1374 I915_WRITE(IPEIR_I965, ipeir); 1375 POSTING_READ(IPEIR_I965); 1376 } 1377 if (eir & GM45_ERROR_PAGE_TABLE) { 1378 u32 pgtbl_err = I915_READ(PGTBL_ER); 1379 pr_err("page table error\n"); 1380 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err); 1381 I915_WRITE(PGTBL_ER, pgtbl_err); 1382 POSTING_READ(PGTBL_ER); 1383 } 1384 } 1385 1386 if (!IS_GEN2(dev)) { 1387 if (eir & I915_ERROR_PAGE_TABLE) { 1388 u32 pgtbl_err = I915_READ(PGTBL_ER); 1389 pr_err("page table error\n"); 1390 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err); 1391 I915_WRITE(PGTBL_ER, pgtbl_err); 1392 POSTING_READ(PGTBL_ER); 1393 } 1394 } 1395 1396 if (eir & I915_ERROR_MEMORY_REFRESH) { 1397 pr_err("memory refresh error:\n"); 1398 for_each_pipe(pipe) 1399 pr_err("pipe %c stat: 0x%08x\n", 1400 pipe_name(pipe), I915_READ(PIPESTAT(pipe))); 1401 /* pipestat has already been acked */ 1402 } 1403 if (eir & I915_ERROR_INSTRUCTION) { 1404 pr_err("instruction error\n"); 1405 pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM)); 1406 for (i = 0; i < ARRAY_SIZE(instdone); i++) 1407 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); 1408 if (INTEL_INFO(dev)->gen < 4) { 1409 u32 ipeir = I915_READ(IPEIR); 1410 1411 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR)); 1412 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR)); 1413 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD)); 1414 I915_WRITE(IPEIR, ipeir); 1415 POSTING_READ(IPEIR); 1416 } else { 1417 u32 ipeir = I915_READ(IPEIR_I965); 1418 1419 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); 1420 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); 1421 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); 1422 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); 1423 I915_WRITE(IPEIR_I965, ipeir); 1424 POSTING_READ(IPEIR_I965); 1425 } 1426 } 1427 1428 I915_WRITE(EIR, eir); 1429 POSTING_READ(EIR); 1430 eir = I915_READ(EIR); 1431 if (eir) { 1432 /* 1433 * some errors might have become stuck, 1434 * mask them. 1435 */ 1436 DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir); 1437 I915_WRITE(EMR, I915_READ(EMR) | eir); 1438 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 1439 } 1440 } 1441 1442 /** 1443 * i915_handle_error - handle an error interrupt 1444 * @dev: drm device 1445 * 1446 * Do some basic checking of regsiter state at error interrupt time and 1447 * dump it to the syslog. Also call i915_capture_error_state() to make 1448 * sure we get a record and make it available in debugfs. Fire a uevent 1449 * so userspace knows something bad happened (should trigger collection 1450 * of a ring dump etc.). 1451 */ 1452 void i915_handle_error(struct drm_device *dev, bool wedged) 1453 { 1454 struct drm_i915_private *dev_priv = dev->dev_private; 1455 struct intel_ring_buffer *ring; 1456 int i; 1457 1458 i915_capture_error_state(dev); 1459 i915_report_and_clear_eir(dev); 1460 1461 if (wedged) { 1462 INIT_COMPLETION(dev_priv->error_completion); 1463 atomic_set(&dev_priv->mm.wedged, 1); 1464 1465 /* 1466 * Wakeup waiting processes so they don't hang 1467 */ 1468 for_each_ring(ring, dev_priv, i) 1469 wake_up_all(&ring->irq_queue); 1470 } 1471 1472 queue_work(dev_priv->wq, &dev_priv->error_work); 1473 } 1474 1475 static void i915_pageflip_stall_check(struct drm_device *dev, int pipe) 1476 { 1477 drm_i915_private_t *dev_priv = dev->dev_private; 1478 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 1479 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1480 struct drm_i915_gem_object *obj; 1481 struct intel_unpin_work *work; 1482 unsigned long flags; 1483 bool stall_detected; 1484 1485 /* Ignore early vblank irqs */ 1486 if (intel_crtc == NULL) 1487 return; 1488 1489 spin_lock_irqsave(&dev->event_lock, flags); 1490 work = intel_crtc->unpin_work; 1491 1492 if (work == NULL || 1493 atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE || 1494 !work->enable_stall_check) { 1495 /* Either the pending flip IRQ arrived, or we're too early. Don't check */ 1496 spin_unlock_irqrestore(&dev->event_lock, flags); 1497 return; 1498 } 1499 1500 /* Potential stall - if we see that the flip has happened, assume a missed interrupt */ 1501 obj = work->pending_flip_obj; 1502 if (INTEL_INFO(dev)->gen >= 4) { 1503 int dspsurf = DSPSURF(intel_crtc->plane); 1504 stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) == 1505 obj->gtt_offset; 1506 } else { 1507 int dspaddr = DSPADDR(intel_crtc->plane); 1508 stall_detected = I915_READ(dspaddr) == (obj->gtt_offset + 1509 crtc->y * crtc->fb->pitches[0] + 1510 crtc->x * crtc->fb->bits_per_pixel/8); 1511 } 1512 1513 spin_unlock_irqrestore(&dev->event_lock, flags); 1514 1515 if (stall_detected) { 1516 DRM_DEBUG_DRIVER("Pageflip stall detected\n"); 1517 intel_prepare_page_flip(dev, intel_crtc->plane); 1518 } 1519 } 1520 1521 /* Called from drm generic code, passed 'crtc' which 1522 * we use as a pipe index 1523 */ 1524 static int i915_enable_vblank(struct drm_device *dev, int pipe) 1525 { 1526 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1527 unsigned long irqflags; 1528 1529 if (!i915_pipe_enabled(dev, pipe)) 1530 return -EINVAL; 1531 1532 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1533 if (INTEL_INFO(dev)->gen >= 4) 1534 i915_enable_pipestat(dev_priv, pipe, 1535 PIPE_START_VBLANK_INTERRUPT_ENABLE); 1536 else 1537 i915_enable_pipestat(dev_priv, pipe, 1538 PIPE_VBLANK_INTERRUPT_ENABLE); 1539 1540 /* maintain vblank delivery even in deep C-states */ 1541 if (dev_priv->info->gen == 3) 1542 I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS)); 1543 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1544 1545 return 0; 1546 } 1547 1548 static int ironlake_enable_vblank(struct drm_device *dev, int pipe) 1549 { 1550 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1551 unsigned long irqflags; 1552 1553 if (!i915_pipe_enabled(dev, pipe)) 1554 return -EINVAL; 1555 1556 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1557 ironlake_enable_display_irq(dev_priv, (pipe == 0) ? 1558 DE_PIPEA_VBLANK : DE_PIPEB_VBLANK); 1559 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1560 1561 return 0; 1562 } 1563 1564 static int ivybridge_enable_vblank(struct drm_device *dev, int pipe) 1565 { 1566 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1567 unsigned long irqflags; 1568 1569 if (!i915_pipe_enabled(dev, pipe)) 1570 return -EINVAL; 1571 1572 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1573 ironlake_enable_display_irq(dev_priv, 1574 DE_PIPEA_VBLANK_IVB << (5 * pipe)); 1575 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1576 1577 return 0; 1578 } 1579 1580 static int valleyview_enable_vblank(struct drm_device *dev, int pipe) 1581 { 1582 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1583 unsigned long irqflags; 1584 u32 imr; 1585 1586 if (!i915_pipe_enabled(dev, pipe)) 1587 return -EINVAL; 1588 1589 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1590 imr = I915_READ(VLV_IMR); 1591 if (pipe == 0) 1592 imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT; 1593 else 1594 imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; 1595 I915_WRITE(VLV_IMR, imr); 1596 i915_enable_pipestat(dev_priv, pipe, 1597 PIPE_START_VBLANK_INTERRUPT_ENABLE); 1598 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1599 1600 return 0; 1601 } 1602 1603 /* Called from drm generic code, passed 'crtc' which 1604 * we use as a pipe index 1605 */ 1606 static void i915_disable_vblank(struct drm_device *dev, int pipe) 1607 { 1608 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1609 unsigned long irqflags; 1610 1611 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1612 if (dev_priv->info->gen == 3) 1613 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS)); 1614 1615 i915_disable_pipestat(dev_priv, pipe, 1616 PIPE_VBLANK_INTERRUPT_ENABLE | 1617 PIPE_START_VBLANK_INTERRUPT_ENABLE); 1618 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1619 } 1620 1621 static void ironlake_disable_vblank(struct drm_device *dev, int pipe) 1622 { 1623 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1624 unsigned long irqflags; 1625 1626 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1627 ironlake_disable_display_irq(dev_priv, (pipe == 0) ? 1628 DE_PIPEA_VBLANK : DE_PIPEB_VBLANK); 1629 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1630 } 1631 1632 static void ivybridge_disable_vblank(struct drm_device *dev, int pipe) 1633 { 1634 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1635 unsigned long irqflags; 1636 1637 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1638 ironlake_disable_display_irq(dev_priv, 1639 DE_PIPEA_VBLANK_IVB << (pipe * 5)); 1640 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1641 } 1642 1643 static void valleyview_disable_vblank(struct drm_device *dev, int pipe) 1644 { 1645 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1646 unsigned long irqflags; 1647 u32 imr; 1648 1649 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1650 i915_disable_pipestat(dev_priv, pipe, 1651 PIPE_START_VBLANK_INTERRUPT_ENABLE); 1652 imr = I915_READ(VLV_IMR); 1653 if (pipe == 0) 1654 imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT; 1655 else 1656 imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; 1657 I915_WRITE(VLV_IMR, imr); 1658 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1659 } 1660 1661 static u32 1662 ring_last_seqno(struct intel_ring_buffer *ring) 1663 { 1664 return list_entry(ring->request_list.prev, 1665 struct drm_i915_gem_request, list)->seqno; 1666 } 1667 1668 static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err) 1669 { 1670 if (list_empty(&ring->request_list) || 1671 i915_seqno_passed(ring->get_seqno(ring, false), 1672 ring_last_seqno(ring))) { 1673 /* Issue a wake-up to catch stuck h/w. */ 1674 if (waitqueue_active(&ring->irq_queue)) { 1675 DRM_ERROR("Hangcheck timer elapsed... %s idle\n", 1676 ring->name); 1677 wake_up_all(&ring->irq_queue); 1678 *err = true; 1679 } 1680 return true; 1681 } 1682 return false; 1683 } 1684 1685 static bool kick_ring(struct intel_ring_buffer *ring) 1686 { 1687 struct drm_device *dev = ring->dev; 1688 struct drm_i915_private *dev_priv = dev->dev_private; 1689 u32 tmp = I915_READ_CTL(ring); 1690 if (tmp & RING_WAIT) { 1691 DRM_ERROR("Kicking stuck wait on %s\n", 1692 ring->name); 1693 I915_WRITE_CTL(ring, tmp); 1694 return true; 1695 } 1696 return false; 1697 } 1698 1699 static bool i915_hangcheck_hung(struct drm_device *dev) 1700 { 1701 drm_i915_private_t *dev_priv = dev->dev_private; 1702 1703 if (dev_priv->hangcheck_count++ > 1) { 1704 bool hung = true; 1705 1706 DRM_ERROR("Hangcheck timer elapsed... GPU hung\n"); 1707 i915_handle_error(dev, true); 1708 1709 if (!IS_GEN2(dev)) { 1710 struct intel_ring_buffer *ring; 1711 int i; 1712 1713 /* Is the chip hanging on a WAIT_FOR_EVENT? 1714 * If so we can simply poke the RB_WAIT bit 1715 * and break the hang. This should work on 1716 * all but the second generation chipsets. 1717 */ 1718 for_each_ring(ring, dev_priv, i) 1719 hung &= !kick_ring(ring); 1720 } 1721 1722 return hung; 1723 } 1724 1725 return false; 1726 } 1727 1728 /** 1729 * This is called when the chip hasn't reported back with completed 1730 * batchbuffers in a long time. The first time this is called we simply record 1731 * ACTHD. If ACTHD hasn't changed by the time the hangcheck timer elapses 1732 * again, we assume the chip is wedged and try to fix it. 1733 */ 1734 void i915_hangcheck_elapsed(unsigned long data) 1735 { 1736 struct drm_device *dev = (struct drm_device *)data; 1737 drm_i915_private_t *dev_priv = dev->dev_private; 1738 uint32_t acthd[I915_NUM_RINGS], instdone[I915_NUM_INSTDONE_REG]; 1739 struct intel_ring_buffer *ring; 1740 bool err = false, idle; 1741 int i; 1742 1743 if (!i915_enable_hangcheck) 1744 return; 1745 1746 memset(acthd, 0, sizeof(acthd)); 1747 idle = true; 1748 for_each_ring(ring, dev_priv, i) { 1749 idle &= i915_hangcheck_ring_idle(ring, &err); 1750 acthd[i] = intel_ring_get_active_head(ring); 1751 } 1752 1753 /* If all work is done then ACTHD clearly hasn't advanced. */ 1754 if (idle) { 1755 if (err) { 1756 if (i915_hangcheck_hung(dev)) 1757 return; 1758 1759 goto repeat; 1760 } 1761 1762 dev_priv->hangcheck_count = 0; 1763 return; 1764 } 1765 1766 i915_get_extra_instdone(dev, instdone); 1767 if (memcmp(dev_priv->last_acthd, acthd, sizeof(acthd)) == 0 && 1768 memcmp(dev_priv->prev_instdone, instdone, sizeof(instdone)) == 0) { 1769 if (i915_hangcheck_hung(dev)) 1770 return; 1771 } else { 1772 dev_priv->hangcheck_count = 0; 1773 1774 memcpy(dev_priv->last_acthd, acthd, sizeof(acthd)); 1775 memcpy(dev_priv->prev_instdone, instdone, sizeof(instdone)); 1776 } 1777 1778 repeat: 1779 /* Reset timer case chip hangs without another request being added */ 1780 mod_timer(&dev_priv->hangcheck_timer, 1781 round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES)); 1782 } 1783 1784 /* drm_dma.h hooks 1785 */ 1786 static void ironlake_irq_preinstall(struct drm_device *dev) 1787 { 1788 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1789 1790 atomic_set(&dev_priv->irq_received, 0); 1791 1792 I915_WRITE(HWSTAM, 0xeffe); 1793 1794 /* XXX hotplug from PCH */ 1795 1796 I915_WRITE(DEIMR, 0xffffffff); 1797 I915_WRITE(DEIER, 0x0); 1798 POSTING_READ(DEIER); 1799 1800 /* and GT */ 1801 I915_WRITE(GTIMR, 0xffffffff); 1802 I915_WRITE(GTIER, 0x0); 1803 POSTING_READ(GTIER); 1804 1805 /* south display irq */ 1806 I915_WRITE(SDEIMR, 0xffffffff); 1807 I915_WRITE(SDEIER, 0x0); 1808 POSTING_READ(SDEIER); 1809 } 1810 1811 static void valleyview_irq_preinstall(struct drm_device *dev) 1812 { 1813 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1814 int pipe; 1815 1816 atomic_set(&dev_priv->irq_received, 0); 1817 1818 /* VLV magic */ 1819 I915_WRITE(VLV_IMR, 0); 1820 I915_WRITE(RING_IMR(RENDER_RING_BASE), 0); 1821 I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0); 1822 I915_WRITE(RING_IMR(BLT_RING_BASE), 0); 1823 1824 /* and GT */ 1825 I915_WRITE(GTIIR, I915_READ(GTIIR)); 1826 I915_WRITE(GTIIR, I915_READ(GTIIR)); 1827 I915_WRITE(GTIMR, 0xffffffff); 1828 I915_WRITE(GTIER, 0x0); 1829 POSTING_READ(GTIER); 1830 1831 I915_WRITE(DPINVGTT, 0xff); 1832 1833 I915_WRITE(PORT_HOTPLUG_EN, 0); 1834 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 1835 for_each_pipe(pipe) 1836 I915_WRITE(PIPESTAT(pipe), 0xffff); 1837 I915_WRITE(VLV_IIR, 0xffffffff); 1838 I915_WRITE(VLV_IMR, 0xffffffff); 1839 I915_WRITE(VLV_IER, 0x0); 1840 POSTING_READ(VLV_IER); 1841 } 1842 1843 /* 1844 * Enable digital hotplug on the PCH, and configure the DP short pulse 1845 * duration to 2ms (which is the minimum in the Display Port spec) 1846 * 1847 * This register is the same on all known PCH chips. 1848 */ 1849 1850 static void ironlake_enable_pch_hotplug(struct drm_device *dev) 1851 { 1852 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1853 u32 hotplug; 1854 1855 hotplug = I915_READ(PCH_PORT_HOTPLUG); 1856 hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK); 1857 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms; 1858 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms; 1859 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms; 1860 I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 1861 } 1862 1863 static int ironlake_irq_postinstall(struct drm_device *dev) 1864 { 1865 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1866 /* enable kind of interrupts always enabled */ 1867 u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | 1868 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE; 1869 u32 render_irqs; 1870 u32 hotplug_mask; 1871 1872 dev_priv->irq_mask = ~display_mask; 1873 1874 /* should always can generate irq */ 1875 I915_WRITE(DEIIR, I915_READ(DEIIR)); 1876 I915_WRITE(DEIMR, dev_priv->irq_mask); 1877 I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK); 1878 POSTING_READ(DEIER); 1879 1880 dev_priv->gt_irq_mask = ~0; 1881 1882 I915_WRITE(GTIIR, I915_READ(GTIIR)); 1883 I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 1884 1885 if (IS_GEN6(dev)) 1886 render_irqs = 1887 GT_USER_INTERRUPT | 1888 GEN6_BSD_USER_INTERRUPT | 1889 GEN6_BLITTER_USER_INTERRUPT; 1890 else 1891 render_irqs = 1892 GT_USER_INTERRUPT | 1893 GT_PIPE_NOTIFY | 1894 GT_BSD_USER_INTERRUPT; 1895 I915_WRITE(GTIER, render_irqs); 1896 POSTING_READ(GTIER); 1897 1898 if (HAS_PCH_CPT(dev)) { 1899 hotplug_mask = (SDE_CRT_HOTPLUG_CPT | 1900 SDE_PORTB_HOTPLUG_CPT | 1901 SDE_PORTC_HOTPLUG_CPT | 1902 SDE_PORTD_HOTPLUG_CPT); 1903 } else { 1904 hotplug_mask = (SDE_CRT_HOTPLUG | 1905 SDE_PORTB_HOTPLUG | 1906 SDE_PORTC_HOTPLUG | 1907 SDE_PORTD_HOTPLUG | 1908 SDE_AUX_MASK); 1909 } 1910 1911 dev_priv->pch_irq_mask = ~hotplug_mask; 1912 1913 I915_WRITE(SDEIIR, I915_READ(SDEIIR)); 1914 I915_WRITE(SDEIMR, dev_priv->pch_irq_mask); 1915 I915_WRITE(SDEIER, hotplug_mask); 1916 POSTING_READ(SDEIER); 1917 1918 ironlake_enable_pch_hotplug(dev); 1919 1920 if (IS_IRONLAKE_M(dev)) { 1921 /* Clear & enable PCU event interrupts */ 1922 I915_WRITE(DEIIR, DE_PCU_EVENT); 1923 I915_WRITE(DEIER, I915_READ(DEIER) | DE_PCU_EVENT); 1924 ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT); 1925 } 1926 1927 return 0; 1928 } 1929 1930 static int ivybridge_irq_postinstall(struct drm_device *dev) 1931 { 1932 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1933 /* enable kind of interrupts always enabled */ 1934 u32 display_mask = 1935 DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB | 1936 DE_PLANEC_FLIP_DONE_IVB | 1937 DE_PLANEB_FLIP_DONE_IVB | 1938 DE_PLANEA_FLIP_DONE_IVB; 1939 u32 render_irqs; 1940 u32 hotplug_mask; 1941 1942 dev_priv->irq_mask = ~display_mask; 1943 1944 /* should always can generate irq */ 1945 I915_WRITE(DEIIR, I915_READ(DEIIR)); 1946 I915_WRITE(DEIMR, dev_priv->irq_mask); 1947 I915_WRITE(DEIER, 1948 display_mask | 1949 DE_PIPEC_VBLANK_IVB | 1950 DE_PIPEB_VBLANK_IVB | 1951 DE_PIPEA_VBLANK_IVB); 1952 POSTING_READ(DEIER); 1953 1954 dev_priv->gt_irq_mask = ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT; 1955 1956 I915_WRITE(GTIIR, I915_READ(GTIIR)); 1957 I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 1958 1959 render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT | 1960 GEN6_BLITTER_USER_INTERRUPT | GT_GEN7_L3_PARITY_ERROR_INTERRUPT; 1961 I915_WRITE(GTIER, render_irqs); 1962 POSTING_READ(GTIER); 1963 1964 hotplug_mask = (SDE_CRT_HOTPLUG_CPT | 1965 SDE_PORTB_HOTPLUG_CPT | 1966 SDE_PORTC_HOTPLUG_CPT | 1967 SDE_PORTD_HOTPLUG_CPT); 1968 dev_priv->pch_irq_mask = ~hotplug_mask; 1969 1970 I915_WRITE(SDEIIR, I915_READ(SDEIIR)); 1971 I915_WRITE(SDEIMR, dev_priv->pch_irq_mask); 1972 I915_WRITE(SDEIER, hotplug_mask); 1973 POSTING_READ(SDEIER); 1974 1975 ironlake_enable_pch_hotplug(dev); 1976 1977 return 0; 1978 } 1979 1980 static int valleyview_irq_postinstall(struct drm_device *dev) 1981 { 1982 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1983 u32 enable_mask; 1984 u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN); 1985 u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV; 1986 u32 render_irqs; 1987 u16 msid; 1988 1989 enable_mask = I915_DISPLAY_PORT_INTERRUPT; 1990 enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 1991 I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT | 1992 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 1993 I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; 1994 1995 /* 1996 *Leave vblank interrupts masked initially. enable/disable will 1997 * toggle them based on usage. 1998 */ 1999 dev_priv->irq_mask = (~enable_mask) | 2000 I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT | 2001 I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; 2002 2003 dev_priv->pipestat[0] = 0; 2004 dev_priv->pipestat[1] = 0; 2005 2006 /* Hack for broken MSIs on VLV */ 2007 pci_write_config_dword(dev_priv->dev->pdev, 0x94, 0xfee00000); 2008 pci_read_config_word(dev->pdev, 0x98, &msid); 2009 msid &= 0xff; /* mask out delivery bits */ 2010 msid |= (1<<14); 2011 pci_write_config_word(dev_priv->dev->pdev, 0x98, msid); 2012 2013 I915_WRITE(VLV_IMR, dev_priv->irq_mask); 2014 I915_WRITE(VLV_IER, enable_mask); 2015 I915_WRITE(VLV_IIR, 0xffffffff); 2016 I915_WRITE(PIPESTAT(0), 0xffff); 2017 I915_WRITE(PIPESTAT(1), 0xffff); 2018 POSTING_READ(VLV_IER); 2019 2020 i915_enable_pipestat(dev_priv, 0, pipestat_enable); 2021 i915_enable_pipestat(dev_priv, 1, pipestat_enable); 2022 2023 I915_WRITE(VLV_IIR, 0xffffffff); 2024 I915_WRITE(VLV_IIR, 0xffffffff); 2025 2026 I915_WRITE(GTIIR, I915_READ(GTIIR)); 2027 I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 2028 2029 render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT | 2030 GEN6_BLITTER_USER_INTERRUPT; 2031 I915_WRITE(GTIER, render_irqs); 2032 POSTING_READ(GTIER); 2033 2034 /* ack & enable invalid PTE error interrupts */ 2035 #if 0 /* FIXME: add support to irq handler for checking these bits */ 2036 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK); 2037 I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK); 2038 #endif 2039 2040 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); 2041 /* Note HDMI and DP share bits */ 2042 if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS) 2043 hotplug_en |= HDMIB_HOTPLUG_INT_EN; 2044 if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS) 2045 hotplug_en |= HDMIC_HOTPLUG_INT_EN; 2046 if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS) 2047 hotplug_en |= HDMID_HOTPLUG_INT_EN; 2048 if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915) 2049 hotplug_en |= SDVOC_HOTPLUG_INT_EN; 2050 if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915) 2051 hotplug_en |= SDVOB_HOTPLUG_INT_EN; 2052 if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) { 2053 hotplug_en |= CRT_HOTPLUG_INT_EN; 2054 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; 2055 } 2056 2057 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); 2058 2059 return 0; 2060 } 2061 2062 static void valleyview_irq_uninstall(struct drm_device *dev) 2063 { 2064 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2065 int pipe; 2066 2067 if (!dev_priv) 2068 return; 2069 2070 for_each_pipe(pipe) 2071 I915_WRITE(PIPESTAT(pipe), 0xffff); 2072 2073 I915_WRITE(HWSTAM, 0xffffffff); 2074 I915_WRITE(PORT_HOTPLUG_EN, 0); 2075 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 2076 for_each_pipe(pipe) 2077 I915_WRITE(PIPESTAT(pipe), 0xffff); 2078 I915_WRITE(VLV_IIR, 0xffffffff); 2079 I915_WRITE(VLV_IMR, 0xffffffff); 2080 I915_WRITE(VLV_IER, 0x0); 2081 POSTING_READ(VLV_IER); 2082 } 2083 2084 static void ironlake_irq_uninstall(struct drm_device *dev) 2085 { 2086 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2087 2088 if (!dev_priv) 2089 return; 2090 2091 I915_WRITE(HWSTAM, 0xffffffff); 2092 2093 I915_WRITE(DEIMR, 0xffffffff); 2094 I915_WRITE(DEIER, 0x0); 2095 I915_WRITE(DEIIR, I915_READ(DEIIR)); 2096 2097 I915_WRITE(GTIMR, 0xffffffff); 2098 I915_WRITE(GTIER, 0x0); 2099 I915_WRITE(GTIIR, I915_READ(GTIIR)); 2100 2101 I915_WRITE(SDEIMR, 0xffffffff); 2102 I915_WRITE(SDEIER, 0x0); 2103 I915_WRITE(SDEIIR, I915_READ(SDEIIR)); 2104 } 2105 2106 static void i8xx_irq_preinstall(struct drm_device * dev) 2107 { 2108 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2109 int pipe; 2110 2111 atomic_set(&dev_priv->irq_received, 0); 2112 2113 for_each_pipe(pipe) 2114 I915_WRITE(PIPESTAT(pipe), 0); 2115 I915_WRITE16(IMR, 0xffff); 2116 I915_WRITE16(IER, 0x0); 2117 POSTING_READ16(IER); 2118 } 2119 2120 static int i8xx_irq_postinstall(struct drm_device *dev) 2121 { 2122 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2123 2124 dev_priv->pipestat[0] = 0; 2125 dev_priv->pipestat[1] = 0; 2126 2127 I915_WRITE16(EMR, 2128 ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); 2129 2130 /* Unmask the interrupts that we always want on. */ 2131 dev_priv->irq_mask = 2132 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 2133 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 2134 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 2135 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | 2136 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 2137 I915_WRITE16(IMR, dev_priv->irq_mask); 2138 2139 I915_WRITE16(IER, 2140 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 2141 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 2142 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT | 2143 I915_USER_INTERRUPT); 2144 POSTING_READ16(IER); 2145 2146 return 0; 2147 } 2148 2149 static irqreturn_t i8xx_irq_handler(int irq, void *arg) 2150 { 2151 struct drm_device *dev = (struct drm_device *) arg; 2152 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2153 u16 iir, new_iir; 2154 u32 pipe_stats[2]; 2155 unsigned long irqflags; 2156 int irq_received; 2157 int pipe; 2158 u16 flip_mask = 2159 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 2160 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 2161 2162 atomic_inc(&dev_priv->irq_received); 2163 2164 iir = I915_READ16(IIR); 2165 if (iir == 0) 2166 return IRQ_NONE; 2167 2168 while (iir & ~flip_mask) { 2169 /* Can't rely on pipestat interrupt bit in iir as it might 2170 * have been cleared after the pipestat interrupt was received. 2171 * It doesn't set the bit in iir again, but it still produces 2172 * interrupts (for non-MSI). 2173 */ 2174 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2175 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 2176 i915_handle_error(dev, false); 2177 2178 for_each_pipe(pipe) { 2179 int reg = PIPESTAT(pipe); 2180 pipe_stats[pipe] = I915_READ(reg); 2181 2182 /* 2183 * Clear the PIPE*STAT regs before the IIR 2184 */ 2185 if (pipe_stats[pipe] & 0x8000ffff) { 2186 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 2187 DRM_DEBUG_DRIVER("pipe %c underrun\n", 2188 pipe_name(pipe)); 2189 I915_WRITE(reg, pipe_stats[pipe]); 2190 irq_received = 1; 2191 } 2192 } 2193 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2194 2195 I915_WRITE16(IIR, iir & ~flip_mask); 2196 new_iir = I915_READ16(IIR); /* Flush posted writes */ 2197 2198 i915_update_dri1_breadcrumb(dev); 2199 2200 if (iir & I915_USER_INTERRUPT) 2201 notify_ring(dev, &dev_priv->ring[RCS]); 2202 2203 if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS && 2204 drm_handle_vblank(dev, 0)) { 2205 if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) { 2206 intel_prepare_page_flip(dev, 0); 2207 intel_finish_page_flip(dev, 0); 2208 flip_mask &= ~I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT; 2209 } 2210 } 2211 2212 if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS && 2213 drm_handle_vblank(dev, 1)) { 2214 if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT) { 2215 intel_prepare_page_flip(dev, 1); 2216 intel_finish_page_flip(dev, 1); 2217 flip_mask &= ~I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 2218 } 2219 } 2220 2221 iir = new_iir; 2222 } 2223 2224 return IRQ_HANDLED; 2225 } 2226 2227 static void i8xx_irq_uninstall(struct drm_device * dev) 2228 { 2229 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2230 int pipe; 2231 2232 for_each_pipe(pipe) { 2233 /* Clear enable bits; then clear status bits */ 2234 I915_WRITE(PIPESTAT(pipe), 0); 2235 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); 2236 } 2237 I915_WRITE16(IMR, 0xffff); 2238 I915_WRITE16(IER, 0x0); 2239 I915_WRITE16(IIR, I915_READ16(IIR)); 2240 } 2241 2242 static void i915_irq_preinstall(struct drm_device * dev) 2243 { 2244 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2245 int pipe; 2246 2247 atomic_set(&dev_priv->irq_received, 0); 2248 2249 if (I915_HAS_HOTPLUG(dev)) { 2250 I915_WRITE(PORT_HOTPLUG_EN, 0); 2251 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 2252 } 2253 2254 I915_WRITE16(HWSTAM, 0xeffe); 2255 for_each_pipe(pipe) 2256 I915_WRITE(PIPESTAT(pipe), 0); 2257 I915_WRITE(IMR, 0xffffffff); 2258 I915_WRITE(IER, 0x0); 2259 POSTING_READ(IER); 2260 } 2261 2262 static int i915_irq_postinstall(struct drm_device *dev) 2263 { 2264 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2265 u32 enable_mask; 2266 2267 dev_priv->pipestat[0] = 0; 2268 dev_priv->pipestat[1] = 0; 2269 2270 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); 2271 2272 /* Unmask the interrupts that we always want on. */ 2273 dev_priv->irq_mask = 2274 ~(I915_ASLE_INTERRUPT | 2275 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 2276 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 2277 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 2278 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | 2279 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 2280 2281 enable_mask = 2282 I915_ASLE_INTERRUPT | 2283 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 2284 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 2285 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT | 2286 I915_USER_INTERRUPT; 2287 2288 if (I915_HAS_HOTPLUG(dev)) { 2289 /* Enable in IER... */ 2290 enable_mask |= I915_DISPLAY_PORT_INTERRUPT; 2291 /* and unmask in IMR */ 2292 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT; 2293 } 2294 2295 I915_WRITE(IMR, dev_priv->irq_mask); 2296 I915_WRITE(IER, enable_mask); 2297 POSTING_READ(IER); 2298 2299 if (I915_HAS_HOTPLUG(dev)) { 2300 u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN); 2301 2302 if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS) 2303 hotplug_en |= HDMIB_HOTPLUG_INT_EN; 2304 if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS) 2305 hotplug_en |= HDMIC_HOTPLUG_INT_EN; 2306 if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS) 2307 hotplug_en |= HDMID_HOTPLUG_INT_EN; 2308 if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915) 2309 hotplug_en |= SDVOC_HOTPLUG_INT_EN; 2310 if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915) 2311 hotplug_en |= SDVOB_HOTPLUG_INT_EN; 2312 if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) { 2313 hotplug_en |= CRT_HOTPLUG_INT_EN; 2314 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; 2315 } 2316 2317 /* Ignore TV since it's buggy */ 2318 2319 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); 2320 } 2321 2322 intel_opregion_enable_asle(dev); 2323 2324 return 0; 2325 } 2326 2327 static irqreturn_t i915_irq_handler(int irq, void *arg) 2328 { 2329 struct drm_device *dev = (struct drm_device *) arg; 2330 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2331 u32 iir, new_iir, pipe_stats[I915_MAX_PIPES]; 2332 unsigned long irqflags; 2333 u32 flip_mask = 2334 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 2335 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 2336 u32 flip[2] = { 2337 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT, 2338 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT 2339 }; 2340 int pipe, ret = IRQ_NONE; 2341 2342 atomic_inc(&dev_priv->irq_received); 2343 2344 iir = I915_READ(IIR); 2345 do { 2346 bool irq_received = (iir & ~flip_mask) != 0; 2347 bool blc_event = false; 2348 2349 /* Can't rely on pipestat interrupt bit in iir as it might 2350 * have been cleared after the pipestat interrupt was received. 2351 * It doesn't set the bit in iir again, but it still produces 2352 * interrupts (for non-MSI). 2353 */ 2354 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2355 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 2356 i915_handle_error(dev, false); 2357 2358 for_each_pipe(pipe) { 2359 int reg = PIPESTAT(pipe); 2360 pipe_stats[pipe] = I915_READ(reg); 2361 2362 /* Clear the PIPE*STAT regs before the IIR */ 2363 if (pipe_stats[pipe] & 0x8000ffff) { 2364 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 2365 DRM_DEBUG_DRIVER("pipe %c underrun\n", 2366 pipe_name(pipe)); 2367 I915_WRITE(reg, pipe_stats[pipe]); 2368 irq_received = true; 2369 } 2370 } 2371 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2372 2373 if (!irq_received) 2374 break; 2375 2376 /* Consume port. Then clear IIR or we'll miss events */ 2377 if ((I915_HAS_HOTPLUG(dev)) && 2378 (iir & I915_DISPLAY_PORT_INTERRUPT)) { 2379 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); 2380 2381 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", 2382 hotplug_status); 2383 if (hotplug_status & dev_priv->hotplug_supported_mask) 2384 queue_work(dev_priv->wq, 2385 &dev_priv->hotplug_work); 2386 2387 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); 2388 POSTING_READ(PORT_HOTPLUG_STAT); 2389 } 2390 2391 I915_WRITE(IIR, iir & ~flip_mask); 2392 new_iir = I915_READ(IIR); /* Flush posted writes */ 2393 2394 if (iir & I915_USER_INTERRUPT) 2395 notify_ring(dev, &dev_priv->ring[RCS]); 2396 2397 for_each_pipe(pipe) { 2398 int plane = pipe; 2399 if (IS_MOBILE(dev)) 2400 plane = !plane; 2401 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS && 2402 drm_handle_vblank(dev, pipe)) { 2403 if (iir & flip[plane]) { 2404 intel_prepare_page_flip(dev, plane); 2405 intel_finish_page_flip(dev, pipe); 2406 flip_mask &= ~flip[plane]; 2407 } 2408 } 2409 2410 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 2411 blc_event = true; 2412 } 2413 2414 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 2415 intel_opregion_asle_intr(dev); 2416 2417 /* With MSI, interrupts are only generated when iir 2418 * transitions from zero to nonzero. If another bit got 2419 * set while we were handling the existing iir bits, then 2420 * we would never get another interrupt. 2421 * 2422 * This is fine on non-MSI as well, as if we hit this path 2423 * we avoid exiting the interrupt handler only to generate 2424 * another one. 2425 * 2426 * Note that for MSI this could cause a stray interrupt report 2427 * if an interrupt landed in the time between writing IIR and 2428 * the posting read. This should be rare enough to never 2429 * trigger the 99% of 100,000 interrupts test for disabling 2430 * stray interrupts. 2431 */ 2432 ret = IRQ_HANDLED; 2433 iir = new_iir; 2434 } while (iir & ~flip_mask); 2435 2436 i915_update_dri1_breadcrumb(dev); 2437 2438 return ret; 2439 } 2440 2441 static void i915_irq_uninstall(struct drm_device * dev) 2442 { 2443 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2444 int pipe; 2445 2446 if (I915_HAS_HOTPLUG(dev)) { 2447 I915_WRITE(PORT_HOTPLUG_EN, 0); 2448 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 2449 } 2450 2451 I915_WRITE16(HWSTAM, 0xffff); 2452 for_each_pipe(pipe) { 2453 /* Clear enable bits; then clear status bits */ 2454 I915_WRITE(PIPESTAT(pipe), 0); 2455 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); 2456 } 2457 I915_WRITE(IMR, 0xffffffff); 2458 I915_WRITE(IER, 0x0); 2459 2460 I915_WRITE(IIR, I915_READ(IIR)); 2461 } 2462 2463 static void i965_irq_preinstall(struct drm_device * dev) 2464 { 2465 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2466 int pipe; 2467 2468 atomic_set(&dev_priv->irq_received, 0); 2469 2470 I915_WRITE(PORT_HOTPLUG_EN, 0); 2471 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 2472 2473 I915_WRITE(HWSTAM, 0xeffe); 2474 for_each_pipe(pipe) 2475 I915_WRITE(PIPESTAT(pipe), 0); 2476 I915_WRITE(IMR, 0xffffffff); 2477 I915_WRITE(IER, 0x0); 2478 POSTING_READ(IER); 2479 } 2480 2481 static int i965_irq_postinstall(struct drm_device *dev) 2482 { 2483 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2484 u32 hotplug_en; 2485 u32 enable_mask; 2486 u32 error_mask; 2487 2488 /* Unmask the interrupts that we always want on. */ 2489 dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT | 2490 I915_DISPLAY_PORT_INTERRUPT | 2491 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 2492 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 2493 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 2494 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | 2495 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 2496 2497 enable_mask = ~dev_priv->irq_mask; 2498 enable_mask |= I915_USER_INTERRUPT; 2499 2500 if (IS_G4X(dev)) 2501 enable_mask |= I915_BSD_USER_INTERRUPT; 2502 2503 dev_priv->pipestat[0] = 0; 2504 dev_priv->pipestat[1] = 0; 2505 2506 /* 2507 * Enable some error detection, note the instruction error mask 2508 * bit is reserved, so we leave it masked. 2509 */ 2510 if (IS_G4X(dev)) { 2511 error_mask = ~(GM45_ERROR_PAGE_TABLE | 2512 GM45_ERROR_MEM_PRIV | 2513 GM45_ERROR_CP_PRIV | 2514 I915_ERROR_MEMORY_REFRESH); 2515 } else { 2516 error_mask = ~(I915_ERROR_PAGE_TABLE | 2517 I915_ERROR_MEMORY_REFRESH); 2518 } 2519 I915_WRITE(EMR, error_mask); 2520 2521 I915_WRITE(IMR, dev_priv->irq_mask); 2522 I915_WRITE(IER, enable_mask); 2523 POSTING_READ(IER); 2524 2525 /* Note HDMI and DP share hotplug bits */ 2526 hotplug_en = 0; 2527 if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS) 2528 hotplug_en |= HDMIB_HOTPLUG_INT_EN; 2529 if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS) 2530 hotplug_en |= HDMIC_HOTPLUG_INT_EN; 2531 if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS) 2532 hotplug_en |= HDMID_HOTPLUG_INT_EN; 2533 if (IS_G4X(dev)) { 2534 if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_G4X) 2535 hotplug_en |= SDVOC_HOTPLUG_INT_EN; 2536 if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_G4X) 2537 hotplug_en |= SDVOB_HOTPLUG_INT_EN; 2538 } else { 2539 if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I965) 2540 hotplug_en |= SDVOC_HOTPLUG_INT_EN; 2541 if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I965) 2542 hotplug_en |= SDVOB_HOTPLUG_INT_EN; 2543 } 2544 if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) { 2545 hotplug_en |= CRT_HOTPLUG_INT_EN; 2546 2547 /* Programming the CRT detection parameters tends 2548 to generate a spurious hotplug event about three 2549 seconds later. So just do it once. 2550 */ 2551 if (IS_G4X(dev)) 2552 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64; 2553 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; 2554 } 2555 2556 /* Ignore TV since it's buggy */ 2557 2558 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); 2559 2560 intel_opregion_enable_asle(dev); 2561 2562 return 0; 2563 } 2564 2565 static irqreturn_t i965_irq_handler(int irq, void *arg) 2566 { 2567 struct drm_device *dev = (struct drm_device *) arg; 2568 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2569 u32 iir, new_iir; 2570 u32 pipe_stats[I915_MAX_PIPES]; 2571 unsigned long irqflags; 2572 int irq_received; 2573 int ret = IRQ_NONE, pipe; 2574 2575 atomic_inc(&dev_priv->irq_received); 2576 2577 iir = I915_READ(IIR); 2578 2579 for (;;) { 2580 bool blc_event = false; 2581 2582 irq_received = iir != 0; 2583 2584 /* Can't rely on pipestat interrupt bit in iir as it might 2585 * have been cleared after the pipestat interrupt was received. 2586 * It doesn't set the bit in iir again, but it still produces 2587 * interrupts (for non-MSI). 2588 */ 2589 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2590 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 2591 i915_handle_error(dev, false); 2592 2593 for_each_pipe(pipe) { 2594 int reg = PIPESTAT(pipe); 2595 pipe_stats[pipe] = I915_READ(reg); 2596 2597 /* 2598 * Clear the PIPE*STAT regs before the IIR 2599 */ 2600 if (pipe_stats[pipe] & 0x8000ffff) { 2601 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 2602 DRM_DEBUG_DRIVER("pipe %c underrun\n", 2603 pipe_name(pipe)); 2604 I915_WRITE(reg, pipe_stats[pipe]); 2605 irq_received = 1; 2606 } 2607 } 2608 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2609 2610 if (!irq_received) 2611 break; 2612 2613 ret = IRQ_HANDLED; 2614 2615 /* Consume port. Then clear IIR or we'll miss events */ 2616 if (iir & I915_DISPLAY_PORT_INTERRUPT) { 2617 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); 2618 2619 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", 2620 hotplug_status); 2621 if (hotplug_status & dev_priv->hotplug_supported_mask) 2622 queue_work(dev_priv->wq, 2623 &dev_priv->hotplug_work); 2624 2625 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); 2626 I915_READ(PORT_HOTPLUG_STAT); 2627 } 2628 2629 I915_WRITE(IIR, iir); 2630 new_iir = I915_READ(IIR); /* Flush posted writes */ 2631 2632 if (iir & I915_USER_INTERRUPT) 2633 notify_ring(dev, &dev_priv->ring[RCS]); 2634 if (iir & I915_BSD_USER_INTERRUPT) 2635 notify_ring(dev, &dev_priv->ring[VCS]); 2636 2637 if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) 2638 intel_prepare_page_flip(dev, 0); 2639 2640 if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT) 2641 intel_prepare_page_flip(dev, 1); 2642 2643 for_each_pipe(pipe) { 2644 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS && 2645 drm_handle_vblank(dev, pipe)) { 2646 i915_pageflip_stall_check(dev, pipe); 2647 intel_finish_page_flip(dev, pipe); 2648 } 2649 2650 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 2651 blc_event = true; 2652 } 2653 2654 2655 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 2656 intel_opregion_asle_intr(dev); 2657 2658 /* With MSI, interrupts are only generated when iir 2659 * transitions from zero to nonzero. If another bit got 2660 * set while we were handling the existing iir bits, then 2661 * we would never get another interrupt. 2662 * 2663 * This is fine on non-MSI as well, as if we hit this path 2664 * we avoid exiting the interrupt handler only to generate 2665 * another one. 2666 * 2667 * Note that for MSI this could cause a stray interrupt report 2668 * if an interrupt landed in the time between writing IIR and 2669 * the posting read. This should be rare enough to never 2670 * trigger the 99% of 100,000 interrupts test for disabling 2671 * stray interrupts. 2672 */ 2673 iir = new_iir; 2674 } 2675 2676 i915_update_dri1_breadcrumb(dev); 2677 2678 return ret; 2679 } 2680 2681 static void i965_irq_uninstall(struct drm_device * dev) 2682 { 2683 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2684 int pipe; 2685 2686 if (!dev_priv) 2687 return; 2688 2689 I915_WRITE(PORT_HOTPLUG_EN, 0); 2690 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 2691 2692 I915_WRITE(HWSTAM, 0xffffffff); 2693 for_each_pipe(pipe) 2694 I915_WRITE(PIPESTAT(pipe), 0); 2695 I915_WRITE(IMR, 0xffffffff); 2696 I915_WRITE(IER, 0x0); 2697 2698 for_each_pipe(pipe) 2699 I915_WRITE(PIPESTAT(pipe), 2700 I915_READ(PIPESTAT(pipe)) & 0x8000ffff); 2701 I915_WRITE(IIR, I915_READ(IIR)); 2702 } 2703 2704 void intel_irq_init(struct drm_device *dev) 2705 { 2706 struct drm_i915_private *dev_priv = dev->dev_private; 2707 2708 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func); 2709 INIT_WORK(&dev_priv->error_work, i915_error_work_func); 2710 INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work); 2711 INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work); 2712 2713 dev->driver->get_vblank_counter = i915_get_vblank_counter; 2714 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ 2715 if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) { 2716 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */ 2717 dev->driver->get_vblank_counter = gm45_get_vblank_counter; 2718 } 2719 2720 if (drm_core_check_feature(dev, DRIVER_MODESET)) 2721 dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp; 2722 else 2723 dev->driver->get_vblank_timestamp = NULL; 2724 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos; 2725 2726 if (IS_VALLEYVIEW(dev)) { 2727 dev->driver->irq_handler = valleyview_irq_handler; 2728 dev->driver->irq_preinstall = valleyview_irq_preinstall; 2729 dev->driver->irq_postinstall = valleyview_irq_postinstall; 2730 dev->driver->irq_uninstall = valleyview_irq_uninstall; 2731 dev->driver->enable_vblank = valleyview_enable_vblank; 2732 dev->driver->disable_vblank = valleyview_disable_vblank; 2733 } else if (IS_IVYBRIDGE(dev)) { 2734 /* Share pre & uninstall handlers with ILK/SNB */ 2735 dev->driver->irq_handler = ivybridge_irq_handler; 2736 dev->driver->irq_preinstall = ironlake_irq_preinstall; 2737 dev->driver->irq_postinstall = ivybridge_irq_postinstall; 2738 dev->driver->irq_uninstall = ironlake_irq_uninstall; 2739 dev->driver->enable_vblank = ivybridge_enable_vblank; 2740 dev->driver->disable_vblank = ivybridge_disable_vblank; 2741 } else if (IS_HASWELL(dev)) { 2742 /* Share interrupts handling with IVB */ 2743 dev->driver->irq_handler = ivybridge_irq_handler; 2744 dev->driver->irq_preinstall = ironlake_irq_preinstall; 2745 dev->driver->irq_postinstall = ivybridge_irq_postinstall; 2746 dev->driver->irq_uninstall = ironlake_irq_uninstall; 2747 dev->driver->enable_vblank = ivybridge_enable_vblank; 2748 dev->driver->disable_vblank = ivybridge_disable_vblank; 2749 } else if (HAS_PCH_SPLIT(dev)) { 2750 dev->driver->irq_handler = ironlake_irq_handler; 2751 dev->driver->irq_preinstall = ironlake_irq_preinstall; 2752 dev->driver->irq_postinstall = ironlake_irq_postinstall; 2753 dev->driver->irq_uninstall = ironlake_irq_uninstall; 2754 dev->driver->enable_vblank = ironlake_enable_vblank; 2755 dev->driver->disable_vblank = ironlake_disable_vblank; 2756 } else { 2757 if (INTEL_INFO(dev)->gen == 2) { 2758 dev->driver->irq_preinstall = i8xx_irq_preinstall; 2759 dev->driver->irq_postinstall = i8xx_irq_postinstall; 2760 dev->driver->irq_handler = i8xx_irq_handler; 2761 dev->driver->irq_uninstall = i8xx_irq_uninstall; 2762 } else if (INTEL_INFO(dev)->gen == 3) { 2763 dev->driver->irq_preinstall = i915_irq_preinstall; 2764 dev->driver->irq_postinstall = i915_irq_postinstall; 2765 dev->driver->irq_uninstall = i915_irq_uninstall; 2766 dev->driver->irq_handler = i915_irq_handler; 2767 } else { 2768 dev->driver->irq_preinstall = i965_irq_preinstall; 2769 dev->driver->irq_postinstall = i965_irq_postinstall; 2770 dev->driver->irq_uninstall = i965_irq_uninstall; 2771 dev->driver->irq_handler = i965_irq_handler; 2772 } 2773 dev->driver->enable_vblank = i915_enable_vblank; 2774 dev->driver->disable_vblank = i915_disable_vblank; 2775 } 2776 } 2777