/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/sysrq.h>
#include <linux/slab.h>
#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

#define MAX_NOPID ((u32)~0)

/**
 * Interrupts that are always left unmasked.
 *
 * Since pipe events are edge-triggered from the PIPESTAT register to IIR,
 * we leave them always unmasked in IMR and then control enabling them through
 * PIPESTAT alone.
 */
#define I915_INTERRUPT_ENABLE_FIX			\
	(I915_ASLE_INTERRUPT |				\
	 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |		\
	 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |		\
	 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |	\
	 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |	\
	 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)

/** Interrupts that we mask and unmask at runtime. */
#define I915_INTERRUPT_ENABLE_VAR (I915_USER_INTERRUPT | I915_BSD_USER_INTERRUPT)

#define I915_PIPE_VBLANK_STATUS	(PIPE_START_VBLANK_INTERRUPT_STATUS |\
				 PIPE_VBLANK_INTERRUPT_STATUS)

#define I915_PIPE_VBLANK_ENABLE	(PIPE_START_VBLANK_INTERRUPT_ENABLE |\
				 PIPE_VBLANK_INTERRUPT_ENABLE)

#define DRM_I915_VBLANK_PIPE_ALL	(DRM_I915_VBLANK_PIPE_A | \
					 DRM_I915_VBLANK_PIPE_B)

void
ironlake_enable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	if ((dev_priv->gt_irq_mask_reg & mask) != 0) {
		dev_priv->gt_irq_mask_reg &= ~mask;
		I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg);
		(void) I915_READ(GTIMR);
	}
}

void
ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	if ((dev_priv->gt_irq_mask_reg & mask) != mask) {
		dev_priv->gt_irq_mask_reg |= mask;
		I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg);
		(void) I915_READ(GTIMR);
	}
}

/* For display hotplug interrupt */
static void
ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	if ((dev_priv->irq_mask_reg & mask) != 0) {
		dev_priv->irq_mask_reg &= ~mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask_reg);
		(void) I915_READ(DEIMR);
	}
}

static inline void
ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	if ((dev_priv->irq_mask_reg & mask) != mask) {
		dev_priv->irq_mask_reg |= mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask_reg);
		(void) I915_READ(DEIMR);
	}
}

void
i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	if ((dev_priv->irq_mask_reg & mask) != 0) {
		dev_priv->irq_mask_reg &= ~mask;
		I915_WRITE(IMR, dev_priv->irq_mask_reg);
		(void) I915_READ(IMR);
	}
}

void
i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	if ((dev_priv->irq_mask_reg & mask) != mask) {
		dev_priv->irq_mask_reg |= mask;
		I915_WRITE(IMR, dev_priv->irq_mask_reg);
		(void) I915_READ(IMR);
	}
}

static inline u32
i915_pipestat(int pipe)
{
	if (pipe == 0)
		return PIPEASTAT;
	if (pipe == 1)
		return PIPEBSTAT;
	BUG();
}

void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	if ((dev_priv->pipestat[pipe] & mask) != mask) {
		u32 reg = i915_pipestat(pipe);

		dev_priv->pipestat[pipe] |= mask;
		/* Enable the interrupt, clear any pending status */
		I915_WRITE(reg, dev_priv->pipestat[pipe] | (mask >> 16));
		(void) I915_READ(reg);
	}
}

void
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	if ((dev_priv->pipestat[pipe] & mask) != 0) {
		u32 reg = i915_pipestat(pipe);

		dev_priv->pipestat[pipe] &= ~mask;
		I915_WRITE(reg, dev_priv->pipestat[pipe]);
		(void) I915_READ(reg);
	}
}

/**
 * intel_enable_asle - enable ASLE interrupt for OpRegion
 */
void intel_enable_asle(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (HAS_PCH_SPLIT(dev))
		ironlake_enable_display_irq(dev_priv, DE_GSE);
	else {
		i915_enable_pipestat(dev_priv, 1,
				     PIPE_LEGACY_BLC_EVENT_ENABLE);
		if (INTEL_INFO(dev)->gen >= 4)
			i915_enable_pipestat(dev_priv, 0,
					     PIPE_LEGACY_BLC_EVENT_ENABLE);
	}
}

/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %d\n", pipe);
		return 0;
	}

	high_frame = pipe ? PIPEBFRAMEHIGH : PIPEAFRAMEHIGH;
	low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL;

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low = I915_READ(low_frame) & PIPE_FRAME_LOW_MASK;
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	low >>= PIPE_FRAME_LOW_SHIFT;
	return (high1 << 8) | low;
}

u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int reg = pipe ? PIPEB_FRMCOUNT_GM45 : PIPEA_FRMCOUNT_GM45;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %d\n", pipe);
		return 0;
	}

	return I915_READ(reg);
}

/*
 * Handle hotplug events outside the interrupt handler proper.
 */
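/* Runs in process context off dev_priv->wq: drm_helper_hpd_irq_event() takes
 * mode_config.mutex and may re-probe connectors, so this cannot be done from
 * the interrupt handler itself.
 */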
static void i915_hotplug_work_func(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    hotplug_work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *encoder;

	list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
		if (encoder->hot_plug)
			encoder->hot_plug(encoder);

	/* Just fire off a uevent and let userspace tell us what to do */
	drm_helper_hpd_irq_event(dev);
}

static void i915_handle_rps_change(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay = dev_priv->cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->cur_delay != dev_priv->max_delay)
			new_delay = dev_priv->cur_delay - 1;
		if (new_delay < dev_priv->max_delay)
			new_delay = dev_priv->max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->cur_delay != dev_priv->min_delay)
			new_delay = dev_priv->cur_delay + 1;
		if (new_delay > dev_priv->min_delay)
			new_delay = dev_priv->min_delay;
	}

	if (ironlake_set_drps(dev, new_delay))
		dev_priv->cur_delay = new_delay;

	return;
}

static void notify_ring(struct drm_device *dev,
			struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 seqno = ring->get_seqno(dev, ring);
	ring->irq_gem_seqno = seqno;
	trace_i915_gem_request_complete(dev, seqno);
	wake_up_all(&ring->irq_queue);
	dev_priv->hangcheck_count = 0;
	mod_timer(&dev_priv->hangcheck_timer,
		  jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
}

static irqreturn_t ironlake_irq_handler(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int ret = IRQ_NONE;
	u32 de_iir, gt_iir, de_ier, pch_iir;
	u32 hotplug_mask;
	struct drm_i915_master_private *master_priv;
	u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;

	if (IS_GEN6(dev))
		bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT;

	/* disable master interrupt before clearing iir */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
	(void)I915_READ(DEIER);

	de_iir = I915_READ(DEIIR);
	gt_iir = I915_READ(GTIIR);
	pch_iir = I915_READ(SDEIIR);

	if (de_iir == 0 && gt_iir == 0 && pch_iir == 0)
		goto done;

	if (HAS_PCH_CPT(dev))
		hotplug_mask = SDE_HOTPLUG_MASK_CPT;
	else
		hotplug_mask = SDE_HOTPLUG_MASK;

	ret = IRQ_HANDLED;

	if (dev->primary->master) {
		master_priv = dev->primary->master->driver_priv;
		if (master_priv->sarea_priv)
			master_priv->sarea_priv->last_dispatch =
				READ_BREADCRUMB(dev_priv);
	}

	if (gt_iir & GT_PIPE_NOTIFY)
		notify_ring(dev, &dev_priv->render_ring);
	if (gt_iir & bsd_usr_interrupt)
		notify_ring(dev, &dev_priv->bsd_ring);
	if (HAS_BLT(dev) && gt_iir & GT_BLT_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->blt_ring);

	if (de_iir & DE_GSE)
		intel_opregion_gse_intr(dev);

	if (de_iir & DE_PLANEA_FLIP_DONE) {
		intel_prepare_page_flip(dev, 0);
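		/* On Ironlake the flip-done interrupt fires once the flip has
		 * actually completed, so the pending flip on plane A is
		 * prepared and finished in a single step here.
		 */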
		intel_finish_page_flip_plane(dev, 0);
	}

	if (de_iir & DE_PLANEB_FLIP_DONE) {
		intel_prepare_page_flip(dev, 1);
		intel_finish_page_flip_plane(dev, 1);
	}

	if (de_iir & DE_PIPEA_VBLANK)
		drm_handle_vblank(dev, 0);

	if (de_iir & DE_PIPEB_VBLANK)
		drm_handle_vblank(dev, 1);

	/* check event from PCH */
	if ((de_iir & DE_PCH_EVENT) && (pch_iir & hotplug_mask))
		queue_work(dev_priv->wq, &dev_priv->hotplug_work);

	if (de_iir & DE_PCU_EVENT) {
		I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
		i915_handle_rps_change(dev);
	}

	/* should clear PCH hotplug event before clear CPU irq */
	I915_WRITE(SDEIIR, pch_iir);
	I915_WRITE(GTIIR, gt_iir);
	I915_WRITE(DEIIR, de_iir);

done:
	I915_WRITE(DEIER, de_ier);
	(void)I915_READ(DEIER);

	return ret;
}

/**
 * i915_error_work_func - do process context error handling work
 * @work: work struct
 *
 * Fire an error uevent so userspace can see that a hang or error
 * was detected.
 */
static void i915_error_work_func(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    error_work);
	struct drm_device *dev = dev_priv->dev;
	char *error_event[] = { "ERROR=1", NULL };
	char *reset_event[] = { "RESET=1", NULL };
	char *reset_done_event[] = { "ERROR=0", NULL };

	kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);

	if (atomic_read(&dev_priv->mm.wedged)) {
		DRM_DEBUG_DRIVER("resetting chip\n");
		kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event);
		if (!i915_reset(dev, GRDOM_RENDER)) {
			atomic_set(&dev_priv->mm.wedged, 0);
			kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event);
		}
		complete_all(&dev_priv->error_completion);
	}
}

#ifdef CONFIG_DEBUG_FS
static struct drm_i915_error_object *
i915_error_object_create(struct drm_device *dev,
			 struct drm_gem_object *src)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_error_object *dst;
	struct drm_i915_gem_object *src_priv;
	int page, page_count;
	u32 reloc_offset;

	if (src == NULL)
		return NULL;

	src_priv = to_intel_bo(src);
	if (src_priv->pages == NULL)
		return NULL;

	page_count = src->size / PAGE_SIZE;

	dst = kmalloc(sizeof(*dst) + page_count * sizeof (u32 *), GFP_ATOMIC);
	if (dst == NULL)
		return NULL;

	reloc_offset = src_priv->gtt_offset;
	for (page = 0; page < page_count; page++) {
		unsigned long flags;
		void __iomem *s;
		void *d;

		d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
		if (d == NULL)
			goto unwind;

		local_irq_save(flags);
		s = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
					     reloc_offset);
		memcpy_fromio(d, s, PAGE_SIZE);
		io_mapping_unmap_atomic(s);
		local_irq_restore(flags);

		dst->pages[page] = d;

		reloc_offset += PAGE_SIZE;
	}
	dst->page_count = page_count;
	dst->gtt_offset = src_priv->gtt_offset;

	return dst;

unwind:
	while (page--)
		kfree(dst->pages[page]);
	kfree(dst);
	return NULL;
}

static void
i915_error_object_free(struct drm_i915_error_object *obj)
{
	int page;

	if (obj == NULL)
		return;

	for (page = 0; page < obj->page_count; page++)
		kfree(obj->pages[page]);

	kfree(obj);
}

static void
i915_error_state_free(struct drm_device *dev,
		      struct drm_i915_error_state *error)
{
	i915_error_object_free(error->batchbuffer[0]);
	i915_error_object_free(error->batchbuffer[1]);
	i915_error_object_free(error->ringbuffer);
	kfree(error->active_bo);
	kfree(error->overlay);
	kfree(error);
}

static u32
i915_get_bbaddr(struct drm_device *dev, u32 *ring)
{
	u32 cmd;

	if (IS_I830(dev) || IS_845G(dev))
		cmd = MI_BATCH_BUFFER;
	else if (INTEL_INFO(dev)->gen >= 4)
		cmd = (MI_BATCH_BUFFER_START | (2 << 6) |
		       MI_BATCH_NON_SECURE_I965);
	else
		cmd = (MI_BATCH_BUFFER_START | (2 << 6));

	return ring[0] == cmd ? ring[1] : 0;
}

static u32
i915_ringbuffer_last_batch(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 head, bbaddr;
	u32 *ring;

	/* Locate the current position in the ringbuffer and walk back
	 * to find the most recently dispatched batch buffer.
	 */
	bbaddr = 0;
	head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
	ring = (u32 *)(dev_priv->render_ring.virtual_start + head);

	while (--ring >= (u32 *)dev_priv->render_ring.virtual_start) {
		bbaddr = i915_get_bbaddr(dev, ring);
		if (bbaddr)
			break;
	}

	if (bbaddr == 0) {
		ring = (u32 *)(dev_priv->render_ring.virtual_start
			       + dev_priv->render_ring.size);
		while (--ring >= (u32 *)dev_priv->render_ring.virtual_start) {
			bbaddr = i915_get_bbaddr(dev, ring);
			if (bbaddr)
				break;
		}
	}

	return bbaddr;
}

/**
 * i915_capture_error_state - capture an error record for later analysis
 * @dev: drm device
 *
 * Should be called when an error is detected (either a hang or an error
 * interrupt) to capture error state from the time of the error. Fills
 * out a structure which becomes available in debugfs for user level tools
 * to pick up.
 */
static void i915_capture_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv;
	struct drm_i915_error_state *error;
	struct drm_gem_object *batchbuffer[2];
	unsigned long flags;
	u32 bbaddr;
	int count;

	spin_lock_irqsave(&dev_priv->error_lock, flags);
	error = dev_priv->first_error;
	spin_unlock_irqrestore(&dev_priv->error_lock, flags);
	if (error)
		return;

	error = kmalloc(sizeof(*error), GFP_ATOMIC);
	if (!error) {
		DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
		return;
	}

	DRM_DEBUG_DRIVER("generating error event\n");

	error->seqno =
		dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring);
	error->eir = I915_READ(EIR);
	error->pgtbl_er = I915_READ(PGTBL_ER);
	error->pipeastat = I915_READ(PIPEASTAT);
	error->pipebstat = I915_READ(PIPEBSTAT);
	error->instpm = I915_READ(INSTPM);
	if (INTEL_INFO(dev)->gen < 4) {
		error->ipeir = I915_READ(IPEIR);
		error->ipehr = I915_READ(IPEHR);
		error->instdone = I915_READ(INSTDONE);
		error->acthd = I915_READ(ACTHD);
		error->bbaddr = 0;
	} else {
		error->ipeir = I915_READ(IPEIR_I965);
		error->ipehr = I915_READ(IPEHR_I965);
		error->instdone = I915_READ(INSTDONE_I965);
		error->instps = I915_READ(INSTPS);
		error->instdone1 = I915_READ(INSTDONE1);
		error->acthd = I915_READ(ACTHD_I965);
		error->bbaddr = I915_READ64(BB_ADDR);
	}

	bbaddr = i915_ringbuffer_last_batch(dev);

	/* Grab the current batchbuffer, most likely to have crashed. */
	batchbuffer[0] = NULL;
	batchbuffer[1] = NULL;
	count = 0;
	list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
		struct drm_gem_object *obj = &obj_priv->base;

		if (batchbuffer[0] == NULL &&
		    bbaddr >= obj_priv->gtt_offset &&
		    bbaddr < obj_priv->gtt_offset + obj->size)
			batchbuffer[0] = obj;

		if (batchbuffer[1] == NULL &&
		    error->acthd >= obj_priv->gtt_offset &&
		    error->acthd < obj_priv->gtt_offset + obj->size)
			batchbuffer[1] = obj;

		count++;
	}
	/* Scan the other lists for completeness for those bizarre errors. */
	if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) {
		list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, mm_list) {
			struct drm_gem_object *obj = &obj_priv->base;

			if (batchbuffer[0] == NULL &&
			    bbaddr >= obj_priv->gtt_offset &&
			    bbaddr < obj_priv->gtt_offset + obj->size)
				batchbuffer[0] = obj;

			if (batchbuffer[1] == NULL &&
			    error->acthd >= obj_priv->gtt_offset &&
			    error->acthd < obj_priv->gtt_offset + obj->size)
				batchbuffer[1] = obj;

			if (batchbuffer[0] && batchbuffer[1])
				break;
		}
	}
	if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) {
		list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, mm_list) {
			struct drm_gem_object *obj = &obj_priv->base;

			if (batchbuffer[0] == NULL &&
			    bbaddr >= obj_priv->gtt_offset &&
			    bbaddr < obj_priv->gtt_offset + obj->size)
				batchbuffer[0] = obj;

			if (batchbuffer[1] == NULL &&
			    error->acthd >= obj_priv->gtt_offset &&
			    error->acthd < obj_priv->gtt_offset + obj->size)
				batchbuffer[1] = obj;

			if (batchbuffer[0] && batchbuffer[1])
				break;
		}
	}

	/* We need to copy these to an anonymous buffer as the simplest
	 * method to avoid being overwritten by userspace.
	 */
	error->batchbuffer[0] = i915_error_object_create(dev, batchbuffer[0]);
	if (batchbuffer[1] != batchbuffer[0])
		error->batchbuffer[1] = i915_error_object_create(dev, batchbuffer[1]);
	else
		error->batchbuffer[1] = NULL;

	/* Record the ringbuffer */
	error->ringbuffer = i915_error_object_create(dev,
						     dev_priv->render_ring.gem_object);

	/* Record buffers on the active list. */
	error->active_bo = NULL;
	error->active_bo_count = 0;

	if (count)
		error->active_bo = kmalloc(sizeof(*error->active_bo)*count,
					   GFP_ATOMIC);

	if (error->active_bo) {
		int i = 0;
		list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
			struct drm_gem_object *obj = &obj_priv->base;

			error->active_bo[i].size = obj->size;
			error->active_bo[i].name = obj->name;
			error->active_bo[i].seqno = obj_priv->last_rendering_seqno;
			error->active_bo[i].gtt_offset = obj_priv->gtt_offset;
			error->active_bo[i].read_domains = obj->read_domains;
			error->active_bo[i].write_domain = obj->write_domain;
			error->active_bo[i].fence_reg = obj_priv->fence_reg;
			error->active_bo[i].pinned = 0;
			if (obj_priv->pin_count > 0)
				error->active_bo[i].pinned = 1;
			if (obj_priv->user_pin_count > 0)
				error->active_bo[i].pinned = -1;
			error->active_bo[i].tiling = obj_priv->tiling_mode;
			error->active_bo[i].dirty = obj_priv->dirty;
			error->active_bo[i].purgeable = obj_priv->madv != I915_MADV_WILLNEED;

			if (++i == count)
				break;
		}
		error->active_bo_count = i;
	}

	do_gettimeofday(&error->time);

	error->overlay = intel_overlay_capture_error_state(dev);

	spin_lock_irqsave(&dev_priv->error_lock, flags);
	if (dev_priv->first_error == NULL) {
		dev_priv->first_error = error;
		error = NULL;
	}
	spin_unlock_irqrestore(&dev_priv->error_lock, flags);

	if (error)
		i915_error_state_free(dev, error);
}

void i915_destroy_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error;

	spin_lock(&dev_priv->error_lock);
	error = dev_priv->first_error;
	dev_priv->first_error = NULL;
	spin_unlock(&dev_priv->error_lock);

	if (error)
		i915_error_state_free(dev, error);
}
#else
#define i915_capture_error_state(x)
#endif

static void i915_report_and_clear_eir(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 eir = I915_READ(EIR);

	if (!eir)
		return;

	printk(KERN_ERR "render error detected, EIR: 0x%08x\n",
	       eir);

	if (IS_G4X(dev)) {
		if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
			u32 ipeir = I915_READ(IPEIR_I965);

			printk(KERN_ERR "  IPEIR: 0x%08x\n",
			       I915_READ(IPEIR_I965));
			printk(KERN_ERR "  IPEHR: 0x%08x\n",
			       I915_READ(IPEHR_I965));
			printk(KERN_ERR "  INSTDONE: 0x%08x\n",
			       I915_READ(INSTDONE_I965));
			printk(KERN_ERR "  INSTPS: 0x%08x\n",
			       I915_READ(INSTPS));
			printk(KERN_ERR "  INSTDONE1: 0x%08x\n",
			       I915_READ(INSTDONE1));
			printk(KERN_ERR "  ACTHD: 0x%08x\n",
			       I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			(void)I915_READ(IPEIR_I965);
		}
		if (eir & GM45_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			printk(KERN_ERR "page table error\n");
			printk(KERN_ERR "  PGTBL_ER: 0x%08x\n",
			       pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			(void)I915_READ(PGTBL_ER);
		}
	}

	if (!IS_GEN2(dev)) {
		if (eir & I915_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			printk(KERN_ERR "page table error\n");
			printk(KERN_ERR "  PGTBL_ER: 0x%08x\n",
			       pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			(void)I915_READ(PGTBL_ER);
		}
	}

	if (eir & I915_ERROR_MEMORY_REFRESH) {
		u32 pipea_stats = I915_READ(PIPEASTAT);
		u32 pipeb_stats = I915_READ(PIPEBSTAT);

		printk(KERN_ERR "memory refresh error\n");
		printk(KERN_ERR "PIPEASTAT: 0x%08x\n",
		       pipea_stats);
		printk(KERN_ERR "PIPEBSTAT: 0x%08x\n",
		       pipeb_stats);
		/* pipestat has already been acked */
	}
	if (eir & I915_ERROR_INSTRUCTION) {
		printk(KERN_ERR "instruction error\n");
		printk(KERN_ERR "  INSTPM: 0x%08x\n",
		       I915_READ(INSTPM));
		if (INTEL_INFO(dev)->gen < 4) {
			u32 ipeir = I915_READ(IPEIR);

			printk(KERN_ERR "  IPEIR: 0x%08x\n",
			       I915_READ(IPEIR));
			printk(KERN_ERR "  IPEHR: 0x%08x\n",
			       I915_READ(IPEHR));
			printk(KERN_ERR "  INSTDONE: 0x%08x\n",
			       I915_READ(INSTDONE));
			printk(KERN_ERR "  ACTHD: 0x%08x\n",
			       I915_READ(ACTHD));
			I915_WRITE(IPEIR, ipeir);
			(void)I915_READ(IPEIR);
		} else {
			u32 ipeir = I915_READ(IPEIR_I965);

			printk(KERN_ERR "  IPEIR: 0x%08x\n",
			       I915_READ(IPEIR_I965));
			printk(KERN_ERR "  IPEHR: 0x%08x\n",
			       I915_READ(IPEHR_I965));
			printk(KERN_ERR "  INSTDONE: 0x%08x\n",
			       I915_READ(INSTDONE_I965));
			printk(KERN_ERR "  INSTPS: 0x%08x\n",
			       I915_READ(INSTPS));
			printk(KERN_ERR "  INSTDONE1: 0x%08x\n",
			       I915_READ(INSTDONE1));
			printk(KERN_ERR "  ACTHD: 0x%08x\n",
			       I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			(void)I915_READ(IPEIR_I965);
		}
	}

	I915_WRITE(EIR, eir);
	(void)I915_READ(EIR);
	eir = I915_READ(EIR);
	if (eir) {
		/*
		 * some errors might have become stuck,
		 * mask them.
		 */
		DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
		I915_WRITE(EMR, I915_READ(EMR) | eir);
		I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	}
}

/**
 * i915_handle_error - handle an error interrupt
 * @dev: drm device
 *
 * Do some basic checking of register state at error interrupt time and
 * dump it to the syslog. Also call i915_capture_error_state() to make
 * sure we get a record and make it available in debugfs. Fire a uevent
 * so userspace knows something bad happened (should trigger collection
 * of a ring dump etc.).
 */
static void i915_handle_error(struct drm_device *dev, bool wedged)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	i915_capture_error_state(dev);
	i915_report_and_clear_eir(dev);

	if (wedged) {
		INIT_COMPLETION(dev_priv->error_completion);
		atomic_set(&dev_priv->mm.wedged, 1);

		/*
		 * Wakeup waiting processes so they don't hang
		 */
		wake_up_all(&dev_priv->render_ring.irq_queue);
		if (HAS_BSD(dev))
			wake_up_all(&dev_priv->bsd_ring.irq_queue);
		if (HAS_BLT(dev))
			wake_up_all(&dev_priv->blt_ring.irq_queue);
	}

	queue_work(dev_priv->wq, &dev_priv->error_work);
}

static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_i915_gem_object *obj_priv;
	struct intel_unpin_work *work;
	unsigned long flags;
	bool stall_detected;

	/* Ignore early vblank irqs */
	if (intel_crtc == NULL)
		return;

	spin_lock_irqsave(&dev->event_lock, flags);
	work = intel_crtc->unpin_work;

	if (work == NULL || work->pending || !work->enable_stall_check) {
		/* Either the pending flip IRQ arrived, or we're too early. Don't check */
		spin_unlock_irqrestore(&dev->event_lock, flags);
		return;
	}

	/* Potential stall - if we see that the flip has happened, assume a missed interrupt */
	obj_priv = to_intel_bo(work->pending_flip_obj);
	if (INTEL_INFO(dev)->gen >= 4) {
		int dspsurf = intel_crtc->plane == 0 ? DSPASURF : DSPBSURF;
		stall_detected = I915_READ(dspsurf) == obj_priv->gtt_offset;
	} else {
		int dspaddr = intel_crtc->plane == 0 ? DSPAADDR : DSPBADDR;
		stall_detected = I915_READ(dspaddr) == (obj_priv->gtt_offset +
							crtc->y * crtc->fb->pitch +
							crtc->x * crtc->fb->bits_per_pixel/8);
	}

	spin_unlock_irqrestore(&dev->event_lock, flags);

	if (stall_detected) {
		DRM_DEBUG_DRIVER("Pageflip stall detected\n");
		intel_prepare_page_flip(dev, intel_crtc->plane);
	}
}

irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_i915_master_private *master_priv;
	u32 iir, new_iir;
	u32 pipea_stats, pipeb_stats;
	u32 vblank_status;
	int vblank = 0;
	unsigned long irqflags;
	int irq_received;
	int ret = IRQ_NONE;

	atomic_inc(&dev_priv->irq_received);

	if (HAS_PCH_SPLIT(dev))
		return ironlake_irq_handler(dev);

	iir = I915_READ(IIR);

	if (INTEL_INFO(dev)->gen >= 4)
		vblank_status = PIPE_START_VBLANK_INTERRUPT_STATUS;
	else
		vblank_status = PIPE_VBLANK_INTERRUPT_STATUS;

	for (;;) {
		irq_received = iir != 0;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
		pipea_stats = I915_READ(PIPEASTAT);
		pipeb_stats = I915_READ(PIPEBSTAT);

		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		/*
		 * Clear the PIPE(A|B)STAT regs before the IIR
		 */
		if (pipea_stats & 0x8000ffff) {
			if (pipea_stats & PIPE_FIFO_UNDERRUN_STATUS)
				DRM_DEBUG_DRIVER("pipe a underrun\n");
			I915_WRITE(PIPEASTAT, pipea_stats);
			irq_received = 1;
		}

		if (pipeb_stats & 0x8000ffff) {
			if (pipeb_stats & PIPE_FIFO_UNDERRUN_STATUS)
				DRM_DEBUG_DRIVER("pipe b underrun\n");
			I915_WRITE(PIPEBSTAT, pipeb_stats);
			irq_received = 1;
		}
		spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);

		if (!irq_received)
			break;

		ret = IRQ_HANDLED;

		/* Consume port. Then clear IIR or we'll miss events */
		if ((I915_HAS_HOTPLUG(dev)) &&
		    (iir & I915_DISPLAY_PORT_INTERRUPT)) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);
			if (hotplug_status & dev_priv->hotplug_supported_mask)
				queue_work(dev_priv->wq,
					   &dev_priv->hotplug_work);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			I915_READ(PORT_HOTPLUG_STAT);
		}

		I915_WRITE(IIR, iir);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (dev->primary->master) {
			master_priv = dev->primary->master->driver_priv;
			if (master_priv->sarea_priv)
				master_priv->sarea_priv->last_dispatch =
					READ_BREADCRUMB(dev_priv);
		}

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->render_ring);
		if (HAS_BSD(dev) && (iir & I915_BSD_USER_INTERRUPT))
			notify_ring(dev, &dev_priv->bsd_ring);

		if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) {
			intel_prepare_page_flip(dev, 0);
			if (dev_priv->flip_pending_is_done)
				intel_finish_page_flip_plane(dev, 0);
		}

		if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT) {
			intel_prepare_page_flip(dev, 1);
			if (dev_priv->flip_pending_is_done)
				intel_finish_page_flip_plane(dev, 1);
		}

		if (pipea_stats & vblank_status) {
			vblank++;
			drm_handle_vblank(dev, 0);
			if (!dev_priv->flip_pending_is_done) {
				i915_pageflip_stall_check(dev, 0);
				intel_finish_page_flip(dev, 0);
			}
		}

		if (pipeb_stats & vblank_status) {
			vblank++;
			drm_handle_vblank(dev, 1);
			if (!dev_priv->flip_pending_is_done) {
				i915_pageflip_stall_check(dev, 1);
				intel_finish_page_flip(dev, 1);
			}
		}

		if ((pipea_stats & PIPE_LEGACY_BLC_EVENT_STATUS) ||
		    (pipeb_stats & PIPE_LEGACY_BLC_EVENT_STATUS) ||
		    (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero. If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read. This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		iir = new_iir;
	}

	return ret;
}

static int i915_emit_irq(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;

	i915_kernel_lost_context(dev);

	DRM_DEBUG_DRIVER("\n");

	dev_priv->counter++;
	if (dev_priv->counter > 0x7FFFFFFFUL)
		dev_priv->counter = 1;
	if (master_priv->sarea_priv)
		master_priv->sarea_priv->last_enqueue = dev_priv->counter;

	BEGIN_LP_RING(4);
	OUT_RING(MI_STORE_DWORD_INDEX);
	OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	OUT_RING(dev_priv->counter);
	OUT_RING(MI_USER_INTERRUPT);
	ADVANCE_LP_RING();

	return dev_priv->counter;
}

void i915_trace_irq_get(struct drm_device *dev, u32 seqno)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct intel_ring_buffer *render_ring = &dev_priv->render_ring;

	if (dev_priv->trace_irq_seqno == 0)
		render_ring->user_irq_get(dev, render_ring);

	dev_priv->trace_irq_seqno = seqno;
}

static int i915_wait_irq(struct drm_device * dev, int irq_nr)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	int ret = 0;
	struct intel_ring_buffer *render_ring = &dev_priv->render_ring;

	DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr,
			 READ_BREADCRUMB(dev_priv));

	if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
		if (master_priv->sarea_priv)
			master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
		return 0;
	}

	if (master_priv->sarea_priv)
		master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;

	render_ring->user_irq_get(dev, render_ring);
	DRM_WAIT_ON(ret, dev_priv->render_ring.irq_queue, 3 * DRM_HZ,
		    READ_BREADCRUMB(dev_priv) >= irq_nr);
	render_ring->user_irq_put(dev, render_ring);

	if (ret == -EBUSY) {
		DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
			  READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
	}

	return ret;
}

/* Needs the lock as it touches the ring.
 */
int i915_irq_emit(struct drm_device *dev, void *data,
		  struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_irq_emit_t *emit = data;
	int result;

	if (!dev_priv || !dev_priv->render_ring.virtual_start) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	mutex_lock(&dev->struct_mutex);
	result = i915_emit_irq(dev);
	mutex_unlock(&dev->struct_mutex);

	if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
		DRM_ERROR("copy_to_user\n");
		return -EFAULT;
	}

	return 0;
}

/* Doesn't need the hardware lock.
 */
int i915_irq_wait(struct drm_device *dev, void *data,
		  struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_irq_wait_t *irqwait = data;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	return i915_wait_irq(dev, irqwait->irq_seq);
}

/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
int i915_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
	if (HAS_PCH_SPLIT(dev))
		ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
					    DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
	else if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_START_VBLANK_INTERRUPT_ENABLE);
	else
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_VBLANK_INTERRUPT_ENABLE);
	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
	return 0;
}

/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
void i915_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
	if (HAS_PCH_SPLIT(dev))
		ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
					     DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
	else
		i915_disable_pipestat(dev_priv, pipe,
				      PIPE_VBLANK_INTERRUPT_ENABLE |
				      PIPE_START_VBLANK_INTERRUPT_ENABLE);
	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
}

void i915_enable_interrupt(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!HAS_PCH_SPLIT(dev))
		intel_opregion_enable_asle(dev);
	dev_priv->irq_enabled = 1;
}


/* Set the vblank monitor pipe
 */
int i915_vblank_pipe_set(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	return 0;
}

int i915_vblank_pipe_get(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_vblank_pipe_t *pipe = data;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;

	return 0;
}

/**
 * Schedule buffer swap at given vertical blank.
 */
int i915_vblank_swap(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	/* The delayed swap mechanism was fundamentally racy, and has been
	 * removed. The model was that the client requested a delayed flip/swap
	 * from the kernel, then waited for vblank before continuing to perform
	 * rendering. The problem was that the kernel might wake the client
	 * up before it dispatched the vblank swap (since the lock has to be
	 * held while touching the ringbuffer), in which case the client would
	 * clear and start the next frame before the swap occurred, and
	 * flicker would occur in addition to likely missing the vblank.
	 *
	 * In the absence of this ioctl, userland falls back to a correct path
	 * of waiting for a vblank, then dispatching the swap on its own.
	 * Context switching to userland and back is plenty fast enough for
	 * meeting the requirements of vblank swapping.
	 */
	return -EINVAL;
}

static struct drm_i915_gem_request *
i915_get_tail_request(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	return list_entry(dev_priv->render_ring.request_list.prev,
			  struct drm_i915_gem_request, list);
}

/**
 * This is called when the chip hasn't reported back with completed
 * batchbuffers in a long time. The first time this is called we simply record
 * ACTHD. If ACTHD hasn't changed by the time the hangcheck timer elapses
 * again, we assume the chip is wedged and try to fix it.
 */
void i915_hangcheck_elapsed(unsigned long data)
{
	struct drm_device *dev = (struct drm_device *)data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t acthd, instdone, instdone1;

	if (INTEL_INFO(dev)->gen < 4) {
		acthd = I915_READ(ACTHD);
		instdone = I915_READ(INSTDONE);
		instdone1 = 0;
	} else {
		acthd = I915_READ(ACTHD_I965);
		instdone = I915_READ(INSTDONE_I965);
		instdone1 = I915_READ(INSTDONE1);
	}

	/* If all work is done then ACTHD clearly hasn't advanced. */
	if (list_empty(&dev_priv->render_ring.request_list) ||
	    i915_seqno_passed(dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring),
			      i915_get_tail_request(dev)->seqno)) {
		bool missed_wakeup = false;

		dev_priv->hangcheck_count = 0;

		/* Issue a wake-up to catch stuck h/w. */
		if (dev_priv->render_ring.waiting_gem_seqno &&
		    waitqueue_active(&dev_priv->render_ring.irq_queue)) {
			wake_up_all(&dev_priv->render_ring.irq_queue);
			missed_wakeup = true;
		}

		if (dev_priv->bsd_ring.waiting_gem_seqno &&
		    waitqueue_active(&dev_priv->bsd_ring.irq_queue)) {
			wake_up_all(&dev_priv->bsd_ring.irq_queue);
			missed_wakeup = true;
		}

		if (dev_priv->blt_ring.waiting_gem_seqno &&
		    waitqueue_active(&dev_priv->blt_ring.irq_queue)) {
			wake_up_all(&dev_priv->blt_ring.irq_queue);
			missed_wakeup = true;
		}

		if (missed_wakeup)
			DRM_ERROR("Hangcheck timer elapsed... GPU idle, missed IRQ.\n");
		return;
	}

	if (dev_priv->last_acthd == acthd &&
	    dev_priv->last_instdone == instdone &&
	    dev_priv->last_instdone1 == instdone1) {
		if (dev_priv->hangcheck_count++ > 1) {
			DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");

			if (!IS_GEN2(dev)) {
				/* Is the chip hanging on a WAIT_FOR_EVENT?
				 * If so we can simply poke the RB_WAIT bit
				 * and break the hang. This should work on
				 * all but the second generation chipsets.
				 */
				u32 tmp = I915_READ(PRB0_CTL);
				if (tmp & RING_WAIT) {
					I915_WRITE(PRB0_CTL, tmp);
					POSTING_READ(PRB0_CTL);
					goto out;
				}
			}

			i915_handle_error(dev, true);
			return;
		}
	} else {
		dev_priv->hangcheck_count = 0;

		dev_priv->last_acthd = acthd;
		dev_priv->last_instdone = instdone;
		dev_priv->last_instdone1 = instdone1;
	}

out:
	/* Reset timer in case chip hangs without another request being added */
	mod_timer(&dev_priv->hangcheck_timer,
		  jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
}

/* drm_dma.h hooks
 */
static void ironlake_irq_preinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	I915_WRITE(HWSTAM, 0xeffe);

	/* XXX hotplug from PCH */

	I915_WRITE(DEIMR, 0xffffffff);
	I915_WRITE(DEIER, 0x0);
	(void) I915_READ(DEIER);

	/* and GT */
	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	(void) I915_READ(GTIER);

	/* south display irq */
	I915_WRITE(SDEIMR, 0xffffffff);
	I915_WRITE(SDEIER, 0x0);
	(void) I915_READ(SDEIER);
}

static int ironlake_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	/* enable the kinds of interrupts that are always enabled */
	u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
			   DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
	u32 render_mask = GT_PIPE_NOTIFY | GT_BSD_USER_INTERRUPT;
	u32 hotplug_mask;

	dev_priv->irq_mask_reg = ~display_mask;
	dev_priv->de_irq_enable_reg = display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK;

	/* should always be able to generate irqs */
	I915_WRITE(DEIIR, I915_READ(DEIIR));
	I915_WRITE(DEIMR, dev_priv->irq_mask_reg);
	I915_WRITE(DEIER, dev_priv->de_irq_enable_reg);
	(void) I915_READ(DEIER);

	if (IS_GEN6(dev)) {
		render_mask =
			GT_PIPE_NOTIFY |
			GT_GEN6_BSD_USER_INTERRUPT |
			GT_BLT_USER_INTERRUPT;
	}

	dev_priv->gt_irq_mask_reg = ~render_mask;
	dev_priv->gt_irq_enable_reg = render_mask;

	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg);
	if (IS_GEN6(dev)) {
		I915_WRITE(GEN6_RENDER_IMR, ~GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT);
		I915_WRITE(GEN6_BSD_IMR, ~GEN6_BSD_IMR_USER_INTERRUPT);
		I915_WRITE(GEN6_BLITTER_IMR, ~GEN6_BLITTER_USER_INTERRUPT);
	}

	I915_WRITE(GTIER, dev_priv->gt_irq_enable_reg);
	(void) I915_READ(GTIER);

	if (HAS_PCH_CPT(dev)) {
		hotplug_mask = SDE_CRT_HOTPLUG_CPT | SDE_PORTB_HOTPLUG_CPT |
			       SDE_PORTC_HOTPLUG_CPT | SDE_PORTD_HOTPLUG_CPT;
	} else {
		hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
			       SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
	}

	dev_priv->pch_irq_mask_reg = ~hotplug_mask;
	dev_priv->pch_irq_enable_reg = hotplug_mask;

	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
	I915_WRITE(SDEIMR, dev_priv->pch_irq_mask_reg);
	I915_WRITE(SDEIER, dev_priv->pch_irq_enable_reg);
	(void) I915_READ(SDEIER);

	if (IS_IRONLAKE_M(dev)) {
		/* Clear & enable PCU event interrupts */
		I915_WRITE(DEIIR, DE_PCU_EVENT);
		I915_WRITE(DEIER, I915_READ(DEIER) | DE_PCU_EVENT);
		ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
	}

	return 0;
}

void i915_driver_irq_preinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	atomic_set(&dev_priv->irq_received, 0);

	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
	INIT_WORK(&dev_priv->error_work, i915_error_work_func);

	if (HAS_PCH_SPLIT(dev)) {
		ironlake_irq_preinstall(dev);
		return;
	}

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE(HWSTAM, 0xeffe);
	I915_WRITE(PIPEASTAT, 0);
	I915_WRITE(PIPEBSTAT, 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	(void) I915_READ(IER);
}

/*
 * Must be called after intel_modeset_init or hotplug interrupts won't be
 * enabled correctly.
 */
int i915_driver_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 enable_mask = I915_INTERRUPT_ENABLE_FIX | I915_INTERRUPT_ENABLE_VAR;
	u32 error_mask;

	DRM_INIT_WAITQUEUE(&dev_priv->render_ring.irq_queue);
	if (HAS_BSD(dev))
		DRM_INIT_WAITQUEUE(&dev_priv->bsd_ring.irq_queue);
	if (HAS_BLT(dev))
		DRM_INIT_WAITQUEUE(&dev_priv->blt_ring.irq_queue);

	dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;

	if (HAS_PCH_SPLIT(dev))
		return ironlake_irq_postinstall(dev);

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask_reg = ~I915_INTERRUPT_ENABLE_FIX;

	dev_priv->pipestat[0] = 0;
	dev_priv->pipestat[1] = 0;

	if (I915_HAS_HOTPLUG(dev)) {
		/* Enable in IER... */
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
		/* and unmask in IMR */
		dev_priv->irq_mask_reg &= ~I915_DISPLAY_PORT_INTERRUPT;
	}

	/*
	 * Enable some error detection, note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	I915_WRITE(EMR, error_mask);

	I915_WRITE(IMR, dev_priv->irq_mask_reg);
	I915_WRITE(IER, enable_mask);
	(void) I915_READ(IER);

	if (I915_HAS_HOTPLUG(dev)) {
		u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);

		/* Note HDMI and DP share bits */
		if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
			hotplug_en |= HDMIB_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
			hotplug_en |= HDMIC_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
			hotplug_en |= HDMID_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS)
			hotplug_en |= SDVOC_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS)
			hotplug_en |= SDVOB_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
			hotplug_en |= CRT_HOTPLUG_INT_EN;

			/* Programming the CRT detection parameters tends
			   to generate a spurious hotplug event about three
			   seconds later. So just do it once.
			*/
			if (IS_G4X(dev))
				hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
			hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
		}

		/* Ignore TV since it's buggy */

		I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
	}

	intel_opregion_enable_asle(dev);

	return 0;
}

static void ironlake_irq_uninstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	I915_WRITE(HWSTAM, 0xffffffff);

	I915_WRITE(DEIMR, 0xffffffff);
	I915_WRITE(DEIER, 0x0);
	I915_WRITE(DEIIR, I915_READ(DEIIR));

	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	I915_WRITE(GTIIR, I915_READ(GTIIR));
}

void i915_driver_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (!dev_priv)
		return;

	dev_priv->vblank_pipe = 0;

	if (HAS_PCH_SPLIT(dev)) {
		ironlake_irq_uninstall(dev);
		return;
	}

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE(HWSTAM, 0xffffffff);
	I915_WRITE(PIPEASTAT, 0);
	I915_WRITE(PIPEBSTAT, 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	I915_WRITE(PIPEASTAT, I915_READ(PIPEASTAT) & 0x8000ffff);
	I915_WRITE(PIPEBSTAT, I915_READ(PIPEBSTAT) & 0x8000ffff);
	I915_WRITE(IIR, I915_READ(IIR));
}