xref: /openbmc/linux/drivers/gpu/drm/i915/i915_irq.c (revision a09d2831)
1 /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
2  */
3 /*
4  * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
5  * All Rights Reserved.
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a
8  * copy of this software and associated documentation files (the
9  * "Software"), to deal in the Software without restriction, including
10  * without limitation the rights to use, copy, modify, merge, publish,
11  * distribute, sub license, and/or sell copies of the Software, and to
12  * permit persons to whom the Software is furnished to do so, subject to
13  * the following conditions:
14  *
15  * The above copyright notice and this permission notice (including the
16  * next paragraph) shall be included in all copies or substantial portions
17  * of the Software.
18  *
19  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22  * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26  *
27  */
28 
29 #include <linux/sysrq.h>
30 #include "drmP.h"
31 #include "drm.h"
32 #include "i915_drm.h"
33 #include "i915_drv.h"
34 #include "i915_trace.h"
35 #include "intel_drv.h"
36 
37 #define MAX_NOPID ((u32)~0)
38 
39 /**
40  * Interrupts that are always left unmasked.
41  *
42  * Since pipe events are edge-triggered from the PIPESTAT register to IIR,
43  * we leave them always unmasked in IMR and then control enabling them through
44  * PIPESTAT alone.
45  */
46 #define I915_INTERRUPT_ENABLE_FIX			\
47 	(I915_ASLE_INTERRUPT |				\
48 	 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |		\
49 	 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |		\
50 	 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |	\
51 	 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |	\
52 	 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
53 
54 /** Interrupts that we mask and unmask at runtime. */
55 #define I915_INTERRUPT_ENABLE_VAR (I915_USER_INTERRUPT)
56 
57 #define I915_PIPE_VBLANK_STATUS	(PIPE_START_VBLANK_INTERRUPT_STATUS |\
58 				 PIPE_VBLANK_INTERRUPT_STATUS)
59 
60 #define I915_PIPE_VBLANK_ENABLE	(PIPE_START_VBLANK_INTERRUPT_ENABLE |\
61 				 PIPE_VBLANK_INTERRUPT_ENABLE)
62 
63 #define DRM_I915_VBLANK_PIPE_ALL	(DRM_I915_VBLANK_PIPE_A | \
64 					 DRM_I915_VBLANK_PIPE_B)
65 
66 void
67 ironlake_enable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
68 {
69 	if ((dev_priv->gt_irq_mask_reg & mask) != 0) {
70 		dev_priv->gt_irq_mask_reg &= ~mask;
71 		I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg);
72 		(void) I915_READ(GTIMR);
73 	}
74 }
75 
76 static inline void
77 ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
78 {
79 	if ((dev_priv->gt_irq_mask_reg & mask) != mask) {
80 		dev_priv->gt_irq_mask_reg |= mask;
81 		I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg);
82 		(void) I915_READ(GTIMR);
83 	}
84 }
85 
86 /* For display hotplug interrupt */
87 void
88 ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
89 {
90 	if ((dev_priv->irq_mask_reg & mask) != 0) {
91 		dev_priv->irq_mask_reg &= ~mask;
92 		I915_WRITE(DEIMR, dev_priv->irq_mask_reg);
93 		(void) I915_READ(DEIMR);
94 	}
95 }
96 
97 static inline void
98 ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
99 {
100 	if ((dev_priv->irq_mask_reg & mask) != mask) {
101 		dev_priv->irq_mask_reg |= mask;
102 		I915_WRITE(DEIMR, dev_priv->irq_mask_reg);
103 		(void) I915_READ(DEIMR);
104 	}
105 }
106 
107 void
108 i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
109 {
110 	if ((dev_priv->irq_mask_reg & mask) != 0) {
111 		dev_priv->irq_mask_reg &= ~mask;
112 		I915_WRITE(IMR, dev_priv->irq_mask_reg);
113 		(void) I915_READ(IMR);
114 	}
115 }
116 
117 static inline void
118 i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
119 {
120 	if ((dev_priv->irq_mask_reg & mask) != mask) {
121 		dev_priv->irq_mask_reg |= mask;
122 		I915_WRITE(IMR, dev_priv->irq_mask_reg);
123 		(void) I915_READ(IMR);
124 	}
125 }
126 
127 static inline u32
128 i915_pipestat(int pipe)
129 {
130 	if (pipe == 0)
131 		return PIPEASTAT;
132 	if (pipe == 1)
133 		return PIPEBSTAT;
134 	BUG();
135 }
136 
/* Set the requested enable bits in the cached PIPExSTAT value and push
 * it to the hardware.  @mask carries interrupt-enable bits; the matching
 * status bits appear to sit 16 bits lower (hence the mask >> 16 below),
 * so the write also acks any event pending at enable time.
 */
void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	/* Only touch the hardware if some requested bit isn't set yet. */
	if ((dev_priv->pipestat[pipe] & mask) != mask) {
		u32 reg = i915_pipestat(pipe);

		dev_priv->pipestat[pipe] |= mask;
		/* Enable the interrupt, clear any pending status */
		I915_WRITE(reg, dev_priv->pipestat[pipe] | (mask >> 16));
		(void) I915_READ(reg);	/* flush the posted write */
	}
}
149 
150 void
151 i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
152 {
153 	if ((dev_priv->pipestat[pipe] & mask) != 0) {
154 		u32 reg = i915_pipestat(pipe);
155 
156 		dev_priv->pipestat[pipe] &= ~mask;
157 		I915_WRITE(reg, dev_priv->pipestat[pipe]);
158 		(void) I915_READ(reg);
159 	}
160 }
161 
162 /**
163  * intel_enable_asle - enable ASLE interrupt for OpRegion
164  */
165 void intel_enable_asle (struct drm_device *dev)
166 {
167 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
168 
169 	if (IS_IRONLAKE(dev))
170 		ironlake_enable_display_irq(dev_priv, DE_GSE);
171 	else
172 		i915_enable_pipestat(dev_priv, 1,
173 				     I915_LEGACY_BLC_EVENT_ENABLE);
174 }
175 
176 /**
177  * i915_pipe_enabled - check if a pipe is enabled
178  * @dev: DRM device
179  * @pipe: pipe to check
180  *
181  * Reading certain registers when the pipe is disabled can hang the chip.
182  * Use this routine to make sure the PLL is running and the pipe is active
183  * before reading such registers if unsure.
184  */
185 static int
186 i915_pipe_enabled(struct drm_device *dev, int pipe)
187 {
188 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
189 	unsigned long pipeconf = pipe ? PIPEBCONF : PIPEACONF;
190 
191 	if (I915_READ(pipeconf) & PIPEACONF_ENABLE)
192 		return 1;
193 
194 	return 0;
195 }
196 
/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index.  Returns the current hardware frame count for
 * that pipe, or 0 when the pipe is disabled (reading its frame-count
 * registers then could hang the chip -- see i915_pipe_enabled()).
 */
u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low, count;

	high_frame = pipe ? PIPEBFRAMEHIGH : PIPEAFRAMEHIGH;
	low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				"pipe %d\n", pipe);
		return 0;
	}

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
			 PIPE_FRAME_HIGH_SHIFT);
		low =  ((I915_READ(low_frame) & PIPE_FRAME_LOW_MASK) >>
			PIPE_FRAME_LOW_SHIFT);
		high2 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
			 PIPE_FRAME_HIGH_SHIFT);
	} while (high1 != high2);

	/* The low field supplies the bottom 8 bits of the frame count. */
	count = (high1 << 8) | low;

	return count;
}
234 
235 u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
236 {
237 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
238 	int reg = pipe ? PIPEB_FRMCOUNT_GM45 : PIPEA_FRMCOUNT_GM45;
239 
240 	if (!i915_pipe_enabled(dev, pipe)) {
241 		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
242 					"pipe %d\n", pipe);
243 		return 0;
244 	}
245 
246 	return I915_READ(reg);
247 }
248 
249 /*
250  * Handle hotplug events outside the interrupt handler proper.
251  */
252 static void i915_hotplug_work_func(struct work_struct *work)
253 {
254 	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
255 						    hotplug_work);
256 	struct drm_device *dev = dev_priv->dev;
257 	struct drm_mode_config *mode_config = &dev->mode_config;
258 	struct drm_connector *connector;
259 
260 	if (mode_config->num_connector) {
261 		list_for_each_entry(connector, &mode_config->connector_list, head) {
262 			struct intel_output *intel_output = to_intel_output(connector);
263 
264 			if (intel_output->hot_plug)
265 				(*intel_output->hot_plug) (intel_output);
266 		}
267 	}
268 	/* Just fire off a uevent and let userspace tell us what to do */
269 	drm_sysfs_hotplug_event(dev);
270 }
271 
/* Ironlake interrupt handler: drains the display (DEIIR), graphics
 * (GTIIR) and south/PCH (SDEIIR) interrupt-identity registers in a loop
 * until all three read back zero.  Master interrupt delivery is turned
 * off in DEIER for the duration so new events latch into the IIRs
 * without re-raising the line.
 */
irqreturn_t ironlake_irq_handler(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int ret = IRQ_NONE;
	u32 de_iir, gt_iir, de_ier, pch_iir;
	u32 new_de_iir, new_gt_iir, new_pch_iir;
	struct drm_i915_master_private *master_priv;

	/* disable master interrupt before clearing iir  */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
	(void)I915_READ(DEIER);

	de_iir = I915_READ(DEIIR);
	gt_iir = I915_READ(GTIIR);
	pch_iir = I915_READ(SDEIIR);

	for (;;) {
		if (de_iir == 0 && gt_iir == 0 && pch_iir == 0)
			break;

		ret = IRQ_HANDLED;

		/* should clear PCH hotplug event before clear CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
		new_pch_iir = I915_READ(SDEIIR);

		/* Ack what we saw; the read-back picks up any events that
		 * arrived while we were acking, for the next loop pass. */
		I915_WRITE(DEIIR, de_iir);
		new_de_iir = I915_READ(DEIIR);
		I915_WRITE(GTIIR, gt_iir);
		new_gt_iir = I915_READ(GTIIR);

		/* Keep the legacy sarea breadcrumb in sync for DRI1 clients. */
		if (dev->primary->master) {
			master_priv = dev->primary->master->driver_priv;
			if (master_priv->sarea_priv)
				master_priv->sarea_priv->last_dispatch =
					READ_BREADCRUMB(dev_priv);
		}

		/* A batch completed: record the seqno, wake waiters, and
		 * reset the hangcheck timer since the GPU made progress. */
		if (gt_iir & GT_USER_INTERRUPT) {
			u32 seqno = i915_get_gem_seqno(dev);
			dev_priv->mm.irq_gem_seqno = seqno;
			trace_i915_gem_request_complete(dev, seqno);
			DRM_WAKEUP(&dev_priv->irq_queue);
			dev_priv->hangcheck_count = 0;
			mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
		}

		if (de_iir & DE_GSE)
			ironlake_opregion_gse_intr(dev);

		/* check event from PCH */
		if ((de_iir & DE_PCH_EVENT) &&
			(pch_iir & SDE_HOTPLUG_MASK)) {
			queue_work(dev_priv->wq, &dev_priv->hotplug_work);
		}

		de_iir = new_de_iir;
		gt_iir = new_gt_iir;
		pch_iir = new_pch_iir;
	}

	/* Restore master interrupt delivery to its previous state. */
	I915_WRITE(DEIER, de_ier);
	(void)I915_READ(DEIER);

	return ret;
}
339 
/**
 * i915_error_work_func - do process context error handling work
 * @work: work struct
 *
 * Fire an error uevent so userspace can see that a hang or error
 * was detected.  On 965-class hardware, additionally attempt a render
 * reset and report the outcome via further uevents.
 */
static void i915_error_work_func(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    error_work);
	struct drm_device *dev = dev_priv->dev;
	char *error_event[] = { "ERROR=1", NULL };
	char *reset_event[] = { "RESET=1", NULL };
	char *reset_done_event[] = { "ERROR=0", NULL };

	DRM_DEBUG_DRIVER("generating error event\n");
	kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);

	if (atomic_read(&dev_priv->mm.wedged)) {
		if (IS_I965G(dev)) {
			/* Only 965-class parts have a usable render reset. */
			DRM_DEBUG_DRIVER("resetting chip\n");
			kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event);
			if (!i965_reset(dev, GDRST_RENDER)) {
				/* Reset succeeded: un-wedge and announce it. */
				atomic_set(&dev_priv->mm.wedged, 0);
				kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event);
			}
		} else {
			DRM_DEBUG_DRIVER("reboot required\n");
		}
	}
}
372 
373 /**
374  * i915_capture_error_state - capture an error record for later analysis
375  * @dev: drm device
376  *
377  * Should be called when an error is detected (either a hang or an error
378  * interrupt) to capture error state from the time of the error.  Fills
379  * out a structure which becomes available in debugfs for user level tools
380  * to pick up.
381  */
382 static void i915_capture_error_state(struct drm_device *dev)
383 {
384 	struct drm_i915_private *dev_priv = dev->dev_private;
385 	struct drm_i915_error_state *error;
386 	unsigned long flags;
387 
388 	spin_lock_irqsave(&dev_priv->error_lock, flags);
389 	if (dev_priv->first_error)
390 		goto out;
391 
392 	error = kmalloc(sizeof(*error), GFP_ATOMIC);
393 	if (!error) {
394 		DRM_DEBUG_DRIVER("out ot memory, not capturing error state\n");
395 		goto out;
396 	}
397 
398 	error->eir = I915_READ(EIR);
399 	error->pgtbl_er = I915_READ(PGTBL_ER);
400 	error->pipeastat = I915_READ(PIPEASTAT);
401 	error->pipebstat = I915_READ(PIPEBSTAT);
402 	error->instpm = I915_READ(INSTPM);
403 	if (!IS_I965G(dev)) {
404 		error->ipeir = I915_READ(IPEIR);
405 		error->ipehr = I915_READ(IPEHR);
406 		error->instdone = I915_READ(INSTDONE);
407 		error->acthd = I915_READ(ACTHD);
408 	} else {
409 		error->ipeir = I915_READ(IPEIR_I965);
410 		error->ipehr = I915_READ(IPEHR_I965);
411 		error->instdone = I915_READ(INSTDONE_I965);
412 		error->instps = I915_READ(INSTPS);
413 		error->instdone1 = I915_READ(INSTDONE1);
414 		error->acthd = I915_READ(ACTHD_I965);
415 	}
416 
417 	do_gettimeofday(&error->time);
418 
419 	dev_priv->first_error = error;
420 
421 out:
422 	spin_unlock_irqrestore(&dev_priv->error_lock, flags);
423 }
424 
/**
 * i915_handle_error - handle an error interrupt
 * @dev: drm device
 * @wedged: whether the GPU should be considered hung
 *
 * Do some basic checking of register state at error interrupt time and
 * dump it to the syslog.  Also call i915_capture_error_state() to make
 * sure we get a record and make it available in debugfs.  Fire a uevent
 * so userspace knows something bad happened (should trigger collection
 * of a ring dump etc.).
 */
static void i915_handle_error(struct drm_device *dev, bool wedged)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 eir = I915_READ(EIR);
	u32 pipea_stats = I915_READ(PIPEASTAT);
	u32 pipeb_stats = I915_READ(PIPEBSTAT);

	/* Snapshot the registers for debugfs before we start acking. */
	i915_capture_error_state(dev);

	printk(KERN_ERR "render error detected, EIR: 0x%08x\n",
	       eir);

	if (IS_G4X(dev)) {
		if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
			u32 ipeir = I915_READ(IPEIR_I965);

			printk(KERN_ERR "  IPEIR: 0x%08x\n",
			       I915_READ(IPEIR_I965));
			printk(KERN_ERR "  IPEHR: 0x%08x\n",
			       I915_READ(IPEHR_I965));
			printk(KERN_ERR "  INSTDONE: 0x%08x\n",
			       I915_READ(INSTDONE_I965));
			printk(KERN_ERR "  INSTPS: 0x%08x\n",
			       I915_READ(INSTPS));
			printk(KERN_ERR "  INSTDONE1: 0x%08x\n",
			       I915_READ(INSTDONE1));
			printk(KERN_ERR "  ACTHD: 0x%08x\n",
			       I915_READ(ACTHD_I965));
			/* Write back to ack, then flush the posted write. */
			I915_WRITE(IPEIR_I965, ipeir);
			(void)I915_READ(IPEIR_I965);
		}
		if (eir & GM45_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			printk(KERN_ERR "page table error\n");
			printk(KERN_ERR "  PGTBL_ER: 0x%08x\n",
			       pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			(void)I915_READ(PGTBL_ER);
		}
	}

	if (IS_I9XX(dev)) {
		if (eir & I915_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			printk(KERN_ERR "page table error\n");
			printk(KERN_ERR "  PGTBL_ER: 0x%08x\n",
			       pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			(void)I915_READ(PGTBL_ER);
		}
	}

	if (eir & I915_ERROR_MEMORY_REFRESH) {
		printk(KERN_ERR "memory refresh error\n");
		printk(KERN_ERR "PIPEASTAT: 0x%08x\n",
		       pipea_stats);
		printk(KERN_ERR "PIPEBSTAT: 0x%08x\n",
		       pipeb_stats);
		/* pipestat has already been acked */
	}
	if (eir & I915_ERROR_INSTRUCTION) {
		printk(KERN_ERR "instruction error\n");
		printk(KERN_ERR "  INSTPM: 0x%08x\n",
		       I915_READ(INSTPM));
		/* Pre-965 and 965+ keep these registers at different
		 * offsets; 965+ also reports INSTPS/INSTDONE1. */
		if (!IS_I965G(dev)) {
			u32 ipeir = I915_READ(IPEIR);

			printk(KERN_ERR "  IPEIR: 0x%08x\n",
			       I915_READ(IPEIR));
			printk(KERN_ERR "  IPEHR: 0x%08x\n",
			       I915_READ(IPEHR));
			printk(KERN_ERR "  INSTDONE: 0x%08x\n",
			       I915_READ(INSTDONE));
			printk(KERN_ERR "  ACTHD: 0x%08x\n",
			       I915_READ(ACTHD));
			I915_WRITE(IPEIR, ipeir);
			(void)I915_READ(IPEIR);
		} else {
			u32 ipeir = I915_READ(IPEIR_I965);

			printk(KERN_ERR "  IPEIR: 0x%08x\n",
			       I915_READ(IPEIR_I965));
			printk(KERN_ERR "  IPEHR: 0x%08x\n",
			       I915_READ(IPEHR_I965));
			printk(KERN_ERR "  INSTDONE: 0x%08x\n",
			       I915_READ(INSTDONE_I965));
			printk(KERN_ERR "  INSTPS: 0x%08x\n",
			       I915_READ(INSTPS));
			printk(KERN_ERR "  INSTDONE1: 0x%08x\n",
			       I915_READ(INSTDONE1));
			printk(KERN_ERR "  ACTHD: 0x%08x\n",
			       I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			(void)I915_READ(IPEIR_I965);
		}
	}

	I915_WRITE(EIR, eir);
	(void)I915_READ(EIR);
	eir = I915_READ(EIR);
	if (eir) {
		/*
		 * some errors might have become stuck,
		 * mask them.
		 */
		DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
		I915_WRITE(EMR, I915_READ(EMR) | eir);
		I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	}

	if (wedged) {
		atomic_set(&dev_priv->mm.wedged, 1);

		/*
		 * Wakeup waiting processes so they don't hang
		 */
		DRM_WAKEUP(&dev_priv->irq_queue);
	}

	/* uevent generation needs process context; defer to a workqueue. */
	queue_work(dev_priv->wq, &dev_priv->error_work);
}
556 
/* Top-level interrupt handler for pre-Ironlake parts (Ironlake is
 * delegated to ironlake_irq_handler).  Loops acking IIR and the
 * PIPE(A|B)STAT registers until no new events are pending, dispatching
 * user interrupts, page-flip events, vblanks, hotplug and ASLE.
 */
irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_i915_master_private *master_priv;
	u32 iir, new_iir;
	u32 pipea_stats, pipeb_stats;
	u32 vblank_status;
	u32 vblank_enable;
	int vblank = 0;
	unsigned long irqflags;
	int irq_received;
	int ret = IRQ_NONE;

	atomic_inc(&dev_priv->irq_received);

	if (IS_IRONLAKE(dev))
		return ironlake_irq_handler(dev);

	iir = I915_READ(IIR);

	/* 965+ signals vblank start separately from the legacy event. */
	if (IS_I965G(dev)) {
		vblank_status = I915_START_VBLANK_INTERRUPT_STATUS;
		vblank_enable = PIPE_START_VBLANK_INTERRUPT_ENABLE;
	} else {
		vblank_status = I915_VBLANK_INTERRUPT_STATUS;
		vblank_enable = I915_VBLANK_INTERRUPT_ENABLE;
	}

	for (;;) {
		irq_received = iir != 0;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
		pipea_stats = I915_READ(PIPEASTAT);
		pipeb_stats = I915_READ(PIPEBSTAT);

		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		/*
		 * Clear the PIPE(A|B)STAT regs before the IIR
		 */
		if (pipea_stats & 0x8000ffff) {
			if (pipea_stats &  PIPE_FIFO_UNDERRUN_STATUS)
				DRM_DEBUG_DRIVER("pipe a underrun\n");
			I915_WRITE(PIPEASTAT, pipea_stats);
			irq_received = 1;
		}

		if (pipeb_stats & 0x8000ffff) {
			if (pipeb_stats &  PIPE_FIFO_UNDERRUN_STATUS)
				DRM_DEBUG_DRIVER("pipe b underrun\n");
			I915_WRITE(PIPEBSTAT, pipeb_stats);
			irq_received = 1;
		}
		spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);

		if (!irq_received)
			break;

		ret = IRQ_HANDLED;

		/* Consume port.  Then clear IIR or we'll miss events */
		if ((I915_HAS_HOTPLUG(dev)) &&
		    (iir & I915_DISPLAY_PORT_INTERRUPT)) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
				  hotplug_status);
			if (hotplug_status & dev_priv->hotplug_supported_mask)
				queue_work(dev_priv->wq,
					   &dev_priv->hotplug_work);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			I915_READ(PORT_HOTPLUG_STAT);
		}

		I915_WRITE(IIR, iir);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		/* Keep the legacy sarea breadcrumb in sync for DRI1 clients. */
		if (dev->primary->master) {
			master_priv = dev->primary->master->driver_priv;
			if (master_priv->sarea_priv)
				master_priv->sarea_priv->last_dispatch =
					READ_BREADCRUMB(dev_priv);
		}

		/* A batch completed: record the seqno, wake waiters, and
		 * reset the hangcheck timer since the GPU made progress. */
		if (iir & I915_USER_INTERRUPT) {
			u32 seqno = i915_get_gem_seqno(dev);
			dev_priv->mm.irq_gem_seqno = seqno;
			trace_i915_gem_request_complete(dev, seqno);
			DRM_WAKEUP(&dev_priv->irq_queue);
			dev_priv->hangcheck_count = 0;
			mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
		}

		if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT)
			intel_prepare_page_flip(dev, 0);

		if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT)
			intel_prepare_page_flip(dev, 1);

		if (pipea_stats & vblank_status) {
			vblank++;
			drm_handle_vblank(dev, 0);
			intel_finish_page_flip(dev, 0);
		}

		if (pipeb_stats & vblank_status) {
			vblank++;
			drm_handle_vblank(dev, 1);
			intel_finish_page_flip(dev, 1);
		}

		if ((pipeb_stats & I915_LEGACY_BLC_EVENT_STATUS) ||
		    (iir & I915_ASLE_INTERRUPT))
			opregion_asle_intr(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero.  If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read.  This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		iir = new_iir;
	}

	return ret;
}
700 
/* Advance the breadcrumb counter, store it to the status page via
 * MI_STORE_DWORD_INDEX and emit an MI_USER_INTERRUPT, returning the
 * value the completion interrupt will report.  Touches the ring, so
 * the caller serializes access (see i915_irq_emit()).
 */
static int i915_emit_irq(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	RING_LOCALS;

	i915_kernel_lost_context(dev);

	DRM_DEBUG_DRIVER("\n");

	/* Wrap to 1 before the counter leaves the positive int range. */
	dev_priv->counter++;
	if (dev_priv->counter > 0x7FFFFFFFUL)
		dev_priv->counter = 1;
	if (master_priv->sarea_priv)
		master_priv->sarea_priv->last_enqueue = dev_priv->counter;

	BEGIN_LP_RING(4);
	OUT_RING(MI_STORE_DWORD_INDEX);
	OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	OUT_RING(dev_priv->counter);
	OUT_RING(MI_USER_INTERRUPT);
	ADVANCE_LP_RING();

	return dev_priv->counter;
}
726 
727 void i915_user_irq_get(struct drm_device *dev)
728 {
729 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
730 	unsigned long irqflags;
731 
732 	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
733 	if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1)) {
734 		if (IS_IRONLAKE(dev))
735 			ironlake_enable_graphics_irq(dev_priv, GT_USER_INTERRUPT);
736 		else
737 			i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
738 	}
739 	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
740 }
741 
742 void i915_user_irq_put(struct drm_device *dev)
743 {
744 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
745 	unsigned long irqflags;
746 
747 	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
748 	BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0);
749 	if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0)) {
750 		if (IS_IRONLAKE(dev))
751 			ironlake_disable_graphics_irq(dev_priv, GT_USER_INTERRUPT);
752 		else
753 			i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
754 	}
755 	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
756 }
757 
758 void i915_trace_irq_get(struct drm_device *dev, u32 seqno)
759 {
760 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
761 
762 	if (dev_priv->trace_irq_seqno == 0)
763 		i915_user_irq_get(dev);
764 
765 	dev_priv->trace_irq_seqno = seqno;
766 }
767 
/* Wait up to 3 seconds for the breadcrumb in the status page to reach
 * @irq_nr, using the user interrupt to wake up.  Returns 0 on success
 * or the DRM_WAIT_ON error (-EBUSY on timeout).
 */
static int i915_wait_irq(struct drm_device * dev, int irq_nr)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	int ret = 0;

	DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr,
		  READ_BREADCRUMB(dev_priv));

	/* Fast path: the breadcrumb has already passed the target. */
	if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
		if (master_priv->sarea_priv)
			master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
		return 0;
	}

	if (master_priv->sarea_priv)
		master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;

	/* Hold a user-irq reference for the duration of the wait so the
	 * completion interrupt can wake us. */
	i915_user_irq_get(dev);
	DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ,
		    READ_BREADCRUMB(dev_priv) >= irq_nr);
	i915_user_irq_put(dev);

	if (ret == -EBUSY) {
		DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
			  READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
	}

	return ret;
}
798 
799 /* Needs the lock as it touches the ring.
800  */
801 int i915_irq_emit(struct drm_device *dev, void *data,
802 			 struct drm_file *file_priv)
803 {
804 	drm_i915_private_t *dev_priv = dev->dev_private;
805 	drm_i915_irq_emit_t *emit = data;
806 	int result;
807 
808 	if (!dev_priv || !dev_priv->ring.virtual_start) {
809 		DRM_ERROR("called with no initialization\n");
810 		return -EINVAL;
811 	}
812 
813 	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
814 
815 	mutex_lock(&dev->struct_mutex);
816 	result = i915_emit_irq(dev);
817 	mutex_unlock(&dev->struct_mutex);
818 
819 	if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
820 		DRM_ERROR("copy_to_user\n");
821 		return -EFAULT;
822 	}
823 
824 	return 0;
825 }
826 
827 /* Doesn't need the hardware lock.
828  */
829 int i915_irq_wait(struct drm_device *dev, void *data,
830 			 struct drm_file *file_priv)
831 {
832 	drm_i915_private_t *dev_priv = dev->dev_private;
833 	drm_i915_irq_wait_t *irqwait = data;
834 
835 	if (!dev_priv) {
836 		DRM_ERROR("called with no initialization\n");
837 		return -EINVAL;
838 	}
839 
840 	return i915_wait_irq(dev, irqwait->irq_seq);
841 }
842 
843 /* Called from drm generic code, passed 'crtc' which
844  * we use as a pipe index
845  */
846 int i915_enable_vblank(struct drm_device *dev, int pipe)
847 {
848 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
849 	unsigned long irqflags;
850 	int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
851 	u32 pipeconf;
852 
853 	pipeconf = I915_READ(pipeconf_reg);
854 	if (!(pipeconf & PIPEACONF_ENABLE))
855 		return -EINVAL;
856 
857 	if (IS_IRONLAKE(dev))
858 		return 0;
859 
860 	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
861 	if (IS_I965G(dev))
862 		i915_enable_pipestat(dev_priv, pipe,
863 				     PIPE_START_VBLANK_INTERRUPT_ENABLE);
864 	else
865 		i915_enable_pipestat(dev_priv, pipe,
866 				     PIPE_VBLANK_INTERRUPT_ENABLE);
867 	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
868 	return 0;
869 }
870 
871 /* Called from drm generic code, passed 'crtc' which
872  * we use as a pipe index
873  */
874 void i915_disable_vblank(struct drm_device *dev, int pipe)
875 {
876 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
877 	unsigned long irqflags;
878 
879 	if (IS_IRONLAKE(dev))
880 		return;
881 
882 	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
883 	i915_disable_pipestat(dev_priv, pipe,
884 			      PIPE_VBLANK_INTERRUPT_ENABLE |
885 			      PIPE_START_VBLANK_INTERRUPT_ENABLE);
886 	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
887 }
888 
/* Mark interrupts as enabled for the driver; on pre-Ironlake parts also
 * arm the OpRegion ASLE interrupt (Ironlake handles ASLE via DE_GSE).
 */
void i915_enable_interrupt (struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!IS_IRONLAKE(dev))
		opregion_enable_asle(dev);
	dev_priv->irq_enabled = 1;
}
897 
898 
899 /* Set the vblank monitor pipe
900  */
901 int i915_vblank_pipe_set(struct drm_device *dev, void *data,
902 			 struct drm_file *file_priv)
903 {
904 	drm_i915_private_t *dev_priv = dev->dev_private;
905 
906 	if (!dev_priv) {
907 		DRM_ERROR("called with no initialization\n");
908 		return -EINVAL;
909 	}
910 
911 	return 0;
912 }
913 
914 int i915_vblank_pipe_get(struct drm_device *dev, void *data,
915 			 struct drm_file *file_priv)
916 {
917 	drm_i915_private_t *dev_priv = dev->dev_private;
918 	drm_i915_vblank_pipe_t *pipe = data;
919 
920 	if (!dev_priv) {
921 		DRM_ERROR("called with no initialization\n");
922 		return -EINVAL;
923 	}
924 
925 	pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
926 
927 	return 0;
928 }
929 
930 /**
931  * Schedule buffer swap at given vertical blank.
932  */
933 int i915_vblank_swap(struct drm_device *dev, void *data,
934 		     struct drm_file *file_priv)
935 {
936 	/* The delayed swap mechanism was fundamentally racy, and has been
937 	 * removed.  The model was that the client requested a delayed flip/swap
938 	 * from the kernel, then waited for vblank before continuing to perform
939 	 * rendering.  The problem was that the kernel might wake the client
940 	 * up before it dispatched the vblank swap (since the lock has to be
941 	 * held while touching the ringbuffer), in which case the client would
942 	 * clear and start the next frame before the swap occurred, and
943 	 * flicker would occur in addition to likely missing the vblank.
944 	 *
945 	 * In the absence of this ioctl, userland falls back to a correct path
946 	 * of waiting for a vblank, then dispatching the swap on its own.
947 	 * Context switching to userland and back is plenty fast enough for
948 	 * meeting the requirements of vblank swapping.
949 	 */
950 	return -EINVAL;
951 }
952 
953 struct drm_i915_gem_request *i915_get_tail_request(struct drm_device *dev) {
954 	drm_i915_private_t *dev_priv = dev->dev_private;
955 	return list_entry(dev_priv->mm.request_list.prev, struct drm_i915_gem_request, list);
956 }
957 
/**
 * This is called when the chip hasn't reported back with completed
 * batchbuffers in a long time. The first time this is called we simply record
 * ACTHD. If ACTHD hasn't changed by the time the hangcheck timer elapses
 * again, we assume the chip is wedged and try to fix it.
 */
void i915_hangcheck_elapsed(unsigned long data)
{
	struct drm_device *dev = (struct drm_device *)data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t acthd;

	/* ACTHD (the active head pointer) lives at a different offset on
	 * 965-class hardware. */
	if (!IS_I965G(dev))
		acthd = I915_READ(ACTHD);
	else
		acthd = I915_READ(ACTHD_I965);

	/* If all work is done then ACTHD clearly hasn't advanced. */
	if (list_empty(&dev_priv->mm.request_list) ||
		       i915_seqno_passed(i915_get_gem_seqno(dev), i915_get_tail_request(dev)->seqno)) {
		dev_priv->hangcheck_count = 0;
		return;
	}

	/* Second consecutive tick with a stalled head pointer while work is
	 * outstanding: declare the GPU hung. */
	if (dev_priv->last_acthd == acthd && dev_priv->hangcheck_count > 0) {
		DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
		i915_handle_error(dev, true);
		return;
	}

	/* Reset timer case chip hangs without another request being added */
	mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);

	if (acthd != dev_priv->last_acthd)
		dev_priv->hangcheck_count = 0;
	else
		dev_priv->hangcheck_count++;

	dev_priv->last_acthd = acthd;
}
998 
999 /* drm_dma.h hooks
1000 */
/*
 * Quiesce all Ironlake interrupt sources before the IRQ handler is
 * installed: mask hardware status reporting, then mask and disable the
 * display engine (DE), render (GT) and south display / PCH (SDE)
 * interrupt register sets.  Each disable is followed by a discarded
 * read-back of the register just written — presumably a posting read
 * to flush the write before the handler can be invoked (standard i915
 * idiom; the same pattern appears throughout this file).
 */
static void ironlake_irq_preinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	/* 0xeffe matches the value used by i915_driver_irq_preinstall;
	 * exact bit meaning not visible here. */
	I915_WRITE(HWSTAM, 0xeffe);

	/* XXX hotplug from PCH */

	/* Display engine: mask everything, enable nothing. */
	I915_WRITE(DEIMR, 0xffffffff);
	I915_WRITE(DEIER, 0x0);
	(void) I915_READ(DEIER);

	/* and GT */
	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	(void) I915_READ(GTIER);

	/* south display irq */
	I915_WRITE(SDEIMR, 0xffffffff);
	I915_WRITE(SDEIER, 0x0);
	(void) I915_READ(SDEIER);
}
1023 
/*
 * Enable the baseline Ironlake interrupts after the handler is
 * installed: GSE and PCH events on the display engine, the user
 * interrupt on the render ring, and the four PCH hotplug sources.
 * For each register set the sequence is: ack anything pending (write
 * IIR back to itself), program the mask, program the enable, then do a
 * discarded read-back — presumably a posting read (same idiom as
 * ironlake_irq_preinstall).  The chosen masks are cached in dev_priv
 * so the runtime enable/disable helpers can restore them.
 *
 * Returns 0 (no failure paths).
 */
static int ironlake_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	/* enable kind of interrupts always enabled */
	u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT;
	u32 render_mask = GT_USER_INTERRUPT;
	u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
			   SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;

	dev_priv->irq_mask_reg = ~display_mask;
	dev_priv->de_irq_enable_reg = display_mask;

	/* should always can generate irq */
	I915_WRITE(DEIIR, I915_READ(DEIIR));
	I915_WRITE(DEIMR, dev_priv->irq_mask_reg);
	I915_WRITE(DEIER, dev_priv->de_irq_enable_reg);
	(void) I915_READ(DEIER);

	/* user interrupt should be enabled, but masked initial */
	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg);
	I915_WRITE(GTIER, dev_priv->gt_irq_enable_reg);
	(void) I915_READ(GTIER);

	/* South display / PCH: hotplug sources enabled and unmasked. */
	dev_priv->pch_irq_mask_reg = ~hotplug_mask;
	dev_priv->pch_irq_enable_reg = hotplug_mask;

	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
	I915_WRITE(SDEIMR, dev_priv->pch_irq_mask_reg);
	I915_WRITE(SDEIER, dev_priv->pch_irq_enable_reg);
	(void) I915_READ(SDEIER);

	return 0;
}
1061 
/*
 * DRM irq_preinstall hook: silence all interrupt sources before
 * request_irq() installs the handler.  Initializes the hotplug and
 * error workqueue items, then either delegates to the Ironlake-specific
 * quiesce or disables the legacy (pre-Ironlake) register set directly.
 */
void i915_driver_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	atomic_set(&dev_priv->irq_received, 0);

	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
	INIT_WORK(&dev_priv->error_work, i915_error_work_func);

	if (IS_IRONLAKE(dev)) {
		ironlake_irq_preinstall(dev);
		return;
	}

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		/* Writing the status register back to itself acks any
		 * pending hotplug status bits. */
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	/* Disable pipe status reporting and mask/disable everything;
	 * final discarded read of IER is presumably a posting read. */
	I915_WRITE(HWSTAM, 0xeffe);
	I915_WRITE(PIPEASTAT, 0);
	I915_WRITE(PIPEBSTAT, 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	(void) I915_READ(IER);
}
1088 
1089 /*
1090  * Must be called after intel_modeset_init or hotplug interrupts won't be
1091  * enabled correctly.
1092  */
/*
 * DRM irq_postinstall hook for the legacy (pre-Ironlake) register set;
 * Ironlake is delegated to ironlake_irq_postinstall.  Unmasks the
 * always-on interrupts, wires up hotplug detection for the ports the
 * display setup reported (hotplug_supported_mask), programs the error
 * mask, clears stale pipe/IIR state, and finally enables interrupt
 * delivery.
 *
 * Returns 0 on this path; the Ironlake path's return value is passed
 * through.
 */
int i915_driver_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 enable_mask = I915_INTERRUPT_ENABLE_FIX | I915_INTERRUPT_ENABLE_VAR;
	u32 error_mask;

	DRM_INIT_WAITQUEUE(&dev_priv->irq_queue);

	dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;

	if (IS_IRONLAKE(dev))
		return ironlake_irq_postinstall(dev);

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask_reg = ~I915_INTERRUPT_ENABLE_FIX;

	/* Per-pipe event enables start empty; individual events are
	 * turned on later via the PIPESTAT helpers. */
	dev_priv->pipestat[0] = 0;
	dev_priv->pipestat[1] = 0;

	if (I915_HAS_HOTPLUG(dev)) {
		u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);

		/* Note HDMI and DP share bits */
		if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
			hotplug_en |= HDMIB_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
			hotplug_en |= HDMIC_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
			hotplug_en |= HDMID_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS)
			hotplug_en |= SDVOC_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS)
			hotplug_en |= SDVOB_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS)
			hotplug_en |= CRT_HOTPLUG_INT_EN;
		/* Ignore TV since it's buggy */

		I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);

		/* Enable in IER... */
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
		/* and unmask in IMR */
		i915_enable_irq(dev_priv, I915_DISPLAY_PORT_INTERRUPT);
	}

	/*
	 * Enable some error detection, note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	I915_WRITE(EMR, error_mask);

	/* Disable pipe interrupt enables, clear pending pipe status.
	 * 0x8000ffff keeps the status (write-one-to-clear) bits while
	 * zeroing the enable bits; same mask as used in uninstall. */
	I915_WRITE(PIPEASTAT, I915_READ(PIPEASTAT) & 0x8000ffff);
	I915_WRITE(PIPEBSTAT, I915_READ(PIPEBSTAT) & 0x8000ffff);
	/* Clear pending interrupt status */
	I915_WRITE(IIR, I915_READ(IIR));

	/* Only now enable delivery; discarded IER read is presumably a
	 * posting read. */
	I915_WRITE(IER, enable_mask);
	I915_WRITE(IMR, dev_priv->irq_mask_reg);
	(void) I915_READ(IER);

	/* Let the ACPI opregion know ASLE interrupts are available. */
	opregion_enable_asle(dev);

	return 0;
}
1167 
1168 static void ironlake_irq_uninstall(struct drm_device *dev)
1169 {
1170 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1171 	I915_WRITE(HWSTAM, 0xffffffff);
1172 
1173 	I915_WRITE(DEIMR, 0xffffffff);
1174 	I915_WRITE(DEIER, 0x0);
1175 	I915_WRITE(DEIIR, I915_READ(DEIIR));
1176 
1177 	I915_WRITE(GTIMR, 0xffffffff);
1178 	I915_WRITE(GTIER, 0x0);
1179 	I915_WRITE(GTIIR, I915_READ(GTIIR));
1180 }
1181 
/*
 * DRM irq_uninstall hook: disable and acknowledge all interrupt
 * sources on teardown.  Ironlake is delegated to its own helper; the
 * legacy path disables hotplug, masks everything, then clears any
 * status bits that may still be pending.
 */
void i915_driver_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	/* Nothing to tear down if the driver never finished loading. */
	if (!dev_priv)
		return;

	dev_priv->vblank_pipe = 0;

	if (IS_IRONLAKE(dev)) {
		ironlake_irq_uninstall(dev);
		return;
	}

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		/* Write-back acks any pending hotplug status bits. */
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE(HWSTAM, 0xffffffff);
	I915_WRITE(PIPEASTAT, 0);
	I915_WRITE(PIPEBSTAT, 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	/* Clear leftover status after disabling: 0x8000ffff preserves
	 * the write-one-to-clear status bits while zeroing enables
	 * (same mask as i915_driver_irq_postinstall). */
	I915_WRITE(PIPEASTAT, I915_READ(PIPEASTAT) & 0x8000ffff);
	I915_WRITE(PIPEBSTAT, I915_READ(PIPEBSTAT) & 0x8000ffff);
	I915_WRITE(IIR, I915_READ(IIR));
}
1211