xref: /openbmc/linux/drivers/gpu/drm/i915/i915_irq.c (revision e8f6f3b4)
1 /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
2  */
3 /*
4  * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
5  * All Rights Reserved.
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a
8  * copy of this software and associated documentation files (the
9  * "Software"), to deal in the Software without restriction, including
10  * without limitation the rights to use, copy, modify, merge, publish,
11  * distribute, sub license, and/or sell copies of the Software, and to
12  * permit persons to whom the Software is furnished to do so, subject to
13  * the following conditions:
14  *
15  * The above copyright notice and this permission notice (including the
16  * next paragraph) shall be included in all copies or substantial portions
17  * of the Software.
18  *
19  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22  * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26  *
27  */
28 
29 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30 
31 #include <linux/sysrq.h>
32 #include <linux/slab.h>
33 #include <linux/circ_buf.h>
34 #include <drm/drmP.h>
35 #include <drm/i915_drm.h>
36 #include "i915_drv.h"
37 #include "i915_trace.h"
38 #include "intel_drv.h"
39 
40 /**
41  * DOC: interrupt handling
42  *
43  * These functions provide the basic support for enabling and disabling the
44  * interrupt handling support. There's a lot more functionality in i915_irq.c
45  * and related files, but that will be described in separate chapters.
46  */
47 
48 static const u32 hpd_ibx[] = {
49 	[HPD_CRT] = SDE_CRT_HOTPLUG,
50 	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
51 	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
52 	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
53 	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
54 };
55 
56 static const u32 hpd_cpt[] = {
57 	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
58 	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
59 	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
60 	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
61 	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
62 };
63 
64 static const u32 hpd_mask_i915[] = {
65 	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
66 	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
67 	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
68 	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
69 	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
70 	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
71 };
72 
73 static const u32 hpd_status_g4x[] = {
74 	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
75 	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
76 	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
77 	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
78 	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
79 	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
80 };
81 
82 static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
83 	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
84 	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
85 	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
86 	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
87 	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
88 	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
89 };
90 
91 /* IIR can theoretically queue up two events. Be paranoid. */
92 #define GEN8_IRQ_RESET_NDX(type, which) do { \
93 	I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
94 	POSTING_READ(GEN8_##type##_IMR(which)); \
95 	I915_WRITE(GEN8_##type##_IER(which), 0); \
96 	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
97 	POSTING_READ(GEN8_##type##_IIR(which)); \
98 	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
99 	POSTING_READ(GEN8_##type##_IIR(which)); \
100 } while (0)
101 
102 #define GEN5_IRQ_RESET(type) do { \
103 	I915_WRITE(type##IMR, 0xffffffff); \
104 	POSTING_READ(type##IMR); \
105 	I915_WRITE(type##IER, 0); \
106 	I915_WRITE(type##IIR, 0xffffffff); \
107 	POSTING_READ(type##IIR); \
108 	I915_WRITE(type##IIR, 0xffffffff); \
109 	POSTING_READ(type##IIR); \
110 } while (0)
111 
112 /*
113  * We should clear IMR at preinstall/uninstall, and just check at postinstall.
114  */
115 #define GEN5_ASSERT_IIR_IS_ZERO(reg) do { \
116 	u32 val = I915_READ(reg); \
117 	if (val) { \
118 		WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n", \
119 		     (reg), val); \
120 		I915_WRITE((reg), 0xffffffff); \
121 		POSTING_READ(reg); \
122 		I915_WRITE((reg), 0xffffffff); \
123 		POSTING_READ(reg); \
124 	} \
125 } while (0)
126 
127 #define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
128 	GEN5_ASSERT_IIR_IS_ZERO(GEN8_##type##_IIR(which)); \
129 	I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
130 	I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
131 	POSTING_READ(GEN8_##type##_IMR(which)); \
132 } while (0)
133 
134 #define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
135 	GEN5_ASSERT_IIR_IS_ZERO(type##IIR); \
136 	I915_WRITE(type##IER, (ier_val)); \
137 	I915_WRITE(type##IMR, (imr_val)); \
138 	POSTING_READ(type##IMR); \
139 } while (0)
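
/*
 * The reset/init macros above share a pattern: a reset masks everything
 * in IMR, zeroes IER and writes IIR twice (with posting reads) because,
 * as noted above, IIR can queue up two events; an init warns (and
 * clears) if IIR is not already zero, then programs IER before IMR and
 * posts a read of IMR so the writes are flushed before the interrupts
 * are relied upon.
 */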
140 
141 static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
142 
143 /* For display hotplug interrupt */
144 void
145 ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
146 {
147 	assert_spin_locked(&dev_priv->irq_lock);
148 
149 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
150 		return;
151 
152 	if ((dev_priv->irq_mask & mask) != 0) {
153 		dev_priv->irq_mask &= ~mask;
154 		I915_WRITE(DEIMR, dev_priv->irq_mask);
155 		POSTING_READ(DEIMR);
156 	}
157 }
158 
159 void
160 ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
161 {
162 	assert_spin_locked(&dev_priv->irq_lock);
163 
164 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
165 		return;
166 
167 	if ((dev_priv->irq_mask & mask) != mask) {
168 		dev_priv->irq_mask |= mask;
169 		I915_WRITE(DEIMR, dev_priv->irq_mask);
170 		POSTING_READ(DEIMR);
171 	}
172 }
173 
174 /**
175  * ilk_update_gt_irq - update GTIMR
176  * @dev_priv: driver private
177  * @interrupt_mask: mask of interrupt bits to update
178  * @enabled_irq_mask: mask of interrupt bits to enable
179  */
180 static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
181 			      uint32_t interrupt_mask,
182 			      uint32_t enabled_irq_mask)
183 {
184 	assert_spin_locked(&dev_priv->irq_lock);
185 
186 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
187 		return;
188 
189 	dev_priv->gt_irq_mask &= ~interrupt_mask;
190 	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
191 	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
192 	POSTING_READ(GTIMR);
193 }
194 
195 void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
196 {
197 	ilk_update_gt_irq(dev_priv, mask, mask);
198 }
199 
200 void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
201 {
202 	ilk_update_gt_irq(dev_priv, mask, 0);
203 }
204 
205 static u32 gen6_pm_iir(struct drm_i915_private *dev_priv)
206 {
207 	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
208 }
209 
210 static u32 gen6_pm_imr(struct drm_i915_private *dev_priv)
211 {
212 	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR;
213 }
214 
215 static u32 gen6_pm_ier(struct drm_i915_private *dev_priv)
216 {
217 	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER;
218 }
219 
220 /**
221  * snb_update_pm_irq - update GEN6_PMIMR
222  * @dev_priv: driver private
223  * @interrupt_mask: mask of interrupt bits to update
224  * @enabled_irq_mask: mask of interrupt bits to enable
225  */
226 static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
227 			      uint32_t interrupt_mask,
228 			      uint32_t enabled_irq_mask)
229 {
230 	uint32_t new_val;
231 
232 	assert_spin_locked(&dev_priv->irq_lock);
233 
234 	new_val = dev_priv->pm_irq_mask;
235 	new_val &= ~interrupt_mask;
236 	new_val |= (~enabled_irq_mask & interrupt_mask);
237 
238 	if (new_val != dev_priv->pm_irq_mask) {
239 		dev_priv->pm_irq_mask = new_val;
240 		I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_irq_mask);
241 		POSTING_READ(gen6_pm_imr(dev_priv));
242 	}
243 }
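
/*
 * ilk_update_gt_irq() and snb_update_pm_irq() share the same masking
 * convention: within interrupt_mask, bits that are also set in
 * enabled_irq_mask get cleared in the cached IMR value (unmasking them)
 * and the remaining bits get set (masking them); bits outside
 * interrupt_mask are left untouched.  For example interrupt_mask=0x3
 * with enabled_irq_mask=0x1 clears bit 0 and sets bit 1 before the
 * value is written back to the hardware register.
 */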
244 
245 void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
246 {
247 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
248 		return;
249 
250 	snb_update_pm_irq(dev_priv, mask, mask);
251 }
252 
253 static void __gen6_disable_pm_irq(struct drm_i915_private *dev_priv,
254 				  uint32_t mask)
255 {
256 	snb_update_pm_irq(dev_priv, mask, 0);
257 }
258 
259 void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
260 {
261 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
262 		return;
263 
264 	__gen6_disable_pm_irq(dev_priv, mask);
265 }
266 
267 void gen6_reset_rps_interrupts(struct drm_device *dev)
268 {
269 	struct drm_i915_private *dev_priv = dev->dev_private;
270 	uint32_t reg = gen6_pm_iir(dev_priv);
271 
272 	spin_lock_irq(&dev_priv->irq_lock);
273 	I915_WRITE(reg, dev_priv->pm_rps_events);
274 	I915_WRITE(reg, dev_priv->pm_rps_events);
275 	POSTING_READ(reg);
276 	spin_unlock_irq(&dev_priv->irq_lock);
277 }
278 
279 void gen6_enable_rps_interrupts(struct drm_device *dev)
280 {
281 	struct drm_i915_private *dev_priv = dev->dev_private;
282 
283 	spin_lock_irq(&dev_priv->irq_lock);
284 
285 	WARN_ON(dev_priv->rps.pm_iir);
286 	WARN_ON(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
287 	dev_priv->rps.interrupts_enabled = true;
288 	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) |
289 				dev_priv->pm_rps_events);
290 	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
291 
292 	spin_unlock_irq(&dev_priv->irq_lock);
293 }
294 
295 void gen6_disable_rps_interrupts(struct drm_device *dev)
296 {
297 	struct drm_i915_private *dev_priv = dev->dev_private;
298 
299 	spin_lock_irq(&dev_priv->irq_lock);
300 	dev_priv->rps.interrupts_enabled = false;
301 	spin_unlock_irq(&dev_priv->irq_lock);
302 
303 	cancel_work_sync(&dev_priv->rps.work);
304 
305 	spin_lock_irq(&dev_priv->irq_lock);
306 
307 	I915_WRITE(GEN6_PMINTRMSK, INTEL_INFO(dev_priv)->gen >= 8 ?
308 		   ~GEN8_PMINTR_REDIRECT_TO_NON_DISP : ~0);
309 
310 	__gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
311 	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) &
312 				~dev_priv->pm_rps_events);
313 	I915_WRITE(gen6_pm_iir(dev_priv), dev_priv->pm_rps_events);
314 	I915_WRITE(gen6_pm_iir(dev_priv), dev_priv->pm_rps_events);
315 
316 	dev_priv->rps.pm_iir = 0;
317 
318 	spin_unlock_irq(&dev_priv->irq_lock);
319 }
320 
321 /**
322  * ibx_display_interrupt_update - update SDEIMR
323  * @dev_priv: driver private
324  * @interrupt_mask: mask of interrupt bits to update
325  * @enabled_irq_mask: mask of interrupt bits to enable
326  */
327 void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
328 				  uint32_t interrupt_mask,
329 				  uint32_t enabled_irq_mask)
330 {
331 	uint32_t sdeimr = I915_READ(SDEIMR);
332 	sdeimr &= ~interrupt_mask;
333 	sdeimr |= (~enabled_irq_mask & interrupt_mask);
334 
335 	assert_spin_locked(&dev_priv->irq_lock);
336 
337 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
338 		return;
339 
340 	I915_WRITE(SDEIMR, sdeimr);
341 	POSTING_READ(SDEIMR);
342 }
343 
344 static void
345 __i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
346 		       u32 enable_mask, u32 status_mask)
347 {
348 	u32 reg = PIPESTAT(pipe);
349 	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
350 
351 	assert_spin_locked(&dev_priv->irq_lock);
352 	WARN_ON(!intel_irqs_enabled(dev_priv));
353 
354 	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
355 		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
356 		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
357 		      pipe_name(pipe), enable_mask, status_mask))
358 		return;
359 
360 	if ((pipestat & enable_mask) == enable_mask)
361 		return;
362 
363 	dev_priv->pipestat_irq_mask[pipe] |= status_mask;
364 
365 	/* Enable the interrupt, clear any pending status */
366 	pipestat |= enable_mask | status_mask;
367 	I915_WRITE(reg, pipestat);
368 	POSTING_READ(reg);
369 }
370 
371 static void
372 __i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
373 		        u32 enable_mask, u32 status_mask)
374 {
375 	u32 reg = PIPESTAT(pipe);
376 	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
377 
378 	assert_spin_locked(&dev_priv->irq_lock);
379 	WARN_ON(!intel_irqs_enabled(dev_priv));
380 
381 	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
382 		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
383 		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
384 		      pipe_name(pipe), enable_mask, status_mask))
385 		return;
386 
387 	if ((pipestat & enable_mask) == 0)
388 		return;
389 
390 	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
391 
392 	pipestat &= ~enable_mask;
393 	I915_WRITE(reg, pipestat);
394 	POSTING_READ(reg);
395 }
396 
397 static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
398 {
399 	u32 enable_mask = status_mask << 16;
400 
401 	/*
402 	 * On pipe A we don't support the PSR interrupt yet,
403 	 * on pipe B and C the same bit MBZ.
404 	 */
405 	if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
406 		return 0;
407 	/*
408 	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
409 	 * A the same bit is for perf counters which we don't use either.
410 	 */
411 	if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
412 		return 0;
413 
414 	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
415 			 SPRITE0_FLIP_DONE_INT_EN_VLV |
416 			 SPRITE1_FLIP_DONE_INT_EN_VLV);
417 	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
418 		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
419 	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
420 		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;
421 
422 	return enable_mask;
423 }
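
/*
 * For most PIPESTAT events the enable bit sits 16 bits above the
 * corresponding status bit, hence the default enable_mask of
 * status_mask << 16 used here and in the i9xx paths below.  The sprite
 * flip-done and FIFO underrun bits do not follow that pattern, which is
 * why they are special-cased above.
 */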
424 
425 void
426 i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
427 		     u32 status_mask)
428 {
429 	u32 enable_mask;
430 
431 	if (IS_VALLEYVIEW(dev_priv->dev))
432 		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
433 							   status_mask);
434 	else
435 		enable_mask = status_mask << 16;
436 	__i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
437 }
438 
439 void
440 i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
441 		      u32 status_mask)
442 {
443 	u32 enable_mask;
444 
445 	if (IS_VALLEYVIEW(dev_priv->dev))
446 		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
447 							   status_mask);
448 	else
449 		enable_mask = status_mask << 16;
450 	__i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
451 }
452 
453 /**
454  * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
455  */
456 static void i915_enable_asle_pipestat(struct drm_device *dev)
457 {
458 	struct drm_i915_private *dev_priv = dev->dev_private;
459 
460 	if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
461 		return;
462 
463 	spin_lock_irq(&dev_priv->irq_lock);
464 
465 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
466 	if (INTEL_INFO(dev)->gen >= 4)
467 		i915_enable_pipestat(dev_priv, PIPE_A,
468 				     PIPE_LEGACY_BLC_EVENT_STATUS);
469 
470 	spin_unlock_irq(&dev_priv->irq_lock);
471 }
472 
473 /**
474  * i915_pipe_enabled - check if a pipe is enabled
475  * @dev: DRM device
476  * @pipe: pipe to check
477  *
478  * Reading certain registers when the pipe is disabled can hang the chip.
479  * Use this routine to make sure the PLL is running and the pipe is active
480  * before reading such registers if unsure.
481  */
482 static int
483 i915_pipe_enabled(struct drm_device *dev, int pipe)
484 {
485 	struct drm_i915_private *dev_priv = dev->dev_private;
486 
487 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
488 		/* Locking is horribly broken here, but whatever. */
489 		struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
490 		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
491 
492 		return intel_crtc->active;
493 	} else {
494 		return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
495 	}
496 }
497 
498 /*
499  * This timing diagram depicts the video signal in and
500  * around the vertical blanking period.
501  *
502  * Assumptions about the fictitious mode used in this example:
503  *  vblank_start >= 3
504  *  vsync_start = vblank_start + 1
505  *  vsync_end = vblank_start + 2
506  *  vtotal = vblank_start + 3
507  *
508  *           start of vblank:
509  *           latch double buffered registers
510  *           increment frame counter (ctg+)
511  *           generate start of vblank interrupt (gen4+)
512  *           |
513  *           |          frame start:
514  *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
515  *           |          may be shifted forward 1-3 extra lines via PIPECONF
516  *           |          |
517  *           |          |  start of vsync:
518  *           |          |  generate vsync interrupt
519  *           |          |  |
520  * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
521  *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
522  * ----va---> <-----------------vb--------------------> <--------va-------------
523  *       |          |       <----vs----->                     |
524  * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
525  * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
526  * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
527  *       |          |                                         |
528  *       last visible pixel                                   first visible pixel
529  *                  |                                         increment frame counter (gen3/4)
530  *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
531  *
532  * x  = horizontal active
533  * _  = horizontal blanking
534  * hs = horizontal sync
535  * va = vertical active
536  * vb = vertical blanking
537  * vs = vertical sync
538  * vbs = vblank_start (number)
539  *
540  * Summary:
541  * - most events happen at the start of horizontal sync
542  * - frame start happens at the start of horizontal blank, 1-4 lines
543  *   (depending on PIPECONF settings) after the start of vblank
544  * - gen3/4 pixel and frame counter are synchronized with the start
545  *   of horizontal active on the first line of vertical active
546  */
547 
548 static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe)
549 {
550 	/* Gen2 doesn't have a hardware frame counter */
551 	return 0;
552 }
553 
554 /* Called from drm generic code, passed a 'crtc', which
555  * we use as a pipe index
556  */
557 static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
558 {
559 	struct drm_i915_private *dev_priv = dev->dev_private;
560 	unsigned long high_frame;
561 	unsigned long low_frame;
562 	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
563 
564 	if (!i915_pipe_enabled(dev, pipe)) {
565 		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
566 				"pipe %c\n", pipe_name(pipe));
567 		return 0;
568 	}
569 
570 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
571 		struct intel_crtc *intel_crtc =
572 			to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
573 		const struct drm_display_mode *mode =
574 			&intel_crtc->config.adjusted_mode;
575 
576 		htotal = mode->crtc_htotal;
577 		hsync_start = mode->crtc_hsync_start;
578 		vbl_start = mode->crtc_vblank_start;
579 		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
580 			vbl_start = DIV_ROUND_UP(vbl_start, 2);
581 	} else {
582 		enum transcoder cpu_transcoder = (enum transcoder) pipe;
583 
584 		htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1;
585 		hsync_start = (I915_READ(HSYNC(cpu_transcoder))  & 0x1fff) + 1;
586 		vbl_start = (I915_READ(VBLANK(cpu_transcoder)) & 0x1fff) + 1;
587 		if ((I915_READ(PIPECONF(cpu_transcoder)) &
588 		     PIPECONF_INTERLACE_MASK) != PIPECONF_PROGRESSIVE)
589 			vbl_start = DIV_ROUND_UP(vbl_start, 2);
590 	}
591 
592 	/* Convert to pixel count */
593 	vbl_start *= htotal;
594 
595 	/* Start of vblank event occurs at start of hsync */
596 	vbl_start -= htotal - hsync_start;
597 
598 	high_frame = PIPEFRAME(pipe);
599 	low_frame = PIPEFRAMEPIXEL(pipe);
600 
601 	/*
602 	 * High & low register fields aren't synchronized, so make sure
603 	 * we get a low value that's stable across two reads of the high
604 	 * register.
605 	 */
606 	do {
607 		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
608 		low   = I915_READ(low_frame);
609 		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
610 	} while (high1 != high2);
611 
612 	high1 >>= PIPE_FRAME_HIGH_SHIFT;
613 	pixel = low & PIPE_PIXEL_MASK;
614 	low >>= PIPE_FRAME_LOW_SHIFT;
615 
616 	/*
617 	 * The frame counter increments at beginning of active.
618 	 * Cook up a vblank counter by also checking the pixel
619 	 * counter against vblank start.
620 	 */
621 	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
622 }
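
/*
 * Worked example of the cook-up above: the hardware frame counter is
 * split across two registers, so the 24-bit value is reassembled as
 * (high1 << 8) | low.  Since that counter only increments at the start
 * of active video, one is added whenever the pixel counter has already
 * passed the (hsync-adjusted) vblank start, making the returned value
 * behave like a counter that increments at the start of vblank.
 */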
623 
624 static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
625 {
626 	struct drm_i915_private *dev_priv = dev->dev_private;
627 	int reg = PIPE_FRMCOUNT_GM45(pipe);
628 
629 	if (!i915_pipe_enabled(dev, pipe)) {
630 		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
631 				 "pipe %c\n", pipe_name(pipe));
632 		return 0;
633 	}
634 
635 	return I915_READ(reg);
636 }
637 
638 /* raw reads, only for fast reads of display block, no need for forcewake etc. */
639 #define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
640 
641 static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
642 {
643 	struct drm_device *dev = crtc->base.dev;
644 	struct drm_i915_private *dev_priv = dev->dev_private;
645 	const struct drm_display_mode *mode = &crtc->config.adjusted_mode;
646 	enum pipe pipe = crtc->pipe;
647 	int position, vtotal;
648 
649 	vtotal = mode->crtc_vtotal;
650 	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
651 		vtotal /= 2;
652 
653 	if (IS_GEN2(dev))
654 		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
655 	else
656 		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
657 
658 	/*
659 	 * See update_scanline_offset() for the details on the
660 	 * scanline_offset adjustment.
661 	 */
662 	return (position + crtc->scanline_offset) % vtotal;
663 }
664 
665 static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
666 				    unsigned int flags, int *vpos, int *hpos,
667 				    ktime_t *stime, ktime_t *etime)
668 {
669 	struct drm_i915_private *dev_priv = dev->dev_private;
670 	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
671 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
672 	const struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
673 	int position;
674 	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
675 	bool in_vbl = true;
676 	int ret = 0;
677 	unsigned long irqflags;
678 
679 	if (!intel_crtc->active) {
680 		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
681 				 "pipe %c\n", pipe_name(pipe));
682 		return 0;
683 	}
684 
685 	htotal = mode->crtc_htotal;
686 	hsync_start = mode->crtc_hsync_start;
687 	vtotal = mode->crtc_vtotal;
688 	vbl_start = mode->crtc_vblank_start;
689 	vbl_end = mode->crtc_vblank_end;
690 
691 	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
692 		vbl_start = DIV_ROUND_UP(vbl_start, 2);
693 		vbl_end /= 2;
694 		vtotal /= 2;
695 	}
696 
697 	ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;
698 
699 	/*
700 	 * Lock uncore.lock, as we will do multiple timing critical raw
701 	 * register reads, potentially with preemption disabled, so the
702 	 * following code must not block on uncore.lock.
703 	 */
704 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
705 
706 	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
707 
708 	/* Get optional system timestamp before query. */
709 	if (stime)
710 		*stime = ktime_get();
711 
712 	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
713 		/* No obvious pixelcount register. Only query vertical
714 		 * scanout position from Display scan line register.
715 		 */
716 		position = __intel_get_crtc_scanline(intel_crtc);
717 	} else {
718 		/* Have access to pixelcount since start of frame.
719 		 * We can split this into vertical and horizontal
720 		 * scanout position.
721 		 */
722 		position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
723 
724 		/* convert to pixel counts */
725 		vbl_start *= htotal;
726 		vbl_end *= htotal;
727 		vtotal *= htotal;
728 
729 		/*
730 		 * In interlaced modes, the pixel counter counts all pixels,
731 		 * so one field will have htotal more pixels. In order to avoid
732 		 * the reported position from jumping backwards when the pixel
733 		 * counter is beyond the length of the shorter field, just
734 		 * clamp the position the length of the shorter field. This
735 		 * clamp the position to the length of the shorter field. This
736 		 * the scanline counter doesn't count the two half lines.
737 		 */
738 		if (position >= vtotal)
739 			position = vtotal - 1;
740 
741 		/*
742 		 * Start of vblank interrupt is triggered at start of hsync,
743 		 * just prior to the first active line of vblank. However we
744 		 * consider lines to start at the leading edge of horizontal
745 		 * active. So, should we get here before we've crossed into
746 		 * the horizontal active of the first line in vblank, we would
747 		 * not set the DRM_SCANOUTPOS_IN_VBLANK flag. In order to fix that,
748 		 * always add htotal-hsync_start to the current pixel position.
749 		 */
750 		position = (position + htotal - hsync_start) % vtotal;
751 	}
752 
753 	/* Get optional system timestamp after query. */
754 	if (etime)
755 		*etime = ktime_get();
756 
757 	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
758 
759 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
760 
761 	in_vbl = position >= vbl_start && position < vbl_end;
762 
763 	/*
764 	 * While in vblank, position will be negative,
765 	 * counting up towards 0 at vbl_end. Outside
766 	 * vblank, position will be positive, counting
767 	 * up from vbl_end.
768 	 */
769 	if (position >= vbl_start)
770 		position -= vbl_end;
771 	else
772 		position += vtotal - vbl_end;
773 
774 	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
775 		*vpos = position;
776 		*hpos = 0;
777 	} else {
778 		*vpos = position / htotal;
779 		*hpos = position - (*vpos * htotal);
780 	}
781 
782 	/* In vblank? */
783 	if (in_vbl)
784 		ret |= DRM_SCANOUTPOS_IN_VBLANK;
785 
786 	return ret;
787 }
788 
789 int intel_get_crtc_scanline(struct intel_crtc *crtc)
790 {
791 	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
792 	unsigned long irqflags;
793 	int position;
794 
795 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
796 	position = __intel_get_crtc_scanline(crtc);
797 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
798 
799 	return position;
800 }
801 
802 static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
803 			      int *max_error,
804 			      struct timeval *vblank_time,
805 			      unsigned flags)
806 {
807 	struct drm_crtc *crtc;
808 
809 	if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
810 		DRM_ERROR("Invalid crtc %d\n", pipe);
811 		return -EINVAL;
812 	}
813 
814 	/* Get drm_crtc to timestamp: */
815 	crtc = intel_get_crtc_for_pipe(dev, pipe);
816 	if (crtc == NULL) {
817 		DRM_ERROR("Invalid crtc %d\n", pipe);
818 		return -EINVAL;
819 	}
820 
821 	if (!crtc->enabled) {
822 		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
823 		return -EBUSY;
824 	}
825 
826 	/* Helper routine in DRM core does all the work: */
827 	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
828 						     vblank_time, flags,
829 						     crtc,
830 						     &to_intel_crtc(crtc)->config.adjusted_mode);
831 }
832 
833 static bool intel_hpd_irq_event(struct drm_device *dev,
834 				struct drm_connector *connector)
835 {
836 	enum drm_connector_status old_status;
837 
838 	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
839 	old_status = connector->status;
840 
841 	connector->status = connector->funcs->detect(connector, false);
842 	if (old_status == connector->status)
843 		return false;
844 
845 	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
846 		      connector->base.id,
847 		      connector->name,
848 		      drm_get_connector_status_name(old_status),
849 		      drm_get_connector_status_name(connector->status));
850 
851 	return true;
852 }
853 
854 static void i915_digport_work_func(struct work_struct *work)
855 {
856 	struct drm_i915_private *dev_priv =
857 		container_of(work, struct drm_i915_private, dig_port_work);
858 	u32 long_port_mask, short_port_mask;
859 	struct intel_digital_port *intel_dig_port;
860 	int i, ret;
861 	u32 old_bits = 0;
862 
863 	spin_lock_irq(&dev_priv->irq_lock);
864 	long_port_mask = dev_priv->long_hpd_port_mask;
865 	dev_priv->long_hpd_port_mask = 0;
866 	short_port_mask = dev_priv->short_hpd_port_mask;
867 	dev_priv->short_hpd_port_mask = 0;
868 	spin_unlock_irq(&dev_priv->irq_lock);
869 
870 	for (i = 0; i < I915_MAX_PORTS; i++) {
871 		bool valid = false;
872 		bool long_hpd = false;
873 		intel_dig_port = dev_priv->hpd_irq_port[i];
874 		if (!intel_dig_port || !intel_dig_port->hpd_pulse)
875 			continue;
876 
877 		if (long_port_mask & (1 << i))  {
878 			valid = true;
879 			long_hpd = true;
880 		} else if (short_port_mask & (1 << i))
881 			valid = true;
882 
883 		if (valid) {
884 			ret = intel_dig_port->hpd_pulse(intel_dig_port, long_hpd);
885 			if (ret == true) {
886 				/* if we get true, fall back to old school hpd */
887 				old_bits |= (1 << intel_dig_port->base.hpd_pin);
888 			}
889 		}
890 	}
891 
892 	if (old_bits) {
893 		spin_lock_irq(&dev_priv->irq_lock);
894 		dev_priv->hpd_event_bits |= old_bits;
895 		spin_unlock_irq(&dev_priv->irq_lock);
896 		schedule_work(&dev_priv->hotplug_work);
897 	}
898 }
899 
900 /*
901  * Handle hotplug events outside the interrupt handler proper.
902  */
903 #define I915_REENABLE_HOTPLUG_DELAY (2*60*1000)
904 
905 static void i915_hotplug_work_func(struct work_struct *work)
906 {
907 	struct drm_i915_private *dev_priv =
908 		container_of(work, struct drm_i915_private, hotplug_work);
909 	struct drm_device *dev = dev_priv->dev;
910 	struct drm_mode_config *mode_config = &dev->mode_config;
911 	struct intel_connector *intel_connector;
912 	struct intel_encoder *intel_encoder;
913 	struct drm_connector *connector;
914 	bool hpd_disabled = false;
915 	bool changed = false;
916 	u32 hpd_event_bits;
917 
918 	mutex_lock(&mode_config->mutex);
919 	DRM_DEBUG_KMS("running encoder hotplug functions\n");
920 
921 	spin_lock_irq(&dev_priv->irq_lock);
922 
923 	hpd_event_bits = dev_priv->hpd_event_bits;
924 	dev_priv->hpd_event_bits = 0;
925 	list_for_each_entry(connector, &mode_config->connector_list, head) {
926 		intel_connector = to_intel_connector(connector);
927 		if (!intel_connector->encoder)
928 			continue;
929 		intel_encoder = intel_connector->encoder;
930 		if (intel_encoder->hpd_pin > HPD_NONE &&
931 		    dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
932 		    connector->polled == DRM_CONNECTOR_POLL_HPD) {
933 			DRM_INFO("HPD interrupt storm detected on connector %s: "
934 				 "switching from hotplug detection to polling\n",
935 				connector->name);
936 			dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
937 			connector->polled = DRM_CONNECTOR_POLL_CONNECT
938 				| DRM_CONNECTOR_POLL_DISCONNECT;
939 			hpd_disabled = true;
940 		}
941 		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
942 			DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
943 				      connector->name, intel_encoder->hpd_pin);
944 		}
945 	}
946 	/* If we are disabling HPD on some connectors they will rely on
947 	 * polling, so make sure polling is enabled (it may have been
948 	 * disabled earlier if there were no outputs to poll). */
949 	if (hpd_disabled) {
950 		drm_kms_helper_poll_enable(dev);
951 		mod_delayed_work(system_wq, &dev_priv->hotplug_reenable_work,
952 				 msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
953 	}
954 
955 	spin_unlock_irq(&dev_priv->irq_lock);
956 
957 	list_for_each_entry(connector, &mode_config->connector_list, head) {
958 		intel_connector = to_intel_connector(connector);
959 		if (!intel_connector->encoder)
960 			continue;
961 		intel_encoder = intel_connector->encoder;
962 		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
963 			if (intel_encoder->hot_plug)
964 				intel_encoder->hot_plug(intel_encoder);
965 			if (intel_hpd_irq_event(dev, connector))
966 				changed = true;
967 		}
968 	}
969 	mutex_unlock(&mode_config->mutex);
970 
971 	if (changed)
972 		drm_kms_helper_hotplug_event(dev);
973 }
974 
975 static void ironlake_rps_change_irq_handler(struct drm_device *dev)
976 {
977 	struct drm_i915_private *dev_priv = dev->dev_private;
978 	u32 busy_up, busy_down, max_avg, min_avg;
979 	u8 new_delay;
980 
981 	spin_lock(&mchdev_lock);
982 
983 	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
984 
985 	new_delay = dev_priv->ips.cur_delay;
986 
987 	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
988 	busy_up = I915_READ(RCPREVBSYTUPAVG);
989 	busy_down = I915_READ(RCPREVBSYTDNAVG);
990 	max_avg = I915_READ(RCBMAXAVG);
991 	min_avg = I915_READ(RCBMINAVG);
992 
993 	/* Handle RCS change request from hw */
994 	if (busy_up > max_avg) {
995 		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
996 			new_delay = dev_priv->ips.cur_delay - 1;
997 		if (new_delay < dev_priv->ips.max_delay)
998 			new_delay = dev_priv->ips.max_delay;
999 	} else if (busy_down < min_avg) {
1000 		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
1001 			new_delay = dev_priv->ips.cur_delay + 1;
1002 		if (new_delay > dev_priv->ips.min_delay)
1003 			new_delay = dev_priv->ips.min_delay;
1004 	}
1005 
1006 	if (ironlake_set_drps(dev, new_delay))
1007 		dev_priv->ips.cur_delay = new_delay;
1008 
1009 	spin_unlock(&mchdev_lock);
1010 
1011 	return;
1012 }
1013 
1014 static void notify_ring(struct drm_device *dev,
1015 			struct intel_engine_cs *ring)
1016 {
1017 	if (!intel_ring_initialized(ring))
1018 		return;
1019 
1020 	trace_i915_gem_request_complete(ring);
1021 
1022 	wake_up_all(&ring->irq_queue);
1023 }
1024 
1025 static u32 vlv_c0_residency(struct drm_i915_private *dev_priv,
1026 			    struct intel_rps_ei *rps_ei)
1027 {
1028 	u32 cz_ts, cz_freq_khz;
1029 	u32 render_count, media_count;
1030 	u32 elapsed_render, elapsed_media, elapsed_time;
1031 	u32 residency = 0;
1032 
1033 	cz_ts = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
1034 	cz_freq_khz = DIV_ROUND_CLOSEST(dev_priv->mem_freq * 1000, 4);
1035 
1036 	render_count = I915_READ(VLV_RENDER_C0_COUNT_REG);
1037 	media_count = I915_READ(VLV_MEDIA_C0_COUNT_REG);
1038 
1039 	if (rps_ei->cz_clock == 0) {
1040 		rps_ei->cz_clock = cz_ts;
1041 		rps_ei->render_c0 = render_count;
1042 		rps_ei->media_c0 = media_count;
1043 
1044 		return dev_priv->rps.cur_freq;
1045 	}
1046 
1047 	elapsed_time = cz_ts - rps_ei->cz_clock;
1048 	rps_ei->cz_clock = cz_ts;
1049 
1050 	elapsed_render = render_count - rps_ei->render_c0;
1051 	rps_ei->render_c0 = render_count;
1052 
1053 	elapsed_media = media_count - rps_ei->media_c0;
1054 	rps_ei->media_c0 = media_count;
1055 
1056 	/* Convert all the counters into a common unit of milliseconds */
1057 	elapsed_time /= VLV_CZ_CLOCK_TO_MILLI_SEC;
1058 	elapsed_render /=  cz_freq_khz;
1059 	elapsed_media /= cz_freq_khz;
1060 
1061 	/*
1062 	 * Calculate overall C0 residency percentage
1063 	 * only if elapsed time is non zero
1064 	 */
1065 	if (elapsed_time) {
1066 		residency =
1067 			((max(elapsed_render, elapsed_media) * 100)
1068 				/ elapsed_time);
1069 	}
1070 
1071 	return residency;
1072 }
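
/*
 * In effect vlv_c0_residency() returns roughly
 *   max(render_delta, media_delta) * 100 / time_delta
 * with all three deltas first converted to milliseconds, i.e. the
 * percentage of the elapsed EI period that the busier of the render and
 * media wells spent in C0.  On the very first call it only snapshots
 * the counters and returns the current frequency instead.
 */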
1073 
1074 /**
1075  * vlv_calc_delay_from_C0_counters - Increase/Decrease freq based on GPU
1076  * busy-ness calculated from C0 counters of render & media power wells
1077  * @dev_priv: DRM device private
1078  *
1079  */
1080 static int vlv_calc_delay_from_C0_counters(struct drm_i915_private *dev_priv)
1081 {
1082 	u32 residency_C0_up = 0, residency_C0_down = 0;
1083 	int new_delay, adj;
1084 
1085 	dev_priv->rps.ei_interrupt_count++;
1086 
1087 	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
1088 
1089 
1090 	if (dev_priv->rps.up_ei.cz_clock == 0) {
1091 		vlv_c0_residency(dev_priv, &dev_priv->rps.up_ei);
1092 		vlv_c0_residency(dev_priv, &dev_priv->rps.down_ei);
1093 		return dev_priv->rps.cur_freq;
1094 	}
1095 
1096 
1097 	/*
1098 	 * To throttle down, C0 residency should be less than the down threshold
1099 	 * for continuous EI intervals. So calculate the down EI counters
1100 	 * once in VLV_INT_COUNT_FOR_DOWN_EI
1101 	 */
1102 	if (dev_priv->rps.ei_interrupt_count == VLV_INT_COUNT_FOR_DOWN_EI) {
1103 
1104 		dev_priv->rps.ei_interrupt_count = 0;
1105 
1106 		residency_C0_down = vlv_c0_residency(dev_priv,
1107 						     &dev_priv->rps.down_ei);
1108 	} else {
1109 		residency_C0_up = vlv_c0_residency(dev_priv,
1110 						   &dev_priv->rps.up_ei);
1111 	}
1112 
1113 	new_delay = dev_priv->rps.cur_freq;
1114 
1115 	adj = dev_priv->rps.last_adj;
1116 	/* C0 residency is greater than UP threshold. Increase Frequency */
1117 	if (residency_C0_up >= VLV_RP_UP_EI_THRESHOLD) {
1118 		if (adj > 0)
1119 			adj *= 2;
1120 		else
1121 			adj = 1;
1122 
1123 		if (dev_priv->rps.cur_freq < dev_priv->rps.max_freq_softlimit)
1124 			new_delay = dev_priv->rps.cur_freq + adj;
1125 
1126 		/*
1127 		 * For better performance, jump directly
1128 		 * to RPe if we're below it.
1129 		 */
1130 		if (new_delay < dev_priv->rps.efficient_freq)
1131 			new_delay = dev_priv->rps.efficient_freq;
1132 
1133 	} else if (!dev_priv->rps.ei_interrupt_count &&
1134 			(residency_C0_down < VLV_RP_DOWN_EI_THRESHOLD)) {
1135 		if (adj < 0)
1136 			adj *= 2;
1137 		else
1138 			adj = -1;
1139 		/*
1140 		 * This means C0 residency is less than the down threshold over
1141 		 * a period of VLV_INT_COUNT_FOR_DOWN_EI, so reduce the freq.
1142 		 */
1143 		if (dev_priv->rps.cur_freq > dev_priv->rps.min_freq_softlimit)
1144 			new_delay = dev_priv->rps.cur_freq + adj;
1145 	}
1146 
1147 	return new_delay;
1148 }
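
/*
 * Summary of the heuristic above: up residency is sampled on each EI
 * interrupt (except the one used for the down sample) and compared
 * against VLV_RP_UP_EI_THRESHOLD, while down residency is sampled only
 * once every VLV_INT_COUNT_FOR_DOWN_EI interrupts and compared against
 * VLV_RP_DOWN_EI_THRESHOLD.  last_adj doubles while consecutive
 * requests keep moving in the same direction, and the caller clamps the
 * returned frequency to the softlimits.
 */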
1149 
1150 static void gen6_pm_rps_work(struct work_struct *work)
1151 {
1152 	struct drm_i915_private *dev_priv =
1153 		container_of(work, struct drm_i915_private, rps.work);
1154 	u32 pm_iir;
1155 	int new_delay, adj;
1156 
1157 	spin_lock_irq(&dev_priv->irq_lock);
1158 	/* Speed up work cancellation while disabling rps interrupts. */
1159 	if (!dev_priv->rps.interrupts_enabled) {
1160 		spin_unlock_irq(&dev_priv->irq_lock);
1161 		return;
1162 	}
1163 	pm_iir = dev_priv->rps.pm_iir;
1164 	dev_priv->rps.pm_iir = 0;
1165 	/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
1166 	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
1167 	spin_unlock_irq(&dev_priv->irq_lock);
1168 
1169 	/* Make sure we didn't queue anything we're not going to process. */
1170 	WARN_ON(pm_iir & ~dev_priv->pm_rps_events);
1171 
1172 	if ((pm_iir & dev_priv->pm_rps_events) == 0)
1173 		return;
1174 
1175 	mutex_lock(&dev_priv->rps.hw_lock);
1176 
1177 	adj = dev_priv->rps.last_adj;
1178 	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
1179 		if (adj > 0)
1180 			adj *= 2;
1181 		else {
1182 			/* CHV needs even encode values */
1183 			adj = IS_CHERRYVIEW(dev_priv->dev) ? 2 : 1;
1184 		}
1185 		new_delay = dev_priv->rps.cur_freq + adj;
1186 
1187 		/*
1188 		 * For better performance, jump directly
1189 		 * to RPe if we're below it.
1190 		 */
1191 		if (new_delay < dev_priv->rps.efficient_freq)
1192 			new_delay = dev_priv->rps.efficient_freq;
1193 	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
1194 		if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
1195 			new_delay = dev_priv->rps.efficient_freq;
1196 		else
1197 			new_delay = dev_priv->rps.min_freq_softlimit;
1198 		adj = 0;
1199 	} else if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
1200 		new_delay = vlv_calc_delay_from_C0_counters(dev_priv);
1201 	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
1202 		if (adj < 0)
1203 			adj *= 2;
1204 		else {
1205 			/* CHV needs even encode values */
1206 			adj = IS_CHERRYVIEW(dev_priv->dev) ? -2 : -1;
1207 		}
1208 		new_delay = dev_priv->rps.cur_freq + adj;
1209 	} else { /* unknown event */
1210 		new_delay = dev_priv->rps.cur_freq;
1211 	}
1212 
1213 	/* sysfs frequency interfaces may have snuck in while servicing the
1214 	 * interrupt
1215 	 */
1216 	new_delay = clamp_t(int, new_delay,
1217 			    dev_priv->rps.min_freq_softlimit,
1218 			    dev_priv->rps.max_freq_softlimit);
1219 
1220 	dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_freq;
1221 
1222 	if (IS_VALLEYVIEW(dev_priv->dev))
1223 		valleyview_set_rps(dev_priv->dev, new_delay);
1224 	else
1225 		gen6_set_rps(dev_priv->dev, new_delay);
1226 
1227 	mutex_unlock(&dev_priv->rps.hw_lock);
1228 }
1229 
1230 
1231 /**
1232  * ivybridge_parity_work - Workqueue called when a parity error interrupt
1233  * occurred.
1234  * @work: workqueue struct
1235  *
1236  * Doesn't actually do anything except notify userspace. As a consequence of
1237  * this event, userspace should try to remap the bad rows, since
1238  * statistically the same row is likely to go bad again.
1239  */
1240 static void ivybridge_parity_work(struct work_struct *work)
1241 {
1242 	struct drm_i915_private *dev_priv =
1243 		container_of(work, struct drm_i915_private, l3_parity.error_work);
1244 	u32 error_status, row, bank, subbank;
1245 	char *parity_event[6];
1246 	uint32_t misccpctl;
1247 	uint8_t slice = 0;
1248 
1249 	/* We must turn off DOP level clock gating to access the L3 registers.
1250 	 * In order to prevent a get/put style interface, acquire struct mutex
1251 	 * any time we access those registers.
1252 	 */
1253 	mutex_lock(&dev_priv->dev->struct_mutex);
1254 
1255 	/* If we've screwed up tracking, just let the interrupt fire again */
1256 	if (WARN_ON(!dev_priv->l3_parity.which_slice))
1257 		goto out;
1258 
1259 	misccpctl = I915_READ(GEN7_MISCCPCTL);
1260 	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
1261 	POSTING_READ(GEN7_MISCCPCTL);
1262 
1263 	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
1264 		u32 reg;
1265 
1266 		slice--;
1267 		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
1268 			break;
1269 
1270 		dev_priv->l3_parity.which_slice &= ~(1<<slice);
1271 
1272 		reg = GEN7_L3CDERRST1 + (slice * 0x200);
1273 
1274 		error_status = I915_READ(reg);
1275 		row = GEN7_PARITY_ERROR_ROW(error_status);
1276 		bank = GEN7_PARITY_ERROR_BANK(error_status);
1277 		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
1278 
1279 		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
1280 		POSTING_READ(reg);
1281 
1282 		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
1283 		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
1284 		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
1285 		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
1286 		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
1287 		parity_event[5] = NULL;
1288 
1289 		kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj,
1290 				   KOBJ_CHANGE, parity_event);
1291 
1292 		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
1293 			  slice, row, bank, subbank);
1294 
1295 		kfree(parity_event[4]);
1296 		kfree(parity_event[3]);
1297 		kfree(parity_event[2]);
1298 		kfree(parity_event[1]);
1299 	}
1300 
1301 	I915_WRITE(GEN7_MISCCPCTL, misccpctl);
1302 
1303 out:
1304 	WARN_ON(dev_priv->l3_parity.which_slice);
1305 	spin_lock_irq(&dev_priv->irq_lock);
1306 	gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
1307 	spin_unlock_irq(&dev_priv->irq_lock);
1308 
1309 	mutex_unlock(&dev_priv->dev->struct_mutex);
1310 }
1311 
1312 static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
1313 {
1314 	struct drm_i915_private *dev_priv = dev->dev_private;
1315 
1316 	if (!HAS_L3_DPF(dev))
1317 		return;
1318 
1319 	spin_lock(&dev_priv->irq_lock);
1320 	gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
1321 	spin_unlock(&dev_priv->irq_lock);
1322 
1323 	iir &= GT_PARITY_ERROR(dev);
1324 	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
1325 		dev_priv->l3_parity.which_slice |= 1 << 1;
1326 
1327 	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
1328 		dev_priv->l3_parity.which_slice |= 1 << 0;
1329 
1330 	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
1331 }
1332 
1333 static void ilk_gt_irq_handler(struct drm_device *dev,
1334 			       struct drm_i915_private *dev_priv,
1335 			       u32 gt_iir)
1336 {
1337 	if (gt_iir &
1338 	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
1339 		notify_ring(dev, &dev_priv->ring[RCS]);
1340 	if (gt_iir & ILK_BSD_USER_INTERRUPT)
1341 		notify_ring(dev, &dev_priv->ring[VCS]);
1342 }
1343 
1344 static void snb_gt_irq_handler(struct drm_device *dev,
1345 			       struct drm_i915_private *dev_priv,
1346 			       u32 gt_iir)
1347 {
1348 
1349 	if (gt_iir &
1350 	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
1351 		notify_ring(dev, &dev_priv->ring[RCS]);
1352 	if (gt_iir & GT_BSD_USER_INTERRUPT)
1353 		notify_ring(dev, &dev_priv->ring[VCS]);
1354 	if (gt_iir & GT_BLT_USER_INTERRUPT)
1355 		notify_ring(dev, &dev_priv->ring[BCS]);
1356 
1357 	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
1358 		      GT_BSD_CS_ERROR_INTERRUPT |
1359 		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
1360 		DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);
1361 
1362 	if (gt_iir & GT_PARITY_ERROR(dev))
1363 		ivybridge_parity_error_irq_handler(dev, gt_iir);
1364 }
1365 
1366 static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
1367 				       struct drm_i915_private *dev_priv,
1368 				       u32 master_ctl)
1369 {
1370 	struct intel_engine_cs *ring;
1371 	u32 rcs, bcs, vcs;
1372 	uint32_t tmp = 0;
1373 	irqreturn_t ret = IRQ_NONE;
1374 
1375 	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
1376 		tmp = I915_READ(GEN8_GT_IIR(0));
1377 		if (tmp) {
1378 			I915_WRITE(GEN8_GT_IIR(0), tmp);
1379 			ret = IRQ_HANDLED;
1380 
1381 			rcs = tmp >> GEN8_RCS_IRQ_SHIFT;
1382 			ring = &dev_priv->ring[RCS];
1383 			if (rcs & GT_RENDER_USER_INTERRUPT)
1384 				notify_ring(dev, ring);
1385 			if (rcs & GT_CONTEXT_SWITCH_INTERRUPT)
1386 				intel_execlists_handle_ctx_events(ring);
1387 
1388 			bcs = tmp >> GEN8_BCS_IRQ_SHIFT;
1389 			ring = &dev_priv->ring[BCS];
1390 			if (bcs & GT_RENDER_USER_INTERRUPT)
1391 				notify_ring(dev, ring);
1392 			if (bcs & GT_CONTEXT_SWITCH_INTERRUPT)
1393 				intel_execlists_handle_ctx_events(ring);
1394 		} else
1395 			DRM_ERROR("The master control interrupt lied (GT0)!\n");
1396 	}
1397 
1398 	if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
1399 		tmp = I915_READ(GEN8_GT_IIR(1));
1400 		if (tmp) {
1401 			I915_WRITE(GEN8_GT_IIR(1), tmp);
1402 			ret = IRQ_HANDLED;
1403 
1404 			vcs = tmp >> GEN8_VCS1_IRQ_SHIFT;
1405 			ring = &dev_priv->ring[VCS];
1406 			if (vcs & GT_RENDER_USER_INTERRUPT)
1407 				notify_ring(dev, ring);
1408 			if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
1409 				intel_execlists_handle_ctx_events(ring);
1410 
1411 			vcs = tmp >> GEN8_VCS2_IRQ_SHIFT;
1412 			ring = &dev_priv->ring[VCS2];
1413 			if (vcs & GT_RENDER_USER_INTERRUPT)
1414 				notify_ring(dev, ring);
1415 			if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
1416 				intel_execlists_handle_ctx_events(ring);
1417 		} else
1418 			DRM_ERROR("The master control interrupt lied (GT1)!\n");
1419 	}
1420 
1421 	if (master_ctl & GEN8_GT_PM_IRQ) {
1422 		tmp = I915_READ(GEN8_GT_IIR(2));
1423 		if (tmp & dev_priv->pm_rps_events) {
1424 			I915_WRITE(GEN8_GT_IIR(2),
1425 				   tmp & dev_priv->pm_rps_events);
1426 			ret = IRQ_HANDLED;
1427 			gen6_rps_irq_handler(dev_priv, tmp);
1428 		} else
1429 			DRM_ERROR("The master control interrupt lied (PM)!\n");
1430 	}
1431 
1432 	if (master_ctl & GEN8_GT_VECS_IRQ) {
1433 		tmp = I915_READ(GEN8_GT_IIR(3));
1434 		if (tmp) {
1435 			I915_WRITE(GEN8_GT_IIR(3), tmp);
1436 			ret = IRQ_HANDLED;
1437 
1438 			vcs = tmp >> GEN8_VECS_IRQ_SHIFT;
1439 			ring = &dev_priv->ring[VECS];
1440 			if (vcs & GT_RENDER_USER_INTERRUPT)
1441 				notify_ring(dev, ring);
1442 			if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
1443 				intel_execlists_handle_ctx_events(ring);
1444 		} else
1445 			DRM_ERROR("The master control interrupt lied (GT3)!\n");
1446 	}
1447 
1448 	return ret;
1449 }
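
/*
 * Gen8 splits the GT interrupt sources across four IIR banks: IIR(0)
 * carries RCS and BCS (at GEN8_RCS_IRQ_SHIFT / GEN8_BCS_IRQ_SHIFT),
 * IIR(1) carries VCS1 and VCS2, IIR(2) the PM/RPS events and IIR(3)
 * VECS.  Each bank is read only when the corresponding bit is set in
 * master_ctl and is acked by writing the handled bits back to the IIR.
 */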
1450 
1451 #define HPD_STORM_DETECT_PERIOD 1000
1452 #define HPD_STORM_THRESHOLD 5
1453 
1454 static int pch_port_to_hotplug_shift(enum port port)
1455 {
1456 	switch (port) {
1457 	case PORT_A:
1458 	case PORT_E:
1459 	default:
1460 		return -1;
1461 	case PORT_B:
1462 		return 0;
1463 	case PORT_C:
1464 		return 8;
1465 	case PORT_D:
1466 		return 16;
1467 	}
1468 }
1469 
1470 static int i915_port_to_hotplug_shift(enum port port)
1471 {
1472 	switch (port) {
1473 	case PORT_A:
1474 	case PORT_E:
1475 	default:
1476 		return -1;
1477 	case PORT_B:
1478 		return 17;
1479 	case PORT_C:
1480 		return 19;
1481 	case PORT_D:
1482 		return 21;
1483 	}
1484 }
1485 
1486 static inline enum port get_port_from_pin(enum hpd_pin pin)
1487 {
1488 	switch (pin) {
1489 	case HPD_PORT_B:
1490 		return PORT_B;
1491 	case HPD_PORT_C:
1492 		return PORT_C;
1493 	case HPD_PORT_D:
1494 		return PORT_D;
1495 	default:
1496 		return PORT_A; /* no hpd */
1497 	}
1498 }
1499 
1500 static inline void intel_hpd_irq_handler(struct drm_device *dev,
1501 					 u32 hotplug_trigger,
1502 					 u32 dig_hotplug_reg,
1503 					 const u32 *hpd)
1504 {
1505 	struct drm_i915_private *dev_priv = dev->dev_private;
1506 	int i;
1507 	enum port port;
1508 	bool storm_detected = false;
1509 	bool queue_dig = false, queue_hp = false;
1510 	u32 dig_shift;
1511 	u32 dig_port_mask = 0;
1512 
1513 	if (!hotplug_trigger)
1514 		return;
1515 
1516 	DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x\n",
1517 			 hotplug_trigger, dig_hotplug_reg);
1518 
1519 	spin_lock(&dev_priv->irq_lock);
1520 	for (i = 1; i < HPD_NUM_PINS; i++) {
1521 		if (!(hpd[i] & hotplug_trigger))
1522 			continue;
1523 
1524 		port = get_port_from_pin(i);
1525 		if (port && dev_priv->hpd_irq_port[port]) {
1526 			bool long_hpd;
1527 
1528 			if (HAS_PCH_SPLIT(dev)) {
1529 				dig_shift = pch_port_to_hotplug_shift(port);
1530 				long_hpd = (dig_hotplug_reg >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
1531 			} else {
1532 				dig_shift = i915_port_to_hotplug_shift(port);
1533 				long_hpd = (hotplug_trigger >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
1534 			}
1535 
1536 			DRM_DEBUG_DRIVER("digital hpd port %c - %s\n",
1537 					 port_name(port),
1538 					 long_hpd ? "long" : "short");
1539 			/* for long HPD pulses we want to have the digital queue happen,
1540 			   but we still want HPD storm detection to function. */
1541 			if (long_hpd) {
1542 				dev_priv->long_hpd_port_mask |= (1 << port);
1543 				dig_port_mask |= hpd[i];
1544 			} else {
1545 				/* for short HPD just trigger the digital queue */
1546 				dev_priv->short_hpd_port_mask |= (1 << port);
1547 				hotplug_trigger &= ~hpd[i];
1548 			}
1549 			queue_dig = true;
1550 		}
1551 	}
1552 
1553 	for (i = 1; i < HPD_NUM_PINS; i++) {
1554 		if (hpd[i] & hotplug_trigger &&
1555 		    dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED) {
1556 			/*
1557 			 * On GMCH platforms the interrupt mask bits only
1558 			 * prevent irq generation, not the setting of the
1559 			 * hotplug bits itself. So only WARN about unexpected
1560 			 * interrupts on saner platforms.
1561 			 */
1562 			WARN_ONCE(INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev),
1563 				  "Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n",
1564 				  hotplug_trigger, i, hpd[i]);
1565 
1566 			continue;
1567 		}
1568 
1569 		if (!(hpd[i] & hotplug_trigger) ||
1570 		    dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
1571 			continue;
1572 
1573 		if (!(dig_port_mask & hpd[i])) {
1574 			dev_priv->hpd_event_bits |= (1 << i);
1575 			queue_hp = true;
1576 		}
1577 
1578 		if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
1579 				   dev_priv->hpd_stats[i].hpd_last_jiffies
1580 				   + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
1581 			dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
1582 			dev_priv->hpd_stats[i].hpd_cnt = 0;
1583 			DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", i);
1584 		} else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
1585 			dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
1586 			dev_priv->hpd_event_bits &= ~(1 << i);
1587 			DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
1588 			storm_detected = true;
1589 		} else {
1590 			dev_priv->hpd_stats[i].hpd_cnt++;
1591 			DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", i,
1592 				      dev_priv->hpd_stats[i].hpd_cnt);
1593 		}
1594 	}
1595 
1596 	if (storm_detected)
1597 		dev_priv->display.hpd_irq_setup(dev);
1598 	spin_unlock(&dev_priv->irq_lock);
1599 
1600 	/*
1601 	 * Our hotplug handler can grab modeset locks (by calling down into the
1602 	 * fb helpers). Hence it must not be run on our own dev_priv->wq work
1603 	 * queue, because otherwise the flush_work in the pageflip code will
1604 	 * deadlock.
1605 	 */
1606 	if (queue_dig)
1607 		queue_work(dev_priv->dp_wq, &dev_priv->dig_port_work);
1608 	if (queue_hp)
1609 		schedule_work(&dev_priv->hotplug_work);
1610 }
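
/*
 * Storm detection above works per HPD pin: each interrupt inside a
 * HPD_STORM_DETECT_PERIOD (1 second) window bumps hpd_cnt, and once the
 * count exceeds HPD_STORM_THRESHOLD the pin is marked HPD_MARK_DISABLED
 * and hpd_irq_setup() reprograms the hotplug registers.  The hotplug
 * work then switches the affected connectors over to polling and
 * schedules re-enabling of HPD after I915_REENABLE_HOTPLUG_DELAY.
 */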
1611 
1612 static void gmbus_irq_handler(struct drm_device *dev)
1613 {
1614 	struct drm_i915_private *dev_priv = dev->dev_private;
1615 
1616 	wake_up_all(&dev_priv->gmbus_wait_queue);
1617 }
1618 
1619 static void dp_aux_irq_handler(struct drm_device *dev)
1620 {
1621 	struct drm_i915_private *dev_priv = dev->dev_private;
1622 
1623 	wake_up_all(&dev_priv->gmbus_wait_queue);
1624 }
1625 
1626 #if defined(CONFIG_DEBUG_FS)
1627 static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
1628 					 uint32_t crc0, uint32_t crc1,
1629 					 uint32_t crc2, uint32_t crc3,
1630 					 uint32_t crc4)
1631 {
1632 	struct drm_i915_private *dev_priv = dev->dev_private;
1633 	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
1634 	struct intel_pipe_crc_entry *entry;
1635 	int head, tail;
1636 
1637 	spin_lock(&pipe_crc->lock);
1638 
1639 	if (!pipe_crc->entries) {
1640 		spin_unlock(&pipe_crc->lock);
1641 		DRM_DEBUG_KMS("spurious interrupt\n");
1642 		return;
1643 	}
1644 
1645 	head = pipe_crc->head;
1646 	tail = pipe_crc->tail;
1647 
1648 	if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
1649 		spin_unlock(&pipe_crc->lock);
1650 		DRM_ERROR("CRC buffer overflowing\n");
1651 		return;
1652 	}
1653 
1654 	entry = &pipe_crc->entries[head];
1655 
1656 	entry->frame = dev->driver->get_vblank_counter(dev, pipe);
1657 	entry->crc[0] = crc0;
1658 	entry->crc[1] = crc1;
1659 	entry->crc[2] = crc2;
1660 	entry->crc[3] = crc3;
1661 	entry->crc[4] = crc4;
1662 
1663 	head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
1664 	pipe_crc->head = head;
1665 
1666 	spin_unlock(&pipe_crc->lock);
1667 
1668 	wake_up_interruptible(&pipe_crc->wq);
1669 }
1670 #else
1671 static inline void
1672 display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
1673 			     uint32_t crc0, uint32_t crc1,
1674 			     uint32_t crc2, uint32_t crc3,
1675 			     uint32_t crc4) {}
1676 #endif
1677 
1678 
1679 static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
1680 {
1681 	struct drm_i915_private *dev_priv = dev->dev_private;
1682 
1683 	display_pipe_crc_irq_handler(dev, pipe,
1684 				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1685 				     0, 0, 0, 0);
1686 }
1687 
1688 static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
1689 {
1690 	struct drm_i915_private *dev_priv = dev->dev_private;
1691 
1692 	display_pipe_crc_irq_handler(dev, pipe,
1693 				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1694 				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
1695 				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
1696 				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
1697 				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
1698 }
1699 
1700 static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
1701 {
1702 	struct drm_i915_private *dev_priv = dev->dev_private;
1703 	uint32_t res1, res2;
1704 
1705 	if (INTEL_INFO(dev)->gen >= 3)
1706 		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
1707 	else
1708 		res1 = 0;
1709 
1710 	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
1711 		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
1712 	else
1713 		res2 = 0;
1714 
1715 	display_pipe_crc_irq_handler(dev, pipe,
1716 				     I915_READ(PIPE_CRC_RES_RED(pipe)),
1717 				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
1718 				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
1719 				     res1, res2);
1720 }
1721 
1722 /* The RPS events need forcewake, so we add them to a work queue and mask their
1723  * IMR bits until the work is done. Other interrupts can be processed without
1724  * the work queue. */
1725 static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
1726 {
1727 	/* TODO: RPS on GEN9+ is not supported yet. */
1728 	if (WARN_ONCE(INTEL_INFO(dev_priv)->gen >= 9,
1729 		      "GEN9+: unexpected RPS IRQ\n"))
1730 		return;
1731 
1732 	if (pm_iir & dev_priv->pm_rps_events) {
1733 		spin_lock(&dev_priv->irq_lock);
1734 		gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
1735 		if (dev_priv->rps.interrupts_enabled) {
1736 			dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
1737 			queue_work(dev_priv->wq, &dev_priv->rps.work);
1738 		}
1739 		spin_unlock(&dev_priv->irq_lock);
1740 	}
1741 
1742 	if (INTEL_INFO(dev_priv)->gen >= 8)
1743 		return;
1744 
1745 	if (HAS_VEBOX(dev_priv->dev)) {
1746 		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
1747 			notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);
1748 
1749 		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
1750 			DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
1751 	}
1752 }
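
/*
 * Note on the mask-then-queue pattern above: rps.pm_iir accumulates the
 * pending bits under irq_lock and the work item consumes them under the
 * same lock, so nothing is lost even though the handler returns before the
 * work runs; the masked IMR bits are only restored once the work is done.
 */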
1753 
1754 static bool intel_pipe_handle_vblank(struct drm_device *dev, enum pipe pipe)
1755 {
1756 	if (!drm_handle_vblank(dev, pipe))
1757 		return false;
1758 
1759 	return true;
1760 }
1761 
1762 static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
1763 {
1764 	struct drm_i915_private *dev_priv = dev->dev_private;
1765 	u32 pipe_stats[I915_MAX_PIPES] = { };
1766 	int pipe;
1767 
1768 	spin_lock(&dev_priv->irq_lock);
1769 	for_each_pipe(dev_priv, pipe) {
1770 		int reg;
1771 		u32 mask, iir_bit = 0;
1772 
1773 		/*
1774 		 * PIPESTAT bits get signalled even when the interrupt is
1775 		 * disabled with the mask bits, and some of the status bits do
1776 		 * not generate interrupts at all (like the underrun bit). Hence
1777 		 * we need to be careful that we only handle what we want to
1778 		 * handle.
1779 		 */
1780 
1781 		/* fifo underruns are filtered in the underrun handler. */
1782 		mask = PIPE_FIFO_UNDERRUN_STATUS;
1783 
1784 		switch (pipe) {
1785 		case PIPE_A:
1786 			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
1787 			break;
1788 		case PIPE_B:
1789 			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
1790 			break;
1791 		case PIPE_C:
1792 			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
1793 			break;
1794 		}
1795 		if (iir & iir_bit)
1796 			mask |= dev_priv->pipestat_irq_mask[pipe];
1797 
1798 		if (!mask)
1799 			continue;
1800 
1801 		reg = PIPESTAT(pipe);
1802 		mask |= PIPESTAT_INT_ENABLE_MASK;
1803 		pipe_stats[pipe] = I915_READ(reg) & mask;
1804 
1805 		/*
1806 		 * Clear the PIPE*STAT regs before the IIR
1807 		 */
1808 		if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS |
1809 					PIPESTAT_INT_STATUS_MASK))
1810 			I915_WRITE(reg, pipe_stats[pipe]);
1811 	}
1812 	spin_unlock(&dev_priv->irq_lock);
1813 
1814 	for_each_pipe(dev_priv, pipe) {
1815 		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
1816 		    intel_pipe_handle_vblank(dev, pipe))
1817 			intel_check_page_flip(dev, pipe);
1818 
1819 		if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) {
1820 			intel_prepare_page_flip(dev, pipe);
1821 			intel_finish_page_flip(dev, pipe);
1822 		}
1823 
1824 		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1825 			i9xx_pipe_crc_irq_handler(dev, pipe);
1826 
1827 		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1828 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1829 	}
1830 
1831 	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
1832 		gmbus_irq_handler(dev);
1833 }
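
/*
 * Note the two-phase structure above: the first loop samples and acks the
 * PIPESTAT registers under irq_lock (keeping the enable/status bookkeeping
 * in i915_{enable,disable}_pipestat() consistent), while the second loop
 * does the actual event processing outside that lock, where helpers such
 * as intel_check_page_flip() are free to take other locks.
 */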
1834 
1835 static void i9xx_hpd_irq_handler(struct drm_device *dev)
1836 {
1837 	struct drm_i915_private *dev_priv = dev->dev_private;
1838 	u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
1839 
1840 	if (hotplug_status) {
1841 		I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
1842 		/*
1843 		 * Make sure hotplug status is cleared before we clear IIR, or else we
1844 		 * may miss hotplug events.
1845 		 */
1846 		POSTING_READ(PORT_HOTPLUG_STAT);
1847 
1848 		if (IS_G4X(dev)) {
1849 			u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
1850 
1851 			intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_g4x);
1852 		} else {
1853 			u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
1854 
1855 			intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_i915);
1856 		}
1857 
1858 		if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) &&
1859 		    hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
1860 			dp_aux_irq_handler(dev);
1861 	}
1862 }
1863 
1864 static irqreturn_t valleyview_irq_handler(int irq, void *arg)
1865 {
1866 	struct drm_device *dev = arg;
1867 	struct drm_i915_private *dev_priv = dev->dev_private;
1868 	u32 iir, gt_iir, pm_iir;
1869 	irqreturn_t ret = IRQ_NONE;
1870 
1871 	while (true) {
1872 		/* Find, clear, then process each source of interrupt */
1873 
1874 		gt_iir = I915_READ(GTIIR);
1875 		if (gt_iir)
1876 			I915_WRITE(GTIIR, gt_iir);
1877 
1878 		pm_iir = I915_READ(GEN6_PMIIR);
1879 		if (pm_iir)
1880 			I915_WRITE(GEN6_PMIIR, pm_iir);
1881 
1882 		iir = I915_READ(VLV_IIR);
1883 		if (iir) {
1884 			/* Consume port before clearing IIR or we'll miss events */
1885 			if (iir & I915_DISPLAY_PORT_INTERRUPT)
1886 				i9xx_hpd_irq_handler(dev);
1887 			I915_WRITE(VLV_IIR, iir);
1888 		}
1889 
1890 		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
1891 			goto out;
1892 
1893 		ret = IRQ_HANDLED;
1894 
1895 		if (gt_iir)
1896 			snb_gt_irq_handler(dev, dev_priv, gt_iir);
1897 		if (pm_iir)
1898 			gen6_rps_irq_handler(dev_priv, pm_iir);
1899 		/* Call regardless, as some status bits might not be
1900 		 * signalled in iir */
1901 		valleyview_pipestat_irq_handler(dev, iir);
1902 	}
1903 
1904 out:
1905 	return ret;
1906 }
1907 
1908 static irqreturn_t cherryview_irq_handler(int irq, void *arg)
1909 {
1910 	struct drm_device *dev = arg;
1911 	struct drm_i915_private *dev_priv = dev->dev_private;
1912 	u32 master_ctl, iir;
1913 	irqreturn_t ret = IRQ_NONE;
1914 
1915 	for (;;) {
1916 		master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
1917 		iir = I915_READ(VLV_IIR);
1918 
1919 		if (master_ctl == 0 && iir == 0)
1920 			break;
1921 
1922 		ret = IRQ_HANDLED;
1923 
1924 		I915_WRITE(GEN8_MASTER_IRQ, 0);
1925 
1926 		/* Find, clear, then process each source of interrupt */
1927 
1928 		if (iir) {
1929 			/* Consume port before clearing IIR or we'll miss events */
1930 			if (iir & I915_DISPLAY_PORT_INTERRUPT)
1931 				i9xx_hpd_irq_handler(dev);
1932 			I915_WRITE(VLV_IIR, iir);
1933 		}
1934 
1935 		gen8_gt_irq_handler(dev, dev_priv, master_ctl);
1936 
1937 		/* Call regardless, as some status bits might not be
1938 		 * signalled in iir */
1939 		valleyview_pipestat_irq_handler(dev, iir);
1940 
1941 		I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
1942 		POSTING_READ(GEN8_MASTER_IRQ);
1943 	}
1944 
1945 	return ret;
1946 }
1947 
1948 static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
1949 {
1950 	struct drm_i915_private *dev_priv = dev->dev_private;
1951 	int pipe;
1952 	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
1953 	u32 dig_hotplug_reg;
1954 
1955 	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
1956 	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
1957 
1958 	intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_ibx);
1959 
1960 	if (pch_iir & SDE_AUDIO_POWER_MASK) {
1961 		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
1962 			       SDE_AUDIO_POWER_SHIFT);
1963 		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
1964 				 port_name(port));
1965 	}
1966 
1967 	if (pch_iir & SDE_AUX_MASK)
1968 		dp_aux_irq_handler(dev);
1969 
1970 	if (pch_iir & SDE_GMBUS)
1971 		gmbus_irq_handler(dev);
1972 
1973 	if (pch_iir & SDE_AUDIO_HDCP_MASK)
1974 		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
1975 
1976 	if (pch_iir & SDE_AUDIO_TRANS_MASK)
1977 		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
1978 
1979 	if (pch_iir & SDE_POISON)
1980 		DRM_ERROR("PCH poison interrupt\n");
1981 
1982 	if (pch_iir & SDE_FDI_MASK)
1983 		for_each_pipe(dev_priv, pipe)
1984 			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
1985 					 pipe_name(pipe),
1986 					 I915_READ(FDI_RX_IIR(pipe)));
1987 
1988 	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
1989 		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
1990 
1991 	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
1992 		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
1993 
1994 	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
1995 		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);
1996 
1997 	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
1998 		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
1999 }
2000 
2001 static void ivb_err_int_handler(struct drm_device *dev)
2002 {
2003 	struct drm_i915_private *dev_priv = dev->dev_private;
2004 	u32 err_int = I915_READ(GEN7_ERR_INT);
2005 	enum pipe pipe;
2006 
2007 	if (err_int & ERR_INT_POISON)
2008 		DRM_ERROR("Poison interrupt\n");
2009 
2010 	for_each_pipe(dev_priv, pipe) {
2011 		if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
2012 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2013 
2014 		if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
2015 			if (IS_IVYBRIDGE(dev))
2016 				ivb_pipe_crc_irq_handler(dev, pipe);
2017 			else
2018 				hsw_pipe_crc_irq_handler(dev, pipe);
2019 		}
2020 	}
2021 
2022 	I915_WRITE(GEN7_ERR_INT, err_int);
2023 }
2024 
2025 static void cpt_serr_int_handler(struct drm_device *dev)
2026 {
2027 	struct drm_i915_private *dev_priv = dev->dev_private;
2028 	u32 serr_int = I915_READ(SERR_INT);
2029 
2030 	if (serr_int & SERR_INT_POISON)
2031 		DRM_ERROR("PCH poison interrupt\n");
2032 
2033 	if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
2034 		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);
2035 
2036 	if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
2037 		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
2038 
2039 	if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
2040 		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_C);
2041 
2042 	I915_WRITE(SERR_INT, serr_int);
2043 }
2044 
2045 static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
2046 {
2047 	struct drm_i915_private *dev_priv = dev->dev_private;
2048 	int pipe;
2049 	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
2050 	u32 dig_hotplug_reg;
2051 
2052 	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2053 	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2054 
2055 	intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_cpt);
2056 
2057 	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
2058 		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
2059 			       SDE_AUDIO_POWER_SHIFT_CPT);
2060 		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
2061 				 port_name(port));
2062 	}
2063 
2064 	if (pch_iir & SDE_AUX_MASK_CPT)
2065 		dp_aux_irq_handler(dev);
2066 
2067 	if (pch_iir & SDE_GMBUS_CPT)
2068 		gmbus_irq_handler(dev);
2069 
2070 	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
2071 		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
2072 
2073 	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
2074 		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
2075 
2076 	if (pch_iir & SDE_FDI_MASK_CPT)
2077 		for_each_pipe(dev_priv, pipe)
2078 			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
2079 					 pipe_name(pipe),
2080 					 I915_READ(FDI_RX_IIR(pipe)));
2081 
2082 	if (pch_iir & SDE_ERROR_CPT)
2083 		cpt_serr_int_handler(dev);
2084 }
2085 
2086 static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
2087 {
2088 	struct drm_i915_private *dev_priv = dev->dev_private;
2089 	enum pipe pipe;
2090 
2091 	if (de_iir & DE_AUX_CHANNEL_A)
2092 		dp_aux_irq_handler(dev);
2093 
2094 	if (de_iir & DE_GSE)
2095 		intel_opregion_asle_intr(dev);
2096 
2097 	if (de_iir & DE_POISON)
2098 		DRM_ERROR("Poison interrupt\n");
2099 
2100 	for_each_pipe(dev_priv, pipe) {
2101 		if (de_iir & DE_PIPE_VBLANK(pipe) &&
2102 		    intel_pipe_handle_vblank(dev, pipe))
2103 			intel_check_page_flip(dev, pipe);
2104 
2105 		if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
2106 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2107 
2108 		if (de_iir & DE_PIPE_CRC_DONE(pipe))
2109 			i9xx_pipe_crc_irq_handler(dev, pipe);
2110 
2111 		/* plane/pipes map 1:1 on ilk+ */
2112 		if (de_iir & DE_PLANE_FLIP_DONE(pipe)) {
2113 			intel_prepare_page_flip(dev, pipe);
2114 			intel_finish_page_flip_plane(dev, pipe);
2115 		}
2116 	}
2117 
2118 	/* check event from PCH */
2119 	if (de_iir & DE_PCH_EVENT) {
2120 		u32 pch_iir = I915_READ(SDEIIR);
2121 
2122 		if (HAS_PCH_CPT(dev))
2123 			cpt_irq_handler(dev, pch_iir);
2124 		else
2125 			ibx_irq_handler(dev, pch_iir);
2126 
2127 		/* should clear PCH hotplug event before clearing CPU irq */
2128 		I915_WRITE(SDEIIR, pch_iir);
2129 	}
2130 
2131 	if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
2132 		ironlake_rps_change_irq_handler(dev);
2133 }
2134 
2135 static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
2136 {
2137 	struct drm_i915_private *dev_priv = dev->dev_private;
2138 	enum pipe pipe;
2139 
2140 	if (de_iir & DE_ERR_INT_IVB)
2141 		ivb_err_int_handler(dev);
2142 
2143 	if (de_iir & DE_AUX_CHANNEL_A_IVB)
2144 		dp_aux_irq_handler(dev);
2145 
2146 	if (de_iir & DE_GSE_IVB)
2147 		intel_opregion_asle_intr(dev);
2148 
2149 	for_each_pipe(dev_priv, pipe) {
2150 		if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) &&
2151 		    intel_pipe_handle_vblank(dev, pipe))
2152 			intel_check_page_flip(dev, pipe);
2153 
2154 		/* plane/pipes map 1:1 on ilk+ */
2155 		if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) {
2156 			intel_prepare_page_flip(dev, pipe);
2157 			intel_finish_page_flip_plane(dev, pipe);
2158 		}
2159 	}
2160 
2161 	/* check event from PCH */
2162 	if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
2163 		u32 pch_iir = I915_READ(SDEIIR);
2164 
2165 		cpt_irq_handler(dev, pch_iir);
2166 
2167 		/* clear PCH hotplug event before clearing CPU irq */
2168 		I915_WRITE(SDEIIR, pch_iir);
2169 	}
2170 }
2171 
2172 /*
2173  * To handle irqs with the minimum potential races with fresh interrupts, we:
2174  * 1 - Disable Master Interrupt Control.
2175  * 2 - Find the source(s) of the interrupt.
2176  * 3 - Clear the Interrupt Identity bits (IIR).
2177  * 4 - Process the interrupt(s) that had bits set in the IIRs.
2178  * 5 - Re-enable Master Interrupt Control.
2179  */
2180 static irqreturn_t ironlake_irq_handler(int irq, void *arg)
2181 {
2182 	struct drm_device *dev = arg;
2183 	struct drm_i915_private *dev_priv = dev->dev_private;
2184 	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
2185 	irqreturn_t ret = IRQ_NONE;
2186 
2187 	/* We get interrupts on unclaimed registers, so check for this before we
2188 	 * do any I915_{READ,WRITE}. */
2189 	intel_uncore_check_errors(dev);
2190 
2191 	/* disable master interrupt before clearing iir  */
2192 	de_ier = I915_READ(DEIER);
2193 	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
2194 	POSTING_READ(DEIER);
2195 
2196 	/* Disable south interrupts. We'll only write to SDEIIR once, so further
2197 	 * interrupts will be stored on its back queue, and then we'll be
2198 	 * able to process them after we restore SDEIER (as soon as we restore
2199 	 * it, we'll get an interrupt if SDEIIR still has something to process
2200 	 * due to its back queue). */
2201 	if (!HAS_PCH_NOP(dev)) {
2202 		sde_ier = I915_READ(SDEIER);
2203 		I915_WRITE(SDEIER, 0);
2204 		POSTING_READ(SDEIER);
2205 	}
2206 
2207 	/* Find, clear, then process each source of interrupt */
2208 
2209 	gt_iir = I915_READ(GTIIR);
2210 	if (gt_iir) {
2211 		I915_WRITE(GTIIR, gt_iir);
2212 		ret = IRQ_HANDLED;
2213 		if (INTEL_INFO(dev)->gen >= 6)
2214 			snb_gt_irq_handler(dev, dev_priv, gt_iir);
2215 		else
2216 			ilk_gt_irq_handler(dev, dev_priv, gt_iir);
2217 	}
2218 
2219 	de_iir = I915_READ(DEIIR);
2220 	if (de_iir) {
2221 		I915_WRITE(DEIIR, de_iir);
2222 		ret = IRQ_HANDLED;
2223 		if (INTEL_INFO(dev)->gen >= 7)
2224 			ivb_display_irq_handler(dev, de_iir);
2225 		else
2226 			ilk_display_irq_handler(dev, de_iir);
2227 	}
2228 
2229 	if (INTEL_INFO(dev)->gen >= 6) {
2230 		u32 pm_iir = I915_READ(GEN6_PMIIR);
2231 		if (pm_iir) {
2232 			I915_WRITE(GEN6_PMIIR, pm_iir);
2233 			ret = IRQ_HANDLED;
2234 			gen6_rps_irq_handler(dev_priv, pm_iir);
2235 		}
2236 	}
2237 
2238 	I915_WRITE(DEIER, de_ier);
2239 	POSTING_READ(DEIER);
2240 	if (!HAS_PCH_NOP(dev)) {
2241 		I915_WRITE(SDEIER, sde_ier);
2242 		POSTING_READ(SDEIER);
2243 	}
2244 
2245 	return ret;
2246 }
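
/*
 * A condensed view of the sequence described in the comment above
 * ironlake_irq_handler() (illustrative pseudo-steps, not a second
 * implementation):
 *
 *	de_ier = I915_READ(DEIER);
 *	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);	// 1: master off
 *	iir = I915_READ(DEIIR);					// 2: find source
 *	I915_WRITE(DEIIR, iir);					// 3: clear IIR
 *	handle(iir);						// 4: process
 *	I915_WRITE(DEIER, de_ier);				// 5: master on
 *
 * Because IIR is cleared before processing, an event that re-fires while
 * we are still handling it latches a fresh IIR bit and re-raises the
 * interrupt once the master bit is restored, so nothing is lost.
 */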
2247 
2248 static irqreturn_t gen8_irq_handler(int irq, void *arg)
2249 {
2250 	struct drm_device *dev = arg;
2251 	struct drm_i915_private *dev_priv = dev->dev_private;
2252 	u32 master_ctl;
2253 	irqreturn_t ret = IRQ_NONE;
2254 	uint32_t tmp = 0;
2255 	enum pipe pipe;
2256 	u32 aux_mask = GEN8_AUX_CHANNEL_A;
2257 
2258 	if (IS_GEN9(dev))
2259 		aux_mask |=  GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
2260 			GEN9_AUX_CHANNEL_D;
2261 
2262 	master_ctl = I915_READ(GEN8_MASTER_IRQ);
2263 	master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
2264 	if (!master_ctl)
2265 		return IRQ_NONE;
2266 
2267 	I915_WRITE(GEN8_MASTER_IRQ, 0);
2268 	POSTING_READ(GEN8_MASTER_IRQ);
2269 
2270 	/* Find, clear, then process each source of interrupt */
2271 
2272 	ret = gen8_gt_irq_handler(dev, dev_priv, master_ctl);
2273 
2274 	if (master_ctl & GEN8_DE_MISC_IRQ) {
2275 		tmp = I915_READ(GEN8_DE_MISC_IIR);
2276 		if (tmp) {
2277 			I915_WRITE(GEN8_DE_MISC_IIR, tmp);
2278 			ret = IRQ_HANDLED;
2279 			if (tmp & GEN8_DE_MISC_GSE)
2280 				intel_opregion_asle_intr(dev);
2281 			else
2282 				DRM_ERROR("Unexpected DE Misc interrupt\n");
2283 		}
2284 		else
2285 			DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
2286 	}
2287 
2288 	if (master_ctl & GEN8_DE_PORT_IRQ) {
2289 		tmp = I915_READ(GEN8_DE_PORT_IIR);
2290 		if (tmp) {
2291 			I915_WRITE(GEN8_DE_PORT_IIR, tmp);
2292 			ret = IRQ_HANDLED;
2293 
2294 			if (tmp & aux_mask)
2295 				dp_aux_irq_handler(dev);
2296 			else
2297 				DRM_ERROR("Unexpected DE Port interrupt\n");
2298 		}
2299 		else
2300 			DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
2301 	}
2302 
2303 	for_each_pipe(dev_priv, pipe) {
2304 		uint32_t pipe_iir, flip_done = 0, fault_errors = 0;
2305 
2306 		if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
2307 			continue;
2308 
2309 		pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
2310 		if (pipe_iir) {
2311 			ret = IRQ_HANDLED;
2312 			I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
2313 
2314 			if (pipe_iir & GEN8_PIPE_VBLANK &&
2315 			    intel_pipe_handle_vblank(dev, pipe))
2316 				intel_check_page_flip(dev, pipe);
2317 
2318 			if (IS_GEN9(dev))
2319 				flip_done = pipe_iir & GEN9_PIPE_PLANE1_FLIP_DONE;
2320 			else
2321 				flip_done = pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE;
2322 
2323 			if (flip_done) {
2324 				intel_prepare_page_flip(dev, pipe);
2325 				intel_finish_page_flip_plane(dev, pipe);
2326 			}
2327 
2328 			if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
2329 				hsw_pipe_crc_irq_handler(dev, pipe);
2330 
2331 			if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN)
2332 				intel_cpu_fifo_underrun_irq_handler(dev_priv,
2333 								    pipe);
2334 
2335 
2336 			if (IS_GEN9(dev))
2337 				fault_errors = pipe_iir & GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
2338 			else
2339 				fault_errors = pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
2340 
2341 			if (fault_errors)
2342 				DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
2343 					  pipe_name(pipe),
2344 					  pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS);
2345 		} else
2346 			DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
2347 	}
2348 
2349 	if (!HAS_PCH_NOP(dev) && master_ctl & GEN8_DE_PCH_IRQ) {
2350 		/*
2351 		 * FIXME(BDW): Assume for now that the new interrupt handling
2352 		 * scheme also closed the SDE interrupt handling race we've seen
2353 		 * on older pch-split platforms. But this needs testing.
2354 		 */
2355 		u32 pch_iir = I915_READ(SDEIIR);
2356 		if (pch_iir) {
2357 			I915_WRITE(SDEIIR, pch_iir);
2358 			ret = IRQ_HANDLED;
2359 			cpt_irq_handler(dev, pch_iir);
2360 		} else
2361 			DRM_ERROR("The master control interrupt lied (SDE)!\n");
2362 
2363 	}
2364 
2365 	I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
2366 	POSTING_READ(GEN8_MASTER_IRQ);
2367 
2368 	return ret;
2369 }
2370 
2371 static void i915_error_wake_up(struct drm_i915_private *dev_priv,
2372 			       bool reset_completed)
2373 {
2374 	struct intel_engine_cs *ring;
2375 	int i;
2376 
2377 	/*
2378 	 * Notify all waiters for GPU completion events that reset state has
2379 	 * been changed, and that they need to restart their wait after
2380 	 * checking for potential errors (and bail out to drop locks if there is
2381 	 * a gpu reset pending so that i915_error_work_func can acquire them).
2382 	 */
2383 
2384 	/* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
2385 	for_each_ring(ring, dev_priv, i)
2386 		wake_up_all(&ring->irq_queue);
2387 
2388 	/* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
2389 	wake_up_all(&dev_priv->pending_flip_queue);
2390 
2391 	/*
2392 	 * Signal tasks blocked in i915_gem_wait_for_error that the pending
2393 	 * reset state is cleared.
2394 	 */
2395 	if (reset_completed)
2396 		wake_up_all(&dev_priv->gpu_error.reset_queue);
2397 }
2398 
2399 /**
2400  * i915_error_work_func - do process context error handling work
2401  * @work: work struct
2402  *
2403  * Fire an error uevent so userspace can see that a hang or error
2404  * was detected.
2405  */
2406 static void i915_error_work_func(struct work_struct *work)
2407 {
2408 	struct i915_gpu_error *error = container_of(work, struct i915_gpu_error,
2409 						    work);
2410 	struct drm_i915_private *dev_priv =
2411 		container_of(error, struct drm_i915_private, gpu_error);
2412 	struct drm_device *dev = dev_priv->dev;
2413 	char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
2414 	char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
2415 	char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
2416 	int ret;
2417 
2418 	kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event);
2419 
2420 	/*
2421 	 * Note that there's only one work item which does gpu resets, so we
2422 	 * need not worry about concurrent gpu resets potentially incrementing
2423 	 * error->reset_counter twice. We only need to take care of another
2424 	 * racing irq/hangcheck declaring the gpu dead for a second time. A
2425 	 * quick check for that is good enough: schedule_work ensures the
2426 	 * correct ordering between hang detection and this work item, and since
2427 	 * the reset in-progress bit is only ever set by code outside of this
2428 	 * work we don't need to worry about any other races.
2429 	 */
2430 	if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
2431 		DRM_DEBUG_DRIVER("resetting chip\n");
2432 		kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
2433 				   reset_event);
2434 
2435 		/*
2436 		 * In most cases it's guaranteed that we get here with an RPM
2437 		 * reference held, for example because there is a pending GPU
2438 		 * request that won't finish until the reset is done. This
2439 		 * isn't the case at least when we get here by doing a
2440 		 * simulated reset via debugfs, so get an RPM reference.
2441 		 */
2442 		intel_runtime_pm_get(dev_priv);
2443 
2444 		intel_prepare_reset(dev);
2445 
2446 		/*
2447 		 * All state reset _must_ be completed before we update the
2448 		 * reset counter, for otherwise waiters might miss the reset
2449 		 * pending state and not properly drop locks, resulting in
2450 		 * deadlocks with the reset work.
2451 		 */
2452 		ret = i915_reset(dev);
2453 
2454 		intel_finish_reset(dev);
2455 
2456 		intel_runtime_pm_put(dev_priv);
2457 
2458 		if (ret == 0) {
2459 			/*
2460 			 * After all the gem state is reset, increment the reset
2461 			 * counter and wake up everyone waiting for the reset to
2462 			 * complete.
2463 			 *
2464 			 * Since unlock operations are a one-sided barrier only,
2465 			 * we need to insert a barrier here to order any seqno
2466 			 * updates before
2467 			 * the counter increment.
2468 			 */
2469 			smp_mb__before_atomic();
2470 			atomic_inc(&dev_priv->gpu_error.reset_counter);
2471 
2472 			kobject_uevent_env(&dev->primary->kdev->kobj,
2473 					   KOBJ_CHANGE, reset_done_event);
2474 		} else {
2475 			atomic_set_mask(I915_WEDGED, &error->reset_counter);
2476 		}
2477 
2478 		/*
2479 		 * Note: The wake_up also serves as a memory barrier so that
2480 		 * waiters see the updated value of the reset counter atomic_t.
2481 		 */
2482 		i915_error_wake_up(dev_priv, true);
2483 	}
2484 }
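
/*
 * For reference, the waiter side of this protocol (__wait_seqno,
 * i915_gem_wait_for_error and friends) is expected to check the reset
 * state when woken, drop its locks and only retry once reset_queue
 * signals completion.  A simplified sketch, not the actual waiter code:
 *
 *	if (i915_reset_in_progress(&dev_priv->gpu_error) &&
 *	    !i915_terminally_wedged(&dev_priv->gpu_error))
 *		return -EAGAIN;	// back off, drop struct_mutex, retry later
 */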
2485 
2486 static void i915_report_and_clear_eir(struct drm_device *dev)
2487 {
2488 	struct drm_i915_private *dev_priv = dev->dev_private;
2489 	uint32_t instdone[I915_NUM_INSTDONE_REG];
2490 	u32 eir = I915_READ(EIR);
2491 	int pipe, i;
2492 
2493 	if (!eir)
2494 		return;
2495 
2496 	pr_err("render error detected, EIR: 0x%08x\n", eir);
2497 
2498 	i915_get_extra_instdone(dev, instdone);
2499 
2500 	if (IS_G4X(dev)) {
2501 		if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
2502 			u32 ipeir = I915_READ(IPEIR_I965);
2503 
2504 			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2505 			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
2506 			for (i = 0; i < ARRAY_SIZE(instdone); i++)
2507 				pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
2508 			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
2509 			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
2510 			I915_WRITE(IPEIR_I965, ipeir);
2511 			POSTING_READ(IPEIR_I965);
2512 		}
2513 		if (eir & GM45_ERROR_PAGE_TABLE) {
2514 			u32 pgtbl_err = I915_READ(PGTBL_ER);
2515 			pr_err("page table error\n");
2516 			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
2517 			I915_WRITE(PGTBL_ER, pgtbl_err);
2518 			POSTING_READ(PGTBL_ER);
2519 		}
2520 	}
2521 
2522 	if (!IS_GEN2(dev)) {
2523 		if (eir & I915_ERROR_PAGE_TABLE) {
2524 			u32 pgtbl_err = I915_READ(PGTBL_ER);
2525 			pr_err("page table error\n");
2526 			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
2527 			I915_WRITE(PGTBL_ER, pgtbl_err);
2528 			POSTING_READ(PGTBL_ER);
2529 		}
2530 	}
2531 
2532 	if (eir & I915_ERROR_MEMORY_REFRESH) {
2533 		pr_err("memory refresh error:\n");
2534 		for_each_pipe(dev_priv, pipe)
2535 			pr_err("pipe %c stat: 0x%08x\n",
2536 			       pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
2537 		/* pipestat has already been acked */
2538 	}
2539 	if (eir & I915_ERROR_INSTRUCTION) {
2540 		pr_err("instruction error\n");
2541 		pr_err("  INSTPM: 0x%08x\n", I915_READ(INSTPM));
2542 		for (i = 0; i < ARRAY_SIZE(instdone); i++)
2543 			pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
2544 		if (INTEL_INFO(dev)->gen < 4) {
2545 			u32 ipeir = I915_READ(IPEIR);
2546 
2547 			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR));
2548 			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR));
2549 			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD));
2550 			I915_WRITE(IPEIR, ipeir);
2551 			POSTING_READ(IPEIR);
2552 		} else {
2553 			u32 ipeir = I915_READ(IPEIR_I965);
2554 
2555 			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2556 			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
2557 			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
2558 			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
2559 			I915_WRITE(IPEIR_I965, ipeir);
2560 			POSTING_READ(IPEIR_I965);
2561 		}
2562 	}
2563 
2564 	I915_WRITE(EIR, eir);
2565 	POSTING_READ(EIR);
2566 	eir = I915_READ(EIR);
2567 	if (eir) {
2568 		/*
2569 		 * some errors might have become stuck,
2570 		 * mask them.
2571 		 */
2572 		DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
2573 		I915_WRITE(EMR, I915_READ(EMR) | eir);
2574 		I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2575 	}
2576 }
2577 
2578 /**
2579  * i915_handle_error - handle an error interrupt
2580  * @dev: drm device
2581  *
2582  * Do some basic checking of register state at error interrupt time and
2583  * dump it to the syslog.  Also call i915_capture_error_state() to make
2584  * sure we get a record and make it available in debugfs.  Fire a uevent
2585  * so userspace knows something bad happened (should trigger collection
2586  * of a ring dump etc.).
2587  */
2588 void i915_handle_error(struct drm_device *dev, bool wedged,
2589 		       const char *fmt, ...)
2590 {
2591 	struct drm_i915_private *dev_priv = dev->dev_private;
2592 	va_list args;
2593 	char error_msg[80];
2594 
2595 	va_start(args, fmt);
2596 	vscnprintf(error_msg, sizeof(error_msg), fmt, args);
2597 	va_end(args);
2598 
2599 	i915_capture_error_state(dev, wedged, error_msg);
2600 	i915_report_and_clear_eir(dev);
2601 
2602 	if (wedged) {
2603 		atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
2604 				&dev_priv->gpu_error.reset_counter);
2605 
2606 		/*
2607 		 * Wakeup waiting processes so that the reset work function
2608 		 * i915_error_work_func doesn't deadlock trying to grab various
2609 		 * locks. By bumping the reset counter first, the woken
2610 		 * processes will see a reset in progress and back off,
2611 		 * releasing their locks and then wait for the reset completion.
2612 		 * We must do this for _all_ gpu waiters that might hold locks
2613 		 * that the reset work needs to acquire.
2614 		 *
2615 		 * Note: The wake_up serves as the required memory barrier to
2616 		 * ensure that the waiters see the updated value of the reset
2617 		 * counter atomic_t.
2618 		 */
2619 		i915_error_wake_up(dev_priv, false);
2620 	}
2621 
2622 	/*
2623 	 * Our reset work can grab modeset locks (since it needs to reset the
2624 	 * state of outstanding pageflips). Hence it must not be run on our own
2625 	 * dev_priv->wq work queue for otherwise the flush_work in the pageflip
2626 	 * code will deadlock.
2627 	 */
2628 	schedule_work(&dev_priv->gpu_error.work);
2629 }
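
/*
 * Typical usage, as seen in the hangcheck path further below: pass
 * wedged=true together with a printf-style reason, e.g.
 *
 *	i915_handle_error(dev, true, "Ring hung");
 *
 * Callers such as ring_stuck() pass wedged=false, in which case the error
 * work still fires the uevent and captures state but no reset is started,
 * since the reset-in-progress flag is only set for wedged errors.
 */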
2630 
2631 /* Called from drm generic code, passed 'crtc' which
2632  * we use as a pipe index
2633  */
2634 static int i915_enable_vblank(struct drm_device *dev, int pipe)
2635 {
2636 	struct drm_i915_private *dev_priv = dev->dev_private;
2637 	unsigned long irqflags;
2638 
2639 	if (!i915_pipe_enabled(dev, pipe))
2640 		return -EINVAL;
2641 
2642 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2643 	if (INTEL_INFO(dev)->gen >= 4)
2644 		i915_enable_pipestat(dev_priv, pipe,
2645 				     PIPE_START_VBLANK_INTERRUPT_STATUS);
2646 	else
2647 		i915_enable_pipestat(dev_priv, pipe,
2648 				     PIPE_VBLANK_INTERRUPT_STATUS);
2649 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2650 
2651 	return 0;
2652 }
2653 
2654 static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
2655 {
2656 	struct drm_i915_private *dev_priv = dev->dev_private;
2657 	unsigned long irqflags;
2658 	uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
2659 						     DE_PIPE_VBLANK(pipe);
2660 
2661 	if (!i915_pipe_enabled(dev, pipe))
2662 		return -EINVAL;
2663 
2664 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2665 	ironlake_enable_display_irq(dev_priv, bit);
2666 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2667 
2668 	return 0;
2669 }
2670 
2671 static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
2672 {
2673 	struct drm_i915_private *dev_priv = dev->dev_private;
2674 	unsigned long irqflags;
2675 
2676 	if (!i915_pipe_enabled(dev, pipe))
2677 		return -EINVAL;
2678 
2679 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2680 	i915_enable_pipestat(dev_priv, pipe,
2681 			     PIPE_START_VBLANK_INTERRUPT_STATUS);
2682 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2683 
2684 	return 0;
2685 }
2686 
2687 static int gen8_enable_vblank(struct drm_device *dev, int pipe)
2688 {
2689 	struct drm_i915_private *dev_priv = dev->dev_private;
2690 	unsigned long irqflags;
2691 
2692 	if (!i915_pipe_enabled(dev, pipe))
2693 		return -EINVAL;
2694 
2695 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2696 	dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK;
2697 	I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
2698 	POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
2699 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2700 	return 0;
2701 }
2702 
2703 /* Called from drm generic code, passed 'crtc' which
2704  * we use as a pipe index
2705  */
2706 static void i915_disable_vblank(struct drm_device *dev, int pipe)
2707 {
2708 	struct drm_i915_private *dev_priv = dev->dev_private;
2709 	unsigned long irqflags;
2710 
2711 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2712 	i915_disable_pipestat(dev_priv, pipe,
2713 			      PIPE_VBLANK_INTERRUPT_STATUS |
2714 			      PIPE_START_VBLANK_INTERRUPT_STATUS);
2715 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2716 }
2717 
2718 static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
2719 {
2720 	struct drm_i915_private *dev_priv = dev->dev_private;
2721 	unsigned long irqflags;
2722 	uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
2723 						     DE_PIPE_VBLANK(pipe);
2724 
2725 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2726 	ironlake_disable_display_irq(dev_priv, bit);
2727 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2728 }
2729 
2730 static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
2731 {
2732 	struct drm_i915_private *dev_priv = dev->dev_private;
2733 	unsigned long irqflags;
2734 
2735 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2736 	i915_disable_pipestat(dev_priv, pipe,
2737 			      PIPE_START_VBLANK_INTERRUPT_STATUS);
2738 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2739 }
2740 
2741 static void gen8_disable_vblank(struct drm_device *dev, int pipe)
2742 {
2743 	struct drm_i915_private *dev_priv = dev->dev_private;
2744 	unsigned long irqflags;
2745 
2746 	if (!i915_pipe_enabled(dev, pipe))
2747 		return;
2748 
2749 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2750 	dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK;
2751 	I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
2752 	POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
2753 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2754 }
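
/*
 * Note how the vblank enable/disable hooks above differ by generation:
 * gmch-style platforms (i915/vlv) toggle PIPESTAT enable bits, ilk-ivb
 * toggle DEIMR bits via ironlake_{enable,disable}_display_irq(), and gen8+
 * program the per-pipe GEN8_DE_PIPE_IMR register directly, caching the
 * mask in de_irq_mask[] so other interrupt sources on the pipe stay live.
 */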
2755 
2756 static u32
2757 ring_last_seqno(struct intel_engine_cs *ring)
2758 {
2759 	return list_entry(ring->request_list.prev,
2760 			  struct drm_i915_gem_request, list)->seqno;
2761 }
2762 
2763 static bool
2764 ring_idle(struct intel_engine_cs *ring, u32 seqno)
2765 {
2766 	return (list_empty(&ring->request_list) ||
2767 		i915_seqno_passed(seqno, ring_last_seqno(ring)));
2768 }
2769 
2770 static bool
2771 ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
2772 {
2773 	if (INTEL_INFO(dev)->gen >= 8) {
2774 		return (ipehr >> 23) == 0x1c;
2775 	} else {
2776 		ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
2777 		return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
2778 				 MI_SEMAPHORE_REGISTER);
2779 	}
2780 }
2781 
2782 static struct intel_engine_cs *
2783 semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr, u64 offset)
2784 {
2785 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
2786 	struct intel_engine_cs *signaller;
2787 	int i;
2788 
2789 	if (INTEL_INFO(dev_priv->dev)->gen >= 8) {
2790 		for_each_ring(signaller, dev_priv, i) {
2791 			if (ring == signaller)
2792 				continue;
2793 
2794 			if (offset == signaller->semaphore.signal_ggtt[ring->id])
2795 				return signaller;
2796 		}
2797 	} else {
2798 		u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
2799 
2800 		for_each_ring(signaller, dev_priv, i) {
2801 			if (ring == signaller)
2802 				continue;
2803 
2804 			if (sync_bits == signaller->semaphore.mbox.wait[ring->id])
2805 				return signaller;
2806 		}
2807 	}
2808 
2809 	DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n",
2810 		  ring->id, ipehr, offset);
2811 
2812 	return NULL;
2813 }
2814 
2815 static struct intel_engine_cs *
2816 semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
2817 {
2818 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
2819 	u32 cmd, ipehr, head;
2820 	u64 offset = 0;
2821 	int i, backwards;
2822 
2823 	ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
2824 	if (!ipehr_is_semaphore_wait(ring->dev, ipehr))
2825 		return NULL;
2826 
2827 	/*
2828 	 * HEAD is likely pointing to the dword after the actual command,
2829 	 * so scan backwards until we find the MBOX. But limit it to just 3
2830 	 * or 4 dwords depending on the semaphore wait command size.
2831 	 * Note that we don't care about ACTHD here since that might
2832 	 * point at a batch, and semaphores are always emitted into the
2833 	 * ringbuffer itself.
2834 	 */
2835 	head = I915_READ_HEAD(ring) & HEAD_ADDR;
2836 	backwards = (INTEL_INFO(ring->dev)->gen >= 8) ? 5 : 4;
2837 
2838 	for (i = backwards; i; --i) {
2839 		/*
2840 		 * Be paranoid and presume the hw has gone off into the wild -
2841 		 * our ring is smaller than what the hardware (and hence
2842 		 * HEAD_ADDR) allows. Also handles wrap-around.
2843 		 */
2844 		head &= ring->buffer->size - 1;
2845 
2846 		/* This here seems to blow up */
2847 		cmd = ioread32(ring->buffer->virtual_start + head);
2848 		if (cmd == ipehr)
2849 			break;
2850 
2851 		head -= 4;
2852 	}
2853 
2854 	if (!i)
2855 		return NULL;
2856 
2857 	*seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1;
2858 	if (INTEL_INFO(ring->dev)->gen >= 8) {
2859 		offset = ioread32(ring->buffer->virtual_start + head + 12);
2860 		offset <<= 32;
2861 		offset |= ioread32(ring->buffer->virtual_start + head + 8);
2862 	}
2863 	return semaphore_wait_to_signaller_ring(ring, ipehr, offset);
2864 }
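
/*
 * The reads above imply the following dword layout for the semaphore wait
 * command being scanned for (inferred from the offsets used, not from
 * documentation): head + 0 holds the MI_SEMAPHORE_* opcode matched against
 * IPEHR, head + 4 the seqno being waited for (hence the "+ 1" to get the
 * seqno that would unblock us), and on gen8+ head + 8 / head + 12 the low
 * and high halves of the 64-bit signal offset.
 */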
2865 
2866 static int semaphore_passed(struct intel_engine_cs *ring)
2867 {
2868 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
2869 	struct intel_engine_cs *signaller;
2870 	u32 seqno;
2871 
2872 	ring->hangcheck.deadlock++;
2873 
2874 	signaller = semaphore_waits_for(ring, &seqno);
2875 	if (signaller == NULL)
2876 		return -1;
2877 
2878 	/* Prevent pathological recursion due to driver bugs */
2879 	if (signaller->hangcheck.deadlock >= I915_NUM_RINGS)
2880 		return -1;
2881 
2882 	if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno))
2883 		return 1;
2884 
2885 	/* cursory check for an unkickable deadlock */
2886 	if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
2887 	    semaphore_passed(signaller) < 0)
2888 		return -1;
2889 
2890 	return 0;
2891 }
2892 
2893 static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
2894 {
2895 	struct intel_engine_cs *ring;
2896 	int i;
2897 
2898 	for_each_ring(ring, dev_priv, i)
2899 		ring->hangcheck.deadlock = 0;
2900 }
2901 
2902 static enum intel_ring_hangcheck_action
2903 ring_stuck(struct intel_engine_cs *ring, u64 acthd)
2904 {
2905 	struct drm_device *dev = ring->dev;
2906 	struct drm_i915_private *dev_priv = dev->dev_private;
2907 	u32 tmp;
2908 
2909 	if (acthd != ring->hangcheck.acthd) {
2910 		if (acthd > ring->hangcheck.max_acthd) {
2911 			ring->hangcheck.max_acthd = acthd;
2912 			return HANGCHECK_ACTIVE;
2913 		}
2914 
2915 		return HANGCHECK_ACTIVE_LOOP;
2916 	}
2917 
2918 	if (IS_GEN2(dev))
2919 		return HANGCHECK_HUNG;
2920 
2921 	/* Is the chip hanging on a WAIT_FOR_EVENT?
2922 	 * If so we can simply poke the RB_WAIT bit
2923 	 * and break the hang. This should work on
2924 	 * all but the second generation chipsets.
2925 	 */
2926 	tmp = I915_READ_CTL(ring);
2927 	if (tmp & RING_WAIT) {
2928 		i915_handle_error(dev, false,
2929 				  "Kicking stuck wait on %s",
2930 				  ring->name);
2931 		I915_WRITE_CTL(ring, tmp);
2932 		return HANGCHECK_KICK;
2933 	}
2934 
2935 	if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
2936 		switch (semaphore_passed(ring)) {
2937 		default:
2938 			return HANGCHECK_HUNG;
2939 		case 1:
2940 			i915_handle_error(dev, false,
2941 					  "Kicking stuck semaphore on %s",
2942 					  ring->name);
2943 			I915_WRITE_CTL(ring, tmp);
2944 			return HANGCHECK_KICK;
2945 		case 0:
2946 			return HANGCHECK_WAIT;
2947 		}
2948 	}
2949 
2950 	return HANGCHECK_HUNG;
2951 }
2952 
2953 /**
2954  * This is called when the chip hasn't reported back with completed
2955  * batchbuffers in a long time. We keep track of per-ring seqno progress and
2956  * if there is no progress, the hangcheck score for that ring is increased.
2957  * Further, acthd is inspected to see if the ring is stuck. If it is, we
2958  * kick the ring. If we see no progress on three subsequent calls
2959  * we assume chip is wedged and try to fix it by resetting the chip.
2960  */
2961 static void i915_hangcheck_elapsed(unsigned long data)
2962 {
2963 	struct drm_device *dev = (struct drm_device *)data;
2964 	struct drm_i915_private *dev_priv = dev->dev_private;
2965 	struct intel_engine_cs *ring;
2966 	int i;
2967 	int busy_count = 0, rings_hung = 0;
2968 	bool stuck[I915_NUM_RINGS] = { 0 };
2969 #define BUSY 1
2970 #define KICK 5
2971 #define HUNG 20
2972 
2973 	if (!i915.enable_hangcheck)
2974 		return;
2975 
2976 	for_each_ring(ring, dev_priv, i) {
2977 		u64 acthd;
2978 		u32 seqno;
2979 		bool busy = true;
2980 
2981 		semaphore_clear_deadlocks(dev_priv);
2982 
2983 		seqno = ring->get_seqno(ring, false);
2984 		acthd = intel_ring_get_active_head(ring);
2985 
2986 		if (ring->hangcheck.seqno == seqno) {
2987 			if (ring_idle(ring, seqno)) {
2988 				ring->hangcheck.action = HANGCHECK_IDLE;
2989 
2990 				if (waitqueue_active(&ring->irq_queue)) {
2991 					/* Issue a wake-up to catch stuck h/w. */
2992 					if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) {
2993 						if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)))
2994 							DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
2995 								  ring->name);
2996 						else
2997 							DRM_INFO("Fake missed irq on %s\n",
2998 								 ring->name);
2999 						wake_up_all(&ring->irq_queue);
3000 					}
3001 					/* Safeguard against driver failure */
3002 					ring->hangcheck.score += BUSY;
3003 				} else
3004 					busy = false;
3005 			} else {
3006 				/* We always increment the hangcheck score
3007 				 * if the ring is busy and still processing
3008 				 * the same request, so that no single request
3009 				 * can run indefinitely (such as a chain of
3010 				 * batches). The only time we do not increment
3011 				 * the hangcheck score on this ring is if this
3012 				 * ring is in a legitimate wait for another
3013 				 * ring. In that case the waiting ring is a
3014 				 * victim and we want to be sure we catch the
3015 				 * right culprit. Then every time we do kick
3016 				 * the ring, add a small increment to the
3017 				 * score so that we can catch a batch that is
3018 				 * being repeatedly kicked and so responsible
3019 				 * for stalling the machine.
3020 				 */
3021 				ring->hangcheck.action = ring_stuck(ring,
3022 								    acthd);
3023 
3024 				switch (ring->hangcheck.action) {
3025 				case HANGCHECK_IDLE:
3026 				case HANGCHECK_WAIT:
3027 				case HANGCHECK_ACTIVE:
3028 					break;
3029 				case HANGCHECK_ACTIVE_LOOP:
3030 					ring->hangcheck.score += BUSY;
3031 					break;
3032 				case HANGCHECK_KICK:
3033 					ring->hangcheck.score += KICK;
3034 					break;
3035 				case HANGCHECK_HUNG:
3036 					ring->hangcheck.score += HUNG;
3037 					stuck[i] = true;
3038 					break;
3039 				}
3040 			}
3041 		} else {
3042 			ring->hangcheck.action = HANGCHECK_ACTIVE;
3043 
3044 			/* Gradually reduce the count so that we catch DoS
3045 			 * attempts across multiple batches.
3046 			 */
3047 			if (ring->hangcheck.score > 0)
3048 				ring->hangcheck.score--;
3049 
3050 			ring->hangcheck.acthd = ring->hangcheck.max_acthd = 0;
3051 		}
3052 
3053 		ring->hangcheck.seqno = seqno;
3054 		ring->hangcheck.acthd = acthd;
3055 		busy_count += busy;
3056 	}
3057 
3058 	for_each_ring(ring, dev_priv, i) {
3059 		if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
3060 			DRM_INFO("%s on %s\n",
3061 				 stuck[i] ? "stuck" : "no progress",
3062 				 ring->name);
3063 			rings_hung++;
3064 		}
3065 	}
3066 
3067 	if (rings_hung)
3068 		return i915_handle_error(dev, true, "Ring hung");
3069 
3070 	if (busy_count)
3071 		/* Reset timer in case the chip hangs without another request
3072 		 * being added */
3073 		i915_queue_hangcheck(dev);
3074 }
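
/*
 * With the weights above (BUSY = 1, KICK = 5, HUNG = 20) and a hung
 * threshold of a few tens (HANGCHECK_SCORE_RING_HUNG, defined in
 * i915_drv.h), a fully hung ring crosses the threshold after a couple of
 * checks, a ring that keeps getting kicked after a handful, and an
 * idle-but-waiting ring only after many periods, since it creeps up by 1.
 */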
3075 
3076 void i915_queue_hangcheck(struct drm_device *dev)
3077 {
3078 	struct drm_i915_private *dev_priv = dev->dev_private;
3079 	struct timer_list *timer = &dev_priv->gpu_error.hangcheck_timer;
3080 
3081 	if (!i915.enable_hangcheck)
3082 		return;
3083 
3084 	/* Don't continually defer the hangcheck, but make sure it is active */
3085 	if (timer_pending(timer))
3086 		return;
3087 	mod_timer(timer,
3088 		  round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
3089 }
3090 
3091 static void ibx_irq_reset(struct drm_device *dev)
3092 {
3093 	struct drm_i915_private *dev_priv = dev->dev_private;
3094 
3095 	if (HAS_PCH_NOP(dev))
3096 		return;
3097 
3098 	GEN5_IRQ_RESET(SDE);
3099 
3100 	if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
3101 		I915_WRITE(SERR_INT, 0xffffffff);
3102 }
3103 
3104 /*
3105  * SDEIER is also touched by the interrupt handler to work around missed PCH
3106  * interrupts. Hence we can't update it after the interrupt handler is enabled -
3107  * instead we unconditionally enable all PCH interrupt sources here, but then
3108  * only unmask them as needed with SDEIMR.
3109  *
3110  * This function needs to be called before interrupts are enabled.
3111  */
3112 static void ibx_irq_pre_postinstall(struct drm_device *dev)
3113 {
3114 	struct drm_i915_private *dev_priv = dev->dev_private;
3115 
3116 	if (HAS_PCH_NOP(dev))
3117 		return;
3118 
3119 	WARN_ON(I915_READ(SDEIER) != 0);
3120 	I915_WRITE(SDEIER, 0xffffffff);
3121 	POSTING_READ(SDEIER);
3122 }
3123 
3124 static void gen5_gt_irq_reset(struct drm_device *dev)
3125 {
3126 	struct drm_i915_private *dev_priv = dev->dev_private;
3127 
3128 	GEN5_IRQ_RESET(GT);
3129 	if (INTEL_INFO(dev)->gen >= 6)
3130 		GEN5_IRQ_RESET(GEN6_PM);
3131 }
3132 
3133 /* drm_dma.h hooks
3134 */
3135 static void ironlake_irq_reset(struct drm_device *dev)
3136 {
3137 	struct drm_i915_private *dev_priv = dev->dev_private;
3138 
3139 	I915_WRITE(HWSTAM, 0xffffffff);
3140 
3141 	GEN5_IRQ_RESET(DE);
3142 	if (IS_GEN7(dev))
3143 		I915_WRITE(GEN7_ERR_INT, 0xffffffff);
3144 
3145 	gen5_gt_irq_reset(dev);
3146 
3147 	ibx_irq_reset(dev);
3148 }
3149 
3150 static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
3151 {
3152 	enum pipe pipe;
3153 
3154 	I915_WRITE(PORT_HOTPLUG_EN, 0);
3155 	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3156 
3157 	for_each_pipe(dev_priv, pipe)
3158 		I915_WRITE(PIPESTAT(pipe), 0xffff);
3159 
3160 	GEN5_IRQ_RESET(VLV_);
3161 }
3162 
3163 static void valleyview_irq_preinstall(struct drm_device *dev)
3164 {
3165 	struct drm_i915_private *dev_priv = dev->dev_private;
3166 
3167 	/* VLV magic */
3168 	I915_WRITE(VLV_IMR, 0);
3169 	I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
3170 	I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
3171 	I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
3172 
3173 	gen5_gt_irq_reset(dev);
3174 
3175 	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
3176 
3177 	vlv_display_irq_reset(dev_priv);
3178 }
3179 
3180 static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
3181 {
3182 	GEN8_IRQ_RESET_NDX(GT, 0);
3183 	GEN8_IRQ_RESET_NDX(GT, 1);
3184 	GEN8_IRQ_RESET_NDX(GT, 2);
3185 	GEN8_IRQ_RESET_NDX(GT, 3);
3186 }
3187 
3188 static void gen8_irq_reset(struct drm_device *dev)
3189 {
3190 	struct drm_i915_private *dev_priv = dev->dev_private;
3191 	int pipe;
3192 
3193 	I915_WRITE(GEN8_MASTER_IRQ, 0);
3194 	POSTING_READ(GEN8_MASTER_IRQ);
3195 
3196 	gen8_gt_irq_reset(dev_priv);
3197 
3198 	for_each_pipe(dev_priv, pipe)
3199 		if (intel_display_power_is_enabled(dev_priv,
3200 						   POWER_DOMAIN_PIPE(pipe)))
3201 			GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
3202 
3203 	GEN5_IRQ_RESET(GEN8_DE_PORT_);
3204 	GEN5_IRQ_RESET(GEN8_DE_MISC_);
3205 	GEN5_IRQ_RESET(GEN8_PCU_);
3206 
3207 	ibx_irq_reset(dev);
3208 }
3209 
3210 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv)
3211 {
3212 	uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
3213 
3214 	spin_lock_irq(&dev_priv->irq_lock);
3215 	GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B, dev_priv->de_irq_mask[PIPE_B],
3216 			  ~dev_priv->de_irq_mask[PIPE_B] | extra_ier);
3217 	GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C, dev_priv->de_irq_mask[PIPE_C],
3218 			  ~dev_priv->de_irq_mask[PIPE_C] | extra_ier);
3219 	spin_unlock_irq(&dev_priv->irq_lock);
3220 }
3221 
3222 static void cherryview_irq_preinstall(struct drm_device *dev)
3223 {
3224 	struct drm_i915_private *dev_priv = dev->dev_private;
3225 
3226 	I915_WRITE(GEN8_MASTER_IRQ, 0);
3227 	POSTING_READ(GEN8_MASTER_IRQ);
3228 
3229 	gen8_gt_irq_reset(dev_priv);
3230 
3231 	GEN5_IRQ_RESET(GEN8_PCU_);
3232 
3233 	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
3234 
3235 	vlv_display_irq_reset(dev_priv);
3236 }
3237 
3238 static void ibx_hpd_irq_setup(struct drm_device *dev)
3239 {
3240 	struct drm_i915_private *dev_priv = dev->dev_private;
3241 	struct intel_encoder *intel_encoder;
3242 	u32 hotplug_irqs, hotplug, enabled_irqs = 0;
3243 
3244 	if (HAS_PCH_IBX(dev)) {
3245 		hotplug_irqs = SDE_HOTPLUG_MASK;
3246 		for_each_intel_encoder(dev, intel_encoder)
3247 			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
3248 				enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin];
3249 	} else {
3250 		hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
3251 		for_each_intel_encoder(dev, intel_encoder)
3252 			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
3253 				enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin];
3254 	}
3255 
3256 	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3257 
3258 	/*
3259 	 * Enable digital hotplug on the PCH, and configure the DP short pulse
3260 	 * duration to 2ms (which is the minimum in the Display Port spec)
3261 	 *
3262 	 * This register is the same on all known PCH chips.
3263 	 */
3264 	hotplug = I915_READ(PCH_PORT_HOTPLUG);
3265 	hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
3266 	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
3267 	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
3268 	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
3269 	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3270 }
3271 
3272 static void ibx_irq_postinstall(struct drm_device *dev)
3273 {
3274 	struct drm_i915_private *dev_priv = dev->dev_private;
3275 	u32 mask;
3276 
3277 	if (HAS_PCH_NOP(dev))
3278 		return;
3279 
3280 	if (HAS_PCH_IBX(dev))
3281 		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
3282 	else
3283 		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
3284 
3285 	GEN5_ASSERT_IIR_IS_ZERO(SDEIIR);
3286 	I915_WRITE(SDEIMR, ~mask);
3287 }
3288 
3289 static void gen5_gt_irq_postinstall(struct drm_device *dev)
3290 {
3291 	struct drm_i915_private *dev_priv = dev->dev_private;
3292 	u32 pm_irqs, gt_irqs;
3293 
3294 	pm_irqs = gt_irqs = 0;
3295 
3296 	dev_priv->gt_irq_mask = ~0;
3297 	if (HAS_L3_DPF(dev)) {
3298 		/* L3 parity interrupt is always unmasked. */
3299 		dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
3300 		gt_irqs |= GT_PARITY_ERROR(dev);
3301 	}
3302 
3303 	gt_irqs |= GT_RENDER_USER_INTERRUPT;
3304 	if (IS_GEN5(dev)) {
3305 		gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
3306 			   ILK_BSD_USER_INTERRUPT;
3307 	} else {
3308 		gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
3309 	}
3310 
3311 	GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
3312 
3313 	if (INTEL_INFO(dev)->gen >= 6) {
3314 		/*
3315 		 * RPS interrupts will get enabled/disabled on demand when RPS
3316 		 * itself is enabled/disabled.
3317 		 */
3318 		if (HAS_VEBOX(dev))
3319 			pm_irqs |= PM_VEBOX_USER_INTERRUPT;
3320 
3321 		dev_priv->pm_irq_mask = 0xffffffff;
3322 		GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs);
3323 	}
3324 }
3325 
3326 static int ironlake_irq_postinstall(struct drm_device *dev)
3327 {
3328 	struct drm_i915_private *dev_priv = dev->dev_private;
3329 	u32 display_mask, extra_mask;
3330 
3331 	if (INTEL_INFO(dev)->gen >= 7) {
3332 		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
3333 				DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
3334 				DE_PLANEB_FLIP_DONE_IVB |
3335 				DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
3336 		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
3337 			      DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB);
3338 	} else {
3339 		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
3340 				DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
3341 				DE_AUX_CHANNEL_A |
3342 				DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
3343 				DE_POISON);
3344 		extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
3345 				DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN;
3346 	}
3347 
3348 	dev_priv->irq_mask = ~display_mask;
3349 
3350 	I915_WRITE(HWSTAM, 0xeffe);
3351 
3352 	ibx_irq_pre_postinstall(dev);
3353 
3354 	GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);
3355 
3356 	gen5_gt_irq_postinstall(dev);
3357 
3358 	ibx_irq_postinstall(dev);
3359 
3360 	if (IS_IRONLAKE_M(dev)) {
3361 		/* Enable PCU event interrupts
3362 		 *
3363 		 * spinlocking not required here for correctness since interrupt
3364 		 * setup is guaranteed to run in single-threaded context. But we
3365 		 * need it to make the assert_spin_locked happy. */
3366 		spin_lock_irq(&dev_priv->irq_lock);
3367 		ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
3368 		spin_unlock_irq(&dev_priv->irq_lock);
3369 	}
3370 
3371 	return 0;
3372 }
3373 
3374 static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv)
3375 {
3376 	u32 pipestat_mask;
3377 	u32 iir_mask;
3378 	enum pipe pipe;
3379 
3380 	pipestat_mask = PIPESTAT_INT_STATUS_MASK |
3381 			PIPE_FIFO_UNDERRUN_STATUS;
3382 
3383 	for_each_pipe(dev_priv, pipe)
3384 		I915_WRITE(PIPESTAT(pipe), pipestat_mask);
3385 	POSTING_READ(PIPESTAT(PIPE_A));
3386 
3387 	pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
3388 			PIPE_CRC_DONE_INTERRUPT_STATUS;
3389 
3390 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3391 	for_each_pipe(dev_priv, pipe)
3392 		      i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
3393 
3394 	iir_mask = I915_DISPLAY_PORT_INTERRUPT |
3395 		   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3396 		   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
3397 	if (IS_CHERRYVIEW(dev_priv))
3398 		iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
3399 	dev_priv->irq_mask &= ~iir_mask;
3400 
3401 	I915_WRITE(VLV_IIR, iir_mask);
3402 	I915_WRITE(VLV_IIR, iir_mask);
3403 	I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3404 	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3405 	POSTING_READ(VLV_IMR);
3406 }
3407 
3408 static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv)
3409 {
3410 	u32 pipestat_mask;
3411 	u32 iir_mask;
3412 	enum pipe pipe;
3413 
3414 	iir_mask = I915_DISPLAY_PORT_INTERRUPT |
3415 		   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3416 		   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
3417 	if (IS_CHERRYVIEW(dev_priv))
3418 		iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
3419 
3420 	dev_priv->irq_mask |= iir_mask;
3421 	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3422 	I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3423 	I915_WRITE(VLV_IIR, iir_mask);
3424 	I915_WRITE(VLV_IIR, iir_mask);
3425 	POSTING_READ(VLV_IIR);
3426 
3427 	pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
3428 			PIPE_CRC_DONE_INTERRUPT_STATUS;
3429 
3430 	i915_disable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3431 	for_each_pipe(dev_priv, pipe)
3432 		i915_disable_pipestat(dev_priv, pipe, pipestat_mask);
3433 
3434 	pipestat_mask = PIPESTAT_INT_STATUS_MASK |
3435 			PIPE_FIFO_UNDERRUN_STATUS;
3436 
3437 	for_each_pipe(dev_priv, pipe)
3438 		I915_WRITE(PIPESTAT(pipe), pipestat_mask);
3439 	POSTING_READ(PIPESTAT(PIPE_A));
3440 }
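
/*
 * VLV_IIR is written twice in both the install and uninstall paths above:
 * IIR on these platforms can hold a second, queued event behind the one
 * currently visible, so the first write clears the visible bits and the
 * second catches anything that latched in between.
 */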
3441 
3442 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
3443 {
3444 	assert_spin_locked(&dev_priv->irq_lock);
3445 
3446 	if (dev_priv->display_irqs_enabled)
3447 		return;
3448 
3449 	dev_priv->display_irqs_enabled = true;
3450 
3451 	if (intel_irqs_enabled(dev_priv))
3452 		valleyview_display_irqs_install(dev_priv);
3453 }
3454 
3455 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
3456 {
3457 	assert_spin_locked(&dev_priv->irq_lock);
3458 
3459 	if (!dev_priv->display_irqs_enabled)
3460 		return;
3461 
3462 	dev_priv->display_irqs_enabled = false;
3463 
3464 	if (intel_irqs_enabled(dev_priv))
3465 		valleyview_display_irqs_uninstall(dev_priv);
3466 }
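
/*
 * Illustrative sketch (not part of this file): callers of the two helpers
 * above must already hold dev_priv->irq_lock, matching their
 * assert_spin_locked() checks. A hypothetical caller would look like:
 */
#if 0
	spin_lock_irq(&dev_priv->irq_lock);
	if (enable)
		valleyview_enable_display_irqs(dev_priv);
	else
		valleyview_disable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);
#endif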
3467 
3468 static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
3469 {
3470 	dev_priv->irq_mask = ~0;
3471 
3472 	I915_WRITE(PORT_HOTPLUG_EN, 0);
3473 	POSTING_READ(PORT_HOTPLUG_EN);
3474 
3475 	I915_WRITE(VLV_IIR, 0xffffffff);
3476 	I915_WRITE(VLV_IIR, 0xffffffff);
3477 	I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3478 	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3479 	POSTING_READ(VLV_IMR);
3480 
3481 	/* Interrupt setup is already guaranteed to be single-threaded, this is
3482 	 * just to make the assert_spin_locked check happy. */
3483 	spin_lock_irq(&dev_priv->irq_lock);
3484 	if (dev_priv->display_irqs_enabled)
3485 		valleyview_display_irqs_install(dev_priv);
3486 	spin_unlock_irq(&dev_priv->irq_lock);
3487 }
3488 
3489 static int valleyview_irq_postinstall(struct drm_device *dev)
3490 {
3491 	struct drm_i915_private *dev_priv = dev->dev_private;
3492 
3493 	vlv_display_irq_postinstall(dev_priv);
3494 
3495 	gen5_gt_irq_postinstall(dev);
3496 
3497 	/* ack & enable invalid PTE error interrupts */
3498 #if 0 /* FIXME: add support to irq handler for checking these bits */
3499 	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
3500 	I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
3501 #endif
3502 
3503 	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
3504 
3505 	return 0;
3506 }
3507 
3508 static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
3509 {
3510 	/* These are interrupts we'll toggle with the ring mask register */
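	/*
	 * Index n of the array below feeds GT IIR bank n: 0 covers the render
	 * and blitter rings, 1 the two video rings, 2 the PM/RPS interrupts
	 * (left masked here, see the comment below) and 3 the VEBOX ring.
	 */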
3511 	uint32_t gt_interrupts[] = {
3512 		GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
3513 			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
3514 			GT_RENDER_L3_PARITY_ERROR_INTERRUPT |
3515 			GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
3516 			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
3517 		GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3518 			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3519 			GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
3520 			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
3521 		0,
3522 		GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
3523 			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
3524 		};
3525 
3526 	dev_priv->pm_irq_mask = 0xffffffff;
3527 	GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
3528 	GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
3529 	/*
3530 	 * RPS interrupts will get enabled/disabled on demand when RPS itself
3531 	 * is enabled/disabled.
3532 	 */
3533 	GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, 0);
3534 	GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
3535 }
3536 
3537 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
3538 {
3539 	uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
3540 	uint32_t de_pipe_enables;
3541 	int pipe;
3542 	u32 aux_en = GEN8_AUX_CHANNEL_A;
3543 
3544 	if (IS_GEN9(dev_priv)) {
3545 		de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE |
3546 				  GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
3547 		aux_en |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
3548 			GEN9_AUX_CHANNEL_D;
3549 	} else
3550 		de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE |
3551 				  GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
3552 
3553 	de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
3554 					   GEN8_PIPE_FIFO_UNDERRUN;
3555 
3556 	dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
3557 	dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
3558 	dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;
3559 
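	/*
	 * Only program pipes whose display power well is currently enabled;
	 * registers in a powered-down well are not writable.
	 */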
3560 	for_each_pipe(dev_priv, pipe)
3561 		if (intel_display_power_is_enabled(dev_priv,
3562 				POWER_DOMAIN_PIPE(pipe)))
3563 			GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
3564 					  dev_priv->de_irq_mask[pipe],
3565 					  de_pipe_enables);
3566 
3567 	GEN5_IRQ_INIT(GEN8_DE_PORT_, ~aux_en, aux_en);
3568 }
3569 
3570 static int gen8_irq_postinstall(struct drm_device *dev)
3571 {
3572 	struct drm_i915_private *dev_priv = dev->dev_private;
3573 
3574 	ibx_irq_pre_postinstall(dev);
3575 
3576 	gen8_gt_irq_postinstall(dev_priv);
3577 	gen8_de_irq_postinstall(dev_priv);
3578 
3579 	ibx_irq_postinstall(dev);
3580 
3581 	I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
3582 	POSTING_READ(GEN8_MASTER_IRQ);
3583 
3584 	return 0;
3585 }
3586 
3587 static int cherryview_irq_postinstall(struct drm_device *dev)
3588 {
3589 	struct drm_i915_private *dev_priv = dev->dev_private;
3590 
3591 	vlv_display_irq_postinstall(dev_priv);
3592 
3593 	gen8_gt_irq_postinstall(dev_priv);
3594 
3595 	I915_WRITE(GEN8_MASTER_IRQ, MASTER_INTERRUPT_ENABLE);
3596 	POSTING_READ(GEN8_MASTER_IRQ);
3597 
3598 	return 0;
3599 }
3600 
3601 static void gen8_irq_uninstall(struct drm_device *dev)
3602 {
3603 	struct drm_i915_private *dev_priv = dev->dev_private;
3604 
3605 	if (!dev_priv)
3606 		return;
3607 
3608 	gen8_irq_reset(dev);
3609 }
3610 
3611 static void vlv_display_irq_uninstall(struct drm_i915_private *dev_priv)
3612 {
3613 	/* Interrupt setup is already guaranteed to be single-threaded, this is
3614 	 * just to make the assert_spin_locked check happy. */
3615 	spin_lock_irq(&dev_priv->irq_lock);
3616 	if (dev_priv->display_irqs_enabled)
3617 		valleyview_display_irqs_uninstall(dev_priv);
3618 	spin_unlock_irq(&dev_priv->irq_lock);
3619 
3620 	vlv_display_irq_reset(dev_priv);
3621 
3622 	dev_priv->irq_mask = ~0;
3623 }
3624 
3625 static void valleyview_irq_uninstall(struct drm_device *dev)
3626 {
3627 	struct drm_i915_private *dev_priv = dev->dev_private;
3628 
3629 	if (!dev_priv)
3630 		return;
3631 
3632 	I915_WRITE(VLV_MASTER_IER, 0);
3633 
3634 	gen5_gt_irq_reset(dev);
3635 
3636 	I915_WRITE(HWSTAM, 0xffffffff);
3637 
3638 	vlv_display_irq_uninstall(dev_priv);
3639 }
3640 
3641 static void cherryview_irq_uninstall(struct drm_device *dev)
3642 {
3643 	struct drm_i915_private *dev_priv = dev->dev_private;
3644 
3645 	if (!dev_priv)
3646 		return;
3647 
3648 	I915_WRITE(GEN8_MASTER_IRQ, 0);
3649 	POSTING_READ(GEN8_MASTER_IRQ);
3650 
3651 	gen8_gt_irq_reset(dev_priv);
3652 
3653 	GEN5_IRQ_RESET(GEN8_PCU_);
3654 
3655 	vlv_display_irq_uninstall(dev_priv);
3656 }
3657 
3658 static void ironlake_irq_uninstall(struct drm_device *dev)
3659 {
3660 	struct drm_i915_private *dev_priv = dev->dev_private;
3661 
3662 	if (!dev_priv)
3663 		return;
3664 
3665 	ironlake_irq_reset(dev);
3666 }
3667 
3668 static void i8xx_irq_preinstall(struct drm_device * dev)
3669 {
3670 	struct drm_i915_private *dev_priv = dev->dev_private;
3671 	int pipe;
3672 
3673 	for_each_pipe(dev_priv, pipe)
3674 		I915_WRITE(PIPESTAT(pipe), 0);
3675 	I915_WRITE16(IMR, 0xffff);
3676 	I915_WRITE16(IER, 0x0);
3677 	POSTING_READ16(IER);
3678 }
3679 
3680 static int i8xx_irq_postinstall(struct drm_device *dev)
3681 {
3682 	struct drm_i915_private *dev_priv = dev->dev_private;
3683 
3684 	I915_WRITE16(EMR,
3685 		     ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
3686 
3687 	/* Unmask the interrupts that we always want on. */
3688 	dev_priv->irq_mask =
3689 		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3690 		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3691 		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3692 		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
3693 		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
3694 	I915_WRITE16(IMR, dev_priv->irq_mask);
3695 
3696 	I915_WRITE16(IER,
3697 		     I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3698 		     I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3699 		     I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
3700 		     I915_USER_INTERRUPT);
3701 	POSTING_READ16(IER);
3702 
3703 	/* Interrupt setup is already guaranteed to be single-threaded, this is
3704 	 * just to make the assert_spin_locked check happy. */
3705 	spin_lock_irq(&dev_priv->irq_lock);
3706 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3707 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
3708 	spin_unlock_irq(&dev_priv->irq_lock);
3709 
3710 	return 0;
3711 }
3712 
3713 /*
3714  * Returns true when a page flip has completed.
3715  */
3716 static bool i8xx_handle_vblank(struct drm_device *dev,
3717 			       int plane, int pipe, u32 iir)
3718 {
3719 	struct drm_i915_private *dev_priv = dev->dev_private;
3720 	u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
3721 
3722 	if (!intel_pipe_handle_vblank(dev, pipe))
3723 		return false;
3724 
3725 	if ((iir & flip_pending) == 0)
3726 		goto check_page_flip;
3727 
3728 	/* We detect FlipDone by looking for the change in PendingFlip from '1'
3729 	 * to '0' on the following vblank, i.e. IIR has the PendingFlip
3730 	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
3731 	 * the flip is completed (no longer pending). Since this doesn't raise
3732 	 * an interrupt per se, we watch for the change at vblank.
3733 	 */
3734 	if (I915_READ16(ISR) & flip_pending)
3735 		goto check_page_flip;
3736 
3737 	intel_prepare_page_flip(dev, plane);
3738 	intel_finish_page_flip(dev, pipe);
3739 	return true;
3740 
3741 check_page_flip:
3742 	intel_check_page_flip(dev, pipe);
3743 	return false;
3744 }
3745 
3746 static irqreturn_t i8xx_irq_handler(int irq, void *arg)
3747 {
3748 	struct drm_device *dev = arg;
3749 	struct drm_i915_private *dev_priv = dev->dev_private;
3750 	u16 iir, new_iir;
3751 	u32 pipe_stats[2];
3752 	int pipe;
3753 	u16 flip_mask =
3754 		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3755 		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
3756 
3757 	iir = I915_READ16(IIR);
3758 	if (iir == 0)
3759 		return IRQ_NONE;
3760 
3761 	while (iir & ~flip_mask) {
3762 		/* Can't rely on pipestat interrupt bit in iir as it might
3763 		 * have been cleared after the pipestat interrupt was received.
3764 		 * It doesn't set the bit in iir again, but it still produces
3765 		 * interrupts (for non-MSI).
3766 		 */
3767 		spin_lock(&dev_priv->irq_lock);
3768 		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
3769 			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
3770 
3771 		for_each_pipe(dev_priv, pipe) {
3772 			int reg = PIPESTAT(pipe);
3773 			pipe_stats[pipe] = I915_READ(reg);
3774 
3775 			/*
3776 			 * Clear the PIPE*STAT regs before the IIR
3777 			 */
3778 			if (pipe_stats[pipe] & 0x8000ffff)
3779 				I915_WRITE(reg, pipe_stats[pipe]);
3780 		}
3781 		spin_unlock(&dev_priv->irq_lock);
3782 
3783 		I915_WRITE16(IIR, iir & ~flip_mask);
3784 		new_iir = I915_READ16(IIR); /* Flush posted writes */
3785 
3786 		if (iir & I915_USER_INTERRUPT)
3787 			notify_ring(dev, &dev_priv->ring[RCS]);
3788 
3789 		for_each_pipe(dev_priv, pipe) {
3790 			int plane = pipe;
3791 			if (HAS_FBC(dev))
3792 				plane = !plane;
3793 
3794 			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
3795 			    i8xx_handle_vblank(dev, plane, pipe, iir))
3796 				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
3797 
3798 			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
3799 				i9xx_pipe_crc_irq_handler(dev, pipe);
3800 
3801 			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
3802 				intel_cpu_fifo_underrun_irq_handler(dev_priv,
3803 								    pipe);
3804 		}
3805 
3806 		iir = new_iir;
3807 	}
3808 
3809 	return IRQ_HANDLED;
3810 }
3811 
3812 static void i8xx_irq_uninstall(struct drm_device * dev)
3813 {
3814 	struct drm_i915_private *dev_priv = dev->dev_private;
3815 	int pipe;
3816 
3817 	for_each_pipe(dev_priv, pipe) {
3818 		/* Clear enable bits; then clear status bits */
3819 		I915_WRITE(PIPESTAT(pipe), 0);
3820 		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
3821 	}
3822 	I915_WRITE16(IMR, 0xffff);
3823 	I915_WRITE16(IER, 0x0);
3824 	I915_WRITE16(IIR, I915_READ16(IIR));
3825 }
3826 
3827 static void i915_irq_preinstall(struct drm_device * dev)
3828 {
3829 	struct drm_i915_private *dev_priv = dev->dev_private;
3830 	int pipe;
3831 
3832 	if (I915_HAS_HOTPLUG(dev)) {
3833 		I915_WRITE(PORT_HOTPLUG_EN, 0);
3834 		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3835 	}
3836 
3837 	I915_WRITE16(HWSTAM, 0xeffe);
3838 	for_each_pipe(dev_priv, pipe)
3839 		I915_WRITE(PIPESTAT(pipe), 0);
3840 	I915_WRITE(IMR, 0xffffffff);
3841 	I915_WRITE(IER, 0x0);
3842 	POSTING_READ(IER);
3843 }
3844 
3845 static int i915_irq_postinstall(struct drm_device *dev)
3846 {
3847 	struct drm_i915_private *dev_priv = dev->dev_private;
3848 	u32 enable_mask;
3849 
3850 	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
3851 
3852 	/* Unmask the interrupts that we always want on. */
3853 	dev_priv->irq_mask =
3854 		~(I915_ASLE_INTERRUPT |
3855 		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3856 		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3857 		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3858 		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
3859 		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
3860 
3861 	enable_mask =
3862 		I915_ASLE_INTERRUPT |
3863 		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3864 		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3865 		I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
3866 		I915_USER_INTERRUPT;
3867 
3868 	if (I915_HAS_HOTPLUG(dev)) {
3869 		I915_WRITE(PORT_HOTPLUG_EN, 0);
3870 		POSTING_READ(PORT_HOTPLUG_EN);
3871 
3872 		/* Enable in IER... */
3873 		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
3874 		/* and unmask in IMR */
3875 		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
3876 	}
3877 
3878 	I915_WRITE(IMR, dev_priv->irq_mask);
3879 	I915_WRITE(IER, enable_mask);
3880 	POSTING_READ(IER);
3881 
3882 	i915_enable_asle_pipestat(dev);
3883 
3884 	/* Interrupt setup is already guaranteed to be single-threaded, this is
3885 	 * just to make the assert_spin_locked check happy. */
3886 	spin_lock_irq(&dev_priv->irq_lock);
3887 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3888 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
3889 	spin_unlock_irq(&dev_priv->irq_lock);
3890 
3891 	return 0;
3892 }
3893 
3894 /*
3895  * Returns true when a page flip has completed.
3896  */
3897 static bool i915_handle_vblank(struct drm_device *dev,
3898 			       int plane, int pipe, u32 iir)
3899 {
3900 	struct drm_i915_private *dev_priv = dev->dev_private;
3901 	u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
3902 
3903 	if (!intel_pipe_handle_vblank(dev, pipe))
3904 		return false;
3905 
3906 	if ((iir & flip_pending) == 0)
3907 		goto check_page_flip;
3908 
3909 	/* We detect FlipDone by looking for the change in PendingFlip from '1'
3910 	 * to '0' on the following vblank, i.e. IIR has the PendingFlip
3911 	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
3912 	 * the flip is completed (no longer pending). Since this doesn't raise
3913 	 * an interrupt per se, we watch for the change at vblank.
3914 	 */
3915 	if (I915_READ(ISR) & flip_pending)
3916 		goto check_page_flip;
3917 
3918 	intel_prepare_page_flip(dev, plane);
3919 	intel_finish_page_flip(dev, pipe);
3920 	return true;
3921 
3922 check_page_flip:
3923 	intel_check_page_flip(dev, pipe);
3924 	return false;
3925 }
3926 
3927 static irqreturn_t i915_irq_handler(int irq, void *arg)
3928 {
3929 	struct drm_device *dev = arg;
3930 	struct drm_i915_private *dev_priv = dev->dev_private;
3931 	u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
3932 	u32 flip_mask =
3933 		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3934 		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
3935 	int pipe, ret = IRQ_NONE;
3936 
3937 	iir = I915_READ(IIR);
3938 	do {
3939 		bool irq_received = (iir & ~flip_mask) != 0;
3940 		bool blc_event = false;
3941 
3942 		/* Can't rely on pipestat interrupt bit in iir as it might
3943 		 * have been cleared after the pipestat interrupt was received.
3944 		 * It doesn't set the bit in iir again, but it still produces
3945 		 * interrupts (for non-MSI).
3946 		 */
3947 		spin_lock(&dev_priv->irq_lock);
3948 		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
3949 			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
3950 
3951 		for_each_pipe(dev_priv, pipe) {
3952 			int reg = PIPESTAT(pipe);
3953 			pipe_stats[pipe] = I915_READ(reg);
3954 
3955 			/* Clear the PIPE*STAT regs before the IIR */
3956 			if (pipe_stats[pipe] & 0x8000ffff) {
3957 				I915_WRITE(reg, pipe_stats[pipe]);
3958 				irq_received = true;
3959 			}
3960 		}
3961 		spin_unlock(&dev_priv->irq_lock);
3962 
3963 		if (!irq_received)
3964 			break;
3965 
3966 		/* Consume port.  Then clear IIR or we'll miss events */
3967 		if (I915_HAS_HOTPLUG(dev) &&
3968 		    iir & I915_DISPLAY_PORT_INTERRUPT)
3969 			i9xx_hpd_irq_handler(dev);
3970 
3971 		I915_WRITE(IIR, iir & ~flip_mask);
3972 		new_iir = I915_READ(IIR); /* Flush posted writes */
3973 
3974 		if (iir & I915_USER_INTERRUPT)
3975 			notify_ring(dev, &dev_priv->ring[RCS]);
3976 
3977 		for_each_pipe(dev_priv, pipe) {
3978 			int plane = pipe;
3979 			if (HAS_FBC(dev))
3980 				plane = !plane;
3981 
3982 			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
3983 			    i915_handle_vblank(dev, plane, pipe, iir))
3984 				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
3985 
3986 			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
3987 				blc_event = true;
3988 
3989 			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
3990 				i9xx_pipe_crc_irq_handler(dev, pipe);
3991 
3992 			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
3993 				intel_cpu_fifo_underrun_irq_handler(dev_priv,
3994 								    pipe);
3995 		}
3996 
3997 		if (blc_event || (iir & I915_ASLE_INTERRUPT))
3998 			intel_opregion_asle_intr(dev);
3999 
4000 		/* With MSI, interrupts are only generated when iir
4001 		 * transitions from zero to nonzero.  If another bit got
4002 		 * set while we were handling the existing iir bits, then
4003 		 * we would never get another interrupt.
4004 		 *
4005 		 * This is fine on non-MSI as well, as if we hit this path
4006 		 * we avoid exiting the interrupt handler only to generate
4007 		 * another one.
4008 		 *
4009 		 * Note that for MSI this could cause a stray interrupt report
4010 		 * if an interrupt landed in the time between writing IIR and
4011 		 * the posting read.  This should be rare enough to never
4012 		 * trigger the 99% of 100,000 interrupts test for disabling
4013 		 * stray interrupts.
4014 		 */
4015 		ret = IRQ_HANDLED;
4016 		iir = new_iir;
4017 	} while (iir & ~flip_mask);
4018 
4019 	return ret;
4020 }
4021 
4022 static void i915_irq_uninstall(struct drm_device * dev)
4023 {
4024 	struct drm_i915_private *dev_priv = dev->dev_private;
4025 	int pipe;
4026 
4027 	if (I915_HAS_HOTPLUG(dev)) {
4028 		I915_WRITE(PORT_HOTPLUG_EN, 0);
4029 		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4030 	}
4031 
4032 	I915_WRITE16(HWSTAM, 0xffff);
4033 	for_each_pipe(dev_priv, pipe) {
4034 		/* Clear enable bits; then clear status bits */
4035 		I915_WRITE(PIPESTAT(pipe), 0);
4036 		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
4037 	}
4038 	I915_WRITE(IMR, 0xffffffff);
4039 	I915_WRITE(IER, 0x0);
4040 
4041 	I915_WRITE(IIR, I915_READ(IIR));
4042 }
4043 
4044 static void i965_irq_preinstall(struct drm_device * dev)
4045 {
4046 	struct drm_i915_private *dev_priv = dev->dev_private;
4047 	int pipe;
4048 
4049 	I915_WRITE(PORT_HOTPLUG_EN, 0);
4050 	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4051 
4052 	I915_WRITE(HWSTAM, 0xeffe);
4053 	for_each_pipe(dev_priv, pipe)
4054 		I915_WRITE(PIPESTAT(pipe), 0);
4055 	I915_WRITE(IMR, 0xffffffff);
4056 	I915_WRITE(IER, 0x0);
4057 	POSTING_READ(IER);
4058 }
4059 
4060 static int i965_irq_postinstall(struct drm_device *dev)
4061 {
4062 	struct drm_i915_private *dev_priv = dev->dev_private;
4063 	u32 enable_mask;
4064 	u32 error_mask;
4065 
4066 	/* Unmask the interrupts that we always want on. */
4067 	dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
4068 			       I915_DISPLAY_PORT_INTERRUPT |
4069 			       I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4070 			       I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4071 			       I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4072 			       I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
4073 			       I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
4074 
4075 	enable_mask = ~dev_priv->irq_mask;
4076 	enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4077 			 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
4078 	enable_mask |= I915_USER_INTERRUPT;
4079 
4080 	if (IS_G4X(dev))
4081 		enable_mask |= I915_BSD_USER_INTERRUPT;
4082 
4083 	/* Interrupt setup is already guaranteed to be single-threaded, this is
4084 	 * just to make the assert_spin_locked check happy. */
4085 	spin_lock_irq(&dev_priv->irq_lock);
4086 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
4087 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4088 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4089 	spin_unlock_irq(&dev_priv->irq_lock);
4090 
4091 	/*
4092 	 * Enable some error detection, note the instruction error mask
4093 	 * bit is reserved, so we leave it masked.
4094 	 */
4095 	if (IS_G4X(dev)) {
4096 		error_mask = ~(GM45_ERROR_PAGE_TABLE |
4097 			       GM45_ERROR_MEM_PRIV |
4098 			       GM45_ERROR_CP_PRIV |
4099 			       I915_ERROR_MEMORY_REFRESH);
4100 	} else {
4101 		error_mask = ~(I915_ERROR_PAGE_TABLE |
4102 			       I915_ERROR_MEMORY_REFRESH);
4103 	}
4104 	I915_WRITE(EMR, error_mask);
4105 
4106 	I915_WRITE(IMR, dev_priv->irq_mask);
4107 	I915_WRITE(IER, enable_mask);
4108 	POSTING_READ(IER);
4109 
4110 	I915_WRITE(PORT_HOTPLUG_EN, 0);
4111 	POSTING_READ(PORT_HOTPLUG_EN);
4112 
4113 	i915_enable_asle_pipestat(dev);
4114 
4115 	return 0;
4116 }
4117 
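/*
 * Hotplug setup hook installed as dev_priv->display.hpd_irq_setup; its
 * callers (intel_hpd_init() and intel_hpd_irq_reenable_work() below) hold
 * dev_priv->irq_lock, which the assert_spin_locked() below depends on.
 */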
4118 static void i915_hpd_irq_setup(struct drm_device *dev)
4119 {
4120 	struct drm_i915_private *dev_priv = dev->dev_private;
4121 	struct intel_encoder *intel_encoder;
4122 	u32 hotplug_en;
4123 
4124 	assert_spin_locked(&dev_priv->irq_lock);
4125 
4126 	if (I915_HAS_HOTPLUG(dev)) {
4127 		hotplug_en = I915_READ(PORT_HOTPLUG_EN);
4128 		hotplug_en &= ~HOTPLUG_INT_EN_MASK;
4129 		/* Note HDMI and DP share hotplug bits */
4130 		/* enable bits are the same for all generations */
4131 		for_each_intel_encoder(dev, intel_encoder)
4132 			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
4133 				hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
4134 		/* Programming the CRT detection parameters tends to generate
4135 		 * a spurious hotplug event about three seconds later.  So just
4136 		 * do it once.
4137 		 */
4138 		if (IS_G4X(dev))
4139 			hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
4140 		hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
4141 		hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
4142 
4143 		/* Ignore TV since it's buggy */
4144 		I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
4145 	}
4146 }
4147 
4148 static irqreturn_t i965_irq_handler(int irq, void *arg)
4149 {
4150 	struct drm_device *dev = arg;
4151 	struct drm_i915_private *dev_priv = dev->dev_private;
4152 	u32 iir, new_iir;
4153 	u32 pipe_stats[I915_MAX_PIPES];
4154 	int ret = IRQ_NONE, pipe;
4155 	u32 flip_mask =
4156 		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4157 		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
4158 
4159 	iir = I915_READ(IIR);
4160 
4161 	for (;;) {
4162 		bool irq_received = (iir & ~flip_mask) != 0;
4163 		bool blc_event = false;
4164 
4165 		/* Can't rely on pipestat interrupt bit in iir as it might
4166 		 * have been cleared after the pipestat interrupt was received.
4167 		 * It doesn't set the bit in iir again, but it still produces
4168 		 * interrupts (for non-MSI).
4169 		 */
4170 		spin_lock(&dev_priv->irq_lock);
4171 		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
4172 			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
4173 
4174 		for_each_pipe(dev_priv, pipe) {
4175 			int reg = PIPESTAT(pipe);
4176 			pipe_stats[pipe] = I915_READ(reg);
4177 
4178 			/*
4179 			 * Clear the PIPE*STAT regs before the IIR
4180 			 */
4181 			if (pipe_stats[pipe] & 0x8000ffff) {
4182 				I915_WRITE(reg, pipe_stats[pipe]);
4183 				irq_received = true;
4184 			}
4185 		}
4186 		spin_unlock(&dev_priv->irq_lock);
4187 
4188 		if (!irq_received)
4189 			break;
4190 
4191 		ret = IRQ_HANDLED;
4192 
4193 		/* Consume port.  Then clear IIR or we'll miss events */
4194 		if (iir & I915_DISPLAY_PORT_INTERRUPT)
4195 			i9xx_hpd_irq_handler(dev);
4196 
4197 		I915_WRITE(IIR, iir & ~flip_mask);
4198 		new_iir = I915_READ(IIR); /* Flush posted writes */
4199 
4200 		if (iir & I915_USER_INTERRUPT)
4201 			notify_ring(dev, &dev_priv->ring[RCS]);
4202 		if (iir & I915_BSD_USER_INTERRUPT)
4203 			notify_ring(dev, &dev_priv->ring[VCS]);
4204 
4205 		for_each_pipe(dev_priv, pipe) {
4206 			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
4207 			    i915_handle_vblank(dev, pipe, pipe, iir))
4208 				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
4209 
4210 			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
4211 				blc_event = true;
4212 
4213 			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
4214 				i9xx_pipe_crc_irq_handler(dev, pipe);
4215 
4216 			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
4217 				intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
4218 		}
4219 
4220 		if (blc_event || (iir & I915_ASLE_INTERRUPT))
4221 			intel_opregion_asle_intr(dev);
4222 
4223 		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
4224 			gmbus_irq_handler(dev);
4225 
4226 		/* With MSI, interrupts are only generated when iir
4227 		 * transitions from zero to nonzero.  If another bit got
4228 		 * set while we were handling the existing iir bits, then
4229 		 * we would never get another interrupt.
4230 		 *
4231 		 * This is fine on non-MSI as well, as if we hit this path
4232 		 * we avoid exiting the interrupt handler only to generate
4233 		 * another one.
4234 		 *
4235 		 * Note that for MSI this could cause a stray interrupt report
4236 		 * if an interrupt landed in the time between writing IIR and
4237 		 * the posting read.  This should be rare enough to never
4238 		 * trigger the 99% of 100,000 interrupts test for disabling
4239 		 * stray interrupts.
4240 		 */
4241 		iir = new_iir;
4242 	}
4243 
4244 	return ret;
4245 }
4246 
4247 static void i965_irq_uninstall(struct drm_device * dev)
4248 {
4249 	struct drm_i915_private *dev_priv = dev->dev_private;
4250 	int pipe;
4251 
4252 	if (!dev_priv)
4253 		return;
4254 
4255 	I915_WRITE(PORT_HOTPLUG_EN, 0);
4256 	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4257 
4258 	I915_WRITE(HWSTAM, 0xffffffff);
4259 	for_each_pipe(dev_priv, pipe)
4260 		I915_WRITE(PIPESTAT(pipe), 0);
4261 	I915_WRITE(IMR, 0xffffffff);
4262 	I915_WRITE(IER, 0x0);
4263 
4264 	for_each_pipe(dev_priv, pipe)
4265 		I915_WRITE(PIPESTAT(pipe),
4266 			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
4267 	I915_WRITE(IIR, I915_READ(IIR));
4268 }
4269 
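/*
 * Delayed work that re-arms hotplug pins previously marked HPD_DISABLED:
 * each affected connector gets its polling mode restored and the hardware
 * is reprogrammed via the platform's hpd_irq_setup hook.
 */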
4270 static void intel_hpd_irq_reenable_work(struct work_struct *work)
4271 {
4272 	struct drm_i915_private *dev_priv =
4273 		container_of(work, typeof(*dev_priv),
4274 			     hotplug_reenable_work.work);
4275 	struct drm_device *dev = dev_priv->dev;
4276 	struct drm_mode_config *mode_config = &dev->mode_config;
4277 	int i;
4278 
4279 	intel_runtime_pm_get(dev_priv);
4280 
4281 	spin_lock_irq(&dev_priv->irq_lock);
4282 	for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
4283 		struct drm_connector *connector;
4284 
4285 		if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED)
4286 			continue;
4287 
4288 		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
4289 
4290 		list_for_each_entry(connector, &mode_config->connector_list, head) {
4291 			struct intel_connector *intel_connector = to_intel_connector(connector);
4292 
4293 			if (intel_connector->encoder->hpd_pin == i) {
4294 				if (connector->polled != intel_connector->polled)
4295 					DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
4296 							 connector->name);
4297 				connector->polled = intel_connector->polled;
4298 				if (!connector->polled)
4299 					connector->polled = DRM_CONNECTOR_POLL_HPD;
4300 			}
4301 		}
4302 	}
4303 	if (dev_priv->display.hpd_irq_setup)
4304 		dev_priv->display.hpd_irq_setup(dev);
4305 	spin_unlock_irq(&dev_priv->irq_lock);
4306 
4307 	intel_runtime_pm_put(dev_priv);
4308 }
4309 
4310 /**
4311  * intel_irq_init - initializes irq support
4312  * @dev_priv: i915 device instance
4313  *
4314  * This function initializes all the irq support including work items, timers
4315  * and all the vtables. It does not setup the interrupt itself though.
4316  */
4317 void intel_irq_init(struct drm_i915_private *dev_priv)
4318 {
4319 	struct drm_device *dev = dev_priv->dev;
4320 
4321 	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
4322 	INIT_WORK(&dev_priv->dig_port_work, i915_digport_work_func);
4323 	INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
4324 	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
4325 	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
4326 
4327 	/* Let's track the enabled rps events */
4328 	if (IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
4329 		/* WaGsvRC0ResidencyMethod:vlv */
4330 		dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
4331 	else
4332 		dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
4333 
4334 	setup_timer(&dev_priv->gpu_error.hangcheck_timer,
4335 		    i915_hangcheck_elapsed,
4336 		    (unsigned long) dev);
4337 	INIT_DELAYED_WORK(&dev_priv->hotplug_reenable_work,
4338 			  intel_hpd_irq_reenable_work);
4339 
4340 	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
4341 
4342 	if (IS_GEN2(dev_priv)) {
4343 		dev->max_vblank_count = 0;
4344 		dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
4345 	} else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
4346 		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
4347 		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
4348 	} else {
4349 		dev->driver->get_vblank_counter = i915_get_vblank_counter;
4350 		dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
4351 	}
4352 
4353 	/*
4354 	 * Opt out of the vblank disable timer on everything except gen2.
4355 	 * Gen2 doesn't have a hardware frame counter and so depends on
4356 	 * vblank interrupts to produce sane vblank sequence numbers.
4357 	 */
4358 	if (!IS_GEN2(dev_priv))
4359 		dev->vblank_disable_immediate = true;
4360 
4361 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
4362 		dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
4363 		dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
4364 	}
4365 
4366 	if (IS_CHERRYVIEW(dev_priv)) {
4367 		dev->driver->irq_handler = cherryview_irq_handler;
4368 		dev->driver->irq_preinstall = cherryview_irq_preinstall;
4369 		dev->driver->irq_postinstall = cherryview_irq_postinstall;
4370 		dev->driver->irq_uninstall = cherryview_irq_uninstall;
4371 		dev->driver->enable_vblank = valleyview_enable_vblank;
4372 		dev->driver->disable_vblank = valleyview_disable_vblank;
4373 		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4374 	} else if (IS_VALLEYVIEW(dev_priv)) {
4375 		dev->driver->irq_handler = valleyview_irq_handler;
4376 		dev->driver->irq_preinstall = valleyview_irq_preinstall;
4377 		dev->driver->irq_postinstall = valleyview_irq_postinstall;
4378 		dev->driver->irq_uninstall = valleyview_irq_uninstall;
4379 		dev->driver->enable_vblank = valleyview_enable_vblank;
4380 		dev->driver->disable_vblank = valleyview_disable_vblank;
4381 		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4382 	} else if (INTEL_INFO(dev_priv)->gen >= 8) {
4383 		dev->driver->irq_handler = gen8_irq_handler;
4384 		dev->driver->irq_preinstall = gen8_irq_reset;
4385 		dev->driver->irq_postinstall = gen8_irq_postinstall;
4386 		dev->driver->irq_uninstall = gen8_irq_uninstall;
4387 		dev->driver->enable_vblank = gen8_enable_vblank;
4388 		dev->driver->disable_vblank = gen8_disable_vblank;
4389 		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
4390 	} else if (HAS_PCH_SPLIT(dev)) {
4391 		dev->driver->irq_handler = ironlake_irq_handler;
4392 		dev->driver->irq_preinstall = ironlake_irq_reset;
4393 		dev->driver->irq_postinstall = ironlake_irq_postinstall;
4394 		dev->driver->irq_uninstall = ironlake_irq_uninstall;
4395 		dev->driver->enable_vblank = ironlake_enable_vblank;
4396 		dev->driver->disable_vblank = ironlake_disable_vblank;
4397 		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
4398 	} else {
4399 		if (INTEL_INFO(dev_priv)->gen == 2) {
4400 			dev->driver->irq_preinstall = i8xx_irq_preinstall;
4401 			dev->driver->irq_postinstall = i8xx_irq_postinstall;
4402 			dev->driver->irq_handler = i8xx_irq_handler;
4403 			dev->driver->irq_uninstall = i8xx_irq_uninstall;
4404 		} else if (INTEL_INFO(dev_priv)->gen == 3) {
4405 			dev->driver->irq_preinstall = i915_irq_preinstall;
4406 			dev->driver->irq_postinstall = i915_irq_postinstall;
4407 			dev->driver->irq_uninstall = i915_irq_uninstall;
4408 			dev->driver->irq_handler = i915_irq_handler;
4409 			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4410 		} else {
4411 			dev->driver->irq_preinstall = i965_irq_preinstall;
4412 			dev->driver->irq_postinstall = i965_irq_postinstall;
4413 			dev->driver->irq_uninstall = i965_irq_uninstall;
4414 			dev->driver->irq_handler = i965_irq_handler;
4415 			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4416 		}
4417 		dev->driver->enable_vblank = i915_enable_vblank;
4418 		dev->driver->disable_vblank = i915_disable_vblank;
4419 	}
4420 }
4421 
4422 /**
4423  * intel_hpd_init - initializes and enables hpd support
4424  * @dev_priv: i915 device instance
4425  *
4426  * This function enables the hotplug support. It requires that interrupts have
4427  * already been enabled with intel_irq_install(). From this point on hotplug and
4428  * polling requests can run concurrently with other code, so locking rules must be
4429  * obeyed.
4430  *
4431  * This is a separate step from interrupt enabling to simplify the locking rules
4432  * in the driver load and resume code.
4433  */
4434 void intel_hpd_init(struct drm_i915_private *dev_priv)
4435 {
4436 	struct drm_device *dev = dev_priv->dev;
4437 	struct drm_mode_config *mode_config = &dev->mode_config;
4438 	struct drm_connector *connector;
4439 	int i;
4440 
4441 	for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
4442 		dev_priv->hpd_stats[i].hpd_cnt = 0;
4443 		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
4444 	}
4445 	list_for_each_entry(connector, &mode_config->connector_list, head) {
4446 		struct intel_connector *intel_connector = to_intel_connector(connector);
4447 		connector->polled = intel_connector->polled;
4448 		if (connector->encoder && !connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
4449 			connector->polled = DRM_CONNECTOR_POLL_HPD;
4450 		if (intel_connector->mst_port)
4451 			connector->polled = DRM_CONNECTOR_POLL_HPD;
4452 	}
4453 
4454 	/* Interrupt setup is already guaranteed to be single-threaded, this is
4455 	 * just to make the assert_spin_locked checks happy. */
4456 	spin_lock_irq(&dev_priv->irq_lock);
4457 	if (dev_priv->display.hpd_irq_setup)
4458 		dev_priv->display.hpd_irq_setup(dev);
4459 	spin_unlock_irq(&dev_priv->irq_lock);
4460 }
4461 
4462 /**
4463  * intel_irq_install - enables the hardware interrupt
4464  * @dev_priv: i915 device instance
4465  *
4466  * This function enables the hardware interrupt handling, but leaves hotplug
4467  * handling disabled. It is called after intel_irq_init().
4468  *
4469  * In the driver load and resume code we need working interrupts in a few places
4470  * but don't want to deal with the hassle of concurrent probe and hotplug
4471  * workers. Hence the split into this two-stage approach.
4472  */
4473 int intel_irq_install(struct drm_i915_private *dev_priv)
4474 {
4475 	/*
4476 	 * We enable some interrupt sources in our postinstall hooks, so mark
4477 	 * interrupts as enabled _before_ actually enabling them to avoid
4478 	 * special cases in our ordering checks.
4479 	 */
4480 	dev_priv->pm.irqs_enabled = true;
4481 
4482 	return drm_irq_install(dev_priv->dev, dev_priv->dev->pdev->irq);
4483 }
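
/*
 * Illustrative sketch (not part of this file) of the intended bring-up
 * order described in the comments above; the caller and error handling
 * are assumptions, not taken from this driver.
 */
#if 0
	intel_irq_init(dev_priv);		/* work items, timers, vtables */
	if (intel_irq_install(dev_priv) == 0)	/* enable the hardware interrupt */
		intel_hpd_init(dev_priv);	/* then enable hotplug handling */
	/* ... and on unload: */
	intel_irq_uninstall(dev_priv);
#endif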
4484 
4485 /**
4486  * intel_irq_uninstall - finalizes all irq handling
4487  * @dev_priv: i915 device instance
4488  *
4489  * This stops interrupt and hotplug handling and unregisters and frees all
4490  * resources acquired in the init functions.
4491  */
4492 void intel_irq_uninstall(struct drm_i915_private *dev_priv)
4493 {
4494 	drm_irq_uninstall(dev_priv->dev);
4495 	intel_hpd_cancel_work(dev_priv);
4496 	dev_priv->pm.irqs_enabled = false;
4497 }
4498 
4499 /**
4500  * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
4501  * @dev_priv: i915 device instance
4502  *
4503  * This function is used to disable interrupts at runtime, both in the runtime
4504  * pm and the system suspend/resume code.
4505  */
4506 void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
4507 {
4508 	dev_priv->dev->driver->irq_uninstall(dev_priv->dev);
4509 	dev_priv->pm.irqs_enabled = false;
4510 }
4511 
4512 /**
4513  * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
4514  * @dev_priv: i915 device instance
4515  *
4516  * This function is used to enable interrupts at runtime, both in the runtime
4517  * pm and the system suspend/resume code.
4518  */
4519 void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
4520 {
4521 	dev_priv->pm.irqs_enabled = true;
4522 	dev_priv->dev->driver->irq_preinstall(dev_priv->dev);
4523 	dev_priv->dev->driver->irq_postinstall(dev_priv->dev);
4524 }
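
/*
 * Illustrative sketch (not part of this file): the two runtime helpers above
 * are meant to be used as a pair around a low-power transition; the
 * suspend/resume call sites shown here are assumptions.
 */
#if 0
	intel_runtime_pm_disable_interrupts(dev_priv);	/* before suspending */
	/* ... device in a low-power state ... */
	intel_runtime_pm_enable_interrupts(dev_priv);	/* on resume */
#endif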
4525