1 /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
2  */
3 /*
4  * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
5  * All Rights Reserved.
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a
8  * copy of this software and associated documentation files (the
9  * "Software"), to deal in the Software without restriction, including
10  * without limitation the rights to use, copy, modify, merge, publish,
11  * distribute, sub license, and/or sell copies of the Software, and to
12  * permit persons to whom the Software is furnished to do so, subject to
13  * the following conditions:
14  *
15  * The above copyright notice and this permission notice (including the
16  * next paragraph) shall be included in all copies or substantial portions
17  * of the Software.
18  *
19  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22  * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26  *
27  */
28 
29 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30 
31 #include <linux/sysrq.h>
32 #include <linux/slab.h>
33 #include <linux/circ_buf.h>
34 #include <drm/drmP.h>
35 #include <drm/i915_drm.h>
36 #include "i915_drv.h"
37 #include "i915_trace.h"
38 #include "intel_drv.h"
39 
40 /**
41  * DOC: interrupt handling
42  *
43  * These functions provide the basic support for enabling and disabling the
44  * interrupt handling support. There's a lot more functionality in i915_irq.c
45  * and related files, but that will be described in separate chapters.
46  */
47 
48 static const u32 hpd_ilk[HPD_NUM_PINS] = {
49 	[HPD_PORT_A] = DE_DP_A_HOTPLUG,
50 };
51 
52 static const u32 hpd_ivb[HPD_NUM_PINS] = {
53 	[HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
54 };
55 
56 static const u32 hpd_bdw[HPD_NUM_PINS] = {
57 	[HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG,
58 };
59 
60 static const u32 hpd_ibx[HPD_NUM_PINS] = {
61 	[HPD_CRT] = SDE_CRT_HOTPLUG,
62 	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
63 	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
64 	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
65 	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
66 };
67 
68 static const u32 hpd_cpt[HPD_NUM_PINS] = {
69 	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
70 	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
71 	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
72 	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
73 	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
74 };
75 
76 static const u32 hpd_spt[HPD_NUM_PINS] = {
77 	[HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
78 	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
79 	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
80 	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
81 	[HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT
82 };
83 
84 static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
85 	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
86 	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
87 	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
88 	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
89 	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
90 	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
91 };
92 
93 static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
94 	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
95 	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
96 	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
97 	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
98 	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
99 	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
100 };
101 
102 static const u32 hpd_status_i915[HPD_NUM_PINS] = {
103 	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
104 	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
105 	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
106 	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
107 	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
108 	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
109 };
110 
111 /* BXT hpd list */
112 static const u32 hpd_bxt[HPD_NUM_PINS] = {
113 	[HPD_PORT_A] = BXT_DE_PORT_HP_DDIA,
114 	[HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
115 	[HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
116 };
117 
118 /* IIR can theoretically queue up two events. Be paranoid. */
119 #define GEN8_IRQ_RESET_NDX(type, which) do { \
120 	I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
121 	POSTING_READ(GEN8_##type##_IMR(which)); \
122 	I915_WRITE(GEN8_##type##_IER(which), 0); \
123 	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
124 	POSTING_READ(GEN8_##type##_IIR(which)); \
125 	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
126 	POSTING_READ(GEN8_##type##_IIR(which)); \
127 } while (0)
128 
129 #define GEN5_IRQ_RESET(type) do { \
130 	I915_WRITE(type##IMR, 0xffffffff); \
131 	POSTING_READ(type##IMR); \
132 	I915_WRITE(type##IER, 0); \
133 	I915_WRITE(type##IIR, 0xffffffff); \
134 	POSTING_READ(type##IIR); \
135 	I915_WRITE(type##IIR, 0xffffffff); \
136 	POSTING_READ(type##IIR); \
137 } while (0)
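
/*
 * Illustrative sketch only: assuming the usual register naming,
 * GEN5_IRQ_RESET(GT) would expand to roughly the following. Note the
 * IIR register is cleared twice, matching the "IIR can theoretically
 * queue up two events" comment above:
 *
 *	I915_WRITE(GTIMR, 0xffffffff);
 *	POSTING_READ(GTIMR);
 *	I915_WRITE(GTIER, 0);
 *	I915_WRITE(GTIIR, 0xffffffff);
 *	POSTING_READ(GTIIR);
 *	I915_WRITE(GTIIR, 0xffffffff);
 *	POSTING_READ(GTIIR);
 */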
138 
139 /*
140  * We should clear IMR at preinstall/uninstall, and just check at postinstall.
141  */
142 static void gen5_assert_iir_is_zero(struct drm_i915_private *dev_priv,
143 				    i915_reg_t reg)
144 {
145 	u32 val = I915_READ(reg);
146 
147 	if (val == 0)
148 		return;
149 
150 	WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
151 	     i915_mmio_reg_offset(reg), val);
152 	I915_WRITE(reg, 0xffffffff);
153 	POSTING_READ(reg);
154 	I915_WRITE(reg, 0xffffffff);
155 	POSTING_READ(reg);
156 }
157 
158 #define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
159 	gen5_assert_iir_is_zero(dev_priv, GEN8_##type##_IIR(which)); \
160 	I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
161 	I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
162 	POSTING_READ(GEN8_##type##_IMR(which)); \
163 } while (0)
164 
165 #define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
166 	gen5_assert_iir_is_zero(dev_priv, type##IIR); \
167 	I915_WRITE(type##IER, (ier_val)); \
168 	I915_WRITE(type##IMR, (imr_val)); \
169 	POSTING_READ(type##IMR); \
170 } while (0)
171 
172 static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
173 
174 /* For display hotplug interrupt */
175 static inline void
176 i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
177 				     uint32_t mask,
178 				     uint32_t bits)
179 {
180 	uint32_t val;
181 
182 	assert_spin_locked(&dev_priv->irq_lock);
183 	WARN_ON(bits & ~mask);
184 
185 	val = I915_READ(PORT_HOTPLUG_EN);
186 	val &= ~mask;
187 	val |= bits;
188 	I915_WRITE(PORT_HOTPLUG_EN, val);
189 }
190 
191 /**
192  * i915_hotplug_interrupt_update - update hotplug interrupt enable
193  * @dev_priv: driver private
194  * @mask: bits to update
195  * @bits: bits to enable
196  * NOTE: the HPD enable bits are modified both inside and outside
197  * of an interrupt context. To prevent concurrent read-modify-write
198  * cycles from interfering, these bits are protected by a spinlock.
199  * Since this function is usually not called with the lock already
200  * held, it acquires the lock itself. A non-locking version is also
201  * available.
202  */
203 void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
204 				   uint32_t mask,
205 				   uint32_t bits)
206 {
207 	spin_lock_irq(&dev_priv->irq_lock);
208 	i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
209 	spin_unlock_irq(&dev_priv->irq_lock);
210 }
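
/*
 * Usage sketch (hypothetical call sites): process context goes through
 * the locking wrapper above to enable some bits:
 *
 *	i915_hotplug_interrupt_update(dev_priv, bits, bits);
 *
 * while code that already holds dev_priv->irq_lock calls the _locked
 * variant directly, e.g. to disable the same bits:
 *
 *	i915_hotplug_interrupt_update_locked(dev_priv, bits, 0);
 */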
211 
212 /**
213  * ilk_update_display_irq - update DEIMR
214  * @dev_priv: driver private
215  * @interrupt_mask: mask of interrupt bits to update
216  * @enabled_irq_mask: mask of interrupt bits to enable
217  */
218 void ilk_update_display_irq(struct drm_i915_private *dev_priv,
219 			    uint32_t interrupt_mask,
220 			    uint32_t enabled_irq_mask)
221 {
222 	uint32_t new_val;
223 
224 	assert_spin_locked(&dev_priv->irq_lock);
225 
226 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
227 
228 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
229 		return;
230 
231 	new_val = dev_priv->irq_mask;
232 	new_val &= ~interrupt_mask;
233 	new_val |= (~enabled_irq_mask & interrupt_mask);
234 
235 	if (new_val != dev_priv->irq_mask) {
236 		dev_priv->irq_mask = new_val;
237 		I915_WRITE(DEIMR, dev_priv->irq_mask);
238 		POSTING_READ(DEIMR);
239 	}
240 }
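
/*
 * Worked example (made-up masks): with interrupt_mask = 0x6 and
 * enabled_irq_mask = 0x2, the update clears bits 1-2 of the cached mask
 * and then ORs in ~0x2 & 0x6 = 0x4, so bit 1 ends up unmasked
 * (interrupt enabled) and bit 2 masked (disabled); bits outside
 * interrupt_mask keep their previous state. A set bit in DEIMR masks
 * the corresponding interrupt.
 */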
241 
242 /**
243  * ilk_update_gt_irq - update GTIMR
244  * @dev_priv: driver private
245  * @interrupt_mask: mask of interrupt bits to update
246  * @enabled_irq_mask: mask of interrupt bits to enable
247  */
248 static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
249 			      uint32_t interrupt_mask,
250 			      uint32_t enabled_irq_mask)
251 {
252 	assert_spin_locked(&dev_priv->irq_lock);
253 
254 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
255 
256 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
257 		return;
258 
259 	dev_priv->gt_irq_mask &= ~interrupt_mask;
260 	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
261 	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
262 	POSTING_READ(GTIMR);
263 }
264 
265 void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
266 {
267 	ilk_update_gt_irq(dev_priv, mask, mask);
268 }
269 
270 void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
271 {
272 	ilk_update_gt_irq(dev_priv, mask, 0);
273 }
274 
275 static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv)
276 {
277 	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
278 }
279 
280 static i915_reg_t gen6_pm_imr(struct drm_i915_private *dev_priv)
281 {
282 	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR;
283 }
284 
285 static i915_reg_t gen6_pm_ier(struct drm_i915_private *dev_priv)
286 {
287 	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER;
288 }
289 
290 /**
291  * snb_update_pm_irq - update GEN6_PMIMR
292  * @dev_priv: driver private
293  * @interrupt_mask: mask of interrupt bits to update
294  * @enabled_irq_mask: mask of interrupt bits to enable
295  */
296 static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
297 			      uint32_t interrupt_mask,
298 			      uint32_t enabled_irq_mask)
299 {
300 	uint32_t new_val;
301 
302 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
303 
304 	assert_spin_locked(&dev_priv->irq_lock);
305 
306 	new_val = dev_priv->pm_irq_mask;
307 	new_val &= ~interrupt_mask;
308 	new_val |= (~enabled_irq_mask & interrupt_mask);
309 
310 	if (new_val != dev_priv->pm_irq_mask) {
311 		dev_priv->pm_irq_mask = new_val;
312 		I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_irq_mask);
313 		POSTING_READ(gen6_pm_imr(dev_priv));
314 	}
315 }
316 
317 void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
318 {
319 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
320 		return;
321 
322 	snb_update_pm_irq(dev_priv, mask, mask);
323 }
324 
325 static void __gen6_disable_pm_irq(struct drm_i915_private *dev_priv,
326 				  uint32_t mask)
327 {
328 	snb_update_pm_irq(dev_priv, mask, 0);
329 }
330 
331 void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
332 {
333 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
334 		return;
335 
336 	__gen6_disable_pm_irq(dev_priv, mask);
337 }
338 
339 void gen6_reset_rps_interrupts(struct drm_device *dev)
340 {
341 	struct drm_i915_private *dev_priv = dev->dev_private;
342 	i915_reg_t reg = gen6_pm_iir(dev_priv);
343 
344 	spin_lock_irq(&dev_priv->irq_lock);
345 	I915_WRITE(reg, dev_priv->pm_rps_events);
346 	I915_WRITE(reg, dev_priv->pm_rps_events);
347 	POSTING_READ(reg);
348 	dev_priv->rps.pm_iir = 0;
349 	spin_unlock_irq(&dev_priv->irq_lock);
350 }
351 
352 void gen6_enable_rps_interrupts(struct drm_device *dev)
353 {
354 	struct drm_i915_private *dev_priv = dev->dev_private;
355 
356 	spin_lock_irq(&dev_priv->irq_lock);
357 
358 	WARN_ON(dev_priv->rps.pm_iir);
359 	WARN_ON(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
360 	dev_priv->rps.interrupts_enabled = true;
361 	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) |
362 				dev_priv->pm_rps_events);
363 	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
364 
365 	spin_unlock_irq(&dev_priv->irq_lock);
366 }
367 
368 u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask)
369 {
370 	/*
371 	 * SNB and IVB can, while VLV and CHV may, hard hang on a looping
372 	 * batchbuffer if GEN6_PM_RP_UP_EI_EXPIRED is masked.
373 	 *
374 	 * TODO: verify if this can be reproduced on VLV,CHV.
375 	 */
376 	if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv))
377 		mask &= ~GEN6_PM_RP_UP_EI_EXPIRED;
378 
379 	if (INTEL_INFO(dev_priv)->gen >= 8)
380 		mask &= ~GEN8_PMINTR_REDIRECT_TO_NON_DISP;
381 
382 	return mask;
383 }
384 
385 void gen6_disable_rps_interrupts(struct drm_device *dev)
386 {
387 	struct drm_i915_private *dev_priv = dev->dev_private;
388 
389 	spin_lock_irq(&dev_priv->irq_lock);
390 	dev_priv->rps.interrupts_enabled = false;
391 	spin_unlock_irq(&dev_priv->irq_lock);
392 
393 	cancel_work_sync(&dev_priv->rps.work);
394 
395 	spin_lock_irq(&dev_priv->irq_lock);
396 
397 	I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0));
398 
399 	__gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
400 	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) &
401 				~dev_priv->pm_rps_events);
402 
403 	spin_unlock_irq(&dev_priv->irq_lock);
404 
405 	synchronize_irq(dev->irq);
406 }
407 
408 /**
409  * bdw_update_port_irq - update DE port interrupt
410  * @dev_priv: driver private
411  * @interrupt_mask: mask of interrupt bits to update
412  * @enabled_irq_mask: mask of interrupt bits to enable
413  */
414 static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
415 				uint32_t interrupt_mask,
416 				uint32_t enabled_irq_mask)
417 {
418 	uint32_t new_val;
419 	uint32_t old_val;
420 
421 	assert_spin_locked(&dev_priv->irq_lock);
422 
423 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
424 
425 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
426 		return;
427 
428 	old_val = I915_READ(GEN8_DE_PORT_IMR);
429 
430 	new_val = old_val;
431 	new_val &= ~interrupt_mask;
432 	new_val |= (~enabled_irq_mask & interrupt_mask);
433 
434 	if (new_val != old_val) {
435 		I915_WRITE(GEN8_DE_PORT_IMR, new_val);
436 		POSTING_READ(GEN8_DE_PORT_IMR);
437 	}
438 }
439 
440 /**
441  * bdw_update_pipe_irq - update DE pipe interrupt
442  * @dev_priv: driver private
443  * @pipe: pipe whose interrupt to update
444  * @interrupt_mask: mask of interrupt bits to update
445  * @enabled_irq_mask: mask of interrupt bits to enable
446  */
447 void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
448 			 enum pipe pipe,
449 			 uint32_t interrupt_mask,
450 			 uint32_t enabled_irq_mask)
451 {
452 	uint32_t new_val;
453 
454 	assert_spin_locked(&dev_priv->irq_lock);
455 
456 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
457 
458 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
459 		return;
460 
461 	new_val = dev_priv->de_irq_mask[pipe];
462 	new_val &= ~interrupt_mask;
463 	new_val |= (~enabled_irq_mask & interrupt_mask);
464 
465 	if (new_val != dev_priv->de_irq_mask[pipe]) {
466 		dev_priv->de_irq_mask[pipe] = new_val;
467 		I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
468 		POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
469 	}
470 }
471 
472 /**
473  * ibx_display_interrupt_update - update SDEIMR
474  * @dev_priv: driver private
475  * @interrupt_mask: mask of interrupt bits to update
476  * @enabled_irq_mask: mask of interrupt bits to enable
477  */
478 void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
479 				  uint32_t interrupt_mask,
480 				  uint32_t enabled_irq_mask)
481 {
482 	uint32_t sdeimr = I915_READ(SDEIMR);
483 	sdeimr &= ~interrupt_mask;
484 	sdeimr |= (~enabled_irq_mask & interrupt_mask);
485 
486 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
487 
488 	assert_spin_locked(&dev_priv->irq_lock);
489 
490 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
491 		return;
492 
493 	I915_WRITE(SDEIMR, sdeimr);
494 	POSTING_READ(SDEIMR);
495 }
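
/*
 * Usage sketch (hypothetical mask): as with the other *_irq_update
 * helpers in this file, passing a bit in both arguments unmasks it,
 * and passing it only in interrupt_mask masks it again:
 *
 *	ibx_display_interrupt_update(dev_priv, bit, bit);
 *	ibx_display_interrupt_update(dev_priv, bit, 0);
 */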
496 
497 static void
498 __i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
499 		       u32 enable_mask, u32 status_mask)
500 {
501 	i915_reg_t reg = PIPESTAT(pipe);
502 	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
503 
504 	assert_spin_locked(&dev_priv->irq_lock);
505 	WARN_ON(!intel_irqs_enabled(dev_priv));
506 
507 	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
508 		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
509 		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
510 		      pipe_name(pipe), enable_mask, status_mask))
511 		return;
512 
513 	if ((pipestat & enable_mask) == enable_mask)
514 		return;
515 
516 	dev_priv->pipestat_irq_mask[pipe] |= status_mask;
517 
518 	/* Enable the interrupt, clear any pending status */
519 	pipestat |= enable_mask | status_mask;
520 	I915_WRITE(reg, pipestat);
521 	POSTING_READ(reg);
522 }
523 
524 static void
525 __i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
526 		        u32 enable_mask, u32 status_mask)
527 {
528 	i915_reg_t reg = PIPESTAT(pipe);
529 	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
530 
531 	assert_spin_locked(&dev_priv->irq_lock);
532 	WARN_ON(!intel_irqs_enabled(dev_priv));
533 
534 	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
535 		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
536 		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
537 		      pipe_name(pipe), enable_mask, status_mask))
538 		return;
539 
540 	if ((pipestat & enable_mask) == 0)
541 		return;
542 
543 	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
544 
545 	pipestat &= ~enable_mask;
546 	I915_WRITE(reg, pipestat);
547 	POSTING_READ(reg);
548 }
549 
550 static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
551 {
552 	u32 enable_mask = status_mask << 16;
553 
554 	/*
555 	 * On pipe A we don't support the PSR interrupt yet,
556 	 * on pipe B and C the same bit MBZ.
557 	 * on pipes B and C the same bit MBZ (must be zero).
558 	if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
559 		return 0;
560 	/*
561 	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
562 	 * On pipes B and C we don't support the PSR interrupt yet, on pipe
563 	 */
564 	if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
565 		return 0;
566 
567 	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
568 			 SPRITE0_FLIP_DONE_INT_EN_VLV |
569 			 SPRITE1_FLIP_DONE_INT_EN_VLV);
570 	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
571 		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
572 	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
573 		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;
574 
575 	return enable_mask;
576 }
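
/*
 * Example (made-up input): for
 * status_mask = SPRITE0_FLIP_DONE_INT_STATUS_VLV, the default
 * "enable = status << 16" guess does not hold, so the corresponding
 * bit is cleared from enable_mask and SPRITE0_FLIP_DONE_INT_EN_VLV is
 * set instead; the sprite flip-done enables are exactly the bits that
 * break the usual status-to-enable layout.
 */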
577 
578 void
579 i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
580 		     u32 status_mask)
581 {
582 	u32 enable_mask;
583 
584 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
585 		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
586 							   status_mask);
587 	else
588 		enable_mask = status_mask << 16;
589 	__i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
590 }
591 
592 void
593 i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
594 		      u32 status_mask)
595 {
596 	u32 enable_mask;
597 
598 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
599 		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
600 							   status_mask);
601 	else
602 		enable_mask = status_mask << 16;
603 	__i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
604 }
605 
606 /**
607  * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
608  * @dev: drm device
609  */
610 static void i915_enable_asle_pipestat(struct drm_device *dev)
611 {
612 	struct drm_i915_private *dev_priv = dev->dev_private;
613 
614 	if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
615 		return;
616 
617 	spin_lock_irq(&dev_priv->irq_lock);
618 
619 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
620 	if (INTEL_INFO(dev)->gen >= 4)
621 		i915_enable_pipestat(dev_priv, PIPE_A,
622 				     PIPE_LEGACY_BLC_EVENT_STATUS);
623 
624 	spin_unlock_irq(&dev_priv->irq_lock);
625 }
626 
627 /*
628  * This timing diagram depicts the video signal in and
629  * around the vertical blanking period.
630  *
631  * Assumptions about the fictitious mode used in this example:
632  *  vblank_start >= 3
633  *  vsync_start = vblank_start + 1
634  *  vsync_end = vblank_start + 2
635  *  vtotal = vblank_start + 3
636  *
637  *           start of vblank:
638  *           latch double buffered registers
639  *           increment frame counter (ctg+)
640  *           generate start of vblank interrupt (gen4+)
641  *           |
642  *           |          frame start:
643  *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
644  *           |          may be shifted forward 1-3 extra lines via PIPECONF
645  *           |          |
646  *           |          |  start of vsync:
647  *           |          |  generate vsync interrupt
648  *           |          |  |
649  * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
650  *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
651  * ----va---> <-----------------vb--------------------> <--------va-------------
652  *       |          |       <----vs----->                     |
653  * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
654  * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
655  * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
656  *       |          |                                         |
657  *       last visible pixel                                   first visible pixel
658  *                  |                                         increment frame counter (gen3/4)
659  *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
660  *
661  * x  = horizontal active
662  * _  = horizontal blanking
663  * hs = horizontal sync
664  * va = vertical active
665  * vb = vertical blanking
666  * vs = vertical sync
667  * vbs = vblank_start (number)
668  *
669  * Summary:
670  * - most events happen at the start of horizontal sync
671  * - frame start happens at the start of horizontal blank, 1-4 lines
672  *   (depending on PIPECONF settings) after the start of vblank
673  * - gen3/4 pixel and frame counter are synchronized with the start
674  *   of horizontal active on the first line of vertical active
675  */
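
/*
 * Worked example for the diagram above (hypothetical mode with
 * vblank_start = 3, vtotal = 6): reading the scanline counter rows
 * across the six segments gives 3, 4, 5, 0, 1, 2 on gen2 but
 * 1, 2, 3, 4, 5, 0 on gen3+, i.e. around vblank the gen2 counter runs
 * two lines ahead of the gen3+ counter.
 */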
676 
677 static u32 i8xx_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
678 {
679 	/* Gen2 doesn't have a hardware frame counter */
680 	return 0;
681 }
682 
683 /* Called from drm generic code, passed a 'crtc', which
684  * we use as a pipe index
685  */
686 static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
687 {
688 	struct drm_i915_private *dev_priv = dev->dev_private;
689 	i915_reg_t high_frame, low_frame;
690 	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
691 	struct intel_crtc *intel_crtc =
692 		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
693 	const struct drm_display_mode *mode = &intel_crtc->base.hwmode;
694 
695 	htotal = mode->crtc_htotal;
696 	hsync_start = mode->crtc_hsync_start;
697 	vbl_start = mode->crtc_vblank_start;
698 	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
699 		vbl_start = DIV_ROUND_UP(vbl_start, 2);
700 
701 	/* Convert to pixel count */
702 	vbl_start *= htotal;
703 
704 	/* Start of vblank event occurs at start of hsync */
705 	vbl_start -= htotal - hsync_start;
706 
707 	high_frame = PIPEFRAME(pipe);
708 	low_frame = PIPEFRAMEPIXEL(pipe);
709 
710 	/*
711 	 * High & low register fields aren't synchronized, so make sure
712 	 * we get a low value that's stable across two reads of the high
713 	 * register.
714 	 */
715 	do {
716 		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
717 		low   = I915_READ(low_frame);
718 		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
719 	} while (high1 != high2);
720 
721 	high1 >>= PIPE_FRAME_HIGH_SHIFT;
722 	pixel = low & PIPE_PIXEL_MASK;
723 	low >>= PIPE_FRAME_LOW_SHIFT;
724 
725 	/*
726 	 * The frame counter increments at beginning of active.
727 	 * Cook up a vblank counter by also checking the pixel
728 	 * counter against vblank start.
729 	 */
730 	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
731 }
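
/*
 * Example of the cook-up above (made-up register values): with
 * high1 = 0x1, low = 0x2 and the pixel counter at or past vbl_start,
 * the function returns ((0x1 << 8) | 0x2) + 1 = 0x103; the +1
 * compensates for the hardware frame counter not incrementing until
 * the start of active even though vblank has already begun.
 */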
732 
733 static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
734 {
735 	struct drm_i915_private *dev_priv = dev->dev_private;
736 
737 	return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
738 }
739 
740 /* I915_READ_FW: only for fast reads of the display block, no need for forcewake etc. */
741 static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
742 {
743 	struct drm_device *dev = crtc->base.dev;
744 	struct drm_i915_private *dev_priv = dev->dev_private;
745 	const struct drm_display_mode *mode = &crtc->base.hwmode;
746 	enum pipe pipe = crtc->pipe;
747 	int position, vtotal;
748 
749 	vtotal = mode->crtc_vtotal;
750 	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
751 		vtotal /= 2;
752 
753 	if (IS_GEN2(dev))
754 		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
755 	else
756 		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
757 
758 	/*
759 	 * On HSW, the DSL reg (0x70000) appears to return 0 if we
760 	 * read it just before the start of vblank.  So try it again
761 	 * so we don't accidentally end up spanning a vblank frame
762 	 * increment, causing the pipe_update_end() code to squawk at us.
763 	 *
764 	 * The nature of this problem means we can't simply check the ISR
765 	 * bit and return the vblank start value; nor can we use the scanline
766 	 * debug register in the transcoder as it appears to have the same
767 	 * problem.  We may need to extend this to include other platforms,
768 	 * but so far testing only shows the problem on HSW.
769 	 */
770 	if (HAS_DDI(dev) && !position) {
771 		int i, temp;
772 
773 		for (i = 0; i < 100; i++) {
774 			udelay(1);
775 			temp = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) &
776 				DSL_LINEMASK_GEN3;
777 			if (temp != position) {
778 				position = temp;
779 				break;
780 			}
781 		}
782 	}
783 
784 	/*
785 	 * See update_scanline_offset() for the details on the
786 	 * scanline_offset adjustment.
787 	 */
788 	return (position + crtc->scanline_offset) % vtotal;
789 }
790 
791 static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
792 				    unsigned int flags, int *vpos, int *hpos,
793 				    ktime_t *stime, ktime_t *etime,
794 				    const struct drm_display_mode *mode)
795 {
796 	struct drm_i915_private *dev_priv = dev->dev_private;
797 	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
798 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
799 	int position;
800 	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
801 	bool in_vbl = true;
802 	int ret = 0;
803 	unsigned long irqflags;
804 
805 	if (WARN_ON(!mode->crtc_clock)) {
806 		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
807 				 "pipe %c\n", pipe_name(pipe));
808 		return 0;
809 	}
810 
811 	htotal = mode->crtc_htotal;
812 	hsync_start = mode->crtc_hsync_start;
813 	vtotal = mode->crtc_vtotal;
814 	vbl_start = mode->crtc_vblank_start;
815 	vbl_end = mode->crtc_vblank_end;
816 
817 	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
818 		vbl_start = DIV_ROUND_UP(vbl_start, 2);
819 		vbl_end /= 2;
820 		vtotal /= 2;
821 	}
822 
823 	ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;
824 
825 	/*
826 	 * Lock uncore.lock, as we will do multiple timing critical raw
827 	 * register reads, potentially with preemption disabled, so the
828 	 * following code must not block on uncore.lock.
829 	 */
830 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
831 
832 	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
833 
834 	/* Get optional system timestamp before query. */
835 	if (stime)
836 		*stime = ktime_get();
837 
838 	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
839 		/* No obvious pixelcount register. Only query vertical
840 		 * scanout position from Display scan line register.
841 		 */
842 		position = __intel_get_crtc_scanline(intel_crtc);
843 	} else {
844 		/* Have access to pixelcount since start of frame.
845 		 * We can split this into vertical and horizontal
846 		 * scanout position.
847 		 */
848 		position = (I915_READ_FW(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
849 
850 		/* convert to pixel counts */
851 		vbl_start *= htotal;
852 		vbl_end *= htotal;
853 		vtotal *= htotal;
854 
855 		/*
856 		 * In interlaced modes, the pixel counter counts all pixels,
857 		 * so one field will have htotal more pixels. In order to avoid
858 		 * the reported position from jumping backwards when the pixel
859 		 * counter is beyond the length of the shorter field, just
860 		 * clamp the position to the length of the shorter field. This
861 		 * matches how the scanline counter based position works since
862 		 * the scanline counter doesn't count the two half lines.
863 		 */
864 		if (position >= vtotal)
865 			position = vtotal - 1;
866 
867 		/*
868 		 * Start of vblank interrupt is triggered at start of hsync,
869 		 * just prior to the first active line of vblank. However we
870 		 * consider lines to start at the leading edge of horizontal
871 		 * active. So, should we get here before we've crossed into
872 		 * the horizontal active of the first line in vblank, we would
873 		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
874 		 * always add htotal-hsync_start to the current pixel position.
875 		 */
876 		position = (position + htotal - hsync_start) % vtotal;
877 	}
878 
879 	/* Get optional system timestamp after query. */
880 	if (etime)
881 		*etime = ktime_get();
882 
883 	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
884 
885 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
886 
887 	in_vbl = position >= vbl_start && position < vbl_end;
888 
889 	/*
890 	 * While in vblank, position will be negative
891 	 * counting up towards 0 at vbl_end. And outside
892 	 * vblank, position will be positive counting
893 	 * up from vbl_end.
894 	 */
895 	if (position >= vbl_start)
896 		position -= vbl_end;
897 	else
898 		position += vtotal - vbl_end;
899 
900 	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
901 		*vpos = position;
902 		*hpos = 0;
903 	} else {
904 		*vpos = position / htotal;
905 		*hpos = position - (*vpos * htotal);
906 	}
907 
908 	/* In vblank? */
909 	if (in_vbl)
910 		ret |= DRM_SCANOUTPOS_IN_VBLANK;
911 
912 	return ret;
913 }
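
/*
 * Example of the normalization above (hypothetical mode with
 * vtotal = 10, vbl_start = 8, vbl_end = 10): a raw position of 9 lies
 * inside vblank and becomes 9 - 10 = -1 (one unit before vbl_end),
 * while a raw position of 2 lies in the active area and stays
 * 2 + (10 - 10) = 2 units past vbl_end.
 */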
914 
915 int intel_get_crtc_scanline(struct intel_crtc *crtc)
916 {
917 	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
918 	unsigned long irqflags;
919 	int position;
920 
921 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
922 	position = __intel_get_crtc_scanline(crtc);
923 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
924 
925 	return position;
926 }
927 
928 static int i915_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,
929 			      int *max_error,
930 			      struct timeval *vblank_time,
931 			      unsigned flags)
932 {
933 	struct drm_crtc *crtc;
934 
935 	if (pipe >= INTEL_INFO(dev)->num_pipes) {
936 		DRM_ERROR("Invalid crtc %u\n", pipe);
937 		return -EINVAL;
938 	}
939 
940 	/* Get drm_crtc to timestamp: */
941 	crtc = intel_get_crtc_for_pipe(dev, pipe);
942 	if (crtc == NULL) {
943 		DRM_ERROR("Invalid crtc %u\n", pipe);
944 		return -EINVAL;
945 	}
946 
947 	if (!crtc->hwmode.crtc_clock) {
948 		DRM_DEBUG_KMS("crtc %u is disabled\n", pipe);
949 		return -EBUSY;
950 	}
951 
952 	/* Helper routine in DRM core does all the work: */
953 	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
954 						     vblank_time, flags,
955 						     &crtc->hwmode);
956 }
957 
958 static void ironlake_rps_change_irq_handler(struct drm_device *dev)
959 {
960 	struct drm_i915_private *dev_priv = dev->dev_private;
961 	u32 busy_up, busy_down, max_avg, min_avg;
962 	u8 new_delay;
963 
964 	spin_lock(&mchdev_lock);
965 
966 	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
967 
968 	new_delay = dev_priv->ips.cur_delay;
969 
970 	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
971 	busy_up = I915_READ(RCPREVBSYTUPAVG);
972 	busy_down = I915_READ(RCPREVBSYTDNAVG);
973 	max_avg = I915_READ(RCBMAXAVG);
974 	min_avg = I915_READ(RCBMINAVG);
975 
976 	/* Handle RCS change request from hw */
977 	if (busy_up > max_avg) {
978 		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
979 			new_delay = dev_priv->ips.cur_delay - 1;
980 		if (new_delay < dev_priv->ips.max_delay)
981 			new_delay = dev_priv->ips.max_delay;
982 	} else if (busy_down < min_avg) {
983 		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
984 			new_delay = dev_priv->ips.cur_delay + 1;
985 		if (new_delay > dev_priv->ips.min_delay)
986 			new_delay = dev_priv->ips.min_delay;
987 	}
988 
989 	if (ironlake_set_drps(dev, new_delay))
990 		dev_priv->ips.cur_delay = new_delay;
991 
992 	spin_unlock(&mchdev_lock);
993 
996 
997 static void notify_ring(struct intel_engine_cs *ring)
998 {
999 	if (!intel_ring_initialized(ring))
1000 		return;
1001 
1002 	trace_i915_gem_request_notify(ring);
1003 
1004 	wake_up_all(&ring->irq_queue);
1005 }
1006 
1007 static void vlv_c0_read(struct drm_i915_private *dev_priv,
1008 			struct intel_rps_ei *ei)
1009 {
1010 	ei->cz_clock = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
1011 	ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT);
1012 	ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
1013 }
1014 
1015 static bool vlv_c0_above(struct drm_i915_private *dev_priv,
1016 			 const struct intel_rps_ei *old,
1017 			 const struct intel_rps_ei *now,
1018 			 int threshold)
1019 {
1020 	u64 time, c0;
1021 	unsigned int mul = 100;
1022 
1023 	if (old->cz_clock == 0)
1024 		return false;
1025 
1026 	if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
1027 		mul <<= 8;
1028 
1029 	time = now->cz_clock - old->cz_clock;
1030 	time *= threshold * dev_priv->czclk_freq;
1031 
1032 	/* Workload can be split between render + media, e.g. SwapBuffers
1033 	 * being blitted in X after being rendered in mesa. To account for
1034 	 * this we need to combine both engines into our activity counter.
1035 	 */
1036 	c0 = now->render_c0 - old->render_c0;
1037 	c0 += now->media_c0 - old->media_c0;
1038 	c0 *= mul * VLV_CZ_CLOCK_TO_MILLI_SEC;
1039 
1040 	return c0 >= time;
1041 }
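
/*
 * Spelled out, the comparison above is (roughly) a C0 residency check:
 *
 *	delta_c0 * mul * VLV_CZ_CLOCK_TO_MILLI_SEC >=
 *		delta_cz * threshold * czclk_freq
 *
 * i.e. whether the combined render+media busy counters advanced by at
 * least 'threshold' percent of the elapsed CZ timestamp interval; mul
 * carries the percent scaling, times 256 when the counters run in
 * their high range.
 */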
1042 
1043 void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
1044 {
1045 	vlv_c0_read(dev_priv, &dev_priv->rps.down_ei);
1046 	dev_priv->rps.up_ei = dev_priv->rps.down_ei;
1047 }
1048 
1049 static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
1050 {
1051 	struct intel_rps_ei now;
1052 	u32 events = 0;
1053 
1054 	if ((pm_iir & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED)) == 0)
1055 		return 0;
1056 
1057 	vlv_c0_read(dev_priv, &now);
1058 	if (now.cz_clock == 0)
1059 		return 0;
1060 
1061 	if (pm_iir & GEN6_PM_RP_DOWN_EI_EXPIRED) {
1062 		if (!vlv_c0_above(dev_priv,
1063 				  &dev_priv->rps.down_ei, &now,
1064 				  dev_priv->rps.down_threshold))
1065 			events |= GEN6_PM_RP_DOWN_THRESHOLD;
1066 		dev_priv->rps.down_ei = now;
1067 	}
1068 
1069 	if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
1070 		if (vlv_c0_above(dev_priv,
1071 				 &dev_priv->rps.up_ei, &now,
1072 				 dev_priv->rps.up_threshold))
1073 			events |= GEN6_PM_RP_UP_THRESHOLD;
1074 		dev_priv->rps.up_ei = now;
1075 	}
1076 
1077 	return events;
1078 }
1079 
1080 static bool any_waiters(struct drm_i915_private *dev_priv)
1081 {
1082 	struct intel_engine_cs *ring;
1083 	int i;
1084 
1085 	for_each_ring(ring, dev_priv, i)
1086 		if (ring->irq_refcount)
1087 			return true;
1088 
1089 	return false;
1090 }
1091 
1092 static void gen6_pm_rps_work(struct work_struct *work)
1093 {
1094 	struct drm_i915_private *dev_priv =
1095 		container_of(work, struct drm_i915_private, rps.work);
1096 	bool client_boost;
1097 	int new_delay, adj, min, max;
1098 	u32 pm_iir;
1099 
1100 	spin_lock_irq(&dev_priv->irq_lock);
1101 	/* Speed up work cancellation while disabling RPS interrupts. */
1102 	if (!dev_priv->rps.interrupts_enabled) {
1103 		spin_unlock_irq(&dev_priv->irq_lock);
1104 		return;
1105 	}
1106 
1107 	/*
1108 	 * The RPS work is synced during runtime suspend, so we don't require a
1109 	 * wakeref. TODO: instead of disabling the asserts make sure that we
1110 	 * always hold an RPM reference while the work is running.
1111 	 */
1112 	DISABLE_RPM_WAKEREF_ASSERTS(dev_priv);
1113 
1114 	pm_iir = dev_priv->rps.pm_iir;
1115 	dev_priv->rps.pm_iir = 0;
1116 	/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
1117 	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
1118 	client_boost = dev_priv->rps.client_boost;
1119 	dev_priv->rps.client_boost = false;
1120 	spin_unlock_irq(&dev_priv->irq_lock);
1121 
1122 	/* Make sure we didn't queue anything we're not going to process. */
1123 	WARN_ON(pm_iir & ~dev_priv->pm_rps_events);
1124 
1125 	if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
1126 		goto out;
1127 
1128 	mutex_lock(&dev_priv->rps.hw_lock);
1129 
1130 	pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);
1131 
1132 	adj = dev_priv->rps.last_adj;
1133 	new_delay = dev_priv->rps.cur_freq;
1134 	min = dev_priv->rps.min_freq_softlimit;
1135 	max = dev_priv->rps.max_freq_softlimit;
1136 
1137 	if (client_boost) {
1138 		new_delay = dev_priv->rps.max_freq_softlimit;
1139 		adj = 0;
1140 	} else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
1141 		if (adj > 0)
1142 			adj *= 2;
1143 		else /* CHV needs even encode values */
1144 			adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;
1145 		/*
1146 		 * For better performance, jump directly
1147 		 * to RPe if we're below it.
1148 		 */
1149 		if (new_delay < dev_priv->rps.efficient_freq - adj) {
1150 			new_delay = dev_priv->rps.efficient_freq;
1151 			adj = 0;
1152 		}
1153 	} else if (any_waiters(dev_priv)) {
1154 		adj = 0;
1155 	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
1156 		if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
1157 			new_delay = dev_priv->rps.efficient_freq;
1158 		else
1159 			new_delay = dev_priv->rps.min_freq_softlimit;
1160 		adj = 0;
1161 	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
1162 		if (adj < 0)
1163 			adj *= 2;
1164 		else /* CHV needs even encode values */
1165 			adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;
1166 	} else { /* unknown event */
1167 		adj = 0;
1168 	}
1169 
1170 	dev_priv->rps.last_adj = adj;
1171 
1172 	/* sysfs frequency interfaces may have snuck in while servicing the
1173 	 * interrupt
1174 	 */
1175 	new_delay += adj;
1176 	new_delay = clamp_t(int, new_delay, min, max);
1177 
1178 	intel_set_rps(dev_priv->dev, new_delay);
1179 
1180 	mutex_unlock(&dev_priv->rps.hw_lock);
1181 out:
1182 	ENABLE_RPM_WAKEREF_ASSERTS(dev_priv);
1183 }
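
/*
 * Example of the adjustment ladder above (hypothetical event stream):
 * consecutive UP_THRESHOLD interrupts grow the step as adj = 1, 2, 4,
 * 8, ... (2, 4, 8, ... on CHV, which needs even encodings), waiters
 * and unknown events reset adj to 0, and DOWN_THRESHOLD events walk
 * the other way as adj = -1, -2, -4, ... The resulting frequency is
 * always clamped to [min_freq_softlimit, max_freq_softlimit].
 */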
1184 
1185 
1186 /**
1187  * ivybridge_parity_work - Workqueue function run when a parity error
1188  * interrupt occurs.
1189  * @work: workqueue struct
1190  *
1191  * Doesn't actually do anything except notify userspace. As a consequence of
1192  * this event, userspace should try to remap the bad rows, since
1193  * statistically the same row is likely to go bad again.
1194  */
1195 static void ivybridge_parity_work(struct work_struct *work)
1196 {
1197 	struct drm_i915_private *dev_priv =
1198 		container_of(work, struct drm_i915_private, l3_parity.error_work);
1199 	u32 error_status, row, bank, subbank;
1200 	char *parity_event[6];
1201 	uint32_t misccpctl;
1202 	uint8_t slice = 0;
1203 
1204 	/* We must turn off DOP level clock gating to access the L3 registers.
1205 	 * In order to prevent a get/put style interface, acquire struct mutex
1206 	 * any time we access those registers.
1207 	 */
1208 	mutex_lock(&dev_priv->dev->struct_mutex);
1209 
1210 	/* If we've screwed up tracking, just let the interrupt fire again */
1211 	if (WARN_ON(!dev_priv->l3_parity.which_slice))
1212 		goto out;
1213 
1214 	misccpctl = I915_READ(GEN7_MISCCPCTL);
1215 	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
1216 	POSTING_READ(GEN7_MISCCPCTL);
1217 
1218 	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
1219 		i915_reg_t reg;
1220 
1221 		slice--;
1222 		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
1223 			break;
1224 
1225 		dev_priv->l3_parity.which_slice &= ~(1<<slice);
1226 
1227 		reg = GEN7_L3CDERRST1(slice);
1228 
1229 		error_status = I915_READ(reg);
1230 		row = GEN7_PARITY_ERROR_ROW(error_status);
1231 		bank = GEN7_PARITY_ERROR_BANK(error_status);
1232 		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
1233 
1234 		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
1235 		POSTING_READ(reg);
1236 
1237 		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
1238 		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
1239 		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
1240 		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
1241 		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
1242 		parity_event[5] = NULL;
1243 
1244 		kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj,
1245 				   KOBJ_CHANGE, parity_event);
1246 
1247 		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
1248 			  slice, row, bank, subbank);
1249 
1250 		kfree(parity_event[4]);
1251 		kfree(parity_event[3]);
1252 		kfree(parity_event[2]);
1253 		kfree(parity_event[1]);
1254 	}
1255 
1256 	I915_WRITE(GEN7_MISCCPCTL, misccpctl);
1257 
1258 out:
1259 	WARN_ON(dev_priv->l3_parity.which_slice);
1260 	spin_lock_irq(&dev_priv->irq_lock);
1261 	gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
1262 	spin_unlock_irq(&dev_priv->irq_lock);
1263 
1264 	mutex_unlock(&dev_priv->dev->struct_mutex);
1265 }
1266 
1267 static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
1268 {
1269 	struct drm_i915_private *dev_priv = dev->dev_private;
1270 
1271 	if (!HAS_L3_DPF(dev))
1272 		return;
1273 
1274 	spin_lock(&dev_priv->irq_lock);
1275 	gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
1276 	spin_unlock(&dev_priv->irq_lock);
1277 
1278 	iir &= GT_PARITY_ERROR(dev);
1279 	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
1280 		dev_priv->l3_parity.which_slice |= 1 << 1;
1281 
1282 	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
1283 		dev_priv->l3_parity.which_slice |= 1 << 0;
1284 
1285 	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
1286 }
1287 
1288 static void ilk_gt_irq_handler(struct drm_device *dev,
1289 			       struct drm_i915_private *dev_priv,
1290 			       u32 gt_iir)
1291 {
1292 	if (gt_iir &
1293 	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
1294 		notify_ring(&dev_priv->ring[RCS]);
1295 	if (gt_iir & ILK_BSD_USER_INTERRUPT)
1296 		notify_ring(&dev_priv->ring[VCS]);
1297 }
1298 
1299 static void snb_gt_irq_handler(struct drm_device *dev,
1300 			       struct drm_i915_private *dev_priv,
1301 			       u32 gt_iir)
1302 {
1304 	if (gt_iir &
1305 	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
1306 		notify_ring(&dev_priv->ring[RCS]);
1307 	if (gt_iir & GT_BSD_USER_INTERRUPT)
1308 		notify_ring(&dev_priv->ring[VCS]);
1309 	if (gt_iir & GT_BLT_USER_INTERRUPT)
1310 		notify_ring(&dev_priv->ring[BCS]);
1311 
1312 	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
1313 		      GT_BSD_CS_ERROR_INTERRUPT |
1314 		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
1315 		DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);
1316 
1317 	if (gt_iir & GT_PARITY_ERROR(dev))
1318 		ivybridge_parity_error_irq_handler(dev, gt_iir);
1319 }
1320 
1321 static __always_inline void
1322 gen8_cs_irq_handler(struct intel_engine_cs *ring, u32 iir, int test_shift)
1323 {
1324 	if (iir & (GT_RENDER_USER_INTERRUPT << test_shift))
1325 		notify_ring(ring);
1326 	if (iir & (GT_CONTEXT_SWITCH_INTERRUPT << test_shift))
1327 		intel_lrc_irq_handler(ring);
1328 }
1329 
1330 static irqreturn_t gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
1331 				       u32 master_ctl)
1332 {
1333 	irqreturn_t ret = IRQ_NONE;
1334 
1335 	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
1336 		u32 iir = I915_READ_FW(GEN8_GT_IIR(0));
1337 		if (iir) {
1338 			I915_WRITE_FW(GEN8_GT_IIR(0), iir);
1339 			ret = IRQ_HANDLED;
1340 
1341 			gen8_cs_irq_handler(&dev_priv->ring[RCS],
1342 					iir, GEN8_RCS_IRQ_SHIFT);
1343 
1344 			gen8_cs_irq_handler(&dev_priv->ring[BCS],
1345 					iir, GEN8_BCS_IRQ_SHIFT);
1346 		} else
1347 			DRM_ERROR("The master control interrupt lied (GT0)!\n");
1348 	}
1349 
1350 	if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
1351 		u32 iir = I915_READ_FW(GEN8_GT_IIR(1));
1352 		if (iir) {
1353 			I915_WRITE_FW(GEN8_GT_IIR(1), iir);
1354 			ret = IRQ_HANDLED;
1355 
1356 			gen8_cs_irq_handler(&dev_priv->ring[VCS],
1357 					iir, GEN8_VCS1_IRQ_SHIFT);
1358 
1359 			gen8_cs_irq_handler(&dev_priv->ring[VCS2],
1360 					iir, GEN8_VCS2_IRQ_SHIFT);
1361 		} else
1362 			DRM_ERROR("The master control interrupt lied (GT1)!\n");
1363 	}
1364 
1365 	if (master_ctl & GEN8_GT_VECS_IRQ) {
1366 		u32 iir = I915_READ_FW(GEN8_GT_IIR(3));
1367 		if (iir) {
1368 			I915_WRITE_FW(GEN8_GT_IIR(3), iir);
1369 			ret = IRQ_HANDLED;
1370 
1371 			gen8_cs_irq_handler(&dev_priv->ring[VECS],
1372 					iir, GEN8_VECS_IRQ_SHIFT);
1373 		} else
1374 			DRM_ERROR("The master control interrupt lied (GT3)!\n");
1375 	}
1376 
1377 	if (master_ctl & GEN8_GT_PM_IRQ) {
1378 		u32 iir = I915_READ_FW(GEN8_GT_IIR(2));
1379 		if (iir & dev_priv->pm_rps_events) {
1380 			I915_WRITE_FW(GEN8_GT_IIR(2),
1381 				      iir & dev_priv->pm_rps_events);
1382 			ret = IRQ_HANDLED;
1383 			gen6_rps_irq_handler(dev_priv, iir);
1384 		} else
1385 			DRM_ERROR("The master control interrupt lied (PM)!\n");
1386 	}
1387 
1388 	return ret;
1389 }
1390 
1391 static bool bxt_port_hotplug_long_detect(enum port port, u32 val)
1392 {
1393 	switch (port) {
1394 	case PORT_A:
1395 		return val & PORTA_HOTPLUG_LONG_DETECT;
1396 	case PORT_B:
1397 		return val & PORTB_HOTPLUG_LONG_DETECT;
1398 	case PORT_C:
1399 		return val & PORTC_HOTPLUG_LONG_DETECT;
1400 	default:
1401 		return false;
1402 	}
1403 }
1404 
1405 static bool spt_port_hotplug2_long_detect(enum port port, u32 val)
1406 {
1407 	switch (port) {
1408 	case PORT_E:
1409 		return val & PORTE_HOTPLUG_LONG_DETECT;
1410 	default:
1411 		return false;
1412 	}
1413 }
1414 
1415 static bool spt_port_hotplug_long_detect(enum port port, u32 val)
1416 {
1417 	switch (port) {
1418 	case PORT_A:
1419 		return val & PORTA_HOTPLUG_LONG_DETECT;
1420 	case PORT_B:
1421 		return val & PORTB_HOTPLUG_LONG_DETECT;
1422 	case PORT_C:
1423 		return val & PORTC_HOTPLUG_LONG_DETECT;
1424 	case PORT_D:
1425 		return val & PORTD_HOTPLUG_LONG_DETECT;
1426 	default:
1427 		return false;
1428 	}
1429 }
1430 
1431 static bool ilk_port_hotplug_long_detect(enum port port, u32 val)
1432 {
1433 	switch (port) {
1434 	case PORT_A:
1435 		return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
1436 	default:
1437 		return false;
1438 	}
1439 }
1440 
1441 static bool pch_port_hotplug_long_detect(enum port port, u32 val)
1442 {
1443 	switch (port) {
1444 	case PORT_B:
1445 		return val & PORTB_HOTPLUG_LONG_DETECT;
1446 	case PORT_C:
1447 		return val & PORTC_HOTPLUG_LONG_DETECT;
1448 	case PORT_D:
1449 		return val & PORTD_HOTPLUG_LONG_DETECT;
1450 	default:
1451 		return false;
1452 	}
1453 }
1454 
1455 static bool i9xx_port_hotplug_long_detect(enum port port, u32 val)
1456 {
1457 	switch (port) {
1458 	case PORT_B:
1459 		return val & PORTB_HOTPLUG_INT_LONG_PULSE;
1460 	case PORT_C:
1461 		return val & PORTC_HOTPLUG_INT_LONG_PULSE;
1462 	case PORT_D:
1463 		return val & PORTD_HOTPLUG_INT_LONG_PULSE;
1464 	default:
1465 		return false;
1466 	}
1467 }
1468 
1469 /*
1470  * Get a bit mask of pins that have triggered, and which ones may be long.
1471  * This can be called multiple times with the same masks to accumulate
1472  * hotplug detection results from several registers.
1473  *
1474  * Note that the caller is expected to zero out the masks initially.
1475  */
1476 static void intel_get_hpd_pins(u32 *pin_mask, u32 *long_mask,
1477 			     u32 hotplug_trigger, u32 dig_hotplug_reg,
1478 			     const u32 hpd[HPD_NUM_PINS],
1479 			     bool long_pulse_detect(enum port port, u32 val))
1480 {
1481 	enum port port;
1482 	int i;
1483 
1484 	for_each_hpd_pin(i) {
1485 		if ((hpd[i] & hotplug_trigger) == 0)
1486 			continue;
1487 
1488 		*pin_mask |= BIT(i);
1489 
1490 		if (!intel_hpd_pin_to_port(i, &port))
1491 			continue;
1492 
1493 		if (long_pulse_detect(port, dig_hotplug_reg))
1494 			*long_mask |= BIT(i);
1495 	}
1496 
1497 	DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x\n",
1498 			 hotplug_trigger, dig_hotplug_reg, *pin_mask);
1500 }
1501 
1502 static void gmbus_irq_handler(struct drm_device *dev)
1503 {
1504 	struct drm_i915_private *dev_priv = dev->dev_private;
1505 
1506 	wake_up_all(&dev_priv->gmbus_wait_queue);
1507 }
1508 
1509 static void dp_aux_irq_handler(struct drm_device *dev)
1510 {
1511 	struct drm_i915_private *dev_priv = dev->dev_private;
1512 
1513 	wake_up_all(&dev_priv->gmbus_wait_queue);
1514 }
1515 
1516 #if defined(CONFIG_DEBUG_FS)
1517 static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
1518 					 uint32_t crc0, uint32_t crc1,
1519 					 uint32_t crc2, uint32_t crc3,
1520 					 uint32_t crc4)
1521 {
1522 	struct drm_i915_private *dev_priv = dev->dev_private;
1523 	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
1524 	struct intel_pipe_crc_entry *entry;
1525 	int head, tail;
1526 
1527 	spin_lock(&pipe_crc->lock);
1528 
1529 	if (!pipe_crc->entries) {
1530 		spin_unlock(&pipe_crc->lock);
1531 		DRM_DEBUG_KMS("spurious interrupt\n");
1532 		return;
1533 	}
1534 
1535 	head = pipe_crc->head;
1536 	tail = pipe_crc->tail;
1537 
1538 	if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
1539 		spin_unlock(&pipe_crc->lock);
1540 		DRM_ERROR("CRC buffer overflowing\n");
1541 		return;
1542 	}
1543 
1544 	entry = &pipe_crc->entries[head];
1545 
1546 	entry->frame = dev->driver->get_vblank_counter(dev, pipe);
1547 	entry->crc[0] = crc0;
1548 	entry->crc[1] = crc1;
1549 	entry->crc[2] = crc2;
1550 	entry->crc[3] = crc3;
1551 	entry->crc[4] = crc4;
1552 
1553 	head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
1554 	pipe_crc->head = head;
1555 
1556 	spin_unlock(&pipe_crc->lock);
1557 
1558 	wake_up_interruptible(&pipe_crc->wq);
1559 }
1560 #else
1561 static inline void
1562 display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
1563 			     uint32_t crc0, uint32_t crc1,
1564 			     uint32_t crc2, uint32_t crc3,
1565 			     uint32_t crc4) {}
1566 #endif
1567 
1568 
1569 static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
1570 {
1571 	struct drm_i915_private *dev_priv = dev->dev_private;
1572 
1573 	display_pipe_crc_irq_handler(dev, pipe,
1574 				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1575 				     0, 0, 0, 0);
1576 }
1577 
1578 static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
1579 {
1580 	struct drm_i915_private *dev_priv = dev->dev_private;
1581 
1582 	display_pipe_crc_irq_handler(dev, pipe,
1583 				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1584 				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
1585 				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
1586 				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
1587 				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
1588 }
1589 
1590 static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
1591 {
1592 	struct drm_i915_private *dev_priv = dev->dev_private;
1593 	uint32_t res1, res2;
1594 
1595 	if (INTEL_INFO(dev)->gen >= 3)
1596 		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
1597 	else
1598 		res1 = 0;
1599 
1600 	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
1601 		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
1602 	else
1603 		res2 = 0;
1604 
1605 	display_pipe_crc_irq_handler(dev, pipe,
1606 				     I915_READ(PIPE_CRC_RES_RED(pipe)),
1607 				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
1608 				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
1609 				     res1, res2);
1610 }
1611 
1612 /* The RPS events need forcewake, so we add them to a work queue and mask their
1613  * IMR bits until the work is done. Other interrupts can be processed without
1614  * the work queue. */
1615 static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
1616 {
1617 	if (pm_iir & dev_priv->pm_rps_events) {
1618 		spin_lock(&dev_priv->irq_lock);
1619 		gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
1620 		if (dev_priv->rps.interrupts_enabled) {
1621 			dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
1622 			queue_work(dev_priv->wq, &dev_priv->rps.work);
1623 		}
1624 		spin_unlock(&dev_priv->irq_lock);
1625 	}
1626 
1627 	if (INTEL_INFO(dev_priv)->gen >= 8)
1628 		return;
1629 
1630 	if (HAS_VEBOX(dev_priv->dev)) {
1631 		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
1632 			notify_ring(&dev_priv->ring[VECS]);
1633 
1634 		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
1635 			DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
1636 	}
1637 }
1638 
1639 static bool intel_pipe_handle_vblank(struct drm_device *dev, enum pipe pipe)
1640 {
1641 	return drm_handle_vblank(dev, pipe);
1645 }
1646 
1647 static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
1648 {
1649 	struct drm_i915_private *dev_priv = dev->dev_private;
1650 	u32 pipe_stats[I915_MAX_PIPES] = { };
1651 	int pipe;
1652 
1653 	spin_lock(&dev_priv->irq_lock);
1654 	for_each_pipe(dev_priv, pipe) {
1655 		i915_reg_t reg;
1656 		u32 mask, iir_bit = 0;
1657 
1658 		/*
1659 		 * PIPESTAT bits get signalled even when the interrupt is
1660 		 * disabled with the mask bits, and some of the status bits do
1661 		 * not generate interrupts at all (like the underrun bit). Hence
1662 		 * we need to be careful that we only handle what we want to
1663 		 * handle.
1664 		 */
1665 
1666 		/* FIFO underruns are filtered in the underrun handler. */
1667 		mask = PIPE_FIFO_UNDERRUN_STATUS;
1668 
1669 		switch (pipe) {
1670 		case PIPE_A:
1671 			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
1672 			break;
1673 		case PIPE_B:
1674 			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
1675 			break;
1676 		case PIPE_C:
1677 			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
1678 			break;
1679 		}
1680 		if (iir & iir_bit)
1681 			mask |= dev_priv->pipestat_irq_mask[pipe];
1682 
1683 		if (!mask)
1684 			continue;
1685 
1686 		reg = PIPESTAT(pipe);
1687 		mask |= PIPESTAT_INT_ENABLE_MASK;
1688 		pipe_stats[pipe] = I915_READ(reg) & mask;
1689 
1690 		/*
1691 		 * Clear the PIPE*STAT regs before the IIR
1692 		 */
1693 		if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS |
1694 					PIPESTAT_INT_STATUS_MASK))
1695 			I915_WRITE(reg, pipe_stats[pipe]);
1696 	}
1697 	spin_unlock(&dev_priv->irq_lock);
1698 
1699 	for_each_pipe(dev_priv, pipe) {
1700 		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
1701 		    intel_pipe_handle_vblank(dev, pipe))
1702 			intel_check_page_flip(dev, pipe);
1703 
1704 		if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) {
1705 			intel_prepare_page_flip(dev, pipe);
1706 			intel_finish_page_flip(dev, pipe);
1707 		}
1708 
1709 		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1710 			i9xx_pipe_crc_irq_handler(dev, pipe);
1711 
1712 		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1713 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1714 	}
1715 
1716 	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
1717 		gmbus_irq_handler(dev);
1718 }
1719 
1720 static void i9xx_hpd_irq_handler(struct drm_device *dev)
1721 {
1722 	struct drm_i915_private *dev_priv = dev->dev_private;
1723 	u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
1724 	u32 pin_mask = 0, long_mask = 0;
1725 
1726 	if (!hotplug_status)
1727 		return;
1728 
1729 	I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
1730 	/*
1731 	 * Make sure hotplug status is cleared before we clear IIR, or else we
1732 	 * may miss hotplug events.
1733 	 */
1734 	POSTING_READ(PORT_HOTPLUG_STAT);
1735 
1736 	if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
1737 		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
1738 
1739 		if (hotplug_trigger) {
1740 			intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
1741 					   hotplug_trigger, hpd_status_g4x,
1742 					   i9xx_port_hotplug_long_detect);
1743 
1744 			intel_hpd_irq_handler(dev, pin_mask, long_mask);
1745 		}
1746 
1747 		if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
1748 			dp_aux_irq_handler(dev);
1749 	} else {
1750 		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
1751 
1752 		if (hotplug_trigger) {
1753 			intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
1754 					   hotplug_trigger, hpd_status_i915,
1755 					   i9xx_port_hotplug_long_detect);
1756 			intel_hpd_irq_handler(dev, pin_mask, long_mask);
1757 		}
1758 	}
1759 }
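
/*
 * Editorial sketch (assumptions flagged, not from the original file): the
 * "ack the status register, force the write out with a posting read, only
 * then let the caller ack IIR" ordering above is what keeps a hotplug edge
 * from being lost.  With a hypothetical STATUS_REG:
 *
 *	status = readl(STATUS_REG);
 *	if (!status)
 *		return;
 *	writel(status, STATUS_REG);	(write-1-to-clear)
 *	readl(STATUS_REG);		(posting read: the clear has landed
 *					 before the IIR bit is acked, so a
 *					 new edge re-latches both)
 */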
1760 
1761 static irqreturn_t valleyview_irq_handler(int irq, void *arg)
1762 {
1763 	struct drm_device *dev = arg;
1764 	struct drm_i915_private *dev_priv = dev->dev_private;
1765 	u32 iir, gt_iir, pm_iir;
1766 	irqreturn_t ret = IRQ_NONE;
1767 
1768 	if (!intel_irqs_enabled(dev_priv))
1769 		return IRQ_NONE;
1770 
1771 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
1772 	disable_rpm_wakeref_asserts(dev_priv);
1773 
1774 	while (true) {
1775 		/* Find, clear, then process each source of interrupt */
1776 
1777 		gt_iir = I915_READ(GTIIR);
1778 		if (gt_iir)
1779 			I915_WRITE(GTIIR, gt_iir);
1780 
1781 		pm_iir = I915_READ(GEN6_PMIIR);
1782 		if (pm_iir)
1783 			I915_WRITE(GEN6_PMIIR, pm_iir);
1784 
1785 		iir = I915_READ(VLV_IIR);
1786 		if (iir) {
1787 			/* Consume port before clearing IIR or we'll miss events */
1788 			if (iir & I915_DISPLAY_PORT_INTERRUPT)
1789 				i9xx_hpd_irq_handler(dev);
1790 			I915_WRITE(VLV_IIR, iir);
1791 		}
1792 
1793 		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
1794 			goto out;
1795 
1796 		ret = IRQ_HANDLED;
1797 
1798 		if (gt_iir)
1799 			snb_gt_irq_handler(dev, dev_priv, gt_iir);
1800 		if (pm_iir)
1801 			gen6_rps_irq_handler(dev_priv, pm_iir);
1802 		/* Call regardless, as some status bits might not be
1803 		 * signalled in iir */
1804 		valleyview_pipestat_irq_handler(dev, iir);
1805 	}
1806 
1807 out:
1808 	enable_rpm_wakeref_asserts(dev_priv);
1809 
1810 	return ret;
1811 }
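
/*
 * Editorial sketch: the loop above is the driver's "find, clear, then
 * process" discipline -- snapshot each IIR, ack it, and only then run the
 * handlers, repeating until every IIR reads back zero.  Schematically,
 * with hypothetical read_and_ack_all_iirs()/dispatch() helpers:
 *
 *	for (;;) {
 *		pending = read_and_ack_all_iirs();
 *		if (!pending)
 *			break;
 *		ret = IRQ_HANDLED;
 *		dispatch(pending);
 *	}
 *
 * Acking before dispatch means an event that fires again while dispatch()
 * runs latches a fresh IIR bit and is caught on the next pass.
 */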
1812 
1813 static irqreturn_t cherryview_irq_handler(int irq, void *arg)
1814 {
1815 	struct drm_device *dev = arg;
1816 	struct drm_i915_private *dev_priv = dev->dev_private;
1817 	u32 master_ctl, iir;
1818 	irqreturn_t ret = IRQ_NONE;
1819 
1820 	if (!intel_irqs_enabled(dev_priv))
1821 		return IRQ_NONE;
1822 
1823 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
1824 	disable_rpm_wakeref_asserts(dev_priv);
1825 
1826 	for (;;) {
1827 		master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
1828 		iir = I915_READ(VLV_IIR);
1829 
1830 		if (master_ctl == 0 && iir == 0)
1831 			break;
1832 
1833 		ret = IRQ_HANDLED;
1834 
1835 		I915_WRITE(GEN8_MASTER_IRQ, 0);
1836 
1837 		/* Find, clear, then process each source of interrupt */
1838 
1839 		if (iir) {
1840 			/* Consume port before clearing IIR or we'll miss events */
1841 			if (iir & I915_DISPLAY_PORT_INTERRUPT)
1842 				i9xx_hpd_irq_handler(dev);
1843 			I915_WRITE(VLV_IIR, iir);
1844 		}
1845 
1846 		gen8_gt_irq_handler(dev_priv, master_ctl);
1847 
1848 		/* Call regardless, as some status bits might not be
1849 		 * signalled in iir */
1850 		valleyview_pipestat_irq_handler(dev, iir);
1851 
1852 		I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
1853 		POSTING_READ(GEN8_MASTER_IRQ);
1854 	}
1855 
1856 	enable_rpm_wakeref_asserts(dev_priv);
1857 
1858 	return ret;
1859 }
1860 
1861 static void ibx_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
1862 				const u32 hpd[HPD_NUM_PINS])
1863 {
1864 	struct drm_i915_private *dev_priv = to_i915(dev);
1865 	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
1866 
1867 	/*
1868 	 * Somehow the PCH doesn't seem to really ack the interrupt to the CPU
1869 	 * unless we touch the hotplug register, even if hotplug_trigger is
1870 	 * zero. Not acking leads to "The master control interrupt lied (SDE)!"
1871 	 * errors.
1872 	 */
1873 	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
1874 	if (!hotplug_trigger) {
1875 		u32 mask = PORTA_HOTPLUG_STATUS_MASK |
1876 			PORTD_HOTPLUG_STATUS_MASK |
1877 			PORTC_HOTPLUG_STATUS_MASK |
1878 			PORTB_HOTPLUG_STATUS_MASK;
1879 		dig_hotplug_reg &= ~mask;
1880 	}
1881 
1882 	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
1883 	if (!hotplug_trigger)
1884 		return;
1885 
1886 	intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
1887 			   dig_hotplug_reg, hpd,
1888 			   pch_port_hotplug_long_detect);
1889 
1890 	intel_hpd_irq_handler(dev, pin_mask, long_mask);
1891 }
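
/*
 * Editorial sketch of the quirk handled above: the read/modify/write of
 * PCH_PORT_HOTPLUG is itself the ack, so it must happen even for a zero
 * trigger; the masking merely avoids acking (and losing) status bits we
 * are not about to handle.  With a hypothetical PORT_STATUS_BITS mask:
 *
 *	dig = I915_READ(PCH_PORT_HOTPLUG);
 *	if (!hotplug_trigger)
 *		dig &= ~PORT_STATUS_BITS;	(don't ack unhandled bits)
 *	I915_WRITE(PCH_PORT_HOTPLUG, dig);	(this write is the ack)
 *	if (!hotplug_trigger)
 *		return;
 */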
1892 
1893 static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
1894 {
1895 	struct drm_i915_private *dev_priv = dev->dev_private;
1896 	int pipe;
1897 	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
1898 
1899 	ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx);
1900 
1901 	if (pch_iir & SDE_AUDIO_POWER_MASK) {
1902 		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
1903 			       SDE_AUDIO_POWER_SHIFT);
1904 		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
1905 				 port_name(port));
1906 	}
1907 
1908 	if (pch_iir & SDE_AUX_MASK)
1909 		dp_aux_irq_handler(dev);
1910 
1911 	if (pch_iir & SDE_GMBUS)
1912 		gmbus_irq_handler(dev);
1913 
1914 	if (pch_iir & SDE_AUDIO_HDCP_MASK)
1915 		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
1916 
1917 	if (pch_iir & SDE_AUDIO_TRANS_MASK)
1918 		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
1919 
1920 	if (pch_iir & SDE_POISON)
1921 		DRM_ERROR("PCH poison interrupt\n");
1922 
1923 	if (pch_iir & SDE_FDI_MASK)
1924 		for_each_pipe(dev_priv, pipe)
1925 			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
1926 					 pipe_name(pipe),
1927 					 I915_READ(FDI_RX_IIR(pipe)));
1928 
1929 	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
1930 		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
1931 
1932 	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
1933 		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
1934 
1935 	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
1936 		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);
1937 
1938 	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
1939 		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
1940 }
1941 
1942 static void ivb_err_int_handler(struct drm_device *dev)
1943 {
1944 	struct drm_i915_private *dev_priv = dev->dev_private;
1945 	u32 err_int = I915_READ(GEN7_ERR_INT);
1946 	enum pipe pipe;
1947 
1948 	if (err_int & ERR_INT_POISON)
1949 		DRM_ERROR("Poison interrupt\n");
1950 
1951 	for_each_pipe(dev_priv, pipe) {
1952 		if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
1953 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1954 
1955 		if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
1956 			if (IS_IVYBRIDGE(dev))
1957 				ivb_pipe_crc_irq_handler(dev, pipe);
1958 			else
1959 				hsw_pipe_crc_irq_handler(dev, pipe);
1960 		}
1961 	}
1962 
1963 	I915_WRITE(GEN7_ERR_INT, err_int);
1964 }
1965 
1966 static void cpt_serr_int_handler(struct drm_device *dev)
1967 {
1968 	struct drm_i915_private *dev_priv = dev->dev_private;
1969 	u32 serr_int = I915_READ(SERR_INT);
1970 
1971 	if (serr_int & SERR_INT_POISON)
1972 		DRM_ERROR("PCH poison interrupt\n");
1973 
1974 	if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
1975 		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);
1976 
1977 	if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
1978 		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
1979 
1980 	if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
1981 		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_C);
1982 
1983 	I915_WRITE(SERR_INT, serr_int);
1984 }
1985 
1986 static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
1987 {
1988 	struct drm_i915_private *dev_priv = dev->dev_private;
1989 	int pipe;
1990 	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
1991 
1992 	ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt);
1993 
1994 	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
1995 		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
1996 			       SDE_AUDIO_POWER_SHIFT_CPT);
1997 		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
1998 				 port_name(port));
1999 	}
2000 
2001 	if (pch_iir & SDE_AUX_MASK_CPT)
2002 		dp_aux_irq_handler(dev);
2003 
2004 	if (pch_iir & SDE_GMBUS_CPT)
2005 		gmbus_irq_handler(dev);
2006 
2007 	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
2008 		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
2009 
2010 	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
2011 		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
2012 
2013 	if (pch_iir & SDE_FDI_MASK_CPT)
2014 		for_each_pipe(dev_priv, pipe)
2015 			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
2016 					 pipe_name(pipe),
2017 					 I915_READ(FDI_RX_IIR(pipe)));
2018 
2019 	if (pch_iir & SDE_ERROR_CPT)
2020 		cpt_serr_int_handler(dev);
2021 }
2022 
2023 static void spt_irq_handler(struct drm_device *dev, u32 pch_iir)
2024 {
2025 	struct drm_i915_private *dev_priv = dev->dev_private;
2026 	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
2027 		~SDE_PORTE_HOTPLUG_SPT;
2028 	u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
2029 	u32 pin_mask = 0, long_mask = 0;
2030 
2031 	if (hotplug_trigger) {
2032 		u32 dig_hotplug_reg;
2033 
2034 		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2035 		I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2036 
2037 		intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
2038 				   dig_hotplug_reg, hpd_spt,
2039 				   spt_port_hotplug_long_detect);
2040 	}
2041 
2042 	if (hotplug2_trigger) {
2043 		u32 dig_hotplug_reg;
2044 
2045 		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2);
2046 		I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg);
2047 
2048 		intel_get_hpd_pins(&pin_mask, &long_mask, hotplug2_trigger,
2049 				   dig_hotplug_reg, hpd_spt,
2050 				   spt_port_hotplug2_long_detect);
2051 	}
2052 
2053 	if (pin_mask)
2054 		intel_hpd_irq_handler(dev, pin_mask, long_mask);
2055 
2056 	if (pch_iir & SDE_GMBUS_CPT)
2057 		gmbus_irq_handler(dev);
2058 }
2059 
2060 static void ilk_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
2061 				const u32 hpd[HPD_NUM_PINS])
2062 {
2063 	struct drm_i915_private *dev_priv = to_i915(dev);
2064 	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2065 
2066 	dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
2067 	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);
2068 
2069 	intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
2070 			   dig_hotplug_reg, hpd,
2071 			   ilk_port_hotplug_long_detect);
2072 
2073 	intel_hpd_irq_handler(dev, pin_mask, long_mask);
2074 }
2075 
2076 static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
2077 {
2078 	struct drm_i915_private *dev_priv = dev->dev_private;
2079 	enum pipe pipe;
2080 	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;
2081 
2082 	if (hotplug_trigger)
2083 		ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_ilk);
2084 
2085 	if (de_iir & DE_AUX_CHANNEL_A)
2086 		dp_aux_irq_handler(dev);
2087 
2088 	if (de_iir & DE_GSE)
2089 		intel_opregion_asle_intr(dev);
2090 
2091 	if (de_iir & DE_POISON)
2092 		DRM_ERROR("Poison interrupt\n");
2093 
2094 	for_each_pipe(dev_priv, pipe) {
2095 		if (de_iir & DE_PIPE_VBLANK(pipe) &&
2096 		    intel_pipe_handle_vblank(dev, pipe))
2097 			intel_check_page_flip(dev, pipe);
2098 
2099 		if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
2100 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2101 
2102 		if (de_iir & DE_PIPE_CRC_DONE(pipe))
2103 			i9xx_pipe_crc_irq_handler(dev, pipe);
2104 
2105 		/* plane/pipes map 1:1 on ilk+ */
2106 		if (de_iir & DE_PLANE_FLIP_DONE(pipe)) {
2107 			intel_prepare_page_flip(dev, pipe);
2108 			intel_finish_page_flip_plane(dev, pipe);
2109 		}
2110 	}
2111 
2112 	/* check event from PCH */
2113 	if (de_iir & DE_PCH_EVENT) {
2114 		u32 pch_iir = I915_READ(SDEIIR);
2115 
2116 		if (HAS_PCH_CPT(dev))
2117 			cpt_irq_handler(dev, pch_iir);
2118 		else
2119 			ibx_irq_handler(dev, pch_iir);
2120 
2121 		/* should clear PCH hotplug event before clearing CPU irq */
2122 		I915_WRITE(SDEIIR, pch_iir);
2123 	}
2124 
2125 	if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
2126 		ironlake_rps_change_irq_handler(dev);
2127 }
2128 
2129 static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
2130 {
2131 	struct drm_i915_private *dev_priv = dev->dev_private;
2132 	enum pipe pipe;
2133 	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;
2134 
2135 	if (hotplug_trigger)
2136 		ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_ivb);
2137 
2138 	if (de_iir & DE_ERR_INT_IVB)
2139 		ivb_err_int_handler(dev);
2140 
2141 	if (de_iir & DE_AUX_CHANNEL_A_IVB)
2142 		dp_aux_irq_handler(dev);
2143 
2144 	if (de_iir & DE_GSE_IVB)
2145 		intel_opregion_asle_intr(dev);
2146 
2147 	for_each_pipe(dev_priv, pipe) {
2148 		if (de_iir & DE_PIPE_VBLANK_IVB(pipe) &&
2149 		    intel_pipe_handle_vblank(dev, pipe))
2150 			intel_check_page_flip(dev, pipe);
2151 
2152 		/* plane/pipes map 1:1 on ilk+ */
2153 		if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) {
2154 			intel_prepare_page_flip(dev, pipe);
2155 			intel_finish_page_flip_plane(dev, pipe);
2156 		}
2157 	}
2158 
2159 	/* check event from PCH */
2160 	if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
2161 		u32 pch_iir = I915_READ(SDEIIR);
2162 
2163 		cpt_irq_handler(dev, pch_iir);
2164 
2165 		/* clear PCH hotplug event before clear CPU irq */
2166 		I915_WRITE(SDEIIR, pch_iir);
2167 	}
2168 }
2169 
2170 /*
2171  * To handle irqs with the minimum potential races with fresh interrupts, we:
2172  * 1 - Disable Master Interrupt Control.
2173  * 2 - Find the source(s) of the interrupt.
2174  * 3 - Clear the Interrupt Identity bits (IIR).
2175  * 4 - Process the interrupt(s) that had bits set in the IIRs.
2176  * 5 - Re-enable Master Interrupt Control.
2177  */
2178 static irqreturn_t ironlake_irq_handler(int irq, void *arg)
2179 {
2180 	struct drm_device *dev = arg;
2181 	struct drm_i915_private *dev_priv = dev->dev_private;
2182 	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
2183 	irqreturn_t ret = IRQ_NONE;
2184 
2185 	if (!intel_irqs_enabled(dev_priv))
2186 		return IRQ_NONE;
2187 
2188 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
2189 	disable_rpm_wakeref_asserts(dev_priv);
2190 
2191 	/* We get interrupts on unclaimed registers, so check for this before we
2192 	 * do any I915_{READ,WRITE}. */
2193 	intel_uncore_check_errors(dev);
2194 
2195 	/* disable master interrupt before clearing iir  */
2196 	de_ier = I915_READ(DEIER);
2197 	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
2198 	POSTING_READ(DEIER);
2199 
2200 	/* Disable south interrupts. We'll only write to SDEIIR once, so further
2201 	 * interrupts will be stored on its back queue, and then we'll be
2202 	 * able to process them after we restore SDEIER (as soon as we restore
2203 	 * it, we'll get an interrupt if SDEIIR still has something to process
2204 	 * due to its back queue). */
2205 	if (!HAS_PCH_NOP(dev)) {
2206 		sde_ier = I915_READ(SDEIER);
2207 		I915_WRITE(SDEIER, 0);
2208 		POSTING_READ(SDEIER);
2209 	}
2210 
2211 	/* Find, clear, then process each source of interrupt */
2212 
2213 	gt_iir = I915_READ(GTIIR);
2214 	if (gt_iir) {
2215 		I915_WRITE(GTIIR, gt_iir);
2216 		ret = IRQ_HANDLED;
2217 		if (INTEL_INFO(dev)->gen >= 6)
2218 			snb_gt_irq_handler(dev, dev_priv, gt_iir);
2219 		else
2220 			ilk_gt_irq_handler(dev, dev_priv, gt_iir);
2221 	}
2222 
2223 	de_iir = I915_READ(DEIIR);
2224 	if (de_iir) {
2225 		I915_WRITE(DEIIR, de_iir);
2226 		ret = IRQ_HANDLED;
2227 		if (INTEL_INFO(dev)->gen >= 7)
2228 			ivb_display_irq_handler(dev, de_iir);
2229 		else
2230 			ilk_display_irq_handler(dev, de_iir);
2231 	}
2232 
2233 	if (INTEL_INFO(dev)->gen >= 6) {
2234 		u32 pm_iir = I915_READ(GEN6_PMIIR);
2235 		if (pm_iir) {
2236 			I915_WRITE(GEN6_PMIIR, pm_iir);
2237 			ret = IRQ_HANDLED;
2238 			gen6_rps_irq_handler(dev_priv, pm_iir);
2239 		}
2240 	}
2241 
2242 	I915_WRITE(DEIER, de_ier);
2243 	POSTING_READ(DEIER);
2244 	if (!HAS_PCH_NOP(dev)) {
2245 		I915_WRITE(SDEIER, sde_ier);
2246 		POSTING_READ(SDEIER);
2247 	}
2248 
2249 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
2250 	enable_rpm_wakeref_asserts(dev_priv);
2251 
2252 	return ret;
2253 }
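
/*
 * Editorial sketch of the five-step recipe documented above, reduced to
 * its skeleton (master_disable()/master_enable()/handle() and IIR_REG are
 * hypothetical names):
 *
 *	master_disable();		(1: no fresh interrupt races us)
 *	iir = readl(IIR_REG);		(2: find the source(s))
 *	writel(iir, IIR_REG);		(3: clear the identity bits)
 *	if (iir)
 *		handle(iir);		(4: process what was latched)
 *	master_enable();		(5: re-enable; anything that arrived
 *					 meanwhile re-asserts the line)
 */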
2254 
2255 static void bxt_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
2256 				const u32 hpd[HPD_NUM_PINS])
2257 {
2258 	struct drm_i915_private *dev_priv = to_i915(dev);
2259 	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2260 
2261 	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2262 	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2263 
2264 	intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
2265 			   dig_hotplug_reg, hpd,
2266 			   bxt_port_hotplug_long_detect);
2267 
2268 	intel_hpd_irq_handler(dev, pin_mask, long_mask);
2269 }
2270 
2271 static irqreturn_t gen8_irq_handler(int irq, void *arg)
2272 {
2273 	struct drm_device *dev = arg;
2274 	struct drm_i915_private *dev_priv = dev->dev_private;
2275 	u32 master_ctl;
2276 	irqreturn_t ret = IRQ_NONE;
2277 	uint32_t tmp = 0;
2278 	enum pipe pipe;
2279 	u32 aux_mask = GEN8_AUX_CHANNEL_A;
2280 
2281 	if (!intel_irqs_enabled(dev_priv))
2282 		return IRQ_NONE;
2283 
2284 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
2285 	disable_rpm_wakeref_asserts(dev_priv);
2286 
2287 	if (INTEL_INFO(dev_priv)->gen >= 9)
2288 		aux_mask |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
2289 			GEN9_AUX_CHANNEL_D;
2290 
2291 	master_ctl = I915_READ_FW(GEN8_MASTER_IRQ);
2292 	master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
2293 	if (!master_ctl)
2294 		goto out;
2295 
2296 	I915_WRITE_FW(GEN8_MASTER_IRQ, 0);
2297 
2298 	/* Find, clear, then process each source of interrupt */
2299 
2300 	ret = gen8_gt_irq_handler(dev_priv, master_ctl);
2301 
2302 	if (master_ctl & GEN8_DE_MISC_IRQ) {
2303 		tmp = I915_READ(GEN8_DE_MISC_IIR);
2304 		if (tmp) {
2305 			I915_WRITE(GEN8_DE_MISC_IIR, tmp);
2306 			ret = IRQ_HANDLED;
2307 			if (tmp & GEN8_DE_MISC_GSE)
2308 				intel_opregion_asle_intr(dev);
2309 			else
2310 				DRM_ERROR("Unexpected DE Misc interrupt\n");
2311 		} else
2312 			DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
2314 	}
2315 
2316 	if (master_ctl & GEN8_DE_PORT_IRQ) {
2317 		tmp = I915_READ(GEN8_DE_PORT_IIR);
2318 		if (tmp) {
2319 			bool found = false;
2320 			u32 hotplug_trigger = 0;
2321 
2322 			if (IS_BROXTON(dev_priv))
2323 				hotplug_trigger = tmp & BXT_DE_PORT_HOTPLUG_MASK;
2324 			else if (IS_BROADWELL(dev_priv))
2325 				hotplug_trigger = tmp & GEN8_PORT_DP_A_HOTPLUG;
2326 
2327 			I915_WRITE(GEN8_DE_PORT_IIR, tmp);
2328 			ret = IRQ_HANDLED;
2329 
2330 			if (tmp & aux_mask) {
2331 				dp_aux_irq_handler(dev);
2332 				found = true;
2333 			}
2334 
2335 			if (hotplug_trigger) {
2336 				if (IS_BROXTON(dev))
2337 					bxt_hpd_irq_handler(dev, hotplug_trigger, hpd_bxt);
2338 				else
2339 					ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_bdw);
2340 				found = true;
2341 			}
2342 
2343 			if (IS_BROXTON(dev) && (tmp & BXT_DE_PORT_GMBUS)) {
2344 				gmbus_irq_handler(dev);
2345 				found = true;
2346 			}
2347 
2348 			if (!found)
2349 				DRM_ERROR("Unexpected DE Port interrupt\n");
2350 		} else
2351 			DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
2353 	}
2354 
2355 	for_each_pipe(dev_priv, pipe) {
2356 		uint32_t pipe_iir, flip_done = 0, fault_errors = 0;
2357 
2358 		if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
2359 			continue;
2360 
2361 		pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
2362 		if (pipe_iir) {
2363 			ret = IRQ_HANDLED;
2364 			I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
2365 
2366 			if (pipe_iir & GEN8_PIPE_VBLANK &&
2367 			    intel_pipe_handle_vblank(dev, pipe))
2368 				intel_check_page_flip(dev, pipe);
2369 
2370 			if (INTEL_INFO(dev_priv)->gen >= 9)
2371 				flip_done = pipe_iir & GEN9_PIPE_PLANE1_FLIP_DONE;
2372 			else
2373 				flip_done = pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE;
2374 
2375 			if (flip_done) {
2376 				intel_prepare_page_flip(dev, pipe);
2377 				intel_finish_page_flip_plane(dev, pipe);
2378 			}
2379 
2380 			if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
2381 				hsw_pipe_crc_irq_handler(dev, pipe);
2382 
2383 			if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN)
2384 				intel_cpu_fifo_underrun_irq_handler(dev_priv,
2385 								    pipe);
2386 
2388 			if (INTEL_INFO(dev_priv)->gen >= 9)
2389 				fault_errors = pipe_iir & GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
2390 			else
2391 				fault_errors = pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
2392 
2393 			if (fault_errors)
2394 				DRM_ERROR("Fault errors on pipe %c\n: 0x%08x",
2395 					  pipe_name(pipe),
2396 					  pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS);
2397 		} else
2398 			DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
2399 	}
2400 
2401 	if (HAS_PCH_SPLIT(dev) && !HAS_PCH_NOP(dev) &&
2402 	    master_ctl & GEN8_DE_PCH_IRQ) {
2403 		/*
2404 		 * FIXME(BDW): Assume for now that the new interrupt handling
2405 		 * scheme also closed the SDE interrupt handling race we've seen
2406 		 * on older pch-split platforms. But this needs testing.
2407 		 */
2408 		u32 pch_iir = I915_READ(SDEIIR);
2409 		if (pch_iir) {
2410 			I915_WRITE(SDEIIR, pch_iir);
2411 			ret = IRQ_HANDLED;
2412 
2413 			if (HAS_PCH_SPT(dev_priv))
2414 				spt_irq_handler(dev, pch_iir);
2415 			else
2416 				cpt_irq_handler(dev, pch_iir);
2417 		} else {
2418 			/*
2419 			 * Like on previous PCH there seems to be something
2420 			 * fishy going on with forwarding PCH interrupts.
2421 			 */
2422 			DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n");
2423 		}
2424 	}
2425 
2426 	I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
2427 	POSTING_READ_FW(GEN8_MASTER_IRQ);
2428 
2429 out:
2430 	enable_rpm_wakeref_asserts(dev_priv);
2431 
2432 	return ret;
2433 }
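
/*
 * Editorial sketch: gen8 dispatch is hierarchical -- one master register
 * says which per-domain IIRs are worth reading, and only those get
 * touched.  Abstractly, with hypothetical MASTER_IRQ/MASTER_ENABLE/
 * handle_domain() names:
 *
 *	unsigned long master;
 *	int bit;
 *
 *	master = readl(MASTER_IRQ) & ~MASTER_ENABLE;
 *	writel(0, MASTER_IRQ);			(gate further delivery)
 *	for_each_set_bit(bit, &master, 32)
 *		handle_domain(bit);		(read/ack that domain's IIR)
 *	writel(MASTER_ENABLE, MASTER_IRQ);	(re-open the gate)
 *	readl(MASTER_IRQ);			(posting read)
 */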
2434 
2435 static void i915_error_wake_up(struct drm_i915_private *dev_priv,
2436 			       bool reset_completed)
2437 {
2438 	struct intel_engine_cs *ring;
2439 	int i;
2440 
2441 	/*
2442 	 * Notify all waiters for GPU completion events that reset state has
2443 	 * been changed, and that they need to restart their wait after
2444 	 * checking for potential errors (and bail out to drop locks if there is
2445 	 * a gpu reset pending so that i915_error_work_func can acquire them).
2446 	 */
2447 
2448 	/* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
2449 	for_each_ring(ring, dev_priv, i)
2450 		wake_up_all(&ring->irq_queue);
2451 
2452 	/* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
2453 	wake_up_all(&dev_priv->pending_flip_queue);
2454 
2455 	/*
2456 	 * Signal tasks blocked in i915_gem_wait_for_error that the pending
2457 	 * reset state is cleared.
2458 	 */
2459 	if (reset_completed)
2460 		wake_up_all(&dev_priv->gpu_error.reset_queue);
2461 }
2462 
2463 /**
2464  * i915_reset_and_wakeup - do process context error handling work
2465  * @dev: drm device
2466  *
2467  * Fire an error uevent so userspace can see that a hang or error
2468  * was detected.
2469  */
2470 static void i915_reset_and_wakeup(struct drm_device *dev)
2471 {
2472 	struct drm_i915_private *dev_priv = to_i915(dev);
2473 	struct i915_gpu_error *error = &dev_priv->gpu_error;
2474 	char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
2475 	char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
2476 	char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
2477 	int ret;
2478 
2479 	kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event);
2480 
2481 	/*
2482 	 * Note that there's only one work item which does gpu resets, so we
2483 	 * need not worry about concurrent gpu resets potentially incrementing
2484 	 * error->reset_counter twice. We only need to take care of another
2485 	 * racing irq/hangcheck declaring the gpu dead for a second time. A
2486 	 * quick check for that is good enough: schedule_work ensures the
2487 	 * correct ordering between hang detection and this work item, and since
2488 	 * the reset in-progress bit is only ever set by code outside of this
2489 	 * work we don't need to worry about any other races.
2490 	 */
2491 	if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
2492 		DRM_DEBUG_DRIVER("resetting chip\n");
2493 		kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
2494 				   reset_event);
2495 
2496 		/*
2497 		 * In most cases it's guaranteed that we get here with an RPM
2498 		 * reference held, for example because there is a pending GPU
2499 		 * request that won't finish until the reset is done. This
2500 		 * isn't the case at least when we get here by doing a
2501 		 * simulated reset via debugfs, so get an RPM reference.
2502 		 */
2503 		intel_runtime_pm_get(dev_priv);
2504 
2505 		intel_prepare_reset(dev);
2506 
2507 		/*
2508 		 * All state reset _must_ be completed before we update the
2509 		 * reset counter, for otherwise waiters might miss the reset
2510 		 * pending state and not properly drop locks, resulting in
2511 		 * deadlocks with the reset work.
2512 		 */
2513 		ret = i915_reset(dev);
2514 
2515 		intel_finish_reset(dev);
2516 
2517 		intel_runtime_pm_put(dev_priv);
2518 
2519 		if (ret == 0) {
2520 			/*
2521 			 * After all the gem state is reset, increment the reset
2522 			 * counter and wake up everyone waiting for the reset to
2523 			 * complete.
2524 			 *
2525 			 * Since unlock operations are a one-sided barrier only,
2526 			 * we need to insert a barrier here to order any seqno
2527 			 * updates before the counter increment.
2529 			 */
2530 			smp_mb__before_atomic();
2531 			atomic_inc(&dev_priv->gpu_error.reset_counter);
2532 
2533 			kobject_uevent_env(&dev->primary->kdev->kobj,
2534 					   KOBJ_CHANGE, reset_done_event);
2535 		} else {
2536 			atomic_or(I915_WEDGED, &error->reset_counter);
2537 		}
2538 
2539 		/*
2540 		 * Note: The wake_up also serves as a memory barrier so that
2541 		 * waiters see the updated value of the reset counter atomic_t.
2542 		 */
2543 		i915_error_wake_up(dev_priv, true);
2544 	}
2545 }
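
/*
 * Editorial sketch of the publish-then-wake pattern used above: complete
 * all state restoration, bump the counter with a barrier in front of it,
 * then wake, so a waiter that re-checks the counter after waking cannot
 * observe stale state:
 *
 *	smp_mb__before_atomic();		(order prior stores before
 *						 the increment)
 *	atomic_inc(&dev_priv->gpu_error.reset_counter);
 *	wake_up_all(&dev_priv->gpu_error.reset_queue);
 *						(the wake is the barrier
 *						 the waiters pair with)
 */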
2546 
2547 static void i915_report_and_clear_eir(struct drm_device *dev)
2548 {
2549 	struct drm_i915_private *dev_priv = dev->dev_private;
2550 	uint32_t instdone[I915_NUM_INSTDONE_REG];
2551 	u32 eir = I915_READ(EIR);
2552 	int pipe, i;
2553 
2554 	if (!eir)
2555 		return;
2556 
2557 	pr_err("render error detected, EIR: 0x%08x\n", eir);
2558 
2559 	i915_get_extra_instdone(dev, instdone);
2560 
2561 	if (IS_G4X(dev)) {
2562 		if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
2563 			u32 ipeir = I915_READ(IPEIR_I965);
2564 
2565 			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2566 			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
2567 			for (i = 0; i < ARRAY_SIZE(instdone); i++)
2568 				pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
2569 			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
2570 			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
2571 			I915_WRITE(IPEIR_I965, ipeir);
2572 			POSTING_READ(IPEIR_I965);
2573 		}
2574 		if (eir & GM45_ERROR_PAGE_TABLE) {
2575 			u32 pgtbl_err = I915_READ(PGTBL_ER);
2576 			pr_err("page table error\n");
2577 			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
2578 			I915_WRITE(PGTBL_ER, pgtbl_err);
2579 			POSTING_READ(PGTBL_ER);
2580 		}
2581 	}
2582 
2583 	if (!IS_GEN2(dev)) {
2584 		if (eir & I915_ERROR_PAGE_TABLE) {
2585 			u32 pgtbl_err = I915_READ(PGTBL_ER);
2586 			pr_err("page table error\n");
2587 			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
2588 			I915_WRITE(PGTBL_ER, pgtbl_err);
2589 			POSTING_READ(PGTBL_ER);
2590 		}
2591 	}
2592 
2593 	if (eir & I915_ERROR_MEMORY_REFRESH) {
2594 		pr_err("memory refresh error:\n");
2595 		for_each_pipe(dev_priv, pipe)
2596 			pr_err("pipe %c stat: 0x%08x\n",
2597 			       pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
2598 		/* pipestat has already been acked */
2599 	}
2600 	if (eir & I915_ERROR_INSTRUCTION) {
2601 		pr_err("instruction error\n");
2602 		pr_err("  INSTPM: 0x%08x\n", I915_READ(INSTPM));
2603 		for (i = 0; i < ARRAY_SIZE(instdone); i++)
2604 			pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
2605 		if (INTEL_INFO(dev)->gen < 4) {
2606 			u32 ipeir = I915_READ(IPEIR);
2607 
2608 			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR));
2609 			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR));
2610 			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD));
2611 			I915_WRITE(IPEIR, ipeir);
2612 			POSTING_READ(IPEIR);
2613 		} else {
2614 			u32 ipeir = I915_READ(IPEIR_I965);
2615 
2616 			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2617 			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
2618 			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
2619 			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
2620 			I915_WRITE(IPEIR_I965, ipeir);
2621 			POSTING_READ(IPEIR_I965);
2622 		}
2623 	}
2624 
2625 	I915_WRITE(EIR, eir);
2626 	POSTING_READ(EIR);
2627 	eir = I915_READ(EIR);
2628 	if (eir) {
2629 		/*
2630 		 * some errors might have become stuck,
2631 		 * mask them.
2632 		 */
2633 		DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
2634 		I915_WRITE(EMR, I915_READ(EMR) | eir);
2635 		I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2636 	}
2637 }
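
/*
 * Editorial note: the tail of the function above is the classic sticky-
 * error dance -- attempt a write-1-to-clear, read back, and permanently
 * mask whatever refuses to clear so it cannot storm:
 *
 *	I915_WRITE(EIR, eir);			(write-1-to-clear attempt)
 *	eir = I915_READ(EIR);
 *	if (eir)				(still set: bit is stuck)
 *		I915_WRITE(EMR, I915_READ(EMR) | eir);
 */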
2638 
2639 /**
2640  * i915_handle_error - handle a gpu error
2641  * @dev: drm device
2642  *
2643  * Do some basic checking of register state at error time and
2644  * dump it to the syslog.  Also call i915_capture_error_state() to make
2645  * sure we get a record and make it available in debugfs.  Fire a uevent
2646  * so userspace knows something bad happened (should trigger collection
2647  * of a ring dump etc.).
2648  */
2649 void i915_handle_error(struct drm_device *dev, bool wedged,
2650 		       const char *fmt, ...)
2651 {
2652 	struct drm_i915_private *dev_priv = dev->dev_private;
2653 	va_list args;
2654 	char error_msg[80];
2655 
2656 	va_start(args, fmt);
2657 	vscnprintf(error_msg, sizeof(error_msg), fmt, args);
2658 	va_end(args);
2659 
2660 	i915_capture_error_state(dev, wedged, error_msg);
2661 	i915_report_and_clear_eir(dev);
2662 
2663 	if (wedged) {
2664 		atomic_or(I915_RESET_IN_PROGRESS_FLAG,
2665 				&dev_priv->gpu_error.reset_counter);
2666 
2667 		/*
2668 		 * Wakeup waiting processes so that the reset function
2669 		 * i915_reset_and_wakeup doesn't deadlock trying to grab
2670 		 * various locks. By bumping the reset counter first, the woken
2671 		 * processes will see a reset in progress and back off,
2672 		 * releasing their locks and then wait for the reset completion.
2673 		 * We must do this for _all_ gpu waiters that might hold locks
2674 		 * that the reset work needs to acquire.
2675 		 *
2676 		 * Note: The wake_up serves as the required memory barrier to
2677 		 * ensure that the waiters see the updated value of the reset
2678 		 * counter atomic_t.
2679 		 */
2680 		i915_error_wake_up(dev_priv, false);
2681 	}
2682 
2683 	i915_reset_and_wakeup(dev);
2684 }
2685 
2686 /* Called from drm generic code, passed 'crtc' which
2687  * we use as a pipe index
2688  */
2689 static int i915_enable_vblank(struct drm_device *dev, unsigned int pipe)
2690 {
2691 	struct drm_i915_private *dev_priv = dev->dev_private;
2692 	unsigned long irqflags;
2693 
2694 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2695 	if (INTEL_INFO(dev)->gen >= 4)
2696 		i915_enable_pipestat(dev_priv, pipe,
2697 				     PIPE_START_VBLANK_INTERRUPT_STATUS);
2698 	else
2699 		i915_enable_pipestat(dev_priv, pipe,
2700 				     PIPE_VBLANK_INTERRUPT_STATUS);
2701 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2702 
2703 	return 0;
2704 }
2705 
2706 static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
2707 {
2708 	struct drm_i915_private *dev_priv = dev->dev_private;
2709 	unsigned long irqflags;
2710 	uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
2711 						     DE_PIPE_VBLANK(pipe);
2712 
2713 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2714 	ilk_enable_display_irq(dev_priv, bit);
2715 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2716 
2717 	return 0;
2718 }
2719 
2720 static int valleyview_enable_vblank(struct drm_device *dev, unsigned int pipe)
2721 {
2722 	struct drm_i915_private *dev_priv = dev->dev_private;
2723 	unsigned long irqflags;
2724 
2725 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2726 	i915_enable_pipestat(dev_priv, pipe,
2727 			     PIPE_START_VBLANK_INTERRUPT_STATUS);
2728 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2729 
2730 	return 0;
2731 }
2732 
2733 static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe)
2734 {
2735 	struct drm_i915_private *dev_priv = dev->dev_private;
2736 	unsigned long irqflags;
2737 
2738 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2739 	bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
2740 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2741 
2742 	return 0;
2743 }
2744 
2745 /* Called from drm generic code, passed 'crtc' which
2746  * we use as a pipe index
2747  */
2748 static void i915_disable_vblank(struct drm_device *dev, unsigned int pipe)
2749 {
2750 	struct drm_i915_private *dev_priv = dev->dev_private;
2751 	unsigned long irqflags;
2752 
2753 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2754 	i915_disable_pipestat(dev_priv, pipe,
2755 			      PIPE_VBLANK_INTERRUPT_STATUS |
2756 			      PIPE_START_VBLANK_INTERRUPT_STATUS);
2757 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2758 }
2759 
2760 static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe)
2761 {
2762 	struct drm_i915_private *dev_priv = dev->dev_private;
2763 	unsigned long irqflags;
2764 	uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
2765 						     DE_PIPE_VBLANK(pipe);
2766 
2767 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2768 	ilk_disable_display_irq(dev_priv, bit);
2769 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2770 }
2771 
2772 static void valleyview_disable_vblank(struct drm_device *dev, unsigned int pipe)
2773 {
2774 	struct drm_i915_private *dev_priv = dev->dev_private;
2775 	unsigned long irqflags;
2776 
2777 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2778 	i915_disable_pipestat(dev_priv, pipe,
2779 			      PIPE_START_VBLANK_INTERRUPT_STATUS);
2780 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2781 }
2782 
2783 static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
2784 {
2785 	struct drm_i915_private *dev_priv = dev->dev_private;
2786 	unsigned long irqflags;
2787 
2788 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2789 	bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
2790 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2791 }
2792 
2793 static bool
2794 ring_idle(struct intel_engine_cs *ring, u32 seqno)
2795 {
2796 	return (list_empty(&ring->request_list) ||
2797 		i915_seqno_passed(seqno, ring->last_submitted_seqno));
2798 }
2799 
2800 static bool
2801 ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
2802 {
2803 	if (INTEL_INFO(dev)->gen >= 8) {
2804 		return (ipehr >> 23) == 0x1c;
2805 	} else {
2806 		ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
2807 		return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
2808 				 MI_SEMAPHORE_REGISTER);
2809 	}
2810 }
2811 
2812 static struct intel_engine_cs *
2813 semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr, u64 offset)
2814 {
2815 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
2816 	struct intel_engine_cs *signaller;
2817 	int i;
2818 
2819 	if (INTEL_INFO(dev_priv->dev)->gen >= 8) {
2820 		for_each_ring(signaller, dev_priv, i) {
2821 			if (ring == signaller)
2822 				continue;
2823 
2824 			if (offset == signaller->semaphore.signal_ggtt[ring->id])
2825 				return signaller;
2826 		}
2827 	} else {
2828 		u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
2829 
2830 		for_each_ring(signaller, dev_priv, i) {
2831 			if (ring == signaller)
2832 				continue;
2833 
2834 			if (sync_bits == signaller->semaphore.mbox.wait[ring->id])
2835 				return signaller;
2836 		}
2837 	}
2838 
2839 	DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n",
2840 		  ring->id, ipehr, offset);
2841 
2842 	return NULL;
2843 }
2844 
2845 static struct intel_engine_cs *
2846 semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
2847 {
2848 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
2849 	u32 cmd, ipehr, head;
2850 	u64 offset = 0;
2851 	int i, backwards;
2852 
2853 	/*
2854 	 * This function does not support execlist mode - any attempt to
2855 	 * proceed further into this function will result in a kernel panic
2856 	 * when dereferencing ring->buffer, which is not set up in execlist
2857 	 * mode.
2858 	 *
2859 	 * The correct way of doing it would be to derive the currently
2860 	 * executing ring buffer from the current context, which is derived
2861 	 * from the currently running request. Unfortunately, to get the
2862 	 * current request we would have to grab the struct_mutex before doing
2863 	 * anything else, which would be ill-advised since some other thread
2864 	 * might have grabbed it already and managed to hang itself, causing
2865 	 * the hang checker to deadlock.
2866 	 *
2867 	 * Therefore, this function does not support execlist mode in its
2868 	 * current form. Just return NULL and move on.
2869 	 */
2870 	if (ring->buffer == NULL)
2871 		return NULL;
2872 
2873 	ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
2874 	if (!ipehr_is_semaphore_wait(ring->dev, ipehr))
2875 		return NULL;
2876 
2877 	/*
2878 	 * HEAD is likely pointing to the dword after the actual command,
2879 	 * so scan backwards until we find the MBOX. But limit it to just 3
2880 	 * or 4 dwords depending on the semaphore wait command size.
2881 	 * Note that we don't care about ACTHD here since that might
2882 	 * point at a batch, and semaphores are always emitted into the
2883 	 * ringbuffer itself.
2884 	 */
2885 	head = I915_READ_HEAD(ring) & HEAD_ADDR;
2886 	backwards = (INTEL_INFO(ring->dev)->gen >= 8) ? 5 : 4;
2887 
2888 	for (i = backwards; i; --i) {
2889 		/*
2890 		 * Be paranoid and presume the hw has gone off into the wild -
2891 		 * our ring is smaller than what the hardware (and hence
2892 		 * HEAD_ADDR) allows. Also handles wrap-around.
2893 		 */
2894 		head &= ring->buffer->size - 1;
2895 
2896 		/* This here seems to blow up */
2897 		cmd = ioread32(ring->buffer->virtual_start + head);
2898 		if (cmd == ipehr)
2899 			break;
2900 
2901 		head -= 4;
2902 	}
2903 
2904 	if (!i)
2905 		return NULL;
2906 
2907 	*seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1;
2908 	if (INTEL_INFO(ring->dev)->gen >= 8) {
2909 		offset = ioread32(ring->buffer->virtual_start + head + 12);
2910 		offset <<= 32;
2911 		offset |= ioread32(ring->buffer->virtual_start + head + 8);
2912 	}
2913 	return semaphore_wait_to_signaller_ring(ring, ipehr, offset);
2914 }
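
/*
 * Editorial note: the backwards scan above leans on the ring size being a
 * power of two, so "head &= size - 1" serves both as the paranoia clamp
 * and as the wrap when head underflows past zero.  E.g. with a 4 KiB
 * ring:
 *
 *	head = 0;
 *	head -= 4;		(head is now 0xfffffffc)
 *	head &= 4096 - 1;	(=> 0xffc, the last dword of the ring)
 */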
2915 
2916 static int semaphore_passed(struct intel_engine_cs *ring)
2917 {
2918 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
2919 	struct intel_engine_cs *signaller;
2920 	u32 seqno;
2921 
2922 	ring->hangcheck.deadlock++;
2923 
2924 	signaller = semaphore_waits_for(ring, &seqno);
2925 	if (signaller == NULL)
2926 		return -1;
2927 
2928 	/* Prevent pathological recursion due to driver bugs */
2929 	if (signaller->hangcheck.deadlock >= I915_NUM_RINGS)
2930 		return -1;
2931 
2932 	if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno))
2933 		return 1;
2934 
2935 	/* cursory check for an unkickable deadlock */
2936 	if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
2937 	    semaphore_passed(signaller) < 0)
2938 		return -1;
2939 
2940 	return 0;
2941 }
2942 
2943 static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
2944 {
2945 	struct intel_engine_cs *ring;
2946 	int i;
2947 
2948 	for_each_ring(ring, dev_priv, i)
2949 		ring->hangcheck.deadlock = 0;
2950 }
2951 
2952 static enum intel_ring_hangcheck_action
2953 ring_stuck(struct intel_engine_cs *ring, u64 acthd)
2954 {
2955 	struct drm_device *dev = ring->dev;
2956 	struct drm_i915_private *dev_priv = dev->dev_private;
2957 	u32 tmp;
2958 
2959 	if (acthd != ring->hangcheck.acthd) {
2960 		if (acthd > ring->hangcheck.max_acthd) {
2961 			ring->hangcheck.max_acthd = acthd;
2962 			return HANGCHECK_ACTIVE;
2963 		}
2964 
2965 		return HANGCHECK_ACTIVE_LOOP;
2966 	}
2967 
2968 	if (IS_GEN2(dev))
2969 		return HANGCHECK_HUNG;
2970 
2971 	/* Is the chip hanging on a WAIT_FOR_EVENT?
2972 	 * If so we can simply poke the RB_WAIT bit
2973 	 * and break the hang. This should work on
2974 	 * all but the second generation chipsets.
2975 	 */
2976 	tmp = I915_READ_CTL(ring);
2977 	if (tmp & RING_WAIT) {
2978 		i915_handle_error(dev, false,
2979 				  "Kicking stuck wait on %s",
2980 				  ring->name);
2981 		I915_WRITE_CTL(ring, tmp);
2982 		return HANGCHECK_KICK;
2983 	}
2984 
2985 	if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
2986 		switch (semaphore_passed(ring)) {
2987 		default:
2988 			return HANGCHECK_HUNG;
2989 		case 1:
2990 			i915_handle_error(dev, false,
2991 					  "Kicking stuck semaphore on %s",
2992 					  ring->name);
2993 			I915_WRITE_CTL(ring, tmp);
2994 			return HANGCHECK_KICK;
2995 		case 0:
2996 			return HANGCHECK_WAIT;
2997 		}
2998 	}
2999 
3000 	return HANGCHECK_HUNG;
3001 }
3002 
3003 /*
3004  * This is called when the chip hasn't reported back with completed
3005  * batchbuffers in a long time. We keep track of per-ring seqno progress and
3006  * if there is no progress, the hangcheck score for that ring is increased.
3007  * Further, acthd is inspected to see if the ring is stuck. If it is stuck,
3008  * we kick the ring. If we see no progress on three subsequent calls
3009  * we assume the chip is wedged and try to fix it by resetting the chip.
3010  */
3011 static void i915_hangcheck_elapsed(struct work_struct *work)
3012 {
3013 	struct drm_i915_private *dev_priv =
3014 		container_of(work, typeof(*dev_priv),
3015 			     gpu_error.hangcheck_work.work);
3016 	struct drm_device *dev = dev_priv->dev;
3017 	struct intel_engine_cs *ring;
3018 	int i;
3019 	int busy_count = 0, rings_hung = 0;
3020 	bool stuck[I915_NUM_RINGS] = { 0 };
3021 #define BUSY 1
3022 #define KICK 5
3023 #define HUNG 20
3024 
3025 	if (!i915.enable_hangcheck)
3026 		return;
3027 
3028 	/*
3029 	 * The hangcheck work is synced during runtime suspend, we don't
3030 	 * require a wakeref. TODO: instead of disabling the asserts make
3031 	 * sure that we hold a reference when this work is running.
3032 	 */
3033 	DISABLE_RPM_WAKEREF_ASSERTS(dev_priv);
3034 
3035 	for_each_ring(ring, dev_priv, i) {
3036 		u64 acthd;
3037 		u32 seqno;
3038 		bool busy = true;
3039 
3040 		semaphore_clear_deadlocks(dev_priv);
3041 
3042 		seqno = ring->get_seqno(ring, false);
3043 		acthd = intel_ring_get_active_head(ring);
3044 
3045 		if (ring->hangcheck.seqno == seqno) {
3046 			if (ring_idle(ring, seqno)) {
3047 				ring->hangcheck.action = HANGCHECK_IDLE;
3048 
3049 				if (waitqueue_active(&ring->irq_queue)) {
3050 					/* Issue a wake-up to catch stuck h/w. */
3051 					if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) {
3052 						if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)))
3053 							DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
3054 								  ring->name);
3055 						else
3056 							DRM_INFO("Fake missed irq on %s\n",
3057 								 ring->name);
3058 						wake_up_all(&ring->irq_queue);
3059 					}
3060 					/* Safeguard against driver failure */
3061 					ring->hangcheck.score += BUSY;
3062 				} else
3063 					busy = false;
3064 			} else {
3065 				/* We always increment the hangcheck score
3066 				 * if the ring is busy and still processing
3067 				 * the same request, so that no single request
3068 				 * can run indefinitely (such as a chain of
3069 				 * batches). The only time we do not increment
3070 				 * the hangcheck score on this ring is if this
3071 				 * ring is in a legitimate wait for another
3072 				 * ring. In that case the waiting ring is a
3073 				 * victim and we want to be sure we catch the
3074 				 * right culprit. Then every time we do kick
3075 				 * the ring, add a small increment to the
3076 				 * score so that we can catch a batch that is
3077 				 * being repeatedly kicked and so responsible
3078 				 * for stalling the machine.
3079 				 */
3080 				ring->hangcheck.action = ring_stuck(ring,
3081 								    acthd);
3082 
3083 				switch (ring->hangcheck.action) {
3084 				case HANGCHECK_IDLE:
3085 				case HANGCHECK_WAIT:
3086 				case HANGCHECK_ACTIVE:
3087 					break;
3088 				case HANGCHECK_ACTIVE_LOOP:
3089 					ring->hangcheck.score += BUSY;
3090 					break;
3091 				case HANGCHECK_KICK:
3092 					ring->hangcheck.score += KICK;
3093 					break;
3094 				case HANGCHECK_HUNG:
3095 					ring->hangcheck.score += HUNG;
3096 					stuck[i] = true;
3097 					break;
3098 				}
3099 			}
3100 		} else {
3101 			ring->hangcheck.action = HANGCHECK_ACTIVE;
3102 
3103 			/* Gradually reduce the count so that we catch DoS
3104 			 * attempts across multiple batches.
3105 			 */
3106 			if (ring->hangcheck.score > 0)
3107 				ring->hangcheck.score--;
3108 
3109 			ring->hangcheck.acthd = ring->hangcheck.max_acthd = 0;
3110 		}
3111 
3112 		ring->hangcheck.seqno = seqno;
3113 		ring->hangcheck.acthd = acthd;
3114 		busy_count += busy;
3115 	}
3116 
3117 	for_each_ring(ring, dev_priv, i) {
3118 		if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
3119 			DRM_INFO("%s on %s\n",
3120 				 stuck[i] ? "stuck" : "no progress",
3121 				 ring->name);
3122 			rings_hung++;
3123 		}
3124 	}
3125 
3126 	if (rings_hung) {
3127 		i915_handle_error(dev, true, "Ring hung");
3128 		goto out;
3129 	}
3130 
3131 	if (busy_count)
3132 		/* Reset timer in case the chip hangs without another request
3133 		 * being added */
3134 		i915_queue_hangcheck(dev);
3135 
3136 out:
3137 	ENABLE_RPM_WAKEREF_ASSERTS(dev_priv);
3138 }
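
/*
 * Editorial sketch of the scoring scheme above, roughly: progress slowly
 * pays the score down, stalls charge it up at a rate matching how
 * suspicious they look, and crossing the threshold declares the ring
 * hung:
 *
 *	if (seqno == last_seqno) {		(no progress this tick)
 *		switch (ring_stuck(ring, acthd)) {
 *		case HANGCHECK_WAIT:        break;	(legitimate wait)
 *		case HANGCHECK_ACTIVE_LOOP: score += BUSY; break;
 *		case HANGCHECK_KICK:        score += KICK; break;
 *		case HANGCHECK_HUNG:        score += HUNG; break;
 *		}
 *	} else if (score > 0)
 *		score--;			(progress: decay)
 *
 *	if (score >= HANGCHECK_SCORE_RING_HUNG)
 *		-> i915_handle_error(dev, true, "Ring hung");
 */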
3139 
3140 void i915_queue_hangcheck(struct drm_device *dev)
3141 {
3142 	struct i915_gpu_error *e = &to_i915(dev)->gpu_error;
3143 
3144 	if (!i915.enable_hangcheck)
3145 		return;
3146 
3147 	/* Don't continually defer the hangcheck so that it is always run at
3148 	 * least once after work has been scheduled on any ring. Otherwise,
3149 	 * we will ignore a hung ring if a second ring is kept busy.
3150 	 */
3151 
3152 	queue_delayed_work(e->hangcheck_wq, &e->hangcheck_work,
3153 			   round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES));
3154 }
3155 
3156 static void ibx_irq_reset(struct drm_device *dev)
3157 {
3158 	struct drm_i915_private *dev_priv = dev->dev_private;
3159 
3160 	if (HAS_PCH_NOP(dev))
3161 		return;
3162 
3163 	GEN5_IRQ_RESET(SDE);
3164 
3165 	if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
3166 		I915_WRITE(SERR_INT, 0xffffffff);
3167 }
3168 
3169 /*
3170  * SDEIER is also touched by the interrupt handler to work around missed PCH
3171  * interrupts. Hence we can't update it after the interrupt handler is enabled -
3172  * instead we unconditionally enable all PCH interrupt sources here, but then
3173  * only unmask them as needed with SDEIMR.
3174  *
3175  * This function needs to be called before interrupts are enabled.
3176  */
3177 static void ibx_irq_pre_postinstall(struct drm_device *dev)
3178 {
3179 	struct drm_i915_private *dev_priv = dev->dev_private;
3180 
3181 	if (HAS_PCH_NOP(dev))
3182 		return;
3183 
3184 	WARN_ON(I915_READ(SDEIER) != 0);
3185 	I915_WRITE(SDEIER, 0xffffffff);
3186 	POSTING_READ(SDEIER);
3187 }
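
/*
 * Editorial sketch of the enable-all-then-mask scheme the comment above
 * describes: SDEIER is written once to "everything" before interrupts are
 * live, and all later gating goes through SDEIMR alone, so the interrupt
 * handler can save/restore SDEIER without racing the mask state:
 *
 *	I915_WRITE(SDEIER, 0xffffffff);		(all sources on, once)
 *	POSTING_READ(SDEIER);
 *	...
 *	I915_WRITE(SDEIMR, ~mask);		(runtime policy in IMR only)
 */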
3188 
3189 static void gen5_gt_irq_reset(struct drm_device *dev)
3190 {
3191 	struct drm_i915_private *dev_priv = dev->dev_private;
3192 
3193 	GEN5_IRQ_RESET(GT);
3194 	if (INTEL_INFO(dev)->gen >= 6)
3195 		GEN5_IRQ_RESET(GEN6_PM);
3196 }
3197 
3198 /* drm_dma.h hooks */
3200 static void ironlake_irq_reset(struct drm_device *dev)
3201 {
3202 	struct drm_i915_private *dev_priv = dev->dev_private;
3203 
3204 	I915_WRITE(HWSTAM, 0xffffffff);
3205 
3206 	GEN5_IRQ_RESET(DE);
3207 	if (IS_GEN7(dev))
3208 		I915_WRITE(GEN7_ERR_INT, 0xffffffff);
3209 
3210 	gen5_gt_irq_reset(dev);
3211 
3212 	ibx_irq_reset(dev);
3213 }
3214 
3215 static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
3216 {
3217 	enum pipe pipe;
3218 
3219 	i915_hotplug_interrupt_update(dev_priv, 0xFFFFFFFF, 0);
3220 	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3221 
3222 	for_each_pipe(dev_priv, pipe)
3223 		I915_WRITE(PIPESTAT(pipe), 0xffff);
3224 
3225 	GEN5_IRQ_RESET(VLV_);
3226 }
3227 
3228 static void valleyview_irq_preinstall(struct drm_device *dev)
3229 {
3230 	struct drm_i915_private *dev_priv = dev->dev_private;
3231 
3232 	/* VLV magic */
3233 	I915_WRITE(VLV_IMR, 0);
3234 	I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
3235 	I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
3236 	I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
3237 
3238 	gen5_gt_irq_reset(dev);
3239 
3240 	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
3241 
3242 	vlv_display_irq_reset(dev_priv);
3243 }
3244 
3245 static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
3246 {
3247 	GEN8_IRQ_RESET_NDX(GT, 0);
3248 	GEN8_IRQ_RESET_NDX(GT, 1);
3249 	GEN8_IRQ_RESET_NDX(GT, 2);
3250 	GEN8_IRQ_RESET_NDX(GT, 3);
3251 }
3252 
3253 static void gen8_irq_reset(struct drm_device *dev)
3254 {
3255 	struct drm_i915_private *dev_priv = dev->dev_private;
3256 	int pipe;
3257 
3258 	I915_WRITE(GEN8_MASTER_IRQ, 0);
3259 	POSTING_READ(GEN8_MASTER_IRQ);
3260 
3261 	gen8_gt_irq_reset(dev_priv);
3262 
3263 	for_each_pipe(dev_priv, pipe)
3264 		if (intel_display_power_is_enabled(dev_priv,
3265 						   POWER_DOMAIN_PIPE(pipe)))
3266 			GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
3267 
3268 	GEN5_IRQ_RESET(GEN8_DE_PORT_);
3269 	GEN5_IRQ_RESET(GEN8_DE_MISC_);
3270 	GEN5_IRQ_RESET(GEN8_PCU_);
3271 
3272 	if (HAS_PCH_SPLIT(dev))
3273 		ibx_irq_reset(dev);
3274 }
3275 
3276 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
3277 				     unsigned int pipe_mask)
3278 {
3279 	uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
3280 
3281 	spin_lock_irq(&dev_priv->irq_lock);
3282 	if (pipe_mask & 1 << PIPE_A)
3283 		GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_A,
3284 				  dev_priv->de_irq_mask[PIPE_A],
3285 				  ~dev_priv->de_irq_mask[PIPE_A] | extra_ier);
3286 	if (pipe_mask & 1 << PIPE_B)
3287 		GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B,
3288 				  dev_priv->de_irq_mask[PIPE_B],
3289 				  ~dev_priv->de_irq_mask[PIPE_B] | extra_ier);
3290 	if (pipe_mask & 1 << PIPE_C)
3291 		GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C,
3292 				  dev_priv->de_irq_mask[PIPE_C],
3293 				  ~dev_priv->de_irq_mask[PIPE_C] | extra_ier);
3294 	spin_unlock_irq(&dev_priv->irq_lock);
3295 }
3296 
3297 static void cherryview_irq_preinstall(struct drm_device *dev)
3298 {
3299 	struct drm_i915_private *dev_priv = dev->dev_private;
3300 
3301 	I915_WRITE(GEN8_MASTER_IRQ, 0);
3302 	POSTING_READ(GEN8_MASTER_IRQ);
3303 
3304 	gen8_gt_irq_reset(dev_priv);
3305 
3306 	GEN5_IRQ_RESET(GEN8_PCU_);
3307 
3308 	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
3309 
3310 	vlv_display_irq_reset(dev_priv);
3311 }
3312 
3313 static u32 intel_hpd_enabled_irqs(struct drm_device *dev,
3314 				  const u32 hpd[HPD_NUM_PINS])
3315 {
3316 	struct drm_i915_private *dev_priv = to_i915(dev);
3317 	struct intel_encoder *encoder;
3318 	u32 enabled_irqs = 0;
3319 
3320 	for_each_intel_encoder(dev, encoder)
3321 		if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
3322 			enabled_irqs |= hpd[encoder->hpd_pin];
3323 
3324 	return enabled_irqs;
3325 }
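
/*
 * Editorial usage note: callers pair the helper above with the platform
 * mask covering every possible pin, as the setup code below does:
 *
 *	hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
 *	enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_spt);
 *	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
 *
 * i.e. unmask only the pins whose state is HPD_ENABLED, while still
 * owning (and being able to ack) the full platform set.
 */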
3326 
3327 static void ibx_hpd_irq_setup(struct drm_device *dev)
3328 {
3329 	struct drm_i915_private *dev_priv = dev->dev_private;
3330 	u32 hotplug_irqs, hotplug, enabled_irqs;
3331 
3332 	if (HAS_PCH_IBX(dev)) {
3333 		hotplug_irqs = SDE_HOTPLUG_MASK;
3334 		enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ibx);
3335 	} else {
3336 		hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
3337 		enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_cpt);
3338 	}
3339 
3340 	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3341 
3342 	/*
3343 	 * Enable digital hotplug on the PCH, and configure the DP short pulse
3344 	 * duration to 2ms (which is the minimum in the Display Port spec).
3345 	 * The pulse duration bits are reserved on LPT+.
3346 	 */
3347 	hotplug = I915_READ(PCH_PORT_HOTPLUG);
3348 	hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
3349 	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
3350 	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
3351 	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
3352 	/*
3353 	 * When CPU and PCH are on the same package, port A
3354 	 * HPD must be enabled in both north and south.
3355 	 */
3356 	if (HAS_PCH_LPT_LP(dev))
3357 		hotplug |= PORTA_HOTPLUG_ENABLE;
3358 	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3359 }
3360 
3361 static void spt_hpd_irq_setup(struct drm_device *dev)
3362 {
3363 	struct drm_i915_private *dev_priv = dev->dev_private;
3364 	u32 hotplug_irqs, hotplug, enabled_irqs;
3365 
3366 	hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
3367 	enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_spt);
3368 
3369 	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3370 
3371 	/* Enable digital hotplug on the PCH */
3372 	hotplug = I915_READ(PCH_PORT_HOTPLUG);
3373 	hotplug |= PORTD_HOTPLUG_ENABLE | PORTC_HOTPLUG_ENABLE |
3374 		PORTB_HOTPLUG_ENABLE | PORTA_HOTPLUG_ENABLE;
3375 	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3376 
3377 	hotplug = I915_READ(PCH_PORT_HOTPLUG2);
3378 	hotplug |= PORTE_HOTPLUG_ENABLE;
3379 	I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
3380 }
3381 
3382 static void ilk_hpd_irq_setup(struct drm_device *dev)
3383 {
3384 	struct drm_i915_private *dev_priv = dev->dev_private;
3385 	u32 hotplug_irqs, hotplug, enabled_irqs;
3386 
3387 	if (INTEL_INFO(dev)->gen >= 8) {
3388 		hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG;
3389 		enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_bdw);
3390 
3391 		bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3392 	} else if (INTEL_INFO(dev)->gen >= 7) {
3393 		hotplug_irqs = DE_DP_A_HOTPLUG_IVB;
3394 		enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ivb);
3395 
3396 		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
3397 	} else {
3398 		hotplug_irqs = DE_DP_A_HOTPLUG;
3399 		enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ilk);
3400 
3401 		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
3402 	}
3403 
3404 	/*
3405 	 * Enable digital hotplug on the CPU, and configure the DP short pulse
3406 	 * duration to 2ms (which is the minimum in the Display Port spec).
3407 	 * The pulse duration bits are reserved on HSW+.
3408 	 */
3409 	hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
3410 	hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK;
3411 	hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE | DIGITAL_PORTA_PULSE_DURATION_2ms;
3412 	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
3413 
3414 	ibx_hpd_irq_setup(dev);
3415 }
3416 
3417 static void bxt_hpd_irq_setup(struct drm_device *dev)
3418 {
3419 	struct drm_i915_private *dev_priv = dev->dev_private;
3420 	u32 hotplug_irqs, hotplug, enabled_irqs;
3421 
3422 	enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_bxt);
3423 	hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK;
3424 
3425 	bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3426 
3427 	hotplug = I915_READ(PCH_PORT_HOTPLUG);
3428 	hotplug |= PORTC_HOTPLUG_ENABLE | PORTB_HOTPLUG_ENABLE |
3429 		PORTA_HOTPLUG_ENABLE;
3430 	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3431 }
3432 
3433 static void ibx_irq_postinstall(struct drm_device *dev)
3434 {
3435 	struct drm_i915_private *dev_priv = dev->dev_private;
3436 	u32 mask;
3437 
3438 	if (HAS_PCH_NOP(dev))
3439 		return;
3440 
3441 	if (HAS_PCH_IBX(dev))
3442 		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
3443 	else
3444 		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
3445 
3446 	gen5_assert_iir_is_zero(dev_priv, SDEIIR);
3447 	I915_WRITE(SDEIMR, ~mask);
3448 }
3449 
3450 static void gen5_gt_irq_postinstall(struct drm_device *dev)
3451 {
3452 	struct drm_i915_private *dev_priv = dev->dev_private;
3453 	u32 pm_irqs, gt_irqs;
3454 
3455 	pm_irqs = gt_irqs = 0;
3456 
3457 	dev_priv->gt_irq_mask = ~0;
3458 	if (HAS_L3_DPF(dev)) {
3459 		/* L3 parity interrupt is always unmasked. */
3460 		dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
3461 		gt_irqs |= GT_PARITY_ERROR(dev);
3462 	}
3463 
3464 	gt_irqs |= GT_RENDER_USER_INTERRUPT;
3465 	if (IS_GEN5(dev)) {
3466 		gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
3467 			   ILK_BSD_USER_INTERRUPT;
3468 	} else {
3469 		gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
3470 	}
3471 
3472 	GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
3473 
3474 	if (INTEL_INFO(dev)->gen >= 6) {
3475 		/*
3476 		 * RPS interrupts will get enabled/disabled on demand when RPS
3477 		 * itself is enabled/disabled.
3478 		 */
3479 		if (HAS_VEBOX(dev))
3480 			pm_irqs |= PM_VEBOX_USER_INTERRUPT;
3481 
3482 		dev_priv->pm_irq_mask = 0xffffffff;
3483 		GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs);
3484 	}
3485 }
3486 
3487 static int ironlake_irq_postinstall(struct drm_device *dev)
3488 {
3489 	struct drm_i915_private *dev_priv = dev->dev_private;
3490 	u32 display_mask, extra_mask;
3491 
3492 	if (INTEL_INFO(dev)->gen >= 7) {
3493 		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
3494 				DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
3495 				DE_PLANEB_FLIP_DONE_IVB |
3496 				DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
3497 		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
3498 			      DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
3499 			      DE_DP_A_HOTPLUG_IVB);
3500 	} else {
3501 		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
3502 				DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
3503 				DE_AUX_CHANNEL_A |
3504 				DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
3505 				DE_POISON);
3506 		extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
3507 			      DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
3508 			      DE_DP_A_HOTPLUG);
3509 	}
3510 
3511 	dev_priv->irq_mask = ~display_mask;
3512 
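	/*
	 * HWSTAM masks which events get reflected into the hardware status
	 * page; mask out nearly everything here, since the driver reads
	 * interrupt state from IIR instead.
	 */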
3513 	I915_WRITE(HWSTAM, 0xeffe);
3514 
3515 	ibx_irq_pre_postinstall(dev);
3516 
3517 	GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);
3518 
3519 	gen5_gt_irq_postinstall(dev);
3520 
3521 	ibx_irq_postinstall(dev);
3522 
3523 	if (IS_IRONLAKE_M(dev)) {
3524 		/* Enable PCU event interrupts
3525 		 *
3526 		 * Spinlocking is not required here for correctness since
3527 		 * interrupt setup is guaranteed to run in single-threaded
3528 		 * context, but we need it to keep assert_spin_locked() happy. */
3529 		spin_lock_irq(&dev_priv->irq_lock);
3530 		ilk_enable_display_irq(dev_priv, DE_PCU_EVENT);
3531 		spin_unlock_irq(&dev_priv->irq_lock);
3532 	}
3533 
3534 	return 0;
3535 }
3536 
3537 static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv)
3538 {
3539 	u32 pipestat_mask;
3540 	u32 iir_mask;
3541 	enum pipe pipe;
3542 
3543 	pipestat_mask = PIPESTAT_INT_STATUS_MASK |
3544 			PIPE_FIFO_UNDERRUN_STATUS;
3545 
3546 	for_each_pipe(dev_priv, pipe)
3547 		I915_WRITE(PIPESTAT(pipe), pipestat_mask);
3548 	POSTING_READ(PIPESTAT(PIPE_A));
3549 
3550 	pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
3551 			PIPE_CRC_DONE_INTERRUPT_STATUS;
3552 
3553 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3554 	for_each_pipe(dev_priv, pipe)
3555 		i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
3556 
3557 	iir_mask = I915_DISPLAY_PORT_INTERRUPT |
3558 		   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3559 		   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
3560 	if (IS_CHERRYVIEW(dev_priv))
3561 		iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
3562 	dev_priv->irq_mask &= ~iir_mask;
3563 
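	/*
	 * VLV_IIR appears to be double buffered, hence the two writes: a
	 * single write could leave a latched copy of the bits pending.
	 */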
3564 	I915_WRITE(VLV_IIR, iir_mask);
3565 	I915_WRITE(VLV_IIR, iir_mask);
3566 	I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3567 	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3568 	POSTING_READ(VLV_IMR);
3569 }
3570 
3571 static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv)
3572 {
3573 	u32 pipestat_mask;
3574 	u32 iir_mask;
3575 	enum pipe pipe;
3576 
3577 	iir_mask = I915_DISPLAY_PORT_INTERRUPT |
3578 		   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3579 		   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
3580 	if (IS_CHERRYVIEW(dev_priv))
3581 		iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
3582 
3583 	dev_priv->irq_mask |= iir_mask;
3584 	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3585 	I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3586 	I915_WRITE(VLV_IIR, iir_mask);
3587 	I915_WRITE(VLV_IIR, iir_mask);
3588 	POSTING_READ(VLV_IIR);
3589 
3590 	pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
3591 			PIPE_CRC_DONE_INTERRUPT_STATUS;
3592 
3593 	i915_disable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3594 	for_each_pipe(dev_priv, pipe)
3595 		i915_disable_pipestat(dev_priv, pipe, pipestat_mask);
3596 
3597 	pipestat_mask = PIPESTAT_INT_STATUS_MASK |
3598 			PIPE_FIFO_UNDERRUN_STATUS;
3599 
3600 	for_each_pipe(dev_priv, pipe)
3601 		I915_WRITE(PIPESTAT(pipe), pipestat_mask);
3602 	POSTING_READ(PIPESTAT(PIPE_A));
3603 }
3604 
3605 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
3606 {
3607 	assert_spin_locked(&dev_priv->irq_lock);
3608 
3609 	if (dev_priv->display_irqs_enabled)
3610 		return;
3611 
3612 	dev_priv->display_irqs_enabled = true;
3613 
3614 	if (intel_irqs_enabled(dev_priv))
3615 		valleyview_display_irqs_install(dev_priv);
3616 }
3617 
3618 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
3619 {
3620 	assert_spin_locked(&dev_priv->irq_lock);
3621 
3622 	if (!dev_priv->display_irqs_enabled)
3623 		return;
3624 
3625 	dev_priv->display_irqs_enabled = false;
3626 
3627 	if (intel_irqs_enabled(dev_priv))
3628 		valleyview_display_irqs_uninstall(dev_priv);
3629 }
3630 
3631 static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
3632 {
3633 	dev_priv->irq_mask = ~0;
3634 
3635 	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
3636 	POSTING_READ(PORT_HOTPLUG_EN);
3637 
3638 	I915_WRITE(VLV_IIR, 0xffffffff);
3639 	I915_WRITE(VLV_IIR, 0xffffffff);
3640 	I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3641 	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3642 	POSTING_READ(VLV_IMR);
3643 
3644 	/* Interrupt setup is already guaranteed to be single-threaded; this is
3645 	 * just to make the assert_spin_locked check happy. */
3646 	spin_lock_irq(&dev_priv->irq_lock);
3647 	if (dev_priv->display_irqs_enabled)
3648 		valleyview_display_irqs_install(dev_priv);
3649 	spin_unlock_irq(&dev_priv->irq_lock);
3650 }
3651 
3652 static int valleyview_irq_postinstall(struct drm_device *dev)
3653 {
3654 	struct drm_i915_private *dev_priv = dev->dev_private;
3655 
3656 	vlv_display_irq_postinstall(dev_priv);
3657 
3658 	gen5_gt_irq_postinstall(dev);
3659 
3660 	/* ack & enable invalid PTE error interrupts */
3661 #if 0 /* FIXME: add support to irq handler for checking these bits */
3662 	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
3663 	I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
3664 #endif
3665 
3666 	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
3667 
3668 	return 0;
3669 }
3670 
3671 static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
3672 {
3673 	/* These are interrupts we'll toggle with the ring mask register */
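	/*
	 * Each array element programs one GT IIR bank: 0 = render/blitter,
	 * 1 = VCS1/VCS2, 2 = PM/RPS (left fully masked here), 3 = VECS.
	 */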
3674 	uint32_t gt_interrupts[] = {
3675 		GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
3676 			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
3677 			GT_RENDER_L3_PARITY_ERROR_INTERRUPT |
3678 			GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
3679 			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
3680 		GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3681 			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3682 			GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
3683 			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
3684 		0,
3685 		GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
3686 			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
3687 	};
3688 
3689 	dev_priv->pm_irq_mask = 0xffffffff;
3690 	GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
3691 	GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
3692 	/*
3693 	 * RPS interrupts will get enabled/disabled on demand when RPS itself
3694 	 * is enabled/disabled.
3695 	 */
3696 	GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, 0);
3697 	GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
3698 }
3699 
3700 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
3701 {
3702 	uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
3703 	uint32_t de_pipe_enables;
3704 	u32 de_port_masked = GEN8_AUX_CHANNEL_A;
3705 	u32 de_port_enables;
3706 	enum pipe pipe;
3707 
3708 	if (INTEL_INFO(dev_priv)->gen >= 9) {
3709 		de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE |
3710 				  GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
3711 		de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
3712 				  GEN9_AUX_CHANNEL_D;
3713 		if (IS_BROXTON(dev_priv))
3714 			de_port_masked |= BXT_DE_PORT_GMBUS;
3715 	} else {
3716 		de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE |
3717 				  GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
3718 	}
3719 
3720 	de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
3721 					   GEN8_PIPE_FIFO_UNDERRUN;
3722 
3723 	de_port_enables = de_port_masked;
3724 	if (IS_BROXTON(dev_priv))
3725 		de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
3726 	else if (IS_BROADWELL(dev_priv))
3727 		de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;
3728 
3729 	dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
3730 	dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
3731 	dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;
3732 
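	/*
	 * Only program the pipe IRQ registers whose power well is currently
	 * enabled; the remaining pipes are initialized later, when their
	 * power domain comes up.
	 */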
3733 	for_each_pipe(dev_priv, pipe)
3734 		if (intel_display_power_is_enabled(dev_priv,
3735 				POWER_DOMAIN_PIPE(pipe)))
3736 			GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
3737 					  dev_priv->de_irq_mask[pipe],
3738 					  de_pipe_enables);
3739 
3740 	GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
3741 }
3742 
3743 static int gen8_irq_postinstall(struct drm_device *dev)
3744 {
3745 	struct drm_i915_private *dev_priv = dev->dev_private;
3746 
3747 	if (HAS_PCH_SPLIT(dev))
3748 		ibx_irq_pre_postinstall(dev);
3749 
3750 	gen8_gt_irq_postinstall(dev_priv);
3751 	gen8_de_irq_postinstall(dev_priv);
3752 
3753 	if (HAS_PCH_SPLIT(dev))
3754 		ibx_irq_postinstall(dev);
3755 
3756 	I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
3757 	POSTING_READ(GEN8_MASTER_IRQ);
3758 
3759 	return 0;
3760 }
3761 
3762 static int cherryview_irq_postinstall(struct drm_device *dev)
3763 {
3764 	struct drm_i915_private *dev_priv = dev->dev_private;
3765 
3766 	vlv_display_irq_postinstall(dev_priv);
3767 
3768 	gen8_gt_irq_postinstall(dev_priv);
3769 
3770 	I915_WRITE(GEN8_MASTER_IRQ, MASTER_INTERRUPT_ENABLE);
3771 	POSTING_READ(GEN8_MASTER_IRQ);
3772 
3773 	return 0;
3774 }
3775 
3776 static void gen8_irq_uninstall(struct drm_device *dev)
3777 {
3778 	struct drm_i915_private *dev_priv = dev->dev_private;
3779 
3780 	if (!dev_priv)
3781 		return;
3782 
3783 	gen8_irq_reset(dev);
3784 }
3785 
3786 static void vlv_display_irq_uninstall(struct drm_i915_private *dev_priv)
3787 {
3788 	/* Interrupt setup is already guaranteed to be single-threaded; this is
3789 	 * just to make the assert_spin_locked check happy. */
3790 	spin_lock_irq(&dev_priv->irq_lock);
3791 	if (dev_priv->display_irqs_enabled)
3792 		valleyview_display_irqs_uninstall(dev_priv);
3793 	spin_unlock_irq(&dev_priv->irq_lock);
3794 
3795 	vlv_display_irq_reset(dev_priv);
3796 
3797 	dev_priv->irq_mask = ~0;
3798 }
3799 
3800 static void valleyview_irq_uninstall(struct drm_device *dev)
3801 {
3802 	struct drm_i915_private *dev_priv = dev->dev_private;
3803 
3804 	if (!dev_priv)
3805 		return;
3806 
3807 	I915_WRITE(VLV_MASTER_IER, 0);
3808 
3809 	gen5_gt_irq_reset(dev);
3810 
3811 	I915_WRITE(HWSTAM, 0xffffffff);
3812 
3813 	vlv_display_irq_uninstall(dev_priv);
3814 }
3815 
3816 static void cherryview_irq_uninstall(struct drm_device *dev)
3817 {
3818 	struct drm_i915_private *dev_priv = dev->dev_private;
3819 
3820 	if (!dev_priv)
3821 		return;
3822 
3823 	I915_WRITE(GEN8_MASTER_IRQ, 0);
3824 	POSTING_READ(GEN8_MASTER_IRQ);
3825 
3826 	gen8_gt_irq_reset(dev_priv);
3827 
3828 	GEN5_IRQ_RESET(GEN8_PCU_);
3829 
3830 	vlv_display_irq_uninstall(dev_priv);
3831 }
3832 
3833 static void ironlake_irq_uninstall(struct drm_device *dev)
3834 {
3835 	struct drm_i915_private *dev_priv = dev->dev_private;
3836 
3837 	if (!dev_priv)
3838 		return;
3839 
3840 	ironlake_irq_reset(dev);
3841 }
3842 
3843 static void i8xx_irq_preinstall(struct drm_device *dev)
3844 {
3845 	struct drm_i915_private *dev_priv = dev->dev_private;
3846 	int pipe;
3847 
3848 	for_each_pipe(dev_priv, pipe)
3849 		I915_WRITE(PIPESTAT(pipe), 0);
3850 	I915_WRITE16(IMR, 0xffff);
3851 	I915_WRITE16(IER, 0x0);
3852 	POSTING_READ16(IER);
3853 }
3854 
3855 static int i8xx_irq_postinstall(struct drm_device *dev)
3856 {
3857 	struct drm_i915_private *dev_priv = dev->dev_private;
3858 
3859 	I915_WRITE16(EMR,
3860 		     ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
3861 
3862 	/* Unmask the interrupts that we always want on. */
3863 	dev_priv->irq_mask =
3864 		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3865 		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3866 		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3867 		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
3868 	I915_WRITE16(IMR, dev_priv->irq_mask);
3869 
3870 	I915_WRITE16(IER,
3871 		     I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3872 		     I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3873 		     I915_USER_INTERRUPT);
3874 	POSTING_READ16(IER);
3875 
3876 	/* Interrupt setup is already guaranteed to be single-threaded; this is
3877 	 * just to make the assert_spin_locked check happy. */
3878 	spin_lock_irq(&dev_priv->irq_lock);
3879 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3880 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
3881 	spin_unlock_irq(&dev_priv->irq_lock);
3882 
3883 	return 0;
3884 }
3885 
3886 /*
3887  * Returns true when a page flip has completed.
3888  */
3889 static bool i8xx_handle_vblank(struct drm_device *dev,
3890 			       int plane, int pipe, u32 iir)
3891 {
3892 	struct drm_i915_private *dev_priv = dev->dev_private;
3893 	u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
3894 
3895 	if (!intel_pipe_handle_vblank(dev, pipe))
3896 		return false;
3897 
3898 	if ((iir & flip_pending) == 0)
3899 		goto check_page_flip;
3900 
3901 	/* We detect FlipDone by looking for the change in PendingFlip from '1'
3902 	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
3903 	 * to '0' on the following vblank, i.e. IIR has the PendingFlip
3904 	 * the flip is completed (no longer pending). Since this doesn't raise
3905 	 * an interrupt per se, we watch for the change at vblank.
3906 	 */
3907 	if (I915_READ16(ISR) & flip_pending)
3908 		goto check_page_flip;
3909 
3910 	intel_prepare_page_flip(dev, plane);
3911 	intel_finish_page_flip(dev, pipe);
3912 	return true;
3913 
3914 check_page_flip:
3915 	intel_check_page_flip(dev, pipe);
3916 	return false;
3917 }
3918 
3919 static irqreturn_t i8xx_irq_handler(int irq, void *arg)
3920 {
3921 	struct drm_device *dev = arg;
3922 	struct drm_i915_private *dev_priv = dev->dev_private;
3923 	u16 iir, new_iir;
3924 	u32 pipe_stats[2];
3925 	int pipe;
3926 	u16 flip_mask =
3927 		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3928 		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
3929 	irqreturn_t ret;
3930 
3931 	if (!intel_irqs_enabled(dev_priv))
3932 		return IRQ_NONE;
3933 
3934 	/* IRQs are synced during runtime_suspend, so we don't require a wakeref */
3935 	disable_rpm_wakeref_asserts(dev_priv);
3936 
3937 	ret = IRQ_NONE;
3938 	iir = I915_READ16(IIR);
3939 	if (iir == 0)
3940 		goto out;
3941 
3942 	while (iir & ~flip_mask) {
3943 		/* Can't rely on pipestat interrupt bit in iir as it might
3944 		 * have been cleared after the pipestat interrupt was received.
3945 		 * It doesn't set the bit in iir again, but it still produces
3946 		 * interrupts (for non-MSI).
3947 		 */
3948 		spin_lock(&dev_priv->irq_lock);
3949 		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
3950 			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
3951 
3952 		for_each_pipe(dev_priv, pipe) {
3953 			i915_reg_t reg = PIPESTAT(pipe);
3954 			pipe_stats[pipe] = I915_READ(reg);
3955 
3956 			/*
3957 			 * Clear the PIPE*STAT regs before the IIR
3958 			 */
3959 			if (pipe_stats[pipe] & 0x8000ffff)
3960 				I915_WRITE(reg, pipe_stats[pipe]);
3961 		}
3962 		spin_unlock(&dev_priv->irq_lock);
3963 
3964 		I915_WRITE16(IIR, iir & ~flip_mask);
3965 		new_iir = I915_READ16(IIR); /* Flush posted writes */
3966 
3967 		if (iir & I915_USER_INTERRUPT)
3968 			notify_ring(&dev_priv->ring[RCS]);
3969 
3970 		for_each_pipe(dev_priv, pipe) {
3971 			int plane = pipe;
3972 			if (HAS_FBC(dev))
3973 				plane = !plane;
3974 
3975 			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
3976 			    i8xx_handle_vblank(dev, plane, pipe, iir))
3977 				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
3978 
3979 			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
3980 				i9xx_pipe_crc_irq_handler(dev, pipe);
3981 
3982 			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
3983 				intel_cpu_fifo_underrun_irq_handler(dev_priv,
3984 								    pipe);
3985 		}
3986 
3987 		iir = new_iir;
3988 	}
3989 	ret = IRQ_HANDLED;
3990 
3991 out:
3992 	enable_rpm_wakeref_asserts(dev_priv);
3993 
3994 	return ret;
3995 }
3996 
3997 static void i8xx_irq_uninstall(struct drm_device *dev)
3998 {
3999 	struct drm_i915_private *dev_priv = dev->dev_private;
4000 	int pipe;
4001 
4002 	for_each_pipe(dev_priv, pipe) {
4003 		/* Clear enable bits; then clear status bits */
4004 		I915_WRITE(PIPESTAT(pipe), 0);
4005 		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
4006 	}
4007 	I915_WRITE16(IMR, 0xffff);
4008 	I915_WRITE16(IER, 0x0);
4009 	I915_WRITE16(IIR, I915_READ16(IIR));
4010 }
4011 
4012 static void i915_irq_preinstall(struct drm_device *dev)
4013 {
4014 	struct drm_i915_private *dev_priv = dev->dev_private;
4015 	int pipe;
4016 
4017 	if (I915_HAS_HOTPLUG(dev)) {
4018 		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4019 		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4020 	}
4021 
4022 	I915_WRITE16(HWSTAM, 0xeffe);
4023 	for_each_pipe(dev_priv, pipe)
4024 		I915_WRITE(PIPESTAT(pipe), 0);
4025 	I915_WRITE(IMR, 0xffffffff);
4026 	I915_WRITE(IER, 0x0);
4027 	POSTING_READ(IER);
4028 }
4029 
4030 static int i915_irq_postinstall(struct drm_device *dev)
4031 {
4032 	struct drm_i915_private *dev_priv = dev->dev_private;
4033 	u32 enable_mask;
4034 
4035 	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
4036 
4037 	/* Unmask the interrupts that we always want on. */
4038 	dev_priv->irq_mask =
4039 		~(I915_ASLE_INTERRUPT |
4040 		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4041 		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4042 		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4043 		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
4044 
4045 	enable_mask =
4046 		I915_ASLE_INTERRUPT |
4047 		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4048 		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4049 		I915_USER_INTERRUPT;
4050 
4051 	if (I915_HAS_HOTPLUG(dev)) {
4052 		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4053 		POSTING_READ(PORT_HOTPLUG_EN);
4054 
4055 		/* Enable in IER... */
4056 		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
4057 		/* and unmask in IMR */
4058 		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
4059 	}
4060 
4061 	I915_WRITE(IMR, dev_priv->irq_mask);
4062 	I915_WRITE(IER, enable_mask);
4063 	POSTING_READ(IER);
4064 
4065 	i915_enable_asle_pipestat(dev);
4066 
4067 	/* Interrupt setup is already guaranteed to be single-threaded; this is
4068 	 * just to make the assert_spin_locked check happy. */
4069 	spin_lock_irq(&dev_priv->irq_lock);
4070 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4071 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4072 	spin_unlock_irq(&dev_priv->irq_lock);
4073 
4074 	return 0;
4075 }
4076 
4077 /*
4078  * Returns true when a page flip has completed.
4079  */
4080 static bool i915_handle_vblank(struct drm_device *dev,
4081 			       int plane, int pipe, u32 iir)
4082 {
4083 	struct drm_i915_private *dev_priv = dev->dev_private;
4084 	u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
4085 
4086 	if (!intel_pipe_handle_vblank(dev, pipe))
4087 		return false;
4088 
4089 	if ((iir & flip_pending) == 0)
4090 		goto check_page_flip;
4091 
4092 	/* We detect FlipDone by looking for the change in PendingFlip from '1'
4093 	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
4094 	 * to '0' on the following vblank, i.e. IIR has the PendingFlip
4095 	 * the flip is completed (no longer pending). Since this doesn't raise
4096 	 * an interrupt per se, we watch for the change at vblank.
4097 	 */
4098 	if (I915_READ(ISR) & flip_pending)
4099 		goto check_page_flip;
4100 
4101 	intel_prepare_page_flip(dev, plane);
4102 	intel_finish_page_flip(dev, pipe);
4103 	return true;
4104 
4105 check_page_flip:
4106 	intel_check_page_flip(dev, pipe);
4107 	return false;
4108 }
4109 
4110 static irqreturn_t i915_irq_handler(int irq, void *arg)
4111 {
4112 	struct drm_device *dev = arg;
4113 	struct drm_i915_private *dev_priv = dev->dev_private;
4114 	u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
4115 	u32 flip_mask =
4116 		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4117 		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
4118 	int pipe, ret = IRQ_NONE;
4119 
4120 	if (!intel_irqs_enabled(dev_priv))
4121 		return IRQ_NONE;
4122 
4123 	/* IRQs are synced during runtime_suspend, so we don't require a wakeref */
4124 	disable_rpm_wakeref_asserts(dev_priv);
4125 
4126 	iir = I915_READ(IIR);
4127 	do {
4128 		bool irq_received = (iir & ~flip_mask) != 0;
4129 		bool blc_event = false;
4130 
4131 		/* Can't rely on pipestat interrupt bit in iir as it might
4132 		 * have been cleared after the pipestat interrupt was received.
4133 		 * It doesn't set the bit in iir again, but it still produces
4134 		 * interrupts (for non-MSI).
4135 		 */
4136 		spin_lock(&dev_priv->irq_lock);
4137 		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
4138 			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
4139 
4140 		for_each_pipe(dev_priv, pipe) {
4141 			i915_reg_t reg = PIPESTAT(pipe);
4142 			pipe_stats[pipe] = I915_READ(reg);
4143 
4144 			/* Clear the PIPE*STAT regs before the IIR */
4145 			if (pipe_stats[pipe] & 0x8000ffff) {
4146 				I915_WRITE(reg, pipe_stats[pipe]);
4147 				irq_received = true;
4148 			}
4149 		}
4150 		spin_unlock(&dev_priv->irq_lock);
4151 
4152 		if (!irq_received)
4153 			break;
4154 
4155 		/* Consume port.  Then clear IIR or we'll miss events */
4156 		if (I915_HAS_HOTPLUG(dev) &&
4157 		    iir & I915_DISPLAY_PORT_INTERRUPT)
4158 			i9xx_hpd_irq_handler(dev);
4159 
4160 		I915_WRITE(IIR, iir & ~flip_mask);
4161 		new_iir = I915_READ(IIR); /* Flush posted writes */
4162 
4163 		if (iir & I915_USER_INTERRUPT)
4164 			notify_ring(&dev_priv->ring[RCS]);
4165 
4166 		for_each_pipe(dev_priv, pipe) {
4167 			int plane = pipe;
4168 			if (HAS_FBC(dev))
4169 				plane = !plane;
4170 
4171 			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
4172 			    i915_handle_vblank(dev, plane, pipe, iir))
4173 				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
4174 
4175 			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
4176 				blc_event = true;
4177 
4178 			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
4179 				i9xx_pipe_crc_irq_handler(dev, pipe);
4180 
4181 			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
4182 				intel_cpu_fifo_underrun_irq_handler(dev_priv,
4183 								    pipe);
4184 		}
4185 
4186 		if (blc_event || (iir & I915_ASLE_INTERRUPT))
4187 			intel_opregion_asle_intr(dev);
4188 
4189 		/* With MSI, interrupts are only generated when iir
4190 		 * transitions from zero to nonzero.  If another bit got
4191 		 * set while we were handling the existing iir bits, then
4192 		 * we would never get another interrupt.
4193 		 *
4194 		 * This is fine on non-MSI as well, as if we hit this path
4195 		 * we avoid exiting the interrupt handler only to generate
4196 		 * another one.
4197 		 *
4198 		 * Note that for MSI this could cause a stray interrupt report
4199 		 * if an interrupt landed in the time between writing IIR and
4200 		 * the posting read.  This should be rare enough to never
4201 		 * trigger the 99% of 100,000 interrupts test for disabling
4202 		 * stray interrupts.
4203 		 */
4204 		ret = IRQ_HANDLED;
4205 		iir = new_iir;
4206 	} while (iir & ~flip_mask);
4207 
4208 	enable_rpm_wakeref_asserts(dev_priv);
4209 
4210 	return ret;
4211 }
4212 
4213 static void i915_irq_uninstall(struct drm_device *dev)
4214 {
4215 	struct drm_i915_private *dev_priv = dev->dev_private;
4216 	int pipe;
4217 
4218 	if (I915_HAS_HOTPLUG(dev)) {
4219 		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4220 		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4221 	}
4222 
4223 	I915_WRITE16(HWSTAM, 0xffff);
4224 	for_each_pipe(dev_priv, pipe) {
4225 		/* Clear enable bits; then clear status bits */
4226 		I915_WRITE(PIPESTAT(pipe), 0);
4227 		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
4228 	}
4229 	I915_WRITE(IMR, 0xffffffff);
4230 	I915_WRITE(IER, 0x0);
4231 
4232 	I915_WRITE(IIR, I915_READ(IIR));
4233 }
4234 
4235 static void i965_irq_preinstall(struct drm_device *dev)
4236 {
4237 	struct drm_i915_private *dev_priv = dev->dev_private;
4238 	int pipe;
4239 
4240 	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4241 	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4242 
4243 	I915_WRITE(HWSTAM, 0xeffe);
4244 	for_each_pipe(dev_priv, pipe)
4245 		I915_WRITE(PIPESTAT(pipe), 0);
4246 	I915_WRITE(IMR, 0xffffffff);
4247 	I915_WRITE(IER, 0x0);
4248 	POSTING_READ(IER);
4249 }
4250 
4251 static int i965_irq_postinstall(struct drm_device *dev)
4252 {
4253 	struct drm_i915_private *dev_priv = dev->dev_private;
4254 	u32 enable_mask;
4255 	u32 error_mask;
4256 
4257 	/* Unmask the interrupts that we always want on. */
4258 	dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
4259 			       I915_DISPLAY_PORT_INTERRUPT |
4260 			       I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4261 			       I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4262 			       I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4263 			       I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
4264 			       I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
4265 
4266 	enable_mask = ~dev_priv->irq_mask;
4267 	enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4268 			 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
4269 	enable_mask |= I915_USER_INTERRUPT;
4270 
4271 	if (IS_G4X(dev))
4272 		enable_mask |= I915_BSD_USER_INTERRUPT;
4273 
4274 	/* Interrupt setup is already guaranteed to be single-threaded; this is
4275 	 * just to make the assert_spin_locked check happy. */
4276 	spin_lock_irq(&dev_priv->irq_lock);
4277 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
4278 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4279 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4280 	spin_unlock_irq(&dev_priv->irq_lock);
4281 
4282 	/*
4283 	 * Enable some error detection, note the instruction error mask
4284 	 * bit is reserved, so we leave it masked.
4285 	 */
4286 	if (IS_G4X(dev)) {
4287 		error_mask = ~(GM45_ERROR_PAGE_TABLE |
4288 			       GM45_ERROR_MEM_PRIV |
4289 			       GM45_ERROR_CP_PRIV |
4290 			       I915_ERROR_MEMORY_REFRESH);
4291 	} else {
4292 		error_mask = ~(I915_ERROR_PAGE_TABLE |
4293 			       I915_ERROR_MEMORY_REFRESH);
4294 	}
4295 	I915_WRITE(EMR, error_mask);
4296 
4297 	I915_WRITE(IMR, dev_priv->irq_mask);
4298 	I915_WRITE(IER, enable_mask);
4299 	POSTING_READ(IER);
4300 
4301 	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4302 	POSTING_READ(PORT_HOTPLUG_EN);
4303 
4304 	i915_enable_asle_pipestat(dev);
4305 
4306 	return 0;
4307 }
4308 
4309 static void i915_hpd_irq_setup(struct drm_device *dev)
4310 {
4311 	struct drm_i915_private *dev_priv = dev->dev_private;
4312 	u32 hotplug_en;
4313 
4314 	assert_spin_locked(&dev_priv->irq_lock);
4315 
4316 	/* Note HDMI and DP share hotplug bits; the enable bits are the
4317 	 * same for all generations. */
4318 	hotplug_en = intel_hpd_enabled_irqs(dev, hpd_mask_i915);
4319 	/*
4320 	 * Programming the CRT detection parameters tends to generate a
4321 	 * spurious hotplug event about three seconds later. So just
4322 	 * do it once. */
4323 	if (IS_G4X(dev))
4324 		hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
4325 	hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
4326 
4327 	/* Ignore TV since it's buggy */
4328 	i915_hotplug_interrupt_update_locked(dev_priv,
4329 					     HOTPLUG_INT_EN_MASK |
4330 					     CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
4331 					     CRT_HOTPLUG_ACTIVATION_PERIOD_64,
4332 					     hotplug_en);
4333 }
4334 
4335 static irqreturn_t i965_irq_handler(int irq, void *arg)
4336 {
4337 	struct drm_device *dev = arg;
4338 	struct drm_i915_private *dev_priv = dev->dev_private;
4339 	u32 iir, new_iir;
4340 	u32 pipe_stats[I915_MAX_PIPES];
4341 	int ret = IRQ_NONE, pipe;
4342 	u32 flip_mask =
4343 		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4344 		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
4345 
4346 	if (!intel_irqs_enabled(dev_priv))
4347 		return IRQ_NONE;
4348 
4349 	/* IRQs are synced during runtime_suspend, so we don't require a wakeref */
4350 	disable_rpm_wakeref_asserts(dev_priv);
4351 
4352 	iir = I915_READ(IIR);
4353 
4354 	for (;;) {
4355 		bool irq_received = (iir & ~flip_mask) != 0;
4356 		bool blc_event = false;
4357 
4358 		/* Can't rely on pipestat interrupt bit in iir as it might
4359 		 * have been cleared after the pipestat interrupt was received.
4360 		 * It doesn't set the bit in iir again, but it still produces
4361 		 * interrupts (for non-MSI).
4362 		 */
4363 		spin_lock(&dev_priv->irq_lock);
4364 		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
4365 			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
4366 
4367 		for_each_pipe(dev_priv, pipe) {
4368 			i915_reg_t reg = PIPESTAT(pipe);
4369 			pipe_stats[pipe] = I915_READ(reg);
4370 
4371 			/*
4372 			 * Clear the PIPE*STAT regs before the IIR
4373 			 */
4374 			if (pipe_stats[pipe] & 0x8000ffff) {
4375 				I915_WRITE(reg, pipe_stats[pipe]);
4376 				irq_received = true;
4377 			}
4378 		}
4379 		spin_unlock(&dev_priv->irq_lock);
4380 
4381 		if (!irq_received)
4382 			break;
4383 
4384 		ret = IRQ_HANDLED;
4385 
4386 		/* Consume port.  Then clear IIR or we'll miss events */
4387 		if (iir & I915_DISPLAY_PORT_INTERRUPT)
4388 			i9xx_hpd_irq_handler(dev);
4389 
4390 		I915_WRITE(IIR, iir & ~flip_mask);
4391 		new_iir = I915_READ(IIR); /* Flush posted writes */
4392 
4393 		if (iir & I915_USER_INTERRUPT)
4394 			notify_ring(&dev_priv->ring[RCS]);
4395 		if (iir & I915_BSD_USER_INTERRUPT)
4396 			notify_ring(&dev_priv->ring[VCS]);
4397 
4398 		for_each_pipe(dev_priv, pipe) {
4399 			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
4400 			    i915_handle_vblank(dev, pipe, pipe, iir))
4401 				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
4402 
4403 			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
4404 				blc_event = true;
4405 
4406 			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
4407 				i9xx_pipe_crc_irq_handler(dev, pipe);
4408 
4409 			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
4410 				intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
4411 		}
4412 
4413 		if (blc_event || (iir & I915_ASLE_INTERRUPT))
4414 			intel_opregion_asle_intr(dev);
4415 
4416 		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
4417 			gmbus_irq_handler(dev);
4418 
4419 		/* With MSI, interrupts are only generated when iir
4420 		 * transitions from zero to nonzero.  If another bit got
4421 		 * set while we were handling the existing iir bits, then
4422 		 * we would never get another interrupt.
4423 		 *
4424 		 * This is fine on non-MSI as well, as if we hit this path
4425 		 * we avoid exiting the interrupt handler only to generate
4426 		 * another one.
4427 		 *
4428 		 * Note that for MSI this could cause a stray interrupt report
4429 		 * if an interrupt landed in the time between writing IIR and
4430 		 * the posting read.  This should be rare enough to never
4431 		 * trigger the 99% of 100,000 interrupts test for disabling
4432 		 * stray interrupts.
4433 		 */
4434 		iir = new_iir;
4435 	}
4436 
4437 	enable_rpm_wakeref_asserts(dev_priv);
4438 
4439 	return ret;
4440 }
4441 
4442 static void i965_irq_uninstall(struct drm_device *dev)
4443 {
4444 	struct drm_i915_private *dev_priv = dev->dev_private;
4445 	int pipe;
4446 
4447 	if (!dev_priv)
4448 		return;
4449 
4450 	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4451 	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4452 
4453 	I915_WRITE(HWSTAM, 0xffffffff);
4454 	for_each_pipe(dev_priv, pipe)
4455 		I915_WRITE(PIPESTAT(pipe), 0);
4456 	I915_WRITE(IMR, 0xffffffff);
4457 	I915_WRITE(IER, 0x0);
4458 
4459 	for_each_pipe(dev_priv, pipe)
4460 		I915_WRITE(PIPESTAT(pipe),
4461 			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
4462 	I915_WRITE(IIR, I915_READ(IIR));
4463 }
4464 
4465 /**
4466  * intel_irq_init - initializes irq support
4467  * @dev_priv: i915 device instance
4468  *
4469  * This function initializes all the irq support including work items, timers
4470  * and all the vtables. It does not set up the interrupt itself though.
4471  */
4472 void intel_irq_init(struct drm_i915_private *dev_priv)
4473 {
4474 	struct drm_device *dev = dev_priv->dev;
4475 
4476 	intel_hpd_init_work(dev_priv);
4477 
4478 	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
4479 	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
4480 
4481 	/* Let's track the enabled rps events */
4482 	if (IS_VALLEYVIEW(dev_priv))
4483 		/* WaGsvRC0ResidencyMethod:vlv */
4484 		dev_priv->pm_rps_events = GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED;
4485 	else
4486 		dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
4487 
4488 	INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work,
4489 			  i915_hangcheck_elapsed);
4490 
4491 	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
4492 
4493 	if (IS_GEN2(dev_priv)) {
4494 		dev->max_vblank_count = 0;
4495 		dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
4496 	} else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
4497 		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
4498 		dev->driver->get_vblank_counter = g4x_get_vblank_counter;
4499 	} else {
4500 		dev->driver->get_vblank_counter = i915_get_vblank_counter;
4501 		dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
4502 	}
4503 
4504 	/*
4505 	 * Opt out of the vblank disable timer on everything except gen2.
4506 	 * Gen2 doesn't have a hardware frame counter and so depends on
4507  * vblank interrupts to produce sane vblank sequence numbers.
4508 	 */
4509 	if (!IS_GEN2(dev_priv))
4510 		dev->vblank_disable_immediate = true;
4511 
4512 	dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
4513 	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
4514 
4515 	if (IS_CHERRYVIEW(dev_priv)) {
4516 		dev->driver->irq_handler = cherryview_irq_handler;
4517 		dev->driver->irq_preinstall = cherryview_irq_preinstall;
4518 		dev->driver->irq_postinstall = cherryview_irq_postinstall;
4519 		dev->driver->irq_uninstall = cherryview_irq_uninstall;
4520 		dev->driver->enable_vblank = valleyview_enable_vblank;
4521 		dev->driver->disable_vblank = valleyview_disable_vblank;
4522 		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4523 	} else if (IS_VALLEYVIEW(dev_priv)) {
4524 		dev->driver->irq_handler = valleyview_irq_handler;
4525 		dev->driver->irq_preinstall = valleyview_irq_preinstall;
4526 		dev->driver->irq_postinstall = valleyview_irq_postinstall;
4527 		dev->driver->irq_uninstall = valleyview_irq_uninstall;
4528 		dev->driver->enable_vblank = valleyview_enable_vblank;
4529 		dev->driver->disable_vblank = valleyview_disable_vblank;
4530 		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4531 	} else if (INTEL_INFO(dev_priv)->gen >= 8) {
4532 		dev->driver->irq_handler = gen8_irq_handler;
4533 		dev->driver->irq_preinstall = gen8_irq_reset;
4534 		dev->driver->irq_postinstall = gen8_irq_postinstall;
4535 		dev->driver->irq_uninstall = gen8_irq_uninstall;
4536 		dev->driver->enable_vblank = gen8_enable_vblank;
4537 		dev->driver->disable_vblank = gen8_disable_vblank;
4538 		if (IS_BROXTON(dev))
4539 			dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
4540 		else if (HAS_PCH_SPT(dev))
4541 			dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
4542 		else
4543 			dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
4544 	} else if (HAS_PCH_SPLIT(dev)) {
4545 		dev->driver->irq_handler = ironlake_irq_handler;
4546 		dev->driver->irq_preinstall = ironlake_irq_reset;
4547 		dev->driver->irq_postinstall = ironlake_irq_postinstall;
4548 		dev->driver->irq_uninstall = ironlake_irq_uninstall;
4549 		dev->driver->enable_vblank = ironlake_enable_vblank;
4550 		dev->driver->disable_vblank = ironlake_disable_vblank;
4551 		dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
4552 	} else {
4553 		if (INTEL_INFO(dev_priv)->gen == 2) {
4554 			dev->driver->irq_preinstall = i8xx_irq_preinstall;
4555 			dev->driver->irq_postinstall = i8xx_irq_postinstall;
4556 			dev->driver->irq_handler = i8xx_irq_handler;
4557 			dev->driver->irq_uninstall = i8xx_irq_uninstall;
4558 		} else if (INTEL_INFO(dev_priv)->gen == 3) {
4559 			dev->driver->irq_preinstall = i915_irq_preinstall;
4560 			dev->driver->irq_postinstall = i915_irq_postinstall;
4561 			dev->driver->irq_uninstall = i915_irq_uninstall;
4562 			dev->driver->irq_handler = i915_irq_handler;
4563 		} else {
4564 			dev->driver->irq_preinstall = i965_irq_preinstall;
4565 			dev->driver->irq_postinstall = i965_irq_postinstall;
4566 			dev->driver->irq_uninstall = i965_irq_uninstall;
4567 			dev->driver->irq_handler = i965_irq_handler;
4568 		}
4569 		if (I915_HAS_HOTPLUG(dev_priv))
4570 			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4571 		dev->driver->enable_vblank = i915_enable_vblank;
4572 		dev->driver->disable_vblank = i915_disable_vblank;
4573 	}
4574 }
4575 
4576 /**
4577  * intel_irq_install - enables the hardware interrupt
4578  * @dev_priv: i915 device instance
4579  *
4580  * This function enables the hardware interrupt handling, but leaves hotplug
4581  * handling disabled. It is called after intel_irq_init().
4582  *
4583  * In the driver load and resume code we need working interrupts in a few places
4584  * but don't want to deal with the hassle of concurrent probe and hotplug
4585  * workers. Hence the split into this two-stage approach.
4586  */
4587 int intel_irq_install(struct drm_i915_private *dev_priv)
4588 {
4589 	/*
4590 	 * We enable some interrupt sources in our postinstall hooks, so mark
4591 	 * interrupts as enabled _before_ actually enabling them to avoid
4592 	 * special cases in our ordering checks.
4593 	 */
4594 	dev_priv->pm.irqs_enabled = true;
4595 
4596 	return drm_irq_install(dev_priv->dev, dev_priv->dev->pdev->irq);
4597 }
4598 
4599 /**
4600  * intel_irq_uninstall - finalizes all irq handling
4601  * @dev_priv: i915 device instance
4602  *
4603  * This stops interrupt and hotplug handling, and unregisters and frees all
4604  * resources acquired in the init functions.
4605  */
4606 void intel_irq_uninstall(struct drm_i915_private *dev_priv)
4607 {
4608 	drm_irq_uninstall(dev_priv->dev);
4609 	intel_hpd_cancel_work(dev_priv);
4610 	dev_priv->pm.irqs_enabled = false;
4611 }
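
/*
 * A minimal sketch of the intended call order, per the kerneldoc above
 * (driver load and unload paths, error handling omitted):
 *
 *	intel_irq_init(dev_priv);	vtables, work items, timers
 *	intel_irq_install(dev_priv);	hardware interrupt enabled
 *	...
 *	intel_irq_uninstall(dev_priv);	on driver unload
 */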
4612 
4613 /**
4614  * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
4615  * @dev_priv: i915 device instance
4616  *
4617  * This function is used to disable interrupts at runtime, both in the runtime
4618  * pm and the system suspend/resume code.
4619  */
4620 void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
4621 {
4622 	dev_priv->dev->driver->irq_uninstall(dev_priv->dev);
4623 	dev_priv->pm.irqs_enabled = false;
4624 	synchronize_irq(dev_priv->dev->irq);
4625 }
4626 
4627 /**
4628  * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
4629  * @dev_priv: i915 device instance
4630  *
4631  * This function is used to enable interrupts at runtime, both in the runtime
4632  * pm and the system suspend/resume code.
4633  */
4634 void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
4635 {
4636 	dev_priv->pm.irqs_enabled = true;
4637 	dev_priv->dev->driver->irq_preinstall(dev_priv->dev);
4638 	dev_priv->dev->driver->irq_postinstall(dev_priv->dev);
4639 }
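
/*
 * Note the asymmetry with intel_irq_install()/intel_irq_uninstall(): the
 * runtime variants above reuse the driver's irq_uninstall and
 * irq_preinstall/irq_postinstall hooks directly, leaving the interrupt
 * handler itself registered with the kernel.
 */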
4640