xref: /openbmc/linux/drivers/gpu/drm/i915/i915_irq.c (revision 23bb4cb5122c2c6379be38aaa5c0fd6786ae6c3a)
1c0e09200SDave Airlie /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
2c0e09200SDave Airlie  */
3c0e09200SDave Airlie /*
4c0e09200SDave Airlie  * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
5c0e09200SDave Airlie  * All Rights Reserved.
6c0e09200SDave Airlie  *
7c0e09200SDave Airlie  * Permission is hereby granted, free of charge, to any person obtaining a
8c0e09200SDave Airlie  * copy of this software and associated documentation files (the
9c0e09200SDave Airlie  * "Software"), to deal in the Software without restriction, including
10c0e09200SDave Airlie  * without limitation the rights to use, copy, modify, merge, publish,
11c0e09200SDave Airlie  * distribute, sub license, and/or sell copies of the Software, and to
12c0e09200SDave Airlie  * permit persons to whom the Software is furnished to do so, subject to
13c0e09200SDave Airlie  * the following conditions:
14c0e09200SDave Airlie  *
15c0e09200SDave Airlie  * The above copyright notice and this permission notice (including the
16c0e09200SDave Airlie  * next paragraph) shall be included in all copies or substantial portions
17c0e09200SDave Airlie  * of the Software.
18c0e09200SDave Airlie  *
19c0e09200SDave Airlie  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20c0e09200SDave Airlie  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21c0e09200SDave Airlie  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22c0e09200SDave Airlie  * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23c0e09200SDave Airlie  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24c0e09200SDave Airlie  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25c0e09200SDave Airlie  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26c0e09200SDave Airlie  *
27c0e09200SDave Airlie  */
28c0e09200SDave Airlie 
29a70491ccSJoe Perches #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30a70491ccSJoe Perches 
3163eeaf38SJesse Barnes #include <linux/sysrq.h>
325a0e3ad6STejun Heo #include <linux/slab.h>
33b2c88f5bSDamien Lespiau #include <linux/circ_buf.h>
34760285e7SDavid Howells #include <drm/drmP.h>
35760285e7SDavid Howells #include <drm/i915_drm.h>
36c0e09200SDave Airlie #include "i915_drv.h"
371c5d22f7SChris Wilson #include "i915_trace.h"
3879e53945SJesse Barnes #include "intel_drv.h"
39c0e09200SDave Airlie 
40fca52a55SDaniel Vetter /**
41fca52a55SDaniel Vetter  * DOC: interrupt handling
42fca52a55SDaniel Vetter  *
43fca52a55SDaniel Vetter  * These functions provide the basic support for enabling and disabling the
44fca52a55SDaniel Vetter  * interrupt handling support. There's a lot more functionality in i915_irq.c
45fca52a55SDaniel Vetter  * and related files, but that will be described in separate chapters.
46fca52a55SDaniel Vetter  */
47fca52a55SDaniel Vetter 
48e4ce95aaSVille Syrjälä static const u32 hpd_ilk[HPD_NUM_PINS] = {
49e4ce95aaSVille Syrjälä 	[HPD_PORT_A] = DE_DP_A_HOTPLUG,
50e4ce95aaSVille Syrjälä };
51e4ce95aaSVille Syrjälä 
52*23bb4cb5SVille Syrjälä static const u32 hpd_ivb[HPD_NUM_PINS] = {
53*23bb4cb5SVille Syrjälä 	[HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
54*23bb4cb5SVille Syrjälä };
55*23bb4cb5SVille Syrjälä 
567c7e10dbSVille Syrjälä static const u32 hpd_ibx[HPD_NUM_PINS] = {
57e5868a31SEgbert Eich 	[HPD_CRT] = SDE_CRT_HOTPLUG,
58e5868a31SEgbert Eich 	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
59e5868a31SEgbert Eich 	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
60e5868a31SEgbert Eich 	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
61e5868a31SEgbert Eich 	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
62e5868a31SEgbert Eich };
63e5868a31SEgbert Eich 
647c7e10dbSVille Syrjälä static const u32 hpd_cpt[HPD_NUM_PINS] = {
65e5868a31SEgbert Eich 	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
6673c352a2SDaniel Vetter 	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
67e5868a31SEgbert Eich 	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
68e5868a31SEgbert Eich 	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
69e5868a31SEgbert Eich 	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
70e5868a31SEgbert Eich };
71e5868a31SEgbert Eich 
7226951cafSXiong Zhang static const u32 hpd_spt[HPD_NUM_PINS] = {
7326951cafSXiong Zhang 	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
7426951cafSXiong Zhang 	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
7526951cafSXiong Zhang 	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
7626951cafSXiong Zhang 	[HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT
7726951cafSXiong Zhang };
7826951cafSXiong Zhang 
797c7e10dbSVille Syrjälä static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
80e5868a31SEgbert Eich 	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
81e5868a31SEgbert Eich 	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
82e5868a31SEgbert Eich 	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
83e5868a31SEgbert Eich 	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
84e5868a31SEgbert Eich 	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
85e5868a31SEgbert Eich 	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
86e5868a31SEgbert Eich };
87e5868a31SEgbert Eich 
887c7e10dbSVille Syrjälä static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
89e5868a31SEgbert Eich 	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
90e5868a31SEgbert Eich 	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
91e5868a31SEgbert Eich 	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
92e5868a31SEgbert Eich 	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
93e5868a31SEgbert Eich 	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
94e5868a31SEgbert Eich 	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
95e5868a31SEgbert Eich };
96e5868a31SEgbert Eich 
974bca26d0SVille Syrjälä static const u32 hpd_status_i915[HPD_NUM_PINS] = {
98e5868a31SEgbert Eich 	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
99e5868a31SEgbert Eich 	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
100e5868a31SEgbert Eich 	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
101e5868a31SEgbert Eich 	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
102e5868a31SEgbert Eich 	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
103e5868a31SEgbert Eich 	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
104e5868a31SEgbert Eich };
105e5868a31SEgbert Eich 
106e0a20ad7SShashank Sharma /* BXT hpd list */
107e0a20ad7SShashank Sharma static const u32 hpd_bxt[HPD_NUM_PINS] = {
1087f3561beSSonika Jindal 	[HPD_PORT_A] = BXT_DE_PORT_HP_DDIA,
109e0a20ad7SShashank Sharma 	[HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
110e0a20ad7SShashank Sharma 	[HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
111e0a20ad7SShashank Sharma };
112e0a20ad7SShashank Sharma 
1135c502442SPaulo Zanoni /* IIR can theoretically queue up two events. Be paranoid. */
114f86f3fb0SPaulo Zanoni #define GEN8_IRQ_RESET_NDX(type, which) do { \
1155c502442SPaulo Zanoni 	I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
1165c502442SPaulo Zanoni 	POSTING_READ(GEN8_##type##_IMR(which)); \
1175c502442SPaulo Zanoni 	I915_WRITE(GEN8_##type##_IER(which), 0); \
1185c502442SPaulo Zanoni 	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
1195c502442SPaulo Zanoni 	POSTING_READ(GEN8_##type##_IIR(which)); \
1205c502442SPaulo Zanoni 	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
1215c502442SPaulo Zanoni 	POSTING_READ(GEN8_##type##_IIR(which)); \
1225c502442SPaulo Zanoni } while (0)
1235c502442SPaulo Zanoni 
124f86f3fb0SPaulo Zanoni #define GEN5_IRQ_RESET(type) do { \
125a9d356a6SPaulo Zanoni 	I915_WRITE(type##IMR, 0xffffffff); \
1265c502442SPaulo Zanoni 	POSTING_READ(type##IMR); \
127a9d356a6SPaulo Zanoni 	I915_WRITE(type##IER, 0); \
1285c502442SPaulo Zanoni 	I915_WRITE(type##IIR, 0xffffffff); \
1295c502442SPaulo Zanoni 	POSTING_READ(type##IIR); \
1305c502442SPaulo Zanoni 	I915_WRITE(type##IIR, 0xffffffff); \
1315c502442SPaulo Zanoni 	POSTING_READ(type##IIR); \
132a9d356a6SPaulo Zanoni } while (0)
133a9d356a6SPaulo Zanoni 
134337ba017SPaulo Zanoni /*
135337ba017SPaulo Zanoni  * We should clear IMR at preinstall/uninstall, and just check at postinstall.
136337ba017SPaulo Zanoni  */
137337ba017SPaulo Zanoni #define GEN5_ASSERT_IIR_IS_ZERO(reg) do { \
138337ba017SPaulo Zanoni 	u32 val = I915_READ(reg); \
139337ba017SPaulo Zanoni 	if (val) { \
140337ba017SPaulo Zanoni 		WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n", \
141337ba017SPaulo Zanoni 		     (reg), val); \
142337ba017SPaulo Zanoni 		I915_WRITE((reg), 0xffffffff); \
143337ba017SPaulo Zanoni 		POSTING_READ(reg); \
144337ba017SPaulo Zanoni 		I915_WRITE((reg), 0xffffffff); \
145337ba017SPaulo Zanoni 		POSTING_READ(reg); \
146337ba017SPaulo Zanoni 	} \
147337ba017SPaulo Zanoni } while (0)
148337ba017SPaulo Zanoni 
14935079899SPaulo Zanoni #define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
150337ba017SPaulo Zanoni 	GEN5_ASSERT_IIR_IS_ZERO(GEN8_##type##_IIR(which)); \
15135079899SPaulo Zanoni 	I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
1527d1bd539SVille Syrjälä 	I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
1537d1bd539SVille Syrjälä 	POSTING_READ(GEN8_##type##_IMR(which)); \
15435079899SPaulo Zanoni } while (0)
15535079899SPaulo Zanoni 
15635079899SPaulo Zanoni #define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
157337ba017SPaulo Zanoni 	GEN5_ASSERT_IIR_IS_ZERO(type##IIR); \
15835079899SPaulo Zanoni 	I915_WRITE(type##IER, (ier_val)); \
1597d1bd539SVille Syrjälä 	I915_WRITE(type##IMR, (imr_val)); \
1607d1bd539SVille Syrjälä 	POSTING_READ(type##IMR); \
16135079899SPaulo Zanoni } while (0)
16235079899SPaulo Zanoni 
163c9a9a268SImre Deak static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
164c9a9a268SImre Deak 
165d9dc34f1SVille Syrjälä /**
166d9dc34f1SVille Syrjälä  * ilk_update_display_irq - update DEIMR
167d9dc34f1SVille Syrjälä  * @dev_priv: driver private
168d9dc34f1SVille Syrjälä  * @interrupt_mask: mask of interrupt bits to update
169d9dc34f1SVille Syrjälä  * @enabled_irq_mask: mask of interrupt bits to enable
170d9dc34f1SVille Syrjälä  */
171d9dc34f1SVille Syrjälä static void ilk_update_display_irq(struct drm_i915_private *dev_priv,
172d9dc34f1SVille Syrjälä 				   uint32_t interrupt_mask,
173d9dc34f1SVille Syrjälä 				   uint32_t enabled_irq_mask)
174036a4a7dSZhenyu Wang {
175d9dc34f1SVille Syrjälä 	uint32_t new_val;
176d9dc34f1SVille Syrjälä 
1774bc9d430SDaniel Vetter 	assert_spin_locked(&dev_priv->irq_lock);
1784bc9d430SDaniel Vetter 
179d9dc34f1SVille Syrjälä 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
180d9dc34f1SVille Syrjälä 
1819df7575fSJesse Barnes 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
182c67a470bSPaulo Zanoni 		return;
183c67a470bSPaulo Zanoni 
184d9dc34f1SVille Syrjälä 	new_val = dev_priv->irq_mask;
185d9dc34f1SVille Syrjälä 	new_val &= ~interrupt_mask;
186d9dc34f1SVille Syrjälä 	new_val |= (~enabled_irq_mask & interrupt_mask);
187d9dc34f1SVille Syrjälä 
188d9dc34f1SVille Syrjälä 	if (new_val != dev_priv->irq_mask) {
189d9dc34f1SVille Syrjälä 		dev_priv->irq_mask = new_val;
1901ec14ad3SChris Wilson 		I915_WRITE(DEIMR, dev_priv->irq_mask);
1913143a2bfSChris Wilson 		POSTING_READ(DEIMR);
192036a4a7dSZhenyu Wang 	}
193036a4a7dSZhenyu Wang }
194036a4a7dSZhenyu Wang 
19547339cd9SDaniel Vetter void
196d9dc34f1SVille Syrjälä ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
197d9dc34f1SVille Syrjälä {
198d9dc34f1SVille Syrjälä 	ilk_update_display_irq(dev_priv, mask, mask);
199d9dc34f1SVille Syrjälä }
200d9dc34f1SVille Syrjälä 
201d9dc34f1SVille Syrjälä void
2022d1013ddSJani Nikula ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
203036a4a7dSZhenyu Wang {
204d9dc34f1SVille Syrjälä 	ilk_update_display_irq(dev_priv, mask, 0);
205036a4a7dSZhenyu Wang }
206036a4a7dSZhenyu Wang 
20743eaea13SPaulo Zanoni /**
20843eaea13SPaulo Zanoni  * ilk_update_gt_irq - update GTIMR
20943eaea13SPaulo Zanoni  * @dev_priv: driver private
21043eaea13SPaulo Zanoni  * @interrupt_mask: mask of interrupt bits to update
21143eaea13SPaulo Zanoni  * @enabled_irq_mask: mask of interrupt bits to enable
21243eaea13SPaulo Zanoni  */
21343eaea13SPaulo Zanoni static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
21443eaea13SPaulo Zanoni 			      uint32_t interrupt_mask,
21543eaea13SPaulo Zanoni 			      uint32_t enabled_irq_mask)
21643eaea13SPaulo Zanoni {
21743eaea13SPaulo Zanoni 	assert_spin_locked(&dev_priv->irq_lock);
21843eaea13SPaulo Zanoni 
21915a17aaeSDaniel Vetter 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
22015a17aaeSDaniel Vetter 
2219df7575fSJesse Barnes 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
222c67a470bSPaulo Zanoni 		return;
223c67a470bSPaulo Zanoni 
22443eaea13SPaulo Zanoni 	dev_priv->gt_irq_mask &= ~interrupt_mask;
22543eaea13SPaulo Zanoni 	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
22643eaea13SPaulo Zanoni 	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
22743eaea13SPaulo Zanoni 	POSTING_READ(GTIMR);
22843eaea13SPaulo Zanoni }
22943eaea13SPaulo Zanoni 
230480c8033SDaniel Vetter void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
23143eaea13SPaulo Zanoni {
23243eaea13SPaulo Zanoni 	ilk_update_gt_irq(dev_priv, mask, mask);
23343eaea13SPaulo Zanoni }
23443eaea13SPaulo Zanoni 
235480c8033SDaniel Vetter void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
23643eaea13SPaulo Zanoni {
23743eaea13SPaulo Zanoni 	ilk_update_gt_irq(dev_priv, mask, 0);
23843eaea13SPaulo Zanoni }
23943eaea13SPaulo Zanoni 
240b900b949SImre Deak static u32 gen6_pm_iir(struct drm_i915_private *dev_priv)
241b900b949SImre Deak {
242b900b949SImre Deak 	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
243b900b949SImre Deak }
244b900b949SImre Deak 
245a72fbc3aSImre Deak static u32 gen6_pm_imr(struct drm_i915_private *dev_priv)
246a72fbc3aSImre Deak {
247a72fbc3aSImre Deak 	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR;
248a72fbc3aSImre Deak }
249a72fbc3aSImre Deak 
250b900b949SImre Deak static u32 gen6_pm_ier(struct drm_i915_private *dev_priv)
251b900b949SImre Deak {
252b900b949SImre Deak 	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER;
253b900b949SImre Deak }
254b900b949SImre Deak 
255edbfdb45SPaulo Zanoni /**
256edbfdb45SPaulo Zanoni   * snb_update_pm_irq - update GEN6_PMIMR
257edbfdb45SPaulo Zanoni   * @dev_priv: driver private
258edbfdb45SPaulo Zanoni   * @interrupt_mask: mask of interrupt bits to update
259edbfdb45SPaulo Zanoni   * @enabled_irq_mask: mask of interrupt bits to enable
260edbfdb45SPaulo Zanoni   */
261edbfdb45SPaulo Zanoni static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
262edbfdb45SPaulo Zanoni 			      uint32_t interrupt_mask,
263edbfdb45SPaulo Zanoni 			      uint32_t enabled_irq_mask)
264edbfdb45SPaulo Zanoni {
265605cd25bSPaulo Zanoni 	uint32_t new_val;
266edbfdb45SPaulo Zanoni 
26715a17aaeSDaniel Vetter 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
26815a17aaeSDaniel Vetter 
269edbfdb45SPaulo Zanoni 	assert_spin_locked(&dev_priv->irq_lock);
270edbfdb45SPaulo Zanoni 
271605cd25bSPaulo Zanoni 	new_val = dev_priv->pm_irq_mask;
272f52ecbcfSPaulo Zanoni 	new_val &= ~interrupt_mask;
273f52ecbcfSPaulo Zanoni 	new_val |= (~enabled_irq_mask & interrupt_mask);
274f52ecbcfSPaulo Zanoni 
275605cd25bSPaulo Zanoni 	if (new_val != dev_priv->pm_irq_mask) {
276605cd25bSPaulo Zanoni 		dev_priv->pm_irq_mask = new_val;
277a72fbc3aSImre Deak 		I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_irq_mask);
278a72fbc3aSImre Deak 		POSTING_READ(gen6_pm_imr(dev_priv));
279edbfdb45SPaulo Zanoni 	}
280f52ecbcfSPaulo Zanoni }
281edbfdb45SPaulo Zanoni 
282480c8033SDaniel Vetter void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
283edbfdb45SPaulo Zanoni {
2849939fba2SImre Deak 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
2859939fba2SImre Deak 		return;
2869939fba2SImre Deak 
287edbfdb45SPaulo Zanoni 	snb_update_pm_irq(dev_priv, mask, mask);
288edbfdb45SPaulo Zanoni }
289edbfdb45SPaulo Zanoni 
2909939fba2SImre Deak static void __gen6_disable_pm_irq(struct drm_i915_private *dev_priv,
2919939fba2SImre Deak 				  uint32_t mask)
2929939fba2SImre Deak {
2939939fba2SImre Deak 	snb_update_pm_irq(dev_priv, mask, 0);
2949939fba2SImre Deak }
2959939fba2SImre Deak 
296480c8033SDaniel Vetter void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
297edbfdb45SPaulo Zanoni {
2989939fba2SImre Deak 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
2999939fba2SImre Deak 		return;
3009939fba2SImre Deak 
3019939fba2SImre Deak 	__gen6_disable_pm_irq(dev_priv, mask);
302edbfdb45SPaulo Zanoni }
303edbfdb45SPaulo Zanoni 
3043cc134e3SImre Deak void gen6_reset_rps_interrupts(struct drm_device *dev)
3053cc134e3SImre Deak {
3063cc134e3SImre Deak 	struct drm_i915_private *dev_priv = dev->dev_private;
3073cc134e3SImre Deak 	uint32_t reg = gen6_pm_iir(dev_priv);
3083cc134e3SImre Deak 
3093cc134e3SImre Deak 	spin_lock_irq(&dev_priv->irq_lock);
3103cc134e3SImre Deak 	I915_WRITE(reg, dev_priv->pm_rps_events);
3113cc134e3SImre Deak 	I915_WRITE(reg, dev_priv->pm_rps_events);
3123cc134e3SImre Deak 	POSTING_READ(reg);
313096fad9eSImre Deak 	dev_priv->rps.pm_iir = 0;
3143cc134e3SImre Deak 	spin_unlock_irq(&dev_priv->irq_lock);
3153cc134e3SImre Deak }
3163cc134e3SImre Deak 
317b900b949SImre Deak void gen6_enable_rps_interrupts(struct drm_device *dev)
318b900b949SImre Deak {
319b900b949SImre Deak 	struct drm_i915_private *dev_priv = dev->dev_private;
320b900b949SImre Deak 
321b900b949SImre Deak 	spin_lock_irq(&dev_priv->irq_lock);
32278e68d36SImre Deak 
323b900b949SImre Deak 	WARN_ON(dev_priv->rps.pm_iir);
3243cc134e3SImre Deak 	WARN_ON(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
325d4d70aa5SImre Deak 	dev_priv->rps.interrupts_enabled = true;
32678e68d36SImre Deak 	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) |
32778e68d36SImre Deak 				dev_priv->pm_rps_events);
328b900b949SImre Deak 	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
32978e68d36SImre Deak 
330b900b949SImre Deak 	spin_unlock_irq(&dev_priv->irq_lock);
331b900b949SImre Deak }
332b900b949SImre Deak 
33359d02a1fSImre Deak u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask)
33459d02a1fSImre Deak {
33559d02a1fSImre Deak 	/*
336f24eeb19SImre Deak 	 * SNB,IVB can while VLV,CHV may hard hang on looping batchbuffer
33759d02a1fSImre Deak 	 * if GEN6_PM_UP_EI_EXPIRED is masked.
338f24eeb19SImre Deak 	 *
339f24eeb19SImre Deak 	 * TODO: verify if this can be reproduced on VLV,CHV.
34059d02a1fSImre Deak 	 */
34159d02a1fSImre Deak 	if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv))
34259d02a1fSImre Deak 		mask &= ~GEN6_PM_RP_UP_EI_EXPIRED;
34359d02a1fSImre Deak 
34459d02a1fSImre Deak 	if (INTEL_INFO(dev_priv)->gen >= 8)
34559d02a1fSImre Deak 		mask &= ~GEN8_PMINTR_REDIRECT_TO_NON_DISP;
34659d02a1fSImre Deak 
34759d02a1fSImre Deak 	return mask;
34859d02a1fSImre Deak }
34959d02a1fSImre Deak 
350b900b949SImre Deak void gen6_disable_rps_interrupts(struct drm_device *dev)
351b900b949SImre Deak {
352b900b949SImre Deak 	struct drm_i915_private *dev_priv = dev->dev_private;
353b900b949SImre Deak 
354d4d70aa5SImre Deak 	spin_lock_irq(&dev_priv->irq_lock);
355d4d70aa5SImre Deak 	dev_priv->rps.interrupts_enabled = false;
356d4d70aa5SImre Deak 	spin_unlock_irq(&dev_priv->irq_lock);
357d4d70aa5SImre Deak 
358d4d70aa5SImre Deak 	cancel_work_sync(&dev_priv->rps.work);
359d4d70aa5SImre Deak 
3609939fba2SImre Deak 	spin_lock_irq(&dev_priv->irq_lock);
3619939fba2SImre Deak 
36259d02a1fSImre Deak 	I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0));
3639939fba2SImre Deak 
3649939fba2SImre Deak 	__gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
365b900b949SImre Deak 	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) &
366b900b949SImre Deak 				~dev_priv->pm_rps_events);
36758072ccbSImre Deak 
36858072ccbSImre Deak 	spin_unlock_irq(&dev_priv->irq_lock);
36958072ccbSImre Deak 
37058072ccbSImre Deak 	synchronize_irq(dev->irq);
371b900b949SImre Deak }
372b900b949SImre Deak 
3730961021aSBen Widawsky /**
374fee884edSDaniel Vetter  * ibx_display_interrupt_update - update SDEIMR
375fee884edSDaniel Vetter  * @dev_priv: driver private
376fee884edSDaniel Vetter  * @interrupt_mask: mask of interrupt bits to update
377fee884edSDaniel Vetter  * @enabled_irq_mask: mask of interrupt bits to enable
378fee884edSDaniel Vetter  */
37947339cd9SDaniel Vetter void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
380fee884edSDaniel Vetter 				  uint32_t interrupt_mask,
381fee884edSDaniel Vetter 				  uint32_t enabled_irq_mask)
382fee884edSDaniel Vetter {
383fee884edSDaniel Vetter 	uint32_t sdeimr = I915_READ(SDEIMR);
384fee884edSDaniel Vetter 	sdeimr &= ~interrupt_mask;
385fee884edSDaniel Vetter 	sdeimr |= (~enabled_irq_mask & interrupt_mask);
386fee884edSDaniel Vetter 
38715a17aaeSDaniel Vetter 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
38815a17aaeSDaniel Vetter 
389fee884edSDaniel Vetter 	assert_spin_locked(&dev_priv->irq_lock);
390fee884edSDaniel Vetter 
3919df7575fSJesse Barnes 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
392c67a470bSPaulo Zanoni 		return;
393c67a470bSPaulo Zanoni 
394fee884edSDaniel Vetter 	I915_WRITE(SDEIMR, sdeimr);
395fee884edSDaniel Vetter 	POSTING_READ(SDEIMR);
396fee884edSDaniel Vetter }
3978664281bSPaulo Zanoni 
398b5ea642aSDaniel Vetter static void
399755e9019SImre Deak __i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
400755e9019SImre Deak 		       u32 enable_mask, u32 status_mask)
4017c463586SKeith Packard {
4029db4a9c7SJesse Barnes 	u32 reg = PIPESTAT(pipe);
403755e9019SImre Deak 	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
4047c463586SKeith Packard 
405b79480baSDaniel Vetter 	assert_spin_locked(&dev_priv->irq_lock);
406d518ce50SDaniel Vetter 	WARN_ON(!intel_irqs_enabled(dev_priv));
407b79480baSDaniel Vetter 
40804feced9SVille Syrjälä 	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
40904feced9SVille Syrjälä 		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
41004feced9SVille Syrjälä 		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
41104feced9SVille Syrjälä 		      pipe_name(pipe), enable_mask, status_mask))
412755e9019SImre Deak 		return;
413755e9019SImre Deak 
414755e9019SImre Deak 	if ((pipestat & enable_mask) == enable_mask)
41546c06a30SVille Syrjälä 		return;
41646c06a30SVille Syrjälä 
41791d181ddSImre Deak 	dev_priv->pipestat_irq_mask[pipe] |= status_mask;
41891d181ddSImre Deak 
4197c463586SKeith Packard 	/* Enable the interrupt, clear any pending status */
420755e9019SImre Deak 	pipestat |= enable_mask | status_mask;
42146c06a30SVille Syrjälä 	I915_WRITE(reg, pipestat);
4223143a2bfSChris Wilson 	POSTING_READ(reg);
4237c463586SKeith Packard }
4247c463586SKeith Packard 
425b5ea642aSDaniel Vetter static void
426755e9019SImre Deak __i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
427755e9019SImre Deak 		        u32 enable_mask, u32 status_mask)
4287c463586SKeith Packard {
4299db4a9c7SJesse Barnes 	u32 reg = PIPESTAT(pipe);
430755e9019SImre Deak 	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
4317c463586SKeith Packard 
432b79480baSDaniel Vetter 	assert_spin_locked(&dev_priv->irq_lock);
433d518ce50SDaniel Vetter 	WARN_ON(!intel_irqs_enabled(dev_priv));
434b79480baSDaniel Vetter 
43504feced9SVille Syrjälä 	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
43604feced9SVille Syrjälä 		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
43704feced9SVille Syrjälä 		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
43804feced9SVille Syrjälä 		      pipe_name(pipe), enable_mask, status_mask))
43946c06a30SVille Syrjälä 		return;
44046c06a30SVille Syrjälä 
441755e9019SImre Deak 	if ((pipestat & enable_mask) == 0)
442755e9019SImre Deak 		return;
443755e9019SImre Deak 
44491d181ddSImre Deak 	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
44591d181ddSImre Deak 
446755e9019SImre Deak 	pipestat &= ~enable_mask;
44746c06a30SVille Syrjälä 	I915_WRITE(reg, pipestat);
4483143a2bfSChris Wilson 	POSTING_READ(reg);
4497c463586SKeith Packard }
4507c463586SKeith Packard 
45110c59c51SImre Deak static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
45210c59c51SImre Deak {
45310c59c51SImre Deak 	u32 enable_mask = status_mask << 16;
45410c59c51SImre Deak 
45510c59c51SImre Deak 	/*
456724a6905SVille Syrjälä 	 * On pipe A we don't support the PSR interrupt yet,
457724a6905SVille Syrjälä 	 * on pipe B and C the same bit MBZ.
45810c59c51SImre Deak 	 */
45910c59c51SImre Deak 	if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
46010c59c51SImre Deak 		return 0;
461724a6905SVille Syrjälä 	/*
462724a6905SVille Syrjälä 	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
463724a6905SVille Syrjälä 	 * A the same bit is for perf counters which we don't use either.
464724a6905SVille Syrjälä 	 */
465724a6905SVille Syrjälä 	if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
466724a6905SVille Syrjälä 		return 0;
46710c59c51SImre Deak 
46810c59c51SImre Deak 	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
46910c59c51SImre Deak 			 SPRITE0_FLIP_DONE_INT_EN_VLV |
47010c59c51SImre Deak 			 SPRITE1_FLIP_DONE_INT_EN_VLV);
47110c59c51SImre Deak 	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
47210c59c51SImre Deak 		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
47310c59c51SImre Deak 	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
47410c59c51SImre Deak 		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;
47510c59c51SImre Deak 
47610c59c51SImre Deak 	return enable_mask;
47710c59c51SImre Deak }
47810c59c51SImre Deak 
479755e9019SImre Deak void
480755e9019SImre Deak i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
481755e9019SImre Deak 		     u32 status_mask)
482755e9019SImre Deak {
483755e9019SImre Deak 	u32 enable_mask;
484755e9019SImre Deak 
48510c59c51SImre Deak 	if (IS_VALLEYVIEW(dev_priv->dev))
48610c59c51SImre Deak 		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
48710c59c51SImre Deak 							   status_mask);
48810c59c51SImre Deak 	else
489755e9019SImre Deak 		enable_mask = status_mask << 16;
490755e9019SImre Deak 	__i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
491755e9019SImre Deak }
492755e9019SImre Deak 
493755e9019SImre Deak void
494755e9019SImre Deak i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
495755e9019SImre Deak 		      u32 status_mask)
496755e9019SImre Deak {
497755e9019SImre Deak 	u32 enable_mask;
498755e9019SImre Deak 
49910c59c51SImre Deak 	if (IS_VALLEYVIEW(dev_priv->dev))
50010c59c51SImre Deak 		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
50110c59c51SImre Deak 							   status_mask);
50210c59c51SImre Deak 	else
503755e9019SImre Deak 		enable_mask = status_mask << 16;
504755e9019SImre Deak 	__i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
505755e9019SImre Deak }
506755e9019SImre Deak 
507c0e09200SDave Airlie /**
508f49e38ddSJani Nikula  * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
50901c66889SZhao Yakui  */
510f49e38ddSJani Nikula static void i915_enable_asle_pipestat(struct drm_device *dev)
51101c66889SZhao Yakui {
5122d1013ddSJani Nikula 	struct drm_i915_private *dev_priv = dev->dev_private;
5131ec14ad3SChris Wilson 
514f49e38ddSJani Nikula 	if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
515f49e38ddSJani Nikula 		return;
516f49e38ddSJani Nikula 
51713321786SDaniel Vetter 	spin_lock_irq(&dev_priv->irq_lock);
51801c66889SZhao Yakui 
519755e9019SImre Deak 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
520a6c45cf0SChris Wilson 	if (INTEL_INFO(dev)->gen >= 4)
5213b6c42e8SDaniel Vetter 		i915_enable_pipestat(dev_priv, PIPE_A,
522755e9019SImre Deak 				     PIPE_LEGACY_BLC_EVENT_STATUS);
5231ec14ad3SChris Wilson 
52413321786SDaniel Vetter 	spin_unlock_irq(&dev_priv->irq_lock);
52501c66889SZhao Yakui }
52601c66889SZhao Yakui 
527f75f3746SVille Syrjälä /*
528f75f3746SVille Syrjälä  * This timing diagram depicts the video signal in and
529f75f3746SVille Syrjälä  * around the vertical blanking period.
530f75f3746SVille Syrjälä  *
531f75f3746SVille Syrjälä  * Assumptions about the fictitious mode used in this example:
532f75f3746SVille Syrjälä  *  vblank_start >= 3
533f75f3746SVille Syrjälä  *  vsync_start = vblank_start + 1
534f75f3746SVille Syrjälä  *  vsync_end = vblank_start + 2
535f75f3746SVille Syrjälä  *  vtotal = vblank_start + 3
536f75f3746SVille Syrjälä  *
537f75f3746SVille Syrjälä  *           start of vblank:
538f75f3746SVille Syrjälä  *           latch double buffered registers
539f75f3746SVille Syrjälä  *           increment frame counter (ctg+)
540f75f3746SVille Syrjälä  *           generate start of vblank interrupt (gen4+)
541f75f3746SVille Syrjälä  *           |
542f75f3746SVille Syrjälä  *           |          frame start:
543f75f3746SVille Syrjälä  *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
544f75f3746SVille Syrjälä  *           |          may be shifted forward 1-3 extra lines via PIPECONF
545f75f3746SVille Syrjälä  *           |          |
546f75f3746SVille Syrjälä  *           |          |  start of vsync:
547f75f3746SVille Syrjälä  *           |          |  generate vsync interrupt
548f75f3746SVille Syrjälä  *           |          |  |
549f75f3746SVille Syrjälä  * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
550f75f3746SVille Syrjälä  *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
551f75f3746SVille Syrjälä  * ----va---> <-----------------vb--------------------> <--------va-------------
552f75f3746SVille Syrjälä  *       |          |       <----vs----->                     |
553f75f3746SVille Syrjälä  * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
554f75f3746SVille Syrjälä  * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
555f75f3746SVille Syrjälä  * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
556f75f3746SVille Syrjälä  *       |          |                                         |
557f75f3746SVille Syrjälä  *       last visible pixel                                   first visible pixel
558f75f3746SVille Syrjälä  *                  |                                         increment frame counter (gen3/4)
559f75f3746SVille Syrjälä  *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
560f75f3746SVille Syrjälä  *
561f75f3746SVille Syrjälä  * x  = horizontal active
562f75f3746SVille Syrjälä  * _  = horizontal blanking
563f75f3746SVille Syrjälä  * hs = horizontal sync
564f75f3746SVille Syrjälä  * va = vertical active
565f75f3746SVille Syrjälä  * vb = vertical blanking
566f75f3746SVille Syrjälä  * vs = vertical sync
567f75f3746SVille Syrjälä  * vbs = vblank_start (number)
568f75f3746SVille Syrjälä  *
569f75f3746SVille Syrjälä  * Summary:
570f75f3746SVille Syrjälä  * - most events happen at the start of horizontal sync
571f75f3746SVille Syrjälä  * - frame start happens at the start of horizontal blank, 1-4 lines
572f75f3746SVille Syrjälä  *   (depending on PIPECONF settings) after the start of vblank
573f75f3746SVille Syrjälä  * - gen3/4 pixel and frame counter are synchronized with the start
574f75f3746SVille Syrjälä  *   of horizontal active on the first line of vertical active
575f75f3746SVille Syrjälä  */
576f75f3746SVille Syrjälä 
5774cdb83ecSVille Syrjälä static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe)
5784cdb83ecSVille Syrjälä {
5794cdb83ecSVille Syrjälä 	/* Gen2 doesn't have a hardware frame counter */
5804cdb83ecSVille Syrjälä 	return 0;
5814cdb83ecSVille Syrjälä }
5824cdb83ecSVille Syrjälä 
58342f52ef8SKeith Packard /* Called from drm generic code, passed a 'crtc', which
58442f52ef8SKeith Packard  * we use as a pipe index
58542f52ef8SKeith Packard  */
586f71d4af4SJesse Barnes static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
5870a3e67a4SJesse Barnes {
5882d1013ddSJani Nikula 	struct drm_i915_private *dev_priv = dev->dev_private;
5890a3e67a4SJesse Barnes 	unsigned long high_frame;
5900a3e67a4SJesse Barnes 	unsigned long low_frame;
5910b2a8e09SVille Syrjälä 	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
592391f75e2SVille Syrjälä 	struct intel_crtc *intel_crtc =
593391f75e2SVille Syrjälä 		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
594fc467a22SMaarten Lankhorst 	const struct drm_display_mode *mode = &intel_crtc->base.hwmode;
595391f75e2SVille Syrjälä 
5960b2a8e09SVille Syrjälä 	htotal = mode->crtc_htotal;
5970b2a8e09SVille Syrjälä 	hsync_start = mode->crtc_hsync_start;
5980b2a8e09SVille Syrjälä 	vbl_start = mode->crtc_vblank_start;
5990b2a8e09SVille Syrjälä 	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
6000b2a8e09SVille Syrjälä 		vbl_start = DIV_ROUND_UP(vbl_start, 2);
601391f75e2SVille Syrjälä 
6020b2a8e09SVille Syrjälä 	/* Convert to pixel count */
6030b2a8e09SVille Syrjälä 	vbl_start *= htotal;
6040b2a8e09SVille Syrjälä 
6050b2a8e09SVille Syrjälä 	/* Start of vblank event occurs at start of hsync */
6060b2a8e09SVille Syrjälä 	vbl_start -= htotal - hsync_start;
6070b2a8e09SVille Syrjälä 
6089db4a9c7SJesse Barnes 	high_frame = PIPEFRAME(pipe);
6099db4a9c7SJesse Barnes 	low_frame = PIPEFRAMEPIXEL(pipe);
6105eddb70bSChris Wilson 
6110a3e67a4SJesse Barnes 	/*
6120a3e67a4SJesse Barnes 	 * High & low register fields aren't synchronized, so make sure
6130a3e67a4SJesse Barnes 	 * we get a low value that's stable across two reads of the high
6140a3e67a4SJesse Barnes 	 * register.
6150a3e67a4SJesse Barnes 	 */
6160a3e67a4SJesse Barnes 	do {
6175eddb70bSChris Wilson 		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
618391f75e2SVille Syrjälä 		low   = I915_READ(low_frame);
6195eddb70bSChris Wilson 		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
6200a3e67a4SJesse Barnes 	} while (high1 != high2);
6210a3e67a4SJesse Barnes 
6225eddb70bSChris Wilson 	high1 >>= PIPE_FRAME_HIGH_SHIFT;
623391f75e2SVille Syrjälä 	pixel = low & PIPE_PIXEL_MASK;
6245eddb70bSChris Wilson 	low >>= PIPE_FRAME_LOW_SHIFT;
625391f75e2SVille Syrjälä 
626391f75e2SVille Syrjälä 	/*
627391f75e2SVille Syrjälä 	 * The frame counter increments at beginning of active.
628391f75e2SVille Syrjälä 	 * Cook up a vblank counter by also checking the pixel
629391f75e2SVille Syrjälä 	 * counter against vblank start.
630391f75e2SVille Syrjälä 	 */
631edc08d0aSVille Syrjälä 	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
6320a3e67a4SJesse Barnes }
6330a3e67a4SJesse Barnes 
634f71d4af4SJesse Barnes static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
6359880b7a5SJesse Barnes {
6362d1013ddSJani Nikula 	struct drm_i915_private *dev_priv = dev->dev_private;
6379db4a9c7SJesse Barnes 	int reg = PIPE_FRMCOUNT_GM45(pipe);
6389880b7a5SJesse Barnes 
6399880b7a5SJesse Barnes 	return I915_READ(reg);
6409880b7a5SJesse Barnes }
6419880b7a5SJesse Barnes 
642ad3543edSMario Kleiner /* raw reads, only for fast reads of display block, no need for forcewake etc. */
643ad3543edSMario Kleiner #define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
644ad3543edSMario Kleiner 
645a225f079SVille Syrjälä static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
646a225f079SVille Syrjälä {
647a225f079SVille Syrjälä 	struct drm_device *dev = crtc->base.dev;
648a225f079SVille Syrjälä 	struct drm_i915_private *dev_priv = dev->dev_private;
649fc467a22SMaarten Lankhorst 	const struct drm_display_mode *mode = &crtc->base.hwmode;
650a225f079SVille Syrjälä 	enum pipe pipe = crtc->pipe;
65180715b2fSVille Syrjälä 	int position, vtotal;
652a225f079SVille Syrjälä 
65380715b2fSVille Syrjälä 	vtotal = mode->crtc_vtotal;
654a225f079SVille Syrjälä 	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
655a225f079SVille Syrjälä 		vtotal /= 2;
656a225f079SVille Syrjälä 
657a225f079SVille Syrjälä 	if (IS_GEN2(dev))
658a225f079SVille Syrjälä 		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
659a225f079SVille Syrjälä 	else
660a225f079SVille Syrjälä 		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
661a225f079SVille Syrjälä 
662a225f079SVille Syrjälä 	/*
66380715b2fSVille Syrjälä 	 * See update_scanline_offset() for the details on the
66480715b2fSVille Syrjälä 	 * scanline_offset adjustment.
665a225f079SVille Syrjälä 	 */
66680715b2fSVille Syrjälä 	return (position + crtc->scanline_offset) % vtotal;
667a225f079SVille Syrjälä }
668a225f079SVille Syrjälä 
669f71d4af4SJesse Barnes static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
670abca9e45SVille Syrjälä 				    unsigned int flags, int *vpos, int *hpos,
671abca9e45SVille Syrjälä 				    ktime_t *stime, ktime_t *etime)
6720af7e4dfSMario Kleiner {
673c2baf4b7SVille Syrjälä 	struct drm_i915_private *dev_priv = dev->dev_private;
674c2baf4b7SVille Syrjälä 	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
675c2baf4b7SVille Syrjälä 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
676fc467a22SMaarten Lankhorst 	const struct drm_display_mode *mode = &intel_crtc->base.hwmode;
6773aa18df8SVille Syrjälä 	int position;
67878e8fc6bSVille Syrjälä 	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
6790af7e4dfSMario Kleiner 	bool in_vbl = true;
6800af7e4dfSMario Kleiner 	int ret = 0;
681ad3543edSMario Kleiner 	unsigned long irqflags;
6820af7e4dfSMario Kleiner 
683fc467a22SMaarten Lankhorst 	if (WARN_ON(!mode->crtc_clock)) {
6840af7e4dfSMario Kleiner 		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
6859db4a9c7SJesse Barnes 				 "pipe %c\n", pipe_name(pipe));
6860af7e4dfSMario Kleiner 		return 0;
6870af7e4dfSMario Kleiner 	}
6880af7e4dfSMario Kleiner 
689c2baf4b7SVille Syrjälä 	htotal = mode->crtc_htotal;
69078e8fc6bSVille Syrjälä 	hsync_start = mode->crtc_hsync_start;
691c2baf4b7SVille Syrjälä 	vtotal = mode->crtc_vtotal;
692c2baf4b7SVille Syrjälä 	vbl_start = mode->crtc_vblank_start;
693c2baf4b7SVille Syrjälä 	vbl_end = mode->crtc_vblank_end;
6940af7e4dfSMario Kleiner 
695d31faf65SVille Syrjälä 	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
696d31faf65SVille Syrjälä 		vbl_start = DIV_ROUND_UP(vbl_start, 2);
697d31faf65SVille Syrjälä 		vbl_end /= 2;
698d31faf65SVille Syrjälä 		vtotal /= 2;
699d31faf65SVille Syrjälä 	}
700d31faf65SVille Syrjälä 
701c2baf4b7SVille Syrjälä 	ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;
702c2baf4b7SVille Syrjälä 
703ad3543edSMario Kleiner 	/*
704ad3543edSMario Kleiner 	 * Lock uncore.lock, as we will do multiple timing critical raw
705ad3543edSMario Kleiner 	 * register reads, potentially with preemption disabled, so the
706ad3543edSMario Kleiner 	 * following code must not block on uncore.lock.
707ad3543edSMario Kleiner 	 */
708ad3543edSMario Kleiner 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
709ad3543edSMario Kleiner 
710ad3543edSMario Kleiner 	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
711ad3543edSMario Kleiner 
712ad3543edSMario Kleiner 	/* Get optional system timestamp before query. */
713ad3543edSMario Kleiner 	if (stime)
714ad3543edSMario Kleiner 		*stime = ktime_get();
715ad3543edSMario Kleiner 
7167c06b08aSVille Syrjälä 	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
7170af7e4dfSMario Kleiner 		/* No obvious pixelcount register. Only query vertical
7180af7e4dfSMario Kleiner 		 * scanout position from Display scan line register.
7190af7e4dfSMario Kleiner 		 */
720a225f079SVille Syrjälä 		position = __intel_get_crtc_scanline(intel_crtc);
7210af7e4dfSMario Kleiner 	} else {
7220af7e4dfSMario Kleiner 		/* Have access to pixelcount since start of frame.
7230af7e4dfSMario Kleiner 		 * We can split this into vertical and horizontal
7240af7e4dfSMario Kleiner 		 * scanout position.
7250af7e4dfSMario Kleiner 		 */
726ad3543edSMario Kleiner 		position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
7270af7e4dfSMario Kleiner 
7283aa18df8SVille Syrjälä 		/* convert to pixel counts */
7293aa18df8SVille Syrjälä 		vbl_start *= htotal;
7303aa18df8SVille Syrjälä 		vbl_end *= htotal;
7313aa18df8SVille Syrjälä 		vtotal *= htotal;
73278e8fc6bSVille Syrjälä 
73378e8fc6bSVille Syrjälä 		/*
7347e78f1cbSVille Syrjälä 		 * In interlaced modes, the pixel counter counts all pixels,
7357e78f1cbSVille Syrjälä 		 * so one field will have htotal more pixels. In order to avoid
7367e78f1cbSVille Syrjälä 		 * the reported position from jumping backwards when the pixel
7377e78f1cbSVille Syrjälä 		 * counter is beyond the length of the shorter field, just
7387e78f1cbSVille Syrjälä 		 * clamp the position the length of the shorter field. This
7397e78f1cbSVille Syrjälä 		 * matches how the scanline counter based position works since
7407e78f1cbSVille Syrjälä 		 * the scanline counter doesn't count the two half lines.
7417e78f1cbSVille Syrjälä 		 */
7427e78f1cbSVille Syrjälä 		if (position >= vtotal)
7437e78f1cbSVille Syrjälä 			position = vtotal - 1;
7447e78f1cbSVille Syrjälä 
7457e78f1cbSVille Syrjälä 		/*
74678e8fc6bSVille Syrjälä 		 * Start of vblank interrupt is triggered at start of hsync,
74778e8fc6bSVille Syrjälä 		 * just prior to the first active line of vblank. However we
74878e8fc6bSVille Syrjälä 		 * consider lines to start at the leading edge of horizontal
74978e8fc6bSVille Syrjälä 		 * active. So, should we get here before we've crossed into
75078e8fc6bSVille Syrjälä 		 * the horizontal active of the first line in vblank, we would
75178e8fc6bSVille Syrjälä 		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
75278e8fc6bSVille Syrjälä 		 * always add htotal-hsync_start to the current pixel position.
75378e8fc6bSVille Syrjälä 		 */
75478e8fc6bSVille Syrjälä 		position = (position + htotal - hsync_start) % vtotal;
7553aa18df8SVille Syrjälä 	}
7563aa18df8SVille Syrjälä 
757ad3543edSMario Kleiner 	/* Get optional system timestamp after query. */
758ad3543edSMario Kleiner 	if (etime)
759ad3543edSMario Kleiner 		*etime = ktime_get();
760ad3543edSMario Kleiner 
761ad3543edSMario Kleiner 	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
762ad3543edSMario Kleiner 
763ad3543edSMario Kleiner 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
764ad3543edSMario Kleiner 
7653aa18df8SVille Syrjälä 	in_vbl = position >= vbl_start && position < vbl_end;
7663aa18df8SVille Syrjälä 
7673aa18df8SVille Syrjälä 	/*
7683aa18df8SVille Syrjälä 	 * While in vblank, position will be negative
7693aa18df8SVille Syrjälä 	 * counting up towards 0 at vbl_end. And outside
7703aa18df8SVille Syrjälä 	 * vblank, position will be positive counting
7713aa18df8SVille Syrjälä 	 * up since vbl_end.
7723aa18df8SVille Syrjälä 	 */
7733aa18df8SVille Syrjälä 	if (position >= vbl_start)
7743aa18df8SVille Syrjälä 		position -= vbl_end;
7753aa18df8SVille Syrjälä 	else
7763aa18df8SVille Syrjälä 		position += vtotal - vbl_end;
7773aa18df8SVille Syrjälä 
7787c06b08aSVille Syrjälä 	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
7793aa18df8SVille Syrjälä 		*vpos = position;
7803aa18df8SVille Syrjälä 		*hpos = 0;
7813aa18df8SVille Syrjälä 	} else {
7820af7e4dfSMario Kleiner 		*vpos = position / htotal;
7830af7e4dfSMario Kleiner 		*hpos = position - (*vpos * htotal);
7840af7e4dfSMario Kleiner 	}
7850af7e4dfSMario Kleiner 
7860af7e4dfSMario Kleiner 	/* In vblank? */
7870af7e4dfSMario Kleiner 	if (in_vbl)
7883d3cbd84SDaniel Vetter 		ret |= DRM_SCANOUTPOS_IN_VBLANK;
7890af7e4dfSMario Kleiner 
7900af7e4dfSMario Kleiner 	return ret;
7910af7e4dfSMario Kleiner }
7920af7e4dfSMario Kleiner 
793a225f079SVille Syrjälä int intel_get_crtc_scanline(struct intel_crtc *crtc)
794a225f079SVille Syrjälä {
795a225f079SVille Syrjälä 	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
796a225f079SVille Syrjälä 	unsigned long irqflags;
797a225f079SVille Syrjälä 	int position;
798a225f079SVille Syrjälä 
799a225f079SVille Syrjälä 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
800a225f079SVille Syrjälä 	position = __intel_get_crtc_scanline(crtc);
801a225f079SVille Syrjälä 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
802a225f079SVille Syrjälä 
803a225f079SVille Syrjälä 	return position;
804a225f079SVille Syrjälä }
805a225f079SVille Syrjälä 
806f71d4af4SJesse Barnes static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
8070af7e4dfSMario Kleiner 			      int *max_error,
8080af7e4dfSMario Kleiner 			      struct timeval *vblank_time,
8090af7e4dfSMario Kleiner 			      unsigned flags)
8100af7e4dfSMario Kleiner {
8114041b853SChris Wilson 	struct drm_crtc *crtc;
8120af7e4dfSMario Kleiner 
8137eb552aeSBen Widawsky 	if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
8144041b853SChris Wilson 		DRM_ERROR("Invalid crtc %d\n", pipe);
8150af7e4dfSMario Kleiner 		return -EINVAL;
8160af7e4dfSMario Kleiner 	}
8170af7e4dfSMario Kleiner 
8180af7e4dfSMario Kleiner 	/* Get drm_crtc to timestamp: */
8194041b853SChris Wilson 	crtc = intel_get_crtc_for_pipe(dev, pipe);
8204041b853SChris Wilson 	if (crtc == NULL) {
8214041b853SChris Wilson 		DRM_ERROR("Invalid crtc %d\n", pipe);
8224041b853SChris Wilson 		return -EINVAL;
8234041b853SChris Wilson 	}
8244041b853SChris Wilson 
825fc467a22SMaarten Lankhorst 	if (!crtc->hwmode.crtc_clock) {
8264041b853SChris Wilson 		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
8274041b853SChris Wilson 		return -EBUSY;
8284041b853SChris Wilson 	}
8290af7e4dfSMario Kleiner 
8300af7e4dfSMario Kleiner 	/* Helper routine in DRM core does all the work: */
8314041b853SChris Wilson 	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
8324041b853SChris Wilson 						     vblank_time, flags,
8337da903efSVille Syrjälä 						     crtc,
834fc467a22SMaarten Lankhorst 						     &crtc->hwmode);
8350af7e4dfSMario Kleiner }
8360af7e4dfSMario Kleiner 
837d0ecd7e2SDaniel Vetter static void ironlake_rps_change_irq_handler(struct drm_device *dev)
838f97108d1SJesse Barnes {
8392d1013ddSJani Nikula 	struct drm_i915_private *dev_priv = dev->dev_private;
840b5b72e89SMatthew Garrett 	u32 busy_up, busy_down, max_avg, min_avg;
8419270388eSDaniel Vetter 	u8 new_delay;
8429270388eSDaniel Vetter 
843d0ecd7e2SDaniel Vetter 	spin_lock(&mchdev_lock);
844f97108d1SJesse Barnes 
84573edd18fSDaniel Vetter 	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
84673edd18fSDaniel Vetter 
84720e4d407SDaniel Vetter 	new_delay = dev_priv->ips.cur_delay;
8489270388eSDaniel Vetter 
8497648fa99SJesse Barnes 	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
850b5b72e89SMatthew Garrett 	busy_up = I915_READ(RCPREVBSYTUPAVG);
851b5b72e89SMatthew Garrett 	busy_down = I915_READ(RCPREVBSYTDNAVG);
852f97108d1SJesse Barnes 	max_avg = I915_READ(RCBMAXAVG);
853f97108d1SJesse Barnes 	min_avg = I915_READ(RCBMINAVG);
854f97108d1SJesse Barnes 
855f97108d1SJesse Barnes 	/* Handle RCS change request from hw */
856b5b72e89SMatthew Garrett 	if (busy_up > max_avg) {
85720e4d407SDaniel Vetter 		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
85820e4d407SDaniel Vetter 			new_delay = dev_priv->ips.cur_delay - 1;
85920e4d407SDaniel Vetter 		if (new_delay < dev_priv->ips.max_delay)
86020e4d407SDaniel Vetter 			new_delay = dev_priv->ips.max_delay;
861b5b72e89SMatthew Garrett 	} else if (busy_down < min_avg) {
86220e4d407SDaniel Vetter 		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
86320e4d407SDaniel Vetter 			new_delay = dev_priv->ips.cur_delay + 1;
86420e4d407SDaniel Vetter 		if (new_delay > dev_priv->ips.min_delay)
86520e4d407SDaniel Vetter 			new_delay = dev_priv->ips.min_delay;
866f97108d1SJesse Barnes 	}
867f97108d1SJesse Barnes 
8687648fa99SJesse Barnes 	if (ironlake_set_drps(dev, new_delay))
86920e4d407SDaniel Vetter 		dev_priv->ips.cur_delay = new_delay;
870f97108d1SJesse Barnes 
871d0ecd7e2SDaniel Vetter 	spin_unlock(&mchdev_lock);
8729270388eSDaniel Vetter 
873f97108d1SJesse Barnes 	return;
874f97108d1SJesse Barnes }
875f97108d1SJesse Barnes 
87674cdb337SChris Wilson static void notify_ring(struct intel_engine_cs *ring)
877549f7365SChris Wilson {
87893b0a4e0SOscar Mateo 	if (!intel_ring_initialized(ring))
879475553deSChris Wilson 		return;
880475553deSChris Wilson 
881bcfcc8baSJohn Harrison 	trace_i915_gem_request_notify(ring);
8829862e600SChris Wilson 
883549f7365SChris Wilson 	wake_up_all(&ring->irq_queue);
884549f7365SChris Wilson }
885549f7365SChris Wilson 
88643cf3bf0SChris Wilson static void vlv_c0_read(struct drm_i915_private *dev_priv,
88743cf3bf0SChris Wilson 			struct intel_rps_ei *ei)
88831685c25SDeepak S {
88943cf3bf0SChris Wilson 	ei->cz_clock = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
89043cf3bf0SChris Wilson 	ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT);
89143cf3bf0SChris Wilson 	ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
89231685c25SDeepak S }
89331685c25SDeepak S 
89443cf3bf0SChris Wilson static bool vlv_c0_above(struct drm_i915_private *dev_priv,
89543cf3bf0SChris Wilson 			 const struct intel_rps_ei *old,
89643cf3bf0SChris Wilson 			 const struct intel_rps_ei *now,
89743cf3bf0SChris Wilson 			 int threshold)
89831685c25SDeepak S {
89943cf3bf0SChris Wilson 	u64 time, c0;
90031685c25SDeepak S 
90143cf3bf0SChris Wilson 	if (old->cz_clock == 0)
90243cf3bf0SChris Wilson 		return false;
90331685c25SDeepak S 
90443cf3bf0SChris Wilson 	time = now->cz_clock - old->cz_clock;
90543cf3bf0SChris Wilson 	time *= threshold * dev_priv->mem_freq;
90631685c25SDeepak S 
90743cf3bf0SChris Wilson 	/* Workload can be split between render + media, e.g. SwapBuffers
90843cf3bf0SChris Wilson 	 * being blitted in X after being rendered in mesa. To account for
90943cf3bf0SChris Wilson 	 * this we need to combine both engines into our activity counter.
91043cf3bf0SChris Wilson 	 */
91143cf3bf0SChris Wilson 	c0 = now->render_c0 - old->render_c0;
91243cf3bf0SChris Wilson 	c0 += now->media_c0 - old->media_c0;
91343cf3bf0SChris Wilson 	c0 *= 100 * VLV_CZ_CLOCK_TO_MILLI_SEC * 4 / 1000;
91431685c25SDeepak S 
91543cf3bf0SChris Wilson 	return c0 >= time;
91631685c25SDeepak S }
91731685c25SDeepak S 
91843cf3bf0SChris Wilson void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
91943cf3bf0SChris Wilson {
92043cf3bf0SChris Wilson 	vlv_c0_read(dev_priv, &dev_priv->rps.down_ei);
92143cf3bf0SChris Wilson 	dev_priv->rps.up_ei = dev_priv->rps.down_ei;
92243cf3bf0SChris Wilson }
92343cf3bf0SChris Wilson 
92443cf3bf0SChris Wilson static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
92543cf3bf0SChris Wilson {
92643cf3bf0SChris Wilson 	struct intel_rps_ei now;
92743cf3bf0SChris Wilson 	u32 events = 0;
92843cf3bf0SChris Wilson 
9296f4b12f8SChris Wilson 	if ((pm_iir & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED)) == 0)
93043cf3bf0SChris Wilson 		return 0;
93143cf3bf0SChris Wilson 
93243cf3bf0SChris Wilson 	vlv_c0_read(dev_priv, &now);
93343cf3bf0SChris Wilson 	if (now.cz_clock == 0)
93443cf3bf0SChris Wilson 		return 0;
93531685c25SDeepak S 
93643cf3bf0SChris Wilson 	if (pm_iir & GEN6_PM_RP_DOWN_EI_EXPIRED) {
93743cf3bf0SChris Wilson 		if (!vlv_c0_above(dev_priv,
93843cf3bf0SChris Wilson 				  &dev_priv->rps.down_ei, &now,
9398fb55197SChris Wilson 				  dev_priv->rps.down_threshold))
94043cf3bf0SChris Wilson 			events |= GEN6_PM_RP_DOWN_THRESHOLD;
94143cf3bf0SChris Wilson 		dev_priv->rps.down_ei = now;
94231685c25SDeepak S 	}
94331685c25SDeepak S 
94443cf3bf0SChris Wilson 	if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
94543cf3bf0SChris Wilson 		if (vlv_c0_above(dev_priv,
94643cf3bf0SChris Wilson 				 &dev_priv->rps.up_ei, &now,
9478fb55197SChris Wilson 				 dev_priv->rps.up_threshold))
94843cf3bf0SChris Wilson 			events |= GEN6_PM_RP_UP_THRESHOLD;
94943cf3bf0SChris Wilson 		dev_priv->rps.up_ei = now;
95043cf3bf0SChris Wilson 	}
95143cf3bf0SChris Wilson 
95243cf3bf0SChris Wilson 	return events;
95331685c25SDeepak S }
95431685c25SDeepak S 
955f5a4c67dSChris Wilson static bool any_waiters(struct drm_i915_private *dev_priv)
956f5a4c67dSChris Wilson {
957f5a4c67dSChris Wilson 	struct intel_engine_cs *ring;
958f5a4c67dSChris Wilson 	int i;
959f5a4c67dSChris Wilson 
960f5a4c67dSChris Wilson 	for_each_ring(ring, dev_priv, i)
961f5a4c67dSChris Wilson 		if (ring->irq_refcount)
962f5a4c67dSChris Wilson 			return true;
963f5a4c67dSChris Wilson 
964f5a4c67dSChris Wilson 	return false;
965f5a4c67dSChris Wilson }
966f5a4c67dSChris Wilson 
9674912d041SBen Widawsky static void gen6_pm_rps_work(struct work_struct *work)
9683b8d8d91SJesse Barnes {
9692d1013ddSJani Nikula 	struct drm_i915_private *dev_priv =
9702d1013ddSJani Nikula 		container_of(work, struct drm_i915_private, rps.work);
9718d3afd7dSChris Wilson 	bool client_boost;
9728d3afd7dSChris Wilson 	int new_delay, adj, min, max;
973edbfdb45SPaulo Zanoni 	u32 pm_iir;
9743b8d8d91SJesse Barnes 
97559cdb63dSDaniel Vetter 	spin_lock_irq(&dev_priv->irq_lock);
976d4d70aa5SImre Deak 	/* Speed up work cancelation during disabling rps interrupts. */
977d4d70aa5SImre Deak 	if (!dev_priv->rps.interrupts_enabled) {
978d4d70aa5SImre Deak 		spin_unlock_irq(&dev_priv->irq_lock);
979d4d70aa5SImre Deak 		return;
980d4d70aa5SImre Deak 	}
981c6a828d3SDaniel Vetter 	pm_iir = dev_priv->rps.pm_iir;
982c6a828d3SDaniel Vetter 	dev_priv->rps.pm_iir = 0;
983a72fbc3aSImre Deak 	/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
984480c8033SDaniel Vetter 	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
9858d3afd7dSChris Wilson 	client_boost = dev_priv->rps.client_boost;
9868d3afd7dSChris Wilson 	dev_priv->rps.client_boost = false;
98759cdb63dSDaniel Vetter 	spin_unlock_irq(&dev_priv->irq_lock);
9884912d041SBen Widawsky 
98960611c13SPaulo Zanoni 	/* Make sure we didn't queue anything we're not going to process. */
990a6706b45SDeepak S 	WARN_ON(pm_iir & ~dev_priv->pm_rps_events);
99160611c13SPaulo Zanoni 
9928d3afd7dSChris Wilson 	if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
9933b8d8d91SJesse Barnes 		return;
9943b8d8d91SJesse Barnes 
9954fc688ceSJesse Barnes 	mutex_lock(&dev_priv->rps.hw_lock);
9967b9e0ae6SChris Wilson 
99743cf3bf0SChris Wilson 	pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);
99843cf3bf0SChris Wilson 
999dd75fdc8SChris Wilson 	adj = dev_priv->rps.last_adj;
1000edcf284bSChris Wilson 	new_delay = dev_priv->rps.cur_freq;
10018d3afd7dSChris Wilson 	min = dev_priv->rps.min_freq_softlimit;
10028d3afd7dSChris Wilson 	max = dev_priv->rps.max_freq_softlimit;
10038d3afd7dSChris Wilson 
10048d3afd7dSChris Wilson 	if (client_boost) {
10058d3afd7dSChris Wilson 		new_delay = dev_priv->rps.max_freq_softlimit;
10068d3afd7dSChris Wilson 		adj = 0;
10078d3afd7dSChris Wilson 	} else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
1008dd75fdc8SChris Wilson 		if (adj > 0)
1009dd75fdc8SChris Wilson 			adj *= 2;
1010edcf284bSChris Wilson 		else /* CHV needs even encode values */
1011edcf284bSChris Wilson 			adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;
10127425034aSVille Syrjälä 		/*
10137425034aSVille Syrjälä 		 * For better performance, jump directly
10147425034aSVille Syrjälä 		 * to RPe if we're below it.
10157425034aSVille Syrjälä 		 */
1016edcf284bSChris Wilson 		if (new_delay < dev_priv->rps.efficient_freq - adj) {
1017b39fb297SBen Widawsky 			new_delay = dev_priv->rps.efficient_freq;
1018edcf284bSChris Wilson 			adj = 0;
1019edcf284bSChris Wilson 		}
1020f5a4c67dSChris Wilson 	} else if (any_waiters(dev_priv)) {
1021f5a4c67dSChris Wilson 		adj = 0;
1022dd75fdc8SChris Wilson 	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
1023b39fb297SBen Widawsky 		if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
1024b39fb297SBen Widawsky 			new_delay = dev_priv->rps.efficient_freq;
1025dd75fdc8SChris Wilson 		else
1026b39fb297SBen Widawsky 			new_delay = dev_priv->rps.min_freq_softlimit;
1027dd75fdc8SChris Wilson 		adj = 0;
1028dd75fdc8SChris Wilson 	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
1029dd75fdc8SChris Wilson 		if (adj < 0)
1030dd75fdc8SChris Wilson 			adj *= 2;
1031edcf284bSChris Wilson 		else /* CHV needs even encode values */
1032edcf284bSChris Wilson 			adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;
1033dd75fdc8SChris Wilson 	} else { /* unknown event */
1034edcf284bSChris Wilson 		adj = 0;
1035dd75fdc8SChris Wilson 	}
10363b8d8d91SJesse Barnes 
1037edcf284bSChris Wilson 	dev_priv->rps.last_adj = adj;
1038edcf284bSChris Wilson 
103979249636SBen Widawsky 	/* sysfs frequency interfaces may have snuck in while servicing the
104079249636SBen Widawsky 	 * interrupt
104179249636SBen Widawsky 	 */
1042edcf284bSChris Wilson 	new_delay += adj;
10438d3afd7dSChris Wilson 	new_delay = clamp_t(int, new_delay, min, max);
104427544369SDeepak S 
1045ffe02b40SVille Syrjälä 	intel_set_rps(dev_priv->dev, new_delay);
10463b8d8d91SJesse Barnes 
10474fc688ceSJesse Barnes 	mutex_unlock(&dev_priv->rps.hw_lock);
10483b8d8d91SJesse Barnes }
10493b8d8d91SJesse Barnes 
1050e3689190SBen Widawsky 
1051e3689190SBen Widawsky /**
1052e3689190SBen Widawsky  * ivybridge_parity_work - Workqueue called when a parity error interrupt
1053e3689190SBen Widawsky  * occurred.
1054e3689190SBen Widawsky  * @work: workqueue struct
1055e3689190SBen Widawsky  *
1056e3689190SBen Widawsky  * Doesn't actually do anything except notify userspace. As a consequence of
1057e3689190SBen Widawsky  * this event, userspace should try to remap the bad rows since statistically
1058e3689190SBen Widawsky  * it is likely the same row is more likely to go bad again.
1059e3689190SBen Widawsky  */
1060e3689190SBen Widawsky static void ivybridge_parity_work(struct work_struct *work)
1061e3689190SBen Widawsky {
10622d1013ddSJani Nikula 	struct drm_i915_private *dev_priv =
10632d1013ddSJani Nikula 		container_of(work, struct drm_i915_private, l3_parity.error_work);
1064e3689190SBen Widawsky 	u32 error_status, row, bank, subbank;
106535a85ac6SBen Widawsky 	char *parity_event[6];
1066e3689190SBen Widawsky 	uint32_t misccpctl;
106735a85ac6SBen Widawsky 	uint8_t slice = 0;
1068e3689190SBen Widawsky 
1069e3689190SBen Widawsky 	/* We must turn off DOP level clock gating to access the L3 registers.
1070e3689190SBen Widawsky 	 * In order to prevent a get/put style interface, acquire struct mutex
1071e3689190SBen Widawsky 	 * any time we access those registers.
1072e3689190SBen Widawsky 	 */
1073e3689190SBen Widawsky 	mutex_lock(&dev_priv->dev->struct_mutex);
1074e3689190SBen Widawsky 
107535a85ac6SBen Widawsky 	/* If we've screwed up tracking, just let the interrupt fire again */
107635a85ac6SBen Widawsky 	if (WARN_ON(!dev_priv->l3_parity.which_slice))
107735a85ac6SBen Widawsky 		goto out;
107835a85ac6SBen Widawsky 
1079e3689190SBen Widawsky 	misccpctl = I915_READ(GEN7_MISCCPCTL);
1080e3689190SBen Widawsky 	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
1081e3689190SBen Widawsky 	POSTING_READ(GEN7_MISCCPCTL);
1082e3689190SBen Widawsky 
108335a85ac6SBen Widawsky 	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
108435a85ac6SBen Widawsky 		u32 reg;
108535a85ac6SBen Widawsky 
108635a85ac6SBen Widawsky 		slice--;
108735a85ac6SBen Widawsky 		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
108835a85ac6SBen Widawsky 			break;
108935a85ac6SBen Widawsky 
109035a85ac6SBen Widawsky 		dev_priv->l3_parity.which_slice &= ~(1<<slice);
109135a85ac6SBen Widawsky 
109235a85ac6SBen Widawsky 		reg = GEN7_L3CDERRST1 + (slice * 0x200);
109335a85ac6SBen Widawsky 
109435a85ac6SBen Widawsky 		error_status = I915_READ(reg);
1095e3689190SBen Widawsky 		row = GEN7_PARITY_ERROR_ROW(error_status);
1096e3689190SBen Widawsky 		bank = GEN7_PARITY_ERROR_BANK(error_status);
1097e3689190SBen Widawsky 		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
1098e3689190SBen Widawsky 
109935a85ac6SBen Widawsky 		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
110035a85ac6SBen Widawsky 		POSTING_READ(reg);
1101e3689190SBen Widawsky 
1102cce723edSBen Widawsky 		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
1103e3689190SBen Widawsky 		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
1104e3689190SBen Widawsky 		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
1105e3689190SBen Widawsky 		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
110635a85ac6SBen Widawsky 		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
110735a85ac6SBen Widawsky 		parity_event[5] = NULL;
1108e3689190SBen Widawsky 
11095bdebb18SDave Airlie 		kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj,
1110e3689190SBen Widawsky 				   KOBJ_CHANGE, parity_event);
1111e3689190SBen Widawsky 
111235a85ac6SBen Widawsky 		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
111335a85ac6SBen Widawsky 			  slice, row, bank, subbank);
1114e3689190SBen Widawsky 
111535a85ac6SBen Widawsky 		kfree(parity_event[4]);
1116e3689190SBen Widawsky 		kfree(parity_event[3]);
1117e3689190SBen Widawsky 		kfree(parity_event[2]);
1118e3689190SBen Widawsky 		kfree(parity_event[1]);
1119e3689190SBen Widawsky 	}
1120e3689190SBen Widawsky 
112135a85ac6SBen Widawsky 	I915_WRITE(GEN7_MISCCPCTL, misccpctl);
112235a85ac6SBen Widawsky 
112335a85ac6SBen Widawsky out:
112435a85ac6SBen Widawsky 	WARN_ON(dev_priv->l3_parity.which_slice);
11254cb21832SDaniel Vetter 	spin_lock_irq(&dev_priv->irq_lock);
1126480c8033SDaniel Vetter 	gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
11274cb21832SDaniel Vetter 	spin_unlock_irq(&dev_priv->irq_lock);
112835a85ac6SBen Widawsky 
112935a85ac6SBen Widawsky 	mutex_unlock(&dev_priv->dev->struct_mutex);
113035a85ac6SBen Widawsky }
113135a85ac6SBen Widawsky 
113235a85ac6SBen Widawsky static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
1133e3689190SBen Widawsky {
11342d1013ddSJani Nikula 	struct drm_i915_private *dev_priv = dev->dev_private;
1135e3689190SBen Widawsky 
1136040d2baaSBen Widawsky 	if (!HAS_L3_DPF(dev))
1137e3689190SBen Widawsky 		return;
1138e3689190SBen Widawsky 
1139d0ecd7e2SDaniel Vetter 	spin_lock(&dev_priv->irq_lock);
1140480c8033SDaniel Vetter 	gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
1141d0ecd7e2SDaniel Vetter 	spin_unlock(&dev_priv->irq_lock);
1142e3689190SBen Widawsky 
114335a85ac6SBen Widawsky 	iir &= GT_PARITY_ERROR(dev);
114435a85ac6SBen Widawsky 	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
114535a85ac6SBen Widawsky 		dev_priv->l3_parity.which_slice |= 1 << 1;
114635a85ac6SBen Widawsky 
114735a85ac6SBen Widawsky 	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
114835a85ac6SBen Widawsky 		dev_priv->l3_parity.which_slice |= 1 << 0;
114935a85ac6SBen Widawsky 
1150a4da4fa4SDaniel Vetter 	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
1151e3689190SBen Widawsky }
1152e3689190SBen Widawsky 
1153f1af8fc1SPaulo Zanoni static void ilk_gt_irq_handler(struct drm_device *dev,
1154f1af8fc1SPaulo Zanoni 			       struct drm_i915_private *dev_priv,
1155f1af8fc1SPaulo Zanoni 			       u32 gt_iir)
1156f1af8fc1SPaulo Zanoni {
1157f1af8fc1SPaulo Zanoni 	if (gt_iir &
1158f1af8fc1SPaulo Zanoni 	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
115974cdb337SChris Wilson 		notify_ring(&dev_priv->ring[RCS]);
1160f1af8fc1SPaulo Zanoni 	if (gt_iir & ILK_BSD_USER_INTERRUPT)
116174cdb337SChris Wilson 		notify_ring(&dev_priv->ring[VCS]);
1162f1af8fc1SPaulo Zanoni }
1163f1af8fc1SPaulo Zanoni 
1164e7b4c6b1SDaniel Vetter static void snb_gt_irq_handler(struct drm_device *dev,
1165e7b4c6b1SDaniel Vetter 			       struct drm_i915_private *dev_priv,
1166e7b4c6b1SDaniel Vetter 			       u32 gt_iir)
1167e7b4c6b1SDaniel Vetter {
1168e7b4c6b1SDaniel Vetter 
1169cc609d5dSBen Widawsky 	if (gt_iir &
1170cc609d5dSBen Widawsky 	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
117174cdb337SChris Wilson 		notify_ring(&dev_priv->ring[RCS]);
1172cc609d5dSBen Widawsky 	if (gt_iir & GT_BSD_USER_INTERRUPT)
117374cdb337SChris Wilson 		notify_ring(&dev_priv->ring[VCS]);
1174cc609d5dSBen Widawsky 	if (gt_iir & GT_BLT_USER_INTERRUPT)
117574cdb337SChris Wilson 		notify_ring(&dev_priv->ring[BCS]);
1176e7b4c6b1SDaniel Vetter 
1177cc609d5dSBen Widawsky 	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
1178cc609d5dSBen Widawsky 		      GT_BSD_CS_ERROR_INTERRUPT |
1179aaecdf61SDaniel Vetter 		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
1180aaecdf61SDaniel Vetter 		DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);
1181e3689190SBen Widawsky 
118235a85ac6SBen Widawsky 	if (gt_iir & GT_PARITY_ERROR(dev))
118335a85ac6SBen Widawsky 		ivybridge_parity_error_irq_handler(dev, gt_iir);
1184e7b4c6b1SDaniel Vetter }
1185e7b4c6b1SDaniel Vetter 
118674cdb337SChris Wilson static irqreturn_t gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
1187abd58f01SBen Widawsky 				       u32 master_ctl)
1188abd58f01SBen Widawsky {
1189abd58f01SBen Widawsky 	irqreturn_t ret = IRQ_NONE;
1190abd58f01SBen Widawsky 
1191abd58f01SBen Widawsky 	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
119274cdb337SChris Wilson 		u32 tmp = I915_READ_FW(GEN8_GT_IIR(0));
1193abd58f01SBen Widawsky 		if (tmp) {
1194cb0d205eSChris Wilson 			I915_WRITE_FW(GEN8_GT_IIR(0), tmp);
1195abd58f01SBen Widawsky 			ret = IRQ_HANDLED;
1196e981e7b1SThomas Daniel 
119774cdb337SChris Wilson 			if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT))
119874cdb337SChris Wilson 				intel_lrc_irq_handler(&dev_priv->ring[RCS]);
119974cdb337SChris Wilson 			if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT))
120074cdb337SChris Wilson 				notify_ring(&dev_priv->ring[RCS]);
1201e981e7b1SThomas Daniel 
120274cdb337SChris Wilson 			if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT))
120374cdb337SChris Wilson 				intel_lrc_irq_handler(&dev_priv->ring[BCS]);
120474cdb337SChris Wilson 			if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT))
120574cdb337SChris Wilson 				notify_ring(&dev_priv->ring[BCS]);
1206abd58f01SBen Widawsky 		} else
1207abd58f01SBen Widawsky 			DRM_ERROR("The master control interrupt lied (GT0)!\n");
1208abd58f01SBen Widawsky 	}
1209abd58f01SBen Widawsky 
121085f9b5f9SZhao Yakui 	if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
121174cdb337SChris Wilson 		u32 tmp = I915_READ_FW(GEN8_GT_IIR(1));
1212abd58f01SBen Widawsky 		if (tmp) {
1213cb0d205eSChris Wilson 			I915_WRITE_FW(GEN8_GT_IIR(1), tmp);
1214abd58f01SBen Widawsky 			ret = IRQ_HANDLED;
1215e981e7b1SThomas Daniel 
121674cdb337SChris Wilson 			if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT))
121774cdb337SChris Wilson 				intel_lrc_irq_handler(&dev_priv->ring[VCS]);
121874cdb337SChris Wilson 			if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT))
121974cdb337SChris Wilson 				notify_ring(&dev_priv->ring[VCS]);
1220e981e7b1SThomas Daniel 
122174cdb337SChris Wilson 			if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT))
122274cdb337SChris Wilson 				intel_lrc_irq_handler(&dev_priv->ring[VCS2]);
122374cdb337SChris Wilson 			if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT))
122474cdb337SChris Wilson 				notify_ring(&dev_priv->ring[VCS2]);
1225abd58f01SBen Widawsky 		} else
1226abd58f01SBen Widawsky 			DRM_ERROR("The master control interrupt lied (GT1)!\n");
1227abd58f01SBen Widawsky 	}
1228abd58f01SBen Widawsky 
122974cdb337SChris Wilson 	if (master_ctl & GEN8_GT_VECS_IRQ) {
123074cdb337SChris Wilson 		u32 tmp = I915_READ_FW(GEN8_GT_IIR(3));
123174cdb337SChris Wilson 		if (tmp) {
123274cdb337SChris Wilson 			I915_WRITE_FW(GEN8_GT_IIR(3), tmp);
123374cdb337SChris Wilson 			ret = IRQ_HANDLED;
123474cdb337SChris Wilson 
123574cdb337SChris Wilson 			if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT))
123674cdb337SChris Wilson 				intel_lrc_irq_handler(&dev_priv->ring[VECS]);
123774cdb337SChris Wilson 			if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT))
123874cdb337SChris Wilson 				notify_ring(&dev_priv->ring[VECS]);
123974cdb337SChris Wilson 		} else
124074cdb337SChris Wilson 			DRM_ERROR("The master control interrupt lied (GT3)!\n");
124174cdb337SChris Wilson 	}
124274cdb337SChris Wilson 
12430961021aSBen Widawsky 	if (master_ctl & GEN8_GT_PM_IRQ) {
124474cdb337SChris Wilson 		u32 tmp = I915_READ_FW(GEN8_GT_IIR(2));
12450961021aSBen Widawsky 		if (tmp & dev_priv->pm_rps_events) {
1246cb0d205eSChris Wilson 			I915_WRITE_FW(GEN8_GT_IIR(2),
12470961021aSBen Widawsky 				      tmp & dev_priv->pm_rps_events);
124838cc46d7SOscar Mateo 			ret = IRQ_HANDLED;
1249c9a9a268SImre Deak 			gen6_rps_irq_handler(dev_priv, tmp);
12500961021aSBen Widawsky 		} else
12510961021aSBen Widawsky 			DRM_ERROR("The master control interrupt lied (PM)!\n");
12520961021aSBen Widawsky 	}
12530961021aSBen Widawsky 
1254abd58f01SBen Widawsky 	return ret;
1255abd58f01SBen Widawsky }
1256abd58f01SBen Widawsky 
125763c88d22SImre Deak static bool bxt_port_hotplug_long_detect(enum port port, u32 val)
125863c88d22SImre Deak {
125963c88d22SImre Deak 	switch (port) {
126063c88d22SImre Deak 	case PORT_A:
1261195baa06SVille Syrjälä 		return val & PORTA_HOTPLUG_LONG_DETECT;
126263c88d22SImre Deak 	case PORT_B:
126363c88d22SImre Deak 		return val & PORTB_HOTPLUG_LONG_DETECT;
126463c88d22SImre Deak 	case PORT_C:
126563c88d22SImre Deak 		return val & PORTC_HOTPLUG_LONG_DETECT;
126663c88d22SImre Deak 	case PORT_D:
126763c88d22SImre Deak 		return val & PORTD_HOTPLUG_LONG_DETECT;
126863c88d22SImre Deak 	default:
126963c88d22SImre Deak 		return false;
127063c88d22SImre Deak 	}
127163c88d22SImre Deak }
127263c88d22SImre Deak 
12736dbf30ceSVille Syrjälä static bool spt_port_hotplug2_long_detect(enum port port, u32 val)
12746dbf30ceSVille Syrjälä {
12756dbf30ceSVille Syrjälä 	switch (port) {
12766dbf30ceSVille Syrjälä 	case PORT_E:
12776dbf30ceSVille Syrjälä 		return val & PORTE_HOTPLUG_LONG_DETECT;
12786dbf30ceSVille Syrjälä 	default:
12796dbf30ceSVille Syrjälä 		return false;
12806dbf30ceSVille Syrjälä 	}
12816dbf30ceSVille Syrjälä }
12826dbf30ceSVille Syrjälä 
1283e4ce95aaSVille Syrjälä static bool ilk_port_hotplug_long_detect(enum port port, u32 val)
1284e4ce95aaSVille Syrjälä {
1285e4ce95aaSVille Syrjälä 	switch (port) {
1286e4ce95aaSVille Syrjälä 	case PORT_A:
1287e4ce95aaSVille Syrjälä 		return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
1288e4ce95aaSVille Syrjälä 	default:
1289e4ce95aaSVille Syrjälä 		return false;
1290e4ce95aaSVille Syrjälä 	}
1291e4ce95aaSVille Syrjälä }
1292e4ce95aaSVille Syrjälä 
1293676574dfSJani Nikula static bool pch_port_hotplug_long_detect(enum port port, u32 val)
129413cf5504SDave Airlie {
129513cf5504SDave Airlie 	switch (port) {
129613cf5504SDave Airlie 	case PORT_B:
1297676574dfSJani Nikula 		return val & PORTB_HOTPLUG_LONG_DETECT;
129813cf5504SDave Airlie 	case PORT_C:
1299676574dfSJani Nikula 		return val & PORTC_HOTPLUG_LONG_DETECT;
130013cf5504SDave Airlie 	case PORT_D:
1301676574dfSJani Nikula 		return val & PORTD_HOTPLUG_LONG_DETECT;
1302676574dfSJani Nikula 	default:
1303676574dfSJani Nikula 		return false;
130413cf5504SDave Airlie 	}
130513cf5504SDave Airlie }
130613cf5504SDave Airlie 
1307676574dfSJani Nikula static bool i9xx_port_hotplug_long_detect(enum port port, u32 val)
130813cf5504SDave Airlie {
130913cf5504SDave Airlie 	switch (port) {
131013cf5504SDave Airlie 	case PORT_B:
1311676574dfSJani Nikula 		return val & PORTB_HOTPLUG_INT_LONG_PULSE;
131213cf5504SDave Airlie 	case PORT_C:
1313676574dfSJani Nikula 		return val & PORTC_HOTPLUG_INT_LONG_PULSE;
131413cf5504SDave Airlie 	case PORT_D:
1315676574dfSJani Nikula 		return val & PORTD_HOTPLUG_INT_LONG_PULSE;
1316676574dfSJani Nikula 	default:
1317676574dfSJani Nikula 		return false;
131813cf5504SDave Airlie 	}
131913cf5504SDave Airlie }
132013cf5504SDave Airlie 
132142db67d6SVille Syrjälä /*
132242db67d6SVille Syrjälä  * Get a bit mask of pins that have triggered, and which ones may be long.
132342db67d6SVille Syrjälä  * This can be called multiple times with the same masks to accumulate
132442db67d6SVille Syrjälä  * hotplug detection results from several registers.
132542db67d6SVille Syrjälä  *
132642db67d6SVille Syrjälä  * Note that the caller is expected to zero out the masks initially.
132742db67d6SVille Syrjälä  */
1328fd63e2a9SImre Deak static void intel_get_hpd_pins(u32 *pin_mask, u32 *long_mask,
13298c841e57SJani Nikula 			     u32 hotplug_trigger, u32 dig_hotplug_reg,
1330fd63e2a9SImre Deak 			     const u32 hpd[HPD_NUM_PINS],
1331fd63e2a9SImre Deak 			     bool long_pulse_detect(enum port port, u32 val))
1332676574dfSJani Nikula {
13338c841e57SJani Nikula 	enum port port;
1334676574dfSJani Nikula 	int i;
1335676574dfSJani Nikula 
1336676574dfSJani Nikula 	for_each_hpd_pin(i) {
13378c841e57SJani Nikula 		if ((hpd[i] & hotplug_trigger) == 0)
13388c841e57SJani Nikula 			continue;
13398c841e57SJani Nikula 
1340676574dfSJani Nikula 		*pin_mask |= BIT(i);
1341676574dfSJani Nikula 
1342cc24fcdcSImre Deak 		if (!intel_hpd_pin_to_port(i, &port))
1343cc24fcdcSImre Deak 			continue;
1344cc24fcdcSImre Deak 
1345fd63e2a9SImre Deak 		if (long_pulse_detect(port, dig_hotplug_reg))
1346676574dfSJani Nikula 			*long_mask |= BIT(i);
1347676574dfSJani Nikula 	}
1348676574dfSJani Nikula 
1349676574dfSJani Nikula 	DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x\n",
1350676574dfSJani Nikula 			 hotplug_trigger, dig_hotplug_reg, *pin_mask);
1351676574dfSJani Nikula 
1352676574dfSJani Nikula }
1353676574dfSJani Nikula 
1354515ac2bbSDaniel Vetter static void gmbus_irq_handler(struct drm_device *dev)
1355515ac2bbSDaniel Vetter {
13562d1013ddSJani Nikula 	struct drm_i915_private *dev_priv = dev->dev_private;
135728c70f16SDaniel Vetter 
135828c70f16SDaniel Vetter 	wake_up_all(&dev_priv->gmbus_wait_queue);
1359515ac2bbSDaniel Vetter }
1360515ac2bbSDaniel Vetter 
1361ce99c256SDaniel Vetter static void dp_aux_irq_handler(struct drm_device *dev)
1362ce99c256SDaniel Vetter {
13632d1013ddSJani Nikula 	struct drm_i915_private *dev_priv = dev->dev_private;
13649ee32feaSDaniel Vetter 
13659ee32feaSDaniel Vetter 	wake_up_all(&dev_priv->gmbus_wait_queue);
1366ce99c256SDaniel Vetter }
1367ce99c256SDaniel Vetter 
13688bf1e9f1SShuang He #if defined(CONFIG_DEBUG_FS)
1369277de95eSDaniel Vetter static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
1370eba94eb9SDaniel Vetter 					 uint32_t crc0, uint32_t crc1,
1371eba94eb9SDaniel Vetter 					 uint32_t crc2, uint32_t crc3,
13728bc5e955SDaniel Vetter 					 uint32_t crc4)
13738bf1e9f1SShuang He {
13748bf1e9f1SShuang He 	struct drm_i915_private *dev_priv = dev->dev_private;
13758bf1e9f1SShuang He 	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
13768bf1e9f1SShuang He 	struct intel_pipe_crc_entry *entry;
1377ac2300d4SDamien Lespiau 	int head, tail;
1378b2c88f5bSDamien Lespiau 
1379d538bbdfSDamien Lespiau 	spin_lock(&pipe_crc->lock);
1380d538bbdfSDamien Lespiau 
13810c912c79SDamien Lespiau 	if (!pipe_crc->entries) {
1382d538bbdfSDamien Lespiau 		spin_unlock(&pipe_crc->lock);
138334273620SDaniel Vetter 		DRM_DEBUG_KMS("spurious interrupt\n");
13840c912c79SDamien Lespiau 		return;
13850c912c79SDamien Lespiau 	}
13860c912c79SDamien Lespiau 
1387d538bbdfSDamien Lespiau 	head = pipe_crc->head;
1388d538bbdfSDamien Lespiau 	tail = pipe_crc->tail;
1389b2c88f5bSDamien Lespiau 
1390b2c88f5bSDamien Lespiau 	if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
1391d538bbdfSDamien Lespiau 		spin_unlock(&pipe_crc->lock);
1392b2c88f5bSDamien Lespiau 		DRM_ERROR("CRC buffer overflowing\n");
1393b2c88f5bSDamien Lespiau 		return;
1394b2c88f5bSDamien Lespiau 	}
1395b2c88f5bSDamien Lespiau 
1396b2c88f5bSDamien Lespiau 	entry = &pipe_crc->entries[head];
13978bf1e9f1SShuang He 
13988bc5e955SDaniel Vetter 	entry->frame = dev->driver->get_vblank_counter(dev, pipe);
1399eba94eb9SDaniel Vetter 	entry->crc[0] = crc0;
1400eba94eb9SDaniel Vetter 	entry->crc[1] = crc1;
1401eba94eb9SDaniel Vetter 	entry->crc[2] = crc2;
1402eba94eb9SDaniel Vetter 	entry->crc[3] = crc3;
1403eba94eb9SDaniel Vetter 	entry->crc[4] = crc4;
1404b2c88f5bSDamien Lespiau 
1405b2c88f5bSDamien Lespiau 	head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
1406d538bbdfSDamien Lespiau 	pipe_crc->head = head;
1407d538bbdfSDamien Lespiau 
1408d538bbdfSDamien Lespiau 	spin_unlock(&pipe_crc->lock);
140907144428SDamien Lespiau 
141007144428SDamien Lespiau 	wake_up_interruptible(&pipe_crc->wq);
14118bf1e9f1SShuang He }
1412277de95eSDaniel Vetter #else
1413277de95eSDaniel Vetter static inline void
1414277de95eSDaniel Vetter display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
1415277de95eSDaniel Vetter 			     uint32_t crc0, uint32_t crc1,
1416277de95eSDaniel Vetter 			     uint32_t crc2, uint32_t crc3,
1417277de95eSDaniel Vetter 			     uint32_t crc4) {}
1418277de95eSDaniel Vetter #endif
1419eba94eb9SDaniel Vetter 
1420277de95eSDaniel Vetter 
1421277de95eSDaniel Vetter static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
14225a69b89fSDaniel Vetter {
14235a69b89fSDaniel Vetter 	struct drm_i915_private *dev_priv = dev->dev_private;
14245a69b89fSDaniel Vetter 
1425277de95eSDaniel Vetter 	display_pipe_crc_irq_handler(dev, pipe,
14265a69b89fSDaniel Vetter 				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
14275a69b89fSDaniel Vetter 				     0, 0, 0, 0);
14285a69b89fSDaniel Vetter }
14295a69b89fSDaniel Vetter 
1430277de95eSDaniel Vetter static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
1431eba94eb9SDaniel Vetter {
1432eba94eb9SDaniel Vetter 	struct drm_i915_private *dev_priv = dev->dev_private;
1433eba94eb9SDaniel Vetter 
1434277de95eSDaniel Vetter 	display_pipe_crc_irq_handler(dev, pipe,
1435eba94eb9SDaniel Vetter 				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1436eba94eb9SDaniel Vetter 				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
1437eba94eb9SDaniel Vetter 				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
1438eba94eb9SDaniel Vetter 				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
14398bc5e955SDaniel Vetter 				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
1440eba94eb9SDaniel Vetter }
14415b3a856bSDaniel Vetter 
1442277de95eSDaniel Vetter static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
14435b3a856bSDaniel Vetter {
14445b3a856bSDaniel Vetter 	struct drm_i915_private *dev_priv = dev->dev_private;
14450b5c5ed0SDaniel Vetter 	uint32_t res1, res2;
14460b5c5ed0SDaniel Vetter 
14470b5c5ed0SDaniel Vetter 	if (INTEL_INFO(dev)->gen >= 3)
14480b5c5ed0SDaniel Vetter 		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
14490b5c5ed0SDaniel Vetter 	else
14500b5c5ed0SDaniel Vetter 		res1 = 0;
14510b5c5ed0SDaniel Vetter 
14520b5c5ed0SDaniel Vetter 	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
14530b5c5ed0SDaniel Vetter 		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
14540b5c5ed0SDaniel Vetter 	else
14550b5c5ed0SDaniel Vetter 		res2 = 0;
14565b3a856bSDaniel Vetter 
1457277de95eSDaniel Vetter 	display_pipe_crc_irq_handler(dev, pipe,
14580b5c5ed0SDaniel Vetter 				     I915_READ(PIPE_CRC_RES_RED(pipe)),
14590b5c5ed0SDaniel Vetter 				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
14600b5c5ed0SDaniel Vetter 				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
14610b5c5ed0SDaniel Vetter 				     res1, res2);
14625b3a856bSDaniel Vetter }
14638bf1e9f1SShuang He 
14641403c0d4SPaulo Zanoni /* The RPS events need forcewake, so we add them to a work queue and mask their
14651403c0d4SPaulo Zanoni  * IMR bits until the work is done. Other interrupts can be processed without
14661403c0d4SPaulo Zanoni  * the work queue. */
14671403c0d4SPaulo Zanoni static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
1468baf02a1fSBen Widawsky {
1469a6706b45SDeepak S 	if (pm_iir & dev_priv->pm_rps_events) {
147059cdb63dSDaniel Vetter 		spin_lock(&dev_priv->irq_lock);
1471480c8033SDaniel Vetter 		gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
1472d4d70aa5SImre Deak 		if (dev_priv->rps.interrupts_enabled) {
1473d4d70aa5SImre Deak 			dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
14742adbee62SDaniel Vetter 			queue_work(dev_priv->wq, &dev_priv->rps.work);
147541a05a3aSDaniel Vetter 		}
1476d4d70aa5SImre Deak 		spin_unlock(&dev_priv->irq_lock);
1477d4d70aa5SImre Deak 	}
1478baf02a1fSBen Widawsky 
1479c9a9a268SImre Deak 	if (INTEL_INFO(dev_priv)->gen >= 8)
1480c9a9a268SImre Deak 		return;
1481c9a9a268SImre Deak 
14821403c0d4SPaulo Zanoni 	if (HAS_VEBOX(dev_priv->dev)) {
148312638c57SBen Widawsky 		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
148474cdb337SChris Wilson 			notify_ring(&dev_priv->ring[VECS]);
148512638c57SBen Widawsky 
1486aaecdf61SDaniel Vetter 		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
1487aaecdf61SDaniel Vetter 			DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
148812638c57SBen Widawsky 	}
14891403c0d4SPaulo Zanoni }
1490baf02a1fSBen Widawsky 
14918d7849dbSVille Syrjälä static bool intel_pipe_handle_vblank(struct drm_device *dev, enum pipe pipe)
14928d7849dbSVille Syrjälä {
14938d7849dbSVille Syrjälä 	if (!drm_handle_vblank(dev, pipe))
14948d7849dbSVille Syrjälä 		return false;
14958d7849dbSVille Syrjälä 
14968d7849dbSVille Syrjälä 	return true;
14978d7849dbSVille Syrjälä }
14988d7849dbSVille Syrjälä 
1499c1874ed7SImre Deak static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
15007e231dbeSJesse Barnes {
1501c1874ed7SImre Deak 	struct drm_i915_private *dev_priv = dev->dev_private;
150291d181ddSImre Deak 	u32 pipe_stats[I915_MAX_PIPES] = { };
15037e231dbeSJesse Barnes 	int pipe;
15047e231dbeSJesse Barnes 
150558ead0d7SImre Deak 	spin_lock(&dev_priv->irq_lock);
1506055e393fSDamien Lespiau 	for_each_pipe(dev_priv, pipe) {
150791d181ddSImre Deak 		int reg;
1508bbb5eebfSDaniel Vetter 		u32 mask, iir_bit = 0;
150991d181ddSImre Deak 
1510bbb5eebfSDaniel Vetter 		/*
1511bbb5eebfSDaniel Vetter 		 * PIPESTAT bits get signalled even when the interrupt is
1512bbb5eebfSDaniel Vetter 		 * disabled with the mask bits, and some of the status bits do
1513bbb5eebfSDaniel Vetter 		 * not generate interrupts at all (like the underrun bit). Hence
1514bbb5eebfSDaniel Vetter 		 * we need to be careful that we only handle what we want to
1515bbb5eebfSDaniel Vetter 		 * handle.
1516bbb5eebfSDaniel Vetter 		 */
15170f239f4cSDaniel Vetter 
15180f239f4cSDaniel Vetter 		/* fifo underruns are filterered in the underrun handler. */
15190f239f4cSDaniel Vetter 		mask = PIPE_FIFO_UNDERRUN_STATUS;
1520bbb5eebfSDaniel Vetter 
1521bbb5eebfSDaniel Vetter 		switch (pipe) {
1522bbb5eebfSDaniel Vetter 		case PIPE_A:
1523bbb5eebfSDaniel Vetter 			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
1524bbb5eebfSDaniel Vetter 			break;
1525bbb5eebfSDaniel Vetter 		case PIPE_B:
1526bbb5eebfSDaniel Vetter 			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
1527bbb5eebfSDaniel Vetter 			break;
15283278f67fSVille Syrjälä 		case PIPE_C:
15293278f67fSVille Syrjälä 			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
15303278f67fSVille Syrjälä 			break;
1531bbb5eebfSDaniel Vetter 		}
1532bbb5eebfSDaniel Vetter 		if (iir & iir_bit)
1533bbb5eebfSDaniel Vetter 			mask |= dev_priv->pipestat_irq_mask[pipe];
1534bbb5eebfSDaniel Vetter 
1535bbb5eebfSDaniel Vetter 		if (!mask)
153691d181ddSImre Deak 			continue;
153791d181ddSImre Deak 
153891d181ddSImre Deak 		reg = PIPESTAT(pipe);
1539bbb5eebfSDaniel Vetter 		mask |= PIPESTAT_INT_ENABLE_MASK;
1540bbb5eebfSDaniel Vetter 		pipe_stats[pipe] = I915_READ(reg) & mask;
15417e231dbeSJesse Barnes 
15427e231dbeSJesse Barnes 		/*
15437e231dbeSJesse Barnes 		 * Clear the PIPE*STAT regs before the IIR
15447e231dbeSJesse Barnes 		 */
154591d181ddSImre Deak 		if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS |
154691d181ddSImre Deak 					PIPESTAT_INT_STATUS_MASK))
15477e231dbeSJesse Barnes 			I915_WRITE(reg, pipe_stats[pipe]);
15487e231dbeSJesse Barnes 	}
154958ead0d7SImre Deak 	spin_unlock(&dev_priv->irq_lock);
15507e231dbeSJesse Barnes 
1551055e393fSDamien Lespiau 	for_each_pipe(dev_priv, pipe) {
1552d6bbafa1SChris Wilson 		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
1553d6bbafa1SChris Wilson 		    intel_pipe_handle_vblank(dev, pipe))
1554d6bbafa1SChris Wilson 			intel_check_page_flip(dev, pipe);
155531acc7f5SJesse Barnes 
1556579a9b0eSImre Deak 		if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) {
155731acc7f5SJesse Barnes 			intel_prepare_page_flip(dev, pipe);
155831acc7f5SJesse Barnes 			intel_finish_page_flip(dev, pipe);
155931acc7f5SJesse Barnes 		}
15604356d586SDaniel Vetter 
15614356d586SDaniel Vetter 		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1562277de95eSDaniel Vetter 			i9xx_pipe_crc_irq_handler(dev, pipe);
15632d9d2b0bSVille Syrjälä 
15641f7247c0SDaniel Vetter 		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
15651f7247c0SDaniel Vetter 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
156631acc7f5SJesse Barnes 	}
156731acc7f5SJesse Barnes 
1568c1874ed7SImre Deak 	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
1569c1874ed7SImre Deak 		gmbus_irq_handler(dev);
1570c1874ed7SImre Deak }
1571c1874ed7SImre Deak 
157216c6c56bSVille Syrjälä static void i9xx_hpd_irq_handler(struct drm_device *dev)
157316c6c56bSVille Syrjälä {
157416c6c56bSVille Syrjälä 	struct drm_i915_private *dev_priv = dev->dev_private;
157516c6c56bSVille Syrjälä 	u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
157642db67d6SVille Syrjälä 	u32 pin_mask = 0, long_mask = 0;
157716c6c56bSVille Syrjälä 
15780d2e4297SJani Nikula 	if (!hotplug_status)
15790d2e4297SJani Nikula 		return;
15800d2e4297SJani Nikula 
15813ff60f89SOscar Mateo 	I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
15823ff60f89SOscar Mateo 	/*
15833ff60f89SOscar Mateo 	 * Make sure hotplug status is cleared before we clear IIR, or else we
15843ff60f89SOscar Mateo 	 * may miss hotplug events.
15853ff60f89SOscar Mateo 	 */
15863ff60f89SOscar Mateo 	POSTING_READ(PORT_HOTPLUG_STAT);
15873ff60f89SOscar Mateo 
15884bca26d0SVille Syrjälä 	if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
158916c6c56bSVille Syrjälä 		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
159016c6c56bSVille Syrjälä 
1591fd63e2a9SImre Deak 		intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
1592fd63e2a9SImre Deak 				   hotplug_trigger, hpd_status_g4x,
1593fd63e2a9SImre Deak 				   i9xx_port_hotplug_long_detect);
1594676574dfSJani Nikula 		intel_hpd_irq_handler(dev, pin_mask, long_mask);
1595369712e8SJani Nikula 
1596369712e8SJani Nikula 		if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
1597369712e8SJani Nikula 			dp_aux_irq_handler(dev);
159816c6c56bSVille Syrjälä 	} else {
159916c6c56bSVille Syrjälä 		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
160016c6c56bSVille Syrjälä 
1601fd63e2a9SImre Deak 		intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
1602fd63e2a9SImre Deak 				   hotplug_trigger, hpd_status_g4x,
1603fd63e2a9SImre Deak 				   i9xx_port_hotplug_long_detect);
1604676574dfSJani Nikula 		intel_hpd_irq_handler(dev, pin_mask, long_mask);
160516c6c56bSVille Syrjälä 	}
16063ff60f89SOscar Mateo }
160716c6c56bSVille Syrjälä 
1608c1874ed7SImre Deak static irqreturn_t valleyview_irq_handler(int irq, void *arg)
1609c1874ed7SImre Deak {
161045a83f84SDaniel Vetter 	struct drm_device *dev = arg;
16112d1013ddSJani Nikula 	struct drm_i915_private *dev_priv = dev->dev_private;
1612c1874ed7SImre Deak 	u32 iir, gt_iir, pm_iir;
1613c1874ed7SImre Deak 	irqreturn_t ret = IRQ_NONE;
1614c1874ed7SImre Deak 
16152dd2a883SImre Deak 	if (!intel_irqs_enabled(dev_priv))
16162dd2a883SImre Deak 		return IRQ_NONE;
16172dd2a883SImre Deak 
1618c1874ed7SImre Deak 	while (true) {
16193ff60f89SOscar Mateo 		/* Find, clear, then process each source of interrupt */
16203ff60f89SOscar Mateo 
1621c1874ed7SImre Deak 		gt_iir = I915_READ(GTIIR);
16223ff60f89SOscar Mateo 		if (gt_iir)
16233ff60f89SOscar Mateo 			I915_WRITE(GTIIR, gt_iir);
16243ff60f89SOscar Mateo 
1625c1874ed7SImre Deak 		pm_iir = I915_READ(GEN6_PMIIR);
16263ff60f89SOscar Mateo 		if (pm_iir)
16273ff60f89SOscar Mateo 			I915_WRITE(GEN6_PMIIR, pm_iir);
16283ff60f89SOscar Mateo 
16293ff60f89SOscar Mateo 		iir = I915_READ(VLV_IIR);
16303ff60f89SOscar Mateo 		if (iir) {
16313ff60f89SOscar Mateo 			/* Consume port before clearing IIR or we'll miss events */
16323ff60f89SOscar Mateo 			if (iir & I915_DISPLAY_PORT_INTERRUPT)
16333ff60f89SOscar Mateo 				i9xx_hpd_irq_handler(dev);
16343ff60f89SOscar Mateo 			I915_WRITE(VLV_IIR, iir);
16353ff60f89SOscar Mateo 		}
1636c1874ed7SImre Deak 
1637c1874ed7SImre Deak 		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
1638c1874ed7SImre Deak 			goto out;
1639c1874ed7SImre Deak 
1640c1874ed7SImre Deak 		ret = IRQ_HANDLED;
1641c1874ed7SImre Deak 
16423ff60f89SOscar Mateo 		if (gt_iir)
1643c1874ed7SImre Deak 			snb_gt_irq_handler(dev, dev_priv, gt_iir);
164460611c13SPaulo Zanoni 		if (pm_iir)
1645d0ecd7e2SDaniel Vetter 			gen6_rps_irq_handler(dev_priv, pm_iir);
16463ff60f89SOscar Mateo 		/* Call regardless, as some status bits might not be
16473ff60f89SOscar Mateo 		 * signalled in iir */
16483ff60f89SOscar Mateo 		valleyview_pipestat_irq_handler(dev, iir);
16497e231dbeSJesse Barnes 	}
16507e231dbeSJesse Barnes 
16517e231dbeSJesse Barnes out:
16527e231dbeSJesse Barnes 	return ret;
16537e231dbeSJesse Barnes }
16547e231dbeSJesse Barnes 
165543f328d7SVille Syrjälä static irqreturn_t cherryview_irq_handler(int irq, void *arg)
165643f328d7SVille Syrjälä {
165745a83f84SDaniel Vetter 	struct drm_device *dev = arg;
165843f328d7SVille Syrjälä 	struct drm_i915_private *dev_priv = dev->dev_private;
165943f328d7SVille Syrjälä 	u32 master_ctl, iir;
166043f328d7SVille Syrjälä 	irqreturn_t ret = IRQ_NONE;
166143f328d7SVille Syrjälä 
16622dd2a883SImre Deak 	if (!intel_irqs_enabled(dev_priv))
16632dd2a883SImre Deak 		return IRQ_NONE;
16642dd2a883SImre Deak 
16658e5fd599SVille Syrjälä 	for (;;) {
16668e5fd599SVille Syrjälä 		master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
16673278f67fSVille Syrjälä 		iir = I915_READ(VLV_IIR);
16683278f67fSVille Syrjälä 
16693278f67fSVille Syrjälä 		if (master_ctl == 0 && iir == 0)
16708e5fd599SVille Syrjälä 			break;
167143f328d7SVille Syrjälä 
167227b6c122SOscar Mateo 		ret = IRQ_HANDLED;
167327b6c122SOscar Mateo 
167443f328d7SVille Syrjälä 		I915_WRITE(GEN8_MASTER_IRQ, 0);
167543f328d7SVille Syrjälä 
167627b6c122SOscar Mateo 		/* Find, clear, then process each source of interrupt */
167727b6c122SOscar Mateo 
167827b6c122SOscar Mateo 		if (iir) {
167927b6c122SOscar Mateo 			/* Consume port before clearing IIR or we'll miss events */
168027b6c122SOscar Mateo 			if (iir & I915_DISPLAY_PORT_INTERRUPT)
168127b6c122SOscar Mateo 				i9xx_hpd_irq_handler(dev);
168227b6c122SOscar Mateo 			I915_WRITE(VLV_IIR, iir);
168327b6c122SOscar Mateo 		}
168427b6c122SOscar Mateo 
168574cdb337SChris Wilson 		gen8_gt_irq_handler(dev_priv, master_ctl);
168643f328d7SVille Syrjälä 
168727b6c122SOscar Mateo 		/* Call regardless, as some status bits might not be
168827b6c122SOscar Mateo 		 * signalled in iir */
16893278f67fSVille Syrjälä 		valleyview_pipestat_irq_handler(dev, iir);
169043f328d7SVille Syrjälä 
169143f328d7SVille Syrjälä 		I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
169243f328d7SVille Syrjälä 		POSTING_READ(GEN8_MASTER_IRQ);
16938e5fd599SVille Syrjälä 	}
16943278f67fSVille Syrjälä 
169543f328d7SVille Syrjälä 	return ret;
169643f328d7SVille Syrjälä }
169743f328d7SVille Syrjälä 
169823e81d69SAdam Jackson static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
1699776ad806SJesse Barnes {
17002d1013ddSJani Nikula 	struct drm_i915_private *dev_priv = dev->dev_private;
17019db4a9c7SJesse Barnes 	int pipe;
1702b543fb04SEgbert Eich 	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
1703aaf5ec2eSSonika Jindal 
1704aaf5ec2eSSonika Jindal 	if (hotplug_trigger) {
170542db67d6SVille Syrjälä 		u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
1706776ad806SJesse Barnes 
170713cf5504SDave Airlie 		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
170813cf5504SDave Airlie 		I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
170913cf5504SDave Airlie 
1710fd63e2a9SImre Deak 		intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
1711fd63e2a9SImre Deak 				   dig_hotplug_reg, hpd_ibx,
1712fd63e2a9SImre Deak 				   pch_port_hotplug_long_detect);
1713676574dfSJani Nikula 		intel_hpd_irq_handler(dev, pin_mask, long_mask);
1714aaf5ec2eSSonika Jindal 	}
171591d131d2SDaniel Vetter 
1716cfc33bf7SVille Syrjälä 	if (pch_iir & SDE_AUDIO_POWER_MASK) {
1717cfc33bf7SVille Syrjälä 		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
1718776ad806SJesse Barnes 			       SDE_AUDIO_POWER_SHIFT);
1719cfc33bf7SVille Syrjälä 		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
1720cfc33bf7SVille Syrjälä 				 port_name(port));
1721cfc33bf7SVille Syrjälä 	}
1722776ad806SJesse Barnes 
1723ce99c256SDaniel Vetter 	if (pch_iir & SDE_AUX_MASK)
1724ce99c256SDaniel Vetter 		dp_aux_irq_handler(dev);
1725ce99c256SDaniel Vetter 
1726776ad806SJesse Barnes 	if (pch_iir & SDE_GMBUS)
1727515ac2bbSDaniel Vetter 		gmbus_irq_handler(dev);
1728776ad806SJesse Barnes 
1729776ad806SJesse Barnes 	if (pch_iir & SDE_AUDIO_HDCP_MASK)
1730776ad806SJesse Barnes 		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
1731776ad806SJesse Barnes 
1732776ad806SJesse Barnes 	if (pch_iir & SDE_AUDIO_TRANS_MASK)
1733776ad806SJesse Barnes 		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
1734776ad806SJesse Barnes 
1735776ad806SJesse Barnes 	if (pch_iir & SDE_POISON)
1736776ad806SJesse Barnes 		DRM_ERROR("PCH poison interrupt\n");
1737776ad806SJesse Barnes 
17389db4a9c7SJesse Barnes 	if (pch_iir & SDE_FDI_MASK)
1739055e393fSDamien Lespiau 		for_each_pipe(dev_priv, pipe)
17409db4a9c7SJesse Barnes 			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
17419db4a9c7SJesse Barnes 					 pipe_name(pipe),
17429db4a9c7SJesse Barnes 					 I915_READ(FDI_RX_IIR(pipe)));
1743776ad806SJesse Barnes 
1744776ad806SJesse Barnes 	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
1745776ad806SJesse Barnes 		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
1746776ad806SJesse Barnes 
1747776ad806SJesse Barnes 	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
1748776ad806SJesse Barnes 		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
1749776ad806SJesse Barnes 
1750776ad806SJesse Barnes 	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
17511f7247c0SDaniel Vetter 		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);
17528664281bSPaulo Zanoni 
17538664281bSPaulo Zanoni 	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
17541f7247c0SDaniel Vetter 		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
17558664281bSPaulo Zanoni }
17568664281bSPaulo Zanoni 
17578664281bSPaulo Zanoni static void ivb_err_int_handler(struct drm_device *dev)
17588664281bSPaulo Zanoni {
17598664281bSPaulo Zanoni 	struct drm_i915_private *dev_priv = dev->dev_private;
17608664281bSPaulo Zanoni 	u32 err_int = I915_READ(GEN7_ERR_INT);
17615a69b89fSDaniel Vetter 	enum pipe pipe;
17628664281bSPaulo Zanoni 
1763de032bf4SPaulo Zanoni 	if (err_int & ERR_INT_POISON)
1764de032bf4SPaulo Zanoni 		DRM_ERROR("Poison interrupt\n");
1765de032bf4SPaulo Zanoni 
1766055e393fSDamien Lespiau 	for_each_pipe(dev_priv, pipe) {
17671f7247c0SDaniel Vetter 		if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
17681f7247c0SDaniel Vetter 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
17698664281bSPaulo Zanoni 
17705a69b89fSDaniel Vetter 		if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
17715a69b89fSDaniel Vetter 			if (IS_IVYBRIDGE(dev))
1772277de95eSDaniel Vetter 				ivb_pipe_crc_irq_handler(dev, pipe);
17735a69b89fSDaniel Vetter 			else
1774277de95eSDaniel Vetter 				hsw_pipe_crc_irq_handler(dev, pipe);
17755a69b89fSDaniel Vetter 		}
17765a69b89fSDaniel Vetter 	}
17778bf1e9f1SShuang He 
17788664281bSPaulo Zanoni 	I915_WRITE(GEN7_ERR_INT, err_int);
17798664281bSPaulo Zanoni }
17808664281bSPaulo Zanoni 
17818664281bSPaulo Zanoni static void cpt_serr_int_handler(struct drm_device *dev)
17828664281bSPaulo Zanoni {
17838664281bSPaulo Zanoni 	struct drm_i915_private *dev_priv = dev->dev_private;
17848664281bSPaulo Zanoni 	u32 serr_int = I915_READ(SERR_INT);
17858664281bSPaulo Zanoni 
1786de032bf4SPaulo Zanoni 	if (serr_int & SERR_INT_POISON)
1787de032bf4SPaulo Zanoni 		DRM_ERROR("PCH poison interrupt\n");
1788de032bf4SPaulo Zanoni 
17898664281bSPaulo Zanoni 	if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
17901f7247c0SDaniel Vetter 		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);
17918664281bSPaulo Zanoni 
17928664281bSPaulo Zanoni 	if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
17931f7247c0SDaniel Vetter 		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
17948664281bSPaulo Zanoni 
17958664281bSPaulo Zanoni 	if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
17961f7247c0SDaniel Vetter 		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_C);
17978664281bSPaulo Zanoni 
17988664281bSPaulo Zanoni 	I915_WRITE(SERR_INT, serr_int);
1799776ad806SJesse Barnes }
1800776ad806SJesse Barnes 
180123e81d69SAdam Jackson static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
180223e81d69SAdam Jackson {
18032d1013ddSJani Nikula 	struct drm_i915_private *dev_priv = dev->dev_private;
180423e81d69SAdam Jackson 	int pipe;
18056dbf30ceSVille Syrjälä 	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
1806aaf5ec2eSSonika Jindal 
1807aaf5ec2eSSonika Jindal 	if (hotplug_trigger) {
180842db67d6SVille Syrjälä 		u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
180923e81d69SAdam Jackson 
181013cf5504SDave Airlie 		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
181113cf5504SDave Airlie 		I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
1812fd63e2a9SImre Deak 
181326951cafSXiong Zhang 		intel_get_hpd_pins(&pin_mask, &long_mask,
181426951cafSXiong Zhang 				   hotplug_trigger,
1815fd63e2a9SImre Deak 				   dig_hotplug_reg, hpd_cpt,
1816fd63e2a9SImre Deak 				   pch_port_hotplug_long_detect);
181726951cafSXiong Zhang 
1818676574dfSJani Nikula 		intel_hpd_irq_handler(dev, pin_mask, long_mask);
1819aaf5ec2eSSonika Jindal 	}
182091d131d2SDaniel Vetter 
1821cfc33bf7SVille Syrjälä 	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
1822cfc33bf7SVille Syrjälä 		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
182323e81d69SAdam Jackson 			       SDE_AUDIO_POWER_SHIFT_CPT);
1824cfc33bf7SVille Syrjälä 		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
1825cfc33bf7SVille Syrjälä 				 port_name(port));
1826cfc33bf7SVille Syrjälä 	}
182723e81d69SAdam Jackson 
182823e81d69SAdam Jackson 	if (pch_iir & SDE_AUX_MASK_CPT)
1829ce99c256SDaniel Vetter 		dp_aux_irq_handler(dev);
183023e81d69SAdam Jackson 
183123e81d69SAdam Jackson 	if (pch_iir & SDE_GMBUS_CPT)
1832515ac2bbSDaniel Vetter 		gmbus_irq_handler(dev);
183323e81d69SAdam Jackson 
183423e81d69SAdam Jackson 	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
183523e81d69SAdam Jackson 		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
183623e81d69SAdam Jackson 
183723e81d69SAdam Jackson 	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
183823e81d69SAdam Jackson 		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
183923e81d69SAdam Jackson 
184023e81d69SAdam Jackson 	if (pch_iir & SDE_FDI_MASK_CPT)
1841055e393fSDamien Lespiau 		for_each_pipe(dev_priv, pipe)
184223e81d69SAdam Jackson 			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
184323e81d69SAdam Jackson 					 pipe_name(pipe),
184423e81d69SAdam Jackson 					 I915_READ(FDI_RX_IIR(pipe)));
18458664281bSPaulo Zanoni 
18468664281bSPaulo Zanoni 	if (pch_iir & SDE_ERROR_CPT)
18478664281bSPaulo Zanoni 		cpt_serr_int_handler(dev);
184823e81d69SAdam Jackson }
184923e81d69SAdam Jackson 
18506dbf30ceSVille Syrjälä static void spt_irq_handler(struct drm_device *dev, u32 pch_iir)
18516dbf30ceSVille Syrjälä {
18526dbf30ceSVille Syrjälä 	struct drm_i915_private *dev_priv = dev->dev_private;
18536dbf30ceSVille Syrjälä 	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
18546dbf30ceSVille Syrjälä 		~SDE_PORTE_HOTPLUG_SPT;
18556dbf30ceSVille Syrjälä 	u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
18566dbf30ceSVille Syrjälä 	u32 pin_mask = 0, long_mask = 0;
18576dbf30ceSVille Syrjälä 
18586dbf30ceSVille Syrjälä 	if (hotplug_trigger) {
18596dbf30ceSVille Syrjälä 		u32 dig_hotplug_reg;
18606dbf30ceSVille Syrjälä 
18616dbf30ceSVille Syrjälä 		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
18626dbf30ceSVille Syrjälä 		I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
18636dbf30ceSVille Syrjälä 
18646dbf30ceSVille Syrjälä 		intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
18656dbf30ceSVille Syrjälä 				   dig_hotplug_reg, hpd_spt,
18666dbf30ceSVille Syrjälä 				   pch_port_hotplug_long_detect);
18676dbf30ceSVille Syrjälä 	}
18686dbf30ceSVille Syrjälä 
18696dbf30ceSVille Syrjälä 	if (hotplug2_trigger) {
18706dbf30ceSVille Syrjälä 		u32 dig_hotplug_reg;
18716dbf30ceSVille Syrjälä 
18726dbf30ceSVille Syrjälä 		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2);
18736dbf30ceSVille Syrjälä 		I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg);
18746dbf30ceSVille Syrjälä 
18756dbf30ceSVille Syrjälä 		intel_get_hpd_pins(&pin_mask, &long_mask, hotplug2_trigger,
18766dbf30ceSVille Syrjälä 				   dig_hotplug_reg, hpd_spt,
18776dbf30ceSVille Syrjälä 				   spt_port_hotplug2_long_detect);
18786dbf30ceSVille Syrjälä 	}
18796dbf30ceSVille Syrjälä 
18806dbf30ceSVille Syrjälä 	if (pin_mask)
18816dbf30ceSVille Syrjälä 		intel_hpd_irq_handler(dev, pin_mask, long_mask);
18826dbf30ceSVille Syrjälä 
18836dbf30ceSVille Syrjälä 	if (pch_iir & SDE_GMBUS_CPT)
18846dbf30ceSVille Syrjälä 		gmbus_irq_handler(dev);
18856dbf30ceSVille Syrjälä }
18866dbf30ceSVille Syrjälä 
1887c008bc6eSPaulo Zanoni static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
1888c008bc6eSPaulo Zanoni {
1889c008bc6eSPaulo Zanoni 	struct drm_i915_private *dev_priv = dev->dev_private;
189040da17c2SDaniel Vetter 	enum pipe pipe;
1891e4ce95aaSVille Syrjälä 	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;
1892e4ce95aaSVille Syrjälä 
1893e4ce95aaSVille Syrjälä 	if (hotplug_trigger) {
1894e4ce95aaSVille Syrjälä 		u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
1895e4ce95aaSVille Syrjälä 
1896e4ce95aaSVille Syrjälä 		dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
1897e4ce95aaSVille Syrjälä 		I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);
1898e4ce95aaSVille Syrjälä 
1899e4ce95aaSVille Syrjälä 		intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
1900e4ce95aaSVille Syrjälä 				   dig_hotplug_reg, hpd_ilk,
1901e4ce95aaSVille Syrjälä 				   ilk_port_hotplug_long_detect);
1902e4ce95aaSVille Syrjälä 		intel_hpd_irq_handler(dev, pin_mask, long_mask);
1903e4ce95aaSVille Syrjälä 	}
1904c008bc6eSPaulo Zanoni 
1905c008bc6eSPaulo Zanoni 	if (de_iir & DE_AUX_CHANNEL_A)
1906c008bc6eSPaulo Zanoni 		dp_aux_irq_handler(dev);
1907c008bc6eSPaulo Zanoni 
1908c008bc6eSPaulo Zanoni 	if (de_iir & DE_GSE)
1909c008bc6eSPaulo Zanoni 		intel_opregion_asle_intr(dev);
1910c008bc6eSPaulo Zanoni 
1911c008bc6eSPaulo Zanoni 	if (de_iir & DE_POISON)
1912c008bc6eSPaulo Zanoni 		DRM_ERROR("Poison interrupt\n");
1913c008bc6eSPaulo Zanoni 
1914055e393fSDamien Lespiau 	for_each_pipe(dev_priv, pipe) {
1915d6bbafa1SChris Wilson 		if (de_iir & DE_PIPE_VBLANK(pipe) &&
1916d6bbafa1SChris Wilson 		    intel_pipe_handle_vblank(dev, pipe))
1917d6bbafa1SChris Wilson 			intel_check_page_flip(dev, pipe);
1918c008bc6eSPaulo Zanoni 
191940da17c2SDaniel Vetter 		if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
19201f7247c0SDaniel Vetter 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1921c008bc6eSPaulo Zanoni 
192240da17c2SDaniel Vetter 		if (de_iir & DE_PIPE_CRC_DONE(pipe))
192340da17c2SDaniel Vetter 			i9xx_pipe_crc_irq_handler(dev, pipe);
19245b3a856bSDaniel Vetter 
192540da17c2SDaniel Vetter 		/* plane/pipes map 1:1 on ilk+ */
192640da17c2SDaniel Vetter 		if (de_iir & DE_PLANE_FLIP_DONE(pipe)) {
192740da17c2SDaniel Vetter 			intel_prepare_page_flip(dev, pipe);
192840da17c2SDaniel Vetter 			intel_finish_page_flip_plane(dev, pipe);
1929c008bc6eSPaulo Zanoni 		}
1930c008bc6eSPaulo Zanoni 	}
1931c008bc6eSPaulo Zanoni 
1932c008bc6eSPaulo Zanoni 	/* check event from PCH */
1933c008bc6eSPaulo Zanoni 	if (de_iir & DE_PCH_EVENT) {
1934c008bc6eSPaulo Zanoni 		u32 pch_iir = I915_READ(SDEIIR);
1935c008bc6eSPaulo Zanoni 
1936c008bc6eSPaulo Zanoni 		if (HAS_PCH_CPT(dev))
1937c008bc6eSPaulo Zanoni 			cpt_irq_handler(dev, pch_iir);
1938c008bc6eSPaulo Zanoni 		else
1939c008bc6eSPaulo Zanoni 			ibx_irq_handler(dev, pch_iir);
1940c008bc6eSPaulo Zanoni 
1941c008bc6eSPaulo Zanoni 		/* should clear PCH hotplug event before clear CPU irq */
1942c008bc6eSPaulo Zanoni 		I915_WRITE(SDEIIR, pch_iir);
1943c008bc6eSPaulo Zanoni 	}
1944c008bc6eSPaulo Zanoni 
1945c008bc6eSPaulo Zanoni 	if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
1946c008bc6eSPaulo Zanoni 		ironlake_rps_change_irq_handler(dev);
1947c008bc6eSPaulo Zanoni }
1948c008bc6eSPaulo Zanoni 
19499719fb98SPaulo Zanoni static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
19509719fb98SPaulo Zanoni {
19519719fb98SPaulo Zanoni 	struct drm_i915_private *dev_priv = dev->dev_private;
195207d27e20SDamien Lespiau 	enum pipe pipe;
1953*23bb4cb5SVille Syrjälä 	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;
1954*23bb4cb5SVille Syrjälä 
1955*23bb4cb5SVille Syrjälä 	if (hotplug_trigger) {
1956*23bb4cb5SVille Syrjälä 		u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
1957*23bb4cb5SVille Syrjälä 
1958*23bb4cb5SVille Syrjälä 		dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
1959*23bb4cb5SVille Syrjälä 		I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);
1960*23bb4cb5SVille Syrjälä 
1961*23bb4cb5SVille Syrjälä 		intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
1962*23bb4cb5SVille Syrjälä 				   dig_hotplug_reg, hpd_ivb,
1963*23bb4cb5SVille Syrjälä 				   ilk_port_hotplug_long_detect);
1964*23bb4cb5SVille Syrjälä 		intel_hpd_irq_handler(dev, pin_mask, long_mask);
1965*23bb4cb5SVille Syrjälä 	}
19669719fb98SPaulo Zanoni 
19679719fb98SPaulo Zanoni 	if (de_iir & DE_ERR_INT_IVB)
19689719fb98SPaulo Zanoni 		ivb_err_int_handler(dev);
19699719fb98SPaulo Zanoni 
19709719fb98SPaulo Zanoni 	if (de_iir & DE_AUX_CHANNEL_A_IVB)
19719719fb98SPaulo Zanoni 		dp_aux_irq_handler(dev);
19729719fb98SPaulo Zanoni 
19739719fb98SPaulo Zanoni 	if (de_iir & DE_GSE_IVB)
19749719fb98SPaulo Zanoni 		intel_opregion_asle_intr(dev);
19759719fb98SPaulo Zanoni 
1976055e393fSDamien Lespiau 	for_each_pipe(dev_priv, pipe) {
1977d6bbafa1SChris Wilson 		if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) &&
1978d6bbafa1SChris Wilson 		    intel_pipe_handle_vblank(dev, pipe))
1979d6bbafa1SChris Wilson 			intel_check_page_flip(dev, pipe);
198040da17c2SDaniel Vetter 
198140da17c2SDaniel Vetter 		/* plane/pipes map 1:1 on ilk+ */
198207d27e20SDamien Lespiau 		if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) {
198307d27e20SDamien Lespiau 			intel_prepare_page_flip(dev, pipe);
198407d27e20SDamien Lespiau 			intel_finish_page_flip_plane(dev, pipe);
19859719fb98SPaulo Zanoni 		}
19869719fb98SPaulo Zanoni 	}
19879719fb98SPaulo Zanoni 
19889719fb98SPaulo Zanoni 	/* check event from PCH */
19899719fb98SPaulo Zanoni 	if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
19909719fb98SPaulo Zanoni 		u32 pch_iir = I915_READ(SDEIIR);
19919719fb98SPaulo Zanoni 
19929719fb98SPaulo Zanoni 		cpt_irq_handler(dev, pch_iir);
19939719fb98SPaulo Zanoni 
19949719fb98SPaulo Zanoni 		/* clear PCH hotplug event before clear CPU irq */
19959719fb98SPaulo Zanoni 		I915_WRITE(SDEIIR, pch_iir);
19969719fb98SPaulo Zanoni 	}
19979719fb98SPaulo Zanoni }
19989719fb98SPaulo Zanoni 
199972c90f62SOscar Mateo /*
200072c90f62SOscar Mateo  * To handle irqs with the minimum potential races with fresh interrupts, we:
200172c90f62SOscar Mateo  * 1 - Disable Master Interrupt Control.
200272c90f62SOscar Mateo  * 2 - Find the source(s) of the interrupt.
200372c90f62SOscar Mateo  * 3 - Clear the Interrupt Identity bits (IIR).
200472c90f62SOscar Mateo  * 4 - Process the interrupt(s) that had bits set in the IIRs.
200572c90f62SOscar Mateo  * 5 - Re-enable Master Interrupt Control.
200672c90f62SOscar Mateo  */
2007f1af8fc1SPaulo Zanoni static irqreturn_t ironlake_irq_handler(int irq, void *arg)
2008b1f14ad0SJesse Barnes {
200945a83f84SDaniel Vetter 	struct drm_device *dev = arg;
20102d1013ddSJani Nikula 	struct drm_i915_private *dev_priv = dev->dev_private;
2011f1af8fc1SPaulo Zanoni 	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
20120e43406bSChris Wilson 	irqreturn_t ret = IRQ_NONE;
2013b1f14ad0SJesse Barnes 
20142dd2a883SImre Deak 	if (!intel_irqs_enabled(dev_priv))
20152dd2a883SImre Deak 		return IRQ_NONE;
20162dd2a883SImre Deak 
20178664281bSPaulo Zanoni 	/* We get interrupts on unclaimed registers, so check for this before we
20188664281bSPaulo Zanoni 	 * do any I915_{READ,WRITE}. */
2019907b28c5SChris Wilson 	intel_uncore_check_errors(dev);
20208664281bSPaulo Zanoni 
2021b1f14ad0SJesse Barnes 	/* disable master interrupt before clearing iir  */
2022b1f14ad0SJesse Barnes 	de_ier = I915_READ(DEIER);
2023b1f14ad0SJesse Barnes 	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
202423a78516SPaulo Zanoni 	POSTING_READ(DEIER);
20250e43406bSChris Wilson 
202644498aeaSPaulo Zanoni 	/* Disable south interrupts. We'll only write to SDEIIR once, so further
202744498aeaSPaulo Zanoni 	 * interrupts will will be stored on its back queue, and then we'll be
202844498aeaSPaulo Zanoni 	 * able to process them after we restore SDEIER (as soon as we restore
202944498aeaSPaulo Zanoni 	 * it, we'll get an interrupt if SDEIIR still has something to process
203044498aeaSPaulo Zanoni 	 * due to its back queue). */
2031ab5c608bSBen Widawsky 	if (!HAS_PCH_NOP(dev)) {
203244498aeaSPaulo Zanoni 		sde_ier = I915_READ(SDEIER);
203344498aeaSPaulo Zanoni 		I915_WRITE(SDEIER, 0);
203444498aeaSPaulo Zanoni 		POSTING_READ(SDEIER);
2035ab5c608bSBen Widawsky 	}
203644498aeaSPaulo Zanoni 
203772c90f62SOscar Mateo 	/* Find, clear, then process each source of interrupt */
203872c90f62SOscar Mateo 
20390e43406bSChris Wilson 	gt_iir = I915_READ(GTIIR);
20400e43406bSChris Wilson 	if (gt_iir) {
204172c90f62SOscar Mateo 		I915_WRITE(GTIIR, gt_iir);
204272c90f62SOscar Mateo 		ret = IRQ_HANDLED;
2043d8fc8a47SPaulo Zanoni 		if (INTEL_INFO(dev)->gen >= 6)
20440e43406bSChris Wilson 			snb_gt_irq_handler(dev, dev_priv, gt_iir);
2045d8fc8a47SPaulo Zanoni 		else
2046d8fc8a47SPaulo Zanoni 			ilk_gt_irq_handler(dev, dev_priv, gt_iir);
20470e43406bSChris Wilson 	}
2048b1f14ad0SJesse Barnes 
2049b1f14ad0SJesse Barnes 	de_iir = I915_READ(DEIIR);
20500e43406bSChris Wilson 	if (de_iir) {
205172c90f62SOscar Mateo 		I915_WRITE(DEIIR, de_iir);
205272c90f62SOscar Mateo 		ret = IRQ_HANDLED;
2053f1af8fc1SPaulo Zanoni 		if (INTEL_INFO(dev)->gen >= 7)
20549719fb98SPaulo Zanoni 			ivb_display_irq_handler(dev, de_iir);
2055f1af8fc1SPaulo Zanoni 		else
2056f1af8fc1SPaulo Zanoni 			ilk_display_irq_handler(dev, de_iir);
20570e43406bSChris Wilson 	}
20580e43406bSChris Wilson 
2059f1af8fc1SPaulo Zanoni 	if (INTEL_INFO(dev)->gen >= 6) {
2060f1af8fc1SPaulo Zanoni 		u32 pm_iir = I915_READ(GEN6_PMIIR);
20610e43406bSChris Wilson 		if (pm_iir) {
2062b1f14ad0SJesse Barnes 			I915_WRITE(GEN6_PMIIR, pm_iir);
20630e43406bSChris Wilson 			ret = IRQ_HANDLED;
206472c90f62SOscar Mateo 			gen6_rps_irq_handler(dev_priv, pm_iir);
20650e43406bSChris Wilson 		}
2066f1af8fc1SPaulo Zanoni 	}
2067b1f14ad0SJesse Barnes 
2068b1f14ad0SJesse Barnes 	I915_WRITE(DEIER, de_ier);
2069b1f14ad0SJesse Barnes 	POSTING_READ(DEIER);
2070ab5c608bSBen Widawsky 	if (!HAS_PCH_NOP(dev)) {
207144498aeaSPaulo Zanoni 		I915_WRITE(SDEIER, sde_ier);
207244498aeaSPaulo Zanoni 		POSTING_READ(SDEIER);
2073ab5c608bSBen Widawsky 	}
2074b1f14ad0SJesse Barnes 
2075b1f14ad0SJesse Barnes 	return ret;
2076b1f14ad0SJesse Barnes }
2077b1f14ad0SJesse Barnes 
2078d04a492dSShashank Sharma static void bxt_hpd_handler(struct drm_device *dev, uint32_t iir_status)
2079d04a492dSShashank Sharma {
2080d04a492dSShashank Sharma 	struct drm_i915_private *dev_priv = dev->dev_private;
2081676574dfSJani Nikula 	u32 hp_control, hp_trigger;
208242db67d6SVille Syrjälä 	u32 pin_mask = 0, long_mask = 0;
2083d04a492dSShashank Sharma 
2084d04a492dSShashank Sharma 	/* Get the status */
2085d04a492dSShashank Sharma 	hp_trigger = iir_status & BXT_DE_PORT_HOTPLUG_MASK;
2086d04a492dSShashank Sharma 	hp_control = I915_READ(BXT_HOTPLUG_CTL);
2087d04a492dSShashank Sharma 
2088d04a492dSShashank Sharma 	/* Hotplug not enabled ? */
2089d04a492dSShashank Sharma 	if (!(hp_control & BXT_HOTPLUG_CTL_MASK)) {
2090d04a492dSShashank Sharma 		DRM_ERROR("Interrupt when HPD disabled\n");
2091d04a492dSShashank Sharma 		return;
2092d04a492dSShashank Sharma 	}
2093d04a492dSShashank Sharma 
2094d04a492dSShashank Sharma 	/* Clear sticky bits in hpd status */
2095d04a492dSShashank Sharma 	I915_WRITE(BXT_HOTPLUG_CTL, hp_control);
2096475c2e3bSJani Nikula 
2097fd63e2a9SImre Deak 	intel_get_hpd_pins(&pin_mask, &long_mask, hp_trigger, hp_control,
209863c88d22SImre Deak 			   hpd_bxt, bxt_port_hotplug_long_detect);
2099475c2e3bSJani Nikula 	intel_hpd_irq_handler(dev, pin_mask, long_mask);
2100d04a492dSShashank Sharma }
2101d04a492dSShashank Sharma 
2102abd58f01SBen Widawsky static irqreturn_t gen8_irq_handler(int irq, void *arg)
2103abd58f01SBen Widawsky {
2104abd58f01SBen Widawsky 	struct drm_device *dev = arg;
2105abd58f01SBen Widawsky 	struct drm_i915_private *dev_priv = dev->dev_private;
2106abd58f01SBen Widawsky 	u32 master_ctl;
2107abd58f01SBen Widawsky 	irqreturn_t ret = IRQ_NONE;
2108abd58f01SBen Widawsky 	uint32_t tmp = 0;
2109c42664ccSDaniel Vetter 	enum pipe pipe;
211088e04703SJesse Barnes 	u32 aux_mask = GEN8_AUX_CHANNEL_A;
211188e04703SJesse Barnes 
21122dd2a883SImre Deak 	if (!intel_irqs_enabled(dev_priv))
21132dd2a883SImre Deak 		return IRQ_NONE;
21142dd2a883SImre Deak 
211588e04703SJesse Barnes 	if (IS_GEN9(dev))
211688e04703SJesse Barnes 		aux_mask |=  GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
211788e04703SJesse Barnes 			GEN9_AUX_CHANNEL_D;
2118abd58f01SBen Widawsky 
2119cb0d205eSChris Wilson 	master_ctl = I915_READ_FW(GEN8_MASTER_IRQ);
2120abd58f01SBen Widawsky 	master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
2121abd58f01SBen Widawsky 	if (!master_ctl)
2122abd58f01SBen Widawsky 		return IRQ_NONE;
2123abd58f01SBen Widawsky 
2124cb0d205eSChris Wilson 	I915_WRITE_FW(GEN8_MASTER_IRQ, 0);
2125abd58f01SBen Widawsky 
212638cc46d7SOscar Mateo 	/* Find, clear, then process each source of interrupt */
212738cc46d7SOscar Mateo 
212874cdb337SChris Wilson 	ret = gen8_gt_irq_handler(dev_priv, master_ctl);
2129abd58f01SBen Widawsky 
2130abd58f01SBen Widawsky 	if (master_ctl & GEN8_DE_MISC_IRQ) {
2131abd58f01SBen Widawsky 		tmp = I915_READ(GEN8_DE_MISC_IIR);
2132abd58f01SBen Widawsky 		if (tmp) {
2133abd58f01SBen Widawsky 			I915_WRITE(GEN8_DE_MISC_IIR, tmp);
2134abd58f01SBen Widawsky 			ret = IRQ_HANDLED;
213538cc46d7SOscar Mateo 			if (tmp & GEN8_DE_MISC_GSE)
213638cc46d7SOscar Mateo 				intel_opregion_asle_intr(dev);
213738cc46d7SOscar Mateo 			else
213838cc46d7SOscar Mateo 				DRM_ERROR("Unexpected DE Misc interrupt\n");
2139abd58f01SBen Widawsky 		}
214038cc46d7SOscar Mateo 		else
214138cc46d7SOscar Mateo 			DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
2142abd58f01SBen Widawsky 	}
2143abd58f01SBen Widawsky 
21446d766f02SDaniel Vetter 	if (master_ctl & GEN8_DE_PORT_IRQ) {
21456d766f02SDaniel Vetter 		tmp = I915_READ(GEN8_DE_PORT_IIR);
21466d766f02SDaniel Vetter 		if (tmp) {
2147d04a492dSShashank Sharma 			bool found = false;
2148d04a492dSShashank Sharma 
21496d766f02SDaniel Vetter 			I915_WRITE(GEN8_DE_PORT_IIR, tmp);
21506d766f02SDaniel Vetter 			ret = IRQ_HANDLED;
215188e04703SJesse Barnes 
2152d04a492dSShashank Sharma 			if (tmp & aux_mask) {
215338cc46d7SOscar Mateo 				dp_aux_irq_handler(dev);
2154d04a492dSShashank Sharma 				found = true;
2155d04a492dSShashank Sharma 			}
2156d04a492dSShashank Sharma 
2157d04a492dSShashank Sharma 			if (IS_BROXTON(dev) && tmp & BXT_DE_PORT_HOTPLUG_MASK) {
2158d04a492dSShashank Sharma 				bxt_hpd_handler(dev, tmp);
2159d04a492dSShashank Sharma 				found = true;
2160d04a492dSShashank Sharma 			}
2161d04a492dSShashank Sharma 
21629e63743eSShashank Sharma 			if (IS_BROXTON(dev) && (tmp & BXT_DE_PORT_GMBUS)) {
21639e63743eSShashank Sharma 				gmbus_irq_handler(dev);
21649e63743eSShashank Sharma 				found = true;
21659e63743eSShashank Sharma 			}
21669e63743eSShashank Sharma 
2167d04a492dSShashank Sharma 			if (!found)
216838cc46d7SOscar Mateo 				DRM_ERROR("Unexpected DE Port interrupt\n");
21696d766f02SDaniel Vetter 		}
217038cc46d7SOscar Mateo 		else
217138cc46d7SOscar Mateo 			DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
21726d766f02SDaniel Vetter 	}
21736d766f02SDaniel Vetter 
2174055e393fSDamien Lespiau 	for_each_pipe(dev_priv, pipe) {
2175770de83dSDamien Lespiau 		uint32_t pipe_iir, flip_done = 0, fault_errors = 0;
2176abd58f01SBen Widawsky 
2177c42664ccSDaniel Vetter 		if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
2178c42664ccSDaniel Vetter 			continue;
2179c42664ccSDaniel Vetter 
2180abd58f01SBen Widawsky 		pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
218138cc46d7SOscar Mateo 		if (pipe_iir) {
218238cc46d7SOscar Mateo 			ret = IRQ_HANDLED;
218338cc46d7SOscar Mateo 			I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
2184770de83dSDamien Lespiau 
2185d6bbafa1SChris Wilson 			if (pipe_iir & GEN8_PIPE_VBLANK &&
2186d6bbafa1SChris Wilson 			    intel_pipe_handle_vblank(dev, pipe))
2187d6bbafa1SChris Wilson 				intel_check_page_flip(dev, pipe);
2188abd58f01SBen Widawsky 
2189770de83dSDamien Lespiau 			if (IS_GEN9(dev))
2190770de83dSDamien Lespiau 				flip_done = pipe_iir & GEN9_PIPE_PLANE1_FLIP_DONE;
2191770de83dSDamien Lespiau 			else
2192770de83dSDamien Lespiau 				flip_done = pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE;
2193770de83dSDamien Lespiau 
2194770de83dSDamien Lespiau 			if (flip_done) {
2195abd58f01SBen Widawsky 				intel_prepare_page_flip(dev, pipe);
2196abd58f01SBen Widawsky 				intel_finish_page_flip_plane(dev, pipe);
2197abd58f01SBen Widawsky 			}
2198abd58f01SBen Widawsky 
21990fbe7870SDaniel Vetter 			if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
22000fbe7870SDaniel Vetter 				hsw_pipe_crc_irq_handler(dev, pipe);
22010fbe7870SDaniel Vetter 
22021f7247c0SDaniel Vetter 			if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN)
22031f7247c0SDaniel Vetter 				intel_cpu_fifo_underrun_irq_handler(dev_priv,
22041f7247c0SDaniel Vetter 								    pipe);
220538d83c96SDaniel Vetter 
2206770de83dSDamien Lespiau 
2207770de83dSDamien Lespiau 			if (IS_GEN9(dev))
2208770de83dSDamien Lespiau 				fault_errors = pipe_iir & GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
2209770de83dSDamien Lespiau 			else
2210770de83dSDamien Lespiau 				fault_errors = pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
2211770de83dSDamien Lespiau 
2212770de83dSDamien Lespiau 			if (fault_errors)
221330100f2bSDaniel Vetter 				DRM_ERROR("Fault errors on pipe %c\n: 0x%08x",
221430100f2bSDaniel Vetter 					  pipe_name(pipe),
221530100f2bSDaniel Vetter 					  pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS);
2216c42664ccSDaniel Vetter 		} else
2217abd58f01SBen Widawsky 			DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
2218abd58f01SBen Widawsky 	}
2219abd58f01SBen Widawsky 
2220266ea3d9SShashank Sharma 	if (HAS_PCH_SPLIT(dev) && !HAS_PCH_NOP(dev) &&
2221266ea3d9SShashank Sharma 	    master_ctl & GEN8_DE_PCH_IRQ) {
222292d03a80SDaniel Vetter 		/*
222392d03a80SDaniel Vetter 		 * FIXME(BDW): Assume for now that the new interrupt handling
222492d03a80SDaniel Vetter 		 * scheme also closed the SDE interrupt handling race we've seen
222592d03a80SDaniel Vetter 		 * on older pch-split platforms. But this needs testing.
222692d03a80SDaniel Vetter 		 */
222792d03a80SDaniel Vetter 		u32 pch_iir = I915_READ(SDEIIR);
222892d03a80SDaniel Vetter 		if (pch_iir) {
222992d03a80SDaniel Vetter 			I915_WRITE(SDEIIR, pch_iir);
223092d03a80SDaniel Vetter 			ret = IRQ_HANDLED;
22316dbf30ceSVille Syrjälä 
22326dbf30ceSVille Syrjälä 			if (HAS_PCH_SPT(dev_priv))
22336dbf30ceSVille Syrjälä 				spt_irq_handler(dev, pch_iir);
22346dbf30ceSVille Syrjälä 			else
223538cc46d7SOscar Mateo 				cpt_irq_handler(dev, pch_iir);
223638cc46d7SOscar Mateo 		} else
223738cc46d7SOscar Mateo 			DRM_ERROR("The master control interrupt lied (SDE)!\n");
223838cc46d7SOscar Mateo 
223992d03a80SDaniel Vetter 	}
224092d03a80SDaniel Vetter 
2241cb0d205eSChris Wilson 	I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
2242cb0d205eSChris Wilson 	POSTING_READ_FW(GEN8_MASTER_IRQ);
2243abd58f01SBen Widawsky 
2244abd58f01SBen Widawsky 	return ret;
2245abd58f01SBen Widawsky }
2246abd58f01SBen Widawsky 
224717e1df07SDaniel Vetter static void i915_error_wake_up(struct drm_i915_private *dev_priv,
224817e1df07SDaniel Vetter 			       bool reset_completed)
224917e1df07SDaniel Vetter {
2250a4872ba6SOscar Mateo 	struct intel_engine_cs *ring;
225117e1df07SDaniel Vetter 	int i;
225217e1df07SDaniel Vetter 
225317e1df07SDaniel Vetter 	/*
225417e1df07SDaniel Vetter 	 * Notify all waiters for GPU completion events that reset state has
225517e1df07SDaniel Vetter 	 * been changed, and that they need to restart their wait after
225617e1df07SDaniel Vetter 	 * checking for potential errors (and bail out to drop locks if there is
225717e1df07SDaniel Vetter 	 * a gpu reset pending so that i915_error_work_func can acquire them).
225817e1df07SDaniel Vetter 	 */
225917e1df07SDaniel Vetter 
226017e1df07SDaniel Vetter 	/* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
226117e1df07SDaniel Vetter 	for_each_ring(ring, dev_priv, i)
226217e1df07SDaniel Vetter 		wake_up_all(&ring->irq_queue);
226317e1df07SDaniel Vetter 
226417e1df07SDaniel Vetter 	/* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
226517e1df07SDaniel Vetter 	wake_up_all(&dev_priv->pending_flip_queue);
226617e1df07SDaniel Vetter 
226717e1df07SDaniel Vetter 	/*
226817e1df07SDaniel Vetter 	 * Signal tasks blocked in i915_gem_wait_for_error that the pending
226917e1df07SDaniel Vetter 	 * reset state is cleared.
227017e1df07SDaniel Vetter 	 */
227117e1df07SDaniel Vetter 	if (reset_completed)
227217e1df07SDaniel Vetter 		wake_up_all(&dev_priv->gpu_error.reset_queue);
227317e1df07SDaniel Vetter }
227417e1df07SDaniel Vetter 
22758a905236SJesse Barnes /**
2276b8d24a06SMika Kuoppala  * i915_reset_and_wakeup - do process context error handling work
22778a905236SJesse Barnes  *
22788a905236SJesse Barnes  * Fire an error uevent so userspace can see that a hang or error
22798a905236SJesse Barnes  * was detected.
22808a905236SJesse Barnes  */
2281b8d24a06SMika Kuoppala static void i915_reset_and_wakeup(struct drm_device *dev)
22828a905236SJesse Barnes {
2283b8d24a06SMika Kuoppala 	struct drm_i915_private *dev_priv = to_i915(dev);
2284b8d24a06SMika Kuoppala 	struct i915_gpu_error *error = &dev_priv->gpu_error;
2285cce723edSBen Widawsky 	char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
2286cce723edSBen Widawsky 	char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
2287cce723edSBen Widawsky 	char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
228817e1df07SDaniel Vetter 	int ret;
22898a905236SJesse Barnes 
22905bdebb18SDave Airlie 	kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event);
22918a905236SJesse Barnes 
22927db0ba24SDaniel Vetter 	/*
22937db0ba24SDaniel Vetter 	 * Note that there's only one work item which does gpu resets, so we
22947db0ba24SDaniel Vetter 	 * need not worry about concurrent gpu resets potentially incrementing
22957db0ba24SDaniel Vetter 	 * error->reset_counter twice. We only need to take care of another
22967db0ba24SDaniel Vetter 	 * racing irq/hangcheck declaring the gpu dead for a second time. A
22977db0ba24SDaniel Vetter 	 * quick check for that is good enough: schedule_work ensures the
22987db0ba24SDaniel Vetter 	 * correct ordering between hang detection and this work item, and since
22997db0ba24SDaniel Vetter 	 * the reset in-progress bit is only ever set by code outside of this
23007db0ba24SDaniel Vetter 	 * work we don't need to worry about any other races.
23017db0ba24SDaniel Vetter 	 */
23027db0ba24SDaniel Vetter 	if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
230344d98a61SZhao Yakui 		DRM_DEBUG_DRIVER("resetting chip\n");
23045bdebb18SDave Airlie 		kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
23057db0ba24SDaniel Vetter 				   reset_event);
23061f83fee0SDaniel Vetter 
230717e1df07SDaniel Vetter 		/*
2308f454c694SImre Deak 		 * In most cases it's guaranteed that we get here with an RPM
2309f454c694SImre Deak 		 * reference held, for example because there is a pending GPU
2310f454c694SImre Deak 		 * request that won't finish until the reset is done. This
2311f454c694SImre Deak 		 * isn't the case at least when we get here by doing a
2312f454c694SImre Deak 		 * simulated reset via debugs, so get an RPM reference.
2313f454c694SImre Deak 		 */
2314f454c694SImre Deak 		intel_runtime_pm_get(dev_priv);
23157514747dSVille Syrjälä 
23167514747dSVille Syrjälä 		intel_prepare_reset(dev);
23177514747dSVille Syrjälä 
2318f454c694SImre Deak 		/*
231917e1df07SDaniel Vetter 		 * All state reset _must_ be completed before we update the
232017e1df07SDaniel Vetter 		 * reset counter, for otherwise waiters might miss the reset
232117e1df07SDaniel Vetter 		 * pending state and not properly drop locks, resulting in
232217e1df07SDaniel Vetter 		 * deadlocks with the reset work.
232317e1df07SDaniel Vetter 		 */
2324f69061beSDaniel Vetter 		ret = i915_reset(dev);
2325f69061beSDaniel Vetter 
23267514747dSVille Syrjälä 		intel_finish_reset(dev);
232717e1df07SDaniel Vetter 
2328f454c694SImre Deak 		intel_runtime_pm_put(dev_priv);
2329f454c694SImre Deak 
2330f69061beSDaniel Vetter 		if (ret == 0) {
2331f69061beSDaniel Vetter 			/*
2332f69061beSDaniel Vetter 			 * After all the gem state is reset, increment the reset
2333f69061beSDaniel Vetter 			 * counter and wake up everyone waiting for the reset to
2334f69061beSDaniel Vetter 			 * complete.
2335f69061beSDaniel Vetter 			 *
2336f69061beSDaniel Vetter 			 * Since unlock operations are a one-sided barrier only,
2337f69061beSDaniel Vetter 			 * we need to insert a barrier here to order any seqno
2338f69061beSDaniel Vetter 			 * updates before
2339f69061beSDaniel Vetter 			 * the counter increment.
2340f69061beSDaniel Vetter 			 */
23414e857c58SPeter Zijlstra 			smp_mb__before_atomic();
2342f69061beSDaniel Vetter 			atomic_inc(&dev_priv->gpu_error.reset_counter);
2343f69061beSDaniel Vetter 
23445bdebb18SDave Airlie 			kobject_uevent_env(&dev->primary->kdev->kobj,
2345f69061beSDaniel Vetter 					   KOBJ_CHANGE, reset_done_event);
23461f83fee0SDaniel Vetter 		} else {
23472ac0f450SMika Kuoppala 			atomic_set_mask(I915_WEDGED, &error->reset_counter);
2348f316a42cSBen Gamari 		}
23491f83fee0SDaniel Vetter 
235017e1df07SDaniel Vetter 		/*
235117e1df07SDaniel Vetter 		 * Note: The wake_up also serves as a memory barrier so that
235217e1df07SDaniel Vetter 		 * waiters see the update value of the reset counter atomic_t.
235317e1df07SDaniel Vetter 		 */
235417e1df07SDaniel Vetter 		i915_error_wake_up(dev_priv, true);
2355f316a42cSBen Gamari 	}
23568a905236SJesse Barnes }
23578a905236SJesse Barnes 
235835aed2e6SChris Wilson static void i915_report_and_clear_eir(struct drm_device *dev)
2359c0e09200SDave Airlie {
23608a905236SJesse Barnes 	struct drm_i915_private *dev_priv = dev->dev_private;
2361bd9854f9SBen Widawsky 	uint32_t instdone[I915_NUM_INSTDONE_REG];
236263eeaf38SJesse Barnes 	u32 eir = I915_READ(EIR);
2363050ee91fSBen Widawsky 	int pipe, i;
236463eeaf38SJesse Barnes 
236535aed2e6SChris Wilson 	if (!eir)
236635aed2e6SChris Wilson 		return;
236763eeaf38SJesse Barnes 
2368a70491ccSJoe Perches 	pr_err("render error detected, EIR: 0x%08x\n", eir);
23698a905236SJesse Barnes 
2370bd9854f9SBen Widawsky 	i915_get_extra_instdone(dev, instdone);
2371bd9854f9SBen Widawsky 
23728a905236SJesse Barnes 	if (IS_G4X(dev)) {
23738a905236SJesse Barnes 		if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
23748a905236SJesse Barnes 			u32 ipeir = I915_READ(IPEIR_I965);
23758a905236SJesse Barnes 
2376a70491ccSJoe Perches 			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2377a70491ccSJoe Perches 			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
2378050ee91fSBen Widawsky 			for (i = 0; i < ARRAY_SIZE(instdone); i++)
2379050ee91fSBen Widawsky 				pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
2380a70491ccSJoe Perches 			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
2381a70491ccSJoe Perches 			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
23828a905236SJesse Barnes 			I915_WRITE(IPEIR_I965, ipeir);
23833143a2bfSChris Wilson 			POSTING_READ(IPEIR_I965);
23848a905236SJesse Barnes 		}
23858a905236SJesse Barnes 		if (eir & GM45_ERROR_PAGE_TABLE) {
23868a905236SJesse Barnes 			u32 pgtbl_err = I915_READ(PGTBL_ER);
2387a70491ccSJoe Perches 			pr_err("page table error\n");
2388a70491ccSJoe Perches 			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
23898a905236SJesse Barnes 			I915_WRITE(PGTBL_ER, pgtbl_err);
23903143a2bfSChris Wilson 			POSTING_READ(PGTBL_ER);
23918a905236SJesse Barnes 		}
23928a905236SJesse Barnes 	}
23938a905236SJesse Barnes 
2394a6c45cf0SChris Wilson 	if (!IS_GEN2(dev)) {
239563eeaf38SJesse Barnes 		if (eir & I915_ERROR_PAGE_TABLE) {
239663eeaf38SJesse Barnes 			u32 pgtbl_err = I915_READ(PGTBL_ER);
2397a70491ccSJoe Perches 			pr_err("page table error\n");
2398a70491ccSJoe Perches 			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
239963eeaf38SJesse Barnes 			I915_WRITE(PGTBL_ER, pgtbl_err);
24003143a2bfSChris Wilson 			POSTING_READ(PGTBL_ER);
240163eeaf38SJesse Barnes 		}
24028a905236SJesse Barnes 	}
24038a905236SJesse Barnes 
240463eeaf38SJesse Barnes 	if (eir & I915_ERROR_MEMORY_REFRESH) {
2405a70491ccSJoe Perches 		pr_err("memory refresh error:\n");
2406055e393fSDamien Lespiau 		for_each_pipe(dev_priv, pipe)
2407a70491ccSJoe Perches 			pr_err("pipe %c stat: 0x%08x\n",
24089db4a9c7SJesse Barnes 			       pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
240963eeaf38SJesse Barnes 		/* pipestat has already been acked */
241063eeaf38SJesse Barnes 	}
241163eeaf38SJesse Barnes 	if (eir & I915_ERROR_INSTRUCTION) {
2412a70491ccSJoe Perches 		pr_err("instruction error\n");
2413a70491ccSJoe Perches 		pr_err("  INSTPM: 0x%08x\n", I915_READ(INSTPM));
2414050ee91fSBen Widawsky 		for (i = 0; i < ARRAY_SIZE(instdone); i++)
2415050ee91fSBen Widawsky 			pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
2416a6c45cf0SChris Wilson 		if (INTEL_INFO(dev)->gen < 4) {
241763eeaf38SJesse Barnes 			u32 ipeir = I915_READ(IPEIR);
241863eeaf38SJesse Barnes 
2419a70491ccSJoe Perches 			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR));
2420a70491ccSJoe Perches 			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR));
2421a70491ccSJoe Perches 			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD));
242263eeaf38SJesse Barnes 			I915_WRITE(IPEIR, ipeir);
24233143a2bfSChris Wilson 			POSTING_READ(IPEIR);
242463eeaf38SJesse Barnes 		} else {
242563eeaf38SJesse Barnes 			u32 ipeir = I915_READ(IPEIR_I965);
242663eeaf38SJesse Barnes 
2427a70491ccSJoe Perches 			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2428a70491ccSJoe Perches 			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
2429a70491ccSJoe Perches 			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
2430a70491ccSJoe Perches 			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
243163eeaf38SJesse Barnes 			I915_WRITE(IPEIR_I965, ipeir);
24323143a2bfSChris Wilson 			POSTING_READ(IPEIR_I965);
243363eeaf38SJesse Barnes 		}
243463eeaf38SJesse Barnes 	}
243563eeaf38SJesse Barnes 
243663eeaf38SJesse Barnes 	I915_WRITE(EIR, eir);
24373143a2bfSChris Wilson 	POSTING_READ(EIR);
243863eeaf38SJesse Barnes 	eir = I915_READ(EIR);
243963eeaf38SJesse Barnes 	if (eir) {
244063eeaf38SJesse Barnes 		/*
244163eeaf38SJesse Barnes 		 * some errors might have become stuck,
244263eeaf38SJesse Barnes 		 * mask them.
244363eeaf38SJesse Barnes 		 */
244463eeaf38SJesse Barnes 		DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
244563eeaf38SJesse Barnes 		I915_WRITE(EMR, I915_READ(EMR) | eir);
244663eeaf38SJesse Barnes 		I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
244763eeaf38SJesse Barnes 	}
244835aed2e6SChris Wilson }
244935aed2e6SChris Wilson 
245035aed2e6SChris Wilson /**
2451b8d24a06SMika Kuoppala  * i915_handle_error - handle a gpu error
245235aed2e6SChris Wilson  * @dev: drm device
245335aed2e6SChris Wilson  *
2454b8d24a06SMika Kuoppala  * Do some basic checking of regsiter state at error time and
245535aed2e6SChris Wilson  * dump it to the syslog.  Also call i915_capture_error_state() to make
245635aed2e6SChris Wilson  * sure we get a record and make it available in debugfs.  Fire a uevent
245735aed2e6SChris Wilson  * so userspace knows something bad happened (should trigger collection
245835aed2e6SChris Wilson  * of a ring dump etc.).
245935aed2e6SChris Wilson  */
246058174462SMika Kuoppala void i915_handle_error(struct drm_device *dev, bool wedged,
246158174462SMika Kuoppala 		       const char *fmt, ...)
246235aed2e6SChris Wilson {
246335aed2e6SChris Wilson 	struct drm_i915_private *dev_priv = dev->dev_private;
246458174462SMika Kuoppala 	va_list args;
246558174462SMika Kuoppala 	char error_msg[80];
246635aed2e6SChris Wilson 
246758174462SMika Kuoppala 	va_start(args, fmt);
246858174462SMika Kuoppala 	vscnprintf(error_msg, sizeof(error_msg), fmt, args);
246958174462SMika Kuoppala 	va_end(args);
247058174462SMika Kuoppala 
247158174462SMika Kuoppala 	i915_capture_error_state(dev, wedged, error_msg);
247235aed2e6SChris Wilson 	i915_report_and_clear_eir(dev);
24738a905236SJesse Barnes 
2474ba1234d1SBen Gamari 	if (wedged) {
2475f69061beSDaniel Vetter 		atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
2476f69061beSDaniel Vetter 				&dev_priv->gpu_error.reset_counter);
2477ba1234d1SBen Gamari 
247811ed50ecSBen Gamari 		/*
2479b8d24a06SMika Kuoppala 		 * Wakeup waiting processes so that the reset function
2480b8d24a06SMika Kuoppala 		 * i915_reset_and_wakeup doesn't deadlock trying to grab
2481b8d24a06SMika Kuoppala 		 * various locks. By bumping the reset counter first, the woken
248217e1df07SDaniel Vetter 		 * processes will see a reset in progress and back off,
248317e1df07SDaniel Vetter 		 * releasing their locks and then wait for the reset completion.
248417e1df07SDaniel Vetter 		 * We must do this for _all_ gpu waiters that might hold locks
248517e1df07SDaniel Vetter 		 * that the reset work needs to acquire.
248617e1df07SDaniel Vetter 		 *
248717e1df07SDaniel Vetter 		 * Note: The wake_up serves as the required memory barrier to
248817e1df07SDaniel Vetter 		 * ensure that the waiters see the updated value of the reset
248917e1df07SDaniel Vetter 		 * counter atomic_t.
249011ed50ecSBen Gamari 		 */
249117e1df07SDaniel Vetter 		i915_error_wake_up(dev_priv, false);
249211ed50ecSBen Gamari 	}
249311ed50ecSBen Gamari 
2494b8d24a06SMika Kuoppala 	i915_reset_and_wakeup(dev);
24958a905236SJesse Barnes }
24968a905236SJesse Barnes 
249742f52ef8SKeith Packard /* Called from drm generic code, passed 'crtc' which
249842f52ef8SKeith Packard  * we use as a pipe index
249942f52ef8SKeith Packard  */
2500f71d4af4SJesse Barnes static int i915_enable_vblank(struct drm_device *dev, int pipe)
25010a3e67a4SJesse Barnes {
25022d1013ddSJani Nikula 	struct drm_i915_private *dev_priv = dev->dev_private;
2503e9d21d7fSKeith Packard 	unsigned long irqflags;
250471e0ffa5SJesse Barnes 
25051ec14ad3SChris Wilson 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2506f796cf8fSJesse Barnes 	if (INTEL_INFO(dev)->gen >= 4)
25077c463586SKeith Packard 		i915_enable_pipestat(dev_priv, pipe,
2508755e9019SImre Deak 				     PIPE_START_VBLANK_INTERRUPT_STATUS);
25090a3e67a4SJesse Barnes 	else
25107c463586SKeith Packard 		i915_enable_pipestat(dev_priv, pipe,
2511755e9019SImre Deak 				     PIPE_VBLANK_INTERRUPT_STATUS);
25121ec14ad3SChris Wilson 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
25138692d00eSChris Wilson 
25140a3e67a4SJesse Barnes 	return 0;
25150a3e67a4SJesse Barnes }
25160a3e67a4SJesse Barnes 
2517f71d4af4SJesse Barnes static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
2518f796cf8fSJesse Barnes {
25192d1013ddSJani Nikula 	struct drm_i915_private *dev_priv = dev->dev_private;
2520f796cf8fSJesse Barnes 	unsigned long irqflags;
2521b518421fSPaulo Zanoni 	uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
252240da17c2SDaniel Vetter 						     DE_PIPE_VBLANK(pipe);
2523f796cf8fSJesse Barnes 
2524f796cf8fSJesse Barnes 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2525b518421fSPaulo Zanoni 	ironlake_enable_display_irq(dev_priv, bit);
2526b1f14ad0SJesse Barnes 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2527b1f14ad0SJesse Barnes 
2528b1f14ad0SJesse Barnes 	return 0;
2529b1f14ad0SJesse Barnes }
2530b1f14ad0SJesse Barnes 
25317e231dbeSJesse Barnes static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
25327e231dbeSJesse Barnes {
25332d1013ddSJani Nikula 	struct drm_i915_private *dev_priv = dev->dev_private;
25347e231dbeSJesse Barnes 	unsigned long irqflags;
25357e231dbeSJesse Barnes 
25367e231dbeSJesse Barnes 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
253731acc7f5SJesse Barnes 	i915_enable_pipestat(dev_priv, pipe,
2538755e9019SImre Deak 			     PIPE_START_VBLANK_INTERRUPT_STATUS);
25397e231dbeSJesse Barnes 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
25407e231dbeSJesse Barnes 
25417e231dbeSJesse Barnes 	return 0;
25427e231dbeSJesse Barnes }
25437e231dbeSJesse Barnes 
2544abd58f01SBen Widawsky static int gen8_enable_vblank(struct drm_device *dev, int pipe)
2545abd58f01SBen Widawsky {
2546abd58f01SBen Widawsky 	struct drm_i915_private *dev_priv = dev->dev_private;
2547abd58f01SBen Widawsky 	unsigned long irqflags;
2548abd58f01SBen Widawsky 
2549abd58f01SBen Widawsky 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
25507167d7c6SDaniel Vetter 	dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK;
25517167d7c6SDaniel Vetter 	I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
2552abd58f01SBen Widawsky 	POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
2553abd58f01SBen Widawsky 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2554abd58f01SBen Widawsky 	return 0;
2555abd58f01SBen Widawsky }
2556abd58f01SBen Widawsky 
255742f52ef8SKeith Packard /* Called from drm generic code, passed 'crtc' which
255842f52ef8SKeith Packard  * we use as a pipe index
255942f52ef8SKeith Packard  */
2560f71d4af4SJesse Barnes static void i915_disable_vblank(struct drm_device *dev, int pipe)
25610a3e67a4SJesse Barnes {
25622d1013ddSJani Nikula 	struct drm_i915_private *dev_priv = dev->dev_private;
2563e9d21d7fSKeith Packard 	unsigned long irqflags;
25640a3e67a4SJesse Barnes 
25651ec14ad3SChris Wilson 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
25667c463586SKeith Packard 	i915_disable_pipestat(dev_priv, pipe,
2567755e9019SImre Deak 			      PIPE_VBLANK_INTERRUPT_STATUS |
2568755e9019SImre Deak 			      PIPE_START_VBLANK_INTERRUPT_STATUS);
25691ec14ad3SChris Wilson 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
25700a3e67a4SJesse Barnes }
25710a3e67a4SJesse Barnes 
2572f71d4af4SJesse Barnes static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
2573f796cf8fSJesse Barnes {
25742d1013ddSJani Nikula 	struct drm_i915_private *dev_priv = dev->dev_private;
2575f796cf8fSJesse Barnes 	unsigned long irqflags;
2576b518421fSPaulo Zanoni 	uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
257740da17c2SDaniel Vetter 						     DE_PIPE_VBLANK(pipe);
2578f796cf8fSJesse Barnes 
2579f796cf8fSJesse Barnes 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2580b518421fSPaulo Zanoni 	ironlake_disable_display_irq(dev_priv, bit);
2581b1f14ad0SJesse Barnes 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2582b1f14ad0SJesse Barnes }
2583b1f14ad0SJesse Barnes 
25847e231dbeSJesse Barnes static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
25857e231dbeSJesse Barnes {
25862d1013ddSJani Nikula 	struct drm_i915_private *dev_priv = dev->dev_private;
25877e231dbeSJesse Barnes 	unsigned long irqflags;
25887e231dbeSJesse Barnes 
25897e231dbeSJesse Barnes 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
259031acc7f5SJesse Barnes 	i915_disable_pipestat(dev_priv, pipe,
2591755e9019SImre Deak 			      PIPE_START_VBLANK_INTERRUPT_STATUS);
25927e231dbeSJesse Barnes 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
25937e231dbeSJesse Barnes }
25947e231dbeSJesse Barnes 
2595abd58f01SBen Widawsky static void gen8_disable_vblank(struct drm_device *dev, int pipe)
2596abd58f01SBen Widawsky {
2597abd58f01SBen Widawsky 	struct drm_i915_private *dev_priv = dev->dev_private;
2598abd58f01SBen Widawsky 	unsigned long irqflags;
2599abd58f01SBen Widawsky 
2600abd58f01SBen Widawsky 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
26017167d7c6SDaniel Vetter 	dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK;
26027167d7c6SDaniel Vetter 	I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
2603abd58f01SBen Widawsky 	POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
2604abd58f01SBen Widawsky 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2605abd58f01SBen Widawsky }
2606abd58f01SBen Widawsky 
26079107e9d2SChris Wilson static bool
260894f7bbe1STomas Elf ring_idle(struct intel_engine_cs *ring, u32 seqno)
2609893eead0SChris Wilson {
26109107e9d2SChris Wilson 	return (list_empty(&ring->request_list) ||
261194f7bbe1STomas Elf 		i915_seqno_passed(seqno, ring->last_submitted_seqno));
2612f65d9421SBen Gamari }
2613f65d9421SBen Gamari 
2614a028c4b0SDaniel Vetter static bool
2615a028c4b0SDaniel Vetter ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
2616a028c4b0SDaniel Vetter {
2617a028c4b0SDaniel Vetter 	if (INTEL_INFO(dev)->gen >= 8) {
2618a6cdb93aSRodrigo Vivi 		return (ipehr >> 23) == 0x1c;
2619a028c4b0SDaniel Vetter 	} else {
2620a028c4b0SDaniel Vetter 		ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
2621a028c4b0SDaniel Vetter 		return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
2622a028c4b0SDaniel Vetter 				 MI_SEMAPHORE_REGISTER);
2623a028c4b0SDaniel Vetter 	}
2624a028c4b0SDaniel Vetter }
2625a028c4b0SDaniel Vetter 
2626a4872ba6SOscar Mateo static struct intel_engine_cs *
2627a6cdb93aSRodrigo Vivi semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr, u64 offset)
2628921d42eaSDaniel Vetter {
2629921d42eaSDaniel Vetter 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
2630a4872ba6SOscar Mateo 	struct intel_engine_cs *signaller;
2631921d42eaSDaniel Vetter 	int i;
2632921d42eaSDaniel Vetter 
2633921d42eaSDaniel Vetter 	if (INTEL_INFO(dev_priv->dev)->gen >= 8) {
2634a6cdb93aSRodrigo Vivi 		for_each_ring(signaller, dev_priv, i) {
2635a6cdb93aSRodrigo Vivi 			if (ring == signaller)
2636a6cdb93aSRodrigo Vivi 				continue;
2637a6cdb93aSRodrigo Vivi 
2638a6cdb93aSRodrigo Vivi 			if (offset == signaller->semaphore.signal_ggtt[ring->id])
2639a6cdb93aSRodrigo Vivi 				return signaller;
2640a6cdb93aSRodrigo Vivi 		}
2641921d42eaSDaniel Vetter 	} else {
2642921d42eaSDaniel Vetter 		u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
2643921d42eaSDaniel Vetter 
2644921d42eaSDaniel Vetter 		for_each_ring(signaller, dev_priv, i) {
2645921d42eaSDaniel Vetter 			if(ring == signaller)
2646921d42eaSDaniel Vetter 				continue;
2647921d42eaSDaniel Vetter 
2648ebc348b2SBen Widawsky 			if (sync_bits == signaller->semaphore.mbox.wait[ring->id])
2649921d42eaSDaniel Vetter 				return signaller;
2650921d42eaSDaniel Vetter 		}
2651921d42eaSDaniel Vetter 	}
2652921d42eaSDaniel Vetter 
2653a6cdb93aSRodrigo Vivi 	DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n",
2654a6cdb93aSRodrigo Vivi 		  ring->id, ipehr, offset);
2655921d42eaSDaniel Vetter 
2656921d42eaSDaniel Vetter 	return NULL;
2657921d42eaSDaniel Vetter }
2658921d42eaSDaniel Vetter 
2659a4872ba6SOscar Mateo static struct intel_engine_cs *
2660a4872ba6SOscar Mateo semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
2661a24a11e6SChris Wilson {
2662a24a11e6SChris Wilson 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
266388fe429dSDaniel Vetter 	u32 cmd, ipehr, head;
2664a6cdb93aSRodrigo Vivi 	u64 offset = 0;
2665a6cdb93aSRodrigo Vivi 	int i, backwards;
2666a24a11e6SChris Wilson 
2667a24a11e6SChris Wilson 	ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
2668a028c4b0SDaniel Vetter 	if (!ipehr_is_semaphore_wait(ring->dev, ipehr))
26696274f212SChris Wilson 		return NULL;
2670a24a11e6SChris Wilson 
267188fe429dSDaniel Vetter 	/*
267288fe429dSDaniel Vetter 	 * HEAD is likely pointing to the dword after the actual command,
267388fe429dSDaniel Vetter 	 * so scan backwards until we find the MBOX. But limit it to just 3
2674a6cdb93aSRodrigo Vivi 	 * or 4 dwords depending on the semaphore wait command size.
2675a6cdb93aSRodrigo Vivi 	 * Note that we don't care about ACTHD here since that might
267688fe429dSDaniel Vetter 	 * point at at batch, and semaphores are always emitted into the
267788fe429dSDaniel Vetter 	 * ringbuffer itself.
2678a24a11e6SChris Wilson 	 */
267988fe429dSDaniel Vetter 	head = I915_READ_HEAD(ring) & HEAD_ADDR;
2680a6cdb93aSRodrigo Vivi 	backwards = (INTEL_INFO(ring->dev)->gen >= 8) ? 5 : 4;
268188fe429dSDaniel Vetter 
2682a6cdb93aSRodrigo Vivi 	for (i = backwards; i; --i) {
268388fe429dSDaniel Vetter 		/*
268488fe429dSDaniel Vetter 		 * Be paranoid and presume the hw has gone off into the wild -
268588fe429dSDaniel Vetter 		 * our ring is smaller than what the hardware (and hence
268688fe429dSDaniel Vetter 		 * HEAD_ADDR) allows. Also handles wrap-around.
268788fe429dSDaniel Vetter 		 */
2688ee1b1e5eSOscar Mateo 		head &= ring->buffer->size - 1;
268988fe429dSDaniel Vetter 
269088fe429dSDaniel Vetter 		/* This here seems to blow up */
2691ee1b1e5eSOscar Mateo 		cmd = ioread32(ring->buffer->virtual_start + head);
2692a24a11e6SChris Wilson 		if (cmd == ipehr)
2693a24a11e6SChris Wilson 			break;
2694a24a11e6SChris Wilson 
269588fe429dSDaniel Vetter 		head -= 4;
269688fe429dSDaniel Vetter 	}
2697a24a11e6SChris Wilson 
269888fe429dSDaniel Vetter 	if (!i)
269988fe429dSDaniel Vetter 		return NULL;
270088fe429dSDaniel Vetter 
2701ee1b1e5eSOscar Mateo 	*seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1;
2702a6cdb93aSRodrigo Vivi 	if (INTEL_INFO(ring->dev)->gen >= 8) {
2703a6cdb93aSRodrigo Vivi 		offset = ioread32(ring->buffer->virtual_start + head + 12);
2704a6cdb93aSRodrigo Vivi 		offset <<= 32;
2705a6cdb93aSRodrigo Vivi 		offset = ioread32(ring->buffer->virtual_start + head + 8);
2706a6cdb93aSRodrigo Vivi 	}
2707a6cdb93aSRodrigo Vivi 	return semaphore_wait_to_signaller_ring(ring, ipehr, offset);
2708a24a11e6SChris Wilson }
2709a24a11e6SChris Wilson 
2710a4872ba6SOscar Mateo static int semaphore_passed(struct intel_engine_cs *ring)
27116274f212SChris Wilson {
27126274f212SChris Wilson 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
2713a4872ba6SOscar Mateo 	struct intel_engine_cs *signaller;
2714a0d036b0SChris Wilson 	u32 seqno;
27156274f212SChris Wilson 
27164be17381SChris Wilson 	ring->hangcheck.deadlock++;
27176274f212SChris Wilson 
27186274f212SChris Wilson 	signaller = semaphore_waits_for(ring, &seqno);
27194be17381SChris Wilson 	if (signaller == NULL)
27204be17381SChris Wilson 		return -1;
27214be17381SChris Wilson 
27224be17381SChris Wilson 	/* Prevent pathological recursion due to driver bugs */
27234be17381SChris Wilson 	if (signaller->hangcheck.deadlock >= I915_NUM_RINGS)
27246274f212SChris Wilson 		return -1;
27256274f212SChris Wilson 
27264be17381SChris Wilson 	if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno))
27274be17381SChris Wilson 		return 1;
27284be17381SChris Wilson 
2729a0d036b0SChris Wilson 	/* cursory check for an unkickable deadlock */
2730a0d036b0SChris Wilson 	if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
2731a0d036b0SChris Wilson 	    semaphore_passed(signaller) < 0)
27324be17381SChris Wilson 		return -1;
27334be17381SChris Wilson 
27344be17381SChris Wilson 	return 0;
27356274f212SChris Wilson }
27366274f212SChris Wilson 
27376274f212SChris Wilson static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
27386274f212SChris Wilson {
2739a4872ba6SOscar Mateo 	struct intel_engine_cs *ring;
27406274f212SChris Wilson 	int i;
27416274f212SChris Wilson 
27426274f212SChris Wilson 	for_each_ring(ring, dev_priv, i)
27434be17381SChris Wilson 		ring->hangcheck.deadlock = 0;
27446274f212SChris Wilson }
27456274f212SChris Wilson 
2746ad8beaeaSMika Kuoppala static enum intel_ring_hangcheck_action
2747a4872ba6SOscar Mateo ring_stuck(struct intel_engine_cs *ring, u64 acthd)
27481ec14ad3SChris Wilson {
27491ec14ad3SChris Wilson 	struct drm_device *dev = ring->dev;
27501ec14ad3SChris Wilson 	struct drm_i915_private *dev_priv = dev->dev_private;
27519107e9d2SChris Wilson 	u32 tmp;
27529107e9d2SChris Wilson 
2753f260fe7bSMika Kuoppala 	if (acthd != ring->hangcheck.acthd) {
2754f260fe7bSMika Kuoppala 		if (acthd > ring->hangcheck.max_acthd) {
2755f260fe7bSMika Kuoppala 			ring->hangcheck.max_acthd = acthd;
2756f2f4d82fSJani Nikula 			return HANGCHECK_ACTIVE;
2757f260fe7bSMika Kuoppala 		}
2758f260fe7bSMika Kuoppala 
2759f260fe7bSMika Kuoppala 		return HANGCHECK_ACTIVE_LOOP;
2760f260fe7bSMika Kuoppala 	}
27616274f212SChris Wilson 
27629107e9d2SChris Wilson 	if (IS_GEN2(dev))
2763f2f4d82fSJani Nikula 		return HANGCHECK_HUNG;
27649107e9d2SChris Wilson 
27659107e9d2SChris Wilson 	/* Is the chip hanging on a WAIT_FOR_EVENT?
27669107e9d2SChris Wilson 	 * If so we can simply poke the RB_WAIT bit
27679107e9d2SChris Wilson 	 * and break the hang. This should work on
27689107e9d2SChris Wilson 	 * all but the second generation chipsets.
27699107e9d2SChris Wilson 	 */
27709107e9d2SChris Wilson 	tmp = I915_READ_CTL(ring);
27711ec14ad3SChris Wilson 	if (tmp & RING_WAIT) {
277258174462SMika Kuoppala 		i915_handle_error(dev, false,
277358174462SMika Kuoppala 				  "Kicking stuck wait on %s",
27741ec14ad3SChris Wilson 				  ring->name);
27751ec14ad3SChris Wilson 		I915_WRITE_CTL(ring, tmp);
2776f2f4d82fSJani Nikula 		return HANGCHECK_KICK;
27771ec14ad3SChris Wilson 	}
2778a24a11e6SChris Wilson 
27796274f212SChris Wilson 	if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
27806274f212SChris Wilson 		switch (semaphore_passed(ring)) {
27816274f212SChris Wilson 		default:
2782f2f4d82fSJani Nikula 			return HANGCHECK_HUNG;
27836274f212SChris Wilson 		case 1:
278458174462SMika Kuoppala 			i915_handle_error(dev, false,
278558174462SMika Kuoppala 					  "Kicking stuck semaphore on %s",
2786a24a11e6SChris Wilson 					  ring->name);
2787a24a11e6SChris Wilson 			I915_WRITE_CTL(ring, tmp);
2788f2f4d82fSJani Nikula 			return HANGCHECK_KICK;
27896274f212SChris Wilson 		case 0:
2790f2f4d82fSJani Nikula 			return HANGCHECK_WAIT;
27916274f212SChris Wilson 		}
27929107e9d2SChris Wilson 	}
27939107e9d2SChris Wilson 
2794f2f4d82fSJani Nikula 	return HANGCHECK_HUNG;
2795a24a11e6SChris Wilson }
2796d1e61e7fSChris Wilson 
2797737b1506SChris Wilson /*
2798f65d9421SBen Gamari  * This is called when the chip hasn't reported back with completed
279905407ff8SMika Kuoppala  * batchbuffers in a long time. We keep track per ring seqno progress and
280005407ff8SMika Kuoppala  * if there are no progress, hangcheck score for that ring is increased.
280105407ff8SMika Kuoppala  * Further, acthd is inspected to see if the ring is stuck. On stuck case
280205407ff8SMika Kuoppala  * we kick the ring. If we see no progress on three subsequent calls
280305407ff8SMika Kuoppala  * we assume chip is wedged and try to fix it by resetting the chip.
2804f65d9421SBen Gamari  */
2805737b1506SChris Wilson static void i915_hangcheck_elapsed(struct work_struct *work)
2806f65d9421SBen Gamari {
2807737b1506SChris Wilson 	struct drm_i915_private *dev_priv =
2808737b1506SChris Wilson 		container_of(work, typeof(*dev_priv),
2809737b1506SChris Wilson 			     gpu_error.hangcheck_work.work);
2810737b1506SChris Wilson 	struct drm_device *dev = dev_priv->dev;
2811a4872ba6SOscar Mateo 	struct intel_engine_cs *ring;
2812b4519513SChris Wilson 	int i;
281305407ff8SMika Kuoppala 	int busy_count = 0, rings_hung = 0;
28149107e9d2SChris Wilson 	bool stuck[I915_NUM_RINGS] = { 0 };
28159107e9d2SChris Wilson #define BUSY 1
28169107e9d2SChris Wilson #define KICK 5
28179107e9d2SChris Wilson #define HUNG 20
2818893eead0SChris Wilson 
2819d330a953SJani Nikula 	if (!i915.enable_hangcheck)
28203e0dc6b0SBen Widawsky 		return;
28213e0dc6b0SBen Widawsky 
2822b4519513SChris Wilson 	for_each_ring(ring, dev_priv, i) {
282350877445SChris Wilson 		u64 acthd;
282450877445SChris Wilson 		u32 seqno;
28259107e9d2SChris Wilson 		bool busy = true;
2826b4519513SChris Wilson 
28276274f212SChris Wilson 		semaphore_clear_deadlocks(dev_priv);
28286274f212SChris Wilson 
282905407ff8SMika Kuoppala 		seqno = ring->get_seqno(ring, false);
283005407ff8SMika Kuoppala 		acthd = intel_ring_get_active_head(ring);
283105407ff8SMika Kuoppala 
283205407ff8SMika Kuoppala 		if (ring->hangcheck.seqno == seqno) {
283394f7bbe1STomas Elf 			if (ring_idle(ring, seqno)) {
2834da661464SMika Kuoppala 				ring->hangcheck.action = HANGCHECK_IDLE;
2835da661464SMika Kuoppala 
28369107e9d2SChris Wilson 				if (waitqueue_active(&ring->irq_queue)) {
28379107e9d2SChris Wilson 					/* Issue a wake-up to catch stuck h/w. */
2838094f9a54SChris Wilson 					if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) {
2839f4adcd24SDaniel Vetter 						if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)))
28409107e9d2SChris Wilson 							DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
28419107e9d2SChris Wilson 								  ring->name);
2842f4adcd24SDaniel Vetter 						else
2843f4adcd24SDaniel Vetter 							DRM_INFO("Fake missed irq on %s\n",
2844f4adcd24SDaniel Vetter 								 ring->name);
28459107e9d2SChris Wilson 						wake_up_all(&ring->irq_queue);
2846094f9a54SChris Wilson 					}
2847094f9a54SChris Wilson 					/* Safeguard against driver failure */
2848094f9a54SChris Wilson 					ring->hangcheck.score += BUSY;
28499107e9d2SChris Wilson 				} else
28509107e9d2SChris Wilson 					busy = false;
285105407ff8SMika Kuoppala 			} else {
28526274f212SChris Wilson 				/* We always increment the hangcheck score
28536274f212SChris Wilson 				 * if the ring is busy and still processing
28546274f212SChris Wilson 				 * the same request, so that no single request
28556274f212SChris Wilson 				 * can run indefinitely (such as a chain of
28566274f212SChris Wilson 				 * batches). The only time we do not increment
28576274f212SChris Wilson 				 * the hangcheck score on this ring, if this
28586274f212SChris Wilson 				 * ring is in a legitimate wait for another
28596274f212SChris Wilson 				 * ring. In that case the waiting ring is a
28606274f212SChris Wilson 				 * victim and we want to be sure we catch the
28616274f212SChris Wilson 				 * right culprit. Then every time we do kick
28626274f212SChris Wilson 				 * the ring, add a small increment to the
28636274f212SChris Wilson 				 * score so that we can catch a batch that is
28646274f212SChris Wilson 				 * being repeatedly kicked and so responsible
28656274f212SChris Wilson 				 * for stalling the machine.
28669107e9d2SChris Wilson 				 */
2867ad8beaeaSMika Kuoppala 				ring->hangcheck.action = ring_stuck(ring,
2868ad8beaeaSMika Kuoppala 								    acthd);
2869ad8beaeaSMika Kuoppala 
2870ad8beaeaSMika Kuoppala 				switch (ring->hangcheck.action) {
2871da661464SMika Kuoppala 				case HANGCHECK_IDLE:
2872f2f4d82fSJani Nikula 				case HANGCHECK_WAIT:
2873f2f4d82fSJani Nikula 				case HANGCHECK_ACTIVE:
2874f260fe7bSMika Kuoppala 					break;
2875f260fe7bSMika Kuoppala 				case HANGCHECK_ACTIVE_LOOP:
2876ea04cb31SJani Nikula 					ring->hangcheck.score += BUSY;
28776274f212SChris Wilson 					break;
2878f2f4d82fSJani Nikula 				case HANGCHECK_KICK:
2879ea04cb31SJani Nikula 					ring->hangcheck.score += KICK;
28806274f212SChris Wilson 					break;
2881f2f4d82fSJani Nikula 				case HANGCHECK_HUNG:
2882ea04cb31SJani Nikula 					ring->hangcheck.score += HUNG;
28836274f212SChris Wilson 					stuck[i] = true;
28846274f212SChris Wilson 					break;
28856274f212SChris Wilson 				}
288605407ff8SMika Kuoppala 			}
28879107e9d2SChris Wilson 		} else {
2888da661464SMika Kuoppala 			ring->hangcheck.action = HANGCHECK_ACTIVE;
2889da661464SMika Kuoppala 
28909107e9d2SChris Wilson 			/* Gradually reduce the count so that we catch DoS
28919107e9d2SChris Wilson 			 * attempts across multiple batches.
28929107e9d2SChris Wilson 			 */
28939107e9d2SChris Wilson 			if (ring->hangcheck.score > 0)
28949107e9d2SChris Wilson 				ring->hangcheck.score--;
2895f260fe7bSMika Kuoppala 
2896f260fe7bSMika Kuoppala 			ring->hangcheck.acthd = ring->hangcheck.max_acthd = 0;
2897cbb465e7SChris Wilson 		}
2898f65d9421SBen Gamari 
289905407ff8SMika Kuoppala 		ring->hangcheck.seqno = seqno;
290005407ff8SMika Kuoppala 		ring->hangcheck.acthd = acthd;
29019107e9d2SChris Wilson 		busy_count += busy;
290205407ff8SMika Kuoppala 	}
290305407ff8SMika Kuoppala 
290405407ff8SMika Kuoppala 	for_each_ring(ring, dev_priv, i) {
2905b6b0fac0SMika Kuoppala 		if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
2906b8d88d1dSDaniel Vetter 			DRM_INFO("%s on %s\n",
290705407ff8SMika Kuoppala 				 stuck[i] ? "stuck" : "no progress",
2908a43adf07SChris Wilson 				 ring->name);
2909a43adf07SChris Wilson 			rings_hung++;
291005407ff8SMika Kuoppala 		}
291105407ff8SMika Kuoppala 	}
291205407ff8SMika Kuoppala 
291305407ff8SMika Kuoppala 	if (rings_hung)
291458174462SMika Kuoppala 		return i915_handle_error(dev, true, "Ring hung");
291505407ff8SMika Kuoppala 
291605407ff8SMika Kuoppala 	if (busy_count)
291705407ff8SMika Kuoppala 		/* Reset timer case chip hangs without another request
291805407ff8SMika Kuoppala 		 * being added */
291910cd45b6SMika Kuoppala 		i915_queue_hangcheck(dev);
292010cd45b6SMika Kuoppala }
292110cd45b6SMika Kuoppala 
292210cd45b6SMika Kuoppala void i915_queue_hangcheck(struct drm_device *dev)
292310cd45b6SMika Kuoppala {
2924737b1506SChris Wilson 	struct i915_gpu_error *e = &to_i915(dev)->gpu_error;
2925672e7b7cSChris Wilson 
2926d330a953SJani Nikula 	if (!i915.enable_hangcheck)
292710cd45b6SMika Kuoppala 		return;
292810cd45b6SMika Kuoppala 
2929737b1506SChris Wilson 	/* Don't continually defer the hangcheck so that it is always run at
2930737b1506SChris Wilson 	 * least once after work has been scheduled on any ring. Otherwise,
2931737b1506SChris Wilson 	 * we will ignore a hung ring if a second ring is kept busy.
2932737b1506SChris Wilson 	 */
2933737b1506SChris Wilson 
2934737b1506SChris Wilson 	queue_delayed_work(e->hangcheck_wq, &e->hangcheck_work,
2935737b1506SChris Wilson 			   round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES));
2936f65d9421SBen Gamari }
2937f65d9421SBen Gamari 
29381c69eb42SPaulo Zanoni static void ibx_irq_reset(struct drm_device *dev)
293991738a95SPaulo Zanoni {
294091738a95SPaulo Zanoni 	struct drm_i915_private *dev_priv = dev->dev_private;
294191738a95SPaulo Zanoni 
294291738a95SPaulo Zanoni 	if (HAS_PCH_NOP(dev))
294391738a95SPaulo Zanoni 		return;
294491738a95SPaulo Zanoni 
2945f86f3fb0SPaulo Zanoni 	GEN5_IRQ_RESET(SDE);
2946105b122eSPaulo Zanoni 
2947105b122eSPaulo Zanoni 	if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
2948105b122eSPaulo Zanoni 		I915_WRITE(SERR_INT, 0xffffffff);
2949622364b6SPaulo Zanoni }
2950105b122eSPaulo Zanoni 
295191738a95SPaulo Zanoni /*
2952622364b6SPaulo Zanoni  * SDEIER is also touched by the interrupt handler to work around missed PCH
2953622364b6SPaulo Zanoni  * interrupts. Hence we can't update it after the interrupt handler is enabled -
2954622364b6SPaulo Zanoni  * instead we unconditionally enable all PCH interrupt sources here, but then
2955622364b6SPaulo Zanoni  * only unmask them as needed with SDEIMR.
2956622364b6SPaulo Zanoni  *
2957622364b6SPaulo Zanoni  * This function needs to be called before interrupts are enabled.
295891738a95SPaulo Zanoni  */
2959622364b6SPaulo Zanoni static void ibx_irq_pre_postinstall(struct drm_device *dev)
2960622364b6SPaulo Zanoni {
2961622364b6SPaulo Zanoni 	struct drm_i915_private *dev_priv = dev->dev_private;
2962622364b6SPaulo Zanoni 
2963622364b6SPaulo Zanoni 	if (HAS_PCH_NOP(dev))
2964622364b6SPaulo Zanoni 		return;
2965622364b6SPaulo Zanoni 
2966622364b6SPaulo Zanoni 	WARN_ON(I915_READ(SDEIER) != 0);
296791738a95SPaulo Zanoni 	I915_WRITE(SDEIER, 0xffffffff);
296891738a95SPaulo Zanoni 	POSTING_READ(SDEIER);
296991738a95SPaulo Zanoni }
297091738a95SPaulo Zanoni 
29717c4d664eSPaulo Zanoni static void gen5_gt_irq_reset(struct drm_device *dev)
2972d18ea1b5SDaniel Vetter {
2973d18ea1b5SDaniel Vetter 	struct drm_i915_private *dev_priv = dev->dev_private;
2974d18ea1b5SDaniel Vetter 
2975f86f3fb0SPaulo Zanoni 	GEN5_IRQ_RESET(GT);
2976a9d356a6SPaulo Zanoni 	if (INTEL_INFO(dev)->gen >= 6)
2977f86f3fb0SPaulo Zanoni 		GEN5_IRQ_RESET(GEN6_PM);
2978d18ea1b5SDaniel Vetter }
2979d18ea1b5SDaniel Vetter 
2980c0e09200SDave Airlie /* drm_dma.h hooks
2981c0e09200SDave Airlie */
2982be30b29fSPaulo Zanoni static void ironlake_irq_reset(struct drm_device *dev)
2983036a4a7dSZhenyu Wang {
29842d1013ddSJani Nikula 	struct drm_i915_private *dev_priv = dev->dev_private;
2985036a4a7dSZhenyu Wang 
29860c841212SPaulo Zanoni 	I915_WRITE(HWSTAM, 0xffffffff);
2987bdfcdb63SDaniel Vetter 
2988f86f3fb0SPaulo Zanoni 	GEN5_IRQ_RESET(DE);
2989c6d954c1SPaulo Zanoni 	if (IS_GEN7(dev))
2990c6d954c1SPaulo Zanoni 		I915_WRITE(GEN7_ERR_INT, 0xffffffff);
2991036a4a7dSZhenyu Wang 
29927c4d664eSPaulo Zanoni 	gen5_gt_irq_reset(dev);
2993c650156aSZhenyu Wang 
29941c69eb42SPaulo Zanoni 	ibx_irq_reset(dev);
29957d99163dSBen Widawsky }
29967d99163dSBen Widawsky 
299770591a41SVille Syrjälä static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
299870591a41SVille Syrjälä {
299970591a41SVille Syrjälä 	enum pipe pipe;
300070591a41SVille Syrjälä 
300170591a41SVille Syrjälä 	I915_WRITE(PORT_HOTPLUG_EN, 0);
300270591a41SVille Syrjälä 	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
300370591a41SVille Syrjälä 
300470591a41SVille Syrjälä 	for_each_pipe(dev_priv, pipe)
300570591a41SVille Syrjälä 		I915_WRITE(PIPESTAT(pipe), 0xffff);
300670591a41SVille Syrjälä 
300770591a41SVille Syrjälä 	GEN5_IRQ_RESET(VLV_);
300870591a41SVille Syrjälä }
300970591a41SVille Syrjälä 
30107e231dbeSJesse Barnes static void valleyview_irq_preinstall(struct drm_device *dev)
30117e231dbeSJesse Barnes {
30122d1013ddSJani Nikula 	struct drm_i915_private *dev_priv = dev->dev_private;
30137e231dbeSJesse Barnes 
30147e231dbeSJesse Barnes 	/* VLV magic */
30157e231dbeSJesse Barnes 	I915_WRITE(VLV_IMR, 0);
30167e231dbeSJesse Barnes 	I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
30177e231dbeSJesse Barnes 	I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
30187e231dbeSJesse Barnes 	I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
30197e231dbeSJesse Barnes 
30207c4d664eSPaulo Zanoni 	gen5_gt_irq_reset(dev);
30217e231dbeSJesse Barnes 
30227c4cde39SVille Syrjälä 	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
30237e231dbeSJesse Barnes 
302470591a41SVille Syrjälä 	vlv_display_irq_reset(dev_priv);
30257e231dbeSJesse Barnes }
30267e231dbeSJesse Barnes 
3027d6e3cca3SDaniel Vetter static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
3028d6e3cca3SDaniel Vetter {
3029d6e3cca3SDaniel Vetter 	GEN8_IRQ_RESET_NDX(GT, 0);
3030d6e3cca3SDaniel Vetter 	GEN8_IRQ_RESET_NDX(GT, 1);
3031d6e3cca3SDaniel Vetter 	GEN8_IRQ_RESET_NDX(GT, 2);
3032d6e3cca3SDaniel Vetter 	GEN8_IRQ_RESET_NDX(GT, 3);
3033d6e3cca3SDaniel Vetter }
3034d6e3cca3SDaniel Vetter 
3035823f6b38SPaulo Zanoni static void gen8_irq_reset(struct drm_device *dev)
3036abd58f01SBen Widawsky {
3037abd58f01SBen Widawsky 	struct drm_i915_private *dev_priv = dev->dev_private;
3038abd58f01SBen Widawsky 	int pipe;
3039abd58f01SBen Widawsky 
3040abd58f01SBen Widawsky 	I915_WRITE(GEN8_MASTER_IRQ, 0);
3041abd58f01SBen Widawsky 	POSTING_READ(GEN8_MASTER_IRQ);
3042abd58f01SBen Widawsky 
3043d6e3cca3SDaniel Vetter 	gen8_gt_irq_reset(dev_priv);
3044abd58f01SBen Widawsky 
3045055e393fSDamien Lespiau 	for_each_pipe(dev_priv, pipe)
3046f458ebbcSDaniel Vetter 		if (intel_display_power_is_enabled(dev_priv,
3047813bde43SPaulo Zanoni 						   POWER_DOMAIN_PIPE(pipe)))
3048f86f3fb0SPaulo Zanoni 			GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
3049abd58f01SBen Widawsky 
3050f86f3fb0SPaulo Zanoni 	GEN5_IRQ_RESET(GEN8_DE_PORT_);
3051f86f3fb0SPaulo Zanoni 	GEN5_IRQ_RESET(GEN8_DE_MISC_);
3052f86f3fb0SPaulo Zanoni 	GEN5_IRQ_RESET(GEN8_PCU_);
3053abd58f01SBen Widawsky 
3054266ea3d9SShashank Sharma 	if (HAS_PCH_SPLIT(dev))
30551c69eb42SPaulo Zanoni 		ibx_irq_reset(dev);
3056abd58f01SBen Widawsky }
3057abd58f01SBen Widawsky 
30584c6c03beSDamien Lespiau void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
30594c6c03beSDamien Lespiau 				     unsigned int pipe_mask)
3060d49bdb0eSPaulo Zanoni {
30611180e206SPaulo Zanoni 	uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
3062d49bdb0eSPaulo Zanoni 
306313321786SDaniel Vetter 	spin_lock_irq(&dev_priv->irq_lock);
3064d14c0343SDamien Lespiau 	if (pipe_mask & 1 << PIPE_A)
3065d14c0343SDamien Lespiau 		GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_A,
3066d14c0343SDamien Lespiau 				  dev_priv->de_irq_mask[PIPE_A],
3067d14c0343SDamien Lespiau 				  ~dev_priv->de_irq_mask[PIPE_A] | extra_ier);
30684c6c03beSDamien Lespiau 	if (pipe_mask & 1 << PIPE_B)
30694c6c03beSDamien Lespiau 		GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B,
30704c6c03beSDamien Lespiau 				  dev_priv->de_irq_mask[PIPE_B],
30711180e206SPaulo Zanoni 				  ~dev_priv->de_irq_mask[PIPE_B] | extra_ier);
30724c6c03beSDamien Lespiau 	if (pipe_mask & 1 << PIPE_C)
30734c6c03beSDamien Lespiau 		GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C,
30744c6c03beSDamien Lespiau 				  dev_priv->de_irq_mask[PIPE_C],
30751180e206SPaulo Zanoni 				  ~dev_priv->de_irq_mask[PIPE_C] | extra_ier);
307613321786SDaniel Vetter 	spin_unlock_irq(&dev_priv->irq_lock);
3077d49bdb0eSPaulo Zanoni }
3078d49bdb0eSPaulo Zanoni 
307943f328d7SVille Syrjälä static void cherryview_irq_preinstall(struct drm_device *dev)
308043f328d7SVille Syrjälä {
308143f328d7SVille Syrjälä 	struct drm_i915_private *dev_priv = dev->dev_private;
308243f328d7SVille Syrjälä 
308343f328d7SVille Syrjälä 	I915_WRITE(GEN8_MASTER_IRQ, 0);
308443f328d7SVille Syrjälä 	POSTING_READ(GEN8_MASTER_IRQ);
308543f328d7SVille Syrjälä 
3086d6e3cca3SDaniel Vetter 	gen8_gt_irq_reset(dev_priv);
308743f328d7SVille Syrjälä 
308843f328d7SVille Syrjälä 	GEN5_IRQ_RESET(GEN8_PCU_);
308943f328d7SVille Syrjälä 
309043f328d7SVille Syrjälä 	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
309143f328d7SVille Syrjälä 
309270591a41SVille Syrjälä 	vlv_display_irq_reset(dev_priv);
309343f328d7SVille Syrjälä }
309443f328d7SVille Syrjälä 
309587a02106SVille Syrjälä static u32 intel_hpd_enabled_irqs(struct drm_device *dev,
309687a02106SVille Syrjälä 				  const u32 hpd[HPD_NUM_PINS])
309787a02106SVille Syrjälä {
309887a02106SVille Syrjälä 	struct drm_i915_private *dev_priv = to_i915(dev);
309987a02106SVille Syrjälä 	struct intel_encoder *encoder;
310087a02106SVille Syrjälä 	u32 enabled_irqs = 0;
310187a02106SVille Syrjälä 
310287a02106SVille Syrjälä 	for_each_intel_encoder(dev, encoder)
310387a02106SVille Syrjälä 		if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
310487a02106SVille Syrjälä 			enabled_irqs |= hpd[encoder->hpd_pin];
310587a02106SVille Syrjälä 
310687a02106SVille Syrjälä 	return enabled_irqs;
310787a02106SVille Syrjälä }
310887a02106SVille Syrjälä 
310982a28bcfSDaniel Vetter static void ibx_hpd_irq_setup(struct drm_device *dev)
311082a28bcfSDaniel Vetter {
31112d1013ddSJani Nikula 	struct drm_i915_private *dev_priv = dev->dev_private;
311287a02106SVille Syrjälä 	u32 hotplug_irqs, hotplug, enabled_irqs;
311382a28bcfSDaniel Vetter 
311482a28bcfSDaniel Vetter 	if (HAS_PCH_IBX(dev)) {
3115fee884edSDaniel Vetter 		hotplug_irqs = SDE_HOTPLUG_MASK;
311687a02106SVille Syrjälä 		enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ibx);
311782a28bcfSDaniel Vetter 	} else {
3118fee884edSDaniel Vetter 		hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
311987a02106SVille Syrjälä 		enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_cpt);
312082a28bcfSDaniel Vetter 	}
312182a28bcfSDaniel Vetter 
3122fee884edSDaniel Vetter 	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
312382a28bcfSDaniel Vetter 
31247fe0b973SKeith Packard 	/*
31257fe0b973SKeith Packard 	 * Enable digital hotplug on the PCH, and configure the DP short pulse
31266dbf30ceSVille Syrjälä 	 * duration to 2ms (which is the minimum in the Display Port spec).
31276dbf30ceSVille Syrjälä 	 * The pulse duration bits are reserved on LPT+.
31287fe0b973SKeith Packard 	 */
31297fe0b973SKeith Packard 	hotplug = I915_READ(PCH_PORT_HOTPLUG);
31307fe0b973SKeith Packard 	hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
31317fe0b973SKeith Packard 	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
31327fe0b973SKeith Packard 	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
31337fe0b973SKeith Packard 	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
31347fe0b973SKeith Packard 	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
31356dbf30ceSVille Syrjälä }
313626951cafSXiong Zhang 
31376dbf30ceSVille Syrjälä static void spt_hpd_irq_setup(struct drm_device *dev)
31386dbf30ceSVille Syrjälä {
31396dbf30ceSVille Syrjälä 	struct drm_i915_private *dev_priv = dev->dev_private;
31406dbf30ceSVille Syrjälä 	u32 hotplug_irqs, hotplug, enabled_irqs;
31416dbf30ceSVille Syrjälä 
31426dbf30ceSVille Syrjälä 	hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
31436dbf30ceSVille Syrjälä 	enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_spt);
31446dbf30ceSVille Syrjälä 
31456dbf30ceSVille Syrjälä 	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
31466dbf30ceSVille Syrjälä 
31476dbf30ceSVille Syrjälä 	/* Enable digital hotplug on the PCH */
31486dbf30ceSVille Syrjälä 	hotplug = I915_READ(PCH_PORT_HOTPLUG);
31496dbf30ceSVille Syrjälä 	hotplug |= PORTD_HOTPLUG_ENABLE | PORTC_HOTPLUG_ENABLE |
31506dbf30ceSVille Syrjälä 		PORTB_HOTPLUG_ENABLE;
31516dbf30ceSVille Syrjälä 	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
31526dbf30ceSVille Syrjälä 
315326951cafSXiong Zhang 	hotplug = I915_READ(PCH_PORT_HOTPLUG2);
315426951cafSXiong Zhang 	hotplug |= PORTE_HOTPLUG_ENABLE;
315526951cafSXiong Zhang 	I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
315626951cafSXiong Zhang }
31577fe0b973SKeith Packard 
3158e4ce95aaSVille Syrjälä static void ilk_hpd_irq_setup(struct drm_device *dev)
3159e4ce95aaSVille Syrjälä {
3160e4ce95aaSVille Syrjälä 	struct drm_i915_private *dev_priv = dev->dev_private;
3161e4ce95aaSVille Syrjälä 	u32 hotplug_irqs, hotplug, enabled_irqs;
3162e4ce95aaSVille Syrjälä 
3163*23bb4cb5SVille Syrjälä 	if (INTEL_INFO(dev)->gen >= 7) {
3164*23bb4cb5SVille Syrjälä 		hotplug_irqs = DE_DP_A_HOTPLUG_IVB;
3165*23bb4cb5SVille Syrjälä 		enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ivb);
3166*23bb4cb5SVille Syrjälä 	} else {
3167e4ce95aaSVille Syrjälä 		hotplug_irqs = DE_DP_A_HOTPLUG;
3168e4ce95aaSVille Syrjälä 		enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ilk);
3169*23bb4cb5SVille Syrjälä 	}
3170e4ce95aaSVille Syrjälä 
3171e4ce95aaSVille Syrjälä 	ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
3172e4ce95aaSVille Syrjälä 
3173e4ce95aaSVille Syrjälä 	/*
3174e4ce95aaSVille Syrjälä 	 * Enable digital hotplug on the CPU, and configure the DP short pulse
3175e4ce95aaSVille Syrjälä 	 * duration to 2ms (which is the minimum in the Display Port spec)
3176*23bb4cb5SVille Syrjälä 	 * The pulse duration bits are reserved on HSW+.
3177e4ce95aaSVille Syrjälä 	 */
3178e4ce95aaSVille Syrjälä 	hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
3179e4ce95aaSVille Syrjälä 	hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK;
3180e4ce95aaSVille Syrjälä 	hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE | DIGITAL_PORTA_PULSE_DURATION_2ms;
3181e4ce95aaSVille Syrjälä 	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
3182e4ce95aaSVille Syrjälä 
3183e4ce95aaSVille Syrjälä 	ibx_hpd_irq_setup(dev);
3184e4ce95aaSVille Syrjälä }
3185e4ce95aaSVille Syrjälä 
3186e0a20ad7SShashank Sharma static void bxt_hpd_irq_setup(struct drm_device *dev)
3187e0a20ad7SShashank Sharma {
3188e0a20ad7SShashank Sharma 	struct drm_i915_private *dev_priv = dev->dev_private;
318987a02106SVille Syrjälä 	u32 hotplug_port;
3190e0a20ad7SShashank Sharma 	u32 hotplug_ctrl;
3191e0a20ad7SShashank Sharma 
319287a02106SVille Syrjälä 	hotplug_port = intel_hpd_enabled_irqs(dev, hpd_bxt);
3193e0a20ad7SShashank Sharma 
3194e0a20ad7SShashank Sharma 	hotplug_ctrl = I915_READ(BXT_HOTPLUG_CTL) & ~BXT_HOTPLUG_CTL_MASK;
3195e0a20ad7SShashank Sharma 
31967f3561beSSonika Jindal 	if (hotplug_port & BXT_DE_PORT_HP_DDIA)
31977f3561beSSonika Jindal 		hotplug_ctrl |= BXT_DDIA_HPD_ENABLE;
3198e0a20ad7SShashank Sharma 	if (hotplug_port & BXT_DE_PORT_HP_DDIB)
3199e0a20ad7SShashank Sharma 		hotplug_ctrl |= BXT_DDIB_HPD_ENABLE;
3200e0a20ad7SShashank Sharma 	if (hotplug_port & BXT_DE_PORT_HP_DDIC)
3201e0a20ad7SShashank Sharma 		hotplug_ctrl |= BXT_DDIC_HPD_ENABLE;
3202e0a20ad7SShashank Sharma 	I915_WRITE(BXT_HOTPLUG_CTL, hotplug_ctrl);
3203e0a20ad7SShashank Sharma 
3204e0a20ad7SShashank Sharma 	hotplug_ctrl = I915_READ(GEN8_DE_PORT_IMR) & ~hotplug_port;
3205e0a20ad7SShashank Sharma 	I915_WRITE(GEN8_DE_PORT_IMR, hotplug_ctrl);
3206e0a20ad7SShashank Sharma 
3207e0a20ad7SShashank Sharma 	hotplug_ctrl = I915_READ(GEN8_DE_PORT_IER) | hotplug_port;
3208e0a20ad7SShashank Sharma 	I915_WRITE(GEN8_DE_PORT_IER, hotplug_ctrl);
3209e0a20ad7SShashank Sharma 	POSTING_READ(GEN8_DE_PORT_IER);
3210e0a20ad7SShashank Sharma }
3211e0a20ad7SShashank Sharma 
3212d46da437SPaulo Zanoni static void ibx_irq_postinstall(struct drm_device *dev)
3213d46da437SPaulo Zanoni {
32142d1013ddSJani Nikula 	struct drm_i915_private *dev_priv = dev->dev_private;
321582a28bcfSDaniel Vetter 	u32 mask;
3216d46da437SPaulo Zanoni 
3217692a04cfSDaniel Vetter 	if (HAS_PCH_NOP(dev))
3218692a04cfSDaniel Vetter 		return;
3219692a04cfSDaniel Vetter 
3220105b122eSPaulo Zanoni 	if (HAS_PCH_IBX(dev))
32215c673b60SDaniel Vetter 		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
3222105b122eSPaulo Zanoni 	else
32235c673b60SDaniel Vetter 		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
32248664281bSPaulo Zanoni 
3225337ba017SPaulo Zanoni 	GEN5_ASSERT_IIR_IS_ZERO(SDEIIR);
3226d46da437SPaulo Zanoni 	I915_WRITE(SDEIMR, ~mask);
3227d46da437SPaulo Zanoni }
3228d46da437SPaulo Zanoni 
32290a9a8c91SDaniel Vetter static void gen5_gt_irq_postinstall(struct drm_device *dev)
32300a9a8c91SDaniel Vetter {
32310a9a8c91SDaniel Vetter 	struct drm_i915_private *dev_priv = dev->dev_private;
32320a9a8c91SDaniel Vetter 	u32 pm_irqs, gt_irqs;
32330a9a8c91SDaniel Vetter 
32340a9a8c91SDaniel Vetter 	pm_irqs = gt_irqs = 0;
32350a9a8c91SDaniel Vetter 
32360a9a8c91SDaniel Vetter 	dev_priv->gt_irq_mask = ~0;
3237040d2baaSBen Widawsky 	if (HAS_L3_DPF(dev)) {
32380a9a8c91SDaniel Vetter 		/* L3 parity interrupt is always unmasked. */
323935a85ac6SBen Widawsky 		dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
324035a85ac6SBen Widawsky 		gt_irqs |= GT_PARITY_ERROR(dev);
32410a9a8c91SDaniel Vetter 	}
32420a9a8c91SDaniel Vetter 
32430a9a8c91SDaniel Vetter 	gt_irqs |= GT_RENDER_USER_INTERRUPT;
32440a9a8c91SDaniel Vetter 	if (IS_GEN5(dev)) {
32450a9a8c91SDaniel Vetter 		gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
32460a9a8c91SDaniel Vetter 			   ILK_BSD_USER_INTERRUPT;
32470a9a8c91SDaniel Vetter 	} else {
32480a9a8c91SDaniel Vetter 		gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
32490a9a8c91SDaniel Vetter 	}
32500a9a8c91SDaniel Vetter 
325135079899SPaulo Zanoni 	GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
32520a9a8c91SDaniel Vetter 
32530a9a8c91SDaniel Vetter 	if (INTEL_INFO(dev)->gen >= 6) {
325478e68d36SImre Deak 		/*
325578e68d36SImre Deak 		 * RPS interrupts will get enabled/disabled on demand when RPS
325678e68d36SImre Deak 		 * itself is enabled/disabled.
325778e68d36SImre Deak 		 */
32580a9a8c91SDaniel Vetter 		if (HAS_VEBOX(dev))
32590a9a8c91SDaniel Vetter 			pm_irqs |= PM_VEBOX_USER_INTERRUPT;
32600a9a8c91SDaniel Vetter 
3261605cd25bSPaulo Zanoni 		dev_priv->pm_irq_mask = 0xffffffff;
326235079899SPaulo Zanoni 		GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs);
32630a9a8c91SDaniel Vetter 	}
32640a9a8c91SDaniel Vetter }
32650a9a8c91SDaniel Vetter 
3266f71d4af4SJesse Barnes static int ironlake_irq_postinstall(struct drm_device *dev)
3267036a4a7dSZhenyu Wang {
32682d1013ddSJani Nikula 	struct drm_i915_private *dev_priv = dev->dev_private;
32698e76f8dcSPaulo Zanoni 	u32 display_mask, extra_mask;
32708e76f8dcSPaulo Zanoni 
32718e76f8dcSPaulo Zanoni 	if (INTEL_INFO(dev)->gen >= 7) {
32728e76f8dcSPaulo Zanoni 		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
32738e76f8dcSPaulo Zanoni 				DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
32748e76f8dcSPaulo Zanoni 				DE_PLANEB_FLIP_DONE_IVB |
32755c673b60SDaniel Vetter 				DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
32768e76f8dcSPaulo Zanoni 		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
3277*23bb4cb5SVille Syrjälä 			      DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
3278*23bb4cb5SVille Syrjälä 			      DE_DP_A_HOTPLUG_IVB);
32798e76f8dcSPaulo Zanoni 	} else {
32808e76f8dcSPaulo Zanoni 		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
3281ce99c256SDaniel Vetter 				DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
32825b3a856bSDaniel Vetter 				DE_AUX_CHANNEL_A |
32835b3a856bSDaniel Vetter 				DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
32845b3a856bSDaniel Vetter 				DE_POISON);
3285e4ce95aaSVille Syrjälä 		extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
3286e4ce95aaSVille Syrjälä 			      DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
3287e4ce95aaSVille Syrjälä 			      DE_DP_A_HOTPLUG);
32888e76f8dcSPaulo Zanoni 	}
3289036a4a7dSZhenyu Wang 
32901ec14ad3SChris Wilson 	dev_priv->irq_mask = ~display_mask;
3291036a4a7dSZhenyu Wang 
32920c841212SPaulo Zanoni 	I915_WRITE(HWSTAM, 0xeffe);
32930c841212SPaulo Zanoni 
3294622364b6SPaulo Zanoni 	ibx_irq_pre_postinstall(dev);
3295622364b6SPaulo Zanoni 
329635079899SPaulo Zanoni 	GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);
3297036a4a7dSZhenyu Wang 
32980a9a8c91SDaniel Vetter 	gen5_gt_irq_postinstall(dev);
3299036a4a7dSZhenyu Wang 
3300d46da437SPaulo Zanoni 	ibx_irq_postinstall(dev);
33017fe0b973SKeith Packard 
3302f97108d1SJesse Barnes 	if (IS_IRONLAKE_M(dev)) {
33036005ce42SDaniel Vetter 		/* Enable PCU event interrupts
33046005ce42SDaniel Vetter 		 *
33056005ce42SDaniel Vetter 		 * spinlocking not required here for correctness since interrupt
33064bc9d430SDaniel Vetter 		 * setup is guaranteed to run in single-threaded context. But we
33074bc9d430SDaniel Vetter 		 * need it to make the assert_spin_locked happy. */
3308d6207435SDaniel Vetter 		spin_lock_irq(&dev_priv->irq_lock);
3309f97108d1SJesse Barnes 		ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
3310d6207435SDaniel Vetter 		spin_unlock_irq(&dev_priv->irq_lock);
3311f97108d1SJesse Barnes 	}
3312f97108d1SJesse Barnes 
3313036a4a7dSZhenyu Wang 	return 0;
3314036a4a7dSZhenyu Wang }
3315036a4a7dSZhenyu Wang 
3316f8b79e58SImre Deak static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv)
3317f8b79e58SImre Deak {
3318f8b79e58SImre Deak 	u32 pipestat_mask;
3319f8b79e58SImre Deak 	u32 iir_mask;
3320120dda4fSVille Syrjälä 	enum pipe pipe;
3321f8b79e58SImre Deak 
3322f8b79e58SImre Deak 	pipestat_mask = PIPESTAT_INT_STATUS_MASK |
3323f8b79e58SImre Deak 			PIPE_FIFO_UNDERRUN_STATUS;
3324f8b79e58SImre Deak 
3325120dda4fSVille Syrjälä 	for_each_pipe(dev_priv, pipe)
3326120dda4fSVille Syrjälä 		I915_WRITE(PIPESTAT(pipe), pipestat_mask);
3327f8b79e58SImre Deak 	POSTING_READ(PIPESTAT(PIPE_A));
3328f8b79e58SImre Deak 
3329f8b79e58SImre Deak 	pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
3330f8b79e58SImre Deak 			PIPE_CRC_DONE_INTERRUPT_STATUS;
3331f8b79e58SImre Deak 
3332120dda4fSVille Syrjälä 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3333120dda4fSVille Syrjälä 	for_each_pipe(dev_priv, pipe)
3334120dda4fSVille Syrjälä 		      i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
3335f8b79e58SImre Deak 
3336f8b79e58SImre Deak 	iir_mask = I915_DISPLAY_PORT_INTERRUPT |
3337f8b79e58SImre Deak 		   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3338f8b79e58SImre Deak 		   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
3339120dda4fSVille Syrjälä 	if (IS_CHERRYVIEW(dev_priv))
3340120dda4fSVille Syrjälä 		iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
3341f8b79e58SImre Deak 	dev_priv->irq_mask &= ~iir_mask;
3342f8b79e58SImre Deak 
3343f8b79e58SImre Deak 	I915_WRITE(VLV_IIR, iir_mask);
3344f8b79e58SImre Deak 	I915_WRITE(VLV_IIR, iir_mask);
3345f8b79e58SImre Deak 	I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
334676e41860SVille Syrjälä 	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
334776e41860SVille Syrjälä 	POSTING_READ(VLV_IMR);
3348f8b79e58SImre Deak }
3349f8b79e58SImre Deak 
3350f8b79e58SImre Deak static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv)
3351f8b79e58SImre Deak {
3352f8b79e58SImre Deak 	u32 pipestat_mask;
3353f8b79e58SImre Deak 	u32 iir_mask;
3354120dda4fSVille Syrjälä 	enum pipe pipe;
3355f8b79e58SImre Deak 
3356f8b79e58SImre Deak 	iir_mask = I915_DISPLAY_PORT_INTERRUPT |
3357f8b79e58SImre Deak 		   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
33586c7fba04SImre Deak 		   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
3359120dda4fSVille Syrjälä 	if (IS_CHERRYVIEW(dev_priv))
3360120dda4fSVille Syrjälä 		iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
3361f8b79e58SImre Deak 
3362f8b79e58SImre Deak 	dev_priv->irq_mask |= iir_mask;
3363f8b79e58SImre Deak 	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
336476e41860SVille Syrjälä 	I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3365f8b79e58SImre Deak 	I915_WRITE(VLV_IIR, iir_mask);
3366f8b79e58SImre Deak 	I915_WRITE(VLV_IIR, iir_mask);
3367f8b79e58SImre Deak 	POSTING_READ(VLV_IIR);
3368f8b79e58SImre Deak 
3369f8b79e58SImre Deak 	pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
3370f8b79e58SImre Deak 			PIPE_CRC_DONE_INTERRUPT_STATUS;
3371f8b79e58SImre Deak 
3372120dda4fSVille Syrjälä 	i915_disable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3373120dda4fSVille Syrjälä 	for_each_pipe(dev_priv, pipe)
3374120dda4fSVille Syrjälä 		i915_disable_pipestat(dev_priv, pipe, pipestat_mask);
3375f8b79e58SImre Deak 
3376f8b79e58SImre Deak 	pipestat_mask = PIPESTAT_INT_STATUS_MASK |
3377f8b79e58SImre Deak 			PIPE_FIFO_UNDERRUN_STATUS;
3378120dda4fSVille Syrjälä 
3379120dda4fSVille Syrjälä 	for_each_pipe(dev_priv, pipe)
3380120dda4fSVille Syrjälä 		I915_WRITE(PIPESTAT(pipe), pipestat_mask);
3381f8b79e58SImre Deak 	POSTING_READ(PIPESTAT(PIPE_A));
3382f8b79e58SImre Deak }
3383f8b79e58SImre Deak 
3384f8b79e58SImre Deak void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
3385f8b79e58SImre Deak {
3386f8b79e58SImre Deak 	assert_spin_locked(&dev_priv->irq_lock);
3387f8b79e58SImre Deak 
3388f8b79e58SImre Deak 	if (dev_priv->display_irqs_enabled)
3389f8b79e58SImre Deak 		return;
3390f8b79e58SImre Deak 
3391f8b79e58SImre Deak 	dev_priv->display_irqs_enabled = true;
3392f8b79e58SImre Deak 
3393950eabafSImre Deak 	if (intel_irqs_enabled(dev_priv))
3394f8b79e58SImre Deak 		valleyview_display_irqs_install(dev_priv);
3395f8b79e58SImre Deak }
3396f8b79e58SImre Deak 
3397f8b79e58SImre Deak void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
3398f8b79e58SImre Deak {
3399f8b79e58SImre Deak 	assert_spin_locked(&dev_priv->irq_lock);
3400f8b79e58SImre Deak 
3401f8b79e58SImre Deak 	if (!dev_priv->display_irqs_enabled)
3402f8b79e58SImre Deak 		return;
3403f8b79e58SImre Deak 
3404f8b79e58SImre Deak 	dev_priv->display_irqs_enabled = false;
3405f8b79e58SImre Deak 
3406950eabafSImre Deak 	if (intel_irqs_enabled(dev_priv))
3407f8b79e58SImre Deak 		valleyview_display_irqs_uninstall(dev_priv);
3408f8b79e58SImre Deak }
3409f8b79e58SImre Deak 
34100e6c9a9eSVille Syrjälä static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
34117e231dbeSJesse Barnes {
3412f8b79e58SImre Deak 	dev_priv->irq_mask = ~0;
34137e231dbeSJesse Barnes 
341420afbda2SDaniel Vetter 	I915_WRITE(PORT_HOTPLUG_EN, 0);
341520afbda2SDaniel Vetter 	POSTING_READ(PORT_HOTPLUG_EN);
341620afbda2SDaniel Vetter 
34177e231dbeSJesse Barnes 	I915_WRITE(VLV_IIR, 0xffffffff);
341876e41860SVille Syrjälä 	I915_WRITE(VLV_IIR, 0xffffffff);
341976e41860SVille Syrjälä 	I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
342076e41860SVille Syrjälä 	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
342176e41860SVille Syrjälä 	POSTING_READ(VLV_IMR);
34227e231dbeSJesse Barnes 
3423b79480baSDaniel Vetter 	/* Interrupt setup is already guaranteed to be single-threaded, this is
3424b79480baSDaniel Vetter 	 * just to make the assert_spin_locked check happy. */
3425d6207435SDaniel Vetter 	spin_lock_irq(&dev_priv->irq_lock);
3426f8b79e58SImre Deak 	if (dev_priv->display_irqs_enabled)
3427f8b79e58SImre Deak 		valleyview_display_irqs_install(dev_priv);
3428d6207435SDaniel Vetter 	spin_unlock_irq(&dev_priv->irq_lock);
34290e6c9a9eSVille Syrjälä }
34300e6c9a9eSVille Syrjälä 
34310e6c9a9eSVille Syrjälä static int valleyview_irq_postinstall(struct drm_device *dev)
34320e6c9a9eSVille Syrjälä {
34330e6c9a9eSVille Syrjälä 	struct drm_i915_private *dev_priv = dev->dev_private;
34340e6c9a9eSVille Syrjälä 
34350e6c9a9eSVille Syrjälä 	vlv_display_irq_postinstall(dev_priv);
34367e231dbeSJesse Barnes 
34370a9a8c91SDaniel Vetter 	gen5_gt_irq_postinstall(dev);
34387e231dbeSJesse Barnes 
34397e231dbeSJesse Barnes 	/* ack & enable invalid PTE error interrupts */
34407e231dbeSJesse Barnes #if 0 /* FIXME: add support to irq handler for checking these bits */
34417e231dbeSJesse Barnes 	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
34427e231dbeSJesse Barnes 	I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
34437e231dbeSJesse Barnes #endif
34447e231dbeSJesse Barnes 
34457e231dbeSJesse Barnes 	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
344620afbda2SDaniel Vetter 
344720afbda2SDaniel Vetter 	return 0;
344820afbda2SDaniel Vetter }
344920afbda2SDaniel Vetter 
3450abd58f01SBen Widawsky static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
3451abd58f01SBen Widawsky {
3452abd58f01SBen Widawsky 	/* These are interrupts we'll toggle with the ring mask register */
3453abd58f01SBen Widawsky 	uint32_t gt_interrupts[] = {
3454abd58f01SBen Widawsky 		GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
345573d477f6SOscar Mateo 			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
3456abd58f01SBen Widawsky 			GT_RENDER_L3_PARITY_ERROR_INTERRUPT |
345773d477f6SOscar Mateo 			GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
345873d477f6SOscar Mateo 			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
3459abd58f01SBen Widawsky 		GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
346073d477f6SOscar Mateo 			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
346173d477f6SOscar Mateo 			GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
346273d477f6SOscar Mateo 			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
3463abd58f01SBen Widawsky 		0,
346473d477f6SOscar Mateo 		GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
346573d477f6SOscar Mateo 			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
3466abd58f01SBen Widawsky 		};
3467abd58f01SBen Widawsky 
34680961021aSBen Widawsky 	dev_priv->pm_irq_mask = 0xffffffff;
34699a2d2d87SDeepak S 	GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
34709a2d2d87SDeepak S 	GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
347178e68d36SImre Deak 	/*
347278e68d36SImre Deak 	 * RPS interrupts will get enabled/disabled on demand when RPS itself
347378e68d36SImre Deak 	 * is enabled/disabled.
347478e68d36SImre Deak 	 */
347578e68d36SImre Deak 	GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, 0);
34769a2d2d87SDeepak S 	GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
3477abd58f01SBen Widawsky }
3478abd58f01SBen Widawsky 
3479abd58f01SBen Widawsky static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
3480abd58f01SBen Widawsky {
3481770de83dSDamien Lespiau 	uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
3482770de83dSDamien Lespiau 	uint32_t de_pipe_enables;
3483abd58f01SBen Widawsky 	int pipe;
34849e63743eSShashank Sharma 	u32 de_port_en = GEN8_AUX_CHANNEL_A;
3485770de83dSDamien Lespiau 
348688e04703SJesse Barnes 	if (IS_GEN9(dev_priv)) {
3487770de83dSDamien Lespiau 		de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE |
3488770de83dSDamien Lespiau 				  GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
34899e63743eSShashank Sharma 		de_port_en |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
349088e04703SJesse Barnes 			GEN9_AUX_CHANNEL_D;
34919e63743eSShashank Sharma 
34929e63743eSShashank Sharma 		if (IS_BROXTON(dev_priv))
34939e63743eSShashank Sharma 			de_port_en |= BXT_DE_PORT_GMBUS;
349488e04703SJesse Barnes 	} else
3495770de83dSDamien Lespiau 		de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE |
3496770de83dSDamien Lespiau 				  GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
3497770de83dSDamien Lespiau 
3498770de83dSDamien Lespiau 	de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
3499770de83dSDamien Lespiau 					   GEN8_PIPE_FIFO_UNDERRUN;
3500770de83dSDamien Lespiau 
350113b3a0a7SDaniel Vetter 	dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
350213b3a0a7SDaniel Vetter 	dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
350313b3a0a7SDaniel Vetter 	dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;
3504abd58f01SBen Widawsky 
3505055e393fSDamien Lespiau 	for_each_pipe(dev_priv, pipe)
3506f458ebbcSDaniel Vetter 		if (intel_display_power_is_enabled(dev_priv,
3507813bde43SPaulo Zanoni 				POWER_DOMAIN_PIPE(pipe)))
3508813bde43SPaulo Zanoni 			GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
3509813bde43SPaulo Zanoni 					  dev_priv->de_irq_mask[pipe],
351035079899SPaulo Zanoni 					  de_pipe_enables);
3511abd58f01SBen Widawsky 
35129e63743eSShashank Sharma 	GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_en, de_port_en);
3513abd58f01SBen Widawsky }
3514abd58f01SBen Widawsky 
3515abd58f01SBen Widawsky static int gen8_irq_postinstall(struct drm_device *dev)
3516abd58f01SBen Widawsky {
3517abd58f01SBen Widawsky 	struct drm_i915_private *dev_priv = dev->dev_private;
3518abd58f01SBen Widawsky 
3519266ea3d9SShashank Sharma 	if (HAS_PCH_SPLIT(dev))
3520622364b6SPaulo Zanoni 		ibx_irq_pre_postinstall(dev);
3521622364b6SPaulo Zanoni 
3522abd58f01SBen Widawsky 	gen8_gt_irq_postinstall(dev_priv);
3523abd58f01SBen Widawsky 	gen8_de_irq_postinstall(dev_priv);
3524abd58f01SBen Widawsky 
3525266ea3d9SShashank Sharma 	if (HAS_PCH_SPLIT(dev))
3526abd58f01SBen Widawsky 		ibx_irq_postinstall(dev);
3527abd58f01SBen Widawsky 
3528abd58f01SBen Widawsky 	I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
3529abd58f01SBen Widawsky 	POSTING_READ(GEN8_MASTER_IRQ);
3530abd58f01SBen Widawsky 
3531abd58f01SBen Widawsky 	return 0;
3532abd58f01SBen Widawsky }
3533abd58f01SBen Widawsky 
353443f328d7SVille Syrjälä static int cherryview_irq_postinstall(struct drm_device *dev)
353543f328d7SVille Syrjälä {
353643f328d7SVille Syrjälä 	struct drm_i915_private *dev_priv = dev->dev_private;
353743f328d7SVille Syrjälä 
3538c2b66797SVille Syrjälä 	vlv_display_irq_postinstall(dev_priv);
353943f328d7SVille Syrjälä 
354043f328d7SVille Syrjälä 	gen8_gt_irq_postinstall(dev_priv);
354143f328d7SVille Syrjälä 
354243f328d7SVille Syrjälä 	I915_WRITE(GEN8_MASTER_IRQ, MASTER_INTERRUPT_ENABLE);
354343f328d7SVille Syrjälä 	POSTING_READ(GEN8_MASTER_IRQ);
354443f328d7SVille Syrjälä 
354543f328d7SVille Syrjälä 	return 0;
354643f328d7SVille Syrjälä }
354743f328d7SVille Syrjälä 
3548abd58f01SBen Widawsky static void gen8_irq_uninstall(struct drm_device *dev)
3549abd58f01SBen Widawsky {
3550abd58f01SBen Widawsky 	struct drm_i915_private *dev_priv = dev->dev_private;
3551abd58f01SBen Widawsky 
3552abd58f01SBen Widawsky 	if (!dev_priv)
3553abd58f01SBen Widawsky 		return;
3554abd58f01SBen Widawsky 
3555823f6b38SPaulo Zanoni 	gen8_irq_reset(dev);
3556abd58f01SBen Widawsky }
3557abd58f01SBen Widawsky 
35588ea0be4fSVille Syrjälä static void vlv_display_irq_uninstall(struct drm_i915_private *dev_priv)
35598ea0be4fSVille Syrjälä {
35608ea0be4fSVille Syrjälä 	/* Interrupt setup is already guaranteed to be single-threaded, this is
35618ea0be4fSVille Syrjälä 	 * just to make the assert_spin_locked check happy. */
35628ea0be4fSVille Syrjälä 	spin_lock_irq(&dev_priv->irq_lock);
35638ea0be4fSVille Syrjälä 	if (dev_priv->display_irqs_enabled)
35648ea0be4fSVille Syrjälä 		valleyview_display_irqs_uninstall(dev_priv);
35658ea0be4fSVille Syrjälä 	spin_unlock_irq(&dev_priv->irq_lock);
35668ea0be4fSVille Syrjälä 
35678ea0be4fSVille Syrjälä 	vlv_display_irq_reset(dev_priv);
35688ea0be4fSVille Syrjälä 
3569c352d1baSImre Deak 	dev_priv->irq_mask = ~0;
35708ea0be4fSVille Syrjälä }
35718ea0be4fSVille Syrjälä 
35727e231dbeSJesse Barnes static void valleyview_irq_uninstall(struct drm_device *dev)
35737e231dbeSJesse Barnes {
35742d1013ddSJani Nikula 	struct drm_i915_private *dev_priv = dev->dev_private;
35757e231dbeSJesse Barnes 
35767e231dbeSJesse Barnes 	if (!dev_priv)
35777e231dbeSJesse Barnes 		return;
35787e231dbeSJesse Barnes 
3579843d0e7dSImre Deak 	I915_WRITE(VLV_MASTER_IER, 0);
3580843d0e7dSImre Deak 
3581893fce8eSVille Syrjälä 	gen5_gt_irq_reset(dev);
3582893fce8eSVille Syrjälä 
35837e231dbeSJesse Barnes 	I915_WRITE(HWSTAM, 0xffffffff);
3584f8b79e58SImre Deak 
35858ea0be4fSVille Syrjälä 	vlv_display_irq_uninstall(dev_priv);
35867e231dbeSJesse Barnes }
35877e231dbeSJesse Barnes 
358843f328d7SVille Syrjälä static void cherryview_irq_uninstall(struct drm_device *dev)
358943f328d7SVille Syrjälä {
359043f328d7SVille Syrjälä 	struct drm_i915_private *dev_priv = dev->dev_private;
359143f328d7SVille Syrjälä 
359243f328d7SVille Syrjälä 	if (!dev_priv)
359343f328d7SVille Syrjälä 		return;
359443f328d7SVille Syrjälä 
359543f328d7SVille Syrjälä 	I915_WRITE(GEN8_MASTER_IRQ, 0);
359643f328d7SVille Syrjälä 	POSTING_READ(GEN8_MASTER_IRQ);
359743f328d7SVille Syrjälä 
3598a2c30fbaSVille Syrjälä 	gen8_gt_irq_reset(dev_priv);
359943f328d7SVille Syrjälä 
3600a2c30fbaSVille Syrjälä 	GEN5_IRQ_RESET(GEN8_PCU_);
360143f328d7SVille Syrjälä 
3602c2b66797SVille Syrjälä 	vlv_display_irq_uninstall(dev_priv);
360343f328d7SVille Syrjälä }
360443f328d7SVille Syrjälä 
3605f71d4af4SJesse Barnes static void ironlake_irq_uninstall(struct drm_device *dev)
3606036a4a7dSZhenyu Wang {
36072d1013ddSJani Nikula 	struct drm_i915_private *dev_priv = dev->dev_private;
36084697995bSJesse Barnes 
36094697995bSJesse Barnes 	if (!dev_priv)
36104697995bSJesse Barnes 		return;
36114697995bSJesse Barnes 
3612be30b29fSPaulo Zanoni 	ironlake_irq_reset(dev);
3613036a4a7dSZhenyu Wang }
3614036a4a7dSZhenyu Wang 
3615c2798b19SChris Wilson static void i8xx_irq_preinstall(struct drm_device * dev)
3616c2798b19SChris Wilson {
36172d1013ddSJani Nikula 	struct drm_i915_private *dev_priv = dev->dev_private;
3618c2798b19SChris Wilson 	int pipe;
3619c2798b19SChris Wilson 
3620055e393fSDamien Lespiau 	for_each_pipe(dev_priv, pipe)
3621c2798b19SChris Wilson 		I915_WRITE(PIPESTAT(pipe), 0);
3622c2798b19SChris Wilson 	I915_WRITE16(IMR, 0xffff);
3623c2798b19SChris Wilson 	I915_WRITE16(IER, 0x0);
3624c2798b19SChris Wilson 	POSTING_READ16(IER);
3625c2798b19SChris Wilson }
3626c2798b19SChris Wilson 
3627c2798b19SChris Wilson static int i8xx_irq_postinstall(struct drm_device *dev)
3628c2798b19SChris Wilson {
36292d1013ddSJani Nikula 	struct drm_i915_private *dev_priv = dev->dev_private;
3630c2798b19SChris Wilson 
3631c2798b19SChris Wilson 	I915_WRITE16(EMR,
3632c2798b19SChris Wilson 		     ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
3633c2798b19SChris Wilson 
3634c2798b19SChris Wilson 	/* Unmask the interrupts that we always want on. */
3635c2798b19SChris Wilson 	dev_priv->irq_mask =
3636c2798b19SChris Wilson 		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3637c2798b19SChris Wilson 		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3638c2798b19SChris Wilson 		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
363937ef01abSDaniel Vetter 		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
3640c2798b19SChris Wilson 	I915_WRITE16(IMR, dev_priv->irq_mask);
3641c2798b19SChris Wilson 
3642c2798b19SChris Wilson 	I915_WRITE16(IER,
3643c2798b19SChris Wilson 		     I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3644c2798b19SChris Wilson 		     I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3645c2798b19SChris Wilson 		     I915_USER_INTERRUPT);
3646c2798b19SChris Wilson 	POSTING_READ16(IER);
3647c2798b19SChris Wilson 
3648379ef82dSDaniel Vetter 	/* Interrupt setup is already guaranteed to be single-threaded, this is
3649379ef82dSDaniel Vetter 	 * just to make the assert_spin_locked check happy. */
3650d6207435SDaniel Vetter 	spin_lock_irq(&dev_priv->irq_lock);
3651755e9019SImre Deak 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3652755e9019SImre Deak 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
3653d6207435SDaniel Vetter 	spin_unlock_irq(&dev_priv->irq_lock);
3654379ef82dSDaniel Vetter 
3655c2798b19SChris Wilson 	return 0;
3656c2798b19SChris Wilson }
3657c2798b19SChris Wilson 
365890a72f87SVille Syrjälä /*
365990a72f87SVille Syrjälä  * Returns true when a page flip has completed.
366090a72f87SVille Syrjälä  */
366190a72f87SVille Syrjälä static bool i8xx_handle_vblank(struct drm_device *dev,
36621f1c2e24SVille Syrjälä 			       int plane, int pipe, u32 iir)
366390a72f87SVille Syrjälä {
36642d1013ddSJani Nikula 	struct drm_i915_private *dev_priv = dev->dev_private;
36651f1c2e24SVille Syrjälä 	u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
366690a72f87SVille Syrjälä 
36678d7849dbSVille Syrjälä 	if (!intel_pipe_handle_vblank(dev, pipe))
366890a72f87SVille Syrjälä 		return false;
366990a72f87SVille Syrjälä 
367090a72f87SVille Syrjälä 	if ((iir & flip_pending) == 0)
3671d6bbafa1SChris Wilson 		goto check_page_flip;
367290a72f87SVille Syrjälä 
367390a72f87SVille Syrjälä 	/* We detect FlipDone by looking for the change in PendingFlip from '1'
367490a72f87SVille Syrjälä 	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
367590a72f87SVille Syrjälä 	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
367690a72f87SVille Syrjälä 	 * the flip is completed (no longer pending). Since this doesn't raise
367790a72f87SVille Syrjälä 	 * an interrupt per se, we watch for the change at vblank.
367890a72f87SVille Syrjälä 	 */
367990a72f87SVille Syrjälä 	if (I915_READ16(ISR) & flip_pending)
3680d6bbafa1SChris Wilson 		goto check_page_flip;
368190a72f87SVille Syrjälä 
36827d47559eSVille Syrjälä 	intel_prepare_page_flip(dev, plane);
368390a72f87SVille Syrjälä 	intel_finish_page_flip(dev, pipe);
368490a72f87SVille Syrjälä 	return true;
3685d6bbafa1SChris Wilson 
3686d6bbafa1SChris Wilson check_page_flip:
3687d6bbafa1SChris Wilson 	intel_check_page_flip(dev, pipe);
3688d6bbafa1SChris Wilson 	return false;
368990a72f87SVille Syrjälä }
369090a72f87SVille Syrjälä 
3691ff1f525eSDaniel Vetter static irqreturn_t i8xx_irq_handler(int irq, void *arg)
3692c2798b19SChris Wilson {
369345a83f84SDaniel Vetter 	struct drm_device *dev = arg;
36942d1013ddSJani Nikula 	struct drm_i915_private *dev_priv = dev->dev_private;
3695c2798b19SChris Wilson 	u16 iir, new_iir;
3696c2798b19SChris Wilson 	u32 pipe_stats[2];
3697c2798b19SChris Wilson 	int pipe;
3698c2798b19SChris Wilson 	u16 flip_mask =
3699c2798b19SChris Wilson 		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3700c2798b19SChris Wilson 		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
3701c2798b19SChris Wilson 
37022dd2a883SImre Deak 	if (!intel_irqs_enabled(dev_priv))
37032dd2a883SImre Deak 		return IRQ_NONE;
37042dd2a883SImre Deak 
3705c2798b19SChris Wilson 	iir = I915_READ16(IIR);
3706c2798b19SChris Wilson 	if (iir == 0)
3707c2798b19SChris Wilson 		return IRQ_NONE;
3708c2798b19SChris Wilson 
3709c2798b19SChris Wilson 	while (iir & ~flip_mask) {
3710c2798b19SChris Wilson 		/* Can't rely on pipestat interrupt bit in iir as it might
3711c2798b19SChris Wilson 		 * have been cleared after the pipestat interrupt was received.
3712c2798b19SChris Wilson 		 * It doesn't set the bit in iir again, but it still produces
3713c2798b19SChris Wilson 		 * interrupts (for non-MSI).
3714c2798b19SChris Wilson 		 */
3715222c7f51SDaniel Vetter 		spin_lock(&dev_priv->irq_lock);
3716c2798b19SChris Wilson 		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
3717aaecdf61SDaniel Vetter 			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
3718c2798b19SChris Wilson 
3719055e393fSDamien Lespiau 		for_each_pipe(dev_priv, pipe) {
3720c2798b19SChris Wilson 			int reg = PIPESTAT(pipe);
3721c2798b19SChris Wilson 			pipe_stats[pipe] = I915_READ(reg);
3722c2798b19SChris Wilson 
3723c2798b19SChris Wilson 			/*
3724c2798b19SChris Wilson 			 * Clear the PIPE*STAT regs before the IIR
3725c2798b19SChris Wilson 			 */
37262d9d2b0bSVille Syrjälä 			if (pipe_stats[pipe] & 0x8000ffff)
3727c2798b19SChris Wilson 				I915_WRITE(reg, pipe_stats[pipe]);
3728c2798b19SChris Wilson 		}
3729222c7f51SDaniel Vetter 		spin_unlock(&dev_priv->irq_lock);
3730c2798b19SChris Wilson 
3731c2798b19SChris Wilson 		I915_WRITE16(IIR, iir & ~flip_mask);
3732c2798b19SChris Wilson 		new_iir = I915_READ16(IIR); /* Flush posted writes */
3733c2798b19SChris Wilson 
3734c2798b19SChris Wilson 		if (iir & I915_USER_INTERRUPT)
373574cdb337SChris Wilson 			notify_ring(&dev_priv->ring[RCS]);
3736c2798b19SChris Wilson 
3737055e393fSDamien Lespiau 		for_each_pipe(dev_priv, pipe) {
37381f1c2e24SVille Syrjälä 			int plane = pipe;
37393a77c4c4SDaniel Vetter 			if (HAS_FBC(dev))
37401f1c2e24SVille Syrjälä 				plane = !plane;
37411f1c2e24SVille Syrjälä 
37424356d586SDaniel Vetter 			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
37431f1c2e24SVille Syrjälä 			    i8xx_handle_vblank(dev, plane, pipe, iir))
37441f1c2e24SVille Syrjälä 				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
3745c2798b19SChris Wilson 
37464356d586SDaniel Vetter 			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
3747277de95eSDaniel Vetter 				i9xx_pipe_crc_irq_handler(dev, pipe);
37482d9d2b0bSVille Syrjälä 
37491f7247c0SDaniel Vetter 			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
37501f7247c0SDaniel Vetter 				intel_cpu_fifo_underrun_irq_handler(dev_priv,
37511f7247c0SDaniel Vetter 								    pipe);
37524356d586SDaniel Vetter 		}
3753c2798b19SChris Wilson 
3754c2798b19SChris Wilson 		iir = new_iir;
3755c2798b19SChris Wilson 	}
3756c2798b19SChris Wilson 
3757c2798b19SChris Wilson 	return IRQ_HANDLED;
3758c2798b19SChris Wilson }
3759c2798b19SChris Wilson 
3760c2798b19SChris Wilson static void i8xx_irq_uninstall(struct drm_device * dev)
3761c2798b19SChris Wilson {
37622d1013ddSJani Nikula 	struct drm_i915_private *dev_priv = dev->dev_private;
3763c2798b19SChris Wilson 	int pipe;
3764c2798b19SChris Wilson 
3765055e393fSDamien Lespiau 	for_each_pipe(dev_priv, pipe) {
3766c2798b19SChris Wilson 		/* Clear enable bits; then clear status bits */
3767c2798b19SChris Wilson 		I915_WRITE(PIPESTAT(pipe), 0);
3768c2798b19SChris Wilson 		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
3769c2798b19SChris Wilson 	}
3770c2798b19SChris Wilson 	I915_WRITE16(IMR, 0xffff);
3771c2798b19SChris Wilson 	I915_WRITE16(IER, 0x0);
3772c2798b19SChris Wilson 	I915_WRITE16(IIR, I915_READ16(IIR));
3773c2798b19SChris Wilson }
3774c2798b19SChris Wilson 
3775a266c7d5SChris Wilson static void i915_irq_preinstall(struct drm_device * dev)
3776a266c7d5SChris Wilson {
37772d1013ddSJani Nikula 	struct drm_i915_private *dev_priv = dev->dev_private;
3778a266c7d5SChris Wilson 	int pipe;
3779a266c7d5SChris Wilson 
3780a266c7d5SChris Wilson 	if (I915_HAS_HOTPLUG(dev)) {
3781a266c7d5SChris Wilson 		I915_WRITE(PORT_HOTPLUG_EN, 0);
3782a266c7d5SChris Wilson 		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3783a266c7d5SChris Wilson 	}
3784a266c7d5SChris Wilson 
378500d98ebdSChris Wilson 	I915_WRITE16(HWSTAM, 0xeffe);
3786055e393fSDamien Lespiau 	for_each_pipe(dev_priv, pipe)
3787a266c7d5SChris Wilson 		I915_WRITE(PIPESTAT(pipe), 0);
3788a266c7d5SChris Wilson 	I915_WRITE(IMR, 0xffffffff);
3789a266c7d5SChris Wilson 	I915_WRITE(IER, 0x0);
3790a266c7d5SChris Wilson 	POSTING_READ(IER);
3791a266c7d5SChris Wilson }
3792a266c7d5SChris Wilson 
3793a266c7d5SChris Wilson static int i915_irq_postinstall(struct drm_device *dev)
3794a266c7d5SChris Wilson {
37952d1013ddSJani Nikula 	struct drm_i915_private *dev_priv = dev->dev_private;
379638bde180SChris Wilson 	u32 enable_mask;
3797a266c7d5SChris Wilson 
379838bde180SChris Wilson 	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
379938bde180SChris Wilson 
380038bde180SChris Wilson 	/* Unmask the interrupts that we always want on. */
380138bde180SChris Wilson 	dev_priv->irq_mask =
380238bde180SChris Wilson 		~(I915_ASLE_INTERRUPT |
380338bde180SChris Wilson 		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
380438bde180SChris Wilson 		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
380538bde180SChris Wilson 		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
380637ef01abSDaniel Vetter 		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
380738bde180SChris Wilson 
380838bde180SChris Wilson 	enable_mask =
380938bde180SChris Wilson 		I915_ASLE_INTERRUPT |
381038bde180SChris Wilson 		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
381138bde180SChris Wilson 		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
381238bde180SChris Wilson 		I915_USER_INTERRUPT;
381338bde180SChris Wilson 
3814a266c7d5SChris Wilson 	if (I915_HAS_HOTPLUG(dev)) {
381520afbda2SDaniel Vetter 		I915_WRITE(PORT_HOTPLUG_EN, 0);
381620afbda2SDaniel Vetter 		POSTING_READ(PORT_HOTPLUG_EN);
381720afbda2SDaniel Vetter 
3818a266c7d5SChris Wilson 		/* Enable in IER... */
3819a266c7d5SChris Wilson 		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
3820a266c7d5SChris Wilson 		/* and unmask in IMR */
3821a266c7d5SChris Wilson 		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
3822a266c7d5SChris Wilson 	}
3823a266c7d5SChris Wilson 
3824a266c7d5SChris Wilson 	I915_WRITE(IMR, dev_priv->irq_mask);
3825a266c7d5SChris Wilson 	I915_WRITE(IER, enable_mask);
3826a266c7d5SChris Wilson 	POSTING_READ(IER);
3827a266c7d5SChris Wilson 
3828f49e38ddSJani Nikula 	i915_enable_asle_pipestat(dev);
382920afbda2SDaniel Vetter 
3830379ef82dSDaniel Vetter 	/* Interrupt setup is already guaranteed to be single-threaded, this is
3831379ef82dSDaniel Vetter 	 * just to make the assert_spin_locked check happy. */
3832d6207435SDaniel Vetter 	spin_lock_irq(&dev_priv->irq_lock);
3833755e9019SImre Deak 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3834755e9019SImre Deak 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
3835d6207435SDaniel Vetter 	spin_unlock_irq(&dev_priv->irq_lock);
3836379ef82dSDaniel Vetter 
383720afbda2SDaniel Vetter 	return 0;
383820afbda2SDaniel Vetter }
383920afbda2SDaniel Vetter 
384090a72f87SVille Syrjälä /*
384190a72f87SVille Syrjälä  * Returns true when a page flip has completed.
384290a72f87SVille Syrjälä  */
384390a72f87SVille Syrjälä static bool i915_handle_vblank(struct drm_device *dev,
384490a72f87SVille Syrjälä 			       int plane, int pipe, u32 iir)
384590a72f87SVille Syrjälä {
38462d1013ddSJani Nikula 	struct drm_i915_private *dev_priv = dev->dev_private;
384790a72f87SVille Syrjälä 	u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
384890a72f87SVille Syrjälä 
38498d7849dbSVille Syrjälä 	if (!intel_pipe_handle_vblank(dev, pipe))
385090a72f87SVille Syrjälä 		return false;
385190a72f87SVille Syrjälä 
385290a72f87SVille Syrjälä 	if ((iir & flip_pending) == 0)
3853d6bbafa1SChris Wilson 		goto check_page_flip;
385490a72f87SVille Syrjälä 
385590a72f87SVille Syrjälä 	/* We detect FlipDone by looking for the change in PendingFlip from '1'
385690a72f87SVille Syrjälä 	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
385790a72f87SVille Syrjälä 	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
385890a72f87SVille Syrjälä 	 * the flip is completed (no longer pending). Since this doesn't raise
385990a72f87SVille Syrjälä 	 * an interrupt per se, we watch for the change at vblank.
386090a72f87SVille Syrjälä 	 */
386190a72f87SVille Syrjälä 	if (I915_READ(ISR) & flip_pending)
3862d6bbafa1SChris Wilson 		goto check_page_flip;
386390a72f87SVille Syrjälä 
38647d47559eSVille Syrjälä 	intel_prepare_page_flip(dev, plane);
386590a72f87SVille Syrjälä 	intel_finish_page_flip(dev, pipe);
386690a72f87SVille Syrjälä 	return true;
3867d6bbafa1SChris Wilson 
3868d6bbafa1SChris Wilson check_page_flip:
3869d6bbafa1SChris Wilson 	intel_check_page_flip(dev, pipe);
3870d6bbafa1SChris Wilson 	return false;
387190a72f87SVille Syrjälä }
387290a72f87SVille Syrjälä 
3873ff1f525eSDaniel Vetter static irqreturn_t i915_irq_handler(int irq, void *arg)
3874a266c7d5SChris Wilson {
387545a83f84SDaniel Vetter 	struct drm_device *dev = arg;
38762d1013ddSJani Nikula 	struct drm_i915_private *dev_priv = dev->dev_private;
38778291ee90SChris Wilson 	u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
387838bde180SChris Wilson 	u32 flip_mask =
387938bde180SChris Wilson 		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
388038bde180SChris Wilson 		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
388138bde180SChris Wilson 	int pipe, ret = IRQ_NONE;
3882a266c7d5SChris Wilson 
38832dd2a883SImre Deak 	if (!intel_irqs_enabled(dev_priv))
38842dd2a883SImre Deak 		return IRQ_NONE;
38852dd2a883SImre Deak 
3886a266c7d5SChris Wilson 	iir = I915_READ(IIR);
388738bde180SChris Wilson 	do {
388838bde180SChris Wilson 		bool irq_received = (iir & ~flip_mask) != 0;
38898291ee90SChris Wilson 		bool blc_event = false;
3890a266c7d5SChris Wilson 
3891a266c7d5SChris Wilson 		/* Can't rely on pipestat interrupt bit in iir as it might
3892a266c7d5SChris Wilson 		 * have been cleared after the pipestat interrupt was received.
3893a266c7d5SChris Wilson 		 * It doesn't set the bit in iir again, but it still produces
3894a266c7d5SChris Wilson 		 * interrupts (for non-MSI).
3895a266c7d5SChris Wilson 		 */
3896222c7f51SDaniel Vetter 		spin_lock(&dev_priv->irq_lock);
3897a266c7d5SChris Wilson 		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
3898aaecdf61SDaniel Vetter 			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
3899a266c7d5SChris Wilson 
3900055e393fSDamien Lespiau 		for_each_pipe(dev_priv, pipe) {
3901a266c7d5SChris Wilson 			int reg = PIPESTAT(pipe);
3902a266c7d5SChris Wilson 			pipe_stats[pipe] = I915_READ(reg);
3903a266c7d5SChris Wilson 
390438bde180SChris Wilson 			/* Clear the PIPE*STAT regs before the IIR */
3905a266c7d5SChris Wilson 			if (pipe_stats[pipe] & 0x8000ffff) {
3906a266c7d5SChris Wilson 				I915_WRITE(reg, pipe_stats[pipe]);
390738bde180SChris Wilson 				irq_received = true;
3908a266c7d5SChris Wilson 			}
3909a266c7d5SChris Wilson 		}
3910222c7f51SDaniel Vetter 		spin_unlock(&dev_priv->irq_lock);
3911a266c7d5SChris Wilson 
3912a266c7d5SChris Wilson 		if (!irq_received)
3913a266c7d5SChris Wilson 			break;
3914a266c7d5SChris Wilson 
3915a266c7d5SChris Wilson 		/* Consume port.  Then clear IIR or we'll miss events */
391616c6c56bSVille Syrjälä 		if (I915_HAS_HOTPLUG(dev) &&
391716c6c56bSVille Syrjälä 		    iir & I915_DISPLAY_PORT_INTERRUPT)
391816c6c56bSVille Syrjälä 			i9xx_hpd_irq_handler(dev);
3919a266c7d5SChris Wilson 
392038bde180SChris Wilson 		I915_WRITE(IIR, iir & ~flip_mask);
3921a266c7d5SChris Wilson 		new_iir = I915_READ(IIR); /* Flush posted writes */
3922a266c7d5SChris Wilson 
3923a266c7d5SChris Wilson 		if (iir & I915_USER_INTERRUPT)
392474cdb337SChris Wilson 			notify_ring(&dev_priv->ring[RCS]);
3925a266c7d5SChris Wilson 
3926055e393fSDamien Lespiau 		for_each_pipe(dev_priv, pipe) {
392738bde180SChris Wilson 			int plane = pipe;
39283a77c4c4SDaniel Vetter 			if (HAS_FBC(dev))
392938bde180SChris Wilson 				plane = !plane;
39305e2032d4SVille Syrjälä 
393190a72f87SVille Syrjälä 			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
393290a72f87SVille Syrjälä 			    i915_handle_vblank(dev, plane, pipe, iir))
393390a72f87SVille Syrjälä 				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
3934a266c7d5SChris Wilson 
3935a266c7d5SChris Wilson 			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
3936a266c7d5SChris Wilson 				blc_event = true;
39374356d586SDaniel Vetter 
39384356d586SDaniel Vetter 			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
3939277de95eSDaniel Vetter 				i9xx_pipe_crc_irq_handler(dev, pipe);
39402d9d2b0bSVille Syrjälä 
39411f7247c0SDaniel Vetter 			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
39421f7247c0SDaniel Vetter 				intel_cpu_fifo_underrun_irq_handler(dev_priv,
39431f7247c0SDaniel Vetter 								    pipe);
3944a266c7d5SChris Wilson 		}
3945a266c7d5SChris Wilson 
3946a266c7d5SChris Wilson 		if (blc_event || (iir & I915_ASLE_INTERRUPT))
3947a266c7d5SChris Wilson 			intel_opregion_asle_intr(dev);
3948a266c7d5SChris Wilson 
3949a266c7d5SChris Wilson 		/* With MSI, interrupts are only generated when iir
3950a266c7d5SChris Wilson 		 * transitions from zero to nonzero.  If another bit got
3951a266c7d5SChris Wilson 		 * set while we were handling the existing iir bits, then
3952a266c7d5SChris Wilson 		 * we would never get another interrupt.
3953a266c7d5SChris Wilson 		 *
3954a266c7d5SChris Wilson 		 * This is fine on non-MSI as well, as if we hit this path
3955a266c7d5SChris Wilson 		 * we avoid exiting the interrupt handler only to generate
3956a266c7d5SChris Wilson 		 * another one.
3957a266c7d5SChris Wilson 		 *
3958a266c7d5SChris Wilson 		 * Note that for MSI this could cause a stray interrupt report
3959a266c7d5SChris Wilson 		 * if an interrupt landed in the time between writing IIR and
3960a266c7d5SChris Wilson 		 * the posting read.  This should be rare enough to never
3961a266c7d5SChris Wilson 		 * trigger the 99% of 100,000 interrupts test for disabling
3962a266c7d5SChris Wilson 		 * stray interrupts.
3963a266c7d5SChris Wilson 		 */
396438bde180SChris Wilson 		ret = IRQ_HANDLED;
3965a266c7d5SChris Wilson 		iir = new_iir;
396638bde180SChris Wilson 	} while (iir & ~flip_mask);
3967a266c7d5SChris Wilson 
3968a266c7d5SChris Wilson 	return ret;
3969a266c7d5SChris Wilson }
3970a266c7d5SChris Wilson 
3971a266c7d5SChris Wilson static void i915_irq_uninstall(struct drm_device * dev)
3972a266c7d5SChris Wilson {
39732d1013ddSJani Nikula 	struct drm_i915_private *dev_priv = dev->dev_private;
3974a266c7d5SChris Wilson 	int pipe;
3975a266c7d5SChris Wilson 
3976a266c7d5SChris Wilson 	if (I915_HAS_HOTPLUG(dev)) {
3977a266c7d5SChris Wilson 		I915_WRITE(PORT_HOTPLUG_EN, 0);
3978a266c7d5SChris Wilson 		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3979a266c7d5SChris Wilson 	}
3980a266c7d5SChris Wilson 
398100d98ebdSChris Wilson 	I915_WRITE16(HWSTAM, 0xffff);
3982055e393fSDamien Lespiau 	for_each_pipe(dev_priv, pipe) {
398355b39755SChris Wilson 		/* Clear enable bits; then clear status bits */
3984a266c7d5SChris Wilson 		I915_WRITE(PIPESTAT(pipe), 0);
398555b39755SChris Wilson 		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
398655b39755SChris Wilson 	}
3987a266c7d5SChris Wilson 	I915_WRITE(IMR, 0xffffffff);
3988a266c7d5SChris Wilson 	I915_WRITE(IER, 0x0);
3989a266c7d5SChris Wilson 
3990a266c7d5SChris Wilson 	I915_WRITE(IIR, I915_READ(IIR));
3991a266c7d5SChris Wilson }
3992a266c7d5SChris Wilson 
3993a266c7d5SChris Wilson static void i965_irq_preinstall(struct drm_device * dev)
3994a266c7d5SChris Wilson {
39952d1013ddSJani Nikula 	struct drm_i915_private *dev_priv = dev->dev_private;
3996a266c7d5SChris Wilson 	int pipe;
3997a266c7d5SChris Wilson 
3998a266c7d5SChris Wilson 	I915_WRITE(PORT_HOTPLUG_EN, 0);
3999a266c7d5SChris Wilson 	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4000a266c7d5SChris Wilson 
4001a266c7d5SChris Wilson 	I915_WRITE(HWSTAM, 0xeffe);
4002055e393fSDamien Lespiau 	for_each_pipe(dev_priv, pipe)
4003a266c7d5SChris Wilson 		I915_WRITE(PIPESTAT(pipe), 0);
4004a266c7d5SChris Wilson 	I915_WRITE(IMR, 0xffffffff);
4005a266c7d5SChris Wilson 	I915_WRITE(IER, 0x0);
4006a266c7d5SChris Wilson 	POSTING_READ(IER);
4007a266c7d5SChris Wilson }
4008a266c7d5SChris Wilson 
4009a266c7d5SChris Wilson static int i965_irq_postinstall(struct drm_device *dev)
4010a266c7d5SChris Wilson {
40112d1013ddSJani Nikula 	struct drm_i915_private *dev_priv = dev->dev_private;
4012bbba0a97SChris Wilson 	u32 enable_mask;
4013a266c7d5SChris Wilson 	u32 error_mask;
4014a266c7d5SChris Wilson 
4015a266c7d5SChris Wilson 	/* Unmask the interrupts that we always want on. */
4016bbba0a97SChris Wilson 	dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
4017adca4730SChris Wilson 			       I915_DISPLAY_PORT_INTERRUPT |
4018bbba0a97SChris Wilson 			       I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4019bbba0a97SChris Wilson 			       I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4020bbba0a97SChris Wilson 			       I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4021bbba0a97SChris Wilson 			       I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
4022bbba0a97SChris Wilson 			       I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
4023bbba0a97SChris Wilson 
4024bbba0a97SChris Wilson 	enable_mask = ~dev_priv->irq_mask;
402521ad8330SVille Syrjälä 	enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
402621ad8330SVille Syrjälä 			 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
4027bbba0a97SChris Wilson 	enable_mask |= I915_USER_INTERRUPT;
4028bbba0a97SChris Wilson 
4029bbba0a97SChris Wilson 	if (IS_G4X(dev))
4030bbba0a97SChris Wilson 		enable_mask |= I915_BSD_USER_INTERRUPT;
4031a266c7d5SChris Wilson 
4032b79480baSDaniel Vetter 	/* Interrupt setup is already guaranteed to be single-threaded, this is
4033b79480baSDaniel Vetter 	 * just to make the assert_spin_locked check happy. */
4034d6207435SDaniel Vetter 	spin_lock_irq(&dev_priv->irq_lock);
4035755e9019SImre Deak 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
4036755e9019SImre Deak 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4037755e9019SImre Deak 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4038d6207435SDaniel Vetter 	spin_unlock_irq(&dev_priv->irq_lock);
4039a266c7d5SChris Wilson 
4040a266c7d5SChris Wilson 	/*
4041a266c7d5SChris Wilson 	 * Enable some error detection, note the instruction error mask
4042a266c7d5SChris Wilson 	 * bit is reserved, so we leave it masked.
4043a266c7d5SChris Wilson 	 */
4044a266c7d5SChris Wilson 	if (IS_G4X(dev)) {
4045a266c7d5SChris Wilson 		error_mask = ~(GM45_ERROR_PAGE_TABLE |
4046a266c7d5SChris Wilson 			       GM45_ERROR_MEM_PRIV |
4047a266c7d5SChris Wilson 			       GM45_ERROR_CP_PRIV |
4048a266c7d5SChris Wilson 			       I915_ERROR_MEMORY_REFRESH);
4049a266c7d5SChris Wilson 	} else {
4050a266c7d5SChris Wilson 		error_mask = ~(I915_ERROR_PAGE_TABLE |
4051a266c7d5SChris Wilson 			       I915_ERROR_MEMORY_REFRESH);
4052a266c7d5SChris Wilson 	}
4053a266c7d5SChris Wilson 	I915_WRITE(EMR, error_mask);
4054a266c7d5SChris Wilson 
4055a266c7d5SChris Wilson 	I915_WRITE(IMR, dev_priv->irq_mask);
4056a266c7d5SChris Wilson 	I915_WRITE(IER, enable_mask);
4057a266c7d5SChris Wilson 	POSTING_READ(IER);
4058a266c7d5SChris Wilson 
405920afbda2SDaniel Vetter 	I915_WRITE(PORT_HOTPLUG_EN, 0);
406020afbda2SDaniel Vetter 	POSTING_READ(PORT_HOTPLUG_EN);
406120afbda2SDaniel Vetter 
4062f49e38ddSJani Nikula 	i915_enable_asle_pipestat(dev);
406320afbda2SDaniel Vetter 
406420afbda2SDaniel Vetter 	return 0;
406520afbda2SDaniel Vetter }
406620afbda2SDaniel Vetter 
4067bac56d5bSEgbert Eich static void i915_hpd_irq_setup(struct drm_device *dev)
406820afbda2SDaniel Vetter {
40692d1013ddSJani Nikula 	struct drm_i915_private *dev_priv = dev->dev_private;
407020afbda2SDaniel Vetter 	u32 hotplug_en;
407120afbda2SDaniel Vetter 
4072b5ea2d56SDaniel Vetter 	assert_spin_locked(&dev_priv->irq_lock);
4073b5ea2d56SDaniel Vetter 
4074bac56d5bSEgbert Eich 	hotplug_en = I915_READ(PORT_HOTPLUG_EN);
4075bac56d5bSEgbert Eich 	hotplug_en &= ~HOTPLUG_INT_EN_MASK;
4076adca4730SChris Wilson 	/* Note HDMI and DP share hotplug bits */
4077e5868a31SEgbert Eich 	/* enable bits are the same for all generations */
407887a02106SVille Syrjälä 	hotplug_en |= intel_hpd_enabled_irqs(dev, hpd_mask_i915);
4079a266c7d5SChris Wilson 	/* Programming the CRT detection parameters tends
4080a266c7d5SChris Wilson 	   to generate a spurious hotplug event about three
4081a266c7d5SChris Wilson 	   seconds later.  So just do it once.
4082a266c7d5SChris Wilson 	*/
4083a266c7d5SChris Wilson 	if (IS_G4X(dev))
4084a266c7d5SChris Wilson 		hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
408585fc95baSDaniel Vetter 	hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
4086a266c7d5SChris Wilson 	hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
4087a266c7d5SChris Wilson 
4088a266c7d5SChris Wilson 	/* Ignore TV since it's buggy */
4089a266c7d5SChris Wilson 	I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
4090a266c7d5SChris Wilson }
4091a266c7d5SChris Wilson 
4092ff1f525eSDaniel Vetter static irqreturn_t i965_irq_handler(int irq, void *arg)
4093a266c7d5SChris Wilson {
409445a83f84SDaniel Vetter 	struct drm_device *dev = arg;
40952d1013ddSJani Nikula 	struct drm_i915_private *dev_priv = dev->dev_private;
4096a266c7d5SChris Wilson 	u32 iir, new_iir;
4097a266c7d5SChris Wilson 	u32 pipe_stats[I915_MAX_PIPES];
4098a266c7d5SChris Wilson 	int ret = IRQ_NONE, pipe;
409921ad8330SVille Syrjälä 	u32 flip_mask =
410021ad8330SVille Syrjälä 		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
410121ad8330SVille Syrjälä 		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
4102a266c7d5SChris Wilson 
41032dd2a883SImre Deak 	if (!intel_irqs_enabled(dev_priv))
41042dd2a883SImre Deak 		return IRQ_NONE;
41052dd2a883SImre Deak 
4106a266c7d5SChris Wilson 	iir = I915_READ(IIR);
4107a266c7d5SChris Wilson 
4108a266c7d5SChris Wilson 	for (;;) {
4109501e01d7SVille Syrjälä 		bool irq_received = (iir & ~flip_mask) != 0;
41102c8ba29fSChris Wilson 		bool blc_event = false;
41112c8ba29fSChris Wilson 
4112a266c7d5SChris Wilson 		/* Can't rely on pipestat interrupt bit in iir as it might
4113a266c7d5SChris Wilson 		 * have been cleared after the pipestat interrupt was received.
4114a266c7d5SChris Wilson 		 * It doesn't set the bit in iir again, but it still produces
4115a266c7d5SChris Wilson 		 * interrupts (for non-MSI).
4116a266c7d5SChris Wilson 		 */
4117222c7f51SDaniel Vetter 		spin_lock(&dev_priv->irq_lock);
4118a266c7d5SChris Wilson 		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
4119aaecdf61SDaniel Vetter 			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
4120a266c7d5SChris Wilson 
4121055e393fSDamien Lespiau 		for_each_pipe(dev_priv, pipe) {
4122a266c7d5SChris Wilson 			int reg = PIPESTAT(pipe);
4123a266c7d5SChris Wilson 			pipe_stats[pipe] = I915_READ(reg);
4124a266c7d5SChris Wilson 
4125a266c7d5SChris Wilson 			/*
4126a266c7d5SChris Wilson 			 * Clear the PIPE*STAT regs before the IIR
4127a266c7d5SChris Wilson 			 */
4128a266c7d5SChris Wilson 			if (pipe_stats[pipe] & 0x8000ffff) {
4129a266c7d5SChris Wilson 				I915_WRITE(reg, pipe_stats[pipe]);
4130501e01d7SVille Syrjälä 				irq_received = true;
4131a266c7d5SChris Wilson 			}
4132a266c7d5SChris Wilson 		}
4133222c7f51SDaniel Vetter 		spin_unlock(&dev_priv->irq_lock);
4134a266c7d5SChris Wilson 
4135a266c7d5SChris Wilson 		if (!irq_received)
4136a266c7d5SChris Wilson 			break;
4137a266c7d5SChris Wilson 
4138a266c7d5SChris Wilson 		ret = IRQ_HANDLED;
4139a266c7d5SChris Wilson 
4140a266c7d5SChris Wilson 		/* Consume port.  Then clear IIR or we'll miss events */
414116c6c56bSVille Syrjälä 		if (iir & I915_DISPLAY_PORT_INTERRUPT)
414216c6c56bSVille Syrjälä 			i9xx_hpd_irq_handler(dev);
4143a266c7d5SChris Wilson 
414421ad8330SVille Syrjälä 		I915_WRITE(IIR, iir & ~flip_mask);
4145a266c7d5SChris Wilson 		new_iir = I915_READ(IIR); /* Flush posted writes */
4146a266c7d5SChris Wilson 
4147a266c7d5SChris Wilson 		if (iir & I915_USER_INTERRUPT)
414874cdb337SChris Wilson 			notify_ring(&dev_priv->ring[RCS]);
4149a266c7d5SChris Wilson 		if (iir & I915_BSD_USER_INTERRUPT)
415074cdb337SChris Wilson 			notify_ring(&dev_priv->ring[VCS]);
4151a266c7d5SChris Wilson 
4152055e393fSDamien Lespiau 		for_each_pipe(dev_priv, pipe) {
41532c8ba29fSChris Wilson 			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
415490a72f87SVille Syrjälä 			    i915_handle_vblank(dev, pipe, pipe, iir))
415590a72f87SVille Syrjälä 				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
4156a266c7d5SChris Wilson 
4157a266c7d5SChris Wilson 			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
4158a266c7d5SChris Wilson 				blc_event = true;
41594356d586SDaniel Vetter 
41604356d586SDaniel Vetter 			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
4161277de95eSDaniel Vetter 				i9xx_pipe_crc_irq_handler(dev, pipe);
4162a266c7d5SChris Wilson 
41631f7247c0SDaniel Vetter 			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
41641f7247c0SDaniel Vetter 				intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
41652d9d2b0bSVille Syrjälä 		}
4166a266c7d5SChris Wilson 
4167a266c7d5SChris Wilson 		if (blc_event || (iir & I915_ASLE_INTERRUPT))
4168a266c7d5SChris Wilson 			intel_opregion_asle_intr(dev);
4169a266c7d5SChris Wilson 
4170515ac2bbSDaniel Vetter 		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
4171515ac2bbSDaniel Vetter 			gmbus_irq_handler(dev);
4172515ac2bbSDaniel Vetter 
4173a266c7d5SChris Wilson 		/* With MSI, interrupts are only generated when iir
4174a266c7d5SChris Wilson 		 * transitions from zero to nonzero.  If another bit got
4175a266c7d5SChris Wilson 		 * set while we were handling the existing iir bits, then
4176a266c7d5SChris Wilson 		 * we would never get another interrupt.
4177a266c7d5SChris Wilson 		 *
4178a266c7d5SChris Wilson 		 * This is fine on non-MSI as well, as if we hit this path
4179a266c7d5SChris Wilson 		 * we avoid exiting the interrupt handler only to generate
4180a266c7d5SChris Wilson 		 * another one.
4181a266c7d5SChris Wilson 		 *
4182a266c7d5SChris Wilson 		 * Note that for MSI this could cause a stray interrupt report
4183a266c7d5SChris Wilson 		 * if an interrupt landed in the time between writing IIR and
4184a266c7d5SChris Wilson 		 * the posting read.  This should be rare enough to never
4185a266c7d5SChris Wilson 		 * trigger the 99% of 100,000 interrupts test for disabling
4186a266c7d5SChris Wilson 		 * stray interrupts.
4187a266c7d5SChris Wilson 		 */
4188a266c7d5SChris Wilson 		iir = new_iir;
4189a266c7d5SChris Wilson 	}
4190a266c7d5SChris Wilson 
4191a266c7d5SChris Wilson 	return ret;
4192a266c7d5SChris Wilson }
4193a266c7d5SChris Wilson 
4194a266c7d5SChris Wilson static void i965_irq_uninstall(struct drm_device * dev)
4195a266c7d5SChris Wilson {
41962d1013ddSJani Nikula 	struct drm_i915_private *dev_priv = dev->dev_private;
4197a266c7d5SChris Wilson 	int pipe;
4198a266c7d5SChris Wilson 
4199a266c7d5SChris Wilson 	if (!dev_priv)
4200a266c7d5SChris Wilson 		return;
4201a266c7d5SChris Wilson 
4202a266c7d5SChris Wilson 	I915_WRITE(PORT_HOTPLUG_EN, 0);
4203a266c7d5SChris Wilson 	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4204a266c7d5SChris Wilson 
4205a266c7d5SChris Wilson 	I915_WRITE(HWSTAM, 0xffffffff);
4206055e393fSDamien Lespiau 	for_each_pipe(dev_priv, pipe)
4207a266c7d5SChris Wilson 		I915_WRITE(PIPESTAT(pipe), 0);
4208a266c7d5SChris Wilson 	I915_WRITE(IMR, 0xffffffff);
4209a266c7d5SChris Wilson 	I915_WRITE(IER, 0x0);
4210a266c7d5SChris Wilson 
4211055e393fSDamien Lespiau 	for_each_pipe(dev_priv, pipe)
4212a266c7d5SChris Wilson 		I915_WRITE(PIPESTAT(pipe),
4213a266c7d5SChris Wilson 			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
4214a266c7d5SChris Wilson 	I915_WRITE(IIR, I915_READ(IIR));
4215a266c7d5SChris Wilson }
4216a266c7d5SChris Wilson 
4217fca52a55SDaniel Vetter /**
4218fca52a55SDaniel Vetter  * intel_irq_init - initializes irq support
4219fca52a55SDaniel Vetter  * @dev_priv: i915 device instance
4220fca52a55SDaniel Vetter  *
4221fca52a55SDaniel Vetter  * This function initializes all the irq support including work items, timers
4222fca52a55SDaniel Vetter  * and all the vtables. It does not setup the interrupt itself though.
4223fca52a55SDaniel Vetter  */
4224b963291cSDaniel Vetter void intel_irq_init(struct drm_i915_private *dev_priv)
4225f71d4af4SJesse Barnes {
4226b963291cSDaniel Vetter 	struct drm_device *dev = dev_priv->dev;
42278b2e326dSChris Wilson 
422877913b39SJani Nikula 	intel_hpd_init_work(dev_priv);
422977913b39SJani Nikula 
4230c6a828d3SDaniel Vetter 	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
4231a4da4fa4SDaniel Vetter 	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
42328b2e326dSChris Wilson 
4233a6706b45SDeepak S 	/* Let's track the enabled rps events */
4234b963291cSDaniel Vetter 	if (IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
42356c65a587SVille Syrjälä 		/* WaGsvRC0ResidencyMethod:vlv */
42366f4b12f8SChris Wilson 		dev_priv->pm_rps_events = GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED;
423731685c25SDeepak S 	else
4238a6706b45SDeepak S 		dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
4239a6706b45SDeepak S 
4240737b1506SChris Wilson 	INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work,
4241737b1506SChris Wilson 			  i915_hangcheck_elapsed);
424261bac78eSDaniel Vetter 
424397a19a24STomas Janousek 	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
42449ee32feaSDaniel Vetter 
4245b963291cSDaniel Vetter 	if (IS_GEN2(dev_priv)) {
42464cdb83ecSVille Syrjälä 		dev->max_vblank_count = 0;
42474cdb83ecSVille Syrjälä 		dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
4248b963291cSDaniel Vetter 	} else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
4249f71d4af4SJesse Barnes 		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
4250f71d4af4SJesse Barnes 		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
4251391f75e2SVille Syrjälä 	} else {
4252391f75e2SVille Syrjälä 		dev->driver->get_vblank_counter = i915_get_vblank_counter;
4253391f75e2SVille Syrjälä 		dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
4254f71d4af4SJesse Barnes 	}
4255f71d4af4SJesse Barnes 
425621da2700SVille Syrjälä 	/*
425721da2700SVille Syrjälä 	 * Opt out of the vblank disable timer on everything except gen2.
425821da2700SVille Syrjälä 	 * Gen2 doesn't have a hardware frame counter and so depends on
425921da2700SVille Syrjälä 	 * vblank interrupts to produce sane vblank seuquence numbers.
426021da2700SVille Syrjälä 	 */
4261b963291cSDaniel Vetter 	if (!IS_GEN2(dev_priv))
426221da2700SVille Syrjälä 		dev->vblank_disable_immediate = true;
426321da2700SVille Syrjälä 
4264f71d4af4SJesse Barnes 	dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
4265f71d4af4SJesse Barnes 	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
4266f71d4af4SJesse Barnes 
4267b963291cSDaniel Vetter 	if (IS_CHERRYVIEW(dev_priv)) {
426843f328d7SVille Syrjälä 		dev->driver->irq_handler = cherryview_irq_handler;
426943f328d7SVille Syrjälä 		dev->driver->irq_preinstall = cherryview_irq_preinstall;
427043f328d7SVille Syrjälä 		dev->driver->irq_postinstall = cherryview_irq_postinstall;
427143f328d7SVille Syrjälä 		dev->driver->irq_uninstall = cherryview_irq_uninstall;
427243f328d7SVille Syrjälä 		dev->driver->enable_vblank = valleyview_enable_vblank;
427343f328d7SVille Syrjälä 		dev->driver->disable_vblank = valleyview_disable_vblank;
427443f328d7SVille Syrjälä 		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4275b963291cSDaniel Vetter 	} else if (IS_VALLEYVIEW(dev_priv)) {
42767e231dbeSJesse Barnes 		dev->driver->irq_handler = valleyview_irq_handler;
42777e231dbeSJesse Barnes 		dev->driver->irq_preinstall = valleyview_irq_preinstall;
42787e231dbeSJesse Barnes 		dev->driver->irq_postinstall = valleyview_irq_postinstall;
42797e231dbeSJesse Barnes 		dev->driver->irq_uninstall = valleyview_irq_uninstall;
42807e231dbeSJesse Barnes 		dev->driver->enable_vblank = valleyview_enable_vblank;
42817e231dbeSJesse Barnes 		dev->driver->disable_vblank = valleyview_disable_vblank;
4282fa00abe0SEgbert Eich 		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4283b963291cSDaniel Vetter 	} else if (INTEL_INFO(dev_priv)->gen >= 8) {
4284abd58f01SBen Widawsky 		dev->driver->irq_handler = gen8_irq_handler;
4285723761b8SDaniel Vetter 		dev->driver->irq_preinstall = gen8_irq_reset;
4286abd58f01SBen Widawsky 		dev->driver->irq_postinstall = gen8_irq_postinstall;
4287abd58f01SBen Widawsky 		dev->driver->irq_uninstall = gen8_irq_uninstall;
4288abd58f01SBen Widawsky 		dev->driver->enable_vblank = gen8_enable_vblank;
4289abd58f01SBen Widawsky 		dev->driver->disable_vblank = gen8_disable_vblank;
42906dbf30ceSVille Syrjälä 		if (IS_BROXTON(dev))
4291e0a20ad7SShashank Sharma 			dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
42926dbf30ceSVille Syrjälä 		else if (HAS_PCH_SPT(dev))
42936dbf30ceSVille Syrjälä 			dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
42946dbf30ceSVille Syrjälä 		else
42956dbf30ceSVille Syrjälä 			dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
4296f71d4af4SJesse Barnes 	} else if (HAS_PCH_SPLIT(dev)) {
4297f71d4af4SJesse Barnes 		dev->driver->irq_handler = ironlake_irq_handler;
4298723761b8SDaniel Vetter 		dev->driver->irq_preinstall = ironlake_irq_reset;
4299f71d4af4SJesse Barnes 		dev->driver->irq_postinstall = ironlake_irq_postinstall;
4300f71d4af4SJesse Barnes 		dev->driver->irq_uninstall = ironlake_irq_uninstall;
4301f71d4af4SJesse Barnes 		dev->driver->enable_vblank = ironlake_enable_vblank;
4302f71d4af4SJesse Barnes 		dev->driver->disable_vblank = ironlake_disable_vblank;
4303e4ce95aaSVille Syrjälä 		dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
4304f71d4af4SJesse Barnes 	} else {
4305b963291cSDaniel Vetter 		if (INTEL_INFO(dev_priv)->gen == 2) {
4306c2798b19SChris Wilson 			dev->driver->irq_preinstall = i8xx_irq_preinstall;
4307c2798b19SChris Wilson 			dev->driver->irq_postinstall = i8xx_irq_postinstall;
4308c2798b19SChris Wilson 			dev->driver->irq_handler = i8xx_irq_handler;
4309c2798b19SChris Wilson 			dev->driver->irq_uninstall = i8xx_irq_uninstall;
4310b963291cSDaniel Vetter 		} else if (INTEL_INFO(dev_priv)->gen == 3) {
4311a266c7d5SChris Wilson 			dev->driver->irq_preinstall = i915_irq_preinstall;
4312a266c7d5SChris Wilson 			dev->driver->irq_postinstall = i915_irq_postinstall;
4313a266c7d5SChris Wilson 			dev->driver->irq_uninstall = i915_irq_uninstall;
4314a266c7d5SChris Wilson 			dev->driver->irq_handler = i915_irq_handler;
4315c2798b19SChris Wilson 		} else {
4316a266c7d5SChris Wilson 			dev->driver->irq_preinstall = i965_irq_preinstall;
4317a266c7d5SChris Wilson 			dev->driver->irq_postinstall = i965_irq_postinstall;
4318a266c7d5SChris Wilson 			dev->driver->irq_uninstall = i965_irq_uninstall;
4319a266c7d5SChris Wilson 			dev->driver->irq_handler = i965_irq_handler;
4320c2798b19SChris Wilson 		}
4321778eb334SVille Syrjälä 		if (I915_HAS_HOTPLUG(dev_priv))
4322778eb334SVille Syrjälä 			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4323f71d4af4SJesse Barnes 		dev->driver->enable_vblank = i915_enable_vblank;
4324f71d4af4SJesse Barnes 		dev->driver->disable_vblank = i915_disable_vblank;
4325f71d4af4SJesse Barnes 	}
4326f71d4af4SJesse Barnes }
432720afbda2SDaniel Vetter 
4328fca52a55SDaniel Vetter /**
4329fca52a55SDaniel Vetter  * intel_irq_install - enables the hardware interrupt
4330fca52a55SDaniel Vetter  * @dev_priv: i915 device instance
4331fca52a55SDaniel Vetter  *
4332fca52a55SDaniel Vetter  * This function enables the hardware interrupt handling, but leaves the hotplug
4333fca52a55SDaniel Vetter  * handling still disabled. It is called after intel_irq_init().
4334fca52a55SDaniel Vetter  *
4335fca52a55SDaniel Vetter  * In the driver load and resume code we need working interrupts in a few places
4336fca52a55SDaniel Vetter  * but don't want to deal with the hassle of concurrent probe and hotplug
4337fca52a55SDaniel Vetter  * workers. Hence the split into this two-stage approach.
4338fca52a55SDaniel Vetter  */
43392aeb7d3aSDaniel Vetter int intel_irq_install(struct drm_i915_private *dev_priv)
43402aeb7d3aSDaniel Vetter {
43412aeb7d3aSDaniel Vetter 	/*
43422aeb7d3aSDaniel Vetter 	 * We enable some interrupt sources in our postinstall hooks, so mark
43432aeb7d3aSDaniel Vetter 	 * interrupts as enabled _before_ actually enabling them to avoid
43442aeb7d3aSDaniel Vetter 	 * special cases in our ordering checks.
43452aeb7d3aSDaniel Vetter 	 */
43462aeb7d3aSDaniel Vetter 	dev_priv->pm.irqs_enabled = true;
43472aeb7d3aSDaniel Vetter 
43482aeb7d3aSDaniel Vetter 	return drm_irq_install(dev_priv->dev, dev_priv->dev->pdev->irq);
43492aeb7d3aSDaniel Vetter }
43502aeb7d3aSDaniel Vetter 
4351fca52a55SDaniel Vetter /**
4352fca52a55SDaniel Vetter  * intel_irq_uninstall - finilizes all irq handling
4353fca52a55SDaniel Vetter  * @dev_priv: i915 device instance
4354fca52a55SDaniel Vetter  *
4355fca52a55SDaniel Vetter  * This stops interrupt and hotplug handling and unregisters and frees all
4356fca52a55SDaniel Vetter  * resources acquired in the init functions.
4357fca52a55SDaniel Vetter  */
43582aeb7d3aSDaniel Vetter void intel_irq_uninstall(struct drm_i915_private *dev_priv)
43592aeb7d3aSDaniel Vetter {
43602aeb7d3aSDaniel Vetter 	drm_irq_uninstall(dev_priv->dev);
43612aeb7d3aSDaniel Vetter 	intel_hpd_cancel_work(dev_priv);
43622aeb7d3aSDaniel Vetter 	dev_priv->pm.irqs_enabled = false;
43632aeb7d3aSDaniel Vetter }
43642aeb7d3aSDaniel Vetter 
4365fca52a55SDaniel Vetter /**
4366fca52a55SDaniel Vetter  * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
4367fca52a55SDaniel Vetter  * @dev_priv: i915 device instance
4368fca52a55SDaniel Vetter  *
4369fca52a55SDaniel Vetter  * This function is used to disable interrupts at runtime, both in the runtime
4370fca52a55SDaniel Vetter  * pm and the system suspend/resume code.
4371fca52a55SDaniel Vetter  */
4372b963291cSDaniel Vetter void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
4373c67a470bSPaulo Zanoni {
4374b963291cSDaniel Vetter 	dev_priv->dev->driver->irq_uninstall(dev_priv->dev);
43752aeb7d3aSDaniel Vetter 	dev_priv->pm.irqs_enabled = false;
43762dd2a883SImre Deak 	synchronize_irq(dev_priv->dev->irq);
4377c67a470bSPaulo Zanoni }
4378c67a470bSPaulo Zanoni 
4379fca52a55SDaniel Vetter /**
4380fca52a55SDaniel Vetter  * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
4381fca52a55SDaniel Vetter  * @dev_priv: i915 device instance
4382fca52a55SDaniel Vetter  *
4383fca52a55SDaniel Vetter  * This function is used to enable interrupts at runtime, both in the runtime
4384fca52a55SDaniel Vetter  * pm and the system suspend/resume code.
4385fca52a55SDaniel Vetter  */
4386b963291cSDaniel Vetter void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
4387c67a470bSPaulo Zanoni {
43882aeb7d3aSDaniel Vetter 	dev_priv->pm.irqs_enabled = true;
4389b963291cSDaniel Vetter 	dev_priv->dev->driver->irq_preinstall(dev_priv->dev);
4390b963291cSDaniel Vetter 	dev_priv->dev->driver->irq_postinstall(dev_priv->dev);
4391c67a470bSPaulo Zanoni }
4392