xref: /openbmc/linux/drivers/gpu/drm/i915/i915_irq.c (revision 1be333d34e22db8fd07dca7efa78b93189eddf6b)
1c0e09200SDave Airlie /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
2c0e09200SDave Airlie  */
3c0e09200SDave Airlie /*
4c0e09200SDave Airlie  * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
5c0e09200SDave Airlie  * All Rights Reserved.
6c0e09200SDave Airlie  *
7c0e09200SDave Airlie  * Permission is hereby granted, free of charge, to any person obtaining a
8c0e09200SDave Airlie  * copy of this software and associated documentation files (the
9c0e09200SDave Airlie  * "Software"), to deal in the Software without restriction, including
10c0e09200SDave Airlie  * without limitation the rights to use, copy, modify, merge, publish,
11c0e09200SDave Airlie  * distribute, sub license, and/or sell copies of the Software, and to
12c0e09200SDave Airlie  * permit persons to whom the Software is furnished to do so, subject to
13c0e09200SDave Airlie  * the following conditions:
14c0e09200SDave Airlie  *
15c0e09200SDave Airlie  * The above copyright notice and this permission notice (including the
16c0e09200SDave Airlie  * next paragraph) shall be included in all copies or substantial portions
17c0e09200SDave Airlie  * of the Software.
18c0e09200SDave Airlie  *
19c0e09200SDave Airlie  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20c0e09200SDave Airlie  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21c0e09200SDave Airlie  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22c0e09200SDave Airlie  * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23c0e09200SDave Airlie  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24c0e09200SDave Airlie  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25c0e09200SDave Airlie  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26c0e09200SDave Airlie  *
27c0e09200SDave Airlie  */
28c0e09200SDave Airlie 
29a70491ccSJoe Perches #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30a70491ccSJoe Perches 
3163eeaf38SJesse Barnes #include <linux/sysrq.h>
325a0e3ad6STejun Heo #include <linux/slab.h>
33b2c88f5bSDamien Lespiau #include <linux/circ_buf.h>
34760285e7SDavid Howells #include <drm/drmP.h>
35760285e7SDavid Howells #include <drm/i915_drm.h>
36c0e09200SDave Airlie #include "i915_drv.h"
371c5d22f7SChris Wilson #include "i915_trace.h"
3879e53945SJesse Barnes #include "intel_drv.h"
39c0e09200SDave Airlie 
40fca52a55SDaniel Vetter /**
41fca52a55SDaniel Vetter  * DOC: interrupt handling
42fca52a55SDaniel Vetter  *
43fca52a55SDaniel Vetter  * These functions provide the basic support for enabling and disabling the
44fca52a55SDaniel Vetter  * interrupt handling. There's a lot more functionality in i915_irq.c
45fca52a55SDaniel Vetter  * and related files, but that will be described in separate chapters.
46fca52a55SDaniel Vetter  */
47fca52a55SDaniel Vetter 
48e4ce95aaSVille Syrjälä static const u32 hpd_ilk[HPD_NUM_PINS] = {
49e4ce95aaSVille Syrjälä 	[HPD_PORT_A] = DE_DP_A_HOTPLUG,
50e4ce95aaSVille Syrjälä };
51e4ce95aaSVille Syrjälä 
5223bb4cb5SVille Syrjälä static const u32 hpd_ivb[HPD_NUM_PINS] = {
5323bb4cb5SVille Syrjälä 	[HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
5423bb4cb5SVille Syrjälä };
5523bb4cb5SVille Syrjälä 
563a3b3c7dSVille Syrjälä static const u32 hpd_bdw[HPD_NUM_PINS] = {
573a3b3c7dSVille Syrjälä 	[HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG,
583a3b3c7dSVille Syrjälä };
593a3b3c7dSVille Syrjälä 
607c7e10dbSVille Syrjälä static const u32 hpd_ibx[HPD_NUM_PINS] = {
61e5868a31SEgbert Eich 	[HPD_CRT] = SDE_CRT_HOTPLUG,
62e5868a31SEgbert Eich 	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
63e5868a31SEgbert Eich 	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
64e5868a31SEgbert Eich 	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
65e5868a31SEgbert Eich 	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
66e5868a31SEgbert Eich };
67e5868a31SEgbert Eich 
687c7e10dbSVille Syrjälä static const u32 hpd_cpt[HPD_NUM_PINS] = {
69e5868a31SEgbert Eich 	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
7073c352a2SDaniel Vetter 	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
71e5868a31SEgbert Eich 	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
72e5868a31SEgbert Eich 	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
73e5868a31SEgbert Eich 	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
74e5868a31SEgbert Eich };
75e5868a31SEgbert Eich 
7626951cafSXiong Zhang static const u32 hpd_spt[HPD_NUM_PINS] = {
7774c0b395SVille Syrjälä 	[HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
7826951cafSXiong Zhang 	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
7926951cafSXiong Zhang 	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
8026951cafSXiong Zhang 	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
8126951cafSXiong Zhang 	[HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT
8226951cafSXiong Zhang };
8326951cafSXiong Zhang 
847c7e10dbSVille Syrjälä static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
85e5868a31SEgbert Eich 	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
86e5868a31SEgbert Eich 	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
87e5868a31SEgbert Eich 	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
88e5868a31SEgbert Eich 	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
89e5868a31SEgbert Eich 	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
90e5868a31SEgbert Eich 	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
91e5868a31SEgbert Eich };
92e5868a31SEgbert Eich 
937c7e10dbSVille Syrjälä static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
94e5868a31SEgbert Eich 	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
95e5868a31SEgbert Eich 	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
96e5868a31SEgbert Eich 	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
97e5868a31SEgbert Eich 	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
98e5868a31SEgbert Eich 	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
99e5868a31SEgbert Eich 	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
100e5868a31SEgbert Eich };
101e5868a31SEgbert Eich 
1024bca26d0SVille Syrjälä static const u32 hpd_status_i915[HPD_NUM_PINS] = {
103e5868a31SEgbert Eich 	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
104e5868a31SEgbert Eich 	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
105e5868a31SEgbert Eich 	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
106e5868a31SEgbert Eich 	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
107e5868a31SEgbert Eich 	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
108e5868a31SEgbert Eich 	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
109e5868a31SEgbert Eich };
110e5868a31SEgbert Eich 
111e0a20ad7SShashank Sharma /* BXT hpd list */
112e0a20ad7SShashank Sharma static const u32 hpd_bxt[HPD_NUM_PINS] = {
1137f3561beSSonika Jindal 	[HPD_PORT_A] = BXT_DE_PORT_HP_DDIA,
114e0a20ad7SShashank Sharma 	[HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
115e0a20ad7SShashank Sharma 	[HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
116e0a20ad7SShashank Sharma };
117e0a20ad7SShashank Sharma 
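/*
 * Illustrative note: each hpd_* table above maps an HPD pin (the index into
 * the HPD_NUM_PINS sized array) to the platform-specific hotplug bit in the
 * relevant hotplug/interrupt register, so the generic hotplug code only has
 * to look up the right bit for the platform at hand.
 */
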
1185c502442SPaulo Zanoni /* IIR can theoretically queue up two events. Be paranoid. */
119f86f3fb0SPaulo Zanoni #define GEN8_IRQ_RESET_NDX(type, which) do { \
1205c502442SPaulo Zanoni 	I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
1215c502442SPaulo Zanoni 	POSTING_READ(GEN8_##type##_IMR(which)); \
1225c502442SPaulo Zanoni 	I915_WRITE(GEN8_##type##_IER(which), 0); \
1235c502442SPaulo Zanoni 	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
1245c502442SPaulo Zanoni 	POSTING_READ(GEN8_##type##_IIR(which)); \
1255c502442SPaulo Zanoni 	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
1265c502442SPaulo Zanoni 	POSTING_READ(GEN8_##type##_IIR(which)); \
1275c502442SPaulo Zanoni } while (0)
1285c502442SPaulo Zanoni 
1293488d4ebSVille Syrjälä #define GEN3_IRQ_RESET(type) do { \
130a9d356a6SPaulo Zanoni 	I915_WRITE(type##IMR, 0xffffffff); \
1315c502442SPaulo Zanoni 	POSTING_READ(type##IMR); \
132a9d356a6SPaulo Zanoni 	I915_WRITE(type##IER, 0); \
1335c502442SPaulo Zanoni 	I915_WRITE(type##IIR, 0xffffffff); \
1345c502442SPaulo Zanoni 	POSTING_READ(type##IIR); \
1355c502442SPaulo Zanoni 	I915_WRITE(type##IIR, 0xffffffff); \
1365c502442SPaulo Zanoni 	POSTING_READ(type##IIR); \
137a9d356a6SPaulo Zanoni } while (0)
138a9d356a6SPaulo Zanoni 
139e9e9848aSVille Syrjälä #define GEN2_IRQ_RESET(type) do { \
140e9e9848aSVille Syrjälä 	I915_WRITE16(type##IMR, 0xffff); \
141e9e9848aSVille Syrjälä 	POSTING_READ16(type##IMR); \
142e9e9848aSVille Syrjälä 	I915_WRITE16(type##IER, 0); \
143e9e9848aSVille Syrjälä 	I915_WRITE16(type##IIR, 0xffff); \
144e9e9848aSVille Syrjälä 	POSTING_READ16(type##IIR); \
145e9e9848aSVille Syrjälä 	I915_WRITE16(type##IIR, 0xffff); \
146e9e9848aSVille Syrjälä 	POSTING_READ16(type##IIR); \
147e9e9848aSVille Syrjälä } while (0)
148e9e9848aSVille Syrjälä 
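/*
 * Illustrative note: through the token pasting above, a use such as
 * GEN3_IRQ_RESET(DE) would operate on DEIMR/DEIER/DEIIR: mask every
 * interrupt, disable delivery via IER, and clear IIR twice, since (per the
 * "IIR can theoretically queue up two events" comment above) a second event
 * may be latched behind the one currently visible in IIR.
 */
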
149337ba017SPaulo Zanoni /*
150337ba017SPaulo Zanoni  * We should clear IMR at preinstall/uninstall, and just check at postinstall.
151337ba017SPaulo Zanoni  */
1523488d4ebSVille Syrjälä static void gen3_assert_iir_is_zero(struct drm_i915_private *dev_priv,
153f0f59a00SVille Syrjälä 				    i915_reg_t reg)
154b51a2842SVille Syrjälä {
155b51a2842SVille Syrjälä 	u32 val = I915_READ(reg);
156b51a2842SVille Syrjälä 
157b51a2842SVille Syrjälä 	if (val == 0)
158b51a2842SVille Syrjälä 		return;
159b51a2842SVille Syrjälä 
160b51a2842SVille Syrjälä 	WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
161f0f59a00SVille Syrjälä 	     i915_mmio_reg_offset(reg), val);
162b51a2842SVille Syrjälä 	I915_WRITE(reg, 0xffffffff);
163b51a2842SVille Syrjälä 	POSTING_READ(reg);
164b51a2842SVille Syrjälä 	I915_WRITE(reg, 0xffffffff);
165b51a2842SVille Syrjälä 	POSTING_READ(reg);
166b51a2842SVille Syrjälä }
167337ba017SPaulo Zanoni 
168e9e9848aSVille Syrjälä static void gen2_assert_iir_is_zero(struct drm_i915_private *dev_priv,
169e9e9848aSVille Syrjälä 				    i915_reg_t reg)
170e9e9848aSVille Syrjälä {
171e9e9848aSVille Syrjälä 	u16 val = I915_READ16(reg);
172e9e9848aSVille Syrjälä 
173e9e9848aSVille Syrjälä 	if (val == 0)
174e9e9848aSVille Syrjälä 		return;
175e9e9848aSVille Syrjälä 
176e9e9848aSVille Syrjälä 	WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
177e9e9848aSVille Syrjälä 	     i915_mmio_reg_offset(reg), val);
178e9e9848aSVille Syrjälä 	I915_WRITE16(reg, 0xffff);
179e9e9848aSVille Syrjälä 	POSTING_READ16(reg);
180e9e9848aSVille Syrjälä 	I915_WRITE16(reg, 0xffff);
181e9e9848aSVille Syrjälä 	POSTING_READ16(reg);
182e9e9848aSVille Syrjälä }
183e9e9848aSVille Syrjälä 
18435079899SPaulo Zanoni #define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
1853488d4ebSVille Syrjälä 	gen3_assert_iir_is_zero(dev_priv, GEN8_##type##_IIR(which)); \
18635079899SPaulo Zanoni 	I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
1877d1bd539SVille Syrjälä 	I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
1887d1bd539SVille Syrjälä 	POSTING_READ(GEN8_##type##_IMR(which)); \
18935079899SPaulo Zanoni } while (0)
19035079899SPaulo Zanoni 
1913488d4ebSVille Syrjälä #define GEN3_IRQ_INIT(type, imr_val, ier_val) do { \
1923488d4ebSVille Syrjälä 	gen3_assert_iir_is_zero(dev_priv, type##IIR); \
19335079899SPaulo Zanoni 	I915_WRITE(type##IER, (ier_val)); \
1947d1bd539SVille Syrjälä 	I915_WRITE(type##IMR, (imr_val)); \
1957d1bd539SVille Syrjälä 	POSTING_READ(type##IMR); \
19635079899SPaulo Zanoni } while (0)
19735079899SPaulo Zanoni 
198e9e9848aSVille Syrjälä #define GEN2_IRQ_INIT(type, imr_val, ier_val) do { \
199e9e9848aSVille Syrjälä 	gen2_assert_iir_is_zero(dev_priv, type##IIR); \
200e9e9848aSVille Syrjälä 	I915_WRITE16(type##IER, (ier_val)); \
201e9e9848aSVille Syrjälä 	I915_WRITE16(type##IMR, (imr_val)); \
202e9e9848aSVille Syrjälä 	POSTING_READ16(type##IMR); \
203e9e9848aSVille Syrjälä } while (0)
204e9e9848aSVille Syrjälä 
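/*
 * Illustrative note: the *_IRQ_INIT macros are the enable-side counterpart
 * of the reset macros above. They first assert that IIR is already clear
 * (gen3_assert_iir_is_zero()/gen2_assert_iir_is_zero()), then program IER
 * with the events to deliver and IMR with the events to mask, finishing
 * with a posting read of IMR to flush the writes to the hardware.
 */
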
205c9a9a268SImre Deak static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
20626705e20SSagar Arun Kamble static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
207c9a9a268SImre Deak 
2080706f17cSEgbert Eich /* For display hotplug interrupt */
2090706f17cSEgbert Eich static inline void
2100706f17cSEgbert Eich i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
2110706f17cSEgbert Eich 				     uint32_t mask,
2120706f17cSEgbert Eich 				     uint32_t bits)
2130706f17cSEgbert Eich {
2140706f17cSEgbert Eich 	uint32_t val;
2150706f17cSEgbert Eich 
21667520415SChris Wilson 	lockdep_assert_held(&dev_priv->irq_lock);
2170706f17cSEgbert Eich 	WARN_ON(bits & ~mask);
2180706f17cSEgbert Eich 
2190706f17cSEgbert Eich 	val = I915_READ(PORT_HOTPLUG_EN);
2200706f17cSEgbert Eich 	val &= ~mask;
2210706f17cSEgbert Eich 	val |= bits;
2220706f17cSEgbert Eich 	I915_WRITE(PORT_HOTPLUG_EN, val);
2230706f17cSEgbert Eich }
2240706f17cSEgbert Eich 
2250706f17cSEgbert Eich /**
2260706f17cSEgbert Eich  * i915_hotplug_interrupt_update - update hotplug interrupt enable
2270706f17cSEgbert Eich  * @dev_priv: driver private
2280706f17cSEgbert Eich  * @mask: bits to update
2290706f17cSEgbert Eich  * @bits: bits to enable
2300706f17cSEgbert Eich  * NOTE: the HPD enable bits are modified both inside and outside
2310706f17cSEgbert Eich  * of an interrupt context. So that concurrent read-modify-write cycles
2320706f17cSEgbert Eich  * do not interfere, these bits are protected by a spinlock. Since this
2330706f17cSEgbert Eich  * function is usually not called from a context where the lock is
2340706f17cSEgbert Eich  * held already, this function acquires the lock itself. A non-locking
2350706f17cSEgbert Eich  * version is also available.
2360706f17cSEgbert Eich  */
2370706f17cSEgbert Eich void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
2380706f17cSEgbert Eich 				   uint32_t mask,
2390706f17cSEgbert Eich 				   uint32_t bits)
2400706f17cSEgbert Eich {
2410706f17cSEgbert Eich 	spin_lock_irq(&dev_priv->irq_lock);
2420706f17cSEgbert Eich 	i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
2430706f17cSEgbert Eich 	spin_unlock_irq(&dev_priv->irq_lock);
2440706f17cSEgbert Eich }
2450706f17cSEgbert Eich 
246d9dc34f1SVille Syrjälä /**
247d9dc34f1SVille Syrjälä  * ilk_update_display_irq - update DEIMR
248d9dc34f1SVille Syrjälä  * @dev_priv: driver private
249d9dc34f1SVille Syrjälä  * @interrupt_mask: mask of interrupt bits to update
250d9dc34f1SVille Syrjälä  * @enabled_irq_mask: mask of interrupt bits to enable
251d9dc34f1SVille Syrjälä  */
252fbdedaeaSVille Syrjälä void ilk_update_display_irq(struct drm_i915_private *dev_priv,
253d9dc34f1SVille Syrjälä 			    uint32_t interrupt_mask,
254d9dc34f1SVille Syrjälä 			    uint32_t enabled_irq_mask)
255036a4a7dSZhenyu Wang {
256d9dc34f1SVille Syrjälä 	uint32_t new_val;
257d9dc34f1SVille Syrjälä 
25867520415SChris Wilson 	lockdep_assert_held(&dev_priv->irq_lock);
2594bc9d430SDaniel Vetter 
260d9dc34f1SVille Syrjälä 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
261d9dc34f1SVille Syrjälä 
2629df7575fSJesse Barnes 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
263c67a470bSPaulo Zanoni 		return;
264c67a470bSPaulo Zanoni 
265d9dc34f1SVille Syrjälä 	new_val = dev_priv->irq_mask;
266d9dc34f1SVille Syrjälä 	new_val &= ~interrupt_mask;
267d9dc34f1SVille Syrjälä 	new_val |= (~enabled_irq_mask & interrupt_mask);
268d9dc34f1SVille Syrjälä 
269d9dc34f1SVille Syrjälä 	if (new_val != dev_priv->irq_mask) {
270d9dc34f1SVille Syrjälä 		dev_priv->irq_mask = new_val;
2711ec14ad3SChris Wilson 		I915_WRITE(DEIMR, dev_priv->irq_mask);
2723143a2bfSChris Wilson 		POSTING_READ(DEIMR);
273036a4a7dSZhenyu Wang 	}
274036a4a7dSZhenyu Wang }
275036a4a7dSZhenyu Wang 
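/*
 * Worked example (illustrative): calling ilk_update_display_irq() with
 * interrupt_mask = BIT(0) | BIT(1) and enabled_irq_mask = BIT(0) first
 * clears both bits from irq_mask and then ORs BIT(1) back in, so bit 0
 * ends up unmasked (interrupt enabled) and bit 1 masked, while bits outside
 * interrupt_mask keep their previous state. The same mask/enable pattern is
 * reused by the GT, PM, DE port/pipe and SDE update helpers below.
 */
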
27643eaea13SPaulo Zanoni /**
27743eaea13SPaulo Zanoni  * ilk_update_gt_irq - update GTIMR
27843eaea13SPaulo Zanoni  * @dev_priv: driver private
27943eaea13SPaulo Zanoni  * @interrupt_mask: mask of interrupt bits to update
28043eaea13SPaulo Zanoni  * @enabled_irq_mask: mask of interrupt bits to enable
28143eaea13SPaulo Zanoni  */
28243eaea13SPaulo Zanoni static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
28343eaea13SPaulo Zanoni 			      uint32_t interrupt_mask,
28443eaea13SPaulo Zanoni 			      uint32_t enabled_irq_mask)
28543eaea13SPaulo Zanoni {
28667520415SChris Wilson 	lockdep_assert_held(&dev_priv->irq_lock);
28743eaea13SPaulo Zanoni 
28815a17aaeSDaniel Vetter 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
28915a17aaeSDaniel Vetter 
2909df7575fSJesse Barnes 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
291c67a470bSPaulo Zanoni 		return;
292c67a470bSPaulo Zanoni 
29343eaea13SPaulo Zanoni 	dev_priv->gt_irq_mask &= ~interrupt_mask;
29443eaea13SPaulo Zanoni 	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
29543eaea13SPaulo Zanoni 	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
29643eaea13SPaulo Zanoni }
29743eaea13SPaulo Zanoni 
298480c8033SDaniel Vetter void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
29943eaea13SPaulo Zanoni {
30043eaea13SPaulo Zanoni 	ilk_update_gt_irq(dev_priv, mask, mask);
30131bb59ccSChris Wilson 	POSTING_READ_FW(GTIMR);
30243eaea13SPaulo Zanoni }
30343eaea13SPaulo Zanoni 
304480c8033SDaniel Vetter void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
30543eaea13SPaulo Zanoni {
30643eaea13SPaulo Zanoni 	ilk_update_gt_irq(dev_priv, mask, 0);
30743eaea13SPaulo Zanoni }
30843eaea13SPaulo Zanoni 
309f0f59a00SVille Syrjälä static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv)
310b900b949SImre Deak {
311bca2bf2aSPandiyan, Dhinakaran 	return INTEL_GEN(dev_priv) >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
312b900b949SImre Deak }
313b900b949SImre Deak 
314f0f59a00SVille Syrjälä static i915_reg_t gen6_pm_imr(struct drm_i915_private *dev_priv)
315a72fbc3aSImre Deak {
316bca2bf2aSPandiyan, Dhinakaran 	return INTEL_GEN(dev_priv) >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR;
317a72fbc3aSImre Deak }
318a72fbc3aSImre Deak 
319f0f59a00SVille Syrjälä static i915_reg_t gen6_pm_ier(struct drm_i915_private *dev_priv)
320b900b949SImre Deak {
321bca2bf2aSPandiyan, Dhinakaran 	return INTEL_GEN(dev_priv) >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER;
322b900b949SImre Deak }
323b900b949SImre Deak 
324edbfdb45SPaulo Zanoni /**
325edbfdb45SPaulo Zanoni  * snb_update_pm_irq - update GEN6_PMIMR
326edbfdb45SPaulo Zanoni  * @dev_priv: driver private
327edbfdb45SPaulo Zanoni  * @interrupt_mask: mask of interrupt bits to update
328edbfdb45SPaulo Zanoni  * @enabled_irq_mask: mask of interrupt bits to enable
329edbfdb45SPaulo Zanoni  */
330edbfdb45SPaulo Zanoni static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
331edbfdb45SPaulo Zanoni 			      uint32_t interrupt_mask,
332edbfdb45SPaulo Zanoni 			      uint32_t enabled_irq_mask)
333edbfdb45SPaulo Zanoni {
334605cd25bSPaulo Zanoni 	uint32_t new_val;
335edbfdb45SPaulo Zanoni 
33615a17aaeSDaniel Vetter 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
33715a17aaeSDaniel Vetter 
33867520415SChris Wilson 	lockdep_assert_held(&dev_priv->irq_lock);
339edbfdb45SPaulo Zanoni 
340f4e9af4fSAkash Goel 	new_val = dev_priv->pm_imr;
341f52ecbcfSPaulo Zanoni 	new_val &= ~interrupt_mask;
342f52ecbcfSPaulo Zanoni 	new_val |= (~enabled_irq_mask & interrupt_mask);
343f52ecbcfSPaulo Zanoni 
344f4e9af4fSAkash Goel 	if (new_val != dev_priv->pm_imr) {
345f4e9af4fSAkash Goel 		dev_priv->pm_imr = new_val;
346f4e9af4fSAkash Goel 		I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_imr);
347a72fbc3aSImre Deak 		POSTING_READ(gen6_pm_imr(dev_priv));
348edbfdb45SPaulo Zanoni 	}
349f52ecbcfSPaulo Zanoni }
350edbfdb45SPaulo Zanoni 
351f4e9af4fSAkash Goel void gen6_unmask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
352edbfdb45SPaulo Zanoni {
3539939fba2SImre Deak 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
3549939fba2SImre Deak 		return;
3559939fba2SImre Deak 
356edbfdb45SPaulo Zanoni 	snb_update_pm_irq(dev_priv, mask, mask);
357edbfdb45SPaulo Zanoni }
358edbfdb45SPaulo Zanoni 
359f4e9af4fSAkash Goel static void __gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
3609939fba2SImre Deak {
3619939fba2SImre Deak 	snb_update_pm_irq(dev_priv, mask, 0);
3629939fba2SImre Deak }
3639939fba2SImre Deak 
364f4e9af4fSAkash Goel void gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
365edbfdb45SPaulo Zanoni {
3669939fba2SImre Deak 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
3679939fba2SImre Deak 		return;
3689939fba2SImre Deak 
369f4e9af4fSAkash Goel 	__gen6_mask_pm_irq(dev_priv, mask);
370f4e9af4fSAkash Goel }
371f4e9af4fSAkash Goel 
3723814fd77SOscar Mateo static void gen6_reset_pm_iir(struct drm_i915_private *dev_priv, u32 reset_mask)
373f4e9af4fSAkash Goel {
374f4e9af4fSAkash Goel 	i915_reg_t reg = gen6_pm_iir(dev_priv);
375f4e9af4fSAkash Goel 
37667520415SChris Wilson 	lockdep_assert_held(&dev_priv->irq_lock);
377f4e9af4fSAkash Goel 
378f4e9af4fSAkash Goel 	I915_WRITE(reg, reset_mask);
379f4e9af4fSAkash Goel 	I915_WRITE(reg, reset_mask);
380f4e9af4fSAkash Goel 	POSTING_READ(reg);
381f4e9af4fSAkash Goel }
382f4e9af4fSAkash Goel 
3833814fd77SOscar Mateo static void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, u32 enable_mask)
384f4e9af4fSAkash Goel {
38567520415SChris Wilson 	lockdep_assert_held(&dev_priv->irq_lock);
386f4e9af4fSAkash Goel 
387f4e9af4fSAkash Goel 	dev_priv->pm_ier |= enable_mask;
388f4e9af4fSAkash Goel 	I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->pm_ier);
389f4e9af4fSAkash Goel 	gen6_unmask_pm_irq(dev_priv, enable_mask);
390f4e9af4fSAkash Goel 	/* unmask_pm_irq provides an implicit barrier (POSTING_READ) */
391f4e9af4fSAkash Goel }
392f4e9af4fSAkash Goel 
3933814fd77SOscar Mateo static void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, u32 disable_mask)
394f4e9af4fSAkash Goel {
39567520415SChris Wilson 	lockdep_assert_held(&dev_priv->irq_lock);
396f4e9af4fSAkash Goel 
397f4e9af4fSAkash Goel 	dev_priv->pm_ier &= ~disable_mask;
398f4e9af4fSAkash Goel 	__gen6_mask_pm_irq(dev_priv, disable_mask);
399f4e9af4fSAkash Goel 	I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->pm_ier);
400f4e9af4fSAkash Goel 	/* a barrier is missing here, but we don't really need one */
401edbfdb45SPaulo Zanoni }
402edbfdb45SPaulo Zanoni 
403dc97997aSChris Wilson void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
4043cc134e3SImre Deak {
4053cc134e3SImre Deak 	spin_lock_irq(&dev_priv->irq_lock);
406f4e9af4fSAkash Goel 	gen6_reset_pm_iir(dev_priv, dev_priv->pm_rps_events);
407562d9baeSSagar Arun Kamble 	dev_priv->gt_pm.rps.pm_iir = 0;
4083cc134e3SImre Deak 	spin_unlock_irq(&dev_priv->irq_lock);
4093cc134e3SImre Deak }
4103cc134e3SImre Deak 
41191d14251STvrtko Ursulin void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
412b900b949SImre Deak {
413562d9baeSSagar Arun Kamble 	struct intel_rps *rps = &dev_priv->gt_pm.rps;
414562d9baeSSagar Arun Kamble 
415562d9baeSSagar Arun Kamble 	if (READ_ONCE(rps->interrupts_enabled))
416f2a91d1aSChris Wilson 		return;
417f2a91d1aSChris Wilson 
418b900b949SImre Deak 	spin_lock_irq(&dev_priv->irq_lock);
419562d9baeSSagar Arun Kamble 	WARN_ON_ONCE(rps->pm_iir);
420c33d247dSChris Wilson 	WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
421562d9baeSSagar Arun Kamble 	rps->interrupts_enabled = true;
422b900b949SImre Deak 	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
42378e68d36SImre Deak 
424b900b949SImre Deak 	spin_unlock_irq(&dev_priv->irq_lock);
425b900b949SImre Deak }
426b900b949SImre Deak 
42791d14251STvrtko Ursulin void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
428b900b949SImre Deak {
429562d9baeSSagar Arun Kamble 	struct intel_rps *rps = &dev_priv->gt_pm.rps;
430562d9baeSSagar Arun Kamble 
431562d9baeSSagar Arun Kamble 	if (!READ_ONCE(rps->interrupts_enabled))
432f2a91d1aSChris Wilson 		return;
433f2a91d1aSChris Wilson 
434d4d70aa5SImre Deak 	spin_lock_irq(&dev_priv->irq_lock);
435562d9baeSSagar Arun Kamble 	rps->interrupts_enabled = false;
4369939fba2SImre Deak 
437b20e3cfeSDave Gordon 	I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0u));
4389939fba2SImre Deak 
439f4e9af4fSAkash Goel 	gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
44058072ccbSImre Deak 
44158072ccbSImre Deak 	spin_unlock_irq(&dev_priv->irq_lock);
44291c8a326SChris Wilson 	synchronize_irq(dev_priv->drm.irq);
443c33d247dSChris Wilson 
444c33d247dSChris Wilson 	/* Now that we will not be generating any more work, flush any
4453814fd77SOscar Mateo 	 * outstanding tasks. As we are called on the RPS idle path,
446c33d247dSChris Wilson 	 * we will reset the GPU to minimum frequencies, so the current
447c33d247dSChris Wilson 	 * state of the worker can be discarded.
448c33d247dSChris Wilson 	 */
449562d9baeSSagar Arun Kamble 	cancel_work_sync(&rps->work);
450c33d247dSChris Wilson 	gen6_reset_rps_interrupts(dev_priv);
451b900b949SImre Deak }
452b900b949SImre Deak 
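/*
 * Illustrative note on the teardown ordering above: RPS interrupts are
 * first marked disabled under irq_lock, then masked via GEN6_PMINTRMSK and
 * the PM IMR/IER, and only after dropping the lock do we synchronize_irq()
 * and cancel the RPS worker, so no new work can be queued once the flush
 * completes. gen6_reset_rps_interrupts() finally clears any IIR bits that
 * were latched in the meantime.
 */
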
45326705e20SSagar Arun Kamble void gen9_reset_guc_interrupts(struct drm_i915_private *dev_priv)
45426705e20SSagar Arun Kamble {
455*1be333d3SSagar Arun Kamble 	assert_rpm_wakelock_held(dev_priv);
456*1be333d3SSagar Arun Kamble 
45726705e20SSagar Arun Kamble 	spin_lock_irq(&dev_priv->irq_lock);
45826705e20SSagar Arun Kamble 	gen6_reset_pm_iir(dev_priv, dev_priv->pm_guc_events);
45926705e20SSagar Arun Kamble 	spin_unlock_irq(&dev_priv->irq_lock);
46026705e20SSagar Arun Kamble }
46126705e20SSagar Arun Kamble 
46226705e20SSagar Arun Kamble void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv)
46326705e20SSagar Arun Kamble {
464*1be333d3SSagar Arun Kamble 	assert_rpm_wakelock_held(dev_priv);
465*1be333d3SSagar Arun Kamble 
46626705e20SSagar Arun Kamble 	spin_lock_irq(&dev_priv->irq_lock);
46726705e20SSagar Arun Kamble 	if (!dev_priv->guc.interrupts_enabled) {
46826705e20SSagar Arun Kamble 		WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) &
46926705e20SSagar Arun Kamble 				       dev_priv->pm_guc_events);
47026705e20SSagar Arun Kamble 		dev_priv->guc.interrupts_enabled = true;
47126705e20SSagar Arun Kamble 		gen6_enable_pm_irq(dev_priv, dev_priv->pm_guc_events);
47226705e20SSagar Arun Kamble 	}
47326705e20SSagar Arun Kamble 	spin_unlock_irq(&dev_priv->irq_lock);
47426705e20SSagar Arun Kamble }
47526705e20SSagar Arun Kamble 
47626705e20SSagar Arun Kamble void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv)
47726705e20SSagar Arun Kamble {
478*1be333d3SSagar Arun Kamble 	assert_rpm_wakelock_held(dev_priv);
479*1be333d3SSagar Arun Kamble 
48026705e20SSagar Arun Kamble 	spin_lock_irq(&dev_priv->irq_lock);
48126705e20SSagar Arun Kamble 	dev_priv->guc.interrupts_enabled = false;
48226705e20SSagar Arun Kamble 
48326705e20SSagar Arun Kamble 	gen6_disable_pm_irq(dev_priv, dev_priv->pm_guc_events);
48426705e20SSagar Arun Kamble 
48526705e20SSagar Arun Kamble 	spin_unlock_irq(&dev_priv->irq_lock);
48626705e20SSagar Arun Kamble 	synchronize_irq(dev_priv->drm.irq);
48726705e20SSagar Arun Kamble 
48826705e20SSagar Arun Kamble 	gen9_reset_guc_interrupts(dev_priv);
48926705e20SSagar Arun Kamble }
49026705e20SSagar Arun Kamble 
4910961021aSBen Widawsky /**
4923a3b3c7dSVille Syrjälä  * bdw_update_port_irq - update DE port interrupt
4933a3b3c7dSVille Syrjälä  * @dev_priv: driver private
4943a3b3c7dSVille Syrjälä  * @interrupt_mask: mask of interrupt bits to update
4953a3b3c7dSVille Syrjälä  * @enabled_irq_mask: mask of interrupt bits to enable
4963a3b3c7dSVille Syrjälä  */
4973a3b3c7dSVille Syrjälä static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
4983a3b3c7dSVille Syrjälä 				uint32_t interrupt_mask,
4993a3b3c7dSVille Syrjälä 				uint32_t enabled_irq_mask)
5003a3b3c7dSVille Syrjälä {
5013a3b3c7dSVille Syrjälä 	uint32_t new_val;
5023a3b3c7dSVille Syrjälä 	uint32_t old_val;
5033a3b3c7dSVille Syrjälä 
50467520415SChris Wilson 	lockdep_assert_held(&dev_priv->irq_lock);
5053a3b3c7dSVille Syrjälä 
5063a3b3c7dSVille Syrjälä 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
5073a3b3c7dSVille Syrjälä 
5083a3b3c7dSVille Syrjälä 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
5093a3b3c7dSVille Syrjälä 		return;
5103a3b3c7dSVille Syrjälä 
5113a3b3c7dSVille Syrjälä 	old_val = I915_READ(GEN8_DE_PORT_IMR);
5123a3b3c7dSVille Syrjälä 
5133a3b3c7dSVille Syrjälä 	new_val = old_val;
5143a3b3c7dSVille Syrjälä 	new_val &= ~interrupt_mask;
5153a3b3c7dSVille Syrjälä 	new_val |= (~enabled_irq_mask & interrupt_mask);
5163a3b3c7dSVille Syrjälä 
5173a3b3c7dSVille Syrjälä 	if (new_val != old_val) {
5183a3b3c7dSVille Syrjälä 		I915_WRITE(GEN8_DE_PORT_IMR, new_val);
5193a3b3c7dSVille Syrjälä 		POSTING_READ(GEN8_DE_PORT_IMR);
5203a3b3c7dSVille Syrjälä 	}
5213a3b3c7dSVille Syrjälä }
5223a3b3c7dSVille Syrjälä 
5233a3b3c7dSVille Syrjälä /**
524013d3752SVille Syrjälä  * bdw_update_pipe_irq - update DE pipe interrupt
525013d3752SVille Syrjälä  * @dev_priv: driver private
526013d3752SVille Syrjälä  * @pipe: pipe whose interrupt to update
527013d3752SVille Syrjälä  * @interrupt_mask: mask of interrupt bits to update
528013d3752SVille Syrjälä  * @enabled_irq_mask: mask of interrupt bits to enable
529013d3752SVille Syrjälä  */
530013d3752SVille Syrjälä void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
531013d3752SVille Syrjälä 			 enum pipe pipe,
532013d3752SVille Syrjälä 			 uint32_t interrupt_mask,
533013d3752SVille Syrjälä 			 uint32_t enabled_irq_mask)
534013d3752SVille Syrjälä {
535013d3752SVille Syrjälä 	uint32_t new_val;
536013d3752SVille Syrjälä 
53767520415SChris Wilson 	lockdep_assert_held(&dev_priv->irq_lock);
538013d3752SVille Syrjälä 
539013d3752SVille Syrjälä 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
540013d3752SVille Syrjälä 
541013d3752SVille Syrjälä 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
542013d3752SVille Syrjälä 		return;
543013d3752SVille Syrjälä 
544013d3752SVille Syrjälä 	new_val = dev_priv->de_irq_mask[pipe];
545013d3752SVille Syrjälä 	new_val &= ~interrupt_mask;
546013d3752SVille Syrjälä 	new_val |= (~enabled_irq_mask & interrupt_mask);
547013d3752SVille Syrjälä 
548013d3752SVille Syrjälä 	if (new_val != dev_priv->de_irq_mask[pipe]) {
549013d3752SVille Syrjälä 		dev_priv->de_irq_mask[pipe] = new_val;
550013d3752SVille Syrjälä 		I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
551013d3752SVille Syrjälä 		POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
552013d3752SVille Syrjälä 	}
553013d3752SVille Syrjälä }
554013d3752SVille Syrjälä 
555013d3752SVille Syrjälä /**
556fee884edSDaniel Vetter  * ibx_display_interrupt_update - update SDEIMR
557fee884edSDaniel Vetter  * @dev_priv: driver private
558fee884edSDaniel Vetter  * @interrupt_mask: mask of interrupt bits to update
559fee884edSDaniel Vetter  * @enabled_irq_mask: mask of interrupt bits to enable
560fee884edSDaniel Vetter  */
56147339cd9SDaniel Vetter void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
562fee884edSDaniel Vetter 				  uint32_t interrupt_mask,
563fee884edSDaniel Vetter 				  uint32_t enabled_irq_mask)
564fee884edSDaniel Vetter {
565fee884edSDaniel Vetter 	uint32_t sdeimr = I915_READ(SDEIMR);
566fee884edSDaniel Vetter 	sdeimr &= ~interrupt_mask;
567fee884edSDaniel Vetter 	sdeimr |= (~enabled_irq_mask & interrupt_mask);
568fee884edSDaniel Vetter 
56915a17aaeSDaniel Vetter 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
57015a17aaeSDaniel Vetter 
57167520415SChris Wilson 	lockdep_assert_held(&dev_priv->irq_lock);
572fee884edSDaniel Vetter 
5739df7575fSJesse Barnes 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
574c67a470bSPaulo Zanoni 		return;
575c67a470bSPaulo Zanoni 
576fee884edSDaniel Vetter 	I915_WRITE(SDEIMR, sdeimr);
577fee884edSDaniel Vetter 	POSTING_READ(SDEIMR);
578fee884edSDaniel Vetter }
5798664281bSPaulo Zanoni 
5806b12ca56SVille Syrjälä u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
5816b12ca56SVille Syrjälä 			      enum pipe pipe)
5827c463586SKeith Packard {
5836b12ca56SVille Syrjälä 	u32 status_mask = dev_priv->pipestat_irq_mask[pipe];
58410c59c51SImre Deak 	u32 enable_mask = status_mask << 16;
58510c59c51SImre Deak 
5866b12ca56SVille Syrjälä 	lockdep_assert_held(&dev_priv->irq_lock);
5876b12ca56SVille Syrjälä 
5886b12ca56SVille Syrjälä 	if (INTEL_GEN(dev_priv) < 5)
5896b12ca56SVille Syrjälä 		goto out;
5906b12ca56SVille Syrjälä 
59110c59c51SImre Deak 	/*
592724a6905SVille Syrjälä 	 * On pipe A we don't support the PSR interrupt yet,
593724a6905SVille Syrjälä 	 * on pipe B and C the same bit MBZ.
59410c59c51SImre Deak 	 */
59510c59c51SImre Deak 	if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
59610c59c51SImre Deak 		return 0;
597724a6905SVille Syrjälä 	/*
598724a6905SVille Syrjälä 	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
599724a6905SVille Syrjälä 	 * A the same bit is for perf counters which we don't use either.
600724a6905SVille Syrjälä 	 */
601724a6905SVille Syrjälä 	if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
602724a6905SVille Syrjälä 		return 0;
60310c59c51SImre Deak 
60410c59c51SImre Deak 	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
60510c59c51SImre Deak 			 SPRITE0_FLIP_DONE_INT_EN_VLV |
60610c59c51SImre Deak 			 SPRITE1_FLIP_DONE_INT_EN_VLV);
60710c59c51SImre Deak 	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
60810c59c51SImre Deak 		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
60910c59c51SImre Deak 	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
61010c59c51SImre Deak 		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;
61110c59c51SImre Deak 
6126b12ca56SVille Syrjälä out:
6136b12ca56SVille Syrjälä 	WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
6146b12ca56SVille Syrjälä 		  status_mask & ~PIPESTAT_INT_STATUS_MASK,
6156b12ca56SVille Syrjälä 		  "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
6166b12ca56SVille Syrjälä 		  pipe_name(pipe), enable_mask, status_mask);
6176b12ca56SVille Syrjälä 
61810c59c51SImre Deak 	return enable_mask;
61910c59c51SImre Deak }
62010c59c51SImre Deak 
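/*
 * Illustrative note: PIPESTAT packs the interrupt enable bits in its high
 * half and the corresponding status bits in its low half, which is why the
 * enable mask above starts out as status_mask << 16 before the VLV/CHV PSR
 * and sprite flip-done special cases are applied.
 */
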
6216b12ca56SVille Syrjälä void i915_enable_pipestat(struct drm_i915_private *dev_priv,
6226b12ca56SVille Syrjälä 			  enum pipe pipe, u32 status_mask)
623755e9019SImre Deak {
6246b12ca56SVille Syrjälä 	i915_reg_t reg = PIPESTAT(pipe);
625755e9019SImre Deak 	u32 enable_mask;
626755e9019SImre Deak 
6276b12ca56SVille Syrjälä 	WARN_ONCE(status_mask & ~PIPESTAT_INT_STATUS_MASK,
6286b12ca56SVille Syrjälä 		  "pipe %c: status_mask=0x%x\n",
6296b12ca56SVille Syrjälä 		  pipe_name(pipe), status_mask);
6306b12ca56SVille Syrjälä 
6316b12ca56SVille Syrjälä 	lockdep_assert_held(&dev_priv->irq_lock);
6326b12ca56SVille Syrjälä 	WARN_ON(!intel_irqs_enabled(dev_priv));
6336b12ca56SVille Syrjälä 
6346b12ca56SVille Syrjälä 	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == status_mask)
6356b12ca56SVille Syrjälä 		return;
6366b12ca56SVille Syrjälä 
6376b12ca56SVille Syrjälä 	dev_priv->pipestat_irq_mask[pipe] |= status_mask;
6386b12ca56SVille Syrjälä 	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
6396b12ca56SVille Syrjälä 
6406b12ca56SVille Syrjälä 	I915_WRITE(reg, enable_mask | status_mask);
6416b12ca56SVille Syrjälä 	POSTING_READ(reg);
642755e9019SImre Deak }
643755e9019SImre Deak 
6446b12ca56SVille Syrjälä void i915_disable_pipestat(struct drm_i915_private *dev_priv,
6456b12ca56SVille Syrjälä 			   enum pipe pipe, u32 status_mask)
646755e9019SImre Deak {
6476b12ca56SVille Syrjälä 	i915_reg_t reg = PIPESTAT(pipe);
648755e9019SImre Deak 	u32 enable_mask;
649755e9019SImre Deak 
6506b12ca56SVille Syrjälä 	WARN_ONCE(status_mask & ~PIPESTAT_INT_STATUS_MASK,
6516b12ca56SVille Syrjälä 		  "pipe %c: status_mask=0x%x\n",
6526b12ca56SVille Syrjälä 		  pipe_name(pipe), status_mask);
6536b12ca56SVille Syrjälä 
6546b12ca56SVille Syrjälä 	lockdep_assert_held(&dev_priv->irq_lock);
6556b12ca56SVille Syrjälä 	WARN_ON(!intel_irqs_enabled(dev_priv));
6566b12ca56SVille Syrjälä 
6576b12ca56SVille Syrjälä 	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == 0)
6586b12ca56SVille Syrjälä 		return;
6596b12ca56SVille Syrjälä 
6606b12ca56SVille Syrjälä 	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
6616b12ca56SVille Syrjälä 	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
6626b12ca56SVille Syrjälä 
6636b12ca56SVille Syrjälä 	I915_WRITE(reg, enable_mask | status_mask);
6646b12ca56SVille Syrjälä 	POSTING_READ(reg);
665755e9019SImre Deak }
666755e9019SImre Deak 
667c0e09200SDave Airlie /**
668f49e38ddSJani Nikula  * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
66914bb2c11STvrtko Ursulin  * @dev_priv: i915 device private
67001c66889SZhao Yakui  */
67191d14251STvrtko Ursulin static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
67201c66889SZhao Yakui {
67391d14251STvrtko Ursulin 	if (!dev_priv->opregion.asle || !IS_MOBILE(dev_priv))
674f49e38ddSJani Nikula 		return;
675f49e38ddSJani Nikula 
67613321786SDaniel Vetter 	spin_lock_irq(&dev_priv->irq_lock);
67701c66889SZhao Yakui 
678755e9019SImre Deak 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
67991d14251STvrtko Ursulin 	if (INTEL_GEN(dev_priv) >= 4)
6803b6c42e8SDaniel Vetter 		i915_enable_pipestat(dev_priv, PIPE_A,
681755e9019SImre Deak 				     PIPE_LEGACY_BLC_EVENT_STATUS);
6821ec14ad3SChris Wilson 
68313321786SDaniel Vetter 	spin_unlock_irq(&dev_priv->irq_lock);
68401c66889SZhao Yakui }
68501c66889SZhao Yakui 
686f75f3746SVille Syrjälä /*
687f75f3746SVille Syrjälä  * This timing diagram depicts the video signal in and
688f75f3746SVille Syrjälä  * around the vertical blanking period.
689f75f3746SVille Syrjälä  *
690f75f3746SVille Syrjälä  * Assumptions about the fictitious mode used in this example:
691f75f3746SVille Syrjälä  *  vblank_start >= 3
692f75f3746SVille Syrjälä  *  vsync_start = vblank_start + 1
693f75f3746SVille Syrjälä  *  vsync_end = vblank_start + 2
694f75f3746SVille Syrjälä  *  vtotal = vblank_start + 3
695f75f3746SVille Syrjälä  *
696f75f3746SVille Syrjälä  *           start of vblank:
697f75f3746SVille Syrjälä  *           latch double buffered registers
698f75f3746SVille Syrjälä  *           increment frame counter (ctg+)
699f75f3746SVille Syrjälä  *           generate start of vblank interrupt (gen4+)
700f75f3746SVille Syrjälä  *           |
701f75f3746SVille Syrjälä  *           |          frame start:
702f75f3746SVille Syrjälä  *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
703f75f3746SVille Syrjälä  *           |          may be shifted forward 1-3 extra lines via PIPECONF
704f75f3746SVille Syrjälä  *           |          |
705f75f3746SVille Syrjälä  *           |          |  start of vsync:
706f75f3746SVille Syrjälä  *           |          |  generate vsync interrupt
707f75f3746SVille Syrjälä  *           |          |  |
708f75f3746SVille Syrjälä  * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
709f75f3746SVille Syrjälä  *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
710f75f3746SVille Syrjälä  * ----va---> <-----------------vb--------------------> <--------va-------------
711f75f3746SVille Syrjälä  *       |          |       <----vs----->                     |
712f75f3746SVille Syrjälä  * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
713f75f3746SVille Syrjälä  * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
714f75f3746SVille Syrjälä  * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
715f75f3746SVille Syrjälä  *       |          |                                         |
716f75f3746SVille Syrjälä  *       last visible pixel                                   first visible pixel
717f75f3746SVille Syrjälä  *                  |                                         increment frame counter (gen3/4)
718f75f3746SVille Syrjälä  *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
719f75f3746SVille Syrjälä  *
720f75f3746SVille Syrjälä  * x  = horizontal active
721f75f3746SVille Syrjälä  * _  = horizontal blanking
722f75f3746SVille Syrjälä  * hs = horizontal sync
723f75f3746SVille Syrjälä  * va = vertical active
724f75f3746SVille Syrjälä  * vb = vertical blanking
725f75f3746SVille Syrjälä  * vs = vertical sync
726f75f3746SVille Syrjälä  * vbs = vblank_start (number)
727f75f3746SVille Syrjälä  *
728f75f3746SVille Syrjälä  * Summary:
729f75f3746SVille Syrjälä  * - most events happen at the start of horizontal sync
730f75f3746SVille Syrjälä  * - frame start happens at the start of horizontal blank, 1-4 lines
731f75f3746SVille Syrjälä  *   (depending on PIPECONF settings) after the start of vblank
732f75f3746SVille Syrjälä  * - gen3/4 pixel and frame counter are synchronized with the start
733f75f3746SVille Syrjälä  *   of horizontal active on the first line of vertical active
734f75f3746SVille Syrjälä  */
735f75f3746SVille Syrjälä 
73642f52ef8SKeith Packard /* Called from drm generic code, passed a 'crtc', which
73742f52ef8SKeith Packard  * we use as a pipe index
73842f52ef8SKeith Packard  */
73988e72717SThierry Reding static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
7400a3e67a4SJesse Barnes {
741fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
742f0f59a00SVille Syrjälä 	i915_reg_t high_frame, low_frame;
7430b2a8e09SVille Syrjälä 	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
7445caa0feaSDaniel Vetter 	const struct drm_display_mode *mode = &dev->vblank[pipe].hwmode;
745694e409dSVille Syrjälä 	unsigned long irqflags;
746391f75e2SVille Syrjälä 
7470b2a8e09SVille Syrjälä 	htotal = mode->crtc_htotal;
7480b2a8e09SVille Syrjälä 	hsync_start = mode->crtc_hsync_start;
7490b2a8e09SVille Syrjälä 	vbl_start = mode->crtc_vblank_start;
7500b2a8e09SVille Syrjälä 	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
7510b2a8e09SVille Syrjälä 		vbl_start = DIV_ROUND_UP(vbl_start, 2);
752391f75e2SVille Syrjälä 
7530b2a8e09SVille Syrjälä 	/* Convert to pixel count */
7540b2a8e09SVille Syrjälä 	vbl_start *= htotal;
7550b2a8e09SVille Syrjälä 
7560b2a8e09SVille Syrjälä 	/* Start of vblank event occurs at start of hsync */
7570b2a8e09SVille Syrjälä 	vbl_start -= htotal - hsync_start;
7580b2a8e09SVille Syrjälä 
7599db4a9c7SJesse Barnes 	high_frame = PIPEFRAME(pipe);
7609db4a9c7SJesse Barnes 	low_frame = PIPEFRAMEPIXEL(pipe);
7615eddb70bSChris Wilson 
762694e409dSVille Syrjälä 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
763694e409dSVille Syrjälä 
7640a3e67a4SJesse Barnes 	/*
7650a3e67a4SJesse Barnes 	 * High & low register fields aren't synchronized, so make sure
7660a3e67a4SJesse Barnes 	 * we get a low value that's stable across two reads of the high
7670a3e67a4SJesse Barnes 	 * register.
7680a3e67a4SJesse Barnes 	 */
7690a3e67a4SJesse Barnes 	do {
770694e409dSVille Syrjälä 		high1 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK;
771694e409dSVille Syrjälä 		low   = I915_READ_FW(low_frame);
772694e409dSVille Syrjälä 		high2 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK;
7730a3e67a4SJesse Barnes 	} while (high1 != high2);
7740a3e67a4SJesse Barnes 
775694e409dSVille Syrjälä 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
776694e409dSVille Syrjälä 
7775eddb70bSChris Wilson 	high1 >>= PIPE_FRAME_HIGH_SHIFT;
778391f75e2SVille Syrjälä 	pixel = low & PIPE_PIXEL_MASK;
7795eddb70bSChris Wilson 	low >>= PIPE_FRAME_LOW_SHIFT;
780391f75e2SVille Syrjälä 
781391f75e2SVille Syrjälä 	/*
782391f75e2SVille Syrjälä 	 * The frame counter increments at beginning of active.
783391f75e2SVille Syrjälä 	 * Cook up a vblank counter by also checking the pixel
784391f75e2SVille Syrjälä 	 * counter against vblank start.
785391f75e2SVille Syrjälä 	 */
786edc08d0aSVille Syrjälä 	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
7870a3e67a4SJesse Barnes }
7880a3e67a4SJesse Barnes 
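/*
 * Illustrative note: the value returned above stitches a 24 bit frame count
 * together from the high/low register fields and adds one if the pixel
 * counter has already passed vblank start. That compensates for the
 * hardware counter only incrementing at the start of active, so the
 * cooked-up counter appears to increment at the start of vblank, matching
 * the ctg+ behaviour described in the timing diagram above.
 */
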
789974e59baSDave Airlie static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
7909880b7a5SJesse Barnes {
791fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
7929880b7a5SJesse Barnes 
793649636efSVille Syrjälä 	return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
7949880b7a5SJesse Barnes }
7959880b7a5SJesse Barnes 
796aec0246fSUma Shankar /*
797aec0246fSUma Shankar  * On certain encoders on certain platforms, pipe
798aec0246fSUma Shankar  * scanline register will not work to get the scanline,
799aec0246fSUma Shankar  * since the timings are driven from the PORT or issues
800aec0246fSUma Shankar  * with scanline register updates.
801aec0246fSUma Shankar  * This function will use Framestamp and current
802aec0246fSUma Shankar  * timestamp registers to calculate the scanline.
803aec0246fSUma Shankar  */
804aec0246fSUma Shankar static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc)
805aec0246fSUma Shankar {
806aec0246fSUma Shankar 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
807aec0246fSUma Shankar 	struct drm_vblank_crtc *vblank =
808aec0246fSUma Shankar 		&crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
809aec0246fSUma Shankar 	const struct drm_display_mode *mode = &vblank->hwmode;
810aec0246fSUma Shankar 	u32 vblank_start = mode->crtc_vblank_start;
811aec0246fSUma Shankar 	u32 vtotal = mode->crtc_vtotal;
812aec0246fSUma Shankar 	u32 htotal = mode->crtc_htotal;
813aec0246fSUma Shankar 	u32 clock = mode->crtc_clock;
814aec0246fSUma Shankar 	u32 scanline, scan_prev_time, scan_curr_time, scan_post_time;
815aec0246fSUma Shankar 
816aec0246fSUma Shankar 	/*
817aec0246fSUma Shankar 	 * To avoid the race condition where we might cross into the
818aec0246fSUma Shankar 	 * next vblank just between the PIPE_FRMTMSTMP and TIMESTAMP_CTR
819aec0246fSUma Shankar 	 * reads, we make sure we read PIPE_FRMTMSTMP and TIMESTAMP_CTR
820aec0246fSUma Shankar 	 * during the same frame.
821aec0246fSUma Shankar 	 */
822aec0246fSUma Shankar 	do {
823aec0246fSUma Shankar 		/*
824aec0246fSUma Shankar 		 * This field provides read back of the display
825aec0246fSUma Shankar 		 * pipe frame time stamp. The time stamp value
826aec0246fSUma Shankar 		 * is sampled at every start of vertical blank.
827aec0246fSUma Shankar 		 */
828aec0246fSUma Shankar 		scan_prev_time = I915_READ_FW(PIPE_FRMTMSTMP(crtc->pipe));
829aec0246fSUma Shankar 
830aec0246fSUma Shankar 		/*
831aec0246fSUma Shankar 		 * The TIMESTAMP_CTR register has the current
832aec0246fSUma Shankar 		 * time stamp value.
833aec0246fSUma Shankar 		 */
834aec0246fSUma Shankar 		scan_curr_time = I915_READ_FW(IVB_TIMESTAMP_CTR);
835aec0246fSUma Shankar 
836aec0246fSUma Shankar 		scan_post_time = I915_READ_FW(PIPE_FRMTMSTMP(crtc->pipe));
837aec0246fSUma Shankar 	} while (scan_post_time != scan_prev_time);
838aec0246fSUma Shankar 
839aec0246fSUma Shankar 	scanline = div_u64(mul_u32_u32(scan_curr_time - scan_prev_time,
840aec0246fSUma Shankar 					clock), 1000 * htotal);
841aec0246fSUma Shankar 	scanline = min(scanline, vtotal - 1);
842aec0246fSUma Shankar 	scanline = (scanline + vblank_start) % vtotal;
843aec0246fSUma Shankar 
844aec0246fSUma Shankar 	return scanline;
845aec0246fSUma Shankar }
846aec0246fSUma Shankar 
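/*
 * Illustrative note, assuming the usual DRM convention that crtc_clock is
 * in kHz and that the frame/current timestamps above tick in microseconds:
 * delta_us * clock / 1000 converts the time since the last vblank into a
 * pixel count, dividing by htotal turns that into lines, and the result is
 * clamped and then offset by vblank_start (modulo vtotal) because the
 * frame timestamp is sampled at the start of vertical blank.
 */
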
84775aa3f63SVille Syrjälä /* I915_READ_FW, only for fast reads of display block, no need for forcewake etc. */
848a225f079SVille Syrjälä static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
849a225f079SVille Syrjälä {
850a225f079SVille Syrjälä 	struct drm_device *dev = crtc->base.dev;
851fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
8525caa0feaSDaniel Vetter 	const struct drm_display_mode *mode;
8535caa0feaSDaniel Vetter 	struct drm_vblank_crtc *vblank;
854a225f079SVille Syrjälä 	enum pipe pipe = crtc->pipe;
85580715b2fSVille Syrjälä 	int position, vtotal;
856a225f079SVille Syrjälä 
85772259536SVille Syrjälä 	if (!crtc->active)
85872259536SVille Syrjälä 		return -1;
85972259536SVille Syrjälä 
8605caa0feaSDaniel Vetter 	vblank = &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
8615caa0feaSDaniel Vetter 	mode = &vblank->hwmode;
8625caa0feaSDaniel Vetter 
863aec0246fSUma Shankar 	if (mode->private_flags & I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP)
864aec0246fSUma Shankar 		return __intel_get_crtc_scanline_from_timestamp(crtc);
865aec0246fSUma Shankar 
86680715b2fSVille Syrjälä 	vtotal = mode->crtc_vtotal;
867a225f079SVille Syrjälä 	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
868a225f079SVille Syrjälä 		vtotal /= 2;
869a225f079SVille Syrjälä 
87091d14251STvrtko Ursulin 	if (IS_GEN2(dev_priv))
87175aa3f63SVille Syrjälä 		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
872a225f079SVille Syrjälä 	else
87375aa3f63SVille Syrjälä 		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
874a225f079SVille Syrjälä 
875a225f079SVille Syrjälä 	/*
87641b578fbSJesse Barnes 	 * On HSW, the DSL reg (0x70000) appears to return 0 if we
87741b578fbSJesse Barnes 	 * read it just before the start of vblank.  So try it again
87841b578fbSJesse Barnes 	 * so we don't accidentally end up spanning a vblank frame
87941b578fbSJesse Barnes 	 * increment, causing the pipe_update_end() code to squawk at us.
88041b578fbSJesse Barnes 	 *
88141b578fbSJesse Barnes 	 * The nature of this problem means we can't simply check the ISR
88241b578fbSJesse Barnes 	 * bit and return the vblank start value; nor can we use the scanline
88341b578fbSJesse Barnes 	 * debug register in the transcoder as it appears to have the same
88441b578fbSJesse Barnes 	 * problem.  We may need to extend this to include other platforms,
88541b578fbSJesse Barnes 	 * but so far testing only shows the problem on HSW.
88641b578fbSJesse Barnes 	 */
88791d14251STvrtko Ursulin 	if (HAS_DDI(dev_priv) && !position) {
88841b578fbSJesse Barnes 		int i, temp;
88941b578fbSJesse Barnes 
89041b578fbSJesse Barnes 		for (i = 0; i < 100; i++) {
89141b578fbSJesse Barnes 			udelay(1);
892707bdd3fSVille Syrjälä 			temp = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
89341b578fbSJesse Barnes 			if (temp != position) {
89441b578fbSJesse Barnes 				position = temp;
89541b578fbSJesse Barnes 				break;
89641b578fbSJesse Barnes 			}
89741b578fbSJesse Barnes 		}
89841b578fbSJesse Barnes 	}
89941b578fbSJesse Barnes 
90041b578fbSJesse Barnes 	/*
90180715b2fSVille Syrjälä 	 * See update_scanline_offset() for the details on the
90280715b2fSVille Syrjälä 	 * scanline_offset adjustment.
903a225f079SVille Syrjälä 	 */
90480715b2fSVille Syrjälä 	return (position + crtc->scanline_offset) % vtotal;
905a225f079SVille Syrjälä }
906a225f079SVille Syrjälä 
9071bf6ad62SDaniel Vetter static bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
9081bf6ad62SDaniel Vetter 				     bool in_vblank_irq, int *vpos, int *hpos,
9093bb403bfSVille Syrjälä 				     ktime_t *stime, ktime_t *etime,
9103bb403bfSVille Syrjälä 				     const struct drm_display_mode *mode)
9110af7e4dfSMario Kleiner {
912fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
91398187836SVille Syrjälä 	struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv,
91498187836SVille Syrjälä 								pipe);
9153aa18df8SVille Syrjälä 	int position;
91678e8fc6bSVille Syrjälä 	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
917ad3543edSMario Kleiner 	unsigned long irqflags;
9180af7e4dfSMario Kleiner 
919fc467a22SMaarten Lankhorst 	if (WARN_ON(!mode->crtc_clock)) {
9200af7e4dfSMario Kleiner 		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
9219db4a9c7SJesse Barnes 				 "pipe %c\n", pipe_name(pipe));
9221bf6ad62SDaniel Vetter 		return false;
9230af7e4dfSMario Kleiner 	}
9240af7e4dfSMario Kleiner 
925c2baf4b7SVille Syrjälä 	htotal = mode->crtc_htotal;
92678e8fc6bSVille Syrjälä 	hsync_start = mode->crtc_hsync_start;
927c2baf4b7SVille Syrjälä 	vtotal = mode->crtc_vtotal;
928c2baf4b7SVille Syrjälä 	vbl_start = mode->crtc_vblank_start;
929c2baf4b7SVille Syrjälä 	vbl_end = mode->crtc_vblank_end;
9300af7e4dfSMario Kleiner 
931d31faf65SVille Syrjälä 	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
932d31faf65SVille Syrjälä 		vbl_start = DIV_ROUND_UP(vbl_start, 2);
933d31faf65SVille Syrjälä 		vbl_end /= 2;
934d31faf65SVille Syrjälä 		vtotal /= 2;
935d31faf65SVille Syrjälä 	}
936d31faf65SVille Syrjälä 
937ad3543edSMario Kleiner 	/*
938ad3543edSMario Kleiner 	 * Lock uncore.lock, as we will do multiple timing critical raw
939ad3543edSMario Kleiner 	 * register reads, potentially with preemption disabled, so the
940ad3543edSMario Kleiner 	 * following code must not block on uncore.lock.
941ad3543edSMario Kleiner 	 */
942ad3543edSMario Kleiner 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
943ad3543edSMario Kleiner 
944ad3543edSMario Kleiner 	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
945ad3543edSMario Kleiner 
946ad3543edSMario Kleiner 	/* Get optional system timestamp before query. */
947ad3543edSMario Kleiner 	if (stime)
948ad3543edSMario Kleiner 		*stime = ktime_get();
949ad3543edSMario Kleiner 
95091d14251STvrtko Ursulin 	if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
9510af7e4dfSMario Kleiner 		/* No obvious pixelcount register. Only query vertical
9520af7e4dfSMario Kleiner 		 * scanout position from Display scan line register.
9530af7e4dfSMario Kleiner 		 */
954a225f079SVille Syrjälä 		position = __intel_get_crtc_scanline(intel_crtc);
9550af7e4dfSMario Kleiner 	} else {
9560af7e4dfSMario Kleiner 		/* Have access to pixelcount since start of frame.
9570af7e4dfSMario Kleiner 		 * We can split this into vertical and horizontal
9580af7e4dfSMario Kleiner 		 * scanout position.
9590af7e4dfSMario Kleiner 		 */
96075aa3f63SVille Syrjälä 		position = (I915_READ_FW(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
9610af7e4dfSMario Kleiner 
9623aa18df8SVille Syrjälä 		/* convert to pixel counts */
9633aa18df8SVille Syrjälä 		vbl_start *= htotal;
9643aa18df8SVille Syrjälä 		vbl_end *= htotal;
9653aa18df8SVille Syrjälä 		vtotal *= htotal;
96678e8fc6bSVille Syrjälä 
96778e8fc6bSVille Syrjälä 		/*
9687e78f1cbSVille Syrjälä 		 * In interlaced modes, the pixel counter counts all pixels,
9697e78f1cbSVille Syrjälä 		 * so one field will have htotal more pixels. In order to avoid
9707e78f1cbSVille Syrjälä 		 * the reported position from jumping backwards when the pixel
9717e78f1cbSVille Syrjälä 		 * counter is beyond the length of the shorter field, just
9727e78f1cbSVille Syrjälä 		 * clamp the position the length of the shorter field. This
9737e78f1cbSVille Syrjälä 		 * matches how the scanline counter based position works since
9747e78f1cbSVille Syrjälä 		 * the scanline counter doesn't count the two half lines.
9757e78f1cbSVille Syrjälä 		 */
9767e78f1cbSVille Syrjälä 		if (position >= vtotal)
9777e78f1cbSVille Syrjälä 			position = vtotal - 1;
9787e78f1cbSVille Syrjälä 
9797e78f1cbSVille Syrjälä 		/*
98078e8fc6bSVille Syrjälä 		 * Start of vblank interrupt is triggered at start of hsync,
98178e8fc6bSVille Syrjälä 		 * just prior to the first active line of vblank. However we
98278e8fc6bSVille Syrjälä 		 * consider lines to start at the leading edge of horizontal
98378e8fc6bSVille Syrjälä 		 * active. So, should we get here before we've crossed into
98478e8fc6bSVille Syrjälä 		 * the horizontal active of the first line in vblank, we would
98578e8fc6bSVille Syrjälä 		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
98678e8fc6bSVille Syrjälä 		 * always add htotal-hsync_start to the current pixel position.
98778e8fc6bSVille Syrjälä 		 */
98878e8fc6bSVille Syrjälä 		position = (position + htotal - hsync_start) % vtotal;
9893aa18df8SVille Syrjälä 	}
9903aa18df8SVille Syrjälä 
991ad3543edSMario Kleiner 	/* Get optional system timestamp after query. */
992ad3543edSMario Kleiner 	if (etime)
993ad3543edSMario Kleiner 		*etime = ktime_get();
994ad3543edSMario Kleiner 
995ad3543edSMario Kleiner 	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
996ad3543edSMario Kleiner 
997ad3543edSMario Kleiner 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
998ad3543edSMario Kleiner 
9993aa18df8SVille Syrjälä 	/*
10003aa18df8SVille Syrjälä 	 * While in vblank, position will be negative,
10013aa18df8SVille Syrjälä 	 * counting up towards 0 at vbl_end. Outside
10023aa18df8SVille Syrjälä 	 * vblank, position will be positive, counting
10033aa18df8SVille Syrjälä 	 * up from vbl_end.
10043aa18df8SVille Syrjälä 	 */
10053aa18df8SVille Syrjälä 	if (position >= vbl_start)
10063aa18df8SVille Syrjälä 		position -= vbl_end;
10073aa18df8SVille Syrjälä 	else
10083aa18df8SVille Syrjälä 		position += vtotal - vbl_end;
10093aa18df8SVille Syrjälä 
101091d14251STvrtko Ursulin 	if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
10113aa18df8SVille Syrjälä 		*vpos = position;
10123aa18df8SVille Syrjälä 		*hpos = 0;
10133aa18df8SVille Syrjälä 	} else {
10140af7e4dfSMario Kleiner 		*vpos = position / htotal;
10150af7e4dfSMario Kleiner 		*hpos = position - (*vpos * htotal);
10160af7e4dfSMario Kleiner 	}
10170af7e4dfSMario Kleiner 
10181bf6ad62SDaniel Vetter 	return true;
10190af7e4dfSMario Kleiner }
10200af7e4dfSMario Kleiner 
1021a225f079SVille Syrjälä int intel_get_crtc_scanline(struct intel_crtc *crtc)
1022a225f079SVille Syrjälä {
1023fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1024a225f079SVille Syrjälä 	unsigned long irqflags;
1025a225f079SVille Syrjälä 	int position;
1026a225f079SVille Syrjälä 
1027a225f079SVille Syrjälä 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
1028a225f079SVille Syrjälä 	position = __intel_get_crtc_scanline(crtc);
1029a225f079SVille Syrjälä 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
1030a225f079SVille Syrjälä 
1031a225f079SVille Syrjälä 	return position;
1032a225f079SVille Syrjälä }
1033a225f079SVille Syrjälä 
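/*
 * Handle the ILK MEMINT_EVAL_CHG interrupt: compare the hardware busy
 * averages against the configured min/max and move the DRPS delay one step
 * up or down via ironlake_set_drps().
 */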
103491d14251STvrtko Ursulin static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv)
1035f97108d1SJesse Barnes {
1036b5b72e89SMatthew Garrett 	u32 busy_up, busy_down, max_avg, min_avg;
10379270388eSDaniel Vetter 	u8 new_delay;
10389270388eSDaniel Vetter 
1039d0ecd7e2SDaniel Vetter 	spin_lock(&mchdev_lock);
1040f97108d1SJesse Barnes 
104173edd18fSDaniel Vetter 	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
104273edd18fSDaniel Vetter 
104320e4d407SDaniel Vetter 	new_delay = dev_priv->ips.cur_delay;
10449270388eSDaniel Vetter 
10457648fa99SJesse Barnes 	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
1046b5b72e89SMatthew Garrett 	busy_up = I915_READ(RCPREVBSYTUPAVG);
1047b5b72e89SMatthew Garrett 	busy_down = I915_READ(RCPREVBSYTDNAVG);
1048f97108d1SJesse Barnes 	max_avg = I915_READ(RCBMAXAVG);
1049f97108d1SJesse Barnes 	min_avg = I915_READ(RCBMINAVG);
1050f97108d1SJesse Barnes 
1051f97108d1SJesse Barnes 	/* Handle RCS change request from hw */
1052b5b72e89SMatthew Garrett 	if (busy_up > max_avg) {
105320e4d407SDaniel Vetter 		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
105420e4d407SDaniel Vetter 			new_delay = dev_priv->ips.cur_delay - 1;
105520e4d407SDaniel Vetter 		if (new_delay < dev_priv->ips.max_delay)
105620e4d407SDaniel Vetter 			new_delay = dev_priv->ips.max_delay;
1057b5b72e89SMatthew Garrett 	} else if (busy_down < min_avg) {
105820e4d407SDaniel Vetter 		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
105920e4d407SDaniel Vetter 			new_delay = dev_priv->ips.cur_delay + 1;
106020e4d407SDaniel Vetter 		if (new_delay > dev_priv->ips.min_delay)
106120e4d407SDaniel Vetter 			new_delay = dev_priv->ips.min_delay;
1062f97108d1SJesse Barnes 	}
1063f97108d1SJesse Barnes 
106491d14251STvrtko Ursulin 	if (ironlake_set_drps(dev_priv, new_delay))
106520e4d407SDaniel Vetter 		dev_priv->ips.cur_delay = new_delay;
1066f97108d1SJesse Barnes 
1067d0ecd7e2SDaniel Vetter 	spin_unlock(&mchdev_lock);
10689270388eSDaniel Vetter 
1069f97108d1SJesse Barnes 	return;
1070f97108d1SJesse Barnes }
1071f97108d1SJesse Barnes 
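/*
 * notify_ring - wake up waiters after a user interrupt, in hard irq context.
 *
 * Peek at the oldest waiter under breadcrumbs.irq_lock: if its seqno has
 * already passed, signal the fence directly and wake the waiting task; if
 * nobody is waiting any more, disarm the breadcrumb interrupt.
 */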
10720bc40be8STvrtko Ursulin static void notify_ring(struct intel_engine_cs *engine)
1073549f7365SChris Wilson {
107456299fb7SChris Wilson 	struct drm_i915_gem_request *rq = NULL;
107556299fb7SChris Wilson 	struct intel_wait *wait;
1076dffabc8fSTvrtko Ursulin 
1077bcbd5c33SChris Wilson 	if (!engine->breadcrumbs.irq_armed)
1078bcbd5c33SChris Wilson 		return;
1079bcbd5c33SChris Wilson 
10802246bea6SChris Wilson 	atomic_inc(&engine->irq_count);
1081538b257dSChris Wilson 	set_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);
108256299fb7SChris Wilson 
108361d3dc70SChris Wilson 	spin_lock(&engine->breadcrumbs.irq_lock);
108461d3dc70SChris Wilson 	wait = engine->breadcrumbs.irq_wait;
108556299fb7SChris Wilson 	if (wait) {
108617b51ad8SChris Wilson 		bool wakeup = engine->irq_seqno_barrier;
108717b51ad8SChris Wilson 
108856299fb7SChris Wilson 		/* We use a callback from the dma-fence to submit
108956299fb7SChris Wilson 		 * requests after waiting on our own requests. To
109056299fb7SChris Wilson 		 * ensure minimum delay in queuing the next request to
109156299fb7SChris Wilson 		 * hardware, signal the fence now rather than wait for
109256299fb7SChris Wilson 		 * the signaler to be woken up. We still wake up the
109356299fb7SChris Wilson 		 * waiter in order to handle the irq-seqno coherency
109456299fb7SChris Wilson 		 * issues (we may receive the interrupt before the
109556299fb7SChris Wilson 		 * seqno is written, see __i915_request_irq_complete())
109656299fb7SChris Wilson 		 * and to handle coalescing of multiple seqno updates
109756299fb7SChris Wilson 		 * and many waiters.
109856299fb7SChris Wilson 		 */
109956299fb7SChris Wilson 		if (i915_seqno_passed(intel_engine_get_seqno(engine),
110017b51ad8SChris Wilson 				      wait->seqno)) {
1101de4d2106SChris Wilson 			struct drm_i915_gem_request *waiter = wait->request;
1102de4d2106SChris Wilson 
110317b51ad8SChris Wilson 			wakeup = true;
110417b51ad8SChris Wilson 			if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
1105de4d2106SChris Wilson 				      &waiter->fence.flags) &&
1106de4d2106SChris Wilson 			    intel_wait_check_request(wait, waiter))
1107de4d2106SChris Wilson 				rq = i915_gem_request_get(waiter);
110817b51ad8SChris Wilson 		}
110956299fb7SChris Wilson 
111017b51ad8SChris Wilson 		if (wakeup)
111156299fb7SChris Wilson 			wake_up_process(wait->tsk);
111267b807a8SChris Wilson 	} else {
1113bcbd5c33SChris Wilson 		if (engine->breadcrumbs.irq_armed)
111467b807a8SChris Wilson 			__intel_engine_disarm_breadcrumbs(engine);
111556299fb7SChris Wilson 	}
111661d3dc70SChris Wilson 	spin_unlock(&engine->breadcrumbs.irq_lock);
111756299fb7SChris Wilson 
111824754d75SChris Wilson 	if (rq) {
111956299fb7SChris Wilson 		dma_fence_signal(&rq->fence);
112024754d75SChris Wilson 		i915_gem_request_put(rq);
112124754d75SChris Wilson 	}
112256299fb7SChris Wilson 
112356299fb7SChris Wilson 	trace_intel_engine_notify(engine, wait);
1124549f7365SChris Wilson }
1125549f7365SChris Wilson 
112643cf3bf0SChris Wilson static void vlv_c0_read(struct drm_i915_private *dev_priv,
112743cf3bf0SChris Wilson 			struct intel_rps_ei *ei)
112831685c25SDeepak S {
1129679cb6c1SMika Kuoppala 	ei->ktime = ktime_get_raw();
113043cf3bf0SChris Wilson 	ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT);
113143cf3bf0SChris Wilson 	ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
113231685c25SDeepak S }
113331685c25SDeepak S 
113443cf3bf0SChris Wilson void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
113543cf3bf0SChris Wilson {
1136562d9baeSSagar Arun Kamble 	memset(&dev_priv->gt_pm.rps.ei, 0, sizeof(dev_priv->gt_pm.rps.ei));
113743cf3bf0SChris Wilson }
113843cf3bf0SChris Wilson 
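/*
 * vlv_wa_c0_ei - software evaluation-interval workaround for VLV/CHV.
 *
 * On the RP_UP_EI_EXPIRED interrupt, compare the render/media C0 residency
 * accumulated since the last sample against the elapsed time and synthesise
 * GEN6_PM_RP_UP/DOWN_THRESHOLD events for the RPS worker.
 */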
113943cf3bf0SChris Wilson static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
114043cf3bf0SChris Wilson {
1141562d9baeSSagar Arun Kamble 	struct intel_rps *rps = &dev_priv->gt_pm.rps;
1142562d9baeSSagar Arun Kamble 	const struct intel_rps_ei *prev = &rps->ei;
114343cf3bf0SChris Wilson 	struct intel_rps_ei now;
114443cf3bf0SChris Wilson 	u32 events = 0;
114543cf3bf0SChris Wilson 
1146e0e8c7cbSChris Wilson 	if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0)
114743cf3bf0SChris Wilson 		return 0;
114843cf3bf0SChris Wilson 
114943cf3bf0SChris Wilson 	vlv_c0_read(dev_priv, &now);
115031685c25SDeepak S 
1151679cb6c1SMika Kuoppala 	if (prev->ktime) {
1152e0e8c7cbSChris Wilson 		u64 time, c0;
1153569884e3SChris Wilson 		u32 render, media;
1154e0e8c7cbSChris Wilson 
1155679cb6c1SMika Kuoppala 		time = ktime_us_delta(now.ktime, prev->ktime);
11568f68d591SChris Wilson 
1157e0e8c7cbSChris Wilson 		time *= dev_priv->czclk_freq;
1158e0e8c7cbSChris Wilson 
1159e0e8c7cbSChris Wilson 		/* Workload can be split between render + media,
1160e0e8c7cbSChris Wilson 		 * e.g. SwapBuffers being blitted in X after being rendered in
1161e0e8c7cbSChris Wilson 		 * mesa. To account for this we need to combine both engines
1162e0e8c7cbSChris Wilson 		 * into our activity counter.
1163e0e8c7cbSChris Wilson 		 */
1164569884e3SChris Wilson 		render = now.render_c0 - prev->render_c0;
1165569884e3SChris Wilson 		media = now.media_c0 - prev->media_c0;
1166569884e3SChris Wilson 		c0 = max(render, media);
11676b7f6aa7SMika Kuoppala 		c0 *= 1000 * 100 << 8; /* to usecs and scale to threshold% */
1168e0e8c7cbSChris Wilson 
1169562d9baeSSagar Arun Kamble 		if (c0 > time * rps->up_threshold)
1170e0e8c7cbSChris Wilson 			events = GEN6_PM_RP_UP_THRESHOLD;
1171562d9baeSSagar Arun Kamble 		else if (c0 < time * rps->down_threshold)
1172e0e8c7cbSChris Wilson 			events = GEN6_PM_RP_DOWN_THRESHOLD;
117331685c25SDeepak S 	}
117431685c25SDeepak S 
1175562d9baeSSagar Arun Kamble 	rps->ei = now;
117643cf3bf0SChris Wilson 	return events;
117731685c25SDeepak S }
117831685c25SDeepak S 
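/*
 * gen6_pm_rps_work - bottom half for the RPS interrupts.
 *
 * Collects the queued PM IIR bits and any client boost requests, picks a new
 * GPU frequency (clamped to the soft limits), applies it via intel_set_rps()
 * and finally re-enables the RPS interrupts masked by the top half.
 */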
11794912d041SBen Widawsky static void gen6_pm_rps_work(struct work_struct *work)
11803b8d8d91SJesse Barnes {
11812d1013ddSJani Nikula 	struct drm_i915_private *dev_priv =
1182562d9baeSSagar Arun Kamble 		container_of(work, struct drm_i915_private, gt_pm.rps.work);
1183562d9baeSSagar Arun Kamble 	struct intel_rps *rps = &dev_priv->gt_pm.rps;
11847c0a16adSChris Wilson 	bool client_boost = false;
11858d3afd7dSChris Wilson 	int new_delay, adj, min, max;
11867c0a16adSChris Wilson 	u32 pm_iir = 0;
11873b8d8d91SJesse Barnes 
118859cdb63dSDaniel Vetter 	spin_lock_irq(&dev_priv->irq_lock);
1189562d9baeSSagar Arun Kamble 	if (rps->interrupts_enabled) {
1190562d9baeSSagar Arun Kamble 		pm_iir = fetch_and_zero(&rps->pm_iir);
1191562d9baeSSagar Arun Kamble 		client_boost = atomic_read(&rps->num_waiters);
1192d4d70aa5SImre Deak 	}
119359cdb63dSDaniel Vetter 	spin_unlock_irq(&dev_priv->irq_lock);
11944912d041SBen Widawsky 
119560611c13SPaulo Zanoni 	/* Make sure we didn't queue anything we're not going to process. */
1196a6706b45SDeepak S 	WARN_ON(pm_iir & ~dev_priv->pm_rps_events);
11978d3afd7dSChris Wilson 	if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
11987c0a16adSChris Wilson 		goto out;
11993b8d8d91SJesse Barnes 
12009f817501SSagar Arun Kamble 	mutex_lock(&dev_priv->pcu_lock);
12017b9e0ae6SChris Wilson 
120243cf3bf0SChris Wilson 	pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);
120343cf3bf0SChris Wilson 
1204562d9baeSSagar Arun Kamble 	adj = rps->last_adj;
1205562d9baeSSagar Arun Kamble 	new_delay = rps->cur_freq;
1206562d9baeSSagar Arun Kamble 	min = rps->min_freq_softlimit;
1207562d9baeSSagar Arun Kamble 	max = rps->max_freq_softlimit;
12087b92c1bdSChris Wilson 	if (client_boost)
1209562d9baeSSagar Arun Kamble 		max = rps->max_freq;
1210562d9baeSSagar Arun Kamble 	if (client_boost && new_delay < rps->boost_freq) {
1211562d9baeSSagar Arun Kamble 		new_delay = rps->boost_freq;
12128d3afd7dSChris Wilson 		adj = 0;
12138d3afd7dSChris Wilson 	} else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
1214dd75fdc8SChris Wilson 		if (adj > 0)
1215dd75fdc8SChris Wilson 			adj *= 2;
1216edcf284bSChris Wilson 		else /* CHV needs even encode values */
1217edcf284bSChris Wilson 			adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;
12187e79a683SSagar Arun Kamble 
1219562d9baeSSagar Arun Kamble 		if (new_delay >= rps->max_freq_softlimit)
12207e79a683SSagar Arun Kamble 			adj = 0;
12217b92c1bdSChris Wilson 	} else if (client_boost) {
1222f5a4c67dSChris Wilson 		adj = 0;
1223dd75fdc8SChris Wilson 	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
1224562d9baeSSagar Arun Kamble 		if (rps->cur_freq > rps->efficient_freq)
1225562d9baeSSagar Arun Kamble 			new_delay = rps->efficient_freq;
1226562d9baeSSagar Arun Kamble 		else if (rps->cur_freq > rps->min_freq_softlimit)
1227562d9baeSSagar Arun Kamble 			new_delay = rps->min_freq_softlimit;
1228dd75fdc8SChris Wilson 		adj = 0;
1229dd75fdc8SChris Wilson 	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
1230dd75fdc8SChris Wilson 		if (adj < 0)
1231dd75fdc8SChris Wilson 			adj *= 2;
1232edcf284bSChris Wilson 		else /* CHV needs even encode values */
1233edcf284bSChris Wilson 			adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;
12347e79a683SSagar Arun Kamble 
1235562d9baeSSagar Arun Kamble 		if (new_delay <= rps->min_freq_softlimit)
12367e79a683SSagar Arun Kamble 			adj = 0;
1237dd75fdc8SChris Wilson 	} else { /* unknown event */
1238edcf284bSChris Wilson 		adj = 0;
1239dd75fdc8SChris Wilson 	}
12403b8d8d91SJesse Barnes 
1241562d9baeSSagar Arun Kamble 	rps->last_adj = adj;
1242edcf284bSChris Wilson 
124379249636SBen Widawsky 	/* sysfs frequency interfaces may have snuck in while servicing the
124479249636SBen Widawsky 	 * interrupt
124579249636SBen Widawsky 	 */
1246edcf284bSChris Wilson 	new_delay += adj;
12478d3afd7dSChris Wilson 	new_delay = clamp_t(int, new_delay, min, max);
124827544369SDeepak S 
12499fcee2f7SChris Wilson 	if (intel_set_rps(dev_priv, new_delay)) {
12509fcee2f7SChris Wilson 		DRM_DEBUG_DRIVER("Failed to set new GPU frequency\n");
1251562d9baeSSagar Arun Kamble 		rps->last_adj = 0;
12529fcee2f7SChris Wilson 	}
12533b8d8d91SJesse Barnes 
12549f817501SSagar Arun Kamble 	mutex_unlock(&dev_priv->pcu_lock);
12557c0a16adSChris Wilson 
12567c0a16adSChris Wilson out:
12577c0a16adSChris Wilson 	/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
12587c0a16adSChris Wilson 	spin_lock_irq(&dev_priv->irq_lock);
1259562d9baeSSagar Arun Kamble 	if (rps->interrupts_enabled)
12607c0a16adSChris Wilson 		gen6_unmask_pm_irq(dev_priv, dev_priv->pm_rps_events);
12617c0a16adSChris Wilson 	spin_unlock_irq(&dev_priv->irq_lock);
12623b8d8d91SJesse Barnes }
12633b8d8d91SJesse Barnes 
1264e3689190SBen Widawsky 
1265e3689190SBen Widawsky /**
1266e3689190SBen Widawsky  * ivybridge_parity_work - Workqueue called when a parity error interrupt
1267e3689190SBen Widawsky  * occurred.
1268e3689190SBen Widawsky  * @work: workqueue struct
1269e3689190SBen Widawsky  *
1270e3689190SBen Widawsky  * Doesn't actually do anything except notify userspace. As a consequence of
1271e3689190SBen Widawsky  * this event, userspace should try to remap the bad rows since statistically
1272e3689190SBen Widawsky  * the same row is more likely to go bad again.
1273e3689190SBen Widawsky  */
1274e3689190SBen Widawsky static void ivybridge_parity_work(struct work_struct *work)
1275e3689190SBen Widawsky {
12762d1013ddSJani Nikula 	struct drm_i915_private *dev_priv =
1277cefcff8fSJoonas Lahtinen 		container_of(work, typeof(*dev_priv), l3_parity.error_work);
1278e3689190SBen Widawsky 	u32 error_status, row, bank, subbank;
127935a85ac6SBen Widawsky 	char *parity_event[6];
1280e3689190SBen Widawsky 	uint32_t misccpctl;
128135a85ac6SBen Widawsky 	uint8_t slice = 0;
1282e3689190SBen Widawsky 
1283e3689190SBen Widawsky 	/* We must turn off DOP level clock gating to access the L3 registers.
1284e3689190SBen Widawsky 	 * In order to prevent a get/put style interface, acquire struct mutex
1285e3689190SBen Widawsky 	 * any time we access those registers.
1286e3689190SBen Widawsky 	 */
128791c8a326SChris Wilson 	mutex_lock(&dev_priv->drm.struct_mutex);
1288e3689190SBen Widawsky 
128935a85ac6SBen Widawsky 	/* If we've screwed up tracking, just let the interrupt fire again */
129035a85ac6SBen Widawsky 	if (WARN_ON(!dev_priv->l3_parity.which_slice))
129135a85ac6SBen Widawsky 		goto out;
129235a85ac6SBen Widawsky 
1293e3689190SBen Widawsky 	misccpctl = I915_READ(GEN7_MISCCPCTL);
1294e3689190SBen Widawsky 	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
1295e3689190SBen Widawsky 	POSTING_READ(GEN7_MISCCPCTL);
1296e3689190SBen Widawsky 
129735a85ac6SBen Widawsky 	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
1298f0f59a00SVille Syrjälä 		i915_reg_t reg;
129935a85ac6SBen Widawsky 
130035a85ac6SBen Widawsky 		slice--;
13012d1fe073SJoonas Lahtinen 		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv)))
130235a85ac6SBen Widawsky 			break;
130335a85ac6SBen Widawsky 
130435a85ac6SBen Widawsky 		dev_priv->l3_parity.which_slice &= ~(1<<slice);
130535a85ac6SBen Widawsky 
13066fa1c5f1SVille Syrjälä 		reg = GEN7_L3CDERRST1(slice);
130735a85ac6SBen Widawsky 
130835a85ac6SBen Widawsky 		error_status = I915_READ(reg);
1309e3689190SBen Widawsky 		row = GEN7_PARITY_ERROR_ROW(error_status);
1310e3689190SBen Widawsky 		bank = GEN7_PARITY_ERROR_BANK(error_status);
1311e3689190SBen Widawsky 		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
1312e3689190SBen Widawsky 
131335a85ac6SBen Widawsky 		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
131435a85ac6SBen Widawsky 		POSTING_READ(reg);
1315e3689190SBen Widawsky 
1316cce723edSBen Widawsky 		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
1317e3689190SBen Widawsky 		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
1318e3689190SBen Widawsky 		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
1319e3689190SBen Widawsky 		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
132035a85ac6SBen Widawsky 		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
132135a85ac6SBen Widawsky 		parity_event[5] = NULL;
1322e3689190SBen Widawsky 
132391c8a326SChris Wilson 		kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj,
1324e3689190SBen Widawsky 				   KOBJ_CHANGE, parity_event);
1325e3689190SBen Widawsky 
132635a85ac6SBen Widawsky 		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
132735a85ac6SBen Widawsky 			  slice, row, bank, subbank);
1328e3689190SBen Widawsky 
132935a85ac6SBen Widawsky 		kfree(parity_event[4]);
1330e3689190SBen Widawsky 		kfree(parity_event[3]);
1331e3689190SBen Widawsky 		kfree(parity_event[2]);
1332e3689190SBen Widawsky 		kfree(parity_event[1]);
1333e3689190SBen Widawsky 	}
1334e3689190SBen Widawsky 
133535a85ac6SBen Widawsky 	I915_WRITE(GEN7_MISCCPCTL, misccpctl);
133635a85ac6SBen Widawsky 
133735a85ac6SBen Widawsky out:
133835a85ac6SBen Widawsky 	WARN_ON(dev_priv->l3_parity.which_slice);
13394cb21832SDaniel Vetter 	spin_lock_irq(&dev_priv->irq_lock);
13402d1fe073SJoonas Lahtinen 	gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
13414cb21832SDaniel Vetter 	spin_unlock_irq(&dev_priv->irq_lock);
134235a85ac6SBen Widawsky 
134391c8a326SChris Wilson 	mutex_unlock(&dev_priv->drm.struct_mutex);
134435a85ac6SBen Widawsky }
134535a85ac6SBen Widawsky 
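/*
 * Top half for the L3 parity error interrupt: mask further parity interrupts,
 * record which slice(s) reported the error and queue ivybridge_parity_work()
 * to read out and report the details.
 */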
1346261e40b8SVille Syrjälä static void ivybridge_parity_error_irq_handler(struct drm_i915_private *dev_priv,
1347261e40b8SVille Syrjälä 					       u32 iir)
1348e3689190SBen Widawsky {
1349261e40b8SVille Syrjälä 	if (!HAS_L3_DPF(dev_priv))
1350e3689190SBen Widawsky 		return;
1351e3689190SBen Widawsky 
1352d0ecd7e2SDaniel Vetter 	spin_lock(&dev_priv->irq_lock);
1353261e40b8SVille Syrjälä 	gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
1354d0ecd7e2SDaniel Vetter 	spin_unlock(&dev_priv->irq_lock);
1355e3689190SBen Widawsky 
1356261e40b8SVille Syrjälä 	iir &= GT_PARITY_ERROR(dev_priv);
135735a85ac6SBen Widawsky 	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
135835a85ac6SBen Widawsky 		dev_priv->l3_parity.which_slice |= 1 << 1;
135935a85ac6SBen Widawsky 
136035a85ac6SBen Widawsky 	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
136135a85ac6SBen Widawsky 		dev_priv->l3_parity.which_slice |= 1 << 0;
136235a85ac6SBen Widawsky 
1363a4da4fa4SDaniel Vetter 	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
1364e3689190SBen Widawsky }
1365e3689190SBen Widawsky 
1366261e40b8SVille Syrjälä static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv,
1367f1af8fc1SPaulo Zanoni 			       u32 gt_iir)
1368f1af8fc1SPaulo Zanoni {
1369f8973c21SChris Wilson 	if (gt_iir & GT_RENDER_USER_INTERRUPT)
13703b3f1650SAkash Goel 		notify_ring(dev_priv->engine[RCS]);
1371f1af8fc1SPaulo Zanoni 	if (gt_iir & ILK_BSD_USER_INTERRUPT)
13723b3f1650SAkash Goel 		notify_ring(dev_priv->engine[VCS]);
1373f1af8fc1SPaulo Zanoni }
1374f1af8fc1SPaulo Zanoni 
1375261e40b8SVille Syrjälä static void snb_gt_irq_handler(struct drm_i915_private *dev_priv,
1376e7b4c6b1SDaniel Vetter 			       u32 gt_iir)
1377e7b4c6b1SDaniel Vetter {
1378f8973c21SChris Wilson 	if (gt_iir & GT_RENDER_USER_INTERRUPT)
13793b3f1650SAkash Goel 		notify_ring(dev_priv->engine[RCS]);
1380cc609d5dSBen Widawsky 	if (gt_iir & GT_BSD_USER_INTERRUPT)
13813b3f1650SAkash Goel 		notify_ring(dev_priv->engine[VCS]);
1382cc609d5dSBen Widawsky 	if (gt_iir & GT_BLT_USER_INTERRUPT)
13833b3f1650SAkash Goel 		notify_ring(dev_priv->engine[BCS]);
1384e7b4c6b1SDaniel Vetter 
1385cc609d5dSBen Widawsky 	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
1386cc609d5dSBen Widawsky 		      GT_BSD_CS_ERROR_INTERRUPT |
1387aaecdf61SDaniel Vetter 		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
1388aaecdf61SDaniel Vetter 		DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);
1389e3689190SBen Widawsky 
1390261e40b8SVille Syrjälä 	if (gt_iir & GT_PARITY_ERROR(dev_priv))
1391261e40b8SVille Syrjälä 		ivybridge_parity_error_irq_handler(dev_priv, gt_iir);
1392e7b4c6b1SDaniel Vetter }
1393e7b4c6b1SDaniel Vetter 
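/*
 * Per-engine GT interrupt handling for gen8+: a context switch interrupt
 * kicks the execlists tasklet, while a user interrupt wakes any waiters
 * (and, when GuC submission is used, also schedules the tasklet).
 */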
13945d3d69d5SChris Wilson static void
13950bc40be8STvrtko Ursulin gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir, int test_shift)
1396fbcc1a0cSNick Hoath {
1397b620e870SMika Kuoppala 	struct intel_engine_execlists * const execlists = &engine->execlists;
139831de7350SChris Wilson 	bool tasklet = false;
1399f747026cSChris Wilson 
1400f747026cSChris Wilson 	if (iir & (GT_CONTEXT_SWITCH_INTERRUPT << test_shift)) {
14014a118ecbSChris Wilson 		if (READ_ONCE(engine->execlists.active)) {
1402955a4b89SChris Wilson 			__set_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
140331de7350SChris Wilson 			tasklet = true;
1404f747026cSChris Wilson 		}
14054a118ecbSChris Wilson 	}
140631de7350SChris Wilson 
140731de7350SChris Wilson 	if (iir & (GT_RENDER_USER_INTERRUPT << test_shift)) {
140831de7350SChris Wilson 		notify_ring(engine);
140993ffbe8eSMichal Wajdeczko 		tasklet |= USES_GUC_SUBMISSION(engine->i915);
141031de7350SChris Wilson 	}
141131de7350SChris Wilson 
141231de7350SChris Wilson 	if (tasklet)
1413c6dce8f1SSagar Arun Kamble 		tasklet_hi_schedule(&execlists->tasklet);
1414fbcc1a0cSNick Hoath }
1415fbcc1a0cSNick Hoath 
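/*
 * gen8_gt_irq_ack - read and clear the GT IIR registers selected by
 * @master_ctl, stashing the raw values in @gt_iir[] so the actual handling
 * in gen8_gt_irq_handler() can be done after the caller has dealt with the
 * master interrupt.
 */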
1416e30e251aSVille Syrjälä static irqreturn_t gen8_gt_irq_ack(struct drm_i915_private *dev_priv,
1417e30e251aSVille Syrjälä 				   u32 master_ctl,
1418e30e251aSVille Syrjälä 				   u32 gt_iir[4])
1419abd58f01SBen Widawsky {
1420abd58f01SBen Widawsky 	irqreturn_t ret = IRQ_NONE;
1421abd58f01SBen Widawsky 
1422abd58f01SBen Widawsky 	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
1423e30e251aSVille Syrjälä 		gt_iir[0] = I915_READ_FW(GEN8_GT_IIR(0));
1424e30e251aSVille Syrjälä 		if (gt_iir[0]) {
1425e30e251aSVille Syrjälä 			I915_WRITE_FW(GEN8_GT_IIR(0), gt_iir[0]);
1426abd58f01SBen Widawsky 			ret = IRQ_HANDLED;
1427abd58f01SBen Widawsky 		} else
1428abd58f01SBen Widawsky 			DRM_ERROR("The master control interrupt lied (GT0)!\n");
1429abd58f01SBen Widawsky 	}
1430abd58f01SBen Widawsky 
143185f9b5f9SZhao Yakui 	if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
1432e30e251aSVille Syrjälä 		gt_iir[1] = I915_READ_FW(GEN8_GT_IIR(1));
1433e30e251aSVille Syrjälä 		if (gt_iir[1]) {
1434e30e251aSVille Syrjälä 			I915_WRITE_FW(GEN8_GT_IIR(1), gt_iir[1]);
1435abd58f01SBen Widawsky 			ret = IRQ_HANDLED;
1436abd58f01SBen Widawsky 		} else
1437abd58f01SBen Widawsky 			DRM_ERROR("The master control interrupt lied (GT1)!\n");
1438abd58f01SBen Widawsky 	}
1439abd58f01SBen Widawsky 
144074cdb337SChris Wilson 	if (master_ctl & GEN8_GT_VECS_IRQ) {
1441e30e251aSVille Syrjälä 		gt_iir[3] = I915_READ_FW(GEN8_GT_IIR(3));
1442e30e251aSVille Syrjälä 		if (gt_iir[3]) {
1443e30e251aSVille Syrjälä 			I915_WRITE_FW(GEN8_GT_IIR(3), gt_iir[3]);
144474cdb337SChris Wilson 			ret = IRQ_HANDLED;
144574cdb337SChris Wilson 		} else
144674cdb337SChris Wilson 			DRM_ERROR("The master control interrupt lied (GT3)!\n");
144774cdb337SChris Wilson 	}
144874cdb337SChris Wilson 
144926705e20SSagar Arun Kamble 	if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
1450e30e251aSVille Syrjälä 		gt_iir[2] = I915_READ_FW(GEN8_GT_IIR(2));
145126705e20SSagar Arun Kamble 		if (gt_iir[2] & (dev_priv->pm_rps_events |
145226705e20SSagar Arun Kamble 				 dev_priv->pm_guc_events)) {
1453cb0d205eSChris Wilson 			I915_WRITE_FW(GEN8_GT_IIR(2),
145426705e20SSagar Arun Kamble 				      gt_iir[2] & (dev_priv->pm_rps_events |
145526705e20SSagar Arun Kamble 						   dev_priv->pm_guc_events));
145638cc46d7SOscar Mateo 			ret = IRQ_HANDLED;
14570961021aSBen Widawsky 		} else
14580961021aSBen Widawsky 			DRM_ERROR("The master control interrupt lied (PM)!\n");
14590961021aSBen Widawsky 	}
14600961021aSBen Widawsky 
1461abd58f01SBen Widawsky 	return ret;
1462abd58f01SBen Widawsky }
1463abd58f01SBen Widawsky 
1464e30e251aSVille Syrjälä static void gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
1465e30e251aSVille Syrjälä 				u32 gt_iir[4])
1466e30e251aSVille Syrjälä {
1467e30e251aSVille Syrjälä 	if (gt_iir[0]) {
14683b3f1650SAkash Goel 		gen8_cs_irq_handler(dev_priv->engine[RCS],
1469e30e251aSVille Syrjälä 				    gt_iir[0], GEN8_RCS_IRQ_SHIFT);
14703b3f1650SAkash Goel 		gen8_cs_irq_handler(dev_priv->engine[BCS],
1471e30e251aSVille Syrjälä 				    gt_iir[0], GEN8_BCS_IRQ_SHIFT);
1472e30e251aSVille Syrjälä 	}
1473e30e251aSVille Syrjälä 
1474e30e251aSVille Syrjälä 	if (gt_iir[1]) {
14753b3f1650SAkash Goel 		gen8_cs_irq_handler(dev_priv->engine[VCS],
1476e30e251aSVille Syrjälä 				    gt_iir[1], GEN8_VCS1_IRQ_SHIFT);
14773b3f1650SAkash Goel 		gen8_cs_irq_handler(dev_priv->engine[VCS2],
1478e30e251aSVille Syrjälä 				    gt_iir[1], GEN8_VCS2_IRQ_SHIFT);
1479e30e251aSVille Syrjälä 	}
1480e30e251aSVille Syrjälä 
1481e30e251aSVille Syrjälä 	if (gt_iir[3])
14823b3f1650SAkash Goel 		gen8_cs_irq_handler(dev_priv->engine[VECS],
1483e30e251aSVille Syrjälä 				    gt_iir[3], GEN8_VECS_IRQ_SHIFT);
1484e30e251aSVille Syrjälä 
1485e30e251aSVille Syrjälä 	if (gt_iir[2] & dev_priv->pm_rps_events)
1486e30e251aSVille Syrjälä 		gen6_rps_irq_handler(dev_priv, gt_iir[2]);
148726705e20SSagar Arun Kamble 
148826705e20SSagar Arun Kamble 	if (gt_iir[2] & dev_priv->pm_guc_events)
148926705e20SSagar Arun Kamble 		gen9_guc_irq_handler(dev_priv, gt_iir[2]);
1490e30e251aSVille Syrjälä }
1491e30e251aSVille Syrjälä 
149263c88d22SImre Deak static bool bxt_port_hotplug_long_detect(enum port port, u32 val)
149363c88d22SImre Deak {
149463c88d22SImre Deak 	switch (port) {
149563c88d22SImre Deak 	case PORT_A:
1496195baa06SVille Syrjälä 		return val & PORTA_HOTPLUG_LONG_DETECT;
149763c88d22SImre Deak 	case PORT_B:
149863c88d22SImre Deak 		return val & PORTB_HOTPLUG_LONG_DETECT;
149963c88d22SImre Deak 	case PORT_C:
150063c88d22SImre Deak 		return val & PORTC_HOTPLUG_LONG_DETECT;
150163c88d22SImre Deak 	default:
150263c88d22SImre Deak 		return false;
150363c88d22SImre Deak 	}
150463c88d22SImre Deak }
150563c88d22SImre Deak 
15066dbf30ceSVille Syrjälä static bool spt_port_hotplug2_long_detect(enum port port, u32 val)
15076dbf30ceSVille Syrjälä {
15086dbf30ceSVille Syrjälä 	switch (port) {
15096dbf30ceSVille Syrjälä 	case PORT_E:
15106dbf30ceSVille Syrjälä 		return val & PORTE_HOTPLUG_LONG_DETECT;
15116dbf30ceSVille Syrjälä 	default:
15126dbf30ceSVille Syrjälä 		return false;
15136dbf30ceSVille Syrjälä 	}
15146dbf30ceSVille Syrjälä }
15156dbf30ceSVille Syrjälä 
151674c0b395SVille Syrjälä static bool spt_port_hotplug_long_detect(enum port port, u32 val)
151774c0b395SVille Syrjälä {
151874c0b395SVille Syrjälä 	switch (port) {
151974c0b395SVille Syrjälä 	case PORT_A:
152074c0b395SVille Syrjälä 		return val & PORTA_HOTPLUG_LONG_DETECT;
152174c0b395SVille Syrjälä 	case PORT_B:
152274c0b395SVille Syrjälä 		return val & PORTB_HOTPLUG_LONG_DETECT;
152374c0b395SVille Syrjälä 	case PORT_C:
152474c0b395SVille Syrjälä 		return val & PORTC_HOTPLUG_LONG_DETECT;
152574c0b395SVille Syrjälä 	case PORT_D:
152674c0b395SVille Syrjälä 		return val & PORTD_HOTPLUG_LONG_DETECT;
152774c0b395SVille Syrjälä 	default:
152874c0b395SVille Syrjälä 		return false;
152974c0b395SVille Syrjälä 	}
153074c0b395SVille Syrjälä }
153174c0b395SVille Syrjälä 
1532e4ce95aaSVille Syrjälä static bool ilk_port_hotplug_long_detect(enum port port, u32 val)
1533e4ce95aaSVille Syrjälä {
1534e4ce95aaSVille Syrjälä 	switch (port) {
1535e4ce95aaSVille Syrjälä 	case PORT_A:
1536e4ce95aaSVille Syrjälä 		return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
1537e4ce95aaSVille Syrjälä 	default:
1538e4ce95aaSVille Syrjälä 		return false;
1539e4ce95aaSVille Syrjälä 	}
1540e4ce95aaSVille Syrjälä }
1541e4ce95aaSVille Syrjälä 
1542676574dfSJani Nikula static bool pch_port_hotplug_long_detect(enum port port, u32 val)
154313cf5504SDave Airlie {
154413cf5504SDave Airlie 	switch (port) {
154513cf5504SDave Airlie 	case PORT_B:
1546676574dfSJani Nikula 		return val & PORTB_HOTPLUG_LONG_DETECT;
154713cf5504SDave Airlie 	case PORT_C:
1548676574dfSJani Nikula 		return val & PORTC_HOTPLUG_LONG_DETECT;
154913cf5504SDave Airlie 	case PORT_D:
1550676574dfSJani Nikula 		return val & PORTD_HOTPLUG_LONG_DETECT;
1551676574dfSJani Nikula 	default:
1552676574dfSJani Nikula 		return false;
155313cf5504SDave Airlie 	}
155413cf5504SDave Airlie }
155513cf5504SDave Airlie 
1556676574dfSJani Nikula static bool i9xx_port_hotplug_long_detect(enum port port, u32 val)
155713cf5504SDave Airlie {
155813cf5504SDave Airlie 	switch (port) {
155913cf5504SDave Airlie 	case PORT_B:
1560676574dfSJani Nikula 		return val & PORTB_HOTPLUG_INT_LONG_PULSE;
156113cf5504SDave Airlie 	case PORT_C:
1562676574dfSJani Nikula 		return val & PORTC_HOTPLUG_INT_LONG_PULSE;
156313cf5504SDave Airlie 	case PORT_D:
1564676574dfSJani Nikula 		return val & PORTD_HOTPLUG_INT_LONG_PULSE;
1565676574dfSJani Nikula 	default:
1566676574dfSJani Nikula 		return false;
156713cf5504SDave Airlie 	}
156813cf5504SDave Airlie }
156913cf5504SDave Airlie 
157042db67d6SVille Syrjälä /*
157142db67d6SVille Syrjälä  * Get a bit mask of pins that have triggered, and which ones may be long.
157242db67d6SVille Syrjälä  * This can be called multiple times with the same masks to accumulate
157342db67d6SVille Syrjälä  * hotplug detection results from several registers.
157442db67d6SVille Syrjälä  *
157542db67d6SVille Syrjälä  * Note that the caller is expected to zero out the masks initially.
157642db67d6SVille Syrjälä  */
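/*
 * A typical caller (a sketch mirroring the i9xx hotplug path below):
 *
 *	u32 pin_mask = 0, long_mask = 0;
 *
 *	intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
 *			   hotplug_trigger, hpd_status_g4x,
 *			   i9xx_port_hotplug_long_detect);
 *	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
 */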
1577fd63e2a9SImre Deak static void intel_get_hpd_pins(u32 *pin_mask, u32 *long_mask,
15788c841e57SJani Nikula 			     u32 hotplug_trigger, u32 dig_hotplug_reg,
1579fd63e2a9SImre Deak 			     const u32 hpd[HPD_NUM_PINS],
1580fd63e2a9SImre Deak 			     bool long_pulse_detect(enum port port, u32 val))
1581676574dfSJani Nikula {
15828c841e57SJani Nikula 	enum port port;
1583676574dfSJani Nikula 	int i;
1584676574dfSJani Nikula 
1585676574dfSJani Nikula 	for_each_hpd_pin(i) {
15868c841e57SJani Nikula 		if ((hpd[i] & hotplug_trigger) == 0)
15878c841e57SJani Nikula 			continue;
15888c841e57SJani Nikula 
1589676574dfSJani Nikula 		*pin_mask |= BIT(i);
1590676574dfSJani Nikula 
1591256cfddeSRodrigo Vivi 		port = intel_hpd_pin_to_port(i);
1592256cfddeSRodrigo Vivi 		if (port == PORT_NONE)
1593cc24fcdcSImre Deak 			continue;
1594cc24fcdcSImre Deak 
1595fd63e2a9SImre Deak 		if (long_pulse_detect(port, dig_hotplug_reg))
1596676574dfSJani Nikula 			*long_mask |= BIT(i);
1597676574dfSJani Nikula 	}
1598676574dfSJani Nikula 
1599676574dfSJani Nikula 	DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x\n",
1600676574dfSJani Nikula 			 hotplug_trigger, dig_hotplug_reg, *pin_mask);
1601676574dfSJani Nikula 
1602676574dfSJani Nikula }
1603676574dfSJani Nikula 
160491d14251STvrtko Ursulin static void gmbus_irq_handler(struct drm_i915_private *dev_priv)
1605515ac2bbSDaniel Vetter {
160628c70f16SDaniel Vetter 	wake_up_all(&dev_priv->gmbus_wait_queue);
1607515ac2bbSDaniel Vetter }
1608515ac2bbSDaniel Vetter 
160991d14251STvrtko Ursulin static void dp_aux_irq_handler(struct drm_i915_private *dev_priv)
1610ce99c256SDaniel Vetter {
16119ee32feaSDaniel Vetter 	wake_up_all(&dev_priv->gmbus_wait_queue);
1612ce99c256SDaniel Vetter }
1613ce99c256SDaniel Vetter 
16148bf1e9f1SShuang He #if defined(CONFIG_DEBUG_FS)
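/*
 * CRC interrupts feed two consumers: the original i915-specific debugfs
 * interface (pipe_crc->source set), which copies the values into a circular
 * buffer, and the generic DRM CRC ABI via drm_crtc_add_crc_entry().
 */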
161591d14251STvrtko Ursulin static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
161691d14251STvrtko Ursulin 					 enum pipe pipe,
1617eba94eb9SDaniel Vetter 					 uint32_t crc0, uint32_t crc1,
1618eba94eb9SDaniel Vetter 					 uint32_t crc2, uint32_t crc3,
16198bc5e955SDaniel Vetter 					 uint32_t crc4)
16208bf1e9f1SShuang He {
16218bf1e9f1SShuang He 	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
16228bf1e9f1SShuang He 	struct intel_pipe_crc_entry *entry;
16238c6b709dSTomeu Vizoso 	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
16248c6b709dSTomeu Vizoso 	struct drm_driver *driver = dev_priv->drm.driver;
16258c6b709dSTomeu Vizoso 	uint32_t crcs[5];
1626ac2300d4SDamien Lespiau 	int head, tail;
1627b2c88f5bSDamien Lespiau 
1628d538bbdfSDamien Lespiau 	spin_lock(&pipe_crc->lock);
16298c6b709dSTomeu Vizoso 	if (pipe_crc->source) {
16300c912c79SDamien Lespiau 		if (!pipe_crc->entries) {
1631d538bbdfSDamien Lespiau 			spin_unlock(&pipe_crc->lock);
163234273620SDaniel Vetter 			DRM_DEBUG_KMS("spurious interrupt\n");
16330c912c79SDamien Lespiau 			return;
16340c912c79SDamien Lespiau 		}
16350c912c79SDamien Lespiau 
1636d538bbdfSDamien Lespiau 		head = pipe_crc->head;
1637d538bbdfSDamien Lespiau 		tail = pipe_crc->tail;
1638b2c88f5bSDamien Lespiau 
1639b2c88f5bSDamien Lespiau 		if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
1640d538bbdfSDamien Lespiau 			spin_unlock(&pipe_crc->lock);
1641b2c88f5bSDamien Lespiau 			DRM_ERROR("CRC buffer overflowing\n");
1642b2c88f5bSDamien Lespiau 			return;
1643b2c88f5bSDamien Lespiau 		}
1644b2c88f5bSDamien Lespiau 
1645b2c88f5bSDamien Lespiau 		entry = &pipe_crc->entries[head];
16468bf1e9f1SShuang He 
16478c6b709dSTomeu Vizoso 		entry->frame = driver->get_vblank_counter(&dev_priv->drm, pipe);
1648eba94eb9SDaniel Vetter 		entry->crc[0] = crc0;
1649eba94eb9SDaniel Vetter 		entry->crc[1] = crc1;
1650eba94eb9SDaniel Vetter 		entry->crc[2] = crc2;
1651eba94eb9SDaniel Vetter 		entry->crc[3] = crc3;
1652eba94eb9SDaniel Vetter 		entry->crc[4] = crc4;
1653b2c88f5bSDamien Lespiau 
1654b2c88f5bSDamien Lespiau 		head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
1655d538bbdfSDamien Lespiau 		pipe_crc->head = head;
1656d538bbdfSDamien Lespiau 
1657d538bbdfSDamien Lespiau 		spin_unlock(&pipe_crc->lock);
165807144428SDamien Lespiau 
165907144428SDamien Lespiau 		wake_up_interruptible(&pipe_crc->wq);
16608c6b709dSTomeu Vizoso 	} else {
16618c6b709dSTomeu Vizoso 		/*
16628c6b709dSTomeu Vizoso 		 * For some not yet identified reason, the first CRC is
16638c6b709dSTomeu Vizoso 		 * bonkers. So let's just wait for the next vblank and read
16648c6b709dSTomeu Vizoso 		 * out the buggy result.
16658c6b709dSTomeu Vizoso 		 *
1666163e8aecSRodrigo Vivi 		 * On GEN8+ sometimes the second CRC is bonkers as well, so
16678c6b709dSTomeu Vizoso 		 * don't trust that one either.
16688c6b709dSTomeu Vizoso 		 */
16698c6b709dSTomeu Vizoso 		if (pipe_crc->skipped == 0 ||
1670163e8aecSRodrigo Vivi 		    (INTEL_GEN(dev_priv) >= 8 && pipe_crc->skipped == 1)) {
16718c6b709dSTomeu Vizoso 			pipe_crc->skipped++;
16728c6b709dSTomeu Vizoso 			spin_unlock(&pipe_crc->lock);
16738c6b709dSTomeu Vizoso 			return;
16748c6b709dSTomeu Vizoso 		}
16758c6b709dSTomeu Vizoso 		spin_unlock(&pipe_crc->lock);
16768c6b709dSTomeu Vizoso 		crcs[0] = crc0;
16778c6b709dSTomeu Vizoso 		crcs[1] = crc1;
16788c6b709dSTomeu Vizoso 		crcs[2] = crc2;
16798c6b709dSTomeu Vizoso 		crcs[3] = crc3;
16808c6b709dSTomeu Vizoso 		crcs[4] = crc4;
1681246ee524STomeu Vizoso 		drm_crtc_add_crc_entry(&crtc->base, true,
1682ca814b25SDaniel Vetter 				       drm_crtc_accurate_vblank_count(&crtc->base),
1683246ee524STomeu Vizoso 				       crcs);
16848c6b709dSTomeu Vizoso 	}
16858bf1e9f1SShuang He }
1686277de95eSDaniel Vetter #else
1687277de95eSDaniel Vetter static inline void
168891d14251STvrtko Ursulin display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
168991d14251STvrtko Ursulin 			     enum pipe pipe,
1690277de95eSDaniel Vetter 			     uint32_t crc0, uint32_t crc1,
1691277de95eSDaniel Vetter 			     uint32_t crc2, uint32_t crc3,
1692277de95eSDaniel Vetter 			     uint32_t crc4) {}
1693277de95eSDaniel Vetter #endif
1694eba94eb9SDaniel Vetter 
1695277de95eSDaniel Vetter 
169691d14251STvrtko Ursulin static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
169791d14251STvrtko Ursulin 				     enum pipe pipe)
16985a69b89fSDaniel Vetter {
169991d14251STvrtko Ursulin 	display_pipe_crc_irq_handler(dev_priv, pipe,
17005a69b89fSDaniel Vetter 				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
17015a69b89fSDaniel Vetter 				     0, 0, 0, 0);
17025a69b89fSDaniel Vetter }
17035a69b89fSDaniel Vetter 
170491d14251STvrtko Ursulin static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
170591d14251STvrtko Ursulin 				     enum pipe pipe)
1706eba94eb9SDaniel Vetter {
170791d14251STvrtko Ursulin 	display_pipe_crc_irq_handler(dev_priv, pipe,
1708eba94eb9SDaniel Vetter 				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1709eba94eb9SDaniel Vetter 				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
1710eba94eb9SDaniel Vetter 				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
1711eba94eb9SDaniel Vetter 				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
17128bc5e955SDaniel Vetter 				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
1713eba94eb9SDaniel Vetter }
17145b3a856bSDaniel Vetter 
171591d14251STvrtko Ursulin static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
171691d14251STvrtko Ursulin 				      enum pipe pipe)
17175b3a856bSDaniel Vetter {
17180b5c5ed0SDaniel Vetter 	uint32_t res1, res2;
17190b5c5ed0SDaniel Vetter 
172091d14251STvrtko Ursulin 	if (INTEL_GEN(dev_priv) >= 3)
17210b5c5ed0SDaniel Vetter 		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
17220b5c5ed0SDaniel Vetter 	else
17230b5c5ed0SDaniel Vetter 		res1 = 0;
17240b5c5ed0SDaniel Vetter 
172591d14251STvrtko Ursulin 	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
17260b5c5ed0SDaniel Vetter 		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
17270b5c5ed0SDaniel Vetter 	else
17280b5c5ed0SDaniel Vetter 		res2 = 0;
17295b3a856bSDaniel Vetter 
173091d14251STvrtko Ursulin 	display_pipe_crc_irq_handler(dev_priv, pipe,
17310b5c5ed0SDaniel Vetter 				     I915_READ(PIPE_CRC_RES_RED(pipe)),
17320b5c5ed0SDaniel Vetter 				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
17330b5c5ed0SDaniel Vetter 				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
17340b5c5ed0SDaniel Vetter 				     res1, res2);
17355b3a856bSDaniel Vetter }
17368bf1e9f1SShuang He 
17371403c0d4SPaulo Zanoni /* The RPS events need forcewake, so we add them to a work queue and mask their
17381403c0d4SPaulo Zanoni  * IMR bits until the work is done. Other interrupts can be processed without
17391403c0d4SPaulo Zanoni  * the work queue. */
17401403c0d4SPaulo Zanoni static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
1741baf02a1fSBen Widawsky {
1742562d9baeSSagar Arun Kamble 	struct intel_rps *rps = &dev_priv->gt_pm.rps;
1743562d9baeSSagar Arun Kamble 
1744a6706b45SDeepak S 	if (pm_iir & dev_priv->pm_rps_events) {
174559cdb63dSDaniel Vetter 		spin_lock(&dev_priv->irq_lock);
1746f4e9af4fSAkash Goel 		gen6_mask_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
1747562d9baeSSagar Arun Kamble 		if (rps->interrupts_enabled) {
1748562d9baeSSagar Arun Kamble 			rps->pm_iir |= pm_iir & dev_priv->pm_rps_events;
1749562d9baeSSagar Arun Kamble 			schedule_work(&rps->work);
175041a05a3aSDaniel Vetter 		}
1751d4d70aa5SImre Deak 		spin_unlock(&dev_priv->irq_lock);
1752d4d70aa5SImre Deak 	}
1753baf02a1fSBen Widawsky 
1754bca2bf2aSPandiyan, Dhinakaran 	if (INTEL_GEN(dev_priv) >= 8)
1755c9a9a268SImre Deak 		return;
1756c9a9a268SImre Deak 
17572d1fe073SJoonas Lahtinen 	if (HAS_VEBOX(dev_priv)) {
175812638c57SBen Widawsky 		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
17593b3f1650SAkash Goel 			notify_ring(dev_priv->engine[VECS]);
176012638c57SBen Widawsky 
1761aaecdf61SDaniel Vetter 		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
1762aaecdf61SDaniel Vetter 			DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
176312638c57SBen Widawsky 	}
17641403c0d4SPaulo Zanoni }
1765baf02a1fSBen Widawsky 
176626705e20SSagar Arun Kamble static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 gt_iir)
176726705e20SSagar Arun Kamble {
176826705e20SSagar Arun Kamble 	if (gt_iir & GEN9_GUC_TO_HOST_INT_EVENT) {
17694100b2abSSagar Arun Kamble 		/* Sample the log buffer flush-related bits & clear them out now
17704100b2abSSagar Arun Kamble 		 * from the message identity register to minimize the
17714100b2abSSagar Arun Kamble 		 * probability of losing a flush interrupt, when there are back
17724100b2abSSagar Arun Kamble 		 * to back flush interrupts.
17734100b2abSSagar Arun Kamble 		 * There can be a new flush interrupt, for different log buffer
17744100b2abSSagar Arun Kamble 		 * type (like for ISR), whilst Host is handling one (for DPC).
17754100b2abSSagar Arun Kamble 		 * Since same bit is used in message register for ISR & DPC, it
17764100b2abSSagar Arun Kamble 		 * could happen that GuC sets the bit for 2nd interrupt but Host
17774100b2abSSagar Arun Kamble 		 * clears out the bit on handling the 1st interrupt.
17784100b2abSSagar Arun Kamble 		 */
17794100b2abSSagar Arun Kamble 		u32 msg, flush;
17804100b2abSSagar Arun Kamble 
17814100b2abSSagar Arun Kamble 		msg = I915_READ(SOFT_SCRATCH(15));
1782a80bc45fSArkadiusz Hiler 		flush = msg & (INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED |
1783a80bc45fSArkadiusz Hiler 			       INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER);
17844100b2abSSagar Arun Kamble 		if (flush) {
17854100b2abSSagar Arun Kamble 			/* Clear the message bits that are handled */
17864100b2abSSagar Arun Kamble 			I915_WRITE(SOFT_SCRATCH(15), msg & ~flush);
17874100b2abSSagar Arun Kamble 
17884100b2abSSagar Arun Kamble 			/* Handle flush interrupt in bottom half */
1789e7465473SOscar Mateo 			queue_work(dev_priv->guc.log.runtime.flush_wq,
1790e7465473SOscar Mateo 				   &dev_priv->guc.log.runtime.flush_work);
17915aa1ee4bSAkash Goel 
17925aa1ee4bSAkash Goel 			dev_priv->guc.log.flush_interrupt_count++;
17934100b2abSSagar Arun Kamble 		} else {
17944100b2abSSagar Arun Kamble 			/* Leaving unhandled event bits uncleared won't result in
17954100b2abSSagar Arun Kamble 			 * the interrupt being re-triggered.
17964100b2abSSagar Arun Kamble 			 */
17974100b2abSSagar Arun Kamble 		}
179826705e20SSagar Arun Kamble 	}
179926705e20SSagar Arun Kamble }
180026705e20SSagar Arun Kamble 
180144d9241eSVille Syrjälä static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
180244d9241eSVille Syrjälä {
180344d9241eSVille Syrjälä 	enum pipe pipe;
180444d9241eSVille Syrjälä 
180544d9241eSVille Syrjälä 	for_each_pipe(dev_priv, pipe) {
180644d9241eSVille Syrjälä 		I915_WRITE(PIPESTAT(pipe),
180744d9241eSVille Syrjälä 			   PIPESTAT_INT_STATUS_MASK |
180844d9241eSVille Syrjälä 			   PIPE_FIFO_UNDERRUN_STATUS);
180944d9241eSVille Syrjälä 
181044d9241eSVille Syrjälä 		dev_priv->pipestat_irq_mask[pipe] = 0;
181144d9241eSVille Syrjälä 	}
181244d9241eSVille Syrjälä }
181344d9241eSVille Syrjälä 
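/*
 * i9xx_pipestat_irq_ack - latch and clear the PIPESTAT registers.
 *
 * Read the enabled status bits (always including FIFO underrun) for each
 * pipe into @pipe_stats[] and clear them in the hardware; the per-platform
 * handlers below then act on the saved copy.
 */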
1814eb64343cSVille Syrjälä static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
181591d14251STvrtko Ursulin 				  u32 iir, u32 pipe_stats[I915_MAX_PIPES])
18167e231dbeSJesse Barnes {
18177e231dbeSJesse Barnes 	int pipe;
18187e231dbeSJesse Barnes 
181958ead0d7SImre Deak 	spin_lock(&dev_priv->irq_lock);
18201ca993d2SVille Syrjälä 
18211ca993d2SVille Syrjälä 	if (!dev_priv->display_irqs_enabled) {
18221ca993d2SVille Syrjälä 		spin_unlock(&dev_priv->irq_lock);
18231ca993d2SVille Syrjälä 		return;
18241ca993d2SVille Syrjälä 	}
18251ca993d2SVille Syrjälä 
1826055e393fSDamien Lespiau 	for_each_pipe(dev_priv, pipe) {
1827f0f59a00SVille Syrjälä 		i915_reg_t reg;
18286b12ca56SVille Syrjälä 		u32 status_mask, enable_mask, iir_bit = 0;
182991d181ddSImre Deak 
1830bbb5eebfSDaniel Vetter 		/*
1831bbb5eebfSDaniel Vetter 		 * PIPESTAT bits get signalled even when the interrupt is
1832bbb5eebfSDaniel Vetter 		 * disabled with the mask bits, and some of the status bits do
1833bbb5eebfSDaniel Vetter 		 * not generate interrupts at all (like the underrun bit). Hence
1834bbb5eebfSDaniel Vetter 		 * we need to be careful that we only handle what we want to
1835bbb5eebfSDaniel Vetter 		 * handle.
1836bbb5eebfSDaniel Vetter 		 */
18370f239f4cSDaniel Vetter 
18380f239f4cSDaniel Vetter 		/* fifo underruns are filtered in the underrun handler. */
18396b12ca56SVille Syrjälä 		status_mask = PIPE_FIFO_UNDERRUN_STATUS;
1840bbb5eebfSDaniel Vetter 
1841bbb5eebfSDaniel Vetter 		switch (pipe) {
1842bbb5eebfSDaniel Vetter 		case PIPE_A:
1843bbb5eebfSDaniel Vetter 			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
1844bbb5eebfSDaniel Vetter 			break;
1845bbb5eebfSDaniel Vetter 		case PIPE_B:
1846bbb5eebfSDaniel Vetter 			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
1847bbb5eebfSDaniel Vetter 			break;
18483278f67fSVille Syrjälä 		case PIPE_C:
18493278f67fSVille Syrjälä 			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
18503278f67fSVille Syrjälä 			break;
1851bbb5eebfSDaniel Vetter 		}
1852bbb5eebfSDaniel Vetter 		if (iir & iir_bit)
18536b12ca56SVille Syrjälä 			status_mask |= dev_priv->pipestat_irq_mask[pipe];
1854bbb5eebfSDaniel Vetter 
18556b12ca56SVille Syrjälä 		if (!status_mask)
185691d181ddSImre Deak 			continue;
185791d181ddSImre Deak 
185891d181ddSImre Deak 		reg = PIPESTAT(pipe);
18596b12ca56SVille Syrjälä 		pipe_stats[pipe] = I915_READ(reg) & status_mask;
18606b12ca56SVille Syrjälä 		enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
18617e231dbeSJesse Barnes 
18627e231dbeSJesse Barnes 		/*
18637e231dbeSJesse Barnes 		 * Clear the PIPE*STAT regs before the IIR
18647e231dbeSJesse Barnes 		 */
18656b12ca56SVille Syrjälä 		if (pipe_stats[pipe])
18666b12ca56SVille Syrjälä 			I915_WRITE(reg, enable_mask | pipe_stats[pipe]);
18677e231dbeSJesse Barnes 	}
186858ead0d7SImre Deak 	spin_unlock(&dev_priv->irq_lock);
18692ecb8ca4SVille Syrjälä }
18702ecb8ca4SVille Syrjälä 
1871eb64343cSVille Syrjälä static void i8xx_pipestat_irq_handler(struct drm_i915_private *dev_priv,
1872eb64343cSVille Syrjälä 				      u16 iir, u32 pipe_stats[I915_MAX_PIPES])
1873eb64343cSVille Syrjälä {
1874eb64343cSVille Syrjälä 	enum pipe pipe;
1875eb64343cSVille Syrjälä 
1876eb64343cSVille Syrjälä 	for_each_pipe(dev_priv, pipe) {
1877eb64343cSVille Syrjälä 		if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
1878eb64343cSVille Syrjälä 			drm_handle_vblank(&dev_priv->drm, pipe);
1879eb64343cSVille Syrjälä 
1880eb64343cSVille Syrjälä 		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1881eb64343cSVille Syrjälä 			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1882eb64343cSVille Syrjälä 
1883eb64343cSVille Syrjälä 		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1884eb64343cSVille Syrjälä 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1885eb64343cSVille Syrjälä 	}
1886eb64343cSVille Syrjälä }
1887eb64343cSVille Syrjälä 
1888eb64343cSVille Syrjälä static void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv,
1889eb64343cSVille Syrjälä 				      u32 iir, u32 pipe_stats[I915_MAX_PIPES])
1890eb64343cSVille Syrjälä {
1891eb64343cSVille Syrjälä 	bool blc_event = false;
1892eb64343cSVille Syrjälä 	enum pipe pipe;
1893eb64343cSVille Syrjälä 
1894eb64343cSVille Syrjälä 	for_each_pipe(dev_priv, pipe) {
1895eb64343cSVille Syrjälä 		if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
1896eb64343cSVille Syrjälä 			drm_handle_vblank(&dev_priv->drm, pipe);
1897eb64343cSVille Syrjälä 
1898eb64343cSVille Syrjälä 		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
1899eb64343cSVille Syrjälä 			blc_event = true;
1900eb64343cSVille Syrjälä 
1901eb64343cSVille Syrjälä 		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1902eb64343cSVille Syrjälä 			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1903eb64343cSVille Syrjälä 
1904eb64343cSVille Syrjälä 		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1905eb64343cSVille Syrjälä 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1906eb64343cSVille Syrjälä 	}
1907eb64343cSVille Syrjälä 
1908eb64343cSVille Syrjälä 	if (blc_event || (iir & I915_ASLE_INTERRUPT))
1909eb64343cSVille Syrjälä 		intel_opregion_asle_intr(dev_priv);
1910eb64343cSVille Syrjälä }
1911eb64343cSVille Syrjälä 
1912eb64343cSVille Syrjälä static void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv,
1913eb64343cSVille Syrjälä 				      u32 iir, u32 pipe_stats[I915_MAX_PIPES])
1914eb64343cSVille Syrjälä {
1915eb64343cSVille Syrjälä 	bool blc_event = false;
1916eb64343cSVille Syrjälä 	enum pipe pipe;
1917eb64343cSVille Syrjälä 
1918eb64343cSVille Syrjälä 	for_each_pipe(dev_priv, pipe) {
1919eb64343cSVille Syrjälä 		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
1920eb64343cSVille Syrjälä 			drm_handle_vblank(&dev_priv->drm, pipe);
1921eb64343cSVille Syrjälä 
1922eb64343cSVille Syrjälä 		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
1923eb64343cSVille Syrjälä 			blc_event = true;
1924eb64343cSVille Syrjälä 
1925eb64343cSVille Syrjälä 		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1926eb64343cSVille Syrjälä 			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1927eb64343cSVille Syrjälä 
1928eb64343cSVille Syrjälä 		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1929eb64343cSVille Syrjälä 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1930eb64343cSVille Syrjälä 	}
1931eb64343cSVille Syrjälä 
1932eb64343cSVille Syrjälä 	if (blc_event || (iir & I915_ASLE_INTERRUPT))
1933eb64343cSVille Syrjälä 		intel_opregion_asle_intr(dev_priv);
1934eb64343cSVille Syrjälä 
1935eb64343cSVille Syrjälä 	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
1936eb64343cSVille Syrjälä 		gmbus_irq_handler(dev_priv);
1937eb64343cSVille Syrjälä }
1938eb64343cSVille Syrjälä 
193991d14251STvrtko Ursulin static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
19402ecb8ca4SVille Syrjälä 					    u32 pipe_stats[I915_MAX_PIPES])
19412ecb8ca4SVille Syrjälä {
19422ecb8ca4SVille Syrjälä 	enum pipe pipe;
19437e231dbeSJesse Barnes 
1944055e393fSDamien Lespiau 	for_each_pipe(dev_priv, pipe) {
1945fd3a4024SDaniel Vetter 		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
1946fd3a4024SDaniel Vetter 			drm_handle_vblank(&dev_priv->drm, pipe);
19474356d586SDaniel Vetter 
19484356d586SDaniel Vetter 		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
194991d14251STvrtko Ursulin 			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
19502d9d2b0bSVille Syrjälä 
19511f7247c0SDaniel Vetter 		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
19521f7247c0SDaniel Vetter 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
195331acc7f5SJesse Barnes 	}
195431acc7f5SJesse Barnes 
1955c1874ed7SImre Deak 	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
195691d14251STvrtko Ursulin 		gmbus_irq_handler(dev_priv);
1957c1874ed7SImre Deak }
1958c1874ed7SImre Deak 
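/*
 * i9xx_hpd_irq_ack reads and clears PORT_HOTPLUG_STAT; the returned value is
 * decoded later by i9xx_hpd_irq_handler() once the other interrupt sources
 * have been acked.
 */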
19591ae3c34cSVille Syrjälä static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
196016c6c56bSVille Syrjälä {
196116c6c56bSVille Syrjälä 	u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
196216c6c56bSVille Syrjälä 
19631ae3c34cSVille Syrjälä 	if (hotplug_status)
19643ff60f89SOscar Mateo 		I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
19651ae3c34cSVille Syrjälä 
19661ae3c34cSVille Syrjälä 	return hotplug_status;
19671ae3c34cSVille Syrjälä }
19681ae3c34cSVille Syrjälä 
196991d14251STvrtko Ursulin static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv,
19701ae3c34cSVille Syrjälä 				 u32 hotplug_status)
19711ae3c34cSVille Syrjälä {
19721ae3c34cSVille Syrjälä 	u32 pin_mask = 0, long_mask = 0;
19733ff60f89SOscar Mateo 
197491d14251STvrtko Ursulin 	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
197591d14251STvrtko Ursulin 	    IS_CHERRYVIEW(dev_priv)) {
197616c6c56bSVille Syrjälä 		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
197716c6c56bSVille Syrjälä 
197858f2cf24SVille Syrjälä 		if (hotplug_trigger) {
1979fd63e2a9SImre Deak 			intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
1980fd63e2a9SImre Deak 					   hotplug_trigger, hpd_status_g4x,
1981fd63e2a9SImre Deak 					   i9xx_port_hotplug_long_detect);
198258f2cf24SVille Syrjälä 
198391d14251STvrtko Ursulin 			intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
198458f2cf24SVille Syrjälä 		}
1985369712e8SJani Nikula 
1986369712e8SJani Nikula 		if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
198791d14251STvrtko Ursulin 			dp_aux_irq_handler(dev_priv);
198816c6c56bSVille Syrjälä 	} else {
198916c6c56bSVille Syrjälä 		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
199016c6c56bSVille Syrjälä 
199158f2cf24SVille Syrjälä 		if (hotplug_trigger) {
1992fd63e2a9SImre Deak 			intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
19934e3d1e26SVille Syrjälä 					   hotplug_trigger, hpd_status_i915,
1994fd63e2a9SImre Deak 					   i9xx_port_hotplug_long_detect);
199591d14251STvrtko Ursulin 			intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
199616c6c56bSVille Syrjälä 		}
19973ff60f89SOscar Mateo 	}
199858f2cf24SVille Syrjälä }
199916c6c56bSVille Syrjälä 
2000c1874ed7SImre Deak static irqreturn_t valleyview_irq_handler(int irq, void *arg)
2001c1874ed7SImre Deak {
200245a83f84SDaniel Vetter 	struct drm_device *dev = arg;
2003fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
2004c1874ed7SImre Deak 	irqreturn_t ret = IRQ_NONE;
2005c1874ed7SImre Deak 
20062dd2a883SImre Deak 	if (!intel_irqs_enabled(dev_priv))
20072dd2a883SImre Deak 		return IRQ_NONE;
20082dd2a883SImre Deak 
20091f814dacSImre Deak 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
20101f814dacSImre Deak 	disable_rpm_wakeref_asserts(dev_priv);
20111f814dacSImre Deak 
20121e1cace9SVille Syrjälä 	do {
20136e814800SVille Syrjälä 		u32 iir, gt_iir, pm_iir;
20142ecb8ca4SVille Syrjälä 		u32 pipe_stats[I915_MAX_PIPES] = {};
20151ae3c34cSVille Syrjälä 		u32 hotplug_status = 0;
2016a5e485a9SVille Syrjälä 		u32 ier = 0;
20173ff60f89SOscar Mateo 
2018c1874ed7SImre Deak 		gt_iir = I915_READ(GTIIR);
2019c1874ed7SImre Deak 		pm_iir = I915_READ(GEN6_PMIIR);
20203ff60f89SOscar Mateo 		iir = I915_READ(VLV_IIR);
2021c1874ed7SImre Deak 
2022c1874ed7SImre Deak 		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
20231e1cace9SVille Syrjälä 			break;
2024c1874ed7SImre Deak 
2025c1874ed7SImre Deak 		ret = IRQ_HANDLED;
2026c1874ed7SImre Deak 
2027a5e485a9SVille Syrjälä 		/*
2028a5e485a9SVille Syrjälä 		 * Theory on interrupt generation, based on empirical evidence:
2029a5e485a9SVille Syrjälä 		 *
2030a5e485a9SVille Syrjälä 		 * x = ((VLV_IIR & VLV_IER) ||
2031a5e485a9SVille Syrjälä 		 *      (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) &&
2032a5e485a9SVille Syrjälä 		 *       (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE)));
2033a5e485a9SVille Syrjälä 		 *
2034a5e485a9SVille Syrjälä 		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
2035a5e485a9SVille Syrjälä 		 * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to
2036a5e485a9SVille Syrjälä 		 * guarantee the CPU interrupt will be raised again even if we
2037a5e485a9SVille Syrjälä 		 * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR
2038a5e485a9SVille Syrjälä 		 * bits this time around.
2039a5e485a9SVille Syrjälä 		 */
20404a0a0202SVille Syrjälä 		I915_WRITE(VLV_MASTER_IER, 0);
2041a5e485a9SVille Syrjälä 		ier = I915_READ(VLV_IER);
2042a5e485a9SVille Syrjälä 		I915_WRITE(VLV_IER, 0);
20434a0a0202SVille Syrjälä 
20444a0a0202SVille Syrjälä 		if (gt_iir)
20454a0a0202SVille Syrjälä 			I915_WRITE(GTIIR, gt_iir);
20464a0a0202SVille Syrjälä 		if (pm_iir)
20474a0a0202SVille Syrjälä 			I915_WRITE(GEN6_PMIIR, pm_iir);
20484a0a0202SVille Syrjälä 
20497ce4d1f2SVille Syrjälä 		if (iir & I915_DISPLAY_PORT_INTERRUPT)
20501ae3c34cSVille Syrjälä 			hotplug_status = i9xx_hpd_irq_ack(dev_priv);
20517ce4d1f2SVille Syrjälä 
20523ff60f89SOscar Mateo 		/* Call regardless, as some status bits might not be
20533ff60f89SOscar Mateo 		 * signalled in iir */
2054eb64343cSVille Syrjälä 		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
20557ce4d1f2SVille Syrjälä 
2056eef57324SJerome Anand 		if (iir & (I915_LPE_PIPE_A_INTERRUPT |
2057eef57324SJerome Anand 			   I915_LPE_PIPE_B_INTERRUPT))
2058eef57324SJerome Anand 			intel_lpe_audio_irq_handler(dev_priv);
2059eef57324SJerome Anand 
20607ce4d1f2SVille Syrjälä 		/*
20617ce4d1f2SVille Syrjälä 		 * VLV_IIR is single buffered, and reflects the level
20627ce4d1f2SVille Syrjälä 		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
20637ce4d1f2SVille Syrjälä 		 */
20647ce4d1f2SVille Syrjälä 		if (iir)
20657ce4d1f2SVille Syrjälä 			I915_WRITE(VLV_IIR, iir);
20664a0a0202SVille Syrjälä 
2067a5e485a9SVille Syrjälä 		I915_WRITE(VLV_IER, ier);
20684a0a0202SVille Syrjälä 		I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
20694a0a0202SVille Syrjälä 		POSTING_READ(VLV_MASTER_IER);
20701ae3c34cSVille Syrjälä 
207152894874SVille Syrjälä 		if (gt_iir)
2072261e40b8SVille Syrjälä 			snb_gt_irq_handler(dev_priv, gt_iir);
207352894874SVille Syrjälä 		if (pm_iir)
207452894874SVille Syrjälä 			gen6_rps_irq_handler(dev_priv, pm_iir);
207552894874SVille Syrjälä 
20761ae3c34cSVille Syrjälä 		if (hotplug_status)
207791d14251STvrtko Ursulin 			i9xx_hpd_irq_handler(dev_priv, hotplug_status);
20782ecb8ca4SVille Syrjälä 
207991d14251STvrtko Ursulin 		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
20801e1cace9SVille Syrjälä 	} while (0);
20817e231dbeSJesse Barnes 
20821f814dacSImre Deak 	enable_rpm_wakeref_asserts(dev_priv);
20831f814dacSImre Deak 
20847e231dbeSJesse Barnes 	return ret;
20857e231dbeSJesse Barnes }
20867e231dbeSJesse Barnes 
208743f328d7SVille Syrjälä static irqreturn_t cherryview_irq_handler(int irq, void *arg)
208843f328d7SVille Syrjälä {
208945a83f84SDaniel Vetter 	struct drm_device *dev = arg;
2090fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
209143f328d7SVille Syrjälä 	irqreturn_t ret = IRQ_NONE;
209243f328d7SVille Syrjälä 
20932dd2a883SImre Deak 	if (!intel_irqs_enabled(dev_priv))
20942dd2a883SImre Deak 		return IRQ_NONE;
20952dd2a883SImre Deak 
20961f814dacSImre Deak 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
20971f814dacSImre Deak 	disable_rpm_wakeref_asserts(dev_priv);
20981f814dacSImre Deak 
2099579de73bSChris Wilson 	do {
21006e814800SVille Syrjälä 		u32 master_ctl, iir;
2101e30e251aSVille Syrjälä 		u32 gt_iir[4] = {};
21022ecb8ca4SVille Syrjälä 		u32 pipe_stats[I915_MAX_PIPES] = {};
21031ae3c34cSVille Syrjälä 		u32 hotplug_status = 0;
2104a5e485a9SVille Syrjälä 		u32 ier = 0;
2105a5e485a9SVille Syrjälä 
21068e5fd599SVille Syrjälä 		master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
21073278f67fSVille Syrjälä 		iir = I915_READ(VLV_IIR);
21083278f67fSVille Syrjälä 
21093278f67fSVille Syrjälä 		if (master_ctl == 0 && iir == 0)
21108e5fd599SVille Syrjälä 			break;
211143f328d7SVille Syrjälä 
211227b6c122SOscar Mateo 		ret = IRQ_HANDLED;
211327b6c122SOscar Mateo 
2114a5e485a9SVille Syrjälä 		/*
2115a5e485a9SVille Syrjälä 		 * Theory on interrupt generation, based on empirical evidence:
2116a5e485a9SVille Syrjälä 		 *
2117a5e485a9SVille Syrjälä 		 * x = ((VLV_IIR & VLV_IER) ||
2118a5e485a9SVille Syrjälä 		 *      ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) &&
2119a5e485a9SVille Syrjälä 		 *       (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL)));
2120a5e485a9SVille Syrjälä 		 *
2121a5e485a9SVille Syrjälä 		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
2122a5e485a9SVille Syrjälä 		 * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to
2123a5e485a9SVille Syrjälä 		 * guarantee the CPU interrupt will be raised again even if we
2124a5e485a9SVille Syrjälä 		 * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL
2125a5e485a9SVille Syrjälä 		 * bits this time around.
2126a5e485a9SVille Syrjälä 		 */
212743f328d7SVille Syrjälä 		I915_WRITE(GEN8_MASTER_IRQ, 0);
2128a5e485a9SVille Syrjälä 		ier = I915_READ(VLV_IER);
2129a5e485a9SVille Syrjälä 		I915_WRITE(VLV_IER, 0);
213043f328d7SVille Syrjälä 
2131e30e251aSVille Syrjälä 		gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);
213227b6c122SOscar Mateo 
213327b6c122SOscar Mateo 		if (iir & I915_DISPLAY_PORT_INTERRUPT)
21341ae3c34cSVille Syrjälä 			hotplug_status = i9xx_hpd_irq_ack(dev_priv);
213543f328d7SVille Syrjälä 
213627b6c122SOscar Mateo 		/* Call regardless, as some status bits might not be
213727b6c122SOscar Mateo 		 * signalled in iir */
2138eb64343cSVille Syrjälä 		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
213943f328d7SVille Syrjälä 
2140eef57324SJerome Anand 		if (iir & (I915_LPE_PIPE_A_INTERRUPT |
2141eef57324SJerome Anand 			   I915_LPE_PIPE_B_INTERRUPT |
2142eef57324SJerome Anand 			   I915_LPE_PIPE_C_INTERRUPT))
2143eef57324SJerome Anand 			intel_lpe_audio_irq_handler(dev_priv);
2144eef57324SJerome Anand 
21457ce4d1f2SVille Syrjälä 		/*
21467ce4d1f2SVille Syrjälä 		 * VLV_IIR is single buffered, and reflects the level
21477ce4d1f2SVille Syrjälä 		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
21487ce4d1f2SVille Syrjälä 		 */
21497ce4d1f2SVille Syrjälä 		if (iir)
21507ce4d1f2SVille Syrjälä 			I915_WRITE(VLV_IIR, iir);
21517ce4d1f2SVille Syrjälä 
2152a5e485a9SVille Syrjälä 		I915_WRITE(VLV_IER, ier);
2153e5328c43SVille Syrjälä 		I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
215443f328d7SVille Syrjälä 		POSTING_READ(GEN8_MASTER_IRQ);
21551ae3c34cSVille Syrjälä 
2156e30e251aSVille Syrjälä 		gen8_gt_irq_handler(dev_priv, gt_iir);
2157e30e251aSVille Syrjälä 
21581ae3c34cSVille Syrjälä 		if (hotplug_status)
215991d14251STvrtko Ursulin 			i9xx_hpd_irq_handler(dev_priv, hotplug_status);
21602ecb8ca4SVille Syrjälä 
216191d14251STvrtko Ursulin 		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
2162579de73bSChris Wilson 	} while (0);
21633278f67fSVille Syrjälä 
21641f814dacSImre Deak 	enable_rpm_wakeref_asserts(dev_priv);
21651f814dacSImre Deak 
216643f328d7SVille Syrjälä 	return ret;
216743f328d7SVille Syrjälä }
216843f328d7SVille Syrjälä 
216991d14251STvrtko Ursulin static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
217091d14251STvrtko Ursulin 				u32 hotplug_trigger,
217140e56410SVille Syrjälä 				const u32 hpd[HPD_NUM_PINS])
2172776ad806SJesse Barnes {
217342db67d6SVille Syrjälä 	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2174776ad806SJesse Barnes 
21756a39d7c9SJani Nikula 	/*
21766a39d7c9SJani Nikula 	 * Somehow the PCH doesn't seem to really ack the interrupt to the CPU
21776a39d7c9SJani Nikula 	 * unless we touch the hotplug register, even if hotplug_trigger is
21786a39d7c9SJani Nikula 	 * zero. Not acking leads to "The master control interrupt lied (SDE)!"
21796a39d7c9SJani Nikula 	 * errors.
21806a39d7c9SJani Nikula 	 */
218113cf5504SDave Airlie 	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
21826a39d7c9SJani Nikula 	if (!hotplug_trigger) {
21836a39d7c9SJani Nikula 		u32 mask = PORTA_HOTPLUG_STATUS_MASK |
21846a39d7c9SJani Nikula 			PORTD_HOTPLUG_STATUS_MASK |
21856a39d7c9SJani Nikula 			PORTC_HOTPLUG_STATUS_MASK |
21866a39d7c9SJani Nikula 			PORTB_HOTPLUG_STATUS_MASK;
21876a39d7c9SJani Nikula 		dig_hotplug_reg &= ~mask;
21886a39d7c9SJani Nikula 	}
21896a39d7c9SJani Nikula 
219013cf5504SDave Airlie 	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
21916a39d7c9SJani Nikula 	if (!hotplug_trigger)
21926a39d7c9SJani Nikula 		return;
219313cf5504SDave Airlie 
2194fd63e2a9SImre Deak 	intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
219540e56410SVille Syrjälä 			   dig_hotplug_reg, hpd,
2196fd63e2a9SImre Deak 			   pch_port_hotplug_long_detect);
219740e56410SVille Syrjälä 
219891d14251STvrtko Ursulin 	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2199aaf5ec2eSSonika Jindal }
220091d131d2SDaniel Vetter 
220191d14251STvrtko Ursulin static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
220240e56410SVille Syrjälä {
220340e56410SVille Syrjälä 	int pipe;
220440e56410SVille Syrjälä 	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
220540e56410SVille Syrjälä 
220691d14251STvrtko Ursulin 	ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ibx);
220740e56410SVille Syrjälä 
2208cfc33bf7SVille Syrjälä 	if (pch_iir & SDE_AUDIO_POWER_MASK) {
2209cfc33bf7SVille Syrjälä 		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
2210776ad806SJesse Barnes 			       SDE_AUDIO_POWER_SHIFT);
2211cfc33bf7SVille Syrjälä 		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
2212cfc33bf7SVille Syrjälä 				 port_name(port));
2213cfc33bf7SVille Syrjälä 	}
2214776ad806SJesse Barnes 
2215ce99c256SDaniel Vetter 	if (pch_iir & SDE_AUX_MASK)
221691d14251STvrtko Ursulin 		dp_aux_irq_handler(dev_priv);
2217ce99c256SDaniel Vetter 
2218776ad806SJesse Barnes 	if (pch_iir & SDE_GMBUS)
221991d14251STvrtko Ursulin 		gmbus_irq_handler(dev_priv);
2220776ad806SJesse Barnes 
2221776ad806SJesse Barnes 	if (pch_iir & SDE_AUDIO_HDCP_MASK)
2222776ad806SJesse Barnes 		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
2223776ad806SJesse Barnes 
2224776ad806SJesse Barnes 	if (pch_iir & SDE_AUDIO_TRANS_MASK)
2225776ad806SJesse Barnes 		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
2226776ad806SJesse Barnes 
2227776ad806SJesse Barnes 	if (pch_iir & SDE_POISON)
2228776ad806SJesse Barnes 		DRM_ERROR("PCH poison interrupt\n");
2229776ad806SJesse Barnes 
22309db4a9c7SJesse Barnes 	if (pch_iir & SDE_FDI_MASK)
2231055e393fSDamien Lespiau 		for_each_pipe(dev_priv, pipe)
22329db4a9c7SJesse Barnes 			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
22339db4a9c7SJesse Barnes 					 pipe_name(pipe),
22349db4a9c7SJesse Barnes 					 I915_READ(FDI_RX_IIR(pipe)));
2235776ad806SJesse Barnes 
2236776ad806SJesse Barnes 	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
2237776ad806SJesse Barnes 		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
2238776ad806SJesse Barnes 
2239776ad806SJesse Barnes 	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
2240776ad806SJesse Barnes 		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
2241776ad806SJesse Barnes 
2242776ad806SJesse Barnes 	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
2243a2196033SMatthias Kaehlcke 		intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A);
22448664281bSPaulo Zanoni 
22458664281bSPaulo Zanoni 	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
2246a2196033SMatthias Kaehlcke 		intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_B);
22478664281bSPaulo Zanoni }
22488664281bSPaulo Zanoni 
224991d14251STvrtko Ursulin static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
22508664281bSPaulo Zanoni {
22518664281bSPaulo Zanoni 	u32 err_int = I915_READ(GEN7_ERR_INT);
22525a69b89fSDaniel Vetter 	enum pipe pipe;
22538664281bSPaulo Zanoni 
2254de032bf4SPaulo Zanoni 	if (err_int & ERR_INT_POISON)
2255de032bf4SPaulo Zanoni 		DRM_ERROR("Poison interrupt\n");
2256de032bf4SPaulo Zanoni 
2257055e393fSDamien Lespiau 	for_each_pipe(dev_priv, pipe) {
22581f7247c0SDaniel Vetter 		if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
22591f7247c0SDaniel Vetter 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
22608664281bSPaulo Zanoni 
22615a69b89fSDaniel Vetter 		if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
226291d14251STvrtko Ursulin 			if (IS_IVYBRIDGE(dev_priv))
226391d14251STvrtko Ursulin 				ivb_pipe_crc_irq_handler(dev_priv, pipe);
22645a69b89fSDaniel Vetter 			else
226591d14251STvrtko Ursulin 				hsw_pipe_crc_irq_handler(dev_priv, pipe);
22665a69b89fSDaniel Vetter 		}
22675a69b89fSDaniel Vetter 	}
22688bf1e9f1SShuang He 
22698664281bSPaulo Zanoni 	I915_WRITE(GEN7_ERR_INT, err_int);
22708664281bSPaulo Zanoni }
22718664281bSPaulo Zanoni 
227291d14251STvrtko Ursulin static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
22738664281bSPaulo Zanoni {
22748664281bSPaulo Zanoni 	u32 serr_int = I915_READ(SERR_INT);
227545c1cd87SMika Kahola 	enum pipe pipe;
22768664281bSPaulo Zanoni 
2277de032bf4SPaulo Zanoni 	if (serr_int & SERR_INT_POISON)
2278de032bf4SPaulo Zanoni 		DRM_ERROR("PCH poison interrupt\n");
2279de032bf4SPaulo Zanoni 
228045c1cd87SMika Kahola 	for_each_pipe(dev_priv, pipe)
228145c1cd87SMika Kahola 		if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe))
228245c1cd87SMika Kahola 			intel_pch_fifo_underrun_irq_handler(dev_priv, pipe);
22838664281bSPaulo Zanoni 
22848664281bSPaulo Zanoni 	I915_WRITE(SERR_INT, serr_int);
2285776ad806SJesse Barnes }
2286776ad806SJesse Barnes 
228791d14251STvrtko Ursulin static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
228823e81d69SAdam Jackson {
228923e81d69SAdam Jackson 	int pipe;
22906dbf30ceSVille Syrjälä 	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
2291aaf5ec2eSSonika Jindal 
229291d14251STvrtko Ursulin 	ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_cpt);
229391d131d2SDaniel Vetter 
2294cfc33bf7SVille Syrjälä 	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
2295cfc33bf7SVille Syrjälä 		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
229623e81d69SAdam Jackson 			       SDE_AUDIO_POWER_SHIFT_CPT);
2297cfc33bf7SVille Syrjälä 		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
2298cfc33bf7SVille Syrjälä 				 port_name(port));
2299cfc33bf7SVille Syrjälä 	}
230023e81d69SAdam Jackson 
230123e81d69SAdam Jackson 	if (pch_iir & SDE_AUX_MASK_CPT)
230291d14251STvrtko Ursulin 		dp_aux_irq_handler(dev_priv);
230323e81d69SAdam Jackson 
230423e81d69SAdam Jackson 	if (pch_iir & SDE_GMBUS_CPT)
230591d14251STvrtko Ursulin 		gmbus_irq_handler(dev_priv);
230623e81d69SAdam Jackson 
230723e81d69SAdam Jackson 	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
230823e81d69SAdam Jackson 		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
230923e81d69SAdam Jackson 
231023e81d69SAdam Jackson 	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
231123e81d69SAdam Jackson 		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
231223e81d69SAdam Jackson 
231323e81d69SAdam Jackson 	if (pch_iir & SDE_FDI_MASK_CPT)
2314055e393fSDamien Lespiau 		for_each_pipe(dev_priv, pipe)
231523e81d69SAdam Jackson 			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
231623e81d69SAdam Jackson 					 pipe_name(pipe),
231723e81d69SAdam Jackson 					 I915_READ(FDI_RX_IIR(pipe)));
23188664281bSPaulo Zanoni 
23198664281bSPaulo Zanoni 	if (pch_iir & SDE_ERROR_CPT)
232091d14251STvrtko Ursulin 		cpt_serr_int_handler(dev_priv);
232123e81d69SAdam Jackson }
232223e81d69SAdam Jackson 
232391d14251STvrtko Ursulin static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
23246dbf30ceSVille Syrjälä {
23256dbf30ceSVille Syrjälä 	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
23266dbf30ceSVille Syrjälä 		~SDE_PORTE_HOTPLUG_SPT;
23276dbf30ceSVille Syrjälä 	u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
23286dbf30ceSVille Syrjälä 	u32 pin_mask = 0, long_mask = 0;
23296dbf30ceSVille Syrjälä 
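	/*
	 * SPT splits the hotplug status across PCH_PORT_HOTPLUG and
	 * PCH_PORT_HOTPLUG2; ack and decode whichever register(s) triggered.
	 */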
23306dbf30ceSVille Syrjälä 	if (hotplug_trigger) {
23316dbf30ceSVille Syrjälä 		u32 dig_hotplug_reg;
23326dbf30ceSVille Syrjälä 
23336dbf30ceSVille Syrjälä 		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
23346dbf30ceSVille Syrjälä 		I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
23356dbf30ceSVille Syrjälä 
23366dbf30ceSVille Syrjälä 		intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
23376dbf30ceSVille Syrjälä 				   dig_hotplug_reg, hpd_spt,
233874c0b395SVille Syrjälä 				   spt_port_hotplug_long_detect);
23396dbf30ceSVille Syrjälä 	}
23406dbf30ceSVille Syrjälä 
23416dbf30ceSVille Syrjälä 	if (hotplug2_trigger) {
23426dbf30ceSVille Syrjälä 		u32 dig_hotplug_reg;
23436dbf30ceSVille Syrjälä 
23446dbf30ceSVille Syrjälä 		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2);
23456dbf30ceSVille Syrjälä 		I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg);
23466dbf30ceSVille Syrjälä 
23476dbf30ceSVille Syrjälä 		intel_get_hpd_pins(&pin_mask, &long_mask, hotplug2_trigger,
23486dbf30ceSVille Syrjälä 				   dig_hotplug_reg, hpd_spt,
23496dbf30ceSVille Syrjälä 				   spt_port_hotplug2_long_detect);
23506dbf30ceSVille Syrjälä 	}
23516dbf30ceSVille Syrjälä 
23526dbf30ceSVille Syrjälä 	if (pin_mask)
235391d14251STvrtko Ursulin 		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
23546dbf30ceSVille Syrjälä 
23556dbf30ceSVille Syrjälä 	if (pch_iir & SDE_GMBUS_CPT)
235691d14251STvrtko Ursulin 		gmbus_irq_handler(dev_priv);
23576dbf30ceSVille Syrjälä }
23586dbf30ceSVille Syrjälä 
235991d14251STvrtko Ursulin static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv,
236091d14251STvrtko Ursulin 				u32 hotplug_trigger,
236140e56410SVille Syrjälä 				const u32 hpd[HPD_NUM_PINS])
2362c008bc6eSPaulo Zanoni {
2363e4ce95aaSVille Syrjälä 	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2364e4ce95aaSVille Syrjälä 
2365e4ce95aaSVille Syrjälä 	dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
2366e4ce95aaSVille Syrjälä 	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);
2367e4ce95aaSVille Syrjälä 
2368e4ce95aaSVille Syrjälä 	intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
236940e56410SVille Syrjälä 			   dig_hotplug_reg, hpd,
2370e4ce95aaSVille Syrjälä 			   ilk_port_hotplug_long_detect);
237140e56410SVille Syrjälä 
237291d14251STvrtko Ursulin 	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2373e4ce95aaSVille Syrjälä }
2374c008bc6eSPaulo Zanoni 
237591d14251STvrtko Ursulin static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
237691d14251STvrtko Ursulin 				    u32 de_iir)
237740e56410SVille Syrjälä {
237840e56410SVille Syrjälä 	enum pipe pipe;
237940e56410SVille Syrjälä 	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;
238040e56410SVille Syrjälä 
238140e56410SVille Syrjälä 	if (hotplug_trigger)
238291d14251STvrtko Ursulin 		ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ilk);
238340e56410SVille Syrjälä 
2384c008bc6eSPaulo Zanoni 	if (de_iir & DE_AUX_CHANNEL_A)
238591d14251STvrtko Ursulin 		dp_aux_irq_handler(dev_priv);
2386c008bc6eSPaulo Zanoni 
2387c008bc6eSPaulo Zanoni 	if (de_iir & DE_GSE)
238891d14251STvrtko Ursulin 		intel_opregion_asle_intr(dev_priv);
2389c008bc6eSPaulo Zanoni 
2390c008bc6eSPaulo Zanoni 	if (de_iir & DE_POISON)
2391c008bc6eSPaulo Zanoni 		DRM_ERROR("Poison interrupt\n");
2392c008bc6eSPaulo Zanoni 
2393055e393fSDamien Lespiau 	for_each_pipe(dev_priv, pipe) {
2394fd3a4024SDaniel Vetter 		if (de_iir & DE_PIPE_VBLANK(pipe))
2395fd3a4024SDaniel Vetter 			drm_handle_vblank(&dev_priv->drm, pipe);
2396c008bc6eSPaulo Zanoni 
239740da17c2SDaniel Vetter 		if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
23981f7247c0SDaniel Vetter 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2399c008bc6eSPaulo Zanoni 
240040da17c2SDaniel Vetter 		if (de_iir & DE_PIPE_CRC_DONE(pipe))
240191d14251STvrtko Ursulin 			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
2402c008bc6eSPaulo Zanoni 	}
2403c008bc6eSPaulo Zanoni 
2404c008bc6eSPaulo Zanoni 	/* check event from PCH */
2405c008bc6eSPaulo Zanoni 	if (de_iir & DE_PCH_EVENT) {
2406c008bc6eSPaulo Zanoni 		u32 pch_iir = I915_READ(SDEIIR);
2407c008bc6eSPaulo Zanoni 
240891d14251STvrtko Ursulin 		if (HAS_PCH_CPT(dev_priv))
240991d14251STvrtko Ursulin 			cpt_irq_handler(dev_priv, pch_iir);
2410c008bc6eSPaulo Zanoni 		else
241191d14251STvrtko Ursulin 			ibx_irq_handler(dev_priv, pch_iir);
2412c008bc6eSPaulo Zanoni 
2413c008bc6eSPaulo Zanoni 		/* should clear PCH hotplug event before clearing the CPU irq */
2414c008bc6eSPaulo Zanoni 		I915_WRITE(SDEIIR, pch_iir);
2415c008bc6eSPaulo Zanoni 	}
2416c008bc6eSPaulo Zanoni 
241791d14251STvrtko Ursulin 	if (IS_GEN5(dev_priv) && de_iir & DE_PCU_EVENT)
241891d14251STvrtko Ursulin 		ironlake_rps_change_irq_handler(dev_priv);
2419c008bc6eSPaulo Zanoni }
2420c008bc6eSPaulo Zanoni 
242191d14251STvrtko Ursulin static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
242291d14251STvrtko Ursulin 				    u32 de_iir)
24239719fb98SPaulo Zanoni {
242407d27e20SDamien Lespiau 	enum pipe pipe;
242523bb4cb5SVille Syrjälä 	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;
242623bb4cb5SVille Syrjälä 
242740e56410SVille Syrjälä 	if (hotplug_trigger)
242891d14251STvrtko Ursulin 		ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ivb);
24299719fb98SPaulo Zanoni 
24309719fb98SPaulo Zanoni 	if (de_iir & DE_ERR_INT_IVB)
243191d14251STvrtko Ursulin 		ivb_err_int_handler(dev_priv);
24329719fb98SPaulo Zanoni 
24339719fb98SPaulo Zanoni 	if (de_iir & DE_AUX_CHANNEL_A_IVB)
243491d14251STvrtko Ursulin 		dp_aux_irq_handler(dev_priv);
24359719fb98SPaulo Zanoni 
24369719fb98SPaulo Zanoni 	if (de_iir & DE_GSE_IVB)
243791d14251STvrtko Ursulin 		intel_opregion_asle_intr(dev_priv);
24389719fb98SPaulo Zanoni 
2439055e393fSDamien Lespiau 	for_each_pipe(dev_priv, pipe) {
2440fd3a4024SDaniel Vetter 		if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)))
2441fd3a4024SDaniel Vetter 			drm_handle_vblank(&dev_priv->drm, pipe);
24429719fb98SPaulo Zanoni 	}
24439719fb98SPaulo Zanoni 
24449719fb98SPaulo Zanoni 	/* check event from PCH */
244591d14251STvrtko Ursulin 	if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) {
24469719fb98SPaulo Zanoni 		u32 pch_iir = I915_READ(SDEIIR);
24479719fb98SPaulo Zanoni 
244891d14251STvrtko Ursulin 		cpt_irq_handler(dev_priv, pch_iir);
24499719fb98SPaulo Zanoni 
24509719fb98SPaulo Zanoni 		/* clear PCH hotplug event before clearing the CPU irq */
24519719fb98SPaulo Zanoni 		I915_WRITE(SDEIIR, pch_iir);
24529719fb98SPaulo Zanoni 	}
24539719fb98SPaulo Zanoni }
24549719fb98SPaulo Zanoni 
245572c90f62SOscar Mateo /*
245672c90f62SOscar Mateo  * To handle irqs with the minimum potential races with fresh interrupts, we:
245772c90f62SOscar Mateo  * 1 - Disable Master Interrupt Control.
245872c90f62SOscar Mateo  * 2 - Find the source(s) of the interrupt.
245972c90f62SOscar Mateo  * 3 - Clear the Interrupt Identity bits (IIR).
246072c90f62SOscar Mateo  * 4 - Process the interrupt(s) that had bits set in the IIRs.
246172c90f62SOscar Mateo  * 5 - Re-enable Master Interrupt Control.
246272c90f62SOscar Mateo  */
2463f1af8fc1SPaulo Zanoni static irqreturn_t ironlake_irq_handler(int irq, void *arg)
2464b1f14ad0SJesse Barnes {
246545a83f84SDaniel Vetter 	struct drm_device *dev = arg;
2466fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
2467f1af8fc1SPaulo Zanoni 	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
24680e43406bSChris Wilson 	irqreturn_t ret = IRQ_NONE;
2469b1f14ad0SJesse Barnes 
24702dd2a883SImre Deak 	if (!intel_irqs_enabled(dev_priv))
24712dd2a883SImre Deak 		return IRQ_NONE;
24722dd2a883SImre Deak 
24731f814dacSImre Deak 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
24741f814dacSImre Deak 	disable_rpm_wakeref_asserts(dev_priv);
24751f814dacSImre Deak 
2476b1f14ad0SJesse Barnes 	/* disable master interrupt before clearing iir  */
2477b1f14ad0SJesse Barnes 	de_ier = I915_READ(DEIER);
2478b1f14ad0SJesse Barnes 	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
247923a78516SPaulo Zanoni 	POSTING_READ(DEIER);
24800e43406bSChris Wilson 
248144498aeaSPaulo Zanoni 	/* Disable south interrupts. We'll only write to SDEIIR once, so further
248244498aeaSPaulo Zanoni 	 * interrupts will be stored on its back queue, and then we'll be
248344498aeaSPaulo Zanoni 	 * able to process them after we restore SDEIER (as soon as we restore
248444498aeaSPaulo Zanoni 	 * it, we'll get an interrupt if SDEIIR still has something to process
248544498aeaSPaulo Zanoni 	 * due to its back queue). */
248691d14251STvrtko Ursulin 	if (!HAS_PCH_NOP(dev_priv)) {
248744498aeaSPaulo Zanoni 		sde_ier = I915_READ(SDEIER);
248844498aeaSPaulo Zanoni 		I915_WRITE(SDEIER, 0);
248944498aeaSPaulo Zanoni 		POSTING_READ(SDEIER);
2490ab5c608bSBen Widawsky 	}
249144498aeaSPaulo Zanoni 
249272c90f62SOscar Mateo 	/* Find, clear, then process each source of interrupt */
249372c90f62SOscar Mateo 
24940e43406bSChris Wilson 	gt_iir = I915_READ(GTIIR);
24950e43406bSChris Wilson 	if (gt_iir) {
249672c90f62SOscar Mateo 		I915_WRITE(GTIIR, gt_iir);
249772c90f62SOscar Mateo 		ret = IRQ_HANDLED;
249891d14251STvrtko Ursulin 		if (INTEL_GEN(dev_priv) >= 6)
2499261e40b8SVille Syrjälä 			snb_gt_irq_handler(dev_priv, gt_iir);
2500d8fc8a47SPaulo Zanoni 		else
2501261e40b8SVille Syrjälä 			ilk_gt_irq_handler(dev_priv, gt_iir);
25020e43406bSChris Wilson 	}
2503b1f14ad0SJesse Barnes 
2504b1f14ad0SJesse Barnes 	de_iir = I915_READ(DEIIR);
25050e43406bSChris Wilson 	if (de_iir) {
250672c90f62SOscar Mateo 		I915_WRITE(DEIIR, de_iir);
250772c90f62SOscar Mateo 		ret = IRQ_HANDLED;
250891d14251STvrtko Ursulin 		if (INTEL_GEN(dev_priv) >= 7)
250991d14251STvrtko Ursulin 			ivb_display_irq_handler(dev_priv, de_iir);
2510f1af8fc1SPaulo Zanoni 		else
251191d14251STvrtko Ursulin 			ilk_display_irq_handler(dev_priv, de_iir);
25120e43406bSChris Wilson 	}
25130e43406bSChris Wilson 
251491d14251STvrtko Ursulin 	if (INTEL_GEN(dev_priv) >= 6) {
2515f1af8fc1SPaulo Zanoni 		u32 pm_iir = I915_READ(GEN6_PMIIR);
25160e43406bSChris Wilson 		if (pm_iir) {
2517b1f14ad0SJesse Barnes 			I915_WRITE(GEN6_PMIIR, pm_iir);
25180e43406bSChris Wilson 			ret = IRQ_HANDLED;
251972c90f62SOscar Mateo 			gen6_rps_irq_handler(dev_priv, pm_iir);
25200e43406bSChris Wilson 		}
2521f1af8fc1SPaulo Zanoni 	}
2522b1f14ad0SJesse Barnes 
2523b1f14ad0SJesse Barnes 	I915_WRITE(DEIER, de_ier);
2524b1f14ad0SJesse Barnes 	POSTING_READ(DEIER);
252591d14251STvrtko Ursulin 	if (!HAS_PCH_NOP(dev_priv)) {
252644498aeaSPaulo Zanoni 		I915_WRITE(SDEIER, sde_ier);
252744498aeaSPaulo Zanoni 		POSTING_READ(SDEIER);
2528ab5c608bSBen Widawsky 	}
2529b1f14ad0SJesse Barnes 
25301f814dacSImre Deak 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
25311f814dacSImre Deak 	enable_rpm_wakeref_asserts(dev_priv);
25321f814dacSImre Deak 
2533b1f14ad0SJesse Barnes 	return ret;
2534b1f14ad0SJesse Barnes }
2535b1f14ad0SJesse Barnes 
253691d14251STvrtko Ursulin static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv,
253791d14251STvrtko Ursulin 				u32 hotplug_trigger,
253840e56410SVille Syrjälä 				const u32 hpd[HPD_NUM_PINS])
2539d04a492dSShashank Sharma {
2540cebd87a0SVille Syrjälä 	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2541d04a492dSShashank Sharma 
2542a52bb15bSVille Syrjälä 	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2543a52bb15bSVille Syrjälä 	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2544d04a492dSShashank Sharma 
2545cebd87a0SVille Syrjälä 	intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
254640e56410SVille Syrjälä 			   dig_hotplug_reg, hpd,
2547cebd87a0SVille Syrjälä 			   bxt_port_hotplug_long_detect);
254840e56410SVille Syrjälä 
254991d14251STvrtko Ursulin 	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2550d04a492dSShashank Sharma }
2551d04a492dSShashank Sharma 
2552f11a0f46STvrtko Ursulin static irqreturn_t
2553f11a0f46STvrtko Ursulin gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
2554abd58f01SBen Widawsky {
2555abd58f01SBen Widawsky 	irqreturn_t ret = IRQ_NONE;
2556f11a0f46STvrtko Ursulin 	u32 iir;
2557c42664ccSDaniel Vetter 	enum pipe pipe;
255888e04703SJesse Barnes 
2559abd58f01SBen Widawsky 	if (master_ctl & GEN8_DE_MISC_IRQ) {
2560e32192e1STvrtko Ursulin 		iir = I915_READ(GEN8_DE_MISC_IIR);
2561e32192e1STvrtko Ursulin 		if (iir) {
2562e32192e1STvrtko Ursulin 			I915_WRITE(GEN8_DE_MISC_IIR, iir);
2563abd58f01SBen Widawsky 			ret = IRQ_HANDLED;
2564e32192e1STvrtko Ursulin 			if (iir & GEN8_DE_MISC_GSE)
256591d14251STvrtko Ursulin 				intel_opregion_asle_intr(dev_priv);
256638cc46d7SOscar Mateo 			else
256738cc46d7SOscar Mateo 				DRM_ERROR("Unexpected DE Misc interrupt\n");
2568abd58f01SBen Widawsky 		}
256938cc46d7SOscar Mateo 		else
257038cc46d7SOscar Mateo 			DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
2571abd58f01SBen Widawsky 	}
2572abd58f01SBen Widawsky 
25736d766f02SDaniel Vetter 	if (master_ctl & GEN8_DE_PORT_IRQ) {
2574e32192e1STvrtko Ursulin 		iir = I915_READ(GEN8_DE_PORT_IIR);
2575e32192e1STvrtko Ursulin 		if (iir) {
2576e32192e1STvrtko Ursulin 			u32 tmp_mask;
2577d04a492dSShashank Sharma 			bool found = false;
2578cebd87a0SVille Syrjälä 
2579e32192e1STvrtko Ursulin 			I915_WRITE(GEN8_DE_PORT_IIR, iir);
25806d766f02SDaniel Vetter 			ret = IRQ_HANDLED;
258188e04703SJesse Barnes 
2582e32192e1STvrtko Ursulin 			tmp_mask = GEN8_AUX_CHANNEL_A;
2583bca2bf2aSPandiyan, Dhinakaran 			if (INTEL_GEN(dev_priv) >= 9)
2584e32192e1STvrtko Ursulin 				tmp_mask |= GEN9_AUX_CHANNEL_B |
2585e32192e1STvrtko Ursulin 					    GEN9_AUX_CHANNEL_C |
2586e32192e1STvrtko Ursulin 					    GEN9_AUX_CHANNEL_D;
2587e32192e1STvrtko Ursulin 
2588e32192e1STvrtko Ursulin 			if (iir & tmp_mask) {
258991d14251STvrtko Ursulin 				dp_aux_irq_handler(dev_priv);
2590d04a492dSShashank Sharma 				found = true;
2591d04a492dSShashank Sharma 			}
2592d04a492dSShashank Sharma 
2593cc3f90f0SAnder Conselvan de Oliveira 			if (IS_GEN9_LP(dev_priv)) {
2594e32192e1STvrtko Ursulin 				tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK;
2595e32192e1STvrtko Ursulin 				if (tmp_mask) {
259691d14251STvrtko Ursulin 					bxt_hpd_irq_handler(dev_priv, tmp_mask,
259791d14251STvrtko Ursulin 							    hpd_bxt);
2598d04a492dSShashank Sharma 					found = true;
2599d04a492dSShashank Sharma 				}
2600e32192e1STvrtko Ursulin 			} else if (IS_BROADWELL(dev_priv)) {
2601e32192e1STvrtko Ursulin 				tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG;
2602e32192e1STvrtko Ursulin 				if (tmp_mask) {
260391d14251STvrtko Ursulin 					ilk_hpd_irq_handler(dev_priv,
260491d14251STvrtko Ursulin 							    tmp_mask, hpd_bdw);
2605e32192e1STvrtko Ursulin 					found = true;
2606e32192e1STvrtko Ursulin 				}
2607e32192e1STvrtko Ursulin 			}
2608d04a492dSShashank Sharma 
2609cc3f90f0SAnder Conselvan de Oliveira 			if (IS_GEN9_LP(dev_priv) && (iir & BXT_DE_PORT_GMBUS)) {
261091d14251STvrtko Ursulin 				gmbus_irq_handler(dev_priv);
26119e63743eSShashank Sharma 				found = true;
26129e63743eSShashank Sharma 			}
26139e63743eSShashank Sharma 
2614d04a492dSShashank Sharma 			if (!found)
261538cc46d7SOscar Mateo 				DRM_ERROR("Unexpected DE Port interrupt\n");
26166d766f02SDaniel Vetter 		}
261738cc46d7SOscar Mateo 		else
261838cc46d7SOscar Mateo 			DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
26196d766f02SDaniel Vetter 	}
26206d766f02SDaniel Vetter 
2621055e393fSDamien Lespiau 	for_each_pipe(dev_priv, pipe) {
2622fd3a4024SDaniel Vetter 		u32 fault_errors;
2623abd58f01SBen Widawsky 
2624c42664ccSDaniel Vetter 		if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
2625c42664ccSDaniel Vetter 			continue;
2626c42664ccSDaniel Vetter 
2627e32192e1STvrtko Ursulin 		iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
2628e32192e1STvrtko Ursulin 		if (!iir) {
2629e32192e1STvrtko Ursulin 			DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
2630e32192e1STvrtko Ursulin 			continue;
2631e32192e1STvrtko Ursulin 		}
2632770de83dSDamien Lespiau 
2633e32192e1STvrtko Ursulin 		ret = IRQ_HANDLED;
2634e32192e1STvrtko Ursulin 		I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir);
2635e32192e1STvrtko Ursulin 
2636fd3a4024SDaniel Vetter 		if (iir & GEN8_PIPE_VBLANK)
2637fd3a4024SDaniel Vetter 			drm_handle_vblank(&dev_priv->drm, pipe);
2638abd58f01SBen Widawsky 
2639e32192e1STvrtko Ursulin 		if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
264091d14251STvrtko Ursulin 			hsw_pipe_crc_irq_handler(dev_priv, pipe);
26410fbe7870SDaniel Vetter 
2642e32192e1STvrtko Ursulin 		if (iir & GEN8_PIPE_FIFO_UNDERRUN)
2643e32192e1STvrtko Ursulin 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
264438d83c96SDaniel Vetter 
2645e32192e1STvrtko Ursulin 		fault_errors = iir;
2646bca2bf2aSPandiyan, Dhinakaran 		if (INTEL_GEN(dev_priv) >= 9)
2647e32192e1STvrtko Ursulin 			fault_errors &= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
2648770de83dSDamien Lespiau 		else
2649e32192e1STvrtko Ursulin 			fault_errors &= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
2650770de83dSDamien Lespiau 
2651770de83dSDamien Lespiau 		if (fault_errors)
26521353ec38STvrtko Ursulin 			DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
265330100f2bSDaniel Vetter 				  pipe_name(pipe),
2654e32192e1STvrtko Ursulin 				  fault_errors);
2655abd58f01SBen Widawsky 	}
2656abd58f01SBen Widawsky 
265791d14251STvrtko Ursulin 	if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) &&
2658266ea3d9SShashank Sharma 	    master_ctl & GEN8_DE_PCH_IRQ) {
265992d03a80SDaniel Vetter 		/*
266092d03a80SDaniel Vetter 		 * FIXME(BDW): Assume for now that the new interrupt handling
266192d03a80SDaniel Vetter 		 * scheme also closed the SDE interrupt handling race we've seen
266292d03a80SDaniel Vetter 		 * on older pch-split platforms. But this needs testing.
266392d03a80SDaniel Vetter 		 */
2664e32192e1STvrtko Ursulin 		iir = I915_READ(SDEIIR);
2665e32192e1STvrtko Ursulin 		if (iir) {
2666e32192e1STvrtko Ursulin 			I915_WRITE(SDEIIR, iir);
266792d03a80SDaniel Vetter 			ret = IRQ_HANDLED;
26686dbf30ceSVille Syrjälä 
26697b22b8c4SRodrigo Vivi 			if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv) ||
26707b22b8c4SRodrigo Vivi 			    HAS_PCH_CNP(dev_priv))
267191d14251STvrtko Ursulin 				spt_irq_handler(dev_priv, iir);
26726dbf30ceSVille Syrjälä 			else
267391d14251STvrtko Ursulin 				cpt_irq_handler(dev_priv, iir);
26742dfb0b81SJani Nikula 		} else {
26752dfb0b81SJani Nikula 			/*
26762dfb0b81SJani Nikula 			 * Like on previous PCH there seems to be something
26772dfb0b81SJani Nikula 			 * fishy going on with forwarding PCH interrupts.
26782dfb0b81SJani Nikula 			 */
26792dfb0b81SJani Nikula 			DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n");
26802dfb0b81SJani Nikula 		}
268192d03a80SDaniel Vetter 	}
268292d03a80SDaniel Vetter 
2683f11a0f46STvrtko Ursulin 	return ret;
2684f11a0f46STvrtko Ursulin }
2685f11a0f46STvrtko Ursulin 
2686f11a0f46STvrtko Ursulin static irqreturn_t gen8_irq_handler(int irq, void *arg)
2687f11a0f46STvrtko Ursulin {
2688f11a0f46STvrtko Ursulin 	struct drm_device *dev = arg;
2689fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
2690f11a0f46STvrtko Ursulin 	u32 master_ctl;
2691e30e251aSVille Syrjälä 	u32 gt_iir[4] = {};
2692f11a0f46STvrtko Ursulin 	irqreturn_t ret;
2693f11a0f46STvrtko Ursulin 
2694f11a0f46STvrtko Ursulin 	if (!intel_irqs_enabled(dev_priv))
2695f11a0f46STvrtko Ursulin 		return IRQ_NONE;
2696f11a0f46STvrtko Ursulin 
2697f11a0f46STvrtko Ursulin 	master_ctl = I915_READ_FW(GEN8_MASTER_IRQ);
2698f11a0f46STvrtko Ursulin 	master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
2699f11a0f46STvrtko Ursulin 	if (!master_ctl)
2700f11a0f46STvrtko Ursulin 		return IRQ_NONE;
2701f11a0f46STvrtko Ursulin 
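	/*
	 * Disable the master interrupt until the individual sources below
	 * have been acked and handled; it is re-armed before returning.
	 */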
2702f11a0f46STvrtko Ursulin 	I915_WRITE_FW(GEN8_MASTER_IRQ, 0);
2703f11a0f46STvrtko Ursulin 
2704f11a0f46STvrtko Ursulin 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
2705f11a0f46STvrtko Ursulin 	disable_rpm_wakeref_asserts(dev_priv);
2706f11a0f46STvrtko Ursulin 
2707f11a0f46STvrtko Ursulin 	/* Find, clear, then process each source of interrupt */
2708e30e251aSVille Syrjälä 	ret = gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);
2709e30e251aSVille Syrjälä 	gen8_gt_irq_handler(dev_priv, gt_iir);
2710f11a0f46STvrtko Ursulin 	ret |= gen8_de_irq_handler(dev_priv, master_ctl);
2711f11a0f46STvrtko Ursulin 
2712cb0d205eSChris Wilson 	I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
2713cb0d205eSChris Wilson 	POSTING_READ_FW(GEN8_MASTER_IRQ);
2714abd58f01SBen Widawsky 
27151f814dacSImre Deak 	enable_rpm_wakeref_asserts(dev_priv);
27161f814dacSImre Deak 
2717abd58f01SBen Widawsky 	return ret;
2718abd58f01SBen Widawsky }
2719abd58f01SBen Widawsky 
272036703e79SChris Wilson struct wedge_me {
272136703e79SChris Wilson 	struct delayed_work work;
272236703e79SChris Wilson 	struct drm_i915_private *i915;
272336703e79SChris Wilson 	const char *name;
272436703e79SChris Wilson };
272536703e79SChris Wilson 
272636703e79SChris Wilson static void wedge_me(struct work_struct *work)
272736703e79SChris Wilson {
272836703e79SChris Wilson 	struct wedge_me *w = container_of(work, typeof(*w), work.work);
272936703e79SChris Wilson 
273036703e79SChris Wilson 	dev_err(w->i915->drm.dev,
273136703e79SChris Wilson 		"%s timed out, cancelling all in-flight rendering.\n",
273236703e79SChris Wilson 		w->name);
273336703e79SChris Wilson 	i915_gem_set_wedged(w->i915);
273436703e79SChris Wilson }
273536703e79SChris Wilson 
273636703e79SChris Wilson static void __init_wedge(struct wedge_me *w,
273736703e79SChris Wilson 			 struct drm_i915_private *i915,
273836703e79SChris Wilson 			 long timeout,
273936703e79SChris Wilson 			 const char *name)
274036703e79SChris Wilson {
274136703e79SChris Wilson 	w->i915 = i915;
274236703e79SChris Wilson 	w->name = name;
274336703e79SChris Wilson 
274436703e79SChris Wilson 	INIT_DELAYED_WORK_ONSTACK(&w->work, wedge_me);
274536703e79SChris Wilson 	schedule_delayed_work(&w->work, timeout);
274636703e79SChris Wilson }
274736703e79SChris Wilson 
274836703e79SChris Wilson static void __fini_wedge(struct wedge_me *w)
274936703e79SChris Wilson {
275036703e79SChris Wilson 	cancel_delayed_work_sync(&w->work);
275136703e79SChris Wilson 	destroy_delayed_work_on_stack(&w->work);
275236703e79SChris Wilson 	w->i915 = NULL;
275336703e79SChris Wilson }
275436703e79SChris Wilson 
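/*
 * i915_wedge_on_timeout() executes its body exactly once: __init_wedge() arms
 * a delayed worker that wedges the GPU if the body takes longer than TIMEOUT,
 * and __fini_wedge() cancels the worker (and clears w->i915, terminating the
 * loop) once the body completes.
 */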
275536703e79SChris Wilson #define i915_wedge_on_timeout(W, DEV, TIMEOUT)				\
275636703e79SChris Wilson 	for (__init_wedge((W), (DEV), (TIMEOUT), __func__);		\
275736703e79SChris Wilson 	     (W)->i915;							\
275836703e79SChris Wilson 	     __fini_wedge((W)))
275936703e79SChris Wilson 
27608a905236SJesse Barnes /**
2761d5367307SChris Wilson  * i915_reset_device - do process context error handling work
276214bb2c11STvrtko Ursulin  * @dev_priv: i915 device private
27638a905236SJesse Barnes  *
27648a905236SJesse Barnes  * Fire an error uevent so userspace can see that a hang or error
27658a905236SJesse Barnes  * was detected.
27668a905236SJesse Barnes  */
2767d5367307SChris Wilson static void i915_reset_device(struct drm_i915_private *dev_priv)
27688a905236SJesse Barnes {
276991c8a326SChris Wilson 	struct kobject *kobj = &dev_priv->drm.primary->kdev->kobj;
2770cce723edSBen Widawsky 	char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
2771cce723edSBen Widawsky 	char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
2772cce723edSBen Widawsky 	char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
277336703e79SChris Wilson 	struct wedge_me w;
27748a905236SJesse Barnes 
2775c033666aSChris Wilson 	kobject_uevent_env(kobj, KOBJ_CHANGE, error_event);
27768a905236SJesse Barnes 
277744d98a61SZhao Yakui 	DRM_DEBUG_DRIVER("resetting chip\n");
2778c033666aSChris Wilson 	kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event);
27791f83fee0SDaniel Vetter 
278036703e79SChris Wilson 	/* Use a watchdog to ensure that our reset completes */
278136703e79SChris Wilson 	i915_wedge_on_timeout(&w, dev_priv, 5*HZ) {
2782c033666aSChris Wilson 		intel_prepare_reset(dev_priv);
27837514747dSVille Syrjälä 
278436703e79SChris Wilson 		/* Signal that locked waiters should reset the GPU */
27858c185ecaSChris Wilson 		set_bit(I915_RESET_HANDOFF, &dev_priv->gpu_error.flags);
27868c185ecaSChris Wilson 		wake_up_all(&dev_priv->gpu_error.wait_queue);
27878c185ecaSChris Wilson 
278836703e79SChris Wilson 		/* Wait for anyone holding the lock to wake up, without
278936703e79SChris Wilson 		 * blocking indefinitely on struct_mutex.
279017e1df07SDaniel Vetter 		 */
279136703e79SChris Wilson 		do {
2792780f262aSChris Wilson 			if (mutex_trylock(&dev_priv->drm.struct_mutex)) {
2793535275d3SChris Wilson 				i915_reset(dev_priv, 0);
2794221fe799SChris Wilson 				mutex_unlock(&dev_priv->drm.struct_mutex);
2795780f262aSChris Wilson 			}
2796780f262aSChris Wilson 		} while (wait_on_bit_timeout(&dev_priv->gpu_error.flags,
27978c185ecaSChris Wilson 					     I915_RESET_HANDOFF,
2798780f262aSChris Wilson 					     TASK_UNINTERRUPTIBLE,
279936703e79SChris Wilson 					     1));
2800f69061beSDaniel Vetter 
2801c033666aSChris Wilson 		intel_finish_reset(dev_priv);
280236703e79SChris Wilson 	}
2803f454c694SImre Deak 
2804780f262aSChris Wilson 	if (!test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
2805c033666aSChris Wilson 		kobject_uevent_env(kobj,
2806f69061beSDaniel Vetter 				   KOBJ_CHANGE, reset_done_event);
2807f316a42cSBen Gamari }
28088a905236SJesse Barnes 
2809eaa14c24SChris Wilson static void i915_clear_error_registers(struct drm_i915_private *dev_priv)
2810c0e09200SDave Airlie {
2811eaa14c24SChris Wilson 	u32 eir;
281263eeaf38SJesse Barnes 
2813eaa14c24SChris Wilson 	if (!IS_GEN2(dev_priv))
2814eaa14c24SChris Wilson 		I915_WRITE(PGTBL_ER, I915_READ(PGTBL_ER));
281563eeaf38SJesse Barnes 
2816eaa14c24SChris Wilson 	if (INTEL_GEN(dev_priv) < 4)
2817eaa14c24SChris Wilson 		I915_WRITE(IPEIR, I915_READ(IPEIR));
2818eaa14c24SChris Wilson 	else
2819eaa14c24SChris Wilson 		I915_WRITE(IPEIR_I965, I915_READ(IPEIR_I965));
28208a905236SJesse Barnes 
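	/* Error status bits are sticky; writing back the current value clears them. */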
2821eaa14c24SChris Wilson 	I915_WRITE(EIR, I915_READ(EIR));
282263eeaf38SJesse Barnes 	eir = I915_READ(EIR);
282363eeaf38SJesse Barnes 	if (eir) {
282463eeaf38SJesse Barnes 		/*
282563eeaf38SJesse Barnes 		 * some errors might have become stuck,
282663eeaf38SJesse Barnes 		 * mask them.
282763eeaf38SJesse Barnes 		 */
2828eaa14c24SChris Wilson 		DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masking\n", eir);
282963eeaf38SJesse Barnes 		I915_WRITE(EMR, I915_READ(EMR) | eir);
283063eeaf38SJesse Barnes 		I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
283163eeaf38SJesse Barnes 	}
283235aed2e6SChris Wilson }
283335aed2e6SChris Wilson 
283435aed2e6SChris Wilson /**
2835b8d24a06SMika Kuoppala  * i915_handle_error - handle a gpu error
283614bb2c11STvrtko Ursulin  * @dev_priv: i915 device private
283714b730fcSarun.siluvery@linux.intel.com  * @engine_mask: mask representing engines that are hung
283887c390b6SMichel Thierry  * @fmt: Error message format string
283987c390b6SMichel Thierry  *
2840aafd8581SJavier Martinez Canillas  * Do some basic checking of register state at error time and
284135aed2e6SChris Wilson  * dump it to the syslog.  Also call i915_capture_error_state() to make
284235aed2e6SChris Wilson  * sure we get a record and make it available in debugfs.  Fire a uevent
284335aed2e6SChris Wilson  * so userspace knows something bad happened (should trigger collection
284435aed2e6SChris Wilson  * of a ring dump etc.).
284535aed2e6SChris Wilson  */
2846c033666aSChris Wilson void i915_handle_error(struct drm_i915_private *dev_priv,
2847c033666aSChris Wilson 		       u32 engine_mask,
284858174462SMika Kuoppala 		       const char *fmt, ...)
284935aed2e6SChris Wilson {
2850142bc7d9SMichel Thierry 	struct intel_engine_cs *engine;
2851142bc7d9SMichel Thierry 	unsigned int tmp;
285258174462SMika Kuoppala 	va_list args;
285358174462SMika Kuoppala 	char error_msg[80];
285435aed2e6SChris Wilson 
285558174462SMika Kuoppala 	va_start(args, fmt);
285658174462SMika Kuoppala 	vscnprintf(error_msg, sizeof(error_msg), fmt, args);
285758174462SMika Kuoppala 	va_end(args);
285858174462SMika Kuoppala 
28591604a86dSChris Wilson 	/*
28601604a86dSChris Wilson 	 * In most cases it's guaranteed that we get here with an RPM
28611604a86dSChris Wilson 	 * reference held, for example because there is a pending GPU
28621604a86dSChris Wilson 	 * request that won't finish until the reset is done. This
28631604a86dSChris Wilson 	 * isn't the case at least when we get here by doing a
28641604a86dSChris Wilson 	 * simulated reset via debugfs, so get an RPM reference.
28651604a86dSChris Wilson 	 */
28661604a86dSChris Wilson 	intel_runtime_pm_get(dev_priv);
28671604a86dSChris Wilson 
2868c033666aSChris Wilson 	i915_capture_error_state(dev_priv, engine_mask, error_msg);
2869eaa14c24SChris Wilson 	i915_clear_error_registers(dev_priv);
28708a905236SJesse Barnes 
2871142bc7d9SMichel Thierry 	/*
2872142bc7d9SMichel Thierry 	 * Try engine reset when available. We fall back to full reset if
2873142bc7d9SMichel Thierry 	 * single reset fails.
2874142bc7d9SMichel Thierry 	 */
2875142bc7d9SMichel Thierry 	if (intel_has_reset_engine(dev_priv)) {
2876142bc7d9SMichel Thierry 		for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
28779db529aaSDaniel Vetter 			BUILD_BUG_ON(I915_RESET_MODESET >= I915_RESET_ENGINE);
2878142bc7d9SMichel Thierry 			if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
2879142bc7d9SMichel Thierry 					     &dev_priv->gpu_error.flags))
2880142bc7d9SMichel Thierry 				continue;
2881142bc7d9SMichel Thierry 
2882535275d3SChris Wilson 			if (i915_reset_engine(engine, 0) == 0)
2883142bc7d9SMichel Thierry 				engine_mask &= ~intel_engine_flag(engine);
2884142bc7d9SMichel Thierry 
2885142bc7d9SMichel Thierry 			clear_bit(I915_RESET_ENGINE + engine->id,
2886142bc7d9SMichel Thierry 				  &dev_priv->gpu_error.flags);
2887142bc7d9SMichel Thierry 			wake_up_bit(&dev_priv->gpu_error.flags,
2888142bc7d9SMichel Thierry 				    I915_RESET_ENGINE + engine->id);
2889142bc7d9SMichel Thierry 		}
2890142bc7d9SMichel Thierry 	}
2891142bc7d9SMichel Thierry 
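	/*
	 * Nothing left to reset: either no engines were flagged as hung or
	 * every per-engine reset above succeeded.
	 */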
28928af29b0cSChris Wilson 	if (!engine_mask)
28931604a86dSChris Wilson 		goto out;
28948af29b0cSChris Wilson 
2895142bc7d9SMichel Thierry 	/* Full reset needs the mutex, stop any other user trying to do so. */
2896d5367307SChris Wilson 	if (test_and_set_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags)) {
2897d5367307SChris Wilson 		wait_event(dev_priv->gpu_error.reset_queue,
2898d5367307SChris Wilson 			   !test_bit(I915_RESET_BACKOFF,
2899d5367307SChris Wilson 				     &dev_priv->gpu_error.flags));
29001604a86dSChris Wilson 		goto out;
2901d5367307SChris Wilson 	}
2902ba1234d1SBen Gamari 
2903142bc7d9SMichel Thierry 	/* Prevent any other reset-engine attempt. */
2904142bc7d9SMichel Thierry 	for_each_engine(engine, dev_priv, tmp) {
2905142bc7d9SMichel Thierry 		while (test_and_set_bit(I915_RESET_ENGINE + engine->id,
2906142bc7d9SMichel Thierry 					&dev_priv->gpu_error.flags))
2907142bc7d9SMichel Thierry 			wait_on_bit(&dev_priv->gpu_error.flags,
2908142bc7d9SMichel Thierry 				    I915_RESET_ENGINE + engine->id,
2909142bc7d9SMichel Thierry 				    TASK_UNINTERRUPTIBLE);
2910142bc7d9SMichel Thierry 	}
2911142bc7d9SMichel Thierry 
2912d5367307SChris Wilson 	i915_reset_device(dev_priv);
2913d5367307SChris Wilson 
2914142bc7d9SMichel Thierry 	for_each_engine(engine, dev_priv, tmp) {
2915142bc7d9SMichel Thierry 		clear_bit(I915_RESET_ENGINE + engine->id,
2916142bc7d9SMichel Thierry 			  &dev_priv->gpu_error.flags);
2917142bc7d9SMichel Thierry 	}
2918142bc7d9SMichel Thierry 
2919d5367307SChris Wilson 	clear_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags);
2920d5367307SChris Wilson 	wake_up_all(&dev_priv->gpu_error.reset_queue);
29211604a86dSChris Wilson 
29221604a86dSChris Wilson out:
29231604a86dSChris Wilson 	intel_runtime_pm_put(dev_priv);
29248a905236SJesse Barnes }
29258a905236SJesse Barnes 
292642f52ef8SKeith Packard /* Called from drm generic code, passed 'crtc' which
292742f52ef8SKeith Packard  * we use as a pipe index
292842f52ef8SKeith Packard  */
292986e83e35SChris Wilson static int i8xx_enable_vblank(struct drm_device *dev, unsigned int pipe)
29300a3e67a4SJesse Barnes {
2931fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
2932e9d21d7fSKeith Packard 	unsigned long irqflags;
293371e0ffa5SJesse Barnes 
29341ec14ad3SChris Wilson 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
293586e83e35SChris Wilson 	i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
293686e83e35SChris Wilson 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
293786e83e35SChris Wilson 
293886e83e35SChris Wilson 	return 0;
293986e83e35SChris Wilson }
294086e83e35SChris Wilson 
294186e83e35SChris Wilson static int i965_enable_vblank(struct drm_device *dev, unsigned int pipe)
294286e83e35SChris Wilson {
294386e83e35SChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
294486e83e35SChris Wilson 	unsigned long irqflags;
294586e83e35SChris Wilson 
294686e83e35SChris Wilson 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
29477c463586SKeith Packard 	i915_enable_pipestat(dev_priv, pipe,
2948755e9019SImre Deak 			     PIPE_START_VBLANK_INTERRUPT_STATUS);
29491ec14ad3SChris Wilson 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
29508692d00eSChris Wilson 
29510a3e67a4SJesse Barnes 	return 0;
29520a3e67a4SJesse Barnes }
29530a3e67a4SJesse Barnes 
295488e72717SThierry Reding static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
2955f796cf8fSJesse Barnes {
2956fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
2957f796cf8fSJesse Barnes 	unsigned long irqflags;
295855b8f2a7STvrtko Ursulin 	uint32_t bit = INTEL_GEN(dev_priv) >= 7 ?
295986e83e35SChris Wilson 		DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
2960f796cf8fSJesse Barnes 
2961f796cf8fSJesse Barnes 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2962fbdedaeaSVille Syrjälä 	ilk_enable_display_irq(dev_priv, bit);
2963b1f14ad0SJesse Barnes 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2964b1f14ad0SJesse Barnes 
2965b1f14ad0SJesse Barnes 	return 0;
2966b1f14ad0SJesse Barnes }
2967b1f14ad0SJesse Barnes 
296888e72717SThierry Reding static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe)
2969abd58f01SBen Widawsky {
2970fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
2971abd58f01SBen Widawsky 	unsigned long irqflags;
2972abd58f01SBen Widawsky 
2973abd58f01SBen Widawsky 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2974013d3752SVille Syrjälä 	bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
2975abd58f01SBen Widawsky 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2976013d3752SVille Syrjälä 
2977abd58f01SBen Widawsky 	return 0;
2978abd58f01SBen Widawsky }
2979abd58f01SBen Widawsky 
298042f52ef8SKeith Packard /* Called from drm generic code, passed 'crtc' which
298142f52ef8SKeith Packard  * we use as a pipe index
298242f52ef8SKeith Packard  */
298386e83e35SChris Wilson static void i8xx_disable_vblank(struct drm_device *dev, unsigned int pipe)
298486e83e35SChris Wilson {
298586e83e35SChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
298686e83e35SChris Wilson 	unsigned long irqflags;
298786e83e35SChris Wilson 
298886e83e35SChris Wilson 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
298986e83e35SChris Wilson 	i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
299086e83e35SChris Wilson 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
299186e83e35SChris Wilson }
299286e83e35SChris Wilson 
299386e83e35SChris Wilson static void i965_disable_vblank(struct drm_device *dev, unsigned int pipe)
29940a3e67a4SJesse Barnes {
2995fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
2996e9d21d7fSKeith Packard 	unsigned long irqflags;
29970a3e67a4SJesse Barnes 
29981ec14ad3SChris Wilson 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
29997c463586SKeith Packard 	i915_disable_pipestat(dev_priv, pipe,
3000755e9019SImre Deak 			      PIPE_START_VBLANK_INTERRUPT_STATUS);
30011ec14ad3SChris Wilson 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
30020a3e67a4SJesse Barnes }
30030a3e67a4SJesse Barnes 
300488e72717SThierry Reding static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe)
3005f796cf8fSJesse Barnes {
3006fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
3007f796cf8fSJesse Barnes 	unsigned long irqflags;
300855b8f2a7STvrtko Ursulin 	uint32_t bit = INTEL_GEN(dev_priv) >= 7 ?
300986e83e35SChris Wilson 		DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
3010f796cf8fSJesse Barnes 
3011f796cf8fSJesse Barnes 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3012fbdedaeaSVille Syrjälä 	ilk_disable_display_irq(dev_priv, bit);
3013b1f14ad0SJesse Barnes 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3014b1f14ad0SJesse Barnes }
3015b1f14ad0SJesse Barnes 
301688e72717SThierry Reding static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
3017abd58f01SBen Widawsky {
3018fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
3019abd58f01SBen Widawsky 	unsigned long irqflags;
3020abd58f01SBen Widawsky 
3021abd58f01SBen Widawsky 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3022013d3752SVille Syrjälä 	bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
3023abd58f01SBen Widawsky 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3024abd58f01SBen Widawsky }
3025abd58f01SBen Widawsky 
3026b243f530STvrtko Ursulin static void ibx_irq_reset(struct drm_i915_private *dev_priv)
302791738a95SPaulo Zanoni {
30286e266956STvrtko Ursulin 	if (HAS_PCH_NOP(dev_priv))
302991738a95SPaulo Zanoni 		return;
303091738a95SPaulo Zanoni 
30313488d4ebSVille Syrjälä 	GEN3_IRQ_RESET(SDE);
3032105b122eSPaulo Zanoni 
30336e266956STvrtko Ursulin 	if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
3034105b122eSPaulo Zanoni 		I915_WRITE(SERR_INT, 0xffffffff);
3035622364b6SPaulo Zanoni }
3036105b122eSPaulo Zanoni 
303791738a95SPaulo Zanoni /*
3038622364b6SPaulo Zanoni  * SDEIER is also touched by the interrupt handler to work around missed PCH
3039622364b6SPaulo Zanoni  * interrupts. Hence we can't update it after the interrupt handler is enabled -
3040622364b6SPaulo Zanoni  * instead we unconditionally enable all PCH interrupt sources here, but then
3041622364b6SPaulo Zanoni  * only unmask them as needed with SDEIMR.
3042622364b6SPaulo Zanoni  *
3043622364b6SPaulo Zanoni  * This function needs to be called before interrupts are enabled.
304491738a95SPaulo Zanoni  */
3045622364b6SPaulo Zanoni static void ibx_irq_pre_postinstall(struct drm_device *dev)
3046622364b6SPaulo Zanoni {
3047fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
3048622364b6SPaulo Zanoni 
30496e266956STvrtko Ursulin 	if (HAS_PCH_NOP(dev_priv))
3050622364b6SPaulo Zanoni 		return;
3051622364b6SPaulo Zanoni 
3052622364b6SPaulo Zanoni 	WARN_ON(I915_READ(SDEIER) != 0);
305391738a95SPaulo Zanoni 	I915_WRITE(SDEIER, 0xffffffff);
305491738a95SPaulo Zanoni 	POSTING_READ(SDEIER);
305591738a95SPaulo Zanoni }
305691738a95SPaulo Zanoni 
3057b243f530STvrtko Ursulin static void gen5_gt_irq_reset(struct drm_i915_private *dev_priv)
3058d18ea1b5SDaniel Vetter {
30593488d4ebSVille Syrjälä 	GEN3_IRQ_RESET(GT);
3060b243f530STvrtko Ursulin 	if (INTEL_GEN(dev_priv) >= 6)
30613488d4ebSVille Syrjälä 		GEN3_IRQ_RESET(GEN6_PM);
3062d18ea1b5SDaniel Vetter }
3063d18ea1b5SDaniel Vetter 
306470591a41SVille Syrjälä static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
306570591a41SVille Syrjälä {
306671b8b41dSVille Syrjälä 	if (IS_CHERRYVIEW(dev_priv))
306771b8b41dSVille Syrjälä 		I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
306871b8b41dSVille Syrjälä 	else
306971b8b41dSVille Syrjälä 		I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
307071b8b41dSVille Syrjälä 
3071ad22d106SVille Syrjälä 	i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
307270591a41SVille Syrjälä 	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
307370591a41SVille Syrjälä 
307444d9241eSVille Syrjälä 	i9xx_pipestat_irq_reset(dev_priv);
307570591a41SVille Syrjälä 
30763488d4ebSVille Syrjälä 	GEN3_IRQ_RESET(VLV_);
30778bd099a7SChris Wilson 	dev_priv->irq_mask = ~0u;
307870591a41SVille Syrjälä }
307970591a41SVille Syrjälä 
30808bb61306SVille Syrjälä static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
30818bb61306SVille Syrjälä {
30828bb61306SVille Syrjälä 	u32 pipestat_mask;
30839ab981f2SVille Syrjälä 	u32 enable_mask;
30848bb61306SVille Syrjälä 	enum pipe pipe;
30858bb61306SVille Syrjälä 
3086842ebf7aSVille Syrjälä 	pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS;
30878bb61306SVille Syrjälä 
30888bb61306SVille Syrjälä 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
30898bb61306SVille Syrjälä 	for_each_pipe(dev_priv, pipe)
30908bb61306SVille Syrjälä 		i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
30918bb61306SVille Syrjälä 
30929ab981f2SVille Syrjälä 	enable_mask = I915_DISPLAY_PORT_INTERRUPT |
30938bb61306SVille Syrjälä 		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3094ebf5f921SVille Syrjälä 		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3095ebf5f921SVille Syrjälä 		I915_LPE_PIPE_A_INTERRUPT |
3096ebf5f921SVille Syrjälä 		I915_LPE_PIPE_B_INTERRUPT;
3097ebf5f921SVille Syrjälä 
30988bb61306SVille Syrjälä 	if (IS_CHERRYVIEW(dev_priv))
3099ebf5f921SVille Syrjälä 		enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT |
3100ebf5f921SVille Syrjälä 			I915_LPE_PIPE_C_INTERRUPT;
31016b7eafc1SVille Syrjälä 
31028bd099a7SChris Wilson 	WARN_ON(dev_priv->irq_mask != ~0u);
31036b7eafc1SVille Syrjälä 
31049ab981f2SVille Syrjälä 	dev_priv->irq_mask = ~enable_mask;
31058bb61306SVille Syrjälä 
31063488d4ebSVille Syrjälä 	GEN3_IRQ_INIT(VLV_, dev_priv->irq_mask, enable_mask);
31078bb61306SVille Syrjälä }
31088bb61306SVille Syrjälä 
31098bb61306SVille Syrjälä /* drm_dma.h hooks */
31118bb61306SVille Syrjälä static void ironlake_irq_reset(struct drm_device *dev)
31128bb61306SVille Syrjälä {
3113fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
31148bb61306SVille Syrjälä 
3115d420a50cSVille Syrjälä 	if (IS_GEN5(dev_priv))
31168bb61306SVille Syrjälä 		I915_WRITE(HWSTAM, 0xffffffff);
31178bb61306SVille Syrjälä 
31183488d4ebSVille Syrjälä 	GEN3_IRQ_RESET(DE);
31195db94019STvrtko Ursulin 	if (IS_GEN7(dev_priv))
31208bb61306SVille Syrjälä 		I915_WRITE(GEN7_ERR_INT, 0xffffffff);
31218bb61306SVille Syrjälä 
3122b243f530STvrtko Ursulin 	gen5_gt_irq_reset(dev_priv);
31238bb61306SVille Syrjälä 
3124b243f530STvrtko Ursulin 	ibx_irq_reset(dev_priv);
31258bb61306SVille Syrjälä }
31268bb61306SVille Syrjälä 
31276bcdb1c8SVille Syrjälä static void valleyview_irq_reset(struct drm_device *dev)
31287e231dbeSJesse Barnes {
3129fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
31307e231dbeSJesse Barnes 
313134c7b8a7SVille Syrjälä 	I915_WRITE(VLV_MASTER_IER, 0);
313234c7b8a7SVille Syrjälä 	POSTING_READ(VLV_MASTER_IER);
313334c7b8a7SVille Syrjälä 
3134b243f530STvrtko Ursulin 	gen5_gt_irq_reset(dev_priv);
31357e231dbeSJesse Barnes 
3136ad22d106SVille Syrjälä 	spin_lock_irq(&dev_priv->irq_lock);
31379918271eSVille Syrjälä 	if (dev_priv->display_irqs_enabled)
313870591a41SVille Syrjälä 		vlv_display_irq_reset(dev_priv);
3139ad22d106SVille Syrjälä 	spin_unlock_irq(&dev_priv->irq_lock);
31407e231dbeSJesse Barnes }
31417e231dbeSJesse Barnes 
3142d6e3cca3SDaniel Vetter static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
3143d6e3cca3SDaniel Vetter {
3144d6e3cca3SDaniel Vetter 	GEN8_IRQ_RESET_NDX(GT, 0);
3145d6e3cca3SDaniel Vetter 	GEN8_IRQ_RESET_NDX(GT, 1);
3146d6e3cca3SDaniel Vetter 	GEN8_IRQ_RESET_NDX(GT, 2);
3147d6e3cca3SDaniel Vetter 	GEN8_IRQ_RESET_NDX(GT, 3);
3148d6e3cca3SDaniel Vetter }
3149d6e3cca3SDaniel Vetter 
3150823f6b38SPaulo Zanoni static void gen8_irq_reset(struct drm_device *dev)
3151abd58f01SBen Widawsky {
3152fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
3153abd58f01SBen Widawsky 	int pipe;
3154abd58f01SBen Widawsky 
3155abd58f01SBen Widawsky 	I915_WRITE(GEN8_MASTER_IRQ, 0);
3156abd58f01SBen Widawsky 	POSTING_READ(GEN8_MASTER_IRQ);
3157abd58f01SBen Widawsky 
3158d6e3cca3SDaniel Vetter 	gen8_gt_irq_reset(dev_priv);
3159abd58f01SBen Widawsky 
3160055e393fSDamien Lespiau 	for_each_pipe(dev_priv, pipe)
3161f458ebbcSDaniel Vetter 		if (intel_display_power_is_enabled(dev_priv,
3162813bde43SPaulo Zanoni 						   POWER_DOMAIN_PIPE(pipe)))
3163f86f3fb0SPaulo Zanoni 			GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
3164abd58f01SBen Widawsky 
31653488d4ebSVille Syrjälä 	GEN3_IRQ_RESET(GEN8_DE_PORT_);
31663488d4ebSVille Syrjälä 	GEN3_IRQ_RESET(GEN8_DE_MISC_);
31673488d4ebSVille Syrjälä 	GEN3_IRQ_RESET(GEN8_PCU_);
3168abd58f01SBen Widawsky 
31696e266956STvrtko Ursulin 	if (HAS_PCH_SPLIT(dev_priv))
3170b243f530STvrtko Ursulin 		ibx_irq_reset(dev_priv);
3171abd58f01SBen Widawsky }
3172abd58f01SBen Widawsky 
31734c6c03beSDamien Lespiau void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
3174001bd2cbSImre Deak 				     u8 pipe_mask)
3175d49bdb0eSPaulo Zanoni {
31761180e206SPaulo Zanoni 	uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
31776831f3e3SVille Syrjälä 	enum pipe pipe;
3178d49bdb0eSPaulo Zanoni 
317913321786SDaniel Vetter 	spin_lock_irq(&dev_priv->irq_lock);
31809dfe2e3aSImre Deak 
31819dfe2e3aSImre Deak 	if (!intel_irqs_enabled(dev_priv)) {
31829dfe2e3aSImre Deak 		spin_unlock_irq(&dev_priv->irq_lock);
31839dfe2e3aSImre Deak 		return;
31849dfe2e3aSImre Deak 	}
31859dfe2e3aSImre Deak 
31866831f3e3SVille Syrjälä 	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
31876831f3e3SVille Syrjälä 		GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
31886831f3e3SVille Syrjälä 				  dev_priv->de_irq_mask[pipe],
31896831f3e3SVille Syrjälä 				  ~dev_priv->de_irq_mask[pipe] | extra_ier);
31909dfe2e3aSImre Deak 
319113321786SDaniel Vetter 	spin_unlock_irq(&dev_priv->irq_lock);
3192d49bdb0eSPaulo Zanoni }
3193d49bdb0eSPaulo Zanoni 
3194aae8ba84SVille Syrjälä void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
3195001bd2cbSImre Deak 				     u8 pipe_mask)
3196aae8ba84SVille Syrjälä {
31976831f3e3SVille Syrjälä 	enum pipe pipe;
31986831f3e3SVille Syrjälä 
3199aae8ba84SVille Syrjälä 	spin_lock_irq(&dev_priv->irq_lock);
32009dfe2e3aSImre Deak 
32019dfe2e3aSImre Deak 	if (!intel_irqs_enabled(dev_priv)) {
32029dfe2e3aSImre Deak 		spin_unlock_irq(&dev_priv->irq_lock);
32039dfe2e3aSImre Deak 		return;
32049dfe2e3aSImre Deak 	}
32059dfe2e3aSImre Deak 
32066831f3e3SVille Syrjälä 	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
32076831f3e3SVille Syrjälä 		GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
32089dfe2e3aSImre Deak 
3209aae8ba84SVille Syrjälä 	spin_unlock_irq(&dev_priv->irq_lock);
3210aae8ba84SVille Syrjälä 
3211aae8ba84SVille Syrjälä 	/* make sure we're done processing display irqs */
321291c8a326SChris Wilson 	synchronize_irq(dev_priv->drm.irq);
3213aae8ba84SVille Syrjälä }
3214aae8ba84SVille Syrjälä 
32156bcdb1c8SVille Syrjälä static void cherryview_irq_reset(struct drm_device *dev)
321643f328d7SVille Syrjälä {
3217fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
321843f328d7SVille Syrjälä 
321943f328d7SVille Syrjälä 	I915_WRITE(GEN8_MASTER_IRQ, 0);
322043f328d7SVille Syrjälä 	POSTING_READ(GEN8_MASTER_IRQ);
322143f328d7SVille Syrjälä 
3222d6e3cca3SDaniel Vetter 	gen8_gt_irq_reset(dev_priv);
322343f328d7SVille Syrjälä 
32243488d4ebSVille Syrjälä 	GEN3_IRQ_RESET(GEN8_PCU_);
322543f328d7SVille Syrjälä 
3226ad22d106SVille Syrjälä 	spin_lock_irq(&dev_priv->irq_lock);
32279918271eSVille Syrjälä 	if (dev_priv->display_irqs_enabled)
322870591a41SVille Syrjälä 		vlv_display_irq_reset(dev_priv);
3229ad22d106SVille Syrjälä 	spin_unlock_irq(&dev_priv->irq_lock);
323043f328d7SVille Syrjälä }
323143f328d7SVille Syrjälä 
323291d14251STvrtko Ursulin static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv,
323387a02106SVille Syrjälä 				  const u32 hpd[HPD_NUM_PINS])
323487a02106SVille Syrjälä {
323587a02106SVille Syrjälä 	struct intel_encoder *encoder;
323687a02106SVille Syrjälä 	u32 enabled_irqs = 0;
323787a02106SVille Syrjälä 
323891c8a326SChris Wilson 	for_each_intel_encoder(&dev_priv->drm, encoder)
323987a02106SVille Syrjälä 		if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
324087a02106SVille Syrjälä 			enabled_irqs |= hpd[encoder->hpd_pin];
324187a02106SVille Syrjälä 
324287a02106SVille Syrjälä 	return enabled_irqs;
324387a02106SVille Syrjälä }
324487a02106SVille Syrjälä 
32451a56b1a2SImre Deak static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv)
32461a56b1a2SImre Deak {
32471a56b1a2SImre Deak 	u32 hotplug;
32481a56b1a2SImre Deak 
32491a56b1a2SImre Deak 	/*
32501a56b1a2SImre Deak 	 * Enable digital hotplug on the PCH, and configure the DP short pulse
32511a56b1a2SImre Deak 	 * duration to 2ms (which is the minimum in the Display Port spec).
32521a56b1a2SImre Deak 	 * The pulse duration bits are reserved on LPT+.
32531a56b1a2SImre Deak 	 */
32541a56b1a2SImre Deak 	hotplug = I915_READ(PCH_PORT_HOTPLUG);
32551a56b1a2SImre Deak 	hotplug &= ~(PORTB_PULSE_DURATION_MASK |
32561a56b1a2SImre Deak 		     PORTC_PULSE_DURATION_MASK |
32571a56b1a2SImre Deak 		     PORTD_PULSE_DURATION_MASK);
32581a56b1a2SImre Deak 	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
32591a56b1a2SImre Deak 	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
32601a56b1a2SImre Deak 	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
32611a56b1a2SImre Deak 	/*
32621a56b1a2SImre Deak 	 * When CPU and PCH are on the same package, port A
32631a56b1a2SImre Deak 	 * HPD must be enabled in both north and south.
32641a56b1a2SImre Deak 	 */
32651a56b1a2SImre Deak 	if (HAS_PCH_LPT_LP(dev_priv))
32661a56b1a2SImre Deak 		hotplug |= PORTA_HOTPLUG_ENABLE;
32671a56b1a2SImre Deak 	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
32681a56b1a2SImre Deak }
32691a56b1a2SImre Deak 
327091d14251STvrtko Ursulin static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
327182a28bcfSDaniel Vetter {
32721a56b1a2SImre Deak 	u32 hotplug_irqs, enabled_irqs;
327382a28bcfSDaniel Vetter 
327491d14251STvrtko Ursulin 	if (HAS_PCH_IBX(dev_priv)) {
3275fee884edSDaniel Vetter 		hotplug_irqs = SDE_HOTPLUG_MASK;
327691d14251STvrtko Ursulin 		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ibx);
327782a28bcfSDaniel Vetter 	} else {
3278fee884edSDaniel Vetter 		hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
327991d14251STvrtko Ursulin 		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_cpt);
328082a28bcfSDaniel Vetter 	}
328182a28bcfSDaniel Vetter 
3282fee884edSDaniel Vetter 	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
328382a28bcfSDaniel Vetter 
32841a56b1a2SImre Deak 	ibx_hpd_detection_setup(dev_priv);
32856dbf30ceSVille Syrjälä }
328626951cafSXiong Zhang 
32872a57d9ccSImre Deak static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv)
32882a57d9ccSImre Deak {
32893b92e263SRodrigo Vivi 	u32 val, hotplug;
32903b92e263SRodrigo Vivi 
32913b92e263SRodrigo Vivi 	/* Display WA #1179 WaHardHangonHotPlug: cnp */
32923b92e263SRodrigo Vivi 	if (HAS_PCH_CNP(dev_priv)) {
32933b92e263SRodrigo Vivi 		val = I915_READ(SOUTH_CHICKEN1);
32943b92e263SRodrigo Vivi 		val &= ~CHASSIS_CLK_REQ_DURATION_MASK;
32953b92e263SRodrigo Vivi 		val |= CHASSIS_CLK_REQ_DURATION(0xf);
32963b92e263SRodrigo Vivi 		I915_WRITE(SOUTH_CHICKEN1, val);
32973b92e263SRodrigo Vivi 	}
32982a57d9ccSImre Deak 
32992a57d9ccSImre Deak 	/* Enable digital hotplug on the PCH */
33002a57d9ccSImre Deak 	hotplug = I915_READ(PCH_PORT_HOTPLUG);
33012a57d9ccSImre Deak 	hotplug |= PORTA_HOTPLUG_ENABLE |
33022a57d9ccSImre Deak 		   PORTB_HOTPLUG_ENABLE |
33032a57d9ccSImre Deak 		   PORTC_HOTPLUG_ENABLE |
33042a57d9ccSImre Deak 		   PORTD_HOTPLUG_ENABLE;
33052a57d9ccSImre Deak 	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
33062a57d9ccSImre Deak 
33072a57d9ccSImre Deak 	hotplug = I915_READ(PCH_PORT_HOTPLUG2);
33082a57d9ccSImre Deak 	hotplug |= PORTE_HOTPLUG_ENABLE;
33092a57d9ccSImre Deak 	I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
33102a57d9ccSImre Deak }
33112a57d9ccSImre Deak 
331291d14251STvrtko Ursulin static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
33136dbf30ceSVille Syrjälä {
33142a57d9ccSImre Deak 	u32 hotplug_irqs, enabled_irqs;
33156dbf30ceSVille Syrjälä 
33166dbf30ceSVille Syrjälä 	hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
331791d14251STvrtko Ursulin 	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_spt);
33186dbf30ceSVille Syrjälä 
33196dbf30ceSVille Syrjälä 	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
33206dbf30ceSVille Syrjälä 
33212a57d9ccSImre Deak 	spt_hpd_detection_setup(dev_priv);
332226951cafSXiong Zhang }
33237fe0b973SKeith Packard 
33241a56b1a2SImre Deak static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv)
33251a56b1a2SImre Deak {
33261a56b1a2SImre Deak 	u32 hotplug;
33271a56b1a2SImre Deak 
33281a56b1a2SImre Deak 	/*
33291a56b1a2SImre Deak 	 * Enable digital hotplug on the CPU, and configure the DP short pulse
33301a56b1a2SImre Deak 	 * duration to 2ms (which is the minimum in the Display Port spec)
33311a56b1a2SImre Deak 	 * The pulse duration bits are reserved on HSW+.
33321a56b1a2SImre Deak 	 */
33331a56b1a2SImre Deak 	hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
33341a56b1a2SImre Deak 	hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK;
33351a56b1a2SImre Deak 	hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE |
33361a56b1a2SImre Deak 		   DIGITAL_PORTA_PULSE_DURATION_2ms;
33371a56b1a2SImre Deak 	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
33381a56b1a2SImre Deak }
33391a56b1a2SImre Deak 
334091d14251STvrtko Ursulin static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
3341e4ce95aaSVille Syrjälä {
33421a56b1a2SImre Deak 	u32 hotplug_irqs, enabled_irqs;
3343e4ce95aaSVille Syrjälä 
334491d14251STvrtko Ursulin 	if (INTEL_GEN(dev_priv) >= 8) {
33453a3b3c7dSVille Syrjälä 		hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG;
334691d14251STvrtko Ursulin 		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bdw);
33473a3b3c7dSVille Syrjälä 
33483a3b3c7dSVille Syrjälä 		bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
334991d14251STvrtko Ursulin 	} else if (INTEL_GEN(dev_priv) >= 7) {
335023bb4cb5SVille Syrjälä 		hotplug_irqs = DE_DP_A_HOTPLUG_IVB;
335191d14251STvrtko Ursulin 		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ivb);
33523a3b3c7dSVille Syrjälä 
33533a3b3c7dSVille Syrjälä 		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
335423bb4cb5SVille Syrjälä 	} else {
3355e4ce95aaSVille Syrjälä 		hotplug_irqs = DE_DP_A_HOTPLUG;
335691d14251STvrtko Ursulin 		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ilk);
3357e4ce95aaSVille Syrjälä 
3358e4ce95aaSVille Syrjälä 		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
33593a3b3c7dSVille Syrjälä 	}
3360e4ce95aaSVille Syrjälä 
33611a56b1a2SImre Deak 	ilk_hpd_detection_setup(dev_priv);
3362e4ce95aaSVille Syrjälä 
336391d14251STvrtko Ursulin 	ibx_hpd_irq_setup(dev_priv);
3364e4ce95aaSVille Syrjälä }
3365e4ce95aaSVille Syrjälä 
33662a57d9ccSImre Deak static void __bxt_hpd_detection_setup(struct drm_i915_private *dev_priv,
33672a57d9ccSImre Deak 				      u32 enabled_irqs)
3368e0a20ad7SShashank Sharma {
33692a57d9ccSImre Deak 	u32 hotplug;
3370e0a20ad7SShashank Sharma 
3371a52bb15bSVille Syrjälä 	hotplug = I915_READ(PCH_PORT_HOTPLUG);
33722a57d9ccSImre Deak 	hotplug |= PORTA_HOTPLUG_ENABLE |
33732a57d9ccSImre Deak 		   PORTB_HOTPLUG_ENABLE |
33742a57d9ccSImre Deak 		   PORTC_HOTPLUG_ENABLE;
3375d252bf68SShubhangi Shrivastava 
3376d252bf68SShubhangi Shrivastava 	DRM_DEBUG_KMS("Invert bit setting: hp_ctl:%x hp_port:%x\n",
3377d252bf68SShubhangi Shrivastava 		      hotplug, enabled_irqs);
3378d252bf68SShubhangi Shrivastava 	hotplug &= ~BXT_DDI_HPD_INVERT_MASK;
3379d252bf68SShubhangi Shrivastava 
3380d252bf68SShubhangi Shrivastava 	/*
3381d252bf68SShubhangi Shrivastava 	 * For BXT the invert bit has to be set based on the AOB design
3382d252bf68SShubhangi Shrivastava 	 * for the HPD detection logic; update it based on the VBT fields.
3383d252bf68SShubhangi Shrivastava 	 */
3384d252bf68SShubhangi Shrivastava 	if ((enabled_irqs & BXT_DE_PORT_HP_DDIA) &&
3385d252bf68SShubhangi Shrivastava 	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_A))
3386d252bf68SShubhangi Shrivastava 		hotplug |= BXT_DDIA_HPD_INVERT;
3387d252bf68SShubhangi Shrivastava 	if ((enabled_irqs & BXT_DE_PORT_HP_DDIB) &&
3388d252bf68SShubhangi Shrivastava 	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_B))
3389d252bf68SShubhangi Shrivastava 		hotplug |= BXT_DDIB_HPD_INVERT;
3390d252bf68SShubhangi Shrivastava 	if ((enabled_irqs & BXT_DE_PORT_HP_DDIC) &&
3391d252bf68SShubhangi Shrivastava 	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_C))
3392d252bf68SShubhangi Shrivastava 		hotplug |= BXT_DDIC_HPD_INVERT;
3393d252bf68SShubhangi Shrivastava 
3394a52bb15bSVille Syrjälä 	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3395e0a20ad7SShashank Sharma }
3396e0a20ad7SShashank Sharma 
33972a57d9ccSImre Deak static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv)
33982a57d9ccSImre Deak {
33992a57d9ccSImre Deak 	__bxt_hpd_detection_setup(dev_priv, BXT_DE_PORT_HOTPLUG_MASK);
34002a57d9ccSImre Deak }
34012a57d9ccSImre Deak 
34022a57d9ccSImre Deak static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
34032a57d9ccSImre Deak {
34042a57d9ccSImre Deak 	u32 hotplug_irqs, enabled_irqs;
34052a57d9ccSImre Deak 
34062a57d9ccSImre Deak 	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bxt);
34072a57d9ccSImre Deak 	hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK;
34082a57d9ccSImre Deak 
34092a57d9ccSImre Deak 	bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
34102a57d9ccSImre Deak 
34112a57d9ccSImre Deak 	__bxt_hpd_detection_setup(dev_priv, enabled_irqs);
34122a57d9ccSImre Deak }
34132a57d9ccSImre Deak 
3414d46da437SPaulo Zanoni static void ibx_irq_postinstall(struct drm_device *dev)
3415d46da437SPaulo Zanoni {
3416fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
341782a28bcfSDaniel Vetter 	u32 mask;
3418d46da437SPaulo Zanoni 
34196e266956STvrtko Ursulin 	if (HAS_PCH_NOP(dev_priv))
3420692a04cfSDaniel Vetter 		return;
3421692a04cfSDaniel Vetter 
34226e266956STvrtko Ursulin 	if (HAS_PCH_IBX(dev_priv))
34235c673b60SDaniel Vetter 		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
34244ebc6509SDhinakaran Pandiyan 	else if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
34255c673b60SDaniel Vetter 		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
34264ebc6509SDhinakaran Pandiyan 	else
34274ebc6509SDhinakaran Pandiyan 		mask = SDE_GMBUS_CPT;
34288664281bSPaulo Zanoni 
34293488d4ebSVille Syrjälä 	gen3_assert_iir_is_zero(dev_priv, SDEIIR);
3430d46da437SPaulo Zanoni 	I915_WRITE(SDEIMR, ~mask);
34312a57d9ccSImre Deak 
34322a57d9ccSImre Deak 	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
34332a57d9ccSImre Deak 	    HAS_PCH_LPT(dev_priv))
34341a56b1a2SImre Deak 		ibx_hpd_detection_setup(dev_priv);
34352a57d9ccSImre Deak 	else
34362a57d9ccSImre Deak 		spt_hpd_detection_setup(dev_priv);
3437d46da437SPaulo Zanoni }
3438d46da437SPaulo Zanoni 
34390a9a8c91SDaniel Vetter static void gen5_gt_irq_postinstall(struct drm_device *dev)
34400a9a8c91SDaniel Vetter {
3441fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
34420a9a8c91SDaniel Vetter 	u32 pm_irqs, gt_irqs;
34430a9a8c91SDaniel Vetter 
34440a9a8c91SDaniel Vetter 	pm_irqs = gt_irqs = 0;
34450a9a8c91SDaniel Vetter 
34460a9a8c91SDaniel Vetter 	dev_priv->gt_irq_mask = ~0;
34473c9192bcSTvrtko Ursulin 	if (HAS_L3_DPF(dev_priv)) {
34480a9a8c91SDaniel Vetter 		/* L3 parity interrupt is always unmasked. */
3449772c2a51STvrtko Ursulin 		dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev_priv);
3450772c2a51STvrtko Ursulin 		gt_irqs |= GT_PARITY_ERROR(dev_priv);
34510a9a8c91SDaniel Vetter 	}
34520a9a8c91SDaniel Vetter 
34530a9a8c91SDaniel Vetter 	gt_irqs |= GT_RENDER_USER_INTERRUPT;
34545db94019STvrtko Ursulin 	if (IS_GEN5(dev_priv)) {
3455f8973c21SChris Wilson 		gt_irqs |= ILK_BSD_USER_INTERRUPT;
34560a9a8c91SDaniel Vetter 	} else {
34570a9a8c91SDaniel Vetter 		gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
34580a9a8c91SDaniel Vetter 	}
34590a9a8c91SDaniel Vetter 
34603488d4ebSVille Syrjälä 	GEN3_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
34610a9a8c91SDaniel Vetter 
3462b243f530STvrtko Ursulin 	if (INTEL_GEN(dev_priv) >= 6) {
346378e68d36SImre Deak 		/*
346478e68d36SImre Deak 		 * RPS interrupts will get enabled/disabled on demand when RPS
346578e68d36SImre Deak 		 * itself is enabled/disabled.
346678e68d36SImre Deak 		 */
3467f4e9af4fSAkash Goel 		if (HAS_VEBOX(dev_priv)) {
34680a9a8c91SDaniel Vetter 			pm_irqs |= PM_VEBOX_USER_INTERRUPT;
3469f4e9af4fSAkash Goel 			dev_priv->pm_ier |= PM_VEBOX_USER_INTERRUPT;
3470f4e9af4fSAkash Goel 		}
34710a9a8c91SDaniel Vetter 
3472f4e9af4fSAkash Goel 		dev_priv->pm_imr = 0xffffffff;
34733488d4ebSVille Syrjälä 		GEN3_IRQ_INIT(GEN6_PM, dev_priv->pm_imr, pm_irqs);
34740a9a8c91SDaniel Vetter 	}
34750a9a8c91SDaniel Vetter }
34760a9a8c91SDaniel Vetter 
3477f71d4af4SJesse Barnes static int ironlake_irq_postinstall(struct drm_device *dev)
3478036a4a7dSZhenyu Wang {
3479fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
34808e76f8dcSPaulo Zanoni 	u32 display_mask, extra_mask;
34818e76f8dcSPaulo Zanoni 
3482b243f530STvrtko Ursulin 	if (INTEL_GEN(dev_priv) >= 7) {
34838e76f8dcSPaulo Zanoni 		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
3484842ebf7aSVille Syrjälä 				DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB);
34858e76f8dcSPaulo Zanoni 		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
348623bb4cb5SVille Syrjälä 			      DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
348723bb4cb5SVille Syrjälä 			      DE_DP_A_HOTPLUG_IVB);
34888e76f8dcSPaulo Zanoni 	} else {
34898e76f8dcSPaulo Zanoni 		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
3490842ebf7aSVille Syrjälä 				DE_AUX_CHANNEL_A | DE_PIPEB_CRC_DONE |
3491842ebf7aSVille Syrjälä 				DE_PIPEA_CRC_DONE | DE_POISON);
3492e4ce95aaSVille Syrjälä 		extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
3493e4ce95aaSVille Syrjälä 			      DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
3494e4ce95aaSVille Syrjälä 			      DE_DP_A_HOTPLUG);
34958e76f8dcSPaulo Zanoni 	}
3496036a4a7dSZhenyu Wang 
34971ec14ad3SChris Wilson 	dev_priv->irq_mask = ~display_mask;
3498036a4a7dSZhenyu Wang 
3499622364b6SPaulo Zanoni 	ibx_irq_pre_postinstall(dev);
3500622364b6SPaulo Zanoni 
35013488d4ebSVille Syrjälä 	GEN3_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);
3502036a4a7dSZhenyu Wang 
35030a9a8c91SDaniel Vetter 	gen5_gt_irq_postinstall(dev);
3504036a4a7dSZhenyu Wang 
35051a56b1a2SImre Deak 	ilk_hpd_detection_setup(dev_priv);
35061a56b1a2SImre Deak 
3507d46da437SPaulo Zanoni 	ibx_irq_postinstall(dev);
35087fe0b973SKeith Packard 
350950a0bc90STvrtko Ursulin 	if (IS_IRONLAKE_M(dev_priv)) {
35106005ce42SDaniel Vetter 		/* Enable PCU event interrupts
35116005ce42SDaniel Vetter 		 *
35126005ce42SDaniel Vetter 		 * spinlocking not required here for correctness since interrupt
35134bc9d430SDaniel Vetter 		 * setup is guaranteed to run in single-threaded context. But we
35144bc9d430SDaniel Vetter 		 * need it to make the assert_spin_locked happy. */
3515d6207435SDaniel Vetter 		spin_lock_irq(&dev_priv->irq_lock);
3516fbdedaeaSVille Syrjälä 		ilk_enable_display_irq(dev_priv, DE_PCU_EVENT);
3517d6207435SDaniel Vetter 		spin_unlock_irq(&dev_priv->irq_lock);
3518f97108d1SJesse Barnes 	}
3519f97108d1SJesse Barnes 
3520036a4a7dSZhenyu Wang 	return 0;
3521036a4a7dSZhenyu Wang }
3522036a4a7dSZhenyu Wang 
3523f8b79e58SImre Deak void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
3524f8b79e58SImre Deak {
352567520415SChris Wilson 	lockdep_assert_held(&dev_priv->irq_lock);
3526f8b79e58SImre Deak 
3527f8b79e58SImre Deak 	if (dev_priv->display_irqs_enabled)
3528f8b79e58SImre Deak 		return;
3529f8b79e58SImre Deak 
3530f8b79e58SImre Deak 	dev_priv->display_irqs_enabled = true;
3531f8b79e58SImre Deak 
3532d6c69803SVille Syrjälä 	if (intel_irqs_enabled(dev_priv)) {
3533d6c69803SVille Syrjälä 		vlv_display_irq_reset(dev_priv);
3534ad22d106SVille Syrjälä 		vlv_display_irq_postinstall(dev_priv);
3535f8b79e58SImre Deak 	}
3536d6c69803SVille Syrjälä }
3537f8b79e58SImre Deak 
3538f8b79e58SImre Deak void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
3539f8b79e58SImre Deak {
354067520415SChris Wilson 	lockdep_assert_held(&dev_priv->irq_lock);
3541f8b79e58SImre Deak 
3542f8b79e58SImre Deak 	if (!dev_priv->display_irqs_enabled)
3543f8b79e58SImre Deak 		return;
3544f8b79e58SImre Deak 
3545f8b79e58SImre Deak 	dev_priv->display_irqs_enabled = false;
3546f8b79e58SImre Deak 
3547950eabafSImre Deak 	if (intel_irqs_enabled(dev_priv))
3548ad22d106SVille Syrjälä 		vlv_display_irq_reset(dev_priv);
3549f8b79e58SImre Deak }
3550f8b79e58SImre Deak 
35510e6c9a9eSVille Syrjälä 
35520e6c9a9eSVille Syrjälä static int valleyview_irq_postinstall(struct drm_device *dev)
35530e6c9a9eSVille Syrjälä {
3554fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
35550e6c9a9eSVille Syrjälä 
35560a9a8c91SDaniel Vetter 	gen5_gt_irq_postinstall(dev);
35577e231dbeSJesse Barnes 
3558ad22d106SVille Syrjälä 	spin_lock_irq(&dev_priv->irq_lock);
35599918271eSVille Syrjälä 	if (dev_priv->display_irqs_enabled)
3560ad22d106SVille Syrjälä 		vlv_display_irq_postinstall(dev_priv);
3561ad22d106SVille Syrjälä 	spin_unlock_irq(&dev_priv->irq_lock);
3562ad22d106SVille Syrjälä 
35637e231dbeSJesse Barnes 	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
356434c7b8a7SVille Syrjälä 	POSTING_READ(VLV_MASTER_IER);
356520afbda2SDaniel Vetter 
356620afbda2SDaniel Vetter 	return 0;
356720afbda2SDaniel Vetter }
356820afbda2SDaniel Vetter 
3569abd58f01SBen Widawsky static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
3570abd58f01SBen Widawsky {
3571abd58f01SBen Widawsky 	/* These are interrupts we'll toggle with the ring mask register */
3572abd58f01SBen Widawsky 	uint32_t gt_interrupts[] = {
3573abd58f01SBen Widawsky 		GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
357473d477f6SOscar Mateo 			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
357573d477f6SOscar Mateo 			GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
357673d477f6SOscar Mateo 			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
3577abd58f01SBen Widawsky 		GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
357873d477f6SOscar Mateo 			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
357973d477f6SOscar Mateo 			GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
358073d477f6SOscar Mateo 			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
3581abd58f01SBen Widawsky 		0,
358273d477f6SOscar Mateo 		GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
358373d477f6SOscar Mateo 			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
3584abd58f01SBen Widawsky 		};
3585abd58f01SBen Widawsky 
358698735739STvrtko Ursulin 	if (HAS_L3_DPF(dev_priv))
358798735739STvrtko Ursulin 		gt_interrupts[0] |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
358898735739STvrtko Ursulin 
3589f4e9af4fSAkash Goel 	dev_priv->pm_ier = 0x0;
3590f4e9af4fSAkash Goel 	dev_priv->pm_imr = ~dev_priv->pm_ier;
35919a2d2d87SDeepak S 	GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
35929a2d2d87SDeepak S 	GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
359378e68d36SImre Deak 	/*
359478e68d36SImre Deak 	 * RPS interrupts will get enabled/disabled on demand when RPS itself
359526705e20SSagar Arun Kamble 	 * is enabled/disabled. Same wil be the case for GuC interrupts.
359678e68d36SImre Deak 	 */
3597f4e9af4fSAkash Goel 	GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_imr, dev_priv->pm_ier);
35989a2d2d87SDeepak S 	GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
3599abd58f01SBen Widawsky }
3600abd58f01SBen Widawsky 
3601abd58f01SBen Widawsky static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
3602abd58f01SBen Widawsky {
3603770de83dSDamien Lespiau 	uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
3604770de83dSDamien Lespiau 	uint32_t de_pipe_enables;
36053a3b3c7dSVille Syrjälä 	u32 de_port_masked = GEN8_AUX_CHANNEL_A;
36063a3b3c7dSVille Syrjälä 	u32 de_port_enables;
360711825b0dSVille Syrjälä 	u32 de_misc_masked = GEN8_DE_MISC_GSE;
36083a3b3c7dSVille Syrjälä 	enum pipe pipe;
3609770de83dSDamien Lespiau 
3610bca2bf2aSPandiyan, Dhinakaran 	if (INTEL_GEN(dev_priv) >= 9) {
3611842ebf7aSVille Syrjälä 		de_pipe_masked |= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
36123a3b3c7dSVille Syrjälä 		de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
361388e04703SJesse Barnes 				  GEN9_AUX_CHANNEL_D;
3614cc3f90f0SAnder Conselvan de Oliveira 		if (IS_GEN9_LP(dev_priv))
36153a3b3c7dSVille Syrjälä 			de_port_masked |= BXT_DE_PORT_GMBUS;
36163a3b3c7dSVille Syrjälä 	} else {
3617842ebf7aSVille Syrjälä 		de_pipe_masked |= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
36183a3b3c7dSVille Syrjälä 	}
3619770de83dSDamien Lespiau 
3620770de83dSDamien Lespiau 	de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
3621770de83dSDamien Lespiau 					   GEN8_PIPE_FIFO_UNDERRUN;
3622770de83dSDamien Lespiau 
36233a3b3c7dSVille Syrjälä 	de_port_enables = de_port_masked;
3624cc3f90f0SAnder Conselvan de Oliveira 	if (IS_GEN9_LP(dev_priv))
3625a52bb15bSVille Syrjälä 		de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
3626a52bb15bSVille Syrjälä 	else if (IS_BROADWELL(dev_priv))
36273a3b3c7dSVille Syrjälä 		de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;
36283a3b3c7dSVille Syrjälä 
36290a195c02SMika Kahola 	for_each_pipe(dev_priv, pipe) {
36300a195c02SMika Kahola 		dev_priv->de_irq_mask[pipe] = ~de_pipe_masked;
3631abd58f01SBen Widawsky 
3632f458ebbcSDaniel Vetter 		if (intel_display_power_is_enabled(dev_priv,
3633813bde43SPaulo Zanoni 				POWER_DOMAIN_PIPE(pipe)))
3634813bde43SPaulo Zanoni 			GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
3635813bde43SPaulo Zanoni 					  dev_priv->de_irq_mask[pipe],
363635079899SPaulo Zanoni 					  de_pipe_enables);
36370a195c02SMika Kahola 	}
3638abd58f01SBen Widawsky 
36393488d4ebSVille Syrjälä 	GEN3_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
36403488d4ebSVille Syrjälä 	GEN3_IRQ_INIT(GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);
36412a57d9ccSImre Deak 
36422a57d9ccSImre Deak 	if (IS_GEN9_LP(dev_priv))
36432a57d9ccSImre Deak 		bxt_hpd_detection_setup(dev_priv);
36441a56b1a2SImre Deak 	else if (IS_BROADWELL(dev_priv))
36451a56b1a2SImre Deak 		ilk_hpd_detection_setup(dev_priv);
3646abd58f01SBen Widawsky }
3647abd58f01SBen Widawsky 
3648abd58f01SBen Widawsky static int gen8_irq_postinstall(struct drm_device *dev)
3649abd58f01SBen Widawsky {
3650fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
3651abd58f01SBen Widawsky 
36526e266956STvrtko Ursulin 	if (HAS_PCH_SPLIT(dev_priv))
3653622364b6SPaulo Zanoni 		ibx_irq_pre_postinstall(dev);
3654622364b6SPaulo Zanoni 
3655abd58f01SBen Widawsky 	gen8_gt_irq_postinstall(dev_priv);
3656abd58f01SBen Widawsky 	gen8_de_irq_postinstall(dev_priv);
3657abd58f01SBen Widawsky 
36586e266956STvrtko Ursulin 	if (HAS_PCH_SPLIT(dev_priv))
3659abd58f01SBen Widawsky 		ibx_irq_postinstall(dev);
3660abd58f01SBen Widawsky 
3661e5328c43SVille Syrjälä 	I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
3662abd58f01SBen Widawsky 	POSTING_READ(GEN8_MASTER_IRQ);
3663abd58f01SBen Widawsky 
3664abd58f01SBen Widawsky 	return 0;
3665abd58f01SBen Widawsky }
3666abd58f01SBen Widawsky 
366743f328d7SVille Syrjälä static int cherryview_irq_postinstall(struct drm_device *dev)
366843f328d7SVille Syrjälä {
3669fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
367043f328d7SVille Syrjälä 
367143f328d7SVille Syrjälä 	gen8_gt_irq_postinstall(dev_priv);
367243f328d7SVille Syrjälä 
3673ad22d106SVille Syrjälä 	spin_lock_irq(&dev_priv->irq_lock);
36749918271eSVille Syrjälä 	if (dev_priv->display_irqs_enabled)
3675ad22d106SVille Syrjälä 		vlv_display_irq_postinstall(dev_priv);
3676ad22d106SVille Syrjälä 	spin_unlock_irq(&dev_priv->irq_lock);
3677ad22d106SVille Syrjälä 
3678e5328c43SVille Syrjälä 	I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
367943f328d7SVille Syrjälä 	POSTING_READ(GEN8_MASTER_IRQ);
368043f328d7SVille Syrjälä 
368143f328d7SVille Syrjälä 	return 0;
368243f328d7SVille Syrjälä }
368343f328d7SVille Syrjälä 
36846bcdb1c8SVille Syrjälä static void i8xx_irq_reset(struct drm_device *dev)
3685c2798b19SChris Wilson {
3686fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
3687c2798b19SChris Wilson 
368844d9241eSVille Syrjälä 	i9xx_pipestat_irq_reset(dev_priv);
368944d9241eSVille Syrjälä 
3690d420a50cSVille Syrjälä 	I915_WRITE16(HWSTAM, 0xffff);
3691d420a50cSVille Syrjälä 
3692e9e9848aSVille Syrjälä 	GEN2_IRQ_RESET();
3693c2798b19SChris Wilson }
3694c2798b19SChris Wilson 
3695c2798b19SChris Wilson static int i8xx_irq_postinstall(struct drm_device *dev)
3696c2798b19SChris Wilson {
3697fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
3698e9e9848aSVille Syrjälä 	u16 enable_mask;
3699c2798b19SChris Wilson 
3700045cebd2SVille Syrjälä 	I915_WRITE16(EMR, ~(I915_ERROR_PAGE_TABLE |
3701045cebd2SVille Syrjälä 			    I915_ERROR_MEMORY_REFRESH));
3702c2798b19SChris Wilson 
3703c2798b19SChris Wilson 	/* Unmask the interrupts that we always want on. */
3704c2798b19SChris Wilson 	dev_priv->irq_mask =
3705c2798b19SChris Wilson 		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3706842ebf7aSVille Syrjälä 		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT);
3707c2798b19SChris Wilson 
3708e9e9848aSVille Syrjälä 	enable_mask =
3709c2798b19SChris Wilson 		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3710c2798b19SChris Wilson 		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3711e9e9848aSVille Syrjälä 		I915_USER_INTERRUPT;
3712e9e9848aSVille Syrjälä 
3713e9e9848aSVille Syrjälä 	GEN2_IRQ_INIT(, dev_priv->irq_mask, enable_mask);
3714c2798b19SChris Wilson 
3715379ef82dSDaniel Vetter 	/* Interrupt setup is already guaranteed to be single-threaded; this is
3716379ef82dSDaniel Vetter 	 * just to make the assert_spin_locked check happy. */
3717d6207435SDaniel Vetter 	spin_lock_irq(&dev_priv->irq_lock);
3718755e9019SImre Deak 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3719755e9019SImre Deak 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
3720d6207435SDaniel Vetter 	spin_unlock_irq(&dev_priv->irq_lock);
3721379ef82dSDaniel Vetter 
3722c2798b19SChris Wilson 	return 0;
3723c2798b19SChris Wilson }
3724c2798b19SChris Wilson 
3725ff1f525eSDaniel Vetter static irqreturn_t i8xx_irq_handler(int irq, void *arg)
3726c2798b19SChris Wilson {
372745a83f84SDaniel Vetter 	struct drm_device *dev = arg;
3728fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
3729af722d28SVille Syrjälä 	irqreturn_t ret = IRQ_NONE;
3730c2798b19SChris Wilson 
37312dd2a883SImre Deak 	if (!intel_irqs_enabled(dev_priv))
37322dd2a883SImre Deak 		return IRQ_NONE;
37332dd2a883SImre Deak 
37341f814dacSImre Deak 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
37351f814dacSImre Deak 	disable_rpm_wakeref_asserts(dev_priv);
37361f814dacSImre Deak 
3737af722d28SVille Syrjälä 	do {
3738af722d28SVille Syrjälä 		u32 pipe_stats[I915_MAX_PIPES] = {};
3739af722d28SVille Syrjälä 		u16 iir;
3740af722d28SVille Syrjälä 
3741c2798b19SChris Wilson 		iir = I915_READ16(IIR);
3742c2798b19SChris Wilson 		if (iir == 0)
3743af722d28SVille Syrjälä 			break;
3744c2798b19SChris Wilson 
3745af722d28SVille Syrjälä 		ret = IRQ_HANDLED;
3746c2798b19SChris Wilson 
3747eb64343cSVille Syrjälä 		/* Call regardless, as some status bits might not be
3748eb64343cSVille Syrjälä 		 * signalled in iir */
3749eb64343cSVille Syrjälä 		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
3750c2798b19SChris Wilson 
3751fd3a4024SDaniel Vetter 		I915_WRITE16(IIR, iir);
3752c2798b19SChris Wilson 
3753c2798b19SChris Wilson 		if (iir & I915_USER_INTERRUPT)
37543b3f1650SAkash Goel 			notify_ring(dev_priv->engine[RCS]);
3755c2798b19SChris Wilson 
3756af722d28SVille Syrjälä 		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
3757af722d28SVille Syrjälä 			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
3758af722d28SVille Syrjälä 
3759eb64343cSVille Syrjälä 		i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats);
3760af722d28SVille Syrjälä 	} while (0);
3761c2798b19SChris Wilson 
37621f814dacSImre Deak 	enable_rpm_wakeref_asserts(dev_priv);
37631f814dacSImre Deak 
37641f814dacSImre Deak 	return ret;
3765c2798b19SChris Wilson }
3766c2798b19SChris Wilson 
37676bcdb1c8SVille Syrjälä static void i915_irq_reset(struct drm_device *dev)
3768a266c7d5SChris Wilson {
3769fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
3770a266c7d5SChris Wilson 
377156b857a5STvrtko Ursulin 	if (I915_HAS_HOTPLUG(dev_priv)) {
37720706f17cSEgbert Eich 		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
3773a266c7d5SChris Wilson 		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3774a266c7d5SChris Wilson 	}
3775a266c7d5SChris Wilson 
377644d9241eSVille Syrjälä 	i9xx_pipestat_irq_reset(dev_priv);
377744d9241eSVille Syrjälä 
3778d420a50cSVille Syrjälä 	I915_WRITE(HWSTAM, 0xffffffff);
377944d9241eSVille Syrjälä 
3780ba7eb789SVille Syrjälä 	GEN3_IRQ_RESET();
3781a266c7d5SChris Wilson }
3782a266c7d5SChris Wilson 
3783a266c7d5SChris Wilson static int i915_irq_postinstall(struct drm_device *dev)
3784a266c7d5SChris Wilson {
3785fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
378638bde180SChris Wilson 	u32 enable_mask;
3787a266c7d5SChris Wilson 
3788045cebd2SVille Syrjälä 	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE |
3789045cebd2SVille Syrjälä 			  I915_ERROR_MEMORY_REFRESH));
379038bde180SChris Wilson 
379138bde180SChris Wilson 	/* Unmask the interrupts that we always want on. */
379238bde180SChris Wilson 	dev_priv->irq_mask =
379338bde180SChris Wilson 		~(I915_ASLE_INTERRUPT |
379438bde180SChris Wilson 		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3795842ebf7aSVille Syrjälä 		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT);
379638bde180SChris Wilson 
379738bde180SChris Wilson 	enable_mask =
379838bde180SChris Wilson 		I915_ASLE_INTERRUPT |
379938bde180SChris Wilson 		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
380038bde180SChris Wilson 		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
380138bde180SChris Wilson 		I915_USER_INTERRUPT;
380238bde180SChris Wilson 
380356b857a5STvrtko Ursulin 	if (I915_HAS_HOTPLUG(dev_priv)) {
3804a266c7d5SChris Wilson 		/* Enable in IER... */
3805a266c7d5SChris Wilson 		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
3806a266c7d5SChris Wilson 		/* and unmask in IMR */
3807a266c7d5SChris Wilson 		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
3808a266c7d5SChris Wilson 	}
3809a266c7d5SChris Wilson 
3810ba7eb789SVille Syrjälä 	GEN3_IRQ_INIT(, dev_priv->irq_mask, enable_mask);
3811a266c7d5SChris Wilson 
3812379ef82dSDaniel Vetter 	/* Interrupt setup is already guaranteed to be single-threaded; this is
3813379ef82dSDaniel Vetter 	 * just to make the assert_spin_locked check happy. */
3814d6207435SDaniel Vetter 	spin_lock_irq(&dev_priv->irq_lock);
3815755e9019SImre Deak 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3816755e9019SImre Deak 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
3817d6207435SDaniel Vetter 	spin_unlock_irq(&dev_priv->irq_lock);
3818379ef82dSDaniel Vetter 
3819c30bb1fdSVille Syrjälä 	i915_enable_asle_pipestat(dev_priv);
3820c30bb1fdSVille Syrjälä 
382120afbda2SDaniel Vetter 	return 0;
382220afbda2SDaniel Vetter }
382320afbda2SDaniel Vetter 
3824ff1f525eSDaniel Vetter static irqreturn_t i915_irq_handler(int irq, void *arg)
3825a266c7d5SChris Wilson {
382645a83f84SDaniel Vetter 	struct drm_device *dev = arg;
3827fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
3828af722d28SVille Syrjälä 	irqreturn_t ret = IRQ_NONE;
3829a266c7d5SChris Wilson 
38302dd2a883SImre Deak 	if (!intel_irqs_enabled(dev_priv))
38312dd2a883SImre Deak 		return IRQ_NONE;
38322dd2a883SImre Deak 
38331f814dacSImre Deak 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
38341f814dacSImre Deak 	disable_rpm_wakeref_asserts(dev_priv);
38351f814dacSImre Deak 
383638bde180SChris Wilson 	do {
3837eb64343cSVille Syrjälä 		u32 pipe_stats[I915_MAX_PIPES] = {};
3838af722d28SVille Syrjälä 		u32 hotplug_status = 0;
3839af722d28SVille Syrjälä 		u32 iir;
3840a266c7d5SChris Wilson 
3841af722d28SVille Syrjälä 		iir = I915_READ(IIR);
3842af722d28SVille Syrjälä 		if (iir == 0)
3843af722d28SVille Syrjälä 			break;
3844af722d28SVille Syrjälä 
3845af722d28SVille Syrjälä 		ret = IRQ_HANDLED;
3846af722d28SVille Syrjälä 
3847af722d28SVille Syrjälä 		if (I915_HAS_HOTPLUG(dev_priv) &&
3848af722d28SVille Syrjälä 		    iir & I915_DISPLAY_PORT_INTERRUPT)
3849af722d28SVille Syrjälä 			hotplug_status = i9xx_hpd_irq_ack(dev_priv);
3850a266c7d5SChris Wilson 
3851eb64343cSVille Syrjälä 		/* Call regardless, as some status bits might not be
3852eb64343cSVille Syrjälä 		 * signalled in iir */
3853eb64343cSVille Syrjälä 		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
3854a266c7d5SChris Wilson 
3855fd3a4024SDaniel Vetter 		I915_WRITE(IIR, iir);
3856a266c7d5SChris Wilson 
3857a266c7d5SChris Wilson 		if (iir & I915_USER_INTERRUPT)
38583b3f1650SAkash Goel 			notify_ring(dev_priv->engine[RCS]);
3859a266c7d5SChris Wilson 
3860af722d28SVille Syrjälä 		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
3861af722d28SVille Syrjälä 			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
3862a266c7d5SChris Wilson 
3863af722d28SVille Syrjälä 		if (hotplug_status)
3864af722d28SVille Syrjälä 			i9xx_hpd_irq_handler(dev_priv, hotplug_status);
3865af722d28SVille Syrjälä 
3866af722d28SVille Syrjälä 		i915_pipestat_irq_handler(dev_priv, iir, pipe_stats);
3867af722d28SVille Syrjälä 	} while (0);
3868a266c7d5SChris Wilson 
38691f814dacSImre Deak 	enable_rpm_wakeref_asserts(dev_priv);
38701f814dacSImre Deak 
3871a266c7d5SChris Wilson 	return ret;
3872a266c7d5SChris Wilson }
3873a266c7d5SChris Wilson 
38746bcdb1c8SVille Syrjälä static void i965_irq_reset(struct drm_device *dev)
3875a266c7d5SChris Wilson {
3876fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
3877a266c7d5SChris Wilson 
38780706f17cSEgbert Eich 	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
3879a266c7d5SChris Wilson 	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3880a266c7d5SChris Wilson 
388144d9241eSVille Syrjälä 	i9xx_pipestat_irq_reset(dev_priv);
388244d9241eSVille Syrjälä 
3883d420a50cSVille Syrjälä 	I915_WRITE(HWSTAM, 0xffffffff);
388444d9241eSVille Syrjälä 
3885ba7eb789SVille Syrjälä 	GEN3_IRQ_RESET();
3886a266c7d5SChris Wilson }
3887a266c7d5SChris Wilson 
3888a266c7d5SChris Wilson static int i965_irq_postinstall(struct drm_device *dev)
3889a266c7d5SChris Wilson {
3890fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
3891bbba0a97SChris Wilson 	u32 enable_mask;
3892a266c7d5SChris Wilson 	u32 error_mask;
3893a266c7d5SChris Wilson 
3894045cebd2SVille Syrjälä 	/*
3895045cebd2SVille Syrjälä 	 * Enable some error detection, note the instruction error mask
3896045cebd2SVille Syrjälä 	 * bit is reserved, so we leave it masked.
3897045cebd2SVille Syrjälä 	 */
3898045cebd2SVille Syrjälä 	if (IS_G4X(dev_priv)) {
3899045cebd2SVille Syrjälä 		error_mask = ~(GM45_ERROR_PAGE_TABLE |
3900045cebd2SVille Syrjälä 			       GM45_ERROR_MEM_PRIV |
3901045cebd2SVille Syrjälä 			       GM45_ERROR_CP_PRIV |
3902045cebd2SVille Syrjälä 			       I915_ERROR_MEMORY_REFRESH);
3903045cebd2SVille Syrjälä 	} else {
3904045cebd2SVille Syrjälä 		error_mask = ~(I915_ERROR_PAGE_TABLE |
3905045cebd2SVille Syrjälä 			       I915_ERROR_MEMORY_REFRESH);
3906045cebd2SVille Syrjälä 	}
3907045cebd2SVille Syrjälä 	I915_WRITE(EMR, error_mask);
3908045cebd2SVille Syrjälä 
3909a266c7d5SChris Wilson 	/* Unmask the interrupts that we always want on. */
3910c30bb1fdSVille Syrjälä 	dev_priv->irq_mask =
3911c30bb1fdSVille Syrjälä 		~(I915_ASLE_INTERRUPT |
3912adca4730SChris Wilson 		  I915_DISPLAY_PORT_INTERRUPT |
3913bbba0a97SChris Wilson 		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3914bbba0a97SChris Wilson 		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3915bbba0a97SChris Wilson 		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
3916bbba0a97SChris Wilson 
3917c30bb1fdSVille Syrjälä 	enable_mask =
3918c30bb1fdSVille Syrjälä 		I915_ASLE_INTERRUPT |
3919c30bb1fdSVille Syrjälä 		I915_DISPLAY_PORT_INTERRUPT |
3920c30bb1fdSVille Syrjälä 		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3921c30bb1fdSVille Syrjälä 		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3922c30bb1fdSVille Syrjälä 		I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
3923c30bb1fdSVille Syrjälä 		I915_USER_INTERRUPT;
3924bbba0a97SChris Wilson 
392591d14251STvrtko Ursulin 	if (IS_G4X(dev_priv))
3926bbba0a97SChris Wilson 		enable_mask |= I915_BSD_USER_INTERRUPT;
3927a266c7d5SChris Wilson 
3928c30bb1fdSVille Syrjälä 	GEN3_IRQ_INIT(, dev_priv->irq_mask, enable_mask);
3929c30bb1fdSVille Syrjälä 
3930b79480baSDaniel Vetter 	/* Interrupt setup is already guaranteed to be single-threaded; this is
3931b79480baSDaniel Vetter 	 * just to make the assert_spin_locked check happy. */
3932d6207435SDaniel Vetter 	spin_lock_irq(&dev_priv->irq_lock);
3933755e9019SImre Deak 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3934755e9019SImre Deak 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3935755e9019SImre Deak 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
3936d6207435SDaniel Vetter 	spin_unlock_irq(&dev_priv->irq_lock);
3937a266c7d5SChris Wilson 
393891d14251STvrtko Ursulin 	i915_enable_asle_pipestat(dev_priv);
393920afbda2SDaniel Vetter 
394020afbda2SDaniel Vetter 	return 0;
394120afbda2SDaniel Vetter }
394220afbda2SDaniel Vetter 
394391d14251STvrtko Ursulin static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
394420afbda2SDaniel Vetter {
394520afbda2SDaniel Vetter 	u32 hotplug_en;
394620afbda2SDaniel Vetter 
394767520415SChris Wilson 	lockdep_assert_held(&dev_priv->irq_lock);
3948b5ea2d56SDaniel Vetter 
3949adca4730SChris Wilson 	/* Note HDMI and DP share hotplug bits */
3950e5868a31SEgbert Eich 	/* enable bits are the same for all generations */
395191d14251STvrtko Ursulin 	hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915);
3952a266c7d5SChris Wilson 	/* Programming the CRT detection parameters tends
3953a266c7d5SChris Wilson 	   to generate a spurious hotplug event about three
3954a266c7d5SChris Wilson 	   seconds later.  So just do it once.
3955a266c7d5SChris Wilson 	*/
395691d14251STvrtko Ursulin 	if (IS_G4X(dev_priv))
3957a266c7d5SChris Wilson 		hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
3958a266c7d5SChris Wilson 	hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
3959a266c7d5SChris Wilson 
3960a266c7d5SChris Wilson 	/* Ignore TV since it's buggy */
39610706f17cSEgbert Eich 	i915_hotplug_interrupt_update_locked(dev_priv,
3962f9e3dc78SJani Nikula 					     HOTPLUG_INT_EN_MASK |
3963f9e3dc78SJani Nikula 					     CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
3964f9e3dc78SJani Nikula 					     CRT_HOTPLUG_ACTIVATION_PERIOD_64,
39650706f17cSEgbert Eich 					     hotplug_en);
3966a266c7d5SChris Wilson }
3967a266c7d5SChris Wilson 
3968ff1f525eSDaniel Vetter static irqreturn_t i965_irq_handler(int irq, void *arg)
3969a266c7d5SChris Wilson {
397045a83f84SDaniel Vetter 	struct drm_device *dev = arg;
3971fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
3972af722d28SVille Syrjälä 	irqreturn_t ret = IRQ_NONE;
3973a266c7d5SChris Wilson 
39742dd2a883SImre Deak 	if (!intel_irqs_enabled(dev_priv))
39752dd2a883SImre Deak 		return IRQ_NONE;
39762dd2a883SImre Deak 
39771f814dacSImre Deak 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
39781f814dacSImre Deak 	disable_rpm_wakeref_asserts(dev_priv);
39791f814dacSImre Deak 
3980af722d28SVille Syrjälä 	do {
3981eb64343cSVille Syrjälä 		u32 pipe_stats[I915_MAX_PIPES] = {};
3982af722d28SVille Syrjälä 		u32 hotplug_status = 0;
3983af722d28SVille Syrjälä 		u32 iir;
39842c8ba29fSChris Wilson 
3985af722d28SVille Syrjälä 		iir = I915_READ(IIR);
3986af722d28SVille Syrjälä 		if (iir == 0)
3987af722d28SVille Syrjälä 			break;
3988af722d28SVille Syrjälä 
3989af722d28SVille Syrjälä 		ret = IRQ_HANDLED;
3990af722d28SVille Syrjälä 
3991af722d28SVille Syrjälä 		if (iir & I915_DISPLAY_PORT_INTERRUPT)
3992af722d28SVille Syrjälä 			hotplug_status = i9xx_hpd_irq_ack(dev_priv);
3993a266c7d5SChris Wilson 
3994eb64343cSVille Syrjälä 		/* Call regardless, as some status bits might not be
3995eb64343cSVille Syrjälä 		 * signalled in iir */
3996eb64343cSVille Syrjälä 		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
3997a266c7d5SChris Wilson 
3998fd3a4024SDaniel Vetter 		I915_WRITE(IIR, iir);
3999a266c7d5SChris Wilson 
4000a266c7d5SChris Wilson 		if (iir & I915_USER_INTERRUPT)
40013b3f1650SAkash Goel 			notify_ring(dev_priv->engine[RCS]);
4002af722d28SVille Syrjälä 
4003a266c7d5SChris Wilson 		if (iir & I915_BSD_USER_INTERRUPT)
40043b3f1650SAkash Goel 			notify_ring(dev_priv->engine[VCS]);
4005a266c7d5SChris Wilson 
4006af722d28SVille Syrjälä 		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
4007af722d28SVille Syrjälä 			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
4008515ac2bbSDaniel Vetter 
4009af722d28SVille Syrjälä 		if (hotplug_status)
4010af722d28SVille Syrjälä 			i9xx_hpd_irq_handler(dev_priv, hotplug_status);
4011af722d28SVille Syrjälä 
4012af722d28SVille Syrjälä 		i965_pipestat_irq_handler(dev_priv, iir, pipe_stats);
4013af722d28SVille Syrjälä 	} while (0);
4014a266c7d5SChris Wilson 
40151f814dacSImre Deak 	enable_rpm_wakeref_asserts(dev_priv);
40161f814dacSImre Deak 
4017a266c7d5SChris Wilson 	return ret;
4018a266c7d5SChris Wilson }
4019a266c7d5SChris Wilson 
4020fca52a55SDaniel Vetter /**
4021fca52a55SDaniel Vetter  * intel_irq_init - initializes irq support
4022fca52a55SDaniel Vetter  * @dev_priv: i915 device instance
4023fca52a55SDaniel Vetter  *
4024fca52a55SDaniel Vetter  * This function initializes all the irq support including work items, timers
4025fca52a55SDaniel Vetter  * and all the vtables. It does not set up the interrupt itself, though.
4026fca52a55SDaniel Vetter  */
4027b963291cSDaniel Vetter void intel_irq_init(struct drm_i915_private *dev_priv)
4028f71d4af4SJesse Barnes {
402991c8a326SChris Wilson 	struct drm_device *dev = &dev_priv->drm;
4030562d9baeSSagar Arun Kamble 	struct intel_rps *rps = &dev_priv->gt_pm.rps;
4031cefcff8fSJoonas Lahtinen 	int i;
40328b2e326dSChris Wilson 
403377913b39SJani Nikula 	intel_hpd_init_work(dev_priv);
403477913b39SJani Nikula 
4035562d9baeSSagar Arun Kamble 	INIT_WORK(&rps->work, gen6_pm_rps_work);
4036cefcff8fSJoonas Lahtinen 
4037a4da4fa4SDaniel Vetter 	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
4038cefcff8fSJoonas Lahtinen 	for (i = 0; i < MAX_L3_SLICES; ++i)
4039cefcff8fSJoonas Lahtinen 		dev_priv->l3_parity.remap_info[i] = NULL;
40408b2e326dSChris Wilson 
40414805fe82STvrtko Ursulin 	if (HAS_GUC_SCHED(dev_priv))
404226705e20SSagar Arun Kamble 		dev_priv->pm_guc_events = GEN9_GUC_TO_HOST_INT_EVENT;
404326705e20SSagar Arun Kamble 
4044a6706b45SDeepak S 	/* Let's track the enabled rps events */
4045666a4537SWayne Boyer 	if (IS_VALLEYVIEW(dev_priv))
40466c65a587SVille Syrjälä 		/* WaGsvRC0ResidencyMethod:vlv */
4047e0e8c7cbSChris Wilson 		dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
404831685c25SDeepak S 	else
4049a6706b45SDeepak S 		dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
4050a6706b45SDeepak S 
4051562d9baeSSagar Arun Kamble 	rps->pm_intrmsk_mbz = 0;
40521800ad25SSagar Arun Kamble 
40531800ad25SSagar Arun Kamble 	/*
4054acf2dc22SMika Kuoppala 	 * SNB, IVB and HSW can hang, while VLV and CHV may hard hang, on a
40551800ad25SSagar Arun Kamble 	 * looping batchbuffer if GEN6_PM_UP_EI_EXPIRED is masked.
40561800ad25SSagar Arun Kamble 	 *
40571800ad25SSagar Arun Kamble 	 * TODO: verify if this can be reproduced on VLV,CHV.
40581800ad25SSagar Arun Kamble 	 */
4059bca2bf2aSPandiyan, Dhinakaran 	if (INTEL_GEN(dev_priv) <= 7)
4060562d9baeSSagar Arun Kamble 		rps->pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED;
40611800ad25SSagar Arun Kamble 
4062bca2bf2aSPandiyan, Dhinakaran 	if (INTEL_GEN(dev_priv) >= 8)
4063562d9baeSSagar Arun Kamble 		rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
40641800ad25SSagar Arun Kamble 
4065b963291cSDaniel Vetter 	if (IS_GEN2(dev_priv)) {
40664194c088SRodrigo Vivi 		/* Gen2 doesn't have a hardware frame counter */
40674cdb83ecSVille Syrjälä 		dev->max_vblank_count = 0;
4068bca2bf2aSPandiyan, Dhinakaran 	} else if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
4069f71d4af4SJesse Barnes 		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
4070fd8f507cSVille Syrjälä 		dev->driver->get_vblank_counter = g4x_get_vblank_counter;
4071391f75e2SVille Syrjälä 	} else {
4072391f75e2SVille Syrjälä 		dev->driver->get_vblank_counter = i915_get_vblank_counter;
4073391f75e2SVille Syrjälä 		dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
4074f71d4af4SJesse Barnes 	}
4075f71d4af4SJesse Barnes 
407621da2700SVille Syrjälä 	/*
407721da2700SVille Syrjälä 	 * Opt out of the vblank disable timer on everything except gen2.
407821da2700SVille Syrjälä 	 * Gen2 doesn't have a hardware frame counter and so depends on
407921da2700SVille Syrjälä 	 * vblank interrupts to produce sane vblank seuquence numbers.
408021da2700SVille Syrjälä 	 * vblank interrupts to produce sane vblank sequence numbers.
4081b963291cSDaniel Vetter 	if (!IS_GEN2(dev_priv))
408221da2700SVille Syrjälä 		dev->vblank_disable_immediate = true;
408321da2700SVille Syrjälä 
4084262fd485SChris Wilson 	/* Most platforms treat the display irq block as an always-on
4085262fd485SChris Wilson 	 * power domain. vlv/chv can disable it at runtime and need
4086262fd485SChris Wilson 	 * special care to avoid writing any of the display block registers
4087262fd485SChris Wilson 	 * outside of the power domain. We defer setting up the display irqs
4088262fd485SChris Wilson 	 * in this case to the runtime pm.
4089262fd485SChris Wilson 	 */
4090262fd485SChris Wilson 	dev_priv->display_irqs_enabled = true;
4091262fd485SChris Wilson 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
4092262fd485SChris Wilson 		dev_priv->display_irqs_enabled = false;
4093262fd485SChris Wilson 
4094317eaa95SLyude 	dev_priv->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD;
4095317eaa95SLyude 
40961bf6ad62SDaniel Vetter 	dev->driver->get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos;
4097f71d4af4SJesse Barnes 	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
4098f71d4af4SJesse Barnes 
4099b963291cSDaniel Vetter 	if (IS_CHERRYVIEW(dev_priv)) {
410043f328d7SVille Syrjälä 		dev->driver->irq_handler = cherryview_irq_handler;
41016bcdb1c8SVille Syrjälä 		dev->driver->irq_preinstall = cherryview_irq_reset;
410243f328d7SVille Syrjälä 		dev->driver->irq_postinstall = cherryview_irq_postinstall;
41036bcdb1c8SVille Syrjälä 		dev->driver->irq_uninstall = cherryview_irq_reset;
410486e83e35SChris Wilson 		dev->driver->enable_vblank = i965_enable_vblank;
410586e83e35SChris Wilson 		dev->driver->disable_vblank = i965_disable_vblank;
410643f328d7SVille Syrjälä 		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4107b963291cSDaniel Vetter 	} else if (IS_VALLEYVIEW(dev_priv)) {
41087e231dbeSJesse Barnes 		dev->driver->irq_handler = valleyview_irq_handler;
41096bcdb1c8SVille Syrjälä 		dev->driver->irq_preinstall = valleyview_irq_reset;
41107e231dbeSJesse Barnes 		dev->driver->irq_postinstall = valleyview_irq_postinstall;
41116bcdb1c8SVille Syrjälä 		dev->driver->irq_uninstall = valleyview_irq_reset;
411286e83e35SChris Wilson 		dev->driver->enable_vblank = i965_enable_vblank;
411386e83e35SChris Wilson 		dev->driver->disable_vblank = i965_disable_vblank;
4114fa00abe0SEgbert Eich 		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4115bca2bf2aSPandiyan, Dhinakaran 	} else if (INTEL_GEN(dev_priv) >= 8) {
4116abd58f01SBen Widawsky 		dev->driver->irq_handler = gen8_irq_handler;
4117723761b8SDaniel Vetter 		dev->driver->irq_preinstall = gen8_irq_reset;
4118abd58f01SBen Widawsky 		dev->driver->irq_postinstall = gen8_irq_postinstall;
41196bcdb1c8SVille Syrjälä 		dev->driver->irq_uninstall = gen8_irq_reset;
4120abd58f01SBen Widawsky 		dev->driver->enable_vblank = gen8_enable_vblank;
4121abd58f01SBen Widawsky 		dev->driver->disable_vblank = gen8_disable_vblank;
4122cc3f90f0SAnder Conselvan de Oliveira 		if (IS_GEN9_LP(dev_priv))
4123e0a20ad7SShashank Sharma 			dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
41247b22b8c4SRodrigo Vivi 		else if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv) ||
41257b22b8c4SRodrigo Vivi 			 HAS_PCH_CNP(dev_priv))
41266dbf30ceSVille Syrjälä 			dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
41276dbf30ceSVille Syrjälä 		else
41283a3b3c7dSVille Syrjälä 			dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
41296e266956STvrtko Ursulin 	} else if (HAS_PCH_SPLIT(dev_priv)) {
4130f71d4af4SJesse Barnes 		dev->driver->irq_handler = ironlake_irq_handler;
4131723761b8SDaniel Vetter 		dev->driver->irq_preinstall = ironlake_irq_reset;
4132f71d4af4SJesse Barnes 		dev->driver->irq_postinstall = ironlake_irq_postinstall;
41336bcdb1c8SVille Syrjälä 		dev->driver->irq_uninstall = ironlake_irq_reset;
4134f71d4af4SJesse Barnes 		dev->driver->enable_vblank = ironlake_enable_vblank;
4135f71d4af4SJesse Barnes 		dev->driver->disable_vblank = ironlake_disable_vblank;
4136e4ce95aaSVille Syrjälä 		dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
4137f71d4af4SJesse Barnes 	} else {
41387e22dbbbSTvrtko Ursulin 		if (IS_GEN2(dev_priv)) {
41396bcdb1c8SVille Syrjälä 			dev->driver->irq_preinstall = i8xx_irq_reset;
4140c2798b19SChris Wilson 			dev->driver->irq_postinstall = i8xx_irq_postinstall;
4141c2798b19SChris Wilson 			dev->driver->irq_handler = i8xx_irq_handler;
41426bcdb1c8SVille Syrjälä 			dev->driver->irq_uninstall = i8xx_irq_reset;
414386e83e35SChris Wilson 			dev->driver->enable_vblank = i8xx_enable_vblank;
414486e83e35SChris Wilson 			dev->driver->disable_vblank = i8xx_disable_vblank;
41457e22dbbbSTvrtko Ursulin 		} else if (IS_GEN3(dev_priv)) {
41466bcdb1c8SVille Syrjälä 			dev->driver->irq_preinstall = i915_irq_reset;
4147a266c7d5SChris Wilson 			dev->driver->irq_postinstall = i915_irq_postinstall;
41486bcdb1c8SVille Syrjälä 			dev->driver->irq_uninstall = i915_irq_reset;
4149a266c7d5SChris Wilson 			dev->driver->irq_handler = i915_irq_handler;
415086e83e35SChris Wilson 			dev->driver->enable_vblank = i8xx_enable_vblank;
415186e83e35SChris Wilson 			dev->driver->disable_vblank = i8xx_disable_vblank;
4152c2798b19SChris Wilson 		} else {
41536bcdb1c8SVille Syrjälä 			dev->driver->irq_preinstall = i965_irq_reset;
4154a266c7d5SChris Wilson 			dev->driver->irq_postinstall = i965_irq_postinstall;
41556bcdb1c8SVille Syrjälä 			dev->driver->irq_uninstall = i965_irq_reset;
4156a266c7d5SChris Wilson 			dev->driver->irq_handler = i965_irq_handler;
415786e83e35SChris Wilson 			dev->driver->enable_vblank = i965_enable_vblank;
415886e83e35SChris Wilson 			dev->driver->disable_vblank = i965_disable_vblank;
4159c2798b19SChris Wilson 		}
4160778eb334SVille Syrjälä 		if (I915_HAS_HOTPLUG(dev_priv))
4161778eb334SVille Syrjälä 			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4162f71d4af4SJesse Barnes 	}
4163f71d4af4SJesse Barnes }
416420afbda2SDaniel Vetter 
4165fca52a55SDaniel Vetter /**
4166cefcff8fSJoonas Lahtinen  * intel_irq_fini - deinitializes IRQ support
4167cefcff8fSJoonas Lahtinen  * @i915: i915 device instance
4168cefcff8fSJoonas Lahtinen  *
4169cefcff8fSJoonas Lahtinen  * This function deinitializes all the IRQ support.
4170cefcff8fSJoonas Lahtinen  */
4171cefcff8fSJoonas Lahtinen void intel_irq_fini(struct drm_i915_private *i915)
4172cefcff8fSJoonas Lahtinen {
4173cefcff8fSJoonas Lahtinen 	int i;
4174cefcff8fSJoonas Lahtinen 
4175cefcff8fSJoonas Lahtinen 	for (i = 0; i < MAX_L3_SLICES; ++i)
4176cefcff8fSJoonas Lahtinen 		kfree(i915->l3_parity.remap_info[i]);
4177cefcff8fSJoonas Lahtinen }
4178cefcff8fSJoonas Lahtinen 
4179cefcff8fSJoonas Lahtinen /**
4180fca52a55SDaniel Vetter  * intel_irq_install - enables the hardware interrupt
4181fca52a55SDaniel Vetter  * @dev_priv: i915 device instance
4182fca52a55SDaniel Vetter  *
4183fca52a55SDaniel Vetter  * This function enables the hardware interrupt handling, but leaves the hotplug
4184fca52a55SDaniel Vetter  * handling still disabled. It is called after intel_irq_init().
4185fca52a55SDaniel Vetter  *
4186fca52a55SDaniel Vetter  * In the driver load and resume code we need working interrupts in a few places
4187fca52a55SDaniel Vetter  * but don't want to deal with the hassle of concurrent probe and hotplug
4188fca52a55SDaniel Vetter  * workers. Hence the split into this two-stage approach.
4189fca52a55SDaniel Vetter  */
41902aeb7d3aSDaniel Vetter int intel_irq_install(struct drm_i915_private *dev_priv)
41912aeb7d3aSDaniel Vetter {
41922aeb7d3aSDaniel Vetter 	/*
41932aeb7d3aSDaniel Vetter 	 * We enable some interrupt sources in our postinstall hooks, so mark
41942aeb7d3aSDaniel Vetter 	 * interrupts as enabled _before_ actually enabling them to avoid
41952aeb7d3aSDaniel Vetter 	 * special cases in our ordering checks.
41962aeb7d3aSDaniel Vetter 	 */
4197ad1443f0SSagar Arun Kamble 	dev_priv->runtime_pm.irqs_enabled = true;
41982aeb7d3aSDaniel Vetter 
419991c8a326SChris Wilson 	return drm_irq_install(&dev_priv->drm, dev_priv->drm.pdev->irq);
42002aeb7d3aSDaniel Vetter }
42012aeb7d3aSDaniel Vetter 
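
/*
 * Illustrative sketch (not part of the driver): the two-stage bring-up
 * described in the kerneldoc above, roughly as a driver load path would
 * use it. The helper name and the error handling are hypothetical; only
 * intel_irq_init(), intel_irq_install() and intel_irq_fini() are taken
 * from this file.
 */
static int __maybe_unused example_irq_bringup(struct drm_i915_private *dev_priv)
{
	int ret;

	/* Stage 1: set up work items, timers and vtables; no IRQs enabled yet. */
	intel_irq_init(dev_priv);

	/* Stage 2: request the interrupt line and run the postinstall hooks. */
	ret = intel_irq_install(dev_priv);
	if (ret)
		intel_irq_fini(dev_priv);

	return ret;
}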
4202fca52a55SDaniel Vetter /**
4203fca52a55SDaniel Vetter  * intel_irq_uninstall - finalizes all irq handling
4204fca52a55SDaniel Vetter  * @dev_priv: i915 device instance
4205fca52a55SDaniel Vetter  *
4206fca52a55SDaniel Vetter  * This stops interrupt and hotplug handling and unregisters and frees all
4207fca52a55SDaniel Vetter  * resources acquired in the init functions.
4208fca52a55SDaniel Vetter  */
42092aeb7d3aSDaniel Vetter void intel_irq_uninstall(struct drm_i915_private *dev_priv)
42102aeb7d3aSDaniel Vetter {
421191c8a326SChris Wilson 	drm_irq_uninstall(&dev_priv->drm);
42122aeb7d3aSDaniel Vetter 	intel_hpd_cancel_work(dev_priv);
4213ad1443f0SSagar Arun Kamble 	dev_priv->runtime_pm.irqs_enabled = false;
42142aeb7d3aSDaniel Vetter }
42152aeb7d3aSDaniel Vetter 
4216fca52a55SDaniel Vetter /**
4217fca52a55SDaniel Vetter  * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
4218fca52a55SDaniel Vetter  * @dev_priv: i915 device instance
4219fca52a55SDaniel Vetter  *
4220fca52a55SDaniel Vetter  * This function is used to disable interrupts at runtime, both in the runtime
4221fca52a55SDaniel Vetter  * pm and the system suspend/resume code.
4222fca52a55SDaniel Vetter  */
4223b963291cSDaniel Vetter void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
4224c67a470bSPaulo Zanoni {
422591c8a326SChris Wilson 	dev_priv->drm.driver->irq_uninstall(&dev_priv->drm);
4226ad1443f0SSagar Arun Kamble 	dev_priv->runtime_pm.irqs_enabled = false;
422791c8a326SChris Wilson 	synchronize_irq(dev_priv->drm.irq);
4228c67a470bSPaulo Zanoni }
4229c67a470bSPaulo Zanoni 
4230fca52a55SDaniel Vetter /**
4231fca52a55SDaniel Vetter  * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
4232fca52a55SDaniel Vetter  * @dev_priv: i915 device instance
4233fca52a55SDaniel Vetter  *
4234fca52a55SDaniel Vetter  * This function is used to enable interrupts at runtime, both in the runtime
4235fca52a55SDaniel Vetter  * pm and the system suspend/resume code.
4236fca52a55SDaniel Vetter  */
4237b963291cSDaniel Vetter void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
4238c67a470bSPaulo Zanoni {
4239ad1443f0SSagar Arun Kamble 	dev_priv->runtime_pm.irqs_enabled = true;
424091c8a326SChris Wilson 	dev_priv->drm.driver->irq_preinstall(&dev_priv->drm);
424191c8a326SChris Wilson 	dev_priv->drm.driver->irq_postinstall(&dev_priv->drm);
4242c67a470bSPaulo Zanoni }
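
/*
 * Illustrative sketch (not part of the driver): how a runtime suspend/resume
 * cycle would pair the two helpers above. The function name is hypothetical;
 * only intel_runtime_pm_disable_interrupts() and
 * intel_runtime_pm_enable_interrupts() come from this file.
 */
static void __maybe_unused example_runtime_pm_cycle(struct drm_i915_private *dev_priv)
{
	/* Runtime suspend: quiesce and tear down interrupt handling first. */
	intel_runtime_pm_disable_interrupts(dev_priv);

	/* ... device is powered down and later powered back up ... */

	/* Runtime resume: re-run the preinstall/postinstall hooks. */
	intel_runtime_pm_enable_interrupts(dev_priv);
}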
4243