xref: /openbmc/linux/drivers/gpu/drm/i915/i915_irq.c (revision 4a118ecbe99c93cf9f9582e83a88d03f18d6cb84)
1c0e09200SDave Airlie /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
2c0e09200SDave Airlie  */
3c0e09200SDave Airlie /*
4c0e09200SDave Airlie  * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
5c0e09200SDave Airlie  * All Rights Reserved.
6c0e09200SDave Airlie  *
7c0e09200SDave Airlie  * Permission is hereby granted, free of charge, to any person obtaining a
8c0e09200SDave Airlie  * copy of this software and associated documentation files (the
9c0e09200SDave Airlie  * "Software"), to deal in the Software without restriction, including
10c0e09200SDave Airlie  * without limitation the rights to use, copy, modify, merge, publish,
11c0e09200SDave Airlie  * distribute, sub license, and/or sell copies of the Software, and to
12c0e09200SDave Airlie  * permit persons to whom the Software is furnished to do so, subject to
13c0e09200SDave Airlie  * the following conditions:
14c0e09200SDave Airlie  *
15c0e09200SDave Airlie  * The above copyright notice and this permission notice (including the
16c0e09200SDave Airlie  * next paragraph) shall be included in all copies or substantial portions
17c0e09200SDave Airlie  * of the Software.
18c0e09200SDave Airlie  *
19c0e09200SDave Airlie  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20c0e09200SDave Airlie  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21c0e09200SDave Airlie  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22c0e09200SDave Airlie  * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23c0e09200SDave Airlie  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24c0e09200SDave Airlie  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25c0e09200SDave Airlie  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26c0e09200SDave Airlie  *
27c0e09200SDave Airlie  */
28c0e09200SDave Airlie 
29a70491ccSJoe Perches #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30a70491ccSJoe Perches 
3163eeaf38SJesse Barnes #include <linux/sysrq.h>
325a0e3ad6STejun Heo #include <linux/slab.h>
33b2c88f5bSDamien Lespiau #include <linux/circ_buf.h>
34760285e7SDavid Howells #include <drm/drmP.h>
35760285e7SDavid Howells #include <drm/i915_drm.h>
36c0e09200SDave Airlie #include "i915_drv.h"
371c5d22f7SChris Wilson #include "i915_trace.h"
3879e53945SJesse Barnes #include "intel_drv.h"
39c0e09200SDave Airlie 
40fca52a55SDaniel Vetter /**
41fca52a55SDaniel Vetter  * DOC: interrupt handling
42fca52a55SDaniel Vetter  *
43fca52a55SDaniel Vetter  * These functions provide the basic support for enabling and disabling the
44fca52a55SDaniel Vetter  * interrupt handling support. There's a lot more functionality in i915_irq.c
45fca52a55SDaniel Vetter  * and related files, but that will be described in separate chapters.
46fca52a55SDaniel Vetter  */
47fca52a55SDaniel Vetter 
48e4ce95aaSVille Syrjälä static const u32 hpd_ilk[HPD_NUM_PINS] = {
49e4ce95aaSVille Syrjälä 	[HPD_PORT_A] = DE_DP_A_HOTPLUG,
50e4ce95aaSVille Syrjälä };
51e4ce95aaSVille Syrjälä 
5223bb4cb5SVille Syrjälä static const u32 hpd_ivb[HPD_NUM_PINS] = {
5323bb4cb5SVille Syrjälä 	[HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
5423bb4cb5SVille Syrjälä };
5523bb4cb5SVille Syrjälä 
563a3b3c7dSVille Syrjälä static const u32 hpd_bdw[HPD_NUM_PINS] = {
573a3b3c7dSVille Syrjälä 	[HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG,
583a3b3c7dSVille Syrjälä };
593a3b3c7dSVille Syrjälä 
607c7e10dbSVille Syrjälä static const u32 hpd_ibx[HPD_NUM_PINS] = {
61e5868a31SEgbert Eich 	[HPD_CRT] = SDE_CRT_HOTPLUG,
62e5868a31SEgbert Eich 	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
63e5868a31SEgbert Eich 	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
64e5868a31SEgbert Eich 	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
65e5868a31SEgbert Eich 	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
66e5868a31SEgbert Eich };
67e5868a31SEgbert Eich 
687c7e10dbSVille Syrjälä static const u32 hpd_cpt[HPD_NUM_PINS] = {
69e5868a31SEgbert Eich 	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
7073c352a2SDaniel Vetter 	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
71e5868a31SEgbert Eich 	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
72e5868a31SEgbert Eich 	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
73e5868a31SEgbert Eich 	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
74e5868a31SEgbert Eich };
75e5868a31SEgbert Eich 
7626951cafSXiong Zhang static const u32 hpd_spt[HPD_NUM_PINS] = {
7774c0b395SVille Syrjälä 	[HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
7826951cafSXiong Zhang 	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
7926951cafSXiong Zhang 	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
8026951cafSXiong Zhang 	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
8126951cafSXiong Zhang 	[HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT
8226951cafSXiong Zhang };
8326951cafSXiong Zhang 
847c7e10dbSVille Syrjälä static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
85e5868a31SEgbert Eich 	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
86e5868a31SEgbert Eich 	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
87e5868a31SEgbert Eich 	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
88e5868a31SEgbert Eich 	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
89e5868a31SEgbert Eich 	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
90e5868a31SEgbert Eich 	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
91e5868a31SEgbert Eich };
92e5868a31SEgbert Eich 
937c7e10dbSVille Syrjälä static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
94e5868a31SEgbert Eich 	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
95e5868a31SEgbert Eich 	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
96e5868a31SEgbert Eich 	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
97e5868a31SEgbert Eich 	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
98e5868a31SEgbert Eich 	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
99e5868a31SEgbert Eich 	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
100e5868a31SEgbert Eich };
101e5868a31SEgbert Eich 
1024bca26d0SVille Syrjälä static const u32 hpd_status_i915[HPD_NUM_PINS] = {
103e5868a31SEgbert Eich 	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
104e5868a31SEgbert Eich 	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
105e5868a31SEgbert Eich 	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
106e5868a31SEgbert Eich 	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
107e5868a31SEgbert Eich 	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
108e5868a31SEgbert Eich 	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
109e5868a31SEgbert Eich };
110e5868a31SEgbert Eich 
111e0a20ad7SShashank Sharma /* BXT hpd list */
112e0a20ad7SShashank Sharma static const u32 hpd_bxt[HPD_NUM_PINS] = {
1137f3561beSSonika Jindal 	[HPD_PORT_A] = BXT_DE_PORT_HP_DDIA,
114e0a20ad7SShashank Sharma 	[HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
115e0a20ad7SShashank Sharma 	[HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
116e0a20ad7SShashank Sharma };
117e0a20ad7SShashank Sharma 
1185c502442SPaulo Zanoni /* IIR can theoretically queue up two events. Be paranoid. */
119f86f3fb0SPaulo Zanoni #define GEN8_IRQ_RESET_NDX(type, which) do { \
1205c502442SPaulo Zanoni 	I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
1215c502442SPaulo Zanoni 	POSTING_READ(GEN8_##type##_IMR(which)); \
1225c502442SPaulo Zanoni 	I915_WRITE(GEN8_##type##_IER(which), 0); \
1235c502442SPaulo Zanoni 	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
1245c502442SPaulo Zanoni 	POSTING_READ(GEN8_##type##_IIR(which)); \
1255c502442SPaulo Zanoni 	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
1265c502442SPaulo Zanoni 	POSTING_READ(GEN8_##type##_IIR(which)); \
1275c502442SPaulo Zanoni } while (0)
1285c502442SPaulo Zanoni 
1293488d4ebSVille Syrjälä #define GEN3_IRQ_RESET(type) do { \
130a9d356a6SPaulo Zanoni 	I915_WRITE(type##IMR, 0xffffffff); \
1315c502442SPaulo Zanoni 	POSTING_READ(type##IMR); \
132a9d356a6SPaulo Zanoni 	I915_WRITE(type##IER, 0); \
1335c502442SPaulo Zanoni 	I915_WRITE(type##IIR, 0xffffffff); \
1345c502442SPaulo Zanoni 	POSTING_READ(type##IIR); \
1355c502442SPaulo Zanoni 	I915_WRITE(type##IIR, 0xffffffff); \
1365c502442SPaulo Zanoni 	POSTING_READ(type##IIR); \
137a9d356a6SPaulo Zanoni } while (0)
138a9d356a6SPaulo Zanoni 
139e9e9848aSVille Syrjälä #define GEN2_IRQ_RESET(type) do { \
140e9e9848aSVille Syrjälä 	I915_WRITE16(type##IMR, 0xffff); \
141e9e9848aSVille Syrjälä 	POSTING_READ16(type##IMR); \
142e9e9848aSVille Syrjälä 	I915_WRITE16(type##IER, 0); \
143e9e9848aSVille Syrjälä 	I915_WRITE16(type##IIR, 0xffff); \
144e9e9848aSVille Syrjälä 	POSTING_READ16(type##IIR); \
145e9e9848aSVille Syrjälä 	I915_WRITE16(type##IIR, 0xffff); \
146e9e9848aSVille Syrjälä 	POSTING_READ16(type##IIR); \
147e9e9848aSVille Syrjälä } while (0)
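/*
 * Illustrative note (not from the original sources): the reset macros
 * above take a register-prefix argument, e.g. GEN3_IRQ_RESET(GT) expands
 * into writes of GTIMR/GTIER/GTIIR.  The sequence is: mask everything in
 * IMR, disable everything in IER, then clear IIR twice, since the
 * hardware can have a second event queued behind the one currently
 * latched (see the comment above GEN8_IRQ_RESET_NDX).
 */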
148e9e9848aSVille Syrjälä 
149337ba017SPaulo Zanoni /*
150337ba017SPaulo Zanoni  * We should clear IMR at preinstall/uninstall, and just check at postinstall.
151337ba017SPaulo Zanoni  */
1523488d4ebSVille Syrjälä static void gen3_assert_iir_is_zero(struct drm_i915_private *dev_priv,
153f0f59a00SVille Syrjälä 				    i915_reg_t reg)
154b51a2842SVille Syrjälä {
155b51a2842SVille Syrjälä 	u32 val = I915_READ(reg);
156b51a2842SVille Syrjälä 
157b51a2842SVille Syrjälä 	if (val == 0)
158b51a2842SVille Syrjälä 		return;
159b51a2842SVille Syrjälä 
160b51a2842SVille Syrjälä 	WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
161f0f59a00SVille Syrjälä 	     i915_mmio_reg_offset(reg), val);
162b51a2842SVille Syrjälä 	I915_WRITE(reg, 0xffffffff);
163b51a2842SVille Syrjälä 	POSTING_READ(reg);
164b51a2842SVille Syrjälä 	I915_WRITE(reg, 0xffffffff);
165b51a2842SVille Syrjälä 	POSTING_READ(reg);
166b51a2842SVille Syrjälä }
167337ba017SPaulo Zanoni 
168e9e9848aSVille Syrjälä static void gen2_assert_iir_is_zero(struct drm_i915_private *dev_priv,
169e9e9848aSVille Syrjälä 				    i915_reg_t reg)
170e9e9848aSVille Syrjälä {
171e9e9848aSVille Syrjälä 	u16 val = I915_READ16(reg);
172e9e9848aSVille Syrjälä 
173e9e9848aSVille Syrjälä 	if (val == 0)
174e9e9848aSVille Syrjälä 		return;
175e9e9848aSVille Syrjälä 
176e9e9848aSVille Syrjälä 	WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
177e9e9848aSVille Syrjälä 	     i915_mmio_reg_offset(reg), val);
178e9e9848aSVille Syrjälä 	I915_WRITE16(reg, 0xffff);
179e9e9848aSVille Syrjälä 	POSTING_READ16(reg);
180e9e9848aSVille Syrjälä 	I915_WRITE16(reg, 0xffff);
181e9e9848aSVille Syrjälä 	POSTING_READ16(reg);
182e9e9848aSVille Syrjälä }
183e9e9848aSVille Syrjälä 
18435079899SPaulo Zanoni #define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
1853488d4ebSVille Syrjälä 	gen3_assert_iir_is_zero(dev_priv, GEN8_##type##_IIR(which)); \
18635079899SPaulo Zanoni 	I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
1877d1bd539SVille Syrjälä 	I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
1887d1bd539SVille Syrjälä 	POSTING_READ(GEN8_##type##_IMR(which)); \
18935079899SPaulo Zanoni } while (0)
19035079899SPaulo Zanoni 
1913488d4ebSVille Syrjälä #define GEN3_IRQ_INIT(type, imr_val, ier_val) do { \
1923488d4ebSVille Syrjälä 	gen3_assert_iir_is_zero(dev_priv, type##IIR); \
19335079899SPaulo Zanoni 	I915_WRITE(type##IER, (ier_val)); \
1947d1bd539SVille Syrjälä 	I915_WRITE(type##IMR, (imr_val)); \
1957d1bd539SVille Syrjälä 	POSTING_READ(type##IMR); \
19635079899SPaulo Zanoni } while (0)
19735079899SPaulo Zanoni 
198e9e9848aSVille Syrjälä #define GEN2_IRQ_INIT(type, imr_val, ier_val) do { \
199e9e9848aSVille Syrjälä 	gen2_assert_iir_is_zero(dev_priv, type##IIR); \
200e9e9848aSVille Syrjälä 	I915_WRITE16(type##IER, (ier_val)); \
201e9e9848aSVille Syrjälä 	I915_WRITE16(type##IMR, (imr_val)); \
202e9e9848aSVille Syrjälä 	POSTING_READ16(type##IMR); \
203e9e9848aSVille Syrjälä } while (0)
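/*
 * Illustrative note: the *_IRQ_INIT macros are the postinstall-time
 * counterpart of the reset macros.  They first assert that IIR is
 * already clean (gen3_assert_iir_is_zero()/gen2_assert_iir_is_zero()
 * above), then program IER before IMR, presumably so that the enable
 * bits are in place before any source gets unmasked, and finally post
 * the IMR write.
 */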
204e9e9848aSVille Syrjälä 
205c9a9a268SImre Deak static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
20626705e20SSagar Arun Kamble static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
207c9a9a268SImre Deak 
2080706f17cSEgbert Eich /* For display hotplug interrupt */
2090706f17cSEgbert Eich static inline void
2100706f17cSEgbert Eich i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
2110706f17cSEgbert Eich 				     uint32_t mask,
2120706f17cSEgbert Eich 				     uint32_t bits)
2130706f17cSEgbert Eich {
2140706f17cSEgbert Eich 	uint32_t val;
2150706f17cSEgbert Eich 
21667520415SChris Wilson 	lockdep_assert_held(&dev_priv->irq_lock);
2170706f17cSEgbert Eich 	WARN_ON(bits & ~mask);
2180706f17cSEgbert Eich 
2190706f17cSEgbert Eich 	val = I915_READ(PORT_HOTPLUG_EN);
2200706f17cSEgbert Eich 	val &= ~mask;
2210706f17cSEgbert Eich 	val |= bits;
2220706f17cSEgbert Eich 	I915_WRITE(PORT_HOTPLUG_EN, val);
2230706f17cSEgbert Eich }
2240706f17cSEgbert Eich 
2250706f17cSEgbert Eich /**
2260706f17cSEgbert Eich  * i915_hotplug_interrupt_update - update hotplug interrupt enable
2270706f17cSEgbert Eich  * @dev_priv: driver private
2280706f17cSEgbert Eich  * @mask: bits to update
2290706f17cSEgbert Eich  * @bits: bits to enable
2300706f17cSEgbert Eich  * NOTE: the HPD enable bits are modified both inside and outside
2310706f17cSEgbert Eich  * of an interrupt context. To prevent read-modify-write cycles
2320706f17cSEgbert Eich  * from interfering, these bits are protected by a spinlock. Since this
2330706f17cSEgbert Eich  * function is usually not called from a context where the lock is
2340706f17cSEgbert Eich  * held already, this function acquires the lock itself. A non-locking
2350706f17cSEgbert Eich  * version is also available.
2360706f17cSEgbert Eich  */
2370706f17cSEgbert Eich void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
2380706f17cSEgbert Eich 				   uint32_t mask,
2390706f17cSEgbert Eich 				   uint32_t bits)
2400706f17cSEgbert Eich {
2410706f17cSEgbert Eich 	spin_lock_irq(&dev_priv->irq_lock);
2420706f17cSEgbert Eich 	i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
2430706f17cSEgbert Eich 	spin_unlock_irq(&dev_priv->irq_lock);
2440706f17cSEgbert Eich }
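/*
 * Hypothetical usage sketch (not a call site taken from this file): to
 * set just the port B hotplug enable bit while leaving the others
 * alone, a caller could do
 *
 *	i915_hotplug_interrupt_update(dev_priv,
 *				      PORTB_HOTPLUG_INT_EN,
 *				      PORTB_HOTPLUG_INT_EN);
 *
 * and pass bits == 0 with the same mask to clear it again.
 */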
2450706f17cSEgbert Eich 
246d9dc34f1SVille Syrjälä /**
247d9dc34f1SVille Syrjälä  * ilk_update_display_irq - update DEIMR
248d9dc34f1SVille Syrjälä  * @dev_priv: driver private
249d9dc34f1SVille Syrjälä  * @interrupt_mask: mask of interrupt bits to update
250d9dc34f1SVille Syrjälä  * @enabled_irq_mask: mask of interrupt bits to enable
251d9dc34f1SVille Syrjälä  */
252fbdedaeaSVille Syrjälä void ilk_update_display_irq(struct drm_i915_private *dev_priv,
253d9dc34f1SVille Syrjälä 			    uint32_t interrupt_mask,
254d9dc34f1SVille Syrjälä 			    uint32_t enabled_irq_mask)
255036a4a7dSZhenyu Wang {
256d9dc34f1SVille Syrjälä 	uint32_t new_val;
257d9dc34f1SVille Syrjälä 
25867520415SChris Wilson 	lockdep_assert_held(&dev_priv->irq_lock);
2594bc9d430SDaniel Vetter 
260d9dc34f1SVille Syrjälä 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
261d9dc34f1SVille Syrjälä 
2629df7575fSJesse Barnes 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
263c67a470bSPaulo Zanoni 		return;
264c67a470bSPaulo Zanoni 
265d9dc34f1SVille Syrjälä 	new_val = dev_priv->irq_mask;
266d9dc34f1SVille Syrjälä 	new_val &= ~interrupt_mask;
267d9dc34f1SVille Syrjälä 	new_val |= (~enabled_irq_mask & interrupt_mask);
268d9dc34f1SVille Syrjälä 
269d9dc34f1SVille Syrjälä 	if (new_val != dev_priv->irq_mask) {
270d9dc34f1SVille Syrjälä 		dev_priv->irq_mask = new_val;
2711ec14ad3SChris Wilson 		I915_WRITE(DEIMR, dev_priv->irq_mask);
2723143a2bfSChris Wilson 		POSTING_READ(DEIMR);
273036a4a7dSZhenyu Wang 	}
274036a4a7dSZhenyu Wang }
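/*
 * Worked example (illustrative): with interrupt_mask = 0b0101 and
 * enabled_irq_mask = 0b0001 the update above clears bit 0 of DEIMR
 * (unmasking, i.e. enabling that interrupt), sets bit 2 (masking it),
 * and leaves every bit outside interrupt_mask untouched.  IMR is a mask
 * register, so 0 means "interrupt delivered" and 1 means "masked".
 */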
275036a4a7dSZhenyu Wang 
27643eaea13SPaulo Zanoni /**
27743eaea13SPaulo Zanoni  * ilk_update_gt_irq - update GTIMR
27843eaea13SPaulo Zanoni  * @dev_priv: driver private
27943eaea13SPaulo Zanoni  * @interrupt_mask: mask of interrupt bits to update
28043eaea13SPaulo Zanoni  * @enabled_irq_mask: mask of interrupt bits to enable
28143eaea13SPaulo Zanoni  */
28243eaea13SPaulo Zanoni static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
28343eaea13SPaulo Zanoni 			      uint32_t interrupt_mask,
28443eaea13SPaulo Zanoni 			      uint32_t enabled_irq_mask)
28543eaea13SPaulo Zanoni {
28667520415SChris Wilson 	lockdep_assert_held(&dev_priv->irq_lock);
28743eaea13SPaulo Zanoni 
28815a17aaeSDaniel Vetter 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
28915a17aaeSDaniel Vetter 
2909df7575fSJesse Barnes 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
291c67a470bSPaulo Zanoni 		return;
292c67a470bSPaulo Zanoni 
29343eaea13SPaulo Zanoni 	dev_priv->gt_irq_mask &= ~interrupt_mask;
29443eaea13SPaulo Zanoni 	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
29543eaea13SPaulo Zanoni 	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
29643eaea13SPaulo Zanoni }
29743eaea13SPaulo Zanoni 
298480c8033SDaniel Vetter void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
29943eaea13SPaulo Zanoni {
30043eaea13SPaulo Zanoni 	ilk_update_gt_irq(dev_priv, mask, mask);
30131bb59ccSChris Wilson 	POSTING_READ_FW(GTIMR);
30243eaea13SPaulo Zanoni }
30343eaea13SPaulo Zanoni 
304480c8033SDaniel Vetter void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
30543eaea13SPaulo Zanoni {
30643eaea13SPaulo Zanoni 	ilk_update_gt_irq(dev_priv, mask, 0);
30743eaea13SPaulo Zanoni }
30843eaea13SPaulo Zanoni 
309f0f59a00SVille Syrjälä static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv)
310b900b949SImre Deak {
311bca2bf2aSPandiyan, Dhinakaran 	return INTEL_GEN(dev_priv) >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
312b900b949SImre Deak }
313b900b949SImre Deak 
314f0f59a00SVille Syrjälä static i915_reg_t gen6_pm_imr(struct drm_i915_private *dev_priv)
315a72fbc3aSImre Deak {
316bca2bf2aSPandiyan, Dhinakaran 	return INTEL_GEN(dev_priv) >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR;
317a72fbc3aSImre Deak }
318a72fbc3aSImre Deak 
319f0f59a00SVille Syrjälä static i915_reg_t gen6_pm_ier(struct drm_i915_private *dev_priv)
320b900b949SImre Deak {
321bca2bf2aSPandiyan, Dhinakaran 	return INTEL_GEN(dev_priv) >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER;
322b900b949SImre Deak }
323b900b949SImre Deak 
324edbfdb45SPaulo Zanoni /**
325edbfdb45SPaulo Zanoni  * snb_update_pm_irq - update GEN6_PMIMR
326edbfdb45SPaulo Zanoni  * @dev_priv: driver private
327edbfdb45SPaulo Zanoni  * @interrupt_mask: mask of interrupt bits to update
328edbfdb45SPaulo Zanoni  * @enabled_irq_mask: mask of interrupt bits to enable
329edbfdb45SPaulo Zanoni  */
330edbfdb45SPaulo Zanoni static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
331edbfdb45SPaulo Zanoni 			      uint32_t interrupt_mask,
332edbfdb45SPaulo Zanoni 			      uint32_t enabled_irq_mask)
333edbfdb45SPaulo Zanoni {
334605cd25bSPaulo Zanoni 	uint32_t new_val;
335edbfdb45SPaulo Zanoni 
33615a17aaeSDaniel Vetter 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
33715a17aaeSDaniel Vetter 
33867520415SChris Wilson 	lockdep_assert_held(&dev_priv->irq_lock);
339edbfdb45SPaulo Zanoni 
340f4e9af4fSAkash Goel 	new_val = dev_priv->pm_imr;
341f52ecbcfSPaulo Zanoni 	new_val &= ~interrupt_mask;
342f52ecbcfSPaulo Zanoni 	new_val |= (~enabled_irq_mask & interrupt_mask);
343f52ecbcfSPaulo Zanoni 
344f4e9af4fSAkash Goel 	if (new_val != dev_priv->pm_imr) {
345f4e9af4fSAkash Goel 		dev_priv->pm_imr = new_val;
346f4e9af4fSAkash Goel 		I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_imr);
347a72fbc3aSImre Deak 		POSTING_READ(gen6_pm_imr(dev_priv));
348edbfdb45SPaulo Zanoni 	}
349f52ecbcfSPaulo Zanoni }
350edbfdb45SPaulo Zanoni 
351f4e9af4fSAkash Goel void gen6_unmask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
352edbfdb45SPaulo Zanoni {
3539939fba2SImre Deak 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
3549939fba2SImre Deak 		return;
3559939fba2SImre Deak 
356edbfdb45SPaulo Zanoni 	snb_update_pm_irq(dev_priv, mask, mask);
357edbfdb45SPaulo Zanoni }
358edbfdb45SPaulo Zanoni 
359f4e9af4fSAkash Goel static void __gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
3609939fba2SImre Deak {
3619939fba2SImre Deak 	snb_update_pm_irq(dev_priv, mask, 0);
3629939fba2SImre Deak }
3639939fba2SImre Deak 
364f4e9af4fSAkash Goel void gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
365edbfdb45SPaulo Zanoni {
3669939fba2SImre Deak 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
3679939fba2SImre Deak 		return;
3689939fba2SImre Deak 
369f4e9af4fSAkash Goel 	__gen6_mask_pm_irq(dev_priv, mask);
370f4e9af4fSAkash Goel }
371f4e9af4fSAkash Goel 
3723814fd77SOscar Mateo static void gen6_reset_pm_iir(struct drm_i915_private *dev_priv, u32 reset_mask)
373f4e9af4fSAkash Goel {
374f4e9af4fSAkash Goel 	i915_reg_t reg = gen6_pm_iir(dev_priv);
375f4e9af4fSAkash Goel 
37667520415SChris Wilson 	lockdep_assert_held(&dev_priv->irq_lock);
377f4e9af4fSAkash Goel 
378f4e9af4fSAkash Goel 	I915_WRITE(reg, reset_mask);
379f4e9af4fSAkash Goel 	I915_WRITE(reg, reset_mask);
380f4e9af4fSAkash Goel 	POSTING_READ(reg);
381f4e9af4fSAkash Goel }
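/*
 * The double IIR write above mirrors the "IIR can theoretically queue
 * up two events" paranoia of the reset macros at the top of this file.
 */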
382f4e9af4fSAkash Goel 
3833814fd77SOscar Mateo static void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, u32 enable_mask)
384f4e9af4fSAkash Goel {
38567520415SChris Wilson 	lockdep_assert_held(&dev_priv->irq_lock);
386f4e9af4fSAkash Goel 
387f4e9af4fSAkash Goel 	dev_priv->pm_ier |= enable_mask;
388f4e9af4fSAkash Goel 	I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->pm_ier);
389f4e9af4fSAkash Goel 	gen6_unmask_pm_irq(dev_priv, enable_mask);
390f4e9af4fSAkash Goel 	/* unmask_pm_irq provides an implicit barrier (POSTING_READ) */
391f4e9af4fSAkash Goel }
392f4e9af4fSAkash Goel 
3933814fd77SOscar Mateo static void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, u32 disable_mask)
394f4e9af4fSAkash Goel {
39567520415SChris Wilson 	lockdep_assert_held(&dev_priv->irq_lock);
396f4e9af4fSAkash Goel 
397f4e9af4fSAkash Goel 	dev_priv->pm_ier &= ~disable_mask;
398f4e9af4fSAkash Goel 	__gen6_mask_pm_irq(dev_priv, disable_mask);
399f4e9af4fSAkash Goel 	I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->pm_ier);
400f4e9af4fSAkash Goel 	/* though a barrier is missing here, we don't really need one */
401edbfdb45SPaulo Zanoni }
402edbfdb45SPaulo Zanoni 
403dc97997aSChris Wilson void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
4043cc134e3SImre Deak {
4053cc134e3SImre Deak 	spin_lock_irq(&dev_priv->irq_lock);
406f4e9af4fSAkash Goel 	gen6_reset_pm_iir(dev_priv, dev_priv->pm_rps_events);
407562d9baeSSagar Arun Kamble 	dev_priv->gt_pm.rps.pm_iir = 0;
4083cc134e3SImre Deak 	spin_unlock_irq(&dev_priv->irq_lock);
4093cc134e3SImre Deak }
4103cc134e3SImre Deak 
41191d14251STvrtko Ursulin void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
412b900b949SImre Deak {
413562d9baeSSagar Arun Kamble 	struct intel_rps *rps = &dev_priv->gt_pm.rps;
414562d9baeSSagar Arun Kamble 
415562d9baeSSagar Arun Kamble 	if (READ_ONCE(rps->interrupts_enabled))
416f2a91d1aSChris Wilson 		return;
417f2a91d1aSChris Wilson 
418b900b949SImre Deak 	spin_lock_irq(&dev_priv->irq_lock);
419562d9baeSSagar Arun Kamble 	WARN_ON_ONCE(rps->pm_iir);
420c33d247dSChris Wilson 	WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
421562d9baeSSagar Arun Kamble 	rps->interrupts_enabled = true;
422b900b949SImre Deak 	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
42378e68d36SImre Deak 
424b900b949SImre Deak 	spin_unlock_irq(&dev_priv->irq_lock);
425b900b949SImre Deak }
426b900b949SImre Deak 
42791d14251STvrtko Ursulin void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
428b900b949SImre Deak {
429562d9baeSSagar Arun Kamble 	struct intel_rps *rps = &dev_priv->gt_pm.rps;
430562d9baeSSagar Arun Kamble 
431562d9baeSSagar Arun Kamble 	if (!READ_ONCE(rps->interrupts_enabled))
432f2a91d1aSChris Wilson 		return;
433f2a91d1aSChris Wilson 
434d4d70aa5SImre Deak 	spin_lock_irq(&dev_priv->irq_lock);
435562d9baeSSagar Arun Kamble 	rps->interrupts_enabled = false;
4369939fba2SImre Deak 
437b20e3cfeSDave Gordon 	I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0u));
4389939fba2SImre Deak 
439f4e9af4fSAkash Goel 	gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
44058072ccbSImre Deak 
44158072ccbSImre Deak 	spin_unlock_irq(&dev_priv->irq_lock);
44291c8a326SChris Wilson 	synchronize_irq(dev_priv->drm.irq);
443c33d247dSChris Wilson 
444c33d247dSChris Wilson 	/* Now that we will not be generating any more work, flush any
4453814fd77SOscar Mateo 	 * outstanding tasks. As we are called on the RPS idle path,
446c33d247dSChris Wilson 	 * we will reset the GPU to minimum frequencies, so the current
447c33d247dSChris Wilson 	 * state of the worker can be discarded.
448c33d247dSChris Wilson 	 */
449562d9baeSSagar Arun Kamble 	cancel_work_sync(&rps->work);
450c33d247dSChris Wilson 	gen6_reset_rps_interrupts(dev_priv);
451b900b949SImre Deak }
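/*
 * Teardown ordering note (illustrative): the sequence above first clears
 * interrupts_enabled so the IRQ handler stops queuing new work, masks
 * all RPS events in PMINTRMSK and the PM IMR/IER, drops the lock and
 * synchronize_irq()s so any handler already running has finished, and
 * only then cancels the worker and clears the stale IIR bits.
 */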
452b900b949SImre Deak 
45326705e20SSagar Arun Kamble void gen9_reset_guc_interrupts(struct drm_i915_private *dev_priv)
45426705e20SSagar Arun Kamble {
45526705e20SSagar Arun Kamble 	spin_lock_irq(&dev_priv->irq_lock);
45626705e20SSagar Arun Kamble 	gen6_reset_pm_iir(dev_priv, dev_priv->pm_guc_events);
45726705e20SSagar Arun Kamble 	spin_unlock_irq(&dev_priv->irq_lock);
45826705e20SSagar Arun Kamble }
45926705e20SSagar Arun Kamble 
46026705e20SSagar Arun Kamble void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv)
46126705e20SSagar Arun Kamble {
46226705e20SSagar Arun Kamble 	spin_lock_irq(&dev_priv->irq_lock);
46326705e20SSagar Arun Kamble 	if (!dev_priv->guc.interrupts_enabled) {
46426705e20SSagar Arun Kamble 		WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) &
46526705e20SSagar Arun Kamble 				       dev_priv->pm_guc_events);
46626705e20SSagar Arun Kamble 		dev_priv->guc.interrupts_enabled = true;
46726705e20SSagar Arun Kamble 		gen6_enable_pm_irq(dev_priv, dev_priv->pm_guc_events);
46826705e20SSagar Arun Kamble 	}
46926705e20SSagar Arun Kamble 	spin_unlock_irq(&dev_priv->irq_lock);
47026705e20SSagar Arun Kamble }
47126705e20SSagar Arun Kamble 
47226705e20SSagar Arun Kamble void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv)
47326705e20SSagar Arun Kamble {
47426705e20SSagar Arun Kamble 	spin_lock_irq(&dev_priv->irq_lock);
47526705e20SSagar Arun Kamble 	dev_priv->guc.interrupts_enabled = false;
47626705e20SSagar Arun Kamble 
47726705e20SSagar Arun Kamble 	gen6_disable_pm_irq(dev_priv, dev_priv->pm_guc_events);
47826705e20SSagar Arun Kamble 
47926705e20SSagar Arun Kamble 	spin_unlock_irq(&dev_priv->irq_lock);
48026705e20SSagar Arun Kamble 	synchronize_irq(dev_priv->drm.irq);
48126705e20SSagar Arun Kamble 
48226705e20SSagar Arun Kamble 	gen9_reset_guc_interrupts(dev_priv);
48326705e20SSagar Arun Kamble }
48426705e20SSagar Arun Kamble 
4850961021aSBen Widawsky /**
4863a3b3c7dSVille Syrjälä  * bdw_update_port_irq - update DE port interrupt
4873a3b3c7dSVille Syrjälä  * @dev_priv: driver private
4883a3b3c7dSVille Syrjälä  * @interrupt_mask: mask of interrupt bits to update
4893a3b3c7dSVille Syrjälä  * @enabled_irq_mask: mask of interrupt bits to enable
4903a3b3c7dSVille Syrjälä  */
4913a3b3c7dSVille Syrjälä static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
4923a3b3c7dSVille Syrjälä 				uint32_t interrupt_mask,
4933a3b3c7dSVille Syrjälä 				uint32_t enabled_irq_mask)
4943a3b3c7dSVille Syrjälä {
4953a3b3c7dSVille Syrjälä 	uint32_t new_val;
4963a3b3c7dSVille Syrjälä 	uint32_t old_val;
4973a3b3c7dSVille Syrjälä 
49867520415SChris Wilson 	lockdep_assert_held(&dev_priv->irq_lock);
4993a3b3c7dSVille Syrjälä 
5003a3b3c7dSVille Syrjälä 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
5013a3b3c7dSVille Syrjälä 
5023a3b3c7dSVille Syrjälä 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
5033a3b3c7dSVille Syrjälä 		return;
5043a3b3c7dSVille Syrjälä 
5053a3b3c7dSVille Syrjälä 	old_val = I915_READ(GEN8_DE_PORT_IMR);
5063a3b3c7dSVille Syrjälä 
5073a3b3c7dSVille Syrjälä 	new_val = old_val;
5083a3b3c7dSVille Syrjälä 	new_val &= ~interrupt_mask;
5093a3b3c7dSVille Syrjälä 	new_val |= (~enabled_irq_mask & interrupt_mask);
5103a3b3c7dSVille Syrjälä 
5113a3b3c7dSVille Syrjälä 	if (new_val != old_val) {
5123a3b3c7dSVille Syrjälä 		I915_WRITE(GEN8_DE_PORT_IMR, new_val);
5133a3b3c7dSVille Syrjälä 		POSTING_READ(GEN8_DE_PORT_IMR);
5143a3b3c7dSVille Syrjälä 	}
5153a3b3c7dSVille Syrjälä }
5163a3b3c7dSVille Syrjälä 
5173a3b3c7dSVille Syrjälä /**
518013d3752SVille Syrjälä  * bdw_update_pipe_irq - update DE pipe interrupt
519013d3752SVille Syrjälä  * @dev_priv: driver private
520013d3752SVille Syrjälä  * @pipe: pipe whose interrupt to update
521013d3752SVille Syrjälä  * @interrupt_mask: mask of interrupt bits to update
522013d3752SVille Syrjälä  * @enabled_irq_mask: mask of interrupt bits to enable
523013d3752SVille Syrjälä  */
524013d3752SVille Syrjälä void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
525013d3752SVille Syrjälä 			 enum pipe pipe,
526013d3752SVille Syrjälä 			 uint32_t interrupt_mask,
527013d3752SVille Syrjälä 			 uint32_t enabled_irq_mask)
528013d3752SVille Syrjälä {
529013d3752SVille Syrjälä 	uint32_t new_val;
530013d3752SVille Syrjälä 
53167520415SChris Wilson 	lockdep_assert_held(&dev_priv->irq_lock);
532013d3752SVille Syrjälä 
533013d3752SVille Syrjälä 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
534013d3752SVille Syrjälä 
535013d3752SVille Syrjälä 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
536013d3752SVille Syrjälä 		return;
537013d3752SVille Syrjälä 
538013d3752SVille Syrjälä 	new_val = dev_priv->de_irq_mask[pipe];
539013d3752SVille Syrjälä 	new_val &= ~interrupt_mask;
540013d3752SVille Syrjälä 	new_val |= (~enabled_irq_mask & interrupt_mask);
541013d3752SVille Syrjälä 
542013d3752SVille Syrjälä 	if (new_val != dev_priv->de_irq_mask[pipe]) {
543013d3752SVille Syrjälä 		dev_priv->de_irq_mask[pipe] = new_val;
544013d3752SVille Syrjälä 		I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
545013d3752SVille Syrjälä 		POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
546013d3752SVille Syrjälä 	}
547013d3752SVille Syrjälä }
548013d3752SVille Syrjälä 
549013d3752SVille Syrjälä /**
550fee884edSDaniel Vetter  * ibx_display_interrupt_update - update SDEIMR
551fee884edSDaniel Vetter  * @dev_priv: driver private
552fee884edSDaniel Vetter  * @interrupt_mask: mask of interrupt bits to update
553fee884edSDaniel Vetter  * @enabled_irq_mask: mask of interrupt bits to enable
554fee884edSDaniel Vetter  */
55547339cd9SDaniel Vetter void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
556fee884edSDaniel Vetter 				  uint32_t interrupt_mask,
557fee884edSDaniel Vetter 				  uint32_t enabled_irq_mask)
558fee884edSDaniel Vetter {
559fee884edSDaniel Vetter 	uint32_t sdeimr = I915_READ(SDEIMR);
560fee884edSDaniel Vetter 	sdeimr &= ~interrupt_mask;
561fee884edSDaniel Vetter 	sdeimr |= (~enabled_irq_mask & interrupt_mask);
562fee884edSDaniel Vetter 
56315a17aaeSDaniel Vetter 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
56415a17aaeSDaniel Vetter 
56567520415SChris Wilson 	lockdep_assert_held(&dev_priv->irq_lock);
566fee884edSDaniel Vetter 
5679df7575fSJesse Barnes 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
568c67a470bSPaulo Zanoni 		return;
569c67a470bSPaulo Zanoni 
570fee884edSDaniel Vetter 	I915_WRITE(SDEIMR, sdeimr);
571fee884edSDaniel Vetter 	POSTING_READ(SDEIMR);
572fee884edSDaniel Vetter }
5738664281bSPaulo Zanoni 
5746b12ca56SVille Syrjälä u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
5756b12ca56SVille Syrjälä 			      enum pipe pipe)
5767c463586SKeith Packard {
5776b12ca56SVille Syrjälä 	u32 status_mask = dev_priv->pipestat_irq_mask[pipe];
57810c59c51SImre Deak 	u32 enable_mask = status_mask << 16;
57910c59c51SImre Deak 
5806b12ca56SVille Syrjälä 	lockdep_assert_held(&dev_priv->irq_lock);
5816b12ca56SVille Syrjälä 
5826b12ca56SVille Syrjälä 	if (INTEL_GEN(dev_priv) < 5)
5836b12ca56SVille Syrjälä 		goto out;
5846b12ca56SVille Syrjälä 
58510c59c51SImre Deak 	/*
586724a6905SVille Syrjälä 	 * On pipe A we don't support the PSR interrupt yet,
587724a6905SVille Syrjälä 	 * on pipe B and C the same bit is MBZ (must be zero).
58810c59c51SImre Deak 	 */
58910c59c51SImre Deak 	if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
59010c59c51SImre Deak 		return 0;
591724a6905SVille Syrjälä 	/*
592724a6905SVille Syrjälä 	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
593724a6905SVille Syrjälä 	 * A the same bit is for perf counters which we don't use either.
594724a6905SVille Syrjälä 	 */
595724a6905SVille Syrjälä 	if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
596724a6905SVille Syrjälä 		return 0;
59710c59c51SImre Deak 
59810c59c51SImre Deak 	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
59910c59c51SImre Deak 			 SPRITE0_FLIP_DONE_INT_EN_VLV |
60010c59c51SImre Deak 			 SPRITE1_FLIP_DONE_INT_EN_VLV);
60110c59c51SImre Deak 	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
60210c59c51SImre Deak 		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
60310c59c51SImre Deak 	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
60410c59c51SImre Deak 		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;
60510c59c51SImre Deak 
6066b12ca56SVille Syrjälä out:
6076b12ca56SVille Syrjälä 	WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
6086b12ca56SVille Syrjälä 		  status_mask & ~PIPESTAT_INT_STATUS_MASK,
6096b12ca56SVille Syrjälä 		  "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
6106b12ca56SVille Syrjälä 		  pipe_name(pipe), enable_mask, status_mask);
6116b12ca56SVille Syrjälä 
61210c59c51SImre Deak 	return enable_mask;
61310c59c51SImre Deak }
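/*
 * Illustrative example: PIPESTAT keeps the status bits in the low half
 * of the register and the matching enable bits 16 positions higher,
 * which is why the common case above is simply status_mask << 16
 * (e.g. PIPE_VBLANK_INTERRUPT_STATUS pairs with
 * PIPE_VBLANK_INTERRUPT_ENABLE).  The sprite flip-done, FIFO underrun
 * and PSR bits handled explicitly above are the exceptions to that
 * pattern.
 */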
61410c59c51SImre Deak 
6156b12ca56SVille Syrjälä void i915_enable_pipestat(struct drm_i915_private *dev_priv,
6166b12ca56SVille Syrjälä 			  enum pipe pipe, u32 status_mask)
617755e9019SImre Deak {
6186b12ca56SVille Syrjälä 	i915_reg_t reg = PIPESTAT(pipe);
619755e9019SImre Deak 	u32 enable_mask;
620755e9019SImre Deak 
6216b12ca56SVille Syrjälä 	WARN_ONCE(status_mask & ~PIPESTAT_INT_STATUS_MASK,
6226b12ca56SVille Syrjälä 		  "pipe %c: status_mask=0x%x\n",
6236b12ca56SVille Syrjälä 		  pipe_name(pipe), status_mask);
6246b12ca56SVille Syrjälä 
6256b12ca56SVille Syrjälä 	lockdep_assert_held(&dev_priv->irq_lock);
6266b12ca56SVille Syrjälä 	WARN_ON(!intel_irqs_enabled(dev_priv));
6276b12ca56SVille Syrjälä 
6286b12ca56SVille Syrjälä 	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == status_mask)
6296b12ca56SVille Syrjälä 		return;
6306b12ca56SVille Syrjälä 
6316b12ca56SVille Syrjälä 	dev_priv->pipestat_irq_mask[pipe] |= status_mask;
6326b12ca56SVille Syrjälä 	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
6336b12ca56SVille Syrjälä 
6346b12ca56SVille Syrjälä 	I915_WRITE(reg, enable_mask | status_mask);
6356b12ca56SVille Syrjälä 	POSTING_READ(reg);
636755e9019SImre Deak }
637755e9019SImre Deak 
6386b12ca56SVille Syrjälä void i915_disable_pipestat(struct drm_i915_private *dev_priv,
6396b12ca56SVille Syrjälä 			   enum pipe pipe, u32 status_mask)
640755e9019SImre Deak {
6416b12ca56SVille Syrjälä 	i915_reg_t reg = PIPESTAT(pipe);
642755e9019SImre Deak 	u32 enable_mask;
643755e9019SImre Deak 
6446b12ca56SVille Syrjälä 	WARN_ONCE(status_mask & ~PIPESTAT_INT_STATUS_MASK,
6456b12ca56SVille Syrjälä 		  "pipe %c: status_mask=0x%x\n",
6466b12ca56SVille Syrjälä 		  pipe_name(pipe), status_mask);
6476b12ca56SVille Syrjälä 
6486b12ca56SVille Syrjälä 	lockdep_assert_held(&dev_priv->irq_lock);
6496b12ca56SVille Syrjälä 	WARN_ON(!intel_irqs_enabled(dev_priv));
6506b12ca56SVille Syrjälä 
6516b12ca56SVille Syrjälä 	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == 0)
6526b12ca56SVille Syrjälä 		return;
6536b12ca56SVille Syrjälä 
6546b12ca56SVille Syrjälä 	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
6556b12ca56SVille Syrjälä 	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
6566b12ca56SVille Syrjälä 
6576b12ca56SVille Syrjälä 	I915_WRITE(reg, enable_mask | status_mask);
6586b12ca56SVille Syrjälä 	POSTING_READ(reg);
659755e9019SImre Deak }
660755e9019SImre Deak 
661c0e09200SDave Airlie /**
662f49e38ddSJani Nikula  * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
66314bb2c11STvrtko Ursulin  * @dev_priv: i915 device private
66401c66889SZhao Yakui  */
66591d14251STvrtko Ursulin static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
66601c66889SZhao Yakui {
66791d14251STvrtko Ursulin 	if (!dev_priv->opregion.asle || !IS_MOBILE(dev_priv))
668f49e38ddSJani Nikula 		return;
669f49e38ddSJani Nikula 
67013321786SDaniel Vetter 	spin_lock_irq(&dev_priv->irq_lock);
67101c66889SZhao Yakui 
672755e9019SImre Deak 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
67391d14251STvrtko Ursulin 	if (INTEL_GEN(dev_priv) >= 4)
6743b6c42e8SDaniel Vetter 		i915_enable_pipestat(dev_priv, PIPE_A,
675755e9019SImre Deak 				     PIPE_LEGACY_BLC_EVENT_STATUS);
6761ec14ad3SChris Wilson 
67713321786SDaniel Vetter 	spin_unlock_irq(&dev_priv->irq_lock);
67801c66889SZhao Yakui }
67901c66889SZhao Yakui 
680f75f3746SVille Syrjälä /*
681f75f3746SVille Syrjälä  * This timing diagram depicts the video signal in and
682f75f3746SVille Syrjälä  * around the vertical blanking period.
683f75f3746SVille Syrjälä  *
684f75f3746SVille Syrjälä  * Assumptions about the fictitious mode used in this example:
685f75f3746SVille Syrjälä  *  vblank_start >= 3
686f75f3746SVille Syrjälä  *  vsync_start = vblank_start + 1
687f75f3746SVille Syrjälä  *  vsync_end = vblank_start + 2
688f75f3746SVille Syrjälä  *  vtotal = vblank_start + 3
689f75f3746SVille Syrjälä  *
690f75f3746SVille Syrjälä  *           start of vblank:
691f75f3746SVille Syrjälä  *           latch double buffered registers
692f75f3746SVille Syrjälä  *           increment frame counter (ctg+)
693f75f3746SVille Syrjälä  *           generate start of vblank interrupt (gen4+)
694f75f3746SVille Syrjälä  *           |
695f75f3746SVille Syrjälä  *           |          frame start:
696f75f3746SVille Syrjälä  *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
697f75f3746SVille Syrjälä  *           |          may be shifted forward 1-3 extra lines via PIPECONF
698f75f3746SVille Syrjälä  *           |          |
699f75f3746SVille Syrjälä  *           |          |  start of vsync:
700f75f3746SVille Syrjälä  *           |          |  generate vsync interrupt
701f75f3746SVille Syrjälä  *           |          |  |
702f75f3746SVille Syrjälä  * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
703f75f3746SVille Syrjälä  *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
704f75f3746SVille Syrjälä  * ----va---> <-----------------vb--------------------> <--------va-------------
705f75f3746SVille Syrjälä  *       |          |       <----vs----->                     |
706f75f3746SVille Syrjälä  * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
707f75f3746SVille Syrjälä  * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
708f75f3746SVille Syrjälä  * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
709f75f3746SVille Syrjälä  *       |          |                                         |
710f75f3746SVille Syrjälä  *       last visible pixel                                   first visible pixel
711f75f3746SVille Syrjälä  *                  |                                         increment frame counter (gen3/4)
712f75f3746SVille Syrjälä  *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
713f75f3746SVille Syrjälä  *
714f75f3746SVille Syrjälä  * x  = horizontal active
715f75f3746SVille Syrjälä  * _  = horizontal blanking
716f75f3746SVille Syrjälä  * hs = horizontal sync
717f75f3746SVille Syrjälä  * va = vertical active
718f75f3746SVille Syrjälä  * vb = vertical blanking
719f75f3746SVille Syrjälä  * vs = vertical sync
720f75f3746SVille Syrjälä  * vbs = vblank_start (number)
721f75f3746SVille Syrjälä  *
722f75f3746SVille Syrjälä  * Summary:
723f75f3746SVille Syrjälä  * - most events happen at the start of horizontal sync
724f75f3746SVille Syrjälä  * - frame start happens at the start of horizontal blank, 1-4 lines
725f75f3746SVille Syrjälä  *   (depending on PIPECONF settings) after the start of vblank
726f75f3746SVille Syrjälä  * - gen3/4 pixel and frame counter are synchronized with the start
727f75f3746SVille Syrjälä  *   of horizontal active on the first line of vertical active
728f75f3746SVille Syrjälä  */
729f75f3746SVille Syrjälä 
73042f52ef8SKeith Packard /* Called from drm generic code, passed a 'crtc', which
73142f52ef8SKeith Packard  * we use as a pipe index
73242f52ef8SKeith Packard  */
73388e72717SThierry Reding static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
7340a3e67a4SJesse Barnes {
735fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
736f0f59a00SVille Syrjälä 	i915_reg_t high_frame, low_frame;
7370b2a8e09SVille Syrjälä 	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
7385caa0feaSDaniel Vetter 	const struct drm_display_mode *mode = &dev->vblank[pipe].hwmode;
739694e409dSVille Syrjälä 	unsigned long irqflags;
740391f75e2SVille Syrjälä 
7410b2a8e09SVille Syrjälä 	htotal = mode->crtc_htotal;
7420b2a8e09SVille Syrjälä 	hsync_start = mode->crtc_hsync_start;
7430b2a8e09SVille Syrjälä 	vbl_start = mode->crtc_vblank_start;
7440b2a8e09SVille Syrjälä 	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
7450b2a8e09SVille Syrjälä 		vbl_start = DIV_ROUND_UP(vbl_start, 2);
746391f75e2SVille Syrjälä 
7470b2a8e09SVille Syrjälä 	/* Convert to pixel count */
7480b2a8e09SVille Syrjälä 	vbl_start *= htotal;
7490b2a8e09SVille Syrjälä 
7500b2a8e09SVille Syrjälä 	/* Start of vblank event occurs at start of hsync */
7510b2a8e09SVille Syrjälä 	vbl_start -= htotal - hsync_start;
7520b2a8e09SVille Syrjälä 
7539db4a9c7SJesse Barnes 	high_frame = PIPEFRAME(pipe);
7549db4a9c7SJesse Barnes 	low_frame = PIPEFRAMEPIXEL(pipe);
7555eddb70bSChris Wilson 
756694e409dSVille Syrjälä 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
757694e409dSVille Syrjälä 
7580a3e67a4SJesse Barnes 	/*
7590a3e67a4SJesse Barnes 	 * High & low register fields aren't synchronized, so make sure
7600a3e67a4SJesse Barnes 	 * we get a low value that's stable across two reads of the high
7610a3e67a4SJesse Barnes 	 * register.
7620a3e67a4SJesse Barnes 	 */
7630a3e67a4SJesse Barnes 	do {
764694e409dSVille Syrjälä 		high1 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK;
765694e409dSVille Syrjälä 		low   = I915_READ_FW(low_frame);
766694e409dSVille Syrjälä 		high2 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK;
7670a3e67a4SJesse Barnes 	} while (high1 != high2);
7680a3e67a4SJesse Barnes 
769694e409dSVille Syrjälä 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
770694e409dSVille Syrjälä 
7715eddb70bSChris Wilson 	high1 >>= PIPE_FRAME_HIGH_SHIFT;
772391f75e2SVille Syrjälä 	pixel = low & PIPE_PIXEL_MASK;
7735eddb70bSChris Wilson 	low >>= PIPE_FRAME_LOW_SHIFT;
774391f75e2SVille Syrjälä 
775391f75e2SVille Syrjälä 	/*
776391f75e2SVille Syrjälä 	 * The frame counter increments at beginning of active.
777391f75e2SVille Syrjälä 	 * Cook up a vblank counter by also checking the pixel
778391f75e2SVille Syrjälä 	 * counter against vblank start.
779391f75e2SVille Syrjälä 	 */
780edc08d0aSVille Syrjälä 	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
7810a3e67a4SJesse Barnes }
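/*
 * Illustrative summary of the cooked counter above: the value returned
 * is
 *
 *	(((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff
 *
 * i.e. the hardware frame counter, bumped by one as soon as the pixel
 * counter has passed vblank start, because the hardware counter itself
 * only increments at the beginning of the next active period (see the
 * timing diagram above).
 */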
7820a3e67a4SJesse Barnes 
783974e59baSDave Airlie static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
7849880b7a5SJesse Barnes {
785fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
7869880b7a5SJesse Barnes 
787649636efSVille Syrjälä 	return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
7889880b7a5SJesse Barnes }
7899880b7a5SJesse Barnes 
790aec0246fSUma Shankar /*
791aec0246fSUma Shankar  * On certain encoders on certain platforms, pipe
792aec0246fSUma Shankar  * scanline register will not work to get the scanline,
793aec0246fSUma Shankar  * since the timings are driven from the PORT or issues
794aec0246fSUma Shankar  * with scanline register updates.
795aec0246fSUma Shankar  * This function will use Framestamp and current
796aec0246fSUma Shankar  * timestamp registers to calculate the scanline.
797aec0246fSUma Shankar  */
798aec0246fSUma Shankar static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc)
799aec0246fSUma Shankar {
800aec0246fSUma Shankar 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
801aec0246fSUma Shankar 	struct drm_vblank_crtc *vblank =
802aec0246fSUma Shankar 		&crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
803aec0246fSUma Shankar 	const struct drm_display_mode *mode = &vblank->hwmode;
804aec0246fSUma Shankar 	u32 vblank_start = mode->crtc_vblank_start;
805aec0246fSUma Shankar 	u32 vtotal = mode->crtc_vtotal;
806aec0246fSUma Shankar 	u32 htotal = mode->crtc_htotal;
807aec0246fSUma Shankar 	u32 clock = mode->crtc_clock;
808aec0246fSUma Shankar 	u32 scanline, scan_prev_time, scan_curr_time, scan_post_time;
809aec0246fSUma Shankar 
810aec0246fSUma Shankar 	/*
811aec0246fSUma Shankar 	 * To avoid the race condition where we might cross into the
812aec0246fSUma Shankar 	 * next vblank just between the PIPE_FRMTMSTMP and TIMESTAMP_CTR
813aec0246fSUma Shankar 	 * reads, we make sure we read PIPE_FRMTMSTMP and TIMESTAMP_CTR
814aec0246fSUma Shankar 	 * during the same frame.
815aec0246fSUma Shankar 	 */
816aec0246fSUma Shankar 	do {
817aec0246fSUma Shankar 		/*
818aec0246fSUma Shankar 		 * This field provides read back of the display
819aec0246fSUma Shankar 		 * pipe frame time stamp. The time stamp value
820aec0246fSUma Shankar 		 * is sampled at every start of vertical blank.
821aec0246fSUma Shankar 		 */
822aec0246fSUma Shankar 		scan_prev_time = I915_READ_FW(PIPE_FRMTMSTMP(crtc->pipe));
823aec0246fSUma Shankar 
824aec0246fSUma Shankar 		/*
825aec0246fSUma Shankar 		 * The TIMESTAMP_CTR register has the current
826aec0246fSUma Shankar 		 * time stamp value.
827aec0246fSUma Shankar 		 */
828aec0246fSUma Shankar 		scan_curr_time = I915_READ_FW(IVB_TIMESTAMP_CTR);
829aec0246fSUma Shankar 
830aec0246fSUma Shankar 		scan_post_time = I915_READ_FW(PIPE_FRMTMSTMP(crtc->pipe));
831aec0246fSUma Shankar 	} while (scan_post_time != scan_prev_time);
832aec0246fSUma Shankar 
833aec0246fSUma Shankar 	scanline = div_u64(mul_u32_u32(scan_curr_time - scan_prev_time,
834aec0246fSUma Shankar 					clock), 1000 * htotal);
835aec0246fSUma Shankar 	scanline = min(scanline, vtotal - 1);
836aec0246fSUma Shankar 	scanline = (scanline + vblank_start) % vtotal;
837aec0246fSUma Shankar 
838aec0246fSUma Shankar 	return scanline;
839aec0246fSUma Shankar }
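/*
 * Illustrative arithmetic for the conversion above (assuming the frame
 * and global timestamp counters tick once per microsecond, which is
 * what the 1000 * htotal divisor implies): with crtc_clock in kHz,
 *
 *	pixels since vblank start = delta_us * clock / 1000
 *	scanline = pixels / htotal, clamped to vtotal - 1
 *
 * and the final modulo rebases the result so that scanline 0 is the
 * first active line rather than vblank_start.
 */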
840aec0246fSUma Shankar 
84175aa3f63SVille Syrjälä /* I915_READ_FW, only for fast reads of display block, no need for forcewake etc. */
842a225f079SVille Syrjälä static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
843a225f079SVille Syrjälä {
844a225f079SVille Syrjälä 	struct drm_device *dev = crtc->base.dev;
845fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
8465caa0feaSDaniel Vetter 	const struct drm_display_mode *mode;
8475caa0feaSDaniel Vetter 	struct drm_vblank_crtc *vblank;
848a225f079SVille Syrjälä 	enum pipe pipe = crtc->pipe;
84980715b2fSVille Syrjälä 	int position, vtotal;
850a225f079SVille Syrjälä 
85172259536SVille Syrjälä 	if (!crtc->active)
85272259536SVille Syrjälä 		return -1;
85372259536SVille Syrjälä 
8545caa0feaSDaniel Vetter 	vblank = &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
8555caa0feaSDaniel Vetter 	mode = &vblank->hwmode;
8565caa0feaSDaniel Vetter 
857aec0246fSUma Shankar 	if (mode->private_flags & I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP)
858aec0246fSUma Shankar 		return __intel_get_crtc_scanline_from_timestamp(crtc);
859aec0246fSUma Shankar 
86080715b2fSVille Syrjälä 	vtotal = mode->crtc_vtotal;
861a225f079SVille Syrjälä 	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
862a225f079SVille Syrjälä 		vtotal /= 2;
863a225f079SVille Syrjälä 
86491d14251STvrtko Ursulin 	if (IS_GEN2(dev_priv))
86575aa3f63SVille Syrjälä 		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
866a225f079SVille Syrjälä 	else
86775aa3f63SVille Syrjälä 		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
868a225f079SVille Syrjälä 
869a225f079SVille Syrjälä 	/*
87041b578fbSJesse Barnes 	 * On HSW, the DSL reg (0x70000) appears to return 0 if we
87141b578fbSJesse Barnes 	 * read it just before the start of vblank.  So try it again
87241b578fbSJesse Barnes 	 * so we don't accidentally end up spanning a vblank frame
87341b578fbSJesse Barnes 	 * increment, causing the pipe_update_end() code to squawk at us.
87441b578fbSJesse Barnes 	 *
87541b578fbSJesse Barnes 	 * The nature of this problem means we can't simply check the ISR
87641b578fbSJesse Barnes 	 * bit and return the vblank start value; nor can we use the scanline
87741b578fbSJesse Barnes 	 * debug register in the transcoder as it appears to have the same
87841b578fbSJesse Barnes 	 * problem.  We may need to extend this to include other platforms,
87941b578fbSJesse Barnes 	 * but so far testing only shows the problem on HSW.
88041b578fbSJesse Barnes 	 */
88191d14251STvrtko Ursulin 	if (HAS_DDI(dev_priv) && !position) {
88241b578fbSJesse Barnes 		int i, temp;
88341b578fbSJesse Barnes 
88441b578fbSJesse Barnes 		for (i = 0; i < 100; i++) {
88541b578fbSJesse Barnes 			udelay(1);
886707bdd3fSVille Syrjälä 			temp = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
88741b578fbSJesse Barnes 			if (temp != position) {
88841b578fbSJesse Barnes 				position = temp;
88941b578fbSJesse Barnes 				break;
89041b578fbSJesse Barnes 			}
89141b578fbSJesse Barnes 		}
89241b578fbSJesse Barnes 	}
89341b578fbSJesse Barnes 
89441b578fbSJesse Barnes 	/*
89580715b2fSVille Syrjälä 	 * See update_scanline_offset() for the details on the
89680715b2fSVille Syrjälä 	 * scanline_offset adjustment.
897a225f079SVille Syrjälä 	 */
89880715b2fSVille Syrjälä 	return (position + crtc->scanline_offset) % vtotal;
899a225f079SVille Syrjälä }
900a225f079SVille Syrjälä 
9011bf6ad62SDaniel Vetter static bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
9021bf6ad62SDaniel Vetter 				     bool in_vblank_irq, int *vpos, int *hpos,
9033bb403bfSVille Syrjälä 				     ktime_t *stime, ktime_t *etime,
9043bb403bfSVille Syrjälä 				     const struct drm_display_mode *mode)
9050af7e4dfSMario Kleiner {
906fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
90798187836SVille Syrjälä 	struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv,
90898187836SVille Syrjälä 								pipe);
9093aa18df8SVille Syrjälä 	int position;
91078e8fc6bSVille Syrjälä 	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
911ad3543edSMario Kleiner 	unsigned long irqflags;
9120af7e4dfSMario Kleiner 
913fc467a22SMaarten Lankhorst 	if (WARN_ON(!mode->crtc_clock)) {
9140af7e4dfSMario Kleiner 		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
9159db4a9c7SJesse Barnes 				 "pipe %c\n", pipe_name(pipe));
9161bf6ad62SDaniel Vetter 		return false;
9170af7e4dfSMario Kleiner 	}
9180af7e4dfSMario Kleiner 
919c2baf4b7SVille Syrjälä 	htotal = mode->crtc_htotal;
92078e8fc6bSVille Syrjälä 	hsync_start = mode->crtc_hsync_start;
921c2baf4b7SVille Syrjälä 	vtotal = mode->crtc_vtotal;
922c2baf4b7SVille Syrjälä 	vbl_start = mode->crtc_vblank_start;
923c2baf4b7SVille Syrjälä 	vbl_end = mode->crtc_vblank_end;
9240af7e4dfSMario Kleiner 
925d31faf65SVille Syrjälä 	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
926d31faf65SVille Syrjälä 		vbl_start = DIV_ROUND_UP(vbl_start, 2);
927d31faf65SVille Syrjälä 		vbl_end /= 2;
928d31faf65SVille Syrjälä 		vtotal /= 2;
929d31faf65SVille Syrjälä 	}
930d31faf65SVille Syrjälä 
931ad3543edSMario Kleiner 	/*
932ad3543edSMario Kleiner 	 * Lock uncore.lock, as we will do multiple timing critical raw
933ad3543edSMario Kleiner 	 * register reads, potentially with preemption disabled, so the
934ad3543edSMario Kleiner 	 * following code must not block on uncore.lock.
935ad3543edSMario Kleiner 	 */
936ad3543edSMario Kleiner 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
937ad3543edSMario Kleiner 
938ad3543edSMario Kleiner 	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
939ad3543edSMario Kleiner 
940ad3543edSMario Kleiner 	/* Get optional system timestamp before query. */
941ad3543edSMario Kleiner 	if (stime)
942ad3543edSMario Kleiner 		*stime = ktime_get();
943ad3543edSMario Kleiner 
94491d14251STvrtko Ursulin 	if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
9450af7e4dfSMario Kleiner 		/* No obvious pixelcount register. Only query vertical
9460af7e4dfSMario Kleiner 		 * scanout position from Display scan line register.
9470af7e4dfSMario Kleiner 		 */
948a225f079SVille Syrjälä 		position = __intel_get_crtc_scanline(intel_crtc);
9490af7e4dfSMario Kleiner 	} else {
9500af7e4dfSMario Kleiner 		/* Have access to pixelcount since start of frame.
9510af7e4dfSMario Kleiner 		 * We can split this into vertical and horizontal
9520af7e4dfSMario Kleiner 		 * scanout position.
9530af7e4dfSMario Kleiner 		 */
95475aa3f63SVille Syrjälä 		position = (I915_READ_FW(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
9550af7e4dfSMario Kleiner 
9563aa18df8SVille Syrjälä 		/* convert to pixel counts */
9573aa18df8SVille Syrjälä 		vbl_start *= htotal;
9583aa18df8SVille Syrjälä 		vbl_end *= htotal;
9593aa18df8SVille Syrjälä 		vtotal *= htotal;
96078e8fc6bSVille Syrjälä 
96178e8fc6bSVille Syrjälä 		/*
9627e78f1cbSVille Syrjälä 		 * In interlaced modes, the pixel counter counts all pixels,
9637e78f1cbSVille Syrjälä 		 * so one field will have htotal more pixels. In order to avoid
9647e78f1cbSVille Syrjälä 		 * the reported position from jumping backwards when the pixel
9657e78f1cbSVille Syrjälä 		 * counter is beyond the length of the shorter field, just
9667e78f1cbSVille Syrjälä 		 * clamp the position the length of the shorter field. This
9677e78f1cbSVille Syrjälä 		 * matches how the scanline counter based position works since
9687e78f1cbSVille Syrjälä 		 * the scanline counter doesn't count the two half lines.
9697e78f1cbSVille Syrjälä 		 */
9707e78f1cbSVille Syrjälä 		if (position >= vtotal)
9717e78f1cbSVille Syrjälä 			position = vtotal - 1;
9727e78f1cbSVille Syrjälä 
9737e78f1cbSVille Syrjälä 		/*
97478e8fc6bSVille Syrjälä 		 * Start of vblank interrupt is triggered at start of hsync,
97578e8fc6bSVille Syrjälä 		 * just prior to the first active line of vblank. However we
97678e8fc6bSVille Syrjälä 		 * consider lines to start at the leading edge of horizontal
97778e8fc6bSVille Syrjälä 		 * active. So, should we get here before we've crossed into
97878e8fc6bSVille Syrjälä 		 * the horizontal active of the first line in vblank, we would
97978e8fc6bSVille Syrjälä 		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
98078e8fc6bSVille Syrjälä 		 * always add htotal-hsync_start to the current pixel position.
98178e8fc6bSVille Syrjälä 		 */
98278e8fc6bSVille Syrjälä 		position = (position + htotal - hsync_start) % vtotal;
9833aa18df8SVille Syrjälä 	}
9843aa18df8SVille Syrjälä 
985ad3543edSMario Kleiner 	/* Get optional system timestamp after query. */
986ad3543edSMario Kleiner 	if (etime)
987ad3543edSMario Kleiner 		*etime = ktime_get();
988ad3543edSMario Kleiner 
989ad3543edSMario Kleiner 	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
990ad3543edSMario Kleiner 
991ad3543edSMario Kleiner 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
992ad3543edSMario Kleiner 
9933aa18df8SVille Syrjälä 	/*
9943aa18df8SVille Syrjälä 	 * While in vblank, position will be negative
9953aa18df8SVille Syrjälä 	 * counting up towards 0 at vbl_end. And outside
9963aa18df8SVille Syrjälä 	 * vblank, position will be positive counting
9973aa18df8SVille Syrjälä 	 * up since vbl_end.
9983aa18df8SVille Syrjälä 	 */
9993aa18df8SVille Syrjälä 	if (position >= vbl_start)
10003aa18df8SVille Syrjälä 		position -= vbl_end;
10013aa18df8SVille Syrjälä 	else
10023aa18df8SVille Syrjälä 		position += vtotal - vbl_end;
10033aa18df8SVille Syrjälä 
100491d14251STvrtko Ursulin 	if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
10053aa18df8SVille Syrjälä 		*vpos = position;
10063aa18df8SVille Syrjälä 		*hpos = 0;
10073aa18df8SVille Syrjälä 	} else {
10080af7e4dfSMario Kleiner 		*vpos = position / htotal;
10090af7e4dfSMario Kleiner 		*hpos = position - (*vpos * htotal);
10100af7e4dfSMario Kleiner 	}
10110af7e4dfSMario Kleiner 
10121bf6ad62SDaniel Vetter 	return true;
10130af7e4dfSMario Kleiner }
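/*
 * Example of the coordinate convention returned above (illustrative,
 * using the scanline-based path): with vbl_start = 480 and
 * vbl_end = 492, a raw scanline of 485 (inside vblank) is reported as
 * *vpos = 485 - 492 = -7 and counts up towards 0 at vbl_end, while a
 * raw scanline of 100 (active) is reported as 100 + vtotal - 492,
 * counting up from the end of the previous vblank.
 */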
10140af7e4dfSMario Kleiner 
1015a225f079SVille Syrjälä int intel_get_crtc_scanline(struct intel_crtc *crtc)
1016a225f079SVille Syrjälä {
1017fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1018a225f079SVille Syrjälä 	unsigned long irqflags;
1019a225f079SVille Syrjälä 	int position;
1020a225f079SVille Syrjälä 
1021a225f079SVille Syrjälä 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
1022a225f079SVille Syrjälä 	position = __intel_get_crtc_scanline(crtc);
1023a225f079SVille Syrjälä 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
1024a225f079SVille Syrjälä 
1025a225f079SVille Syrjälä 	return position;
1026a225f079SVille Syrjälä }
1027a225f079SVille Syrjälä 
102891d14251STvrtko Ursulin static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv)
1029f97108d1SJesse Barnes {
1030b5b72e89SMatthew Garrett 	u32 busy_up, busy_down, max_avg, min_avg;
10319270388eSDaniel Vetter 	u8 new_delay;
10329270388eSDaniel Vetter 
1033d0ecd7e2SDaniel Vetter 	spin_lock(&mchdev_lock);
1034f97108d1SJesse Barnes 
103573edd18fSDaniel Vetter 	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
103673edd18fSDaniel Vetter 
103720e4d407SDaniel Vetter 	new_delay = dev_priv->ips.cur_delay;
10389270388eSDaniel Vetter 
10397648fa99SJesse Barnes 	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
1040b5b72e89SMatthew Garrett 	busy_up = I915_READ(RCPREVBSYTUPAVG);
1041b5b72e89SMatthew Garrett 	busy_down = I915_READ(RCPREVBSYTDNAVG);
1042f97108d1SJesse Barnes 	max_avg = I915_READ(RCBMAXAVG);
1043f97108d1SJesse Barnes 	min_avg = I915_READ(RCBMINAVG);
1044f97108d1SJesse Barnes 
1045f97108d1SJesse Barnes 	/* Handle RCS change request from hw */
1046b5b72e89SMatthew Garrett 	if (busy_up > max_avg) {
104720e4d407SDaniel Vetter 		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
104820e4d407SDaniel Vetter 			new_delay = dev_priv->ips.cur_delay - 1;
104920e4d407SDaniel Vetter 		if (new_delay < dev_priv->ips.max_delay)
105020e4d407SDaniel Vetter 			new_delay = dev_priv->ips.max_delay;
1051b5b72e89SMatthew Garrett 	} else if (busy_down < min_avg) {
105220e4d407SDaniel Vetter 		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
105320e4d407SDaniel Vetter 			new_delay = dev_priv->ips.cur_delay + 1;
105420e4d407SDaniel Vetter 		if (new_delay > dev_priv->ips.min_delay)
105520e4d407SDaniel Vetter 			new_delay = dev_priv->ips.min_delay;
1056f97108d1SJesse Barnes 	}
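	/*
	 * Note on the encoding implied by the clamping above: a numerically
	 * smaller delay value is the higher-performance setting, so
	 * ips.max_delay acts as the lower numeric bound when ramping up and
	 * ips.min_delay as the upper numeric bound when ramping down.
	 */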
1057f97108d1SJesse Barnes 
105891d14251STvrtko Ursulin 	if (ironlake_set_drps(dev_priv, new_delay))
105920e4d407SDaniel Vetter 		dev_priv->ips.cur_delay = new_delay;
1060f97108d1SJesse Barnes 
1061d0ecd7e2SDaniel Vetter 	spin_unlock(&mchdev_lock);
10629270388eSDaniel Vetter 
1063f97108d1SJesse Barnes 	return;
1064f97108d1SJesse Barnes }
1065f97108d1SJesse Barnes 
10660bc40be8STvrtko Ursulin static void notify_ring(struct intel_engine_cs *engine)
1067549f7365SChris Wilson {
106856299fb7SChris Wilson 	struct drm_i915_gem_request *rq = NULL;
106956299fb7SChris Wilson 	struct intel_wait *wait;
1070dffabc8fSTvrtko Ursulin 
10712246bea6SChris Wilson 	atomic_inc(&engine->irq_count);
1072538b257dSChris Wilson 	set_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);
107356299fb7SChris Wilson 
107461d3dc70SChris Wilson 	spin_lock(&engine->breadcrumbs.irq_lock);
107561d3dc70SChris Wilson 	wait = engine->breadcrumbs.irq_wait;
107656299fb7SChris Wilson 	if (wait) {
107717b51ad8SChris Wilson 		bool wakeup = engine->irq_seqno_barrier;
107817b51ad8SChris Wilson 
107956299fb7SChris Wilson 		/* We use a callback from the dma-fence to submit
108056299fb7SChris Wilson 		 * requests after waiting on our own requests. To
108156299fb7SChris Wilson 		 * ensure minimum delay in queuing the next request to
108256299fb7SChris Wilson 		 * hardware, signal the fence now rather than wait for
108356299fb7SChris Wilson 		 * the signaler to be woken up. We still wake up the
108456299fb7SChris Wilson 		 * waiter in order to handle the irq-seqno coherency
108556299fb7SChris Wilson 		 * issues (we may receive the interrupt before the
108656299fb7SChris Wilson 		 * seqno is written, see __i915_request_irq_complete())
108756299fb7SChris Wilson 		 * and to handle coalescing of multiple seqno updates
108856299fb7SChris Wilson 		 * and many waiters.
108956299fb7SChris Wilson 		 */
109056299fb7SChris Wilson 		if (i915_seqno_passed(intel_engine_get_seqno(engine),
109117b51ad8SChris Wilson 				      wait->seqno)) {
1092de4d2106SChris Wilson 			struct drm_i915_gem_request *waiter = wait->request;
1093de4d2106SChris Wilson 
109417b51ad8SChris Wilson 			wakeup = true;
109517b51ad8SChris Wilson 			if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
1096de4d2106SChris Wilson 				      &waiter->fence.flags) &&
1097de4d2106SChris Wilson 			    intel_wait_check_request(wait, waiter))
1098de4d2106SChris Wilson 				rq = i915_gem_request_get(waiter);
109917b51ad8SChris Wilson 		}
110056299fb7SChris Wilson 
110117b51ad8SChris Wilson 		if (wakeup)
110256299fb7SChris Wilson 			wake_up_process(wait->tsk);
110367b807a8SChris Wilson 	} else {
110467b807a8SChris Wilson 		__intel_engine_disarm_breadcrumbs(engine);
110556299fb7SChris Wilson 	}
110661d3dc70SChris Wilson 	spin_unlock(&engine->breadcrumbs.irq_lock);
110756299fb7SChris Wilson 
110824754d75SChris Wilson 	if (rq) {
110956299fb7SChris Wilson 		dma_fence_signal(&rq->fence);
111024754d75SChris Wilson 		i915_gem_request_put(rq);
111124754d75SChris Wilson 	}
111256299fb7SChris Wilson 
111356299fb7SChris Wilson 	trace_intel_engine_notify(engine, wait);
1114549f7365SChris Wilson }
1115549f7365SChris Wilson 
111643cf3bf0SChris Wilson static void vlv_c0_read(struct drm_i915_private *dev_priv,
111743cf3bf0SChris Wilson 			struct intel_rps_ei *ei)
111831685c25SDeepak S {
1119679cb6c1SMika Kuoppala 	ei->ktime = ktime_get_raw();
112043cf3bf0SChris Wilson 	ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT);
112143cf3bf0SChris Wilson 	ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
112231685c25SDeepak S }
112331685c25SDeepak S 
112443cf3bf0SChris Wilson void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
112543cf3bf0SChris Wilson {
1126562d9baeSSagar Arun Kamble 	memset(&dev_priv->gt_pm.rps.ei, 0, sizeof(dev_priv->gt_pm.rps.ei));
112743cf3bf0SChris Wilson }
112843cf3bf0SChris Wilson 
112943cf3bf0SChris Wilson static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
113043cf3bf0SChris Wilson {
1131562d9baeSSagar Arun Kamble 	struct intel_rps *rps = &dev_priv->gt_pm.rps;
1132562d9baeSSagar Arun Kamble 	const struct intel_rps_ei *prev = &rps->ei;
113343cf3bf0SChris Wilson 	struct intel_rps_ei now;
113443cf3bf0SChris Wilson 	u32 events = 0;
113543cf3bf0SChris Wilson 
1136e0e8c7cbSChris Wilson 	if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0)
113743cf3bf0SChris Wilson 		return 0;
113843cf3bf0SChris Wilson 
113943cf3bf0SChris Wilson 	vlv_c0_read(dev_priv, &now);
114031685c25SDeepak S 
1141679cb6c1SMika Kuoppala 	if (prev->ktime) {
1142e0e8c7cbSChris Wilson 		u64 time, c0;
1143569884e3SChris Wilson 		u32 render, media;
1144e0e8c7cbSChris Wilson 
1145679cb6c1SMika Kuoppala 		time = ktime_us_delta(now.ktime, prev->ktime);
11468f68d591SChris Wilson 
1147e0e8c7cbSChris Wilson 		time *= dev_priv->czclk_freq;
1148e0e8c7cbSChris Wilson 
1149e0e8c7cbSChris Wilson 		/* Workload can be split between render + media,
1150e0e8c7cbSChris Wilson 		 * e.g. SwapBuffers being blitted in X after being rendered in
1151e0e8c7cbSChris Wilson 		 * mesa. To account for this we need to combine both engines
1152e0e8c7cbSChris Wilson 		 * into our activity counter.
1153e0e8c7cbSChris Wilson 		 */
1154569884e3SChris Wilson 		render = now.render_c0 - prev->render_c0;
1155569884e3SChris Wilson 		media = now.media_c0 - prev->media_c0;
1156569884e3SChris Wilson 		c0 = max(render, media);
11576b7f6aa7SMika Kuoppala 		c0 *= 1000 * 100 << 8; /* to usecs and scale to threshold% */
1158e0e8c7cbSChris Wilson 
1159562d9baeSSagar Arun Kamble 		if (c0 > time * rps->up_threshold)
1160e0e8c7cbSChris Wilson 			events = GEN6_PM_RP_UP_THRESHOLD;
1161562d9baeSSagar Arun Kamble 		else if (c0 < time * rps->down_threshold)
1162e0e8c7cbSChris Wilson 			events = GEN6_PM_RP_DOWN_THRESHOLD;
116331685c25SDeepak S 	}
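	/*
	 * In other words: flag an up event when the busiest engine was active
	 * for more than up_threshold percent of the evaluation interval, and
	 * a down event when it stayed below down_threshold percent.
	 */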
116431685c25SDeepak S 
1165562d9baeSSagar Arun Kamble 	rps->ei = now;
116643cf3bf0SChris Wilson 	return events;
116731685c25SDeepak S }
116831685c25SDeepak S 
11694912d041SBen Widawsky static void gen6_pm_rps_work(struct work_struct *work)
11703b8d8d91SJesse Barnes {
11712d1013ddSJani Nikula 	struct drm_i915_private *dev_priv =
1172562d9baeSSagar Arun Kamble 		container_of(work, struct drm_i915_private, gt_pm.rps.work);
1173562d9baeSSagar Arun Kamble 	struct intel_rps *rps = &dev_priv->gt_pm.rps;
11747c0a16adSChris Wilson 	bool client_boost = false;
11758d3afd7dSChris Wilson 	int new_delay, adj, min, max;
11767c0a16adSChris Wilson 	u32 pm_iir = 0;
11773b8d8d91SJesse Barnes 
117859cdb63dSDaniel Vetter 	spin_lock_irq(&dev_priv->irq_lock);
1179562d9baeSSagar Arun Kamble 	if (rps->interrupts_enabled) {
1180562d9baeSSagar Arun Kamble 		pm_iir = fetch_and_zero(&rps->pm_iir);
1181562d9baeSSagar Arun Kamble 		client_boost = atomic_read(&rps->num_waiters);
1182d4d70aa5SImre Deak 	}
118359cdb63dSDaniel Vetter 	spin_unlock_irq(&dev_priv->irq_lock);
11844912d041SBen Widawsky 
118560611c13SPaulo Zanoni 	/* Make sure we didn't queue anything we're not going to process. */
1186a6706b45SDeepak S 	WARN_ON(pm_iir & ~dev_priv->pm_rps_events);
11878d3afd7dSChris Wilson 	if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
11887c0a16adSChris Wilson 		goto out;
11893b8d8d91SJesse Barnes 
11909f817501SSagar Arun Kamble 	mutex_lock(&dev_priv->pcu_lock);
11917b9e0ae6SChris Wilson 
119243cf3bf0SChris Wilson 	pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);
119343cf3bf0SChris Wilson 
1194562d9baeSSagar Arun Kamble 	adj = rps->last_adj;
1195562d9baeSSagar Arun Kamble 	new_delay = rps->cur_freq;
1196562d9baeSSagar Arun Kamble 	min = rps->min_freq_softlimit;
1197562d9baeSSagar Arun Kamble 	max = rps->max_freq_softlimit;
11987b92c1bdSChris Wilson 	if (client_boost)
1199562d9baeSSagar Arun Kamble 		max = rps->max_freq;
1200562d9baeSSagar Arun Kamble 	if (client_boost && new_delay < rps->boost_freq) {
1201562d9baeSSagar Arun Kamble 		new_delay = rps->boost_freq;
12028d3afd7dSChris Wilson 		adj = 0;
12038d3afd7dSChris Wilson 	} else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
1204dd75fdc8SChris Wilson 		if (adj > 0)
1205dd75fdc8SChris Wilson 			adj *= 2;
1206edcf284bSChris Wilson 		else /* CHV needs even encode values */
1207edcf284bSChris Wilson 			adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;
12087e79a683SSagar Arun Kamble 
1209562d9baeSSagar Arun Kamble 		if (new_delay >= rps->max_freq_softlimit)
12107e79a683SSagar Arun Kamble 			adj = 0;
12117b92c1bdSChris Wilson 	} else if (client_boost) {
1212f5a4c67dSChris Wilson 		adj = 0;
1213dd75fdc8SChris Wilson 	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
1214562d9baeSSagar Arun Kamble 		if (rps->cur_freq > rps->efficient_freq)
1215562d9baeSSagar Arun Kamble 			new_delay = rps->efficient_freq;
1216562d9baeSSagar Arun Kamble 		else if (rps->cur_freq > rps->min_freq_softlimit)
1217562d9baeSSagar Arun Kamble 			new_delay = rps->min_freq_softlimit;
1218dd75fdc8SChris Wilson 		adj = 0;
1219dd75fdc8SChris Wilson 	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
1220dd75fdc8SChris Wilson 		if (adj < 0)
1221dd75fdc8SChris Wilson 			adj *= 2;
1222edcf284bSChris Wilson 		else /* CHV needs even encode values */
1223edcf284bSChris Wilson 			adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;
12247e79a683SSagar Arun Kamble 
1225562d9baeSSagar Arun Kamble 		if (new_delay <= rps->min_freq_softlimit)
12267e79a683SSagar Arun Kamble 			adj = 0;
1227dd75fdc8SChris Wilson 	} else { /* unknown event */
1228edcf284bSChris Wilson 		adj = 0;
1229dd75fdc8SChris Wilson 	}
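	/*
	 * The net effect is an exponential ramp: consecutive UP_THRESHOLD
	 * interrupts double a positive adj (1, 2, 4, ... or 2, 4, 8 on CHV,
	 * which needs even values), DOWN_THRESHOLD interrupts do the same in
	 * the negative direction, and adj resets to 0 once the soft limits
	 * are reached or on a boost/timeout event.
	 */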
12303b8d8d91SJesse Barnes 
1231562d9baeSSagar Arun Kamble 	rps->last_adj = adj;
1232edcf284bSChris Wilson 
123379249636SBen Widawsky 	/* sysfs frequency interfaces may have snuck in while servicing the
123479249636SBen Widawsky 	 * interrupt
123579249636SBen Widawsky 	 */
1236edcf284bSChris Wilson 	new_delay += adj;
12378d3afd7dSChris Wilson 	new_delay = clamp_t(int, new_delay, min, max);
123827544369SDeepak S 
12399fcee2f7SChris Wilson 	if (intel_set_rps(dev_priv, new_delay)) {
12409fcee2f7SChris Wilson 		DRM_DEBUG_DRIVER("Failed to set new GPU frequency\n");
1241562d9baeSSagar Arun Kamble 		rps->last_adj = 0;
12429fcee2f7SChris Wilson 	}
12433b8d8d91SJesse Barnes 
12449f817501SSagar Arun Kamble 	mutex_unlock(&dev_priv->pcu_lock);
12457c0a16adSChris Wilson 
12467c0a16adSChris Wilson out:
12477c0a16adSChris Wilson 	/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
12487c0a16adSChris Wilson 	spin_lock_irq(&dev_priv->irq_lock);
1249562d9baeSSagar Arun Kamble 	if (rps->interrupts_enabled)
12507c0a16adSChris Wilson 		gen6_unmask_pm_irq(dev_priv, dev_priv->pm_rps_events);
12517c0a16adSChris Wilson 	spin_unlock_irq(&dev_priv->irq_lock);
12523b8d8d91SJesse Barnes }
12533b8d8d91SJesse Barnes 
1254e3689190SBen Widawsky 
1255e3689190SBen Widawsky /**
1256e3689190SBen Widawsky  * ivybridge_parity_work - Workqueue called when a parity error interrupt
1257e3689190SBen Widawsky  * occurred.
1258e3689190SBen Widawsky  * @work: workqueue struct
1259e3689190SBen Widawsky  *
1260e3689190SBen Widawsky  * Doesn't actually do anything except notify userspace. As a consequence of
1261e3689190SBen Widawsky  * this event, userspace should try to remap the bad rows since statistically
1262e3689190SBen Widawsky  * the same row is likely to go bad again.
1263e3689190SBen Widawsky  */
1264e3689190SBen Widawsky static void ivybridge_parity_work(struct work_struct *work)
1265e3689190SBen Widawsky {
12662d1013ddSJani Nikula 	struct drm_i915_private *dev_priv =
1267cefcff8fSJoonas Lahtinen 		container_of(work, typeof(*dev_priv), l3_parity.error_work);
1268e3689190SBen Widawsky 	u32 error_status, row, bank, subbank;
126935a85ac6SBen Widawsky 	char *parity_event[6];
1270e3689190SBen Widawsky 	uint32_t misccpctl;
127135a85ac6SBen Widawsky 	uint8_t slice = 0;
1272e3689190SBen Widawsky 
1273e3689190SBen Widawsky 	/* We must turn off DOP level clock gating to access the L3 registers.
1274e3689190SBen Widawsky 	 * In order to prevent a get/put style interface, acquire struct mutex
1275e3689190SBen Widawsky 	 * any time we access those registers.
1276e3689190SBen Widawsky 	 */
127791c8a326SChris Wilson 	mutex_lock(&dev_priv->drm.struct_mutex);
1278e3689190SBen Widawsky 
127935a85ac6SBen Widawsky 	/* If we've screwed up tracking, just let the interrupt fire again */
128035a85ac6SBen Widawsky 	if (WARN_ON(!dev_priv->l3_parity.which_slice))
128135a85ac6SBen Widawsky 		goto out;
128235a85ac6SBen Widawsky 
1283e3689190SBen Widawsky 	misccpctl = I915_READ(GEN7_MISCCPCTL);
1284e3689190SBen Widawsky 	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
1285e3689190SBen Widawsky 	POSTING_READ(GEN7_MISCCPCTL);
1286e3689190SBen Widawsky 
128735a85ac6SBen Widawsky 	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
1288f0f59a00SVille Syrjälä 		i915_reg_t reg;
128935a85ac6SBen Widawsky 
129035a85ac6SBen Widawsky 		slice--;
12912d1fe073SJoonas Lahtinen 		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv)))
129235a85ac6SBen Widawsky 			break;
129335a85ac6SBen Widawsky 
129435a85ac6SBen Widawsky 		dev_priv->l3_parity.which_slice &= ~(1<<slice);
129535a85ac6SBen Widawsky 
12966fa1c5f1SVille Syrjälä 		reg = GEN7_L3CDERRST1(slice);
129735a85ac6SBen Widawsky 
129835a85ac6SBen Widawsky 		error_status = I915_READ(reg);
1299e3689190SBen Widawsky 		row = GEN7_PARITY_ERROR_ROW(error_status);
1300e3689190SBen Widawsky 		bank = GEN7_PARITY_ERROR_BANK(error_status);
1301e3689190SBen Widawsky 		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
1302e3689190SBen Widawsky 
130335a85ac6SBen Widawsky 		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
130435a85ac6SBen Widawsky 		POSTING_READ(reg);
1305e3689190SBen Widawsky 
1306cce723edSBen Widawsky 		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
1307e3689190SBen Widawsky 		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
1308e3689190SBen Widawsky 		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
1309e3689190SBen Widawsky 		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
131035a85ac6SBen Widawsky 		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
131135a85ac6SBen Widawsky 		parity_event[5] = NULL;
1312e3689190SBen Widawsky 
131391c8a326SChris Wilson 		kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj,
1314e3689190SBen Widawsky 				   KOBJ_CHANGE, parity_event);
1315e3689190SBen Widawsky 
131635a85ac6SBen Widawsky 		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
131735a85ac6SBen Widawsky 			  slice, row, bank, subbank);
1318e3689190SBen Widawsky 
131935a85ac6SBen Widawsky 		kfree(parity_event[4]);
1320e3689190SBen Widawsky 		kfree(parity_event[3]);
1321e3689190SBen Widawsky 		kfree(parity_event[2]);
1322e3689190SBen Widawsky 		kfree(parity_event[1]);
1323e3689190SBen Widawsky 	}
1324e3689190SBen Widawsky 
132535a85ac6SBen Widawsky 	I915_WRITE(GEN7_MISCCPCTL, misccpctl);
132635a85ac6SBen Widawsky 
132735a85ac6SBen Widawsky out:
132835a85ac6SBen Widawsky 	WARN_ON(dev_priv->l3_parity.which_slice);
13294cb21832SDaniel Vetter 	spin_lock_irq(&dev_priv->irq_lock);
13302d1fe073SJoonas Lahtinen 	gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
13314cb21832SDaniel Vetter 	spin_unlock_irq(&dev_priv->irq_lock);
133235a85ac6SBen Widawsky 
133391c8a326SChris Wilson 	mutex_unlock(&dev_priv->drm.struct_mutex);
133435a85ac6SBen Widawsky }
133535a85ac6SBen Widawsky 
1336261e40b8SVille Syrjälä static void ivybridge_parity_error_irq_handler(struct drm_i915_private *dev_priv,
1337261e40b8SVille Syrjälä 					       u32 iir)
1338e3689190SBen Widawsky {
1339261e40b8SVille Syrjälä 	if (!HAS_L3_DPF(dev_priv))
1340e3689190SBen Widawsky 		return;
1341e3689190SBen Widawsky 
1342d0ecd7e2SDaniel Vetter 	spin_lock(&dev_priv->irq_lock);
1343261e40b8SVille Syrjälä 	gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
1344d0ecd7e2SDaniel Vetter 	spin_unlock(&dev_priv->irq_lock);
1345e3689190SBen Widawsky 
1346261e40b8SVille Syrjälä 	iir &= GT_PARITY_ERROR(dev_priv);
134735a85ac6SBen Widawsky 	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
134835a85ac6SBen Widawsky 		dev_priv->l3_parity.which_slice |= 1 << 1;
134935a85ac6SBen Widawsky 
135035a85ac6SBen Widawsky 	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
135135a85ac6SBen Widawsky 		dev_priv->l3_parity.which_slice |= 1 << 0;
135235a85ac6SBen Widawsky 
1353a4da4fa4SDaniel Vetter 	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
1354e3689190SBen Widawsky }
1355e3689190SBen Widawsky 
1356261e40b8SVille Syrjälä static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv,
1357f1af8fc1SPaulo Zanoni 			       u32 gt_iir)
1358f1af8fc1SPaulo Zanoni {
1359f8973c21SChris Wilson 	if (gt_iir & GT_RENDER_USER_INTERRUPT)
13603b3f1650SAkash Goel 		notify_ring(dev_priv->engine[RCS]);
1361f1af8fc1SPaulo Zanoni 	if (gt_iir & ILK_BSD_USER_INTERRUPT)
13623b3f1650SAkash Goel 		notify_ring(dev_priv->engine[VCS]);
1363f1af8fc1SPaulo Zanoni }
1364f1af8fc1SPaulo Zanoni 
1365261e40b8SVille Syrjälä static void snb_gt_irq_handler(struct drm_i915_private *dev_priv,
1366e7b4c6b1SDaniel Vetter 			       u32 gt_iir)
1367e7b4c6b1SDaniel Vetter {
1368f8973c21SChris Wilson 	if (gt_iir & GT_RENDER_USER_INTERRUPT)
13693b3f1650SAkash Goel 		notify_ring(dev_priv->engine[RCS]);
1370cc609d5dSBen Widawsky 	if (gt_iir & GT_BSD_USER_INTERRUPT)
13713b3f1650SAkash Goel 		notify_ring(dev_priv->engine[VCS]);
1372cc609d5dSBen Widawsky 	if (gt_iir & GT_BLT_USER_INTERRUPT)
13733b3f1650SAkash Goel 		notify_ring(dev_priv->engine[BCS]);
1374e7b4c6b1SDaniel Vetter 
1375cc609d5dSBen Widawsky 	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
1376cc609d5dSBen Widawsky 		      GT_BSD_CS_ERROR_INTERRUPT |
1377aaecdf61SDaniel Vetter 		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
1378aaecdf61SDaniel Vetter 		DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);
1379e3689190SBen Widawsky 
1380261e40b8SVille Syrjälä 	if (gt_iir & GT_PARITY_ERROR(dev_priv))
1381261e40b8SVille Syrjälä 		ivybridge_parity_error_irq_handler(dev_priv, gt_iir);
1382e7b4c6b1SDaniel Vetter }
1383e7b4c6b1SDaniel Vetter 
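/*
 * Per-engine GEN8+ GT interrupt handling: a context-switch interrupt marks
 * the execlists as needing service and schedules the irq tasklet (if the
 * execlists are active), while a user interrupt wakes any waiter via
 * notify_ring() and, with GuC submission enabled, also kicks the tasklet.
 * test_shift selects this engine's bits within the shared IIR value.
 */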
13845d3d69d5SChris Wilson static void
13850bc40be8STvrtko Ursulin gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir, int test_shift)
1386fbcc1a0cSNick Hoath {
1387b620e870SMika Kuoppala 	struct intel_engine_execlists * const execlists = &engine->execlists;
138831de7350SChris Wilson 	bool tasklet = false;
1389f747026cSChris Wilson 
1390f747026cSChris Wilson 	if (iir & (GT_CONTEXT_SWITCH_INTERRUPT << test_shift)) {
1391*4a118ecbSChris Wilson 		if (READ_ONCE(engine->execlists.active)) {
1392955a4b89SChris Wilson 			__set_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
139331de7350SChris Wilson 			tasklet = true;
1394f747026cSChris Wilson 		}
1395*4a118ecbSChris Wilson 	}
139631de7350SChris Wilson 
139731de7350SChris Wilson 	if (iir & (GT_RENDER_USER_INTERRUPT << test_shift)) {
139831de7350SChris Wilson 		notify_ring(engine);
13994f044a88SMichal Wajdeczko 		tasklet |= i915_modparams.enable_guc_submission;
140031de7350SChris Wilson 	}
140131de7350SChris Wilson 
140231de7350SChris Wilson 	if (tasklet)
1403b620e870SMika Kuoppala 		tasklet_hi_schedule(&execlists->irq_tasklet);
1404fbcc1a0cSNick Hoath }
1405fbcc1a0cSNick Hoath 
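/*
 * Ack the GT interrupt banks flagged in master_ctl, stashing each bank's
 * IIR value in gt_iir[] so gen8_gt_irq_handler() can process them once the
 * master interrupt has been re-enabled. Raw (_FW) mmio accesses are used
 * in this hard irq path.
 */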
1406e30e251aSVille Syrjälä static irqreturn_t gen8_gt_irq_ack(struct drm_i915_private *dev_priv,
1407e30e251aSVille Syrjälä 				   u32 master_ctl,
1408e30e251aSVille Syrjälä 				   u32 gt_iir[4])
1409abd58f01SBen Widawsky {
1410abd58f01SBen Widawsky 	irqreturn_t ret = IRQ_NONE;
1411abd58f01SBen Widawsky 
1412abd58f01SBen Widawsky 	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
1413e30e251aSVille Syrjälä 		gt_iir[0] = I915_READ_FW(GEN8_GT_IIR(0));
1414e30e251aSVille Syrjälä 		if (gt_iir[0]) {
1415e30e251aSVille Syrjälä 			I915_WRITE_FW(GEN8_GT_IIR(0), gt_iir[0]);
1416abd58f01SBen Widawsky 			ret = IRQ_HANDLED;
1417abd58f01SBen Widawsky 		} else
1418abd58f01SBen Widawsky 			DRM_ERROR("The master control interrupt lied (GT0)!\n");
1419abd58f01SBen Widawsky 	}
1420abd58f01SBen Widawsky 
142185f9b5f9SZhao Yakui 	if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
1422e30e251aSVille Syrjälä 		gt_iir[1] = I915_READ_FW(GEN8_GT_IIR(1));
1423e30e251aSVille Syrjälä 		if (gt_iir[1]) {
1424e30e251aSVille Syrjälä 			I915_WRITE_FW(GEN8_GT_IIR(1), gt_iir[1]);
1425abd58f01SBen Widawsky 			ret = IRQ_HANDLED;
1426abd58f01SBen Widawsky 		} else
1427abd58f01SBen Widawsky 			DRM_ERROR("The master control interrupt lied (GT1)!\n");
1428abd58f01SBen Widawsky 	}
1429abd58f01SBen Widawsky 
143074cdb337SChris Wilson 	if (master_ctl & GEN8_GT_VECS_IRQ) {
1431e30e251aSVille Syrjälä 		gt_iir[3] = I915_READ_FW(GEN8_GT_IIR(3));
1432e30e251aSVille Syrjälä 		if (gt_iir[3]) {
1433e30e251aSVille Syrjälä 			I915_WRITE_FW(GEN8_GT_IIR(3), gt_iir[3]);
143474cdb337SChris Wilson 			ret = IRQ_HANDLED;
143574cdb337SChris Wilson 		} else
143674cdb337SChris Wilson 			DRM_ERROR("The master control interrupt lied (GT3)!\n");
143774cdb337SChris Wilson 	}
143874cdb337SChris Wilson 
143926705e20SSagar Arun Kamble 	if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
1440e30e251aSVille Syrjälä 		gt_iir[2] = I915_READ_FW(GEN8_GT_IIR(2));
144126705e20SSagar Arun Kamble 		if (gt_iir[2] & (dev_priv->pm_rps_events |
144226705e20SSagar Arun Kamble 				 dev_priv->pm_guc_events)) {
1443cb0d205eSChris Wilson 			I915_WRITE_FW(GEN8_GT_IIR(2),
144426705e20SSagar Arun Kamble 				      gt_iir[2] & (dev_priv->pm_rps_events |
144526705e20SSagar Arun Kamble 						   dev_priv->pm_guc_events));
144638cc46d7SOscar Mateo 			ret = IRQ_HANDLED;
14470961021aSBen Widawsky 		} else
14480961021aSBen Widawsky 			DRM_ERROR("The master control interrupt lied (PM)!\n");
14490961021aSBen Widawsky 	}
14500961021aSBen Widawsky 
1451abd58f01SBen Widawsky 	return ret;
1452abd58f01SBen Widawsky }
1453abd58f01SBen Widawsky 
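/*
 * Dispatch the banks ack'ed by gen8_gt_irq_ack(): bank 0 covers RCS/BCS,
 * bank 1 VCS1/VCS2, bank 3 VECS, and bank 2 carries the PM (RPS) and GuC
 * events.
 */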
1454e30e251aSVille Syrjälä static void gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
1455e30e251aSVille Syrjälä 				u32 gt_iir[4])
1456e30e251aSVille Syrjälä {
1457e30e251aSVille Syrjälä 	if (gt_iir[0]) {
14583b3f1650SAkash Goel 		gen8_cs_irq_handler(dev_priv->engine[RCS],
1459e30e251aSVille Syrjälä 				    gt_iir[0], GEN8_RCS_IRQ_SHIFT);
14603b3f1650SAkash Goel 		gen8_cs_irq_handler(dev_priv->engine[BCS],
1461e30e251aSVille Syrjälä 				    gt_iir[0], GEN8_BCS_IRQ_SHIFT);
1462e30e251aSVille Syrjälä 	}
1463e30e251aSVille Syrjälä 
1464e30e251aSVille Syrjälä 	if (gt_iir[1]) {
14653b3f1650SAkash Goel 		gen8_cs_irq_handler(dev_priv->engine[VCS],
1466e30e251aSVille Syrjälä 				    gt_iir[1], GEN8_VCS1_IRQ_SHIFT);
14673b3f1650SAkash Goel 		gen8_cs_irq_handler(dev_priv->engine[VCS2],
1468e30e251aSVille Syrjälä 				    gt_iir[1], GEN8_VCS2_IRQ_SHIFT);
1469e30e251aSVille Syrjälä 	}
1470e30e251aSVille Syrjälä 
1471e30e251aSVille Syrjälä 	if (gt_iir[3])
14723b3f1650SAkash Goel 		gen8_cs_irq_handler(dev_priv->engine[VECS],
1473e30e251aSVille Syrjälä 				    gt_iir[3], GEN8_VECS_IRQ_SHIFT);
1474e30e251aSVille Syrjälä 
1475e30e251aSVille Syrjälä 	if (gt_iir[2] & dev_priv->pm_rps_events)
1476e30e251aSVille Syrjälä 		gen6_rps_irq_handler(dev_priv, gt_iir[2]);
147726705e20SSagar Arun Kamble 
147826705e20SSagar Arun Kamble 	if (gt_iir[2] & dev_priv->pm_guc_events)
147926705e20SSagar Arun Kamble 		gen9_guc_irq_handler(dev_priv, gt_iir[2]);
1480e30e251aSVille Syrjälä }
1481e30e251aSVille Syrjälä 
148263c88d22SImre Deak static bool bxt_port_hotplug_long_detect(enum port port, u32 val)
148363c88d22SImre Deak {
148463c88d22SImre Deak 	switch (port) {
148563c88d22SImre Deak 	case PORT_A:
1486195baa06SVille Syrjälä 		return val & PORTA_HOTPLUG_LONG_DETECT;
148763c88d22SImre Deak 	case PORT_B:
148863c88d22SImre Deak 		return val & PORTB_HOTPLUG_LONG_DETECT;
148963c88d22SImre Deak 	case PORT_C:
149063c88d22SImre Deak 		return val & PORTC_HOTPLUG_LONG_DETECT;
149163c88d22SImre Deak 	default:
149263c88d22SImre Deak 		return false;
149363c88d22SImre Deak 	}
149463c88d22SImre Deak }
149563c88d22SImre Deak 
14966dbf30ceSVille Syrjälä static bool spt_port_hotplug2_long_detect(enum port port, u32 val)
14976dbf30ceSVille Syrjälä {
14986dbf30ceSVille Syrjälä 	switch (port) {
14996dbf30ceSVille Syrjälä 	case PORT_E:
15006dbf30ceSVille Syrjälä 		return val & PORTE_HOTPLUG_LONG_DETECT;
15016dbf30ceSVille Syrjälä 	default:
15026dbf30ceSVille Syrjälä 		return false;
15036dbf30ceSVille Syrjälä 	}
15046dbf30ceSVille Syrjälä }
15056dbf30ceSVille Syrjälä 
150674c0b395SVille Syrjälä static bool spt_port_hotplug_long_detect(enum port port, u32 val)
150774c0b395SVille Syrjälä {
150874c0b395SVille Syrjälä 	switch (port) {
150974c0b395SVille Syrjälä 	case PORT_A:
151074c0b395SVille Syrjälä 		return val & PORTA_HOTPLUG_LONG_DETECT;
151174c0b395SVille Syrjälä 	case PORT_B:
151274c0b395SVille Syrjälä 		return val & PORTB_HOTPLUG_LONG_DETECT;
151374c0b395SVille Syrjälä 	case PORT_C:
151474c0b395SVille Syrjälä 		return val & PORTC_HOTPLUG_LONG_DETECT;
151574c0b395SVille Syrjälä 	case PORT_D:
151674c0b395SVille Syrjälä 		return val & PORTD_HOTPLUG_LONG_DETECT;
151774c0b395SVille Syrjälä 	default:
151874c0b395SVille Syrjälä 		return false;
151974c0b395SVille Syrjälä 	}
152074c0b395SVille Syrjälä }
152174c0b395SVille Syrjälä 
1522e4ce95aaSVille Syrjälä static bool ilk_port_hotplug_long_detect(enum port port, u32 val)
1523e4ce95aaSVille Syrjälä {
1524e4ce95aaSVille Syrjälä 	switch (port) {
1525e4ce95aaSVille Syrjälä 	case PORT_A:
1526e4ce95aaSVille Syrjälä 		return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
1527e4ce95aaSVille Syrjälä 	default:
1528e4ce95aaSVille Syrjälä 		return false;
1529e4ce95aaSVille Syrjälä 	}
1530e4ce95aaSVille Syrjälä }
1531e4ce95aaSVille Syrjälä 
1532676574dfSJani Nikula static bool pch_port_hotplug_long_detect(enum port port, u32 val)
153313cf5504SDave Airlie {
153413cf5504SDave Airlie 	switch (port) {
153513cf5504SDave Airlie 	case PORT_B:
1536676574dfSJani Nikula 		return val & PORTB_HOTPLUG_LONG_DETECT;
153713cf5504SDave Airlie 	case PORT_C:
1538676574dfSJani Nikula 		return val & PORTC_HOTPLUG_LONG_DETECT;
153913cf5504SDave Airlie 	case PORT_D:
1540676574dfSJani Nikula 		return val & PORTD_HOTPLUG_LONG_DETECT;
1541676574dfSJani Nikula 	default:
1542676574dfSJani Nikula 		return false;
154313cf5504SDave Airlie 	}
154413cf5504SDave Airlie }
154513cf5504SDave Airlie 
1546676574dfSJani Nikula static bool i9xx_port_hotplug_long_detect(enum port port, u32 val)
154713cf5504SDave Airlie {
154813cf5504SDave Airlie 	switch (port) {
154913cf5504SDave Airlie 	case PORT_B:
1550676574dfSJani Nikula 		return val & PORTB_HOTPLUG_INT_LONG_PULSE;
155113cf5504SDave Airlie 	case PORT_C:
1552676574dfSJani Nikula 		return val & PORTC_HOTPLUG_INT_LONG_PULSE;
155313cf5504SDave Airlie 	case PORT_D:
1554676574dfSJani Nikula 		return val & PORTD_HOTPLUG_INT_LONG_PULSE;
1555676574dfSJani Nikula 	default:
1556676574dfSJani Nikula 		return false;
155713cf5504SDave Airlie 	}
155813cf5504SDave Airlie }
155913cf5504SDave Airlie 
156042db67d6SVille Syrjälä /*
156142db67d6SVille Syrjälä  * Get a bit mask of pins that have triggered, and which ones may be long.
156242db67d6SVille Syrjälä  * This can be called multiple times with the same masks to accumulate
156342db67d6SVille Syrjälä  * hotplug detection results from several registers.
156442db67d6SVille Syrjälä  *
156542db67d6SVille Syrjälä  * Note that the caller is expected to zero out the masks initially.
156642db67d6SVille Syrjälä  */
1567fd63e2a9SImre Deak static void intel_get_hpd_pins(u32 *pin_mask, u32 *long_mask,
15688c841e57SJani Nikula 			     u32 hotplug_trigger, u32 dig_hotplug_reg,
1569fd63e2a9SImre Deak 			     const u32 hpd[HPD_NUM_PINS],
1570fd63e2a9SImre Deak 			     bool long_pulse_detect(enum port port, u32 val))
1571676574dfSJani Nikula {
15728c841e57SJani Nikula 	enum port port;
1573676574dfSJani Nikula 	int i;
1574676574dfSJani Nikula 
1575676574dfSJani Nikula 	for_each_hpd_pin(i) {
15768c841e57SJani Nikula 		if ((hpd[i] & hotplug_trigger) == 0)
15778c841e57SJani Nikula 			continue;
15788c841e57SJani Nikula 
1579676574dfSJani Nikula 		*pin_mask |= BIT(i);
1580676574dfSJani Nikula 
1581256cfddeSRodrigo Vivi 		port = intel_hpd_pin_to_port(i);
1582256cfddeSRodrigo Vivi 		if (port == PORT_NONE)
1583cc24fcdcSImre Deak 			continue;
1584cc24fcdcSImre Deak 
1585fd63e2a9SImre Deak 		if (long_pulse_detect(port, dig_hotplug_reg))
1586676574dfSJani Nikula 			*long_mask |= BIT(i);
1587676574dfSJani Nikula 	}
1588676574dfSJani Nikula 
1589676574dfSJani Nikula 	DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x\n",
1590676574dfSJani Nikula 			 hotplug_trigger, dig_hotplug_reg, *pin_mask);
1591676574dfSJani Nikula 
1592676574dfSJani Nikula }
1593676574dfSJani Nikula 
159491d14251STvrtko Ursulin static void gmbus_irq_handler(struct drm_i915_private *dev_priv)
1595515ac2bbSDaniel Vetter {
159628c70f16SDaniel Vetter 	wake_up_all(&dev_priv->gmbus_wait_queue);
1597515ac2bbSDaniel Vetter }
1598515ac2bbSDaniel Vetter 
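/*
 * DP AUX completion waiters sleep on the same gmbus_wait_queue as GMBUS
 * waiters, hence the AUX done handler below wakes that queue as well.
 */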
159991d14251STvrtko Ursulin static void dp_aux_irq_handler(struct drm_i915_private *dev_priv)
1600ce99c256SDaniel Vetter {
16019ee32feaSDaniel Vetter 	wake_up_all(&dev_priv->gmbus_wait_queue);
1602ce99c256SDaniel Vetter }
1603ce99c256SDaniel Vetter 
16048bf1e9f1SShuang He #if defined(CONFIG_DEBUG_FS)
160591d14251STvrtko Ursulin static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
160691d14251STvrtko Ursulin 					 enum pipe pipe,
1607eba94eb9SDaniel Vetter 					 uint32_t crc0, uint32_t crc1,
1608eba94eb9SDaniel Vetter 					 uint32_t crc2, uint32_t crc3,
16098bc5e955SDaniel Vetter 					 uint32_t crc4)
16108bf1e9f1SShuang He {
16118bf1e9f1SShuang He 	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
16128bf1e9f1SShuang He 	struct intel_pipe_crc_entry *entry;
16138c6b709dSTomeu Vizoso 	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
16148c6b709dSTomeu Vizoso 	struct drm_driver *driver = dev_priv->drm.driver;
16158c6b709dSTomeu Vizoso 	uint32_t crcs[5];
1616ac2300d4SDamien Lespiau 	int head, tail;
1617b2c88f5bSDamien Lespiau 
1618d538bbdfSDamien Lespiau 	spin_lock(&pipe_crc->lock);
16198c6b709dSTomeu Vizoso 	if (pipe_crc->source) {
16200c912c79SDamien Lespiau 		if (!pipe_crc->entries) {
1621d538bbdfSDamien Lespiau 			spin_unlock(&pipe_crc->lock);
162234273620SDaniel Vetter 			DRM_DEBUG_KMS("spurious interrupt\n");
16230c912c79SDamien Lespiau 			return;
16240c912c79SDamien Lespiau 		}
16250c912c79SDamien Lespiau 
1626d538bbdfSDamien Lespiau 		head = pipe_crc->head;
1627d538bbdfSDamien Lespiau 		tail = pipe_crc->tail;
1628b2c88f5bSDamien Lespiau 
1629b2c88f5bSDamien Lespiau 		if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
1630d538bbdfSDamien Lespiau 			spin_unlock(&pipe_crc->lock);
1631b2c88f5bSDamien Lespiau 			DRM_ERROR("CRC buffer overflowing\n");
1632b2c88f5bSDamien Lespiau 			return;
1633b2c88f5bSDamien Lespiau 		}
1634b2c88f5bSDamien Lespiau 
1635b2c88f5bSDamien Lespiau 		entry = &pipe_crc->entries[head];
16368bf1e9f1SShuang He 
16378c6b709dSTomeu Vizoso 		entry->frame = driver->get_vblank_counter(&dev_priv->drm, pipe);
1638eba94eb9SDaniel Vetter 		entry->crc[0] = crc0;
1639eba94eb9SDaniel Vetter 		entry->crc[1] = crc1;
1640eba94eb9SDaniel Vetter 		entry->crc[2] = crc2;
1641eba94eb9SDaniel Vetter 		entry->crc[3] = crc3;
1642eba94eb9SDaniel Vetter 		entry->crc[4] = crc4;
1643b2c88f5bSDamien Lespiau 
1644b2c88f5bSDamien Lespiau 		head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
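		/* This wrap relies on INTEL_PIPE_CRC_ENTRIES_NR being a power of two. */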
1645d538bbdfSDamien Lespiau 		pipe_crc->head = head;
1646d538bbdfSDamien Lespiau 
1647d538bbdfSDamien Lespiau 		spin_unlock(&pipe_crc->lock);
164807144428SDamien Lespiau 
164907144428SDamien Lespiau 		wake_up_interruptible(&pipe_crc->wq);
16508c6b709dSTomeu Vizoso 	} else {
16518c6b709dSTomeu Vizoso 		/*
16528c6b709dSTomeu Vizoso 		 * For some not yet identified reason, the first CRC is
16538c6b709dSTomeu Vizoso 		 * bonkers. So let's just wait for the next vblank and read
16548c6b709dSTomeu Vizoso 		 * out the buggy result.
16558c6b709dSTomeu Vizoso 		 *
1656163e8aecSRodrigo Vivi 		 * On GEN8+ sometimes the second CRC is bonkers as well, so
16578c6b709dSTomeu Vizoso 		 * don't trust that one either.
16588c6b709dSTomeu Vizoso 		 */
16598c6b709dSTomeu Vizoso 		if (pipe_crc->skipped == 0 ||
1660163e8aecSRodrigo Vivi 		    (INTEL_GEN(dev_priv) >= 8 && pipe_crc->skipped == 1)) {
16618c6b709dSTomeu Vizoso 			pipe_crc->skipped++;
16628c6b709dSTomeu Vizoso 			spin_unlock(&pipe_crc->lock);
16638c6b709dSTomeu Vizoso 			return;
16648c6b709dSTomeu Vizoso 		}
16658c6b709dSTomeu Vizoso 		spin_unlock(&pipe_crc->lock);
16668c6b709dSTomeu Vizoso 		crcs[0] = crc0;
16678c6b709dSTomeu Vizoso 		crcs[1] = crc1;
16688c6b709dSTomeu Vizoso 		crcs[2] = crc2;
16698c6b709dSTomeu Vizoso 		crcs[3] = crc3;
16708c6b709dSTomeu Vizoso 		crcs[4] = crc4;
1671246ee524STomeu Vizoso 		drm_crtc_add_crc_entry(&crtc->base, true,
1672ca814b25SDaniel Vetter 				       drm_crtc_accurate_vblank_count(&crtc->base),
1673246ee524STomeu Vizoso 				       crcs);
16748c6b709dSTomeu Vizoso 	}
16758bf1e9f1SShuang He }
1676277de95eSDaniel Vetter #else
1677277de95eSDaniel Vetter static inline void
167891d14251STvrtko Ursulin display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
167991d14251STvrtko Ursulin 			     enum pipe pipe,
1680277de95eSDaniel Vetter 			     uint32_t crc0, uint32_t crc1,
1681277de95eSDaniel Vetter 			     uint32_t crc2, uint32_t crc3,
1682277de95eSDaniel Vetter 			     uint32_t crc4) {}
1683277de95eSDaniel Vetter #endif
1684eba94eb9SDaniel Vetter 
1685277de95eSDaniel Vetter 
168691d14251STvrtko Ursulin static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
168791d14251STvrtko Ursulin 				     enum pipe pipe)
16885a69b89fSDaniel Vetter {
168991d14251STvrtko Ursulin 	display_pipe_crc_irq_handler(dev_priv, pipe,
16905a69b89fSDaniel Vetter 				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
16915a69b89fSDaniel Vetter 				     0, 0, 0, 0);
16925a69b89fSDaniel Vetter }
16935a69b89fSDaniel Vetter 
169491d14251STvrtko Ursulin static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
169591d14251STvrtko Ursulin 				     enum pipe pipe)
1696eba94eb9SDaniel Vetter {
169791d14251STvrtko Ursulin 	display_pipe_crc_irq_handler(dev_priv, pipe,
1698eba94eb9SDaniel Vetter 				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1699eba94eb9SDaniel Vetter 				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
1700eba94eb9SDaniel Vetter 				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
1701eba94eb9SDaniel Vetter 				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
17028bc5e955SDaniel Vetter 				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
1703eba94eb9SDaniel Vetter }
17045b3a856bSDaniel Vetter 
170591d14251STvrtko Ursulin static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
170691d14251STvrtko Ursulin 				      enum pipe pipe)
17075b3a856bSDaniel Vetter {
17080b5c5ed0SDaniel Vetter 	uint32_t res1, res2;
17090b5c5ed0SDaniel Vetter 
171091d14251STvrtko Ursulin 	if (INTEL_GEN(dev_priv) >= 3)
17110b5c5ed0SDaniel Vetter 		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
17120b5c5ed0SDaniel Vetter 	else
17130b5c5ed0SDaniel Vetter 		res1 = 0;
17140b5c5ed0SDaniel Vetter 
171591d14251STvrtko Ursulin 	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
17160b5c5ed0SDaniel Vetter 		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
17170b5c5ed0SDaniel Vetter 	else
17180b5c5ed0SDaniel Vetter 		res2 = 0;
17195b3a856bSDaniel Vetter 
172091d14251STvrtko Ursulin 	display_pipe_crc_irq_handler(dev_priv, pipe,
17210b5c5ed0SDaniel Vetter 				     I915_READ(PIPE_CRC_RES_RED(pipe)),
17220b5c5ed0SDaniel Vetter 				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
17230b5c5ed0SDaniel Vetter 				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
17240b5c5ed0SDaniel Vetter 				     res1, res2);
17255b3a856bSDaniel Vetter }
17268bf1e9f1SShuang He 
17271403c0d4SPaulo Zanoni /* The RPS events need forcewake, so we add them to a work queue and mask their
17281403c0d4SPaulo Zanoni  * IMR bits until the work is done. Other interrupts can be processed without
17291403c0d4SPaulo Zanoni  * the work queue. */
17301403c0d4SPaulo Zanoni static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
1731baf02a1fSBen Widawsky {
1732562d9baeSSagar Arun Kamble 	struct intel_rps *rps = &dev_priv->gt_pm.rps;
1733562d9baeSSagar Arun Kamble 
1734a6706b45SDeepak S 	if (pm_iir & dev_priv->pm_rps_events) {
173559cdb63dSDaniel Vetter 		spin_lock(&dev_priv->irq_lock);
1736f4e9af4fSAkash Goel 		gen6_mask_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
1737562d9baeSSagar Arun Kamble 		if (rps->interrupts_enabled) {
1738562d9baeSSagar Arun Kamble 			rps->pm_iir |= pm_iir & dev_priv->pm_rps_events;
1739562d9baeSSagar Arun Kamble 			schedule_work(&rps->work);
174041a05a3aSDaniel Vetter 		}
1741d4d70aa5SImre Deak 		spin_unlock(&dev_priv->irq_lock);
1742d4d70aa5SImre Deak 	}
1743baf02a1fSBen Widawsky 
1744bca2bf2aSPandiyan, Dhinakaran 	if (INTEL_GEN(dev_priv) >= 8)
1745c9a9a268SImre Deak 		return;
1746c9a9a268SImre Deak 
17472d1fe073SJoonas Lahtinen 	if (HAS_VEBOX(dev_priv)) {
174812638c57SBen Widawsky 		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
17493b3f1650SAkash Goel 			notify_ring(dev_priv->engine[VECS]);
175012638c57SBen Widawsky 
1751aaecdf61SDaniel Vetter 		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
1752aaecdf61SDaniel Vetter 			DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
175312638c57SBen Widawsky 	}
17541403c0d4SPaulo Zanoni }
1755baf02a1fSBen Widawsky 
175626705e20SSagar Arun Kamble static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 gt_iir)
175726705e20SSagar Arun Kamble {
175826705e20SSagar Arun Kamble 	if (gt_iir & GEN9_GUC_TO_HOST_INT_EVENT) {
17594100b2abSSagar Arun Kamble 		/* Sample the log buffer flush related bits & clear them out
17604100b2abSSagar Arun Kamble 		 * right away from the message identity register to minimize
17614100b2abSSagar Arun Kamble 		 * the probability of losing a flush interrupt when there are
17624100b2abSSagar Arun Kamble 		 * back-to-back flush interrupts.
17634100b2abSSagar Arun Kamble 		 * A new flush interrupt, for a different log buffer type
17644100b2abSSagar Arun Kamble 		 * (e.g. for ISR), can arrive whilst the Host is handling one
17654100b2abSSagar Arun Kamble 		 * (for DPC). Since the same bit is used in the message
17664100b2abSSagar Arun Kamble 		 * register for ISR & DPC, the GuC may set the bit for the 2nd
17674100b2abSSagar Arun Kamble 		 * interrupt while the Host clears it when handling the 1st.
17684100b2abSSagar Arun Kamble 		 */
17694100b2abSSagar Arun Kamble 		u32 msg, flush;
17704100b2abSSagar Arun Kamble 
17714100b2abSSagar Arun Kamble 		msg = I915_READ(SOFT_SCRATCH(15));
1772a80bc45fSArkadiusz Hiler 		flush = msg & (INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED |
1773a80bc45fSArkadiusz Hiler 			       INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER);
17744100b2abSSagar Arun Kamble 		if (flush) {
17754100b2abSSagar Arun Kamble 			/* Clear the message bits that are handled */
17764100b2abSSagar Arun Kamble 			I915_WRITE(SOFT_SCRATCH(15), msg & ~flush);
17774100b2abSSagar Arun Kamble 
17784100b2abSSagar Arun Kamble 			/* Handle flush interrupt in bottom half */
1779e7465473SOscar Mateo 			queue_work(dev_priv->guc.log.runtime.flush_wq,
1780e7465473SOscar Mateo 				   &dev_priv->guc.log.runtime.flush_work);
17815aa1ee4bSAkash Goel 
17825aa1ee4bSAkash Goel 			dev_priv->guc.log.flush_interrupt_count++;
17834100b2abSSagar Arun Kamble 		} else {
17844100b2abSSagar Arun Kamble 			/* Leaving the unhandled event bits set will not cause
17854100b2abSSagar Arun Kamble 			 * the interrupt to be re-triggered.
17864100b2abSSagar Arun Kamble 			 */
17874100b2abSSagar Arun Kamble 		}
178826705e20SSagar Arun Kamble 	}
178926705e20SSagar Arun Kamble }
179026705e20SSagar Arun Kamble 
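/*
 * Reset every pipe's PIPESTAT status bits (including any latched FIFO
 * underrun status) and forget the cached per-pipe interrupt masks.
 */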
179144d9241eSVille Syrjälä static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
179244d9241eSVille Syrjälä {
179344d9241eSVille Syrjälä 	enum pipe pipe;
179444d9241eSVille Syrjälä 
179544d9241eSVille Syrjälä 	for_each_pipe(dev_priv, pipe) {
179644d9241eSVille Syrjälä 		I915_WRITE(PIPESTAT(pipe),
179744d9241eSVille Syrjälä 			   PIPESTAT_INT_STATUS_MASK |
179844d9241eSVille Syrjälä 			   PIPE_FIFO_UNDERRUN_STATUS);
179944d9241eSVille Syrjälä 
180044d9241eSVille Syrjälä 		dev_priv->pipestat_irq_mask[pipe] = 0;
180144d9241eSVille Syrjälä 	}
180244d9241eSVille Syrjälä }
180344d9241eSVille Syrjälä 
1804eb64343cSVille Syrjälä static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
180591d14251STvrtko Ursulin 				  u32 iir, u32 pipe_stats[I915_MAX_PIPES])
18067e231dbeSJesse Barnes {
18077e231dbeSJesse Barnes 	int pipe;
18087e231dbeSJesse Barnes 
180958ead0d7SImre Deak 	spin_lock(&dev_priv->irq_lock);
18101ca993d2SVille Syrjälä 
18111ca993d2SVille Syrjälä 	if (!dev_priv->display_irqs_enabled) {
18121ca993d2SVille Syrjälä 		spin_unlock(&dev_priv->irq_lock);
18131ca993d2SVille Syrjälä 		return;
18141ca993d2SVille Syrjälä 	}
18151ca993d2SVille Syrjälä 
1816055e393fSDamien Lespiau 	for_each_pipe(dev_priv, pipe) {
1817f0f59a00SVille Syrjälä 		i915_reg_t reg;
18186b12ca56SVille Syrjälä 		u32 status_mask, enable_mask, iir_bit = 0;
181991d181ddSImre Deak 
1820bbb5eebfSDaniel Vetter 		/*
1821bbb5eebfSDaniel Vetter 		 * PIPESTAT bits get signalled even when the interrupt is
1822bbb5eebfSDaniel Vetter 		 * disabled with the mask bits, and some of the status bits do
1823bbb5eebfSDaniel Vetter 		 * not generate interrupts at all (like the underrun bit). Hence
1824bbb5eebfSDaniel Vetter 		 * we need to be careful that we only handle what we want to
1825bbb5eebfSDaniel Vetter 		 * handle.
1826bbb5eebfSDaniel Vetter 		 */
18270f239f4cSDaniel Vetter 
18280f239f4cSDaniel Vetter 		/* fifo underruns are filtered in the underrun handler. */
18296b12ca56SVille Syrjälä 		status_mask = PIPE_FIFO_UNDERRUN_STATUS;
1830bbb5eebfSDaniel Vetter 
1831bbb5eebfSDaniel Vetter 		switch (pipe) {
1832bbb5eebfSDaniel Vetter 		case PIPE_A:
1833bbb5eebfSDaniel Vetter 			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
1834bbb5eebfSDaniel Vetter 			break;
1835bbb5eebfSDaniel Vetter 		case PIPE_B:
1836bbb5eebfSDaniel Vetter 			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
1837bbb5eebfSDaniel Vetter 			break;
18383278f67fSVille Syrjälä 		case PIPE_C:
18393278f67fSVille Syrjälä 			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
18403278f67fSVille Syrjälä 			break;
1841bbb5eebfSDaniel Vetter 		}
1842bbb5eebfSDaniel Vetter 		if (iir & iir_bit)
18436b12ca56SVille Syrjälä 			status_mask |= dev_priv->pipestat_irq_mask[pipe];
1844bbb5eebfSDaniel Vetter 
18456b12ca56SVille Syrjälä 		if (!status_mask)
184691d181ddSImre Deak 			continue;
184791d181ddSImre Deak 
184891d181ddSImre Deak 		reg = PIPESTAT(pipe);
18496b12ca56SVille Syrjälä 		pipe_stats[pipe] = I915_READ(reg) & status_mask;
18506b12ca56SVille Syrjälä 		enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
18517e231dbeSJesse Barnes 
18527e231dbeSJesse Barnes 		/*
18537e231dbeSJesse Barnes 		 * Clear the PIPE*STAT regs before the IIR
18547e231dbeSJesse Barnes 		 */
18556b12ca56SVille Syrjälä 		if (pipe_stats[pipe])
18566b12ca56SVille Syrjälä 			I915_WRITE(reg, enable_mask | pipe_stats[pipe]);
18577e231dbeSJesse Barnes 	}
185858ead0d7SImre Deak 	spin_unlock(&dev_priv->irq_lock);
18592ecb8ca4SVille Syrjälä }
18602ecb8ca4SVille Syrjälä 
1861eb64343cSVille Syrjälä static void i8xx_pipestat_irq_handler(struct drm_i915_private *dev_priv,
1862eb64343cSVille Syrjälä 				      u16 iir, u32 pipe_stats[I915_MAX_PIPES])
1863eb64343cSVille Syrjälä {
1864eb64343cSVille Syrjälä 	enum pipe pipe;
1865eb64343cSVille Syrjälä 
1866eb64343cSVille Syrjälä 	for_each_pipe(dev_priv, pipe) {
1867eb64343cSVille Syrjälä 		if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
1868eb64343cSVille Syrjälä 			drm_handle_vblank(&dev_priv->drm, pipe);
1869eb64343cSVille Syrjälä 
1870eb64343cSVille Syrjälä 		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1871eb64343cSVille Syrjälä 			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1872eb64343cSVille Syrjälä 
1873eb64343cSVille Syrjälä 		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1874eb64343cSVille Syrjälä 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1875eb64343cSVille Syrjälä 	}
1876eb64343cSVille Syrjälä }
1877eb64343cSVille Syrjälä 
1878eb64343cSVille Syrjälä static void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv,
1879eb64343cSVille Syrjälä 				      u32 iir, u32 pipe_stats[I915_MAX_PIPES])
1880eb64343cSVille Syrjälä {
1881eb64343cSVille Syrjälä 	bool blc_event = false;
1882eb64343cSVille Syrjälä 	enum pipe pipe;
1883eb64343cSVille Syrjälä 
1884eb64343cSVille Syrjälä 	for_each_pipe(dev_priv, pipe) {
1885eb64343cSVille Syrjälä 		if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
1886eb64343cSVille Syrjälä 			drm_handle_vblank(&dev_priv->drm, pipe);
1887eb64343cSVille Syrjälä 
1888eb64343cSVille Syrjälä 		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
1889eb64343cSVille Syrjälä 			blc_event = true;
1890eb64343cSVille Syrjälä 
1891eb64343cSVille Syrjälä 		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1892eb64343cSVille Syrjälä 			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1893eb64343cSVille Syrjälä 
1894eb64343cSVille Syrjälä 		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1895eb64343cSVille Syrjälä 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1896eb64343cSVille Syrjälä 	}
1897eb64343cSVille Syrjälä 
1898eb64343cSVille Syrjälä 	if (blc_event || (iir & I915_ASLE_INTERRUPT))
1899eb64343cSVille Syrjälä 		intel_opregion_asle_intr(dev_priv);
1900eb64343cSVille Syrjälä }
1901eb64343cSVille Syrjälä 
1902eb64343cSVille Syrjälä static void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv,
1903eb64343cSVille Syrjälä 				      u32 iir, u32 pipe_stats[I915_MAX_PIPES])
1904eb64343cSVille Syrjälä {
1905eb64343cSVille Syrjälä 	bool blc_event = false;
1906eb64343cSVille Syrjälä 	enum pipe pipe;
1907eb64343cSVille Syrjälä 
1908eb64343cSVille Syrjälä 	for_each_pipe(dev_priv, pipe) {
1909eb64343cSVille Syrjälä 		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
1910eb64343cSVille Syrjälä 			drm_handle_vblank(&dev_priv->drm, pipe);
1911eb64343cSVille Syrjälä 
1912eb64343cSVille Syrjälä 		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
1913eb64343cSVille Syrjälä 			blc_event = true;
1914eb64343cSVille Syrjälä 
1915eb64343cSVille Syrjälä 		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1916eb64343cSVille Syrjälä 			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1917eb64343cSVille Syrjälä 
1918eb64343cSVille Syrjälä 		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1919eb64343cSVille Syrjälä 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1920eb64343cSVille Syrjälä 	}
1921eb64343cSVille Syrjälä 
1922eb64343cSVille Syrjälä 	if (blc_event || (iir & I915_ASLE_INTERRUPT))
1923eb64343cSVille Syrjälä 		intel_opregion_asle_intr(dev_priv);
1924eb64343cSVille Syrjälä 
1925eb64343cSVille Syrjälä 	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
1926eb64343cSVille Syrjälä 		gmbus_irq_handler(dev_priv);
1927eb64343cSVille Syrjälä }
1928eb64343cSVille Syrjälä 
192991d14251STvrtko Ursulin static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
19302ecb8ca4SVille Syrjälä 					    u32 pipe_stats[I915_MAX_PIPES])
19312ecb8ca4SVille Syrjälä {
19322ecb8ca4SVille Syrjälä 	enum pipe pipe;
19337e231dbeSJesse Barnes 
1934055e393fSDamien Lespiau 	for_each_pipe(dev_priv, pipe) {
1935fd3a4024SDaniel Vetter 		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
1936fd3a4024SDaniel Vetter 			drm_handle_vblank(&dev_priv->drm, pipe);
19374356d586SDaniel Vetter 
19384356d586SDaniel Vetter 		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
193991d14251STvrtko Ursulin 			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
19402d9d2b0bSVille Syrjälä 
19411f7247c0SDaniel Vetter 		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
19421f7247c0SDaniel Vetter 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
194331acc7f5SJesse Barnes 	}
194431acc7f5SJesse Barnes 
1945c1874ed7SImre Deak 	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
194691d14251STvrtko Ursulin 		gmbus_irq_handler(dev_priv);
1947c1874ed7SImre Deak }
1948c1874ed7SImre Deak 
19491ae3c34cSVille Syrjälä static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
195016c6c56bSVille Syrjälä {
195116c6c56bSVille Syrjälä 	u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
195216c6c56bSVille Syrjälä 
19531ae3c34cSVille Syrjälä 	if (hotplug_status)
19543ff60f89SOscar Mateo 		I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
19551ae3c34cSVille Syrjälä 
19561ae3c34cSVille Syrjälä 	return hotplug_status;
19571ae3c34cSVille Syrjälä }
19581ae3c34cSVille Syrjälä 
195991d14251STvrtko Ursulin static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv,
19601ae3c34cSVille Syrjälä 				 u32 hotplug_status)
19611ae3c34cSVille Syrjälä {
19621ae3c34cSVille Syrjälä 	u32 pin_mask = 0, long_mask = 0;
19633ff60f89SOscar Mateo 
196491d14251STvrtko Ursulin 	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
196591d14251STvrtko Ursulin 	    IS_CHERRYVIEW(dev_priv)) {
196616c6c56bSVille Syrjälä 		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
196716c6c56bSVille Syrjälä 
196858f2cf24SVille Syrjälä 		if (hotplug_trigger) {
1969fd63e2a9SImre Deak 			intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
1970fd63e2a9SImre Deak 					   hotplug_trigger, hpd_status_g4x,
1971fd63e2a9SImre Deak 					   i9xx_port_hotplug_long_detect);
197258f2cf24SVille Syrjälä 
197391d14251STvrtko Ursulin 			intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
197458f2cf24SVille Syrjälä 		}
1975369712e8SJani Nikula 
1976369712e8SJani Nikula 		if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
197791d14251STvrtko Ursulin 			dp_aux_irq_handler(dev_priv);
197816c6c56bSVille Syrjälä 	} else {
197916c6c56bSVille Syrjälä 		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
198016c6c56bSVille Syrjälä 
198158f2cf24SVille Syrjälä 		if (hotplug_trigger) {
1982fd63e2a9SImre Deak 			intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
19834e3d1e26SVille Syrjälä 					   hotplug_trigger, hpd_status_i915,
1984fd63e2a9SImre Deak 					   i9xx_port_hotplug_long_detect);
198591d14251STvrtko Ursulin 			intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
198616c6c56bSVille Syrjälä 		}
19873ff60f89SOscar Mateo 	}
198858f2cf24SVille Syrjälä }
198916c6c56bSVille Syrjälä 
1990c1874ed7SImre Deak static irqreturn_t valleyview_irq_handler(int irq, void *arg)
1991c1874ed7SImre Deak {
199245a83f84SDaniel Vetter 	struct drm_device *dev = arg;
1993fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
1994c1874ed7SImre Deak 	irqreturn_t ret = IRQ_NONE;
1995c1874ed7SImre Deak 
19962dd2a883SImre Deak 	if (!intel_irqs_enabled(dev_priv))
19972dd2a883SImre Deak 		return IRQ_NONE;
19982dd2a883SImre Deak 
19991f814dacSImre Deak 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
20001f814dacSImre Deak 	disable_rpm_wakeref_asserts(dev_priv);
20011f814dacSImre Deak 
20021e1cace9SVille Syrjälä 	do {
20036e814800SVille Syrjälä 		u32 iir, gt_iir, pm_iir;
20042ecb8ca4SVille Syrjälä 		u32 pipe_stats[I915_MAX_PIPES] = {};
20051ae3c34cSVille Syrjälä 		u32 hotplug_status = 0;
2006a5e485a9SVille Syrjälä 		u32 ier = 0;
20073ff60f89SOscar Mateo 
2008c1874ed7SImre Deak 		gt_iir = I915_READ(GTIIR);
2009c1874ed7SImre Deak 		pm_iir = I915_READ(GEN6_PMIIR);
20103ff60f89SOscar Mateo 		iir = I915_READ(VLV_IIR);
2011c1874ed7SImre Deak 
2012c1874ed7SImre Deak 		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
20131e1cace9SVille Syrjälä 			break;
2014c1874ed7SImre Deak 
2015c1874ed7SImre Deak 		ret = IRQ_HANDLED;
2016c1874ed7SImre Deak 
2017a5e485a9SVille Syrjälä 		/*
2018a5e485a9SVille Syrjälä 		 * Theory on interrupt generation, based on empirical evidence:
2019a5e485a9SVille Syrjälä 		 *
2020a5e485a9SVille Syrjälä 		 * x = ((VLV_IIR & VLV_IER) ||
2021a5e485a9SVille Syrjälä 		 *      (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) &&
2022a5e485a9SVille Syrjälä 		 *       (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE)));
2023a5e485a9SVille Syrjälä 		 *
2024a5e485a9SVille Syrjälä 		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
2025a5e485a9SVille Syrjälä 		 * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to
2026a5e485a9SVille Syrjälä 		 * guarantee the CPU interrupt will be raised again even if we
2027a5e485a9SVille Syrjälä 		 * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR
2028a5e485a9SVille Syrjälä 		 * bits this time around.
2029a5e485a9SVille Syrjälä 		 */
20304a0a0202SVille Syrjälä 		I915_WRITE(VLV_MASTER_IER, 0);
2031a5e485a9SVille Syrjälä 		ier = I915_READ(VLV_IER);
2032a5e485a9SVille Syrjälä 		I915_WRITE(VLV_IER, 0);
20334a0a0202SVille Syrjälä 
20344a0a0202SVille Syrjälä 		if (gt_iir)
20354a0a0202SVille Syrjälä 			I915_WRITE(GTIIR, gt_iir);
20364a0a0202SVille Syrjälä 		if (pm_iir)
20374a0a0202SVille Syrjälä 			I915_WRITE(GEN6_PMIIR, pm_iir);
20384a0a0202SVille Syrjälä 
20397ce4d1f2SVille Syrjälä 		if (iir & I915_DISPLAY_PORT_INTERRUPT)
20401ae3c34cSVille Syrjälä 			hotplug_status = i9xx_hpd_irq_ack(dev_priv);
20417ce4d1f2SVille Syrjälä 
20423ff60f89SOscar Mateo 		/* Call regardless, as some status bits might not be
20433ff60f89SOscar Mateo 		 * signalled in iir */
2044eb64343cSVille Syrjälä 		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
20457ce4d1f2SVille Syrjälä 
2046eef57324SJerome Anand 		if (iir & (I915_LPE_PIPE_A_INTERRUPT |
2047eef57324SJerome Anand 			   I915_LPE_PIPE_B_INTERRUPT))
2048eef57324SJerome Anand 			intel_lpe_audio_irq_handler(dev_priv);
2049eef57324SJerome Anand 
20507ce4d1f2SVille Syrjälä 		/*
20517ce4d1f2SVille Syrjälä 		 * VLV_IIR is single buffered, and reflects the level
20527ce4d1f2SVille Syrjälä 		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
20537ce4d1f2SVille Syrjälä 		 */
20547ce4d1f2SVille Syrjälä 		if (iir)
20557ce4d1f2SVille Syrjälä 			I915_WRITE(VLV_IIR, iir);
20564a0a0202SVille Syrjälä 
2057a5e485a9SVille Syrjälä 		I915_WRITE(VLV_IER, ier);
20584a0a0202SVille Syrjälä 		I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
20594a0a0202SVille Syrjälä 		POSTING_READ(VLV_MASTER_IER);
20601ae3c34cSVille Syrjälä 
206152894874SVille Syrjälä 		if (gt_iir)
2062261e40b8SVille Syrjälä 			snb_gt_irq_handler(dev_priv, gt_iir);
206352894874SVille Syrjälä 		if (pm_iir)
206452894874SVille Syrjälä 			gen6_rps_irq_handler(dev_priv, pm_iir);
206552894874SVille Syrjälä 
20661ae3c34cSVille Syrjälä 		if (hotplug_status)
206791d14251STvrtko Ursulin 			i9xx_hpd_irq_handler(dev_priv, hotplug_status);
20682ecb8ca4SVille Syrjälä 
206991d14251STvrtko Ursulin 		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
20701e1cace9SVille Syrjälä 	} while (0);
20717e231dbeSJesse Barnes 
20721f814dacSImre Deak 	enable_rpm_wakeref_asserts(dev_priv);
20731f814dacSImre Deak 
20747e231dbeSJesse Barnes 	return ret;
20757e231dbeSJesse Barnes }
20767e231dbeSJesse Barnes 
207743f328d7SVille Syrjälä static irqreturn_t cherryview_irq_handler(int irq, void *arg)
207843f328d7SVille Syrjälä {
207945a83f84SDaniel Vetter 	struct drm_device *dev = arg;
2080fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
208143f328d7SVille Syrjälä 	irqreturn_t ret = IRQ_NONE;
208243f328d7SVille Syrjälä 
20832dd2a883SImre Deak 	if (!intel_irqs_enabled(dev_priv))
20842dd2a883SImre Deak 		return IRQ_NONE;
20852dd2a883SImre Deak 
20861f814dacSImre Deak 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
20871f814dacSImre Deak 	disable_rpm_wakeref_asserts(dev_priv);
20881f814dacSImre Deak 
2089579de73bSChris Wilson 	do {
20906e814800SVille Syrjälä 		u32 master_ctl, iir;
2091e30e251aSVille Syrjälä 		u32 gt_iir[4] = {};
20922ecb8ca4SVille Syrjälä 		u32 pipe_stats[I915_MAX_PIPES] = {};
20931ae3c34cSVille Syrjälä 		u32 hotplug_status = 0;
2094a5e485a9SVille Syrjälä 		u32 ier = 0;
2095a5e485a9SVille Syrjälä 
20968e5fd599SVille Syrjälä 		master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
20973278f67fSVille Syrjälä 		iir = I915_READ(VLV_IIR);
20983278f67fSVille Syrjälä 
20993278f67fSVille Syrjälä 		if (master_ctl == 0 && iir == 0)
21008e5fd599SVille Syrjälä 			break;
210143f328d7SVille Syrjälä 
210227b6c122SOscar Mateo 		ret = IRQ_HANDLED;
210327b6c122SOscar Mateo 
2104a5e485a9SVille Syrjälä 		/*
2105a5e485a9SVille Syrjälä 		 * Theory on interrupt generation, based on empirical evidence:
2106a5e485a9SVille Syrjälä 		 *
2107a5e485a9SVille Syrjälä 		 * x = ((VLV_IIR & VLV_IER) ||
2108a5e485a9SVille Syrjälä 		 *      ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) &&
2109a5e485a9SVille Syrjälä 		 *       (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL)));
2110a5e485a9SVille Syrjälä 		 *
2111a5e485a9SVille Syrjälä 		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
2112a5e485a9SVille Syrjälä 		 * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to
2113a5e485a9SVille Syrjälä 		 * guarantee the CPU interrupt will be raised again even if we
2114a5e485a9SVille Syrjälä 		 * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL
2115a5e485a9SVille Syrjälä 		 * bits this time around.
2116a5e485a9SVille Syrjälä 		 */
211743f328d7SVille Syrjälä 		I915_WRITE(GEN8_MASTER_IRQ, 0);
2118a5e485a9SVille Syrjälä 		ier = I915_READ(VLV_IER);
2119a5e485a9SVille Syrjälä 		I915_WRITE(VLV_IER, 0);
212043f328d7SVille Syrjälä 
2121e30e251aSVille Syrjälä 		gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);
212227b6c122SOscar Mateo 
212327b6c122SOscar Mateo 		if (iir & I915_DISPLAY_PORT_INTERRUPT)
21241ae3c34cSVille Syrjälä 			hotplug_status = i9xx_hpd_irq_ack(dev_priv);
212543f328d7SVille Syrjälä 
212627b6c122SOscar Mateo 		/* Call regardless, as some status bits might not be
212727b6c122SOscar Mateo 		 * signalled in iir */
2128eb64343cSVille Syrjälä 		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
212943f328d7SVille Syrjälä 
2130eef57324SJerome Anand 		if (iir & (I915_LPE_PIPE_A_INTERRUPT |
2131eef57324SJerome Anand 			   I915_LPE_PIPE_B_INTERRUPT |
2132eef57324SJerome Anand 			   I915_LPE_PIPE_C_INTERRUPT))
2133eef57324SJerome Anand 			intel_lpe_audio_irq_handler(dev_priv);
2134eef57324SJerome Anand 
21357ce4d1f2SVille Syrjälä 		/*
21367ce4d1f2SVille Syrjälä 		 * VLV_IIR is single buffered, and reflects the level
21377ce4d1f2SVille Syrjälä 		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
21387ce4d1f2SVille Syrjälä 		 */
21397ce4d1f2SVille Syrjälä 		if (iir)
21407ce4d1f2SVille Syrjälä 			I915_WRITE(VLV_IIR, iir);
21417ce4d1f2SVille Syrjälä 
2142a5e485a9SVille Syrjälä 		I915_WRITE(VLV_IER, ier);
2143e5328c43SVille Syrjälä 		I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
214443f328d7SVille Syrjälä 		POSTING_READ(GEN8_MASTER_IRQ);
21451ae3c34cSVille Syrjälä 
2146e30e251aSVille Syrjälä 		gen8_gt_irq_handler(dev_priv, gt_iir);
2147e30e251aSVille Syrjälä 
21481ae3c34cSVille Syrjälä 		if (hotplug_status)
214991d14251STvrtko Ursulin 			i9xx_hpd_irq_handler(dev_priv, hotplug_status);
21502ecb8ca4SVille Syrjälä 
215191d14251STvrtko Ursulin 		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
2152579de73bSChris Wilson 	} while (0);
21533278f67fSVille Syrjälä 
21541f814dacSImre Deak 	enable_rpm_wakeref_asserts(dev_priv);
21551f814dacSImre Deak 
215643f328d7SVille Syrjälä 	return ret;
215743f328d7SVille Syrjälä }
215843f328d7SVille Syrjälä 
215991d14251STvrtko Ursulin static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
216091d14251STvrtko Ursulin 				u32 hotplug_trigger,
216140e56410SVille Syrjälä 				const u32 hpd[HPD_NUM_PINS])
2162776ad806SJesse Barnes {
216342db67d6SVille Syrjälä 	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2164776ad806SJesse Barnes 
21656a39d7c9SJani Nikula 	/*
21666a39d7c9SJani Nikula 	 * Somehow the PCH doesn't seem to really ack the interrupt to the CPU
21676a39d7c9SJani Nikula 	 * unless we touch the hotplug register, even if hotplug_trigger is
21686a39d7c9SJani Nikula 	 * zero. Not acking leads to "The master control interrupt lied (SDE)!"
21696a39d7c9SJani Nikula 	 * errors.
21706a39d7c9SJani Nikula 	 */
217113cf5504SDave Airlie 	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
21726a39d7c9SJani Nikula 	if (!hotplug_trigger) {
21736a39d7c9SJani Nikula 		u32 mask = PORTA_HOTPLUG_STATUS_MASK |
21746a39d7c9SJani Nikula 			PORTD_HOTPLUG_STATUS_MASK |
21756a39d7c9SJani Nikula 			PORTC_HOTPLUG_STATUS_MASK |
21766a39d7c9SJani Nikula 			PORTB_HOTPLUG_STATUS_MASK;
21776a39d7c9SJani Nikula 		dig_hotplug_reg &= ~mask;
21786a39d7c9SJani Nikula 	}
21796a39d7c9SJani Nikula 
218013cf5504SDave Airlie 	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
21816a39d7c9SJani Nikula 	if (!hotplug_trigger)
21826a39d7c9SJani Nikula 		return;
218313cf5504SDave Airlie 
2184fd63e2a9SImre Deak 	intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
218540e56410SVille Syrjälä 			   dig_hotplug_reg, hpd,
2186fd63e2a9SImre Deak 			   pch_port_hotplug_long_detect);
218740e56410SVille Syrjälä 
218891d14251STvrtko Ursulin 	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2189aaf5ec2eSSonika Jindal }
219091d131d2SDaniel Vetter 
219191d14251STvrtko Ursulin static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
219240e56410SVille Syrjälä {
219340e56410SVille Syrjälä 	int pipe;
219440e56410SVille Syrjälä 	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
219540e56410SVille Syrjälä 
219691d14251STvrtko Ursulin 	ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ibx);
219740e56410SVille Syrjälä 
2198cfc33bf7SVille Syrjälä 	if (pch_iir & SDE_AUDIO_POWER_MASK) {
2199cfc33bf7SVille Syrjälä 		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
2200776ad806SJesse Barnes 			       SDE_AUDIO_POWER_SHIFT);
2201cfc33bf7SVille Syrjälä 		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
2202cfc33bf7SVille Syrjälä 				 port_name(port));
2203cfc33bf7SVille Syrjälä 	}
2204776ad806SJesse Barnes 
2205ce99c256SDaniel Vetter 	if (pch_iir & SDE_AUX_MASK)
220691d14251STvrtko Ursulin 		dp_aux_irq_handler(dev_priv);
2207ce99c256SDaniel Vetter 
2208776ad806SJesse Barnes 	if (pch_iir & SDE_GMBUS)
220991d14251STvrtko Ursulin 		gmbus_irq_handler(dev_priv);
2210776ad806SJesse Barnes 
2211776ad806SJesse Barnes 	if (pch_iir & SDE_AUDIO_HDCP_MASK)
2212776ad806SJesse Barnes 		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
2213776ad806SJesse Barnes 
2214776ad806SJesse Barnes 	if (pch_iir & SDE_AUDIO_TRANS_MASK)
2215776ad806SJesse Barnes 		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
2216776ad806SJesse Barnes 
2217776ad806SJesse Barnes 	if (pch_iir & SDE_POISON)
2218776ad806SJesse Barnes 		DRM_ERROR("PCH poison interrupt\n");
2219776ad806SJesse Barnes 
22209db4a9c7SJesse Barnes 	if (pch_iir & SDE_FDI_MASK)
2221055e393fSDamien Lespiau 		for_each_pipe(dev_priv, pipe)
22229db4a9c7SJesse Barnes 			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
22239db4a9c7SJesse Barnes 					 pipe_name(pipe),
22249db4a9c7SJesse Barnes 					 I915_READ(FDI_RX_IIR(pipe)));
2225776ad806SJesse Barnes 
2226776ad806SJesse Barnes 	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
2227776ad806SJesse Barnes 		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
2228776ad806SJesse Barnes 
2229776ad806SJesse Barnes 	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
2230776ad806SJesse Barnes 		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
2231776ad806SJesse Barnes 
2232776ad806SJesse Barnes 	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
2233a2196033SMatthias Kaehlcke 		intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A);
22348664281bSPaulo Zanoni 
22358664281bSPaulo Zanoni 	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
2236a2196033SMatthias Kaehlcke 		intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_B);
22378664281bSPaulo Zanoni }
22388664281bSPaulo Zanoni 
223991d14251STvrtko Ursulin static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
22408664281bSPaulo Zanoni {
22418664281bSPaulo Zanoni 	u32 err_int = I915_READ(GEN7_ERR_INT);
22425a69b89fSDaniel Vetter 	enum pipe pipe;
22438664281bSPaulo Zanoni 
2244de032bf4SPaulo Zanoni 	if (err_int & ERR_INT_POISON)
2245de032bf4SPaulo Zanoni 		DRM_ERROR("Poison interrupt\n");
2246de032bf4SPaulo Zanoni 
2247055e393fSDamien Lespiau 	for_each_pipe(dev_priv, pipe) {
22481f7247c0SDaniel Vetter 		if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
22491f7247c0SDaniel Vetter 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
22508664281bSPaulo Zanoni 
22515a69b89fSDaniel Vetter 		if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
225291d14251STvrtko Ursulin 			if (IS_IVYBRIDGE(dev_priv))
225391d14251STvrtko Ursulin 				ivb_pipe_crc_irq_handler(dev_priv, pipe);
22545a69b89fSDaniel Vetter 			else
225591d14251STvrtko Ursulin 				hsw_pipe_crc_irq_handler(dev_priv, pipe);
22565a69b89fSDaniel Vetter 		}
22575a69b89fSDaniel Vetter 	}
22588bf1e9f1SShuang He 
22598664281bSPaulo Zanoni 	I915_WRITE(GEN7_ERR_INT, err_int);
22608664281bSPaulo Zanoni }
22618664281bSPaulo Zanoni 
226291d14251STvrtko Ursulin static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
22638664281bSPaulo Zanoni {
22648664281bSPaulo Zanoni 	u32 serr_int = I915_READ(SERR_INT);
226545c1cd87SMika Kahola 	enum pipe pipe;
22668664281bSPaulo Zanoni 
2267de032bf4SPaulo Zanoni 	if (serr_int & SERR_INT_POISON)
2268de032bf4SPaulo Zanoni 		DRM_ERROR("PCH poison interrupt\n");
2269de032bf4SPaulo Zanoni 
227045c1cd87SMika Kahola 	for_each_pipe(dev_priv, pipe)
227145c1cd87SMika Kahola 		if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe))
227245c1cd87SMika Kahola 			intel_pch_fifo_underrun_irq_handler(dev_priv, pipe);
22738664281bSPaulo Zanoni 
22748664281bSPaulo Zanoni 	I915_WRITE(SERR_INT, serr_int);
2275776ad806SJesse Barnes }
2276776ad806SJesse Barnes 
227791d14251STvrtko Ursulin static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
227823e81d69SAdam Jackson {
227923e81d69SAdam Jackson 	int pipe;
22806dbf30ceSVille Syrjälä 	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
2281aaf5ec2eSSonika Jindal 
228291d14251STvrtko Ursulin 	ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_cpt);
228391d131d2SDaniel Vetter 
2284cfc33bf7SVille Syrjälä 	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
2285cfc33bf7SVille Syrjälä 		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
228623e81d69SAdam Jackson 			       SDE_AUDIO_POWER_SHIFT_CPT);
2287cfc33bf7SVille Syrjälä 		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
2288cfc33bf7SVille Syrjälä 				 port_name(port));
2289cfc33bf7SVille Syrjälä 	}
229023e81d69SAdam Jackson 
229123e81d69SAdam Jackson 	if (pch_iir & SDE_AUX_MASK_CPT)
229291d14251STvrtko Ursulin 		dp_aux_irq_handler(dev_priv);
229323e81d69SAdam Jackson 
229423e81d69SAdam Jackson 	if (pch_iir & SDE_GMBUS_CPT)
229591d14251STvrtko Ursulin 		gmbus_irq_handler(dev_priv);
229623e81d69SAdam Jackson 
229723e81d69SAdam Jackson 	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
229823e81d69SAdam Jackson 		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
229923e81d69SAdam Jackson 
230023e81d69SAdam Jackson 	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
230123e81d69SAdam Jackson 		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
230223e81d69SAdam Jackson 
230323e81d69SAdam Jackson 	if (pch_iir & SDE_FDI_MASK_CPT)
2304055e393fSDamien Lespiau 		for_each_pipe(dev_priv, pipe)
230523e81d69SAdam Jackson 			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
230623e81d69SAdam Jackson 					 pipe_name(pipe),
230723e81d69SAdam Jackson 					 I915_READ(FDI_RX_IIR(pipe)));
23088664281bSPaulo Zanoni 
23098664281bSPaulo Zanoni 	if (pch_iir & SDE_ERROR_CPT)
231091d14251STvrtko Ursulin 		cpt_serr_int_handler(dev_priv);
231123e81d69SAdam Jackson }
231223e81d69SAdam Jackson 
231391d14251STvrtko Ursulin static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
23146dbf30ceSVille Syrjälä {
23156dbf30ceSVille Syrjälä 	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
23166dbf30ceSVille Syrjälä 		~SDE_PORTE_HOTPLUG_SPT;
23176dbf30ceSVille Syrjälä 	u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
23186dbf30ceSVille Syrjälä 	u32 pin_mask = 0, long_mask = 0;
23196dbf30ceSVille Syrjälä 
23206dbf30ceSVille Syrjälä 	if (hotplug_trigger) {
23216dbf30ceSVille Syrjälä 		u32 dig_hotplug_reg;
23226dbf30ceSVille Syrjälä 
23236dbf30ceSVille Syrjälä 		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
23246dbf30ceSVille Syrjälä 		I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
23256dbf30ceSVille Syrjälä 
23266dbf30ceSVille Syrjälä 		intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
23276dbf30ceSVille Syrjälä 				   dig_hotplug_reg, hpd_spt,
232874c0b395SVille Syrjälä 				   spt_port_hotplug_long_detect);
23296dbf30ceSVille Syrjälä 	}
23306dbf30ceSVille Syrjälä 
23316dbf30ceSVille Syrjälä 	if (hotplug2_trigger) {
23326dbf30ceSVille Syrjälä 		u32 dig_hotplug_reg;
23336dbf30ceSVille Syrjälä 
23346dbf30ceSVille Syrjälä 		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2);
23356dbf30ceSVille Syrjälä 		I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg);
23366dbf30ceSVille Syrjälä 
23376dbf30ceSVille Syrjälä 		intel_get_hpd_pins(&pin_mask, &long_mask, hotplug2_trigger,
23386dbf30ceSVille Syrjälä 				   dig_hotplug_reg, hpd_spt,
23396dbf30ceSVille Syrjälä 				   spt_port_hotplug2_long_detect);
23406dbf30ceSVille Syrjälä 	}
23416dbf30ceSVille Syrjälä 
23426dbf30ceSVille Syrjälä 	if (pin_mask)
234391d14251STvrtko Ursulin 		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
23446dbf30ceSVille Syrjälä 
23456dbf30ceSVille Syrjälä 	if (pch_iir & SDE_GMBUS_CPT)
234691d14251STvrtko Ursulin 		gmbus_irq_handler(dev_priv);
23476dbf30ceSVille Syrjälä }
23486dbf30ceSVille Syrjälä 
234991d14251STvrtko Ursulin static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv,
235091d14251STvrtko Ursulin 				u32 hotplug_trigger,
235140e56410SVille Syrjälä 				const u32 hpd[HPD_NUM_PINS])
2352c008bc6eSPaulo Zanoni {
2353e4ce95aaSVille Syrjälä 	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2354e4ce95aaSVille Syrjälä 
2355e4ce95aaSVille Syrjälä 	dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
2356e4ce95aaSVille Syrjälä 	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);
2357e4ce95aaSVille Syrjälä 
2358e4ce95aaSVille Syrjälä 	intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
235940e56410SVille Syrjälä 			   dig_hotplug_reg, hpd,
2360e4ce95aaSVille Syrjälä 			   ilk_port_hotplug_long_detect);
236140e56410SVille Syrjälä 
236291d14251STvrtko Ursulin 	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2363e4ce95aaSVille Syrjälä }
2364c008bc6eSPaulo Zanoni 
236591d14251STvrtko Ursulin static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
236691d14251STvrtko Ursulin 				    u32 de_iir)
236740e56410SVille Syrjälä {
236840e56410SVille Syrjälä 	enum pipe pipe;
236940e56410SVille Syrjälä 	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;
237040e56410SVille Syrjälä 
237140e56410SVille Syrjälä 	if (hotplug_trigger)
237291d14251STvrtko Ursulin 		ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ilk);
237340e56410SVille Syrjälä 
2374c008bc6eSPaulo Zanoni 	if (de_iir & DE_AUX_CHANNEL_A)
237591d14251STvrtko Ursulin 		dp_aux_irq_handler(dev_priv);
2376c008bc6eSPaulo Zanoni 
2377c008bc6eSPaulo Zanoni 	if (de_iir & DE_GSE)
237891d14251STvrtko Ursulin 		intel_opregion_asle_intr(dev_priv);
2379c008bc6eSPaulo Zanoni 
2380c008bc6eSPaulo Zanoni 	if (de_iir & DE_POISON)
2381c008bc6eSPaulo Zanoni 		DRM_ERROR("Poison interrupt\n");
2382c008bc6eSPaulo Zanoni 
2383055e393fSDamien Lespiau 	for_each_pipe(dev_priv, pipe) {
2384fd3a4024SDaniel Vetter 		if (de_iir & DE_PIPE_VBLANK(pipe))
2385fd3a4024SDaniel Vetter 			drm_handle_vblank(&dev_priv->drm, pipe);
2386c008bc6eSPaulo Zanoni 
238740da17c2SDaniel Vetter 		if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
23881f7247c0SDaniel Vetter 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2389c008bc6eSPaulo Zanoni 
239040da17c2SDaniel Vetter 		if (de_iir & DE_PIPE_CRC_DONE(pipe))
239191d14251STvrtko Ursulin 			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
2392c008bc6eSPaulo Zanoni 	}
2393c008bc6eSPaulo Zanoni 
2394c008bc6eSPaulo Zanoni 	/* check event from PCH */
2395c008bc6eSPaulo Zanoni 	if (de_iir & DE_PCH_EVENT) {
2396c008bc6eSPaulo Zanoni 		u32 pch_iir = I915_READ(SDEIIR);
2397c008bc6eSPaulo Zanoni 
239891d14251STvrtko Ursulin 		if (HAS_PCH_CPT(dev_priv))
239991d14251STvrtko Ursulin 			cpt_irq_handler(dev_priv, pch_iir);
2400c008bc6eSPaulo Zanoni 		else
240191d14251STvrtko Ursulin 			ibx_irq_handler(dev_priv, pch_iir);
2402c008bc6eSPaulo Zanoni 
2403c008bc6eSPaulo Zanoni 		/* should clear PCH hotplug event before clearing CPU irq */
2404c008bc6eSPaulo Zanoni 		I915_WRITE(SDEIIR, pch_iir);
2405c008bc6eSPaulo Zanoni 	}
2406c008bc6eSPaulo Zanoni 
240791d14251STvrtko Ursulin 	if (IS_GEN5(dev_priv) && de_iir & DE_PCU_EVENT)
240891d14251STvrtko Ursulin 		ironlake_rps_change_irq_handler(dev_priv);
2409c008bc6eSPaulo Zanoni }
2410c008bc6eSPaulo Zanoni 
241191d14251STvrtko Ursulin static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
241291d14251STvrtko Ursulin 				    u32 de_iir)
24139719fb98SPaulo Zanoni {
241407d27e20SDamien Lespiau 	enum pipe pipe;
241523bb4cb5SVille Syrjälä 	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;
241623bb4cb5SVille Syrjälä 
241740e56410SVille Syrjälä 	if (hotplug_trigger)
241891d14251STvrtko Ursulin 		ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ivb);
24199719fb98SPaulo Zanoni 
24209719fb98SPaulo Zanoni 	if (de_iir & DE_ERR_INT_IVB)
242191d14251STvrtko Ursulin 		ivb_err_int_handler(dev_priv);
24229719fb98SPaulo Zanoni 
24239719fb98SPaulo Zanoni 	if (de_iir & DE_AUX_CHANNEL_A_IVB)
242491d14251STvrtko Ursulin 		dp_aux_irq_handler(dev_priv);
24259719fb98SPaulo Zanoni 
24269719fb98SPaulo Zanoni 	if (de_iir & DE_GSE_IVB)
242791d14251STvrtko Ursulin 		intel_opregion_asle_intr(dev_priv);
24289719fb98SPaulo Zanoni 
2429055e393fSDamien Lespiau 	for_each_pipe(dev_priv, pipe) {
2430fd3a4024SDaniel Vetter 		if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)))
2431fd3a4024SDaniel Vetter 			drm_handle_vblank(&dev_priv->drm, pipe);
24329719fb98SPaulo Zanoni 	}
24339719fb98SPaulo Zanoni 
24349719fb98SPaulo Zanoni 	/* check event from PCH */
243591d14251STvrtko Ursulin 	if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) {
24369719fb98SPaulo Zanoni 		u32 pch_iir = I915_READ(SDEIIR);
24379719fb98SPaulo Zanoni 
243891d14251STvrtko Ursulin 		cpt_irq_handler(dev_priv, pch_iir);
24399719fb98SPaulo Zanoni 
24409719fb98SPaulo Zanoni 		/* clear PCH hotplug event before clear CPU irq */
24409719fb98SPaulo Zanoni 		/* clear PCH hotplug event before clearing CPU irq */
24429719fb98SPaulo Zanoni 	}
24439719fb98SPaulo Zanoni }
24449719fb98SPaulo Zanoni 
244572c90f62SOscar Mateo /*
244672c90f62SOscar Mateo  * To handle irqs with the minimum potential for races with fresh interrupts, we:
244772c90f62SOscar Mateo  * 1 - Disable Master Interrupt Control.
244872c90f62SOscar Mateo  * 2 - Find the source(s) of the interrupt.
244972c90f62SOscar Mateo  * 3 - Clear the Interrupt Identity bits (IIR).
245072c90f62SOscar Mateo  * 4 - Process the interrupt(s) that had bits set in the IIRs.
245172c90f62SOscar Mateo  * 5 - Re-enable Master Interrupt Control.
245272c90f62SOscar Mateo  */
2453f1af8fc1SPaulo Zanoni static irqreturn_t ironlake_irq_handler(int irq, void *arg)
2454b1f14ad0SJesse Barnes {
245545a83f84SDaniel Vetter 	struct drm_device *dev = arg;
2456fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
2457f1af8fc1SPaulo Zanoni 	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
24580e43406bSChris Wilson 	irqreturn_t ret = IRQ_NONE;
2459b1f14ad0SJesse Barnes 
24602dd2a883SImre Deak 	if (!intel_irqs_enabled(dev_priv))
24612dd2a883SImre Deak 		return IRQ_NONE;
24622dd2a883SImre Deak 
24631f814dacSImre Deak 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
24641f814dacSImre Deak 	disable_rpm_wakeref_asserts(dev_priv);
24651f814dacSImre Deak 
2466b1f14ad0SJesse Barnes 	/* disable master interrupt before clearing iir  */
2467b1f14ad0SJesse Barnes 	de_ier = I915_READ(DEIER);
2468b1f14ad0SJesse Barnes 	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
246923a78516SPaulo Zanoni 	POSTING_READ(DEIER);
24700e43406bSChris Wilson 
247144498aeaSPaulo Zanoni 	/* Disable south interrupts. We'll only write to SDEIIR once, so further
247244498aeaSPaulo Zanoni 	 * interrupts will be stored on its back queue, and then we'll be
247344498aeaSPaulo Zanoni 	 * able to process them after we restore SDEIER (as soon as we restore
247444498aeaSPaulo Zanoni 	 * it, we'll get an interrupt if SDEIIR still has something to process
247544498aeaSPaulo Zanoni 	 * due to its back queue). */
247691d14251STvrtko Ursulin 	if (!HAS_PCH_NOP(dev_priv)) {
247744498aeaSPaulo Zanoni 		sde_ier = I915_READ(SDEIER);
247844498aeaSPaulo Zanoni 		I915_WRITE(SDEIER, 0);
247944498aeaSPaulo Zanoni 		POSTING_READ(SDEIER);
2480ab5c608bSBen Widawsky 	}
248144498aeaSPaulo Zanoni 
248272c90f62SOscar Mateo 	/* Find, clear, then process each source of interrupt */
248372c90f62SOscar Mateo 
24840e43406bSChris Wilson 	gt_iir = I915_READ(GTIIR);
24850e43406bSChris Wilson 	if (gt_iir) {
248672c90f62SOscar Mateo 		I915_WRITE(GTIIR, gt_iir);
248772c90f62SOscar Mateo 		ret = IRQ_HANDLED;
248891d14251STvrtko Ursulin 		if (INTEL_GEN(dev_priv) >= 6)
2489261e40b8SVille Syrjälä 			snb_gt_irq_handler(dev_priv, gt_iir);
2490d8fc8a47SPaulo Zanoni 		else
2491261e40b8SVille Syrjälä 			ilk_gt_irq_handler(dev_priv, gt_iir);
24920e43406bSChris Wilson 	}
2493b1f14ad0SJesse Barnes 
2494b1f14ad0SJesse Barnes 	de_iir = I915_READ(DEIIR);
24950e43406bSChris Wilson 	if (de_iir) {
249672c90f62SOscar Mateo 		I915_WRITE(DEIIR, de_iir);
249772c90f62SOscar Mateo 		ret = IRQ_HANDLED;
249891d14251STvrtko Ursulin 		if (INTEL_GEN(dev_priv) >= 7)
249991d14251STvrtko Ursulin 			ivb_display_irq_handler(dev_priv, de_iir);
2500f1af8fc1SPaulo Zanoni 		else
250191d14251STvrtko Ursulin 			ilk_display_irq_handler(dev_priv, de_iir);
25020e43406bSChris Wilson 	}
25030e43406bSChris Wilson 
250491d14251STvrtko Ursulin 	if (INTEL_GEN(dev_priv) >= 6) {
2505f1af8fc1SPaulo Zanoni 		u32 pm_iir = I915_READ(GEN6_PMIIR);
25060e43406bSChris Wilson 		if (pm_iir) {
2507b1f14ad0SJesse Barnes 			I915_WRITE(GEN6_PMIIR, pm_iir);
25080e43406bSChris Wilson 			ret = IRQ_HANDLED;
250972c90f62SOscar Mateo 			gen6_rps_irq_handler(dev_priv, pm_iir);
25100e43406bSChris Wilson 		}
2511f1af8fc1SPaulo Zanoni 	}
2512b1f14ad0SJesse Barnes 
2513b1f14ad0SJesse Barnes 	I915_WRITE(DEIER, de_ier);
2514b1f14ad0SJesse Barnes 	POSTING_READ(DEIER);
251591d14251STvrtko Ursulin 	if (!HAS_PCH_NOP(dev_priv)) {
251644498aeaSPaulo Zanoni 		I915_WRITE(SDEIER, sde_ier);
251744498aeaSPaulo Zanoni 		POSTING_READ(SDEIER);
2518ab5c608bSBen Widawsky 	}
2519b1f14ad0SJesse Barnes 
25201f814dacSImre Deak 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
25211f814dacSImre Deak 	enable_rpm_wakeref_asserts(dev_priv);
25221f814dacSImre Deak 
2523b1f14ad0SJesse Barnes 	return ret;
2524b1f14ad0SJesse Barnes }
2525b1f14ad0SJesse Barnes 
252691d14251STvrtko Ursulin static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv,
252791d14251STvrtko Ursulin 				u32 hotplug_trigger,
252840e56410SVille Syrjälä 				const u32 hpd[HPD_NUM_PINS])
2529d04a492dSShashank Sharma {
2530cebd87a0SVille Syrjälä 	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2531d04a492dSShashank Sharma 
2532a52bb15bSVille Syrjälä 	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2533a52bb15bSVille Syrjälä 	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2534d04a492dSShashank Sharma 
2535cebd87a0SVille Syrjälä 	intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
253640e56410SVille Syrjälä 			   dig_hotplug_reg, hpd,
2537cebd87a0SVille Syrjälä 			   bxt_port_hotplug_long_detect);
253840e56410SVille Syrjälä 
253991d14251STvrtko Ursulin 	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2540d04a492dSShashank Sharma }
2541d04a492dSShashank Sharma 
2542f11a0f46STvrtko Ursulin static irqreturn_t
2543f11a0f46STvrtko Ursulin gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
2544abd58f01SBen Widawsky {
2545abd58f01SBen Widawsky 	irqreturn_t ret = IRQ_NONE;
2546f11a0f46STvrtko Ursulin 	u32 iir;
2547c42664ccSDaniel Vetter 	enum pipe pipe;
254888e04703SJesse Barnes 
2549abd58f01SBen Widawsky 	if (master_ctl & GEN8_DE_MISC_IRQ) {
2550e32192e1STvrtko Ursulin 		iir = I915_READ(GEN8_DE_MISC_IIR);
2551e32192e1STvrtko Ursulin 		if (iir) {
2552e32192e1STvrtko Ursulin 			I915_WRITE(GEN8_DE_MISC_IIR, iir);
2553abd58f01SBen Widawsky 			ret = IRQ_HANDLED;
2554e32192e1STvrtko Ursulin 			if (iir & GEN8_DE_MISC_GSE)
255591d14251STvrtko Ursulin 				intel_opregion_asle_intr(dev_priv);
255638cc46d7SOscar Mateo 			else
255738cc46d7SOscar Mateo 				DRM_ERROR("Unexpected DE Misc interrupt\n");
2558abd58f01SBen Widawsky 		} else
256038cc46d7SOscar Mateo 			DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
2561abd58f01SBen Widawsky 	}
2562abd58f01SBen Widawsky 
25636d766f02SDaniel Vetter 	if (master_ctl & GEN8_DE_PORT_IRQ) {
2564e32192e1STvrtko Ursulin 		iir = I915_READ(GEN8_DE_PORT_IIR);
2565e32192e1STvrtko Ursulin 		if (iir) {
2566e32192e1STvrtko Ursulin 			u32 tmp_mask;
2567d04a492dSShashank Sharma 			bool found = false;
2568cebd87a0SVille Syrjälä 
2569e32192e1STvrtko Ursulin 			I915_WRITE(GEN8_DE_PORT_IIR, iir);
25706d766f02SDaniel Vetter 			ret = IRQ_HANDLED;
257188e04703SJesse Barnes 
2572e32192e1STvrtko Ursulin 			tmp_mask = GEN8_AUX_CHANNEL_A;
2573bca2bf2aSPandiyan, Dhinakaran 			if (INTEL_GEN(dev_priv) >= 9)
2574e32192e1STvrtko Ursulin 				tmp_mask |= GEN9_AUX_CHANNEL_B |
2575e32192e1STvrtko Ursulin 					    GEN9_AUX_CHANNEL_C |
2576e32192e1STvrtko Ursulin 					    GEN9_AUX_CHANNEL_D;
2577e32192e1STvrtko Ursulin 
2578e32192e1STvrtko Ursulin 			if (iir & tmp_mask) {
257991d14251STvrtko Ursulin 				dp_aux_irq_handler(dev_priv);
2580d04a492dSShashank Sharma 				found = true;
2581d04a492dSShashank Sharma 			}
2582d04a492dSShashank Sharma 
2583cc3f90f0SAnder Conselvan de Oliveira 			if (IS_GEN9_LP(dev_priv)) {
2584e32192e1STvrtko Ursulin 				tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK;
2585e32192e1STvrtko Ursulin 				if (tmp_mask) {
258691d14251STvrtko Ursulin 					bxt_hpd_irq_handler(dev_priv, tmp_mask,
258791d14251STvrtko Ursulin 							    hpd_bxt);
2588d04a492dSShashank Sharma 					found = true;
2589d04a492dSShashank Sharma 				}
2590e32192e1STvrtko Ursulin 			} else if (IS_BROADWELL(dev_priv)) {
2591e32192e1STvrtko Ursulin 				tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG;
2592e32192e1STvrtko Ursulin 				if (tmp_mask) {
259391d14251STvrtko Ursulin 					ilk_hpd_irq_handler(dev_priv,
259491d14251STvrtko Ursulin 							    tmp_mask, hpd_bdw);
2595e32192e1STvrtko Ursulin 					found = true;
2596e32192e1STvrtko Ursulin 				}
2597e32192e1STvrtko Ursulin 			}
2598d04a492dSShashank Sharma 
2599cc3f90f0SAnder Conselvan de Oliveira 			if (IS_GEN9_LP(dev_priv) && (iir & BXT_DE_PORT_GMBUS)) {
260091d14251STvrtko Ursulin 				gmbus_irq_handler(dev_priv);
26019e63743eSShashank Sharma 				found = true;
26029e63743eSShashank Sharma 			}
26039e63743eSShashank Sharma 
2604d04a492dSShashank Sharma 			if (!found)
260538cc46d7SOscar Mateo 				DRM_ERROR("Unexpected DE Port interrupt\n");
26066d766f02SDaniel Vetter 		} else
260838cc46d7SOscar Mateo 			DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
26096d766f02SDaniel Vetter 	}
26106d766f02SDaniel Vetter 
2611055e393fSDamien Lespiau 	for_each_pipe(dev_priv, pipe) {
2612fd3a4024SDaniel Vetter 		u32 fault_errors;
2613abd58f01SBen Widawsky 
2614c42664ccSDaniel Vetter 		if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
2615c42664ccSDaniel Vetter 			continue;
2616c42664ccSDaniel Vetter 
2617e32192e1STvrtko Ursulin 		iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
2618e32192e1STvrtko Ursulin 		if (!iir) {
2619e32192e1STvrtko Ursulin 			DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
2620e32192e1STvrtko Ursulin 			continue;
2621e32192e1STvrtko Ursulin 		}
2622770de83dSDamien Lespiau 
2623e32192e1STvrtko Ursulin 		ret = IRQ_HANDLED;
2624e32192e1STvrtko Ursulin 		I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir);
2625e32192e1STvrtko Ursulin 
2626fd3a4024SDaniel Vetter 		if (iir & GEN8_PIPE_VBLANK)
2627fd3a4024SDaniel Vetter 			drm_handle_vblank(&dev_priv->drm, pipe);
2628abd58f01SBen Widawsky 
2629e32192e1STvrtko Ursulin 		if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
263091d14251STvrtko Ursulin 			hsw_pipe_crc_irq_handler(dev_priv, pipe);
26310fbe7870SDaniel Vetter 
2632e32192e1STvrtko Ursulin 		if (iir & GEN8_PIPE_FIFO_UNDERRUN)
2633e32192e1STvrtko Ursulin 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
263438d83c96SDaniel Vetter 
2635e32192e1STvrtko Ursulin 		fault_errors = iir;
2636bca2bf2aSPandiyan, Dhinakaran 		if (INTEL_GEN(dev_priv) >= 9)
2637e32192e1STvrtko Ursulin 			fault_errors &= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
2638770de83dSDamien Lespiau 		else
2639e32192e1STvrtko Ursulin 			fault_errors &= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
2640770de83dSDamien Lespiau 
2641770de83dSDamien Lespiau 		if (fault_errors)
26421353ec38STvrtko Ursulin 			DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
264330100f2bSDaniel Vetter 				  pipe_name(pipe),
2644e32192e1STvrtko Ursulin 				  fault_errors);
2645abd58f01SBen Widawsky 	}
2646abd58f01SBen Widawsky 
264791d14251STvrtko Ursulin 	if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) &&
2648266ea3d9SShashank Sharma 	    master_ctl & GEN8_DE_PCH_IRQ) {
264992d03a80SDaniel Vetter 		/*
265092d03a80SDaniel Vetter 		 * FIXME(BDW): Assume for now that the new interrupt handling
265192d03a80SDaniel Vetter 		 * scheme also closed the SDE interrupt handling race we've seen
265292d03a80SDaniel Vetter 		 * on older pch-split platforms. But this needs testing.
265392d03a80SDaniel Vetter 		 */
2654e32192e1STvrtko Ursulin 		iir = I915_READ(SDEIIR);
2655e32192e1STvrtko Ursulin 		if (iir) {
2656e32192e1STvrtko Ursulin 			I915_WRITE(SDEIIR, iir);
265792d03a80SDaniel Vetter 			ret = IRQ_HANDLED;
26586dbf30ceSVille Syrjälä 
26597b22b8c4SRodrigo Vivi 			if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv) ||
26607b22b8c4SRodrigo Vivi 			    HAS_PCH_CNP(dev_priv))
266191d14251STvrtko Ursulin 				spt_irq_handler(dev_priv, iir);
26626dbf30ceSVille Syrjälä 			else
266391d14251STvrtko Ursulin 				cpt_irq_handler(dev_priv, iir);
26642dfb0b81SJani Nikula 		} else {
26652dfb0b81SJani Nikula 			/*
26662dfb0b81SJani Nikula 			 * Like on previous PCH there seems to be something
26672dfb0b81SJani Nikula 			 * fishy going on with forwarding PCH interrupts.
26682dfb0b81SJani Nikula 			 */
26692dfb0b81SJani Nikula 			DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n");
26702dfb0b81SJani Nikula 		}
267192d03a80SDaniel Vetter 	}
267292d03a80SDaniel Vetter 
2673f11a0f46STvrtko Ursulin 	return ret;
2674f11a0f46STvrtko Ursulin }
2675f11a0f46STvrtko Ursulin 
2676f11a0f46STvrtko Ursulin static irqreturn_t gen8_irq_handler(int irq, void *arg)
2677f11a0f46STvrtko Ursulin {
2678f11a0f46STvrtko Ursulin 	struct drm_device *dev = arg;
2679fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
2680f11a0f46STvrtko Ursulin 	u32 master_ctl;
2681e30e251aSVille Syrjälä 	u32 gt_iir[4] = {};
2682f11a0f46STvrtko Ursulin 	irqreturn_t ret;
2683f11a0f46STvrtko Ursulin 
2684f11a0f46STvrtko Ursulin 	if (!intel_irqs_enabled(dev_priv))
2685f11a0f46STvrtko Ursulin 		return IRQ_NONE;
2686f11a0f46STvrtko Ursulin 
2687f11a0f46STvrtko Ursulin 	master_ctl = I915_READ_FW(GEN8_MASTER_IRQ);
2688f11a0f46STvrtko Ursulin 	master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
2689f11a0f46STvrtko Ursulin 	if (!master_ctl)
2690f11a0f46STvrtko Ursulin 		return IRQ_NONE;
2691f11a0f46STvrtko Ursulin 
2692f11a0f46STvrtko Ursulin 	I915_WRITE_FW(GEN8_MASTER_IRQ, 0);
2693f11a0f46STvrtko Ursulin 
2694f11a0f46STvrtko Ursulin 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
2695f11a0f46STvrtko Ursulin 	disable_rpm_wakeref_asserts(dev_priv);
2696f11a0f46STvrtko Ursulin 
2697f11a0f46STvrtko Ursulin 	/* Find, clear, then process each source of interrupt */
2698e30e251aSVille Syrjälä 	ret = gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);
2699e30e251aSVille Syrjälä 	gen8_gt_irq_handler(dev_priv, gt_iir);
2700f11a0f46STvrtko Ursulin 	ret |= gen8_de_irq_handler(dev_priv, master_ctl);
2701f11a0f46STvrtko Ursulin 
2702cb0d205eSChris Wilson 	I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
2703cb0d205eSChris Wilson 	POSTING_READ_FW(GEN8_MASTER_IRQ);
2704abd58f01SBen Widawsky 
27051f814dacSImre Deak 	enable_rpm_wakeref_asserts(dev_priv);
27061f814dacSImre Deak 
2707abd58f01SBen Widawsky 	return ret;
2708abd58f01SBen Widawsky }
2709abd58f01SBen Widawsky 
271036703e79SChris Wilson struct wedge_me {
271136703e79SChris Wilson 	struct delayed_work work;
271236703e79SChris Wilson 	struct drm_i915_private *i915;
271336703e79SChris Wilson 	const char *name;
271436703e79SChris Wilson };
271536703e79SChris Wilson 
271636703e79SChris Wilson static void wedge_me(struct work_struct *work)
271736703e79SChris Wilson {
271836703e79SChris Wilson 	struct wedge_me *w = container_of(work, typeof(*w), work.work);
271936703e79SChris Wilson 
272036703e79SChris Wilson 	dev_err(w->i915->drm.dev,
272136703e79SChris Wilson 		"%s timed out, cancelling all in-flight rendering.\n",
272236703e79SChris Wilson 		w->name);
272336703e79SChris Wilson 	i915_gem_set_wedged(w->i915);
272436703e79SChris Wilson }
272536703e79SChris Wilson 
272636703e79SChris Wilson static void __init_wedge(struct wedge_me *w,
272736703e79SChris Wilson 			 struct drm_i915_private *i915,
272836703e79SChris Wilson 			 long timeout,
272936703e79SChris Wilson 			 const char *name)
273036703e79SChris Wilson {
273136703e79SChris Wilson 	w->i915 = i915;
273236703e79SChris Wilson 	w->name = name;
273336703e79SChris Wilson 
273436703e79SChris Wilson 	INIT_DELAYED_WORK_ONSTACK(&w->work, wedge_me);
273536703e79SChris Wilson 	schedule_delayed_work(&w->work, timeout);
273636703e79SChris Wilson }
273736703e79SChris Wilson 
273836703e79SChris Wilson static void __fini_wedge(struct wedge_me *w)
273936703e79SChris Wilson {
274036703e79SChris Wilson 	cancel_delayed_work_sync(&w->work);
274136703e79SChris Wilson 	destroy_delayed_work_on_stack(&w->work);
274236703e79SChris Wilson 	w->i915 = NULL;
274336703e79SChris Wilson }
274436703e79SChris Wilson 
274536703e79SChris Wilson #define i915_wedge_on_timeout(W, DEV, TIMEOUT)				\
274636703e79SChris Wilson 	for (__init_wedge((W), (DEV), (TIMEOUT), __func__);		\
274736703e79SChris Wilson 	     (W)->i915;							\
274836703e79SChris Wilson 	     __fini_wedge((W)))
274936703e79SChris Wilson 
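/*
 * Usage sketch for i915_wedge_on_timeout() (illustrative only;
 * do_slow_reset_step() below is a placeholder name, not a real helper).
 * The braced body runs with a delayed work armed behind it; if the body
 * has not completed within TIMEOUT jiffies, wedge_me() fires from the
 * workqueue, logs the failure and declares the GPU wedged:
 *
 *	struct wedge_me w;
 *
 *	i915_wedge_on_timeout(&w, dev_priv, 5 * HZ) {
 *		do_slow_reset_step(dev_priv);
 *	}
 */
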
27508a905236SJesse Barnes /**
2751d5367307SChris Wilson  * i915_reset_device - do process context error handling work
275214bb2c11STvrtko Ursulin  * @dev_priv: i915 device private
27538a905236SJesse Barnes  *
27548a905236SJesse Barnes  * Fire an error uevent so userspace can see that a hang or error
27558a905236SJesse Barnes  * was detected.
27568a905236SJesse Barnes  */
2757d5367307SChris Wilson static void i915_reset_device(struct drm_i915_private *dev_priv)
27588a905236SJesse Barnes {
275991c8a326SChris Wilson 	struct kobject *kobj = &dev_priv->drm.primary->kdev->kobj;
2760cce723edSBen Widawsky 	char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
2761cce723edSBen Widawsky 	char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
2762cce723edSBen Widawsky 	char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
276336703e79SChris Wilson 	struct wedge_me w;
27648a905236SJesse Barnes 
2765c033666aSChris Wilson 	kobject_uevent_env(kobj, KOBJ_CHANGE, error_event);
27668a905236SJesse Barnes 
276744d98a61SZhao Yakui 	DRM_DEBUG_DRIVER("resetting chip\n");
2768c033666aSChris Wilson 	kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event);
27691f83fee0SDaniel Vetter 
277036703e79SChris Wilson 	/* Use a watchdog to ensure that our reset completes */
277136703e79SChris Wilson 	i915_wedge_on_timeout(&w, dev_priv, 5*HZ) {
2772c033666aSChris Wilson 		intel_prepare_reset(dev_priv);
27737514747dSVille Syrjälä 
277436703e79SChris Wilson 		/* Signal that locked waiters should reset the GPU */
27758c185ecaSChris Wilson 		set_bit(I915_RESET_HANDOFF, &dev_priv->gpu_error.flags);
27768c185ecaSChris Wilson 		wake_up_all(&dev_priv->gpu_error.wait_queue);
27778c185ecaSChris Wilson 
277836703e79SChris Wilson 		/* Wait for anyone holding the lock to wake up, without
277936703e79SChris Wilson 		 * blocking indefinitely on struct_mutex.
278017e1df07SDaniel Vetter 		 */
278136703e79SChris Wilson 		do {
2782780f262aSChris Wilson 			if (mutex_trylock(&dev_priv->drm.struct_mutex)) {
2783535275d3SChris Wilson 				i915_reset(dev_priv, 0);
2784221fe799SChris Wilson 				mutex_unlock(&dev_priv->drm.struct_mutex);
2785780f262aSChris Wilson 			}
2786780f262aSChris Wilson 		} while (wait_on_bit_timeout(&dev_priv->gpu_error.flags,
27878c185ecaSChris Wilson 					     I915_RESET_HANDOFF,
2788780f262aSChris Wilson 					     TASK_UNINTERRUPTIBLE,
278936703e79SChris Wilson 					     1));
2790f69061beSDaniel Vetter 
2791c033666aSChris Wilson 		intel_finish_reset(dev_priv);
279236703e79SChris Wilson 	}
2793f454c694SImre Deak 
2794780f262aSChris Wilson 	if (!test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
2795c033666aSChris Wilson 		kobject_uevent_env(kobj,
2796f69061beSDaniel Vetter 				   KOBJ_CHANGE, reset_done_event);
2797f316a42cSBen Gamari }
27988a905236SJesse Barnes 
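/*
 * Rough summary of the reset flags used above (a restatement of this code,
 * not additional authoritative documentation):
 *
 *	I915_RESET_BACKOFF - owned by the caller (i915_handle_error()) to
 *			     serialize full device resets
 *	I915_RESET_HANDOFF - set here so that waiters already holding
 *			     struct_mutex can perform i915_reset() for us
 *	I915_WEDGED        - left set when the reset (or the wedge watchdog
 *			     above) gave up; in that case no reset_done
 *			     uevent is sent
 */
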
2799eaa14c24SChris Wilson static void i915_clear_error_registers(struct drm_i915_private *dev_priv)
2800c0e09200SDave Airlie {
2801eaa14c24SChris Wilson 	u32 eir;
280263eeaf38SJesse Barnes 
2803eaa14c24SChris Wilson 	if (!IS_GEN2(dev_priv))
2804eaa14c24SChris Wilson 		I915_WRITE(PGTBL_ER, I915_READ(PGTBL_ER));
280563eeaf38SJesse Barnes 
2806eaa14c24SChris Wilson 	if (INTEL_GEN(dev_priv) < 4)
2807eaa14c24SChris Wilson 		I915_WRITE(IPEIR, I915_READ(IPEIR));
2808eaa14c24SChris Wilson 	else
2809eaa14c24SChris Wilson 		I915_WRITE(IPEIR_I965, I915_READ(IPEIR_I965));
28108a905236SJesse Barnes 
2811eaa14c24SChris Wilson 	I915_WRITE(EIR, I915_READ(EIR));
281263eeaf38SJesse Barnes 	eir = I915_READ(EIR);
281363eeaf38SJesse Barnes 	if (eir) {
281463eeaf38SJesse Barnes 		/*
281563eeaf38SJesse Barnes 		 * some errors might have become stuck,
281663eeaf38SJesse Barnes 		 * mask them.
281763eeaf38SJesse Barnes 		 */
2818eaa14c24SChris Wilson 		DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masking\n", eir);
281963eeaf38SJesse Barnes 		I915_WRITE(EMR, I915_READ(EMR) | eir);
282063eeaf38SJesse Barnes 		I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
282163eeaf38SJesse Barnes 	}
282235aed2e6SChris Wilson }
282335aed2e6SChris Wilson 
282435aed2e6SChris Wilson /**
2825b8d24a06SMika Kuoppala  * i915_handle_error - handle a gpu error
282614bb2c11STvrtko Ursulin  * @dev_priv: i915 device private
282714b730fcSarun.siluvery@linux.intel.com  * @engine_mask: mask representing engines that are hung
282887c390b6SMichel Thierry  * @fmt: Error message format string
282987c390b6SMichel Thierry  *
2830aafd8581SJavier Martinez Canillas  * Do some basic checking of register state at error time and
283135aed2e6SChris Wilson  * dump it to the syslog.  Also call i915_capture_error_state() to make
283235aed2e6SChris Wilson  * sure we get a record and make it available in debugfs.  Fire a uevent
283335aed2e6SChris Wilson  * so userspace knows something bad happened (should trigger collection
283435aed2e6SChris Wilson  * of a ring dump etc.).
283535aed2e6SChris Wilson  */
2836c033666aSChris Wilson void i915_handle_error(struct drm_i915_private *dev_priv,
2837c033666aSChris Wilson 		       u32 engine_mask,
283858174462SMika Kuoppala 		       const char *fmt, ...)
283935aed2e6SChris Wilson {
2840142bc7d9SMichel Thierry 	struct intel_engine_cs *engine;
2841142bc7d9SMichel Thierry 	unsigned int tmp;
284258174462SMika Kuoppala 	va_list args;
284358174462SMika Kuoppala 	char error_msg[80];
284435aed2e6SChris Wilson 
284558174462SMika Kuoppala 	va_start(args, fmt);
284658174462SMika Kuoppala 	vscnprintf(error_msg, sizeof(error_msg), fmt, args);
284758174462SMika Kuoppala 	va_end(args);
284858174462SMika Kuoppala 
28491604a86dSChris Wilson 	/*
28501604a86dSChris Wilson 	 * In most cases it's guaranteed that we get here with an RPM
28511604a86dSChris Wilson 	 * reference held, for example because there is a pending GPU
28521604a86dSChris Wilson 	 * request that won't finish until the reset is done. This
28531604a86dSChris Wilson 	 * isn't the case at least when we get here by doing a
28541604a86dSChris Wilson 	 * simulated reset via debugfs, so get an RPM reference.
28551604a86dSChris Wilson 	 */
28561604a86dSChris Wilson 	intel_runtime_pm_get(dev_priv);
28571604a86dSChris Wilson 
2858c033666aSChris Wilson 	i915_capture_error_state(dev_priv, engine_mask, error_msg);
2859eaa14c24SChris Wilson 	i915_clear_error_registers(dev_priv);
28608a905236SJesse Barnes 
2861142bc7d9SMichel Thierry 	/*
2862142bc7d9SMichel Thierry 	 * Try engine reset when available. We fall back to full reset if
2863142bc7d9SMichel Thierry 	 * single reset fails.
2864142bc7d9SMichel Thierry 	 */
2865142bc7d9SMichel Thierry 	if (intel_has_reset_engine(dev_priv)) {
2866142bc7d9SMichel Thierry 		for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
28679db529aaSDaniel Vetter 			BUILD_BUG_ON(I915_RESET_MODESET >= I915_RESET_ENGINE);
2868142bc7d9SMichel Thierry 			if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
2869142bc7d9SMichel Thierry 					     &dev_priv->gpu_error.flags))
2870142bc7d9SMichel Thierry 				continue;
2871142bc7d9SMichel Thierry 
2872535275d3SChris Wilson 			if (i915_reset_engine(engine, 0) == 0)
2873142bc7d9SMichel Thierry 				engine_mask &= ~intel_engine_flag(engine);
2874142bc7d9SMichel Thierry 
2875142bc7d9SMichel Thierry 			clear_bit(I915_RESET_ENGINE + engine->id,
2876142bc7d9SMichel Thierry 				  &dev_priv->gpu_error.flags);
2877142bc7d9SMichel Thierry 			wake_up_bit(&dev_priv->gpu_error.flags,
2878142bc7d9SMichel Thierry 				    I915_RESET_ENGINE + engine->id);
2879142bc7d9SMichel Thierry 		}
2880142bc7d9SMichel Thierry 	}
2881142bc7d9SMichel Thierry 
28828af29b0cSChris Wilson 	if (!engine_mask)
28831604a86dSChris Wilson 		goto out;
28848af29b0cSChris Wilson 
2885142bc7d9SMichel Thierry 	/* Full reset needs the mutex, stop any other user trying to do so. */
2886d5367307SChris Wilson 	if (test_and_set_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags)) {
2887d5367307SChris Wilson 		wait_event(dev_priv->gpu_error.reset_queue,
2888d5367307SChris Wilson 			   !test_bit(I915_RESET_BACKOFF,
2889d5367307SChris Wilson 				     &dev_priv->gpu_error.flags));
28901604a86dSChris Wilson 		goto out;
2891d5367307SChris Wilson 	}
2892ba1234d1SBen Gamari 
2893142bc7d9SMichel Thierry 	/* Prevent any other reset-engine attempt. */
2894142bc7d9SMichel Thierry 	for_each_engine(engine, dev_priv, tmp) {
2895142bc7d9SMichel Thierry 		while (test_and_set_bit(I915_RESET_ENGINE + engine->id,
2896142bc7d9SMichel Thierry 					&dev_priv->gpu_error.flags))
2897142bc7d9SMichel Thierry 			wait_on_bit(&dev_priv->gpu_error.flags,
2898142bc7d9SMichel Thierry 				    I915_RESET_ENGINE + engine->id,
2899142bc7d9SMichel Thierry 				    TASK_UNINTERRUPTIBLE);
2900142bc7d9SMichel Thierry 	}
2901142bc7d9SMichel Thierry 
2902d5367307SChris Wilson 	i915_reset_device(dev_priv);
2903d5367307SChris Wilson 
2904142bc7d9SMichel Thierry 	for_each_engine(engine, dev_priv, tmp) {
2905142bc7d9SMichel Thierry 		clear_bit(I915_RESET_ENGINE + engine->id,
2906142bc7d9SMichel Thierry 			  &dev_priv->gpu_error.flags);
2907142bc7d9SMichel Thierry 	}
2908142bc7d9SMichel Thierry 
2909d5367307SChris Wilson 	clear_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags);
2910d5367307SChris Wilson 	wake_up_all(&dev_priv->gpu_error.reset_queue);
29111604a86dSChris Wilson 
29121604a86dSChris Wilson out:
29131604a86dSChris Wilson 	intel_runtime_pm_put(dev_priv);
29148a905236SJesse Barnes }
29158a905236SJesse Barnes 
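/*
 * Illustrative caller sketch (an assumption about usage; the real call
 * sites live in the hangcheck/error paths elsewhere in the driver):
 *
 *	i915_handle_error(dev_priv, intel_engine_flag(engine),
 *			  "%s hung", engine->name);
 *
 * Passing an engine_mask of 0 only captures the error state and clears the
 * error registers, without attempting any reset.
 */
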
291642f52ef8SKeith Packard /* Called from drm generic code, passed 'crtc' which
291742f52ef8SKeith Packard  * we use as a pipe index
291842f52ef8SKeith Packard  */
291986e83e35SChris Wilson static int i8xx_enable_vblank(struct drm_device *dev, unsigned int pipe)
29200a3e67a4SJesse Barnes {
2921fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
2922e9d21d7fSKeith Packard 	unsigned long irqflags;
292371e0ffa5SJesse Barnes 
29241ec14ad3SChris Wilson 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
292586e83e35SChris Wilson 	i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
292686e83e35SChris Wilson 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
292786e83e35SChris Wilson 
292886e83e35SChris Wilson 	return 0;
292986e83e35SChris Wilson }
293086e83e35SChris Wilson 
293186e83e35SChris Wilson static int i965_enable_vblank(struct drm_device *dev, unsigned int pipe)
293286e83e35SChris Wilson {
293386e83e35SChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
293486e83e35SChris Wilson 	unsigned long irqflags;
293586e83e35SChris Wilson 
293686e83e35SChris Wilson 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
29377c463586SKeith Packard 	i915_enable_pipestat(dev_priv, pipe,
2938755e9019SImre Deak 			     PIPE_START_VBLANK_INTERRUPT_STATUS);
29391ec14ad3SChris Wilson 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
29408692d00eSChris Wilson 
29410a3e67a4SJesse Barnes 	return 0;
29420a3e67a4SJesse Barnes }
29430a3e67a4SJesse Barnes 
294488e72717SThierry Reding static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
2945f796cf8fSJesse Barnes {
2946fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
2947f796cf8fSJesse Barnes 	unsigned long irqflags;
294855b8f2a7STvrtko Ursulin 	uint32_t bit = INTEL_GEN(dev_priv) >= 7 ?
294986e83e35SChris Wilson 		DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
2950f796cf8fSJesse Barnes 
2951f796cf8fSJesse Barnes 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2952fbdedaeaSVille Syrjälä 	ilk_enable_display_irq(dev_priv, bit);
2953b1f14ad0SJesse Barnes 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2954b1f14ad0SJesse Barnes 
2955b1f14ad0SJesse Barnes 	return 0;
2956b1f14ad0SJesse Barnes }
2957b1f14ad0SJesse Barnes 
295888e72717SThierry Reding static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe)
2959abd58f01SBen Widawsky {
2960fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
2961abd58f01SBen Widawsky 	unsigned long irqflags;
2962abd58f01SBen Widawsky 
2963abd58f01SBen Widawsky 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2964013d3752SVille Syrjälä 	bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
2965abd58f01SBen Widawsky 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2966013d3752SVille Syrjälä 
2967abd58f01SBen Widawsky 	return 0;
2968abd58f01SBen Widawsky }
2969abd58f01SBen Widawsky 
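/*
 * These vblank hooks are not called directly by the driver; a sketch of how
 * they are expected to be wired up in intel_irq_init() (outside this
 * excerpt), e.g. for gen8+:
 *
 *	dev->driver->enable_vblank = gen8_enable_vblank;
 *	dev->driver->disable_vblank = gen8_disable_vblank;
 *
 * The DRM vblank core (drm_vblank_get()/drm_vblank_put()) then invokes them
 * with the pipe index as the "crtc" argument.
 */
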
297042f52ef8SKeith Packard /* Called from drm generic code, passed 'crtc' which
297142f52ef8SKeith Packard  * we use as a pipe index
297242f52ef8SKeith Packard  */
297386e83e35SChris Wilson static void i8xx_disable_vblank(struct drm_device *dev, unsigned int pipe)
297486e83e35SChris Wilson {
297586e83e35SChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
297686e83e35SChris Wilson 	unsigned long irqflags;
297786e83e35SChris Wilson 
297886e83e35SChris Wilson 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
297986e83e35SChris Wilson 	i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
298086e83e35SChris Wilson 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
298186e83e35SChris Wilson }
298286e83e35SChris Wilson 
298386e83e35SChris Wilson static void i965_disable_vblank(struct drm_device *dev, unsigned int pipe)
29840a3e67a4SJesse Barnes {
2985fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
2986e9d21d7fSKeith Packard 	unsigned long irqflags;
29870a3e67a4SJesse Barnes 
29881ec14ad3SChris Wilson 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
29897c463586SKeith Packard 	i915_disable_pipestat(dev_priv, pipe,
2990755e9019SImre Deak 			      PIPE_START_VBLANK_INTERRUPT_STATUS);
29911ec14ad3SChris Wilson 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
29920a3e67a4SJesse Barnes }
29930a3e67a4SJesse Barnes 
299488e72717SThierry Reding static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe)
2995f796cf8fSJesse Barnes {
2996fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
2997f796cf8fSJesse Barnes 	unsigned long irqflags;
299855b8f2a7STvrtko Ursulin 	uint32_t bit = INTEL_GEN(dev_priv) >= 7 ?
299986e83e35SChris Wilson 		DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
3000f796cf8fSJesse Barnes 
3001f796cf8fSJesse Barnes 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3002fbdedaeaSVille Syrjälä 	ilk_disable_display_irq(dev_priv, bit);
3003b1f14ad0SJesse Barnes 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3004b1f14ad0SJesse Barnes }
3005b1f14ad0SJesse Barnes 
300688e72717SThierry Reding static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
3007abd58f01SBen Widawsky {
3008fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
3009abd58f01SBen Widawsky 	unsigned long irqflags;
3010abd58f01SBen Widawsky 
3011abd58f01SBen Widawsky 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3012013d3752SVille Syrjälä 	bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
3013abd58f01SBen Widawsky 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3014abd58f01SBen Widawsky }
3015abd58f01SBen Widawsky 
3016b243f530STvrtko Ursulin static void ibx_irq_reset(struct drm_i915_private *dev_priv)
301791738a95SPaulo Zanoni {
30186e266956STvrtko Ursulin 	if (HAS_PCH_NOP(dev_priv))
301991738a95SPaulo Zanoni 		return;
302091738a95SPaulo Zanoni 
30213488d4ebSVille Syrjälä 	GEN3_IRQ_RESET(SDE);
3022105b122eSPaulo Zanoni 
30236e266956STvrtko Ursulin 	if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
3024105b122eSPaulo Zanoni 		I915_WRITE(SERR_INT, 0xffffffff);
3025622364b6SPaulo Zanoni }
3026105b122eSPaulo Zanoni 
302791738a95SPaulo Zanoni /*
3028622364b6SPaulo Zanoni  * SDEIER is also touched by the interrupt handler to work around missed PCH
3029622364b6SPaulo Zanoni  * interrupts. Hence we can't update it after the interrupt handler is enabled -
3030622364b6SPaulo Zanoni  * instead we unconditionally enable all PCH interrupt sources here, but then
3031622364b6SPaulo Zanoni  * only unmask them as needed with SDEIMR.
3032622364b6SPaulo Zanoni  *
3033622364b6SPaulo Zanoni  * This function needs to be called before interrupts are enabled.
303491738a95SPaulo Zanoni  */
3035622364b6SPaulo Zanoni static void ibx_irq_pre_postinstall(struct drm_device *dev)
3036622364b6SPaulo Zanoni {
3037fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
3038622364b6SPaulo Zanoni 
30396e266956STvrtko Ursulin 	if (HAS_PCH_NOP(dev_priv))
3040622364b6SPaulo Zanoni 		return;
3041622364b6SPaulo Zanoni 
3042622364b6SPaulo Zanoni 	WARN_ON(I915_READ(SDEIER) != 0);
304391738a95SPaulo Zanoni 	I915_WRITE(SDEIER, 0xffffffff);
304491738a95SPaulo Zanoni 	POSTING_READ(SDEIER);
304591738a95SPaulo Zanoni }
304691738a95SPaulo Zanoni 
3047b243f530STvrtko Ursulin static void gen5_gt_irq_reset(struct drm_i915_private *dev_priv)
3048d18ea1b5SDaniel Vetter {
30493488d4ebSVille Syrjälä 	GEN3_IRQ_RESET(GT);
3050b243f530STvrtko Ursulin 	if (INTEL_GEN(dev_priv) >= 6)
30513488d4ebSVille Syrjälä 		GEN3_IRQ_RESET(GEN6_PM);
3052d18ea1b5SDaniel Vetter }
3053d18ea1b5SDaniel Vetter 
305470591a41SVille Syrjälä static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
305570591a41SVille Syrjälä {
305671b8b41dSVille Syrjälä 	if (IS_CHERRYVIEW(dev_priv))
305771b8b41dSVille Syrjälä 		I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
305871b8b41dSVille Syrjälä 	else
305971b8b41dSVille Syrjälä 		I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
306071b8b41dSVille Syrjälä 
3061ad22d106SVille Syrjälä 	i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
306270591a41SVille Syrjälä 	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
306370591a41SVille Syrjälä 
306444d9241eSVille Syrjälä 	i9xx_pipestat_irq_reset(dev_priv);
306570591a41SVille Syrjälä 
30663488d4ebSVille Syrjälä 	GEN3_IRQ_RESET(VLV_);
3067ad22d106SVille Syrjälä 	dev_priv->irq_mask = ~0;
306870591a41SVille Syrjälä }
306970591a41SVille Syrjälä 
30708bb61306SVille Syrjälä static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
30718bb61306SVille Syrjälä {
30728bb61306SVille Syrjälä 	u32 pipestat_mask;
30739ab981f2SVille Syrjälä 	u32 enable_mask;
30748bb61306SVille Syrjälä 	enum pipe pipe;
30758bb61306SVille Syrjälä 
3076842ebf7aSVille Syrjälä 	pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS;
30778bb61306SVille Syrjälä 
30788bb61306SVille Syrjälä 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
30798bb61306SVille Syrjälä 	for_each_pipe(dev_priv, pipe)
30808bb61306SVille Syrjälä 		i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
30818bb61306SVille Syrjälä 
30829ab981f2SVille Syrjälä 	enable_mask = I915_DISPLAY_PORT_INTERRUPT |
30838bb61306SVille Syrjälä 		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3084ebf5f921SVille Syrjälä 		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3085ebf5f921SVille Syrjälä 		I915_LPE_PIPE_A_INTERRUPT |
3086ebf5f921SVille Syrjälä 		I915_LPE_PIPE_B_INTERRUPT;
3087ebf5f921SVille Syrjälä 
30888bb61306SVille Syrjälä 	if (IS_CHERRYVIEW(dev_priv))
3089ebf5f921SVille Syrjälä 		enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT |
3090ebf5f921SVille Syrjälä 			I915_LPE_PIPE_C_INTERRUPT;
30916b7eafc1SVille Syrjälä 
30926b7eafc1SVille Syrjälä 	WARN_ON(dev_priv->irq_mask != ~0);
30936b7eafc1SVille Syrjälä 
30949ab981f2SVille Syrjälä 	dev_priv->irq_mask = ~enable_mask;
30958bb61306SVille Syrjälä 
30963488d4ebSVille Syrjälä 	GEN3_IRQ_INIT(VLV_, dev_priv->irq_mask, enable_mask);
30978bb61306SVille Syrjälä }
30988bb61306SVille Syrjälä 
30998bb61306SVille Syrjälä /* drm_dma.h hooks */
31018bb61306SVille Syrjälä static void ironlake_irq_reset(struct drm_device *dev)
31028bb61306SVille Syrjälä {
3103fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
31048bb61306SVille Syrjälä 
3105d420a50cSVille Syrjälä 	if (IS_GEN5(dev_priv))
31068bb61306SVille Syrjälä 		I915_WRITE(HWSTAM, 0xffffffff);
31078bb61306SVille Syrjälä 
31083488d4ebSVille Syrjälä 	GEN3_IRQ_RESET(DE);
31095db94019STvrtko Ursulin 	if (IS_GEN7(dev_priv))
31108bb61306SVille Syrjälä 		I915_WRITE(GEN7_ERR_INT, 0xffffffff);
31118bb61306SVille Syrjälä 
3112b243f530STvrtko Ursulin 	gen5_gt_irq_reset(dev_priv);
31138bb61306SVille Syrjälä 
3114b243f530STvrtko Ursulin 	ibx_irq_reset(dev_priv);
31158bb61306SVille Syrjälä }
31168bb61306SVille Syrjälä 
31176bcdb1c8SVille Syrjälä static void valleyview_irq_reset(struct drm_device *dev)
31187e231dbeSJesse Barnes {
3119fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
31207e231dbeSJesse Barnes 
312134c7b8a7SVille Syrjälä 	I915_WRITE(VLV_MASTER_IER, 0);
312234c7b8a7SVille Syrjälä 	POSTING_READ(VLV_MASTER_IER);
312334c7b8a7SVille Syrjälä 
3124b243f530STvrtko Ursulin 	gen5_gt_irq_reset(dev_priv);
31257e231dbeSJesse Barnes 
3126ad22d106SVille Syrjälä 	spin_lock_irq(&dev_priv->irq_lock);
31279918271eSVille Syrjälä 	if (dev_priv->display_irqs_enabled)
312870591a41SVille Syrjälä 		vlv_display_irq_reset(dev_priv);
3129ad22d106SVille Syrjälä 	spin_unlock_irq(&dev_priv->irq_lock);
31307e231dbeSJesse Barnes }
31317e231dbeSJesse Barnes 
3132d6e3cca3SDaniel Vetter static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
3133d6e3cca3SDaniel Vetter {
3134d6e3cca3SDaniel Vetter 	GEN8_IRQ_RESET_NDX(GT, 0);
3135d6e3cca3SDaniel Vetter 	GEN8_IRQ_RESET_NDX(GT, 1);
3136d6e3cca3SDaniel Vetter 	GEN8_IRQ_RESET_NDX(GT, 2);
3137d6e3cca3SDaniel Vetter 	GEN8_IRQ_RESET_NDX(GT, 3);
3138d6e3cca3SDaniel Vetter }
3139d6e3cca3SDaniel Vetter 
3140823f6b38SPaulo Zanoni static void gen8_irq_reset(struct drm_device *dev)
3141abd58f01SBen Widawsky {
3142fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
3143abd58f01SBen Widawsky 	int pipe;
3144abd58f01SBen Widawsky 
3145abd58f01SBen Widawsky 	I915_WRITE(GEN8_MASTER_IRQ, 0);
3146abd58f01SBen Widawsky 	POSTING_READ(GEN8_MASTER_IRQ);
3147abd58f01SBen Widawsky 
3148d6e3cca3SDaniel Vetter 	gen8_gt_irq_reset(dev_priv);
3149abd58f01SBen Widawsky 
3150055e393fSDamien Lespiau 	for_each_pipe(dev_priv, pipe)
3151f458ebbcSDaniel Vetter 		if (intel_display_power_is_enabled(dev_priv,
3152813bde43SPaulo Zanoni 						   POWER_DOMAIN_PIPE(pipe)))
3153f86f3fb0SPaulo Zanoni 			GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
3154abd58f01SBen Widawsky 
31553488d4ebSVille Syrjälä 	GEN3_IRQ_RESET(GEN8_DE_PORT_);
31563488d4ebSVille Syrjälä 	GEN3_IRQ_RESET(GEN8_DE_MISC_);
31573488d4ebSVille Syrjälä 	GEN3_IRQ_RESET(GEN8_PCU_);
3158abd58f01SBen Widawsky 
31596e266956STvrtko Ursulin 	if (HAS_PCH_SPLIT(dev_priv))
3160b243f530STvrtko Ursulin 		ibx_irq_reset(dev_priv);
3161abd58f01SBen Widawsky }
3162abd58f01SBen Widawsky 
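/*
 * Re-arm the DE pipe interrupt registers for the pipes in @pipe_mask after
 * their display power well has been enabled again; the per-pipe IMR/IER
 * state is lost while the power well is down.
 */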
31634c6c03beSDamien Lespiau void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
3164001bd2cbSImre Deak 				     u8 pipe_mask)
3165d49bdb0eSPaulo Zanoni {
31661180e206SPaulo Zanoni 	uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
31676831f3e3SVille Syrjälä 	enum pipe pipe;
3168d49bdb0eSPaulo Zanoni 
316913321786SDaniel Vetter 	spin_lock_irq(&dev_priv->irq_lock);
31709dfe2e3aSImre Deak 
31719dfe2e3aSImre Deak 	if (!intel_irqs_enabled(dev_priv)) {
31729dfe2e3aSImre Deak 		spin_unlock_irq(&dev_priv->irq_lock);
31739dfe2e3aSImre Deak 		return;
31749dfe2e3aSImre Deak 	}
31759dfe2e3aSImre Deak 
31766831f3e3SVille Syrjälä 	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
31776831f3e3SVille Syrjälä 		GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
31786831f3e3SVille Syrjälä 				  dev_priv->de_irq_mask[pipe],
31796831f3e3SVille Syrjälä 				  ~dev_priv->de_irq_mask[pipe] | extra_ier);
31809dfe2e3aSImre Deak 
318113321786SDaniel Vetter 	spin_unlock_irq(&dev_priv->irq_lock);
3182d49bdb0eSPaulo Zanoni }
3183d49bdb0eSPaulo Zanoni 
3184aae8ba84SVille Syrjälä void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
3185001bd2cbSImre Deak 				     u8 pipe_mask)
3186aae8ba84SVille Syrjälä {
31876831f3e3SVille Syrjälä 	enum pipe pipe;
31886831f3e3SVille Syrjälä 
3189aae8ba84SVille Syrjälä 	spin_lock_irq(&dev_priv->irq_lock);
31909dfe2e3aSImre Deak 
31919dfe2e3aSImre Deak 	if (!intel_irqs_enabled(dev_priv)) {
31929dfe2e3aSImre Deak 		spin_unlock_irq(&dev_priv->irq_lock);
31939dfe2e3aSImre Deak 		return;
31949dfe2e3aSImre Deak 	}
31959dfe2e3aSImre Deak 
31966831f3e3SVille Syrjälä 	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
31976831f3e3SVille Syrjälä 		GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
31989dfe2e3aSImre Deak 
3199aae8ba84SVille Syrjälä 	spin_unlock_irq(&dev_priv->irq_lock);
3200aae8ba84SVille Syrjälä 
3201aae8ba84SVille Syrjälä 	/* make sure we're done processing display irqs */
320291c8a326SChris Wilson 	synchronize_irq(dev_priv->drm.irq);
3203aae8ba84SVille Syrjälä }
3204aae8ba84SVille Syrjälä 
32056bcdb1c8SVille Syrjälä static void cherryview_irq_reset(struct drm_device *dev)
320643f328d7SVille Syrjälä {
3207fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
320843f328d7SVille Syrjälä 
320943f328d7SVille Syrjälä 	I915_WRITE(GEN8_MASTER_IRQ, 0);
321043f328d7SVille Syrjälä 	POSTING_READ(GEN8_MASTER_IRQ);
321143f328d7SVille Syrjälä 
3212d6e3cca3SDaniel Vetter 	gen8_gt_irq_reset(dev_priv);
321343f328d7SVille Syrjälä 
32143488d4ebSVille Syrjälä 	GEN3_IRQ_RESET(GEN8_PCU_);
321543f328d7SVille Syrjälä 
3216ad22d106SVille Syrjälä 	spin_lock_irq(&dev_priv->irq_lock);
32179918271eSVille Syrjälä 	if (dev_priv->display_irqs_enabled)
321870591a41SVille Syrjälä 		vlv_display_irq_reset(dev_priv);
3219ad22d106SVille Syrjälä 	spin_unlock_irq(&dev_priv->irq_lock);
322043f328d7SVille Syrjälä }
322143f328d7SVille Syrjälä 
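/*
 * Collect the HPD IRQ bits of every encoder whose hotplug pin is currently
 * in the HPD_ENABLED state, using the platform's hpd[] pin-to-bit table.
 */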
322291d14251STvrtko Ursulin static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv,
322387a02106SVille Syrjälä 				  const u32 hpd[HPD_NUM_PINS])
322487a02106SVille Syrjälä {
322587a02106SVille Syrjälä 	struct intel_encoder *encoder;
322687a02106SVille Syrjälä 	u32 enabled_irqs = 0;
322787a02106SVille Syrjälä 
322891c8a326SChris Wilson 	for_each_intel_encoder(&dev_priv->drm, encoder)
322987a02106SVille Syrjälä 		if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
323087a02106SVille Syrjälä 			enabled_irqs |= hpd[encoder->hpd_pin];
323187a02106SVille Syrjälä 
323287a02106SVille Syrjälä 	return enabled_irqs;
323387a02106SVille Syrjälä }
323487a02106SVille Syrjälä 
32351a56b1a2SImre Deak static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv)
32361a56b1a2SImre Deak {
32371a56b1a2SImre Deak 	u32 hotplug;
32381a56b1a2SImre Deak 
32391a56b1a2SImre Deak 	/*
32401a56b1a2SImre Deak 	 * Enable digital hotplug on the PCH, and configure the DP short pulse
32411a56b1a2SImre Deak 	 * duration to 2ms (which is the minimum in the Display Port spec).
32421a56b1a2SImre Deak 	 * The pulse duration bits are reserved on LPT+.
32431a56b1a2SImre Deak 	 */
32441a56b1a2SImre Deak 	hotplug = I915_READ(PCH_PORT_HOTPLUG);
32451a56b1a2SImre Deak 	hotplug &= ~(PORTB_PULSE_DURATION_MASK |
32461a56b1a2SImre Deak 		     PORTC_PULSE_DURATION_MASK |
32471a56b1a2SImre Deak 		     PORTD_PULSE_DURATION_MASK);
32481a56b1a2SImre Deak 	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
32491a56b1a2SImre Deak 	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
32501a56b1a2SImre Deak 	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
32511a56b1a2SImre Deak 	/*
32521a56b1a2SImre Deak 	 * When CPU and PCH are on the same package, port A
32531a56b1a2SImre Deak 	 * HPD must be enabled in both north and south.
32541a56b1a2SImre Deak 	 */
32551a56b1a2SImre Deak 	if (HAS_PCH_LPT_LP(dev_priv))
32561a56b1a2SImre Deak 		hotplug |= PORTA_HOTPLUG_ENABLE;
32571a56b1a2SImre Deak 	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
32581a56b1a2SImre Deak }
32591a56b1a2SImre Deak 
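/*
 * IBX exposes its hotplug bits as SDE_HOTPLUG_MASK, CPT/LPT as the _CPT
 * variants; only the pins currently marked HPD_ENABLED get unmasked in
 * SDEIMR via ibx_display_interrupt_update().
 */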
326091d14251STvrtko Ursulin static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
326182a28bcfSDaniel Vetter {
32621a56b1a2SImre Deak 	u32 hotplug_irqs, enabled_irqs;
326382a28bcfSDaniel Vetter 
326491d14251STvrtko Ursulin 	if (HAS_PCH_IBX(dev_priv)) {
3265fee884edSDaniel Vetter 		hotplug_irqs = SDE_HOTPLUG_MASK;
326691d14251STvrtko Ursulin 		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ibx);
326782a28bcfSDaniel Vetter 	} else {
3268fee884edSDaniel Vetter 		hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
326991d14251STvrtko Ursulin 		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_cpt);
327082a28bcfSDaniel Vetter 	}
327182a28bcfSDaniel Vetter 
3272fee884edSDaniel Vetter 	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
327382a28bcfSDaniel Vetter 
32741a56b1a2SImre Deak 	ibx_hpd_detection_setup(dev_priv);
32756dbf30ceSVille Syrjälä }
327626951cafSXiong Zhang 
32772a57d9ccSImre Deak static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv)
32782a57d9ccSImre Deak {
32793b92e263SRodrigo Vivi 	u32 val, hotplug;
32803b92e263SRodrigo Vivi 
32813b92e263SRodrigo Vivi 	/* Display WA #1179 WaHardHangonHotPlug: cnp */
32823b92e263SRodrigo Vivi 	if (HAS_PCH_CNP(dev_priv)) {
32833b92e263SRodrigo Vivi 		val = I915_READ(SOUTH_CHICKEN1);
32843b92e263SRodrigo Vivi 		val &= ~CHASSIS_CLK_REQ_DURATION_MASK;
32853b92e263SRodrigo Vivi 		val |= CHASSIS_CLK_REQ_DURATION(0xf);
32863b92e263SRodrigo Vivi 		I915_WRITE(SOUTH_CHICKEN1, val);
32873b92e263SRodrigo Vivi 	}
32882a57d9ccSImre Deak 
32892a57d9ccSImre Deak 	/* Enable digital hotplug on the PCH */
32902a57d9ccSImre Deak 	hotplug = I915_READ(PCH_PORT_HOTPLUG);
32912a57d9ccSImre Deak 	hotplug |= PORTA_HOTPLUG_ENABLE |
32922a57d9ccSImre Deak 		   PORTB_HOTPLUG_ENABLE |
32932a57d9ccSImre Deak 		   PORTC_HOTPLUG_ENABLE |
32942a57d9ccSImre Deak 		   PORTD_HOTPLUG_ENABLE;
32952a57d9ccSImre Deak 	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
32962a57d9ccSImre Deak 
32972a57d9ccSImre Deak 	hotplug = I915_READ(PCH_PORT_HOTPLUG2);
32982a57d9ccSImre Deak 	hotplug |= PORTE_HOTPLUG_ENABLE;
32992a57d9ccSImre Deak 	I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
33002a57d9ccSImre Deak }
33012a57d9ccSImre Deak 
330291d14251STvrtko Ursulin static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
33036dbf30ceSVille Syrjälä {
33042a57d9ccSImre Deak 	u32 hotplug_irqs, enabled_irqs;
33056dbf30ceSVille Syrjälä 
33066dbf30ceSVille Syrjälä 	hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
330791d14251STvrtko Ursulin 	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_spt);
33086dbf30ceSVille Syrjälä 
33096dbf30ceSVille Syrjälä 	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
33106dbf30ceSVille Syrjälä 
33112a57d9ccSImre Deak 	spt_hpd_detection_setup(dev_priv);
331226951cafSXiong Zhang }
33137fe0b973SKeith Packard 
33141a56b1a2SImre Deak static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv)
33151a56b1a2SImre Deak {
33161a56b1a2SImre Deak 	u32 hotplug;
33171a56b1a2SImre Deak 
33181a56b1a2SImre Deak 	/*
33191a56b1a2SImre Deak 	 * Enable digital hotplug on the CPU, and configure the DP short pulse
33201a56b1a2SImre Deak 	 * duration to 2ms (which is the minimum in the Display Port spec)
33201a56b1a2SImre Deak 	 * duration to 2ms (which is the minimum in the Display Port spec).
33221a56b1a2SImre Deak 	 */
33231a56b1a2SImre Deak 	hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
33241a56b1a2SImre Deak 	hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK;
33251a56b1a2SImre Deak 	hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE |
33261a56b1a2SImre Deak 		   DIGITAL_PORTA_PULSE_DURATION_2ms;
33271a56b1a2SImre Deak 	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
33281a56b1a2SImre Deak }
33291a56b1a2SImre Deak 
333091d14251STvrtko Ursulin static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
3331e4ce95aaSVille Syrjälä {
33321a56b1a2SImre Deak 	u32 hotplug_irqs, enabled_irqs;
3333e4ce95aaSVille Syrjälä 
333491d14251STvrtko Ursulin 	if (INTEL_GEN(dev_priv) >= 8) {
33353a3b3c7dSVille Syrjälä 		hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG;
333691d14251STvrtko Ursulin 		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bdw);
33373a3b3c7dSVille Syrjälä 
33383a3b3c7dSVille Syrjälä 		bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
333991d14251STvrtko Ursulin 	} else if (INTEL_GEN(dev_priv) >= 7) {
334023bb4cb5SVille Syrjälä 		hotplug_irqs = DE_DP_A_HOTPLUG_IVB;
334191d14251STvrtko Ursulin 		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ivb);
33423a3b3c7dSVille Syrjälä 
33433a3b3c7dSVille Syrjälä 		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
334423bb4cb5SVille Syrjälä 	} else {
3345e4ce95aaSVille Syrjälä 		hotplug_irqs = DE_DP_A_HOTPLUG;
334691d14251STvrtko Ursulin 		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ilk);
3347e4ce95aaSVille Syrjälä 
3348e4ce95aaSVille Syrjälä 		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
33493a3b3c7dSVille Syrjälä 	}
3350e4ce95aaSVille Syrjälä 
33511a56b1a2SImre Deak 	ilk_hpd_detection_setup(dev_priv);
3352e4ce95aaSVille Syrjälä 
335391d14251STvrtko Ursulin 	ibx_hpd_irq_setup(dev_priv);
3354e4ce95aaSVille Syrjälä }
3355e4ce95aaSVille Syrjälä 
33562a57d9ccSImre Deak static void __bxt_hpd_detection_setup(struct drm_i915_private *dev_priv,
33572a57d9ccSImre Deak 				      u32 enabled_irqs)
3358e0a20ad7SShashank Sharma {
33592a57d9ccSImre Deak 	u32 hotplug;
3360e0a20ad7SShashank Sharma 
3361a52bb15bSVille Syrjälä 	hotplug = I915_READ(PCH_PORT_HOTPLUG);
33622a57d9ccSImre Deak 	hotplug |= PORTA_HOTPLUG_ENABLE |
33632a57d9ccSImre Deak 		   PORTB_HOTPLUG_ENABLE |
33642a57d9ccSImre Deak 		   PORTC_HOTPLUG_ENABLE;
3365d252bf68SShubhangi Shrivastava 
3366d252bf68SShubhangi Shrivastava 	DRM_DEBUG_KMS("Invert bit setting: hp_ctl:%x hp_port:%x\n",
3367d252bf68SShubhangi Shrivastava 		      hotplug, enabled_irqs);
3368d252bf68SShubhangi Shrivastava 	hotplug &= ~BXT_DDI_HPD_INVERT_MASK;
3369d252bf68SShubhangi Shrivastava 
3370d252bf68SShubhangi Shrivastava 	/*
3371d252bf68SShubhangi Shrivastava 	 * For BXT, the invert bit has to be set based on the AOB design
3372d252bf68SShubhangi Shrivastava 	 * for the HPD detection logic; update it based on the VBT fields.
3373d252bf68SShubhangi Shrivastava 	 */
3374d252bf68SShubhangi Shrivastava 	if ((enabled_irqs & BXT_DE_PORT_HP_DDIA) &&
3375d252bf68SShubhangi Shrivastava 	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_A))
3376d252bf68SShubhangi Shrivastava 		hotplug |= BXT_DDIA_HPD_INVERT;
3377d252bf68SShubhangi Shrivastava 	if ((enabled_irqs & BXT_DE_PORT_HP_DDIB) &&
3378d252bf68SShubhangi Shrivastava 	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_B))
3379d252bf68SShubhangi Shrivastava 		hotplug |= BXT_DDIB_HPD_INVERT;
3380d252bf68SShubhangi Shrivastava 	if ((enabled_irqs & BXT_DE_PORT_HP_DDIC) &&
3381d252bf68SShubhangi Shrivastava 	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_C))
3382d252bf68SShubhangi Shrivastava 		hotplug |= BXT_DDIC_HPD_INVERT;
3383d252bf68SShubhangi Shrivastava 
3384a52bb15bSVille Syrjälä 	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3385e0a20ad7SShashank Sharma }
3386e0a20ad7SShashank Sharma 
33872a57d9ccSImre Deak static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv)
33882a57d9ccSImre Deak {
33892a57d9ccSImre Deak 	__bxt_hpd_detection_setup(dev_priv, BXT_DE_PORT_HOTPLUG_MASK);
33902a57d9ccSImre Deak }
33912a57d9ccSImre Deak 
33922a57d9ccSImre Deak static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
33932a57d9ccSImre Deak {
33942a57d9ccSImre Deak 	u32 hotplug_irqs, enabled_irqs;
33952a57d9ccSImre Deak 
33962a57d9ccSImre Deak 	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bxt);
33972a57d9ccSImre Deak 	hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK;
33982a57d9ccSImre Deak 
33992a57d9ccSImre Deak 	bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
34002a57d9ccSImre Deak 
34012a57d9ccSImre Deak 	__bxt_hpd_detection_setup(dev_priv, enabled_irqs);
34022a57d9ccSImre Deak }
34032a57d9ccSImre Deak 
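/*
 * Unmask the always-wanted PCH interrupts in SDEIMR (GMBUS everywhere,
 * AUX on IBX/CPT/LPT, POISON on IBX) and program the PCH hotplug
 * detection registers; the spt variant covers SPT and newer PCHs.
 */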
3404d46da437SPaulo Zanoni static void ibx_irq_postinstall(struct drm_device *dev)
3405d46da437SPaulo Zanoni {
3406fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
340782a28bcfSDaniel Vetter 	u32 mask;
3408d46da437SPaulo Zanoni 
34096e266956STvrtko Ursulin 	if (HAS_PCH_NOP(dev_priv))
3410692a04cfSDaniel Vetter 		return;
3411692a04cfSDaniel Vetter 
34126e266956STvrtko Ursulin 	if (HAS_PCH_IBX(dev_priv))
34135c673b60SDaniel Vetter 		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
34144ebc6509SDhinakaran Pandiyan 	else if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
34155c673b60SDaniel Vetter 		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
34164ebc6509SDhinakaran Pandiyan 	else
34174ebc6509SDhinakaran Pandiyan 		mask = SDE_GMBUS_CPT;
34188664281bSPaulo Zanoni 
34193488d4ebSVille Syrjälä 	gen3_assert_iir_is_zero(dev_priv, SDEIIR);
3420d46da437SPaulo Zanoni 	I915_WRITE(SDEIMR, ~mask);
34212a57d9ccSImre Deak 
34222a57d9ccSImre Deak 	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
34232a57d9ccSImre Deak 	    HAS_PCH_LPT(dev_priv))
34241a56b1a2SImre Deak 		ibx_hpd_detection_setup(dev_priv);
34252a57d9ccSImre Deak 	else
34262a57d9ccSImre Deak 		spt_hpd_detection_setup(dev_priv);
3427d46da437SPaulo Zanoni }
3428d46da437SPaulo Zanoni 
34290a9a8c91SDaniel Vetter static void gen5_gt_irq_postinstall(struct drm_device *dev)
34300a9a8c91SDaniel Vetter {
3431fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
34320a9a8c91SDaniel Vetter 	u32 pm_irqs, gt_irqs;
34330a9a8c91SDaniel Vetter 
34340a9a8c91SDaniel Vetter 	pm_irqs = gt_irqs = 0;
34350a9a8c91SDaniel Vetter 
34360a9a8c91SDaniel Vetter 	dev_priv->gt_irq_mask = ~0;
34373c9192bcSTvrtko Ursulin 	if (HAS_L3_DPF(dev_priv)) {
34380a9a8c91SDaniel Vetter 		/* L3 parity interrupt is always unmasked. */
3439772c2a51STvrtko Ursulin 		dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev_priv);
3440772c2a51STvrtko Ursulin 		gt_irqs |= GT_PARITY_ERROR(dev_priv);
34410a9a8c91SDaniel Vetter 	}
34420a9a8c91SDaniel Vetter 
34430a9a8c91SDaniel Vetter 	gt_irqs |= GT_RENDER_USER_INTERRUPT;
34445db94019STvrtko Ursulin 	if (IS_GEN5(dev_priv)) {
3445f8973c21SChris Wilson 		gt_irqs |= ILK_BSD_USER_INTERRUPT;
34460a9a8c91SDaniel Vetter 	} else {
34470a9a8c91SDaniel Vetter 		gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
34480a9a8c91SDaniel Vetter 	}
34490a9a8c91SDaniel Vetter 
34503488d4ebSVille Syrjälä 	GEN3_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
34510a9a8c91SDaniel Vetter 
3452b243f530STvrtko Ursulin 	if (INTEL_GEN(dev_priv) >= 6) {
345378e68d36SImre Deak 		/*
345478e68d36SImre Deak 		 * RPS interrupts will get enabled/disabled on demand when RPS
345578e68d36SImre Deak 		 * itself is enabled/disabled.
345678e68d36SImre Deak 		 */
3457f4e9af4fSAkash Goel 		if (HAS_VEBOX(dev_priv)) {
34580a9a8c91SDaniel Vetter 			pm_irqs |= PM_VEBOX_USER_INTERRUPT;
3459f4e9af4fSAkash Goel 			dev_priv->pm_ier |= PM_VEBOX_USER_INTERRUPT;
3460f4e9af4fSAkash Goel 		}
34610a9a8c91SDaniel Vetter 
3462f4e9af4fSAkash Goel 		dev_priv->pm_imr = 0xffffffff;
34633488d4ebSVille Syrjälä 		GEN3_IRQ_INIT(GEN6_PM, dev_priv->pm_imr, pm_irqs);
34640a9a8c91SDaniel Vetter 	}
34650a9a8c91SDaniel Vetter }
34660a9a8c91SDaniel Vetter 
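/*
 * display_mask is what stays permanently unmasked in DEIMR; the extra_mask
 * bits (vblank, underrun/error, port A hotplug) are only enabled in DEIER
 * and are unmasked on demand, e.g. by the vblank and hotplug code.
 */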
3467f71d4af4SJesse Barnes static int ironlake_irq_postinstall(struct drm_device *dev)
3468036a4a7dSZhenyu Wang {
3469fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
34708e76f8dcSPaulo Zanoni 	u32 display_mask, extra_mask;
34718e76f8dcSPaulo Zanoni 
3472b243f530STvrtko Ursulin 	if (INTEL_GEN(dev_priv) >= 7) {
34738e76f8dcSPaulo Zanoni 		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
3474842ebf7aSVille Syrjälä 				DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB);
34758e76f8dcSPaulo Zanoni 		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
347623bb4cb5SVille Syrjälä 			      DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
347723bb4cb5SVille Syrjälä 			      DE_DP_A_HOTPLUG_IVB);
34788e76f8dcSPaulo Zanoni 	} else {
34798e76f8dcSPaulo Zanoni 		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
3480842ebf7aSVille Syrjälä 				DE_AUX_CHANNEL_A | DE_PIPEB_CRC_DONE |
3481842ebf7aSVille Syrjälä 				DE_PIPEA_CRC_DONE | DE_POISON);
3482e4ce95aaSVille Syrjälä 		extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
3483e4ce95aaSVille Syrjälä 			      DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
3484e4ce95aaSVille Syrjälä 			      DE_DP_A_HOTPLUG);
34858e76f8dcSPaulo Zanoni 	}
3486036a4a7dSZhenyu Wang 
34871ec14ad3SChris Wilson 	dev_priv->irq_mask = ~display_mask;
3488036a4a7dSZhenyu Wang 
3489622364b6SPaulo Zanoni 	ibx_irq_pre_postinstall(dev);
3490622364b6SPaulo Zanoni 
34913488d4ebSVille Syrjälä 	GEN3_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);
3492036a4a7dSZhenyu Wang 
34930a9a8c91SDaniel Vetter 	gen5_gt_irq_postinstall(dev);
3494036a4a7dSZhenyu Wang 
34951a56b1a2SImre Deak 	ilk_hpd_detection_setup(dev_priv);
34961a56b1a2SImre Deak 
3497d46da437SPaulo Zanoni 	ibx_irq_postinstall(dev);
34987fe0b973SKeith Packard 
349950a0bc90STvrtko Ursulin 	if (IS_IRONLAKE_M(dev_priv)) {
35006005ce42SDaniel Vetter 		/* Enable PCU event interrupts
35016005ce42SDaniel Vetter 		 *
35026005ce42SDaniel Vetter 		 * Spinlocking is not required here for correctness since interrupt
35034bc9d430SDaniel Vetter 		 * setup is guaranteed to run in single-threaded context, but we
35044bc9d430SDaniel Vetter 		 * need it to make the assert_spin_locked check happy. */
3505d6207435SDaniel Vetter 		spin_lock_irq(&dev_priv->irq_lock);
3506fbdedaeaSVille Syrjälä 		ilk_enable_display_irq(dev_priv, DE_PCU_EVENT);
3507d6207435SDaniel Vetter 		spin_unlock_irq(&dev_priv->irq_lock);
3508f97108d1SJesse Barnes 	}
3509f97108d1SJesse Barnes 
3510036a4a7dSZhenyu Wang 	return 0;
3511036a4a7dSZhenyu Wang }
3512036a4a7dSZhenyu Wang 
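/*
 * On VLV/CHV the display interrupt block can be powered off at runtime, so
 * display IRQ setup is toggled separately from the GT interrupts; both
 * helpers below are called with irq_lock held (see the lockdep asserts).
 */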
3513f8b79e58SImre Deak void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
3514f8b79e58SImre Deak {
351567520415SChris Wilson 	lockdep_assert_held(&dev_priv->irq_lock);
3516f8b79e58SImre Deak 
3517f8b79e58SImre Deak 	if (dev_priv->display_irqs_enabled)
3518f8b79e58SImre Deak 		return;
3519f8b79e58SImre Deak 
3520f8b79e58SImre Deak 	dev_priv->display_irqs_enabled = true;
3521f8b79e58SImre Deak 
3522d6c69803SVille Syrjälä 	if (intel_irqs_enabled(dev_priv)) {
3523d6c69803SVille Syrjälä 		vlv_display_irq_reset(dev_priv);
3524ad22d106SVille Syrjälä 		vlv_display_irq_postinstall(dev_priv);
3525f8b79e58SImre Deak 	}
3526d6c69803SVille Syrjälä }
3527f8b79e58SImre Deak 
3528f8b79e58SImre Deak void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
3529f8b79e58SImre Deak {
353067520415SChris Wilson 	lockdep_assert_held(&dev_priv->irq_lock);
3531f8b79e58SImre Deak 
3532f8b79e58SImre Deak 	if (!dev_priv->display_irqs_enabled)
3533f8b79e58SImre Deak 		return;
3534f8b79e58SImre Deak 
3535f8b79e58SImre Deak 	dev_priv->display_irqs_enabled = false;
3536f8b79e58SImre Deak 
3537950eabafSImre Deak 	if (intel_irqs_enabled(dev_priv))
3538ad22d106SVille Syrjälä 		vlv_display_irq_reset(dev_priv);
3539f8b79e58SImre Deak }
3540f8b79e58SImre Deak 
35410e6c9a9eSVille Syrjälä 
35420e6c9a9eSVille Syrjälä static int valleyview_irq_postinstall(struct drm_device *dev)
35430e6c9a9eSVille Syrjälä {
3544fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
35450e6c9a9eSVille Syrjälä 
35460a9a8c91SDaniel Vetter 	gen5_gt_irq_postinstall(dev);
35477e231dbeSJesse Barnes 
3548ad22d106SVille Syrjälä 	spin_lock_irq(&dev_priv->irq_lock);
35499918271eSVille Syrjälä 	if (dev_priv->display_irqs_enabled)
3550ad22d106SVille Syrjälä 		vlv_display_irq_postinstall(dev_priv);
3551ad22d106SVille Syrjälä 	spin_unlock_irq(&dev_priv->irq_lock);
3552ad22d106SVille Syrjälä 
35537e231dbeSJesse Barnes 	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
355434c7b8a7SVille Syrjälä 	POSTING_READ(VLV_MASTER_IER);
355520afbda2SDaniel Vetter 
355620afbda2SDaniel Vetter 	return 0;
355720afbda2SDaniel Vetter }
355820afbda2SDaniel Vetter 
3559abd58f01SBen Widawsky static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
3560abd58f01SBen Widawsky {
3561abd58f01SBen Widawsky 	/* These are interrupts we'll toggle with the ring mask register */
3562abd58f01SBen Widawsky 	uint32_t gt_interrupts[] = {
3563abd58f01SBen Widawsky 		GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
356473d477f6SOscar Mateo 			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
356573d477f6SOscar Mateo 			GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
356673d477f6SOscar Mateo 			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
3567abd58f01SBen Widawsky 		GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
356873d477f6SOscar Mateo 			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
356973d477f6SOscar Mateo 			GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
357073d477f6SOscar Mateo 			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
3571abd58f01SBen Widawsky 		0,
357273d477f6SOscar Mateo 		GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
357373d477f6SOscar Mateo 			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
3574abd58f01SBen Widawsky 		};
3575abd58f01SBen Widawsky 
357698735739STvrtko Ursulin 	if (HAS_L3_DPF(dev_priv))
357798735739STvrtko Ursulin 		gt_interrupts[0] |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
357898735739STvrtko Ursulin 
3579f4e9af4fSAkash Goel 	dev_priv->pm_ier = 0x0;
3580f4e9af4fSAkash Goel 	dev_priv->pm_imr = ~dev_priv->pm_ier;
35819a2d2d87SDeepak S 	GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
35829a2d2d87SDeepak S 	GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
358378e68d36SImre Deak 	/*
358478e68d36SImre Deak 	 * RPS interrupts will get enabled/disabled on demand when RPS itself
358526705e20SSagar Arun Kamble 	 * is enabled/disabled. Same wil be the case for GuC interrupts.
358678e68d36SImre Deak 	 */
3587f4e9af4fSAkash Goel 	GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_imr, dev_priv->pm_ier);
35889a2d2d87SDeepak S 	GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
3589abd58f01SBen Widawsky }
3590abd58f01SBen Widawsky 
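/*
 * de_pipe_masked / de_port_masked are the bits kept permanently unmasked;
 * the *_enables variants additionally go into the IER so that vblank,
 * underrun and hotplug bits can be unmasked later on demand.
 */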
3591abd58f01SBen Widawsky static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
3592abd58f01SBen Widawsky {
3593770de83dSDamien Lespiau 	uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
3594770de83dSDamien Lespiau 	uint32_t de_pipe_enables;
35953a3b3c7dSVille Syrjälä 	u32 de_port_masked = GEN8_AUX_CHANNEL_A;
35963a3b3c7dSVille Syrjälä 	u32 de_port_enables;
359711825b0dSVille Syrjälä 	u32 de_misc_masked = GEN8_DE_MISC_GSE;
35983a3b3c7dSVille Syrjälä 	enum pipe pipe;
3599770de83dSDamien Lespiau 
3600bca2bf2aSPandiyan, Dhinakaran 	if (INTEL_GEN(dev_priv) >= 9) {
3601842ebf7aSVille Syrjälä 		de_pipe_masked |= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
36023a3b3c7dSVille Syrjälä 		de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
360388e04703SJesse Barnes 				  GEN9_AUX_CHANNEL_D;
3604cc3f90f0SAnder Conselvan de Oliveira 		if (IS_GEN9_LP(dev_priv))
36053a3b3c7dSVille Syrjälä 			de_port_masked |= BXT_DE_PORT_GMBUS;
36063a3b3c7dSVille Syrjälä 	} else {
3607842ebf7aSVille Syrjälä 		de_pipe_masked |= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
36083a3b3c7dSVille Syrjälä 	}
3609770de83dSDamien Lespiau 
3610770de83dSDamien Lespiau 	de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
3611770de83dSDamien Lespiau 					   GEN8_PIPE_FIFO_UNDERRUN;
3612770de83dSDamien Lespiau 
36133a3b3c7dSVille Syrjälä 	de_port_enables = de_port_masked;
3614cc3f90f0SAnder Conselvan de Oliveira 	if (IS_GEN9_LP(dev_priv))
3615a52bb15bSVille Syrjälä 		de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
3616a52bb15bSVille Syrjälä 	else if (IS_BROADWELL(dev_priv))
36173a3b3c7dSVille Syrjälä 		de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;
36183a3b3c7dSVille Syrjälä 
36190a195c02SMika Kahola 	for_each_pipe(dev_priv, pipe) {
36200a195c02SMika Kahola 		dev_priv->de_irq_mask[pipe] = ~de_pipe_masked;
3621abd58f01SBen Widawsky 
3622f458ebbcSDaniel Vetter 		if (intel_display_power_is_enabled(dev_priv,
3623813bde43SPaulo Zanoni 				POWER_DOMAIN_PIPE(pipe)))
3624813bde43SPaulo Zanoni 			GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
3625813bde43SPaulo Zanoni 					  dev_priv->de_irq_mask[pipe],
362635079899SPaulo Zanoni 					  de_pipe_enables);
36270a195c02SMika Kahola 	}
3628abd58f01SBen Widawsky 
36293488d4ebSVille Syrjälä 	GEN3_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
36303488d4ebSVille Syrjälä 	GEN3_IRQ_INIT(GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);
36312a57d9ccSImre Deak 
36322a57d9ccSImre Deak 	if (IS_GEN9_LP(dev_priv))
36332a57d9ccSImre Deak 		bxt_hpd_detection_setup(dev_priv);
36341a56b1a2SImre Deak 	else if (IS_BROADWELL(dev_priv))
36351a56b1a2SImre Deak 		ilk_hpd_detection_setup(dev_priv);
3636abd58f01SBen Widawsky }
3637abd58f01SBen Widawsky 
3638abd58f01SBen Widawsky static int gen8_irq_postinstall(struct drm_device *dev)
3639abd58f01SBen Widawsky {
3640fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
3641abd58f01SBen Widawsky 
36426e266956STvrtko Ursulin 	if (HAS_PCH_SPLIT(dev_priv))
3643622364b6SPaulo Zanoni 		ibx_irq_pre_postinstall(dev);
3644622364b6SPaulo Zanoni 
3645abd58f01SBen Widawsky 	gen8_gt_irq_postinstall(dev_priv);
3646abd58f01SBen Widawsky 	gen8_de_irq_postinstall(dev_priv);
3647abd58f01SBen Widawsky 
36486e266956STvrtko Ursulin 	if (HAS_PCH_SPLIT(dev_priv))
3649abd58f01SBen Widawsky 		ibx_irq_postinstall(dev);
3650abd58f01SBen Widawsky 
3651e5328c43SVille Syrjälä 	I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
3652abd58f01SBen Widawsky 	POSTING_READ(GEN8_MASTER_IRQ);
3653abd58f01SBen Widawsky 
3654abd58f01SBen Widawsky 	return 0;
3655abd58f01SBen Widawsky }
3656abd58f01SBen Widawsky 
365743f328d7SVille Syrjälä static int cherryview_irq_postinstall(struct drm_device *dev)
365843f328d7SVille Syrjälä {
3659fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
366043f328d7SVille Syrjälä 
366143f328d7SVille Syrjälä 	gen8_gt_irq_postinstall(dev_priv);
366243f328d7SVille Syrjälä 
3663ad22d106SVille Syrjälä 	spin_lock_irq(&dev_priv->irq_lock);
36649918271eSVille Syrjälä 	if (dev_priv->display_irqs_enabled)
3665ad22d106SVille Syrjälä 		vlv_display_irq_postinstall(dev_priv);
3666ad22d106SVille Syrjälä 	spin_unlock_irq(&dev_priv->irq_lock);
3667ad22d106SVille Syrjälä 
3668e5328c43SVille Syrjälä 	I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
366943f328d7SVille Syrjälä 	POSTING_READ(GEN8_MASTER_IRQ);
367043f328d7SVille Syrjälä 
367143f328d7SVille Syrjälä 	return 0;
367243f328d7SVille Syrjälä }
367343f328d7SVille Syrjälä 
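/*
 * Gen2 has 16-bit IMR/IER/IIR registers, hence the GEN2_* helpers and the
 * I915_WRITE16() accesses below; gen3/gen4 use the 32-bit GEN3_* variants.
 */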
36746bcdb1c8SVille Syrjälä static void i8xx_irq_reset(struct drm_device *dev)
3675c2798b19SChris Wilson {
3676fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
3677c2798b19SChris Wilson 
367844d9241eSVille Syrjälä 	i9xx_pipestat_irq_reset(dev_priv);
367944d9241eSVille Syrjälä 
3680d420a50cSVille Syrjälä 	I915_WRITE16(HWSTAM, 0xffff);
3681d420a50cSVille Syrjälä 
3682e9e9848aSVille Syrjälä 	GEN2_IRQ_RESET();
3683c2798b19SChris Wilson }
3684c2798b19SChris Wilson 
3685c2798b19SChris Wilson static int i8xx_irq_postinstall(struct drm_device *dev)
3686c2798b19SChris Wilson {
3687fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
3688e9e9848aSVille Syrjälä 	u16 enable_mask;
3689c2798b19SChris Wilson 
3690045cebd2SVille Syrjälä 	I915_WRITE16(EMR, ~(I915_ERROR_PAGE_TABLE |
3691045cebd2SVille Syrjälä 			    I915_ERROR_MEMORY_REFRESH));
3692c2798b19SChris Wilson 
3693c2798b19SChris Wilson 	/* Unmask the interrupts that we always want on. */
3694c2798b19SChris Wilson 	dev_priv->irq_mask =
3695c2798b19SChris Wilson 		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3696842ebf7aSVille Syrjälä 		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT);
3697c2798b19SChris Wilson 
3698e9e9848aSVille Syrjälä 	enable_mask =
3699c2798b19SChris Wilson 		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3700c2798b19SChris Wilson 		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3701e9e9848aSVille Syrjälä 		I915_USER_INTERRUPT;
3702e9e9848aSVille Syrjälä 
3703e9e9848aSVille Syrjälä 	GEN2_IRQ_INIT(, dev_priv->irq_mask, enable_mask);
3704c2798b19SChris Wilson 
3705379ef82dSDaniel Vetter 	/* Interrupt setup is already guaranteed to be single-threaded; this is
3706379ef82dSDaniel Vetter 	 * just to make the assert_spin_locked check happy. */
3707d6207435SDaniel Vetter 	spin_lock_irq(&dev_priv->irq_lock);
3708755e9019SImre Deak 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3709755e9019SImre Deak 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
3710d6207435SDaniel Vetter 	spin_unlock_irq(&dev_priv->irq_lock);
3711379ef82dSDaniel Vetter 
3712c2798b19SChris Wilson 	return 0;
3713c2798b19SChris Wilson }
3714c2798b19SChris Wilson 
3715ff1f525eSDaniel Vetter static irqreturn_t i8xx_irq_handler(int irq, void *arg)
3716c2798b19SChris Wilson {
371745a83f84SDaniel Vetter 	struct drm_device *dev = arg;
3718fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
3719af722d28SVille Syrjälä 	irqreturn_t ret = IRQ_NONE;
3720c2798b19SChris Wilson 
37212dd2a883SImre Deak 	if (!intel_irqs_enabled(dev_priv))
37222dd2a883SImre Deak 		return IRQ_NONE;
37232dd2a883SImre Deak 
37241f814dacSImre Deak 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
37251f814dacSImre Deak 	disable_rpm_wakeref_asserts(dev_priv);
37261f814dacSImre Deak 
3727af722d28SVille Syrjälä 	do {
3728af722d28SVille Syrjälä 		u32 pipe_stats[I915_MAX_PIPES] = {};
3729af722d28SVille Syrjälä 		u16 iir;
3730af722d28SVille Syrjälä 
3731c2798b19SChris Wilson 		iir = I915_READ16(IIR);
3732c2798b19SChris Wilson 		if (iir == 0)
3733af722d28SVille Syrjälä 			break;
3734c2798b19SChris Wilson 
3735af722d28SVille Syrjälä 		ret = IRQ_HANDLED;
3736c2798b19SChris Wilson 
3737eb64343cSVille Syrjälä 		/* Call regardless, as some status bits might not be
3738eb64343cSVille Syrjälä 		 * signalled in iir */
3739eb64343cSVille Syrjälä 		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
3740c2798b19SChris Wilson 
3741fd3a4024SDaniel Vetter 		I915_WRITE16(IIR, iir);
3742c2798b19SChris Wilson 
3743c2798b19SChris Wilson 		if (iir & I915_USER_INTERRUPT)
37443b3f1650SAkash Goel 			notify_ring(dev_priv->engine[RCS]);
3745c2798b19SChris Wilson 
3746af722d28SVille Syrjälä 		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
3747af722d28SVille Syrjälä 			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
3748af722d28SVille Syrjälä 
3749eb64343cSVille Syrjälä 		i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats);
3750af722d28SVille Syrjälä 	} while (0);
3751c2798b19SChris Wilson 
37521f814dacSImre Deak 	enable_rpm_wakeref_asserts(dev_priv);
37531f814dacSImre Deak 
37541f814dacSImre Deak 	return ret;
3755c2798b19SChris Wilson }
3756c2798b19SChris Wilson 
37576bcdb1c8SVille Syrjälä static void i915_irq_reset(struct drm_device *dev)
3758a266c7d5SChris Wilson {
3759fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
3760a266c7d5SChris Wilson 
376156b857a5STvrtko Ursulin 	if (I915_HAS_HOTPLUG(dev_priv)) {
37620706f17cSEgbert Eich 		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
3763a266c7d5SChris Wilson 		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3764a266c7d5SChris Wilson 	}
3765a266c7d5SChris Wilson 
376644d9241eSVille Syrjälä 	i9xx_pipestat_irq_reset(dev_priv);
376744d9241eSVille Syrjälä 
3768d420a50cSVille Syrjälä 	I915_WRITE(HWSTAM, 0xffffffff);
376944d9241eSVille Syrjälä 
3770ba7eb789SVille Syrjälä 	GEN3_IRQ_RESET();
3771a266c7d5SChris Wilson }
3772a266c7d5SChris Wilson 
3773a266c7d5SChris Wilson static int i915_irq_postinstall(struct drm_device *dev)
3774a266c7d5SChris Wilson {
3775fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
377638bde180SChris Wilson 	u32 enable_mask;
3777a266c7d5SChris Wilson 
3778045cebd2SVille Syrjälä 	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE |
3779045cebd2SVille Syrjälä 			  I915_ERROR_MEMORY_REFRESH));
378038bde180SChris Wilson 
378138bde180SChris Wilson 	/* Unmask the interrupts that we always want on. */
378238bde180SChris Wilson 	dev_priv->irq_mask =
378338bde180SChris Wilson 		~(I915_ASLE_INTERRUPT |
378438bde180SChris Wilson 		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3785842ebf7aSVille Syrjälä 		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT);
378638bde180SChris Wilson 
378738bde180SChris Wilson 	enable_mask =
378838bde180SChris Wilson 		I915_ASLE_INTERRUPT |
378938bde180SChris Wilson 		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
379038bde180SChris Wilson 		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
379138bde180SChris Wilson 		I915_USER_INTERRUPT;
379238bde180SChris Wilson 
379356b857a5STvrtko Ursulin 	if (I915_HAS_HOTPLUG(dev_priv)) {
3794a266c7d5SChris Wilson 		/* Enable in IER... */
3795a266c7d5SChris Wilson 		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
3796a266c7d5SChris Wilson 		/* and unmask in IMR */
3797a266c7d5SChris Wilson 		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
3798a266c7d5SChris Wilson 	}
3799a266c7d5SChris Wilson 
3800ba7eb789SVille Syrjälä 	GEN3_IRQ_INIT(, dev_priv->irq_mask, enable_mask);
3801a266c7d5SChris Wilson 
3802379ef82dSDaniel Vetter 	/* Interrupt setup is already guaranteed to be single-threaded; this is
3803379ef82dSDaniel Vetter 	 * just to make the assert_spin_locked check happy. */
3804d6207435SDaniel Vetter 	spin_lock_irq(&dev_priv->irq_lock);
3805755e9019SImre Deak 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3806755e9019SImre Deak 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
3807d6207435SDaniel Vetter 	spin_unlock_irq(&dev_priv->irq_lock);
3808379ef82dSDaniel Vetter 
3809c30bb1fdSVille Syrjälä 	i915_enable_asle_pipestat(dev_priv);
3810c30bb1fdSVille Syrjälä 
381120afbda2SDaniel Vetter 	return 0;
381220afbda2SDaniel Vetter }
381320afbda2SDaniel Vetter 
3814ff1f525eSDaniel Vetter static irqreturn_t i915_irq_handler(int irq, void *arg)
3815a266c7d5SChris Wilson {
381645a83f84SDaniel Vetter 	struct drm_device *dev = arg;
3817fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
3818af722d28SVille Syrjälä 	irqreturn_t ret = IRQ_NONE;
3819a266c7d5SChris Wilson 
38202dd2a883SImre Deak 	if (!intel_irqs_enabled(dev_priv))
38212dd2a883SImre Deak 		return IRQ_NONE;
38222dd2a883SImre Deak 
38231f814dacSImre Deak 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
38241f814dacSImre Deak 	disable_rpm_wakeref_asserts(dev_priv);
38251f814dacSImre Deak 
382638bde180SChris Wilson 	do {
3827eb64343cSVille Syrjälä 		u32 pipe_stats[I915_MAX_PIPES] = {};
3828af722d28SVille Syrjälä 		u32 hotplug_status = 0;
3829af722d28SVille Syrjälä 		u32 iir;
3830a266c7d5SChris Wilson 
3831af722d28SVille Syrjälä 		iir = I915_READ(IIR);
3832af722d28SVille Syrjälä 		if (iir == 0)
3833af722d28SVille Syrjälä 			break;
3834af722d28SVille Syrjälä 
3835af722d28SVille Syrjälä 		ret = IRQ_HANDLED;
3836af722d28SVille Syrjälä 
3837af722d28SVille Syrjälä 		if (I915_HAS_HOTPLUG(dev_priv) &&
3838af722d28SVille Syrjälä 		    iir & I915_DISPLAY_PORT_INTERRUPT)
3839af722d28SVille Syrjälä 			hotplug_status = i9xx_hpd_irq_ack(dev_priv);
3840a266c7d5SChris Wilson 
3841eb64343cSVille Syrjälä 		/* Call regardless, as some status bits might not be
3842eb64343cSVille Syrjälä 		 * signalled in iir */
3843eb64343cSVille Syrjälä 		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
3844a266c7d5SChris Wilson 
3845fd3a4024SDaniel Vetter 		I915_WRITE(IIR, iir);
3846a266c7d5SChris Wilson 
3847a266c7d5SChris Wilson 		if (iir & I915_USER_INTERRUPT)
38483b3f1650SAkash Goel 			notify_ring(dev_priv->engine[RCS]);
3849a266c7d5SChris Wilson 
3850af722d28SVille Syrjälä 		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
3851af722d28SVille Syrjälä 			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
3852a266c7d5SChris Wilson 
3853af722d28SVille Syrjälä 		if (hotplug_status)
3854af722d28SVille Syrjälä 			i9xx_hpd_irq_handler(dev_priv, hotplug_status);
3855af722d28SVille Syrjälä 
3856af722d28SVille Syrjälä 		i915_pipestat_irq_handler(dev_priv, iir, pipe_stats);
3857af722d28SVille Syrjälä 	} while (0);
3858a266c7d5SChris Wilson 
38591f814dacSImre Deak 	enable_rpm_wakeref_asserts(dev_priv);
38601f814dacSImre Deak 
3861a266c7d5SChris Wilson 	return ret;
3862a266c7d5SChris Wilson }
3863a266c7d5SChris Wilson 
38646bcdb1c8SVille Syrjälä static void i965_irq_reset(struct drm_device *dev)
3865a266c7d5SChris Wilson {
3866fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
3867a266c7d5SChris Wilson 
38680706f17cSEgbert Eich 	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
3869a266c7d5SChris Wilson 	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3870a266c7d5SChris Wilson 
387144d9241eSVille Syrjälä 	i9xx_pipestat_irq_reset(dev_priv);
387244d9241eSVille Syrjälä 
3873d420a50cSVille Syrjälä 	I915_WRITE(HWSTAM, 0xffffffff);
387444d9241eSVille Syrjälä 
3875ba7eb789SVille Syrjälä 	GEN3_IRQ_RESET();
3876a266c7d5SChris Wilson }
3877a266c7d5SChris Wilson 
3878a266c7d5SChris Wilson static int i965_irq_postinstall(struct drm_device *dev)
3879a266c7d5SChris Wilson {
3880fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
3881bbba0a97SChris Wilson 	u32 enable_mask;
3882a266c7d5SChris Wilson 	u32 error_mask;
3883a266c7d5SChris Wilson 
3884045cebd2SVille Syrjälä 	/*
3885045cebd2SVille Syrjälä 	 * Enable some error detection; note the instruction error mask
3886045cebd2SVille Syrjälä 	 * bit is reserved, so we leave it masked.
3887045cebd2SVille Syrjälä 	 */
3888045cebd2SVille Syrjälä 	if (IS_G4X(dev_priv)) {
3889045cebd2SVille Syrjälä 		error_mask = ~(GM45_ERROR_PAGE_TABLE |
3890045cebd2SVille Syrjälä 			       GM45_ERROR_MEM_PRIV |
3891045cebd2SVille Syrjälä 			       GM45_ERROR_CP_PRIV |
3892045cebd2SVille Syrjälä 			       I915_ERROR_MEMORY_REFRESH);
3893045cebd2SVille Syrjälä 	} else {
3894045cebd2SVille Syrjälä 		error_mask = ~(I915_ERROR_PAGE_TABLE |
3895045cebd2SVille Syrjälä 			       I915_ERROR_MEMORY_REFRESH);
3896045cebd2SVille Syrjälä 	}
3897045cebd2SVille Syrjälä 	I915_WRITE(EMR, error_mask);
3898045cebd2SVille Syrjälä 
3899a266c7d5SChris Wilson 	/* Unmask the interrupts that we always want on. */
3900c30bb1fdSVille Syrjälä 	dev_priv->irq_mask =
3901c30bb1fdSVille Syrjälä 		~(I915_ASLE_INTERRUPT |
3902adca4730SChris Wilson 		  I915_DISPLAY_PORT_INTERRUPT |
3903bbba0a97SChris Wilson 		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3904bbba0a97SChris Wilson 		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3905bbba0a97SChris Wilson 		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
3906bbba0a97SChris Wilson 
3907c30bb1fdSVille Syrjälä 	enable_mask =
3908c30bb1fdSVille Syrjälä 		I915_ASLE_INTERRUPT |
3909c30bb1fdSVille Syrjälä 		I915_DISPLAY_PORT_INTERRUPT |
3910c30bb1fdSVille Syrjälä 		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3911c30bb1fdSVille Syrjälä 		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3912c30bb1fdSVille Syrjälä 		I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
3913c30bb1fdSVille Syrjälä 		I915_USER_INTERRUPT;
3914bbba0a97SChris Wilson 
391591d14251STvrtko Ursulin 	if (IS_G4X(dev_priv))
3916bbba0a97SChris Wilson 		enable_mask |= I915_BSD_USER_INTERRUPT;
3917a266c7d5SChris Wilson 
3918c30bb1fdSVille Syrjälä 	GEN3_IRQ_INIT(, dev_priv->irq_mask, enable_mask);
3919c30bb1fdSVille Syrjälä 
3920b79480baSDaniel Vetter 	/* Interrupt setup is already guaranteed to be single-threaded; this is
3921b79480baSDaniel Vetter 	 * just to make the assert_spin_locked check happy. */
3922d6207435SDaniel Vetter 	spin_lock_irq(&dev_priv->irq_lock);
3923755e9019SImre Deak 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3924755e9019SImre Deak 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3925755e9019SImre Deak 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
3926d6207435SDaniel Vetter 	spin_unlock_irq(&dev_priv->irq_lock);
3927a266c7d5SChris Wilson 
392891d14251STvrtko Ursulin 	i915_enable_asle_pipestat(dev_priv);
392920afbda2SDaniel Vetter 
393020afbda2SDaniel Vetter 	return 0;
393120afbda2SDaniel Vetter }
393220afbda2SDaniel Vetter 
393391d14251STvrtko Ursulin static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
393420afbda2SDaniel Vetter {
393520afbda2SDaniel Vetter 	u32 hotplug_en;
393620afbda2SDaniel Vetter 
393767520415SChris Wilson 	lockdep_assert_held(&dev_priv->irq_lock);
3938b5ea2d56SDaniel Vetter 
3939adca4730SChris Wilson 	/* Note HDMI and DP share hotplug bits */
3940e5868a31SEgbert Eich 	/* enable bits are the same for all generations */
394191d14251STvrtko Ursulin 	hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915);
3942a266c7d5SChris Wilson 	/* Programming the CRT detection parameters tends
3943a266c7d5SChris Wilson 	   to generate a spurious hotplug event about three
3944a266c7d5SChris Wilson 	   seconds later.  So just do it once.
3945a266c7d5SChris Wilson 	*/
394691d14251STvrtko Ursulin 	if (IS_G4X(dev_priv))
3947a266c7d5SChris Wilson 		hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
3948a266c7d5SChris Wilson 	hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
3949a266c7d5SChris Wilson 
3950a266c7d5SChris Wilson 	/* Ignore TV since it's buggy */
39510706f17cSEgbert Eich 	i915_hotplug_interrupt_update_locked(dev_priv,
3952f9e3dc78SJani Nikula 					     HOTPLUG_INT_EN_MASK |
3953f9e3dc78SJani Nikula 					     CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
3954f9e3dc78SJani Nikula 					     CRT_HOTPLUG_ACTIVATION_PERIOD_64,
39550706f17cSEgbert Eich 					     hotplug_en);
3956a266c7d5SChris Wilson }
3957a266c7d5SChris Wilson 
3958ff1f525eSDaniel Vetter static irqreturn_t i965_irq_handler(int irq, void *arg)
3959a266c7d5SChris Wilson {
396045a83f84SDaniel Vetter 	struct drm_device *dev = arg;
3961fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
3962af722d28SVille Syrjälä 	irqreturn_t ret = IRQ_NONE;
3963a266c7d5SChris Wilson 
39642dd2a883SImre Deak 	if (!intel_irqs_enabled(dev_priv))
39652dd2a883SImre Deak 		return IRQ_NONE;
39662dd2a883SImre Deak 
39671f814dacSImre Deak 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
39681f814dacSImre Deak 	disable_rpm_wakeref_asserts(dev_priv);
39691f814dacSImre Deak 
3970af722d28SVille Syrjälä 	do {
3971eb64343cSVille Syrjälä 		u32 pipe_stats[I915_MAX_PIPES] = {};
3972af722d28SVille Syrjälä 		u32 hotplug_status = 0;
3973af722d28SVille Syrjälä 		u32 iir;
39742c8ba29fSChris Wilson 
3975af722d28SVille Syrjälä 		iir = I915_READ(IIR);
3976af722d28SVille Syrjälä 		if (iir == 0)
3977af722d28SVille Syrjälä 			break;
3978af722d28SVille Syrjälä 
3979af722d28SVille Syrjälä 		ret = IRQ_HANDLED;
3980af722d28SVille Syrjälä 
3981af722d28SVille Syrjälä 		if (iir & I915_DISPLAY_PORT_INTERRUPT)
3982af722d28SVille Syrjälä 			hotplug_status = i9xx_hpd_irq_ack(dev_priv);
3983a266c7d5SChris Wilson 
3984eb64343cSVille Syrjälä 		/* Call regardless, as some status bits might not be
3985eb64343cSVille Syrjälä 		 * signalled in iir */
3986eb64343cSVille Syrjälä 		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
3987a266c7d5SChris Wilson 
3988fd3a4024SDaniel Vetter 		I915_WRITE(IIR, iir);
3989a266c7d5SChris Wilson 
3990a266c7d5SChris Wilson 		if (iir & I915_USER_INTERRUPT)
39913b3f1650SAkash Goel 			notify_ring(dev_priv->engine[RCS]);
3992af722d28SVille Syrjälä 
3993a266c7d5SChris Wilson 		if (iir & I915_BSD_USER_INTERRUPT)
39943b3f1650SAkash Goel 			notify_ring(dev_priv->engine[VCS]);
3995a266c7d5SChris Wilson 
3996af722d28SVille Syrjälä 		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
3997af722d28SVille Syrjälä 			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
3998515ac2bbSDaniel Vetter 
3999af722d28SVille Syrjälä 		if (hotplug_status)
4000af722d28SVille Syrjälä 			i9xx_hpd_irq_handler(dev_priv, hotplug_status);
4001af722d28SVille Syrjälä 
4002af722d28SVille Syrjälä 		i965_pipestat_irq_handler(dev_priv, iir, pipe_stats);
4003af722d28SVille Syrjälä 	} while (0);
4004a266c7d5SChris Wilson 
40051f814dacSImre Deak 	enable_rpm_wakeref_asserts(dev_priv);
40061f814dacSImre Deak 
4007a266c7d5SChris Wilson 	return ret;
4008a266c7d5SChris Wilson }
4009a266c7d5SChris Wilson 
4010fca52a55SDaniel Vetter /**
4011fca52a55SDaniel Vetter  * intel_irq_init - initializes irq support
4012fca52a55SDaniel Vetter  * @dev_priv: i915 device instance
4013fca52a55SDaniel Vetter  *
4014fca52a55SDaniel Vetter  * This function initializes all the irq support including work items, timers
4015fca52a55SDaniel Vetter  * and all the vtables. It does not set up the interrupt itself, though.
4016fca52a55SDaniel Vetter  */
4017b963291cSDaniel Vetter void intel_irq_init(struct drm_i915_private *dev_priv)
4018f71d4af4SJesse Barnes {
401991c8a326SChris Wilson 	struct drm_device *dev = &dev_priv->drm;
4020562d9baeSSagar Arun Kamble 	struct intel_rps *rps = &dev_priv->gt_pm.rps;
4021cefcff8fSJoonas Lahtinen 	int i;
40228b2e326dSChris Wilson 
402377913b39SJani Nikula 	intel_hpd_init_work(dev_priv);
402477913b39SJani Nikula 
4025562d9baeSSagar Arun Kamble 	INIT_WORK(&rps->work, gen6_pm_rps_work);
4026cefcff8fSJoonas Lahtinen 
4027a4da4fa4SDaniel Vetter 	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
4028cefcff8fSJoonas Lahtinen 	for (i = 0; i < MAX_L3_SLICES; ++i)
4029cefcff8fSJoonas Lahtinen 		dev_priv->l3_parity.remap_info[i] = NULL;
40308b2e326dSChris Wilson 
40314805fe82STvrtko Ursulin 	if (HAS_GUC_SCHED(dev_priv))
403226705e20SSagar Arun Kamble 		dev_priv->pm_guc_events = GEN9_GUC_TO_HOST_INT_EVENT;
403326705e20SSagar Arun Kamble 
4034a6706b45SDeepak S 	/* Let's track the enabled rps events */
4035666a4537SWayne Boyer 	if (IS_VALLEYVIEW(dev_priv))
40366c65a587SVille Syrjälä 		/* WaGsvRC0ResidencyMethod:vlv */
4037e0e8c7cbSChris Wilson 		dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
403831685c25SDeepak S 	else
4039a6706b45SDeepak S 		dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
4040a6706b45SDeepak S 
4041562d9baeSSagar Arun Kamble 	rps->pm_intrmsk_mbz = 0;
40421800ad25SSagar Arun Kamble 
40431800ad25SSagar Arun Kamble 	/*
4044acf2dc22SMika Kuoppala 	 * SNB, IVB and HSW can, while VLV and CHV may, hard hang on a
40451800ad25SSagar Arun Kamble 	 * looping batchbuffer if GEN6_PM_UP_EI_EXPIRED is masked.
40461800ad25SSagar Arun Kamble 	 *
40471800ad25SSagar Arun Kamble 	 * TODO: verify if this can be reproduced on VLV,CHV.
40481800ad25SSagar Arun Kamble 	 */
4049bca2bf2aSPandiyan, Dhinakaran 	if (INTEL_GEN(dev_priv) <= 7)
4050562d9baeSSagar Arun Kamble 		rps->pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED;
40511800ad25SSagar Arun Kamble 
4052bca2bf2aSPandiyan, Dhinakaran 	if (INTEL_GEN(dev_priv) >= 8)
4053562d9baeSSagar Arun Kamble 		rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
40541800ad25SSagar Arun Kamble 
4055b963291cSDaniel Vetter 	if (IS_GEN2(dev_priv)) {
40564194c088SRodrigo Vivi 		/* Gen2 doesn't have a hardware frame counter */
40574cdb83ecSVille Syrjälä 		dev->max_vblank_count = 0;
4058bca2bf2aSPandiyan, Dhinakaran 	} else if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
4059f71d4af4SJesse Barnes 		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
4060fd8f507cSVille Syrjälä 		dev->driver->get_vblank_counter = g4x_get_vblank_counter;
4061391f75e2SVille Syrjälä 	} else {
4062391f75e2SVille Syrjälä 		dev->driver->get_vblank_counter = i915_get_vblank_counter;
4063391f75e2SVille Syrjälä 		dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
4064f71d4af4SJesse Barnes 	}
4065f71d4af4SJesse Barnes 
406621da2700SVille Syrjälä 	/*
406721da2700SVille Syrjälä 	 * Opt out of the vblank disable timer on everything except gen2.
406821da2700SVille Syrjälä 	 * Gen2 doesn't have a hardware frame counter and so depends on
406921da2700SVille Syrjälä 	 * vblank interrupts to produce sane vblank sequence numbers.
407021da2700SVille Syrjälä 	 */
4071b963291cSDaniel Vetter 	if (!IS_GEN2(dev_priv))
407221da2700SVille Syrjälä 		dev->vblank_disable_immediate = true;
407321da2700SVille Syrjälä 
4074262fd485SChris Wilson 	/* Most platforms treat the display irq block as an always-on
4075262fd485SChris Wilson 	 * power domain. vlv/chv can disable it at runtime and need
4076262fd485SChris Wilson 	 * special care to avoid writing any of the display block registers
4077262fd485SChris Wilson 	 * outside of the power domain. We defer setting up the display irqs
4078262fd485SChris Wilson 	 * in this case to the runtime pm.
4079262fd485SChris Wilson 	 */
4080262fd485SChris Wilson 	dev_priv->display_irqs_enabled = true;
4081262fd485SChris Wilson 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
4082262fd485SChris Wilson 		dev_priv->display_irqs_enabled = false;
4083262fd485SChris Wilson 
4084317eaa95SLyude 	dev_priv->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD;
4085317eaa95SLyude 
40861bf6ad62SDaniel Vetter 	dev->driver->get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos;
4087f71d4af4SJesse Barnes 	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
4088f71d4af4SJesse Barnes 
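	/*
	 * Select the platform IRQ vtable, most specific first: CHV and VLV,
	 * then gen8+, then the PCH-split ilk/snb/ivb/hsw path, and finally
	 * the legacy gen2/3/4 handlers.
	 */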
4089b963291cSDaniel Vetter 	if (IS_CHERRYVIEW(dev_priv)) {
409043f328d7SVille Syrjälä 		dev->driver->irq_handler = cherryview_irq_handler;
40916bcdb1c8SVille Syrjälä 		dev->driver->irq_preinstall = cherryview_irq_reset;
409243f328d7SVille Syrjälä 		dev->driver->irq_postinstall = cherryview_irq_postinstall;
40936bcdb1c8SVille Syrjälä 		dev->driver->irq_uninstall = cherryview_irq_reset;
409486e83e35SChris Wilson 		dev->driver->enable_vblank = i965_enable_vblank;
409586e83e35SChris Wilson 		dev->driver->disable_vblank = i965_disable_vblank;
409643f328d7SVille Syrjälä 		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4097b963291cSDaniel Vetter 	} else if (IS_VALLEYVIEW(dev_priv)) {
40987e231dbeSJesse Barnes 		dev->driver->irq_handler = valleyview_irq_handler;
40996bcdb1c8SVille Syrjälä 		dev->driver->irq_preinstall = valleyview_irq_reset;
41007e231dbeSJesse Barnes 		dev->driver->irq_postinstall = valleyview_irq_postinstall;
41016bcdb1c8SVille Syrjälä 		dev->driver->irq_uninstall = valleyview_irq_reset;
410286e83e35SChris Wilson 		dev->driver->enable_vblank = i965_enable_vblank;
410386e83e35SChris Wilson 		dev->driver->disable_vblank = i965_disable_vblank;
4104fa00abe0SEgbert Eich 		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4105bca2bf2aSPandiyan, Dhinakaran 	} else if (INTEL_GEN(dev_priv) >= 8) {
4106abd58f01SBen Widawsky 		dev->driver->irq_handler = gen8_irq_handler;
4107723761b8SDaniel Vetter 		dev->driver->irq_preinstall = gen8_irq_reset;
4108abd58f01SBen Widawsky 		dev->driver->irq_postinstall = gen8_irq_postinstall;
41096bcdb1c8SVille Syrjälä 		dev->driver->irq_uninstall = gen8_irq_reset;
4110abd58f01SBen Widawsky 		dev->driver->enable_vblank = gen8_enable_vblank;
4111abd58f01SBen Widawsky 		dev->driver->disable_vblank = gen8_disable_vblank;
4112cc3f90f0SAnder Conselvan de Oliveira 		if (IS_GEN9_LP(dev_priv))
4113e0a20ad7SShashank Sharma 			dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
41147b22b8c4SRodrigo Vivi 		else if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv) ||
41157b22b8c4SRodrigo Vivi 			 HAS_PCH_CNP(dev_priv))
41166dbf30ceSVille Syrjälä 			dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
41176dbf30ceSVille Syrjälä 		else
41183a3b3c7dSVille Syrjälä 			dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
41196e266956STvrtko Ursulin 	} else if (HAS_PCH_SPLIT(dev_priv)) {
4120f71d4af4SJesse Barnes 		dev->driver->irq_handler = ironlake_irq_handler;
4121723761b8SDaniel Vetter 		dev->driver->irq_preinstall = ironlake_irq_reset;
4122f71d4af4SJesse Barnes 		dev->driver->irq_postinstall = ironlake_irq_postinstall;
41236bcdb1c8SVille Syrjälä 		dev->driver->irq_uninstall = ironlake_irq_reset;
4124f71d4af4SJesse Barnes 		dev->driver->enable_vblank = ironlake_enable_vblank;
4125f71d4af4SJesse Barnes 		dev->driver->disable_vblank = ironlake_disable_vblank;
4126e4ce95aaSVille Syrjälä 		dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
4127f71d4af4SJesse Barnes 	} else {
41287e22dbbbSTvrtko Ursulin 		if (IS_GEN2(dev_priv)) {
41296bcdb1c8SVille Syrjälä 			dev->driver->irq_preinstall = i8xx_irq_reset;
4130c2798b19SChris Wilson 			dev->driver->irq_postinstall = i8xx_irq_postinstall;
4131c2798b19SChris Wilson 			dev->driver->irq_handler = i8xx_irq_handler;
41326bcdb1c8SVille Syrjälä 			dev->driver->irq_uninstall = i8xx_irq_reset;
413386e83e35SChris Wilson 			dev->driver->enable_vblank = i8xx_enable_vblank;
413486e83e35SChris Wilson 			dev->driver->disable_vblank = i8xx_disable_vblank;
41357e22dbbbSTvrtko Ursulin 		} else if (IS_GEN3(dev_priv)) {
41366bcdb1c8SVille Syrjälä 			dev->driver->irq_preinstall = i915_irq_reset;
4137a266c7d5SChris Wilson 			dev->driver->irq_postinstall = i915_irq_postinstall;
41386bcdb1c8SVille Syrjälä 			dev->driver->irq_uninstall = i915_irq_reset;
4139a266c7d5SChris Wilson 			dev->driver->irq_handler = i915_irq_handler;
414086e83e35SChris Wilson 			dev->driver->enable_vblank = i8xx_enable_vblank;
414186e83e35SChris Wilson 			dev->driver->disable_vblank = i8xx_disable_vblank;
4142c2798b19SChris Wilson 		} else {
41436bcdb1c8SVille Syrjälä 			dev->driver->irq_preinstall = i965_irq_reset;
4144a266c7d5SChris Wilson 			dev->driver->irq_postinstall = i965_irq_postinstall;
41456bcdb1c8SVille Syrjälä 			dev->driver->irq_uninstall = i965_irq_reset;
4146a266c7d5SChris Wilson 			dev->driver->irq_handler = i965_irq_handler;
414786e83e35SChris Wilson 			dev->driver->enable_vblank = i965_enable_vblank;
414886e83e35SChris Wilson 			dev->driver->disable_vblank = i965_disable_vblank;
4149c2798b19SChris Wilson 		}
4150778eb334SVille Syrjälä 		if (I915_HAS_HOTPLUG(dev_priv))
4151778eb334SVille Syrjälä 			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4152f71d4af4SJesse Barnes 	}
4153f71d4af4SJesse Barnes }
415420afbda2SDaniel Vetter 
4155fca52a55SDaniel Vetter /**
4156cefcff8fSJoonas Lahtinen  * intel_irq_fini - deinitializes IRQ support
4157cefcff8fSJoonas Lahtinen  * @i915: i915 device instance
4158cefcff8fSJoonas Lahtinen  *
4159cefcff8fSJoonas Lahtinen  * This function deinitializes all the IRQ support.
4160cefcff8fSJoonas Lahtinen  */
4161cefcff8fSJoonas Lahtinen void intel_irq_fini(struct drm_i915_private *i915)
4162cefcff8fSJoonas Lahtinen {
4163cefcff8fSJoonas Lahtinen 	int i;
4164cefcff8fSJoonas Lahtinen 
4165cefcff8fSJoonas Lahtinen 	for (i = 0; i < MAX_L3_SLICES; ++i)
4166cefcff8fSJoonas Lahtinen 		kfree(i915->l3_parity.remap_info[i]);
4167cefcff8fSJoonas Lahtinen }
4168cefcff8fSJoonas Lahtinen 
4169cefcff8fSJoonas Lahtinen /**
4170fca52a55SDaniel Vetter  * intel_irq_install - enables the hardware interrupt
4171fca52a55SDaniel Vetter  * @dev_priv: i915 device instance
4172fca52a55SDaniel Vetter  *
4173fca52a55SDaniel Vetter  * This function enables the hardware interrupt handling, but leaves the hotplug
4174fca52a55SDaniel Vetter  * handling still disabled. It is called after intel_irq_init().
4175fca52a55SDaniel Vetter  *
4176fca52a55SDaniel Vetter  * In the driver load and resume code we need working interrupts in a few places
4177fca52a55SDaniel Vetter  * but don't want to deal with the hassle of concurrent probe and hotplug
4178fca52a55SDaniel Vetter  * workers. Hence the split into this two-stage approach.
4179fca52a55SDaniel Vetter  */
41802aeb7d3aSDaniel Vetter int intel_irq_install(struct drm_i915_private *dev_priv)
41812aeb7d3aSDaniel Vetter {
41822aeb7d3aSDaniel Vetter 	/*
41832aeb7d3aSDaniel Vetter 	 * We enable some interrupt sources in our postinstall hooks, so mark
41842aeb7d3aSDaniel Vetter 	 * interrupts as enabled _before_ actually enabling them to avoid
41852aeb7d3aSDaniel Vetter 	 * special cases in our ordering checks.
41862aeb7d3aSDaniel Vetter 	 */
4187ad1443f0SSagar Arun Kamble 	dev_priv->runtime_pm.irqs_enabled = true;
41882aeb7d3aSDaniel Vetter 
418991c8a326SChris Wilson 	return drm_irq_install(&dev_priv->drm, dev_priv->drm.pdev->irq);
41902aeb7d3aSDaniel Vetter }
41912aeb7d3aSDaniel Vetter 
4192fca52a55SDaniel Vetter /**
4193fca52a55SDaniel Vetter  * intel_irq_uninstall - finalizes all irq handling
4194fca52a55SDaniel Vetter  * @dev_priv: i915 device instance
4195fca52a55SDaniel Vetter  *
4196fca52a55SDaniel Vetter  * This stops interrupt and hotplug handling and unregisters and frees all
4197fca52a55SDaniel Vetter  * resources acquired in the init functions.
4198fca52a55SDaniel Vetter  */
41992aeb7d3aSDaniel Vetter void intel_irq_uninstall(struct drm_i915_private *dev_priv)
42002aeb7d3aSDaniel Vetter {
420191c8a326SChris Wilson 	drm_irq_uninstall(&dev_priv->drm);
42022aeb7d3aSDaniel Vetter 	intel_hpd_cancel_work(dev_priv);
4203ad1443f0SSagar Arun Kamble 	dev_priv->runtime_pm.irqs_enabled = false;
42042aeb7d3aSDaniel Vetter }
42052aeb7d3aSDaniel Vetter 
4206fca52a55SDaniel Vetter /**
4207fca52a55SDaniel Vetter  * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
4208fca52a55SDaniel Vetter  * @dev_priv: i915 device instance
4209fca52a55SDaniel Vetter  *
4210fca52a55SDaniel Vetter  * This function is used to disable interrupts at runtime, both in the runtime
4211fca52a55SDaniel Vetter  * pm and the system suspend/resume code.
4212fca52a55SDaniel Vetter  */
4213b963291cSDaniel Vetter void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
4214c67a470bSPaulo Zanoni {
421591c8a326SChris Wilson 	dev_priv->drm.driver->irq_uninstall(&dev_priv->drm);
4216ad1443f0SSagar Arun Kamble 	dev_priv->runtime_pm.irqs_enabled = false;
421791c8a326SChris Wilson 	synchronize_irq(dev_priv->drm.irq);
4218c67a470bSPaulo Zanoni }
4219c67a470bSPaulo Zanoni 
4220fca52a55SDaniel Vetter /**
4221fca52a55SDaniel Vetter  * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
4222fca52a55SDaniel Vetter  * @dev_priv: i915 device instance
4223fca52a55SDaniel Vetter  *
4224fca52a55SDaniel Vetter  * This function is used to enable interrupts at runtime, both in the runtime
4225fca52a55SDaniel Vetter  * pm and the system suspend/resume code.
4226fca52a55SDaniel Vetter  */
4227b963291cSDaniel Vetter void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
4228c67a470bSPaulo Zanoni {
4229ad1443f0SSagar Arun Kamble 	dev_priv->runtime_pm.irqs_enabled = true;
423091c8a326SChris Wilson 	dev_priv->drm.driver->irq_preinstall(&dev_priv->drm);
423191c8a326SChris Wilson 	dev_priv->drm.driver->irq_postinstall(&dev_priv->drm);
4232c67a470bSPaulo Zanoni }
4233