xref: /openbmc/linux/drivers/gpu/drm/i915/i915_irq.c (revision 96606f3beb8668de2c936d7719a7e385cff9ff01)
1c0e09200SDave Airlie /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
2c0e09200SDave Airlie  */
3c0e09200SDave Airlie /*
4c0e09200SDave Airlie  * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
5c0e09200SDave Airlie  * All Rights Reserved.
6c0e09200SDave Airlie  *
7c0e09200SDave Airlie  * Permission is hereby granted, free of charge, to any person obtaining a
8c0e09200SDave Airlie  * copy of this software and associated documentation files (the
9c0e09200SDave Airlie  * "Software"), to deal in the Software without restriction, including
10c0e09200SDave Airlie  * without limitation the rights to use, copy, modify, merge, publish,
11c0e09200SDave Airlie  * distribute, sub license, and/or sell copies of the Software, and to
12c0e09200SDave Airlie  * permit persons to whom the Software is furnished to do so, subject to
13c0e09200SDave Airlie  * the following conditions:
14c0e09200SDave Airlie  *
15c0e09200SDave Airlie  * The above copyright notice and this permission notice (including the
16c0e09200SDave Airlie  * next paragraph) shall be included in all copies or substantial portions
17c0e09200SDave Airlie  * of the Software.
18c0e09200SDave Airlie  *
19c0e09200SDave Airlie  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20c0e09200SDave Airlie  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21c0e09200SDave Airlie  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22c0e09200SDave Airlie  * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23c0e09200SDave Airlie  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24c0e09200SDave Airlie  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25c0e09200SDave Airlie  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26c0e09200SDave Airlie  *
27c0e09200SDave Airlie  */
28c0e09200SDave Airlie 
29a70491ccSJoe Perches #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30a70491ccSJoe Perches 
3163eeaf38SJesse Barnes #include <linux/sysrq.h>
325a0e3ad6STejun Heo #include <linux/slab.h>
33b2c88f5bSDamien Lespiau #include <linux/circ_buf.h>
34760285e7SDavid Howells #include <drm/drmP.h>
35760285e7SDavid Howells #include <drm/i915_drm.h>
36c0e09200SDave Airlie #include "i915_drv.h"
371c5d22f7SChris Wilson #include "i915_trace.h"
3879e53945SJesse Barnes #include "intel_drv.h"
39c0e09200SDave Airlie 
40fca52a55SDaniel Vetter /**
41fca52a55SDaniel Vetter  * DOC: interrupt handling
42fca52a55SDaniel Vetter  *
43fca52a55SDaniel Vetter  * These functions provide the basic support for enabling and disabling
44fca52a55SDaniel Vetter  * interrupt handling. There's a lot more functionality in i915_irq.c
45fca52a55SDaniel Vetter  * and related files, but that will be described in separate chapters.
46fca52a55SDaniel Vetter  */
47fca52a55SDaniel Vetter 
48e4ce95aaSVille Syrjälä static const u32 hpd_ilk[HPD_NUM_PINS] = {
49e4ce95aaSVille Syrjälä 	[HPD_PORT_A] = DE_DP_A_HOTPLUG,
50e4ce95aaSVille Syrjälä };
51e4ce95aaSVille Syrjälä 
5223bb4cb5SVille Syrjälä static const u32 hpd_ivb[HPD_NUM_PINS] = {
5323bb4cb5SVille Syrjälä 	[HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
5423bb4cb5SVille Syrjälä };
5523bb4cb5SVille Syrjälä 
563a3b3c7dSVille Syrjälä static const u32 hpd_bdw[HPD_NUM_PINS] = {
573a3b3c7dSVille Syrjälä 	[HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG,
583a3b3c7dSVille Syrjälä };
593a3b3c7dSVille Syrjälä 
607c7e10dbSVille Syrjälä static const u32 hpd_ibx[HPD_NUM_PINS] = {
61e5868a31SEgbert Eich 	[HPD_CRT] = SDE_CRT_HOTPLUG,
62e5868a31SEgbert Eich 	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
63e5868a31SEgbert Eich 	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
64e5868a31SEgbert Eich 	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
65e5868a31SEgbert Eich 	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
66e5868a31SEgbert Eich };
67e5868a31SEgbert Eich 
687c7e10dbSVille Syrjälä static const u32 hpd_cpt[HPD_NUM_PINS] = {
69e5868a31SEgbert Eich 	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
7073c352a2SDaniel Vetter 	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
71e5868a31SEgbert Eich 	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
72e5868a31SEgbert Eich 	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
73e5868a31SEgbert Eich 	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
74e5868a31SEgbert Eich };
75e5868a31SEgbert Eich 
7626951cafSXiong Zhang static const u32 hpd_spt[HPD_NUM_PINS] = {
7774c0b395SVille Syrjälä 	[HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
7826951cafSXiong Zhang 	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
7926951cafSXiong Zhang 	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
8026951cafSXiong Zhang 	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
8126951cafSXiong Zhang 	[HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT
8226951cafSXiong Zhang };
8326951cafSXiong Zhang 
847c7e10dbSVille Syrjälä static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
85e5868a31SEgbert Eich 	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
86e5868a31SEgbert Eich 	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
87e5868a31SEgbert Eich 	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
88e5868a31SEgbert Eich 	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
89e5868a31SEgbert Eich 	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
90e5868a31SEgbert Eich 	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
91e5868a31SEgbert Eich };
92e5868a31SEgbert Eich 
937c7e10dbSVille Syrjälä static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
94e5868a31SEgbert Eich 	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
95e5868a31SEgbert Eich 	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
96e5868a31SEgbert Eich 	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
97e5868a31SEgbert Eich 	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
98e5868a31SEgbert Eich 	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
99e5868a31SEgbert Eich 	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
100e5868a31SEgbert Eich };
101e5868a31SEgbert Eich 
1024bca26d0SVille Syrjälä static const u32 hpd_status_i915[HPD_NUM_PINS] = {
103e5868a31SEgbert Eich 	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
104e5868a31SEgbert Eich 	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
105e5868a31SEgbert Eich 	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
106e5868a31SEgbert Eich 	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
107e5868a31SEgbert Eich 	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
108e5868a31SEgbert Eich 	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
109e5868a31SEgbert Eich };
110e5868a31SEgbert Eich 
111e0a20ad7SShashank Sharma /* BXT hpd list */
112e0a20ad7SShashank Sharma static const u32 hpd_bxt[HPD_NUM_PINS] = {
1137f3561beSSonika Jindal 	[HPD_PORT_A] = BXT_DE_PORT_HP_DDIA,
114e0a20ad7SShashank Sharma 	[HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
115e0a20ad7SShashank Sharma 	[HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
116e0a20ad7SShashank Sharma };
117e0a20ad7SShashank Sharma 
1185c502442SPaulo Zanoni /* IIR can theoretically queue up two events. Be paranoid. */
119f86f3fb0SPaulo Zanoni #define GEN8_IRQ_RESET_NDX(type, which) do { \
1205c502442SPaulo Zanoni 	I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
1215c502442SPaulo Zanoni 	POSTING_READ(GEN8_##type##_IMR(which)); \
1225c502442SPaulo Zanoni 	I915_WRITE(GEN8_##type##_IER(which), 0); \
1235c502442SPaulo Zanoni 	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
1245c502442SPaulo Zanoni 	POSTING_READ(GEN8_##type##_IIR(which)); \
1255c502442SPaulo Zanoni 	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
1265c502442SPaulo Zanoni 	POSTING_READ(GEN8_##type##_IIR(which)); \
1275c502442SPaulo Zanoni } while (0)
1285c502442SPaulo Zanoni 
1293488d4ebSVille Syrjälä #define GEN3_IRQ_RESET(type) do { \
130a9d356a6SPaulo Zanoni 	I915_WRITE(type##IMR, 0xffffffff); \
1315c502442SPaulo Zanoni 	POSTING_READ(type##IMR); \
132a9d356a6SPaulo Zanoni 	I915_WRITE(type##IER, 0); \
1335c502442SPaulo Zanoni 	I915_WRITE(type##IIR, 0xffffffff); \
1345c502442SPaulo Zanoni 	POSTING_READ(type##IIR); \
1355c502442SPaulo Zanoni 	I915_WRITE(type##IIR, 0xffffffff); \
1365c502442SPaulo Zanoni 	POSTING_READ(type##IIR); \
137a9d356a6SPaulo Zanoni } while (0)
138a9d356a6SPaulo Zanoni 
139e9e9848aSVille Syrjälä #define GEN2_IRQ_RESET(type) do { \
140e9e9848aSVille Syrjälä 	I915_WRITE16(type##IMR, 0xffff); \
141e9e9848aSVille Syrjälä 	POSTING_READ16(type##IMR); \
142e9e9848aSVille Syrjälä 	I915_WRITE16(type##IER, 0); \
143e9e9848aSVille Syrjälä 	I915_WRITE16(type##IIR, 0xffff); \
144e9e9848aSVille Syrjälä 	POSTING_READ16(type##IIR); \
145e9e9848aSVille Syrjälä 	I915_WRITE16(type##IIR, 0xffff); \
146e9e9848aSVille Syrjälä 	POSTING_READ16(type##IIR); \
147e9e9848aSVille Syrjälä } while (0)
148e9e9848aSVille Syrjälä 
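/*
 * Illustrative expansion (not from the original source): GEN3_IRQ_RESET(GT)
 * masks everything in GTIMR, zeroes GTIER, and then clears GTIIR twice with
 * posting reads in between, because a second event may already be queued
 * behind the one being cleared (see the "IIR can theoretically queue up two
 * events" note above).
 */
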
149337ba017SPaulo Zanoni /*
150337ba017SPaulo Zanoni  * We should clear IMR at preinstall/uninstall, and just check at postinstall.
151337ba017SPaulo Zanoni  */
1523488d4ebSVille Syrjälä static void gen3_assert_iir_is_zero(struct drm_i915_private *dev_priv,
153f0f59a00SVille Syrjälä 				    i915_reg_t reg)
154b51a2842SVille Syrjälä {
155b51a2842SVille Syrjälä 	u32 val = I915_READ(reg);
156b51a2842SVille Syrjälä 
157b51a2842SVille Syrjälä 	if (val == 0)
158b51a2842SVille Syrjälä 		return;
159b51a2842SVille Syrjälä 
160b51a2842SVille Syrjälä 	WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
161f0f59a00SVille Syrjälä 	     i915_mmio_reg_offset(reg), val);
162b51a2842SVille Syrjälä 	I915_WRITE(reg, 0xffffffff);
163b51a2842SVille Syrjälä 	POSTING_READ(reg);
164b51a2842SVille Syrjälä 	I915_WRITE(reg, 0xffffffff);
165b51a2842SVille Syrjälä 	POSTING_READ(reg);
166b51a2842SVille Syrjälä }
167337ba017SPaulo Zanoni 
168e9e9848aSVille Syrjälä static void gen2_assert_iir_is_zero(struct drm_i915_private *dev_priv,
169e9e9848aSVille Syrjälä 				    i915_reg_t reg)
170e9e9848aSVille Syrjälä {
171e9e9848aSVille Syrjälä 	u16 val = I915_READ16(reg);
172e9e9848aSVille Syrjälä 
173e9e9848aSVille Syrjälä 	if (val == 0)
174e9e9848aSVille Syrjälä 		return;
175e9e9848aSVille Syrjälä 
176e9e9848aSVille Syrjälä 	WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
177e9e9848aSVille Syrjälä 	     i915_mmio_reg_offset(reg), val);
178e9e9848aSVille Syrjälä 	I915_WRITE16(reg, 0xffff);
179e9e9848aSVille Syrjälä 	POSTING_READ16(reg);
180e9e9848aSVille Syrjälä 	I915_WRITE16(reg, 0xffff);
181e9e9848aSVille Syrjälä 	POSTING_READ16(reg);
182e9e9848aSVille Syrjälä }
183e9e9848aSVille Syrjälä 
18435079899SPaulo Zanoni #define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
1853488d4ebSVille Syrjälä 	gen3_assert_iir_is_zero(dev_priv, GEN8_##type##_IIR(which)); \
18635079899SPaulo Zanoni 	I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
1877d1bd539SVille Syrjälä 	I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
1887d1bd539SVille Syrjälä 	POSTING_READ(GEN8_##type##_IMR(which)); \
18935079899SPaulo Zanoni } while (0)
19035079899SPaulo Zanoni 
1913488d4ebSVille Syrjälä #define GEN3_IRQ_INIT(type, imr_val, ier_val) do { \
1923488d4ebSVille Syrjälä 	gen3_assert_iir_is_zero(dev_priv, type##IIR); \
19335079899SPaulo Zanoni 	I915_WRITE(type##IER, (ier_val)); \
1947d1bd539SVille Syrjälä 	I915_WRITE(type##IMR, (imr_val)); \
1957d1bd539SVille Syrjälä 	POSTING_READ(type##IMR); \
19635079899SPaulo Zanoni } while (0)
19735079899SPaulo Zanoni 
198e9e9848aSVille Syrjälä #define GEN2_IRQ_INIT(type, imr_val, ier_val) do { \
199e9e9848aSVille Syrjälä 	gen2_assert_iir_is_zero(dev_priv, type##IIR); \
200e9e9848aSVille Syrjälä 	I915_WRITE16(type##IER, (ier_val)); \
201e9e9848aSVille Syrjälä 	I915_WRITE16(type##IMR, (imr_val)); \
202e9e9848aSVille Syrjälä 	POSTING_READ16(type##IMR); \
203e9e9848aSVille Syrjälä } while (0)
204e9e9848aSVille Syrjälä 
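/*
 * Ordering note (illustrative, not from the original source): the *_IRQ_INIT
 * macros first assert that IIR is already clear (a stale bit would fire as
 * soon as it is unmasked), then write IER before IMR, so that the final IMR
 * write, which actually unmasks the interrupts, only takes effect once the
 * desired sources are already selected in IER.
 */
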
205c9a9a268SImre Deak static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
20626705e20SSagar Arun Kamble static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
207c9a9a268SImre Deak 
2080706f17cSEgbert Eich /* For display hotplug interrupt */
2090706f17cSEgbert Eich static inline void
2100706f17cSEgbert Eich i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
2110706f17cSEgbert Eich 				     uint32_t mask,
2120706f17cSEgbert Eich 				     uint32_t bits)
2130706f17cSEgbert Eich {
2140706f17cSEgbert Eich 	uint32_t val;
2150706f17cSEgbert Eich 
21667520415SChris Wilson 	lockdep_assert_held(&dev_priv->irq_lock);
2170706f17cSEgbert Eich 	WARN_ON(bits & ~mask);
2180706f17cSEgbert Eich 
2190706f17cSEgbert Eich 	val = I915_READ(PORT_HOTPLUG_EN);
2200706f17cSEgbert Eich 	val &= ~mask;
2210706f17cSEgbert Eich 	val |= bits;
2220706f17cSEgbert Eich 	I915_WRITE(PORT_HOTPLUG_EN, val);
2230706f17cSEgbert Eich }
2240706f17cSEgbert Eich 
2250706f17cSEgbert Eich /**
2260706f17cSEgbert Eich  * i915_hotplug_interrupt_update - update hotplug interrupt enable
2270706f17cSEgbert Eich  * @dev_priv: driver private
2280706f17cSEgbert Eich  * @mask: bits to update
2290706f17cSEgbert Eich  * @bits: bits to enable
2300706f17cSEgbert Eich  * NOTE: the HPD enable bits are modified both inside and outside
2310706f17cSEgbert Eich  * of an interrupt context. To prevent the read-modify-write cycles
2320706f17cSEgbert Eich  * from interfering, these bits are protected by a spinlock. Since this
2330706f17cSEgbert Eich  * function is usually not called from a context where the lock is
2340706f17cSEgbert Eich  * held already, this function acquires the lock itself. A non-locking
2350706f17cSEgbert Eich  * version is also available.
2360706f17cSEgbert Eich  */
2370706f17cSEgbert Eich void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
2380706f17cSEgbert Eich 				   uint32_t mask,
2390706f17cSEgbert Eich 				   uint32_t bits)
2400706f17cSEgbert Eich {
2410706f17cSEgbert Eich 	spin_lock_irq(&dev_priv->irq_lock);
2420706f17cSEgbert Eich 	i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
2430706f17cSEgbert Eich 	spin_unlock_irq(&dev_priv->irq_lock);
2440706f17cSEgbert Eich }
2450706f17cSEgbert Eich 
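/*
 * Usage sketch (illustrative, not from the original source): a caller that
 * already holds dev_priv->irq_lock uses the _locked variant directly, e.g.
 *
 *	i915_hotplug_interrupt_update_locked(dev_priv,
 *					     PORTB_HOTPLUG_INT_EN,
 *					     PORTB_HOTPLUG_INT_EN);
 *
 * to enable port B hotplug detection, while callers outside the lock use
 * i915_hotplug_interrupt_update(), which wraps the same read-modify-write
 * in the spinlock.
 */
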
246*96606f3bSOscar Mateo static u32
247*96606f3bSOscar Mateo gen11_gt_engine_identity(struct drm_i915_private * const i915,
248*96606f3bSOscar Mateo 			 const unsigned int bank, const unsigned int bit);
249*96606f3bSOscar Mateo 
250*96606f3bSOscar Mateo static bool gen11_reset_one_iir(struct drm_i915_private * const i915,
251*96606f3bSOscar Mateo 				const unsigned int bank,
252*96606f3bSOscar Mateo 				const unsigned int bit)
253*96606f3bSOscar Mateo {
254*96606f3bSOscar Mateo 	void __iomem * const regs = i915->regs;
255*96606f3bSOscar Mateo 	u32 dw;
256*96606f3bSOscar Mateo 
257*96606f3bSOscar Mateo 	lockdep_assert_held(&i915->irq_lock);
258*96606f3bSOscar Mateo 
259*96606f3bSOscar Mateo 	dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
260*96606f3bSOscar Mateo 	if (dw & BIT(bit)) {
261*96606f3bSOscar Mateo 		/*
262*96606f3bSOscar Mateo 		 * According to the BSpec, DW_IIR bits cannot be cleared without
263*96606f3bSOscar Mateo 		 * first servicing the Selector & Shared IIR registers.
264*96606f3bSOscar Mateo 		 */
265*96606f3bSOscar Mateo 		gen11_gt_engine_identity(i915, bank, bit);
266*96606f3bSOscar Mateo 
267*96606f3bSOscar Mateo 		/*
268*96606f3bSOscar Mateo 		 * We locked GT INT DW by reading it. If we want to (try
269*96606f3bSOscar Mateo 		 * to) recover from this successfully, we need to clear
270*96606f3bSOscar Mateo 		 * our bit, otherwise we are locking the register for
271*96606f3bSOscar Mateo 		 * everybody.
272*96606f3bSOscar Mateo 		 */
273*96606f3bSOscar Mateo 		raw_reg_write(regs, GEN11_GT_INTR_DW(bank), BIT(bit));
274*96606f3bSOscar Mateo 
275*96606f3bSOscar Mateo 		return true;
276*96606f3bSOscar Mateo 	}
277*96606f3bSOscar Mateo 
278*96606f3bSOscar Mateo 	return false;
279*96606f3bSOscar Mateo }
280*96606f3bSOscar Mateo 
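/*
 * Illustrative flow (not from the original source): draining a latched PM
 * event in bank 0 therefore looks like
 *
 *	while (gen11_reset_one_iir(dev_priv, 0, GEN11_GTPM))
 *		;
 *
 * i.e. keep reading GT INTR DW (which locks it), service the identity
 * register and clear our bit, until no event remains latched; see
 * gen11_reset_rps_interrupts() below.
 */
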
281d9dc34f1SVille Syrjälä /**
282d9dc34f1SVille Syrjälä  * ilk_update_display_irq - update DEIMR
283d9dc34f1SVille Syrjälä  * @dev_priv: driver private
284d9dc34f1SVille Syrjälä  * @interrupt_mask: mask of interrupt bits to update
285d9dc34f1SVille Syrjälä  * @enabled_irq_mask: mask of interrupt bits to enable
286d9dc34f1SVille Syrjälä  */
287fbdedaeaSVille Syrjälä void ilk_update_display_irq(struct drm_i915_private *dev_priv,
288d9dc34f1SVille Syrjälä 			    uint32_t interrupt_mask,
289d9dc34f1SVille Syrjälä 			    uint32_t enabled_irq_mask)
290036a4a7dSZhenyu Wang {
291d9dc34f1SVille Syrjälä 	uint32_t new_val;
292d9dc34f1SVille Syrjälä 
29367520415SChris Wilson 	lockdep_assert_held(&dev_priv->irq_lock);
2944bc9d430SDaniel Vetter 
295d9dc34f1SVille Syrjälä 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
296d9dc34f1SVille Syrjälä 
2979df7575fSJesse Barnes 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
298c67a470bSPaulo Zanoni 		return;
299c67a470bSPaulo Zanoni 
300d9dc34f1SVille Syrjälä 	new_val = dev_priv->irq_mask;
301d9dc34f1SVille Syrjälä 	new_val &= ~interrupt_mask;
302d9dc34f1SVille Syrjälä 	new_val |= (~enabled_irq_mask & interrupt_mask);
303d9dc34f1SVille Syrjälä 
304d9dc34f1SVille Syrjälä 	if (new_val != dev_priv->irq_mask) {
305d9dc34f1SVille Syrjälä 		dev_priv->irq_mask = new_val;
3061ec14ad3SChris Wilson 		I915_WRITE(DEIMR, dev_priv->irq_mask);
3073143a2bfSChris Wilson 		POSTING_READ(DEIMR);
308036a4a7dSZhenyu Wang 	}
309036a4a7dSZhenyu Wang }
310036a4a7dSZhenyu Wang 
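/*
 * Worked example (illustrative, not from the original source): with IMR
 * semantics where a set bit masks (disables) the interrupt, calling
 * ilk_update_display_irq(dev_priv, 0x6, 0x2) while irq_mask == 0x9 gives
 *
 *	new_val = (0x9 & ~0x6) | (~0x2 & 0x6) = 0x9 | 0x4 = 0xd
 *
 * i.e. bit 1 ends up unmasked, bit 2 ends up masked, and bits outside
 * interrupt_mask (0 and 3) are left untouched.
 */
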
31143eaea13SPaulo Zanoni /**
31243eaea13SPaulo Zanoni  * ilk_update_gt_irq - update GTIMR
31343eaea13SPaulo Zanoni  * @dev_priv: driver private
31443eaea13SPaulo Zanoni  * @interrupt_mask: mask of interrupt bits to update
31543eaea13SPaulo Zanoni  * @enabled_irq_mask: mask of interrupt bits to enable
31643eaea13SPaulo Zanoni  */
31743eaea13SPaulo Zanoni static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
31843eaea13SPaulo Zanoni 			      uint32_t interrupt_mask,
31943eaea13SPaulo Zanoni 			      uint32_t enabled_irq_mask)
32043eaea13SPaulo Zanoni {
32167520415SChris Wilson 	lockdep_assert_held(&dev_priv->irq_lock);
32243eaea13SPaulo Zanoni 
32315a17aaeSDaniel Vetter 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
32415a17aaeSDaniel Vetter 
3259df7575fSJesse Barnes 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
326c67a470bSPaulo Zanoni 		return;
327c67a470bSPaulo Zanoni 
32843eaea13SPaulo Zanoni 	dev_priv->gt_irq_mask &= ~interrupt_mask;
32943eaea13SPaulo Zanoni 	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
33043eaea13SPaulo Zanoni 	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
33143eaea13SPaulo Zanoni }
33243eaea13SPaulo Zanoni 
333480c8033SDaniel Vetter void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
33443eaea13SPaulo Zanoni {
33543eaea13SPaulo Zanoni 	ilk_update_gt_irq(dev_priv, mask, mask);
33631bb59ccSChris Wilson 	POSTING_READ_FW(GTIMR);
33743eaea13SPaulo Zanoni }
33843eaea13SPaulo Zanoni 
339480c8033SDaniel Vetter void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
34043eaea13SPaulo Zanoni {
34143eaea13SPaulo Zanoni 	ilk_update_gt_irq(dev_priv, mask, 0);
34243eaea13SPaulo Zanoni }
34343eaea13SPaulo Zanoni 
344f0f59a00SVille Syrjälä static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv)
345b900b949SImre Deak {
346d02b98b8SOscar Mateo 	WARN_ON_ONCE(INTEL_GEN(dev_priv) >= 11);
347d02b98b8SOscar Mateo 
348bca2bf2aSPandiyan, Dhinakaran 	return INTEL_GEN(dev_priv) >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
349b900b949SImre Deak }
350b900b949SImre Deak 
351f0f59a00SVille Syrjälä static i915_reg_t gen6_pm_imr(struct drm_i915_private *dev_priv)
352a72fbc3aSImre Deak {
353d02b98b8SOscar Mateo 	if (INTEL_GEN(dev_priv) >= 11)
354d02b98b8SOscar Mateo 		return GEN11_GPM_WGBOXPERF_INTR_MASK;
355d02b98b8SOscar Mateo 	else if (INTEL_GEN(dev_priv) >= 8)
356d02b98b8SOscar Mateo 		return GEN8_GT_IMR(2);
357d02b98b8SOscar Mateo 	else
358d02b98b8SOscar Mateo 		return GEN6_PMIMR;
359a72fbc3aSImre Deak }
360a72fbc3aSImre Deak 
361f0f59a00SVille Syrjälä static i915_reg_t gen6_pm_ier(struct drm_i915_private *dev_priv)
362b900b949SImre Deak {
363d02b98b8SOscar Mateo 	if (INTEL_GEN(dev_priv) >= 11)
364d02b98b8SOscar Mateo 		return GEN11_GPM_WGBOXPERF_INTR_ENABLE;
365d02b98b8SOscar Mateo 	else if (INTEL_GEN(dev_priv) >= 8)
366d02b98b8SOscar Mateo 		return GEN8_GT_IER(2);
367d02b98b8SOscar Mateo 	else
368d02b98b8SOscar Mateo 		return GEN6_PMIER;
369b900b949SImre Deak }
370b900b949SImre Deak 
371edbfdb45SPaulo Zanoni /**
372edbfdb45SPaulo Zanoni  * snb_update_pm_irq - update GEN6_PMIMR
373edbfdb45SPaulo Zanoni  * @dev_priv: driver private
374edbfdb45SPaulo Zanoni  * @interrupt_mask: mask of interrupt bits to update
375edbfdb45SPaulo Zanoni  * @enabled_irq_mask: mask of interrupt bits to enable
376edbfdb45SPaulo Zanoni  */
377edbfdb45SPaulo Zanoni static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
378edbfdb45SPaulo Zanoni 			      uint32_t interrupt_mask,
379edbfdb45SPaulo Zanoni 			      uint32_t enabled_irq_mask)
380edbfdb45SPaulo Zanoni {
381605cd25bSPaulo Zanoni 	uint32_t new_val;
382edbfdb45SPaulo Zanoni 
38315a17aaeSDaniel Vetter 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
38415a17aaeSDaniel Vetter 
38567520415SChris Wilson 	lockdep_assert_held(&dev_priv->irq_lock);
386edbfdb45SPaulo Zanoni 
387f4e9af4fSAkash Goel 	new_val = dev_priv->pm_imr;
388f52ecbcfSPaulo Zanoni 	new_val &= ~interrupt_mask;
389f52ecbcfSPaulo Zanoni 	new_val |= (~enabled_irq_mask & interrupt_mask);
390f52ecbcfSPaulo Zanoni 
391f4e9af4fSAkash Goel 	if (new_val != dev_priv->pm_imr) {
392f4e9af4fSAkash Goel 		dev_priv->pm_imr = new_val;
393f4e9af4fSAkash Goel 		I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_imr);
394a72fbc3aSImre Deak 		POSTING_READ(gen6_pm_imr(dev_priv));
395edbfdb45SPaulo Zanoni 	}
396f52ecbcfSPaulo Zanoni }
397edbfdb45SPaulo Zanoni 
398f4e9af4fSAkash Goel void gen6_unmask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
399edbfdb45SPaulo Zanoni {
4009939fba2SImre Deak 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
4019939fba2SImre Deak 		return;
4029939fba2SImre Deak 
403edbfdb45SPaulo Zanoni 	snb_update_pm_irq(dev_priv, mask, mask);
404edbfdb45SPaulo Zanoni }
405edbfdb45SPaulo Zanoni 
406f4e9af4fSAkash Goel static void __gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
4079939fba2SImre Deak {
4089939fba2SImre Deak 	snb_update_pm_irq(dev_priv, mask, 0);
4099939fba2SImre Deak }
4109939fba2SImre Deak 
411f4e9af4fSAkash Goel void gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
412edbfdb45SPaulo Zanoni {
4139939fba2SImre Deak 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
4149939fba2SImre Deak 		return;
4159939fba2SImre Deak 
416f4e9af4fSAkash Goel 	__gen6_mask_pm_irq(dev_priv, mask);
417f4e9af4fSAkash Goel }
418f4e9af4fSAkash Goel 
4193814fd77SOscar Mateo static void gen6_reset_pm_iir(struct drm_i915_private *dev_priv, u32 reset_mask)
420f4e9af4fSAkash Goel {
421f4e9af4fSAkash Goel 	i915_reg_t reg = gen6_pm_iir(dev_priv);
422f4e9af4fSAkash Goel 
42367520415SChris Wilson 	lockdep_assert_held(&dev_priv->irq_lock);
424f4e9af4fSAkash Goel 
425f4e9af4fSAkash Goel 	I915_WRITE(reg, reset_mask);
426f4e9af4fSAkash Goel 	I915_WRITE(reg, reset_mask);
427f4e9af4fSAkash Goel 	POSTING_READ(reg);
428f4e9af4fSAkash Goel }
429f4e9af4fSAkash Goel 
4303814fd77SOscar Mateo static void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, u32 enable_mask)
431f4e9af4fSAkash Goel {
43267520415SChris Wilson 	lockdep_assert_held(&dev_priv->irq_lock);
433f4e9af4fSAkash Goel 
434f4e9af4fSAkash Goel 	dev_priv->pm_ier |= enable_mask;
435f4e9af4fSAkash Goel 	I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->pm_ier);
436f4e9af4fSAkash Goel 	gen6_unmask_pm_irq(dev_priv, enable_mask);
437f4e9af4fSAkash Goel 	/* unmask_pm_irq provides an implicit barrier (POSTING_READ) */
438f4e9af4fSAkash Goel }
439f4e9af4fSAkash Goel 
4403814fd77SOscar Mateo static void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, u32 disable_mask)
441f4e9af4fSAkash Goel {
44267520415SChris Wilson 	lockdep_assert_held(&dev_priv->irq_lock);
443f4e9af4fSAkash Goel 
444f4e9af4fSAkash Goel 	dev_priv->pm_ier &= ~disable_mask;
445f4e9af4fSAkash Goel 	__gen6_mask_pm_irq(dev_priv, disable_mask);
446f4e9af4fSAkash Goel 	I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->pm_ier);
447f4e9af4fSAkash Goel 	/* though a barrier is missing here, we don't really need one */
448edbfdb45SPaulo Zanoni }
449edbfdb45SPaulo Zanoni 
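/*
 * Ordering note (illustrative, not from the original source): on enable the
 * new PM IER bits are written before the IMR unmask, so the sources are
 * already selected when the unmask (which ends with a posting read) takes
 * effect; on disable the IMR bits are masked first, and only then is IER
 * cleared.
 */
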
450d02b98b8SOscar Mateo void gen11_reset_rps_interrupts(struct drm_i915_private *dev_priv)
451d02b98b8SOscar Mateo {
452d02b98b8SOscar Mateo 	spin_lock_irq(&dev_priv->irq_lock);
453d02b98b8SOscar Mateo 
454*96606f3bSOscar Mateo 	while (gen11_reset_one_iir(dev_priv, 0, GEN11_GTPM))
455*96606f3bSOscar Mateo 		;
456d02b98b8SOscar Mateo 
457d02b98b8SOscar Mateo 	dev_priv->gt_pm.rps.pm_iir = 0;
458d02b98b8SOscar Mateo 
459d02b98b8SOscar Mateo 	spin_unlock_irq(&dev_priv->irq_lock);
460d02b98b8SOscar Mateo }
461d02b98b8SOscar Mateo 
462dc97997aSChris Wilson void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
4633cc134e3SImre Deak {
4643cc134e3SImre Deak 	spin_lock_irq(&dev_priv->irq_lock);
465f4e9af4fSAkash Goel 	gen6_reset_pm_iir(dev_priv, dev_priv->pm_rps_events);
466562d9baeSSagar Arun Kamble 	dev_priv->gt_pm.rps.pm_iir = 0;
4673cc134e3SImre Deak 	spin_unlock_irq(&dev_priv->irq_lock);
4683cc134e3SImre Deak }
4693cc134e3SImre Deak 
47091d14251STvrtko Ursulin void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
471b900b949SImre Deak {
472562d9baeSSagar Arun Kamble 	struct intel_rps *rps = &dev_priv->gt_pm.rps;
473562d9baeSSagar Arun Kamble 
474562d9baeSSagar Arun Kamble 	if (READ_ONCE(rps->interrupts_enabled))
475f2a91d1aSChris Wilson 		return;
476f2a91d1aSChris Wilson 
477b900b949SImre Deak 	spin_lock_irq(&dev_priv->irq_lock);
478562d9baeSSagar Arun Kamble 	WARN_ON_ONCE(rps->pm_iir);
479*96606f3bSOscar Mateo 
480d02b98b8SOscar Mateo 	if (INTEL_GEN(dev_priv) >= 11)
481*96606f3bSOscar Mateo 		WARN_ON_ONCE(gen11_reset_one_iir(dev_priv, 0, GEN11_GTPM));
482d02b98b8SOscar Mateo 	else
483c33d247dSChris Wilson 		WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
484*96606f3bSOscar Mateo 
485562d9baeSSagar Arun Kamble 	rps->interrupts_enabled = true;
486b900b949SImre Deak 	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
48778e68d36SImre Deak 
488b900b949SImre Deak 	spin_unlock_irq(&dev_priv->irq_lock);
489b900b949SImre Deak }
490b900b949SImre Deak 
49191d14251STvrtko Ursulin void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
492b900b949SImre Deak {
493562d9baeSSagar Arun Kamble 	struct intel_rps *rps = &dev_priv->gt_pm.rps;
494562d9baeSSagar Arun Kamble 
495562d9baeSSagar Arun Kamble 	if (!READ_ONCE(rps->interrupts_enabled))
496f2a91d1aSChris Wilson 		return;
497f2a91d1aSChris Wilson 
498d4d70aa5SImre Deak 	spin_lock_irq(&dev_priv->irq_lock);
499562d9baeSSagar Arun Kamble 	rps->interrupts_enabled = false;
5009939fba2SImre Deak 
501b20e3cfeSDave Gordon 	I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0u));
5029939fba2SImre Deak 
503f4e9af4fSAkash Goel 	gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
50458072ccbSImre Deak 
50558072ccbSImre Deak 	spin_unlock_irq(&dev_priv->irq_lock);
50691c8a326SChris Wilson 	synchronize_irq(dev_priv->drm.irq);
507c33d247dSChris Wilson 
508c33d247dSChris Wilson 	/* Now that we will not be generating any more work, flush any
5093814fd77SOscar Mateo 	 * outstanding tasks. As we are called on the RPS idle path,
510c33d247dSChris Wilson 	 * we will reset the GPU to minimum frequencies, so the current
511c33d247dSChris Wilson 	 * state of the worker can be discarded.
512c33d247dSChris Wilson 	 */
513562d9baeSSagar Arun Kamble 	cancel_work_sync(&rps->work);
514d02b98b8SOscar Mateo 	if (INTEL_GEN(dev_priv) >= 11)
515d02b98b8SOscar Mateo 		gen11_reset_rps_interrupts(dev_priv);
516d02b98b8SOscar Mateo 	else
517c33d247dSChris Wilson 		gen6_reset_rps_interrupts(dev_priv);
518b900b949SImre Deak }
519b900b949SImre Deak 
52026705e20SSagar Arun Kamble void gen9_reset_guc_interrupts(struct drm_i915_private *dev_priv)
52126705e20SSagar Arun Kamble {
5221be333d3SSagar Arun Kamble 	assert_rpm_wakelock_held(dev_priv);
5231be333d3SSagar Arun Kamble 
52426705e20SSagar Arun Kamble 	spin_lock_irq(&dev_priv->irq_lock);
52526705e20SSagar Arun Kamble 	gen6_reset_pm_iir(dev_priv, dev_priv->pm_guc_events);
52626705e20SSagar Arun Kamble 	spin_unlock_irq(&dev_priv->irq_lock);
52726705e20SSagar Arun Kamble }
52826705e20SSagar Arun Kamble 
52926705e20SSagar Arun Kamble void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv)
53026705e20SSagar Arun Kamble {
5311be333d3SSagar Arun Kamble 	assert_rpm_wakelock_held(dev_priv);
5321be333d3SSagar Arun Kamble 
53326705e20SSagar Arun Kamble 	spin_lock_irq(&dev_priv->irq_lock);
53426705e20SSagar Arun Kamble 	if (!dev_priv->guc.interrupts_enabled) {
53526705e20SSagar Arun Kamble 		WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) &
53626705e20SSagar Arun Kamble 				       dev_priv->pm_guc_events);
53726705e20SSagar Arun Kamble 		dev_priv->guc.interrupts_enabled = true;
53826705e20SSagar Arun Kamble 		gen6_enable_pm_irq(dev_priv, dev_priv->pm_guc_events);
53926705e20SSagar Arun Kamble 	}
54026705e20SSagar Arun Kamble 	spin_unlock_irq(&dev_priv->irq_lock);
54126705e20SSagar Arun Kamble }
54226705e20SSagar Arun Kamble 
54326705e20SSagar Arun Kamble void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv)
54426705e20SSagar Arun Kamble {
5451be333d3SSagar Arun Kamble 	assert_rpm_wakelock_held(dev_priv);
5461be333d3SSagar Arun Kamble 
54726705e20SSagar Arun Kamble 	spin_lock_irq(&dev_priv->irq_lock);
54826705e20SSagar Arun Kamble 	dev_priv->guc.interrupts_enabled = false;
54926705e20SSagar Arun Kamble 
55026705e20SSagar Arun Kamble 	gen6_disable_pm_irq(dev_priv, dev_priv->pm_guc_events);
55126705e20SSagar Arun Kamble 
55226705e20SSagar Arun Kamble 	spin_unlock_irq(&dev_priv->irq_lock);
55326705e20SSagar Arun Kamble 	synchronize_irq(dev_priv->drm.irq);
55426705e20SSagar Arun Kamble 
55526705e20SSagar Arun Kamble 	gen9_reset_guc_interrupts(dev_priv);
55626705e20SSagar Arun Kamble }
55726705e20SSagar Arun Kamble 
5580961021aSBen Widawsky /**
5593a3b3c7dSVille Syrjälä  * bdw_update_port_irq - update DE port interrupt
5603a3b3c7dSVille Syrjälä  * @dev_priv: driver private
5613a3b3c7dSVille Syrjälä  * @interrupt_mask: mask of interrupt bits to update
5623a3b3c7dSVille Syrjälä  * @enabled_irq_mask: mask of interrupt bits to enable
5633a3b3c7dSVille Syrjälä  */
5643a3b3c7dSVille Syrjälä static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
5653a3b3c7dSVille Syrjälä 				uint32_t interrupt_mask,
5663a3b3c7dSVille Syrjälä 				uint32_t enabled_irq_mask)
5673a3b3c7dSVille Syrjälä {
5683a3b3c7dSVille Syrjälä 	uint32_t new_val;
5693a3b3c7dSVille Syrjälä 	uint32_t old_val;
5703a3b3c7dSVille Syrjälä 
57167520415SChris Wilson 	lockdep_assert_held(&dev_priv->irq_lock);
5723a3b3c7dSVille Syrjälä 
5733a3b3c7dSVille Syrjälä 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
5743a3b3c7dSVille Syrjälä 
5753a3b3c7dSVille Syrjälä 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
5763a3b3c7dSVille Syrjälä 		return;
5773a3b3c7dSVille Syrjälä 
5783a3b3c7dSVille Syrjälä 	old_val = I915_READ(GEN8_DE_PORT_IMR);
5793a3b3c7dSVille Syrjälä 
5803a3b3c7dSVille Syrjälä 	new_val = old_val;
5813a3b3c7dSVille Syrjälä 	new_val &= ~interrupt_mask;
5823a3b3c7dSVille Syrjälä 	new_val |= (~enabled_irq_mask & interrupt_mask);
5833a3b3c7dSVille Syrjälä 
5843a3b3c7dSVille Syrjälä 	if (new_val != old_val) {
5853a3b3c7dSVille Syrjälä 		I915_WRITE(GEN8_DE_PORT_IMR, new_val);
5863a3b3c7dSVille Syrjälä 		POSTING_READ(GEN8_DE_PORT_IMR);
5873a3b3c7dSVille Syrjälä 	}
5883a3b3c7dSVille Syrjälä }
5893a3b3c7dSVille Syrjälä 
5903a3b3c7dSVille Syrjälä /**
591013d3752SVille Syrjälä  * bdw_update_pipe_irq - update DE pipe interrupt
592013d3752SVille Syrjälä  * @dev_priv: driver private
593013d3752SVille Syrjälä  * @pipe: pipe whose interrupt to update
594013d3752SVille Syrjälä  * @interrupt_mask: mask of interrupt bits to update
595013d3752SVille Syrjälä  * @enabled_irq_mask: mask of interrupt bits to enable
596013d3752SVille Syrjälä  */
597013d3752SVille Syrjälä void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
598013d3752SVille Syrjälä 			 enum pipe pipe,
599013d3752SVille Syrjälä 			 uint32_t interrupt_mask,
600013d3752SVille Syrjälä 			 uint32_t enabled_irq_mask)
601013d3752SVille Syrjälä {
602013d3752SVille Syrjälä 	uint32_t new_val;
603013d3752SVille Syrjälä 
60467520415SChris Wilson 	lockdep_assert_held(&dev_priv->irq_lock);
605013d3752SVille Syrjälä 
606013d3752SVille Syrjälä 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
607013d3752SVille Syrjälä 
608013d3752SVille Syrjälä 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
609013d3752SVille Syrjälä 		return;
610013d3752SVille Syrjälä 
611013d3752SVille Syrjälä 	new_val = dev_priv->de_irq_mask[pipe];
612013d3752SVille Syrjälä 	new_val &= ~interrupt_mask;
613013d3752SVille Syrjälä 	new_val |= (~enabled_irq_mask & interrupt_mask);
614013d3752SVille Syrjälä 
615013d3752SVille Syrjälä 	if (new_val != dev_priv->de_irq_mask[pipe]) {
616013d3752SVille Syrjälä 		dev_priv->de_irq_mask[pipe] = new_val;
617013d3752SVille Syrjälä 		I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
618013d3752SVille Syrjälä 		POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
619013d3752SVille Syrjälä 	}
620013d3752SVille Syrjälä }
621013d3752SVille Syrjälä 
622013d3752SVille Syrjälä /**
623fee884edSDaniel Vetter  * ibx_display_interrupt_update - update SDEIMR
624fee884edSDaniel Vetter  * @dev_priv: driver private
625fee884edSDaniel Vetter  * @interrupt_mask: mask of interrupt bits to update
626fee884edSDaniel Vetter  * @enabled_irq_mask: mask of interrupt bits to enable
627fee884edSDaniel Vetter  */
62847339cd9SDaniel Vetter void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
629fee884edSDaniel Vetter 				  uint32_t interrupt_mask,
630fee884edSDaniel Vetter 				  uint32_t enabled_irq_mask)
631fee884edSDaniel Vetter {
632fee884edSDaniel Vetter 	uint32_t sdeimr = I915_READ(SDEIMR);
633fee884edSDaniel Vetter 	sdeimr &= ~interrupt_mask;
634fee884edSDaniel Vetter 	sdeimr |= (~enabled_irq_mask & interrupt_mask);
635fee884edSDaniel Vetter 
63615a17aaeSDaniel Vetter 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
63715a17aaeSDaniel Vetter 
63867520415SChris Wilson 	lockdep_assert_held(&dev_priv->irq_lock);
639fee884edSDaniel Vetter 
6409df7575fSJesse Barnes 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
641c67a470bSPaulo Zanoni 		return;
642c67a470bSPaulo Zanoni 
643fee884edSDaniel Vetter 	I915_WRITE(SDEIMR, sdeimr);
644fee884edSDaniel Vetter 	POSTING_READ(SDEIMR);
645fee884edSDaniel Vetter }
6468664281bSPaulo Zanoni 
6476b12ca56SVille Syrjälä u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
6486b12ca56SVille Syrjälä 			      enum pipe pipe)
6497c463586SKeith Packard {
6506b12ca56SVille Syrjälä 	u32 status_mask = dev_priv->pipestat_irq_mask[pipe];
65110c59c51SImre Deak 	u32 enable_mask = status_mask << 16;
65210c59c51SImre Deak 
6536b12ca56SVille Syrjälä 	lockdep_assert_held(&dev_priv->irq_lock);
6546b12ca56SVille Syrjälä 
6556b12ca56SVille Syrjälä 	if (INTEL_GEN(dev_priv) < 5)
6566b12ca56SVille Syrjälä 		goto out;
6576b12ca56SVille Syrjälä 
65810c59c51SImre Deak 	/*
659724a6905SVille Syrjälä 	 * On pipe A we don't support the PSR interrupt yet,
660724a6905SVille Syrjälä 	 * on pipe B and C the same bit MBZ.
66110c59c51SImre Deak 	 */
66210c59c51SImre Deak 	if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
66310c59c51SImre Deak 		return 0;
664724a6905SVille Syrjälä 	/*
665724a6905SVille Syrjälä 	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
666724a6905SVille Syrjälä 	 * A the same bit is for perf counters which we don't use either.
667724a6905SVille Syrjälä 	 */
668724a6905SVille Syrjälä 	if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
669724a6905SVille Syrjälä 		return 0;
67010c59c51SImre Deak 
67110c59c51SImre Deak 	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
67210c59c51SImre Deak 			 SPRITE0_FLIP_DONE_INT_EN_VLV |
67310c59c51SImre Deak 			 SPRITE1_FLIP_DONE_INT_EN_VLV);
67410c59c51SImre Deak 	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
67510c59c51SImre Deak 		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
67610c59c51SImre Deak 	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
67710c59c51SImre Deak 		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;
67810c59c51SImre Deak 
6796b12ca56SVille Syrjälä out:
6806b12ca56SVille Syrjälä 	WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
6816b12ca56SVille Syrjälä 		  status_mask & ~PIPESTAT_INT_STATUS_MASK,
6826b12ca56SVille Syrjälä 		  "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
6836b12ca56SVille Syrjälä 		  pipe_name(pipe), enable_mask, status_mask);
6846b12ca56SVille Syrjälä 
68510c59c51SImre Deak 	return enable_mask;
68610c59c51SImre Deak }
68710c59c51SImre Deak 
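/*
 * Illustrative note (not from the original source): PIPESTAT keeps the
 * status bits in the low 16 bits and the matching enable bits in the high
 * 16 bits, so the default enable_mask is simply status_mask << 16 (e.g. for
 * PIPE_LEGACY_BLC_EVENT_STATUS); the VLV/CHV sprite flip and PSR checks
 * above adjust that default on the gen5+ path.
 */
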
6886b12ca56SVille Syrjälä void i915_enable_pipestat(struct drm_i915_private *dev_priv,
6896b12ca56SVille Syrjälä 			  enum pipe pipe, u32 status_mask)
690755e9019SImre Deak {
6916b12ca56SVille Syrjälä 	i915_reg_t reg = PIPESTAT(pipe);
692755e9019SImre Deak 	u32 enable_mask;
693755e9019SImre Deak 
6946b12ca56SVille Syrjälä 	WARN_ONCE(status_mask & ~PIPESTAT_INT_STATUS_MASK,
6956b12ca56SVille Syrjälä 		  "pipe %c: status_mask=0x%x\n",
6966b12ca56SVille Syrjälä 		  pipe_name(pipe), status_mask);
6976b12ca56SVille Syrjälä 
6986b12ca56SVille Syrjälä 	lockdep_assert_held(&dev_priv->irq_lock);
6996b12ca56SVille Syrjälä 	WARN_ON(!intel_irqs_enabled(dev_priv));
7006b12ca56SVille Syrjälä 
7016b12ca56SVille Syrjälä 	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == status_mask)
7026b12ca56SVille Syrjälä 		return;
7036b12ca56SVille Syrjälä 
7046b12ca56SVille Syrjälä 	dev_priv->pipestat_irq_mask[pipe] |= status_mask;
7056b12ca56SVille Syrjälä 	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
7066b12ca56SVille Syrjälä 
7076b12ca56SVille Syrjälä 	I915_WRITE(reg, enable_mask | status_mask);
7086b12ca56SVille Syrjälä 	POSTING_READ(reg);
709755e9019SImre Deak }
710755e9019SImre Deak 
7116b12ca56SVille Syrjälä void i915_disable_pipestat(struct drm_i915_private *dev_priv,
7126b12ca56SVille Syrjälä 			   enum pipe pipe, u32 status_mask)
713755e9019SImre Deak {
7146b12ca56SVille Syrjälä 	i915_reg_t reg = PIPESTAT(pipe);
715755e9019SImre Deak 	u32 enable_mask;
716755e9019SImre Deak 
7176b12ca56SVille Syrjälä 	WARN_ONCE(status_mask & ~PIPESTAT_INT_STATUS_MASK,
7186b12ca56SVille Syrjälä 		  "pipe %c: status_mask=0x%x\n",
7196b12ca56SVille Syrjälä 		  pipe_name(pipe), status_mask);
7206b12ca56SVille Syrjälä 
7216b12ca56SVille Syrjälä 	lockdep_assert_held(&dev_priv->irq_lock);
7226b12ca56SVille Syrjälä 	WARN_ON(!intel_irqs_enabled(dev_priv));
7236b12ca56SVille Syrjälä 
7246b12ca56SVille Syrjälä 	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == 0)
7256b12ca56SVille Syrjälä 		return;
7266b12ca56SVille Syrjälä 
7276b12ca56SVille Syrjälä 	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
7286b12ca56SVille Syrjälä 	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
7296b12ca56SVille Syrjälä 
7306b12ca56SVille Syrjälä 	I915_WRITE(reg, enable_mask | status_mask);
7316b12ca56SVille Syrjälä 	POSTING_READ(reg);
732755e9019SImre Deak }
733755e9019SImre Deak 
734c0e09200SDave Airlie /**
735f49e38ddSJani Nikula  * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
73614bb2c11STvrtko Ursulin  * @dev_priv: i915 device private
73701c66889SZhao Yakui  */
73891d14251STvrtko Ursulin static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
73901c66889SZhao Yakui {
74091d14251STvrtko Ursulin 	if (!dev_priv->opregion.asle || !IS_MOBILE(dev_priv))
741f49e38ddSJani Nikula 		return;
742f49e38ddSJani Nikula 
74313321786SDaniel Vetter 	spin_lock_irq(&dev_priv->irq_lock);
74401c66889SZhao Yakui 
745755e9019SImre Deak 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
74691d14251STvrtko Ursulin 	if (INTEL_GEN(dev_priv) >= 4)
7473b6c42e8SDaniel Vetter 		i915_enable_pipestat(dev_priv, PIPE_A,
748755e9019SImre Deak 				     PIPE_LEGACY_BLC_EVENT_STATUS);
7491ec14ad3SChris Wilson 
75013321786SDaniel Vetter 	spin_unlock_irq(&dev_priv->irq_lock);
75101c66889SZhao Yakui }
75201c66889SZhao Yakui 
753f75f3746SVille Syrjälä /*
754f75f3746SVille Syrjälä  * This timing diagram depicts the video signal in and
755f75f3746SVille Syrjälä  * around the vertical blanking period.
756f75f3746SVille Syrjälä  *
757f75f3746SVille Syrjälä  * Assumptions about the fictitious mode used in this example:
758f75f3746SVille Syrjälä  *  vblank_start >= 3
759f75f3746SVille Syrjälä  *  vsync_start = vblank_start + 1
760f75f3746SVille Syrjälä  *  vsync_end = vblank_start + 2
761f75f3746SVille Syrjälä  *  vtotal = vblank_start + 3
762f75f3746SVille Syrjälä  *
763f75f3746SVille Syrjälä  *           start of vblank:
764f75f3746SVille Syrjälä  *           latch double buffered registers
765f75f3746SVille Syrjälä  *           increment frame counter (ctg+)
766f75f3746SVille Syrjälä  *           generate start of vblank interrupt (gen4+)
767f75f3746SVille Syrjälä  *           |
768f75f3746SVille Syrjälä  *           |          frame start:
769f75f3746SVille Syrjälä  *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
770f75f3746SVille Syrjälä  *           |          may be shifted forward 1-3 extra lines via PIPECONF
771f75f3746SVille Syrjälä  *           |          |
772f75f3746SVille Syrjälä  *           |          |  start of vsync:
773f75f3746SVille Syrjälä  *           |          |  generate vsync interrupt
774f75f3746SVille Syrjälä  *           |          |  |
775f75f3746SVille Syrjälä  * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
776f75f3746SVille Syrjälä  *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
777f75f3746SVille Syrjälä  * ----va---> <-----------------vb--------------------> <--------va-------------
778f75f3746SVille Syrjälä  *       |          |       <----vs----->                     |
779f75f3746SVille Syrjälä  * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
780f75f3746SVille Syrjälä  * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
781f75f3746SVille Syrjälä  * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
782f75f3746SVille Syrjälä  *       |          |                                         |
783f75f3746SVille Syrjälä  *       last visible pixel                                   first visible pixel
784f75f3746SVille Syrjälä  *                  |                                         increment frame counter (gen3/4)
785f75f3746SVille Syrjälä  *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
786f75f3746SVille Syrjälä  *
787f75f3746SVille Syrjälä  * x  = horizontal active
788f75f3746SVille Syrjälä  * _  = horizontal blanking
789f75f3746SVille Syrjälä  * hs = horizontal sync
790f75f3746SVille Syrjälä  * va = vertical active
791f75f3746SVille Syrjälä  * vb = vertical blanking
792f75f3746SVille Syrjälä  * vs = vertical sync
793f75f3746SVille Syrjälä  * vbs = vblank_start (number)
794f75f3746SVille Syrjälä  *
795f75f3746SVille Syrjälä  * Summary:
796f75f3746SVille Syrjälä  * - most events happen at the start of horizontal sync
797f75f3746SVille Syrjälä  * - frame start happens at the start of horizontal blank, 1-4 lines
798f75f3746SVille Syrjälä  *   (depending on PIPECONF settings) after the start of vblank
799f75f3746SVille Syrjälä  * - gen3/4 pixel and frame counter are synchronized with the start
800f75f3746SVille Syrjälä  *   of horizontal active on the first line of vertical active
801f75f3746SVille Syrjälä  */
802f75f3746SVille Syrjälä 
80342f52ef8SKeith Packard /* Called from drm generic code, passed a 'crtc', which
80442f52ef8SKeith Packard  * we use as a pipe index
80542f52ef8SKeith Packard  */
80688e72717SThierry Reding static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
8070a3e67a4SJesse Barnes {
808fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
809f0f59a00SVille Syrjälä 	i915_reg_t high_frame, low_frame;
8100b2a8e09SVille Syrjälä 	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
8115caa0feaSDaniel Vetter 	const struct drm_display_mode *mode = &dev->vblank[pipe].hwmode;
812694e409dSVille Syrjälä 	unsigned long irqflags;
813391f75e2SVille Syrjälä 
8140b2a8e09SVille Syrjälä 	htotal = mode->crtc_htotal;
8150b2a8e09SVille Syrjälä 	hsync_start = mode->crtc_hsync_start;
8160b2a8e09SVille Syrjälä 	vbl_start = mode->crtc_vblank_start;
8170b2a8e09SVille Syrjälä 	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
8180b2a8e09SVille Syrjälä 		vbl_start = DIV_ROUND_UP(vbl_start, 2);
819391f75e2SVille Syrjälä 
8200b2a8e09SVille Syrjälä 	/* Convert to pixel count */
8210b2a8e09SVille Syrjälä 	vbl_start *= htotal;
8220b2a8e09SVille Syrjälä 
8230b2a8e09SVille Syrjälä 	/* Start of vblank event occurs at start of hsync */
8240b2a8e09SVille Syrjälä 	vbl_start -= htotal - hsync_start;
8250b2a8e09SVille Syrjälä 
8269db4a9c7SJesse Barnes 	high_frame = PIPEFRAME(pipe);
8279db4a9c7SJesse Barnes 	low_frame = PIPEFRAMEPIXEL(pipe);
8285eddb70bSChris Wilson 
829694e409dSVille Syrjälä 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
830694e409dSVille Syrjälä 
8310a3e67a4SJesse Barnes 	/*
8320a3e67a4SJesse Barnes 	 * High & low register fields aren't synchronized, so make sure
8330a3e67a4SJesse Barnes 	 * we get a low value that's stable across two reads of the high
8340a3e67a4SJesse Barnes 	 * register.
8350a3e67a4SJesse Barnes 	 */
8360a3e67a4SJesse Barnes 	do {
837694e409dSVille Syrjälä 		high1 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK;
838694e409dSVille Syrjälä 		low   = I915_READ_FW(low_frame);
839694e409dSVille Syrjälä 		high2 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK;
8400a3e67a4SJesse Barnes 	} while (high1 != high2);
8410a3e67a4SJesse Barnes 
842694e409dSVille Syrjälä 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
843694e409dSVille Syrjälä 
8445eddb70bSChris Wilson 	high1 >>= PIPE_FRAME_HIGH_SHIFT;
845391f75e2SVille Syrjälä 	pixel = low & PIPE_PIXEL_MASK;
8465eddb70bSChris Wilson 	low >>= PIPE_FRAME_LOW_SHIFT;
847391f75e2SVille Syrjälä 
848391f75e2SVille Syrjälä 	/*
849391f75e2SVille Syrjälä 	 * The frame counter increments at beginning of active.
850391f75e2SVille Syrjälä 	 * Cook up a vblank counter by also checking the pixel
851391f75e2SVille Syrjälä 	 * counter against vblank start.
852391f75e2SVille Syrjälä 	 */
853edc08d0aSVille Syrjälä 	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
8540a3e67a4SJesse Barnes }
8550a3e67a4SJesse Barnes 
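/*
 * Worked example (illustrative, not from the original source): if the
 * shifted high/low frame fields combine to frame 0x1234 and the pixel
 * counter has already reached vbl_start (we are past the start of vblank,
 * but the hardware counter, which ticks at start of active, has not
 * incremented yet), the function returns 0x1235, emulating the ctg+
 * behaviour of incrementing at the start of vblank.
 */
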
856974e59baSDave Airlie static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
8579880b7a5SJesse Barnes {
858fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
8599880b7a5SJesse Barnes 
860649636efSVille Syrjälä 	return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
8619880b7a5SJesse Barnes }
8629880b7a5SJesse Barnes 
863aec0246fSUma Shankar /*
864aec0246fSUma Shankar  * On certain encoders on certain platforms, the pipe
865aec0246fSUma Shankar  * scanline register will not work to get the scanline,
866aec0246fSUma Shankar  * since the timings are driven from the PORT or there are
867aec0246fSUma Shankar  * issues with scanline register updates.
868aec0246fSUma Shankar  * This function will use Framestamp and current
869aec0246fSUma Shankar  * timestamp registers to calculate the scanline.
870aec0246fSUma Shankar  */
871aec0246fSUma Shankar static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc)
872aec0246fSUma Shankar {
873aec0246fSUma Shankar 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
874aec0246fSUma Shankar 	struct drm_vblank_crtc *vblank =
875aec0246fSUma Shankar 		&crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
876aec0246fSUma Shankar 	const struct drm_display_mode *mode = &vblank->hwmode;
877aec0246fSUma Shankar 	u32 vblank_start = mode->crtc_vblank_start;
878aec0246fSUma Shankar 	u32 vtotal = mode->crtc_vtotal;
879aec0246fSUma Shankar 	u32 htotal = mode->crtc_htotal;
880aec0246fSUma Shankar 	u32 clock = mode->crtc_clock;
881aec0246fSUma Shankar 	u32 scanline, scan_prev_time, scan_curr_time, scan_post_time;
882aec0246fSUma Shankar 
883aec0246fSUma Shankar 	/*
884aec0246fSUma Shankar 	 * To avoid the race condition where we might cross into the
885aec0246fSUma Shankar 	 * next vblank just between the PIPE_FRMTMSTMP and TIMESTAMP_CTR
886aec0246fSUma Shankar 	 * reads, we make sure we read PIPE_FRMTMSTMP and TIMESTAMP_CTR
887aec0246fSUma Shankar 	 * during the same frame.
888aec0246fSUma Shankar 	 */
889aec0246fSUma Shankar 	do {
890aec0246fSUma Shankar 		/*
891aec0246fSUma Shankar 		 * This field provides read back of the display
892aec0246fSUma Shankar 		 * pipe frame time stamp. The time stamp value
893aec0246fSUma Shankar 		 * is sampled at every start of vertical blank.
894aec0246fSUma Shankar 		 */
895aec0246fSUma Shankar 		scan_prev_time = I915_READ_FW(PIPE_FRMTMSTMP(crtc->pipe));
896aec0246fSUma Shankar 
897aec0246fSUma Shankar 		/*
898aec0246fSUma Shankar 		 * The TIMESTAMP_CTR register has the current
899aec0246fSUma Shankar 		 * time stamp value.
900aec0246fSUma Shankar 		 */
901aec0246fSUma Shankar 		scan_curr_time = I915_READ_FW(IVB_TIMESTAMP_CTR);
902aec0246fSUma Shankar 
903aec0246fSUma Shankar 		scan_post_time = I915_READ_FW(PIPE_FRMTMSTMP(crtc->pipe));
904aec0246fSUma Shankar 	} while (scan_post_time != scan_prev_time);
905aec0246fSUma Shankar 
906aec0246fSUma Shankar 	scanline = div_u64(mul_u32_u32(scan_curr_time - scan_prev_time,
907aec0246fSUma Shankar 					clock), 1000 * htotal);
908aec0246fSUma Shankar 	scanline = min(scanline, vtotal - 1);
909aec0246fSUma Shankar 	scanline = (scanline + vblank_start) % vtotal;
910aec0246fSUma Shankar 
911aec0246fSUma Shankar 	return scanline;
912aec0246fSUma Shankar }
913aec0246fSUma Shankar 
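/*
 * Worked example (illustrative, not from the original source): if the
 * timestamp delta corresponds to 100 scanlines since the last start of
 * vblank on a mode with vblank_start == 1080 and vtotal == 1125, the
 * reported scanline is (100 + 1080) % 1125 == 55, i.e. 55 lines into the
 * active region; the min(scanline, vtotal - 1) clamp bounds deltas that
 * overshoot a full frame.
 */
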
91475aa3f63SVille Syrjälä /* I915_READ_FW, only for fast reads of display block, no need for forcewake etc. */
915a225f079SVille Syrjälä static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
916a225f079SVille Syrjälä {
917a225f079SVille Syrjälä 	struct drm_device *dev = crtc->base.dev;
918fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
9195caa0feaSDaniel Vetter 	const struct drm_display_mode *mode;
9205caa0feaSDaniel Vetter 	struct drm_vblank_crtc *vblank;
921a225f079SVille Syrjälä 	enum pipe pipe = crtc->pipe;
92280715b2fSVille Syrjälä 	int position, vtotal;
923a225f079SVille Syrjälä 
92472259536SVille Syrjälä 	if (!crtc->active)
92572259536SVille Syrjälä 		return -1;
92672259536SVille Syrjälä 
9275caa0feaSDaniel Vetter 	vblank = &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
9285caa0feaSDaniel Vetter 	mode = &vblank->hwmode;
9295caa0feaSDaniel Vetter 
930aec0246fSUma Shankar 	if (mode->private_flags & I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP)
931aec0246fSUma Shankar 		return __intel_get_crtc_scanline_from_timestamp(crtc);
932aec0246fSUma Shankar 
93380715b2fSVille Syrjälä 	vtotal = mode->crtc_vtotal;
934a225f079SVille Syrjälä 	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
935a225f079SVille Syrjälä 		vtotal /= 2;
936a225f079SVille Syrjälä 
93791d14251STvrtko Ursulin 	if (IS_GEN2(dev_priv))
93875aa3f63SVille Syrjälä 		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
939a225f079SVille Syrjälä 	else
94075aa3f63SVille Syrjälä 		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
941a225f079SVille Syrjälä 
942a225f079SVille Syrjälä 	/*
94341b578fbSJesse Barnes 	 * On HSW, the DSL reg (0x70000) appears to return 0 if we
94441b578fbSJesse Barnes 	 * read it just before the start of vblank.  So try it again
94541b578fbSJesse Barnes 	 * so we don't accidentally end up spanning a vblank frame
94641b578fbSJesse Barnes 	 * increment, causing the pipe_update_end() code to squawk at us.
94741b578fbSJesse Barnes 	 *
94841b578fbSJesse Barnes 	 * The nature of this problem means we can't simply check the ISR
94941b578fbSJesse Barnes 	 * bit and return the vblank start value; nor can we use the scanline
95041b578fbSJesse Barnes 	 * debug register in the transcoder as it appears to have the same
95141b578fbSJesse Barnes 	 * problem.  We may need to extend this to include other platforms,
95241b578fbSJesse Barnes 	 * but so far testing only shows the problem on HSW.
95341b578fbSJesse Barnes 	 */
95491d14251STvrtko Ursulin 	if (HAS_DDI(dev_priv) && !position) {
95541b578fbSJesse Barnes 		int i, temp;
95641b578fbSJesse Barnes 
95741b578fbSJesse Barnes 		for (i = 0; i < 100; i++) {
95841b578fbSJesse Barnes 			udelay(1);
959707bdd3fSVille Syrjälä 			temp = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
96041b578fbSJesse Barnes 			if (temp != position) {
96141b578fbSJesse Barnes 				position = temp;
96241b578fbSJesse Barnes 				break;
96341b578fbSJesse Barnes 			}
96441b578fbSJesse Barnes 		}
96541b578fbSJesse Barnes 	}
96641b578fbSJesse Barnes 
96741b578fbSJesse Barnes 	/*
96880715b2fSVille Syrjälä 	 * See update_scanline_offset() for the details on the
96980715b2fSVille Syrjälä 	 * scanline_offset adjustment.
970a225f079SVille Syrjälä 	 */
97180715b2fSVille Syrjälä 	return (position + crtc->scanline_offset) % vtotal;
972a225f079SVille Syrjälä }
973a225f079SVille Syrjälä 
9741bf6ad62SDaniel Vetter static bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
9751bf6ad62SDaniel Vetter 				     bool in_vblank_irq, int *vpos, int *hpos,
9763bb403bfSVille Syrjälä 				     ktime_t *stime, ktime_t *etime,
9773bb403bfSVille Syrjälä 				     const struct drm_display_mode *mode)
9780af7e4dfSMario Kleiner {
979fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
98098187836SVille Syrjälä 	struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv,
98198187836SVille Syrjälä 								pipe);
9823aa18df8SVille Syrjälä 	int position;
98378e8fc6bSVille Syrjälä 	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
984ad3543edSMario Kleiner 	unsigned long irqflags;
9850af7e4dfSMario Kleiner 
986fc467a22SMaarten Lankhorst 	if (WARN_ON(!mode->crtc_clock)) {
9870af7e4dfSMario Kleiner 		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
9889db4a9c7SJesse Barnes 				 "pipe %c\n", pipe_name(pipe));
9891bf6ad62SDaniel Vetter 		return false;
9900af7e4dfSMario Kleiner 	}
9910af7e4dfSMario Kleiner 
992c2baf4b7SVille Syrjälä 	htotal = mode->crtc_htotal;
99378e8fc6bSVille Syrjälä 	hsync_start = mode->crtc_hsync_start;
994c2baf4b7SVille Syrjälä 	vtotal = mode->crtc_vtotal;
995c2baf4b7SVille Syrjälä 	vbl_start = mode->crtc_vblank_start;
996c2baf4b7SVille Syrjälä 	vbl_end = mode->crtc_vblank_end;
9970af7e4dfSMario Kleiner 
998d31faf65SVille Syrjälä 	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
999d31faf65SVille Syrjälä 		vbl_start = DIV_ROUND_UP(vbl_start, 2);
1000d31faf65SVille Syrjälä 		vbl_end /= 2;
1001d31faf65SVille Syrjälä 		vtotal /= 2;
1002d31faf65SVille Syrjälä 	}
1003d31faf65SVille Syrjälä 
1004ad3543edSMario Kleiner 	/*
1005ad3543edSMario Kleiner 	 * Lock uncore.lock, as we will do multiple timing critical raw
1006ad3543edSMario Kleiner 	 * register reads, potentially with preemption disabled, so the
1007ad3543edSMario Kleiner 	 * following code must not block on uncore.lock.
1008ad3543edSMario Kleiner 	 */
1009ad3543edSMario Kleiner 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
1010ad3543edSMario Kleiner 
1011ad3543edSMario Kleiner 	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
1012ad3543edSMario Kleiner 
1013ad3543edSMario Kleiner 	/* Get optional system timestamp before query. */
1014ad3543edSMario Kleiner 	if (stime)
1015ad3543edSMario Kleiner 		*stime = ktime_get();
1016ad3543edSMario Kleiner 
101791d14251STvrtko Ursulin 	if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
10180af7e4dfSMario Kleiner 		/* No obvious pixelcount register. Only query vertical
10190af7e4dfSMario Kleiner 		 * scanout position from Display scan line register.
10200af7e4dfSMario Kleiner 		 */
1021a225f079SVille Syrjälä 		position = __intel_get_crtc_scanline(intel_crtc);
10220af7e4dfSMario Kleiner 	} else {
10230af7e4dfSMario Kleiner 		/* Have access to pixelcount since start of frame.
10240af7e4dfSMario Kleiner 		 * We can split this into vertical and horizontal
10250af7e4dfSMario Kleiner 		 * scanout position.
10260af7e4dfSMario Kleiner 		 */
102775aa3f63SVille Syrjälä 		position = (I915_READ_FW(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
10280af7e4dfSMario Kleiner 
10293aa18df8SVille Syrjälä 		/* convert to pixel counts */
10303aa18df8SVille Syrjälä 		vbl_start *= htotal;
10313aa18df8SVille Syrjälä 		vbl_end *= htotal;
10323aa18df8SVille Syrjälä 		vtotal *= htotal;
103378e8fc6bSVille Syrjälä 
103478e8fc6bSVille Syrjälä 		/*
10357e78f1cbSVille Syrjälä 		 * In interlaced modes, the pixel counter counts all pixels,
10367e78f1cbSVille Syrjälä 		 * so one field will have htotal more pixels. In order to avoid
10377e78f1cbSVille Syrjälä 		 * the reported position from jumping backwards when the pixel
10387e78f1cbSVille Syrjälä 		 * counter is beyond the length of the shorter field, just
10397e78f1cbSVille Syrjälä 		 * clamp the position the length of the shorter field. This
10407e78f1cbSVille Syrjälä 		 * clamp the position to the length of the shorter field. This
10417e78f1cbSVille Syrjälä 		 * the scanline counter doesn't count the two half lines.
10427e78f1cbSVille Syrjälä 		 */
10437e78f1cbSVille Syrjälä 		if (position >= vtotal)
10447e78f1cbSVille Syrjälä 			position = vtotal - 1;
10457e78f1cbSVille Syrjälä 
10467e78f1cbSVille Syrjälä 		/*
104778e8fc6bSVille Syrjälä 		 * Start of vblank interrupt is triggered at start of hsync,
104878e8fc6bSVille Syrjälä 		 * just prior to the first active line of vblank. However we
104978e8fc6bSVille Syrjälä 		 * consider lines to start at the leading edge of horizontal
105078e8fc6bSVille Syrjälä 		 * active. So, should we get here before we've crossed into
105178e8fc6bSVille Syrjälä 		 * the horizontal active of the first line in vblank, we would
105278e8fc6bSVille Syrjälä 		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
105378e8fc6bSVille Syrjälä 		 * always add htotal-hsync_start to the current pixel position.
105478e8fc6bSVille Syrjälä 		 */
105578e8fc6bSVille Syrjälä 		position = (position + htotal - hsync_start) % vtotal;
10563aa18df8SVille Syrjälä 	}
10573aa18df8SVille Syrjälä 
1058ad3543edSMario Kleiner 	/* Get optional system timestamp after query. */
1059ad3543edSMario Kleiner 	if (etime)
1060ad3543edSMario Kleiner 		*etime = ktime_get();
1061ad3543edSMario Kleiner 
1062ad3543edSMario Kleiner 	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
1063ad3543edSMario Kleiner 
1064ad3543edSMario Kleiner 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
1065ad3543edSMario Kleiner 
10663aa18df8SVille Syrjälä 	/*
10673aa18df8SVille Syrjälä 	 * While in vblank, position will be negative,
10683aa18df8SVille Syrjälä 	 * counting up towards 0 at vbl_end. Outside
10693aa18df8SVille Syrjälä 	 * vblank, position will be positive, counting
10703aa18df8SVille Syrjälä 	 * up from vbl_end.
10713aa18df8SVille Syrjälä 	 */
10723aa18df8SVille Syrjälä 	if (position >= vbl_start)
10733aa18df8SVille Syrjälä 		position -= vbl_end;
10743aa18df8SVille Syrjälä 	else
10753aa18df8SVille Syrjälä 		position += vtotal - vbl_end;
10763aa18df8SVille Syrjälä 
107791d14251STvrtko Ursulin 	if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
10783aa18df8SVille Syrjälä 		*vpos = position;
10793aa18df8SVille Syrjälä 		*hpos = 0;
10803aa18df8SVille Syrjälä 	} else {
10810af7e4dfSMario Kleiner 		*vpos = position / htotal;
10820af7e4dfSMario Kleiner 		*hpos = position - (*vpos * htotal);
10830af7e4dfSMario Kleiner 	}
10840af7e4dfSMario Kleiner 
10851bf6ad62SDaniel Vetter 	return true;
10860af7e4dfSMario Kleiner }
10870af7e4dfSMario Kleiner 
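/*
 * Stand-alone sketch (not driver code) of the pixel-counter branch above,
 * using hypothetical 1080p-like timings purely to illustrate the
 * arithmetic: scale the vblank boundaries to pixel units, shift the raw
 * count by htotal - hsync_start, then translate it into a vblank-relative
 * value (negative while in vblank). The interlace clamp and the final
 * split into vpos/hpos are omitted for brevity.
 */
static int example_pixelcount_to_vbl_relative(int raw_pixels)
{
	const int htotal = 2200, hsync_start = 2008;	/* hypothetical timings */
	const int vbl_start = 1080 * htotal;	/* first vblank line, in pixels */
	const int vbl_end = 1125 * htotal;	/* end of vblank, in pixels */
	const int vtotal = 1125 * htotal;	/* whole frame, in pixels */
	int position = (raw_pixels + htotal - hsync_start) % vtotal;

	if (position >= vbl_start)
		return position - vbl_end;		/* inside vblank: negative */
	else
		return position + vtotal - vbl_end;	/* outside vblank: positive */
}
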
1088a225f079SVille Syrjälä int intel_get_crtc_scanline(struct intel_crtc *crtc)
1089a225f079SVille Syrjälä {
1090fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1091a225f079SVille Syrjälä 	unsigned long irqflags;
1092a225f079SVille Syrjälä 	int position;
1093a225f079SVille Syrjälä 
1094a225f079SVille Syrjälä 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
1095a225f079SVille Syrjälä 	position = __intel_get_crtc_scanline(crtc);
1096a225f079SVille Syrjälä 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
1097a225f079SVille Syrjälä 
1098a225f079SVille Syrjälä 	return position;
1099a225f079SVille Syrjälä }
1100a225f079SVille Syrjälä 
110191d14251STvrtko Ursulin static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv)
1102f97108d1SJesse Barnes {
1103b5b72e89SMatthew Garrett 	u32 busy_up, busy_down, max_avg, min_avg;
11049270388eSDaniel Vetter 	u8 new_delay;
11059270388eSDaniel Vetter 
1106d0ecd7e2SDaniel Vetter 	spin_lock(&mchdev_lock);
1107f97108d1SJesse Barnes 
110873edd18fSDaniel Vetter 	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
110973edd18fSDaniel Vetter 
111020e4d407SDaniel Vetter 	new_delay = dev_priv->ips.cur_delay;
11119270388eSDaniel Vetter 
11127648fa99SJesse Barnes 	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
1113b5b72e89SMatthew Garrett 	busy_up = I915_READ(RCPREVBSYTUPAVG);
1114b5b72e89SMatthew Garrett 	busy_down = I915_READ(RCPREVBSYTDNAVG);
1115f97108d1SJesse Barnes 	max_avg = I915_READ(RCBMAXAVG);
1116f97108d1SJesse Barnes 	min_avg = I915_READ(RCBMINAVG);
1117f97108d1SJesse Barnes 
1118f97108d1SJesse Barnes 	/* Handle RCS change request from hw */
1119b5b72e89SMatthew Garrett 	if (busy_up > max_avg) {
112020e4d407SDaniel Vetter 		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
112120e4d407SDaniel Vetter 			new_delay = dev_priv->ips.cur_delay - 1;
112220e4d407SDaniel Vetter 		if (new_delay < dev_priv->ips.max_delay)
112320e4d407SDaniel Vetter 			new_delay = dev_priv->ips.max_delay;
1124b5b72e89SMatthew Garrett 	} else if (busy_down < min_avg) {
112520e4d407SDaniel Vetter 		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
112620e4d407SDaniel Vetter 			new_delay = dev_priv->ips.cur_delay + 1;
112720e4d407SDaniel Vetter 		if (new_delay > dev_priv->ips.min_delay)
112820e4d407SDaniel Vetter 			new_delay = dev_priv->ips.min_delay;
1129f97108d1SJesse Barnes 	}
1130f97108d1SJesse Barnes 
113191d14251STvrtko Ursulin 	if (ironlake_set_drps(dev_priv, new_delay))
113220e4d407SDaniel Vetter 		dev_priv->ips.cur_delay = new_delay;
1133f97108d1SJesse Barnes 
1134d0ecd7e2SDaniel Vetter 	spin_unlock(&mchdev_lock);
11359270388eSDaniel Vetter 
1136f97108d1SJesse Barnes 	return;
1137f97108d1SJesse Barnes }
1138f97108d1SJesse Barnes 
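/*
 * Sketch (hypothetical helper, not driver code) of the stepping policy
 * above. As the clamps imply, a numerically smaller delay is the faster
 * state: high busyness decrements the delay toward max_delay, low
 * busyness increments it toward min_delay.
 */
static u8 example_ilk_step_delay(u8 cur_delay, bool busier)
{
	const u8 max_delay = 3, min_delay = 11;	/* hypothetical bounds */
	u8 new_delay = cur_delay;

	if (busier) {
		if (cur_delay != max_delay)
			new_delay = cur_delay - 1;	/* speed up */
		if (new_delay < max_delay)
			new_delay = max_delay;
	} else {
		if (cur_delay != min_delay)
			new_delay = cur_delay + 1;	/* slow down */
		if (new_delay > min_delay)
			new_delay = min_delay;
	}

	return new_delay;
}
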
11390bc40be8STvrtko Ursulin static void notify_ring(struct intel_engine_cs *engine)
1140549f7365SChris Wilson {
1141e61e0f51SChris Wilson 	struct i915_request *rq = NULL;
114256299fb7SChris Wilson 	struct intel_wait *wait;
1143dffabc8fSTvrtko Ursulin 
1144bcbd5c33SChris Wilson 	if (!engine->breadcrumbs.irq_armed)
1145bcbd5c33SChris Wilson 		return;
1146bcbd5c33SChris Wilson 
11472246bea6SChris Wilson 	atomic_inc(&engine->irq_count);
1148538b257dSChris Wilson 	set_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);
114956299fb7SChris Wilson 
115061d3dc70SChris Wilson 	spin_lock(&engine->breadcrumbs.irq_lock);
115161d3dc70SChris Wilson 	wait = engine->breadcrumbs.irq_wait;
115256299fb7SChris Wilson 	if (wait) {
115317b51ad8SChris Wilson 		bool wakeup = engine->irq_seqno_barrier;
115417b51ad8SChris Wilson 
115556299fb7SChris Wilson 		/* We use a callback from the dma-fence to submit
115656299fb7SChris Wilson 		 * requests after waiting on our own requests. To
115756299fb7SChris Wilson 		 * ensure minimum delay in queuing the next request to
115856299fb7SChris Wilson 		 * hardware, signal the fence now rather than wait for
115956299fb7SChris Wilson 		 * the signaler to be woken up. We still wake up the
116056299fb7SChris Wilson 		 * waiter in order to handle the irq-seqno coherency
116156299fb7SChris Wilson 		 * issues (we may receive the interrupt before the
116256299fb7SChris Wilson 		 * seqno is written, see __i915_request_irq_complete())
116356299fb7SChris Wilson 		 * and to handle coalescing of multiple seqno updates
116456299fb7SChris Wilson 		 * and many waiters.
116556299fb7SChris Wilson 		 */
116656299fb7SChris Wilson 		if (i915_seqno_passed(intel_engine_get_seqno(engine),
116717b51ad8SChris Wilson 				      wait->seqno)) {
1168e61e0f51SChris Wilson 			struct i915_request *waiter = wait->request;
1169de4d2106SChris Wilson 
117017b51ad8SChris Wilson 			wakeup = true;
117117b51ad8SChris Wilson 			if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
1172de4d2106SChris Wilson 				      &waiter->fence.flags) &&
1173de4d2106SChris Wilson 			    intel_wait_check_request(wait, waiter))
1174e61e0f51SChris Wilson 				rq = i915_request_get(waiter);
117517b51ad8SChris Wilson 		}
117656299fb7SChris Wilson 
117717b51ad8SChris Wilson 		if (wakeup)
117856299fb7SChris Wilson 			wake_up_process(wait->tsk);
117967b807a8SChris Wilson 	} else {
1180bcbd5c33SChris Wilson 		if (engine->breadcrumbs.irq_armed)
118167b807a8SChris Wilson 			__intel_engine_disarm_breadcrumbs(engine);
118256299fb7SChris Wilson 	}
118361d3dc70SChris Wilson 	spin_unlock(&engine->breadcrumbs.irq_lock);
118456299fb7SChris Wilson 
118524754d75SChris Wilson 	if (rq) {
118656299fb7SChris Wilson 		dma_fence_signal(&rq->fence);
11874e9a8befSChris Wilson 		GEM_BUG_ON(!i915_request_completed(rq));
1188e61e0f51SChris Wilson 		i915_request_put(rq);
118924754d75SChris Wilson 	}
119056299fb7SChris Wilson 
119156299fb7SChris Wilson 	trace_intel_engine_notify(engine, wait);
1192549f7365SChris Wilson }
1193549f7365SChris Wilson 
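/*
 * Minimal sketch of the wrap-safe comparison notify_ring() leans on via
 * i915_seqno_passed(): shown here only to illustrate how the signed
 * difference keeps ordering correct across the u32 wrap point, not as
 * the driver's definition.
 */
static inline bool example_seqno_passed(u32 hw_seqno, u32 wait_seqno)
{
	return (s32)(hw_seqno - wait_seqno) >= 0;
}
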
119443cf3bf0SChris Wilson static void vlv_c0_read(struct drm_i915_private *dev_priv,
119543cf3bf0SChris Wilson 			struct intel_rps_ei *ei)
119631685c25SDeepak S {
1197679cb6c1SMika Kuoppala 	ei->ktime = ktime_get_raw();
119843cf3bf0SChris Wilson 	ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT);
119943cf3bf0SChris Wilson 	ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
120031685c25SDeepak S }
120131685c25SDeepak S 
120243cf3bf0SChris Wilson void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
120343cf3bf0SChris Wilson {
1204562d9baeSSagar Arun Kamble 	memset(&dev_priv->gt_pm.rps.ei, 0, sizeof(dev_priv->gt_pm.rps.ei));
120543cf3bf0SChris Wilson }
120643cf3bf0SChris Wilson 
120743cf3bf0SChris Wilson static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
120843cf3bf0SChris Wilson {
1209562d9baeSSagar Arun Kamble 	struct intel_rps *rps = &dev_priv->gt_pm.rps;
1210562d9baeSSagar Arun Kamble 	const struct intel_rps_ei *prev = &rps->ei;
121143cf3bf0SChris Wilson 	struct intel_rps_ei now;
121243cf3bf0SChris Wilson 	u32 events = 0;
121343cf3bf0SChris Wilson 
1214e0e8c7cbSChris Wilson 	if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0)
121543cf3bf0SChris Wilson 		return 0;
121643cf3bf0SChris Wilson 
121743cf3bf0SChris Wilson 	vlv_c0_read(dev_priv, &now);
121831685c25SDeepak S 
1219679cb6c1SMika Kuoppala 	if (prev->ktime) {
1220e0e8c7cbSChris Wilson 		u64 time, c0;
1221569884e3SChris Wilson 		u32 render, media;
1222e0e8c7cbSChris Wilson 
1223679cb6c1SMika Kuoppala 		time = ktime_us_delta(now.ktime, prev->ktime);
12248f68d591SChris Wilson 
1225e0e8c7cbSChris Wilson 		time *= dev_priv->czclk_freq;
1226e0e8c7cbSChris Wilson 
1227e0e8c7cbSChris Wilson 		/* Workload can be split between render + media,
1228e0e8c7cbSChris Wilson 		 * e.g. SwapBuffers being blitted in X after being rendered in
1229e0e8c7cbSChris Wilson 		 * mesa. To account for this we need to combine both engines
1230e0e8c7cbSChris Wilson 		 * into our activity counter.
1231e0e8c7cbSChris Wilson 		 */
1232569884e3SChris Wilson 		render = now.render_c0 - prev->render_c0;
1233569884e3SChris Wilson 		media = now.media_c0 - prev->media_c0;
1234569884e3SChris Wilson 		c0 = max(render, media);
12356b7f6aa7SMika Kuoppala 		c0 *= 1000 * 100 << 8; /* to usecs and scale to threshold% */
1236e0e8c7cbSChris Wilson 
1237562d9baeSSagar Arun Kamble 		if (c0 > time * rps->up_threshold)
1238e0e8c7cbSChris Wilson 			events = GEN6_PM_RP_UP_THRESHOLD;
1239562d9baeSSagar Arun Kamble 		else if (c0 < time * rps->down_threshold)
1240e0e8c7cbSChris Wilson 			events = GEN6_PM_RP_DOWN_THRESHOLD;
124131685c25SDeepak S 	}
124231685c25SDeepak S 
1243562d9baeSSagar Arun Kamble 	rps->ei = now;
124443cf3bf0SChris Wilson 	return events;
124531685c25SDeepak S }
124631685c25SDeepak S 
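/*
 * Simplified sketch (hypothetical helper) of the threshold test above:
 * instead of dividing to get a residency percentage, both sides are
 * scaled so a multiply-and-compare decides up vs. down. This assumes the
 * thresholds are expressed in percent, per the "scale to threshold%"
 * comment; the real code folds in additional fixed scale factors.
 */
static u32 example_c0_events(u64 busy_ticks, u64 elapsed_ticks,
			     u32 up_pct, u32 down_pct)
{
	/* busy/elapsed > up_pct/100  <=>  busy * 100 > elapsed * up_pct */
	if (busy_ticks * 100 > elapsed_ticks * up_pct)
		return GEN6_PM_RP_UP_THRESHOLD;
	if (busy_ticks * 100 < elapsed_ticks * down_pct)
		return GEN6_PM_RP_DOWN_THRESHOLD;

	return 0;
}
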
12474912d041SBen Widawsky static void gen6_pm_rps_work(struct work_struct *work)
12483b8d8d91SJesse Barnes {
12492d1013ddSJani Nikula 	struct drm_i915_private *dev_priv =
1250562d9baeSSagar Arun Kamble 		container_of(work, struct drm_i915_private, gt_pm.rps.work);
1251562d9baeSSagar Arun Kamble 	struct intel_rps *rps = &dev_priv->gt_pm.rps;
12527c0a16adSChris Wilson 	bool client_boost = false;
12538d3afd7dSChris Wilson 	int new_delay, adj, min, max;
12547c0a16adSChris Wilson 	u32 pm_iir = 0;
12553b8d8d91SJesse Barnes 
125659cdb63dSDaniel Vetter 	spin_lock_irq(&dev_priv->irq_lock);
1257562d9baeSSagar Arun Kamble 	if (rps->interrupts_enabled) {
1258562d9baeSSagar Arun Kamble 		pm_iir = fetch_and_zero(&rps->pm_iir);
1259562d9baeSSagar Arun Kamble 		client_boost = atomic_read(&rps->num_waiters);
1260d4d70aa5SImre Deak 	}
126159cdb63dSDaniel Vetter 	spin_unlock_irq(&dev_priv->irq_lock);
12624912d041SBen Widawsky 
126360611c13SPaulo Zanoni 	/* Make sure we didn't queue anything we're not going to process. */
1264a6706b45SDeepak S 	WARN_ON(pm_iir & ~dev_priv->pm_rps_events);
12658d3afd7dSChris Wilson 	if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
12667c0a16adSChris Wilson 		goto out;
12673b8d8d91SJesse Barnes 
12689f817501SSagar Arun Kamble 	mutex_lock(&dev_priv->pcu_lock);
12697b9e0ae6SChris Wilson 
127043cf3bf0SChris Wilson 	pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);
127143cf3bf0SChris Wilson 
1272562d9baeSSagar Arun Kamble 	adj = rps->last_adj;
1273562d9baeSSagar Arun Kamble 	new_delay = rps->cur_freq;
1274562d9baeSSagar Arun Kamble 	min = rps->min_freq_softlimit;
1275562d9baeSSagar Arun Kamble 	max = rps->max_freq_softlimit;
12767b92c1bdSChris Wilson 	if (client_boost)
1277562d9baeSSagar Arun Kamble 		max = rps->max_freq;
1278562d9baeSSagar Arun Kamble 	if (client_boost && new_delay < rps->boost_freq) {
1279562d9baeSSagar Arun Kamble 		new_delay = rps->boost_freq;
12808d3afd7dSChris Wilson 		adj = 0;
12818d3afd7dSChris Wilson 	} else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
1282dd75fdc8SChris Wilson 		if (adj > 0)
1283dd75fdc8SChris Wilson 			adj *= 2;
1284edcf284bSChris Wilson 		else /* CHV needs even encode values */
1285edcf284bSChris Wilson 			adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;
12867e79a683SSagar Arun Kamble 
1287562d9baeSSagar Arun Kamble 		if (new_delay >= rps->max_freq_softlimit)
12887e79a683SSagar Arun Kamble 			adj = 0;
12897b92c1bdSChris Wilson 	} else if (client_boost) {
1290f5a4c67dSChris Wilson 		adj = 0;
1291dd75fdc8SChris Wilson 	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
1292562d9baeSSagar Arun Kamble 		if (rps->cur_freq > rps->efficient_freq)
1293562d9baeSSagar Arun Kamble 			new_delay = rps->efficient_freq;
1294562d9baeSSagar Arun Kamble 		else if (rps->cur_freq > rps->min_freq_softlimit)
1295562d9baeSSagar Arun Kamble 			new_delay = rps->min_freq_softlimit;
1296dd75fdc8SChris Wilson 		adj = 0;
1297dd75fdc8SChris Wilson 	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
1298dd75fdc8SChris Wilson 		if (adj < 0)
1299dd75fdc8SChris Wilson 			adj *= 2;
1300edcf284bSChris Wilson 		else /* CHV needs even encode values */
1301edcf284bSChris Wilson 			adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;
13027e79a683SSagar Arun Kamble 
1303562d9baeSSagar Arun Kamble 		if (new_delay <= rps->min_freq_softlimit)
13047e79a683SSagar Arun Kamble 			adj = 0;
1305dd75fdc8SChris Wilson 	} else { /* unknown event */
1306edcf284bSChris Wilson 		adj = 0;
1307dd75fdc8SChris Wilson 	}
13083b8d8d91SJesse Barnes 
1309562d9baeSSagar Arun Kamble 	rps->last_adj = adj;
1310edcf284bSChris Wilson 
131179249636SBen Widawsky 	/* sysfs frequency interfaces may have snuck in while servicing the
131279249636SBen Widawsky 	 * interrupt
131379249636SBen Widawsky 	 */
1314edcf284bSChris Wilson 	new_delay += adj;
13158d3afd7dSChris Wilson 	new_delay = clamp_t(int, new_delay, min, max);
131627544369SDeepak S 
13179fcee2f7SChris Wilson 	if (intel_set_rps(dev_priv, new_delay)) {
13189fcee2f7SChris Wilson 		DRM_DEBUG_DRIVER("Failed to set new GPU frequency\n");
1319562d9baeSSagar Arun Kamble 		rps->last_adj = 0;
13209fcee2f7SChris Wilson 	}
13213b8d8d91SJesse Barnes 
13229f817501SSagar Arun Kamble 	mutex_unlock(&dev_priv->pcu_lock);
13237c0a16adSChris Wilson 
13247c0a16adSChris Wilson out:
13257c0a16adSChris Wilson 	/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
13267c0a16adSChris Wilson 	spin_lock_irq(&dev_priv->irq_lock);
1327562d9baeSSagar Arun Kamble 	if (rps->interrupts_enabled)
13287c0a16adSChris Wilson 		gen6_unmask_pm_irq(dev_priv, dev_priv->pm_rps_events);
13297c0a16adSChris Wilson 	spin_unlock_irq(&dev_priv->irq_lock);
13303b8d8d91SJesse Barnes }
13313b8d8d91SJesse Barnes 
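/*
 * Sketch (hypothetical helper, not driver code) of the step-size policy
 * above: consecutive events in the same direction double last_adj, a
 * boost or direction change resets it, and the result is clamped to the
 * softlimits. The CHV even-value requirement is ignored here.
 */
static int example_next_freq(int cur_freq, int last_adj, bool up,
			     int min, int max)
{
	int adj;

	if (up)
		adj = last_adj > 0 ? last_adj * 2 : 1;
	else
		adj = last_adj < 0 ? last_adj * 2 : -1;

	return clamp_t(int, cur_freq + adj, min, max);
}
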
1332e3689190SBen Widawsky 
1333e3689190SBen Widawsky /**
1334e3689190SBen Widawsky  * ivybridge_parity_work - Workqueue called when a parity error interrupt
1335e3689190SBen Widawsky  * occurred.
1336e3689190SBen Widawsky  * @work: workqueue struct
1337e3689190SBen Widawsky  *
1338e3689190SBen Widawsky  * Doesn't actually do anything except notify userspace. As a consequence of
1339e3689190SBen Widawsky  * this event, userspace should try to remap the bad rows, since statistically
1340e3689190SBen Widawsky  * the same row is likely to go bad again.
1341e3689190SBen Widawsky  */
1342e3689190SBen Widawsky static void ivybridge_parity_work(struct work_struct *work)
1343e3689190SBen Widawsky {
13442d1013ddSJani Nikula 	struct drm_i915_private *dev_priv =
1345cefcff8fSJoonas Lahtinen 		container_of(work, typeof(*dev_priv), l3_parity.error_work);
1346e3689190SBen Widawsky 	u32 error_status, row, bank, subbank;
134735a85ac6SBen Widawsky 	char *parity_event[6];
1348e3689190SBen Widawsky 	uint32_t misccpctl;
134935a85ac6SBen Widawsky 	uint8_t slice = 0;
1350e3689190SBen Widawsky 
1351e3689190SBen Widawsky 	/* We must turn off DOP level clock gating to access the L3 registers.
1352e3689190SBen Widawsky 	 * In order to prevent a get/put style interface, acquire struct mutex
1353e3689190SBen Widawsky 	 * any time we access those registers.
1354e3689190SBen Widawsky 	 */
135591c8a326SChris Wilson 	mutex_lock(&dev_priv->drm.struct_mutex);
1356e3689190SBen Widawsky 
135735a85ac6SBen Widawsky 	/* If we've screwed up tracking, just let the interrupt fire again */
135835a85ac6SBen Widawsky 	if (WARN_ON(!dev_priv->l3_parity.which_slice))
135935a85ac6SBen Widawsky 		goto out;
136035a85ac6SBen Widawsky 
1361e3689190SBen Widawsky 	misccpctl = I915_READ(GEN7_MISCCPCTL);
1362e3689190SBen Widawsky 	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
1363e3689190SBen Widawsky 	POSTING_READ(GEN7_MISCCPCTL);
1364e3689190SBen Widawsky 
136535a85ac6SBen Widawsky 	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
1366f0f59a00SVille Syrjälä 		i915_reg_t reg;
136735a85ac6SBen Widawsky 
136835a85ac6SBen Widawsky 		slice--;
13692d1fe073SJoonas Lahtinen 		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv)))
137035a85ac6SBen Widawsky 			break;
137135a85ac6SBen Widawsky 
137235a85ac6SBen Widawsky 		dev_priv->l3_parity.which_slice &= ~(1<<slice);
137335a85ac6SBen Widawsky 
13746fa1c5f1SVille Syrjälä 		reg = GEN7_L3CDERRST1(slice);
137535a85ac6SBen Widawsky 
137635a85ac6SBen Widawsky 		error_status = I915_READ(reg);
1377e3689190SBen Widawsky 		row = GEN7_PARITY_ERROR_ROW(error_status);
1378e3689190SBen Widawsky 		bank = GEN7_PARITY_ERROR_BANK(error_status);
1379e3689190SBen Widawsky 		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
1380e3689190SBen Widawsky 
138135a85ac6SBen Widawsky 		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
138235a85ac6SBen Widawsky 		POSTING_READ(reg);
1383e3689190SBen Widawsky 
1384cce723edSBen Widawsky 		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
1385e3689190SBen Widawsky 		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
1386e3689190SBen Widawsky 		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
1387e3689190SBen Widawsky 		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
138835a85ac6SBen Widawsky 		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
138935a85ac6SBen Widawsky 		parity_event[5] = NULL;
1390e3689190SBen Widawsky 
139191c8a326SChris Wilson 		kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj,
1392e3689190SBen Widawsky 				   KOBJ_CHANGE, parity_event);
1393e3689190SBen Widawsky 
139435a85ac6SBen Widawsky 		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
139535a85ac6SBen Widawsky 			  slice, row, bank, subbank);
1396e3689190SBen Widawsky 
139735a85ac6SBen Widawsky 		kfree(parity_event[4]);
1398e3689190SBen Widawsky 		kfree(parity_event[3]);
1399e3689190SBen Widawsky 		kfree(parity_event[2]);
1400e3689190SBen Widawsky 		kfree(parity_event[1]);
1401e3689190SBen Widawsky 	}
1402e3689190SBen Widawsky 
140335a85ac6SBen Widawsky 	I915_WRITE(GEN7_MISCCPCTL, misccpctl);
140435a85ac6SBen Widawsky 
140535a85ac6SBen Widawsky out:
140635a85ac6SBen Widawsky 	WARN_ON(dev_priv->l3_parity.which_slice);
14074cb21832SDaniel Vetter 	spin_lock_irq(&dev_priv->irq_lock);
14082d1fe073SJoonas Lahtinen 	gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
14094cb21832SDaniel Vetter 	spin_unlock_irq(&dev_priv->irq_lock);
141035a85ac6SBen Widawsky 
141191c8a326SChris Wilson 	mutex_unlock(&dev_priv->drm.struct_mutex);
141235a85ac6SBen Widawsky }
141335a85ac6SBen Widawsky 
1414261e40b8SVille Syrjälä static void ivybridge_parity_error_irq_handler(struct drm_i915_private *dev_priv,
1415261e40b8SVille Syrjälä 					       u32 iir)
1416e3689190SBen Widawsky {
1417261e40b8SVille Syrjälä 	if (!HAS_L3_DPF(dev_priv))
1418e3689190SBen Widawsky 		return;
1419e3689190SBen Widawsky 
1420d0ecd7e2SDaniel Vetter 	spin_lock(&dev_priv->irq_lock);
1421261e40b8SVille Syrjälä 	gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
1422d0ecd7e2SDaniel Vetter 	spin_unlock(&dev_priv->irq_lock);
1423e3689190SBen Widawsky 
1424261e40b8SVille Syrjälä 	iir &= GT_PARITY_ERROR(dev_priv);
142535a85ac6SBen Widawsky 	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
142635a85ac6SBen Widawsky 		dev_priv->l3_parity.which_slice |= 1 << 1;
142735a85ac6SBen Widawsky 
142835a85ac6SBen Widawsky 	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
142935a85ac6SBen Widawsky 		dev_priv->l3_parity.which_slice |= 1 << 0;
143035a85ac6SBen Widawsky 
1431a4da4fa4SDaniel Vetter 	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
1432e3689190SBen Widawsky }
1433e3689190SBen Widawsky 
1434261e40b8SVille Syrjälä static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv,
1435f1af8fc1SPaulo Zanoni 			       u32 gt_iir)
1436f1af8fc1SPaulo Zanoni {
1437f8973c21SChris Wilson 	if (gt_iir & GT_RENDER_USER_INTERRUPT)
14383b3f1650SAkash Goel 		notify_ring(dev_priv->engine[RCS]);
1439f1af8fc1SPaulo Zanoni 	if (gt_iir & ILK_BSD_USER_INTERRUPT)
14403b3f1650SAkash Goel 		notify_ring(dev_priv->engine[VCS]);
1441f1af8fc1SPaulo Zanoni }
1442f1af8fc1SPaulo Zanoni 
1443261e40b8SVille Syrjälä static void snb_gt_irq_handler(struct drm_i915_private *dev_priv,
1444e7b4c6b1SDaniel Vetter 			       u32 gt_iir)
1445e7b4c6b1SDaniel Vetter {
1446f8973c21SChris Wilson 	if (gt_iir & GT_RENDER_USER_INTERRUPT)
14473b3f1650SAkash Goel 		notify_ring(dev_priv->engine[RCS]);
1448cc609d5dSBen Widawsky 	if (gt_iir & GT_BSD_USER_INTERRUPT)
14493b3f1650SAkash Goel 		notify_ring(dev_priv->engine[VCS]);
1450cc609d5dSBen Widawsky 	if (gt_iir & GT_BLT_USER_INTERRUPT)
14513b3f1650SAkash Goel 		notify_ring(dev_priv->engine[BCS]);
1452e7b4c6b1SDaniel Vetter 
1453cc609d5dSBen Widawsky 	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
1454cc609d5dSBen Widawsky 		      GT_BSD_CS_ERROR_INTERRUPT |
1455aaecdf61SDaniel Vetter 		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
1456aaecdf61SDaniel Vetter 		DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);
1457e3689190SBen Widawsky 
1458261e40b8SVille Syrjälä 	if (gt_iir & GT_PARITY_ERROR(dev_priv))
1459261e40b8SVille Syrjälä 		ivybridge_parity_error_irq_handler(dev_priv, gt_iir);
1460e7b4c6b1SDaniel Vetter }
1461e7b4c6b1SDaniel Vetter 
14625d3d69d5SChris Wilson static void
146351f6b0f9SChris Wilson gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir)
1464fbcc1a0cSNick Hoath {
1465b620e870SMika Kuoppala 	struct intel_engine_execlists * const execlists = &engine->execlists;
146631de7350SChris Wilson 	bool tasklet = false;
1467f747026cSChris Wilson 
146851f6b0f9SChris Wilson 	if (iir & GT_CONTEXT_SWITCH_INTERRUPT) {
14691c645bf4SChris Wilson 		if (READ_ONCE(engine->execlists.active))
14701c645bf4SChris Wilson 			tasklet = !test_and_set_bit(ENGINE_IRQ_EXECLIST,
14711c645bf4SChris Wilson 						    &engine->irq_posted);
14724a118ecbSChris Wilson 	}
147331de7350SChris Wilson 
147451f6b0f9SChris Wilson 	if (iir & GT_RENDER_USER_INTERRUPT) {
147531de7350SChris Wilson 		notify_ring(engine);
147693ffbe8eSMichal Wajdeczko 		tasklet |= USES_GUC_SUBMISSION(engine->i915);
147731de7350SChris Wilson 	}
147831de7350SChris Wilson 
147931de7350SChris Wilson 	if (tasklet)
1480c6dce8f1SSagar Arun Kamble 		tasklet_hi_schedule(&execlists->tasklet);
1481fbcc1a0cSNick Hoath }
1482fbcc1a0cSNick Hoath 
14832e4a5b25SChris Wilson static void gen8_gt_irq_ack(struct drm_i915_private *i915,
148455ef72f2SChris Wilson 			    u32 master_ctl, u32 gt_iir[4])
1485abd58f01SBen Widawsky {
14862e4a5b25SChris Wilson 	void __iomem * const regs = i915->regs;
14872e4a5b25SChris Wilson 
1488f0fd96f5SChris Wilson #define GEN8_GT_IRQS (GEN8_GT_RCS_IRQ | \
1489f0fd96f5SChris Wilson 		      GEN8_GT_BCS_IRQ | \
1490f0fd96f5SChris Wilson 		      GEN8_GT_VCS1_IRQ | \
1491f0fd96f5SChris Wilson 		      GEN8_GT_VCS2_IRQ | \
1492f0fd96f5SChris Wilson 		      GEN8_GT_VECS_IRQ | \
1493f0fd96f5SChris Wilson 		      GEN8_GT_PM_IRQ | \
1494f0fd96f5SChris Wilson 		      GEN8_GT_GUC_IRQ)
1495f0fd96f5SChris Wilson 
1496abd58f01SBen Widawsky 	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
14972e4a5b25SChris Wilson 		gt_iir[0] = raw_reg_read(regs, GEN8_GT_IIR(0));
14982e4a5b25SChris Wilson 		if (likely(gt_iir[0]))
14992e4a5b25SChris Wilson 			raw_reg_write(regs, GEN8_GT_IIR(0), gt_iir[0]);
1500abd58f01SBen Widawsky 	}
1501abd58f01SBen Widawsky 
150285f9b5f9SZhao Yakui 	if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
15032e4a5b25SChris Wilson 		gt_iir[1] = raw_reg_read(regs, GEN8_GT_IIR(1));
15042e4a5b25SChris Wilson 		if (likely(gt_iir[1]))
15052e4a5b25SChris Wilson 			raw_reg_write(regs, GEN8_GT_IIR(1), gt_iir[1]);
150674cdb337SChris Wilson 	}
150774cdb337SChris Wilson 
150826705e20SSagar Arun Kamble 	if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
15092e4a5b25SChris Wilson 		gt_iir[2] = raw_reg_read(regs, GEN8_GT_IIR(2));
15102e4a5b25SChris Wilson 		if (likely(gt_iir[2] & (i915->pm_rps_events |
15112e4a5b25SChris Wilson 					i915->pm_guc_events)))
15122e4a5b25SChris Wilson 			raw_reg_write(regs, GEN8_GT_IIR(2),
15132e4a5b25SChris Wilson 				      gt_iir[2] & (i915->pm_rps_events |
15142e4a5b25SChris Wilson 						   i915->pm_guc_events));
15150961021aSBen Widawsky 	}
15162e4a5b25SChris Wilson 
15172e4a5b25SChris Wilson 	if (master_ctl & GEN8_GT_VECS_IRQ) {
15182e4a5b25SChris Wilson 		gt_iir[3] = raw_reg_read(regs, GEN8_GT_IIR(3));
15192e4a5b25SChris Wilson 		if (likely(gt_iir[3]))
15202e4a5b25SChris Wilson 			raw_reg_write(regs, GEN8_GT_IIR(3), gt_iir[3]);
152155ef72f2SChris Wilson 	}
1522abd58f01SBen Widawsky }
1523abd58f01SBen Widawsky 
15242e4a5b25SChris Wilson static void gen8_gt_irq_handler(struct drm_i915_private *i915,
1525f0fd96f5SChris Wilson 				u32 master_ctl, u32 gt_iir[4])
1526e30e251aSVille Syrjälä {
1527f0fd96f5SChris Wilson 	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
15282e4a5b25SChris Wilson 		gen8_cs_irq_handler(i915->engine[RCS],
152951f6b0f9SChris Wilson 				    gt_iir[0] >> GEN8_RCS_IRQ_SHIFT);
15302e4a5b25SChris Wilson 		gen8_cs_irq_handler(i915->engine[BCS],
153151f6b0f9SChris Wilson 				    gt_iir[0] >> GEN8_BCS_IRQ_SHIFT);
1532e30e251aSVille Syrjälä 	}
1533e30e251aSVille Syrjälä 
1534f0fd96f5SChris Wilson 	if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
15352e4a5b25SChris Wilson 		gen8_cs_irq_handler(i915->engine[VCS],
153651f6b0f9SChris Wilson 				    gt_iir[1] >> GEN8_VCS1_IRQ_SHIFT);
15372e4a5b25SChris Wilson 		gen8_cs_irq_handler(i915->engine[VCS2],
153851f6b0f9SChris Wilson 				    gt_iir[1] >> GEN8_VCS2_IRQ_SHIFT);
1539e30e251aSVille Syrjälä 	}
1540e30e251aSVille Syrjälä 
1541f0fd96f5SChris Wilson 	if (master_ctl & GEN8_GT_VECS_IRQ) {
15422e4a5b25SChris Wilson 		gen8_cs_irq_handler(i915->engine[VECS],
154351f6b0f9SChris Wilson 				    gt_iir[3] >> GEN8_VECS_IRQ_SHIFT);
1544f0fd96f5SChris Wilson 	}
1545e30e251aSVille Syrjälä 
1546f0fd96f5SChris Wilson 	if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
15472e4a5b25SChris Wilson 		gen6_rps_irq_handler(i915, gt_iir[2]);
15482e4a5b25SChris Wilson 		gen9_guc_irq_handler(i915, gt_iir[2]);
1549e30e251aSVille Syrjälä 	}
1550f0fd96f5SChris Wilson }
1551e30e251aSVille Syrjälä 
155263c88d22SImre Deak static bool bxt_port_hotplug_long_detect(enum port port, u32 val)
155363c88d22SImre Deak {
155463c88d22SImre Deak 	switch (port) {
155563c88d22SImre Deak 	case PORT_A:
1556195baa06SVille Syrjälä 		return val & PORTA_HOTPLUG_LONG_DETECT;
155763c88d22SImre Deak 	case PORT_B:
155863c88d22SImre Deak 		return val & PORTB_HOTPLUG_LONG_DETECT;
155963c88d22SImre Deak 	case PORT_C:
156063c88d22SImre Deak 		return val & PORTC_HOTPLUG_LONG_DETECT;
156163c88d22SImre Deak 	default:
156263c88d22SImre Deak 		return false;
156363c88d22SImre Deak 	}
156463c88d22SImre Deak }
156563c88d22SImre Deak 
15666dbf30ceSVille Syrjälä static bool spt_port_hotplug2_long_detect(enum port port, u32 val)
15676dbf30ceSVille Syrjälä {
15686dbf30ceSVille Syrjälä 	switch (port) {
15696dbf30ceSVille Syrjälä 	case PORT_E:
15706dbf30ceSVille Syrjälä 		return val & PORTE_HOTPLUG_LONG_DETECT;
15716dbf30ceSVille Syrjälä 	default:
15726dbf30ceSVille Syrjälä 		return false;
15736dbf30ceSVille Syrjälä 	}
15746dbf30ceSVille Syrjälä }
15756dbf30ceSVille Syrjälä 
157674c0b395SVille Syrjälä static bool spt_port_hotplug_long_detect(enum port port, u32 val)
157774c0b395SVille Syrjälä {
157874c0b395SVille Syrjälä 	switch (port) {
157974c0b395SVille Syrjälä 	case PORT_A:
158074c0b395SVille Syrjälä 		return val & PORTA_HOTPLUG_LONG_DETECT;
158174c0b395SVille Syrjälä 	case PORT_B:
158274c0b395SVille Syrjälä 		return val & PORTB_HOTPLUG_LONG_DETECT;
158374c0b395SVille Syrjälä 	case PORT_C:
158474c0b395SVille Syrjälä 		return val & PORTC_HOTPLUG_LONG_DETECT;
158574c0b395SVille Syrjälä 	case PORT_D:
158674c0b395SVille Syrjälä 		return val & PORTD_HOTPLUG_LONG_DETECT;
158774c0b395SVille Syrjälä 	default:
158874c0b395SVille Syrjälä 		return false;
158974c0b395SVille Syrjälä 	}
159074c0b395SVille Syrjälä }
159174c0b395SVille Syrjälä 
1592e4ce95aaSVille Syrjälä static bool ilk_port_hotplug_long_detect(enum port port, u32 val)
1593e4ce95aaSVille Syrjälä {
1594e4ce95aaSVille Syrjälä 	switch (port) {
1595e4ce95aaSVille Syrjälä 	case PORT_A:
1596e4ce95aaSVille Syrjälä 		return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
1597e4ce95aaSVille Syrjälä 	default:
1598e4ce95aaSVille Syrjälä 		return false;
1599e4ce95aaSVille Syrjälä 	}
1600e4ce95aaSVille Syrjälä }
1601e4ce95aaSVille Syrjälä 
1602676574dfSJani Nikula static bool pch_port_hotplug_long_detect(enum port port, u32 val)
160313cf5504SDave Airlie {
160413cf5504SDave Airlie 	switch (port) {
160513cf5504SDave Airlie 	case PORT_B:
1606676574dfSJani Nikula 		return val & PORTB_HOTPLUG_LONG_DETECT;
160713cf5504SDave Airlie 	case PORT_C:
1608676574dfSJani Nikula 		return val & PORTC_HOTPLUG_LONG_DETECT;
160913cf5504SDave Airlie 	case PORT_D:
1610676574dfSJani Nikula 		return val & PORTD_HOTPLUG_LONG_DETECT;
1611676574dfSJani Nikula 	default:
1612676574dfSJani Nikula 		return false;
161313cf5504SDave Airlie 	}
161413cf5504SDave Airlie }
161513cf5504SDave Airlie 
1616676574dfSJani Nikula static bool i9xx_port_hotplug_long_detect(enum port port, u32 val)
161713cf5504SDave Airlie {
161813cf5504SDave Airlie 	switch (port) {
161913cf5504SDave Airlie 	case PORT_B:
1620676574dfSJani Nikula 		return val & PORTB_HOTPLUG_INT_LONG_PULSE;
162113cf5504SDave Airlie 	case PORT_C:
1622676574dfSJani Nikula 		return val & PORTC_HOTPLUG_INT_LONG_PULSE;
162313cf5504SDave Airlie 	case PORT_D:
1624676574dfSJani Nikula 		return val & PORTD_HOTPLUG_INT_LONG_PULSE;
1625676574dfSJani Nikula 	default:
1626676574dfSJani Nikula 		return false;
162713cf5504SDave Airlie 	}
162813cf5504SDave Airlie }
162913cf5504SDave Airlie 
163042db67d6SVille Syrjälä /*
163142db67d6SVille Syrjälä  * Get a bit mask of pins that have triggered, and which ones may be long.
163242db67d6SVille Syrjälä  * This can be called multiple times with the same masks to accumulate
163342db67d6SVille Syrjälä  * hotplug detection results from several registers.
163442db67d6SVille Syrjälä  *
163542db67d6SVille Syrjälä  * Note that the caller is expected to zero out the masks initially.
163642db67d6SVille Syrjälä  */
1637cf53902fSRodrigo Vivi static void intel_get_hpd_pins(struct drm_i915_private *dev_priv,
1638cf53902fSRodrigo Vivi 			       u32 *pin_mask, u32 *long_mask,
16398c841e57SJani Nikula 			       u32 hotplug_trigger, u32 dig_hotplug_reg,
1640fd63e2a9SImre Deak 			       const u32 hpd[HPD_NUM_PINS],
1641fd63e2a9SImre Deak 			       bool long_pulse_detect(enum port port, u32 val))
1642676574dfSJani Nikula {
16438c841e57SJani Nikula 	enum port port;
1644676574dfSJani Nikula 	int i;
1645676574dfSJani Nikula 
1646676574dfSJani Nikula 	for_each_hpd_pin(i) {
16478c841e57SJani Nikula 		if ((hpd[i] & hotplug_trigger) == 0)
16488c841e57SJani Nikula 			continue;
16498c841e57SJani Nikula 
1650676574dfSJani Nikula 		*pin_mask |= BIT(i);
1651676574dfSJani Nikula 
1652cf53902fSRodrigo Vivi 		port = intel_hpd_pin_to_port(dev_priv, i);
1653256cfddeSRodrigo Vivi 		if (port == PORT_NONE)
1654cc24fcdcSImre Deak 			continue;
1655cc24fcdcSImre Deak 
1656fd63e2a9SImre Deak 		if (long_pulse_detect(port, dig_hotplug_reg))
1657676574dfSJani Nikula 			*long_mask |= BIT(i);
1658676574dfSJani Nikula 	}
1659676574dfSJani Nikula 
1660676574dfSJani Nikula 	DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x\n",
1661676574dfSJani Nikula 			 hotplug_trigger, dig_hotplug_reg, *pin_mask);
1662676574dfSJani Nikula 
1663676574dfSJani Nikula }
1664676574dfSJani Nikula 
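/*
 * Minimal usage sketch for intel_get_hpd_pins(): the caller zeroes the
 * masks, may accumulate results over several trigger registers, and then
 * hands the totals to intel_hpd_irq_handler(), as the i9xx/PCH handlers
 * below do. The choice of hpd_ibx and the PCH detect callback here is
 * only illustrative.
 */
static void example_accumulate_hpd(struct drm_i915_private *dev_priv,
				   u32 hotplug_trigger, u32 dig_hotplug_reg)
{
	u32 pin_mask = 0, long_mask = 0;

	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
			   hotplug_trigger, dig_hotplug_reg,
			   hpd_ibx, pch_port_hotplug_long_detect);

	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
}
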
166591d14251STvrtko Ursulin static void gmbus_irq_handler(struct drm_i915_private *dev_priv)
1666515ac2bbSDaniel Vetter {
166728c70f16SDaniel Vetter 	wake_up_all(&dev_priv->gmbus_wait_queue);
1668515ac2bbSDaniel Vetter }
1669515ac2bbSDaniel Vetter 
167091d14251STvrtko Ursulin static void dp_aux_irq_handler(struct drm_i915_private *dev_priv)
1671ce99c256SDaniel Vetter {
16729ee32feaSDaniel Vetter 	wake_up_all(&dev_priv->gmbus_wait_queue);
1673ce99c256SDaniel Vetter }
1674ce99c256SDaniel Vetter 
16758bf1e9f1SShuang He #if defined(CONFIG_DEBUG_FS)
167691d14251STvrtko Ursulin static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
167791d14251STvrtko Ursulin 					 enum pipe pipe,
1678eba94eb9SDaniel Vetter 					 uint32_t crc0, uint32_t crc1,
1679eba94eb9SDaniel Vetter 					 uint32_t crc2, uint32_t crc3,
16808bc5e955SDaniel Vetter 					 uint32_t crc4)
16818bf1e9f1SShuang He {
16828bf1e9f1SShuang He 	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
16838bf1e9f1SShuang He 	struct intel_pipe_crc_entry *entry;
16848c6b709dSTomeu Vizoso 	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
16858c6b709dSTomeu Vizoso 	struct drm_driver *driver = dev_priv->drm.driver;
16868c6b709dSTomeu Vizoso 	uint32_t crcs[5];
1687ac2300d4SDamien Lespiau 	int head, tail;
1688b2c88f5bSDamien Lespiau 
1689d538bbdfSDamien Lespiau 	spin_lock(&pipe_crc->lock);
1690033b7a23SMaarten Lankhorst 	if (pipe_crc->source && !crtc->base.crc.opened) {
16910c912c79SDamien Lespiau 		if (!pipe_crc->entries) {
1692d538bbdfSDamien Lespiau 			spin_unlock(&pipe_crc->lock);
169334273620SDaniel Vetter 			DRM_DEBUG_KMS("spurious interrupt\n");
16940c912c79SDamien Lespiau 			return;
16950c912c79SDamien Lespiau 		}
16960c912c79SDamien Lespiau 
1697d538bbdfSDamien Lespiau 		head = pipe_crc->head;
1698d538bbdfSDamien Lespiau 		tail = pipe_crc->tail;
1699b2c88f5bSDamien Lespiau 
1700b2c88f5bSDamien Lespiau 		if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
1701d538bbdfSDamien Lespiau 			spin_unlock(&pipe_crc->lock);
1702b2c88f5bSDamien Lespiau 			DRM_ERROR("CRC buffer overflowing\n");
1703b2c88f5bSDamien Lespiau 			return;
1704b2c88f5bSDamien Lespiau 		}
1705b2c88f5bSDamien Lespiau 
1706b2c88f5bSDamien Lespiau 		entry = &pipe_crc->entries[head];
17078bf1e9f1SShuang He 
17088c6b709dSTomeu Vizoso 		entry->frame = driver->get_vblank_counter(&dev_priv->drm, pipe);
1709eba94eb9SDaniel Vetter 		entry->crc[0] = crc0;
1710eba94eb9SDaniel Vetter 		entry->crc[1] = crc1;
1711eba94eb9SDaniel Vetter 		entry->crc[2] = crc2;
1712eba94eb9SDaniel Vetter 		entry->crc[3] = crc3;
1713eba94eb9SDaniel Vetter 		entry->crc[4] = crc4;
1714b2c88f5bSDamien Lespiau 
1715b2c88f5bSDamien Lespiau 		head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
1716d538bbdfSDamien Lespiau 		pipe_crc->head = head;
1717d538bbdfSDamien Lespiau 
1718d538bbdfSDamien Lespiau 		spin_unlock(&pipe_crc->lock);
171907144428SDamien Lespiau 
172007144428SDamien Lespiau 		wake_up_interruptible(&pipe_crc->wq);
17218c6b709dSTomeu Vizoso 	} else {
17228c6b709dSTomeu Vizoso 		/*
17238c6b709dSTomeu Vizoso 		 * For some not yet identified reason, the first CRC is
17248c6b709dSTomeu Vizoso 		 * bonkers. So let's just wait for the next vblank and read
17258c6b709dSTomeu Vizoso 		 * out the buggy result.
17268c6b709dSTomeu Vizoso 		 *
1727163e8aecSRodrigo Vivi 		 * On GEN8+ sometimes the second CRC is bonkers as well, so
17288c6b709dSTomeu Vizoso 		 * don't trust that one either.
17298c6b709dSTomeu Vizoso 		 */
1730033b7a23SMaarten Lankhorst 		if (pipe_crc->skipped <= 0 ||
1731163e8aecSRodrigo Vivi 		    (INTEL_GEN(dev_priv) >= 8 && pipe_crc->skipped == 1)) {
17328c6b709dSTomeu Vizoso 			pipe_crc->skipped++;
17338c6b709dSTomeu Vizoso 			spin_unlock(&pipe_crc->lock);
17348c6b709dSTomeu Vizoso 			return;
17358c6b709dSTomeu Vizoso 		}
17368c6b709dSTomeu Vizoso 		spin_unlock(&pipe_crc->lock);
17378c6b709dSTomeu Vizoso 		crcs[0] = crc0;
17388c6b709dSTomeu Vizoso 		crcs[1] = crc1;
17398c6b709dSTomeu Vizoso 		crcs[2] = crc2;
17408c6b709dSTomeu Vizoso 		crcs[3] = crc3;
17418c6b709dSTomeu Vizoso 		crcs[4] = crc4;
1742246ee524STomeu Vizoso 		drm_crtc_add_crc_entry(&crtc->base, true,
1743ca814b25SDaniel Vetter 				       drm_crtc_accurate_vblank_count(&crtc->base),
1744246ee524STomeu Vizoso 				       crcs);
17458c6b709dSTomeu Vizoso 	}
17468bf1e9f1SShuang He }
1747277de95eSDaniel Vetter #else
1748277de95eSDaniel Vetter static inline void
174991d14251STvrtko Ursulin display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
175091d14251STvrtko Ursulin 			     enum pipe pipe,
1751277de95eSDaniel Vetter 			     uint32_t crc0, uint32_t crc1,
1752277de95eSDaniel Vetter 			     uint32_t crc2, uint32_t crc3,
1753277de95eSDaniel Vetter 			     uint32_t crc4) {}
1754277de95eSDaniel Vetter #endif
1755eba94eb9SDaniel Vetter 
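/*
 * Sketch of the CRC ring-buffer indexing used above: the entry count is
 * a power of two, so advancing the head is a mask rather than a modulo,
 * and CIRC_SPACE() (circ_buf.h is included at the top of this file)
 * reports whether a free slot exists. The size below is hypothetical.
 */
static bool example_crc_ring_advance(int *head, int tail)
{
	const int nr = 128;	/* hypothetical power-of-two entry count */

	if (CIRC_SPACE(*head, tail, nr) < 1)
		return false;	/* full: caller reports the overflow */

	*head = (*head + 1) & (nr - 1);
	return true;
}
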
1756277de95eSDaniel Vetter 
175791d14251STvrtko Ursulin static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
175891d14251STvrtko Ursulin 				     enum pipe pipe)
17595a69b89fSDaniel Vetter {
176091d14251STvrtko Ursulin 	display_pipe_crc_irq_handler(dev_priv, pipe,
17615a69b89fSDaniel Vetter 				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
17625a69b89fSDaniel Vetter 				     0, 0, 0, 0);
17635a69b89fSDaniel Vetter }
17645a69b89fSDaniel Vetter 
176591d14251STvrtko Ursulin static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
176691d14251STvrtko Ursulin 				     enum pipe pipe)
1767eba94eb9SDaniel Vetter {
176891d14251STvrtko Ursulin 	display_pipe_crc_irq_handler(dev_priv, pipe,
1769eba94eb9SDaniel Vetter 				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1770eba94eb9SDaniel Vetter 				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
1771eba94eb9SDaniel Vetter 				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
1772eba94eb9SDaniel Vetter 				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
17738bc5e955SDaniel Vetter 				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
1774eba94eb9SDaniel Vetter }
17755b3a856bSDaniel Vetter 
177691d14251STvrtko Ursulin static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
177791d14251STvrtko Ursulin 				      enum pipe pipe)
17785b3a856bSDaniel Vetter {
17790b5c5ed0SDaniel Vetter 	uint32_t res1, res2;
17800b5c5ed0SDaniel Vetter 
178191d14251STvrtko Ursulin 	if (INTEL_GEN(dev_priv) >= 3)
17820b5c5ed0SDaniel Vetter 		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
17830b5c5ed0SDaniel Vetter 	else
17840b5c5ed0SDaniel Vetter 		res1 = 0;
17850b5c5ed0SDaniel Vetter 
178691d14251STvrtko Ursulin 	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
17870b5c5ed0SDaniel Vetter 		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
17880b5c5ed0SDaniel Vetter 	else
17890b5c5ed0SDaniel Vetter 		res2 = 0;
17905b3a856bSDaniel Vetter 
179191d14251STvrtko Ursulin 	display_pipe_crc_irq_handler(dev_priv, pipe,
17920b5c5ed0SDaniel Vetter 				     I915_READ(PIPE_CRC_RES_RED(pipe)),
17930b5c5ed0SDaniel Vetter 				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
17940b5c5ed0SDaniel Vetter 				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
17950b5c5ed0SDaniel Vetter 				     res1, res2);
17965b3a856bSDaniel Vetter }
17978bf1e9f1SShuang He 
17981403c0d4SPaulo Zanoni /* The RPS events need forcewake, so we add them to a work queue and mask their
17991403c0d4SPaulo Zanoni  * IMR bits until the work is done. Other interrupts can be processed without
18001403c0d4SPaulo Zanoni  * the work queue. */
18011403c0d4SPaulo Zanoni static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
1802baf02a1fSBen Widawsky {
1803562d9baeSSagar Arun Kamble 	struct intel_rps *rps = &dev_priv->gt_pm.rps;
1804562d9baeSSagar Arun Kamble 
1805a6706b45SDeepak S 	if (pm_iir & dev_priv->pm_rps_events) {
180659cdb63dSDaniel Vetter 		spin_lock(&dev_priv->irq_lock);
1807f4e9af4fSAkash Goel 		gen6_mask_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
1808562d9baeSSagar Arun Kamble 		if (rps->interrupts_enabled) {
1809562d9baeSSagar Arun Kamble 			rps->pm_iir |= pm_iir & dev_priv->pm_rps_events;
1810562d9baeSSagar Arun Kamble 			schedule_work(&rps->work);
181141a05a3aSDaniel Vetter 		}
1812d4d70aa5SImre Deak 		spin_unlock(&dev_priv->irq_lock);
1813d4d70aa5SImre Deak 	}
1814baf02a1fSBen Widawsky 
1815bca2bf2aSPandiyan, Dhinakaran 	if (INTEL_GEN(dev_priv) >= 8)
1816c9a9a268SImre Deak 		return;
1817c9a9a268SImre Deak 
18182d1fe073SJoonas Lahtinen 	if (HAS_VEBOX(dev_priv)) {
181912638c57SBen Widawsky 		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
18203b3f1650SAkash Goel 			notify_ring(dev_priv->engine[VECS]);
182112638c57SBen Widawsky 
1822aaecdf61SDaniel Vetter 		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
1823aaecdf61SDaniel Vetter 			DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
182412638c57SBen Widawsky 	}
18251403c0d4SPaulo Zanoni }
1826baf02a1fSBen Widawsky 
182726705e20SSagar Arun Kamble static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 gt_iir)
182826705e20SSagar Arun Kamble {
182993bf8096SMichal Wajdeczko 	if (gt_iir & GEN9_GUC_TO_HOST_INT_EVENT)
183093bf8096SMichal Wajdeczko 		intel_guc_to_host_event_handler(&dev_priv->guc);
183126705e20SSagar Arun Kamble }
183226705e20SSagar Arun Kamble 
183344d9241eSVille Syrjälä static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
183444d9241eSVille Syrjälä {
183544d9241eSVille Syrjälä 	enum pipe pipe;
183644d9241eSVille Syrjälä 
183744d9241eSVille Syrjälä 	for_each_pipe(dev_priv, pipe) {
183844d9241eSVille Syrjälä 		I915_WRITE(PIPESTAT(pipe),
183944d9241eSVille Syrjälä 			   PIPESTAT_INT_STATUS_MASK |
184044d9241eSVille Syrjälä 			   PIPE_FIFO_UNDERRUN_STATUS);
184144d9241eSVille Syrjälä 
184244d9241eSVille Syrjälä 		dev_priv->pipestat_irq_mask[pipe] = 0;
184344d9241eSVille Syrjälä 	}
184444d9241eSVille Syrjälä }
184544d9241eSVille Syrjälä 
1846eb64343cSVille Syrjälä static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
184791d14251STvrtko Ursulin 				  u32 iir, u32 pipe_stats[I915_MAX_PIPES])
18487e231dbeSJesse Barnes {
18497e231dbeSJesse Barnes 	int pipe;
18507e231dbeSJesse Barnes 
185158ead0d7SImre Deak 	spin_lock(&dev_priv->irq_lock);
18521ca993d2SVille Syrjälä 
18531ca993d2SVille Syrjälä 	if (!dev_priv->display_irqs_enabled) {
18541ca993d2SVille Syrjälä 		spin_unlock(&dev_priv->irq_lock);
18551ca993d2SVille Syrjälä 		return;
18561ca993d2SVille Syrjälä 	}
18571ca993d2SVille Syrjälä 
1858055e393fSDamien Lespiau 	for_each_pipe(dev_priv, pipe) {
1859f0f59a00SVille Syrjälä 		i915_reg_t reg;
18606b12ca56SVille Syrjälä 		u32 status_mask, enable_mask, iir_bit = 0;
186191d181ddSImre Deak 
1862bbb5eebfSDaniel Vetter 		/*
1863bbb5eebfSDaniel Vetter 		 * PIPESTAT bits get signalled even when the interrupt is
1864bbb5eebfSDaniel Vetter 		 * disabled with the mask bits, and some of the status bits do
1865bbb5eebfSDaniel Vetter 		 * not generate interrupts at all (like the underrun bit). Hence
1866bbb5eebfSDaniel Vetter 		 * we need to be careful that we only handle what we want to
1867bbb5eebfSDaniel Vetter 		 * handle.
1868bbb5eebfSDaniel Vetter 		 */
18690f239f4cSDaniel Vetter 
18700f239f4cSDaniel Vetter 		/* fifo underruns are filtered in the underrun handler. */
18716b12ca56SVille Syrjälä 		status_mask = PIPE_FIFO_UNDERRUN_STATUS;
1872bbb5eebfSDaniel Vetter 
1873bbb5eebfSDaniel Vetter 		switch (pipe) {
1874bbb5eebfSDaniel Vetter 		case PIPE_A:
1875bbb5eebfSDaniel Vetter 			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
1876bbb5eebfSDaniel Vetter 			break;
1877bbb5eebfSDaniel Vetter 		case PIPE_B:
1878bbb5eebfSDaniel Vetter 			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
1879bbb5eebfSDaniel Vetter 			break;
18803278f67fSVille Syrjälä 		case PIPE_C:
18813278f67fSVille Syrjälä 			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
18823278f67fSVille Syrjälä 			break;
1883bbb5eebfSDaniel Vetter 		}
1884bbb5eebfSDaniel Vetter 		if (iir & iir_bit)
18856b12ca56SVille Syrjälä 			status_mask |= dev_priv->pipestat_irq_mask[pipe];
1886bbb5eebfSDaniel Vetter 
18876b12ca56SVille Syrjälä 		if (!status_mask)
188891d181ddSImre Deak 			continue;
188991d181ddSImre Deak 
189091d181ddSImre Deak 		reg = PIPESTAT(pipe);
18916b12ca56SVille Syrjälä 		pipe_stats[pipe] = I915_READ(reg) & status_mask;
18926b12ca56SVille Syrjälä 		enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
18937e231dbeSJesse Barnes 
18947e231dbeSJesse Barnes 		/*
18957e231dbeSJesse Barnes 		 * Clear the PIPE*STAT regs before the IIR
18967e231dbeSJesse Barnes 		 */
18976b12ca56SVille Syrjälä 		if (pipe_stats[pipe])
18986b12ca56SVille Syrjälä 			I915_WRITE(reg, enable_mask | pipe_stats[pipe]);
18997e231dbeSJesse Barnes 	}
190058ead0d7SImre Deak 	spin_unlock(&dev_priv->irq_lock);
19012ecb8ca4SVille Syrjälä }
19022ecb8ca4SVille Syrjälä 
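/*
 * Sketch of the PIPESTAT ack performed above: the register keeps enable
 * bits in its high half and write-1-to-clear status bits in its low
 * half, so writing "enable_mask | status" in one go preserves the
 * enables while clearing exactly the status bits just read. Hypothetical
 * helper for illustration only.
 */
static u32 example_pipestat_ack_value(u32 enable_mask, u32 status_bits)
{
	return enable_mask | status_bits;
}
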
1903eb64343cSVille Syrjälä static void i8xx_pipestat_irq_handler(struct drm_i915_private *dev_priv,
1904eb64343cSVille Syrjälä 				      u16 iir, u32 pipe_stats[I915_MAX_PIPES])
1905eb64343cSVille Syrjälä {
1906eb64343cSVille Syrjälä 	enum pipe pipe;
1907eb64343cSVille Syrjälä 
1908eb64343cSVille Syrjälä 	for_each_pipe(dev_priv, pipe) {
1909eb64343cSVille Syrjälä 		if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
1910eb64343cSVille Syrjälä 			drm_handle_vblank(&dev_priv->drm, pipe);
1911eb64343cSVille Syrjälä 
1912eb64343cSVille Syrjälä 		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1913eb64343cSVille Syrjälä 			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1914eb64343cSVille Syrjälä 
1915eb64343cSVille Syrjälä 		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1916eb64343cSVille Syrjälä 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1917eb64343cSVille Syrjälä 	}
1918eb64343cSVille Syrjälä }
1919eb64343cSVille Syrjälä 
1920eb64343cSVille Syrjälä static void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv,
1921eb64343cSVille Syrjälä 				      u32 iir, u32 pipe_stats[I915_MAX_PIPES])
1922eb64343cSVille Syrjälä {
1923eb64343cSVille Syrjälä 	bool blc_event = false;
1924eb64343cSVille Syrjälä 	enum pipe pipe;
1925eb64343cSVille Syrjälä 
1926eb64343cSVille Syrjälä 	for_each_pipe(dev_priv, pipe) {
1927eb64343cSVille Syrjälä 		if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
1928eb64343cSVille Syrjälä 			drm_handle_vblank(&dev_priv->drm, pipe);
1929eb64343cSVille Syrjälä 
1930eb64343cSVille Syrjälä 		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
1931eb64343cSVille Syrjälä 			blc_event = true;
1932eb64343cSVille Syrjälä 
1933eb64343cSVille Syrjälä 		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1934eb64343cSVille Syrjälä 			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1935eb64343cSVille Syrjälä 
1936eb64343cSVille Syrjälä 		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1937eb64343cSVille Syrjälä 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1938eb64343cSVille Syrjälä 	}
1939eb64343cSVille Syrjälä 
1940eb64343cSVille Syrjälä 	if (blc_event || (iir & I915_ASLE_INTERRUPT))
1941eb64343cSVille Syrjälä 		intel_opregion_asle_intr(dev_priv);
1942eb64343cSVille Syrjälä }
1943eb64343cSVille Syrjälä 
1944eb64343cSVille Syrjälä static void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv,
1945eb64343cSVille Syrjälä 				      u32 iir, u32 pipe_stats[I915_MAX_PIPES])
1946eb64343cSVille Syrjälä {
1947eb64343cSVille Syrjälä 	bool blc_event = false;
1948eb64343cSVille Syrjälä 	enum pipe pipe;
1949eb64343cSVille Syrjälä 
1950eb64343cSVille Syrjälä 	for_each_pipe(dev_priv, pipe) {
1951eb64343cSVille Syrjälä 		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
1952eb64343cSVille Syrjälä 			drm_handle_vblank(&dev_priv->drm, pipe);
1953eb64343cSVille Syrjälä 
1954eb64343cSVille Syrjälä 		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
1955eb64343cSVille Syrjälä 			blc_event = true;
1956eb64343cSVille Syrjälä 
1957eb64343cSVille Syrjälä 		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1958eb64343cSVille Syrjälä 			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1959eb64343cSVille Syrjälä 
1960eb64343cSVille Syrjälä 		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1961eb64343cSVille Syrjälä 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1962eb64343cSVille Syrjälä 	}
1963eb64343cSVille Syrjälä 
1964eb64343cSVille Syrjälä 	if (blc_event || (iir & I915_ASLE_INTERRUPT))
1965eb64343cSVille Syrjälä 		intel_opregion_asle_intr(dev_priv);
1966eb64343cSVille Syrjälä 
1967eb64343cSVille Syrjälä 	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
1968eb64343cSVille Syrjälä 		gmbus_irq_handler(dev_priv);
1969eb64343cSVille Syrjälä }
1970eb64343cSVille Syrjälä 
197191d14251STvrtko Ursulin static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
19722ecb8ca4SVille Syrjälä 					    u32 pipe_stats[I915_MAX_PIPES])
19732ecb8ca4SVille Syrjälä {
19742ecb8ca4SVille Syrjälä 	enum pipe pipe;
19757e231dbeSJesse Barnes 
1976055e393fSDamien Lespiau 	for_each_pipe(dev_priv, pipe) {
1977fd3a4024SDaniel Vetter 		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
1978fd3a4024SDaniel Vetter 			drm_handle_vblank(&dev_priv->drm, pipe);
19794356d586SDaniel Vetter 
19804356d586SDaniel Vetter 		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
198191d14251STvrtko Ursulin 			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
19822d9d2b0bSVille Syrjälä 
19831f7247c0SDaniel Vetter 		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
19841f7247c0SDaniel Vetter 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
198531acc7f5SJesse Barnes 	}
198631acc7f5SJesse Barnes 
1987c1874ed7SImre Deak 	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
198891d14251STvrtko Ursulin 		gmbus_irq_handler(dev_priv);
1989c1874ed7SImre Deak }
1990c1874ed7SImre Deak 
19911ae3c34cSVille Syrjälä static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
199216c6c56bSVille Syrjälä {
199316c6c56bSVille Syrjälä 	u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
199416c6c56bSVille Syrjälä 
19951ae3c34cSVille Syrjälä 	if (hotplug_status)
19963ff60f89SOscar Mateo 		I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
19971ae3c34cSVille Syrjälä 
19981ae3c34cSVille Syrjälä 	return hotplug_status;
19991ae3c34cSVille Syrjälä }
20001ae3c34cSVille Syrjälä 
200191d14251STvrtko Ursulin static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv,
20021ae3c34cSVille Syrjälä 				 u32 hotplug_status)
20031ae3c34cSVille Syrjälä {
20041ae3c34cSVille Syrjälä 	u32 pin_mask = 0, long_mask = 0;
20053ff60f89SOscar Mateo 
200691d14251STvrtko Ursulin 	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
200791d14251STvrtko Ursulin 	    IS_CHERRYVIEW(dev_priv)) {
200816c6c56bSVille Syrjälä 		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
200916c6c56bSVille Syrjälä 
201058f2cf24SVille Syrjälä 		if (hotplug_trigger) {
2011cf53902fSRodrigo Vivi 			intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2012cf53902fSRodrigo Vivi 					   hotplug_trigger, hotplug_trigger,
2013cf53902fSRodrigo Vivi 					   hpd_status_g4x,
2014fd63e2a9SImre Deak 					   i9xx_port_hotplug_long_detect);
201558f2cf24SVille Syrjälä 
201691d14251STvrtko Ursulin 			intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
201758f2cf24SVille Syrjälä 		}
2018369712e8SJani Nikula 
2019369712e8SJani Nikula 		if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
202091d14251STvrtko Ursulin 			dp_aux_irq_handler(dev_priv);
202116c6c56bSVille Syrjälä 	} else {
202216c6c56bSVille Syrjälä 		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
202316c6c56bSVille Syrjälä 
202458f2cf24SVille Syrjälä 		if (hotplug_trigger) {
2025cf53902fSRodrigo Vivi 			intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2026cf53902fSRodrigo Vivi 					   hotplug_trigger, hotplug_trigger,
2027cf53902fSRodrigo Vivi 					   hpd_status_i915,
2028fd63e2a9SImre Deak 					   i9xx_port_hotplug_long_detect);
202991d14251STvrtko Ursulin 			intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
203016c6c56bSVille Syrjälä 		}
20313ff60f89SOscar Mateo 	}
203258f2cf24SVille Syrjälä }
203316c6c56bSVille Syrjälä 
2034c1874ed7SImre Deak static irqreturn_t valleyview_irq_handler(int irq, void *arg)
2035c1874ed7SImre Deak {
203645a83f84SDaniel Vetter 	struct drm_device *dev = arg;
2037fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
2038c1874ed7SImre Deak 	irqreturn_t ret = IRQ_NONE;
2039c1874ed7SImre Deak 
20402dd2a883SImre Deak 	if (!intel_irqs_enabled(dev_priv))
20412dd2a883SImre Deak 		return IRQ_NONE;
20422dd2a883SImre Deak 
20431f814dacSImre Deak 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
20441f814dacSImre Deak 	disable_rpm_wakeref_asserts(dev_priv);
20451f814dacSImre Deak 
20461e1cace9SVille Syrjälä 	do {
20476e814800SVille Syrjälä 		u32 iir, gt_iir, pm_iir;
20482ecb8ca4SVille Syrjälä 		u32 pipe_stats[I915_MAX_PIPES] = {};
20491ae3c34cSVille Syrjälä 		u32 hotplug_status = 0;
2050a5e485a9SVille Syrjälä 		u32 ier = 0;
20513ff60f89SOscar Mateo 
2052c1874ed7SImre Deak 		gt_iir = I915_READ(GTIIR);
2053c1874ed7SImre Deak 		pm_iir = I915_READ(GEN6_PMIIR);
20543ff60f89SOscar Mateo 		iir = I915_READ(VLV_IIR);
2055c1874ed7SImre Deak 
2056c1874ed7SImre Deak 		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
20571e1cace9SVille Syrjälä 			break;
2058c1874ed7SImre Deak 
2059c1874ed7SImre Deak 		ret = IRQ_HANDLED;
2060c1874ed7SImre Deak 
2061a5e485a9SVille Syrjälä 		/*
2062a5e485a9SVille Syrjälä 		 * Theory on interrupt generation, based on empirical evidence:
2063a5e485a9SVille Syrjälä 		 *
2064a5e485a9SVille Syrjälä 		 * x = ((VLV_IIR & VLV_IER) ||
2065a5e485a9SVille Syrjälä 		 *      (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) &&
2066a5e485a9SVille Syrjälä 		 *       (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE)));
2067a5e485a9SVille Syrjälä 		 *
2068a5e485a9SVille Syrjälä 		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
2069a5e485a9SVille Syrjälä 		 * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to
2070a5e485a9SVille Syrjälä 		 * guarantee the CPU interrupt will be raised again even if we
2071a5e485a9SVille Syrjälä 		 * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR
2072a5e485a9SVille Syrjälä 		 * bits this time around.
2073a5e485a9SVille Syrjälä 		 */
20744a0a0202SVille Syrjälä 		I915_WRITE(VLV_MASTER_IER, 0);
2075a5e485a9SVille Syrjälä 		ier = I915_READ(VLV_IER);
2076a5e485a9SVille Syrjälä 		I915_WRITE(VLV_IER, 0);
20774a0a0202SVille Syrjälä 
20784a0a0202SVille Syrjälä 		if (gt_iir)
20794a0a0202SVille Syrjälä 			I915_WRITE(GTIIR, gt_iir);
20804a0a0202SVille Syrjälä 		if (pm_iir)
20814a0a0202SVille Syrjälä 			I915_WRITE(GEN6_PMIIR, pm_iir);
20824a0a0202SVille Syrjälä 
20837ce4d1f2SVille Syrjälä 		if (iir & I915_DISPLAY_PORT_INTERRUPT)
20841ae3c34cSVille Syrjälä 			hotplug_status = i9xx_hpd_irq_ack(dev_priv);
20857ce4d1f2SVille Syrjälä 
20863ff60f89SOscar Mateo 		/* Call regardless, as some status bits might not be
20873ff60f89SOscar Mateo 		 * signalled in iir */
2088eb64343cSVille Syrjälä 		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
20897ce4d1f2SVille Syrjälä 
2090eef57324SJerome Anand 		if (iir & (I915_LPE_PIPE_A_INTERRUPT |
2091eef57324SJerome Anand 			   I915_LPE_PIPE_B_INTERRUPT))
2092eef57324SJerome Anand 			intel_lpe_audio_irq_handler(dev_priv);
2093eef57324SJerome Anand 
20947ce4d1f2SVille Syrjälä 		/*
20957ce4d1f2SVille Syrjälä 		 * VLV_IIR is single buffered, and reflects the level
20967ce4d1f2SVille Syrjälä 		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
20977ce4d1f2SVille Syrjälä 		 */
20987ce4d1f2SVille Syrjälä 		if (iir)
20997ce4d1f2SVille Syrjälä 			I915_WRITE(VLV_IIR, iir);
21004a0a0202SVille Syrjälä 
2101a5e485a9SVille Syrjälä 		I915_WRITE(VLV_IER, ier);
21024a0a0202SVille Syrjälä 		I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
21034a0a0202SVille Syrjälä 		POSTING_READ(VLV_MASTER_IER);
21041ae3c34cSVille Syrjälä 
210552894874SVille Syrjälä 		if (gt_iir)
2106261e40b8SVille Syrjälä 			snb_gt_irq_handler(dev_priv, gt_iir);
210752894874SVille Syrjälä 		if (pm_iir)
210852894874SVille Syrjälä 			gen6_rps_irq_handler(dev_priv, pm_iir);
210952894874SVille Syrjälä 
21101ae3c34cSVille Syrjälä 		if (hotplug_status)
211191d14251STvrtko Ursulin 			i9xx_hpd_irq_handler(dev_priv, hotplug_status);
21122ecb8ca4SVille Syrjälä 
211391d14251STvrtko Ursulin 		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
21141e1cace9SVille Syrjälä 	} while (0);
21157e231dbeSJesse Barnes 
21161f814dacSImre Deak 	enable_rpm_wakeref_asserts(dev_priv);
21171f814dacSImre Deak 
21187e231dbeSJesse Barnes 	return ret;
21197e231dbeSJesse Barnes }
21207e231dbeSJesse Barnes 
212143f328d7SVille Syrjälä static irqreturn_t cherryview_irq_handler(int irq, void *arg)
212243f328d7SVille Syrjälä {
212345a83f84SDaniel Vetter 	struct drm_device *dev = arg;
2124fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
212543f328d7SVille Syrjälä 	irqreturn_t ret = IRQ_NONE;
212643f328d7SVille Syrjälä 
21272dd2a883SImre Deak 	if (!intel_irqs_enabled(dev_priv))
21282dd2a883SImre Deak 		return IRQ_NONE;
21292dd2a883SImre Deak 
21301f814dacSImre Deak 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
21311f814dacSImre Deak 	disable_rpm_wakeref_asserts(dev_priv);
21321f814dacSImre Deak 
2133579de73bSChris Wilson 	do {
21346e814800SVille Syrjälä 		u32 master_ctl, iir;
21352ecb8ca4SVille Syrjälä 		u32 pipe_stats[I915_MAX_PIPES] = {};
21361ae3c34cSVille Syrjälä 		u32 hotplug_status = 0;
2137f0fd96f5SChris Wilson 		u32 gt_iir[4];
2138a5e485a9SVille Syrjälä 		u32 ier = 0;
2139a5e485a9SVille Syrjälä 
21408e5fd599SVille Syrjälä 		master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
21413278f67fSVille Syrjälä 		iir = I915_READ(VLV_IIR);
21423278f67fSVille Syrjälä 
21433278f67fSVille Syrjälä 		if (master_ctl == 0 && iir == 0)
21448e5fd599SVille Syrjälä 			break;
214543f328d7SVille Syrjälä 
214627b6c122SOscar Mateo 		ret = IRQ_HANDLED;
214727b6c122SOscar Mateo 
2148a5e485a9SVille Syrjälä 		/*
2149a5e485a9SVille Syrjälä 		 * Theory on interrupt generation, based on empirical evidence:
2150a5e485a9SVille Syrjälä 		 *
2151a5e485a9SVille Syrjälä 		 * x = ((VLV_IIR & VLV_IER) ||
2152a5e485a9SVille Syrjälä 		 *      ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) &&
2153a5e485a9SVille Syrjälä 		 *       (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL)));
2154a5e485a9SVille Syrjälä 		 *
2155a5e485a9SVille Syrjälä 		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
2156a5e485a9SVille Syrjälä 		 * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to
2157a5e485a9SVille Syrjälä 		 * guarantee the CPU interrupt will be raised again even if we
2158a5e485a9SVille Syrjälä 		 * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL
2159a5e485a9SVille Syrjälä 		 * bits this time around.
2160a5e485a9SVille Syrjälä 		 */
216143f328d7SVille Syrjälä 		I915_WRITE(GEN8_MASTER_IRQ, 0);
2162a5e485a9SVille Syrjälä 		ier = I915_READ(VLV_IER);
2163a5e485a9SVille Syrjälä 		I915_WRITE(VLV_IER, 0);
216443f328d7SVille Syrjälä 
2165e30e251aSVille Syrjälä 		gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);
216627b6c122SOscar Mateo 
216727b6c122SOscar Mateo 		if (iir & I915_DISPLAY_PORT_INTERRUPT)
21681ae3c34cSVille Syrjälä 			hotplug_status = i9xx_hpd_irq_ack(dev_priv);
216943f328d7SVille Syrjälä 
217027b6c122SOscar Mateo 		/* Call regardless, as some status bits might not be
217127b6c122SOscar Mateo 		 * signalled in iir */
2172eb64343cSVille Syrjälä 		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
217343f328d7SVille Syrjälä 
2174eef57324SJerome Anand 		if (iir & (I915_LPE_PIPE_A_INTERRUPT |
2175eef57324SJerome Anand 			   I915_LPE_PIPE_B_INTERRUPT |
2176eef57324SJerome Anand 			   I915_LPE_PIPE_C_INTERRUPT))
2177eef57324SJerome Anand 			intel_lpe_audio_irq_handler(dev_priv);
2178eef57324SJerome Anand 
21797ce4d1f2SVille Syrjälä 		/*
21807ce4d1f2SVille Syrjälä 		 * VLV_IIR is single buffered, and reflects the level
21817ce4d1f2SVille Syrjälä 		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
21827ce4d1f2SVille Syrjälä 		 */
21837ce4d1f2SVille Syrjälä 		if (iir)
21847ce4d1f2SVille Syrjälä 			I915_WRITE(VLV_IIR, iir);
21857ce4d1f2SVille Syrjälä 
2186a5e485a9SVille Syrjälä 		I915_WRITE(VLV_IER, ier);
2187e5328c43SVille Syrjälä 		I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
218843f328d7SVille Syrjälä 		POSTING_READ(GEN8_MASTER_IRQ);
21891ae3c34cSVille Syrjälä 
2190f0fd96f5SChris Wilson 		gen8_gt_irq_handler(dev_priv, master_ctl, gt_iir);
2191e30e251aSVille Syrjälä 
21921ae3c34cSVille Syrjälä 		if (hotplug_status)
219391d14251STvrtko Ursulin 			i9xx_hpd_irq_handler(dev_priv, hotplug_status);
21942ecb8ca4SVille Syrjälä 
219591d14251STvrtko Ursulin 		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
2196579de73bSChris Wilson 	} while (0);
21973278f67fSVille Syrjälä 
21981f814dacSImre Deak 	enable_rpm_wakeref_asserts(dev_priv);
21991f814dacSImre Deak 
220043f328d7SVille Syrjälä 	return ret;
220143f328d7SVille Syrjälä }
220243f328d7SVille Syrjälä 
220391d14251STvrtko Ursulin static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
220491d14251STvrtko Ursulin 				u32 hotplug_trigger,
220540e56410SVille Syrjälä 				const u32 hpd[HPD_NUM_PINS])
2206776ad806SJesse Barnes {
220742db67d6SVille Syrjälä 	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2208776ad806SJesse Barnes 
22096a39d7c9SJani Nikula 	/*
22106a39d7c9SJani Nikula 	 * Somehow the PCH doesn't seem to really ack the interrupt to the CPU
22116a39d7c9SJani Nikula 	 * unless we touch the hotplug register, even if hotplug_trigger is
22126a39d7c9SJani Nikula 	 * zero. Not acking leads to "The master control interrupt lied (SDE)!"
22136a39d7c9SJani Nikula 	 * errors.
22146a39d7c9SJani Nikula 	 */
221513cf5504SDave Airlie 	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
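	/*
	 * The PORT[A-D] hotplug status bits read back here appear to be sticky
	 * (write-one-to-clear), so when there is no hotplug trigger they are
	 * masked out of the write-back below; presumably this performs the
	 * dummy ack write without discarding a hotplug event that has not
	 * been handled yet.
	 */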
22166a39d7c9SJani Nikula 	if (!hotplug_trigger) {
22176a39d7c9SJani Nikula 		u32 mask = PORTA_HOTPLUG_STATUS_MASK |
22186a39d7c9SJani Nikula 			PORTD_HOTPLUG_STATUS_MASK |
22196a39d7c9SJani Nikula 			PORTC_HOTPLUG_STATUS_MASK |
22206a39d7c9SJani Nikula 			PORTB_HOTPLUG_STATUS_MASK;
22216a39d7c9SJani Nikula 		dig_hotplug_reg &= ~mask;
22226a39d7c9SJani Nikula 	}
22236a39d7c9SJani Nikula 
222413cf5504SDave Airlie 	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
22256a39d7c9SJani Nikula 	if (!hotplug_trigger)
22266a39d7c9SJani Nikula 		return;
222713cf5504SDave Airlie 
2228cf53902fSRodrigo Vivi 	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger,
222940e56410SVille Syrjälä 			   dig_hotplug_reg, hpd,
2230fd63e2a9SImre Deak 			   pch_port_hotplug_long_detect);
223140e56410SVille Syrjälä 
223291d14251STvrtko Ursulin 	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2233aaf5ec2eSSonika Jindal }
223491d131d2SDaniel Vetter 
223591d14251STvrtko Ursulin static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
223640e56410SVille Syrjälä {
223740e56410SVille Syrjälä 	int pipe;
223840e56410SVille Syrjälä 	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
223940e56410SVille Syrjälä 
224091d14251STvrtko Ursulin 	ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ibx);
224140e56410SVille Syrjälä 
2242cfc33bf7SVille Syrjälä 	if (pch_iir & SDE_AUDIO_POWER_MASK) {
2243cfc33bf7SVille Syrjälä 		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
2244776ad806SJesse Barnes 			       SDE_AUDIO_POWER_SHIFT);
2245cfc33bf7SVille Syrjälä 		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
2246cfc33bf7SVille Syrjälä 				 port_name(port));
2247cfc33bf7SVille Syrjälä 	}
2248776ad806SJesse Barnes 
2249ce99c256SDaniel Vetter 	if (pch_iir & SDE_AUX_MASK)
225091d14251STvrtko Ursulin 		dp_aux_irq_handler(dev_priv);
2251ce99c256SDaniel Vetter 
2252776ad806SJesse Barnes 	if (pch_iir & SDE_GMBUS)
225391d14251STvrtko Ursulin 		gmbus_irq_handler(dev_priv);
2254776ad806SJesse Barnes 
2255776ad806SJesse Barnes 	if (pch_iir & SDE_AUDIO_HDCP_MASK)
2256776ad806SJesse Barnes 		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
2257776ad806SJesse Barnes 
2258776ad806SJesse Barnes 	if (pch_iir & SDE_AUDIO_TRANS_MASK)
2259776ad806SJesse Barnes 		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
2260776ad806SJesse Barnes 
2261776ad806SJesse Barnes 	if (pch_iir & SDE_POISON)
2262776ad806SJesse Barnes 		DRM_ERROR("PCH poison interrupt\n");
2263776ad806SJesse Barnes 
22649db4a9c7SJesse Barnes 	if (pch_iir & SDE_FDI_MASK)
2265055e393fSDamien Lespiau 		for_each_pipe(dev_priv, pipe)
22669db4a9c7SJesse Barnes 			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
22679db4a9c7SJesse Barnes 					 pipe_name(pipe),
22689db4a9c7SJesse Barnes 					 I915_READ(FDI_RX_IIR(pipe)));
2269776ad806SJesse Barnes 
2270776ad806SJesse Barnes 	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
2271776ad806SJesse Barnes 		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
2272776ad806SJesse Barnes 
2273776ad806SJesse Barnes 	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
2274776ad806SJesse Barnes 		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
2275776ad806SJesse Barnes 
2276776ad806SJesse Barnes 	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
2277a2196033SMatthias Kaehlcke 		intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A);
22788664281bSPaulo Zanoni 
22798664281bSPaulo Zanoni 	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
2280a2196033SMatthias Kaehlcke 		intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_B);
22818664281bSPaulo Zanoni }
22828664281bSPaulo Zanoni 
228391d14251STvrtko Ursulin static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
22848664281bSPaulo Zanoni {
22858664281bSPaulo Zanoni 	u32 err_int = I915_READ(GEN7_ERR_INT);
22865a69b89fSDaniel Vetter 	enum pipe pipe;
22878664281bSPaulo Zanoni 
2288de032bf4SPaulo Zanoni 	if (err_int & ERR_INT_POISON)
2289de032bf4SPaulo Zanoni 		DRM_ERROR("Poison interrupt\n");
2290de032bf4SPaulo Zanoni 
2291055e393fSDamien Lespiau 	for_each_pipe(dev_priv, pipe) {
22921f7247c0SDaniel Vetter 		if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
22931f7247c0SDaniel Vetter 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
22948664281bSPaulo Zanoni 
22955a69b89fSDaniel Vetter 		if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
229691d14251STvrtko Ursulin 			if (IS_IVYBRIDGE(dev_priv))
229791d14251STvrtko Ursulin 				ivb_pipe_crc_irq_handler(dev_priv, pipe);
22985a69b89fSDaniel Vetter 			else
229991d14251STvrtko Ursulin 				hsw_pipe_crc_irq_handler(dev_priv, pipe);
23005a69b89fSDaniel Vetter 		}
23015a69b89fSDaniel Vetter 	}
23028bf1e9f1SShuang He 
23038664281bSPaulo Zanoni 	I915_WRITE(GEN7_ERR_INT, err_int);
23048664281bSPaulo Zanoni }
23058664281bSPaulo Zanoni 
230691d14251STvrtko Ursulin static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
23078664281bSPaulo Zanoni {
23088664281bSPaulo Zanoni 	u32 serr_int = I915_READ(SERR_INT);
230945c1cd87SMika Kahola 	enum pipe pipe;
23108664281bSPaulo Zanoni 
2311de032bf4SPaulo Zanoni 	if (serr_int & SERR_INT_POISON)
2312de032bf4SPaulo Zanoni 		DRM_ERROR("PCH poison interrupt\n");
2313de032bf4SPaulo Zanoni 
231445c1cd87SMika Kahola 	for_each_pipe(dev_priv, pipe)
231545c1cd87SMika Kahola 		if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe))
231645c1cd87SMika Kahola 			intel_pch_fifo_underrun_irq_handler(dev_priv, pipe);
23178664281bSPaulo Zanoni 
23188664281bSPaulo Zanoni 	I915_WRITE(SERR_INT, serr_int);
2319776ad806SJesse Barnes }
2320776ad806SJesse Barnes 
232191d14251STvrtko Ursulin static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
232223e81d69SAdam Jackson {
232323e81d69SAdam Jackson 	int pipe;
23246dbf30ceSVille Syrjälä 	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
2325aaf5ec2eSSonika Jindal 
232691d14251STvrtko Ursulin 	ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_cpt);
232791d131d2SDaniel Vetter 
2328cfc33bf7SVille Syrjälä 	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
2329cfc33bf7SVille Syrjälä 		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
233023e81d69SAdam Jackson 			       SDE_AUDIO_POWER_SHIFT_CPT);
2331cfc33bf7SVille Syrjälä 		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
2332cfc33bf7SVille Syrjälä 				 port_name(port));
2333cfc33bf7SVille Syrjälä 	}
233423e81d69SAdam Jackson 
233523e81d69SAdam Jackson 	if (pch_iir & SDE_AUX_MASK_CPT)
233691d14251STvrtko Ursulin 		dp_aux_irq_handler(dev_priv);
233723e81d69SAdam Jackson 
233823e81d69SAdam Jackson 	if (pch_iir & SDE_GMBUS_CPT)
233991d14251STvrtko Ursulin 		gmbus_irq_handler(dev_priv);
234023e81d69SAdam Jackson 
234123e81d69SAdam Jackson 	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
234223e81d69SAdam Jackson 		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
234323e81d69SAdam Jackson 
234423e81d69SAdam Jackson 	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
234523e81d69SAdam Jackson 		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
234623e81d69SAdam Jackson 
234723e81d69SAdam Jackson 	if (pch_iir & SDE_FDI_MASK_CPT)
2348055e393fSDamien Lespiau 		for_each_pipe(dev_priv, pipe)
234923e81d69SAdam Jackson 			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
235023e81d69SAdam Jackson 					 pipe_name(pipe),
235123e81d69SAdam Jackson 					 I915_READ(FDI_RX_IIR(pipe)));
23528664281bSPaulo Zanoni 
23538664281bSPaulo Zanoni 	if (pch_iir & SDE_ERROR_CPT)
235491d14251STvrtko Ursulin 		cpt_serr_int_handler(dev_priv);
235523e81d69SAdam Jackson }
235623e81d69SAdam Jackson 
235791d14251STvrtko Ursulin static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
23586dbf30ceSVille Syrjälä {
23596dbf30ceSVille Syrjälä 	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
23606dbf30ceSVille Syrjälä 		~SDE_PORTE_HOTPLUG_SPT;
23616dbf30ceSVille Syrjälä 	u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
23626dbf30ceSVille Syrjälä 	u32 pin_mask = 0, long_mask = 0;
23636dbf30ceSVille Syrjälä 
23646dbf30ceSVille Syrjälä 	if (hotplug_trigger) {
23656dbf30ceSVille Syrjälä 		u32 dig_hotplug_reg;
23666dbf30ceSVille Syrjälä 
23676dbf30ceSVille Syrjälä 		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
23686dbf30ceSVille Syrjälä 		I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
23696dbf30ceSVille Syrjälä 
2370cf53902fSRodrigo Vivi 		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2371cf53902fSRodrigo Vivi 				   hotplug_trigger, dig_hotplug_reg, hpd_spt,
237274c0b395SVille Syrjälä 				   spt_port_hotplug_long_detect);
23736dbf30ceSVille Syrjälä 	}
23746dbf30ceSVille Syrjälä 
23756dbf30ceSVille Syrjälä 	if (hotplug2_trigger) {
23766dbf30ceSVille Syrjälä 		u32 dig_hotplug_reg;
23776dbf30ceSVille Syrjälä 
23786dbf30ceSVille Syrjälä 		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2);
23796dbf30ceSVille Syrjälä 		I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg);
23806dbf30ceSVille Syrjälä 
2381cf53902fSRodrigo Vivi 		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2382cf53902fSRodrigo Vivi 				   hotplug2_trigger, dig_hotplug_reg, hpd_spt,
23836dbf30ceSVille Syrjälä 				   spt_port_hotplug2_long_detect);
23846dbf30ceSVille Syrjälä 	}
23856dbf30ceSVille Syrjälä 
23866dbf30ceSVille Syrjälä 	if (pin_mask)
238791d14251STvrtko Ursulin 		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
23886dbf30ceSVille Syrjälä 
23896dbf30ceSVille Syrjälä 	if (pch_iir & SDE_GMBUS_CPT)
239091d14251STvrtko Ursulin 		gmbus_irq_handler(dev_priv);
23916dbf30ceSVille Syrjälä }
23926dbf30ceSVille Syrjälä 
239391d14251STvrtko Ursulin static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv,
239491d14251STvrtko Ursulin 				u32 hotplug_trigger,
239540e56410SVille Syrjälä 				const u32 hpd[HPD_NUM_PINS])
2396c008bc6eSPaulo Zanoni {
2397e4ce95aaSVille Syrjälä 	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2398e4ce95aaSVille Syrjälä 
2399e4ce95aaSVille Syrjälä 	dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
2400e4ce95aaSVille Syrjälä 	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);
2401e4ce95aaSVille Syrjälä 
2402cf53902fSRodrigo Vivi 	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger,
240340e56410SVille Syrjälä 			   dig_hotplug_reg, hpd,
2404e4ce95aaSVille Syrjälä 			   ilk_port_hotplug_long_detect);
240540e56410SVille Syrjälä 
240691d14251STvrtko Ursulin 	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2407e4ce95aaSVille Syrjälä }
2408c008bc6eSPaulo Zanoni 
240991d14251STvrtko Ursulin static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
241091d14251STvrtko Ursulin 				    u32 de_iir)
241140e56410SVille Syrjälä {
241240e56410SVille Syrjälä 	enum pipe pipe;
241340e56410SVille Syrjälä 	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;
241440e56410SVille Syrjälä 
241540e56410SVille Syrjälä 	if (hotplug_trigger)
241691d14251STvrtko Ursulin 		ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ilk);
241740e56410SVille Syrjälä 
2418c008bc6eSPaulo Zanoni 	if (de_iir & DE_AUX_CHANNEL_A)
241991d14251STvrtko Ursulin 		dp_aux_irq_handler(dev_priv);
2420c008bc6eSPaulo Zanoni 
2421c008bc6eSPaulo Zanoni 	if (de_iir & DE_GSE)
242291d14251STvrtko Ursulin 		intel_opregion_asle_intr(dev_priv);
2423c008bc6eSPaulo Zanoni 
2424c008bc6eSPaulo Zanoni 	if (de_iir & DE_POISON)
2425c008bc6eSPaulo Zanoni 		DRM_ERROR("Poison interrupt\n");
2426c008bc6eSPaulo Zanoni 
2427055e393fSDamien Lespiau 	for_each_pipe(dev_priv, pipe) {
2428fd3a4024SDaniel Vetter 		if (de_iir & DE_PIPE_VBLANK(pipe))
2429fd3a4024SDaniel Vetter 			drm_handle_vblank(&dev_priv->drm, pipe);
2430c008bc6eSPaulo Zanoni 
243140da17c2SDaniel Vetter 		if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
24321f7247c0SDaniel Vetter 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2433c008bc6eSPaulo Zanoni 
243440da17c2SDaniel Vetter 		if (de_iir & DE_PIPE_CRC_DONE(pipe))
243591d14251STvrtko Ursulin 			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
2436c008bc6eSPaulo Zanoni 	}
2437c008bc6eSPaulo Zanoni 
2438c008bc6eSPaulo Zanoni 	/* check event from PCH */
2439c008bc6eSPaulo Zanoni 	if (de_iir & DE_PCH_EVENT) {
2440c008bc6eSPaulo Zanoni 		u32 pch_iir = I915_READ(SDEIIR);
2441c008bc6eSPaulo Zanoni 
244291d14251STvrtko Ursulin 		if (HAS_PCH_CPT(dev_priv))
244391d14251STvrtko Ursulin 			cpt_irq_handler(dev_priv, pch_iir);
2444c008bc6eSPaulo Zanoni 		else
244591d14251STvrtko Ursulin 			ibx_irq_handler(dev_priv, pch_iir);
2446c008bc6eSPaulo Zanoni 
2447c008bc6eSPaulo Zanoni 		/* should clear PCH hotplug event before clearing CPU irq */
2448c008bc6eSPaulo Zanoni 		I915_WRITE(SDEIIR, pch_iir);
2449c008bc6eSPaulo Zanoni 	}
2450c008bc6eSPaulo Zanoni 
245191d14251STvrtko Ursulin 	if (IS_GEN5(dev_priv) && de_iir & DE_PCU_EVENT)
245291d14251STvrtko Ursulin 		ironlake_rps_change_irq_handler(dev_priv);
2453c008bc6eSPaulo Zanoni }
2454c008bc6eSPaulo Zanoni 
245591d14251STvrtko Ursulin static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
245691d14251STvrtko Ursulin 				    u32 de_iir)
24579719fb98SPaulo Zanoni {
245807d27e20SDamien Lespiau 	enum pipe pipe;
245923bb4cb5SVille Syrjälä 	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;
246023bb4cb5SVille Syrjälä 
246140e56410SVille Syrjälä 	if (hotplug_trigger)
246291d14251STvrtko Ursulin 		ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ivb);
24639719fb98SPaulo Zanoni 
24649719fb98SPaulo Zanoni 	if (de_iir & DE_ERR_INT_IVB)
246591d14251STvrtko Ursulin 		ivb_err_int_handler(dev_priv);
24669719fb98SPaulo Zanoni 
24679719fb98SPaulo Zanoni 	if (de_iir & DE_AUX_CHANNEL_A_IVB)
246891d14251STvrtko Ursulin 		dp_aux_irq_handler(dev_priv);
24699719fb98SPaulo Zanoni 
24709719fb98SPaulo Zanoni 	if (de_iir & DE_GSE_IVB)
247191d14251STvrtko Ursulin 		intel_opregion_asle_intr(dev_priv);
24729719fb98SPaulo Zanoni 
2473055e393fSDamien Lespiau 	for_each_pipe(dev_priv, pipe) {
2474fd3a4024SDaniel Vetter 		if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)))
2475fd3a4024SDaniel Vetter 			drm_handle_vblank(&dev_priv->drm, pipe);
24769719fb98SPaulo Zanoni 	}
24779719fb98SPaulo Zanoni 
24789719fb98SPaulo Zanoni 	/* check event from PCH */
247991d14251STvrtko Ursulin 	if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) {
24809719fb98SPaulo Zanoni 		u32 pch_iir = I915_READ(SDEIIR);
24819719fb98SPaulo Zanoni 
248291d14251STvrtko Ursulin 		cpt_irq_handler(dev_priv, pch_iir);
24839719fb98SPaulo Zanoni 
24849719fb98SPaulo Zanoni 		/* clear PCH hotplug event before clearing CPU irq */
24859719fb98SPaulo Zanoni 		I915_WRITE(SDEIIR, pch_iir);
24869719fb98SPaulo Zanoni 	}
24879719fb98SPaulo Zanoni }
24889719fb98SPaulo Zanoni 
248972c90f62SOscar Mateo /*
249072c90f62SOscar Mateo  * To handle irqs with the minimum potential races with fresh interrupts, we:
249172c90f62SOscar Mateo  * 1 - Disable Master Interrupt Control.
249272c90f62SOscar Mateo  * 2 - Find the source(s) of the interrupt.
249372c90f62SOscar Mateo  * 3 - Clear the Interrupt Identity bits (IIR).
249472c90f62SOscar Mateo  * 4 - Process the interrupt(s) that had bits set in the IIRs.
249572c90f62SOscar Mateo  * 5 - Re-enable Master Interrupt Control.
249672c90f62SOscar Mateo  */
2497f1af8fc1SPaulo Zanoni static irqreturn_t ironlake_irq_handler(int irq, void *arg)
2498b1f14ad0SJesse Barnes {
249945a83f84SDaniel Vetter 	struct drm_device *dev = arg;
2500fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
2501f1af8fc1SPaulo Zanoni 	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
25020e43406bSChris Wilson 	irqreturn_t ret = IRQ_NONE;
2503b1f14ad0SJesse Barnes 
25042dd2a883SImre Deak 	if (!intel_irqs_enabled(dev_priv))
25052dd2a883SImre Deak 		return IRQ_NONE;
25062dd2a883SImre Deak 
25071f814dacSImre Deak 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
25081f814dacSImre Deak 	disable_rpm_wakeref_asserts(dev_priv);
25091f814dacSImre Deak 
2510b1f14ad0SJesse Barnes 	/* disable master interrupt before clearing iir  */
2511b1f14ad0SJesse Barnes 	de_ier = I915_READ(DEIER);
2512b1f14ad0SJesse Barnes 	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
251323a78516SPaulo Zanoni 	POSTING_READ(DEIER);
25140e43406bSChris Wilson 
251544498aeaSPaulo Zanoni 	/* Disable south interrupts. We'll only write to SDEIIR once, so further
251644498aeaSPaulo Zanoni 	 * interrupts will be stored on its back queue, and then we'll be
251744498aeaSPaulo Zanoni 	 * able to process them after we restore SDEIER (as soon as we restore
251844498aeaSPaulo Zanoni 	 * it, we'll get an interrupt if SDEIIR still has something to process
251944498aeaSPaulo Zanoni 	 * due to its back queue). */
252091d14251STvrtko Ursulin 	if (!HAS_PCH_NOP(dev_priv)) {
252144498aeaSPaulo Zanoni 		sde_ier = I915_READ(SDEIER);
252244498aeaSPaulo Zanoni 		I915_WRITE(SDEIER, 0);
252344498aeaSPaulo Zanoni 		POSTING_READ(SDEIER);
2524ab5c608bSBen Widawsky 	}
252544498aeaSPaulo Zanoni 
252672c90f62SOscar Mateo 	/* Find, clear, then process each source of interrupt */
252772c90f62SOscar Mateo 
25280e43406bSChris Wilson 	gt_iir = I915_READ(GTIIR);
25290e43406bSChris Wilson 	if (gt_iir) {
253072c90f62SOscar Mateo 		I915_WRITE(GTIIR, gt_iir);
253172c90f62SOscar Mateo 		ret = IRQ_HANDLED;
253291d14251STvrtko Ursulin 		if (INTEL_GEN(dev_priv) >= 6)
2533261e40b8SVille Syrjälä 			snb_gt_irq_handler(dev_priv, gt_iir);
2534d8fc8a47SPaulo Zanoni 		else
2535261e40b8SVille Syrjälä 			ilk_gt_irq_handler(dev_priv, gt_iir);
25360e43406bSChris Wilson 	}
2537b1f14ad0SJesse Barnes 
2538b1f14ad0SJesse Barnes 	de_iir = I915_READ(DEIIR);
25390e43406bSChris Wilson 	if (de_iir) {
254072c90f62SOscar Mateo 		I915_WRITE(DEIIR, de_iir);
254172c90f62SOscar Mateo 		ret = IRQ_HANDLED;
254291d14251STvrtko Ursulin 		if (INTEL_GEN(dev_priv) >= 7)
254391d14251STvrtko Ursulin 			ivb_display_irq_handler(dev_priv, de_iir);
2544f1af8fc1SPaulo Zanoni 		else
254591d14251STvrtko Ursulin 			ilk_display_irq_handler(dev_priv, de_iir);
25460e43406bSChris Wilson 	}
25470e43406bSChris Wilson 
254891d14251STvrtko Ursulin 	if (INTEL_GEN(dev_priv) >= 6) {
2549f1af8fc1SPaulo Zanoni 		u32 pm_iir = I915_READ(GEN6_PMIIR);
25500e43406bSChris Wilson 		if (pm_iir) {
2551b1f14ad0SJesse Barnes 			I915_WRITE(GEN6_PMIIR, pm_iir);
25520e43406bSChris Wilson 			ret = IRQ_HANDLED;
255372c90f62SOscar Mateo 			gen6_rps_irq_handler(dev_priv, pm_iir);
25540e43406bSChris Wilson 		}
2555f1af8fc1SPaulo Zanoni 	}
2556b1f14ad0SJesse Barnes 
2557b1f14ad0SJesse Barnes 	I915_WRITE(DEIER, de_ier);
2558b1f14ad0SJesse Barnes 	POSTING_READ(DEIER);
255991d14251STvrtko Ursulin 	if (!HAS_PCH_NOP(dev_priv)) {
256044498aeaSPaulo Zanoni 		I915_WRITE(SDEIER, sde_ier);
256144498aeaSPaulo Zanoni 		POSTING_READ(SDEIER);
2562ab5c608bSBen Widawsky 	}
2563b1f14ad0SJesse Barnes 
25641f814dacSImre Deak 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
25651f814dacSImre Deak 	enable_rpm_wakeref_asserts(dev_priv);
25661f814dacSImre Deak 
2567b1f14ad0SJesse Barnes 	return ret;
2568b1f14ad0SJesse Barnes }
2569b1f14ad0SJesse Barnes 
257091d14251STvrtko Ursulin static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv,
257191d14251STvrtko Ursulin 				u32 hotplug_trigger,
257240e56410SVille Syrjälä 				const u32 hpd[HPD_NUM_PINS])
2573d04a492dSShashank Sharma {
2574cebd87a0SVille Syrjälä 	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2575d04a492dSShashank Sharma 
2576a52bb15bSVille Syrjälä 	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2577a52bb15bSVille Syrjälä 	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2578d04a492dSShashank Sharma 
2579cf53902fSRodrigo Vivi 	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger,
258040e56410SVille Syrjälä 			   dig_hotplug_reg, hpd,
2581cebd87a0SVille Syrjälä 			   bxt_port_hotplug_long_detect);
258240e56410SVille Syrjälä 
258391d14251STvrtko Ursulin 	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2584d04a492dSShashank Sharma }
2585d04a492dSShashank Sharma 
2586f11a0f46STvrtko Ursulin static irqreturn_t
2587f11a0f46STvrtko Ursulin gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
2588abd58f01SBen Widawsky {
2589abd58f01SBen Widawsky 	irqreturn_t ret = IRQ_NONE;
2590f11a0f46STvrtko Ursulin 	u32 iir;
2591c42664ccSDaniel Vetter 	enum pipe pipe;
259288e04703SJesse Barnes 
2593abd58f01SBen Widawsky 	if (master_ctl & GEN8_DE_MISC_IRQ) {
2594e32192e1STvrtko Ursulin 		iir = I915_READ(GEN8_DE_MISC_IIR);
2595e32192e1STvrtko Ursulin 		if (iir) {
2596e32192e1STvrtko Ursulin 			I915_WRITE(GEN8_DE_MISC_IIR, iir);
2597abd58f01SBen Widawsky 			ret = IRQ_HANDLED;
2598e32192e1STvrtko Ursulin 			if (iir & GEN8_DE_MISC_GSE)
259991d14251STvrtko Ursulin 				intel_opregion_asle_intr(dev_priv);
260038cc46d7SOscar Mateo 			else
260138cc46d7SOscar Mateo 				DRM_ERROR("Unexpected DE Misc interrupt\n");
2602abd58f01SBen Widawsky 		} else {
260338cc46d7SOscar Mateo 			DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
260438cc46d7SOscar Mateo 		}
2605abd58f01SBen Widawsky 	}
2606abd58f01SBen Widawsky 
26076d766f02SDaniel Vetter 	if (master_ctl & GEN8_DE_PORT_IRQ) {
2608e32192e1STvrtko Ursulin 		iir = I915_READ(GEN8_DE_PORT_IIR);
2609e32192e1STvrtko Ursulin 		if (iir) {
2610e32192e1STvrtko Ursulin 			u32 tmp_mask;
2611d04a492dSShashank Sharma 			bool found = false;
2612cebd87a0SVille Syrjälä 
2613e32192e1STvrtko Ursulin 			I915_WRITE(GEN8_DE_PORT_IIR, iir);
26146d766f02SDaniel Vetter 			ret = IRQ_HANDLED;
261588e04703SJesse Barnes 
2616e32192e1STvrtko Ursulin 			tmp_mask = GEN8_AUX_CHANNEL_A;
2617bca2bf2aSPandiyan, Dhinakaran 			if (INTEL_GEN(dev_priv) >= 9)
2618e32192e1STvrtko Ursulin 				tmp_mask |= GEN9_AUX_CHANNEL_B |
2619e32192e1STvrtko Ursulin 					    GEN9_AUX_CHANNEL_C |
2620e32192e1STvrtko Ursulin 					    GEN9_AUX_CHANNEL_D;
2621e32192e1STvrtko Ursulin 
2622a324fcacSRodrigo Vivi 			if (IS_CNL_WITH_PORT_F(dev_priv))
2623a324fcacSRodrigo Vivi 				tmp_mask |= CNL_AUX_CHANNEL_F;
2624a324fcacSRodrigo Vivi 
2625e32192e1STvrtko Ursulin 			if (iir & tmp_mask) {
262691d14251STvrtko Ursulin 				dp_aux_irq_handler(dev_priv);
2627d04a492dSShashank Sharma 				found = true;
2628d04a492dSShashank Sharma 			}
2629d04a492dSShashank Sharma 
2630cc3f90f0SAnder Conselvan de Oliveira 			if (IS_GEN9_LP(dev_priv)) {
2631e32192e1STvrtko Ursulin 				tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK;
2632e32192e1STvrtko Ursulin 				if (tmp_mask) {
263391d14251STvrtko Ursulin 					bxt_hpd_irq_handler(dev_priv, tmp_mask,
263491d14251STvrtko Ursulin 							    hpd_bxt);
2635d04a492dSShashank Sharma 					found = true;
2636d04a492dSShashank Sharma 				}
2637e32192e1STvrtko Ursulin 			} else if (IS_BROADWELL(dev_priv)) {
2638e32192e1STvrtko Ursulin 				tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG;
2639e32192e1STvrtko Ursulin 				if (tmp_mask) {
264091d14251STvrtko Ursulin 					ilk_hpd_irq_handler(dev_priv,
264191d14251STvrtko Ursulin 							    tmp_mask, hpd_bdw);
2642e32192e1STvrtko Ursulin 					found = true;
2643e32192e1STvrtko Ursulin 				}
2644e32192e1STvrtko Ursulin 			}
2645d04a492dSShashank Sharma 
2646cc3f90f0SAnder Conselvan de Oliveira 			if (IS_GEN9_LP(dev_priv) && (iir & BXT_DE_PORT_GMBUS)) {
264791d14251STvrtko Ursulin 				gmbus_irq_handler(dev_priv);
26489e63743eSShashank Sharma 				found = true;
26499e63743eSShashank Sharma 			}
26509e63743eSShashank Sharma 
2651d04a492dSShashank Sharma 			if (!found)
265238cc46d7SOscar Mateo 				DRM_ERROR("Unexpected DE Port interrupt\n");
26536d766f02SDaniel Vetter 		} else {
265438cc46d7SOscar Mateo 			DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
265538cc46d7SOscar Mateo 		}
26566d766f02SDaniel Vetter 	}
26576d766f02SDaniel Vetter 
2658055e393fSDamien Lespiau 	for_each_pipe(dev_priv, pipe) {
2659fd3a4024SDaniel Vetter 		u32 fault_errors;
2660abd58f01SBen Widawsky 
2661c42664ccSDaniel Vetter 		if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
2662c42664ccSDaniel Vetter 			continue;
2663c42664ccSDaniel Vetter 
2664e32192e1STvrtko Ursulin 		iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
2665e32192e1STvrtko Ursulin 		if (!iir) {
2666e32192e1STvrtko Ursulin 			DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
2667e32192e1STvrtko Ursulin 			continue;
2668e32192e1STvrtko Ursulin 		}
2669770de83dSDamien Lespiau 
2670e32192e1STvrtko Ursulin 		ret = IRQ_HANDLED;
2671e32192e1STvrtko Ursulin 		I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir);
2672e32192e1STvrtko Ursulin 
2673fd3a4024SDaniel Vetter 		if (iir & GEN8_PIPE_VBLANK)
2674fd3a4024SDaniel Vetter 			drm_handle_vblank(&dev_priv->drm, pipe);
2675abd58f01SBen Widawsky 
2676e32192e1STvrtko Ursulin 		if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
267791d14251STvrtko Ursulin 			hsw_pipe_crc_irq_handler(dev_priv, pipe);
26780fbe7870SDaniel Vetter 
2679e32192e1STvrtko Ursulin 		if (iir & GEN8_PIPE_FIFO_UNDERRUN)
2680e32192e1STvrtko Ursulin 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
268138d83c96SDaniel Vetter 
2682e32192e1STvrtko Ursulin 		fault_errors = iir;
2683bca2bf2aSPandiyan, Dhinakaran 		if (INTEL_GEN(dev_priv) >= 9)
2684e32192e1STvrtko Ursulin 			fault_errors &= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
2685770de83dSDamien Lespiau 		else
2686e32192e1STvrtko Ursulin 			fault_errors &= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
2687770de83dSDamien Lespiau 
2688770de83dSDamien Lespiau 		if (fault_errors)
26891353ec38STvrtko Ursulin 			DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
269030100f2bSDaniel Vetter 				  pipe_name(pipe),
2691e32192e1STvrtko Ursulin 				  fault_errors);
2692abd58f01SBen Widawsky 	}
2693abd58f01SBen Widawsky 
269491d14251STvrtko Ursulin 	if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) &&
2695266ea3d9SShashank Sharma 	    master_ctl & GEN8_DE_PCH_IRQ) {
269692d03a80SDaniel Vetter 		/*
269792d03a80SDaniel Vetter 		 * FIXME(BDW): Assume for now that the new interrupt handling
269892d03a80SDaniel Vetter 		 * scheme also closed the SDE interrupt handling race we've seen
269992d03a80SDaniel Vetter 		 * on older pch-split platforms. But this needs testing.
270092d03a80SDaniel Vetter 		 */
2701e32192e1STvrtko Ursulin 		iir = I915_READ(SDEIIR);
2702e32192e1STvrtko Ursulin 		if (iir) {
2703e32192e1STvrtko Ursulin 			I915_WRITE(SDEIIR, iir);
270492d03a80SDaniel Vetter 			ret = IRQ_HANDLED;
27056dbf30ceSVille Syrjälä 
27067b22b8c4SRodrigo Vivi 			if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv) ||
27077b22b8c4SRodrigo Vivi 			    HAS_PCH_CNP(dev_priv))
270891d14251STvrtko Ursulin 				spt_irq_handler(dev_priv, iir);
27096dbf30ceSVille Syrjälä 			else
271091d14251STvrtko Ursulin 				cpt_irq_handler(dev_priv, iir);
27112dfb0b81SJani Nikula 		} else {
27122dfb0b81SJani Nikula 			/*
27132dfb0b81SJani Nikula 			 * Like on previous PCH there seems to be something
27142dfb0b81SJani Nikula 			 * fishy going on with forwarding PCH interrupts.
27152dfb0b81SJani Nikula 			 */
27162dfb0b81SJani Nikula 			DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n");
27172dfb0b81SJani Nikula 		}
271892d03a80SDaniel Vetter 	}
271992d03a80SDaniel Vetter 
2720f11a0f46STvrtko Ursulin 	return ret;
2721f11a0f46STvrtko Ursulin }
2722f11a0f46STvrtko Ursulin 
2723f11a0f46STvrtko Ursulin static irqreturn_t gen8_irq_handler(int irq, void *arg)
2724f11a0f46STvrtko Ursulin {
2725f0fd96f5SChris Wilson 	struct drm_i915_private *dev_priv = to_i915(arg);
2726f11a0f46STvrtko Ursulin 	u32 master_ctl;
2727f0fd96f5SChris Wilson 	u32 gt_iir[4];
2728f11a0f46STvrtko Ursulin 
2729f11a0f46STvrtko Ursulin 	if (!intel_irqs_enabled(dev_priv))
2730f11a0f46STvrtko Ursulin 		return IRQ_NONE;
2731f11a0f46STvrtko Ursulin 
2732f11a0f46STvrtko Ursulin 	master_ctl = I915_READ_FW(GEN8_MASTER_IRQ);
2733f11a0f46STvrtko Ursulin 	master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
2734f11a0f46STvrtko Ursulin 	if (!master_ctl)
2735f11a0f46STvrtko Ursulin 		return IRQ_NONE;
2736f11a0f46STvrtko Ursulin 
2737f11a0f46STvrtko Ursulin 	I915_WRITE_FW(GEN8_MASTER_IRQ, 0);
2738f11a0f46STvrtko Ursulin 
2739f11a0f46STvrtko Ursulin 	/* Find, clear, then process each source of interrupt */
274055ef72f2SChris Wilson 	gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);
2741f0fd96f5SChris Wilson 
2742f0fd96f5SChris Wilson 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
2743f0fd96f5SChris Wilson 	if (master_ctl & ~GEN8_GT_IRQS) {
2744f0fd96f5SChris Wilson 		disable_rpm_wakeref_asserts(dev_priv);
274555ef72f2SChris Wilson 		gen8_de_irq_handler(dev_priv, master_ctl);
2746f0fd96f5SChris Wilson 		enable_rpm_wakeref_asserts(dev_priv);
2747f0fd96f5SChris Wilson 	}
2748f11a0f46STvrtko Ursulin 
2749cb0d205eSChris Wilson 	I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
2750abd58f01SBen Widawsky 
2751f0fd96f5SChris Wilson 	gen8_gt_irq_handler(dev_priv, master_ctl, gt_iir);
27521f814dacSImre Deak 
275355ef72f2SChris Wilson 	return IRQ_HANDLED;
2754abd58f01SBen Widawsky }
2755abd58f01SBen Widawsky 
275636703e79SChris Wilson struct wedge_me {
275736703e79SChris Wilson 	struct delayed_work work;
275836703e79SChris Wilson 	struct drm_i915_private *i915;
275936703e79SChris Wilson 	const char *name;
276036703e79SChris Wilson };
276136703e79SChris Wilson 
276236703e79SChris Wilson static void wedge_me(struct work_struct *work)
276336703e79SChris Wilson {
276436703e79SChris Wilson 	struct wedge_me *w = container_of(work, typeof(*w), work.work);
276536703e79SChris Wilson 
276636703e79SChris Wilson 	dev_err(w->i915->drm.dev,
276736703e79SChris Wilson 		"%s timed out, cancelling all in-flight rendering.\n",
276836703e79SChris Wilson 		w->name);
276936703e79SChris Wilson 	i915_gem_set_wedged(w->i915);
277036703e79SChris Wilson }
277136703e79SChris Wilson 
277236703e79SChris Wilson static void __init_wedge(struct wedge_me *w,
277336703e79SChris Wilson 			 struct drm_i915_private *i915,
277436703e79SChris Wilson 			 long timeout,
277536703e79SChris Wilson 			 const char *name)
277636703e79SChris Wilson {
277736703e79SChris Wilson 	w->i915 = i915;
277836703e79SChris Wilson 	w->name = name;
277936703e79SChris Wilson 
278036703e79SChris Wilson 	INIT_DELAYED_WORK_ONSTACK(&w->work, wedge_me);
278136703e79SChris Wilson 	schedule_delayed_work(&w->work, timeout);
278236703e79SChris Wilson }
278336703e79SChris Wilson 
278436703e79SChris Wilson static void __fini_wedge(struct wedge_me *w)
278536703e79SChris Wilson {
278636703e79SChris Wilson 	cancel_delayed_work_sync(&w->work);
278736703e79SChris Wilson 	destroy_delayed_work_on_stack(&w->work);
278836703e79SChris Wilson 	w->i915 = NULL;
278936703e79SChris Wilson }
279036703e79SChris Wilson 
279136703e79SChris Wilson #define i915_wedge_on_timeout(W, DEV, TIMEOUT)				\
279236703e79SChris Wilson 	for (__init_wedge((W), (DEV), (TIMEOUT), __func__);		\
279336703e79SChris Wilson 	     (W)->i915;							\
279436703e79SChris Wilson 	     __fini_wedge((W)))
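/*
 * The for-loop above is a scoping trick: __init_wedge() arms the delayed work
 * and sets (W)->i915, so the body runs exactly once, and the "increment"
 * expression __fini_wedge() disarms the watchdog and clears (W)->i915 to
 * terminate the loop. If the body takes longer than TIMEOUT, wedge_me() fires
 * from the workqueue and wedges the GPU. The usage pattern (see
 * i915_reset_device() below) is roughly:
 *
 *	struct wedge_me w;
 *
 *	i915_wedge_on_timeout(&w, dev_priv, 5 * HZ) {
 *		... reset work that must finish within five seconds ...
 *	}
 */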
279536703e79SChris Wilson 
279651951ae7SMika Kuoppala static u32
2797f744dbc2SMika Kuoppala gen11_gt_engine_identity(struct drm_i915_private * const i915,
279851951ae7SMika Kuoppala 			 const unsigned int bank, const unsigned int bit)
279951951ae7SMika Kuoppala {
280051951ae7SMika Kuoppala 	void __iomem * const regs = i915->regs;
280151951ae7SMika Kuoppala 	u32 timeout_ts;
280251951ae7SMika Kuoppala 	u32 ident;
280351951ae7SMika Kuoppala 
2804*96606f3bSOscar Mateo 	lockdep_assert_held(&i915->irq_lock);
2805*96606f3bSOscar Mateo 
280651951ae7SMika Kuoppala 	raw_reg_write(regs, GEN11_IIR_REG_SELECTOR(bank), BIT(bit));
280751951ae7SMika Kuoppala 
280851951ae7SMika Kuoppala 	/*
280951951ae7SMika Kuoppala 	 * NB: Specs do not specify how long to spin wait,
281051951ae7SMika Kuoppala 	 * so we do ~100us as an educated guess.
281151951ae7SMika Kuoppala 	 */
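	/* local_clock() returns ns; >> 10 cheaply approximates ns -> us. */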
281251951ae7SMika Kuoppala 	timeout_ts = (local_clock() >> 10) + 100;
281351951ae7SMika Kuoppala 	do {
281451951ae7SMika Kuoppala 		ident = raw_reg_read(regs, GEN11_INTR_IDENTITY_REG(bank));
281551951ae7SMika Kuoppala 	} while (!(ident & GEN11_INTR_DATA_VALID) &&
281651951ae7SMika Kuoppala 		 !time_after32(local_clock() >> 10, timeout_ts));
281751951ae7SMika Kuoppala 
281851951ae7SMika Kuoppala 	if (unlikely(!(ident & GEN11_INTR_DATA_VALID))) {
281951951ae7SMika Kuoppala 		DRM_ERROR("INTR_IDENTITY_REG%u:%u 0x%08x not valid!\n",
282051951ae7SMika Kuoppala 			  bank, bit, ident);
282151951ae7SMika Kuoppala 		return 0;
282251951ae7SMika Kuoppala 	}
282351951ae7SMika Kuoppala 
282451951ae7SMika Kuoppala 	raw_reg_write(regs, GEN11_INTR_IDENTITY_REG(bank),
282551951ae7SMika Kuoppala 		      GEN11_INTR_DATA_VALID);
282651951ae7SMika Kuoppala 
2827f744dbc2SMika Kuoppala 	return ident;
2828f744dbc2SMika Kuoppala }
2829f744dbc2SMika Kuoppala 
2830f744dbc2SMika Kuoppala static void
2831f744dbc2SMika Kuoppala gen11_other_irq_handler(struct drm_i915_private * const i915,
2832f744dbc2SMika Kuoppala 			const u8 instance, const u16 iir)
2833f744dbc2SMika Kuoppala {
2834d02b98b8SOscar Mateo 	if (instance == OTHER_GTPM_INSTANCE)
2835d02b98b8SOscar Mateo 		return gen6_rps_irq_handler(i915, iir);
2836d02b98b8SOscar Mateo 
2837f744dbc2SMika Kuoppala 	WARN_ONCE(1, "unhandled other interrupt instance=0x%x, iir=0x%x\n",
2838f744dbc2SMika Kuoppala 		  instance, iir);
2839f744dbc2SMika Kuoppala }
2840f744dbc2SMika Kuoppala 
2841f744dbc2SMika Kuoppala static void
2842f744dbc2SMika Kuoppala gen11_engine_irq_handler(struct drm_i915_private * const i915,
2843f744dbc2SMika Kuoppala 			 const u8 class, const u8 instance, const u16 iir)
2844f744dbc2SMika Kuoppala {
2845f744dbc2SMika Kuoppala 	struct intel_engine_cs *engine;
2846f744dbc2SMika Kuoppala 
2847f744dbc2SMika Kuoppala 	if (instance <= MAX_ENGINE_INSTANCE)
2848f744dbc2SMika Kuoppala 		engine = i915->engine_class[class][instance];
2849f744dbc2SMika Kuoppala 	else
2850f744dbc2SMika Kuoppala 		engine = NULL;
2851f744dbc2SMika Kuoppala 
2852f744dbc2SMika Kuoppala 	if (likely(engine))
2853f744dbc2SMika Kuoppala 		return gen8_cs_irq_handler(engine, iir);
2854f744dbc2SMika Kuoppala 
2855f744dbc2SMika Kuoppala 	WARN_ONCE(1, "unhandled engine interrupt class=0x%x, instance=0x%x\n",
2856f744dbc2SMika Kuoppala 		  class, instance);
2857f744dbc2SMika Kuoppala }
2858f744dbc2SMika Kuoppala 
2859f744dbc2SMika Kuoppala static void
2860f744dbc2SMika Kuoppala gen11_gt_identity_handler(struct drm_i915_private * const i915,
2861f744dbc2SMika Kuoppala 			  const u32 identity)
2862f744dbc2SMika Kuoppala {
2863f744dbc2SMika Kuoppala 	const u8 class = GEN11_INTR_ENGINE_CLASS(identity);
2864f744dbc2SMika Kuoppala 	const u8 instance = GEN11_INTR_ENGINE_INSTANCE(identity);
2865f744dbc2SMika Kuoppala 	const u16 intr = GEN11_INTR_ENGINE_INTR(identity);
2866f744dbc2SMika Kuoppala 
2867f744dbc2SMika Kuoppala 	if (unlikely(!intr))
2868f744dbc2SMika Kuoppala 		return;
2869f744dbc2SMika Kuoppala 
2870f744dbc2SMika Kuoppala 	if (class <= COPY_ENGINE_CLASS)
2871f744dbc2SMika Kuoppala 		return gen11_engine_irq_handler(i915, class, instance, intr);
2872f744dbc2SMika Kuoppala 
2873f744dbc2SMika Kuoppala 	if (class == OTHER_CLASS)
2874f744dbc2SMika Kuoppala 		return gen11_other_irq_handler(i915, instance, intr);
2875f744dbc2SMika Kuoppala 
2876f744dbc2SMika Kuoppala 	WARN_ONCE(1, "unknown interrupt class=0x%x, instance=0x%x, intr=0x%x\n",
2877f744dbc2SMika Kuoppala 		  class, instance, intr);
287851951ae7SMika Kuoppala }
287951951ae7SMika Kuoppala 
288051951ae7SMika Kuoppala static void
2881*96606f3bSOscar Mateo gen11_gt_bank_handler(struct drm_i915_private * const i915,
2882*96606f3bSOscar Mateo 		      const unsigned int bank)
288351951ae7SMika Kuoppala {
288451951ae7SMika Kuoppala 	void __iomem * const regs = i915->regs;
288551951ae7SMika Kuoppala 	unsigned long intr_dw;
288651951ae7SMika Kuoppala 	unsigned int bit;
288751951ae7SMika Kuoppala 
2888*96606f3bSOscar Mateo 	lockdep_assert_held(&i915->irq_lock);
288951951ae7SMika Kuoppala 
289051951ae7SMika Kuoppala 	intr_dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
289151951ae7SMika Kuoppala 
289251951ae7SMika Kuoppala 	if (unlikely(!intr_dw)) {
289351951ae7SMika Kuoppala 		DRM_ERROR("GT_INTR_DW%u blank!\n", bank);
2894*96606f3bSOscar Mateo 		return;
289551951ae7SMika Kuoppala 	}
289651951ae7SMika Kuoppala 
289751951ae7SMika Kuoppala 	for_each_set_bit(bit, &intr_dw, 32) {
2898f744dbc2SMika Kuoppala 		const u32 ident = gen11_gt_engine_identity(i915,
2899f744dbc2SMika Kuoppala 							   bank, bit);
290051951ae7SMika Kuoppala 
2901f744dbc2SMika Kuoppala 		gen11_gt_identity_handler(i915, ident);
290251951ae7SMika Kuoppala 	}
290351951ae7SMika Kuoppala 
290451951ae7SMika Kuoppala 	/* Clear only after the shared dword has been serviced for each engine */
290551951ae7SMika Kuoppala 	raw_reg_write(regs, GEN11_GT_INTR_DW(bank), intr_dw);
290651951ae7SMika Kuoppala }
2907*96606f3bSOscar Mateo 
2908*96606f3bSOscar Mateo static void
2909*96606f3bSOscar Mateo gen11_gt_irq_handler(struct drm_i915_private * const i915,
2910*96606f3bSOscar Mateo 		     const u32 master_ctl)
2911*96606f3bSOscar Mateo {
2912*96606f3bSOscar Mateo 	unsigned int bank;
2913*96606f3bSOscar Mateo 
2914*96606f3bSOscar Mateo 	spin_lock(&i915->irq_lock);
2915*96606f3bSOscar Mateo 
2916*96606f3bSOscar Mateo 	for (bank = 0; bank < 2; bank++) {
2917*96606f3bSOscar Mateo 		if (master_ctl & GEN11_GT_DW_IRQ(bank))
2918*96606f3bSOscar Mateo 			gen11_gt_bank_handler(i915, bank);
2919*96606f3bSOscar Mateo 	}
2920*96606f3bSOscar Mateo 
2921*96606f3bSOscar Mateo 	spin_unlock(&i915->irq_lock);
292251951ae7SMika Kuoppala }
292351951ae7SMika Kuoppala 
292451951ae7SMika Kuoppala static irqreturn_t gen11_irq_handler(int irq, void *arg)
292551951ae7SMika Kuoppala {
292651951ae7SMika Kuoppala 	struct drm_i915_private * const i915 = to_i915(arg);
292751951ae7SMika Kuoppala 	void __iomem * const regs = i915->regs;
292851951ae7SMika Kuoppala 	u32 master_ctl;
292951951ae7SMika Kuoppala 
293051951ae7SMika Kuoppala 	if (!intel_irqs_enabled(i915))
293151951ae7SMika Kuoppala 		return IRQ_NONE;
293251951ae7SMika Kuoppala 
293351951ae7SMika Kuoppala 	master_ctl = raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
293451951ae7SMika Kuoppala 	master_ctl &= ~GEN11_MASTER_IRQ;
293551951ae7SMika Kuoppala 	if (!master_ctl)
293651951ae7SMika Kuoppala 		return IRQ_NONE;
293751951ae7SMika Kuoppala 
293851951ae7SMika Kuoppala 	/* Disable interrupts. */
293951951ae7SMika Kuoppala 	raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0);
294051951ae7SMika Kuoppala 
294151951ae7SMika Kuoppala 	/* Find, clear, then process each source of interrupt. */
294251951ae7SMika Kuoppala 	gen11_gt_irq_handler(i915, master_ctl);
294351951ae7SMika Kuoppala 
294451951ae7SMika Kuoppala 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
294551951ae7SMika Kuoppala 	if (master_ctl & GEN11_DISPLAY_IRQ) {
294651951ae7SMika Kuoppala 		const u32 disp_ctl = raw_reg_read(regs, GEN11_DISPLAY_INT_CTL);
294751951ae7SMika Kuoppala 
294851951ae7SMika Kuoppala 		disable_rpm_wakeref_asserts(i915);
294951951ae7SMika Kuoppala 		/*
295051951ae7SMika Kuoppala 		 * GEN11_DISPLAY_INT_CTL has same format as GEN8_MASTER_IRQ
295151951ae7SMika Kuoppala 		 * for the display related bits.
295251951ae7SMika Kuoppala 		 */
295351951ae7SMika Kuoppala 		gen8_de_irq_handler(i915, disp_ctl);
295451951ae7SMika Kuoppala 		enable_rpm_wakeref_asserts(i915);
295551951ae7SMika Kuoppala 	}
295651951ae7SMika Kuoppala 
295751951ae7SMika Kuoppala 	/* Acknowledge and enable interrupts. */
295851951ae7SMika Kuoppala 	raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ | master_ctl);
295951951ae7SMika Kuoppala 
296051951ae7SMika Kuoppala 	return IRQ_HANDLED;
296151951ae7SMika Kuoppala }
296251951ae7SMika Kuoppala 
2963ce800754SChris Wilson static void i915_reset_device(struct drm_i915_private *dev_priv,
2964ce800754SChris Wilson 			      const char *msg)
29658a905236SJesse Barnes {
2966ce800754SChris Wilson 	struct i915_gpu_error *error = &dev_priv->gpu_error;
296791c8a326SChris Wilson 	struct kobject *kobj = &dev_priv->drm.primary->kdev->kobj;
2968cce723edSBen Widawsky 	char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
2969cce723edSBen Widawsky 	char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
2970cce723edSBen Widawsky 	char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
297136703e79SChris Wilson 	struct wedge_me w;
29728a905236SJesse Barnes 
2973c033666aSChris Wilson 	kobject_uevent_env(kobj, KOBJ_CHANGE, error_event);
29748a905236SJesse Barnes 
297544d98a61SZhao Yakui 	DRM_DEBUG_DRIVER("resetting chip\n");
2976c033666aSChris Wilson 	kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event);
29771f83fee0SDaniel Vetter 
297836703e79SChris Wilson 	/* Use a watchdog to ensure that our reset completes */
297936703e79SChris Wilson 	i915_wedge_on_timeout(&w, dev_priv, 5*HZ) {
2980c033666aSChris Wilson 		intel_prepare_reset(dev_priv);
29817514747dSVille Syrjälä 
2982ce800754SChris Wilson 		error->reason = msg;
2983ce800754SChris Wilson 
298436703e79SChris Wilson 		/* Signal that locked waiters should reset the GPU */
2985ce800754SChris Wilson 		set_bit(I915_RESET_HANDOFF, &error->flags);
2986ce800754SChris Wilson 		wake_up_all(&error->wait_queue);
29878c185ecaSChris Wilson 
298836703e79SChris Wilson 		/* Wait for anyone holding the lock to wake up, without
298936703e79SChris Wilson 		 * blocking indefinitely on struct_mutex.
299017e1df07SDaniel Vetter 		 */
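		/*
		 * The loop below keeps retrying mutex_trylock(): while
		 * I915_RESET_HANDOFF is still set, wait_on_bit_timeout()
		 * should return non-zero after the one-jiffy wait and the
		 * loop goes around again; once i915_reset() has run and
		 * cleared the handoff bit it returns zero and we stop.
		 */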
299136703e79SChris Wilson 		do {
2992780f262aSChris Wilson 			if (mutex_trylock(&dev_priv->drm.struct_mutex)) {
2993ce800754SChris Wilson 				i915_reset(dev_priv);
2994221fe799SChris Wilson 				mutex_unlock(&dev_priv->drm.struct_mutex);
2995780f262aSChris Wilson 			}
2996ce800754SChris Wilson 		} while (wait_on_bit_timeout(&error->flags,
29978c185ecaSChris Wilson 					     I915_RESET_HANDOFF,
2998780f262aSChris Wilson 					     TASK_UNINTERRUPTIBLE,
299936703e79SChris Wilson 					     1));
3000f69061beSDaniel Vetter 
3001ce800754SChris Wilson 		error->reason = NULL;
3002ce800754SChris Wilson 
3003c033666aSChris Wilson 		intel_finish_reset(dev_priv);
300436703e79SChris Wilson 	}
3005f454c694SImre Deak 
3006ce800754SChris Wilson 	if (!test_bit(I915_WEDGED, &error->flags))
3007ce800754SChris Wilson 		kobject_uevent_env(kobj, KOBJ_CHANGE, reset_done_event);
3008f316a42cSBen Gamari }
30098a905236SJesse Barnes 
3010eaa14c24SChris Wilson static void i915_clear_error_registers(struct drm_i915_private *dev_priv)
3011c0e09200SDave Airlie {
3012eaa14c24SChris Wilson 	u32 eir;
301363eeaf38SJesse Barnes 
3014eaa14c24SChris Wilson 	if (!IS_GEN2(dev_priv))
3015eaa14c24SChris Wilson 		I915_WRITE(PGTBL_ER, I915_READ(PGTBL_ER));
301663eeaf38SJesse Barnes 
3017eaa14c24SChris Wilson 	if (INTEL_GEN(dev_priv) < 4)
3018eaa14c24SChris Wilson 		I915_WRITE(IPEIR, I915_READ(IPEIR));
3019eaa14c24SChris Wilson 	else
3020eaa14c24SChris Wilson 		I915_WRITE(IPEIR_I965, I915_READ(IPEIR_I965));
30218a905236SJesse Barnes 
3022eaa14c24SChris Wilson 	I915_WRITE(EIR, I915_READ(EIR));
302363eeaf38SJesse Barnes 	eir = I915_READ(EIR);
302463eeaf38SJesse Barnes 	if (eir) {
302563eeaf38SJesse Barnes 		/*
302663eeaf38SJesse Barnes 		 * some errors might have become stuck,
302763eeaf38SJesse Barnes 		 * mask them.
302863eeaf38SJesse Barnes 		 */
3029eaa14c24SChris Wilson 		DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masking\n", eir);
303063eeaf38SJesse Barnes 		I915_WRITE(EMR, I915_READ(EMR) | eir);
303163eeaf38SJesse Barnes 		I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
303263eeaf38SJesse Barnes 	}
303335aed2e6SChris Wilson }
303435aed2e6SChris Wilson 
303535aed2e6SChris Wilson /**
3036b8d24a06SMika Kuoppala  * i915_handle_error - handle a gpu error
303714bb2c11STvrtko Ursulin  * @dev_priv: i915 device private
303814b730fcSarun.siluvery@linux.intel.com  * @engine_mask: mask representing engines that are hung
3039ce800754SChris Wilson  * @flags: control flags (e.g. I915_ERROR_CAPTURE to record an error state)
304087c390b6SMichel Thierry  * @fmt: Error message format string
304187c390b6SMichel Thierry  *
3042aafd8581SJavier Martinez Canillas  * Do some basic checking of register state at error time and
304335aed2e6SChris Wilson  * dump it to the syslog.  Also call i915_capture_error_state() to make
304435aed2e6SChris Wilson  * sure we get a record and make it available in debugfs.  Fire a uevent
304535aed2e6SChris Wilson  * so userspace knows something bad happened (should trigger collection
304635aed2e6SChris Wilson  * of a ring dump etc.).
304735aed2e6SChris Wilson  */
3048c033666aSChris Wilson void i915_handle_error(struct drm_i915_private *dev_priv,
3049c033666aSChris Wilson 		       u32 engine_mask,
3050ce800754SChris Wilson 		       unsigned long flags,
305158174462SMika Kuoppala 		       const char *fmt, ...)
305235aed2e6SChris Wilson {
3053142bc7d9SMichel Thierry 	struct intel_engine_cs *engine;
3054142bc7d9SMichel Thierry 	unsigned int tmp;
305558174462SMika Kuoppala 	char error_msg[80];
3056ce800754SChris Wilson 	char *msg = NULL;
3057ce800754SChris Wilson 
3058ce800754SChris Wilson 	if (fmt) {
3059ce800754SChris Wilson 		va_list args;
306035aed2e6SChris Wilson 
306158174462SMika Kuoppala 		va_start(args, fmt);
306258174462SMika Kuoppala 		vscnprintf(error_msg, sizeof(error_msg), fmt, args);
306358174462SMika Kuoppala 		va_end(args);
306458174462SMika Kuoppala 
3065ce800754SChris Wilson 		msg = error_msg;
3066ce800754SChris Wilson 	}
3067ce800754SChris Wilson 
30681604a86dSChris Wilson 	/*
30691604a86dSChris Wilson 	 * In most cases it's guaranteed that we get here with an RPM
30701604a86dSChris Wilson 	 * reference held, for example because there is a pending GPU
30711604a86dSChris Wilson 	 * request that won't finish until the reset is done. This
30721604a86dSChris Wilson 	 * isn't the case at least when we get here by doing a
30731604a86dSChris Wilson 	 * simulated reset via debugfs, so get an RPM reference.
30741604a86dSChris Wilson 	 */
30751604a86dSChris Wilson 	intel_runtime_pm_get(dev_priv);
30761604a86dSChris Wilson 
3077873d66fbSChris Wilson 	engine_mask &= INTEL_INFO(dev_priv)->ring_mask;
3078ce800754SChris Wilson 
3079ce800754SChris Wilson 	if (flags & I915_ERROR_CAPTURE) {
3080ce800754SChris Wilson 		i915_capture_error_state(dev_priv, engine_mask, msg);
3081eaa14c24SChris Wilson 		i915_clear_error_registers(dev_priv);
3082ce800754SChris Wilson 	}
30838a905236SJesse Barnes 
3084142bc7d9SMichel Thierry 	/*
3085142bc7d9SMichel Thierry 	 * Try engine reset when available. We fall back to full reset if
3086142bc7d9SMichel Thierry 	 * single reset fails.
3087142bc7d9SMichel Thierry 	 */
3088142bc7d9SMichel Thierry 	if (intel_has_reset_engine(dev_priv)) {
3089142bc7d9SMichel Thierry 		for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
30909db529aaSDaniel Vetter 			BUILD_BUG_ON(I915_RESET_MODESET >= I915_RESET_ENGINE);
3091142bc7d9SMichel Thierry 			if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
3092142bc7d9SMichel Thierry 					     &dev_priv->gpu_error.flags))
3093142bc7d9SMichel Thierry 				continue;
3094142bc7d9SMichel Thierry 
3095ce800754SChris Wilson 			if (i915_reset_engine(engine, msg) == 0)
3096142bc7d9SMichel Thierry 				engine_mask &= ~intel_engine_flag(engine);
3097142bc7d9SMichel Thierry 
3098142bc7d9SMichel Thierry 			clear_bit(I915_RESET_ENGINE + engine->id,
3099142bc7d9SMichel Thierry 				  &dev_priv->gpu_error.flags);
3100142bc7d9SMichel Thierry 			wake_up_bit(&dev_priv->gpu_error.flags,
3101142bc7d9SMichel Thierry 				    I915_RESET_ENGINE + engine->id);
3102142bc7d9SMichel Thierry 		}
3103142bc7d9SMichel Thierry 	}
3104142bc7d9SMichel Thierry 
31058af29b0cSChris Wilson 	if (!engine_mask)
31061604a86dSChris Wilson 		goto out;
31078af29b0cSChris Wilson 
3108142bc7d9SMichel Thierry 	/* Full reset needs the mutex; stop any other user from attempting one. */
3109d5367307SChris Wilson 	if (test_and_set_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags)) {
3110d5367307SChris Wilson 		wait_event(dev_priv->gpu_error.reset_queue,
3111d5367307SChris Wilson 			   !test_bit(I915_RESET_BACKOFF,
3112d5367307SChris Wilson 				     &dev_priv->gpu_error.flags));
31131604a86dSChris Wilson 		goto out;
3114d5367307SChris Wilson 	}
3115ba1234d1SBen Gamari 
3116142bc7d9SMichel Thierry 	/* Prevent any other reset-engine attempt. */
3117142bc7d9SMichel Thierry 	for_each_engine(engine, dev_priv, tmp) {
3118142bc7d9SMichel Thierry 		while (test_and_set_bit(I915_RESET_ENGINE + engine->id,
3119142bc7d9SMichel Thierry 					&dev_priv->gpu_error.flags))
3120142bc7d9SMichel Thierry 			wait_on_bit(&dev_priv->gpu_error.flags,
3121142bc7d9SMichel Thierry 				    I915_RESET_ENGINE + engine->id,
3122142bc7d9SMichel Thierry 				    TASK_UNINTERRUPTIBLE);
3123142bc7d9SMichel Thierry 	}
3124142bc7d9SMichel Thierry 
3125ce800754SChris Wilson 	i915_reset_device(dev_priv, msg);
3126d5367307SChris Wilson 
3127142bc7d9SMichel Thierry 	for_each_engine(engine, dev_priv, tmp) {
3128142bc7d9SMichel Thierry 		clear_bit(I915_RESET_ENGINE + engine->id,
3129142bc7d9SMichel Thierry 			  &dev_priv->gpu_error.flags);
3130142bc7d9SMichel Thierry 	}
3131142bc7d9SMichel Thierry 
3132d5367307SChris Wilson 	clear_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags);
3133d5367307SChris Wilson 	wake_up_all(&dev_priv->gpu_error.reset_queue);
31341604a86dSChris Wilson 
31351604a86dSChris Wilson out:
31361604a86dSChris Wilson 	intel_runtime_pm_put(dev_priv);
31378a905236SJesse Barnes }
31388a905236SJesse Barnes 
313942f52ef8SKeith Packard /* Called from drm generic code, passed 'crtc' which
314042f52ef8SKeith Packard  * we use as a pipe index
314142f52ef8SKeith Packard  */
314286e83e35SChris Wilson static int i8xx_enable_vblank(struct drm_device *dev, unsigned int pipe)
31430a3e67a4SJesse Barnes {
3144fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
3145e9d21d7fSKeith Packard 	unsigned long irqflags;
314671e0ffa5SJesse Barnes 
31471ec14ad3SChris Wilson 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
314886e83e35SChris Wilson 	i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
314986e83e35SChris Wilson 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
315086e83e35SChris Wilson 
315186e83e35SChris Wilson 	return 0;
315286e83e35SChris Wilson }
315386e83e35SChris Wilson 
315486e83e35SChris Wilson static int i965_enable_vblank(struct drm_device *dev, unsigned int pipe)
315586e83e35SChris Wilson {
315686e83e35SChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
315786e83e35SChris Wilson 	unsigned long irqflags;
315886e83e35SChris Wilson 
315986e83e35SChris Wilson 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
31607c463586SKeith Packard 	i915_enable_pipestat(dev_priv, pipe,
3161755e9019SImre Deak 			     PIPE_START_VBLANK_INTERRUPT_STATUS);
31621ec14ad3SChris Wilson 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
31638692d00eSChris Wilson 
31640a3e67a4SJesse Barnes 	return 0;
31650a3e67a4SJesse Barnes }
31660a3e67a4SJesse Barnes 
316788e72717SThierry Reding static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
3168f796cf8fSJesse Barnes {
3169fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
3170f796cf8fSJesse Barnes 	unsigned long irqflags;
317155b8f2a7STvrtko Ursulin 	uint32_t bit = INTEL_GEN(dev_priv) >= 7 ?
317286e83e35SChris Wilson 		DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
3173f796cf8fSJesse Barnes 
3174f796cf8fSJesse Barnes 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3175fbdedaeaSVille Syrjälä 	ilk_enable_display_irq(dev_priv, bit);
3176b1f14ad0SJesse Barnes 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3177b1f14ad0SJesse Barnes 
31782e8bf223SDhinakaran Pandiyan 	/* Even though there is no DMC, frame counter can get stuck when
31792e8bf223SDhinakaran Pandiyan 	 * PSR is active as no frames are generated.
31802e8bf223SDhinakaran Pandiyan 	 */
31812e8bf223SDhinakaran Pandiyan 	if (HAS_PSR(dev_priv))
31822e8bf223SDhinakaran Pandiyan 		drm_vblank_restore(dev, pipe);
31832e8bf223SDhinakaran Pandiyan 
3184b1f14ad0SJesse Barnes 	return 0;
3185b1f14ad0SJesse Barnes }
3186b1f14ad0SJesse Barnes 
318788e72717SThierry Reding static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe)
3188abd58f01SBen Widawsky {
3189fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
3190abd58f01SBen Widawsky 	unsigned long irqflags;
3191abd58f01SBen Widawsky 
3192abd58f01SBen Widawsky 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3193013d3752SVille Syrjälä 	bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
3194abd58f01SBen Widawsky 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3195013d3752SVille Syrjälä 
31962e8bf223SDhinakaran Pandiyan 	/* Even if there is no DMC, frame counter can get stuck when
31972e8bf223SDhinakaran Pandiyan 	 * PSR is active as no frames are generated, so check only for PSR.
31982e8bf223SDhinakaran Pandiyan 	 */
31992e8bf223SDhinakaran Pandiyan 	if (HAS_PSR(dev_priv))
32002e8bf223SDhinakaran Pandiyan 		drm_vblank_restore(dev, pipe);
32012e8bf223SDhinakaran Pandiyan 
3202abd58f01SBen Widawsky 	return 0;
3203abd58f01SBen Widawsky }
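
/*
 * These per-platform hooks are not called directly from within i915: the DRM
 * core reaches them through the driver's enable_vblank/disable_vblank
 * callbacks, which are hooked up per platform elsewhere in this file
 * (intel_irq_init()). An illustrative sketch of that wiring, with the
 * platform checks omitted:
 *
 *	dev->driver->enable_vblank  = i965_enable_vblank;
 *	dev->driver->disable_vblank = i965_disable_vblank;
 *
 * so drm_vblank_get()/drm_vblank_put() in the core end up here with the
 * pipe index.
 */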
3204abd58f01SBen Widawsky 
320542f52ef8SKeith Packard /* Called from drm generic code; it passes 'crtc', which
320642f52ef8SKeith Packard  * we use as a pipe index.
320742f52ef8SKeith Packard  */
320886e83e35SChris Wilson static void i8xx_disable_vblank(struct drm_device *dev, unsigned int pipe)
320986e83e35SChris Wilson {
321086e83e35SChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
321186e83e35SChris Wilson 	unsigned long irqflags;
321286e83e35SChris Wilson 
321386e83e35SChris Wilson 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
321486e83e35SChris Wilson 	i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
321586e83e35SChris Wilson 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
321686e83e35SChris Wilson }
321786e83e35SChris Wilson 
321886e83e35SChris Wilson static void i965_disable_vblank(struct drm_device *dev, unsigned int pipe)
32190a3e67a4SJesse Barnes {
3220fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
3221e9d21d7fSKeith Packard 	unsigned long irqflags;
32220a3e67a4SJesse Barnes 
32231ec14ad3SChris Wilson 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
32247c463586SKeith Packard 	i915_disable_pipestat(dev_priv, pipe,
3225755e9019SImre Deak 			      PIPE_START_VBLANK_INTERRUPT_STATUS);
32261ec14ad3SChris Wilson 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
32270a3e67a4SJesse Barnes }
32280a3e67a4SJesse Barnes 
322988e72717SThierry Reding static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe)
3230f796cf8fSJesse Barnes {
3231fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
3232f796cf8fSJesse Barnes 	unsigned long irqflags;
323355b8f2a7STvrtko Ursulin 	uint32_t bit = INTEL_GEN(dev_priv) >= 7 ?
323486e83e35SChris Wilson 		DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
3235f796cf8fSJesse Barnes 
3236f796cf8fSJesse Barnes 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3237fbdedaeaSVille Syrjälä 	ilk_disable_display_irq(dev_priv, bit);
3238b1f14ad0SJesse Barnes 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3239b1f14ad0SJesse Barnes }
3240b1f14ad0SJesse Barnes 
324188e72717SThierry Reding static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
3242abd58f01SBen Widawsky {
3243fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
3244abd58f01SBen Widawsky 	unsigned long irqflags;
3245abd58f01SBen Widawsky 
3246abd58f01SBen Widawsky 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3247013d3752SVille Syrjälä 	bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
3248abd58f01SBen Widawsky 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3249abd58f01SBen Widawsky }
3250abd58f01SBen Widawsky 
3251b243f530STvrtko Ursulin static void ibx_irq_reset(struct drm_i915_private *dev_priv)
325291738a95SPaulo Zanoni {
32536e266956STvrtko Ursulin 	if (HAS_PCH_NOP(dev_priv))
325491738a95SPaulo Zanoni 		return;
325591738a95SPaulo Zanoni 
32563488d4ebSVille Syrjälä 	GEN3_IRQ_RESET(SDE);
3257105b122eSPaulo Zanoni 
32586e266956STvrtko Ursulin 	if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
3259105b122eSPaulo Zanoni 		I915_WRITE(SERR_INT, 0xffffffff);
3260622364b6SPaulo Zanoni }
3261105b122eSPaulo Zanoni 
326291738a95SPaulo Zanoni /*
3263622364b6SPaulo Zanoni  * SDEIER is also touched by the interrupt handler to work around missed PCH
3264622364b6SPaulo Zanoni  * interrupts. Hence we can't update it after the interrupt handler is enabled -
3265622364b6SPaulo Zanoni  * instead we unconditionally enable all PCH interrupt sources here, but then
3266622364b6SPaulo Zanoni  * only unmask them as needed with SDEIMR.
3267622364b6SPaulo Zanoni  *
3268622364b6SPaulo Zanoni  * This function needs to be called before interrupts are enabled.
326991738a95SPaulo Zanoni  */
3270622364b6SPaulo Zanoni static void ibx_irq_pre_postinstall(struct drm_device *dev)
3271622364b6SPaulo Zanoni {
3272fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
3273622364b6SPaulo Zanoni 
32746e266956STvrtko Ursulin 	if (HAS_PCH_NOP(dev_priv))
3275622364b6SPaulo Zanoni 		return;
3276622364b6SPaulo Zanoni 
3277622364b6SPaulo Zanoni 	WARN_ON(I915_READ(SDEIER) != 0);
327891738a95SPaulo Zanoni 	I915_WRITE(SDEIER, 0xffffffff);
327991738a95SPaulo Zanoni 	POSTING_READ(SDEIER);
328091738a95SPaulo Zanoni }
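
/*
 * Callers: ironlake_irq_postinstall() and gen8_irq_postinstall() below invoke
 * this first, before ibx_irq_postinstall() unmasks anything in SDEIMR,
 * matching the ordering requirement spelled out in the comment above.
 */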
328191738a95SPaulo Zanoni 
3282b243f530STvrtko Ursulin static void gen5_gt_irq_reset(struct drm_i915_private *dev_priv)
3283d18ea1b5SDaniel Vetter {
32843488d4ebSVille Syrjälä 	GEN3_IRQ_RESET(GT);
3285b243f530STvrtko Ursulin 	if (INTEL_GEN(dev_priv) >= 6)
32863488d4ebSVille Syrjälä 		GEN3_IRQ_RESET(GEN6_PM);
3287d18ea1b5SDaniel Vetter }
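
/*
 * A rough reminder of what the GEN3_IRQ_RESET()/GEN3_IRQ_INIT() helpers used
 * throughout these reset/postinstall paths do (they are defined earlier in
 * this file): RESET masks everything in IMR, zeroes IER and clears IIR
 * (twice, to flush double-buffered bits), while INIT first asserts that IIR
 * is zero and then programs IER and IMR.
 */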
3288d18ea1b5SDaniel Vetter 
328970591a41SVille Syrjälä static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
329070591a41SVille Syrjälä {
329171b8b41dSVille Syrjälä 	if (IS_CHERRYVIEW(dev_priv))
329271b8b41dSVille Syrjälä 		I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
329371b8b41dSVille Syrjälä 	else
329471b8b41dSVille Syrjälä 		I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
329571b8b41dSVille Syrjälä 
3296ad22d106SVille Syrjälä 	i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
329770591a41SVille Syrjälä 	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
329870591a41SVille Syrjälä 
329944d9241eSVille Syrjälä 	i9xx_pipestat_irq_reset(dev_priv);
330070591a41SVille Syrjälä 
33013488d4ebSVille Syrjälä 	GEN3_IRQ_RESET(VLV_);
33028bd099a7SChris Wilson 	dev_priv->irq_mask = ~0u;
330370591a41SVille Syrjälä }
330470591a41SVille Syrjälä 
33058bb61306SVille Syrjälä static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
33068bb61306SVille Syrjälä {
33078bb61306SVille Syrjälä 	u32 pipestat_mask;
33089ab981f2SVille Syrjälä 	u32 enable_mask;
33098bb61306SVille Syrjälä 	enum pipe pipe;
33108bb61306SVille Syrjälä 
3311842ebf7aSVille Syrjälä 	pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS;
33128bb61306SVille Syrjälä 
33138bb61306SVille Syrjälä 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
33148bb61306SVille Syrjälä 	for_each_pipe(dev_priv, pipe)
33158bb61306SVille Syrjälä 		i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
33168bb61306SVille Syrjälä 
33179ab981f2SVille Syrjälä 	enable_mask = I915_DISPLAY_PORT_INTERRUPT |
33188bb61306SVille Syrjälä 		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3319ebf5f921SVille Syrjälä 		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3320ebf5f921SVille Syrjälä 		I915_LPE_PIPE_A_INTERRUPT |
3321ebf5f921SVille Syrjälä 		I915_LPE_PIPE_B_INTERRUPT;
3322ebf5f921SVille Syrjälä 
33238bb61306SVille Syrjälä 	if (IS_CHERRYVIEW(dev_priv))
3324ebf5f921SVille Syrjälä 		enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT |
3325ebf5f921SVille Syrjälä 			I915_LPE_PIPE_C_INTERRUPT;
33266b7eafc1SVille Syrjälä 
33278bd099a7SChris Wilson 	WARN_ON(dev_priv->irq_mask != ~0u);
33286b7eafc1SVille Syrjälä 
33299ab981f2SVille Syrjälä 	dev_priv->irq_mask = ~enable_mask;
33308bb61306SVille Syrjälä 
33313488d4ebSVille Syrjälä 	GEN3_IRQ_INIT(VLV_, dev_priv->irq_mask, enable_mask);
33328bb61306SVille Syrjälä }
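
/*
 * vlv_display_irq_reset() parks irq_mask at ~0u, which is what the WARN_ON
 * above checks for; after this point irq_mask is simply the complement of
 * enable_mask.
 */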
33338bb61306SVille Syrjälä 
33348bb61306SVille Syrjälä /* drm_dma.h hooks */
33368bb61306SVille Syrjälä static void ironlake_irq_reset(struct drm_device *dev)
33378bb61306SVille Syrjälä {
3338fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
33398bb61306SVille Syrjälä 
3340d420a50cSVille Syrjälä 	if (IS_GEN5(dev_priv))
33418bb61306SVille Syrjälä 		I915_WRITE(HWSTAM, 0xffffffff);
33428bb61306SVille Syrjälä 
33433488d4ebSVille Syrjälä 	GEN3_IRQ_RESET(DE);
33445db94019STvrtko Ursulin 	if (IS_GEN7(dev_priv))
33458bb61306SVille Syrjälä 		I915_WRITE(GEN7_ERR_INT, 0xffffffff);
33468bb61306SVille Syrjälä 
3347b243f530STvrtko Ursulin 	gen5_gt_irq_reset(dev_priv);
33488bb61306SVille Syrjälä 
3349b243f530STvrtko Ursulin 	ibx_irq_reset(dev_priv);
33508bb61306SVille Syrjälä }
33518bb61306SVille Syrjälä 
33526bcdb1c8SVille Syrjälä static void valleyview_irq_reset(struct drm_device *dev)
33537e231dbeSJesse Barnes {
3354fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
33557e231dbeSJesse Barnes 
335634c7b8a7SVille Syrjälä 	I915_WRITE(VLV_MASTER_IER, 0);
335734c7b8a7SVille Syrjälä 	POSTING_READ(VLV_MASTER_IER);
335834c7b8a7SVille Syrjälä 
3359b243f530STvrtko Ursulin 	gen5_gt_irq_reset(dev_priv);
33607e231dbeSJesse Barnes 
3361ad22d106SVille Syrjälä 	spin_lock_irq(&dev_priv->irq_lock);
33629918271eSVille Syrjälä 	if (dev_priv->display_irqs_enabled)
336370591a41SVille Syrjälä 		vlv_display_irq_reset(dev_priv);
3364ad22d106SVille Syrjälä 	spin_unlock_irq(&dev_priv->irq_lock);
33657e231dbeSJesse Barnes }
33667e231dbeSJesse Barnes 
3367d6e3cca3SDaniel Vetter static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
3368d6e3cca3SDaniel Vetter {
3369d6e3cca3SDaniel Vetter 	GEN8_IRQ_RESET_NDX(GT, 0);
3370d6e3cca3SDaniel Vetter 	GEN8_IRQ_RESET_NDX(GT, 1);
3371d6e3cca3SDaniel Vetter 	GEN8_IRQ_RESET_NDX(GT, 2);
3372d6e3cca3SDaniel Vetter 	GEN8_IRQ_RESET_NDX(GT, 3);
3373d6e3cca3SDaniel Vetter }
3374d6e3cca3SDaniel Vetter 
3375823f6b38SPaulo Zanoni static void gen8_irq_reset(struct drm_device *dev)
3376abd58f01SBen Widawsky {
3377fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
3378abd58f01SBen Widawsky 	int pipe;
3379abd58f01SBen Widawsky 
3380abd58f01SBen Widawsky 	I915_WRITE(GEN8_MASTER_IRQ, 0);
3381abd58f01SBen Widawsky 	POSTING_READ(GEN8_MASTER_IRQ);
3382abd58f01SBen Widawsky 
3383d6e3cca3SDaniel Vetter 	gen8_gt_irq_reset(dev_priv);
3384abd58f01SBen Widawsky 
3385055e393fSDamien Lespiau 	for_each_pipe(dev_priv, pipe)
3386f458ebbcSDaniel Vetter 		if (intel_display_power_is_enabled(dev_priv,
3387813bde43SPaulo Zanoni 						   POWER_DOMAIN_PIPE(pipe)))
3388f86f3fb0SPaulo Zanoni 			GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
3389abd58f01SBen Widawsky 
33903488d4ebSVille Syrjälä 	GEN3_IRQ_RESET(GEN8_DE_PORT_);
33913488d4ebSVille Syrjälä 	GEN3_IRQ_RESET(GEN8_DE_MISC_);
33923488d4ebSVille Syrjälä 	GEN3_IRQ_RESET(GEN8_PCU_);
3393abd58f01SBen Widawsky 
33946e266956STvrtko Ursulin 	if (HAS_PCH_SPLIT(dev_priv))
3395b243f530STvrtko Ursulin 		ibx_irq_reset(dev_priv);
3396abd58f01SBen Widawsky }
3397abd58f01SBen Widawsky 
339851951ae7SMika Kuoppala static void gen11_gt_irq_reset(struct drm_i915_private *dev_priv)
339951951ae7SMika Kuoppala {
340051951ae7SMika Kuoppala 	/* Disable the RCS, BCS, VCS and VECS class engine interrupts. */
340151951ae7SMika Kuoppala 	I915_WRITE(GEN11_RENDER_COPY_INTR_ENABLE, 0);
340251951ae7SMika Kuoppala 	I915_WRITE(GEN11_VCS_VECS_INTR_ENABLE,	  0);
340351951ae7SMika Kuoppala 
340451951ae7SMika Kuoppala 	/* Restore the irq masks on the RCS, BCS, VCS and VECS engines. */
340551951ae7SMika Kuoppala 	I915_WRITE(GEN11_RCS0_RSVD_INTR_MASK,	~0);
340651951ae7SMika Kuoppala 	I915_WRITE(GEN11_BCS_RSVD_INTR_MASK,	~0);
340751951ae7SMika Kuoppala 	I915_WRITE(GEN11_VCS0_VCS1_INTR_MASK,	~0);
340851951ae7SMika Kuoppala 	I915_WRITE(GEN11_VCS2_VCS3_INTR_MASK,	~0);
340951951ae7SMika Kuoppala 	I915_WRITE(GEN11_VECS0_VECS1_INTR_MASK,	~0);
3410d02b98b8SOscar Mateo 
3411d02b98b8SOscar Mateo 	I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
3412d02b98b8SOscar Mateo 	I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_MASK,  ~0);
341351951ae7SMika Kuoppala }
341451951ae7SMika Kuoppala 
341551951ae7SMika Kuoppala static void gen11_irq_reset(struct drm_device *dev)
341651951ae7SMika Kuoppala {
341751951ae7SMika Kuoppala 	struct drm_i915_private *dev_priv = dev->dev_private;
341851951ae7SMika Kuoppala 	int pipe;
341951951ae7SMika Kuoppala 
342051951ae7SMika Kuoppala 	I915_WRITE(GEN11_GFX_MSTR_IRQ, 0);
342151951ae7SMika Kuoppala 	POSTING_READ(GEN11_GFX_MSTR_IRQ);
342251951ae7SMika Kuoppala 
342351951ae7SMika Kuoppala 	gen11_gt_irq_reset(dev_priv);
342451951ae7SMika Kuoppala 
342551951ae7SMika Kuoppala 	I915_WRITE(GEN11_DISPLAY_INT_CTL, 0);
342651951ae7SMika Kuoppala 
342751951ae7SMika Kuoppala 	for_each_pipe(dev_priv, pipe)
342851951ae7SMika Kuoppala 		if (intel_display_power_is_enabled(dev_priv,
342951951ae7SMika Kuoppala 						   POWER_DOMAIN_PIPE(pipe)))
343051951ae7SMika Kuoppala 			GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
343151951ae7SMika Kuoppala 
343251951ae7SMika Kuoppala 	GEN3_IRQ_RESET(GEN8_DE_PORT_);
343351951ae7SMika Kuoppala 	GEN3_IRQ_RESET(GEN8_DE_MISC_);
343451951ae7SMika Kuoppala 	GEN3_IRQ_RESET(GEN8_PCU_);
343551951ae7SMika Kuoppala }
343651951ae7SMika Kuoppala 
34374c6c03beSDamien Lespiau void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
3438001bd2cbSImre Deak 				     u8 pipe_mask)
3439d49bdb0eSPaulo Zanoni {
34401180e206SPaulo Zanoni 	uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
34416831f3e3SVille Syrjälä 	enum pipe pipe;
3442d49bdb0eSPaulo Zanoni 
344313321786SDaniel Vetter 	spin_lock_irq(&dev_priv->irq_lock);
34449dfe2e3aSImre Deak 
34459dfe2e3aSImre Deak 	if (!intel_irqs_enabled(dev_priv)) {
34469dfe2e3aSImre Deak 		spin_unlock_irq(&dev_priv->irq_lock);
34479dfe2e3aSImre Deak 		return;
34489dfe2e3aSImre Deak 	}
34499dfe2e3aSImre Deak 
34506831f3e3SVille Syrjälä 	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
34516831f3e3SVille Syrjälä 		GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
34526831f3e3SVille Syrjälä 				  dev_priv->de_irq_mask[pipe],
34536831f3e3SVille Syrjälä 				  ~dev_priv->de_irq_mask[pipe] | extra_ier);
34549dfe2e3aSImre Deak 
345513321786SDaniel Vetter 	spin_unlock_irq(&dev_priv->irq_lock);
3456d49bdb0eSPaulo Zanoni }
3457d49bdb0eSPaulo Zanoni 
3458aae8ba84SVille Syrjälä void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
3459001bd2cbSImre Deak 				     u8 pipe_mask)
3460aae8ba84SVille Syrjälä {
34616831f3e3SVille Syrjälä 	enum pipe pipe;
34626831f3e3SVille Syrjälä 
3463aae8ba84SVille Syrjälä 	spin_lock_irq(&dev_priv->irq_lock);
34649dfe2e3aSImre Deak 
34659dfe2e3aSImre Deak 	if (!intel_irqs_enabled(dev_priv)) {
34669dfe2e3aSImre Deak 		spin_unlock_irq(&dev_priv->irq_lock);
34679dfe2e3aSImre Deak 		return;
34689dfe2e3aSImre Deak 	}
34699dfe2e3aSImre Deak 
34706831f3e3SVille Syrjälä 	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
34716831f3e3SVille Syrjälä 		GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
34729dfe2e3aSImre Deak 
3473aae8ba84SVille Syrjälä 	spin_unlock_irq(&dev_priv->irq_lock);
3474aae8ba84SVille Syrjälä 
3475aae8ba84SVille Syrjälä 	/* make sure we're done processing display irqs */
347691c8a326SChris Wilson 	synchronize_irq(dev_priv->drm.irq);
3477aae8ba84SVille Syrjälä }
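
/*
 * The two helpers above are used around display power well transitions:
 * _post_enable() reprograms the per-pipe DE IRQ registers whose contents were
 * lost while the well was off, and _pre_disable() resets them and then waits
 * for any in-flight interrupt handler to finish before the well goes down.
 */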
3478aae8ba84SVille Syrjälä 
34796bcdb1c8SVille Syrjälä static void cherryview_irq_reset(struct drm_device *dev)
348043f328d7SVille Syrjälä {
3481fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
348243f328d7SVille Syrjälä 
348343f328d7SVille Syrjälä 	I915_WRITE(GEN8_MASTER_IRQ, 0);
348443f328d7SVille Syrjälä 	POSTING_READ(GEN8_MASTER_IRQ);
348543f328d7SVille Syrjälä 
3486d6e3cca3SDaniel Vetter 	gen8_gt_irq_reset(dev_priv);
348743f328d7SVille Syrjälä 
34883488d4ebSVille Syrjälä 	GEN3_IRQ_RESET(GEN8_PCU_);
348943f328d7SVille Syrjälä 
3490ad22d106SVille Syrjälä 	spin_lock_irq(&dev_priv->irq_lock);
34919918271eSVille Syrjälä 	if (dev_priv->display_irqs_enabled)
349270591a41SVille Syrjälä 		vlv_display_irq_reset(dev_priv);
3493ad22d106SVille Syrjälä 	spin_unlock_irq(&dev_priv->irq_lock);
349443f328d7SVille Syrjälä }
349543f328d7SVille Syrjälä 
349691d14251STvrtko Ursulin static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv,
349787a02106SVille Syrjälä 				  const u32 hpd[HPD_NUM_PINS])
349887a02106SVille Syrjälä {
349987a02106SVille Syrjälä 	struct intel_encoder *encoder;
350087a02106SVille Syrjälä 	u32 enabled_irqs = 0;
350187a02106SVille Syrjälä 
350291c8a326SChris Wilson 	for_each_intel_encoder(&dev_priv->drm, encoder)
350387a02106SVille Syrjälä 		if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
350487a02106SVille Syrjälä 			enabled_irqs |= hpd[encoder->hpd_pin];
350587a02106SVille Syrjälä 
350687a02106SVille Syrjälä 	return enabled_irqs;
350787a02106SVille Syrjälä }
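
/*
 * Typical usage (see the *_hpd_irq_setup() functions below): the returned
 * bits are paired with a platform-wide hotplug_irqs mask and handed to an
 * *_interrupt_update() helper, so only connectors whose pin is in the
 * HPD_ENABLED state get their hotplug interrupt unmasked.
 */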
350887a02106SVille Syrjälä 
35091a56b1a2SImre Deak static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv)
35101a56b1a2SImre Deak {
35111a56b1a2SImre Deak 	u32 hotplug;
35121a56b1a2SImre Deak 
35131a56b1a2SImre Deak 	/*
35141a56b1a2SImre Deak 	 * Enable digital hotplug on the PCH, and configure the DP short pulse
35151a56b1a2SImre Deak 	 * duration to 2ms (which is the minimum in the Display Port spec).
35161a56b1a2SImre Deak 	 * The pulse duration bits are reserved on LPT+.
35171a56b1a2SImre Deak 	 */
35181a56b1a2SImre Deak 	hotplug = I915_READ(PCH_PORT_HOTPLUG);
35191a56b1a2SImre Deak 	hotplug &= ~(PORTB_PULSE_DURATION_MASK |
35201a56b1a2SImre Deak 		     PORTC_PULSE_DURATION_MASK |
35211a56b1a2SImre Deak 		     PORTD_PULSE_DURATION_MASK);
35221a56b1a2SImre Deak 	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
35231a56b1a2SImre Deak 	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
35241a56b1a2SImre Deak 	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
35251a56b1a2SImre Deak 	/*
35261a56b1a2SImre Deak 	 * When CPU and PCH are on the same package, port A
35271a56b1a2SImre Deak 	 * HPD must be enabled in both north and south.
35281a56b1a2SImre Deak 	 */
35291a56b1a2SImre Deak 	if (HAS_PCH_LPT_LP(dev_priv))
35301a56b1a2SImre Deak 		hotplug |= PORTA_HOTPLUG_ENABLE;
35311a56b1a2SImre Deak 	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
35321a56b1a2SImre Deak }
35331a56b1a2SImre Deak 
353491d14251STvrtko Ursulin static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
353582a28bcfSDaniel Vetter {
35361a56b1a2SImre Deak 	u32 hotplug_irqs, enabled_irqs;
353782a28bcfSDaniel Vetter 
353891d14251STvrtko Ursulin 	if (HAS_PCH_IBX(dev_priv)) {
3539fee884edSDaniel Vetter 		hotplug_irqs = SDE_HOTPLUG_MASK;
354091d14251STvrtko Ursulin 		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ibx);
354182a28bcfSDaniel Vetter 	} else {
3542fee884edSDaniel Vetter 		hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
354391d14251STvrtko Ursulin 		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_cpt);
354482a28bcfSDaniel Vetter 	}
354582a28bcfSDaniel Vetter 
3546fee884edSDaniel Vetter 	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
354782a28bcfSDaniel Vetter 
35481a56b1a2SImre Deak 	ibx_hpd_detection_setup(dev_priv);
35496dbf30ceSVille Syrjälä }
355026951cafSXiong Zhang 
35512a57d9ccSImre Deak static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv)
35522a57d9ccSImre Deak {
35533b92e263SRodrigo Vivi 	u32 val, hotplug;
35543b92e263SRodrigo Vivi 
35553b92e263SRodrigo Vivi 	/* Display WA #1179 WaHardHangonHotPlug: cnp */
35563b92e263SRodrigo Vivi 	if (HAS_PCH_CNP(dev_priv)) {
35573b92e263SRodrigo Vivi 		val = I915_READ(SOUTH_CHICKEN1);
35583b92e263SRodrigo Vivi 		val &= ~CHASSIS_CLK_REQ_DURATION_MASK;
35593b92e263SRodrigo Vivi 		val |= CHASSIS_CLK_REQ_DURATION(0xf);
35603b92e263SRodrigo Vivi 		I915_WRITE(SOUTH_CHICKEN1, val);
35613b92e263SRodrigo Vivi 	}
35622a57d9ccSImre Deak 
35632a57d9ccSImre Deak 	/* Enable digital hotplug on the PCH */
35642a57d9ccSImre Deak 	hotplug = I915_READ(PCH_PORT_HOTPLUG);
35652a57d9ccSImre Deak 	hotplug |= PORTA_HOTPLUG_ENABLE |
35662a57d9ccSImre Deak 		   PORTB_HOTPLUG_ENABLE |
35672a57d9ccSImre Deak 		   PORTC_HOTPLUG_ENABLE |
35682a57d9ccSImre Deak 		   PORTD_HOTPLUG_ENABLE;
35692a57d9ccSImre Deak 	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
35702a57d9ccSImre Deak 
35712a57d9ccSImre Deak 	hotplug = I915_READ(PCH_PORT_HOTPLUG2);
35722a57d9ccSImre Deak 	hotplug |= PORTE_HOTPLUG_ENABLE;
35732a57d9ccSImre Deak 	I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
35742a57d9ccSImre Deak }
35752a57d9ccSImre Deak 
357691d14251STvrtko Ursulin static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
35776dbf30ceSVille Syrjälä {
35782a57d9ccSImre Deak 	u32 hotplug_irqs, enabled_irqs;
35796dbf30ceSVille Syrjälä 
35806dbf30ceSVille Syrjälä 	hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
358191d14251STvrtko Ursulin 	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_spt);
35826dbf30ceSVille Syrjälä 
35836dbf30ceSVille Syrjälä 	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
35846dbf30ceSVille Syrjälä 
35852a57d9ccSImre Deak 	spt_hpd_detection_setup(dev_priv);
358626951cafSXiong Zhang }
35877fe0b973SKeith Packard 
35881a56b1a2SImre Deak static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv)
35891a56b1a2SImre Deak {
35901a56b1a2SImre Deak 	u32 hotplug;
35911a56b1a2SImre Deak 
35921a56b1a2SImre Deak 	/*
35931a56b1a2SImre Deak 	 * Enable digital hotplug on the CPU, and configure the DP short pulse
35941a56b1a2SImre Deak 	 * duration to 2ms (which is the minimum in the Display Port spec).
35951a56b1a2SImre Deak 	 * The pulse duration bits are reserved on HSW+.
35961a56b1a2SImre Deak 	 */
35971a56b1a2SImre Deak 	hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
35981a56b1a2SImre Deak 	hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK;
35991a56b1a2SImre Deak 	hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE |
36001a56b1a2SImre Deak 		   DIGITAL_PORTA_PULSE_DURATION_2ms;
36011a56b1a2SImre Deak 	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
36021a56b1a2SImre Deak }
36031a56b1a2SImre Deak 
360491d14251STvrtko Ursulin static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
3605e4ce95aaSVille Syrjälä {
36061a56b1a2SImre Deak 	u32 hotplug_irqs, enabled_irqs;
3607e4ce95aaSVille Syrjälä 
360891d14251STvrtko Ursulin 	if (INTEL_GEN(dev_priv) >= 8) {
36093a3b3c7dSVille Syrjälä 		hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG;
361091d14251STvrtko Ursulin 		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bdw);
36113a3b3c7dSVille Syrjälä 
36123a3b3c7dSVille Syrjälä 		bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
361391d14251STvrtko Ursulin 	} else if (INTEL_GEN(dev_priv) >= 7) {
361423bb4cb5SVille Syrjälä 		hotplug_irqs = DE_DP_A_HOTPLUG_IVB;
361591d14251STvrtko Ursulin 		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ivb);
36163a3b3c7dSVille Syrjälä 
36173a3b3c7dSVille Syrjälä 		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
361823bb4cb5SVille Syrjälä 	} else {
3619e4ce95aaSVille Syrjälä 		hotplug_irqs = DE_DP_A_HOTPLUG;
362091d14251STvrtko Ursulin 		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ilk);
3621e4ce95aaSVille Syrjälä 
3622e4ce95aaSVille Syrjälä 		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
36233a3b3c7dSVille Syrjälä 	}
3624e4ce95aaSVille Syrjälä 
36251a56b1a2SImre Deak 	ilk_hpd_detection_setup(dev_priv);
3626e4ce95aaSVille Syrjälä 
362791d14251STvrtko Ursulin 	ibx_hpd_irq_setup(dev_priv);
3628e4ce95aaSVille Syrjälä }
3629e4ce95aaSVille Syrjälä 
36302a57d9ccSImre Deak static void __bxt_hpd_detection_setup(struct drm_i915_private *dev_priv,
36312a57d9ccSImre Deak 				      u32 enabled_irqs)
3632e0a20ad7SShashank Sharma {
36332a57d9ccSImre Deak 	u32 hotplug;
3634e0a20ad7SShashank Sharma 
3635a52bb15bSVille Syrjälä 	hotplug = I915_READ(PCH_PORT_HOTPLUG);
36362a57d9ccSImre Deak 	hotplug |= PORTA_HOTPLUG_ENABLE |
36372a57d9ccSImre Deak 		   PORTB_HOTPLUG_ENABLE |
36382a57d9ccSImre Deak 		   PORTC_HOTPLUG_ENABLE;
3639d252bf68SShubhangi Shrivastava 
3640d252bf68SShubhangi Shrivastava 	DRM_DEBUG_KMS("Invert bit setting: hp_ctl:%x hp_port:%x\n",
3641d252bf68SShubhangi Shrivastava 		      hotplug, enabled_irqs);
3642d252bf68SShubhangi Shrivastava 	hotplug &= ~BXT_DDI_HPD_INVERT_MASK;
3643d252bf68SShubhangi Shrivastava 
3644d252bf68SShubhangi Shrivastava 	 * For BXT, the invert bit has to be set based on the AOB design
3645d252bf68SShubhangi Shrivastava 	 * for the HPD detection logic, so update it from the VBT fields.
3646d252bf68SShubhangi Shrivastava 	 * for HPD detection logic, update it based on VBT fields.
3647d252bf68SShubhangi Shrivastava 	 */
3648d252bf68SShubhangi Shrivastava 	if ((enabled_irqs & BXT_DE_PORT_HP_DDIA) &&
3649d252bf68SShubhangi Shrivastava 	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_A))
3650d252bf68SShubhangi Shrivastava 		hotplug |= BXT_DDIA_HPD_INVERT;
3651d252bf68SShubhangi Shrivastava 	if ((enabled_irqs & BXT_DE_PORT_HP_DDIB) &&
3652d252bf68SShubhangi Shrivastava 	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_B))
3653d252bf68SShubhangi Shrivastava 		hotplug |= BXT_DDIB_HPD_INVERT;
3654d252bf68SShubhangi Shrivastava 	if ((enabled_irqs & BXT_DE_PORT_HP_DDIC) &&
3655d252bf68SShubhangi Shrivastava 	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_C))
3656d252bf68SShubhangi Shrivastava 		hotplug |= BXT_DDIC_HPD_INVERT;
3657d252bf68SShubhangi Shrivastava 
3658a52bb15bSVille Syrjälä 	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3659e0a20ad7SShashank Sharma }
3660e0a20ad7SShashank Sharma 
36612a57d9ccSImre Deak static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv)
36622a57d9ccSImre Deak {
36632a57d9ccSImre Deak 	__bxt_hpd_detection_setup(dev_priv, BXT_DE_PORT_HOTPLUG_MASK);
36642a57d9ccSImre Deak }
36652a57d9ccSImre Deak 
36662a57d9ccSImre Deak static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
36672a57d9ccSImre Deak {
36682a57d9ccSImre Deak 	u32 hotplug_irqs, enabled_irqs;
36692a57d9ccSImre Deak 
36702a57d9ccSImre Deak 	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bxt);
36712a57d9ccSImre Deak 	hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK;
36722a57d9ccSImre Deak 
36732a57d9ccSImre Deak 	bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
36742a57d9ccSImre Deak 
36752a57d9ccSImre Deak 	__bxt_hpd_detection_setup(dev_priv, enabled_irqs);
36762a57d9ccSImre Deak }
36772a57d9ccSImre Deak 
3678d46da437SPaulo Zanoni static void ibx_irq_postinstall(struct drm_device *dev)
3679d46da437SPaulo Zanoni {
3680fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
368182a28bcfSDaniel Vetter 	u32 mask;
3682d46da437SPaulo Zanoni 
36836e266956STvrtko Ursulin 	if (HAS_PCH_NOP(dev_priv))
3684692a04cfSDaniel Vetter 		return;
3685692a04cfSDaniel Vetter 
36866e266956STvrtko Ursulin 	if (HAS_PCH_IBX(dev_priv))
36875c673b60SDaniel Vetter 		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
36884ebc6509SDhinakaran Pandiyan 	else if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
36895c673b60SDaniel Vetter 		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
36904ebc6509SDhinakaran Pandiyan 	else
36914ebc6509SDhinakaran Pandiyan 		mask = SDE_GMBUS_CPT;
36928664281bSPaulo Zanoni 
36933488d4ebSVille Syrjälä 	gen3_assert_iir_is_zero(dev_priv, SDEIIR);
3694d46da437SPaulo Zanoni 	I915_WRITE(SDEIMR, ~mask);
36952a57d9ccSImre Deak 
36962a57d9ccSImre Deak 	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
36972a57d9ccSImre Deak 	    HAS_PCH_LPT(dev_priv))
36981a56b1a2SImre Deak 		ibx_hpd_detection_setup(dev_priv);
36992a57d9ccSImre Deak 	else
37002a57d9ccSImre Deak 		spt_hpd_detection_setup(dev_priv);
3701d46da437SPaulo Zanoni }
3702d46da437SPaulo Zanoni 
37030a9a8c91SDaniel Vetter static void gen5_gt_irq_postinstall(struct drm_device *dev)
37040a9a8c91SDaniel Vetter {
3705fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
37060a9a8c91SDaniel Vetter 	u32 pm_irqs, gt_irqs;
37070a9a8c91SDaniel Vetter 
37080a9a8c91SDaniel Vetter 	pm_irqs = gt_irqs = 0;
37090a9a8c91SDaniel Vetter 
37100a9a8c91SDaniel Vetter 	dev_priv->gt_irq_mask = ~0;
37113c9192bcSTvrtko Ursulin 	if (HAS_L3_DPF(dev_priv)) {
37120a9a8c91SDaniel Vetter 		/* L3 parity interrupt is always unmasked. */
3713772c2a51STvrtko Ursulin 		dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev_priv);
3714772c2a51STvrtko Ursulin 		gt_irqs |= GT_PARITY_ERROR(dev_priv);
37150a9a8c91SDaniel Vetter 	}
37160a9a8c91SDaniel Vetter 
37170a9a8c91SDaniel Vetter 	gt_irqs |= GT_RENDER_USER_INTERRUPT;
37185db94019STvrtko Ursulin 	if (IS_GEN5(dev_priv)) {
3719f8973c21SChris Wilson 		gt_irqs |= ILK_BSD_USER_INTERRUPT;
37200a9a8c91SDaniel Vetter 	} else {
37210a9a8c91SDaniel Vetter 		gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
37220a9a8c91SDaniel Vetter 	}
37230a9a8c91SDaniel Vetter 
37243488d4ebSVille Syrjälä 	GEN3_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
37250a9a8c91SDaniel Vetter 
3726b243f530STvrtko Ursulin 	if (INTEL_GEN(dev_priv) >= 6) {
372778e68d36SImre Deak 		/*
372878e68d36SImre Deak 		 * RPS interrupts will get enabled/disabled on demand when RPS
372978e68d36SImre Deak 		 * itself is enabled/disabled.
373078e68d36SImre Deak 		 */
3731f4e9af4fSAkash Goel 		if (HAS_VEBOX(dev_priv)) {
37320a9a8c91SDaniel Vetter 			pm_irqs |= PM_VEBOX_USER_INTERRUPT;
3733f4e9af4fSAkash Goel 			dev_priv->pm_ier |= PM_VEBOX_USER_INTERRUPT;
3734f4e9af4fSAkash Goel 		}
37350a9a8c91SDaniel Vetter 
3736f4e9af4fSAkash Goel 		dev_priv->pm_imr = 0xffffffff;
37373488d4ebSVille Syrjälä 		GEN3_IRQ_INIT(GEN6_PM, dev_priv->pm_imr, pm_irqs);
37380a9a8c91SDaniel Vetter 	}
37390a9a8c91SDaniel Vetter }
37400a9a8c91SDaniel Vetter 
3741f71d4af4SJesse Barnes static int ironlake_irq_postinstall(struct drm_device *dev)
3742036a4a7dSZhenyu Wang {
3743fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
37448e76f8dcSPaulo Zanoni 	u32 display_mask, extra_mask;
37458e76f8dcSPaulo Zanoni 
3746b243f530STvrtko Ursulin 	if (INTEL_GEN(dev_priv) >= 7) {
37478e76f8dcSPaulo Zanoni 		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
3748842ebf7aSVille Syrjälä 				DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB);
37498e76f8dcSPaulo Zanoni 		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
375023bb4cb5SVille Syrjälä 			      DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
375123bb4cb5SVille Syrjälä 			      DE_DP_A_HOTPLUG_IVB);
37528e76f8dcSPaulo Zanoni 	} else {
37538e76f8dcSPaulo Zanoni 		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
3754842ebf7aSVille Syrjälä 				DE_AUX_CHANNEL_A | DE_PIPEB_CRC_DONE |
3755842ebf7aSVille Syrjälä 				DE_PIPEA_CRC_DONE | DE_POISON);
3756e4ce95aaSVille Syrjälä 		extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
3757e4ce95aaSVille Syrjälä 			      DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
3758e4ce95aaSVille Syrjälä 			      DE_DP_A_HOTPLUG);
37598e76f8dcSPaulo Zanoni 	}
3760036a4a7dSZhenyu Wang 
37611ec14ad3SChris Wilson 	dev_priv->irq_mask = ~display_mask;
3762036a4a7dSZhenyu Wang 
3763622364b6SPaulo Zanoni 	ibx_irq_pre_postinstall(dev);
3764622364b6SPaulo Zanoni 
37653488d4ebSVille Syrjälä 	GEN3_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);
3766036a4a7dSZhenyu Wang 
37670a9a8c91SDaniel Vetter 	gen5_gt_irq_postinstall(dev);
3768036a4a7dSZhenyu Wang 
37691a56b1a2SImre Deak 	ilk_hpd_detection_setup(dev_priv);
37701a56b1a2SImre Deak 
3771d46da437SPaulo Zanoni 	ibx_irq_postinstall(dev);
37727fe0b973SKeith Packard 
377350a0bc90STvrtko Ursulin 	if (IS_IRONLAKE_M(dev_priv)) {
37746005ce42SDaniel Vetter 		/* Enable PCU event interrupts
37756005ce42SDaniel Vetter 		 *
37766005ce42SDaniel Vetter 		 * spinlocking not required here for correctness since interrupt
37774bc9d430SDaniel Vetter 		 * setup is guaranteed to run in single-threaded context. But we
37784bc9d430SDaniel Vetter 		 * need it to make the assert_spin_locked happy. */
3779d6207435SDaniel Vetter 		spin_lock_irq(&dev_priv->irq_lock);
3780fbdedaeaSVille Syrjälä 		ilk_enable_display_irq(dev_priv, DE_PCU_EVENT);
3781d6207435SDaniel Vetter 		spin_unlock_irq(&dev_priv->irq_lock);
3782f97108d1SJesse Barnes 	}
3783f97108d1SJesse Barnes 
3784036a4a7dSZhenyu Wang 	return 0;
3785036a4a7dSZhenyu Wang }
3786036a4a7dSZhenyu Wang 
3787f8b79e58SImre Deak void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
3788f8b79e58SImre Deak {
378967520415SChris Wilson 	lockdep_assert_held(&dev_priv->irq_lock);
3790f8b79e58SImre Deak 
3791f8b79e58SImre Deak 	if (dev_priv->display_irqs_enabled)
3792f8b79e58SImre Deak 		return;
3793f8b79e58SImre Deak 
3794f8b79e58SImre Deak 	dev_priv->display_irqs_enabled = true;
3795f8b79e58SImre Deak 
3796d6c69803SVille Syrjälä 	if (intel_irqs_enabled(dev_priv)) {
3797d6c69803SVille Syrjälä 		vlv_display_irq_reset(dev_priv);
3798ad22d106SVille Syrjälä 		vlv_display_irq_postinstall(dev_priv);
3799f8b79e58SImre Deak 	}
3800d6c69803SVille Syrjälä }
3801f8b79e58SImre Deak 
3802f8b79e58SImre Deak void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
3803f8b79e58SImre Deak {
380467520415SChris Wilson 	lockdep_assert_held(&dev_priv->irq_lock);
3805f8b79e58SImre Deak 
3806f8b79e58SImre Deak 	if (!dev_priv->display_irqs_enabled)
3807f8b79e58SImre Deak 		return;
3808f8b79e58SImre Deak 
3809f8b79e58SImre Deak 	dev_priv->display_irqs_enabled = false;
3810f8b79e58SImre Deak 
3811950eabafSImre Deak 	if (intel_irqs_enabled(dev_priv))
3812ad22d106SVille Syrjälä 		vlv_display_irq_reset(dev_priv);
3813f8b79e58SImre Deak }
3814f8b79e58SImre Deak 
38150e6c9a9eSVille Syrjälä 
38160e6c9a9eSVille Syrjälä static int valleyview_irq_postinstall(struct drm_device *dev)
38170e6c9a9eSVille Syrjälä {
3818fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
38190e6c9a9eSVille Syrjälä 
38200a9a8c91SDaniel Vetter 	gen5_gt_irq_postinstall(dev);
38217e231dbeSJesse Barnes 
3822ad22d106SVille Syrjälä 	spin_lock_irq(&dev_priv->irq_lock);
38239918271eSVille Syrjälä 	if (dev_priv->display_irqs_enabled)
3824ad22d106SVille Syrjälä 		vlv_display_irq_postinstall(dev_priv);
3825ad22d106SVille Syrjälä 	spin_unlock_irq(&dev_priv->irq_lock);
3826ad22d106SVille Syrjälä 
38277e231dbeSJesse Barnes 	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
382834c7b8a7SVille Syrjälä 	POSTING_READ(VLV_MASTER_IER);
382920afbda2SDaniel Vetter 
383020afbda2SDaniel Vetter 	return 0;
383120afbda2SDaniel Vetter }
383220afbda2SDaniel Vetter 
3833abd58f01SBen Widawsky static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
3834abd58f01SBen Widawsky {
3835abd58f01SBen Widawsky 	/* These are interrupts we'll toggle with the ring mask register */
3836abd58f01SBen Widawsky 	uint32_t gt_interrupts[] = {
3837abd58f01SBen Widawsky 		GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
383873d477f6SOscar Mateo 			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
383973d477f6SOscar Mateo 			GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
384073d477f6SOscar Mateo 			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
3841abd58f01SBen Widawsky 		GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
384273d477f6SOscar Mateo 			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
384373d477f6SOscar Mateo 			GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
384473d477f6SOscar Mateo 			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
3845abd58f01SBen Widawsky 		0,
384673d477f6SOscar Mateo 		GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
384773d477f6SOscar Mateo 			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
3848abd58f01SBen Widawsky 		};
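
	/*
	 * gt_interrupts[n] feeds GEN8_IRQ_INIT_NDX(GT, n, ...) below: bank 0
	 * covers RCS/BCS, bank 1 covers VCS1/VCS2, bank 2 is left zero here
	 * and programmed from pm_imr/pm_ier instead (RPS/GuC), and bank 3
	 * covers VECS.
	 */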
3849abd58f01SBen Widawsky 
385098735739STvrtko Ursulin 	if (HAS_L3_DPF(dev_priv))
385198735739STvrtko Ursulin 		gt_interrupts[0] |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
385298735739STvrtko Ursulin 
3853f4e9af4fSAkash Goel 	dev_priv->pm_ier = 0x0;
3854f4e9af4fSAkash Goel 	dev_priv->pm_imr = ~dev_priv->pm_ier;
38559a2d2d87SDeepak S 	GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
38569a2d2d87SDeepak S 	GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
385778e68d36SImre Deak 	/*
385878e68d36SImre Deak 	 * RPS interrupts will get enabled/disabled on demand when RPS itself
385926705e20SSagar Arun Kamble 	 * is enabled/disabled. Same will be the case for GuC interrupts.
386078e68d36SImre Deak 	 */
3861f4e9af4fSAkash Goel 	GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_imr, dev_priv->pm_ier);
38629a2d2d87SDeepak S 	GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
3863abd58f01SBen Widawsky }
3864abd58f01SBen Widawsky 
3865abd58f01SBen Widawsky static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
3866abd58f01SBen Widawsky {
3867770de83dSDamien Lespiau 	uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
3868770de83dSDamien Lespiau 	uint32_t de_pipe_enables;
38693a3b3c7dSVille Syrjälä 	u32 de_port_masked = GEN8_AUX_CHANNEL_A;
38703a3b3c7dSVille Syrjälä 	u32 de_port_enables;
387111825b0dSVille Syrjälä 	u32 de_misc_masked = GEN8_DE_MISC_GSE;
38723a3b3c7dSVille Syrjälä 	enum pipe pipe;
3873770de83dSDamien Lespiau 
3874bca2bf2aSPandiyan, Dhinakaran 	if (INTEL_GEN(dev_priv) >= 9) {
3875842ebf7aSVille Syrjälä 		de_pipe_masked |= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
38763a3b3c7dSVille Syrjälä 		de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
387788e04703SJesse Barnes 				  GEN9_AUX_CHANNEL_D;
3878cc3f90f0SAnder Conselvan de Oliveira 		if (IS_GEN9_LP(dev_priv))
38793a3b3c7dSVille Syrjälä 			de_port_masked |= BXT_DE_PORT_GMBUS;
38803a3b3c7dSVille Syrjälä 	} else {
3881842ebf7aSVille Syrjälä 		de_pipe_masked |= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
38823a3b3c7dSVille Syrjälä 	}
3883770de83dSDamien Lespiau 
3884a324fcacSRodrigo Vivi 	if (IS_CNL_WITH_PORT_F(dev_priv))
3885a324fcacSRodrigo Vivi 		de_port_masked |= CNL_AUX_CHANNEL_F;
3886a324fcacSRodrigo Vivi 
3887770de83dSDamien Lespiau 	de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
3888770de83dSDamien Lespiau 					   GEN8_PIPE_FIFO_UNDERRUN;
3889770de83dSDamien Lespiau 
38903a3b3c7dSVille Syrjälä 	de_port_enables = de_port_masked;
3891cc3f90f0SAnder Conselvan de Oliveira 	if (IS_GEN9_LP(dev_priv))
3892a52bb15bSVille Syrjälä 		de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
3893a52bb15bSVille Syrjälä 	else if (IS_BROADWELL(dev_priv))
38943a3b3c7dSVille Syrjälä 		de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;
38953a3b3c7dSVille Syrjälä 
38960a195c02SMika Kahola 	for_each_pipe(dev_priv, pipe) {
38970a195c02SMika Kahola 		dev_priv->de_irq_mask[pipe] = ~de_pipe_masked;
3898abd58f01SBen Widawsky 
3899f458ebbcSDaniel Vetter 		if (intel_display_power_is_enabled(dev_priv,
3900813bde43SPaulo Zanoni 				POWER_DOMAIN_PIPE(pipe)))
3901813bde43SPaulo Zanoni 			GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
3902813bde43SPaulo Zanoni 					  dev_priv->de_irq_mask[pipe],
390335079899SPaulo Zanoni 					  de_pipe_enables);
39040a195c02SMika Kahola 	}
3905abd58f01SBen Widawsky 
39063488d4ebSVille Syrjälä 	GEN3_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
39073488d4ebSVille Syrjälä 	GEN3_IRQ_INIT(GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);
39082a57d9ccSImre Deak 
39092a57d9ccSImre Deak 	if (IS_GEN9_LP(dev_priv))
39102a57d9ccSImre Deak 		bxt_hpd_detection_setup(dev_priv);
39111a56b1a2SImre Deak 	else if (IS_BROADWELL(dev_priv))
39121a56b1a2SImre Deak 		ilk_hpd_detection_setup(dev_priv);
3913abd58f01SBen Widawsky }
3914abd58f01SBen Widawsky 
3915abd58f01SBen Widawsky static int gen8_irq_postinstall(struct drm_device *dev)
3916abd58f01SBen Widawsky {
3917fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
3918abd58f01SBen Widawsky 
39196e266956STvrtko Ursulin 	if (HAS_PCH_SPLIT(dev_priv))
3920622364b6SPaulo Zanoni 		ibx_irq_pre_postinstall(dev);
3921622364b6SPaulo Zanoni 
3922abd58f01SBen Widawsky 	gen8_gt_irq_postinstall(dev_priv);
3923abd58f01SBen Widawsky 	gen8_de_irq_postinstall(dev_priv);
3924abd58f01SBen Widawsky 
39256e266956STvrtko Ursulin 	if (HAS_PCH_SPLIT(dev_priv))
3926abd58f01SBen Widawsky 		ibx_irq_postinstall(dev);
3927abd58f01SBen Widawsky 
3928e5328c43SVille Syrjälä 	I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
3929abd58f01SBen Widawsky 	POSTING_READ(GEN8_MASTER_IRQ);
3930abd58f01SBen Widawsky 
3931abd58f01SBen Widawsky 	return 0;
3932abd58f01SBen Widawsky }
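
/*
 * Note the ordering in gen8_irq_postinstall(): the GT and DE banks are fully
 * programmed first, and the top-level GEN8_MASTER_IRQ_CONTROL bit is only set
 * (and posted) at the very end, so nothing reaches the CPU from a
 * half-initialized state.
 */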
3933abd58f01SBen Widawsky 
393451951ae7SMika Kuoppala static void gen11_gt_irq_postinstall(struct drm_i915_private *dev_priv)
393551951ae7SMika Kuoppala {
393651951ae7SMika Kuoppala 	const u32 irqs = GT_RENDER_USER_INTERRUPT | GT_CONTEXT_SWITCH_INTERRUPT;
393751951ae7SMika Kuoppala 
393851951ae7SMika Kuoppala 	BUILD_BUG_ON(irqs & 0xffff0000);
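
	/*
	 * Each GEN11 enable/mask register packs two engines, one per 16-bit
	 * half, which is why irqs must fit in 16 bits (checked above) and is
	 * duplicated as "irqs << 16 | irqs" below; the *_RSVD registers only
	 * use the upper half, so only irqs << 16 is unmasked for those.
	 */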
393951951ae7SMika Kuoppala 
394051951ae7SMika Kuoppala 	/* Enable RCS, BCS, VCS and VECS class interrupts. */
394151951ae7SMika Kuoppala 	I915_WRITE(GEN11_RENDER_COPY_INTR_ENABLE, irqs << 16 | irqs);
394251951ae7SMika Kuoppala 	I915_WRITE(GEN11_VCS_VECS_INTR_ENABLE,	  irqs << 16 | irqs);
394351951ae7SMika Kuoppala 
394451951ae7SMika Kuoppala 	/* Unmask irqs on RCS, BCS, VCS and VECS engines. */
394551951ae7SMika Kuoppala 	I915_WRITE(GEN11_RCS0_RSVD_INTR_MASK,	~(irqs << 16));
394651951ae7SMika Kuoppala 	I915_WRITE(GEN11_BCS_RSVD_INTR_MASK,	~(irqs << 16));
394751951ae7SMika Kuoppala 	I915_WRITE(GEN11_VCS0_VCS1_INTR_MASK,	~(irqs | irqs << 16));
394851951ae7SMika Kuoppala 	I915_WRITE(GEN11_VCS2_VCS3_INTR_MASK,	~(irqs | irqs << 16));
394951951ae7SMika Kuoppala 	I915_WRITE(GEN11_VECS0_VECS1_INTR_MASK,	~(irqs | irqs << 16));
395051951ae7SMika Kuoppala 
3951d02b98b8SOscar Mateo 	/*
3952d02b98b8SOscar Mateo 	 * RPS interrupts will get enabled/disabled on demand when RPS itself
3953d02b98b8SOscar Mateo 	 * is enabled/disabled.
3954d02b98b8SOscar Mateo 	 */
3955d02b98b8SOscar Mateo 	dev_priv->pm_ier = 0x0;
3956d02b98b8SOscar Mateo 	dev_priv->pm_imr = ~dev_priv->pm_ier;
3957d02b98b8SOscar Mateo 	I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
3958d02b98b8SOscar Mateo 	I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_MASK,  ~0);
395951951ae7SMika Kuoppala }
396051951ae7SMika Kuoppala 
396151951ae7SMika Kuoppala static int gen11_irq_postinstall(struct drm_device *dev)
396251951ae7SMika Kuoppala {
396351951ae7SMika Kuoppala 	struct drm_i915_private *dev_priv = dev->dev_private;
396451951ae7SMika Kuoppala 
396551951ae7SMika Kuoppala 	gen11_gt_irq_postinstall(dev_priv);
396651951ae7SMika Kuoppala 	gen8_de_irq_postinstall(dev_priv);
396751951ae7SMika Kuoppala 
396851951ae7SMika Kuoppala 	I915_WRITE(GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
396951951ae7SMika Kuoppala 
397051951ae7SMika Kuoppala 	I915_WRITE(GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ);
397151951ae7SMika Kuoppala 	POSTING_READ(GEN11_GFX_MSTR_IRQ);
397251951ae7SMika Kuoppala 
397351951ae7SMika Kuoppala 	return 0;
397451951ae7SMika Kuoppala }
397551951ae7SMika Kuoppala 
397643f328d7SVille Syrjälä static int cherryview_irq_postinstall(struct drm_device *dev)
397743f328d7SVille Syrjälä {
3978fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
397943f328d7SVille Syrjälä 
398043f328d7SVille Syrjälä 	gen8_gt_irq_postinstall(dev_priv);
398143f328d7SVille Syrjälä 
3982ad22d106SVille Syrjälä 	spin_lock_irq(&dev_priv->irq_lock);
39839918271eSVille Syrjälä 	if (dev_priv->display_irqs_enabled)
3984ad22d106SVille Syrjälä 		vlv_display_irq_postinstall(dev_priv);
3985ad22d106SVille Syrjälä 	spin_unlock_irq(&dev_priv->irq_lock);
3986ad22d106SVille Syrjälä 
3987e5328c43SVille Syrjälä 	I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
398843f328d7SVille Syrjälä 	POSTING_READ(GEN8_MASTER_IRQ);
398943f328d7SVille Syrjälä 
399043f328d7SVille Syrjälä 	return 0;
399143f328d7SVille Syrjälä }
399243f328d7SVille Syrjälä 
39936bcdb1c8SVille Syrjälä static void i8xx_irq_reset(struct drm_device *dev)
3994c2798b19SChris Wilson {
3995fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
3996c2798b19SChris Wilson 
399744d9241eSVille Syrjälä 	i9xx_pipestat_irq_reset(dev_priv);
399844d9241eSVille Syrjälä 
3999d420a50cSVille Syrjälä 	I915_WRITE16(HWSTAM, 0xffff);
4000d420a50cSVille Syrjälä 
4001e9e9848aSVille Syrjälä 	GEN2_IRQ_RESET();
4002c2798b19SChris Wilson }
4003c2798b19SChris Wilson 
4004c2798b19SChris Wilson static int i8xx_irq_postinstall(struct drm_device *dev)
4005c2798b19SChris Wilson {
4006fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
4007e9e9848aSVille Syrjälä 	u16 enable_mask;
4008c2798b19SChris Wilson 
4009045cebd2SVille Syrjälä 	I915_WRITE16(EMR, ~(I915_ERROR_PAGE_TABLE |
4010045cebd2SVille Syrjälä 			    I915_ERROR_MEMORY_REFRESH));
4011c2798b19SChris Wilson 
4012c2798b19SChris Wilson 	/* Unmask the interrupts that we always want on. */
4013c2798b19SChris Wilson 	dev_priv->irq_mask =
4014c2798b19SChris Wilson 		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4015842ebf7aSVille Syrjälä 		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT);
4016c2798b19SChris Wilson 
4017e9e9848aSVille Syrjälä 	enable_mask =
4018c2798b19SChris Wilson 		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4019c2798b19SChris Wilson 		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4020e9e9848aSVille Syrjälä 		I915_USER_INTERRUPT;
4021e9e9848aSVille Syrjälä 
4022e9e9848aSVille Syrjälä 	GEN2_IRQ_INIT(, dev_priv->irq_mask, enable_mask);
4023c2798b19SChris Wilson 
4024379ef82dSDaniel Vetter 	/* Interrupt setup is already guaranteed to be single-threaded; this is
4025379ef82dSDaniel Vetter 	 * just to make the assert_spin_locked check happy. */
4026d6207435SDaniel Vetter 	spin_lock_irq(&dev_priv->irq_lock);
4027755e9019SImre Deak 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4028755e9019SImre Deak 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4029d6207435SDaniel Vetter 	spin_unlock_irq(&dev_priv->irq_lock);
4030379ef82dSDaniel Vetter 
4031c2798b19SChris Wilson 	return 0;
4032c2798b19SChris Wilson }
4033c2798b19SChris Wilson 
4034ff1f525eSDaniel Vetter static irqreturn_t i8xx_irq_handler(int irq, void *arg)
4035c2798b19SChris Wilson {
403645a83f84SDaniel Vetter 	struct drm_device *dev = arg;
4037fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
4038af722d28SVille Syrjälä 	irqreturn_t ret = IRQ_NONE;
4039c2798b19SChris Wilson 
40402dd2a883SImre Deak 	if (!intel_irqs_enabled(dev_priv))
40412dd2a883SImre Deak 		return IRQ_NONE;
40422dd2a883SImre Deak 
40431f814dacSImre Deak 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
40441f814dacSImre Deak 	disable_rpm_wakeref_asserts(dev_priv);
40451f814dacSImre Deak 
4046af722d28SVille Syrjälä 	do {
4047af722d28SVille Syrjälä 		u32 pipe_stats[I915_MAX_PIPES] = {};
4048af722d28SVille Syrjälä 		u16 iir;
4049af722d28SVille Syrjälä 
4050c2798b19SChris Wilson 		iir = I915_READ16(IIR);
4051c2798b19SChris Wilson 		if (iir == 0)
4052af722d28SVille Syrjälä 			break;
4053c2798b19SChris Wilson 
4054af722d28SVille Syrjälä 		ret = IRQ_HANDLED;
4055c2798b19SChris Wilson 
4056eb64343cSVille Syrjälä 		/* Call regardless, as some status bits might not be
4057eb64343cSVille Syrjälä 		 * signalled in iir */
4058eb64343cSVille Syrjälä 		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
4059c2798b19SChris Wilson 
4060fd3a4024SDaniel Vetter 		I915_WRITE16(IIR, iir);
4061c2798b19SChris Wilson 
4062c2798b19SChris Wilson 		if (iir & I915_USER_INTERRUPT)
40633b3f1650SAkash Goel 			notify_ring(dev_priv->engine[RCS]);
4064c2798b19SChris Wilson 
4065af722d28SVille Syrjälä 		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
4066af722d28SVille Syrjälä 			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
4067af722d28SVille Syrjälä 
4068eb64343cSVille Syrjälä 		i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats);
4069af722d28SVille Syrjälä 	} while (0);
4070c2798b19SChris Wilson 
40711f814dacSImre Deak 	enable_rpm_wakeref_asserts(dev_priv);
40721f814dacSImre Deak 
40731f814dacSImre Deak 	return ret;
4074c2798b19SChris Wilson }
4075c2798b19SChris Wilson 
40766bcdb1c8SVille Syrjälä static void i915_irq_reset(struct drm_device *dev)
4077a266c7d5SChris Wilson {
4078fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
4079a266c7d5SChris Wilson 
408056b857a5STvrtko Ursulin 	if (I915_HAS_HOTPLUG(dev_priv)) {
40810706f17cSEgbert Eich 		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4082a266c7d5SChris Wilson 		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4083a266c7d5SChris Wilson 	}
4084a266c7d5SChris Wilson 
408544d9241eSVille Syrjälä 	i9xx_pipestat_irq_reset(dev_priv);
408644d9241eSVille Syrjälä 
4087d420a50cSVille Syrjälä 	I915_WRITE(HWSTAM, 0xffffffff);
408844d9241eSVille Syrjälä 
4089ba7eb789SVille Syrjälä 	GEN3_IRQ_RESET();
4090a266c7d5SChris Wilson }
4091a266c7d5SChris Wilson 
4092a266c7d5SChris Wilson static int i915_irq_postinstall(struct drm_device *dev)
4093a266c7d5SChris Wilson {
4094fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
409538bde180SChris Wilson 	u32 enable_mask;
4096a266c7d5SChris Wilson 
4097045cebd2SVille Syrjälä 	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE |
4098045cebd2SVille Syrjälä 			  I915_ERROR_MEMORY_REFRESH));
409938bde180SChris Wilson 
410038bde180SChris Wilson 	/* Unmask the interrupts that we always want on. */
410138bde180SChris Wilson 	dev_priv->irq_mask =
410238bde180SChris Wilson 		~(I915_ASLE_INTERRUPT |
410338bde180SChris Wilson 		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4104842ebf7aSVille Syrjälä 		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT);
410538bde180SChris Wilson 
410638bde180SChris Wilson 	enable_mask =
410738bde180SChris Wilson 		I915_ASLE_INTERRUPT |
410838bde180SChris Wilson 		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
410938bde180SChris Wilson 		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
411038bde180SChris Wilson 		I915_USER_INTERRUPT;
411138bde180SChris Wilson 
411256b857a5STvrtko Ursulin 	if (I915_HAS_HOTPLUG(dev_priv)) {
4113a266c7d5SChris Wilson 		/* Enable in IER... */
4114a266c7d5SChris Wilson 		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
4115a266c7d5SChris Wilson 		/* and unmask in IMR */
4116a266c7d5SChris Wilson 		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
4117a266c7d5SChris Wilson 	}
4118a266c7d5SChris Wilson 
4119ba7eb789SVille Syrjälä 	GEN3_IRQ_INIT(, dev_priv->irq_mask, enable_mask);
4120a266c7d5SChris Wilson 
4121379ef82dSDaniel Vetter 	/* Interrupt setup is already guaranteed to be single-threaded; this is
4122379ef82dSDaniel Vetter 	 * just to make the assert_spin_locked check happy. */
4123d6207435SDaniel Vetter 	spin_lock_irq(&dev_priv->irq_lock);
4124755e9019SImre Deak 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4125755e9019SImre Deak 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4126d6207435SDaniel Vetter 	spin_unlock_irq(&dev_priv->irq_lock);
4127379ef82dSDaniel Vetter 
4128c30bb1fdSVille Syrjälä 	i915_enable_asle_pipestat(dev_priv);
4129c30bb1fdSVille Syrjälä 
413020afbda2SDaniel Vetter 	return 0;
413120afbda2SDaniel Vetter }
413220afbda2SDaniel Vetter 
4133ff1f525eSDaniel Vetter static irqreturn_t i915_irq_handler(int irq, void *arg)
4134a266c7d5SChris Wilson {
413545a83f84SDaniel Vetter 	struct drm_device *dev = arg;
4136fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
4137af722d28SVille Syrjälä 	irqreturn_t ret = IRQ_NONE;
4138a266c7d5SChris Wilson 
41392dd2a883SImre Deak 	if (!intel_irqs_enabled(dev_priv))
41402dd2a883SImre Deak 		return IRQ_NONE;
41412dd2a883SImre Deak 
41421f814dacSImre Deak 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
41431f814dacSImre Deak 	disable_rpm_wakeref_asserts(dev_priv);
41441f814dacSImre Deak 
414538bde180SChris Wilson 	do {
4146eb64343cSVille Syrjälä 		u32 pipe_stats[I915_MAX_PIPES] = {};
4147af722d28SVille Syrjälä 		u32 hotplug_status = 0;
4148af722d28SVille Syrjälä 		u32 iir;
4149a266c7d5SChris Wilson 
4150af722d28SVille Syrjälä 		iir = I915_READ(IIR);
4151af722d28SVille Syrjälä 		if (iir == 0)
4152af722d28SVille Syrjälä 			break;
4153af722d28SVille Syrjälä 
4154af722d28SVille Syrjälä 		ret = IRQ_HANDLED;
4155af722d28SVille Syrjälä 
4156af722d28SVille Syrjälä 		if (I915_HAS_HOTPLUG(dev_priv) &&
4157af722d28SVille Syrjälä 		    iir & I915_DISPLAY_PORT_INTERRUPT)
4158af722d28SVille Syrjälä 			hotplug_status = i9xx_hpd_irq_ack(dev_priv);
4159a266c7d5SChris Wilson 
4160eb64343cSVille Syrjälä 		/* Call regardless, as some status bits might not be
4161eb64343cSVille Syrjälä 		 * signalled in iir */
4162eb64343cSVille Syrjälä 		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
4163a266c7d5SChris Wilson 
4164fd3a4024SDaniel Vetter 		I915_WRITE(IIR, iir);
4165a266c7d5SChris Wilson 
4166a266c7d5SChris Wilson 		if (iir & I915_USER_INTERRUPT)
41673b3f1650SAkash Goel 			notify_ring(dev_priv->engine[RCS]);
4168a266c7d5SChris Wilson 
4169af722d28SVille Syrjälä 		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
4170af722d28SVille Syrjälä 			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
4171a266c7d5SChris Wilson 
4172af722d28SVille Syrjälä 		if (hotplug_status)
4173af722d28SVille Syrjälä 			i9xx_hpd_irq_handler(dev_priv, hotplug_status);
4174af722d28SVille Syrjälä 
4175af722d28SVille Syrjälä 		i915_pipestat_irq_handler(dev_priv, iir, pipe_stats);
4176af722d28SVille Syrjälä 	} while (0);
4177a266c7d5SChris Wilson 
41781f814dacSImre Deak 	enable_rpm_wakeref_asserts(dev_priv);
41791f814dacSImre Deak 
4180a266c7d5SChris Wilson 	return ret;
4181a266c7d5SChris Wilson }
4182a266c7d5SChris Wilson 
41836bcdb1c8SVille Syrjälä static void i965_irq_reset(struct drm_device *dev)
4184a266c7d5SChris Wilson {
4185fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
4186a266c7d5SChris Wilson 
41870706f17cSEgbert Eich 	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4188a266c7d5SChris Wilson 	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4189a266c7d5SChris Wilson 
419044d9241eSVille Syrjälä 	i9xx_pipestat_irq_reset(dev_priv);
419144d9241eSVille Syrjälä 
4192d420a50cSVille Syrjälä 	I915_WRITE(HWSTAM, 0xffffffff);
419344d9241eSVille Syrjälä 
4194ba7eb789SVille Syrjälä 	GEN3_IRQ_RESET();
4195a266c7d5SChris Wilson }
4196a266c7d5SChris Wilson 
4197a266c7d5SChris Wilson static int i965_irq_postinstall(struct drm_device *dev)
4198a266c7d5SChris Wilson {
4199fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
4200bbba0a97SChris Wilson 	u32 enable_mask;
4201a266c7d5SChris Wilson 	u32 error_mask;
4202a266c7d5SChris Wilson 
4203045cebd2SVille Syrjälä 	/*
4204045cebd2SVille Syrjälä 	 * Enable some error detection, note the instruction error mask
4205045cebd2SVille Syrjälä 	 * bit is reserved, so we leave it masked.
4206045cebd2SVille Syrjälä 	 */
4207045cebd2SVille Syrjälä 	if (IS_G4X(dev_priv)) {
4208045cebd2SVille Syrjälä 		error_mask = ~(GM45_ERROR_PAGE_TABLE |
4209045cebd2SVille Syrjälä 			       GM45_ERROR_MEM_PRIV |
4210045cebd2SVille Syrjälä 			       GM45_ERROR_CP_PRIV |
4211045cebd2SVille Syrjälä 			       I915_ERROR_MEMORY_REFRESH);
4212045cebd2SVille Syrjälä 	} else {
4213045cebd2SVille Syrjälä 		error_mask = ~(I915_ERROR_PAGE_TABLE |
4214045cebd2SVille Syrjälä 			       I915_ERROR_MEMORY_REFRESH);
4215045cebd2SVille Syrjälä 	}
4216045cebd2SVille Syrjälä 	I915_WRITE(EMR, error_mask);
4217045cebd2SVille Syrjälä 
4218a266c7d5SChris Wilson 	/* Unmask the interrupts that we always want on. */
4219c30bb1fdSVille Syrjälä 	dev_priv->irq_mask =
4220c30bb1fdSVille Syrjälä 		~(I915_ASLE_INTERRUPT |
4221adca4730SChris Wilson 		  I915_DISPLAY_PORT_INTERRUPT |
4222bbba0a97SChris Wilson 		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4223bbba0a97SChris Wilson 		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4224bbba0a97SChris Wilson 		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
4225bbba0a97SChris Wilson 
4226c30bb1fdSVille Syrjälä 	enable_mask =
4227c30bb1fdSVille Syrjälä 		I915_ASLE_INTERRUPT |
4228c30bb1fdSVille Syrjälä 		I915_DISPLAY_PORT_INTERRUPT |
4229c30bb1fdSVille Syrjälä 		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4230c30bb1fdSVille Syrjälä 		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4231c30bb1fdSVille Syrjälä 		I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
4232c30bb1fdSVille Syrjälä 		I915_USER_INTERRUPT;
4233bbba0a97SChris Wilson 
423491d14251STvrtko Ursulin 	if (IS_G4X(dev_priv))
4235bbba0a97SChris Wilson 		enable_mask |= I915_BSD_USER_INTERRUPT;
4236a266c7d5SChris Wilson 
4237c30bb1fdSVille Syrjälä 	GEN3_IRQ_INIT(, dev_priv->irq_mask, enable_mask);
4238c30bb1fdSVille Syrjälä 
4239b79480baSDaniel Vetter 	/* Interrupt setup is already guaranteed to be single-threaded; this is
4240b79480baSDaniel Vetter 	 * just to make the assert_spin_locked check happy. */
4241d6207435SDaniel Vetter 	spin_lock_irq(&dev_priv->irq_lock);
4242755e9019SImre Deak 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
4243755e9019SImre Deak 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4244755e9019SImre Deak 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4245d6207435SDaniel Vetter 	spin_unlock_irq(&dev_priv->irq_lock);
4246a266c7d5SChris Wilson 
424791d14251STvrtko Ursulin 	i915_enable_asle_pipestat(dev_priv);
424820afbda2SDaniel Vetter 
424920afbda2SDaniel Vetter 	return 0;
425020afbda2SDaniel Vetter }
425120afbda2SDaniel Vetter 
425291d14251STvrtko Ursulin static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
425320afbda2SDaniel Vetter {
425420afbda2SDaniel Vetter 	u32 hotplug_en;
425520afbda2SDaniel Vetter 
425667520415SChris Wilson 	lockdep_assert_held(&dev_priv->irq_lock);
4257b5ea2d56SDaniel Vetter 
4258adca4730SChris Wilson 	/* Note HDMI and DP share hotplug bits */
4259e5868a31SEgbert Eich 	/* enable bits are the same for all generations */
426091d14251STvrtko Ursulin 	hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915);
4261a266c7d5SChris Wilson 	/* Programming the CRT detection parameters tends
4262a266c7d5SChris Wilson 	 * to generate a spurious hotplug event about three
4263a266c7d5SChris Wilson 	 * seconds later. So just do it once.
4264a266c7d5SChris Wilson 	 */
426591d14251STvrtko Ursulin 	if (IS_G4X(dev_priv))
4266a266c7d5SChris Wilson 		hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
4267a266c7d5SChris Wilson 	hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
4268a266c7d5SChris Wilson 
4269a266c7d5SChris Wilson 	/* Ignore TV since it's buggy */
42700706f17cSEgbert Eich 	i915_hotplug_interrupt_update_locked(dev_priv,
4271f9e3dc78SJani Nikula 					     HOTPLUG_INT_EN_MASK |
4272f9e3dc78SJani Nikula 					     CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
4273f9e3dc78SJani Nikula 					     CRT_HOTPLUG_ACTIVATION_PERIOD_64,
42740706f17cSEgbert Eich 					     hotplug_en);
4275a266c7d5SChris Wilson }
4276a266c7d5SChris Wilson 
4277ff1f525eSDaniel Vetter static irqreturn_t i965_irq_handler(int irq, void *arg)
4278a266c7d5SChris Wilson {
427945a83f84SDaniel Vetter 	struct drm_device *dev = arg;
4280fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
4281af722d28SVille Syrjälä 	irqreturn_t ret = IRQ_NONE;
4282a266c7d5SChris Wilson 
42832dd2a883SImre Deak 	if (!intel_irqs_enabled(dev_priv))
42842dd2a883SImre Deak 		return IRQ_NONE;
42852dd2a883SImre Deak 
42861f814dacSImre Deak 	/* IRQs are synced during runtime_suspend, so we don't require a wakeref */
42871f814dacSImre Deak 	disable_rpm_wakeref_asserts(dev_priv);
42881f814dacSImre Deak 
4289af722d28SVille Syrjälä 	do {
4290eb64343cSVille Syrjälä 		u32 pipe_stats[I915_MAX_PIPES] = {};
4291af722d28SVille Syrjälä 		u32 hotplug_status = 0;
4292af722d28SVille Syrjälä 		u32 iir;
42932c8ba29fSChris Wilson 
4294af722d28SVille Syrjälä 		iir = I915_READ(IIR);
4295af722d28SVille Syrjälä 		if (iir == 0)
4296af722d28SVille Syrjälä 			break;
4297af722d28SVille Syrjälä 
4298af722d28SVille Syrjälä 		ret = IRQ_HANDLED;
4299af722d28SVille Syrjälä 
4300af722d28SVille Syrjälä 		if (iir & I915_DISPLAY_PORT_INTERRUPT)
4301af722d28SVille Syrjälä 			hotplug_status = i9xx_hpd_irq_ack(dev_priv);
4302a266c7d5SChris Wilson 
4303eb64343cSVille Syrjälä 		/* Call regardless, as some status bits might not be
4304eb64343cSVille Syrjälä 		 * signalled in iir */
4305eb64343cSVille Syrjälä 		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
4306a266c7d5SChris Wilson 
4307fd3a4024SDaniel Vetter 		I915_WRITE(IIR, iir);
4308a266c7d5SChris Wilson 
4309a266c7d5SChris Wilson 		if (iir & I915_USER_INTERRUPT)
43103b3f1650SAkash Goel 			notify_ring(dev_priv->engine[RCS]);
4311af722d28SVille Syrjälä 
4312a266c7d5SChris Wilson 		if (iir & I915_BSD_USER_INTERRUPT)
43133b3f1650SAkash Goel 			notify_ring(dev_priv->engine[VCS]);
4314a266c7d5SChris Wilson 
4315af722d28SVille Syrjälä 		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
4316af722d28SVille Syrjälä 			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
4317515ac2bbSDaniel Vetter 
4318af722d28SVille Syrjälä 		if (hotplug_status)
4319af722d28SVille Syrjälä 			i9xx_hpd_irq_handler(dev_priv, hotplug_status);
4320af722d28SVille Syrjälä 
4321af722d28SVille Syrjälä 		i965_pipestat_irq_handler(dev_priv, iir, pipe_stats);
4322af722d28SVille Syrjälä 	} while (0);
4323a266c7d5SChris Wilson 
43241f814dacSImre Deak 	enable_rpm_wakeref_asserts(dev_priv);
43251f814dacSImre Deak 
4326a266c7d5SChris Wilson 	return ret;
4327a266c7d5SChris Wilson }
4328a266c7d5SChris Wilson 
4329fca52a55SDaniel Vetter /**
4330fca52a55SDaniel Vetter  * intel_irq_init - initializes irq support
4331fca52a55SDaniel Vetter  * @dev_priv: i915 device instance
4332fca52a55SDaniel Vetter  *
4333fca52a55SDaniel Vetter  * This function initializes all the irq support including work items, timers
4334fca52a55SDaniel Vetter  * and all the vtables. It does not set up the interrupt itself, though.
4335fca52a55SDaniel Vetter  */
4336b963291cSDaniel Vetter void intel_irq_init(struct drm_i915_private *dev_priv)
4337f71d4af4SJesse Barnes {
433891c8a326SChris Wilson 	struct drm_device *dev = &dev_priv->drm;
4339562d9baeSSagar Arun Kamble 	struct intel_rps *rps = &dev_priv->gt_pm.rps;
4340cefcff8fSJoonas Lahtinen 	int i;
43418b2e326dSChris Wilson 
434277913b39SJani Nikula 	intel_hpd_init_work(dev_priv);
434377913b39SJani Nikula 
4344562d9baeSSagar Arun Kamble 	INIT_WORK(&rps->work, gen6_pm_rps_work);
4345cefcff8fSJoonas Lahtinen 
4346a4da4fa4SDaniel Vetter 	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
4347cefcff8fSJoonas Lahtinen 	for (i = 0; i < MAX_L3_SLICES; ++i)
4348cefcff8fSJoonas Lahtinen 		dev_priv->l3_parity.remap_info[i] = NULL;
43498b2e326dSChris Wilson 
43504805fe82STvrtko Ursulin 	if (HAS_GUC_SCHED(dev_priv))
435126705e20SSagar Arun Kamble 		dev_priv->pm_guc_events = GEN9_GUC_TO_HOST_INT_EVENT;
435226705e20SSagar Arun Kamble 
4353a6706b45SDeepak S 	/* Let's track the enabled rps events */
4354666a4537SWayne Boyer 	if (IS_VALLEYVIEW(dev_priv))
43556c65a587SVille Syrjälä 		/* WaGsvRC0ResidencyMethod:vlv */
4356e0e8c7cbSChris Wilson 		dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
435731685c25SDeepak S 	else
4358a6706b45SDeepak S 		dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
4359a6706b45SDeepak S 
4360562d9baeSSagar Arun Kamble 	rps->pm_intrmsk_mbz = 0;
43611800ad25SSagar Arun Kamble 
43621800ad25SSagar Arun Kamble 	/*
4363acf2dc22SMika Kuoppala 	 * SNB, IVB and HSW can hard hang (and VLV, CHV may) on a looping
43641800ad25SSagar Arun Kamble 	 * batchbuffer if GEN6_PM_UP_EI_EXPIRED is masked.
43651800ad25SSagar Arun Kamble 	 *
43661800ad25SSagar Arun Kamble 	 * TODO: verify if this can be reproduced on VLV, CHV.
43671800ad25SSagar Arun Kamble 	 */
4368bca2bf2aSPandiyan, Dhinakaran 	if (INTEL_GEN(dev_priv) <= 7)
4369562d9baeSSagar Arun Kamble 		rps->pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED;
43701800ad25SSagar Arun Kamble 
4371bca2bf2aSPandiyan, Dhinakaran 	if (INTEL_GEN(dev_priv) >= 8)
4372562d9baeSSagar Arun Kamble 		rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
43731800ad25SSagar Arun Kamble 
4374b963291cSDaniel Vetter 	if (IS_GEN2(dev_priv)) {
43754194c088SRodrigo Vivi 		/* Gen2 doesn't have a hardware frame counter */
43764cdb83ecSVille Syrjälä 		dev->max_vblank_count = 0;
4377bca2bf2aSPandiyan, Dhinakaran 	} else if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
4378f71d4af4SJesse Barnes 		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
4379fd8f507cSVille Syrjälä 		dev->driver->get_vblank_counter = g4x_get_vblank_counter;
4380391f75e2SVille Syrjälä 	} else {
4381391f75e2SVille Syrjälä 		dev->driver->get_vblank_counter = i915_get_vblank_counter;
4382391f75e2SVille Syrjälä 		dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
4383f71d4af4SJesse Barnes 	}
4384f71d4af4SJesse Barnes 
438521da2700SVille Syrjälä 	/*
438621da2700SVille Syrjälä 	 * Opt out of the vblank disable timer on everything except gen2.
438721da2700SVille Syrjälä 	 * Gen2 doesn't have a hardware frame counter and so depends on
438821da2700SVille Syrjälä 	 * vblank interrupts to produce sane vblank sequence numbers.
438921da2700SVille Syrjälä 	 */
4390b963291cSDaniel Vetter 	if (!IS_GEN2(dev_priv))
439121da2700SVille Syrjälä 		dev->vblank_disable_immediate = true;
439221da2700SVille Syrjälä 
4393262fd485SChris Wilson 	/* Most platforms treat the display irq block as an always-on
4394262fd485SChris Wilson 	 * power domain. vlv/chv can disable it at runtime and need
4395262fd485SChris Wilson 	 * special care to avoid writing any of the display block registers
4396262fd485SChris Wilson 	 * outside of the power domain. In that case we defer setting up
4397262fd485SChris Wilson 	 * the display irqs to the runtime pm code.
4398262fd485SChris Wilson 	 */
4399262fd485SChris Wilson 	dev_priv->display_irqs_enabled = true;
4400262fd485SChris Wilson 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
4401262fd485SChris Wilson 		dev_priv->display_irqs_enabled = false;
4402262fd485SChris Wilson 
4403317eaa95SLyude 	dev_priv->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD;
4404317eaa95SLyude 
44051bf6ad62SDaniel Vetter 	dev->driver->get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos;
4406f71d4af4SJesse Barnes 	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
4407f71d4af4SJesse Barnes 
4408b963291cSDaniel Vetter 	if (IS_CHERRYVIEW(dev_priv)) {
440943f328d7SVille Syrjälä 		dev->driver->irq_handler = cherryview_irq_handler;
44106bcdb1c8SVille Syrjälä 		dev->driver->irq_preinstall = cherryview_irq_reset;
441143f328d7SVille Syrjälä 		dev->driver->irq_postinstall = cherryview_irq_postinstall;
44126bcdb1c8SVille Syrjälä 		dev->driver->irq_uninstall = cherryview_irq_reset;
441386e83e35SChris Wilson 		dev->driver->enable_vblank = i965_enable_vblank;
441486e83e35SChris Wilson 		dev->driver->disable_vblank = i965_disable_vblank;
441543f328d7SVille Syrjälä 		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4416b963291cSDaniel Vetter 	} else if (IS_VALLEYVIEW(dev_priv)) {
44177e231dbeSJesse Barnes 		dev->driver->irq_handler = valleyview_irq_handler;
44186bcdb1c8SVille Syrjälä 		dev->driver->irq_preinstall = valleyview_irq_reset;
44197e231dbeSJesse Barnes 		dev->driver->irq_postinstall = valleyview_irq_postinstall;
44206bcdb1c8SVille Syrjälä 		dev->driver->irq_uninstall = valleyview_irq_reset;
442186e83e35SChris Wilson 		dev->driver->enable_vblank = i965_enable_vblank;
442286e83e35SChris Wilson 		dev->driver->disable_vblank = i965_disable_vblank;
4423fa00abe0SEgbert Eich 		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
442451951ae7SMika Kuoppala 	} else if (INTEL_GEN(dev_priv) >= 11) {
442551951ae7SMika Kuoppala 		dev->driver->irq_handler = gen11_irq_handler;
442651951ae7SMika Kuoppala 		dev->driver->irq_preinstall = gen11_irq_reset;
442751951ae7SMika Kuoppala 		dev->driver->irq_postinstall = gen11_irq_postinstall;
442851951ae7SMika Kuoppala 		dev->driver->irq_uninstall = gen11_irq_reset;
442951951ae7SMika Kuoppala 		dev->driver->enable_vblank = gen8_enable_vblank;
443051951ae7SMika Kuoppala 		dev->driver->disable_vblank = gen8_disable_vblank;
443151951ae7SMika Kuoppala 		dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
4432bca2bf2aSPandiyan, Dhinakaran 	} else if (INTEL_GEN(dev_priv) >= 8) {
4433abd58f01SBen Widawsky 		dev->driver->irq_handler = gen8_irq_handler;
4434723761b8SDaniel Vetter 		dev->driver->irq_preinstall = gen8_irq_reset;
4435abd58f01SBen Widawsky 		dev->driver->irq_postinstall = gen8_irq_postinstall;
44366bcdb1c8SVille Syrjälä 		dev->driver->irq_uninstall = gen8_irq_reset;
4437abd58f01SBen Widawsky 		dev->driver->enable_vblank = gen8_enable_vblank;
4438abd58f01SBen Widawsky 		dev->driver->disable_vblank = gen8_disable_vblank;
4439cc3f90f0SAnder Conselvan de Oliveira 		if (IS_GEN9_LP(dev_priv))
4440e0a20ad7SShashank Sharma 			dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
44417b22b8c4SRodrigo Vivi 		else if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv) ||
44427b22b8c4SRodrigo Vivi 			 HAS_PCH_CNP(dev_priv))
44436dbf30ceSVille Syrjälä 			dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
44446dbf30ceSVille Syrjälä 		else
44453a3b3c7dSVille Syrjälä 			dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
44466e266956STvrtko Ursulin 	} else if (HAS_PCH_SPLIT(dev_priv)) {
4447f71d4af4SJesse Barnes 		dev->driver->irq_handler = ironlake_irq_handler;
4448723761b8SDaniel Vetter 		dev->driver->irq_preinstall = ironlake_irq_reset;
4449f71d4af4SJesse Barnes 		dev->driver->irq_postinstall = ironlake_irq_postinstall;
44506bcdb1c8SVille Syrjälä 		dev->driver->irq_uninstall = ironlake_irq_reset;
4451f71d4af4SJesse Barnes 		dev->driver->enable_vblank = ironlake_enable_vblank;
4452f71d4af4SJesse Barnes 		dev->driver->disable_vblank = ironlake_disable_vblank;
4453e4ce95aaSVille Syrjälä 		dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
4454f71d4af4SJesse Barnes 	} else {
44557e22dbbbSTvrtko Ursulin 		if (IS_GEN2(dev_priv)) {
44566bcdb1c8SVille Syrjälä 			dev->driver->irq_preinstall = i8xx_irq_reset;
4457c2798b19SChris Wilson 			dev->driver->irq_postinstall = i8xx_irq_postinstall;
4458c2798b19SChris Wilson 			dev->driver->irq_handler = i8xx_irq_handler;
44596bcdb1c8SVille Syrjälä 			dev->driver->irq_uninstall = i8xx_irq_reset;
446086e83e35SChris Wilson 			dev->driver->enable_vblank = i8xx_enable_vblank;
446186e83e35SChris Wilson 			dev->driver->disable_vblank = i8xx_disable_vblank;
44627e22dbbbSTvrtko Ursulin 		} else if (IS_GEN3(dev_priv)) {
44636bcdb1c8SVille Syrjälä 			dev->driver->irq_preinstall = i915_irq_reset;
4464a266c7d5SChris Wilson 			dev->driver->irq_postinstall = i915_irq_postinstall;
44656bcdb1c8SVille Syrjälä 			dev->driver->irq_uninstall = i915_irq_reset;
4466a266c7d5SChris Wilson 			dev->driver->irq_handler = i915_irq_handler;
446786e83e35SChris Wilson 			dev->driver->enable_vblank = i8xx_enable_vblank;
446886e83e35SChris Wilson 			dev->driver->disable_vblank = i8xx_disable_vblank;
4469c2798b19SChris Wilson 		} else {
44706bcdb1c8SVille Syrjälä 			dev->driver->irq_preinstall = i965_irq_reset;
4471a266c7d5SChris Wilson 			dev->driver->irq_postinstall = i965_irq_postinstall;
44726bcdb1c8SVille Syrjälä 			dev->driver->irq_uninstall = i965_irq_reset;
4473a266c7d5SChris Wilson 			dev->driver->irq_handler = i965_irq_handler;
447486e83e35SChris Wilson 			dev->driver->enable_vblank = i965_enable_vblank;
447586e83e35SChris Wilson 			dev->driver->disable_vblank = i965_disable_vblank;
4476c2798b19SChris Wilson 		}
4477778eb334SVille Syrjälä 		if (I915_HAS_HOTPLUG(dev_priv))
4478778eb334SVille Syrjälä 			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4479f71d4af4SJesse Barnes 	}
4480f71d4af4SJesse Barnes }
448120afbda2SDaniel Vetter 
4482fca52a55SDaniel Vetter /**
4483cefcff8fSJoonas Lahtinen  * intel_irq_fini - deinitializes IRQ support
4484cefcff8fSJoonas Lahtinen  * @i915: i915 device instance
4485cefcff8fSJoonas Lahtinen  *
4486cefcff8fSJoonas Lahtinen  * This function deinitializes all the IRQ support.
4487cefcff8fSJoonas Lahtinen  */
4488cefcff8fSJoonas Lahtinen void intel_irq_fini(struct drm_i915_private *i915)
4489cefcff8fSJoonas Lahtinen {
4490cefcff8fSJoonas Lahtinen 	int i;
4491cefcff8fSJoonas Lahtinen 
4492cefcff8fSJoonas Lahtinen 	for (i = 0; i < MAX_L3_SLICES; ++i)
4493cefcff8fSJoonas Lahtinen 		kfree(i915->l3_parity.remap_info[i]);
4494cefcff8fSJoonas Lahtinen }
4495cefcff8fSJoonas Lahtinen 
4496cefcff8fSJoonas Lahtinen /**
4497fca52a55SDaniel Vetter  * intel_irq_install - enables the hardware interrupt
4498fca52a55SDaniel Vetter  * @dev_priv: i915 device instance
4499fca52a55SDaniel Vetter  *
4500fca52a55SDaniel Vetter  * This function enables the hardware interrupt handling, but leaves the hotplug
4501fca52a55SDaniel Vetter  * handling still disabled. It is called after intel_irq_init().
4502fca52a55SDaniel Vetter  *
4503fca52a55SDaniel Vetter  * In the driver load and resume code we need working interrupts in a few places
4504fca52a55SDaniel Vetter  * but don't want to deal with the hassle of concurrent probe and hotplug
4505fca52a55SDaniel Vetter  * workers. Hence the split into this two-stage approach.
4506fca52a55SDaniel Vetter  */
45072aeb7d3aSDaniel Vetter int intel_irq_install(struct drm_i915_private *dev_priv)
45082aeb7d3aSDaniel Vetter {
45092aeb7d3aSDaniel Vetter 	/*
45102aeb7d3aSDaniel Vetter 	 * We enable some interrupt sources in our postinstall hooks, so mark
45112aeb7d3aSDaniel Vetter 	 * interrupts as enabled _before_ actually enabling them to avoid
45122aeb7d3aSDaniel Vetter 	 * special cases in our ordering checks.
45132aeb7d3aSDaniel Vetter 	 */
4514ad1443f0SSagar Arun Kamble 	dev_priv->runtime_pm.irqs_enabled = true;
45152aeb7d3aSDaniel Vetter 
451691c8a326SChris Wilson 	return drm_irq_install(&dev_priv->drm, dev_priv->drm.pdev->irq);
45172aeb7d3aSDaniel Vetter }
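
/*
 * Editorial example, not part of the original file: a minimal sketch of the
 * two-stage split described in the kernel-doc above. intel_irq_init() only
 * prepares the vtables, work items and timers, while intel_irq_install()
 * actually requests and enables the interrupt. The helper name
 * example_irq_setup() is hypothetical, and a real driver load path does far
 * more error handling; teardown pairs up as intel_irq_uninstall() followed
 * by intel_irq_fini().
 */
static int __maybe_unused example_irq_setup(struct drm_i915_private *dev_priv)
{
	/* Stage 1: vtables, work items and timers; no interrupt requested yet. */
	intel_irq_init(dev_priv);

	/* Stage 2: mark irqs as enabled and request the hardware interrupt. */
	return intel_irq_install(dev_priv);
}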
45182aeb7d3aSDaniel Vetter 
4519fca52a55SDaniel Vetter /**
4520fca52a55SDaniel Vetter  * intel_irq_uninstall - finalizes all irq handling
4521fca52a55SDaniel Vetter  * @dev_priv: i915 device instance
4522fca52a55SDaniel Vetter  *
4523fca52a55SDaniel Vetter  * This stops interrupt and hotplug handling and unregisters and frees all
4524fca52a55SDaniel Vetter  * resources acquired in the init functions.
4525fca52a55SDaniel Vetter  */
45262aeb7d3aSDaniel Vetter void intel_irq_uninstall(struct drm_i915_private *dev_priv)
45272aeb7d3aSDaniel Vetter {
452891c8a326SChris Wilson 	drm_irq_uninstall(&dev_priv->drm);
45292aeb7d3aSDaniel Vetter 	intel_hpd_cancel_work(dev_priv);
4530ad1443f0SSagar Arun Kamble 	dev_priv->runtime_pm.irqs_enabled = false;
45312aeb7d3aSDaniel Vetter }
45322aeb7d3aSDaniel Vetter 
4533fca52a55SDaniel Vetter /**
4534fca52a55SDaniel Vetter  * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
4535fca52a55SDaniel Vetter  * @dev_priv: i915 device instance
4536fca52a55SDaniel Vetter  *
4537fca52a55SDaniel Vetter  * This function is used to disable interrupts at runtime, both in the runtime
4538fca52a55SDaniel Vetter  * pm and the system suspend/resume code.
4539fca52a55SDaniel Vetter  */
4540b963291cSDaniel Vetter void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
4541c67a470bSPaulo Zanoni {
454291c8a326SChris Wilson 	dev_priv->drm.driver->irq_uninstall(&dev_priv->drm);
4543ad1443f0SSagar Arun Kamble 	dev_priv->runtime_pm.irqs_enabled = false;
454491c8a326SChris Wilson 	synchronize_irq(dev_priv->drm.irq);
4545c67a470bSPaulo Zanoni }
4546c67a470bSPaulo Zanoni 
4547fca52a55SDaniel Vetter /**
4548fca52a55SDaniel Vetter  * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
4549fca52a55SDaniel Vetter  * @dev_priv: i915 device instance
4550fca52a55SDaniel Vetter  *
4551fca52a55SDaniel Vetter  * This function is used to enable interrupts at runtime, both in the runtime
4552fca52a55SDaniel Vetter  * pm and the system suspend/resume code.
4553fca52a55SDaniel Vetter  */
4554b963291cSDaniel Vetter void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
4555c67a470bSPaulo Zanoni {
4556ad1443f0SSagar Arun Kamble 	dev_priv->runtime_pm.irqs_enabled = true;
455791c8a326SChris Wilson 	dev_priv->drm.driver->irq_preinstall(&dev_priv->drm);
455891c8a326SChris Wilson 	dev_priv->drm.driver->irq_postinstall(&dev_priv->drm);
4559c67a470bSPaulo Zanoni }
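
/*
 * Editorial example, not part of the original file: how the two runtime pm
 * helpers above are expected to pair up around a suspend/resume cycle, per
 * their kernel-doc. The helper names are hypothetical and the real runtime
 * pm hooks do considerably more than this.
 */
static void __maybe_unused example_runtime_suspend_irqs(struct drm_i915_private *dev_priv)
{
	/* Quiesce the interrupt handling before the device is powered down. */
	intel_runtime_pm_disable_interrupts(dev_priv);
}

static void __maybe_unused example_runtime_resume_irqs(struct drm_i915_private *dev_priv)
{
	/* Reprogram the interrupt registers once the device is powered back up. */
	intel_runtime_pm_enable_interrupts(dev_priv);
}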
4560