xref: /openbmc/linux/drivers/gpu/drm/i915/i915_irq.c (revision 9cbd51c2c0edbafdaab7f0fa7569d1f455113a9b)
1c0e09200SDave Airlie /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
2c0e09200SDave Airlie  */
3c0e09200SDave Airlie /*
4c0e09200SDave Airlie  * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
5c0e09200SDave Airlie  * All Rights Reserved.
6c0e09200SDave Airlie  *
7c0e09200SDave Airlie  * Permission is hereby granted, free of charge, to any person obtaining a
8c0e09200SDave Airlie  * copy of this software and associated documentation files (the
9c0e09200SDave Airlie  * "Software"), to deal in the Software without restriction, including
10c0e09200SDave Airlie  * without limitation the rights to use, copy, modify, merge, publish,
11c0e09200SDave Airlie  * distribute, sub license, and/or sell copies of the Software, and to
12c0e09200SDave Airlie  * permit persons to whom the Software is furnished to do so, subject to
13c0e09200SDave Airlie  * the following conditions:
14c0e09200SDave Airlie  *
15c0e09200SDave Airlie  * The above copyright notice and this permission notice (including the
16c0e09200SDave Airlie  * next paragraph) shall be included in all copies or substantial portions
17c0e09200SDave Airlie  * of the Software.
18c0e09200SDave Airlie  *
19c0e09200SDave Airlie  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20c0e09200SDave Airlie  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21c0e09200SDave Airlie  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22c0e09200SDave Airlie  * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23c0e09200SDave Airlie  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24c0e09200SDave Airlie  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25c0e09200SDave Airlie  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26c0e09200SDave Airlie  *
27c0e09200SDave Airlie  */
28c0e09200SDave Airlie 
29a70491ccSJoe Perches #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30a70491ccSJoe Perches 
31b2c88f5bSDamien Lespiau #include <linux/circ_buf.h>
3255367a27SJani Nikula #include <linux/cpuidle.h>
3355367a27SJani Nikula #include <linux/slab.h>
3455367a27SJani Nikula #include <linux/sysrq.h>
3555367a27SJani Nikula 
36fcd70cd3SDaniel Vetter #include <drm/drm_drv.h>
3755367a27SJani Nikula #include <drm/drm_irq.h>
38760285e7SDavid Howells #include <drm/i915_drm.h>
3955367a27SJani Nikula 
40df0566a6SJani Nikula #include "display/intel_fifo_underrun.h"
41df0566a6SJani Nikula #include "display/intel_hotplug.h"
42df0566a6SJani Nikula #include "display/intel_lpe_audio.h"
43df0566a6SJani Nikula #include "display/intel_psr.h"
44df0566a6SJani Nikula 
45c0e09200SDave Airlie #include "i915_drv.h"
46440e2b3dSJani Nikula #include "i915_irq.h"
471c5d22f7SChris Wilson #include "i915_trace.h"
4879e53945SJesse Barnes #include "intel_drv.h"
49d13616dbSJani Nikula #include "intel_pm.h"
50c0e09200SDave Airlie 
51fca52a55SDaniel Vetter /**
52fca52a55SDaniel Vetter  * DOC: interrupt handling
53fca52a55SDaniel Vetter  *
54fca52a55SDaniel Vetter  * These functions provide the basic support for enabling and disabling the
55fca52a55SDaniel Vetter  * interrupt handling support. There's a lot more functionality in i915_irq.c
56fca52a55SDaniel Vetter  * and related files, but that will be described in separate chapters.
57fca52a55SDaniel Vetter  */
58fca52a55SDaniel Vetter 
59e4ce95aaSVille Syrjälä static const u32 hpd_ilk[HPD_NUM_PINS] = {
60e4ce95aaSVille Syrjälä 	[HPD_PORT_A] = DE_DP_A_HOTPLUG,
61e4ce95aaSVille Syrjälä };
62e4ce95aaSVille Syrjälä 
6323bb4cb5SVille Syrjälä static const u32 hpd_ivb[HPD_NUM_PINS] = {
6423bb4cb5SVille Syrjälä 	[HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
6523bb4cb5SVille Syrjälä };
6623bb4cb5SVille Syrjälä 
673a3b3c7dSVille Syrjälä static const u32 hpd_bdw[HPD_NUM_PINS] = {
683a3b3c7dSVille Syrjälä 	[HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG,
693a3b3c7dSVille Syrjälä };
703a3b3c7dSVille Syrjälä 
717c7e10dbSVille Syrjälä static const u32 hpd_ibx[HPD_NUM_PINS] = {
72e5868a31SEgbert Eich 	[HPD_CRT] = SDE_CRT_HOTPLUG,
73e5868a31SEgbert Eich 	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
74e5868a31SEgbert Eich 	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
75e5868a31SEgbert Eich 	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
76e5868a31SEgbert Eich 	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
77e5868a31SEgbert Eich };
78e5868a31SEgbert Eich 
797c7e10dbSVille Syrjälä static const u32 hpd_cpt[HPD_NUM_PINS] = {
80e5868a31SEgbert Eich 	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
8173c352a2SDaniel Vetter 	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
82e5868a31SEgbert Eich 	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
83e5868a31SEgbert Eich 	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
84e5868a31SEgbert Eich 	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
85e5868a31SEgbert Eich };
86e5868a31SEgbert Eich 
8726951cafSXiong Zhang static const u32 hpd_spt[HPD_NUM_PINS] = {
8874c0b395SVille Syrjälä 	[HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
8926951cafSXiong Zhang 	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
9026951cafSXiong Zhang 	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
9126951cafSXiong Zhang 	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
9226951cafSXiong Zhang 	[HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT
9326951cafSXiong Zhang };
9426951cafSXiong Zhang 
957c7e10dbSVille Syrjälä static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
96e5868a31SEgbert Eich 	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
97e5868a31SEgbert Eich 	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
98e5868a31SEgbert Eich 	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
99e5868a31SEgbert Eich 	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
100e5868a31SEgbert Eich 	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
101e5868a31SEgbert Eich 	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
102e5868a31SEgbert Eich };
103e5868a31SEgbert Eich 
1047c7e10dbSVille Syrjälä static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
105e5868a31SEgbert Eich 	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
106e5868a31SEgbert Eich 	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
107e5868a31SEgbert Eich 	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
108e5868a31SEgbert Eich 	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
109e5868a31SEgbert Eich 	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
110e5868a31SEgbert Eich 	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
111e5868a31SEgbert Eich };
112e5868a31SEgbert Eich 
1134bca26d0SVille Syrjälä static const u32 hpd_status_i915[HPD_NUM_PINS] = {
114e5868a31SEgbert Eich 	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
115e5868a31SEgbert Eich 	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
116e5868a31SEgbert Eich 	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
117e5868a31SEgbert Eich 	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
118e5868a31SEgbert Eich 	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
119e5868a31SEgbert Eich 	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
120e5868a31SEgbert Eich };
121e5868a31SEgbert Eich 
122e0a20ad7SShashank Sharma /* BXT hpd list */
123e0a20ad7SShashank Sharma static const u32 hpd_bxt[HPD_NUM_PINS] = {
1247f3561beSSonika Jindal 	[HPD_PORT_A] = BXT_DE_PORT_HP_DDIA,
125e0a20ad7SShashank Sharma 	[HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
126e0a20ad7SShashank Sharma 	[HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
127e0a20ad7SShashank Sharma };
128e0a20ad7SShashank Sharma 
129b796b971SDhinakaran Pandiyan static const u32 hpd_gen11[HPD_NUM_PINS] = {
130b796b971SDhinakaran Pandiyan 	[HPD_PORT_C] = GEN11_TC1_HOTPLUG | GEN11_TBT1_HOTPLUG,
131b796b971SDhinakaran Pandiyan 	[HPD_PORT_D] = GEN11_TC2_HOTPLUG | GEN11_TBT2_HOTPLUG,
132b796b971SDhinakaran Pandiyan 	[HPD_PORT_E] = GEN11_TC3_HOTPLUG | GEN11_TBT3_HOTPLUG,
133b796b971SDhinakaran Pandiyan 	[HPD_PORT_F] = GEN11_TC4_HOTPLUG | GEN11_TBT4_HOTPLUG
134121e758eSDhinakaran Pandiyan };
135121e758eSDhinakaran Pandiyan 
13631604222SAnusha Srivatsa static const u32 hpd_icp[HPD_NUM_PINS] = {
13731604222SAnusha Srivatsa 	[HPD_PORT_A] = SDE_DDIA_HOTPLUG_ICP,
13831604222SAnusha Srivatsa 	[HPD_PORT_B] = SDE_DDIB_HOTPLUG_ICP,
13931604222SAnusha Srivatsa 	[HPD_PORT_C] = SDE_TC1_HOTPLUG_ICP,
14031604222SAnusha Srivatsa 	[HPD_PORT_D] = SDE_TC2_HOTPLUG_ICP,
14131604222SAnusha Srivatsa 	[HPD_PORT_E] = SDE_TC3_HOTPLUG_ICP,
14231604222SAnusha Srivatsa 	[HPD_PORT_F] = SDE_TC4_HOTPLUG_ICP
14331604222SAnusha Srivatsa };
14431604222SAnusha Srivatsa 
145c6f7acb8SMatt Roper static const u32 hpd_mcc[HPD_NUM_PINS] = {
146c6f7acb8SMatt Roper 	[HPD_PORT_A] = SDE_DDIA_HOTPLUG_ICP,
147c6f7acb8SMatt Roper 	[HPD_PORT_B] = SDE_DDIB_HOTPLUG_ICP,
148c6f7acb8SMatt Roper 	[HPD_PORT_C] = SDE_TC1_HOTPLUG_ICP
149c6f7acb8SMatt Roper };
150c6f7acb8SMatt Roper 
15165f42cdcSPaulo Zanoni static void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr,
15268eb49b1SPaulo Zanoni 			   i915_reg_t iir, i915_reg_t ier)
15368eb49b1SPaulo Zanoni {
15465f42cdcSPaulo Zanoni 	intel_uncore_write(uncore, imr, 0xffffffff);
15565f42cdcSPaulo Zanoni 	intel_uncore_posting_read(uncore, imr);
15668eb49b1SPaulo Zanoni 
15765f42cdcSPaulo Zanoni 	intel_uncore_write(uncore, ier, 0);
15868eb49b1SPaulo Zanoni 
1595c502442SPaulo Zanoni 	/* IIR can theoretically queue up two events. Be paranoid. */
16065f42cdcSPaulo Zanoni 	intel_uncore_write(uncore, iir, 0xffffffff);
16165f42cdcSPaulo Zanoni 	intel_uncore_posting_read(uncore, iir);
16265f42cdcSPaulo Zanoni 	intel_uncore_write(uncore, iir, 0xffffffff);
16365f42cdcSPaulo Zanoni 	intel_uncore_posting_read(uncore, iir);
16468eb49b1SPaulo Zanoni }
1655c502442SPaulo Zanoni 
16665f42cdcSPaulo Zanoni static void gen2_irq_reset(struct intel_uncore *uncore)
16768eb49b1SPaulo Zanoni {
16865f42cdcSPaulo Zanoni 	intel_uncore_write16(uncore, GEN2_IMR, 0xffff);
16965f42cdcSPaulo Zanoni 	intel_uncore_posting_read16(uncore, GEN2_IMR);
170a9d356a6SPaulo Zanoni 
17165f42cdcSPaulo Zanoni 	intel_uncore_write16(uncore, GEN2_IER, 0);
17268eb49b1SPaulo Zanoni 
17368eb49b1SPaulo Zanoni 	/* IIR can theoretically queue up two events. Be paranoid. */
17465f42cdcSPaulo Zanoni 	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
17565f42cdcSPaulo Zanoni 	intel_uncore_posting_read16(uncore, GEN2_IIR);
17665f42cdcSPaulo Zanoni 	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
17765f42cdcSPaulo Zanoni 	intel_uncore_posting_read16(uncore, GEN2_IIR);
17868eb49b1SPaulo Zanoni }
17968eb49b1SPaulo Zanoni 
180b16b2a2fSPaulo Zanoni #define GEN8_IRQ_RESET_NDX(uncore, type, which) \
18168eb49b1SPaulo Zanoni ({ \
18268eb49b1SPaulo Zanoni 	unsigned int which_ = which; \
183b16b2a2fSPaulo Zanoni 	gen3_irq_reset((uncore), GEN8_##type##_IMR(which_), \
18468eb49b1SPaulo Zanoni 		       GEN8_##type##_IIR(which_), GEN8_##type##_IER(which_)); \
18568eb49b1SPaulo Zanoni })
18668eb49b1SPaulo Zanoni 
187b16b2a2fSPaulo Zanoni #define GEN3_IRQ_RESET(uncore, type) \
188b16b2a2fSPaulo Zanoni 	gen3_irq_reset((uncore), type##IMR, type##IIR, type##IER)
18968eb49b1SPaulo Zanoni 
190b16b2a2fSPaulo Zanoni #define GEN2_IRQ_RESET(uncore) \
191b16b2a2fSPaulo Zanoni 	gen2_irq_reset(uncore)
192e9e9848aSVille Syrjälä 
193337ba017SPaulo Zanoni /*
194337ba017SPaulo Zanoni  * We should clear IMR at preinstall/uninstall, and just check at postinstall.
195337ba017SPaulo Zanoni  */
19665f42cdcSPaulo Zanoni static void gen3_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg)
197b51a2842SVille Syrjälä {
19865f42cdcSPaulo Zanoni 	u32 val = intel_uncore_read(uncore, reg);
199b51a2842SVille Syrjälä 
200b51a2842SVille Syrjälä 	if (val == 0)
201b51a2842SVille Syrjälä 		return;
202b51a2842SVille Syrjälä 
203b51a2842SVille Syrjälä 	WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
204f0f59a00SVille Syrjälä 	     i915_mmio_reg_offset(reg), val);
20565f42cdcSPaulo Zanoni 	intel_uncore_write(uncore, reg, 0xffffffff);
20665f42cdcSPaulo Zanoni 	intel_uncore_posting_read(uncore, reg);
20765f42cdcSPaulo Zanoni 	intel_uncore_write(uncore, reg, 0xffffffff);
20865f42cdcSPaulo Zanoni 	intel_uncore_posting_read(uncore, reg);
209b51a2842SVille Syrjälä }
210337ba017SPaulo Zanoni 
21165f42cdcSPaulo Zanoni static void gen2_assert_iir_is_zero(struct intel_uncore *uncore)
212e9e9848aSVille Syrjälä {
21365f42cdcSPaulo Zanoni 	u16 val = intel_uncore_read16(uncore, GEN2_IIR);
214e9e9848aSVille Syrjälä 
215e9e9848aSVille Syrjälä 	if (val == 0)
216e9e9848aSVille Syrjälä 		return;
217e9e9848aSVille Syrjälä 
218e9e9848aSVille Syrjälä 	WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
2199d9523d8SPaulo Zanoni 	     i915_mmio_reg_offset(GEN2_IIR), val);
22065f42cdcSPaulo Zanoni 	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
22165f42cdcSPaulo Zanoni 	intel_uncore_posting_read16(uncore, GEN2_IIR);
22265f42cdcSPaulo Zanoni 	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
22365f42cdcSPaulo Zanoni 	intel_uncore_posting_read16(uncore, GEN2_IIR);
224e9e9848aSVille Syrjälä }
225e9e9848aSVille Syrjälä 
22665f42cdcSPaulo Zanoni static void gen3_irq_init(struct intel_uncore *uncore,
22768eb49b1SPaulo Zanoni 			  i915_reg_t imr, u32 imr_val,
22868eb49b1SPaulo Zanoni 			  i915_reg_t ier, u32 ier_val,
22968eb49b1SPaulo Zanoni 			  i915_reg_t iir)
23068eb49b1SPaulo Zanoni {
23165f42cdcSPaulo Zanoni 	gen3_assert_iir_is_zero(uncore, iir);
23235079899SPaulo Zanoni 
23365f42cdcSPaulo Zanoni 	intel_uncore_write(uncore, ier, ier_val);
23465f42cdcSPaulo Zanoni 	intel_uncore_write(uncore, imr, imr_val);
23565f42cdcSPaulo Zanoni 	intel_uncore_posting_read(uncore, imr);
23668eb49b1SPaulo Zanoni }
23735079899SPaulo Zanoni 
23865f42cdcSPaulo Zanoni static void gen2_irq_init(struct intel_uncore *uncore,
2392918c3caSPaulo Zanoni 			  u32 imr_val, u32 ier_val)
24068eb49b1SPaulo Zanoni {
24165f42cdcSPaulo Zanoni 	gen2_assert_iir_is_zero(uncore);
24268eb49b1SPaulo Zanoni 
24365f42cdcSPaulo Zanoni 	intel_uncore_write16(uncore, GEN2_IER, ier_val);
24465f42cdcSPaulo Zanoni 	intel_uncore_write16(uncore, GEN2_IMR, imr_val);
24565f42cdcSPaulo Zanoni 	intel_uncore_posting_read16(uncore, GEN2_IMR);
24668eb49b1SPaulo Zanoni }
24768eb49b1SPaulo Zanoni 
248b16b2a2fSPaulo Zanoni #define GEN8_IRQ_INIT_NDX(uncore, type, which, imr_val, ier_val) \
24968eb49b1SPaulo Zanoni ({ \
25068eb49b1SPaulo Zanoni 	unsigned int which_ = which; \
251b16b2a2fSPaulo Zanoni 	gen3_irq_init((uncore), \
25268eb49b1SPaulo Zanoni 		      GEN8_##type##_IMR(which_), imr_val, \
25368eb49b1SPaulo Zanoni 		      GEN8_##type##_IER(which_), ier_val, \
25468eb49b1SPaulo Zanoni 		      GEN8_##type##_IIR(which_)); \
25568eb49b1SPaulo Zanoni })
25668eb49b1SPaulo Zanoni 
257b16b2a2fSPaulo Zanoni #define GEN3_IRQ_INIT(uncore, type, imr_val, ier_val) \
258b16b2a2fSPaulo Zanoni 	gen3_irq_init((uncore), \
25968eb49b1SPaulo Zanoni 		      type##IMR, imr_val, \
26068eb49b1SPaulo Zanoni 		      type##IER, ier_val, \
26168eb49b1SPaulo Zanoni 		      type##IIR)
26268eb49b1SPaulo Zanoni 
263b16b2a2fSPaulo Zanoni #define GEN2_IRQ_INIT(uncore, imr_val, ier_val) \
264b16b2a2fSPaulo Zanoni 	gen2_irq_init((uncore), imr_val, ier_val)
265e9e9848aSVille Syrjälä 
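/*
 * Illustrative expansion (editor's sketch, not part of the original file):
 * the type argument of the GEN3_IRQ_RESET/GEN3_IRQ_INIT macros is the
 * register-name prefix shared by the IMR/IIR/IER triplet, so a hypothetical
 * call such as
 *
 *	GEN3_IRQ_INIT(uncore, DE, de_imr_val, de_ier_val);
 *
 * expands (via token pasting) to
 *
 *	gen3_irq_init(uncore, DEIMR, de_imr_val, DEIER, de_ier_val, DEIIR);
 */
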
266c9a9a268SImre Deak static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
26726705e20SSagar Arun Kamble static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
268c9a9a268SImre Deak 
2690706f17cSEgbert Eich /* For display hotplug interrupt */
2700706f17cSEgbert Eich static inline void
2710706f17cSEgbert Eich i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
272a9c287c9SJani Nikula 				     u32 mask,
273a9c287c9SJani Nikula 				     u32 bits)
2740706f17cSEgbert Eich {
275a9c287c9SJani Nikula 	u32 val;
2760706f17cSEgbert Eich 
27767520415SChris Wilson 	lockdep_assert_held(&dev_priv->irq_lock);
2780706f17cSEgbert Eich 	WARN_ON(bits & ~mask);
2790706f17cSEgbert Eich 
2800706f17cSEgbert Eich 	val = I915_READ(PORT_HOTPLUG_EN);
2810706f17cSEgbert Eich 	val &= ~mask;
2820706f17cSEgbert Eich 	val |= bits;
2830706f17cSEgbert Eich 	I915_WRITE(PORT_HOTPLUG_EN, val);
2840706f17cSEgbert Eich }
2850706f17cSEgbert Eich 
2860706f17cSEgbert Eich /**
2870706f17cSEgbert Eich  * i915_hotplug_interrupt_update - update hotplug interrupt enable
2880706f17cSEgbert Eich  * @dev_priv: driver private
2890706f17cSEgbert Eich  * @mask: bits to update
2900706f17cSEgbert Eich  * @bits: bits to enable
2910706f17cSEgbert Eich  * NOTE: the HPD enable bits are modified both inside and outside
2920706f17cSEgbert Eich  * of an interrupt context. To keep read-modify-write cycles from
2930706f17cSEgbert Eich  * interfering, these bits are protected by a spinlock. Since this
2940706f17cSEgbert Eich  * function is usually not called from a context where the lock is
2950706f17cSEgbert Eich  * held already, this function acquires the lock itself. A non-locking
2960706f17cSEgbert Eich  * version is also available.
2970706f17cSEgbert Eich  */
2980706f17cSEgbert Eich void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
299a9c287c9SJani Nikula 				   u32 mask,
300a9c287c9SJani Nikula 				   u32 bits)
3010706f17cSEgbert Eich {
3020706f17cSEgbert Eich 	spin_lock_irq(&dev_priv->irq_lock);
3030706f17cSEgbert Eich 	i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
3040706f17cSEgbert Eich 	spin_unlock_irq(&dev_priv->irq_lock);
3050706f17cSEgbert Eich }
3060706f17cSEgbert Eich 
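/*
 * Usage sketch (editor's illustration): @mask selects which HPD enable bits
 * are affected and @bits gives their new value, so enabling only the CRT
 * hotplug interrupt while leaving the other bits alone would look like
 *
 *	i915_hotplug_interrupt_update(dev_priv,
 *				      CRT_HOTPLUG_INT_EN,
 *				      CRT_HOTPLUG_INT_EN);
 *
 * and passing bits == 0 with the same mask would disable it again.
 */
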
30796606f3bSOscar Mateo static u32
3089b77011eSTvrtko Ursulin gen11_gt_engine_identity(struct intel_gt *gt,
30996606f3bSOscar Mateo 			 const unsigned int bank, const unsigned int bit);
31096606f3bSOscar Mateo 
3119b77011eSTvrtko Ursulin static bool gen11_reset_one_iir(struct intel_gt *gt,
31296606f3bSOscar Mateo 				const unsigned int bank,
31396606f3bSOscar Mateo 				const unsigned int bit)
31496606f3bSOscar Mateo {
3159b77011eSTvrtko Ursulin 	void __iomem * const regs = gt->uncore->regs;
31696606f3bSOscar Mateo 	u32 dw;
31796606f3bSOscar Mateo 
3189b77011eSTvrtko Ursulin 	lockdep_assert_held(&gt->i915->irq_lock);
31996606f3bSOscar Mateo 
32096606f3bSOscar Mateo 	dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
32196606f3bSOscar Mateo 	if (dw & BIT(bit)) {
32296606f3bSOscar Mateo 		/*
32396606f3bSOscar Mateo 		 * According to the BSpec, DW_IIR bits cannot be cleared without
32496606f3bSOscar Mateo 		 * first servicing the Selector & Shared IIR registers.
32596606f3bSOscar Mateo 		 */
3269b77011eSTvrtko Ursulin 		gen11_gt_engine_identity(gt, bank, bit);
32796606f3bSOscar Mateo 
32896606f3bSOscar Mateo 		/*
32996606f3bSOscar Mateo 		 * We locked GT INT DW by reading it. If we want to (try
33096606f3bSOscar Mateo 		 * to) recover from this successfully, we need to clear
33196606f3bSOscar Mateo 		 * our bit, otherwise we are locking the register for
33296606f3bSOscar Mateo 		 * everybody.
33396606f3bSOscar Mateo 		 */
33496606f3bSOscar Mateo 		raw_reg_write(regs, GEN11_GT_INTR_DW(bank), BIT(bit));
33596606f3bSOscar Mateo 
33696606f3bSOscar Mateo 		return true;
33796606f3bSOscar Mateo 	}
33896606f3bSOscar Mateo 
33996606f3bSOscar Mateo 	return false;
34096606f3bSOscar Mateo }
34196606f3bSOscar Mateo 
342d9dc34f1SVille Syrjälä /**
343d9dc34f1SVille Syrjälä  * ilk_update_display_irq - update DEIMR
344d9dc34f1SVille Syrjälä  * @dev_priv: driver private
345d9dc34f1SVille Syrjälä  * @interrupt_mask: mask of interrupt bits to update
346d9dc34f1SVille Syrjälä  * @enabled_irq_mask: mask of interrupt bits to enable
347d9dc34f1SVille Syrjälä  */
348fbdedaeaSVille Syrjälä void ilk_update_display_irq(struct drm_i915_private *dev_priv,
349a9c287c9SJani Nikula 			    u32 interrupt_mask,
350a9c287c9SJani Nikula 			    u32 enabled_irq_mask)
351036a4a7dSZhenyu Wang {
352a9c287c9SJani Nikula 	u32 new_val;
353d9dc34f1SVille Syrjälä 
35467520415SChris Wilson 	lockdep_assert_held(&dev_priv->irq_lock);
3554bc9d430SDaniel Vetter 
356d9dc34f1SVille Syrjälä 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
357d9dc34f1SVille Syrjälä 
3589df7575fSJesse Barnes 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
359c67a470bSPaulo Zanoni 		return;
360c67a470bSPaulo Zanoni 
361d9dc34f1SVille Syrjälä 	new_val = dev_priv->irq_mask;
362d9dc34f1SVille Syrjälä 	new_val &= ~interrupt_mask;
363d9dc34f1SVille Syrjälä 	new_val |= (~enabled_irq_mask & interrupt_mask);
364d9dc34f1SVille Syrjälä 
365d9dc34f1SVille Syrjälä 	if (new_val != dev_priv->irq_mask) {
366d9dc34f1SVille Syrjälä 		dev_priv->irq_mask = new_val;
3671ec14ad3SChris Wilson 		I915_WRITE(DEIMR, dev_priv->irq_mask);
3683143a2bfSChris Wilson 		POSTING_READ(DEIMR);
369036a4a7dSZhenyu Wang 	}
370036a4a7dSZhenyu Wang }
371036a4a7dSZhenyu Wang 
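/*
 * Worked example (editor's sketch): in the interrupt_mask/enabled_irq_mask
 * convention used throughout this file, "enabled" means the bit ends up
 * cleared in the IMR register, because
 *
 *	new_val = (old & ~interrupt_mask) | (~enabled_irq_mask & interrupt_mask);
 *
 * So callers unmask a DE interrupt with
 *
 *	ilk_update_display_irq(dev_priv, bit, bit);
 *
 * and mask it again with
 *
 *	ilk_update_display_irq(dev_priv, bit, 0);
 */
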
37243eaea13SPaulo Zanoni /**
37343eaea13SPaulo Zanoni  * ilk_update_gt_irq - update GTIMR
37443eaea13SPaulo Zanoni  * @dev_priv: driver private
37543eaea13SPaulo Zanoni  * @interrupt_mask: mask of interrupt bits to update
37643eaea13SPaulo Zanoni  * @enabled_irq_mask: mask of interrupt bits to enable
37743eaea13SPaulo Zanoni  */
37843eaea13SPaulo Zanoni static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
379a9c287c9SJani Nikula 			      u32 interrupt_mask,
380a9c287c9SJani Nikula 			      u32 enabled_irq_mask)
38143eaea13SPaulo Zanoni {
38267520415SChris Wilson 	lockdep_assert_held(&dev_priv->irq_lock);
38343eaea13SPaulo Zanoni 
38415a17aaeSDaniel Vetter 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
38515a17aaeSDaniel Vetter 
3869df7575fSJesse Barnes 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
387c67a470bSPaulo Zanoni 		return;
388c67a470bSPaulo Zanoni 
38943eaea13SPaulo Zanoni 	dev_priv->gt_irq_mask &= ~interrupt_mask;
39043eaea13SPaulo Zanoni 	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
39143eaea13SPaulo Zanoni 	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
39243eaea13SPaulo Zanoni }
39343eaea13SPaulo Zanoni 
394a9c287c9SJani Nikula void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, u32 mask)
39543eaea13SPaulo Zanoni {
39643eaea13SPaulo Zanoni 	ilk_update_gt_irq(dev_priv, mask, mask);
397e33a4be8STvrtko Ursulin 	intel_uncore_posting_read_fw(&dev_priv->uncore, GTIMR);
39843eaea13SPaulo Zanoni }
39943eaea13SPaulo Zanoni 
400a9c287c9SJani Nikula void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, u32 mask)
40143eaea13SPaulo Zanoni {
40243eaea13SPaulo Zanoni 	ilk_update_gt_irq(dev_priv, mask, 0);
40343eaea13SPaulo Zanoni }
40443eaea13SPaulo Zanoni 
405f0f59a00SVille Syrjälä static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv)
406b900b949SImre Deak {
407d02b98b8SOscar Mateo 	WARN_ON_ONCE(INTEL_GEN(dev_priv) >= 11);
408d02b98b8SOscar Mateo 
409bca2bf2aSPandiyan, Dhinakaran 	return INTEL_GEN(dev_priv) >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
410b900b949SImre Deak }
411b900b949SImre Deak 
41258820574STvrtko Ursulin static void write_pm_imr(struct intel_gt *gt)
413a72fbc3aSImre Deak {
41458820574STvrtko Ursulin 	struct drm_i915_private *i915 = gt->i915;
41558820574STvrtko Ursulin 	struct intel_uncore *uncore = gt->uncore;
41658820574STvrtko Ursulin 	u32 mask = gt->pm_imr;
417917dc6b5SMika Kuoppala 	i915_reg_t reg;
418917dc6b5SMika Kuoppala 
41958820574STvrtko Ursulin 	if (INTEL_GEN(i915) >= 11) {
420917dc6b5SMika Kuoppala 		reg = GEN11_GPM_WGBOXPERF_INTR_MASK;
421917dc6b5SMika Kuoppala 		/* pm is in upper half */
422917dc6b5SMika Kuoppala 		mask = mask << 16;
42358820574STvrtko Ursulin 	} else if (INTEL_GEN(i915) >= 8) {
424917dc6b5SMika Kuoppala 		reg = GEN8_GT_IMR(2);
425917dc6b5SMika Kuoppala 	} else {
426917dc6b5SMika Kuoppala 		reg = GEN6_PMIMR;
427a72fbc3aSImre Deak 	}
428a72fbc3aSImre Deak 
42958820574STvrtko Ursulin 	intel_uncore_write(uncore, reg, mask);
43058820574STvrtko Ursulin 	intel_uncore_posting_read(uncore, reg);
431917dc6b5SMika Kuoppala }
432917dc6b5SMika Kuoppala 
43358820574STvrtko Ursulin static void write_pm_ier(struct intel_gt *gt)
434b900b949SImre Deak {
43558820574STvrtko Ursulin 	struct drm_i915_private *i915 = gt->i915;
43658820574STvrtko Ursulin 	struct intel_uncore *uncore = gt->uncore;
43758820574STvrtko Ursulin 	u32 mask = gt->pm_ier;
438917dc6b5SMika Kuoppala 	i915_reg_t reg;
439917dc6b5SMika Kuoppala 
44058820574STvrtko Ursulin 	if (INTEL_GEN(i915) >= 11) {
441917dc6b5SMika Kuoppala 		reg = GEN11_GPM_WGBOXPERF_INTR_ENABLE;
442917dc6b5SMika Kuoppala 		/* pm is in upper half */
443917dc6b5SMika Kuoppala 		mask = mask << 16;
44458820574STvrtko Ursulin 	} else if (INTEL_GEN(i915) >= 8) {
445917dc6b5SMika Kuoppala 		reg = GEN8_GT_IER(2);
446917dc6b5SMika Kuoppala 	} else {
447917dc6b5SMika Kuoppala 		reg = GEN6_PMIER;
448917dc6b5SMika Kuoppala 	}
449917dc6b5SMika Kuoppala 
45058820574STvrtko Ursulin 	intel_uncore_write(uncore, reg, mask);
451b900b949SImre Deak }
452b900b949SImre Deak 
453edbfdb45SPaulo Zanoni /**
454edbfdb45SPaulo Zanoni  * snb_update_pm_irq - update GEN6_PMIMR
45558820574STvrtko Ursulin  * @gt: gt for the interrupts
456edbfdb45SPaulo Zanoni  * @interrupt_mask: mask of interrupt bits to update
457edbfdb45SPaulo Zanoni  * @enabled_irq_mask: mask of interrupt bits to enable
458edbfdb45SPaulo Zanoni  */
45958820574STvrtko Ursulin static void snb_update_pm_irq(struct intel_gt *gt,
460a9c287c9SJani Nikula 			      u32 interrupt_mask,
461a9c287c9SJani Nikula 			      u32 enabled_irq_mask)
462edbfdb45SPaulo Zanoni {
463a9c287c9SJani Nikula 	u32 new_val;
464edbfdb45SPaulo Zanoni 
46515a17aaeSDaniel Vetter 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
46615a17aaeSDaniel Vetter 
46758820574STvrtko Ursulin 	lockdep_assert_held(&gt->i915->irq_lock);
468edbfdb45SPaulo Zanoni 
46958820574STvrtko Ursulin 	new_val = gt->pm_imr;
470f52ecbcfSPaulo Zanoni 	new_val &= ~interrupt_mask;
471f52ecbcfSPaulo Zanoni 	new_val |= (~enabled_irq_mask & interrupt_mask);
472f52ecbcfSPaulo Zanoni 
47358820574STvrtko Ursulin 	if (new_val != gt->pm_imr) {
47458820574STvrtko Ursulin 		gt->pm_imr = new_val;
47558820574STvrtko Ursulin 		write_pm_imr(gt);
476edbfdb45SPaulo Zanoni 	}
477f52ecbcfSPaulo Zanoni }
478edbfdb45SPaulo Zanoni 
47958820574STvrtko Ursulin void gen6_unmask_pm_irq(struct intel_gt *gt, u32 mask)
480edbfdb45SPaulo Zanoni {
48158820574STvrtko Ursulin 	if (WARN_ON(!intel_irqs_enabled(gt->i915)))
4829939fba2SImre Deak 		return;
4839939fba2SImre Deak 
48458820574STvrtko Ursulin 	snb_update_pm_irq(gt, mask, mask);
485edbfdb45SPaulo Zanoni }
486edbfdb45SPaulo Zanoni 
48758820574STvrtko Ursulin static void __gen6_mask_pm_irq(struct intel_gt *gt, u32 mask)
4889939fba2SImre Deak {
48958820574STvrtko Ursulin 	snb_update_pm_irq(gt, mask, 0);
4909939fba2SImre Deak }
4919939fba2SImre Deak 
49258820574STvrtko Ursulin void gen6_mask_pm_irq(struct intel_gt *gt, u32 mask)
493edbfdb45SPaulo Zanoni {
49458820574STvrtko Ursulin 	if (WARN_ON(!intel_irqs_enabled(gt->i915)))
4959939fba2SImre Deak 		return;
4969939fba2SImre Deak 
49758820574STvrtko Ursulin 	__gen6_mask_pm_irq(gt, mask);
498f4e9af4fSAkash Goel }
499f4e9af4fSAkash Goel 
5003814fd77SOscar Mateo static void gen6_reset_pm_iir(struct drm_i915_private *dev_priv, u32 reset_mask)
501f4e9af4fSAkash Goel {
502f4e9af4fSAkash Goel 	i915_reg_t reg = gen6_pm_iir(dev_priv);
503f4e9af4fSAkash Goel 
50467520415SChris Wilson 	lockdep_assert_held(&dev_priv->irq_lock);
505f4e9af4fSAkash Goel 
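	/* Write twice: IIR can queue up two events (same paranoia as above). */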
506f4e9af4fSAkash Goel 	I915_WRITE(reg, reset_mask);
507f4e9af4fSAkash Goel 	I915_WRITE(reg, reset_mask);
508f4e9af4fSAkash Goel 	POSTING_READ(reg);
509f4e9af4fSAkash Goel }
510f4e9af4fSAkash Goel 
51158820574STvrtko Ursulin static void gen6_enable_pm_irq(struct intel_gt *gt, u32 enable_mask)
512f4e9af4fSAkash Goel {
51358820574STvrtko Ursulin 	lockdep_assert_held(&gt->i915->irq_lock);
514f4e9af4fSAkash Goel 
51558820574STvrtko Ursulin 	gt->pm_ier |= enable_mask;
51658820574STvrtko Ursulin 	write_pm_ier(gt);
51758820574STvrtko Ursulin 	gen6_unmask_pm_irq(gt, enable_mask);
518f4e9af4fSAkash Goel 	/* unmask_pm_irq provides an implicit barrier (POSTING_READ) */
519f4e9af4fSAkash Goel }
520f4e9af4fSAkash Goel 
52158820574STvrtko Ursulin static void gen6_disable_pm_irq(struct intel_gt *gt, u32 disable_mask)
522f4e9af4fSAkash Goel {
52358820574STvrtko Ursulin 	lockdep_assert_held(&gt->i915->irq_lock);
524f4e9af4fSAkash Goel 
52558820574STvrtko Ursulin 	gt->pm_ier &= ~disable_mask;
52658820574STvrtko Ursulin 	__gen6_mask_pm_irq(gt, disable_mask);
52758820574STvrtko Ursulin 	write_pm_ier(gt);
528f4e9af4fSAkash Goel 	/* though a barrier is missing here, we don't really need one */
529edbfdb45SPaulo Zanoni }
530edbfdb45SPaulo Zanoni 
531d02b98b8SOscar Mateo void gen11_reset_rps_interrupts(struct drm_i915_private *dev_priv)
532d02b98b8SOscar Mateo {
533d02b98b8SOscar Mateo 	spin_lock_irq(&dev_priv->irq_lock);
534d02b98b8SOscar Mateo 
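	/* Service and clear the GTPM bit until GT INT DW reads back clear. */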
5359b77011eSTvrtko Ursulin 	while (gen11_reset_one_iir(&dev_priv->gt, 0, GEN11_GTPM))
53696606f3bSOscar Mateo 		;
537d02b98b8SOscar Mateo 
538d02b98b8SOscar Mateo 	dev_priv->gt_pm.rps.pm_iir = 0;
539d02b98b8SOscar Mateo 
540d02b98b8SOscar Mateo 	spin_unlock_irq(&dev_priv->irq_lock);
541d02b98b8SOscar Mateo }
542d02b98b8SOscar Mateo 
543dc97997aSChris Wilson void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
5443cc134e3SImre Deak {
5453cc134e3SImre Deak 	spin_lock_irq(&dev_priv->irq_lock);
5464668f695SChris Wilson 	gen6_reset_pm_iir(dev_priv, GEN6_PM_RPS_EVENTS);
547562d9baeSSagar Arun Kamble 	dev_priv->gt_pm.rps.pm_iir = 0;
5483cc134e3SImre Deak 	spin_unlock_irq(&dev_priv->irq_lock);
5493cc134e3SImre Deak }
5503cc134e3SImre Deak 
55191d14251STvrtko Ursulin void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
552b900b949SImre Deak {
55358820574STvrtko Ursulin 	struct intel_gt *gt = &dev_priv->gt;
554562d9baeSSagar Arun Kamble 	struct intel_rps *rps = &dev_priv->gt_pm.rps;
555562d9baeSSagar Arun Kamble 
556562d9baeSSagar Arun Kamble 	if (READ_ONCE(rps->interrupts_enabled))
557f2a91d1aSChris Wilson 		return;
558f2a91d1aSChris Wilson 
559b900b949SImre Deak 	spin_lock_irq(&dev_priv->irq_lock);
560562d9baeSSagar Arun Kamble 	WARN_ON_ONCE(rps->pm_iir);
56196606f3bSOscar Mateo 
562d02b98b8SOscar Mateo 	if (INTEL_GEN(dev_priv) >= 11)
56358820574STvrtko Ursulin 		WARN_ON_ONCE(gen11_reset_one_iir(gt, 0, GEN11_GTPM));
564d02b98b8SOscar Mateo 	else
565c33d247dSChris Wilson 		WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
56696606f3bSOscar Mateo 
567562d9baeSSagar Arun Kamble 	rps->interrupts_enabled = true;
56858820574STvrtko Ursulin 	gen6_enable_pm_irq(gt, dev_priv->pm_rps_events);
56978e68d36SImre Deak 
570b900b949SImre Deak 	spin_unlock_irq(&dev_priv->irq_lock);
571b900b949SImre Deak }
572b900b949SImre Deak 
57391d14251STvrtko Ursulin void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
574b900b949SImre Deak {
575562d9baeSSagar Arun Kamble 	struct intel_rps *rps = &dev_priv->gt_pm.rps;
576562d9baeSSagar Arun Kamble 
577562d9baeSSagar Arun Kamble 	if (!READ_ONCE(rps->interrupts_enabled))
578f2a91d1aSChris Wilson 		return;
579f2a91d1aSChris Wilson 
580d4d70aa5SImre Deak 	spin_lock_irq(&dev_priv->irq_lock);
581562d9baeSSagar Arun Kamble 	rps->interrupts_enabled = false;
5829939fba2SImre Deak 
583b20e3cfeSDave Gordon 	I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0u));
5849939fba2SImre Deak 
58558820574STvrtko Ursulin 	gen6_disable_pm_irq(&dev_priv->gt, GEN6_PM_RPS_EVENTS);
58658072ccbSImre Deak 
58758072ccbSImre Deak 	spin_unlock_irq(&dev_priv->irq_lock);
588315ca4c4SVille Syrjälä 	intel_synchronize_irq(dev_priv);
589c33d247dSChris Wilson 
590c33d247dSChris Wilson 	/* Now that we will not be generating any more work, flush any
5913814fd77SOscar Mateo 	 * outstanding tasks. As we are called on the RPS idle path,
592c33d247dSChris Wilson 	 * we will reset the GPU to minimum frequencies, so the current
593c33d247dSChris Wilson 	 * state of the worker can be discarded.
594c33d247dSChris Wilson 	 */
595562d9baeSSagar Arun Kamble 	cancel_work_sync(&rps->work);
596d02b98b8SOscar Mateo 	if (INTEL_GEN(dev_priv) >= 11)
597d02b98b8SOscar Mateo 		gen11_reset_rps_interrupts(dev_priv);
598d02b98b8SOscar Mateo 	else
599c33d247dSChris Wilson 		gen6_reset_rps_interrupts(dev_priv);
600b900b949SImre Deak }
601b900b949SImre Deak 
602*9cbd51c2SDaniele Ceraolo Spurio void gen9_reset_guc_interrupts(struct intel_guc *guc)
60326705e20SSagar Arun Kamble {
604*9cbd51c2SDaniele Ceraolo Spurio 	struct drm_i915_private *dev_priv = guc_to_i915(guc);
605*9cbd51c2SDaniele Ceraolo Spurio 
60687b391b9SDaniele Ceraolo Spurio 	assert_rpm_wakelock_held(&dev_priv->runtime_pm);
6071be333d3SSagar Arun Kamble 
60826705e20SSagar Arun Kamble 	spin_lock_irq(&dev_priv->irq_lock);
60926705e20SSagar Arun Kamble 	gen6_reset_pm_iir(dev_priv, dev_priv->pm_guc_events);
61026705e20SSagar Arun Kamble 	spin_unlock_irq(&dev_priv->irq_lock);
61126705e20SSagar Arun Kamble }
61226705e20SSagar Arun Kamble 
613*9cbd51c2SDaniele Ceraolo Spurio void gen9_enable_guc_interrupts(struct intel_guc *guc)
61426705e20SSagar Arun Kamble {
615*9cbd51c2SDaniele Ceraolo Spurio 	struct drm_i915_private *dev_priv = guc_to_i915(guc);
616*9cbd51c2SDaniele Ceraolo Spurio 
61787b391b9SDaniele Ceraolo Spurio 	assert_rpm_wakelock_held(&dev_priv->runtime_pm);
6181be333d3SSagar Arun Kamble 
61926705e20SSagar Arun Kamble 	spin_lock_irq(&dev_priv->irq_lock);
620*9cbd51c2SDaniele Ceraolo Spurio 	if (!guc->interrupts.enabled) {
62126705e20SSagar Arun Kamble 		WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) &
62226705e20SSagar Arun Kamble 				       dev_priv->pm_guc_events);
623*9cbd51c2SDaniele Ceraolo Spurio 		guc->interrupts.enabled = true;
62458820574STvrtko Ursulin 		gen6_enable_pm_irq(&dev_priv->gt, dev_priv->pm_guc_events);
62526705e20SSagar Arun Kamble 	}
62626705e20SSagar Arun Kamble 	spin_unlock_irq(&dev_priv->irq_lock);
62726705e20SSagar Arun Kamble }
62826705e20SSagar Arun Kamble 
629*9cbd51c2SDaniele Ceraolo Spurio void gen9_disable_guc_interrupts(struct intel_guc *guc)
63026705e20SSagar Arun Kamble {
631*9cbd51c2SDaniele Ceraolo Spurio 	struct drm_i915_private *dev_priv = guc_to_i915(guc);
632*9cbd51c2SDaniele Ceraolo Spurio 
63387b391b9SDaniele Ceraolo Spurio 	assert_rpm_wakelock_held(&dev_priv->runtime_pm);
6341be333d3SSagar Arun Kamble 
63526705e20SSagar Arun Kamble 	spin_lock_irq(&dev_priv->irq_lock);
636*9cbd51c2SDaniele Ceraolo Spurio 	guc->interrupts.enabled = false;
63726705e20SSagar Arun Kamble 
63858820574STvrtko Ursulin 	gen6_disable_pm_irq(&dev_priv->gt, dev_priv->pm_guc_events);
63926705e20SSagar Arun Kamble 
64026705e20SSagar Arun Kamble 	spin_unlock_irq(&dev_priv->irq_lock);
641315ca4c4SVille Syrjälä 	intel_synchronize_irq(dev_priv);
64226705e20SSagar Arun Kamble 
643*9cbd51c2SDaniele Ceraolo Spurio 	gen9_reset_guc_interrupts(guc);
64426705e20SSagar Arun Kamble }
64526705e20SSagar Arun Kamble 
646*9cbd51c2SDaniele Ceraolo Spurio void gen11_reset_guc_interrupts(struct intel_guc *guc)
64754c52a84SOscar Mateo {
648*9cbd51c2SDaniele Ceraolo Spurio 	struct drm_i915_private *i915 = guc_to_i915(guc);
649*9cbd51c2SDaniele Ceraolo Spurio 
65054c52a84SOscar Mateo 	spin_lock_irq(&i915->irq_lock);
6519b77011eSTvrtko Ursulin 	gen11_reset_one_iir(&i915->gt, 0, GEN11_GUC);
65254c52a84SOscar Mateo 	spin_unlock_irq(&i915->irq_lock);
65354c52a84SOscar Mateo }
65454c52a84SOscar Mateo 
655*9cbd51c2SDaniele Ceraolo Spurio void gen11_enable_guc_interrupts(struct intel_guc *guc)
65654c52a84SOscar Mateo {
657*9cbd51c2SDaniele Ceraolo Spurio 	struct drm_i915_private *dev_priv = guc_to_i915(guc);
658*9cbd51c2SDaniele Ceraolo Spurio 
65954c52a84SOscar Mateo 	spin_lock_irq(&dev_priv->irq_lock);
660*9cbd51c2SDaniele Ceraolo Spurio 	if (!guc->interrupts.enabled) {
66154c52a84SOscar Mateo 		u32 events = REG_FIELD_PREP(ENGINE1_MASK,
66254c52a84SOscar Mateo 					    GEN11_GUC_INTR_GUC2HOST);
66354c52a84SOscar Mateo 
6649b77011eSTvrtko Ursulin 		WARN_ON_ONCE(gen11_reset_one_iir(&dev_priv->gt, 0, GEN11_GUC));
66554c52a84SOscar Mateo 		I915_WRITE(GEN11_GUC_SG_INTR_ENABLE, events);
66654c52a84SOscar Mateo 		I915_WRITE(GEN11_GUC_SG_INTR_MASK, ~events);
667*9cbd51c2SDaniele Ceraolo Spurio 		guc->interrupts.enabled = true;
66854c52a84SOscar Mateo 	}
66954c52a84SOscar Mateo 	spin_unlock_irq(&dev_priv->irq_lock);
67054c52a84SOscar Mateo }
67154c52a84SOscar Mateo 
672*9cbd51c2SDaniele Ceraolo Spurio void gen11_disable_guc_interrupts(struct intel_guc *guc)
67354c52a84SOscar Mateo {
674*9cbd51c2SDaniele Ceraolo Spurio 	struct drm_i915_private *dev_priv = guc_to_i915(guc);
675*9cbd51c2SDaniele Ceraolo Spurio 
67654c52a84SOscar Mateo 	spin_lock_irq(&dev_priv->irq_lock);
677*9cbd51c2SDaniele Ceraolo Spurio 	guc->interrupts.enabled = false;
67854c52a84SOscar Mateo 
67954c52a84SOscar Mateo 	I915_WRITE(GEN11_GUC_SG_INTR_MASK, ~0);
68054c52a84SOscar Mateo 	I915_WRITE(GEN11_GUC_SG_INTR_ENABLE, 0);
68154c52a84SOscar Mateo 
68254c52a84SOscar Mateo 	spin_unlock_irq(&dev_priv->irq_lock);
683315ca4c4SVille Syrjälä 	intel_synchronize_irq(dev_priv);
68454c52a84SOscar Mateo 
685*9cbd51c2SDaniele Ceraolo Spurio 	gen11_reset_guc_interrupts(guc);
68654c52a84SOscar Mateo }
68754c52a84SOscar Mateo 
6880961021aSBen Widawsky /**
6893a3b3c7dSVille Syrjälä  * bdw_update_port_irq - update DE port interrupt
6903a3b3c7dSVille Syrjälä  * @dev_priv: driver private
6913a3b3c7dSVille Syrjälä  * @interrupt_mask: mask of interrupt bits to update
6923a3b3c7dSVille Syrjälä  * @enabled_irq_mask: mask of interrupt bits to enable
6933a3b3c7dSVille Syrjälä  */
6943a3b3c7dSVille Syrjälä static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
695a9c287c9SJani Nikula 				u32 interrupt_mask,
696a9c287c9SJani Nikula 				u32 enabled_irq_mask)
6973a3b3c7dSVille Syrjälä {
698a9c287c9SJani Nikula 	u32 new_val;
699a9c287c9SJani Nikula 	u32 old_val;
7003a3b3c7dSVille Syrjälä 
70167520415SChris Wilson 	lockdep_assert_held(&dev_priv->irq_lock);
7023a3b3c7dSVille Syrjälä 
7033a3b3c7dSVille Syrjälä 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
7043a3b3c7dSVille Syrjälä 
7053a3b3c7dSVille Syrjälä 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
7063a3b3c7dSVille Syrjälä 		return;
7073a3b3c7dSVille Syrjälä 
7083a3b3c7dSVille Syrjälä 	old_val = I915_READ(GEN8_DE_PORT_IMR);
7093a3b3c7dSVille Syrjälä 
7103a3b3c7dSVille Syrjälä 	new_val = old_val;
7113a3b3c7dSVille Syrjälä 	new_val &= ~interrupt_mask;
7123a3b3c7dSVille Syrjälä 	new_val |= (~enabled_irq_mask & interrupt_mask);
7133a3b3c7dSVille Syrjälä 
7143a3b3c7dSVille Syrjälä 	if (new_val != old_val) {
7153a3b3c7dSVille Syrjälä 		I915_WRITE(GEN8_DE_PORT_IMR, new_val);
7163a3b3c7dSVille Syrjälä 		POSTING_READ(GEN8_DE_PORT_IMR);
7173a3b3c7dSVille Syrjälä 	}
7183a3b3c7dSVille Syrjälä }
7193a3b3c7dSVille Syrjälä 
7203a3b3c7dSVille Syrjälä /**
721013d3752SVille Syrjälä  * bdw_update_pipe_irq - update DE pipe interrupt
722013d3752SVille Syrjälä  * @dev_priv: driver private
723013d3752SVille Syrjälä  * @pipe: pipe whose interrupt to update
724013d3752SVille Syrjälä  * @interrupt_mask: mask of interrupt bits to update
725013d3752SVille Syrjälä  * @enabled_irq_mask: mask of interrupt bits to enable
726013d3752SVille Syrjälä  */
727013d3752SVille Syrjälä void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
728013d3752SVille Syrjälä 			 enum pipe pipe,
729a9c287c9SJani Nikula 			 u32 interrupt_mask,
730a9c287c9SJani Nikula 			 u32 enabled_irq_mask)
731013d3752SVille Syrjälä {
732a9c287c9SJani Nikula 	u32 new_val;
733013d3752SVille Syrjälä 
73467520415SChris Wilson 	lockdep_assert_held(&dev_priv->irq_lock);
735013d3752SVille Syrjälä 
736013d3752SVille Syrjälä 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
737013d3752SVille Syrjälä 
738013d3752SVille Syrjälä 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
739013d3752SVille Syrjälä 		return;
740013d3752SVille Syrjälä 
741013d3752SVille Syrjälä 	new_val = dev_priv->de_irq_mask[pipe];
742013d3752SVille Syrjälä 	new_val &= ~interrupt_mask;
743013d3752SVille Syrjälä 	new_val |= (~enabled_irq_mask & interrupt_mask);
744013d3752SVille Syrjälä 
745013d3752SVille Syrjälä 	if (new_val != dev_priv->de_irq_mask[pipe]) {
746013d3752SVille Syrjälä 		dev_priv->de_irq_mask[pipe] = new_val;
747013d3752SVille Syrjälä 		I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
748013d3752SVille Syrjälä 		POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
749013d3752SVille Syrjälä 	}
750013d3752SVille Syrjälä }
751013d3752SVille Syrjälä 
752013d3752SVille Syrjälä /**
753fee884edSDaniel Vetter  * ibx_display_interrupt_update - update SDEIMR
754fee884edSDaniel Vetter  * @dev_priv: driver private
755fee884edSDaniel Vetter  * @interrupt_mask: mask of interrupt bits to update
756fee884edSDaniel Vetter  * @enabled_irq_mask: mask of interrupt bits to enable
757fee884edSDaniel Vetter  */
75847339cd9SDaniel Vetter void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
759a9c287c9SJani Nikula 				  u32 interrupt_mask,
760a9c287c9SJani Nikula 				  u32 enabled_irq_mask)
761fee884edSDaniel Vetter {
762a9c287c9SJani Nikula 	u32 sdeimr = I915_READ(SDEIMR);
763fee884edSDaniel Vetter 	sdeimr &= ~interrupt_mask;
764fee884edSDaniel Vetter 	sdeimr |= (~enabled_irq_mask & interrupt_mask);
765fee884edSDaniel Vetter 
76615a17aaeSDaniel Vetter 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
76715a17aaeSDaniel Vetter 
76867520415SChris Wilson 	lockdep_assert_held(&dev_priv->irq_lock);
769fee884edSDaniel Vetter 
7709df7575fSJesse Barnes 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
771c67a470bSPaulo Zanoni 		return;
772c67a470bSPaulo Zanoni 
773fee884edSDaniel Vetter 	I915_WRITE(SDEIMR, sdeimr);
774fee884edSDaniel Vetter 	POSTING_READ(SDEIMR);
775fee884edSDaniel Vetter }
7768664281bSPaulo Zanoni 
7776b12ca56SVille Syrjälä u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
7786b12ca56SVille Syrjälä 			      enum pipe pipe)
7797c463586SKeith Packard {
7806b12ca56SVille Syrjälä 	u32 status_mask = dev_priv->pipestat_irq_mask[pipe];
78110c59c51SImre Deak 	u32 enable_mask = status_mask << 16;
78210c59c51SImre Deak 
7836b12ca56SVille Syrjälä 	lockdep_assert_held(&dev_priv->irq_lock);
7846b12ca56SVille Syrjälä 
7856b12ca56SVille Syrjälä 	if (INTEL_GEN(dev_priv) < 5)
7866b12ca56SVille Syrjälä 		goto out;
7876b12ca56SVille Syrjälä 
78810c59c51SImre Deak 	/*
789724a6905SVille Syrjälä 	 * On pipe A we don't support the PSR interrupt yet,
790724a6905SVille Syrjälä 	 * on pipes B and C the same bit is MBZ (must be zero).
79110c59c51SImre Deak 	 */
79210c59c51SImre Deak 	if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
79310c59c51SImre Deak 		return 0;
794724a6905SVille Syrjälä 	/*
795724a6905SVille Syrjälä 	 * On pipes B and C we don't support the PSR interrupt yet, on pipe
796724a6905SVille Syrjälä 	 * A the same bit is for perf counters which we don't use either.
797724a6905SVille Syrjälä 	 */
798724a6905SVille Syrjälä 	if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
799724a6905SVille Syrjälä 		return 0;
80010c59c51SImre Deak 
80110c59c51SImre Deak 	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
80210c59c51SImre Deak 			 SPRITE0_FLIP_DONE_INT_EN_VLV |
80310c59c51SImre Deak 			 SPRITE1_FLIP_DONE_INT_EN_VLV);
80410c59c51SImre Deak 	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
80510c59c51SImre Deak 		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
80610c59c51SImre Deak 	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
80710c59c51SImre Deak 		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;
80810c59c51SImre Deak 
8096b12ca56SVille Syrjälä out:
8106b12ca56SVille Syrjälä 	WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
8116b12ca56SVille Syrjälä 		  status_mask & ~PIPESTAT_INT_STATUS_MASK,
8126b12ca56SVille Syrjälä 		  "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
8136b12ca56SVille Syrjälä 		  pipe_name(pipe), enable_mask, status_mask);
8146b12ca56SVille Syrjälä 
81510c59c51SImre Deak 	return enable_mask;
81610c59c51SImre Deak }
81710c59c51SImre Deak 
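/*
 * Layout note (editor's sketch): a PIPESTAT register keeps the enable bits
 * in its high 16 bits and the matching status bits in its low 16 bits,
 * which is why the default mapping above is simply
 *
 *	enable_mask = status_mask << 16;
 *
 * The VLV-specific cases above then adjust the few bits that don't follow
 * that one-to-one mapping.
 */
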
8186b12ca56SVille Syrjälä void i915_enable_pipestat(struct drm_i915_private *dev_priv,
8196b12ca56SVille Syrjälä 			  enum pipe pipe, u32 status_mask)
820755e9019SImre Deak {
8216b12ca56SVille Syrjälä 	i915_reg_t reg = PIPESTAT(pipe);
822755e9019SImre Deak 	u32 enable_mask;
823755e9019SImre Deak 
8246b12ca56SVille Syrjälä 	WARN_ONCE(status_mask & ~PIPESTAT_INT_STATUS_MASK,
8256b12ca56SVille Syrjälä 		  "pipe %c: status_mask=0x%x\n",
8266b12ca56SVille Syrjälä 		  pipe_name(pipe), status_mask);
8276b12ca56SVille Syrjälä 
8286b12ca56SVille Syrjälä 	lockdep_assert_held(&dev_priv->irq_lock);
8296b12ca56SVille Syrjälä 	WARN_ON(!intel_irqs_enabled(dev_priv));
8306b12ca56SVille Syrjälä 
8316b12ca56SVille Syrjälä 	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == status_mask)
8326b12ca56SVille Syrjälä 		return;
8336b12ca56SVille Syrjälä 
8346b12ca56SVille Syrjälä 	dev_priv->pipestat_irq_mask[pipe] |= status_mask;
8356b12ca56SVille Syrjälä 	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
8366b12ca56SVille Syrjälä 
8376b12ca56SVille Syrjälä 	I915_WRITE(reg, enable_mask | status_mask);
8386b12ca56SVille Syrjälä 	POSTING_READ(reg);
839755e9019SImre Deak }
840755e9019SImre Deak 
8416b12ca56SVille Syrjälä void i915_disable_pipestat(struct drm_i915_private *dev_priv,
8426b12ca56SVille Syrjälä 			   enum pipe pipe, u32 status_mask)
843755e9019SImre Deak {
8446b12ca56SVille Syrjälä 	i915_reg_t reg = PIPESTAT(pipe);
845755e9019SImre Deak 	u32 enable_mask;
846755e9019SImre Deak 
8476b12ca56SVille Syrjälä 	WARN_ONCE(status_mask & ~PIPESTAT_INT_STATUS_MASK,
8486b12ca56SVille Syrjälä 		  "pipe %c: status_mask=0x%x\n",
8496b12ca56SVille Syrjälä 		  pipe_name(pipe), status_mask);
8506b12ca56SVille Syrjälä 
8516b12ca56SVille Syrjälä 	lockdep_assert_held(&dev_priv->irq_lock);
8526b12ca56SVille Syrjälä 	WARN_ON(!intel_irqs_enabled(dev_priv));
8536b12ca56SVille Syrjälä 
8546b12ca56SVille Syrjälä 	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == 0)
8556b12ca56SVille Syrjälä 		return;
8566b12ca56SVille Syrjälä 
8576b12ca56SVille Syrjälä 	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
8586b12ca56SVille Syrjälä 	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
8596b12ca56SVille Syrjälä 
8606b12ca56SVille Syrjälä 	I915_WRITE(reg, enable_mask | status_mask);
8616b12ca56SVille Syrjälä 	POSTING_READ(reg);
862755e9019SImre Deak }
863755e9019SImre Deak 
864f3e30485SVille Syrjälä static bool i915_has_asle(struct drm_i915_private *dev_priv)
865f3e30485SVille Syrjälä {
866f3e30485SVille Syrjälä 	if (!dev_priv->opregion.asle)
867f3e30485SVille Syrjälä 		return false;
868f3e30485SVille Syrjälä 
869f3e30485SVille Syrjälä 	return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
870f3e30485SVille Syrjälä }
871f3e30485SVille Syrjälä 
872c0e09200SDave Airlie /**
873f49e38ddSJani Nikula  * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
87414bb2c11STvrtko Ursulin  * @dev_priv: i915 device private
87501c66889SZhao Yakui  */
87691d14251STvrtko Ursulin static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
87701c66889SZhao Yakui {
878f3e30485SVille Syrjälä 	if (!i915_has_asle(dev_priv))
879f49e38ddSJani Nikula 		return;
880f49e38ddSJani Nikula 
88113321786SDaniel Vetter 	spin_lock_irq(&dev_priv->irq_lock);
88201c66889SZhao Yakui 
883755e9019SImre Deak 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
88491d14251STvrtko Ursulin 	if (INTEL_GEN(dev_priv) >= 4)
8853b6c42e8SDaniel Vetter 		i915_enable_pipestat(dev_priv, PIPE_A,
886755e9019SImre Deak 				     PIPE_LEGACY_BLC_EVENT_STATUS);
8871ec14ad3SChris Wilson 
88813321786SDaniel Vetter 	spin_unlock_irq(&dev_priv->irq_lock);
88901c66889SZhao Yakui }
89001c66889SZhao Yakui 
891f75f3746SVille Syrjälä /*
892f75f3746SVille Syrjälä  * This timing diagram depicts the video signal in and
893f75f3746SVille Syrjälä  * around the vertical blanking period.
894f75f3746SVille Syrjälä  *
895f75f3746SVille Syrjälä  * Assumptions about the fictitious mode used in this example:
896f75f3746SVille Syrjälä  *  vblank_start >= 3
897f75f3746SVille Syrjälä  *  vsync_start = vblank_start + 1
898f75f3746SVille Syrjälä  *  vsync_end = vblank_start + 2
899f75f3746SVille Syrjälä  *  vtotal = vblank_start + 3
900f75f3746SVille Syrjälä  *
901f75f3746SVille Syrjälä  *           start of vblank:
902f75f3746SVille Syrjälä  *           latch double buffered registers
903f75f3746SVille Syrjälä  *           increment frame counter (ctg+)
904f75f3746SVille Syrjälä  *           generate start of vblank interrupt (gen4+)
905f75f3746SVille Syrjälä  *           |
906f75f3746SVille Syrjälä  *           |          frame start:
907f75f3746SVille Syrjälä  *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
908f75f3746SVille Syrjälä  *           |          may be shifted forward 1-3 extra lines via PIPECONF
909f75f3746SVille Syrjälä  *           |          |
910f75f3746SVille Syrjälä  *           |          |  start of vsync:
911f75f3746SVille Syrjälä  *           |          |  generate vsync interrupt
912f75f3746SVille Syrjälä  *           |          |  |
913f75f3746SVille Syrjälä  * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
914f75f3746SVille Syrjälä  *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
915f75f3746SVille Syrjälä  * ----va---> <-----------------vb--------------------> <--------va-------------
916f75f3746SVille Syrjälä  *       |          |       <----vs----->                     |
917f75f3746SVille Syrjälä  * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
918f75f3746SVille Syrjälä  * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
919f75f3746SVille Syrjälä  * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
920f75f3746SVille Syrjälä  *       |          |                                         |
921f75f3746SVille Syrjälä  *       last visible pixel                                   first visible pixel
922f75f3746SVille Syrjälä  *                  |                                         increment frame counter (gen3/4)
923f75f3746SVille Syrjälä  *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
924f75f3746SVille Syrjälä  *
925f75f3746SVille Syrjälä  * x  = horizontal active
926f75f3746SVille Syrjälä  * _  = horizontal blanking
927f75f3746SVille Syrjälä  * hs = horizontal sync
928f75f3746SVille Syrjälä  * va = vertical active
929f75f3746SVille Syrjälä  * vb = vertical blanking
930f75f3746SVille Syrjälä  * vs = vertical sync
931f75f3746SVille Syrjälä  * vbs = vblank_start (number)
932f75f3746SVille Syrjälä  *
933f75f3746SVille Syrjälä  * Summary:
934f75f3746SVille Syrjälä  * - most events happen at the start of horizontal sync
935f75f3746SVille Syrjälä  * - frame start happens at the start of horizontal blank, 1-4 lines
936f75f3746SVille Syrjälä  *   (depending on PIPECONF settings) after the start of vblank
937f75f3746SVille Syrjälä  * - gen3/4 pixel and frame counter are synchronized with the start
938f75f3746SVille Syrjälä  *   of horizontal active on the first line of vertical active
939f75f3746SVille Syrjälä  */
940f75f3746SVille Syrjälä 
94142f52ef8SKeith Packard /* Called from drm generic code, passed a 'crtc', which
94242f52ef8SKeith Packard  * we use as a pipe index
94342f52ef8SKeith Packard  */
94408fa8fd0SVille Syrjälä u32 i915_get_vblank_counter(struct drm_crtc *crtc)
9450a3e67a4SJesse Barnes {
94608fa8fd0SVille Syrjälä 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
94708fa8fd0SVille Syrjälä 	struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[drm_crtc_index(crtc)];
94832db0b65SVille Syrjälä 	const struct drm_display_mode *mode = &vblank->hwmode;
94908fa8fd0SVille Syrjälä 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
950f0f59a00SVille Syrjälä 	i915_reg_t high_frame, low_frame;
9510b2a8e09SVille Syrjälä 	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
952694e409dSVille Syrjälä 	unsigned long irqflags;
953391f75e2SVille Syrjälä 
95432db0b65SVille Syrjälä 	/*
95532db0b65SVille Syrjälä 	 * On i965gm TV output the frame counter only works up to
95632db0b65SVille Syrjälä 	 * the point when we enable the TV encoder. After that the
95732db0b65SVille Syrjälä 	 * frame counter ceases to work and reads zero. We need a
95832db0b65SVille Syrjälä 	 * vblank wait before enabling the TV encoder and so we
95932db0b65SVille Syrjälä 	 * have to enable vblank interrupts while the frame counter
96032db0b65SVille Syrjälä 	 * is still in a working state. However the core vblank code
96132db0b65SVille Syrjälä 	 * does not like us returning non-zero frame counter values
96232db0b65SVille Syrjälä 	 * when we've told it that we don't have a working frame
96332db0b65SVille Syrjälä 	 * counter. Thus we must stop non-zero values leaking out.
96432db0b65SVille Syrjälä 	 */
96532db0b65SVille Syrjälä 	if (!vblank->max_vblank_count)
96632db0b65SVille Syrjälä 		return 0;
96732db0b65SVille Syrjälä 
9680b2a8e09SVille Syrjälä 	htotal = mode->crtc_htotal;
9690b2a8e09SVille Syrjälä 	hsync_start = mode->crtc_hsync_start;
9700b2a8e09SVille Syrjälä 	vbl_start = mode->crtc_vblank_start;
9710b2a8e09SVille Syrjälä 	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
9720b2a8e09SVille Syrjälä 		vbl_start = DIV_ROUND_UP(vbl_start, 2);
973391f75e2SVille Syrjälä 
9740b2a8e09SVille Syrjälä 	/* Convert to pixel count */
9750b2a8e09SVille Syrjälä 	vbl_start *= htotal;
9760b2a8e09SVille Syrjälä 
9770b2a8e09SVille Syrjälä 	/* Start of vblank event occurs at start of hsync */
9780b2a8e09SVille Syrjälä 	vbl_start -= htotal - hsync_start;
9790b2a8e09SVille Syrjälä 
9809db4a9c7SJesse Barnes 	high_frame = PIPEFRAME(pipe);
9819db4a9c7SJesse Barnes 	low_frame = PIPEFRAMEPIXEL(pipe);
9825eddb70bSChris Wilson 
983694e409dSVille Syrjälä 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
984694e409dSVille Syrjälä 
9850a3e67a4SJesse Barnes 	/*
9860a3e67a4SJesse Barnes 	 * High & low register fields aren't synchronized, so make sure
9870a3e67a4SJesse Barnes 	 * we get a low value that's stable across two reads of the high
9880a3e67a4SJesse Barnes 	 * register.
9890a3e67a4SJesse Barnes 	 */
9900a3e67a4SJesse Barnes 	do {
991694e409dSVille Syrjälä 		high1 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK;
992694e409dSVille Syrjälä 		low   = I915_READ_FW(low_frame);
993694e409dSVille Syrjälä 		high2 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK;
9940a3e67a4SJesse Barnes 	} while (high1 != high2);
9950a3e67a4SJesse Barnes 
996694e409dSVille Syrjälä 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
997694e409dSVille Syrjälä 
9985eddb70bSChris Wilson 	high1 >>= PIPE_FRAME_HIGH_SHIFT;
999391f75e2SVille Syrjälä 	pixel = low & PIPE_PIXEL_MASK;
10005eddb70bSChris Wilson 	low >>= PIPE_FRAME_LOW_SHIFT;
1001391f75e2SVille Syrjälä 
1002391f75e2SVille Syrjälä 	/*
1003391f75e2SVille Syrjälä 	 * The frame counter increments at beginning of active.
1004391f75e2SVille Syrjälä 	 * Cook up a vblank counter by also checking the pixel
1005391f75e2SVille Syrjälä 	 * counter against vblank start.
1006391f75e2SVille Syrjälä 	 */
1007edc08d0aSVille Syrjälä 	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
10080a3e67a4SJesse Barnes }
10090a3e67a4SJesse Barnes 
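/*
 * Worked example (editor's sketch, made-up mode numbers): with htotal = 100,
 * hsync_start = 90 and vblank_start = 3 the code above computes
 *
 *	vbl_start = 3 * 100 - (100 - 90) = 290 pixels
 *
 * so a raw pixel counter of, say, 295 lies past the start of vblank and the
 * cooked value becomes (((high1 << 8) | low) + 1) & 0xffffff, compensating
 * for the hardware frame counter only incrementing at the start of active.
 */
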
101008fa8fd0SVille Syrjälä u32 g4x_get_vblank_counter(struct drm_crtc *crtc)
10119880b7a5SJesse Barnes {
101208fa8fd0SVille Syrjälä 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
101308fa8fd0SVille Syrjälä 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
10149880b7a5SJesse Barnes 
1015649636efSVille Syrjälä 	return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
10169880b7a5SJesse Barnes }
10179880b7a5SJesse Barnes 
1018aec0246fSUma Shankar /*
1019aec0246fSUma Shankar  * On certain encoders on certain platforms, the pipe
1020aec0246fSUma Shankar  * scanline register will not work to get the scanline,
1021aec0246fSUma Shankar  * since the timings are driven from the PORT or there are
1022aec0246fSUma Shankar  * issues with scanline register updates.
1023aec0246fSUma Shankar  * This function will use Framestamp and current
1024aec0246fSUma Shankar  * timestamp registers to calculate the scanline.
1025aec0246fSUma Shankar  */
1026aec0246fSUma Shankar static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc)
1027aec0246fSUma Shankar {
1028aec0246fSUma Shankar 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1029aec0246fSUma Shankar 	struct drm_vblank_crtc *vblank =
1030aec0246fSUma Shankar 		&crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
1031aec0246fSUma Shankar 	const struct drm_display_mode *mode = &vblank->hwmode;
1032aec0246fSUma Shankar 	u32 vblank_start = mode->crtc_vblank_start;
1033aec0246fSUma Shankar 	u32 vtotal = mode->crtc_vtotal;
1034aec0246fSUma Shankar 	u32 htotal = mode->crtc_htotal;
1035aec0246fSUma Shankar 	u32 clock = mode->crtc_clock;
1036aec0246fSUma Shankar 	u32 scanline, scan_prev_time, scan_curr_time, scan_post_time;
1037aec0246fSUma Shankar 
1038aec0246fSUma Shankar 	/*
1039aec0246fSUma Shankar 	 * To avoid the race condition where we might cross into the
1040aec0246fSUma Shankar 	 * next vblank just between the PIPE_FRMTMSTMP and TIMESTAMP_CTR
1041aec0246fSUma Shankar 	 * reads, we make sure we read PIPE_FRMTMSTMP and TIMESTAMP_CTR
1042aec0246fSUma Shankar 	 * during the same frame.
1043aec0246fSUma Shankar 	 */
1044aec0246fSUma Shankar 	do {
1045aec0246fSUma Shankar 		/*
1046aec0246fSUma Shankar 		 * This field provides read back of the display
1047aec0246fSUma Shankar 		 * pipe frame time stamp. The time stamp value
1048aec0246fSUma Shankar 		 * is sampled at every start of vertical blank.
1049aec0246fSUma Shankar 		 */
1050aec0246fSUma Shankar 		scan_prev_time = I915_READ_FW(PIPE_FRMTMSTMP(crtc->pipe));
1051aec0246fSUma Shankar 
1052aec0246fSUma Shankar 		/*
1053aec0246fSUma Shankar 		 * The TIMESTAMP_CTR register has the current
1054aec0246fSUma Shankar 		 * time stamp value.
1055aec0246fSUma Shankar 		 */
1056aec0246fSUma Shankar 		scan_curr_time = I915_READ_FW(IVB_TIMESTAMP_CTR);
1057aec0246fSUma Shankar 
1058aec0246fSUma Shankar 		scan_post_time = I915_READ_FW(PIPE_FRMTMSTMP(crtc->pipe));
1059aec0246fSUma Shankar 	} while (scan_post_time != scan_prev_time);
1060aec0246fSUma Shankar 
1061aec0246fSUma Shankar 	scanline = div_u64(mul_u32_u32(scan_curr_time - scan_prev_time,
1062aec0246fSUma Shankar 					clock), 1000 * htotal);
1063aec0246fSUma Shankar 	scanline = min(scanline, vtotal - 1);
1064aec0246fSUma Shankar 	scanline = (scanline + vblank_start) % vtotal;
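	/*
	 * Worked example with hypothetical numbers (the formula above
	 * treats the timestamp delta as microseconds): at a 148500 kHz
	 * pixel clock with htotal = 2200, a delta of 1000 us gives
	 * 1000 * 148500 / (1000 * 2200) = ~67 lines since the last start
	 * of vblank, i.e. a reported scanline of (67 + vblank_start) % vtotal.
	 */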
1065aec0246fSUma Shankar 
1066aec0246fSUma Shankar 	return scanline;
1067aec0246fSUma Shankar }
1068aec0246fSUma Shankar 
106975aa3f63SVille Syrjälä /* I915_READ_FW, only for fast reads of display block, no need for forcewake etc. */
1070a225f079SVille Syrjälä static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
1071a225f079SVille Syrjälä {
1072a225f079SVille Syrjälä 	struct drm_device *dev = crtc->base.dev;
1073fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
10745caa0feaSDaniel Vetter 	const struct drm_display_mode *mode;
10755caa0feaSDaniel Vetter 	struct drm_vblank_crtc *vblank;
1076a225f079SVille Syrjälä 	enum pipe pipe = crtc->pipe;
107780715b2fSVille Syrjälä 	int position, vtotal;
1078a225f079SVille Syrjälä 
107972259536SVille Syrjälä 	if (!crtc->active)
108072259536SVille Syrjälä 		return -1;
108172259536SVille Syrjälä 
10825caa0feaSDaniel Vetter 	vblank = &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
10835caa0feaSDaniel Vetter 	mode = &vblank->hwmode;
10845caa0feaSDaniel Vetter 
1085aec0246fSUma Shankar 	if (mode->private_flags & I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP)
1086aec0246fSUma Shankar 		return __intel_get_crtc_scanline_from_timestamp(crtc);
1087aec0246fSUma Shankar 
108880715b2fSVille Syrjälä 	vtotal = mode->crtc_vtotal;
1089a225f079SVille Syrjälä 	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
1090a225f079SVille Syrjälä 		vtotal /= 2;
1091a225f079SVille Syrjälä 
1092cf819effSLucas De Marchi 	if (IS_GEN(dev_priv, 2))
109375aa3f63SVille Syrjälä 		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
1094a225f079SVille Syrjälä 	else
109575aa3f63SVille Syrjälä 		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
1096a225f079SVille Syrjälä 
1097a225f079SVille Syrjälä 	/*
109841b578fbSJesse Barnes 	 * On HSW, the DSL reg (0x70000) appears to return 0 if we
109941b578fbSJesse Barnes 	 * read it just before the start of vblank.  So try it again
110041b578fbSJesse Barnes 	 * so we don't accidentally end up spanning a vblank frame
110141b578fbSJesse Barnes 	 * increment, causing the pipe_update_end() code to squawk at us.
110241b578fbSJesse Barnes 	 *
110341b578fbSJesse Barnes 	 * The nature of this problem means we can't simply check the ISR
110441b578fbSJesse Barnes 	 * bit and return the vblank start value; nor can we use the scanline
110541b578fbSJesse Barnes 	 * debug register in the transcoder as it appears to have the same
110641b578fbSJesse Barnes 	 * problem.  We may need to extend this to include other platforms,
110741b578fbSJesse Barnes 	 * but so far testing only shows the problem on HSW.
110841b578fbSJesse Barnes 	 */
110991d14251STvrtko Ursulin 	if (HAS_DDI(dev_priv) && !position) {
111041b578fbSJesse Barnes 		int i, temp;
111141b578fbSJesse Barnes 
111241b578fbSJesse Barnes 		for (i = 0; i < 100; i++) {
111341b578fbSJesse Barnes 			udelay(1);
1114707bdd3fSVille Syrjälä 			temp = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
111541b578fbSJesse Barnes 			if (temp != position) {
111641b578fbSJesse Barnes 				position = temp;
111741b578fbSJesse Barnes 				break;
111841b578fbSJesse Barnes 			}
111941b578fbSJesse Barnes 		}
112041b578fbSJesse Barnes 	}
112141b578fbSJesse Barnes 
112241b578fbSJesse Barnes 	/*
112380715b2fSVille Syrjälä 	 * See update_scanline_offset() for the details on the
112480715b2fSVille Syrjälä 	 * scanline_offset adjustment.
1125a225f079SVille Syrjälä 	 */
112680715b2fSVille Syrjälä 	return (position + crtc->scanline_offset) % vtotal;
1127a225f079SVille Syrjälä }
1128a225f079SVille Syrjälä 
11297d23e593SVille Syrjälä bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
11301bf6ad62SDaniel Vetter 			      bool in_vblank_irq, int *vpos, int *hpos,
11313bb403bfSVille Syrjälä 			      ktime_t *stime, ktime_t *etime,
11323bb403bfSVille Syrjälä 			      const struct drm_display_mode *mode)
11330af7e4dfSMario Kleiner {
1134fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
113598187836SVille Syrjälä 	struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv,
113698187836SVille Syrjälä 								pipe);
11373aa18df8SVille Syrjälä 	int position;
113878e8fc6bSVille Syrjälä 	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
1139ad3543edSMario Kleiner 	unsigned long irqflags;
11408a920e24SVille Syrjälä 	bool use_scanline_counter = INTEL_GEN(dev_priv) >= 5 ||
11418a920e24SVille Syrjälä 		IS_G4X(dev_priv) || IS_GEN(dev_priv, 2) ||
11428a920e24SVille Syrjälä 		mode->private_flags & I915_MODE_FLAG_USE_SCANLINE_COUNTER;
11430af7e4dfSMario Kleiner 
1144fc467a22SMaarten Lankhorst 	if (WARN_ON(!mode->crtc_clock)) {
11450af7e4dfSMario Kleiner 		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
11469db4a9c7SJesse Barnes 				 "pipe %c\n", pipe_name(pipe));
11471bf6ad62SDaniel Vetter 		return false;
11480af7e4dfSMario Kleiner 	}
11490af7e4dfSMario Kleiner 
1150c2baf4b7SVille Syrjälä 	htotal = mode->crtc_htotal;
115178e8fc6bSVille Syrjälä 	hsync_start = mode->crtc_hsync_start;
1152c2baf4b7SVille Syrjälä 	vtotal = mode->crtc_vtotal;
1153c2baf4b7SVille Syrjälä 	vbl_start = mode->crtc_vblank_start;
1154c2baf4b7SVille Syrjälä 	vbl_end = mode->crtc_vblank_end;
11550af7e4dfSMario Kleiner 
1156d31faf65SVille Syrjälä 	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
1157d31faf65SVille Syrjälä 		vbl_start = DIV_ROUND_UP(vbl_start, 2);
1158d31faf65SVille Syrjälä 		vbl_end /= 2;
1159d31faf65SVille Syrjälä 		vtotal /= 2;
1160d31faf65SVille Syrjälä 	}
1161d31faf65SVille Syrjälä 
1162ad3543edSMario Kleiner 	/*
1163ad3543edSMario Kleiner 	 * Lock uncore.lock, as we will do multiple timing critical raw
1164ad3543edSMario Kleiner 	 * register reads, potentially with preemption disabled, so the
1165ad3543edSMario Kleiner 	 * following code must not block on uncore.lock.
1166ad3543edSMario Kleiner 	 */
1167ad3543edSMario Kleiner 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
1168ad3543edSMario Kleiner 
1169ad3543edSMario Kleiner 	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
1170ad3543edSMario Kleiner 
1171ad3543edSMario Kleiner 	/* Get optional system timestamp before query. */
1172ad3543edSMario Kleiner 	if (stime)
1173ad3543edSMario Kleiner 		*stime = ktime_get();
1174ad3543edSMario Kleiner 
11758a920e24SVille Syrjälä 	if (use_scanline_counter) {
11760af7e4dfSMario Kleiner 		/* No obvious pixelcount register. Only query vertical
11770af7e4dfSMario Kleiner 		 * scanout position from Display scan line register.
11780af7e4dfSMario Kleiner 		 */
1179a225f079SVille Syrjälä 		position = __intel_get_crtc_scanline(intel_crtc);
11800af7e4dfSMario Kleiner 	} else {
11810af7e4dfSMario Kleiner 		/* Have access to pixelcount since start of frame.
11820af7e4dfSMario Kleiner 		 * We can split this into vertical and horizontal
11830af7e4dfSMario Kleiner 		 * scanout position.
11840af7e4dfSMario Kleiner 		 */
118575aa3f63SVille Syrjälä 		position = (I915_READ_FW(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
11860af7e4dfSMario Kleiner 
11873aa18df8SVille Syrjälä 		/* convert to pixel counts */
11883aa18df8SVille Syrjälä 		vbl_start *= htotal;
11893aa18df8SVille Syrjälä 		vbl_end *= htotal;
11903aa18df8SVille Syrjälä 		vtotal *= htotal;
119178e8fc6bSVille Syrjälä 
119278e8fc6bSVille Syrjälä 		/*
11937e78f1cbSVille Syrjälä 		 * In interlaced modes, the pixel counter counts all pixels,
11947e78f1cbSVille Syrjälä 		 * so one field will have htotal more pixels. In order to avoid
11957e78f1cbSVille Syrjälä 		 * the reported position from jumping backwards when the pixel
11967e78f1cbSVille Syrjälä 		 * counter is beyond the length of the shorter field, just
11977e78f1cbSVille Syrjälä 		 * clamp the position the length of the shorter field. This
11987e78f1cbSVille Syrjälä 	 * clamp the position to the length of the shorter field. This
11997e78f1cbSVille Syrjälä 		 * the scanline counter doesn't count the two half lines.
12007e78f1cbSVille Syrjälä 		 */
12017e78f1cbSVille Syrjälä 		if (position >= vtotal)
12027e78f1cbSVille Syrjälä 			position = vtotal - 1;
12037e78f1cbSVille Syrjälä 
12047e78f1cbSVille Syrjälä 		/*
120578e8fc6bSVille Syrjälä 		 * Start of vblank interrupt is triggered at start of hsync,
120678e8fc6bSVille Syrjälä 		 * just prior to the first active line of vblank. However we
120778e8fc6bSVille Syrjälä 		 * consider lines to start at the leading edge of horizontal
120878e8fc6bSVille Syrjälä 		 * active. So, should we get here before we've crossed into
120978e8fc6bSVille Syrjälä 		 * the horizontal active of the first line in vblank, we would
121078e8fc6bSVille Syrjälä 		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
121178e8fc6bSVille Syrjälä 		 * always add htotal-hsync_start to the current pixel position.
121278e8fc6bSVille Syrjälä 		 */
121378e8fc6bSVille Syrjälä 		position = (position + htotal - hsync_start) % vtotal;
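		/*
		 * Illustration with hypothetical timings: for htotal = 2200
		 * and hsync_start = 2008, the correction above shifts the
		 * sampled pixel position forward by 192 pixels, so a sample
		 * taken right at the vblank interrupt (start of hsync on the
		 * line before vblank) lands at the leading edge of the first
		 * vblank line and is treated as being inside vblank below.
		 */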
12143aa18df8SVille Syrjälä 	}
12153aa18df8SVille Syrjälä 
1216ad3543edSMario Kleiner 	/* Get optional system timestamp after query. */
1217ad3543edSMario Kleiner 	if (etime)
1218ad3543edSMario Kleiner 		*etime = ktime_get();
1219ad3543edSMario Kleiner 
1220ad3543edSMario Kleiner 	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
1221ad3543edSMario Kleiner 
1222ad3543edSMario Kleiner 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
1223ad3543edSMario Kleiner 
12243aa18df8SVille Syrjälä 	/*
12253aa18df8SVille Syrjälä 	 * While in vblank, position will be negative
12263aa18df8SVille Syrjälä 	 * counting up towards 0 at vbl_end. And outside
12273aa18df8SVille Syrjälä 	 * vblank, position will be positive counting
12283aa18df8SVille Syrjälä 	 * up since vbl_end.
12293aa18df8SVille Syrjälä 	 */
12303aa18df8SVille Syrjälä 	if (position >= vbl_start)
12313aa18df8SVille Syrjälä 		position -= vbl_end;
12323aa18df8SVille Syrjälä 	else
12333aa18df8SVille Syrjälä 		position += vtotal - vbl_end;
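	/*
	 * Sketch with hypothetical numbers: for vbl_start = 1080 and
	 * vbl_end = vtotal = 1125 (scanline counter case), scanline 1124
	 * becomes -1 (last vblank line) and scanline 0 stays 0 (first
	 * active line), matching the description above.
	 */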
12343aa18df8SVille Syrjälä 
12358a920e24SVille Syrjälä 	if (use_scanline_counter) {
12363aa18df8SVille Syrjälä 		*vpos = position;
12373aa18df8SVille Syrjälä 		*hpos = 0;
12383aa18df8SVille Syrjälä 	} else {
12390af7e4dfSMario Kleiner 		*vpos = position / htotal;
12400af7e4dfSMario Kleiner 		*hpos = position - (*vpos * htotal);
12410af7e4dfSMario Kleiner 	}
12420af7e4dfSMario Kleiner 
12431bf6ad62SDaniel Vetter 	return true;
12440af7e4dfSMario Kleiner }
12450af7e4dfSMario Kleiner 
1246a225f079SVille Syrjälä int intel_get_crtc_scanline(struct intel_crtc *crtc)
1247a225f079SVille Syrjälä {
1248fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1249a225f079SVille Syrjälä 	unsigned long irqflags;
1250a225f079SVille Syrjälä 	int position;
1251a225f079SVille Syrjälä 
1252a225f079SVille Syrjälä 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
1253a225f079SVille Syrjälä 	position = __intel_get_crtc_scanline(crtc);
1254a225f079SVille Syrjälä 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
1255a225f079SVille Syrjälä 
1256a225f079SVille Syrjälä 	return position;
1257a225f079SVille Syrjälä }
1258a225f079SVille Syrjälä 
125991d14251STvrtko Ursulin static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv)
1260f97108d1SJesse Barnes {
12614f5fd91fSTvrtko Ursulin 	struct intel_uncore *uncore = &dev_priv->uncore;
1262b5b72e89SMatthew Garrett 	u32 busy_up, busy_down, max_avg, min_avg;
12639270388eSDaniel Vetter 	u8 new_delay;
12649270388eSDaniel Vetter 
1265d0ecd7e2SDaniel Vetter 	spin_lock(&mchdev_lock);
1266f97108d1SJesse Barnes 
12674f5fd91fSTvrtko Ursulin 	intel_uncore_write16(uncore,
12684f5fd91fSTvrtko Ursulin 			     MEMINTRSTS,
12694f5fd91fSTvrtko Ursulin 			     intel_uncore_read(uncore, MEMINTRSTS));
127073edd18fSDaniel Vetter 
127120e4d407SDaniel Vetter 	new_delay = dev_priv->ips.cur_delay;
12729270388eSDaniel Vetter 
12734f5fd91fSTvrtko Ursulin 	intel_uncore_write16(uncore, MEMINTRSTS, MEMINT_EVAL_CHG);
12744f5fd91fSTvrtko Ursulin 	busy_up = intel_uncore_read(uncore, RCPREVBSYTUPAVG);
12754f5fd91fSTvrtko Ursulin 	busy_down = intel_uncore_read(uncore, RCPREVBSYTDNAVG);
12764f5fd91fSTvrtko Ursulin 	max_avg = intel_uncore_read(uncore, RCBMAXAVG);
12774f5fd91fSTvrtko Ursulin 	min_avg = intel_uncore_read(uncore, RCBMINAVG);
1278f97108d1SJesse Barnes 
1279f97108d1SJesse Barnes 	/* Handle RCS change request from hw */
1280b5b72e89SMatthew Garrett 	if (busy_up > max_avg) {
128120e4d407SDaniel Vetter 		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
128220e4d407SDaniel Vetter 			new_delay = dev_priv->ips.cur_delay - 1;
128320e4d407SDaniel Vetter 		if (new_delay < dev_priv->ips.max_delay)
128420e4d407SDaniel Vetter 			new_delay = dev_priv->ips.max_delay;
1285b5b72e89SMatthew Garrett 	} else if (busy_down < min_avg) {
128620e4d407SDaniel Vetter 		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
128720e4d407SDaniel Vetter 			new_delay = dev_priv->ips.cur_delay + 1;
128820e4d407SDaniel Vetter 		if (new_delay > dev_priv->ips.min_delay)
128920e4d407SDaniel Vetter 			new_delay = dev_priv->ips.min_delay;
1290f97108d1SJesse Barnes 	}
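	/*
	 * Note on the clamping above: the delay is decremented on a
	 * "busy up" request and incremented on "busy down", so a smaller
	 * delay value corresponds to a higher frequency, which is why
	 * max_delay acts as the lower numeric bound and min_delay as the
	 * upper one.
	 */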
1291f97108d1SJesse Barnes 
129291d14251STvrtko Ursulin 	if (ironlake_set_drps(dev_priv, new_delay))
129320e4d407SDaniel Vetter 		dev_priv->ips.cur_delay = new_delay;
1294f97108d1SJesse Barnes 
1295d0ecd7e2SDaniel Vetter 	spin_unlock(&mchdev_lock);
12969270388eSDaniel Vetter 
1297f97108d1SJesse Barnes 	return;
1298f97108d1SJesse Barnes }
1299f97108d1SJesse Barnes 
130043cf3bf0SChris Wilson static void vlv_c0_read(struct drm_i915_private *dev_priv,
130143cf3bf0SChris Wilson 			struct intel_rps_ei *ei)
130231685c25SDeepak S {
1303679cb6c1SMika Kuoppala 	ei->ktime = ktime_get_raw();
130443cf3bf0SChris Wilson 	ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT);
130543cf3bf0SChris Wilson 	ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
130631685c25SDeepak S }
130731685c25SDeepak S 
130843cf3bf0SChris Wilson void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
130943cf3bf0SChris Wilson {
1310562d9baeSSagar Arun Kamble 	memset(&dev_priv->gt_pm.rps.ei, 0, sizeof(dev_priv->gt_pm.rps.ei));
131143cf3bf0SChris Wilson }
131243cf3bf0SChris Wilson 
131343cf3bf0SChris Wilson static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
131443cf3bf0SChris Wilson {
1315562d9baeSSagar Arun Kamble 	struct intel_rps *rps = &dev_priv->gt_pm.rps;
1316562d9baeSSagar Arun Kamble 	const struct intel_rps_ei *prev = &rps->ei;
131743cf3bf0SChris Wilson 	struct intel_rps_ei now;
131843cf3bf0SChris Wilson 	u32 events = 0;
131943cf3bf0SChris Wilson 
1320e0e8c7cbSChris Wilson 	if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0)
132143cf3bf0SChris Wilson 		return 0;
132243cf3bf0SChris Wilson 
132343cf3bf0SChris Wilson 	vlv_c0_read(dev_priv, &now);
132431685c25SDeepak S 
1325679cb6c1SMika Kuoppala 	if (prev->ktime) {
1326e0e8c7cbSChris Wilson 		u64 time, c0;
1327569884e3SChris Wilson 		u32 render, media;
1328e0e8c7cbSChris Wilson 
1329679cb6c1SMika Kuoppala 		time = ktime_us_delta(now.ktime, prev->ktime);
13308f68d591SChris Wilson 
1331e0e8c7cbSChris Wilson 		time *= dev_priv->czclk_freq;
1332e0e8c7cbSChris Wilson 
1333e0e8c7cbSChris Wilson 		/* Workload can be split between render + media,
1334e0e8c7cbSChris Wilson 		 * e.g. SwapBuffers being blitted in X after being rendered in
1335e0e8c7cbSChris Wilson 		 * mesa. To account for this we need to combine both engines
1336e0e8c7cbSChris Wilson 		 * into our activity counter.
1337e0e8c7cbSChris Wilson 		 */
1338569884e3SChris Wilson 		render = now.render_c0 - prev->render_c0;
1339569884e3SChris Wilson 		media = now.media_c0 - prev->media_c0;
1340569884e3SChris Wilson 		c0 = max(render, media);
13416b7f6aa7SMika Kuoppala 		c0 *= 1000 * 100 << 8; /* to usecs and scale to threshold% */
1342e0e8c7cbSChris Wilson 
134360548c55SChris Wilson 		if (c0 > time * rps->power.up_threshold)
1344e0e8c7cbSChris Wilson 			events = GEN6_PM_RP_UP_THRESHOLD;
134560548c55SChris Wilson 		else if (c0 < time * rps->power.down_threshold)
1346e0e8c7cbSChris Wilson 			events = GEN6_PM_RP_DOWN_THRESHOLD;
134731685c25SDeepak S 	}
134831685c25SDeepak S 
1349562d9baeSSagar Arun Kamble 	rps->ei = now;
135043cf3bf0SChris Wilson 	return events;
135131685c25SDeepak S }
135231685c25SDeepak S 
13534912d041SBen Widawsky static void gen6_pm_rps_work(struct work_struct *work)
13543b8d8d91SJesse Barnes {
13552d1013ddSJani Nikula 	struct drm_i915_private *dev_priv =
1356562d9baeSSagar Arun Kamble 		container_of(work, struct drm_i915_private, gt_pm.rps.work);
1357562d9baeSSagar Arun Kamble 	struct intel_rps *rps = &dev_priv->gt_pm.rps;
13587c0a16adSChris Wilson 	bool client_boost = false;
13598d3afd7dSChris Wilson 	int new_delay, adj, min, max;
13607c0a16adSChris Wilson 	u32 pm_iir = 0;
13613b8d8d91SJesse Barnes 
136259cdb63dSDaniel Vetter 	spin_lock_irq(&dev_priv->irq_lock);
1363562d9baeSSagar Arun Kamble 	if (rps->interrupts_enabled) {
1364562d9baeSSagar Arun Kamble 		pm_iir = fetch_and_zero(&rps->pm_iir);
1365562d9baeSSagar Arun Kamble 		client_boost = atomic_read(&rps->num_waiters);
1366d4d70aa5SImre Deak 	}
136759cdb63dSDaniel Vetter 	spin_unlock_irq(&dev_priv->irq_lock);
13684912d041SBen Widawsky 
136960611c13SPaulo Zanoni 	/* Make sure we didn't queue anything we're not going to process. */
1370a6706b45SDeepak S 	WARN_ON(pm_iir & ~dev_priv->pm_rps_events);
13718d3afd7dSChris Wilson 	if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
13727c0a16adSChris Wilson 		goto out;
13733b8d8d91SJesse Barnes 
1374ebb5eb7dSChris Wilson 	mutex_lock(&rps->lock);
13757b9e0ae6SChris Wilson 
137643cf3bf0SChris Wilson 	pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);
137743cf3bf0SChris Wilson 
1378562d9baeSSagar Arun Kamble 	adj = rps->last_adj;
1379562d9baeSSagar Arun Kamble 	new_delay = rps->cur_freq;
1380562d9baeSSagar Arun Kamble 	min = rps->min_freq_softlimit;
1381562d9baeSSagar Arun Kamble 	max = rps->max_freq_softlimit;
13827b92c1bdSChris Wilson 	if (client_boost)
1383562d9baeSSagar Arun Kamble 		max = rps->max_freq;
1384562d9baeSSagar Arun Kamble 	if (client_boost && new_delay < rps->boost_freq) {
1385562d9baeSSagar Arun Kamble 		new_delay = rps->boost_freq;
13868d3afd7dSChris Wilson 		adj = 0;
13878d3afd7dSChris Wilson 	} else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
1388dd75fdc8SChris Wilson 		if (adj > 0)
1389dd75fdc8SChris Wilson 			adj *= 2;
1390edcf284bSChris Wilson 		else /* CHV needs even encode values */
1391edcf284bSChris Wilson 			adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;
13927e79a683SSagar Arun Kamble 
1393562d9baeSSagar Arun Kamble 		if (new_delay >= rps->max_freq_softlimit)
13947e79a683SSagar Arun Kamble 			adj = 0;
13957b92c1bdSChris Wilson 	} else if (client_boost) {
1396f5a4c67dSChris Wilson 		adj = 0;
1397dd75fdc8SChris Wilson 	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
1398562d9baeSSagar Arun Kamble 		if (rps->cur_freq > rps->efficient_freq)
1399562d9baeSSagar Arun Kamble 			new_delay = rps->efficient_freq;
1400562d9baeSSagar Arun Kamble 		else if (rps->cur_freq > rps->min_freq_softlimit)
1401562d9baeSSagar Arun Kamble 			new_delay = rps->min_freq_softlimit;
1402dd75fdc8SChris Wilson 		adj = 0;
1403dd75fdc8SChris Wilson 	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
1404dd75fdc8SChris Wilson 		if (adj < 0)
1405dd75fdc8SChris Wilson 			adj *= 2;
1406edcf284bSChris Wilson 		else /* CHV needs even encode values */
1407edcf284bSChris Wilson 			adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;
14087e79a683SSagar Arun Kamble 
1409562d9baeSSagar Arun Kamble 		if (new_delay <= rps->min_freq_softlimit)
14107e79a683SSagar Arun Kamble 			adj = 0;
1411dd75fdc8SChris Wilson 	} else { /* unknown event */
1412edcf284bSChris Wilson 		adj = 0;
1413dd75fdc8SChris Wilson 	}
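	/*
	 * Example walk-through (hypothetical sequence): starting from
	 * adj = 0, consecutive UP_THRESHOLD interrupts ramp the step as
	 * 1, 2, 4, ... (2, 4, 8, ... on CHV, which needs even values), so
	 * sustained load climbs quickly, while a single DOWN_TIMEOUT drops
	 * straight to the efficient frequency and resets the step to 0.
	 */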
14143b8d8d91SJesse Barnes 
1415562d9baeSSagar Arun Kamble 	rps->last_adj = adj;
1416edcf284bSChris Wilson 
14172a8862d2SChris Wilson 	/*
14182a8862d2SChris Wilson 	 * Limit deboosting and boosting to keep ourselves at the extremes
14192a8862d2SChris Wilson 	 * when in the respective power modes (i.e. slowly decrease frequencies
14202a8862d2SChris Wilson 	 * while in the HIGH_POWER zone and slowly increase frequencies while
14212a8862d2SChris Wilson 	 * in the LOW_POWER zone). On idle, we will hit the timeout and drop
14222a8862d2SChris Wilson 	 * to the next level quickly, and conversely if busy we expect to
14232a8862d2SChris Wilson 	 * hit a waitboost and rapidly switch into max power.
14242a8862d2SChris Wilson 	 */
14252a8862d2SChris Wilson 	if ((adj < 0 && rps->power.mode == HIGH_POWER) ||
14262a8862d2SChris Wilson 	    (adj > 0 && rps->power.mode == LOW_POWER))
14272a8862d2SChris Wilson 		rps->last_adj = 0;
14282a8862d2SChris Wilson 
142979249636SBen Widawsky 	/* sysfs frequency interfaces may have snuck in while servicing the
143079249636SBen Widawsky 	 * interrupt
143179249636SBen Widawsky 	 */
1432edcf284bSChris Wilson 	new_delay += adj;
14338d3afd7dSChris Wilson 	new_delay = clamp_t(int, new_delay, min, max);
143427544369SDeepak S 
14359fcee2f7SChris Wilson 	if (intel_set_rps(dev_priv, new_delay)) {
14369fcee2f7SChris Wilson 		DRM_DEBUG_DRIVER("Failed to set new GPU frequency\n");
1437562d9baeSSagar Arun Kamble 		rps->last_adj = 0;
14389fcee2f7SChris Wilson 	}
14393b8d8d91SJesse Barnes 
1440ebb5eb7dSChris Wilson 	mutex_unlock(&rps->lock);
14417c0a16adSChris Wilson 
14427c0a16adSChris Wilson out:
14437c0a16adSChris Wilson 	/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
14447c0a16adSChris Wilson 	spin_lock_irq(&dev_priv->irq_lock);
1445562d9baeSSagar Arun Kamble 	if (rps->interrupts_enabled)
144658820574STvrtko Ursulin 		gen6_unmask_pm_irq(&dev_priv->gt, dev_priv->pm_rps_events);
14477c0a16adSChris Wilson 	spin_unlock_irq(&dev_priv->irq_lock);
14483b8d8d91SJesse Barnes }
14493b8d8d91SJesse Barnes 
1450e3689190SBen Widawsky 
1451e3689190SBen Widawsky /**
1452e3689190SBen Widawsky  * ivybridge_parity_work - Workqueue called when a parity error interrupt
1453e3689190SBen Widawsky  * occurred.
1454e3689190SBen Widawsky  * @work: workqueue struct
1455e3689190SBen Widawsky  *
1456e3689190SBen Widawsky  * Doesn't actually do anything except notify userspace. As a consequence of
1457e3689190SBen Widawsky  * this event, userspace should try to remap the bad rows, since statistically
1458e3689190SBen Widawsky  * the same row is more likely to go bad again.
1459e3689190SBen Widawsky  */
1460e3689190SBen Widawsky static void ivybridge_parity_work(struct work_struct *work)
1461e3689190SBen Widawsky {
14622d1013ddSJani Nikula 	struct drm_i915_private *dev_priv =
1463cefcff8fSJoonas Lahtinen 		container_of(work, typeof(*dev_priv), l3_parity.error_work);
1464e3689190SBen Widawsky 	u32 error_status, row, bank, subbank;
146535a85ac6SBen Widawsky 	char *parity_event[6];
1466a9c287c9SJani Nikula 	u32 misccpctl;
1467a9c287c9SJani Nikula 	u8 slice = 0;
1468e3689190SBen Widawsky 
1469e3689190SBen Widawsky 	/* We must turn off DOP level clock gating to access the L3 registers.
1470e3689190SBen Widawsky 	 * In order to prevent a get/put style interface, acquire struct mutex
1471e3689190SBen Widawsky 	 * any time we access those registers.
1472e3689190SBen Widawsky 	 */
147391c8a326SChris Wilson 	mutex_lock(&dev_priv->drm.struct_mutex);
1474e3689190SBen Widawsky 
147535a85ac6SBen Widawsky 	/* If we've screwed up tracking, just let the interrupt fire again */
147635a85ac6SBen Widawsky 	if (WARN_ON(!dev_priv->l3_parity.which_slice))
147735a85ac6SBen Widawsky 		goto out;
147835a85ac6SBen Widawsky 
1479e3689190SBen Widawsky 	misccpctl = I915_READ(GEN7_MISCCPCTL);
1480e3689190SBen Widawsky 	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
1481e3689190SBen Widawsky 	POSTING_READ(GEN7_MISCCPCTL);
1482e3689190SBen Widawsky 
148335a85ac6SBen Widawsky 	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
1484f0f59a00SVille Syrjälä 		i915_reg_t reg;
148535a85ac6SBen Widawsky 
148635a85ac6SBen Widawsky 		slice--;
14872d1fe073SJoonas Lahtinen 		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv)))
148835a85ac6SBen Widawsky 			break;
148935a85ac6SBen Widawsky 
149035a85ac6SBen Widawsky 		dev_priv->l3_parity.which_slice &= ~(1<<slice);
149135a85ac6SBen Widawsky 
14926fa1c5f1SVille Syrjälä 		reg = GEN7_L3CDERRST1(slice);
149335a85ac6SBen Widawsky 
149435a85ac6SBen Widawsky 		error_status = I915_READ(reg);
1495e3689190SBen Widawsky 		row = GEN7_PARITY_ERROR_ROW(error_status);
1496e3689190SBen Widawsky 		bank = GEN7_PARITY_ERROR_BANK(error_status);
1497e3689190SBen Widawsky 		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
1498e3689190SBen Widawsky 
149935a85ac6SBen Widawsky 		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
150035a85ac6SBen Widawsky 		POSTING_READ(reg);
1501e3689190SBen Widawsky 
1502cce723edSBen Widawsky 		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
1503e3689190SBen Widawsky 		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
1504e3689190SBen Widawsky 		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
1505e3689190SBen Widawsky 		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
150635a85ac6SBen Widawsky 		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
150735a85ac6SBen Widawsky 		parity_event[5] = NULL;
1508e3689190SBen Widawsky 
150991c8a326SChris Wilson 		kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj,
1510e3689190SBen Widawsky 				   KOBJ_CHANGE, parity_event);
1511e3689190SBen Widawsky 
151235a85ac6SBen Widawsky 		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
151335a85ac6SBen Widawsky 			  slice, row, bank, subbank);
1514e3689190SBen Widawsky 
151535a85ac6SBen Widawsky 		kfree(parity_event[4]);
1516e3689190SBen Widawsky 		kfree(parity_event[3]);
1517e3689190SBen Widawsky 		kfree(parity_event[2]);
1518e3689190SBen Widawsky 		kfree(parity_event[1]);
1519e3689190SBen Widawsky 	}
1520e3689190SBen Widawsky 
152135a85ac6SBen Widawsky 	I915_WRITE(GEN7_MISCCPCTL, misccpctl);
152235a85ac6SBen Widawsky 
152335a85ac6SBen Widawsky out:
152435a85ac6SBen Widawsky 	WARN_ON(dev_priv->l3_parity.which_slice);
15254cb21832SDaniel Vetter 	spin_lock_irq(&dev_priv->irq_lock);
15262d1fe073SJoonas Lahtinen 	gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
15274cb21832SDaniel Vetter 	spin_unlock_irq(&dev_priv->irq_lock);
152835a85ac6SBen Widawsky 
152991c8a326SChris Wilson 	mutex_unlock(&dev_priv->drm.struct_mutex);
153035a85ac6SBen Widawsky }
153135a85ac6SBen Widawsky 
1532261e40b8SVille Syrjälä static void ivybridge_parity_error_irq_handler(struct drm_i915_private *dev_priv,
1533261e40b8SVille Syrjälä 					       u32 iir)
1534e3689190SBen Widawsky {
1535261e40b8SVille Syrjälä 	if (!HAS_L3_DPF(dev_priv))
1536e3689190SBen Widawsky 		return;
1537e3689190SBen Widawsky 
1538d0ecd7e2SDaniel Vetter 	spin_lock(&dev_priv->irq_lock);
1539261e40b8SVille Syrjälä 	gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
1540d0ecd7e2SDaniel Vetter 	spin_unlock(&dev_priv->irq_lock);
1541e3689190SBen Widawsky 
1542261e40b8SVille Syrjälä 	iir &= GT_PARITY_ERROR(dev_priv);
154335a85ac6SBen Widawsky 	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
154435a85ac6SBen Widawsky 		dev_priv->l3_parity.which_slice |= 1 << 1;
154535a85ac6SBen Widawsky 
154635a85ac6SBen Widawsky 	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
154735a85ac6SBen Widawsky 		dev_priv->l3_parity.which_slice |= 1 << 0;
154835a85ac6SBen Widawsky 
1549a4da4fa4SDaniel Vetter 	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
1550e3689190SBen Widawsky }
1551e3689190SBen Widawsky 
1552261e40b8SVille Syrjälä static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv,
1553f1af8fc1SPaulo Zanoni 			       u32 gt_iir)
1554f1af8fc1SPaulo Zanoni {
1555f8973c21SChris Wilson 	if (gt_iir & GT_RENDER_USER_INTERRUPT)
15568a68d464SChris Wilson 		intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);
1557f1af8fc1SPaulo Zanoni 	if (gt_iir & ILK_BSD_USER_INTERRUPT)
15588a68d464SChris Wilson 		intel_engine_breadcrumbs_irq(dev_priv->engine[VCS0]);
1559f1af8fc1SPaulo Zanoni }
1560f1af8fc1SPaulo Zanoni 
1561261e40b8SVille Syrjälä static void snb_gt_irq_handler(struct drm_i915_private *dev_priv,
1562e7b4c6b1SDaniel Vetter 			       u32 gt_iir)
1563e7b4c6b1SDaniel Vetter {
1564f8973c21SChris Wilson 	if (gt_iir & GT_RENDER_USER_INTERRUPT)
15658a68d464SChris Wilson 		intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);
1566cc609d5dSBen Widawsky 	if (gt_iir & GT_BSD_USER_INTERRUPT)
15678a68d464SChris Wilson 		intel_engine_breadcrumbs_irq(dev_priv->engine[VCS0]);
1568cc609d5dSBen Widawsky 	if (gt_iir & GT_BLT_USER_INTERRUPT)
15698a68d464SChris Wilson 		intel_engine_breadcrumbs_irq(dev_priv->engine[BCS0]);
1570e7b4c6b1SDaniel Vetter 
1571cc609d5dSBen Widawsky 	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
1572cc609d5dSBen Widawsky 		      GT_BSD_CS_ERROR_INTERRUPT |
1573aaecdf61SDaniel Vetter 		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
1574aaecdf61SDaniel Vetter 		DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);
1575e3689190SBen Widawsky 
1576261e40b8SVille Syrjälä 	if (gt_iir & GT_PARITY_ERROR(dev_priv))
1577261e40b8SVille Syrjälä 		ivybridge_parity_error_irq_handler(dev_priv, gt_iir);
1578e7b4c6b1SDaniel Vetter }
1579e7b4c6b1SDaniel Vetter 
15805d3d69d5SChris Wilson static void
158151f6b0f9SChris Wilson gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir)
1582fbcc1a0cSNick Hoath {
158331de7350SChris Wilson 	bool tasklet = false;
1584f747026cSChris Wilson 
1585fd8526e5SChris Wilson 	if (iir & GT_CONTEXT_SWITCH_INTERRUPT)
15868ea397faSChris Wilson 		tasklet = true;
158731de7350SChris Wilson 
158851f6b0f9SChris Wilson 	if (iir & GT_RENDER_USER_INTERRUPT) {
158952c0fdb2SChris Wilson 		intel_engine_breadcrumbs_irq(engine);
15904c6ce5c9SChris Wilson 		tasklet |= intel_engine_needs_breadcrumb_tasklet(engine);
159131de7350SChris Wilson 	}
159231de7350SChris Wilson 
159331de7350SChris Wilson 	if (tasklet)
1594fd8526e5SChris Wilson 		tasklet_hi_schedule(&engine->execlists.tasklet);
1595fbcc1a0cSNick Hoath }
1596fbcc1a0cSNick Hoath 
15972e4a5b25SChris Wilson static void gen8_gt_irq_ack(struct drm_i915_private *i915,
159855ef72f2SChris Wilson 			    u32 master_ctl, u32 gt_iir[4])
1599abd58f01SBen Widawsky {
160025286aacSDaniele Ceraolo Spurio 	void __iomem * const regs = i915->uncore.regs;
16012e4a5b25SChris Wilson 
1602f0fd96f5SChris Wilson #define GEN8_GT_IRQS (GEN8_GT_RCS_IRQ | \
1603f0fd96f5SChris Wilson 		      GEN8_GT_BCS_IRQ | \
16048a68d464SChris Wilson 		      GEN8_GT_VCS0_IRQ | \
1605f0fd96f5SChris Wilson 		      GEN8_GT_VCS1_IRQ | \
1606f0fd96f5SChris Wilson 		      GEN8_GT_VECS_IRQ | \
1607f0fd96f5SChris Wilson 		      GEN8_GT_PM_IRQ | \
1608f0fd96f5SChris Wilson 		      GEN8_GT_GUC_IRQ)
1609f0fd96f5SChris Wilson 
1610abd58f01SBen Widawsky 	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
16112e4a5b25SChris Wilson 		gt_iir[0] = raw_reg_read(regs, GEN8_GT_IIR(0));
16122e4a5b25SChris Wilson 		if (likely(gt_iir[0]))
16132e4a5b25SChris Wilson 			raw_reg_write(regs, GEN8_GT_IIR(0), gt_iir[0]);
1614abd58f01SBen Widawsky 	}
1615abd58f01SBen Widawsky 
16168a68d464SChris Wilson 	if (master_ctl & (GEN8_GT_VCS0_IRQ | GEN8_GT_VCS1_IRQ)) {
16172e4a5b25SChris Wilson 		gt_iir[1] = raw_reg_read(regs, GEN8_GT_IIR(1));
16182e4a5b25SChris Wilson 		if (likely(gt_iir[1]))
16192e4a5b25SChris Wilson 			raw_reg_write(regs, GEN8_GT_IIR(1), gt_iir[1]);
162074cdb337SChris Wilson 	}
162174cdb337SChris Wilson 
162226705e20SSagar Arun Kamble 	if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
16232e4a5b25SChris Wilson 		gt_iir[2] = raw_reg_read(regs, GEN8_GT_IIR(2));
1624f4de7794SChris Wilson 		if (likely(gt_iir[2]))
1625f4de7794SChris Wilson 			raw_reg_write(regs, GEN8_GT_IIR(2), gt_iir[2]);
16260961021aSBen Widawsky 	}
16272e4a5b25SChris Wilson 
16282e4a5b25SChris Wilson 	if (master_ctl & GEN8_GT_VECS_IRQ) {
16292e4a5b25SChris Wilson 		gt_iir[3] = raw_reg_read(regs, GEN8_GT_IIR(3));
16302e4a5b25SChris Wilson 		if (likely(gt_iir[3]))
16312e4a5b25SChris Wilson 			raw_reg_write(regs, GEN8_GT_IIR(3), gt_iir[3]);
163255ef72f2SChris Wilson 	}
1633abd58f01SBen Widawsky }
1634abd58f01SBen Widawsky 
16352e4a5b25SChris Wilson static void gen8_gt_irq_handler(struct drm_i915_private *i915,
1636f0fd96f5SChris Wilson 				u32 master_ctl, u32 gt_iir[4])
1637e30e251aSVille Syrjälä {
1638f0fd96f5SChris Wilson 	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
16398a68d464SChris Wilson 		gen8_cs_irq_handler(i915->engine[RCS0],
164051f6b0f9SChris Wilson 				    gt_iir[0] >> GEN8_RCS_IRQ_SHIFT);
16418a68d464SChris Wilson 		gen8_cs_irq_handler(i915->engine[BCS0],
164251f6b0f9SChris Wilson 				    gt_iir[0] >> GEN8_BCS_IRQ_SHIFT);
1643e30e251aSVille Syrjälä 	}
1644e30e251aSVille Syrjälä 
16458a68d464SChris Wilson 	if (master_ctl & (GEN8_GT_VCS0_IRQ | GEN8_GT_VCS1_IRQ)) {
16468a68d464SChris Wilson 		gen8_cs_irq_handler(i915->engine[VCS0],
16478a68d464SChris Wilson 				    gt_iir[1] >> GEN8_VCS0_IRQ_SHIFT);
16488a68d464SChris Wilson 		gen8_cs_irq_handler(i915->engine[VCS1],
164951f6b0f9SChris Wilson 				    gt_iir[1] >> GEN8_VCS1_IRQ_SHIFT);
1650e30e251aSVille Syrjälä 	}
1651e30e251aSVille Syrjälä 
1652f0fd96f5SChris Wilson 	if (master_ctl & GEN8_GT_VECS_IRQ) {
16538a68d464SChris Wilson 		gen8_cs_irq_handler(i915->engine[VECS0],
165451f6b0f9SChris Wilson 				    gt_iir[3] >> GEN8_VECS_IRQ_SHIFT);
1655f0fd96f5SChris Wilson 	}
1656e30e251aSVille Syrjälä 
1657f0fd96f5SChris Wilson 	if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
16582e4a5b25SChris Wilson 		gen6_rps_irq_handler(i915, gt_iir[2]);
16592e4a5b25SChris Wilson 		gen9_guc_irq_handler(i915, gt_iir[2]);
1660e30e251aSVille Syrjälä 	}
1661f0fd96f5SChris Wilson }
1662e30e251aSVille Syrjälä 
1663af92058fSVille Syrjälä static bool gen11_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1664121e758eSDhinakaran Pandiyan {
1665af92058fSVille Syrjälä 	switch (pin) {
1666af92058fSVille Syrjälä 	case HPD_PORT_C:
1667121e758eSDhinakaran Pandiyan 		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC1);
1668af92058fSVille Syrjälä 	case HPD_PORT_D:
1669121e758eSDhinakaran Pandiyan 		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC2);
1670af92058fSVille Syrjälä 	case HPD_PORT_E:
1671121e758eSDhinakaran Pandiyan 		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC3);
1672af92058fSVille Syrjälä 	case HPD_PORT_F:
1673121e758eSDhinakaran Pandiyan 		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC4);
1674121e758eSDhinakaran Pandiyan 	default:
1675121e758eSDhinakaran Pandiyan 		return false;
1676121e758eSDhinakaran Pandiyan 	}
1677121e758eSDhinakaran Pandiyan }
1678121e758eSDhinakaran Pandiyan 
1679af92058fSVille Syrjälä static bool bxt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
168063c88d22SImre Deak {
1681af92058fSVille Syrjälä 	switch (pin) {
1682af92058fSVille Syrjälä 	case HPD_PORT_A:
1683195baa06SVille Syrjälä 		return val & PORTA_HOTPLUG_LONG_DETECT;
1684af92058fSVille Syrjälä 	case HPD_PORT_B:
168563c88d22SImre Deak 		return val & PORTB_HOTPLUG_LONG_DETECT;
1686af92058fSVille Syrjälä 	case HPD_PORT_C:
168763c88d22SImre Deak 		return val & PORTC_HOTPLUG_LONG_DETECT;
168863c88d22SImre Deak 	default:
168963c88d22SImre Deak 		return false;
169063c88d22SImre Deak 	}
169163c88d22SImre Deak }
169263c88d22SImre Deak 
1693af92058fSVille Syrjälä static bool icp_ddi_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
169431604222SAnusha Srivatsa {
1695af92058fSVille Syrjälä 	switch (pin) {
1696af92058fSVille Syrjälä 	case HPD_PORT_A:
169731604222SAnusha Srivatsa 		return val & ICP_DDIA_HPD_LONG_DETECT;
1698af92058fSVille Syrjälä 	case HPD_PORT_B:
169931604222SAnusha Srivatsa 		return val & ICP_DDIB_HPD_LONG_DETECT;
170031604222SAnusha Srivatsa 	default:
170131604222SAnusha Srivatsa 		return false;
170231604222SAnusha Srivatsa 	}
170331604222SAnusha Srivatsa }
170431604222SAnusha Srivatsa 
1705af92058fSVille Syrjälä static bool icp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
170631604222SAnusha Srivatsa {
1707af92058fSVille Syrjälä 	switch (pin) {
1708af92058fSVille Syrjälä 	case HPD_PORT_C:
170931604222SAnusha Srivatsa 		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC1);
1710af92058fSVille Syrjälä 	case HPD_PORT_D:
171131604222SAnusha Srivatsa 		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC2);
1712af92058fSVille Syrjälä 	case HPD_PORT_E:
171331604222SAnusha Srivatsa 		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC3);
1714af92058fSVille Syrjälä 	case HPD_PORT_F:
171531604222SAnusha Srivatsa 		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC4);
171631604222SAnusha Srivatsa 	default:
171731604222SAnusha Srivatsa 		return false;
171831604222SAnusha Srivatsa 	}
171931604222SAnusha Srivatsa }
172031604222SAnusha Srivatsa 
1721af92058fSVille Syrjälä static bool spt_port_hotplug2_long_detect(enum hpd_pin pin, u32 val)
17226dbf30ceSVille Syrjälä {
1723af92058fSVille Syrjälä 	switch (pin) {
1724af92058fSVille Syrjälä 	case HPD_PORT_E:
17256dbf30ceSVille Syrjälä 		return val & PORTE_HOTPLUG_LONG_DETECT;
17266dbf30ceSVille Syrjälä 	default:
17276dbf30ceSVille Syrjälä 		return false;
17286dbf30ceSVille Syrjälä 	}
17296dbf30ceSVille Syrjälä }
17306dbf30ceSVille Syrjälä 
1731af92058fSVille Syrjälä static bool spt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
173274c0b395SVille Syrjälä {
1733af92058fSVille Syrjälä 	switch (pin) {
1734af92058fSVille Syrjälä 	case HPD_PORT_A:
173574c0b395SVille Syrjälä 		return val & PORTA_HOTPLUG_LONG_DETECT;
1736af92058fSVille Syrjälä 	case HPD_PORT_B:
173774c0b395SVille Syrjälä 		return val & PORTB_HOTPLUG_LONG_DETECT;
1738af92058fSVille Syrjälä 	case HPD_PORT_C:
173974c0b395SVille Syrjälä 		return val & PORTC_HOTPLUG_LONG_DETECT;
1740af92058fSVille Syrjälä 	case HPD_PORT_D:
174174c0b395SVille Syrjälä 		return val & PORTD_HOTPLUG_LONG_DETECT;
174274c0b395SVille Syrjälä 	default:
174374c0b395SVille Syrjälä 		return false;
174474c0b395SVille Syrjälä 	}
174574c0b395SVille Syrjälä }
174674c0b395SVille Syrjälä 
1747af92058fSVille Syrjälä static bool ilk_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1748e4ce95aaSVille Syrjälä {
1749af92058fSVille Syrjälä 	switch (pin) {
1750af92058fSVille Syrjälä 	case HPD_PORT_A:
1751e4ce95aaSVille Syrjälä 		return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
1752e4ce95aaSVille Syrjälä 	default:
1753e4ce95aaSVille Syrjälä 		return false;
1754e4ce95aaSVille Syrjälä 	}
1755e4ce95aaSVille Syrjälä }
1756e4ce95aaSVille Syrjälä 
1757af92058fSVille Syrjälä static bool pch_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
175813cf5504SDave Airlie {
1759af92058fSVille Syrjälä 	switch (pin) {
1760af92058fSVille Syrjälä 	case HPD_PORT_B:
1761676574dfSJani Nikula 		return val & PORTB_HOTPLUG_LONG_DETECT;
1762af92058fSVille Syrjälä 	case HPD_PORT_C:
1763676574dfSJani Nikula 		return val & PORTC_HOTPLUG_LONG_DETECT;
1764af92058fSVille Syrjälä 	case HPD_PORT_D:
1765676574dfSJani Nikula 		return val & PORTD_HOTPLUG_LONG_DETECT;
1766676574dfSJani Nikula 	default:
1767676574dfSJani Nikula 		return false;
176813cf5504SDave Airlie 	}
176913cf5504SDave Airlie }
177013cf5504SDave Airlie 
1771af92058fSVille Syrjälä static bool i9xx_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
177213cf5504SDave Airlie {
1773af92058fSVille Syrjälä 	switch (pin) {
1774af92058fSVille Syrjälä 	case HPD_PORT_B:
1775676574dfSJani Nikula 		return val & PORTB_HOTPLUG_INT_LONG_PULSE;
1776af92058fSVille Syrjälä 	case HPD_PORT_C:
1777676574dfSJani Nikula 		return val & PORTC_HOTPLUG_INT_LONG_PULSE;
1778af92058fSVille Syrjälä 	case HPD_PORT_D:
1779676574dfSJani Nikula 		return val & PORTD_HOTPLUG_INT_LONG_PULSE;
1780676574dfSJani Nikula 	default:
1781676574dfSJani Nikula 		return false;
178213cf5504SDave Airlie 	}
178313cf5504SDave Airlie }
178413cf5504SDave Airlie 
178542db67d6SVille Syrjälä /*
178642db67d6SVille Syrjälä  * Get a bit mask of pins that have triggered, and which ones may be long.
178742db67d6SVille Syrjälä  * This can be called multiple times with the same masks to accumulate
178842db67d6SVille Syrjälä  * hotplug detection results from several registers.
178942db67d6SVille Syrjälä  *
179042db67d6SVille Syrjälä  * Note that the caller is expected to zero out the masks initially.
179142db67d6SVille Syrjälä  */
1792cf53902fSRodrigo Vivi static void intel_get_hpd_pins(struct drm_i915_private *dev_priv,
1793cf53902fSRodrigo Vivi 			       u32 *pin_mask, u32 *long_mask,
17948c841e57SJani Nikula 			       u32 hotplug_trigger, u32 dig_hotplug_reg,
1795fd63e2a9SImre Deak 			       const u32 hpd[HPD_NUM_PINS],
1796af92058fSVille Syrjälä 			       bool long_pulse_detect(enum hpd_pin pin, u32 val))
1797676574dfSJani Nikula {
1798e9be2850SVille Syrjälä 	enum hpd_pin pin;
1799676574dfSJani Nikula 
1800e9be2850SVille Syrjälä 	for_each_hpd_pin(pin) {
1801e9be2850SVille Syrjälä 		if ((hpd[pin] & hotplug_trigger) == 0)
18028c841e57SJani Nikula 			continue;
18038c841e57SJani Nikula 
1804e9be2850SVille Syrjälä 		*pin_mask |= BIT(pin);
1805676574dfSJani Nikula 
1806af92058fSVille Syrjälä 		if (long_pulse_detect(pin, dig_hotplug_reg))
1807e9be2850SVille Syrjälä 			*long_mask |= BIT(pin);
1808676574dfSJani Nikula 	}
1809676574dfSJani Nikula 
1810f88f0478SVille Syrjälä 	DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x, long 0x%08x\n",
1811f88f0478SVille Syrjälä 			 hotplug_trigger, dig_hotplug_reg, *pin_mask, *long_mask);
1812676574dfSJani Nikula 
1813676574dfSJani Nikula }
1814676574dfSJani Nikula 
181591d14251STvrtko Ursulin static void gmbus_irq_handler(struct drm_i915_private *dev_priv)
1816515ac2bbSDaniel Vetter {
181728c70f16SDaniel Vetter 	wake_up_all(&dev_priv->gmbus_wait_queue);
1818515ac2bbSDaniel Vetter }
1819515ac2bbSDaniel Vetter 
182091d14251STvrtko Ursulin static void dp_aux_irq_handler(struct drm_i915_private *dev_priv)
1821ce99c256SDaniel Vetter {
18229ee32feaSDaniel Vetter 	wake_up_all(&dev_priv->gmbus_wait_queue);
1823ce99c256SDaniel Vetter }
1824ce99c256SDaniel Vetter 
18258bf1e9f1SShuang He #if defined(CONFIG_DEBUG_FS)
182691d14251STvrtko Ursulin static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
182791d14251STvrtko Ursulin 					 enum pipe pipe,
1828a9c287c9SJani Nikula 					 u32 crc0, u32 crc1,
1829a9c287c9SJani Nikula 					 u32 crc2, u32 crc3,
1830a9c287c9SJani Nikula 					 u32 crc4)
18318bf1e9f1SShuang He {
18328bf1e9f1SShuang He 	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
18338c6b709dSTomeu Vizoso 	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
18345cee6c45SVille Syrjälä 	u32 crcs[5] = { crc0, crc1, crc2, crc3, crc4 };
18355cee6c45SVille Syrjälä 
18365cee6c45SVille Syrjälä 	trace_intel_pipe_crc(crtc, crcs);
1837b2c88f5bSDamien Lespiau 
1838d538bbdfSDamien Lespiau 	spin_lock(&pipe_crc->lock);
18398c6b709dSTomeu Vizoso 	/*
18408c6b709dSTomeu Vizoso 	 * For some not yet identified reason, the first CRC is
18418c6b709dSTomeu Vizoso 	 * bonkers. So let's just wait for the next vblank and read
18428c6b709dSTomeu Vizoso 	 * out the buggy result.
18438c6b709dSTomeu Vizoso 	 *
1844163e8aecSRodrigo Vivi 	 * On GEN8+ sometimes the second CRC is bonkers as well, so
18458c6b709dSTomeu Vizoso 	 * don't trust that one either.
18468c6b709dSTomeu Vizoso 	 */
1847033b7a23SMaarten Lankhorst 	if (pipe_crc->skipped <= 0 ||
1848163e8aecSRodrigo Vivi 	    (INTEL_GEN(dev_priv) >= 8 && pipe_crc->skipped == 1)) {
18498c6b709dSTomeu Vizoso 		pipe_crc->skipped++;
18508c6b709dSTomeu Vizoso 		spin_unlock(&pipe_crc->lock);
18518c6b709dSTomeu Vizoso 		return;
18528c6b709dSTomeu Vizoso 	}
18538c6b709dSTomeu Vizoso 	spin_unlock(&pipe_crc->lock);
18546cc42152SMaarten Lankhorst 
1855246ee524STomeu Vizoso 	drm_crtc_add_crc_entry(&crtc->base, true,
1856ca814b25SDaniel Vetter 				drm_crtc_accurate_vblank_count(&crtc->base),
1857246ee524STomeu Vizoso 				crcs);
18588c6b709dSTomeu Vizoso }
1859277de95eSDaniel Vetter #else
1860277de95eSDaniel Vetter static inline void
186191d14251STvrtko Ursulin display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
186291d14251STvrtko Ursulin 			     enum pipe pipe,
1863a9c287c9SJani Nikula 			     u32 crc0, u32 crc1,
1864a9c287c9SJani Nikula 			     u32 crc2, u32 crc3,
1865a9c287c9SJani Nikula 			     u32 crc4) {}
1866277de95eSDaniel Vetter #endif
1867eba94eb9SDaniel Vetter 
1868277de95eSDaniel Vetter 
186991d14251STvrtko Ursulin static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
187091d14251STvrtko Ursulin 				     enum pipe pipe)
18715a69b89fSDaniel Vetter {
187291d14251STvrtko Ursulin 	display_pipe_crc_irq_handler(dev_priv, pipe,
18735a69b89fSDaniel Vetter 				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
18745a69b89fSDaniel Vetter 				     0, 0, 0, 0);
18755a69b89fSDaniel Vetter }
18765a69b89fSDaniel Vetter 
187791d14251STvrtko Ursulin static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
187891d14251STvrtko Ursulin 				     enum pipe pipe)
1879eba94eb9SDaniel Vetter {
188091d14251STvrtko Ursulin 	display_pipe_crc_irq_handler(dev_priv, pipe,
1881eba94eb9SDaniel Vetter 				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1882eba94eb9SDaniel Vetter 				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
1883eba94eb9SDaniel Vetter 				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
1884eba94eb9SDaniel Vetter 				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
18858bc5e955SDaniel Vetter 				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
1886eba94eb9SDaniel Vetter }
18875b3a856bSDaniel Vetter 
188891d14251STvrtko Ursulin static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
188991d14251STvrtko Ursulin 				      enum pipe pipe)
18905b3a856bSDaniel Vetter {
1891a9c287c9SJani Nikula 	u32 res1, res2;
18920b5c5ed0SDaniel Vetter 
189391d14251STvrtko Ursulin 	if (INTEL_GEN(dev_priv) >= 3)
18940b5c5ed0SDaniel Vetter 		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
18950b5c5ed0SDaniel Vetter 	else
18960b5c5ed0SDaniel Vetter 		res1 = 0;
18970b5c5ed0SDaniel Vetter 
189891d14251STvrtko Ursulin 	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
18990b5c5ed0SDaniel Vetter 		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
19000b5c5ed0SDaniel Vetter 	else
19010b5c5ed0SDaniel Vetter 		res2 = 0;
19025b3a856bSDaniel Vetter 
190391d14251STvrtko Ursulin 	display_pipe_crc_irq_handler(dev_priv, pipe,
19040b5c5ed0SDaniel Vetter 				     I915_READ(PIPE_CRC_RES_RED(pipe)),
19050b5c5ed0SDaniel Vetter 				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
19060b5c5ed0SDaniel Vetter 				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
19070b5c5ed0SDaniel Vetter 				     res1, res2);
19085b3a856bSDaniel Vetter }
19098bf1e9f1SShuang He 
19101403c0d4SPaulo Zanoni /* The RPS events need forcewake, so we add them to a work queue and mask their
19111403c0d4SPaulo Zanoni  * IMR bits until the work is done. Other interrupts can be processed without
19121403c0d4SPaulo Zanoni  * the work queue. */
191358820574STvrtko Ursulin static void gen11_rps_irq_handler(struct intel_gt *gt, u32 pm_iir)
1914a087bafeSMika Kuoppala {
191558820574STvrtko Ursulin 	struct drm_i915_private *i915 = gt->i915;
1916a087bafeSMika Kuoppala 	struct intel_rps *rps = &i915->gt_pm.rps;
1917a087bafeSMika Kuoppala 	const u32 events = i915->pm_rps_events & pm_iir;
1918a087bafeSMika Kuoppala 
1919a087bafeSMika Kuoppala 	lockdep_assert_held(&i915->irq_lock);
1920a087bafeSMika Kuoppala 
1921a087bafeSMika Kuoppala 	if (unlikely(!events))
1922a087bafeSMika Kuoppala 		return;
1923a087bafeSMika Kuoppala 
192458820574STvrtko Ursulin 	gen6_mask_pm_irq(gt, events);
1925a087bafeSMika Kuoppala 
1926a087bafeSMika Kuoppala 	if (!rps->interrupts_enabled)
1927a087bafeSMika Kuoppala 		return;
1928a087bafeSMika Kuoppala 
1929a087bafeSMika Kuoppala 	rps->pm_iir |= events;
1930a087bafeSMika Kuoppala 	schedule_work(&rps->work);
1931a087bafeSMika Kuoppala }
1932a087bafeSMika Kuoppala 
19331403c0d4SPaulo Zanoni static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
1934baf02a1fSBen Widawsky {
1935562d9baeSSagar Arun Kamble 	struct intel_rps *rps = &dev_priv->gt_pm.rps;
1936562d9baeSSagar Arun Kamble 
1937a6706b45SDeepak S 	if (pm_iir & dev_priv->pm_rps_events) {
193859cdb63dSDaniel Vetter 		spin_lock(&dev_priv->irq_lock);
193958820574STvrtko Ursulin 		gen6_mask_pm_irq(&dev_priv->gt,
194058820574STvrtko Ursulin 				 pm_iir & dev_priv->pm_rps_events);
1941562d9baeSSagar Arun Kamble 		if (rps->interrupts_enabled) {
1942562d9baeSSagar Arun Kamble 			rps->pm_iir |= pm_iir & dev_priv->pm_rps_events;
1943562d9baeSSagar Arun Kamble 			schedule_work(&rps->work);
194441a05a3aSDaniel Vetter 		}
1945d4d70aa5SImre Deak 		spin_unlock(&dev_priv->irq_lock);
1946d4d70aa5SImre Deak 	}
1947baf02a1fSBen Widawsky 
1948bca2bf2aSPandiyan, Dhinakaran 	if (INTEL_GEN(dev_priv) >= 8)
1949c9a9a268SImre Deak 		return;
1950c9a9a268SImre Deak 
195112638c57SBen Widawsky 	if (pm_iir & PM_VEBOX_USER_INTERRUPT)
19528a68d464SChris Wilson 		intel_engine_breadcrumbs_irq(dev_priv->engine[VECS0]);
195312638c57SBen Widawsky 
1954aaecdf61SDaniel Vetter 	if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
1955aaecdf61SDaniel Vetter 		DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
195612638c57SBen Widawsky }
1957baf02a1fSBen Widawsky 
195826705e20SSagar Arun Kamble static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 gt_iir)
195926705e20SSagar Arun Kamble {
196093bf8096SMichal Wajdeczko 	if (gt_iir & GEN9_GUC_TO_HOST_INT_EVENT)
196193bf8096SMichal Wajdeczko 		intel_guc_to_host_event_handler(&dev_priv->guc);
196226705e20SSagar Arun Kamble }
196326705e20SSagar Arun Kamble 
196454c52a84SOscar Mateo static void gen11_guc_irq_handler(struct drm_i915_private *i915, u16 iir)
196554c52a84SOscar Mateo {
196654c52a84SOscar Mateo 	if (iir & GEN11_GUC_INTR_GUC2HOST)
196754c52a84SOscar Mateo 		intel_guc_to_host_event_handler(&i915->guc);
196854c52a84SOscar Mateo }
196954c52a84SOscar Mateo 
197044d9241eSVille Syrjälä static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
197144d9241eSVille Syrjälä {
197244d9241eSVille Syrjälä 	enum pipe pipe;
197344d9241eSVille Syrjälä 
197444d9241eSVille Syrjälä 	for_each_pipe(dev_priv, pipe) {
197544d9241eSVille Syrjälä 		I915_WRITE(PIPESTAT(pipe),
197644d9241eSVille Syrjälä 			   PIPESTAT_INT_STATUS_MASK |
197744d9241eSVille Syrjälä 			   PIPE_FIFO_UNDERRUN_STATUS);
197844d9241eSVille Syrjälä 
197944d9241eSVille Syrjälä 		dev_priv->pipestat_irq_mask[pipe] = 0;
198044d9241eSVille Syrjälä 	}
198144d9241eSVille Syrjälä }
198244d9241eSVille Syrjälä 
1983eb64343cSVille Syrjälä static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
198491d14251STvrtko Ursulin 				  u32 iir, u32 pipe_stats[I915_MAX_PIPES])
19857e231dbeSJesse Barnes {
19867e231dbeSJesse Barnes 	int pipe;
19877e231dbeSJesse Barnes 
198858ead0d7SImre Deak 	spin_lock(&dev_priv->irq_lock);
19891ca993d2SVille Syrjälä 
19901ca993d2SVille Syrjälä 	if (!dev_priv->display_irqs_enabled) {
19911ca993d2SVille Syrjälä 		spin_unlock(&dev_priv->irq_lock);
19921ca993d2SVille Syrjälä 		return;
19931ca993d2SVille Syrjälä 	}
19941ca993d2SVille Syrjälä 
1995055e393fSDamien Lespiau 	for_each_pipe(dev_priv, pipe) {
1996f0f59a00SVille Syrjälä 		i915_reg_t reg;
19976b12ca56SVille Syrjälä 		u32 status_mask, enable_mask, iir_bit = 0;
199891d181ddSImre Deak 
1999bbb5eebfSDaniel Vetter 		/*
2000bbb5eebfSDaniel Vetter 		 * PIPESTAT bits get signalled even when the interrupt is
2001bbb5eebfSDaniel Vetter 		 * disabled with the mask bits, and some of the status bits do
2002bbb5eebfSDaniel Vetter 		 * not generate interrupts at all (like the underrun bit). Hence
2003bbb5eebfSDaniel Vetter 		 * we need to be careful that we only handle what we want to
2004bbb5eebfSDaniel Vetter 		 * handle.
2005bbb5eebfSDaniel Vetter 		 */
20060f239f4cSDaniel Vetter 
20070f239f4cSDaniel Vetter 		/* fifo underruns are filtered in the underrun handler. */
20086b12ca56SVille Syrjälä 		status_mask = PIPE_FIFO_UNDERRUN_STATUS;
2009bbb5eebfSDaniel Vetter 
2010bbb5eebfSDaniel Vetter 		switch (pipe) {
2011bbb5eebfSDaniel Vetter 		case PIPE_A:
2012bbb5eebfSDaniel Vetter 			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
2013bbb5eebfSDaniel Vetter 			break;
2014bbb5eebfSDaniel Vetter 		case PIPE_B:
2015bbb5eebfSDaniel Vetter 			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
2016bbb5eebfSDaniel Vetter 			break;
20173278f67fSVille Syrjälä 		case PIPE_C:
20183278f67fSVille Syrjälä 			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
20193278f67fSVille Syrjälä 			break;
2020bbb5eebfSDaniel Vetter 		}
2021bbb5eebfSDaniel Vetter 		if (iir & iir_bit)
20226b12ca56SVille Syrjälä 			status_mask |= dev_priv->pipestat_irq_mask[pipe];
2023bbb5eebfSDaniel Vetter 
20246b12ca56SVille Syrjälä 		if (!status_mask)
202591d181ddSImre Deak 			continue;
202691d181ddSImre Deak 
202791d181ddSImre Deak 		reg = PIPESTAT(pipe);
20286b12ca56SVille Syrjälä 		pipe_stats[pipe] = I915_READ(reg) & status_mask;
20296b12ca56SVille Syrjälä 		enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
20307e231dbeSJesse Barnes 
20317e231dbeSJesse Barnes 		/*
20327e231dbeSJesse Barnes 		 * Clear the PIPE*STAT regs before the IIR
2033132c27c9SVille Syrjälä 		 *
2034132c27c9SVille Syrjälä 		 * Toggle the enable bits to make sure we get an
2035132c27c9SVille Syrjälä 		 * edge in the ISR pipe event bit if we don't clear
2036132c27c9SVille Syrjälä 		 * all the enabled status bits. Otherwise the edge
2037132c27c9SVille Syrjälä 		 * triggered IIR on i965/g4x wouldn't notice that
2038132c27c9SVille Syrjälä 		 * an interrupt is still pending.
20397e231dbeSJesse Barnes 		 */
2040132c27c9SVille Syrjälä 		if (pipe_stats[pipe]) {
2041132c27c9SVille Syrjälä 			I915_WRITE(reg, pipe_stats[pipe]);
2042132c27c9SVille Syrjälä 			I915_WRITE(reg, enable_mask);
2043132c27c9SVille Syrjälä 		}
20447e231dbeSJesse Barnes 	}
204558ead0d7SImre Deak 	spin_unlock(&dev_priv->irq_lock);
20462ecb8ca4SVille Syrjälä }
20472ecb8ca4SVille Syrjälä 
2048eb64343cSVille Syrjälä static void i8xx_pipestat_irq_handler(struct drm_i915_private *dev_priv,
2049eb64343cSVille Syrjälä 				      u16 iir, u32 pipe_stats[I915_MAX_PIPES])
2050eb64343cSVille Syrjälä {
2051eb64343cSVille Syrjälä 	enum pipe pipe;
2052eb64343cSVille Syrjälä 
2053eb64343cSVille Syrjälä 	for_each_pipe(dev_priv, pipe) {
2054eb64343cSVille Syrjälä 		if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
2055eb64343cSVille Syrjälä 			drm_handle_vblank(&dev_priv->drm, pipe);
2056eb64343cSVille Syrjälä 
2057eb64343cSVille Syrjälä 		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
2058eb64343cSVille Syrjälä 			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
2059eb64343cSVille Syrjälä 
2060eb64343cSVille Syrjälä 		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
2061eb64343cSVille Syrjälä 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2062eb64343cSVille Syrjälä 	}
2063eb64343cSVille Syrjälä }
2064eb64343cSVille Syrjälä 
2065eb64343cSVille Syrjälä static void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv,
2066eb64343cSVille Syrjälä 				      u32 iir, u32 pipe_stats[I915_MAX_PIPES])
2067eb64343cSVille Syrjälä {
2068eb64343cSVille Syrjälä 	bool blc_event = false;
2069eb64343cSVille Syrjälä 	enum pipe pipe;
2070eb64343cSVille Syrjälä 
2071eb64343cSVille Syrjälä 	for_each_pipe(dev_priv, pipe) {
2072eb64343cSVille Syrjälä 		if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
2073eb64343cSVille Syrjälä 			drm_handle_vblank(&dev_priv->drm, pipe);
2074eb64343cSVille Syrjälä 
2075eb64343cSVille Syrjälä 		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
2076eb64343cSVille Syrjälä 			blc_event = true;
2077eb64343cSVille Syrjälä 
2078eb64343cSVille Syrjälä 		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
2079eb64343cSVille Syrjälä 			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
2080eb64343cSVille Syrjälä 
2081eb64343cSVille Syrjälä 		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
2082eb64343cSVille Syrjälä 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2083eb64343cSVille Syrjälä 	}
2084eb64343cSVille Syrjälä 
2085eb64343cSVille Syrjälä 	if (blc_event || (iir & I915_ASLE_INTERRUPT))
2086eb64343cSVille Syrjälä 		intel_opregion_asle_intr(dev_priv);
2087eb64343cSVille Syrjälä }
2088eb64343cSVille Syrjälä 
2089eb64343cSVille Syrjälä static void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv,
2090eb64343cSVille Syrjälä 				      u32 iir, u32 pipe_stats[I915_MAX_PIPES])
2091eb64343cSVille Syrjälä {
2092eb64343cSVille Syrjälä 	bool blc_event = false;
2093eb64343cSVille Syrjälä 	enum pipe pipe;
2094eb64343cSVille Syrjälä 
2095eb64343cSVille Syrjälä 	for_each_pipe(dev_priv, pipe) {
2096eb64343cSVille Syrjälä 		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
2097eb64343cSVille Syrjälä 			drm_handle_vblank(&dev_priv->drm, pipe);
2098eb64343cSVille Syrjälä 
2099eb64343cSVille Syrjälä 		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
2100eb64343cSVille Syrjälä 			blc_event = true;
2101eb64343cSVille Syrjälä 
2102eb64343cSVille Syrjälä 		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
2103eb64343cSVille Syrjälä 			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
2104eb64343cSVille Syrjälä 
2105eb64343cSVille Syrjälä 		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
2106eb64343cSVille Syrjälä 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2107eb64343cSVille Syrjälä 	}
2108eb64343cSVille Syrjälä 
2109eb64343cSVille Syrjälä 	if (blc_event || (iir & I915_ASLE_INTERRUPT))
2110eb64343cSVille Syrjälä 		intel_opregion_asle_intr(dev_priv);
2111eb64343cSVille Syrjälä 
2112eb64343cSVille Syrjälä 	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
2113eb64343cSVille Syrjälä 		gmbus_irq_handler(dev_priv);
2114eb64343cSVille Syrjälä }
2115eb64343cSVille Syrjälä 
211691d14251STvrtko Ursulin static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
21172ecb8ca4SVille Syrjälä 					    u32 pipe_stats[I915_MAX_PIPES])
21182ecb8ca4SVille Syrjälä {
21192ecb8ca4SVille Syrjälä 	enum pipe pipe;
21207e231dbeSJesse Barnes 
2121055e393fSDamien Lespiau 	for_each_pipe(dev_priv, pipe) {
2122fd3a4024SDaniel Vetter 		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
2123fd3a4024SDaniel Vetter 			drm_handle_vblank(&dev_priv->drm, pipe);
21244356d586SDaniel Vetter 
21254356d586SDaniel Vetter 		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
212691d14251STvrtko Ursulin 			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
21272d9d2b0bSVille Syrjälä 
21281f7247c0SDaniel Vetter 		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
21291f7247c0SDaniel Vetter 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
213031acc7f5SJesse Barnes 	}
213131acc7f5SJesse Barnes 
2132c1874ed7SImre Deak 	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
213391d14251STvrtko Ursulin 		gmbus_irq_handler(dev_priv);
2134c1874ed7SImre Deak }
2135c1874ed7SImre Deak 
21361ae3c34cSVille Syrjälä static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
213716c6c56bSVille Syrjälä {
21380ba7c51aSVille Syrjälä 	u32 hotplug_status = 0, hotplug_status_mask;
21390ba7c51aSVille Syrjälä 	int i;
214016c6c56bSVille Syrjälä 
21410ba7c51aSVille Syrjälä 	if (IS_G4X(dev_priv) ||
21420ba7c51aSVille Syrjälä 	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
21430ba7c51aSVille Syrjälä 		hotplug_status_mask = HOTPLUG_INT_STATUS_G4X |
21440ba7c51aSVille Syrjälä 			DP_AUX_CHANNEL_MASK_INT_STATUS_G4X;
21450ba7c51aSVille Syrjälä 	else
21460ba7c51aSVille Syrjälä 		hotplug_status_mask = HOTPLUG_INT_STATUS_I915;
21470ba7c51aSVille Syrjälä 
21480ba7c51aSVille Syrjälä 	/*
21490ba7c51aSVille Syrjälä 	 * We absolutely have to clear all the pending interrupt
21500ba7c51aSVille Syrjälä 	 * bits in PORT_HOTPLUG_STAT. Otherwise the ISR port
21510ba7c51aSVille Syrjälä 	 * interrupt bit won't have an edge, and the i965/g4x
21520ba7c51aSVille Syrjälä 	 * edge triggered IIR will not notice that an interrupt
21530ba7c51aSVille Syrjälä 	 * is still pending. We can't use PORT_HOTPLUG_EN to
21540ba7c51aSVille Syrjälä 	 * guarantee the edge as the act of toggling the enable
21550ba7c51aSVille Syrjälä 	 * bits can itself generate a new hotplug interrupt :(
21560ba7c51aSVille Syrjälä 	 */
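	/*
	 * Bounded retry: re-read PORT_HOTPLUG_STAT, accumulate and write back
	 * the bits we care about to clear them, and give up with the WARN
	 * below if they still refuse to clear after ten passes.
	 */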
21570ba7c51aSVille Syrjälä 	for (i = 0; i < 10; i++) {
21580ba7c51aSVille Syrjälä 		u32 tmp = I915_READ(PORT_HOTPLUG_STAT) & hotplug_status_mask;
21590ba7c51aSVille Syrjälä 
21600ba7c51aSVille Syrjälä 		if (tmp == 0)
21610ba7c51aSVille Syrjälä 			return hotplug_status;
21620ba7c51aSVille Syrjälä 
21630ba7c51aSVille Syrjälä 		hotplug_status |= tmp;
21643ff60f89SOscar Mateo 		I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
21650ba7c51aSVille Syrjälä 	}
21660ba7c51aSVille Syrjälä 
21670ba7c51aSVille Syrjälä 	WARN_ONCE(1,
21680ba7c51aSVille Syrjälä 		  "PORT_HOTPLUG_STAT did not clear (0x%08x)\n",
21690ba7c51aSVille Syrjälä 		  I915_READ(PORT_HOTPLUG_STAT));
21701ae3c34cSVille Syrjälä 
21711ae3c34cSVille Syrjälä 	return hotplug_status;
21721ae3c34cSVille Syrjälä }
21731ae3c34cSVille Syrjälä 
217491d14251STvrtko Ursulin static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv,
21751ae3c34cSVille Syrjälä 				 u32 hotplug_status)
21761ae3c34cSVille Syrjälä {
21771ae3c34cSVille Syrjälä 	u32 pin_mask = 0, long_mask = 0;
21783ff60f89SOscar Mateo 
217991d14251STvrtko Ursulin 	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
218091d14251STvrtko Ursulin 	    IS_CHERRYVIEW(dev_priv)) {
218116c6c56bSVille Syrjälä 		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
218216c6c56bSVille Syrjälä 
218358f2cf24SVille Syrjälä 		if (hotplug_trigger) {
2184cf53902fSRodrigo Vivi 			intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2185cf53902fSRodrigo Vivi 					   hotplug_trigger, hotplug_trigger,
2186cf53902fSRodrigo Vivi 					   hpd_status_g4x,
2187fd63e2a9SImre Deak 					   i9xx_port_hotplug_long_detect);
218858f2cf24SVille Syrjälä 
218991d14251STvrtko Ursulin 			intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
219058f2cf24SVille Syrjälä 		}
2191369712e8SJani Nikula 
2192369712e8SJani Nikula 		if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
219391d14251STvrtko Ursulin 			dp_aux_irq_handler(dev_priv);
219416c6c56bSVille Syrjälä 	} else {
219516c6c56bSVille Syrjälä 		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
219616c6c56bSVille Syrjälä 
219758f2cf24SVille Syrjälä 		if (hotplug_trigger) {
2198cf53902fSRodrigo Vivi 			intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2199cf53902fSRodrigo Vivi 					   hotplug_trigger, hotplug_trigger,
2200cf53902fSRodrigo Vivi 					   hpd_status_i915,
2201fd63e2a9SImre Deak 					   i9xx_port_hotplug_long_detect);
220291d14251STvrtko Ursulin 			intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
220316c6c56bSVille Syrjälä 		}
22043ff60f89SOscar Mateo 	}
220558f2cf24SVille Syrjälä }
220616c6c56bSVille Syrjälä 
2207c1874ed7SImre Deak static irqreturn_t valleyview_irq_handler(int irq, void *arg)
2208c1874ed7SImre Deak {
2209b318b824SVille Syrjälä 	struct drm_i915_private *dev_priv = arg;
2210c1874ed7SImre Deak 	irqreturn_t ret = IRQ_NONE;
2211c1874ed7SImre Deak 
22122dd2a883SImre Deak 	if (!intel_irqs_enabled(dev_priv))
22132dd2a883SImre Deak 		return IRQ_NONE;
22142dd2a883SImre Deak 
22151f814dacSImre Deak 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
22169102650fSDaniele Ceraolo Spurio 	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
22171f814dacSImre Deak 
22181e1cace9SVille Syrjälä 	do {
22196e814800SVille Syrjälä 		u32 iir, gt_iir, pm_iir;
22202ecb8ca4SVille Syrjälä 		u32 pipe_stats[I915_MAX_PIPES] = {};
22211ae3c34cSVille Syrjälä 		u32 hotplug_status = 0;
2222a5e485a9SVille Syrjälä 		u32 ier = 0;
22233ff60f89SOscar Mateo 
2224c1874ed7SImre Deak 		gt_iir = I915_READ(GTIIR);
2225c1874ed7SImre Deak 		pm_iir = I915_READ(GEN6_PMIIR);
22263ff60f89SOscar Mateo 		iir = I915_READ(VLV_IIR);
2227c1874ed7SImre Deak 
2228c1874ed7SImre Deak 		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
22291e1cace9SVille Syrjälä 			break;
2230c1874ed7SImre Deak 
2231c1874ed7SImre Deak 		ret = IRQ_HANDLED;
2232c1874ed7SImre Deak 
2233a5e485a9SVille Syrjälä 		/*
2234a5e485a9SVille Syrjälä 		 * Theory on interrupt generation, based on empirical evidence:
2235a5e485a9SVille Syrjälä 		 *
2236a5e485a9SVille Syrjälä 		 * x = ((VLV_IIR & VLV_IER) ||
2237a5e485a9SVille Syrjälä 		 *      (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) &&
2238a5e485a9SVille Syrjälä 		 *       (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE)));
2239a5e485a9SVille Syrjälä 		 *
2240a5e485a9SVille Syrjälä 		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
2241a5e485a9SVille Syrjälä 		 * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to
2242a5e485a9SVille Syrjälä 		 * guarantee the CPU interrupt will be raised again even if we
2243a5e485a9SVille Syrjälä 		 * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR
2244a5e485a9SVille Syrjälä 		 * bits this time around.
2245a5e485a9SVille Syrjälä 		 */
22464a0a0202SVille Syrjälä 		I915_WRITE(VLV_MASTER_IER, 0);
2247a5e485a9SVille Syrjälä 		ier = I915_READ(VLV_IER);
2248a5e485a9SVille Syrjälä 		I915_WRITE(VLV_IER, 0);
22494a0a0202SVille Syrjälä 
22504a0a0202SVille Syrjälä 		if (gt_iir)
22514a0a0202SVille Syrjälä 			I915_WRITE(GTIIR, gt_iir);
22524a0a0202SVille Syrjälä 		if (pm_iir)
22534a0a0202SVille Syrjälä 			I915_WRITE(GEN6_PMIIR, pm_iir);
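		/*
		 * The GT and PM payloads are only acked here; the matching
		 * handlers run further down, after VLV_IER and the master
		 * interrupt enable have been restored.
		 */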
22544a0a0202SVille Syrjälä 
22557ce4d1f2SVille Syrjälä 		if (iir & I915_DISPLAY_PORT_INTERRUPT)
22561ae3c34cSVille Syrjälä 			hotplug_status = i9xx_hpd_irq_ack(dev_priv);
22577ce4d1f2SVille Syrjälä 
22583ff60f89SOscar Mateo 		/* Call regardless, as some status bits might not be
22593ff60f89SOscar Mateo 		 * signalled in iir */
2260eb64343cSVille Syrjälä 		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
22617ce4d1f2SVille Syrjälä 
2262eef57324SJerome Anand 		if (iir & (I915_LPE_PIPE_A_INTERRUPT |
2263eef57324SJerome Anand 			   I915_LPE_PIPE_B_INTERRUPT))
2264eef57324SJerome Anand 			intel_lpe_audio_irq_handler(dev_priv);
2265eef57324SJerome Anand 
22667ce4d1f2SVille Syrjälä 		/*
22677ce4d1f2SVille Syrjälä 		 * VLV_IIR is single buffered, and reflects the level
22687ce4d1f2SVille Syrjälä 		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
22697ce4d1f2SVille Syrjälä 		 */
22707ce4d1f2SVille Syrjälä 		if (iir)
22717ce4d1f2SVille Syrjälä 			I915_WRITE(VLV_IIR, iir);
22724a0a0202SVille Syrjälä 
2273a5e485a9SVille Syrjälä 		I915_WRITE(VLV_IER, ier);
22744a0a0202SVille Syrjälä 		I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
22751ae3c34cSVille Syrjälä 
227652894874SVille Syrjälä 		if (gt_iir)
2277261e40b8SVille Syrjälä 			snb_gt_irq_handler(dev_priv, gt_iir);
227852894874SVille Syrjälä 		if (pm_iir)
227952894874SVille Syrjälä 			gen6_rps_irq_handler(dev_priv, pm_iir);
228052894874SVille Syrjälä 
22811ae3c34cSVille Syrjälä 		if (hotplug_status)
228291d14251STvrtko Ursulin 			i9xx_hpd_irq_handler(dev_priv, hotplug_status);
22832ecb8ca4SVille Syrjälä 
228491d14251STvrtko Ursulin 		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
22851e1cace9SVille Syrjälä 	} while (0);
22867e231dbeSJesse Barnes 
22879102650fSDaniele Ceraolo Spurio 	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
22881f814dacSImre Deak 
22897e231dbeSJesse Barnes 	return ret;
22907e231dbeSJesse Barnes }
22917e231dbeSJesse Barnes 
229243f328d7SVille Syrjälä static irqreturn_t cherryview_irq_handler(int irq, void *arg)
229343f328d7SVille Syrjälä {
2294b318b824SVille Syrjälä 	struct drm_i915_private *dev_priv = arg;
229543f328d7SVille Syrjälä 	irqreturn_t ret = IRQ_NONE;
229643f328d7SVille Syrjälä 
22972dd2a883SImre Deak 	if (!intel_irqs_enabled(dev_priv))
22982dd2a883SImre Deak 		return IRQ_NONE;
22992dd2a883SImre Deak 
23001f814dacSImre Deak 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
23019102650fSDaniele Ceraolo Spurio 	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
23021f814dacSImre Deak 
2303579de73bSChris Wilson 	do {
23046e814800SVille Syrjälä 		u32 master_ctl, iir;
23052ecb8ca4SVille Syrjälä 		u32 pipe_stats[I915_MAX_PIPES] = {};
23061ae3c34cSVille Syrjälä 		u32 hotplug_status = 0;
2307f0fd96f5SChris Wilson 		u32 gt_iir[4];
2308a5e485a9SVille Syrjälä 		u32 ier = 0;
2309a5e485a9SVille Syrjälä 
23108e5fd599SVille Syrjälä 		master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
23113278f67fSVille Syrjälä 		iir = I915_READ(VLV_IIR);
23123278f67fSVille Syrjälä 
23133278f67fSVille Syrjälä 		if (master_ctl == 0 && iir == 0)
23148e5fd599SVille Syrjälä 			break;
231543f328d7SVille Syrjälä 
231627b6c122SOscar Mateo 		ret = IRQ_HANDLED;
231727b6c122SOscar Mateo 
2318a5e485a9SVille Syrjälä 		/*
2319a5e485a9SVille Syrjälä 		 * Theory on interrupt generation, based on empirical evidence:
2320a5e485a9SVille Syrjälä 		 *
2321a5e485a9SVille Syrjälä 		 * x = ((VLV_IIR & VLV_IER) ||
2322a5e485a9SVille Syrjälä 		 *      ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) &&
2323a5e485a9SVille Syrjälä 		 *       (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL)));
2324a5e485a9SVille Syrjälä 		 *
2325a5e485a9SVille Syrjälä 		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
2326a5e485a9SVille Syrjälä 		 * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to
2327a5e485a9SVille Syrjälä 		 * guarantee the CPU interrupt will be raised again even if we
2328a5e485a9SVille Syrjälä 		 * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL
2329a5e485a9SVille Syrjälä 		 * bits this time around.
2330a5e485a9SVille Syrjälä 		 */
233143f328d7SVille Syrjälä 		I915_WRITE(GEN8_MASTER_IRQ, 0);
2332a5e485a9SVille Syrjälä 		ier = I915_READ(VLV_IER);
2333a5e485a9SVille Syrjälä 		I915_WRITE(VLV_IER, 0);
233443f328d7SVille Syrjälä 
2335e30e251aSVille Syrjälä 		gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);
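		/*
		 * As on VLV, the GT payload is only acked here; it is passed
		 * to gen8_gt_irq_handler() once VLV_IER and the master enable
		 * have been restored below.
		 */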
233627b6c122SOscar Mateo 
233727b6c122SOscar Mateo 		if (iir & I915_DISPLAY_PORT_INTERRUPT)
23381ae3c34cSVille Syrjälä 			hotplug_status = i9xx_hpd_irq_ack(dev_priv);
233943f328d7SVille Syrjälä 
234027b6c122SOscar Mateo 		/* Call regardless, as some status bits might not be
234127b6c122SOscar Mateo 		 * signalled in iir */
2342eb64343cSVille Syrjälä 		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
234343f328d7SVille Syrjälä 
2344eef57324SJerome Anand 		if (iir & (I915_LPE_PIPE_A_INTERRUPT |
2345eef57324SJerome Anand 			   I915_LPE_PIPE_B_INTERRUPT |
2346eef57324SJerome Anand 			   I915_LPE_PIPE_C_INTERRUPT))
2347eef57324SJerome Anand 			intel_lpe_audio_irq_handler(dev_priv);
2348eef57324SJerome Anand 
23497ce4d1f2SVille Syrjälä 		/*
23507ce4d1f2SVille Syrjälä 		 * VLV_IIR is single buffered, and reflects the level
23517ce4d1f2SVille Syrjälä 		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
23527ce4d1f2SVille Syrjälä 		 */
23537ce4d1f2SVille Syrjälä 		if (iir)
23547ce4d1f2SVille Syrjälä 			I915_WRITE(VLV_IIR, iir);
23557ce4d1f2SVille Syrjälä 
2356a5e485a9SVille Syrjälä 		I915_WRITE(VLV_IER, ier);
2357e5328c43SVille Syrjälä 		I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
23581ae3c34cSVille Syrjälä 
2359f0fd96f5SChris Wilson 		gen8_gt_irq_handler(dev_priv, master_ctl, gt_iir);
2360e30e251aSVille Syrjälä 
23611ae3c34cSVille Syrjälä 		if (hotplug_status)
236291d14251STvrtko Ursulin 			i9xx_hpd_irq_handler(dev_priv, hotplug_status);
23632ecb8ca4SVille Syrjälä 
236491d14251STvrtko Ursulin 		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
2365579de73bSChris Wilson 	} while (0);
23663278f67fSVille Syrjälä 
23679102650fSDaniele Ceraolo Spurio 	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
23681f814dacSImre Deak 
236943f328d7SVille Syrjälä 	return ret;
237043f328d7SVille Syrjälä }
237143f328d7SVille Syrjälä 
237291d14251STvrtko Ursulin static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
237391d14251STvrtko Ursulin 				u32 hotplug_trigger,
237440e56410SVille Syrjälä 				const u32 hpd[HPD_NUM_PINS])
2375776ad806SJesse Barnes {
237642db67d6SVille Syrjälä 	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2377776ad806SJesse Barnes 
23786a39d7c9SJani Nikula 	/*
23796a39d7c9SJani Nikula 	 * Somehow the PCH doesn't seem to really ack the interrupt to the CPU
23806a39d7c9SJani Nikula 	 * unless we touch the hotplug register, even if hotplug_trigger is
23816a39d7c9SJani Nikula 	 * zero. Not acking leads to "The master control interrupt lied (SDE)!"
23826a39d7c9SJani Nikula 	 * errors.
23836a39d7c9SJani Nikula 	 */
238413cf5504SDave Airlie 	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
23856a39d7c9SJani Nikula 	if (!hotplug_trigger) {
23866a39d7c9SJani Nikula 		u32 mask = PORTA_HOTPLUG_STATUS_MASK |
23876a39d7c9SJani Nikula 			PORTD_HOTPLUG_STATUS_MASK |
23886a39d7c9SJani Nikula 			PORTC_HOTPLUG_STATUS_MASK |
23896a39d7c9SJani Nikula 			PORTB_HOTPLUG_STATUS_MASK;
23906a39d7c9SJani Nikula 		dig_hotplug_reg &= ~mask;
23916a39d7c9SJani Nikula 	}
23926a39d7c9SJani Nikula 
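	/*
	 * With no trigger the write below is only wanted for its ack side
	 * effect, so the status bits were masked off above to keep the
	 * write-back from clearing a hotplug status that has not yet been
	 * reported through SDEIIR.
	 */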
239313cf5504SDave Airlie 	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
23946a39d7c9SJani Nikula 	if (!hotplug_trigger)
23956a39d7c9SJani Nikula 		return;
239613cf5504SDave Airlie 
2397cf53902fSRodrigo Vivi 	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger,
239840e56410SVille Syrjälä 			   dig_hotplug_reg, hpd,
2399fd63e2a9SImre Deak 			   pch_port_hotplug_long_detect);
240040e56410SVille Syrjälä 
240191d14251STvrtko Ursulin 	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2402aaf5ec2eSSonika Jindal }
240391d131d2SDaniel Vetter 
240491d14251STvrtko Ursulin static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
240540e56410SVille Syrjälä {
240640e56410SVille Syrjälä 	int pipe;
240740e56410SVille Syrjälä 	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
240840e56410SVille Syrjälä 
240991d14251STvrtko Ursulin 	ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ibx);
241040e56410SVille Syrjälä 
2411cfc33bf7SVille Syrjälä 	if (pch_iir & SDE_AUDIO_POWER_MASK) {
2412cfc33bf7SVille Syrjälä 		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
2413776ad806SJesse Barnes 			       SDE_AUDIO_POWER_SHIFT);
2414cfc33bf7SVille Syrjälä 		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
2415cfc33bf7SVille Syrjälä 				 port_name(port));
2416cfc33bf7SVille Syrjälä 	}
2417776ad806SJesse Barnes 
2418ce99c256SDaniel Vetter 	if (pch_iir & SDE_AUX_MASK)
241991d14251STvrtko Ursulin 		dp_aux_irq_handler(dev_priv);
2420ce99c256SDaniel Vetter 
2421776ad806SJesse Barnes 	if (pch_iir & SDE_GMBUS)
242291d14251STvrtko Ursulin 		gmbus_irq_handler(dev_priv);
2423776ad806SJesse Barnes 
2424776ad806SJesse Barnes 	if (pch_iir & SDE_AUDIO_HDCP_MASK)
2425776ad806SJesse Barnes 		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
2426776ad806SJesse Barnes 
2427776ad806SJesse Barnes 	if (pch_iir & SDE_AUDIO_TRANS_MASK)
2428776ad806SJesse Barnes 		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
2429776ad806SJesse Barnes 
2430776ad806SJesse Barnes 	if (pch_iir & SDE_POISON)
2431776ad806SJesse Barnes 		DRM_ERROR("PCH poison interrupt\n");
2432776ad806SJesse Barnes 
24339db4a9c7SJesse Barnes 	if (pch_iir & SDE_FDI_MASK)
2434055e393fSDamien Lespiau 		for_each_pipe(dev_priv, pipe)
24359db4a9c7SJesse Barnes 			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
24369db4a9c7SJesse Barnes 					 pipe_name(pipe),
24379db4a9c7SJesse Barnes 					 I915_READ(FDI_RX_IIR(pipe)));
2438776ad806SJesse Barnes 
2439776ad806SJesse Barnes 	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
2440776ad806SJesse Barnes 		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
2441776ad806SJesse Barnes 
2442776ad806SJesse Barnes 	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
2443776ad806SJesse Barnes 		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
2444776ad806SJesse Barnes 
2445776ad806SJesse Barnes 	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
2446a2196033SMatthias Kaehlcke 		intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A);
24478664281bSPaulo Zanoni 
24488664281bSPaulo Zanoni 	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
2449a2196033SMatthias Kaehlcke 		intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_B);
24508664281bSPaulo Zanoni }
24518664281bSPaulo Zanoni 
245291d14251STvrtko Ursulin static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
24538664281bSPaulo Zanoni {
24548664281bSPaulo Zanoni 	u32 err_int = I915_READ(GEN7_ERR_INT);
24555a69b89fSDaniel Vetter 	enum pipe pipe;
24568664281bSPaulo Zanoni 
2457de032bf4SPaulo Zanoni 	if (err_int & ERR_INT_POISON)
2458de032bf4SPaulo Zanoni 		DRM_ERROR("Poison interrupt\n");
2459de032bf4SPaulo Zanoni 
2460055e393fSDamien Lespiau 	for_each_pipe(dev_priv, pipe) {
24611f7247c0SDaniel Vetter 		if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
24621f7247c0SDaniel Vetter 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
24638664281bSPaulo Zanoni 
24645a69b89fSDaniel Vetter 		if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
246591d14251STvrtko Ursulin 			if (IS_IVYBRIDGE(dev_priv))
246691d14251STvrtko Ursulin 				ivb_pipe_crc_irq_handler(dev_priv, pipe);
24675a69b89fSDaniel Vetter 			else
246891d14251STvrtko Ursulin 				hsw_pipe_crc_irq_handler(dev_priv, pipe);
24695a69b89fSDaniel Vetter 		}
24705a69b89fSDaniel Vetter 	}
24718bf1e9f1SShuang He 
24728664281bSPaulo Zanoni 	I915_WRITE(GEN7_ERR_INT, err_int);
24738664281bSPaulo Zanoni }
24748664281bSPaulo Zanoni 
247591d14251STvrtko Ursulin static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
24768664281bSPaulo Zanoni {
24778664281bSPaulo Zanoni 	u32 serr_int = I915_READ(SERR_INT);
247845c1cd87SMika Kahola 	enum pipe pipe;
24798664281bSPaulo Zanoni 
2480de032bf4SPaulo Zanoni 	if (serr_int & SERR_INT_POISON)
2481de032bf4SPaulo Zanoni 		DRM_ERROR("PCH poison interrupt\n");
2482de032bf4SPaulo Zanoni 
248345c1cd87SMika Kahola 	for_each_pipe(dev_priv, pipe)
248445c1cd87SMika Kahola 		if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe))
248545c1cd87SMika Kahola 			intel_pch_fifo_underrun_irq_handler(dev_priv, pipe);
24868664281bSPaulo Zanoni 
24878664281bSPaulo Zanoni 	I915_WRITE(SERR_INT, serr_int);
2488776ad806SJesse Barnes }
2489776ad806SJesse Barnes 
249091d14251STvrtko Ursulin static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
249123e81d69SAdam Jackson {
249223e81d69SAdam Jackson 	int pipe;
24936dbf30ceSVille Syrjälä 	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
2494aaf5ec2eSSonika Jindal 
249591d14251STvrtko Ursulin 	ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_cpt);
249691d131d2SDaniel Vetter 
2497cfc33bf7SVille Syrjälä 	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
2498cfc33bf7SVille Syrjälä 		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
249923e81d69SAdam Jackson 			       SDE_AUDIO_POWER_SHIFT_CPT);
2500cfc33bf7SVille Syrjälä 		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
2501cfc33bf7SVille Syrjälä 				 port_name(port));
2502cfc33bf7SVille Syrjälä 	}
250323e81d69SAdam Jackson 
250423e81d69SAdam Jackson 	if (pch_iir & SDE_AUX_MASK_CPT)
250591d14251STvrtko Ursulin 		dp_aux_irq_handler(dev_priv);
250623e81d69SAdam Jackson 
250723e81d69SAdam Jackson 	if (pch_iir & SDE_GMBUS_CPT)
250891d14251STvrtko Ursulin 		gmbus_irq_handler(dev_priv);
250923e81d69SAdam Jackson 
251023e81d69SAdam Jackson 	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
251123e81d69SAdam Jackson 		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
251223e81d69SAdam Jackson 
251323e81d69SAdam Jackson 	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
251423e81d69SAdam Jackson 		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
251523e81d69SAdam Jackson 
251623e81d69SAdam Jackson 	if (pch_iir & SDE_FDI_MASK_CPT)
2517055e393fSDamien Lespiau 		for_each_pipe(dev_priv, pipe)
251823e81d69SAdam Jackson 			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
251923e81d69SAdam Jackson 					 pipe_name(pipe),
252023e81d69SAdam Jackson 					 I915_READ(FDI_RX_IIR(pipe)));
25218664281bSPaulo Zanoni 
25228664281bSPaulo Zanoni 	if (pch_iir & SDE_ERROR_CPT)
252391d14251STvrtko Ursulin 		cpt_serr_int_handler(dev_priv);
252423e81d69SAdam Jackson }
252523e81d69SAdam Jackson 
2526c6f7acb8SMatt Roper static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir,
2527c6f7acb8SMatt Roper 			    const u32 *pins)
252831604222SAnusha Srivatsa {
252931604222SAnusha Srivatsa 	u32 ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_ICP;
253031604222SAnusha Srivatsa 	u32 tc_hotplug_trigger = pch_iir & SDE_TC_MASK_ICP;
253131604222SAnusha Srivatsa 	u32 pin_mask = 0, long_mask = 0;
253231604222SAnusha Srivatsa 
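	/*
	 * DDI and Type-C hotplug pins are reported through separate
	 * SHOTPLUG_CTL registers; each set is decoded with its own
	 * long-pulse detector and the results are folded into a single
	 * hotplug event below.
	 */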
253331604222SAnusha Srivatsa 	if (ddi_hotplug_trigger) {
253431604222SAnusha Srivatsa 		u32 dig_hotplug_reg;
253531604222SAnusha Srivatsa 
253631604222SAnusha Srivatsa 		dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_DDI);
253731604222SAnusha Srivatsa 		I915_WRITE(SHOTPLUG_CTL_DDI, dig_hotplug_reg);
253831604222SAnusha Srivatsa 
253931604222SAnusha Srivatsa 		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
254031604222SAnusha Srivatsa 				   ddi_hotplug_trigger,
2541c6f7acb8SMatt Roper 				   dig_hotplug_reg, pins,
254231604222SAnusha Srivatsa 				   icp_ddi_port_hotplug_long_detect);
254331604222SAnusha Srivatsa 	}
254431604222SAnusha Srivatsa 
254531604222SAnusha Srivatsa 	if (tc_hotplug_trigger) {
254631604222SAnusha Srivatsa 		u32 dig_hotplug_reg;
254731604222SAnusha Srivatsa 
254831604222SAnusha Srivatsa 		dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_TC);
254931604222SAnusha Srivatsa 		I915_WRITE(SHOTPLUG_CTL_TC, dig_hotplug_reg);
255031604222SAnusha Srivatsa 
255131604222SAnusha Srivatsa 		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
255231604222SAnusha Srivatsa 				   tc_hotplug_trigger,
2553c6f7acb8SMatt Roper 				   dig_hotplug_reg, pins,
255431604222SAnusha Srivatsa 				   icp_tc_port_hotplug_long_detect);
255531604222SAnusha Srivatsa 	}
255631604222SAnusha Srivatsa 
255731604222SAnusha Srivatsa 	if (pin_mask)
255831604222SAnusha Srivatsa 		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
255931604222SAnusha Srivatsa 
256031604222SAnusha Srivatsa 	if (pch_iir & SDE_GMBUS_ICP)
256131604222SAnusha Srivatsa 		gmbus_irq_handler(dev_priv);
256231604222SAnusha Srivatsa }
256331604222SAnusha Srivatsa 
256491d14251STvrtko Ursulin static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
25656dbf30ceSVille Syrjälä {
25666dbf30ceSVille Syrjälä 	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
25676dbf30ceSVille Syrjälä 		~SDE_PORTE_HOTPLUG_SPT;
25686dbf30ceSVille Syrjälä 	u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
25696dbf30ceSVille Syrjälä 	u32 pin_mask = 0, long_mask = 0;
25706dbf30ceSVille Syrjälä 
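	/*
	 * Port E status lives in PCH_PORT_HOTPLUG2 on SPT, so the two
	 * trigger sets are acked and decoded separately before raising a
	 * single hotplug event.
	 */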
25716dbf30ceSVille Syrjälä 	if (hotplug_trigger) {
25726dbf30ceSVille Syrjälä 		u32 dig_hotplug_reg;
25736dbf30ceSVille Syrjälä 
25746dbf30ceSVille Syrjälä 		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
25756dbf30ceSVille Syrjälä 		I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
25766dbf30ceSVille Syrjälä 
2577cf53902fSRodrigo Vivi 		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2578cf53902fSRodrigo Vivi 				   hotplug_trigger, dig_hotplug_reg, hpd_spt,
257974c0b395SVille Syrjälä 				   spt_port_hotplug_long_detect);
25806dbf30ceSVille Syrjälä 	}
25816dbf30ceSVille Syrjälä 
25826dbf30ceSVille Syrjälä 	if (hotplug2_trigger) {
25836dbf30ceSVille Syrjälä 		u32 dig_hotplug_reg;
25846dbf30ceSVille Syrjälä 
25856dbf30ceSVille Syrjälä 		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2);
25866dbf30ceSVille Syrjälä 		I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg);
25876dbf30ceSVille Syrjälä 
2588cf53902fSRodrigo Vivi 		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2589cf53902fSRodrigo Vivi 				   hotplug2_trigger, dig_hotplug_reg, hpd_spt,
25906dbf30ceSVille Syrjälä 				   spt_port_hotplug2_long_detect);
25916dbf30ceSVille Syrjälä 	}
25926dbf30ceSVille Syrjälä 
25936dbf30ceSVille Syrjälä 	if (pin_mask)
259491d14251STvrtko Ursulin 		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
25956dbf30ceSVille Syrjälä 
25966dbf30ceSVille Syrjälä 	if (pch_iir & SDE_GMBUS_CPT)
259791d14251STvrtko Ursulin 		gmbus_irq_handler(dev_priv);
25986dbf30ceSVille Syrjälä }
25996dbf30ceSVille Syrjälä 
260091d14251STvrtko Ursulin static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv,
260191d14251STvrtko Ursulin 				u32 hotplug_trigger,
260240e56410SVille Syrjälä 				const u32 hpd[HPD_NUM_PINS])
2603c008bc6eSPaulo Zanoni {
2604e4ce95aaSVille Syrjälä 	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2605e4ce95aaSVille Syrjälä 
2606e4ce95aaSVille Syrjälä 	dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
2607e4ce95aaSVille Syrjälä 	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);
2608e4ce95aaSVille Syrjälä 
2609cf53902fSRodrigo Vivi 	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger,
261040e56410SVille Syrjälä 			   dig_hotplug_reg, hpd,
2611e4ce95aaSVille Syrjälä 			   ilk_port_hotplug_long_detect);
261240e56410SVille Syrjälä 
261391d14251STvrtko Ursulin 	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2614e4ce95aaSVille Syrjälä }
2615c008bc6eSPaulo Zanoni 
261691d14251STvrtko Ursulin static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
261791d14251STvrtko Ursulin 				    u32 de_iir)
261840e56410SVille Syrjälä {
261940e56410SVille Syrjälä 	enum pipe pipe;
262040e56410SVille Syrjälä 	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;
262140e56410SVille Syrjälä 
262240e56410SVille Syrjälä 	if (hotplug_trigger)
262391d14251STvrtko Ursulin 		ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ilk);
262440e56410SVille Syrjälä 
2625c008bc6eSPaulo Zanoni 	if (de_iir & DE_AUX_CHANNEL_A)
262691d14251STvrtko Ursulin 		dp_aux_irq_handler(dev_priv);
2627c008bc6eSPaulo Zanoni 
2628c008bc6eSPaulo Zanoni 	if (de_iir & DE_GSE)
262991d14251STvrtko Ursulin 		intel_opregion_asle_intr(dev_priv);
2630c008bc6eSPaulo Zanoni 
2631c008bc6eSPaulo Zanoni 	if (de_iir & DE_POISON)
2632c008bc6eSPaulo Zanoni 		DRM_ERROR("Poison interrupt\n");
2633c008bc6eSPaulo Zanoni 
2634055e393fSDamien Lespiau 	for_each_pipe(dev_priv, pipe) {
2635fd3a4024SDaniel Vetter 		if (de_iir & DE_PIPE_VBLANK(pipe))
2636fd3a4024SDaniel Vetter 			drm_handle_vblank(&dev_priv->drm, pipe);
2637c008bc6eSPaulo Zanoni 
263840da17c2SDaniel Vetter 		if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
26391f7247c0SDaniel Vetter 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2640c008bc6eSPaulo Zanoni 
264140da17c2SDaniel Vetter 		if (de_iir & DE_PIPE_CRC_DONE(pipe))
264291d14251STvrtko Ursulin 			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
2643c008bc6eSPaulo Zanoni 	}
2644c008bc6eSPaulo Zanoni 
2645c008bc6eSPaulo Zanoni 	/* check event from PCH */
2646c008bc6eSPaulo Zanoni 	if (de_iir & DE_PCH_EVENT) {
2647c008bc6eSPaulo Zanoni 		u32 pch_iir = I915_READ(SDEIIR);
2648c008bc6eSPaulo Zanoni 
264991d14251STvrtko Ursulin 		if (HAS_PCH_CPT(dev_priv))
265091d14251STvrtko Ursulin 			cpt_irq_handler(dev_priv, pch_iir);
2651c008bc6eSPaulo Zanoni 		else
265291d14251STvrtko Ursulin 			ibx_irq_handler(dev_priv, pch_iir);
2653c008bc6eSPaulo Zanoni 
2654c008bc6eSPaulo Zanoni 		/* should clear PCH hotplug event before clear CPU irq */
2655c008bc6eSPaulo Zanoni 		I915_WRITE(SDEIIR, pch_iir);
2656c008bc6eSPaulo Zanoni 	}
2657c008bc6eSPaulo Zanoni 
2658cf819effSLucas De Marchi 	if (IS_GEN(dev_priv, 5) && de_iir & DE_PCU_EVENT)
265991d14251STvrtko Ursulin 		ironlake_rps_change_irq_handler(dev_priv);
2660c008bc6eSPaulo Zanoni }
2661c008bc6eSPaulo Zanoni 
266291d14251STvrtko Ursulin static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
266391d14251STvrtko Ursulin 				    u32 de_iir)
26649719fb98SPaulo Zanoni {
266507d27e20SDamien Lespiau 	enum pipe pipe;
266623bb4cb5SVille Syrjälä 	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;
266723bb4cb5SVille Syrjälä 
266840e56410SVille Syrjälä 	if (hotplug_trigger)
266991d14251STvrtko Ursulin 		ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ivb);
26709719fb98SPaulo Zanoni 
26719719fb98SPaulo Zanoni 	if (de_iir & DE_ERR_INT_IVB)
267291d14251STvrtko Ursulin 		ivb_err_int_handler(dev_priv);
26739719fb98SPaulo Zanoni 
267454fd3149SDhinakaran Pandiyan 	if (de_iir & DE_EDP_PSR_INT_HSW) {
267554fd3149SDhinakaran Pandiyan 		u32 psr_iir = I915_READ(EDP_PSR_IIR);
267654fd3149SDhinakaran Pandiyan 
267754fd3149SDhinakaran Pandiyan 		intel_psr_irq_handler(dev_priv, psr_iir);
267854fd3149SDhinakaran Pandiyan 		I915_WRITE(EDP_PSR_IIR, psr_iir);
267954fd3149SDhinakaran Pandiyan 	}
2680fc340442SDaniel Vetter 
26819719fb98SPaulo Zanoni 	if (de_iir & DE_AUX_CHANNEL_A_IVB)
268291d14251STvrtko Ursulin 		dp_aux_irq_handler(dev_priv);
26839719fb98SPaulo Zanoni 
26849719fb98SPaulo Zanoni 	if (de_iir & DE_GSE_IVB)
268591d14251STvrtko Ursulin 		intel_opregion_asle_intr(dev_priv);
26869719fb98SPaulo Zanoni 
2687055e393fSDamien Lespiau 	for_each_pipe(dev_priv, pipe) {
2688fd3a4024SDaniel Vetter 		if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)))
2689fd3a4024SDaniel Vetter 			drm_handle_vblank(&dev_priv->drm, pipe);
26909719fb98SPaulo Zanoni 	}
26919719fb98SPaulo Zanoni 
26929719fb98SPaulo Zanoni 	/* check event from PCH */
269391d14251STvrtko Ursulin 	if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) {
26949719fb98SPaulo Zanoni 		u32 pch_iir = I915_READ(SDEIIR);
26959719fb98SPaulo Zanoni 
269691d14251STvrtko Ursulin 		cpt_irq_handler(dev_priv, pch_iir);
26979719fb98SPaulo Zanoni 
26989719fb98SPaulo Zanoni 		/* clear PCH hotplug event before clear CPU irq */
26999719fb98SPaulo Zanoni 		I915_WRITE(SDEIIR, pch_iir);
27009719fb98SPaulo Zanoni 	}
27019719fb98SPaulo Zanoni }
27029719fb98SPaulo Zanoni 
270372c90f62SOscar Mateo /*
270472c90f62SOscar Mateo  * To handle irqs with the minimum potential races with fresh interrupts, we:
270572c90f62SOscar Mateo  * 1 - Disable Master Interrupt Control.
270672c90f62SOscar Mateo  * 2 - Find the source(s) of the interrupt.
270772c90f62SOscar Mateo  * 3 - Clear the Interrupt Identity bits (IIR).
270872c90f62SOscar Mateo  * 4 - Process the interrupt(s) that had bits set in the IIRs.
270972c90f62SOscar Mateo  * 5 - Re-enable Master Interrupt Control.
271072c90f62SOscar Mateo  */
2711f1af8fc1SPaulo Zanoni static irqreturn_t ironlake_irq_handler(int irq, void *arg)
2712b1f14ad0SJesse Barnes {
2713b318b824SVille Syrjälä 	struct drm_i915_private *dev_priv = arg;
2714f1af8fc1SPaulo Zanoni 	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
27150e43406bSChris Wilson 	irqreturn_t ret = IRQ_NONE;
2716b1f14ad0SJesse Barnes 
27172dd2a883SImre Deak 	if (!intel_irqs_enabled(dev_priv))
27182dd2a883SImre Deak 		return IRQ_NONE;
27192dd2a883SImre Deak 
27201f814dacSImre Deak 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
27219102650fSDaniele Ceraolo Spurio 	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
27221f814dacSImre Deak 
2723b1f14ad0SJesse Barnes 	/* disable master interrupt before clearing iir  */
2724b1f14ad0SJesse Barnes 	de_ier = I915_READ(DEIER);
2725b1f14ad0SJesse Barnes 	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
27260e43406bSChris Wilson 
272744498aeaSPaulo Zanoni 	/* Disable south interrupts. We'll only write to SDEIIR once, so further
272844498aeaSPaulo Zanoni 	 * interrupts will be stored on its back queue, and then we'll be
272944498aeaSPaulo Zanoni 	 * able to process them after we restore SDEIER (as soon as we restore
273044498aeaSPaulo Zanoni 	 * it, we'll get an interrupt if SDEIIR still has something to process
273144498aeaSPaulo Zanoni 	 * due to its back queue). */
273291d14251STvrtko Ursulin 	if (!HAS_PCH_NOP(dev_priv)) {
273344498aeaSPaulo Zanoni 		sde_ier = I915_READ(SDEIER);
273444498aeaSPaulo Zanoni 		I915_WRITE(SDEIER, 0);
2735ab5c608bSBen Widawsky 	}
273644498aeaSPaulo Zanoni 
273772c90f62SOscar Mateo 	/* Find, clear, then process each source of interrupt */
273872c90f62SOscar Mateo 
27390e43406bSChris Wilson 	gt_iir = I915_READ(GTIIR);
27400e43406bSChris Wilson 	if (gt_iir) {
274172c90f62SOscar Mateo 		I915_WRITE(GTIIR, gt_iir);
274272c90f62SOscar Mateo 		ret = IRQ_HANDLED;
274391d14251STvrtko Ursulin 		if (INTEL_GEN(dev_priv) >= 6)
2744261e40b8SVille Syrjälä 			snb_gt_irq_handler(dev_priv, gt_iir);
2745d8fc8a47SPaulo Zanoni 		else
2746261e40b8SVille Syrjälä 			ilk_gt_irq_handler(dev_priv, gt_iir);
27470e43406bSChris Wilson 	}
2748b1f14ad0SJesse Barnes 
2749b1f14ad0SJesse Barnes 	de_iir = I915_READ(DEIIR);
27500e43406bSChris Wilson 	if (de_iir) {
275172c90f62SOscar Mateo 		I915_WRITE(DEIIR, de_iir);
275272c90f62SOscar Mateo 		ret = IRQ_HANDLED;
275391d14251STvrtko Ursulin 		if (INTEL_GEN(dev_priv) >= 7)
275491d14251STvrtko Ursulin 			ivb_display_irq_handler(dev_priv, de_iir);
2755f1af8fc1SPaulo Zanoni 		else
275691d14251STvrtko Ursulin 			ilk_display_irq_handler(dev_priv, de_iir);
27570e43406bSChris Wilson 	}
27580e43406bSChris Wilson 
275991d14251STvrtko Ursulin 	if (INTEL_GEN(dev_priv) >= 6) {
2760f1af8fc1SPaulo Zanoni 		u32 pm_iir = I915_READ(GEN6_PMIIR);
27610e43406bSChris Wilson 		if (pm_iir) {
2762b1f14ad0SJesse Barnes 			I915_WRITE(GEN6_PMIIR, pm_iir);
27630e43406bSChris Wilson 			ret = IRQ_HANDLED;
276472c90f62SOscar Mateo 			gen6_rps_irq_handler(dev_priv, pm_iir);
27650e43406bSChris Wilson 		}
2766f1af8fc1SPaulo Zanoni 	}
2767b1f14ad0SJesse Barnes 
2768b1f14ad0SJesse Barnes 	I915_WRITE(DEIER, de_ier);
276974093f3eSChris Wilson 	if (!HAS_PCH_NOP(dev_priv))
277044498aeaSPaulo Zanoni 		I915_WRITE(SDEIER, sde_ier);
2771b1f14ad0SJesse Barnes 
27721f814dacSImre Deak 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
27739102650fSDaniele Ceraolo Spurio 	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
27741f814dacSImre Deak 
2775b1f14ad0SJesse Barnes 	return ret;
2776b1f14ad0SJesse Barnes }
2777b1f14ad0SJesse Barnes 
277891d14251STvrtko Ursulin static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv,
277991d14251STvrtko Ursulin 				u32 hotplug_trigger,
278040e56410SVille Syrjälä 				const u32 hpd[HPD_NUM_PINS])
2781d04a492dSShashank Sharma {
2782cebd87a0SVille Syrjälä 	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2783d04a492dSShashank Sharma 
2784a52bb15bSVille Syrjälä 	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2785a52bb15bSVille Syrjälä 	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2786d04a492dSShashank Sharma 
2787cf53902fSRodrigo Vivi 	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger,
278840e56410SVille Syrjälä 			   dig_hotplug_reg, hpd,
2789cebd87a0SVille Syrjälä 			   bxt_port_hotplug_long_detect);
279040e56410SVille Syrjälä 
279191d14251STvrtko Ursulin 	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2792d04a492dSShashank Sharma }
2793d04a492dSShashank Sharma 
2794121e758eSDhinakaran Pandiyan static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
2795121e758eSDhinakaran Pandiyan {
2796121e758eSDhinakaran Pandiyan 	u32 pin_mask = 0, long_mask = 0;
2797b796b971SDhinakaran Pandiyan 	u32 trigger_tc = iir & GEN11_DE_TC_HOTPLUG_MASK;
2798b796b971SDhinakaran Pandiyan 	u32 trigger_tbt = iir & GEN11_DE_TBT_HOTPLUG_MASK;
2799121e758eSDhinakaran Pandiyan 
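	/*
	 * Type-C and Thunderbolt hotplug events arrive through separate
	 * HOTPLUG_CTL registers but share the gen11 pin table; decode both
	 * before raising the event.
	 */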
2800121e758eSDhinakaran Pandiyan 	if (trigger_tc) {
2801b796b971SDhinakaran Pandiyan 		u32 dig_hotplug_reg;
2802b796b971SDhinakaran Pandiyan 
2803121e758eSDhinakaran Pandiyan 		dig_hotplug_reg = I915_READ(GEN11_TC_HOTPLUG_CTL);
2804121e758eSDhinakaran Pandiyan 		I915_WRITE(GEN11_TC_HOTPLUG_CTL, dig_hotplug_reg);
2805121e758eSDhinakaran Pandiyan 
2806121e758eSDhinakaran Pandiyan 		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tc,
2807b796b971SDhinakaran Pandiyan 				   dig_hotplug_reg, hpd_gen11,
2808121e758eSDhinakaran Pandiyan 				   gen11_port_hotplug_long_detect);
2809121e758eSDhinakaran Pandiyan 	}
2810b796b971SDhinakaran Pandiyan 
2811b796b971SDhinakaran Pandiyan 	if (trigger_tbt) {
2812b796b971SDhinakaran Pandiyan 		u32 dig_hotplug_reg;
2813b796b971SDhinakaran Pandiyan 
2814b796b971SDhinakaran Pandiyan 		dig_hotplug_reg = I915_READ(GEN11_TBT_HOTPLUG_CTL);
2815b796b971SDhinakaran Pandiyan 		I915_WRITE(GEN11_TBT_HOTPLUG_CTL, dig_hotplug_reg);
2816b796b971SDhinakaran Pandiyan 
2817b796b971SDhinakaran Pandiyan 		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tbt,
2818b796b971SDhinakaran Pandiyan 				   dig_hotplug_reg, hpd_gen11,
2819b796b971SDhinakaran Pandiyan 				   gen11_port_hotplug_long_detect);
2820b796b971SDhinakaran Pandiyan 	}
2821b796b971SDhinakaran Pandiyan 
2822b796b971SDhinakaran Pandiyan 	if (pin_mask)
2823b796b971SDhinakaran Pandiyan 		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2824b796b971SDhinakaran Pandiyan 	else
2825b796b971SDhinakaran Pandiyan 		DRM_ERROR("Unexpected DE HPD interrupt 0x%08x\n", iir);
2826121e758eSDhinakaran Pandiyan }
2827121e758eSDhinakaran Pandiyan 
28289d17210fSLucas De Marchi static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
28299d17210fSLucas De Marchi {
28309d17210fSLucas De Marchi 	u32 mask = GEN8_AUX_CHANNEL_A;
28319d17210fSLucas De Marchi 
28329d17210fSLucas De Marchi 	if (INTEL_GEN(dev_priv) >= 9)
28339d17210fSLucas De Marchi 		mask |= GEN9_AUX_CHANNEL_B |
28349d17210fSLucas De Marchi 			GEN9_AUX_CHANNEL_C |
28359d17210fSLucas De Marchi 			GEN9_AUX_CHANNEL_D;
28369d17210fSLucas De Marchi 
28379d17210fSLucas De Marchi 	if (IS_CNL_WITH_PORT_F(dev_priv))
28389d17210fSLucas De Marchi 		mask |= CNL_AUX_CHANNEL_F;
28399d17210fSLucas De Marchi 
28409d17210fSLucas De Marchi 	if (INTEL_GEN(dev_priv) >= 11)
28419d17210fSLucas De Marchi 		mask |= ICL_AUX_CHANNEL_E |
28429d17210fSLucas De Marchi 			CNL_AUX_CHANNEL_F;
28439d17210fSLucas De Marchi 
28449d17210fSLucas De Marchi 	return mask;
28459d17210fSLucas De Marchi }
28469d17210fSLucas De Marchi 
2847f11a0f46STvrtko Ursulin static irqreturn_t
2848f11a0f46STvrtko Ursulin gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
2849abd58f01SBen Widawsky {
2850abd58f01SBen Widawsky 	irqreturn_t ret = IRQ_NONE;
2851f11a0f46STvrtko Ursulin 	u32 iir;
2852c42664ccSDaniel Vetter 	enum pipe pipe;
285388e04703SJesse Barnes 
2854abd58f01SBen Widawsky 	if (master_ctl & GEN8_DE_MISC_IRQ) {
2855e32192e1STvrtko Ursulin 		iir = I915_READ(GEN8_DE_MISC_IIR);
2856e32192e1STvrtko Ursulin 		if (iir) {
2857e04f7eceSVille Syrjälä 			bool found = false;
2858e04f7eceSVille Syrjälä 
2859e32192e1STvrtko Ursulin 			I915_WRITE(GEN8_DE_MISC_IIR, iir);
2860abd58f01SBen Widawsky 			ret = IRQ_HANDLED;
2861e04f7eceSVille Syrjälä 
2862e04f7eceSVille Syrjälä 			if (iir & GEN8_DE_MISC_GSE) {
286391d14251STvrtko Ursulin 				intel_opregion_asle_intr(dev_priv);
2864e04f7eceSVille Syrjälä 				found = true;
2865e04f7eceSVille Syrjälä 			}
2866e04f7eceSVille Syrjälä 
2867e04f7eceSVille Syrjälä 			if (iir & GEN8_DE_EDP_PSR) {
286854fd3149SDhinakaran Pandiyan 				u32 psr_iir = I915_READ(EDP_PSR_IIR);
286954fd3149SDhinakaran Pandiyan 
287054fd3149SDhinakaran Pandiyan 				intel_psr_irq_handler(dev_priv, psr_iir);
287154fd3149SDhinakaran Pandiyan 				I915_WRITE(EDP_PSR_IIR, psr_iir);
2872e04f7eceSVille Syrjälä 				found = true;
2873e04f7eceSVille Syrjälä 			}
2874e04f7eceSVille Syrjälä 
2875e04f7eceSVille Syrjälä 			if (!found)
287638cc46d7SOscar Mateo 				DRM_ERROR("Unexpected DE Misc interrupt\n");
2877abd58f01SBen Widawsky 		}
287838cc46d7SOscar Mateo 		else
287938cc46d7SOscar Mateo 			DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
2880abd58f01SBen Widawsky 	}
2881abd58f01SBen Widawsky 
2882121e758eSDhinakaran Pandiyan 	if (INTEL_GEN(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) {
2883121e758eSDhinakaran Pandiyan 		iir = I915_READ(GEN11_DE_HPD_IIR);
2884121e758eSDhinakaran Pandiyan 		if (iir) {
2885121e758eSDhinakaran Pandiyan 			I915_WRITE(GEN11_DE_HPD_IIR, iir);
2886121e758eSDhinakaran Pandiyan 			ret = IRQ_HANDLED;
2887121e758eSDhinakaran Pandiyan 			gen11_hpd_irq_handler(dev_priv, iir);
2888121e758eSDhinakaran Pandiyan 		} else {
2889121e758eSDhinakaran Pandiyan 			DRM_ERROR("The master control interrupt lied, (DE HPD)!\n");
2890121e758eSDhinakaran Pandiyan 		}
2891121e758eSDhinakaran Pandiyan 	}
2892121e758eSDhinakaran Pandiyan 
28936d766f02SDaniel Vetter 	if (master_ctl & GEN8_DE_PORT_IRQ) {
2894e32192e1STvrtko Ursulin 		iir = I915_READ(GEN8_DE_PORT_IIR);
2895e32192e1STvrtko Ursulin 		if (iir) {
2896e32192e1STvrtko Ursulin 			u32 tmp_mask;
2897d04a492dSShashank Sharma 			bool found = false;
2898cebd87a0SVille Syrjälä 
2899e32192e1STvrtko Ursulin 			I915_WRITE(GEN8_DE_PORT_IIR, iir);
29006d766f02SDaniel Vetter 			ret = IRQ_HANDLED;
290188e04703SJesse Barnes 
29029d17210fSLucas De Marchi 			if (iir & gen8_de_port_aux_mask(dev_priv)) {
290391d14251STvrtko Ursulin 				dp_aux_irq_handler(dev_priv);
2904d04a492dSShashank Sharma 				found = true;
2905d04a492dSShashank Sharma 			}
2906d04a492dSShashank Sharma 
2907cc3f90f0SAnder Conselvan de Oliveira 			if (IS_GEN9_LP(dev_priv)) {
2908e32192e1STvrtko Ursulin 				tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK;
2909e32192e1STvrtko Ursulin 				if (tmp_mask) {
291091d14251STvrtko Ursulin 					bxt_hpd_irq_handler(dev_priv, tmp_mask,
291191d14251STvrtko Ursulin 							    hpd_bxt);
2912d04a492dSShashank Sharma 					found = true;
2913d04a492dSShashank Sharma 				}
2914e32192e1STvrtko Ursulin 			} else if (IS_BROADWELL(dev_priv)) {
2915e32192e1STvrtko Ursulin 				tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG;
2916e32192e1STvrtko Ursulin 				if (tmp_mask) {
291791d14251STvrtko Ursulin 					ilk_hpd_irq_handler(dev_priv,
291891d14251STvrtko Ursulin 							    tmp_mask, hpd_bdw);
2919e32192e1STvrtko Ursulin 					found = true;
2920e32192e1STvrtko Ursulin 				}
2921e32192e1STvrtko Ursulin 			}
2922d04a492dSShashank Sharma 
2923cc3f90f0SAnder Conselvan de Oliveira 			if (IS_GEN9_LP(dev_priv) && (iir & BXT_DE_PORT_GMBUS)) {
292491d14251STvrtko Ursulin 				gmbus_irq_handler(dev_priv);
29259e63743eSShashank Sharma 				found = true;
29269e63743eSShashank Sharma 			}
29279e63743eSShashank Sharma 
2928d04a492dSShashank Sharma 			if (!found)
292938cc46d7SOscar Mateo 				DRM_ERROR("Unexpected DE Port interrupt\n");
29306d766f02SDaniel Vetter 		}
293138cc46d7SOscar Mateo 		else
293238cc46d7SOscar Mateo 			DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
29336d766f02SDaniel Vetter 	}
29346d766f02SDaniel Vetter 
2935055e393fSDamien Lespiau 	for_each_pipe(dev_priv, pipe) {
2936fd3a4024SDaniel Vetter 		u32 fault_errors;
2937abd58f01SBen Widawsky 
2938c42664ccSDaniel Vetter 		if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
2939c42664ccSDaniel Vetter 			continue;
2940c42664ccSDaniel Vetter 
2941e32192e1STvrtko Ursulin 		iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
2942e32192e1STvrtko Ursulin 		if (!iir) {
2943e32192e1STvrtko Ursulin 			DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
2944e32192e1STvrtko Ursulin 			continue;
2945e32192e1STvrtko Ursulin 		}
2946770de83dSDamien Lespiau 
2947e32192e1STvrtko Ursulin 		ret = IRQ_HANDLED;
2948e32192e1STvrtko Ursulin 		I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir);
2949e32192e1STvrtko Ursulin 
2950fd3a4024SDaniel Vetter 		if (iir & GEN8_PIPE_VBLANK)
2951fd3a4024SDaniel Vetter 			drm_handle_vblank(&dev_priv->drm, pipe);
2952abd58f01SBen Widawsky 
2953e32192e1STvrtko Ursulin 		if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
295491d14251STvrtko Ursulin 			hsw_pipe_crc_irq_handler(dev_priv, pipe);
29550fbe7870SDaniel Vetter 
2956e32192e1STvrtko Ursulin 		if (iir & GEN8_PIPE_FIFO_UNDERRUN)
2957e32192e1STvrtko Ursulin 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
295838d83c96SDaniel Vetter 
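		/*
		 * Anything flagged in the per-pipe fault mask gets reported;
		 * the set of valid fault bits differs between gen8 and gen9+.
		 */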
2959e32192e1STvrtko Ursulin 		fault_errors = iir;
2960bca2bf2aSPandiyan, Dhinakaran 		if (INTEL_GEN(dev_priv) >= 9)
2961e32192e1STvrtko Ursulin 			fault_errors &= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
2962770de83dSDamien Lespiau 		else
2963e32192e1STvrtko Ursulin 			fault_errors &= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
2964770de83dSDamien Lespiau 
2965770de83dSDamien Lespiau 		if (fault_errors)
29661353ec38STvrtko Ursulin 			DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
296730100f2bSDaniel Vetter 				  pipe_name(pipe),
2968e32192e1STvrtko Ursulin 				  fault_errors);
2969abd58f01SBen Widawsky 	}
2970abd58f01SBen Widawsky 
297191d14251STvrtko Ursulin 	if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) &&
2972266ea3d9SShashank Sharma 	    master_ctl & GEN8_DE_PCH_IRQ) {
297392d03a80SDaniel Vetter 		/*
297492d03a80SDaniel Vetter 		 * FIXME(BDW): Assume for now that the new interrupt handling
297592d03a80SDaniel Vetter 		 * scheme also closed the SDE interrupt handling race we've seen
297692d03a80SDaniel Vetter 		 * on older pch-split platforms. But this needs testing.
297792d03a80SDaniel Vetter 		 */
2978e32192e1STvrtko Ursulin 		iir = I915_READ(SDEIIR);
2979e32192e1STvrtko Ursulin 		if (iir) {
2980e32192e1STvrtko Ursulin 			I915_WRITE(SDEIIR, iir);
298192d03a80SDaniel Vetter 			ret = IRQ_HANDLED;
29826dbf30ceSVille Syrjälä 
2983c6f7acb8SMatt Roper 			if (INTEL_PCH_TYPE(dev_priv) >= PCH_MCC)
2984c6f7acb8SMatt Roper 				icp_irq_handler(dev_priv, iir, hpd_mcc);
2985c6f7acb8SMatt Roper 			else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
2986c6f7acb8SMatt Roper 				icp_irq_handler(dev_priv, iir, hpd_icp);
2987c6c30b91SRodrigo Vivi 			else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
298891d14251STvrtko Ursulin 				spt_irq_handler(dev_priv, iir);
29896dbf30ceSVille Syrjälä 			else
299091d14251STvrtko Ursulin 				cpt_irq_handler(dev_priv, iir);
29912dfb0b81SJani Nikula 		} else {
29922dfb0b81SJani Nikula 			/*
29932dfb0b81SJani Nikula 			 * Like on previous PCH there seems to be something
29942dfb0b81SJani Nikula 			 * fishy going on with forwarding PCH interrupts.
29952dfb0b81SJani Nikula 			 */
29962dfb0b81SJani Nikula 			DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n");
29972dfb0b81SJani Nikula 		}
299892d03a80SDaniel Vetter 	}
299992d03a80SDaniel Vetter 
3000f11a0f46STvrtko Ursulin 	return ret;
3001f11a0f46STvrtko Ursulin }
3002f11a0f46STvrtko Ursulin 
30034376b9c9SMika Kuoppala static inline u32 gen8_master_intr_disable(void __iomem * const regs)
30044376b9c9SMika Kuoppala {
30054376b9c9SMika Kuoppala 	raw_reg_write(regs, GEN8_MASTER_IRQ, 0);
30064376b9c9SMika Kuoppala 
30074376b9c9SMika Kuoppala 	/*
30084376b9c9SMika Kuoppala 	 * Now with master disabled, get a sample of level indications
30094376b9c9SMika Kuoppala 	 * for this interrupt. Indications will be cleared on related acks.
30104376b9c9SMika Kuoppala 	 * New indications can and will light up during processing,
30114376b9c9SMika Kuoppala 	 * and will generate new interrupt after enabling master.
30124376b9c9SMika Kuoppala 	 */
30134376b9c9SMika Kuoppala 	return raw_reg_read(regs, GEN8_MASTER_IRQ);
30144376b9c9SMika Kuoppala }
30154376b9c9SMika Kuoppala 
30164376b9c9SMika Kuoppala static inline void gen8_master_intr_enable(void __iomem * const regs)
30174376b9c9SMika Kuoppala {
30184376b9c9SMika Kuoppala 	raw_reg_write(regs, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
30194376b9c9SMika Kuoppala }
30204376b9c9SMika Kuoppala 
3021f11a0f46STvrtko Ursulin static irqreturn_t gen8_irq_handler(int irq, void *arg)
3022f11a0f46STvrtko Ursulin {
3023b318b824SVille Syrjälä 	struct drm_i915_private *dev_priv = arg;
302425286aacSDaniele Ceraolo Spurio 	void __iomem * const regs = dev_priv->uncore.regs;
3025f11a0f46STvrtko Ursulin 	u32 master_ctl;
3026f0fd96f5SChris Wilson 	u32 gt_iir[4];
3027f11a0f46STvrtko Ursulin 
3028f11a0f46STvrtko Ursulin 	if (!intel_irqs_enabled(dev_priv))
3029f11a0f46STvrtko Ursulin 		return IRQ_NONE;
3030f11a0f46STvrtko Ursulin 
30314376b9c9SMika Kuoppala 	master_ctl = gen8_master_intr_disable(regs);
30324376b9c9SMika Kuoppala 	if (!master_ctl) {
30334376b9c9SMika Kuoppala 		gen8_master_intr_enable(regs);
3034f11a0f46STvrtko Ursulin 		return IRQ_NONE;
30354376b9c9SMika Kuoppala 	}
3036f11a0f46STvrtko Ursulin 
3037f11a0f46STvrtko Ursulin 	/* Find, clear, then process each source of interrupt */
303855ef72f2SChris Wilson 	gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);
3039f0fd96f5SChris Wilson 
3040f0fd96f5SChris Wilson 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
3041f0fd96f5SChris Wilson 	if (master_ctl & ~GEN8_GT_IRQS) {
30429102650fSDaniele Ceraolo Spurio 		disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
304355ef72f2SChris Wilson 		gen8_de_irq_handler(dev_priv, master_ctl);
30449102650fSDaniele Ceraolo Spurio 		enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
3045f0fd96f5SChris Wilson 	}
3046f11a0f46STvrtko Ursulin 
30474376b9c9SMika Kuoppala 	gen8_master_intr_enable(regs);
3048abd58f01SBen Widawsky 
3049f0fd96f5SChris Wilson 	gen8_gt_irq_handler(dev_priv, master_ctl, gt_iir);
30501f814dacSImre Deak 
305155ef72f2SChris Wilson 	return IRQ_HANDLED;
3052abd58f01SBen Widawsky }
3053abd58f01SBen Widawsky 
305451951ae7SMika Kuoppala static u32
30559b77011eSTvrtko Ursulin gen11_gt_engine_identity(struct intel_gt *gt,
305651951ae7SMika Kuoppala 			 const unsigned int bank, const unsigned int bit)
305751951ae7SMika Kuoppala {
30589b77011eSTvrtko Ursulin 	void __iomem * const regs = gt->uncore->regs;
305951951ae7SMika Kuoppala 	u32 timeout_ts;
306051951ae7SMika Kuoppala 	u32 ident;
306151951ae7SMika Kuoppala 
30629b77011eSTvrtko Ursulin 	lockdep_assert_held(&gt->i915->irq_lock);
306396606f3bSOscar Mateo 
306451951ae7SMika Kuoppala 	raw_reg_write(regs, GEN11_IIR_REG_SELECTOR(bank), BIT(bit));
306551951ae7SMika Kuoppala 
306651951ae7SMika Kuoppala 	/*
306751951ae7SMika Kuoppala 	 * NB: Specs do not specify how long to spin wait,
306851951ae7SMika Kuoppala 	 * so we do ~100us as an educated guess.
306951951ae7SMika Kuoppala 	 */
307051951ae7SMika Kuoppala 	timeout_ts = (local_clock() >> 10) + 100;
307151951ae7SMika Kuoppala 	do {
307251951ae7SMika Kuoppala 		ident = raw_reg_read(regs, GEN11_INTR_IDENTITY_REG(bank));
307351951ae7SMika Kuoppala 	} while (!(ident & GEN11_INTR_DATA_VALID) &&
307451951ae7SMika Kuoppala 		 !time_after32(local_clock() >> 10, timeout_ts));
307551951ae7SMika Kuoppala 
307651951ae7SMika Kuoppala 	if (unlikely(!(ident & GEN11_INTR_DATA_VALID))) {
307751951ae7SMika Kuoppala 		DRM_ERROR("INTR_IDENTITY_REG%u:%u 0x%08x not valid!\n",
307851951ae7SMika Kuoppala 			  bank, bit, ident);
307951951ae7SMika Kuoppala 		return 0;
308051951ae7SMika Kuoppala 	}
308151951ae7SMika Kuoppala 
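	/*
	 * Writing the valid bit back acks the latched identity, freeing the
	 * register for the next selector write.
	 */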
308251951ae7SMika Kuoppala 	raw_reg_write(regs, GEN11_INTR_IDENTITY_REG(bank),
308351951ae7SMika Kuoppala 		      GEN11_INTR_DATA_VALID);
308451951ae7SMika Kuoppala 
3085f744dbc2SMika Kuoppala 	return ident;
3086f744dbc2SMika Kuoppala }
3087f744dbc2SMika Kuoppala 
3088f744dbc2SMika Kuoppala static void
30899b77011eSTvrtko Ursulin gen11_other_irq_handler(struct intel_gt *gt, const u8 instance,
30909b77011eSTvrtko Ursulin 			const u16 iir)
3091f744dbc2SMika Kuoppala {
30929b77011eSTvrtko Ursulin 	struct drm_i915_private *i915 = gt->i915;
30939b77011eSTvrtko Ursulin 
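	/*
	 * The OTHER class multiplexes non-engine sources: GuC and the PM
	 * (rps) unit have dedicated handlers, anything else is unexpected.
	 */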
309454c52a84SOscar Mateo 	if (instance == OTHER_GUC_INSTANCE)
309554c52a84SOscar Mateo 		return gen11_guc_irq_handler(i915, iir);
309654c52a84SOscar Mateo 
3097d02b98b8SOscar Mateo 	if (instance == OTHER_GTPM_INSTANCE)
309858820574STvrtko Ursulin 		return gen11_rps_irq_handler(gt, iir);
3099d02b98b8SOscar Mateo 
3100f744dbc2SMika Kuoppala 	WARN_ONCE(1, "unhandled other interrupt instance=0x%x, iir=0x%x\n",
3101f744dbc2SMika Kuoppala 		  instance, iir);
3102f744dbc2SMika Kuoppala }
3103f744dbc2SMika Kuoppala 
3104f744dbc2SMika Kuoppala static void
31059b77011eSTvrtko Ursulin gen11_engine_irq_handler(struct intel_gt *gt, const u8 class,
31069b77011eSTvrtko Ursulin 			 const u8 instance, const u16 iir)
3107f744dbc2SMika Kuoppala {
3108f744dbc2SMika Kuoppala 	struct intel_engine_cs *engine;
3109f744dbc2SMika Kuoppala 
3110f744dbc2SMika Kuoppala 	if (instance <= MAX_ENGINE_INSTANCE)
31119b77011eSTvrtko Ursulin 		engine = gt->i915->engine_class[class][instance];
3112f744dbc2SMika Kuoppala 	else
3113f744dbc2SMika Kuoppala 		engine = NULL;
3114f744dbc2SMika Kuoppala 
3115f744dbc2SMika Kuoppala 	if (likely(engine))
3116f744dbc2SMika Kuoppala 		return gen8_cs_irq_handler(engine, iir);
3117f744dbc2SMika Kuoppala 
3118f744dbc2SMika Kuoppala 	WARN_ONCE(1, "unhandled engine interrupt class=0x%x, instance=0x%x\n",
3119f744dbc2SMika Kuoppala 		  class, instance);
3120f744dbc2SMika Kuoppala }
3121f744dbc2SMika Kuoppala 
3122f744dbc2SMika Kuoppala static void
31239b77011eSTvrtko Ursulin gen11_gt_identity_handler(struct intel_gt *gt, const u32 identity)
3124f744dbc2SMika Kuoppala {
3125f744dbc2SMika Kuoppala 	const u8 class = GEN11_INTR_ENGINE_CLASS(identity);
3126f744dbc2SMika Kuoppala 	const u8 instance = GEN11_INTR_ENGINE_INSTANCE(identity);
3127f744dbc2SMika Kuoppala 	const u16 intr = GEN11_INTR_ENGINE_INTR(identity);
3128f744dbc2SMika Kuoppala 
3129f744dbc2SMika Kuoppala 	if (unlikely(!intr))
3130f744dbc2SMika Kuoppala 		return;
3131f744dbc2SMika Kuoppala 
3132f744dbc2SMika Kuoppala 	if (class <= COPY_ENGINE_CLASS)
31339b77011eSTvrtko Ursulin 		return gen11_engine_irq_handler(gt, class, instance, intr);
3134f744dbc2SMika Kuoppala 
3135f744dbc2SMika Kuoppala 	if (class == OTHER_CLASS)
31369b77011eSTvrtko Ursulin 		return gen11_other_irq_handler(gt, instance, intr);
3137f744dbc2SMika Kuoppala 
3138f744dbc2SMika Kuoppala 	WARN_ONCE(1, "unknown interrupt class=0x%x, instance=0x%x, intr=0x%x\n",
3139f744dbc2SMika Kuoppala 		  class, instance, intr);
314051951ae7SMika Kuoppala }
314151951ae7SMika Kuoppala 
314251951ae7SMika Kuoppala static void
31439b77011eSTvrtko Ursulin gen11_gt_bank_handler(struct intel_gt *gt, const unsigned int bank)
314451951ae7SMika Kuoppala {
31459b77011eSTvrtko Ursulin 	void __iomem * const regs = gt->uncore->regs;
314651951ae7SMika Kuoppala 	unsigned long intr_dw;
314751951ae7SMika Kuoppala 	unsigned int bit;
314851951ae7SMika Kuoppala 
31499b77011eSTvrtko Ursulin 	lockdep_assert_held(&gt->i915->irq_lock);
315051951ae7SMika Kuoppala 
315151951ae7SMika Kuoppala 	intr_dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
315251951ae7SMika Kuoppala 
315351951ae7SMika Kuoppala 	for_each_set_bit(bit, &intr_dw, 32) {
31549b77011eSTvrtko Ursulin 		const u32 ident = gen11_gt_engine_identity(gt, bank, bit);
315551951ae7SMika Kuoppala 
31569b77011eSTvrtko Ursulin 		gen11_gt_identity_handler(gt, ident);
315751951ae7SMika Kuoppala 	}
315851951ae7SMika Kuoppala 
315951951ae7SMika Kuoppala 	/* Clear only after the shared dword has been serviced for the engine */
316051951ae7SMika Kuoppala 	raw_reg_write(regs, GEN11_GT_INTR_DW(bank), intr_dw);
316151951ae7SMika Kuoppala }
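
/*
 * A rough summary of the gen11 GT interrupt hierarchy as handled above,
 * derived from this code rather than additional hardware documentation:
 * the master register selects a bank via GEN11_GT_DW_IRQ(bank), each
 * bank's GEN11_GT_INTR_DW exposes one bit per pending source, and for
 * every set bit the identity register reports a class/instance/payload
 * triple that is routed to the engine or "other" handler. Only once all
 * identities of a bank have been serviced is the shared dword written
 * back to clear it.
 */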
316296606f3bSOscar Mateo 
316396606f3bSOscar Mateo static void
31649b77011eSTvrtko Ursulin gen11_gt_irq_handler(struct intel_gt *gt, const u32 master_ctl)
316596606f3bSOscar Mateo {
31669b77011eSTvrtko Ursulin 	struct drm_i915_private *i915 = gt->i915;
316796606f3bSOscar Mateo 	unsigned int bank;
316896606f3bSOscar Mateo 
316996606f3bSOscar Mateo 	spin_lock(&i915->irq_lock);
317096606f3bSOscar Mateo 
317196606f3bSOscar Mateo 	for (bank = 0; bank < 2; bank++) {
317296606f3bSOscar Mateo 		if (master_ctl & GEN11_GT_DW_IRQ(bank))
31739b77011eSTvrtko Ursulin 			gen11_gt_bank_handler(gt, bank);
317496606f3bSOscar Mateo 	}
317596606f3bSOscar Mateo 
317696606f3bSOscar Mateo 	spin_unlock(&i915->irq_lock);
317751951ae7SMika Kuoppala }
317851951ae7SMika Kuoppala 
31797a909383SChris Wilson static u32
31809b77011eSTvrtko Ursulin gen11_gu_misc_irq_ack(struct intel_gt *gt, const u32 master_ctl)
3181df0d28c1SDhinakaran Pandiyan {
31829b77011eSTvrtko Ursulin 	void __iomem * const regs = gt->uncore->regs;
31837a909383SChris Wilson 	u32 iir;
3184df0d28c1SDhinakaran Pandiyan 
3185df0d28c1SDhinakaran Pandiyan 	if (!(master_ctl & GEN11_GU_MISC_IRQ))
31867a909383SChris Wilson 		return 0;
3187df0d28c1SDhinakaran Pandiyan 
31887a909383SChris Wilson 	iir = raw_reg_read(regs, GEN11_GU_MISC_IIR);
31897a909383SChris Wilson 	if (likely(iir))
31907a909383SChris Wilson 		raw_reg_write(regs, GEN11_GU_MISC_IIR, iir);
31917a909383SChris Wilson 
31927a909383SChris Wilson 	return iir;
3193df0d28c1SDhinakaran Pandiyan }
3194df0d28c1SDhinakaran Pandiyan 
3195df0d28c1SDhinakaran Pandiyan static void
31969b77011eSTvrtko Ursulin gen11_gu_misc_irq_handler(struct intel_gt *gt, const u32 iir)
3197df0d28c1SDhinakaran Pandiyan {
3198df0d28c1SDhinakaran Pandiyan 	if (iir & GEN11_GU_MISC_GSE)
31999b77011eSTvrtko Ursulin 		intel_opregion_asle_intr(gt->i915);
3200df0d28c1SDhinakaran Pandiyan }
3201df0d28c1SDhinakaran Pandiyan 
320281067b71SMika Kuoppala static inline u32 gen11_master_intr_disable(void __iomem * const regs)
320381067b71SMika Kuoppala {
320481067b71SMika Kuoppala 	raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0);
320581067b71SMika Kuoppala 
320681067b71SMika Kuoppala 	/*
320781067b71SMika Kuoppala 	 * Now with master disabled, get a sample of level indications
320881067b71SMika Kuoppala 	 * for this interrupt. Indications will be cleared on related acks.
320981067b71SMika Kuoppala 	 * New indications can and will light up during processing,
321081067b71SMika Kuoppala 	 * and will generate a new interrupt after the master is re-enabled.
321181067b71SMika Kuoppala 	 */
321281067b71SMika Kuoppala 	return raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
321381067b71SMika Kuoppala }
321481067b71SMika Kuoppala 
321581067b71SMika Kuoppala static inline void gen11_master_intr_enable(void __iomem * const regs)
321681067b71SMika Kuoppala {
321781067b71SMika Kuoppala 	raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ);
321881067b71SMika Kuoppala }
321981067b71SMika Kuoppala 
322051951ae7SMika Kuoppala static irqreturn_t gen11_irq_handler(int irq, void *arg)
322151951ae7SMika Kuoppala {
3222b318b824SVille Syrjälä 	struct drm_i915_private * const i915 = arg;
322325286aacSDaniele Ceraolo Spurio 	void __iomem * const regs = i915->uncore.regs;
32249b77011eSTvrtko Ursulin 	struct intel_gt *gt = &i915->gt;
322551951ae7SMika Kuoppala 	u32 master_ctl;
3226df0d28c1SDhinakaran Pandiyan 	u32 gu_misc_iir;
322751951ae7SMika Kuoppala 
322851951ae7SMika Kuoppala 	if (!intel_irqs_enabled(i915))
322951951ae7SMika Kuoppala 		return IRQ_NONE;
323051951ae7SMika Kuoppala 
323181067b71SMika Kuoppala 	master_ctl = gen11_master_intr_disable(regs);
323281067b71SMika Kuoppala 	if (!master_ctl) {
323381067b71SMika Kuoppala 		gen11_master_intr_enable(regs);
323451951ae7SMika Kuoppala 		return IRQ_NONE;
323581067b71SMika Kuoppala 	}
323651951ae7SMika Kuoppala 
323751951ae7SMika Kuoppala 	/* Find, clear, then process each source of interrupt. */
32389b77011eSTvrtko Ursulin 	gen11_gt_irq_handler(gt, master_ctl);
323951951ae7SMika Kuoppala 
324051951ae7SMika Kuoppala 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
324151951ae7SMika Kuoppala 	if (master_ctl & GEN11_DISPLAY_IRQ) {
324251951ae7SMika Kuoppala 		const u32 disp_ctl = raw_reg_read(regs, GEN11_DISPLAY_INT_CTL);
324351951ae7SMika Kuoppala 
32449102650fSDaniele Ceraolo Spurio 		disable_rpm_wakeref_asserts(&i915->runtime_pm);
324551951ae7SMika Kuoppala 		/*
324651951ae7SMika Kuoppala 		 * GEN11_DISPLAY_INT_CTL has same format as GEN8_MASTER_IRQ
324751951ae7SMika Kuoppala 		 * for the display related bits.
324851951ae7SMika Kuoppala 		 */
324951951ae7SMika Kuoppala 		gen8_de_irq_handler(i915, disp_ctl);
32509102650fSDaniele Ceraolo Spurio 		enable_rpm_wakeref_asserts(&i915->runtime_pm);
325151951ae7SMika Kuoppala 	}
325251951ae7SMika Kuoppala 
32539b77011eSTvrtko Ursulin 	gu_misc_iir = gen11_gu_misc_irq_ack(gt, master_ctl);
3254df0d28c1SDhinakaran Pandiyan 
325581067b71SMika Kuoppala 	gen11_master_intr_enable(regs);
325651951ae7SMika Kuoppala 
32579b77011eSTvrtko Ursulin 	gen11_gu_misc_irq_handler(gt, gu_misc_iir);
3258df0d28c1SDhinakaran Pandiyan 
325951951ae7SMika Kuoppala 	return IRQ_HANDLED;
326051951ae7SMika Kuoppala }
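
/*
 * Illustrative only: a top-level handler with this signature is normally
 * registered with the shared-IRQ core roughly as below. The pdev pointer,
 * IRQF_SHARED flag and name string here are assumptions for the sketch;
 * the driver's actual install path lives elsewhere.
 *
 *	err = request_irq(pdev->irq, gen11_irq_handler, IRQF_SHARED,
 *			  "i915", i915);
 *
 * The void *arg cookie passed back on every interrupt is the same pointer
 * handed to request_irq(), which is why the handler casts arg to
 * struct drm_i915_private above.
 */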
326151951ae7SMika Kuoppala 
326342f52ef8SKeith Packard /* Called from drm generic code, passed 'crtc' from which
326442f52ef8SKeith Packard  * we derive the pipe index.
326542f52ef8SKeith Packard  */
326508fa8fd0SVille Syrjälä int i8xx_enable_vblank(struct drm_crtc *crtc)
32660a3e67a4SJesse Barnes {
326708fa8fd0SVille Syrjälä 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
326808fa8fd0SVille Syrjälä 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
3269e9d21d7fSKeith Packard 	unsigned long irqflags;
327071e0ffa5SJesse Barnes 
32711ec14ad3SChris Wilson 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
327286e83e35SChris Wilson 	i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
327386e83e35SChris Wilson 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
327486e83e35SChris Wilson 
327586e83e35SChris Wilson 	return 0;
327686e83e35SChris Wilson }
327786e83e35SChris Wilson 
327808fa8fd0SVille Syrjälä int i945gm_enable_vblank(struct drm_crtc *crtc)
3279d938da6bSVille Syrjälä {
328008fa8fd0SVille Syrjälä 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
3281d938da6bSVille Syrjälä 
3282d938da6bSVille Syrjälä 	if (dev_priv->i945gm_vblank.enabled++ == 0)
3283d938da6bSVille Syrjälä 		schedule_work(&dev_priv->i945gm_vblank.work);
3284d938da6bSVille Syrjälä 
328508fa8fd0SVille Syrjälä 	return i8xx_enable_vblank(crtc);
3286d938da6bSVille Syrjälä }
3287d938da6bSVille Syrjälä 
328808fa8fd0SVille Syrjälä int i965_enable_vblank(struct drm_crtc *crtc)
328986e83e35SChris Wilson {
329008fa8fd0SVille Syrjälä 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
329108fa8fd0SVille Syrjälä 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
329286e83e35SChris Wilson 	unsigned long irqflags;
329386e83e35SChris Wilson 
329486e83e35SChris Wilson 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
32957c463586SKeith Packard 	i915_enable_pipestat(dev_priv, pipe,
3296755e9019SImre Deak 			     PIPE_START_VBLANK_INTERRUPT_STATUS);
32971ec14ad3SChris Wilson 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
32988692d00eSChris Wilson 
32990a3e67a4SJesse Barnes 	return 0;
33000a3e67a4SJesse Barnes }
33010a3e67a4SJesse Barnes 
330208fa8fd0SVille Syrjälä int ilk_enable_vblank(struct drm_crtc *crtc)
3303f796cf8fSJesse Barnes {
330408fa8fd0SVille Syrjälä 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
330508fa8fd0SVille Syrjälä 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
3306f796cf8fSJesse Barnes 	unsigned long irqflags;
3307a9c287c9SJani Nikula 	u32 bit = INTEL_GEN(dev_priv) >= 7 ?
330886e83e35SChris Wilson 		DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
3309f796cf8fSJesse Barnes 
3310f796cf8fSJesse Barnes 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3311fbdedaeaSVille Syrjälä 	ilk_enable_display_irq(dev_priv, bit);
3312b1f14ad0SJesse Barnes 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3313b1f14ad0SJesse Barnes 
33142e8bf223SDhinakaran Pandiyan 	/* Even though there is no DMC, the frame counter can get stuck when
33152e8bf223SDhinakaran Pandiyan 	 * PSR is active, as no frames are generated.
33162e8bf223SDhinakaran Pandiyan 	 */
33172e8bf223SDhinakaran Pandiyan 	if (HAS_PSR(dev_priv))
331808fa8fd0SVille Syrjälä 		drm_crtc_vblank_restore(crtc);
33192e8bf223SDhinakaran Pandiyan 
3320b1f14ad0SJesse Barnes 	return 0;
3321b1f14ad0SJesse Barnes }
3322b1f14ad0SJesse Barnes 
332308fa8fd0SVille Syrjälä int bdw_enable_vblank(struct drm_crtc *crtc)
3324abd58f01SBen Widawsky {
332508fa8fd0SVille Syrjälä 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
332608fa8fd0SVille Syrjälä 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
3327abd58f01SBen Widawsky 	unsigned long irqflags;
3328abd58f01SBen Widawsky 
3329abd58f01SBen Widawsky 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3330013d3752SVille Syrjälä 	bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
3331abd58f01SBen Widawsky 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3332013d3752SVille Syrjälä 
33332e8bf223SDhinakaran Pandiyan 	/* Even if there is no DMC, the frame counter can get stuck when
33342e8bf223SDhinakaran Pandiyan 	 * PSR is active, as no frames are generated, so check only for PSR.
33352e8bf223SDhinakaran Pandiyan 	 */
33362e8bf223SDhinakaran Pandiyan 	if (HAS_PSR(dev_priv))
333708fa8fd0SVille Syrjälä 		drm_crtc_vblank_restore(crtc);
33382e8bf223SDhinakaran Pandiyan 
3339abd58f01SBen Widawsky 	return 0;
3340abd58f01SBen Widawsky }
3341abd58f01SBen Widawsky 
334242f52ef8SKeith Packard /* Called from drm generic code, passed 'crtc' from which
334342f52ef8SKeith Packard  * we derive the pipe index.
334442f52ef8SKeith Packard  */
334508fa8fd0SVille Syrjälä void i8xx_disable_vblank(struct drm_crtc *crtc)
334686e83e35SChris Wilson {
334708fa8fd0SVille Syrjälä 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
334808fa8fd0SVille Syrjälä 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
334986e83e35SChris Wilson 	unsigned long irqflags;
335086e83e35SChris Wilson 
335186e83e35SChris Wilson 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
335286e83e35SChris Wilson 	i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
335386e83e35SChris Wilson 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
335486e83e35SChris Wilson }
335586e83e35SChris Wilson 
335608fa8fd0SVille Syrjälä void i945gm_disable_vblank(struct drm_crtc *crtc)
3357d938da6bSVille Syrjälä {
335808fa8fd0SVille Syrjälä 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
3359d938da6bSVille Syrjälä 
336008fa8fd0SVille Syrjälä 	i8xx_disable_vblank(crtc);
3361d938da6bSVille Syrjälä 
3362d938da6bSVille Syrjälä 	if (--dev_priv->i945gm_vblank.enabled == 0)
3363d938da6bSVille Syrjälä 		schedule_work(&dev_priv->i945gm_vblank.work);
3364d938da6bSVille Syrjälä }
3365d938da6bSVille Syrjälä 
336608fa8fd0SVille Syrjälä void i965_disable_vblank(struct drm_crtc *crtc)
33670a3e67a4SJesse Barnes {
336808fa8fd0SVille Syrjälä 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
336908fa8fd0SVille Syrjälä 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
3370e9d21d7fSKeith Packard 	unsigned long irqflags;
33710a3e67a4SJesse Barnes 
33721ec14ad3SChris Wilson 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
33737c463586SKeith Packard 	i915_disable_pipestat(dev_priv, pipe,
3374755e9019SImre Deak 			      PIPE_START_VBLANK_INTERRUPT_STATUS);
33751ec14ad3SChris Wilson 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
33760a3e67a4SJesse Barnes }
33770a3e67a4SJesse Barnes 
337808fa8fd0SVille Syrjälä void ilk_disable_vblank(struct drm_crtc *crtc)
3379f796cf8fSJesse Barnes {
338008fa8fd0SVille Syrjälä 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
338108fa8fd0SVille Syrjälä 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
3382f796cf8fSJesse Barnes 	unsigned long irqflags;
3383a9c287c9SJani Nikula 	u32 bit = INTEL_GEN(dev_priv) >= 7 ?
338486e83e35SChris Wilson 		DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
3385f796cf8fSJesse Barnes 
3386f796cf8fSJesse Barnes 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3387fbdedaeaSVille Syrjälä 	ilk_disable_display_irq(dev_priv, bit);
3388b1f14ad0SJesse Barnes 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3389b1f14ad0SJesse Barnes }
3390b1f14ad0SJesse Barnes 
339108fa8fd0SVille Syrjälä void bdw_disable_vblank(struct drm_crtc *crtc)
3392abd58f01SBen Widawsky {
339308fa8fd0SVille Syrjälä 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
339408fa8fd0SVille Syrjälä 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
3395abd58f01SBen Widawsky 	unsigned long irqflags;
3396abd58f01SBen Widawsky 
3397abd58f01SBen Widawsky 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3398013d3752SVille Syrjälä 	bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
3399abd58f01SBen Widawsky 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3400abd58f01SBen Widawsky }
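
/*
 * The i8xx/i945gm/i965/ilk/bdw enable/disable pairs above implement the
 * per-crtc vblank hooks used by the DRM vblank core. A minimal sketch of
 * how such hooks are wired up, assuming the usual drm_crtc_funcs fields
 * (the actual assignment is done in the display code, not here):
 *
 *	static const struct drm_crtc_funcs example_crtc_funcs = {
 *		.enable_vblank = i965_enable_vblank,
 *		.disable_vblank = i965_disable_vblank,
 *		...
 *	};
 */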
3401abd58f01SBen Widawsky 
34027218524dSChris Wilson static void i945gm_vblank_work_func(struct work_struct *work)
3403d938da6bSVille Syrjälä {
3404d938da6bSVille Syrjälä 	struct drm_i915_private *dev_priv =
3405d938da6bSVille Syrjälä 		container_of(work, struct drm_i915_private, i945gm_vblank.work);
3406d938da6bSVille Syrjälä 
3407d938da6bSVille Syrjälä 	/*
3408d938da6bSVille Syrjälä 	 * Vblank interrupts fail to wake up the device from C3,
3409d938da6bSVille Syrjälä 	 * hence we want to prevent C3 usage while vblank interrupts
3410d938da6bSVille Syrjälä 	 * are enabled.
3411d938da6bSVille Syrjälä 	 */
3412d938da6bSVille Syrjälä 	pm_qos_update_request(&dev_priv->i945gm_vblank.pm_qos,
3413d938da6bSVille Syrjälä 			      READ_ONCE(dev_priv->i945gm_vblank.enabled) ?
3414d938da6bSVille Syrjälä 			      dev_priv->i945gm_vblank.c3_disable_latency :
3415d938da6bSVille Syrjälä 			      PM_QOS_DEFAULT_VALUE);
3416d938da6bSVille Syrjälä }
3417d938da6bSVille Syrjälä 
3418d938da6bSVille Syrjälä static int cstate_disable_latency(const char *name)
3419d938da6bSVille Syrjälä {
3420d938da6bSVille Syrjälä 	const struct cpuidle_driver *drv;
3421d938da6bSVille Syrjälä 	int i;
3422d938da6bSVille Syrjälä 
3423d938da6bSVille Syrjälä 	drv = cpuidle_get_driver();
3424d938da6bSVille Syrjälä 	if (!drv)
3425d938da6bSVille Syrjälä 		return 0;
3426d938da6bSVille Syrjälä 
3427d938da6bSVille Syrjälä 	for (i = 0; i < drv->state_count; i++) {
3428d938da6bSVille Syrjälä 		const struct cpuidle_state *state = &drv->states[i];
3429d938da6bSVille Syrjälä 
3430d938da6bSVille Syrjälä 		if (!strcmp(state->name, name))
3431d938da6bSVille Syrjälä 			return state->exit_latency ?
3432d938da6bSVille Syrjälä 				state->exit_latency - 1 : 0;
3433d938da6bSVille Syrjälä 	}
3434d938da6bSVille Syrjälä 
3435d938da6bSVille Syrjälä 	return 0;
3436d938da6bSVille Syrjälä }
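
/*
 * The "exit_latency - 1" above is what keeps cpuidle out of the named
 * state: requesting a CPU DMA latency constraint just below the state's
 * exit latency makes that state (and anything deeper) ineligible while
 * the constraint is active. For example, assuming C3 advertises a 100us
 * exit latency, the request computed here is 99us, so C3 is skipped
 * while vblank interrupts are enabled but shallower states remain
 * available.
 */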
3437d938da6bSVille Syrjälä 
3438d938da6bSVille Syrjälä static void i945gm_vblank_work_init(struct drm_i915_private *dev_priv)
3439d938da6bSVille Syrjälä {
3440d938da6bSVille Syrjälä 	INIT_WORK(&dev_priv->i945gm_vblank.work,
3441d938da6bSVille Syrjälä 		  i945gm_vblank_work_func);
3442d938da6bSVille Syrjälä 
3443d938da6bSVille Syrjälä 	dev_priv->i945gm_vblank.c3_disable_latency =
3444d938da6bSVille Syrjälä 		cstate_disable_latency("C3");
3445d938da6bSVille Syrjälä 	pm_qos_add_request(&dev_priv->i945gm_vblank.pm_qos,
3446d938da6bSVille Syrjälä 			   PM_QOS_CPU_DMA_LATENCY,
3447d938da6bSVille Syrjälä 			   PM_QOS_DEFAULT_VALUE);
3448d938da6bSVille Syrjälä }
3449d938da6bSVille Syrjälä 
3450d938da6bSVille Syrjälä static void i945gm_vblank_work_fini(struct drm_i915_private *dev_priv)
3451d938da6bSVille Syrjälä {
3452d938da6bSVille Syrjälä 	cancel_work_sync(&dev_priv->i945gm_vblank.work);
3453d938da6bSVille Syrjälä 	pm_qos_remove_request(&dev_priv->i945gm_vblank.pm_qos);
3454d938da6bSVille Syrjälä }
3455d938da6bSVille Syrjälä 
3456b243f530STvrtko Ursulin static void ibx_irq_reset(struct drm_i915_private *dev_priv)
345791738a95SPaulo Zanoni {
3458b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
3459b16b2a2fSPaulo Zanoni 
34606e266956STvrtko Ursulin 	if (HAS_PCH_NOP(dev_priv))
346191738a95SPaulo Zanoni 		return;
346291738a95SPaulo Zanoni 
3463b16b2a2fSPaulo Zanoni 	GEN3_IRQ_RESET(uncore, SDE);
3464105b122eSPaulo Zanoni 
34656e266956STvrtko Ursulin 	if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
3466105b122eSPaulo Zanoni 		I915_WRITE(SERR_INT, 0xffffffff);
3467622364b6SPaulo Zanoni }
3468105b122eSPaulo Zanoni 
346991738a95SPaulo Zanoni /*
3470622364b6SPaulo Zanoni  * SDEIER is also touched by the interrupt handler to work around missed PCH
3471622364b6SPaulo Zanoni  * interrupts. Hence we can't update it after the interrupt handler is enabled -
3472622364b6SPaulo Zanoni  * instead we unconditionally enable all PCH interrupt sources here, but then
3473622364b6SPaulo Zanoni  * only unmask them as needed with SDEIMR.
3474622364b6SPaulo Zanoni  *
3475622364b6SPaulo Zanoni  * This function needs to be called before interrupts are enabled.
347691738a95SPaulo Zanoni  */
3477b318b824SVille Syrjälä static void ibx_irq_pre_postinstall(struct drm_i915_private *dev_priv)
3478622364b6SPaulo Zanoni {
34796e266956STvrtko Ursulin 	if (HAS_PCH_NOP(dev_priv))
3480622364b6SPaulo Zanoni 		return;
3481622364b6SPaulo Zanoni 
3482622364b6SPaulo Zanoni 	WARN_ON(I915_READ(SDEIER) != 0);
348391738a95SPaulo Zanoni 	I915_WRITE(SDEIER, 0xffffffff);
348491738a95SPaulo Zanoni 	POSTING_READ(SDEIER);
348591738a95SPaulo Zanoni }
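
/*
 * The net effect of the two functions above is a two-level masking
 * scheme for PCH interrupts: SDEIER is left fully enabled before the
 * handler is installed and is not used for runtime masking again;
 * individual sources are then unmasked only through SDEIMR (see
 * ibx_irq_postinstall() below), with the handler acking them via SDEIIR.
 */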
348691738a95SPaulo Zanoni 
3487b243f530STvrtko Ursulin static void gen5_gt_irq_reset(struct drm_i915_private *dev_priv)
3488d18ea1b5SDaniel Vetter {
3489b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
3490b16b2a2fSPaulo Zanoni 
3491b16b2a2fSPaulo Zanoni 	GEN3_IRQ_RESET(uncore, GT);
3492b243f530STvrtko Ursulin 	if (INTEL_GEN(dev_priv) >= 6)
3493b16b2a2fSPaulo Zanoni 		GEN3_IRQ_RESET(uncore, GEN6_PM);
3494d18ea1b5SDaniel Vetter }
3495d18ea1b5SDaniel Vetter 
349670591a41SVille Syrjälä static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
349770591a41SVille Syrjälä {
3498b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
3499b16b2a2fSPaulo Zanoni 
350071b8b41dSVille Syrjälä 	if (IS_CHERRYVIEW(dev_priv))
3501f0818984STvrtko Ursulin 		intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
350271b8b41dSVille Syrjälä 	else
3503f0818984STvrtko Ursulin 		intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK);
350471b8b41dSVille Syrjälä 
3505ad22d106SVille Syrjälä 	i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
3506f0818984STvrtko Ursulin 	intel_uncore_write(uncore, PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
350770591a41SVille Syrjälä 
350844d9241eSVille Syrjälä 	i9xx_pipestat_irq_reset(dev_priv);
350970591a41SVille Syrjälä 
3510b16b2a2fSPaulo Zanoni 	GEN3_IRQ_RESET(uncore, VLV_);
35118bd099a7SChris Wilson 	dev_priv->irq_mask = ~0u;
351270591a41SVille Syrjälä }
351370591a41SVille Syrjälä 
35148bb61306SVille Syrjälä static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
35158bb61306SVille Syrjälä {
3516b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
3517b16b2a2fSPaulo Zanoni 
35188bb61306SVille Syrjälä 	u32 pipestat_mask;
35199ab981f2SVille Syrjälä 	u32 enable_mask;
35208bb61306SVille Syrjälä 	enum pipe pipe;
35218bb61306SVille Syrjälä 
3522842ebf7aSVille Syrjälä 	pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS;
35238bb61306SVille Syrjälä 
35248bb61306SVille Syrjälä 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
35258bb61306SVille Syrjälä 	for_each_pipe(dev_priv, pipe)
35268bb61306SVille Syrjälä 		i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
35278bb61306SVille Syrjälä 
35289ab981f2SVille Syrjälä 	enable_mask = I915_DISPLAY_PORT_INTERRUPT |
35298bb61306SVille Syrjälä 		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3530ebf5f921SVille Syrjälä 		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3531ebf5f921SVille Syrjälä 		I915_LPE_PIPE_A_INTERRUPT |
3532ebf5f921SVille Syrjälä 		I915_LPE_PIPE_B_INTERRUPT;
3533ebf5f921SVille Syrjälä 
35348bb61306SVille Syrjälä 	if (IS_CHERRYVIEW(dev_priv))
3535ebf5f921SVille Syrjälä 		enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT |
3536ebf5f921SVille Syrjälä 			I915_LPE_PIPE_C_INTERRUPT;
35376b7eafc1SVille Syrjälä 
35388bd099a7SChris Wilson 	WARN_ON(dev_priv->irq_mask != ~0u);
35396b7eafc1SVille Syrjälä 
35409ab981f2SVille Syrjälä 	dev_priv->irq_mask = ~enable_mask;
35418bb61306SVille Syrjälä 
3542b16b2a2fSPaulo Zanoni 	GEN3_IRQ_INIT(uncore, VLV_, dev_priv->irq_mask, enable_mask);
35438bb61306SVille Syrjälä }
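
/*
 * Note on the masking convention used above: vlv_display_irq_reset()
 * leaves dev_priv->irq_mask at all-ones, which is what the WARN_ON in
 * vlv_display_irq_postinstall() checks for, i.e. reset must have run
 * first. The postinstall then programs the IMR as the complement of the
 * enable mask through GEN3_IRQ_INIT(), so irq_mask always mirrors the
 * hardware mask register.
 */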
35448bb61306SVille Syrjälä 
35458bb61306SVille Syrjälä /* drm_dma.h hooks */
3547b318b824SVille Syrjälä static void ironlake_irq_reset(struct drm_i915_private *dev_priv)
35488bb61306SVille Syrjälä {
3549b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
35508bb61306SVille Syrjälä 
3551b16b2a2fSPaulo Zanoni 	GEN3_IRQ_RESET(uncore, DE);
3552cf819effSLucas De Marchi 	if (IS_GEN(dev_priv, 7))
3553f0818984STvrtko Ursulin 		intel_uncore_write(uncore, GEN7_ERR_INT, 0xffffffff);
35548bb61306SVille Syrjälä 
3555fc340442SDaniel Vetter 	if (IS_HASWELL(dev_priv)) {
3556f0818984STvrtko Ursulin 		intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
3557f0818984STvrtko Ursulin 		intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
3558fc340442SDaniel Vetter 	}
3559fc340442SDaniel Vetter 
3560b243f530STvrtko Ursulin 	gen5_gt_irq_reset(dev_priv);
35618bb61306SVille Syrjälä 
3562b243f530STvrtko Ursulin 	ibx_irq_reset(dev_priv);
35638bb61306SVille Syrjälä }
35648bb61306SVille Syrjälä 
3565b318b824SVille Syrjälä static void valleyview_irq_reset(struct drm_i915_private *dev_priv)
35667e231dbeSJesse Barnes {
356734c7b8a7SVille Syrjälä 	I915_WRITE(VLV_MASTER_IER, 0);
356834c7b8a7SVille Syrjälä 	POSTING_READ(VLV_MASTER_IER);
356934c7b8a7SVille Syrjälä 
3570b243f530STvrtko Ursulin 	gen5_gt_irq_reset(dev_priv);
35717e231dbeSJesse Barnes 
3572ad22d106SVille Syrjälä 	spin_lock_irq(&dev_priv->irq_lock);
35739918271eSVille Syrjälä 	if (dev_priv->display_irqs_enabled)
357470591a41SVille Syrjälä 		vlv_display_irq_reset(dev_priv);
3575ad22d106SVille Syrjälä 	spin_unlock_irq(&dev_priv->irq_lock);
35767e231dbeSJesse Barnes }
35777e231dbeSJesse Barnes 
3578d6e3cca3SDaniel Vetter static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
3579d6e3cca3SDaniel Vetter {
3580b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
3581b16b2a2fSPaulo Zanoni 
3582b16b2a2fSPaulo Zanoni 	GEN8_IRQ_RESET_NDX(uncore, GT, 0);
3583b16b2a2fSPaulo Zanoni 	GEN8_IRQ_RESET_NDX(uncore, GT, 1);
3584b16b2a2fSPaulo Zanoni 	GEN8_IRQ_RESET_NDX(uncore, GT, 2);
3585b16b2a2fSPaulo Zanoni 	GEN8_IRQ_RESET_NDX(uncore, GT, 3);
3586d6e3cca3SDaniel Vetter }
3587d6e3cca3SDaniel Vetter 
3588b318b824SVille Syrjälä static void gen8_irq_reset(struct drm_i915_private *dev_priv)
3589abd58f01SBen Widawsky {
3590b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
3591abd58f01SBen Widawsky 	int pipe;
3592abd58f01SBen Widawsky 
359325286aacSDaniele Ceraolo Spurio 	gen8_master_intr_disable(dev_priv->uncore.regs);
3594abd58f01SBen Widawsky 
3595d6e3cca3SDaniel Vetter 	gen8_gt_irq_reset(dev_priv);
3596abd58f01SBen Widawsky 
3597f0818984STvrtko Ursulin 	intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
3598f0818984STvrtko Ursulin 	intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
3599e04f7eceSVille Syrjälä 
3600055e393fSDamien Lespiau 	for_each_pipe(dev_priv, pipe)
3601f458ebbcSDaniel Vetter 		if (intel_display_power_is_enabled(dev_priv,
3602813bde43SPaulo Zanoni 						   POWER_DOMAIN_PIPE(pipe)))
3603b16b2a2fSPaulo Zanoni 			GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
3604abd58f01SBen Widawsky 
3605b16b2a2fSPaulo Zanoni 	GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
3606b16b2a2fSPaulo Zanoni 	GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);
3607b16b2a2fSPaulo Zanoni 	GEN3_IRQ_RESET(uncore, GEN8_PCU_);
3608abd58f01SBen Widawsky 
36096e266956STvrtko Ursulin 	if (HAS_PCH_SPLIT(dev_priv))
3610b243f530STvrtko Ursulin 		ibx_irq_reset(dev_priv);
3611abd58f01SBen Widawsky }
3612abd58f01SBen Widawsky 
36139b77011eSTvrtko Ursulin static void gen11_gt_irq_reset(struct intel_gt *gt)
361451951ae7SMika Kuoppala {
3615f0818984STvrtko Ursulin 	struct intel_uncore *uncore = gt->uncore;
36169b77011eSTvrtko Ursulin 
361751951ae7SMika Kuoppala 	/* Disable RCS, BCS, VCS and VECS class engines. */
3618f0818984STvrtko Ursulin 	intel_uncore_write(uncore, GEN11_RENDER_COPY_INTR_ENABLE, 0);
3619f0818984STvrtko Ursulin 	intel_uncore_write(uncore, GEN11_VCS_VECS_INTR_ENABLE,	  0);
362051951ae7SMika Kuoppala 
362151951ae7SMika Kuoppala 	/* Restore the irq masks on RCS, BCS, VCS and VECS engines. */
3622f0818984STvrtko Ursulin 	intel_uncore_write(uncore, GEN11_RCS0_RSVD_INTR_MASK,	~0);
3623f0818984STvrtko Ursulin 	intel_uncore_write(uncore, GEN11_BCS_RSVD_INTR_MASK,	~0);
3624f0818984STvrtko Ursulin 	intel_uncore_write(uncore, GEN11_VCS0_VCS1_INTR_MASK,	~0);
3625f0818984STvrtko Ursulin 	intel_uncore_write(uncore, GEN11_VCS2_VCS3_INTR_MASK,	~0);
3626f0818984STvrtko Ursulin 	intel_uncore_write(uncore, GEN11_VECS0_VECS1_INTR_MASK,	~0);
3627d02b98b8SOscar Mateo 
3628f0818984STvrtko Ursulin 	intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
3629f0818984STvrtko Ursulin 	intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_MASK,  ~0);
3630f0818984STvrtko Ursulin 	intel_uncore_write(uncore, GEN11_GUC_SG_INTR_ENABLE, 0);
3631f0818984STvrtko Ursulin 	intel_uncore_write(uncore, GEN11_GUC_SG_INTR_MASK,  ~0);
363251951ae7SMika Kuoppala }
363351951ae7SMika Kuoppala 
3634b318b824SVille Syrjälä static void gen11_irq_reset(struct drm_i915_private *dev_priv)
363551951ae7SMika Kuoppala {
3636b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
363751951ae7SMika Kuoppala 	int pipe;
363851951ae7SMika Kuoppala 
363925286aacSDaniele Ceraolo Spurio 	gen11_master_intr_disable(dev_priv->uncore.regs);
364051951ae7SMika Kuoppala 
36419b77011eSTvrtko Ursulin 	gen11_gt_irq_reset(&dev_priv->gt);
364251951ae7SMika Kuoppala 
3643f0818984STvrtko Ursulin 	intel_uncore_write(uncore, GEN11_DISPLAY_INT_CTL, 0);
364451951ae7SMika Kuoppala 
3645f0818984STvrtko Ursulin 	intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
3646f0818984STvrtko Ursulin 	intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
364762819dfdSJosé Roberto de Souza 
364851951ae7SMika Kuoppala 	for_each_pipe(dev_priv, pipe)
364951951ae7SMika Kuoppala 		if (intel_display_power_is_enabled(dev_priv,
365051951ae7SMika Kuoppala 						   POWER_DOMAIN_PIPE(pipe)))
3651b16b2a2fSPaulo Zanoni 			GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
365251951ae7SMika Kuoppala 
3653b16b2a2fSPaulo Zanoni 	GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
3654b16b2a2fSPaulo Zanoni 	GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);
3655b16b2a2fSPaulo Zanoni 	GEN3_IRQ_RESET(uncore, GEN11_DE_HPD_);
3656b16b2a2fSPaulo Zanoni 	GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_);
3657b16b2a2fSPaulo Zanoni 	GEN3_IRQ_RESET(uncore, GEN8_PCU_);
365831604222SAnusha Srivatsa 
365929b43ae2SRodrigo Vivi 	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
3660b16b2a2fSPaulo Zanoni 		GEN3_IRQ_RESET(uncore, SDE);
366151951ae7SMika Kuoppala }
366251951ae7SMika Kuoppala 
36634c6c03beSDamien Lespiau void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
3664001bd2cbSImre Deak 				     u8 pipe_mask)
3665d49bdb0eSPaulo Zanoni {
3666b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
3667b16b2a2fSPaulo Zanoni 
3668a9c287c9SJani Nikula 	u32 extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
36696831f3e3SVille Syrjälä 	enum pipe pipe;
3670d49bdb0eSPaulo Zanoni 
367113321786SDaniel Vetter 	spin_lock_irq(&dev_priv->irq_lock);
36729dfe2e3aSImre Deak 
36739dfe2e3aSImre Deak 	if (!intel_irqs_enabled(dev_priv)) {
36749dfe2e3aSImre Deak 		spin_unlock_irq(&dev_priv->irq_lock);
36759dfe2e3aSImre Deak 		return;
36769dfe2e3aSImre Deak 	}
36779dfe2e3aSImre Deak 
36786831f3e3SVille Syrjälä 	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
3679b16b2a2fSPaulo Zanoni 		GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
36806831f3e3SVille Syrjälä 				  dev_priv->de_irq_mask[pipe],
36816831f3e3SVille Syrjälä 				  ~dev_priv->de_irq_mask[pipe] | extra_ier);
36829dfe2e3aSImre Deak 
368313321786SDaniel Vetter 	spin_unlock_irq(&dev_priv->irq_lock);
3684d49bdb0eSPaulo Zanoni }
3685d49bdb0eSPaulo Zanoni 
3686aae8ba84SVille Syrjälä void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
3687001bd2cbSImre Deak 				     u8 pipe_mask)
3688aae8ba84SVille Syrjälä {
3689b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
36906831f3e3SVille Syrjälä 	enum pipe pipe;
36916831f3e3SVille Syrjälä 
3692aae8ba84SVille Syrjälä 	spin_lock_irq(&dev_priv->irq_lock);
36939dfe2e3aSImre Deak 
36949dfe2e3aSImre Deak 	if (!intel_irqs_enabled(dev_priv)) {
36959dfe2e3aSImre Deak 		spin_unlock_irq(&dev_priv->irq_lock);
36969dfe2e3aSImre Deak 		return;
36979dfe2e3aSImre Deak 	}
36989dfe2e3aSImre Deak 
36996831f3e3SVille Syrjälä 	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
3700b16b2a2fSPaulo Zanoni 		GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
37019dfe2e3aSImre Deak 
3702aae8ba84SVille Syrjälä 	spin_unlock_irq(&dev_priv->irq_lock);
3703aae8ba84SVille Syrjälä 
3704aae8ba84SVille Syrjälä 	/* make sure we're done processing display irqs */
3705315ca4c4SVille Syrjälä 	intel_synchronize_irq(dev_priv);
3706aae8ba84SVille Syrjälä }
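
/*
 * gen8_irq_power_well_post_enable()/pre_disable() exist because the
 * per-pipe interrupt registers live inside the display power wells and
 * lose their contents when a well is powered down. Presumably called
 * from the display power code when pipe power domains toggle: the
 * post-enable hook reprograms each pipe from the cached de_irq_mask[],
 * and the pre-disable hook resets the registers and waits for any
 * in-flight interrupt processing to finish.
 */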
3707aae8ba84SVille Syrjälä 
3708b318b824SVille Syrjälä static void cherryview_irq_reset(struct drm_i915_private *dev_priv)
370943f328d7SVille Syrjälä {
3710b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
371143f328d7SVille Syrjälä 
371243f328d7SVille Syrjälä 	I915_WRITE(GEN8_MASTER_IRQ, 0);
371343f328d7SVille Syrjälä 	POSTING_READ(GEN8_MASTER_IRQ);
371443f328d7SVille Syrjälä 
3715d6e3cca3SDaniel Vetter 	gen8_gt_irq_reset(dev_priv);
371643f328d7SVille Syrjälä 
3717b16b2a2fSPaulo Zanoni 	GEN3_IRQ_RESET(uncore, GEN8_PCU_);
371843f328d7SVille Syrjälä 
3719ad22d106SVille Syrjälä 	spin_lock_irq(&dev_priv->irq_lock);
37209918271eSVille Syrjälä 	if (dev_priv->display_irqs_enabled)
372170591a41SVille Syrjälä 		vlv_display_irq_reset(dev_priv);
3722ad22d106SVille Syrjälä 	spin_unlock_irq(&dev_priv->irq_lock);
372343f328d7SVille Syrjälä }
372443f328d7SVille Syrjälä 
372591d14251STvrtko Ursulin static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv,
372687a02106SVille Syrjälä 				  const u32 hpd[HPD_NUM_PINS])
372787a02106SVille Syrjälä {
372887a02106SVille Syrjälä 	struct intel_encoder *encoder;
372987a02106SVille Syrjälä 	u32 enabled_irqs = 0;
373087a02106SVille Syrjälä 
373191c8a326SChris Wilson 	for_each_intel_encoder(&dev_priv->drm, encoder)
373287a02106SVille Syrjälä 		if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
373387a02106SVille Syrjälä 			enabled_irqs |= hpd[encoder->hpd_pin];
373487a02106SVille Syrjälä 
373587a02106SVille Syrjälä 	return enabled_irqs;
373687a02106SVille Syrjälä }
373787a02106SVille Syrjälä 
37381a56b1a2SImre Deak static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv)
37391a56b1a2SImre Deak {
37401a56b1a2SImre Deak 	u32 hotplug;
37411a56b1a2SImre Deak 
37421a56b1a2SImre Deak 	/*
37431a56b1a2SImre Deak 	 * Enable digital hotplug on the PCH, and configure the DP short pulse
37441a56b1a2SImre Deak 	 * duration to 2ms (which is the minimum in the Display Port spec).
37451a56b1a2SImre Deak 	 * The pulse duration bits are reserved on LPT+.
37461a56b1a2SImre Deak 	 */
37471a56b1a2SImre Deak 	hotplug = I915_READ(PCH_PORT_HOTPLUG);
37481a56b1a2SImre Deak 	hotplug &= ~(PORTB_PULSE_DURATION_MASK |
37491a56b1a2SImre Deak 		     PORTC_PULSE_DURATION_MASK |
37501a56b1a2SImre Deak 		     PORTD_PULSE_DURATION_MASK);
37511a56b1a2SImre Deak 	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
37521a56b1a2SImre Deak 	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
37531a56b1a2SImre Deak 	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
37541a56b1a2SImre Deak 	/*
37551a56b1a2SImre Deak 	 * When CPU and PCH are on the same package, port A
37561a56b1a2SImre Deak 	 * HPD must be enabled in both north and south.
37571a56b1a2SImre Deak 	 */
37581a56b1a2SImre Deak 	if (HAS_PCH_LPT_LP(dev_priv))
37591a56b1a2SImre Deak 		hotplug |= PORTA_HOTPLUG_ENABLE;
37601a56b1a2SImre Deak 	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
37611a56b1a2SImre Deak }
37621a56b1a2SImre Deak 
376391d14251STvrtko Ursulin static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
376482a28bcfSDaniel Vetter {
37651a56b1a2SImre Deak 	u32 hotplug_irqs, enabled_irqs;
376682a28bcfSDaniel Vetter 
376791d14251STvrtko Ursulin 	if (HAS_PCH_IBX(dev_priv)) {
3768fee884edSDaniel Vetter 		hotplug_irqs = SDE_HOTPLUG_MASK;
376991d14251STvrtko Ursulin 		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ibx);
377082a28bcfSDaniel Vetter 	} else {
3771fee884edSDaniel Vetter 		hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
377291d14251STvrtko Ursulin 		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_cpt);
377382a28bcfSDaniel Vetter 	}
377482a28bcfSDaniel Vetter 
3775fee884edSDaniel Vetter 	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
377682a28bcfSDaniel Vetter 
37771a56b1a2SImre Deak 	ibx_hpd_detection_setup(dev_priv);
37786dbf30ceSVille Syrjälä }
377926951cafSXiong Zhang 
378031604222SAnusha Srivatsa static void icp_hpd_detection_setup(struct drm_i915_private *dev_priv)
378131604222SAnusha Srivatsa {
378231604222SAnusha Srivatsa 	u32 hotplug;
378331604222SAnusha Srivatsa 
378431604222SAnusha Srivatsa 	hotplug = I915_READ(SHOTPLUG_CTL_DDI);
378531604222SAnusha Srivatsa 	hotplug |= ICP_DDIA_HPD_ENABLE |
378631604222SAnusha Srivatsa 		   ICP_DDIB_HPD_ENABLE;
378731604222SAnusha Srivatsa 	I915_WRITE(SHOTPLUG_CTL_DDI, hotplug);
378831604222SAnusha Srivatsa 
378931604222SAnusha Srivatsa 	hotplug = I915_READ(SHOTPLUG_CTL_TC);
379031604222SAnusha Srivatsa 	hotplug |= ICP_TC_HPD_ENABLE(PORT_TC1) |
379131604222SAnusha Srivatsa 		   ICP_TC_HPD_ENABLE(PORT_TC2) |
379231604222SAnusha Srivatsa 		   ICP_TC_HPD_ENABLE(PORT_TC3) |
379331604222SAnusha Srivatsa 		   ICP_TC_HPD_ENABLE(PORT_TC4);
379431604222SAnusha Srivatsa 	I915_WRITE(SHOTPLUG_CTL_TC, hotplug);
379531604222SAnusha Srivatsa }
379631604222SAnusha Srivatsa 
379731604222SAnusha Srivatsa static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv)
379831604222SAnusha Srivatsa {
379931604222SAnusha Srivatsa 	u32 hotplug_irqs, enabled_irqs;
380031604222SAnusha Srivatsa 
380131604222SAnusha Srivatsa 	hotplug_irqs = SDE_DDI_MASK_ICP | SDE_TC_MASK_ICP;
380231604222SAnusha Srivatsa 	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_icp);
380331604222SAnusha Srivatsa 
380431604222SAnusha Srivatsa 	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
380531604222SAnusha Srivatsa 
380631604222SAnusha Srivatsa 	icp_hpd_detection_setup(dev_priv);
380731604222SAnusha Srivatsa }
380831604222SAnusha Srivatsa 
3809121e758eSDhinakaran Pandiyan static void gen11_hpd_detection_setup(struct drm_i915_private *dev_priv)
3810121e758eSDhinakaran Pandiyan {
3811121e758eSDhinakaran Pandiyan 	u32 hotplug;
3812121e758eSDhinakaran Pandiyan 
3813121e758eSDhinakaran Pandiyan 	hotplug = I915_READ(GEN11_TC_HOTPLUG_CTL);
3814121e758eSDhinakaran Pandiyan 	hotplug |= GEN11_HOTPLUG_CTL_ENABLE(PORT_TC1) |
3815121e758eSDhinakaran Pandiyan 		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC2) |
3816121e758eSDhinakaran Pandiyan 		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC3) |
3817121e758eSDhinakaran Pandiyan 		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4);
3818121e758eSDhinakaran Pandiyan 	I915_WRITE(GEN11_TC_HOTPLUG_CTL, hotplug);
3819b796b971SDhinakaran Pandiyan 
3820b796b971SDhinakaran Pandiyan 	hotplug = I915_READ(GEN11_TBT_HOTPLUG_CTL);
3821b796b971SDhinakaran Pandiyan 	hotplug |= GEN11_HOTPLUG_CTL_ENABLE(PORT_TC1) |
3822b796b971SDhinakaran Pandiyan 		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC2) |
3823b796b971SDhinakaran Pandiyan 		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC3) |
3824b796b971SDhinakaran Pandiyan 		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4);
3825b796b971SDhinakaran Pandiyan 	I915_WRITE(GEN11_TBT_HOTPLUG_CTL, hotplug);
3826121e758eSDhinakaran Pandiyan }
3827121e758eSDhinakaran Pandiyan 
3828121e758eSDhinakaran Pandiyan static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
3829121e758eSDhinakaran Pandiyan {
3830121e758eSDhinakaran Pandiyan 	u32 hotplug_irqs, enabled_irqs;
3831121e758eSDhinakaran Pandiyan 	u32 val;
3832121e758eSDhinakaran Pandiyan 
3833b796b971SDhinakaran Pandiyan 	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_gen11);
3834b796b971SDhinakaran Pandiyan 	hotplug_irqs = GEN11_DE_TC_HOTPLUG_MASK | GEN11_DE_TBT_HOTPLUG_MASK;
3835121e758eSDhinakaran Pandiyan 
3836121e758eSDhinakaran Pandiyan 	val = I915_READ(GEN11_DE_HPD_IMR);
3837121e758eSDhinakaran Pandiyan 	val &= ~hotplug_irqs;
3838121e758eSDhinakaran Pandiyan 	I915_WRITE(GEN11_DE_HPD_IMR, val);
3839121e758eSDhinakaran Pandiyan 	POSTING_READ(GEN11_DE_HPD_IMR);
3840121e758eSDhinakaran Pandiyan 
3841121e758eSDhinakaran Pandiyan 	gen11_hpd_detection_setup(dev_priv);
384231604222SAnusha Srivatsa 
384329b43ae2SRodrigo Vivi 	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
384431604222SAnusha Srivatsa 		icp_hpd_irq_setup(dev_priv);
3845121e758eSDhinakaran Pandiyan }
3846121e758eSDhinakaran Pandiyan 
38472a57d9ccSImre Deak static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv)
38482a57d9ccSImre Deak {
38493b92e263SRodrigo Vivi 	u32 val, hotplug;
38503b92e263SRodrigo Vivi 
38513b92e263SRodrigo Vivi 	/* Display WA #1179 WaHardHangonHotPlug: cnp */
38523b92e263SRodrigo Vivi 	if (HAS_PCH_CNP(dev_priv)) {
38533b92e263SRodrigo Vivi 		val = I915_READ(SOUTH_CHICKEN1);
38543b92e263SRodrigo Vivi 		val &= ~CHASSIS_CLK_REQ_DURATION_MASK;
38553b92e263SRodrigo Vivi 		val |= CHASSIS_CLK_REQ_DURATION(0xf);
38563b92e263SRodrigo Vivi 		I915_WRITE(SOUTH_CHICKEN1, val);
38573b92e263SRodrigo Vivi 	}
38582a57d9ccSImre Deak 
38592a57d9ccSImre Deak 	/* Enable digital hotplug on the PCH */
38602a57d9ccSImre Deak 	hotplug = I915_READ(PCH_PORT_HOTPLUG);
38612a57d9ccSImre Deak 	hotplug |= PORTA_HOTPLUG_ENABLE |
38622a57d9ccSImre Deak 		   PORTB_HOTPLUG_ENABLE |
38632a57d9ccSImre Deak 		   PORTC_HOTPLUG_ENABLE |
38642a57d9ccSImre Deak 		   PORTD_HOTPLUG_ENABLE;
38652a57d9ccSImre Deak 	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
38662a57d9ccSImre Deak 
38672a57d9ccSImre Deak 	hotplug = I915_READ(PCH_PORT_HOTPLUG2);
38682a57d9ccSImre Deak 	hotplug |= PORTE_HOTPLUG_ENABLE;
38692a57d9ccSImre Deak 	I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
38702a57d9ccSImre Deak }
38712a57d9ccSImre Deak 
387291d14251STvrtko Ursulin static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
38736dbf30ceSVille Syrjälä {
38742a57d9ccSImre Deak 	u32 hotplug_irqs, enabled_irqs;
38756dbf30ceSVille Syrjälä 
38766dbf30ceSVille Syrjälä 	hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
387791d14251STvrtko Ursulin 	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_spt);
38786dbf30ceSVille Syrjälä 
38796dbf30ceSVille Syrjälä 	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
38806dbf30ceSVille Syrjälä 
38812a57d9ccSImre Deak 	spt_hpd_detection_setup(dev_priv);
388226951cafSXiong Zhang }
38837fe0b973SKeith Packard 
38841a56b1a2SImre Deak static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv)
38851a56b1a2SImre Deak {
38861a56b1a2SImre Deak 	u32 hotplug;
38871a56b1a2SImre Deak 
38881a56b1a2SImre Deak 	/*
38891a56b1a2SImre Deak 	 * Enable digital hotplug on the CPU, and configure the DP short pulse
38901a56b1a2SImre Deak 	 * duration to 2ms (which is the minimum in the Display Port spec).
38911a56b1a2SImre Deak 	 * The pulse duration bits are reserved on HSW+.
38921a56b1a2SImre Deak 	 */
38931a56b1a2SImre Deak 	hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
38941a56b1a2SImre Deak 	hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK;
38951a56b1a2SImre Deak 	hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE |
38961a56b1a2SImre Deak 		   DIGITAL_PORTA_PULSE_DURATION_2ms;
38971a56b1a2SImre Deak 	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
38981a56b1a2SImre Deak }
38991a56b1a2SImre Deak 
390091d14251STvrtko Ursulin static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
3901e4ce95aaSVille Syrjälä {
39021a56b1a2SImre Deak 	u32 hotplug_irqs, enabled_irqs;
3903e4ce95aaSVille Syrjälä 
390491d14251STvrtko Ursulin 	if (INTEL_GEN(dev_priv) >= 8) {
39053a3b3c7dSVille Syrjälä 		hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG;
390691d14251STvrtko Ursulin 		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bdw);
39073a3b3c7dSVille Syrjälä 
39083a3b3c7dSVille Syrjälä 		bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
390991d14251STvrtko Ursulin 	} else if (INTEL_GEN(dev_priv) >= 7) {
391023bb4cb5SVille Syrjälä 		hotplug_irqs = DE_DP_A_HOTPLUG_IVB;
391191d14251STvrtko Ursulin 		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ivb);
39123a3b3c7dSVille Syrjälä 
39133a3b3c7dSVille Syrjälä 		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
391423bb4cb5SVille Syrjälä 	} else {
3915e4ce95aaSVille Syrjälä 		hotplug_irqs = DE_DP_A_HOTPLUG;
391691d14251STvrtko Ursulin 		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ilk);
3917e4ce95aaSVille Syrjälä 
3918e4ce95aaSVille Syrjälä 		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
39193a3b3c7dSVille Syrjälä 	}
3920e4ce95aaSVille Syrjälä 
39211a56b1a2SImre Deak 	ilk_hpd_detection_setup(dev_priv);
3922e4ce95aaSVille Syrjälä 
392391d14251STvrtko Ursulin 	ibx_hpd_irq_setup(dev_priv);
3924e4ce95aaSVille Syrjälä }
3925e4ce95aaSVille Syrjälä 
39262a57d9ccSImre Deak static void __bxt_hpd_detection_setup(struct drm_i915_private *dev_priv,
39272a57d9ccSImre Deak 				      u32 enabled_irqs)
3928e0a20ad7SShashank Sharma {
39292a57d9ccSImre Deak 	u32 hotplug;
3930e0a20ad7SShashank Sharma 
3931a52bb15bSVille Syrjälä 	hotplug = I915_READ(PCH_PORT_HOTPLUG);
39322a57d9ccSImre Deak 	hotplug |= PORTA_HOTPLUG_ENABLE |
39332a57d9ccSImre Deak 		   PORTB_HOTPLUG_ENABLE |
39342a57d9ccSImre Deak 		   PORTC_HOTPLUG_ENABLE;
3935d252bf68SShubhangi Shrivastava 
3936d252bf68SShubhangi Shrivastava 	DRM_DEBUG_KMS("Invert bit setting: hp_ctl:%x hp_port:%x\n",
3937d252bf68SShubhangi Shrivastava 		      hotplug, enabled_irqs);
3938d252bf68SShubhangi Shrivastava 	hotplug &= ~BXT_DDI_HPD_INVERT_MASK;
3939d252bf68SShubhangi Shrivastava 
3940d252bf68SShubhangi Shrivastava 	/*
3941d252bf68SShubhangi Shrivastava 	 * For BXT the invert bit has to be set based on the AOB design
3942d252bf68SShubhangi Shrivastava 	 * for the HPD detection logic; update it from the VBT fields.
3943d252bf68SShubhangi Shrivastava 	 */
3944d252bf68SShubhangi Shrivastava 	if ((enabled_irqs & BXT_DE_PORT_HP_DDIA) &&
3945d252bf68SShubhangi Shrivastava 	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_A))
3946d252bf68SShubhangi Shrivastava 		hotplug |= BXT_DDIA_HPD_INVERT;
3947d252bf68SShubhangi Shrivastava 	if ((enabled_irqs & BXT_DE_PORT_HP_DDIB) &&
3948d252bf68SShubhangi Shrivastava 	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_B))
3949d252bf68SShubhangi Shrivastava 		hotplug |= BXT_DDIB_HPD_INVERT;
3950d252bf68SShubhangi Shrivastava 	if ((enabled_irqs & BXT_DE_PORT_HP_DDIC) &&
3951d252bf68SShubhangi Shrivastava 	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_C))
3952d252bf68SShubhangi Shrivastava 		hotplug |= BXT_DDIC_HPD_INVERT;
3953d252bf68SShubhangi Shrivastava 
3954a52bb15bSVille Syrjälä 	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3955e0a20ad7SShashank Sharma }
3956e0a20ad7SShashank Sharma 
39572a57d9ccSImre Deak static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv)
39582a57d9ccSImre Deak {
39592a57d9ccSImre Deak 	__bxt_hpd_detection_setup(dev_priv, BXT_DE_PORT_HOTPLUG_MASK);
39602a57d9ccSImre Deak }
39612a57d9ccSImre Deak 
39622a57d9ccSImre Deak static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
39632a57d9ccSImre Deak {
39642a57d9ccSImre Deak 	u32 hotplug_irqs, enabled_irqs;
39652a57d9ccSImre Deak 
39662a57d9ccSImre Deak 	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bxt);
39672a57d9ccSImre Deak 	hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK;
39682a57d9ccSImre Deak 
39692a57d9ccSImre Deak 	bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
39702a57d9ccSImre Deak 
39712a57d9ccSImre Deak 	__bxt_hpd_detection_setup(dev_priv, enabled_irqs);
39722a57d9ccSImre Deak }
39732a57d9ccSImre Deak 
3974b318b824SVille Syrjälä static void ibx_irq_postinstall(struct drm_i915_private *dev_priv)
3975d46da437SPaulo Zanoni {
397682a28bcfSDaniel Vetter 	u32 mask;
3977d46da437SPaulo Zanoni 
39786e266956STvrtko Ursulin 	if (HAS_PCH_NOP(dev_priv))
3979692a04cfSDaniel Vetter 		return;
3980692a04cfSDaniel Vetter 
39816e266956STvrtko Ursulin 	if (HAS_PCH_IBX(dev_priv))
39825c673b60SDaniel Vetter 		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
39834ebc6509SDhinakaran Pandiyan 	else if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
39845c673b60SDaniel Vetter 		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
39854ebc6509SDhinakaran Pandiyan 	else
39864ebc6509SDhinakaran Pandiyan 		mask = SDE_GMBUS_CPT;
39878664281bSPaulo Zanoni 
398865f42cdcSPaulo Zanoni 	gen3_assert_iir_is_zero(&dev_priv->uncore, SDEIIR);
3989d46da437SPaulo Zanoni 	I915_WRITE(SDEIMR, ~mask);
39902a57d9ccSImre Deak 
39912a57d9ccSImre Deak 	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
39922a57d9ccSImre Deak 	    HAS_PCH_LPT(dev_priv))
39931a56b1a2SImre Deak 		ibx_hpd_detection_setup(dev_priv);
39942a57d9ccSImre Deak 	else
39952a57d9ccSImre Deak 		spt_hpd_detection_setup(dev_priv);
3996d46da437SPaulo Zanoni }
3997d46da437SPaulo Zanoni 
3998b318b824SVille Syrjälä static void gen5_gt_irq_postinstall(struct drm_i915_private *dev_priv)
39990a9a8c91SDaniel Vetter {
4000b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
40010a9a8c91SDaniel Vetter 	u32 pm_irqs, gt_irqs;
40020a9a8c91SDaniel Vetter 
40030a9a8c91SDaniel Vetter 	pm_irqs = gt_irqs = 0;
40040a9a8c91SDaniel Vetter 
40050a9a8c91SDaniel Vetter 	dev_priv->gt_irq_mask = ~0;
40063c9192bcSTvrtko Ursulin 	if (HAS_L3_DPF(dev_priv)) {
40070a9a8c91SDaniel Vetter 		/* L3 parity interrupt is always unmasked. */
4008772c2a51STvrtko Ursulin 		dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev_priv);
4009772c2a51STvrtko Ursulin 		gt_irqs |= GT_PARITY_ERROR(dev_priv);
40100a9a8c91SDaniel Vetter 	}
40110a9a8c91SDaniel Vetter 
40120a9a8c91SDaniel Vetter 	gt_irqs |= GT_RENDER_USER_INTERRUPT;
4013cf819effSLucas De Marchi 	if (IS_GEN(dev_priv, 5)) {
4014f8973c21SChris Wilson 		gt_irqs |= ILK_BSD_USER_INTERRUPT;
40150a9a8c91SDaniel Vetter 	} else {
40160a9a8c91SDaniel Vetter 		gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
40170a9a8c91SDaniel Vetter 	}
40180a9a8c91SDaniel Vetter 
4019b16b2a2fSPaulo Zanoni 	GEN3_IRQ_INIT(uncore, GT, dev_priv->gt_irq_mask, gt_irqs);
40200a9a8c91SDaniel Vetter 
4021b243f530STvrtko Ursulin 	if (INTEL_GEN(dev_priv) >= 6) {
402278e68d36SImre Deak 		/*
402378e68d36SImre Deak 		 * RPS interrupts will get enabled/disabled on demand when RPS
402478e68d36SImre Deak 		 * itself is enabled/disabled.
402578e68d36SImre Deak 		 */
40268a68d464SChris Wilson 		if (HAS_ENGINE(dev_priv, VECS0)) {
40270a9a8c91SDaniel Vetter 			pm_irqs |= PM_VEBOX_USER_INTERRUPT;
402858820574STvrtko Ursulin 			dev_priv->gt.pm_ier |= PM_VEBOX_USER_INTERRUPT;
4029f4e9af4fSAkash Goel 		}
40300a9a8c91SDaniel Vetter 
403158820574STvrtko Ursulin 		dev_priv->gt.pm_imr = 0xffffffff;
403258820574STvrtko Ursulin 		GEN3_IRQ_INIT(uncore, GEN6_PM, dev_priv->gt.pm_imr, pm_irqs);
40330a9a8c91SDaniel Vetter 	}
40340a9a8c91SDaniel Vetter }
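
/*
 * At this point only the VEBOX user interrupt (when the engine exists)
 * is in gt.pm_ier, and gt.pm_imr starts out fully masked. As the comment
 * above notes, the RPS code is expected to unmask the PM interrupts it
 * needs at runtime rather than having them enabled here.
 */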
40350a9a8c91SDaniel Vetter 
4036b318b824SVille Syrjälä static void ironlake_irq_postinstall(struct drm_i915_private *dev_priv)
4037036a4a7dSZhenyu Wang {
4038b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
40398e76f8dcSPaulo Zanoni 	u32 display_mask, extra_mask;
40408e76f8dcSPaulo Zanoni 
4041b243f530STvrtko Ursulin 	if (INTEL_GEN(dev_priv) >= 7) {
40428e76f8dcSPaulo Zanoni 		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
4043842ebf7aSVille Syrjälä 				DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB);
40448e76f8dcSPaulo Zanoni 		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
404523bb4cb5SVille Syrjälä 			      DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
404623bb4cb5SVille Syrjälä 			      DE_DP_A_HOTPLUG_IVB);
40478e76f8dcSPaulo Zanoni 	} else {
40488e76f8dcSPaulo Zanoni 		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
4049842ebf7aSVille Syrjälä 				DE_AUX_CHANNEL_A | DE_PIPEB_CRC_DONE |
4050842ebf7aSVille Syrjälä 				DE_PIPEA_CRC_DONE | DE_POISON);
4051e4ce95aaSVille Syrjälä 		extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
4052e4ce95aaSVille Syrjälä 			      DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
4053e4ce95aaSVille Syrjälä 			      DE_DP_A_HOTPLUG);
40548e76f8dcSPaulo Zanoni 	}
4055036a4a7dSZhenyu Wang 
4056fc340442SDaniel Vetter 	if (IS_HASWELL(dev_priv)) {
4057b16b2a2fSPaulo Zanoni 		gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
40581aeb1b5fSDhinakaran Pandiyan 		intel_psr_irq_control(dev_priv, dev_priv->psr.debug);
4059fc340442SDaniel Vetter 		display_mask |= DE_EDP_PSR_INT_HSW;
4060fc340442SDaniel Vetter 	}
4061fc340442SDaniel Vetter 
40621ec14ad3SChris Wilson 	dev_priv->irq_mask = ~display_mask;
4063036a4a7dSZhenyu Wang 
4064b318b824SVille Syrjälä 	ibx_irq_pre_postinstall(dev_priv);
4065622364b6SPaulo Zanoni 
4066b16b2a2fSPaulo Zanoni 	GEN3_IRQ_INIT(uncore, DE, dev_priv->irq_mask,
4067b16b2a2fSPaulo Zanoni 		      display_mask | extra_mask);
4068036a4a7dSZhenyu Wang 
4069b318b824SVille Syrjälä 	gen5_gt_irq_postinstall(dev_priv);
4070036a4a7dSZhenyu Wang 
40711a56b1a2SImre Deak 	ilk_hpd_detection_setup(dev_priv);
40721a56b1a2SImre Deak 
4073b318b824SVille Syrjälä 	ibx_irq_postinstall(dev_priv);
40747fe0b973SKeith Packard 
407550a0bc90STvrtko Ursulin 	if (IS_IRONLAKE_M(dev_priv)) {
40766005ce42SDaniel Vetter 		/* Enable PCU event interrupts
40776005ce42SDaniel Vetter 		 *
40786005ce42SDaniel Vetter 		 * spinlocking not required here for correctness since interrupt
40794bc9d430SDaniel Vetter 		 * setup is guaranteed to run in single-threaded context. But we
40804bc9d430SDaniel Vetter 		 * need it to make the assert_spin_locked happy. */
4081d6207435SDaniel Vetter 		spin_lock_irq(&dev_priv->irq_lock);
4082fbdedaeaSVille Syrjälä 		ilk_enable_display_irq(dev_priv, DE_PCU_EVENT);
4083d6207435SDaniel Vetter 		spin_unlock_irq(&dev_priv->irq_lock);
4084f97108d1SJesse Barnes 	}
4085036a4a7dSZhenyu Wang }
4086036a4a7dSZhenyu Wang 
4087f8b79e58SImre Deak void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
4088f8b79e58SImre Deak {
408967520415SChris Wilson 	lockdep_assert_held(&dev_priv->irq_lock);
4090f8b79e58SImre Deak 
4091f8b79e58SImre Deak 	if (dev_priv->display_irqs_enabled)
4092f8b79e58SImre Deak 		return;
4093f8b79e58SImre Deak 
4094f8b79e58SImre Deak 	dev_priv->display_irqs_enabled = true;
4095f8b79e58SImre Deak 
4096d6c69803SVille Syrjälä 	if (intel_irqs_enabled(dev_priv)) {
4097d6c69803SVille Syrjälä 		vlv_display_irq_reset(dev_priv);
4098ad22d106SVille Syrjälä 		vlv_display_irq_postinstall(dev_priv);
4099f8b79e58SImre Deak 	}
4100d6c69803SVille Syrjälä }
4101f8b79e58SImre Deak 
4102f8b79e58SImre Deak void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
4103f8b79e58SImre Deak {
410467520415SChris Wilson 	lockdep_assert_held(&dev_priv->irq_lock);
4105f8b79e58SImre Deak 
4106f8b79e58SImre Deak 	if (!dev_priv->display_irqs_enabled)
4107f8b79e58SImre Deak 		return;
4108f8b79e58SImre Deak 
4109f8b79e58SImre Deak 	dev_priv->display_irqs_enabled = false;
4110f8b79e58SImre Deak 
4111950eabafSImre Deak 	if (intel_irqs_enabled(dev_priv))
4112ad22d106SVille Syrjälä 		vlv_display_irq_reset(dev_priv);
4113f8b79e58SImre Deak }
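
/*
 * The two helpers above gate the VLV/CHV display interrupt setup behind
 * display_irqs_enabled, presumably so the display power code can keep it
 * disabled while the display power well is down. When interrupts are
 * already installed, enabling does a full reset followed by a postinstall
 * so the hardware state matches the new software state.
 */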
4114f8b79e58SImre Deak 
41150e6c9a9eSVille Syrjälä 
4116b318b824SVille Syrjälä static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv)
41170e6c9a9eSVille Syrjälä {
4118b318b824SVille Syrjälä 	gen5_gt_irq_postinstall(dev_priv);
41197e231dbeSJesse Barnes 
4120ad22d106SVille Syrjälä 	spin_lock_irq(&dev_priv->irq_lock);
41219918271eSVille Syrjälä 	if (dev_priv->display_irqs_enabled)
4122ad22d106SVille Syrjälä 		vlv_display_irq_postinstall(dev_priv);
4123ad22d106SVille Syrjälä 	spin_unlock_irq(&dev_priv->irq_lock);
4124ad22d106SVille Syrjälä 
41257e231dbeSJesse Barnes 	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
412634c7b8a7SVille Syrjälä 	POSTING_READ(VLV_MASTER_IER);
412720afbda2SDaniel Vetter }
412820afbda2SDaniel Vetter 
412958820574STvrtko Ursulin static void gen8_gt_irq_postinstall(struct drm_i915_private *i915)
4130abd58f01SBen Widawsky {
413158820574STvrtko Ursulin 	struct intel_gt *gt = &i915->gt;
413258820574STvrtko Ursulin 	struct intel_uncore *uncore = gt->uncore;
4133b16b2a2fSPaulo Zanoni 
4134abd58f01SBen Widawsky 	/* These are interrupts we'll toggle with the ring mask register */
4135a9c287c9SJani Nikula 	u32 gt_interrupts[] = {
41368a68d464SChris Wilson 		(GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
413773d477f6SOscar Mateo 		 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
413873d477f6SOscar Mateo 		 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
41398a68d464SChris Wilson 		 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT),
41408a68d464SChris Wilson 
41418a68d464SChris Wilson 		(GT_RENDER_USER_INTERRUPT << GEN8_VCS0_IRQ_SHIFT |
41428a68d464SChris Wilson 		 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS0_IRQ_SHIFT |
4143abd58f01SBen Widawsky 		 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
41448a68d464SChris Wilson 		 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT),
41458a68d464SChris Wilson 
4146abd58f01SBen Widawsky 		0,
41478a68d464SChris Wilson 
41488a68d464SChris Wilson 		(GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
41498a68d464SChris Wilson 		 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT)
4150abd58f01SBen Widawsky 	};
4151abd58f01SBen Widawsky 
415258820574STvrtko Ursulin 	gt->pm_ier = 0x0;
415358820574STvrtko Ursulin 	gt->pm_imr = ~gt->pm_ier;
4154b16b2a2fSPaulo Zanoni 	GEN8_IRQ_INIT_NDX(uncore, GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
4155b16b2a2fSPaulo Zanoni 	GEN8_IRQ_INIT_NDX(uncore, GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
415678e68d36SImre Deak 	/*
415778e68d36SImre Deak 	 * RPS interrupts will get enabled/disabled on demand when RPS itself
415826705e20SSagar Arun Kamble 	 * is enabled/disabled. Same will be the case for GuC interrupts.
415978e68d36SImre Deak 	 */
416058820574STvrtko Ursulin 	GEN8_IRQ_INIT_NDX(uncore, GT, 2, gt->pm_imr, gt->pm_ier);
4161b16b2a2fSPaulo Zanoni 	GEN8_IRQ_INIT_NDX(uncore, GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
4162abd58f01SBen Widawsky }
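
/*
 * Layout note for gt_interrupts[] above: each of the four GT IMR/IER/IIR
 * register triplets serves two interrupt sources, packed into the low
 * and high halves of the register via the GEN8_*_IRQ_SHIFT offsets
 * (RCS/BCS, VCS0/VCS1, PM, VECS). Index 2 is deliberately left zero here
 * because it carries the PM/RPS bits, which are managed separately
 * through gt->pm_imr / gt->pm_ier.
 */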
4163abd58f01SBen Widawsky 
4164abd58f01SBen Widawsky static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
4165abd58f01SBen Widawsky {
4166b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
4167b16b2a2fSPaulo Zanoni 
4168a9c287c9SJani Nikula 	u32 de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
4169a9c287c9SJani Nikula 	u32 de_pipe_enables;
41703a3b3c7dSVille Syrjälä 	u32 de_port_masked = GEN8_AUX_CHANNEL_A;
41713a3b3c7dSVille Syrjälä 	u32 de_port_enables;
4172df0d28c1SDhinakaran Pandiyan 	u32 de_misc_masked = GEN8_DE_EDP_PSR;
41733a3b3c7dSVille Syrjälä 	enum pipe pipe;
4174770de83dSDamien Lespiau 
4175df0d28c1SDhinakaran Pandiyan 	if (INTEL_GEN(dev_priv) <= 10)
4176df0d28c1SDhinakaran Pandiyan 		de_misc_masked |= GEN8_DE_MISC_GSE;
4177df0d28c1SDhinakaran Pandiyan 
4178bca2bf2aSPandiyan, Dhinakaran 	if (INTEL_GEN(dev_priv) >= 9) {
4179842ebf7aSVille Syrjälä 		de_pipe_masked |= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
41803a3b3c7dSVille Syrjälä 		de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
418188e04703SJesse Barnes 				  GEN9_AUX_CHANNEL_D;
4182cc3f90f0SAnder Conselvan de Oliveira 		if (IS_GEN9_LP(dev_priv))
41833a3b3c7dSVille Syrjälä 			de_port_masked |= BXT_DE_PORT_GMBUS;
41843a3b3c7dSVille Syrjälä 	} else {
4185842ebf7aSVille Syrjälä 		de_pipe_masked |= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
41863a3b3c7dSVille Syrjälä 	}
4187770de83dSDamien Lespiau 
4188bb187e93SJames Ausmus 	if (INTEL_GEN(dev_priv) >= 11)
4189bb187e93SJames Ausmus 		de_port_masked |= ICL_AUX_CHANNEL_E;
4190bb187e93SJames Ausmus 
41919bb635d9SDhinakaran Pandiyan 	if (IS_CNL_WITH_PORT_F(dev_priv) || INTEL_GEN(dev_priv) >= 11)
4192a324fcacSRodrigo Vivi 		de_port_masked |= CNL_AUX_CHANNEL_F;
4193a324fcacSRodrigo Vivi 
4194770de83dSDamien Lespiau 	de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
4195770de83dSDamien Lespiau 					   GEN8_PIPE_FIFO_UNDERRUN;
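	/*
	 * Vblank and FIFO underrun go into IER only: the IMR value written
	 * below (~de_pipe_masked via de_irq_mask[]) keeps them masked until
	 * they are unmasked on demand.
	 */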
4196770de83dSDamien Lespiau 
41973a3b3c7dSVille Syrjälä 	de_port_enables = de_port_masked;
4198cc3f90f0SAnder Conselvan de Oliveira 	if (IS_GEN9_LP(dev_priv))
4199a52bb15bSVille Syrjälä 		de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
4200a52bb15bSVille Syrjälä 	else if (IS_BROADWELL(dev_priv))
42013a3b3c7dSVille Syrjälä 		de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;
42023a3b3c7dSVille Syrjälä 
4203b16b2a2fSPaulo Zanoni 	gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
420454fd3149SDhinakaran Pandiyan 	intel_psr_irq_control(dev_priv, dev_priv->psr.debug);
4205e04f7eceSVille Syrjälä 
42060a195c02SMika Kahola 	for_each_pipe(dev_priv, pipe) {
42070a195c02SMika Kahola 		dev_priv->de_irq_mask[pipe] = ~de_pipe_masked;
4208abd58f01SBen Widawsky 
4209f458ebbcSDaniel Vetter 		if (intel_display_power_is_enabled(dev_priv,
4210813bde43SPaulo Zanoni 				POWER_DOMAIN_PIPE(pipe)))
4211b16b2a2fSPaulo Zanoni 			GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
4212813bde43SPaulo Zanoni 					  dev_priv->de_irq_mask[pipe],
421335079899SPaulo Zanoni 					  de_pipe_enables);
42140a195c02SMika Kahola 	}
4215abd58f01SBen Widawsky 
4216b16b2a2fSPaulo Zanoni 	GEN3_IRQ_INIT(uncore, GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
4217b16b2a2fSPaulo Zanoni 	GEN3_IRQ_INIT(uncore, GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);
42182a57d9ccSImre Deak 
4219121e758eSDhinakaran Pandiyan 	if (INTEL_GEN(dev_priv) >= 11) {
4220121e758eSDhinakaran Pandiyan 		u32 de_hpd_masked = 0;
4221b796b971SDhinakaran Pandiyan 		u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK |
4222b796b971SDhinakaran Pandiyan 				     GEN11_DE_TBT_HOTPLUG_MASK;
4223121e758eSDhinakaran Pandiyan 
4224b16b2a2fSPaulo Zanoni 		GEN3_IRQ_INIT(uncore, GEN11_DE_HPD_, ~de_hpd_masked,
4225b16b2a2fSPaulo Zanoni 			      de_hpd_enables);
4226121e758eSDhinakaran Pandiyan 		gen11_hpd_detection_setup(dev_priv);
4227121e758eSDhinakaran Pandiyan 	} else if (IS_GEN9_LP(dev_priv)) {
42282a57d9ccSImre Deak 		bxt_hpd_detection_setup(dev_priv);
4229121e758eSDhinakaran Pandiyan 	} else if (IS_BROADWELL(dev_priv)) {
42301a56b1a2SImre Deak 		ilk_hpd_detection_setup(dev_priv);
4231abd58f01SBen Widawsky 	}
4232121e758eSDhinakaran Pandiyan }
4233abd58f01SBen Widawsky 
4234b318b824SVille Syrjälä static void gen8_irq_postinstall(struct drm_i915_private *dev_priv)
4235abd58f01SBen Widawsky {
42366e266956STvrtko Ursulin 	if (HAS_PCH_SPLIT(dev_priv))
4237b318b824SVille Syrjälä 		ibx_irq_pre_postinstall(dev_priv);
4238622364b6SPaulo Zanoni 
4239abd58f01SBen Widawsky 	gen8_gt_irq_postinstall(dev_priv);
4240abd58f01SBen Widawsky 	gen8_de_irq_postinstall(dev_priv);
4241abd58f01SBen Widawsky 
42426e266956STvrtko Ursulin 	if (HAS_PCH_SPLIT(dev_priv))
4243b318b824SVille Syrjälä 		ibx_irq_postinstall(dev_priv);
4244abd58f01SBen Widawsky 
424525286aacSDaniele Ceraolo Spurio 	gen8_master_intr_enable(dev_priv->uncore.regs);
4246abd58f01SBen Widawsky }
4247abd58f01SBen Widawsky 
42489b77011eSTvrtko Ursulin static void gen11_gt_irq_postinstall(struct intel_gt *gt)
424951951ae7SMika Kuoppala {
425051951ae7SMika Kuoppala 	const u32 irqs = GT_RENDER_USER_INTERRUPT | GT_CONTEXT_SWITCH_INTERRUPT;
4251f0818984STvrtko Ursulin 	struct intel_uncore *uncore = gt->uncore;
4252f0818984STvrtko Ursulin 	const u32 dmask = irqs << 16 | irqs;
4253f0818984STvrtko Ursulin 	const u32 smask = irqs << 16;
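	/*
	 * Each of the 32-bit registers written below packs two 16-bit engine
	 * fields: dmask covers both halves, smask only the upper one (used
	 * where the lower half is reserved, e.g. GEN11_RCS0_RSVD_INTR_MASK).
	 */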
425451951ae7SMika Kuoppala 
425551951ae7SMika Kuoppala 	BUILD_BUG_ON(irqs & 0xffff0000);
425651951ae7SMika Kuoppala 
425751951ae7SMika Kuoppala 	/* Enable RCS, BCS, VCS and VECS class interrupts. */
4258f0818984STvrtko Ursulin 	intel_uncore_write(uncore, GEN11_RENDER_COPY_INTR_ENABLE, dmask);
4259f0818984STvrtko Ursulin 	intel_uncore_write(uncore, GEN11_VCS_VECS_INTR_ENABLE, dmask);
426051951ae7SMika Kuoppala 
426151951ae7SMika Kuoppala 	/* Unmask irqs on RCS, BCS, VCS and VECS engines. */
4262f0818984STvrtko Ursulin 	intel_uncore_write(uncore, GEN11_RCS0_RSVD_INTR_MASK, ~smask);
4263f0818984STvrtko Ursulin 	intel_uncore_write(uncore, GEN11_BCS_RSVD_INTR_MASK, ~smask);
4264f0818984STvrtko Ursulin 	intel_uncore_write(uncore, GEN11_VCS0_VCS1_INTR_MASK, ~dmask);
4265f0818984STvrtko Ursulin 	intel_uncore_write(uncore, GEN11_VCS2_VCS3_INTR_MASK, ~dmask);
4266f0818984STvrtko Ursulin 	intel_uncore_write(uncore, GEN11_VECS0_VECS1_INTR_MASK, ~dmask);
426751951ae7SMika Kuoppala 
4268d02b98b8SOscar Mateo 	/*
4269d02b98b8SOscar Mateo 	 * RPS interrupts will get enabled/disabled on demand when RPS itself
4270d02b98b8SOscar Mateo 	 * is enabled/disabled.
4271d02b98b8SOscar Mateo 	 */
427258820574STvrtko Ursulin 	gt->pm_ier = 0x0;
427358820574STvrtko Ursulin 	gt->pm_imr = ~gt->pm_ier;
4274f0818984STvrtko Ursulin 	intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
4275f0818984STvrtko Ursulin 	intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_MASK,  ~0);
427654c52a84SOscar Mateo 
427754c52a84SOscar Mateo 	/* Same thing for GuC interrupts */
4278f0818984STvrtko Ursulin 	intel_uncore_write(uncore, GEN11_GUC_SG_INTR_ENABLE, 0);
4279f0818984STvrtko Ursulin 	intel_uncore_write(uncore, GEN11_GUC_SG_INTR_MASK,  ~0);
428051951ae7SMika Kuoppala }
428151951ae7SMika Kuoppala 
4282b318b824SVille Syrjälä static void icp_irq_postinstall(struct drm_i915_private *dev_priv)
428331604222SAnusha Srivatsa {
428431604222SAnusha Srivatsa 	u32 mask = SDE_GMBUS_ICP;
428531604222SAnusha Srivatsa 
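	/*
	 * SDEIER is opened up completely (the WARN_ON below checks it was
	 * still zero) and delivery is then controlled solely through SDEIMR.
	 */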
428631604222SAnusha Srivatsa 	WARN_ON(I915_READ(SDEIER) != 0);
428731604222SAnusha Srivatsa 	I915_WRITE(SDEIER, 0xffffffff);
428831604222SAnusha Srivatsa 	POSTING_READ(SDEIER);
428931604222SAnusha Srivatsa 
429065f42cdcSPaulo Zanoni 	gen3_assert_iir_is_zero(&dev_priv->uncore, SDEIIR);
429131604222SAnusha Srivatsa 	I915_WRITE(SDEIMR, ~mask);
429231604222SAnusha Srivatsa 
429331604222SAnusha Srivatsa 	icp_hpd_detection_setup(dev_priv);
429431604222SAnusha Srivatsa }
429531604222SAnusha Srivatsa 
4296b318b824SVille Syrjälä static void gen11_irq_postinstall(struct drm_i915_private *dev_priv)
429751951ae7SMika Kuoppala {
4298b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
4299df0d28c1SDhinakaran Pandiyan 	u32 gu_misc_masked = GEN11_GU_MISC_GSE;
430051951ae7SMika Kuoppala 
430129b43ae2SRodrigo Vivi 	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
4302b318b824SVille Syrjälä 		icp_irq_postinstall(dev_priv);
430331604222SAnusha Srivatsa 
43049b77011eSTvrtko Ursulin 	gen11_gt_irq_postinstall(&dev_priv->gt);
430551951ae7SMika Kuoppala 	gen8_de_irq_postinstall(dev_priv);
430651951ae7SMika Kuoppala 
4307b16b2a2fSPaulo Zanoni 	GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);
4308df0d28c1SDhinakaran Pandiyan 
430951951ae7SMika Kuoppala 	I915_WRITE(GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
431051951ae7SMika Kuoppala 
43119b77011eSTvrtko Ursulin 	gen11_master_intr_enable(uncore->regs);
4312c25f0c6aSDaniele Ceraolo Spurio 	POSTING_READ(GEN11_GFX_MSTR_IRQ);
431351951ae7SMika Kuoppala }
431451951ae7SMika Kuoppala 
4315b318b824SVille Syrjälä static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv)
431643f328d7SVille Syrjälä {
431743f328d7SVille Syrjälä 	gen8_gt_irq_postinstall(dev_priv);
431843f328d7SVille Syrjälä 
4319ad22d106SVille Syrjälä 	spin_lock_irq(&dev_priv->irq_lock);
43209918271eSVille Syrjälä 	if (dev_priv->display_irqs_enabled)
4321ad22d106SVille Syrjälä 		vlv_display_irq_postinstall(dev_priv);
4322ad22d106SVille Syrjälä 	spin_unlock_irq(&dev_priv->irq_lock);
4323ad22d106SVille Syrjälä 
4324e5328c43SVille Syrjälä 	I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
432543f328d7SVille Syrjälä 	POSTING_READ(GEN8_MASTER_IRQ);
432643f328d7SVille Syrjälä }
432743f328d7SVille Syrjälä 
4328b318b824SVille Syrjälä static void i8xx_irq_reset(struct drm_i915_private *dev_priv)
4329c2798b19SChris Wilson {
4330b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
4331c2798b19SChris Wilson 
433244d9241eSVille Syrjälä 	i9xx_pipestat_irq_reset(dev_priv);
433344d9241eSVille Syrjälä 
4334b16b2a2fSPaulo Zanoni 	GEN2_IRQ_RESET(uncore);
4335c2798b19SChris Wilson }
4336c2798b19SChris Wilson 
4337b318b824SVille Syrjälä static void i8xx_irq_postinstall(struct drm_i915_private *dev_priv)
4338c2798b19SChris Wilson {
4339b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
4340e9e9848aSVille Syrjälä 	u16 enable_mask;
4341c2798b19SChris Wilson 
43424f5fd91fSTvrtko Ursulin 	intel_uncore_write16(uncore,
43434f5fd91fSTvrtko Ursulin 			     EMR,
43444f5fd91fSTvrtko Ursulin 			     ~(I915_ERROR_PAGE_TABLE |
4345045cebd2SVille Syrjälä 			       I915_ERROR_MEMORY_REFRESH));
4346c2798b19SChris Wilson 
4347c2798b19SChris Wilson 	/* Unmask the interrupts that we always want on. */
4348c2798b19SChris Wilson 	dev_priv->irq_mask =
4349c2798b19SChris Wilson 		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
435016659bc5SVille Syrjälä 		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
435116659bc5SVille Syrjälä 		  I915_MASTER_ERROR_INTERRUPT);
4352c2798b19SChris Wilson 
4353e9e9848aSVille Syrjälä 	enable_mask =
4354c2798b19SChris Wilson 		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4355c2798b19SChris Wilson 		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
435616659bc5SVille Syrjälä 		I915_MASTER_ERROR_INTERRUPT |
4357e9e9848aSVille Syrjälä 		I915_USER_INTERRUPT;
4358e9e9848aSVille Syrjälä 
4359b16b2a2fSPaulo Zanoni 	GEN2_IRQ_INIT(uncore, dev_priv->irq_mask, enable_mask);
4360c2798b19SChris Wilson 
4361379ef82dSDaniel Vetter 	/* Interrupt setup is already guaranteed to be single-threaded; this is
4362379ef82dSDaniel Vetter 	 * just to make the assert_spin_locked check happy. */
4363d6207435SDaniel Vetter 	spin_lock_irq(&dev_priv->irq_lock);
4364755e9019SImre Deak 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4365755e9019SImre Deak 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4366d6207435SDaniel Vetter 	spin_unlock_irq(&dev_priv->irq_lock);
4367c2798b19SChris Wilson }
4368c2798b19SChris Wilson 
43694f5fd91fSTvrtko Ursulin static void i8xx_error_irq_ack(struct drm_i915_private *i915,
437078c357ddSVille Syrjälä 			       u16 *eir, u16 *eir_stuck)
437178c357ddSVille Syrjälä {
43724f5fd91fSTvrtko Ursulin 	struct intel_uncore *uncore = &i915->uncore;
437378c357ddSVille Syrjälä 	u16 emr;
437478c357ddSVille Syrjälä 
43754f5fd91fSTvrtko Ursulin 	*eir = intel_uncore_read16(uncore, EIR);
437678c357ddSVille Syrjälä 
437778c357ddSVille Syrjälä 	if (*eir)
43784f5fd91fSTvrtko Ursulin 		intel_uncore_write16(uncore, EIR, *eir);
437978c357ddSVille Syrjälä 
43804f5fd91fSTvrtko Ursulin 	*eir_stuck = intel_uncore_read16(uncore, EIR);
438178c357ddSVille Syrjälä 	if (*eir_stuck == 0)
438278c357ddSVille Syrjälä 		return;
438378c357ddSVille Syrjälä 
438478c357ddSVille Syrjälä 	/*
438578c357ddSVille Syrjälä 	 * Toggle all EMR bits to make sure we get an edge
438678c357ddSVille Syrjälä 	 * in the ISR master error bit if we don't clear
438778c357ddSVille Syrjälä 	 * all the EIR bits. Otherwise the edge triggered
438878c357ddSVille Syrjälä 	 * IIR on i965/g4x wouldn't notice that an interrupt
438978c357ddSVille Syrjälä 	 * is still pending. Also some EIR bits can't be
439078c357ddSVille Syrjälä 	 * cleared except by handling the underlying error
439178c357ddSVille Syrjälä 	 * (or by a GPU reset) so we mask any bit that
439278c357ddSVille Syrjälä 	 * remains set.
439378c357ddSVille Syrjälä 	 */
43944f5fd91fSTvrtko Ursulin 	emr = intel_uncore_read16(uncore, EMR);
43954f5fd91fSTvrtko Ursulin 	intel_uncore_write16(uncore, EMR, 0xffff);
43964f5fd91fSTvrtko Ursulin 	intel_uncore_write16(uncore, EMR, emr | *eir_stuck);
439778c357ddSVille Syrjälä }
439878c357ddSVille Syrjälä 
439978c357ddSVille Syrjälä static void i8xx_error_irq_handler(struct drm_i915_private *dev_priv,
440078c357ddSVille Syrjälä 				   u16 eir, u16 eir_stuck)
440178c357ddSVille Syrjälä {
440278c357ddSVille Syrjälä 	DRM_DEBUG("Master Error: EIR 0x%04x\n", eir);
440378c357ddSVille Syrjälä 
440478c357ddSVille Syrjälä 	if (eir_stuck)
440578c357ddSVille Syrjälä 		DRM_DEBUG_DRIVER("EIR stuck: 0x%04x, masked\n", eir_stuck);
440678c357ddSVille Syrjälä }
440778c357ddSVille Syrjälä 
440878c357ddSVille Syrjälä static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv,
440978c357ddSVille Syrjälä 			       u32 *eir, u32 *eir_stuck)
441078c357ddSVille Syrjälä {
441178c357ddSVille Syrjälä 	u32 emr;
441278c357ddSVille Syrjälä 
441378c357ddSVille Syrjälä 	*eir = I915_READ(EIR);
441478c357ddSVille Syrjälä 
441578c357ddSVille Syrjälä 	I915_WRITE(EIR, *eir);
441678c357ddSVille Syrjälä 
441778c357ddSVille Syrjälä 	*eir_stuck = I915_READ(EIR);
441878c357ddSVille Syrjälä 	if (*eir_stuck == 0)
441978c357ddSVille Syrjälä 		return;
442078c357ddSVille Syrjälä 
442178c357ddSVille Syrjälä 	/*
442278c357ddSVille Syrjälä 	 * Toggle all EMR bits to make sure we get an edge
442378c357ddSVille Syrjälä 	 * in the ISR master error bit if we don't clear
442478c357ddSVille Syrjälä 	 * all the EIR bits. Otherwise the edge triggered
442578c357ddSVille Syrjälä 	 * IIR on i965/g4x wouldn't notice that an interrupt
442678c357ddSVille Syrjälä 	 * is still pending. Also some EIR bits can't be
442778c357ddSVille Syrjälä 	 * cleared except by handling the underlying error
442878c357ddSVille Syrjälä 	 * (or by a GPU reset) so we mask any bit that
442978c357ddSVille Syrjälä 	 * remains set.
443078c357ddSVille Syrjälä 	 */
443178c357ddSVille Syrjälä 	emr = I915_READ(EMR);
443278c357ddSVille Syrjälä 	I915_WRITE(EMR, 0xffffffff);
443378c357ddSVille Syrjälä 	I915_WRITE(EMR, emr | *eir_stuck);
443478c357ddSVille Syrjälä }
443578c357ddSVille Syrjälä 
443678c357ddSVille Syrjälä static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv,
443778c357ddSVille Syrjälä 				   u32 eir, u32 eir_stuck)
443878c357ddSVille Syrjälä {
443978c357ddSVille Syrjälä 	DRM_DEBUG("Master Error, EIR 0x%08x\n", eir);
444078c357ddSVille Syrjälä 
444178c357ddSVille Syrjälä 	if (eir_stuck)
444278c357ddSVille Syrjälä 		DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masked\n", eir_stuck);
444378c357ddSVille Syrjälä }
444478c357ddSVille Syrjälä 
4445ff1f525eSDaniel Vetter static irqreturn_t i8xx_irq_handler(int irq, void *arg)
4446c2798b19SChris Wilson {
4447b318b824SVille Syrjälä 	struct drm_i915_private *dev_priv = arg;
4448af722d28SVille Syrjälä 	irqreturn_t ret = IRQ_NONE;
4449c2798b19SChris Wilson 
44502dd2a883SImre Deak 	if (!intel_irqs_enabled(dev_priv))
44512dd2a883SImre Deak 		return IRQ_NONE;
44522dd2a883SImre Deak 
44531f814dacSImre Deak 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
44549102650fSDaniele Ceraolo Spurio 	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
44551f814dacSImre Deak 
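	/* Single-pass loop: the do/while (0) only exists so that "break" can
	 * bail out early to the common wakeref re-enable below. */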
4456af722d28SVille Syrjälä 	do {
4457af722d28SVille Syrjälä 		u32 pipe_stats[I915_MAX_PIPES] = {};
445878c357ddSVille Syrjälä 		u16 eir = 0, eir_stuck = 0;
4459af722d28SVille Syrjälä 		u16 iir;
4460af722d28SVille Syrjälä 
44614f5fd91fSTvrtko Ursulin 		iir = intel_uncore_read16(&dev_priv->uncore, GEN2_IIR);
4462c2798b19SChris Wilson 		if (iir == 0)
4463af722d28SVille Syrjälä 			break;
4464c2798b19SChris Wilson 
4465af722d28SVille Syrjälä 		ret = IRQ_HANDLED;
4466c2798b19SChris Wilson 
4467eb64343cSVille Syrjälä 		/* Call regardless, as some status bits might not be
4468eb64343cSVille Syrjälä 		 * signalled in iir */
4469eb64343cSVille Syrjälä 		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
4470c2798b19SChris Wilson 
447178c357ddSVille Syrjälä 		if (iir & I915_MASTER_ERROR_INTERRUPT)
447278c357ddSVille Syrjälä 			i8xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
447378c357ddSVille Syrjälä 
44744f5fd91fSTvrtko Ursulin 		intel_uncore_write16(&dev_priv->uncore, GEN2_IIR, iir);
4475c2798b19SChris Wilson 
4476c2798b19SChris Wilson 		if (iir & I915_USER_INTERRUPT)
44778a68d464SChris Wilson 			intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);
4478c2798b19SChris Wilson 
447978c357ddSVille Syrjälä 		if (iir & I915_MASTER_ERROR_INTERRUPT)
448078c357ddSVille Syrjälä 			i8xx_error_irq_handler(dev_priv, eir, eir_stuck);
4481af722d28SVille Syrjälä 
4482eb64343cSVille Syrjälä 		i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats);
4483af722d28SVille Syrjälä 	} while (0);
4484c2798b19SChris Wilson 
44859102650fSDaniele Ceraolo Spurio 	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
44861f814dacSImre Deak 
44871f814dacSImre Deak 	return ret;
4488c2798b19SChris Wilson }
4489c2798b19SChris Wilson 
4490b318b824SVille Syrjälä static void i915_irq_reset(struct drm_i915_private *dev_priv)
4491a266c7d5SChris Wilson {
4492b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
4493a266c7d5SChris Wilson 
449456b857a5STvrtko Ursulin 	if (I915_HAS_HOTPLUG(dev_priv)) {
44950706f17cSEgbert Eich 		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4496a266c7d5SChris Wilson 		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4497a266c7d5SChris Wilson 	}
4498a266c7d5SChris Wilson 
449944d9241eSVille Syrjälä 	i9xx_pipestat_irq_reset(dev_priv);
450044d9241eSVille Syrjälä 
4501b16b2a2fSPaulo Zanoni 	GEN3_IRQ_RESET(uncore, GEN2_);
4502a266c7d5SChris Wilson }
4503a266c7d5SChris Wilson 
4504b318b824SVille Syrjälä static void i915_irq_postinstall(struct drm_i915_private *dev_priv)
4505a266c7d5SChris Wilson {
4506b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
450738bde180SChris Wilson 	u32 enable_mask;
4508a266c7d5SChris Wilson 
4509045cebd2SVille Syrjälä 	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE |
4510045cebd2SVille Syrjälä 			  I915_ERROR_MEMORY_REFRESH));
451138bde180SChris Wilson 
451238bde180SChris Wilson 	/* Unmask the interrupts that we always want on. */
451338bde180SChris Wilson 	dev_priv->irq_mask =
451438bde180SChris Wilson 		~(I915_ASLE_INTERRUPT |
451538bde180SChris Wilson 		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
451616659bc5SVille Syrjälä 		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
451716659bc5SVille Syrjälä 		  I915_MASTER_ERROR_INTERRUPT);
451838bde180SChris Wilson 
451938bde180SChris Wilson 	enable_mask =
452038bde180SChris Wilson 		I915_ASLE_INTERRUPT |
452138bde180SChris Wilson 		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
452238bde180SChris Wilson 		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
452316659bc5SVille Syrjälä 		I915_MASTER_ERROR_INTERRUPT |
452438bde180SChris Wilson 		I915_USER_INTERRUPT;
452538bde180SChris Wilson 
452656b857a5STvrtko Ursulin 	if (I915_HAS_HOTPLUG(dev_priv)) {
4527a266c7d5SChris Wilson 		/* Enable in IER... */
4528a266c7d5SChris Wilson 		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
4529a266c7d5SChris Wilson 		/* and unmask in IMR */
4530a266c7d5SChris Wilson 		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
4531a266c7d5SChris Wilson 	}
4532a266c7d5SChris Wilson 
4533b16b2a2fSPaulo Zanoni 	GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);
4534a266c7d5SChris Wilson 
4535379ef82dSDaniel Vetter 	/* Interrupt setup is already guaranteed to be single-threaded; this is
4536379ef82dSDaniel Vetter 	 * just to make the assert_spin_locked check happy. */
4537d6207435SDaniel Vetter 	spin_lock_irq(&dev_priv->irq_lock);
4538755e9019SImre Deak 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4539755e9019SImre Deak 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4540d6207435SDaniel Vetter 	spin_unlock_irq(&dev_priv->irq_lock);
4541379ef82dSDaniel Vetter 
4542c30bb1fdSVille Syrjälä 	i915_enable_asle_pipestat(dev_priv);
454320afbda2SDaniel Vetter }
454420afbda2SDaniel Vetter 
4545ff1f525eSDaniel Vetter static irqreturn_t i915_irq_handler(int irq, void *arg)
4546a266c7d5SChris Wilson {
4547b318b824SVille Syrjälä 	struct drm_i915_private *dev_priv = arg;
4548af722d28SVille Syrjälä 	irqreturn_t ret = IRQ_NONE;
4549a266c7d5SChris Wilson 
45502dd2a883SImre Deak 	if (!intel_irqs_enabled(dev_priv))
45512dd2a883SImre Deak 		return IRQ_NONE;
45522dd2a883SImre Deak 
45531f814dacSImre Deak 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
45549102650fSDaniele Ceraolo Spurio 	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
45551f814dacSImre Deak 
455638bde180SChris Wilson 	do {
4557eb64343cSVille Syrjälä 		u32 pipe_stats[I915_MAX_PIPES] = {};
455878c357ddSVille Syrjälä 		u32 eir = 0, eir_stuck = 0;
4559af722d28SVille Syrjälä 		u32 hotplug_status = 0;
4560af722d28SVille Syrjälä 		u32 iir;
4561a266c7d5SChris Wilson 
45629d9523d8SPaulo Zanoni 		iir = I915_READ(GEN2_IIR);
4563af722d28SVille Syrjälä 		if (iir == 0)
4564af722d28SVille Syrjälä 			break;
4565af722d28SVille Syrjälä 
4566af722d28SVille Syrjälä 		ret = IRQ_HANDLED;
4567af722d28SVille Syrjälä 
4568af722d28SVille Syrjälä 		if (I915_HAS_HOTPLUG(dev_priv) &&
4569af722d28SVille Syrjälä 		    iir & I915_DISPLAY_PORT_INTERRUPT)
4570af722d28SVille Syrjälä 			hotplug_status = i9xx_hpd_irq_ack(dev_priv);
4571a266c7d5SChris Wilson 
4572eb64343cSVille Syrjälä 		/* Call regardless, as some status bits might not be
4573eb64343cSVille Syrjälä 		 * signalled in iir */
4574eb64343cSVille Syrjälä 		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
4575a266c7d5SChris Wilson 
457678c357ddSVille Syrjälä 		if (iir & I915_MASTER_ERROR_INTERRUPT)
457778c357ddSVille Syrjälä 			i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
457878c357ddSVille Syrjälä 
45799d9523d8SPaulo Zanoni 		I915_WRITE(GEN2_IIR, iir);
4580a266c7d5SChris Wilson 
4581a266c7d5SChris Wilson 		if (iir & I915_USER_INTERRUPT)
45828a68d464SChris Wilson 			intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);
4583a266c7d5SChris Wilson 
458478c357ddSVille Syrjälä 		if (iir & I915_MASTER_ERROR_INTERRUPT)
458578c357ddSVille Syrjälä 			i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
4586a266c7d5SChris Wilson 
4587af722d28SVille Syrjälä 		if (hotplug_status)
4588af722d28SVille Syrjälä 			i9xx_hpd_irq_handler(dev_priv, hotplug_status);
4589af722d28SVille Syrjälä 
4590af722d28SVille Syrjälä 		i915_pipestat_irq_handler(dev_priv, iir, pipe_stats);
4591af722d28SVille Syrjälä 	} while (0);
4592a266c7d5SChris Wilson 
45939102650fSDaniele Ceraolo Spurio 	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
45941f814dacSImre Deak 
4595a266c7d5SChris Wilson 	return ret;
4596a266c7d5SChris Wilson }
4597a266c7d5SChris Wilson 
4598b318b824SVille Syrjälä static void i965_irq_reset(struct drm_i915_private *dev_priv)
4599a266c7d5SChris Wilson {
4600b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
4601a266c7d5SChris Wilson 
46020706f17cSEgbert Eich 	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4603a266c7d5SChris Wilson 	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4604a266c7d5SChris Wilson 
460544d9241eSVille Syrjälä 	i9xx_pipestat_irq_reset(dev_priv);
460644d9241eSVille Syrjälä 
4607b16b2a2fSPaulo Zanoni 	GEN3_IRQ_RESET(uncore, GEN2_);
4608a266c7d5SChris Wilson }
4609a266c7d5SChris Wilson 
4610b318b824SVille Syrjälä static void i965_irq_postinstall(struct drm_i915_private *dev_priv)
4611a266c7d5SChris Wilson {
4612b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
4613bbba0a97SChris Wilson 	u32 enable_mask;
4614a266c7d5SChris Wilson 	u32 error_mask;
4615a266c7d5SChris Wilson 
4616045cebd2SVille Syrjälä 	/*
4617045cebd2SVille Syrjälä 	 * Enable some error detection, note the instruction error mask
4618045cebd2SVille Syrjälä 	 * bit is reserved, so we leave it masked.
4619045cebd2SVille Syrjälä 	 */
4620045cebd2SVille Syrjälä 	if (IS_G4X(dev_priv)) {
4621045cebd2SVille Syrjälä 		error_mask = ~(GM45_ERROR_PAGE_TABLE |
4622045cebd2SVille Syrjälä 			       GM45_ERROR_MEM_PRIV |
4623045cebd2SVille Syrjälä 			       GM45_ERROR_CP_PRIV |
4624045cebd2SVille Syrjälä 			       I915_ERROR_MEMORY_REFRESH);
4625045cebd2SVille Syrjälä 	} else {
4626045cebd2SVille Syrjälä 		error_mask = ~(I915_ERROR_PAGE_TABLE |
4627045cebd2SVille Syrjälä 			       I915_ERROR_MEMORY_REFRESH);
4628045cebd2SVille Syrjälä 	}
4629045cebd2SVille Syrjälä 	I915_WRITE(EMR, error_mask);
4630045cebd2SVille Syrjälä 
4631a266c7d5SChris Wilson 	/* Unmask the interrupts that we always want on. */
4632c30bb1fdSVille Syrjälä 	dev_priv->irq_mask =
4633c30bb1fdSVille Syrjälä 		~(I915_ASLE_INTERRUPT |
4634adca4730SChris Wilson 		  I915_DISPLAY_PORT_INTERRUPT |
4635bbba0a97SChris Wilson 		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4636bbba0a97SChris Wilson 		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
463778c357ddSVille Syrjälä 		  I915_MASTER_ERROR_INTERRUPT);
4638bbba0a97SChris Wilson 
4639c30bb1fdSVille Syrjälä 	enable_mask =
4640c30bb1fdSVille Syrjälä 		I915_ASLE_INTERRUPT |
4641c30bb1fdSVille Syrjälä 		I915_DISPLAY_PORT_INTERRUPT |
4642c30bb1fdSVille Syrjälä 		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4643c30bb1fdSVille Syrjälä 		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
464478c357ddSVille Syrjälä 		I915_MASTER_ERROR_INTERRUPT |
4645c30bb1fdSVille Syrjälä 		I915_USER_INTERRUPT;
4646bbba0a97SChris Wilson 
464791d14251STvrtko Ursulin 	if (IS_G4X(dev_priv))
4648bbba0a97SChris Wilson 		enable_mask |= I915_BSD_USER_INTERRUPT;
4649a266c7d5SChris Wilson 
4650b16b2a2fSPaulo Zanoni 	GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);
4651c30bb1fdSVille Syrjälä 
4652b79480baSDaniel Vetter 	/* Interrupt setup is already guaranteed to be single-threaded; this is
4653b79480baSDaniel Vetter 	 * just to make the assert_spin_locked check happy. */
4654d6207435SDaniel Vetter 	spin_lock_irq(&dev_priv->irq_lock);
4655755e9019SImre Deak 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
4656755e9019SImre Deak 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4657755e9019SImre Deak 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4658d6207435SDaniel Vetter 	spin_unlock_irq(&dev_priv->irq_lock);
4659a266c7d5SChris Wilson 
466091d14251STvrtko Ursulin 	i915_enable_asle_pipestat(dev_priv);
466120afbda2SDaniel Vetter }
466220afbda2SDaniel Vetter 
466391d14251STvrtko Ursulin static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
466420afbda2SDaniel Vetter {
466520afbda2SDaniel Vetter 	u32 hotplug_en;
466620afbda2SDaniel Vetter 
466767520415SChris Wilson 	lockdep_assert_held(&dev_priv->irq_lock);
4668b5ea2d56SDaniel Vetter 
4669adca4730SChris Wilson 	/* Note HDMI and DP share hotplug bits */
4670e5868a31SEgbert Eich 	/* enable bits are the same for all generations */
467191d14251STvrtko Ursulin 	hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915);
4672a266c7d5SChris Wilson 	/* Programming the CRT detection parameters tends
4673a266c7d5SChris Wilson 	   to generate a spurious hotplug event about three
4674a266c7d5SChris Wilson 	   seconds later.  So just do it once.
4675a266c7d5SChris Wilson 	*/
467691d14251STvrtko Ursulin 	if (IS_G4X(dev_priv))
4677a266c7d5SChris Wilson 		hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
4678a266c7d5SChris Wilson 	hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
4679a266c7d5SChris Wilson 
4680a266c7d5SChris Wilson 	/* Ignore TV since it's buggy */
46810706f17cSEgbert Eich 	i915_hotplug_interrupt_update_locked(dev_priv,
4682f9e3dc78SJani Nikula 					     HOTPLUG_INT_EN_MASK |
4683f9e3dc78SJani Nikula 					     CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
4684f9e3dc78SJani Nikula 					     CRT_HOTPLUG_ACTIVATION_PERIOD_64,
46850706f17cSEgbert Eich 					     hotplug_en);
4686a266c7d5SChris Wilson }
4687a266c7d5SChris Wilson 
4688ff1f525eSDaniel Vetter static irqreturn_t i965_irq_handler(int irq, void *arg)
4689a266c7d5SChris Wilson {
4690b318b824SVille Syrjälä 	struct drm_i915_private *dev_priv = arg;
4691af722d28SVille Syrjälä 	irqreturn_t ret = IRQ_NONE;
4692a266c7d5SChris Wilson 
46932dd2a883SImre Deak 	if (!intel_irqs_enabled(dev_priv))
46942dd2a883SImre Deak 		return IRQ_NONE;
46952dd2a883SImre Deak 
46961f814dacSImre Deak 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
46979102650fSDaniele Ceraolo Spurio 	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
46981f814dacSImre Deak 
4699af722d28SVille Syrjälä 	do {
4700eb64343cSVille Syrjälä 		u32 pipe_stats[I915_MAX_PIPES] = {};
470178c357ddSVille Syrjälä 		u32 eir = 0, eir_stuck = 0;
4702af722d28SVille Syrjälä 		u32 hotplug_status = 0;
4703af722d28SVille Syrjälä 		u32 iir;
47042c8ba29fSChris Wilson 
47059d9523d8SPaulo Zanoni 		iir = I915_READ(GEN2_IIR);
4706af722d28SVille Syrjälä 		if (iir == 0)
4707af722d28SVille Syrjälä 			break;
4708af722d28SVille Syrjälä 
4709af722d28SVille Syrjälä 		ret = IRQ_HANDLED;
4710af722d28SVille Syrjälä 
4711af722d28SVille Syrjälä 		if (iir & I915_DISPLAY_PORT_INTERRUPT)
4712af722d28SVille Syrjälä 			hotplug_status = i9xx_hpd_irq_ack(dev_priv);
4713a266c7d5SChris Wilson 
4714eb64343cSVille Syrjälä 		/* Call regardless, as some status bits might not be
4715eb64343cSVille Syrjälä 		 * signalled in iir */
4716eb64343cSVille Syrjälä 		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
4717a266c7d5SChris Wilson 
471878c357ddSVille Syrjälä 		if (iir & I915_MASTER_ERROR_INTERRUPT)
471978c357ddSVille Syrjälä 			i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
472078c357ddSVille Syrjälä 
47219d9523d8SPaulo Zanoni 		I915_WRITE(GEN2_IIR, iir);
4722a266c7d5SChris Wilson 
4723a266c7d5SChris Wilson 		if (iir & I915_USER_INTERRUPT)
47248a68d464SChris Wilson 			intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);
4725af722d28SVille Syrjälä 
4726a266c7d5SChris Wilson 		if (iir & I915_BSD_USER_INTERRUPT)
47278a68d464SChris Wilson 			intel_engine_breadcrumbs_irq(dev_priv->engine[VCS0]);
4728a266c7d5SChris Wilson 
472978c357ddSVille Syrjälä 		if (iir & I915_MASTER_ERROR_INTERRUPT)
473078c357ddSVille Syrjälä 			i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
4731515ac2bbSDaniel Vetter 
4732af722d28SVille Syrjälä 		if (hotplug_status)
4733af722d28SVille Syrjälä 			i9xx_hpd_irq_handler(dev_priv, hotplug_status);
4734af722d28SVille Syrjälä 
4735af722d28SVille Syrjälä 		i965_pipestat_irq_handler(dev_priv, iir, pipe_stats);
4736af722d28SVille Syrjälä 	} while (0);
4737a266c7d5SChris Wilson 
47389102650fSDaniele Ceraolo Spurio 	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
47391f814dacSImre Deak 
4740a266c7d5SChris Wilson 	return ret;
4741a266c7d5SChris Wilson }
4742a266c7d5SChris Wilson 
4743fca52a55SDaniel Vetter /**
4744fca52a55SDaniel Vetter  * intel_irq_init - initializes irq support
4745fca52a55SDaniel Vetter  * @dev_priv: i915 device instance
4746fca52a55SDaniel Vetter  *
4747fca52a55SDaniel Vetter  * This function initializes all the irq support including work items, timers
4748fca52a55SDaniel Vetter  * and all the vtables. It does not setup the interrupt itself though.
4749fca52a55SDaniel Vetter  */
4750b963291cSDaniel Vetter void intel_irq_init(struct drm_i915_private *dev_priv)
4751f71d4af4SJesse Barnes {
475291c8a326SChris Wilson 	struct drm_device *dev = &dev_priv->drm;
4753562d9baeSSagar Arun Kamble 	struct intel_rps *rps = &dev_priv->gt_pm.rps;
4754cefcff8fSJoonas Lahtinen 	int i;
47558b2e326dSChris Wilson 
4756d938da6bSVille Syrjälä 	if (IS_I945GM(dev_priv))
4757d938da6bSVille Syrjälä 		i945gm_vblank_work_init(dev_priv);
4758d938da6bSVille Syrjälä 
475977913b39SJani Nikula 	intel_hpd_init_work(dev_priv);
476077913b39SJani Nikula 
4761562d9baeSSagar Arun Kamble 	INIT_WORK(&rps->work, gen6_pm_rps_work);
4762cefcff8fSJoonas Lahtinen 
4763a4da4fa4SDaniel Vetter 	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
4764cefcff8fSJoonas Lahtinen 	for (i = 0; i < MAX_L3_SLICES; ++i)
4765cefcff8fSJoonas Lahtinen 		dev_priv->l3_parity.remap_info[i] = NULL;
47668b2e326dSChris Wilson 
476754c52a84SOscar Mateo 	if (HAS_GUC_SCHED(dev_priv) && INTEL_GEN(dev_priv) < 11)
476826705e20SSagar Arun Kamble 		dev_priv->pm_guc_events = GEN9_GUC_TO_HOST_INT_EVENT;
476926705e20SSagar Arun Kamble 
4770a6706b45SDeepak S 	/* Let's track the enabled rps events */
4771666a4537SWayne Boyer 	if (IS_VALLEYVIEW(dev_priv))
47726c65a587SVille Syrjälä 		/* WaGsvRC0ResidencyMethod:vlv */
4773e0e8c7cbSChris Wilson 		dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
477431685c25SDeepak S 	else
47754668f695SChris Wilson 		dev_priv->pm_rps_events = (GEN6_PM_RP_UP_THRESHOLD |
47764668f695SChris Wilson 					   GEN6_PM_RP_DOWN_THRESHOLD |
47774668f695SChris Wilson 					   GEN6_PM_RP_DOWN_TIMEOUT);
4778a6706b45SDeepak S 
4779917dc6b5SMika Kuoppala 	/* We share the register with another engine */
4780917dc6b5SMika Kuoppala 	if (INTEL_GEN(dev_priv) > 9)
4781917dc6b5SMika Kuoppala 		GEM_WARN_ON(dev_priv->pm_rps_events & 0xffff0000);
4782917dc6b5SMika Kuoppala 
4783562d9baeSSagar Arun Kamble 	rps->pm_intrmsk_mbz = 0;
47841800ad25SSagar Arun Kamble 
47851800ad25SSagar Arun Kamble 	/*
4786acf2dc22SMika Kuoppala 	 * SNB,IVB,HSW can while VLV,CHV may hard hang on looping batchbuffer
47871800ad25SSagar Arun Kamble 	 * if GEN6_PM_UP_EI_EXPIRED is masked.
47881800ad25SSagar Arun Kamble 	 *
47891800ad25SSagar Arun Kamble 	 * TODO: verify if this can be reproduced on VLV,CHV.
47901800ad25SSagar Arun Kamble 	 */
4791bca2bf2aSPandiyan, Dhinakaran 	if (INTEL_GEN(dev_priv) <= 7)
4792562d9baeSSagar Arun Kamble 		rps->pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED;
47931800ad25SSagar Arun Kamble 
4794bca2bf2aSPandiyan, Dhinakaran 	if (INTEL_GEN(dev_priv) >= 8)
4795562d9baeSSagar Arun Kamble 		rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
47961800ad25SSagar Arun Kamble 
479721da2700SVille Syrjälä 	dev->vblank_disable_immediate = true;
479821da2700SVille Syrjälä 
4799262fd485SChris Wilson 	/* Most platforms treat the display irq block as an always-on
4800262fd485SChris Wilson 	 * power domain. vlv/chv can disable it at runtime and need
4801262fd485SChris Wilson 	 * special care to avoid writing any of the display block registers
4802262fd485SChris Wilson 	 * outside of the power domain. We defer setting up the display irqs
4803262fd485SChris Wilson 	 * in this case to the runtime pm.
4804262fd485SChris Wilson 	 */
4805262fd485SChris Wilson 	dev_priv->display_irqs_enabled = true;
4806262fd485SChris Wilson 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
4807262fd485SChris Wilson 		dev_priv->display_irqs_enabled = false;
4808262fd485SChris Wilson 
4809317eaa95SLyude 	dev_priv->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD;
48109a64c650SLyude Paul 	/* If we have MST support, we want to avoid doing short HPD IRQ storm
48119a64c650SLyude Paul 	 * detection, as short HPD storms will occur as a natural part of
48129a64c650SLyude Paul 	 * sideband messaging with MST.
48139a64c650SLyude Paul 	 * On older platforms however, IRQ storms can occur with both long and
48149a64c650SLyude Paul 	 * short pulses, as seen on some G4x systems.
48159a64c650SLyude Paul 	 */
48169a64c650SLyude Paul 	dev_priv->hotplug.hpd_short_storm_enabled = !HAS_DP_MST(dev_priv);
4817317eaa95SLyude 
4818b318b824SVille Syrjälä 	if (HAS_GMCH(dev_priv)) {
4819b318b824SVille Syrjälä 		if (I915_HAS_HOTPLUG(dev_priv))
482043f328d7SVille Syrjälä 			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4821b318b824SVille Syrjälä 	} else {
4822b318b824SVille Syrjälä 		if (INTEL_GEN(dev_priv) >= 11)
4823121e758eSDhinakaran Pandiyan 			dev_priv->display.hpd_irq_setup = gen11_hpd_irq_setup;
4824b318b824SVille Syrjälä 		else if (IS_GEN9_LP(dev_priv))
4825e0a20ad7SShashank Sharma 			dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
4826c6c30b91SRodrigo Vivi 		else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
48276dbf30ceSVille Syrjälä 			dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
48286dbf30ceSVille Syrjälä 		else
48293a3b3c7dSVille Syrjälä 			dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
4830f71d4af4SJesse Barnes 	}
4831f71d4af4SJesse Barnes }
483220afbda2SDaniel Vetter 
4833fca52a55SDaniel Vetter /**
4834cefcff8fSJoonas Lahtinen  * intel_irq_fini - deinitializes IRQ support
4835cefcff8fSJoonas Lahtinen  * @i915: i915 device instance
4836cefcff8fSJoonas Lahtinen  *
4837cefcff8fSJoonas Lahtinen  * This function deinitializes all the IRQ support.
4838cefcff8fSJoonas Lahtinen  */
4839cefcff8fSJoonas Lahtinen void intel_irq_fini(struct drm_i915_private *i915)
4840cefcff8fSJoonas Lahtinen {
4841cefcff8fSJoonas Lahtinen 	int i;
4842cefcff8fSJoonas Lahtinen 
4843d938da6bSVille Syrjälä 	if (IS_I945GM(i915))
4844d938da6bSVille Syrjälä 		i945gm_vblank_work_fini(i915);
4845d938da6bSVille Syrjälä 
4846cefcff8fSJoonas Lahtinen 	for (i = 0; i < MAX_L3_SLICES; ++i)
4847cefcff8fSJoonas Lahtinen 		kfree(i915->l3_parity.remap_info[i]);
4848cefcff8fSJoonas Lahtinen }
4849cefcff8fSJoonas Lahtinen 
4850b318b824SVille Syrjälä static irq_handler_t intel_irq_handler(struct drm_i915_private *dev_priv)
4851b318b824SVille Syrjälä {
4852b318b824SVille Syrjälä 	if (HAS_GMCH(dev_priv)) {
4853b318b824SVille Syrjälä 		if (IS_CHERRYVIEW(dev_priv))
4854b318b824SVille Syrjälä 			return cherryview_irq_handler;
4855b318b824SVille Syrjälä 		else if (IS_VALLEYVIEW(dev_priv))
4856b318b824SVille Syrjälä 			return valleyview_irq_handler;
4857b318b824SVille Syrjälä 		else if (IS_GEN(dev_priv, 4))
4858b318b824SVille Syrjälä 			return i965_irq_handler;
4859b318b824SVille Syrjälä 		else if (IS_GEN(dev_priv, 3))
4860b318b824SVille Syrjälä 			return i915_irq_handler;
4861b318b824SVille Syrjälä 		else
4862b318b824SVille Syrjälä 			return i8xx_irq_handler;
4863b318b824SVille Syrjälä 	} else {
4864b318b824SVille Syrjälä 		if (INTEL_GEN(dev_priv) >= 11)
4865b318b824SVille Syrjälä 			return gen11_irq_handler;
4866b318b824SVille Syrjälä 		else if (INTEL_GEN(dev_priv) >= 8)
4867b318b824SVille Syrjälä 			return gen8_irq_handler;
4868b318b824SVille Syrjälä 		else
4869b318b824SVille Syrjälä 			return ironlake_irq_handler;
4870b318b824SVille Syrjälä 	}
4871b318b824SVille Syrjälä }
4872b318b824SVille Syrjälä 
4873b318b824SVille Syrjälä static void intel_irq_reset(struct drm_i915_private *dev_priv)
4874b318b824SVille Syrjälä {
4875b318b824SVille Syrjälä 	if (HAS_GMCH(dev_priv)) {
4876b318b824SVille Syrjälä 		if (IS_CHERRYVIEW(dev_priv))
4877b318b824SVille Syrjälä 			cherryview_irq_reset(dev_priv);
4878b318b824SVille Syrjälä 		else if (IS_VALLEYVIEW(dev_priv))
4879b318b824SVille Syrjälä 			valleyview_irq_reset(dev_priv);
4880b318b824SVille Syrjälä 		else if (IS_GEN(dev_priv, 4))
4881b318b824SVille Syrjälä 			i965_irq_reset(dev_priv);
4882b318b824SVille Syrjälä 		else if (IS_GEN(dev_priv, 3))
4883b318b824SVille Syrjälä 			i915_irq_reset(dev_priv);
4884b318b824SVille Syrjälä 		else
4885b318b824SVille Syrjälä 			i8xx_irq_reset(dev_priv);
4886b318b824SVille Syrjälä 	} else {
4887b318b824SVille Syrjälä 		if (INTEL_GEN(dev_priv) >= 11)
4888b318b824SVille Syrjälä 			gen11_irq_reset(dev_priv);
4889b318b824SVille Syrjälä 		else if (INTEL_GEN(dev_priv) >= 8)
4890b318b824SVille Syrjälä 			gen8_irq_reset(dev_priv);
4891b318b824SVille Syrjälä 		else
4892b318b824SVille Syrjälä 			ironlake_irq_reset(dev_priv);
4893b318b824SVille Syrjälä 	}
4894b318b824SVille Syrjälä }
4895b318b824SVille Syrjälä 
4896b318b824SVille Syrjälä static void intel_irq_postinstall(struct drm_i915_private *dev_priv)
4897b318b824SVille Syrjälä {
4898b318b824SVille Syrjälä 	if (HAS_GMCH(dev_priv)) {
4899b318b824SVille Syrjälä 		if (IS_CHERRYVIEW(dev_priv))
4900b318b824SVille Syrjälä 			cherryview_irq_postinstall(dev_priv);
4901b318b824SVille Syrjälä 		else if (IS_VALLEYVIEW(dev_priv))
4902b318b824SVille Syrjälä 			valleyview_irq_postinstall(dev_priv);
4903b318b824SVille Syrjälä 		else if (IS_GEN(dev_priv, 4))
4904b318b824SVille Syrjälä 			i965_irq_postinstall(dev_priv);
4905b318b824SVille Syrjälä 		else if (IS_GEN(dev_priv, 3))
4906b318b824SVille Syrjälä 			i915_irq_postinstall(dev_priv);
4907b318b824SVille Syrjälä 		else
4908b318b824SVille Syrjälä 			i8xx_irq_postinstall(dev_priv);
4909b318b824SVille Syrjälä 	} else {
4910b318b824SVille Syrjälä 		if (INTEL_GEN(dev_priv) >= 11)
4911b318b824SVille Syrjälä 			gen11_irq_postinstall(dev_priv);
4912b318b824SVille Syrjälä 		else if (INTEL_GEN(dev_priv) >= 8)
4913b318b824SVille Syrjälä 			gen8_irq_postinstall(dev_priv);
4914b318b824SVille Syrjälä 		else
4915b318b824SVille Syrjälä 			ironlake_irq_postinstall(dev_priv);
4916b318b824SVille Syrjälä 	}
4917b318b824SVille Syrjälä }
4918b318b824SVille Syrjälä 
4919cefcff8fSJoonas Lahtinen /**
4920fca52a55SDaniel Vetter  * intel_irq_install - enables the hardware interrupt
4921fca52a55SDaniel Vetter  * @dev_priv: i915 device instance
4922fca52a55SDaniel Vetter  *
4923fca52a55SDaniel Vetter  * This function enables the hardware interrupt handling, but leaves the hotplug
4924fca52a55SDaniel Vetter  * handling still disabled. It is called after intel_irq_init().
4925fca52a55SDaniel Vetter  *
4926fca52a55SDaniel Vetter  * In the driver load and resume code we need working interrupts in a few places
4927fca52a55SDaniel Vetter  * but don't want to deal with the hassle of concurrent probe and hotplug
4928fca52a55SDaniel Vetter  * workers. Hence the split into this two-stage approach.
4929fca52a55SDaniel Vetter  */
49302aeb7d3aSDaniel Vetter int intel_irq_install(struct drm_i915_private *dev_priv)
49312aeb7d3aSDaniel Vetter {
4932b318b824SVille Syrjälä 	int irq = dev_priv->drm.pdev->irq;
4933b318b824SVille Syrjälä 	int ret;
4934b318b824SVille Syrjälä 
49352aeb7d3aSDaniel Vetter 	/*
49362aeb7d3aSDaniel Vetter 	 * We enable some interrupt sources in our postinstall hooks, so mark
49372aeb7d3aSDaniel Vetter 	 * interrupts as enabled _before_ actually enabling them to avoid
49382aeb7d3aSDaniel Vetter 	 * special cases in our ordering checks.
49392aeb7d3aSDaniel Vetter 	 */
4940ad1443f0SSagar Arun Kamble 	dev_priv->runtime_pm.irqs_enabled = true;
49412aeb7d3aSDaniel Vetter 
4942b318b824SVille Syrjälä 	dev_priv->drm.irq_enabled = true;
4943b318b824SVille Syrjälä 
4944b318b824SVille Syrjälä 	intel_irq_reset(dev_priv);
4945b318b824SVille Syrjälä 
4946b318b824SVille Syrjälä 	ret = request_irq(irq, intel_irq_handler(dev_priv),
4947b318b824SVille Syrjälä 			  IRQF_SHARED, DRIVER_NAME, dev_priv);
4948b318b824SVille Syrjälä 	if (ret < 0) {
4949b318b824SVille Syrjälä 		dev_priv->drm.irq_enabled = false;
4950b318b824SVille Syrjälä 		return ret;
4951b318b824SVille Syrjälä 	}
4952b318b824SVille Syrjälä 
4953b318b824SVille Syrjälä 	intel_irq_postinstall(dev_priv);
4954b318b824SVille Syrjälä 
4955b318b824SVille Syrjälä 	return ret;
49562aeb7d3aSDaniel Vetter }
49572aeb7d3aSDaniel Vetter 
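/*
 * Illustrative sketch only (not part of this file): how a load/unload path
 * would pair the helpers above, per the kernel-doc. The caller and the error
 * label are hypothetical; only the ordering matters:
 *
 *	intel_irq_init(dev_priv);		// work items, timers, vtables
 *	ret = intel_irq_install(dev_priv);	// reset + request_irq + postinstall
 *	if (ret)
 *		goto err_irq;
 *	...
 *	intel_irq_uninstall(dev_priv);		// reset + free_irq on unload
 */
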
4958fca52a55SDaniel Vetter /**
4959fca52a55SDaniel Vetter  * intel_irq_uninstall - finalizes all irq handling
4960fca52a55SDaniel Vetter  * @dev_priv: i915 device instance
4961fca52a55SDaniel Vetter  *
4962fca52a55SDaniel Vetter  * This stops interrupt and hotplug handling and unregisters and frees all
4963fca52a55SDaniel Vetter  * resources acquired in the init functions.
4964fca52a55SDaniel Vetter  */
49652aeb7d3aSDaniel Vetter void intel_irq_uninstall(struct drm_i915_private *dev_priv)
49662aeb7d3aSDaniel Vetter {
4967b318b824SVille Syrjälä 	int irq = dev_priv->drm.pdev->irq;
4968b318b824SVille Syrjälä 
4969b318b824SVille Syrjälä 	/*
4970b318b824SVille Syrjälä 	 * FIXME we can get called twice during driver load
4971b318b824SVille Syrjälä 	 * error handling due to intel_modeset_cleanup()
4972b318b824SVille Syrjälä 	 * calling us out of sequence. Would be nice if
4973b318b824SVille Syrjälä 	 * it didn't do that...
4974b318b824SVille Syrjälä 	 */
4975b318b824SVille Syrjälä 	if (!dev_priv->drm.irq_enabled)
4976b318b824SVille Syrjälä 		return;
4977b318b824SVille Syrjälä 
4978b318b824SVille Syrjälä 	dev_priv->drm.irq_enabled = false;
4979b318b824SVille Syrjälä 
4980b318b824SVille Syrjälä 	intel_irq_reset(dev_priv);
4981b318b824SVille Syrjälä 
4982b318b824SVille Syrjälä 	free_irq(irq, dev_priv);
4983b318b824SVille Syrjälä 
49842aeb7d3aSDaniel Vetter 	intel_hpd_cancel_work(dev_priv);
4985ad1443f0SSagar Arun Kamble 	dev_priv->runtime_pm.irqs_enabled = false;
49862aeb7d3aSDaniel Vetter }
49872aeb7d3aSDaniel Vetter 
4988fca52a55SDaniel Vetter /**
4989fca52a55SDaniel Vetter  * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
4990fca52a55SDaniel Vetter  * @dev_priv: i915 device instance
4991fca52a55SDaniel Vetter  *
4992fca52a55SDaniel Vetter  * This function is used to disable interrupts at runtime, both in the runtime
4993fca52a55SDaniel Vetter  * pm and the system suspend/resume code.
4994fca52a55SDaniel Vetter  */
4995b963291cSDaniel Vetter void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
4996c67a470bSPaulo Zanoni {
4997b318b824SVille Syrjälä 	intel_irq_reset(dev_priv);
4998ad1443f0SSagar Arun Kamble 	dev_priv->runtime_pm.irqs_enabled = false;
4999315ca4c4SVille Syrjälä 	intel_synchronize_irq(dev_priv);
5000c67a470bSPaulo Zanoni }
5001c67a470bSPaulo Zanoni 
5002fca52a55SDaniel Vetter /**
5003fca52a55SDaniel Vetter  * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
5004fca52a55SDaniel Vetter  * @dev_priv: i915 device instance
5005fca52a55SDaniel Vetter  *
5006fca52a55SDaniel Vetter  * This function is used to enable interrupts at runtime, both in the runtime
5007fca52a55SDaniel Vetter  * pm and the system suspend/resume code.
5008fca52a55SDaniel Vetter  */
5009b963291cSDaniel Vetter void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
5010c67a470bSPaulo Zanoni {
5011ad1443f0SSagar Arun Kamble 	dev_priv->runtime_pm.irqs_enabled = true;
5012b318b824SVille Syrjälä 	intel_irq_reset(dev_priv);
5013b318b824SVille Syrjälä 	intel_irq_postinstall(dev_priv);
5014c67a470bSPaulo Zanoni }
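
/*
 * Illustrative sketch only (not part of this file): the expected pairing of
 * the two helpers above in a runtime suspend/resume path. The caller names
 * are hypothetical:
 *
 *	static int example_runtime_suspend(struct drm_i915_private *i915)
 *	{
 *		intel_runtime_pm_disable_interrupts(i915);
 *		...			// power down with interrupts off
 *		return 0;
 *	}
 *
 *	static int example_runtime_resume(struct drm_i915_private *i915)
 *	{
 *		...			// power back up first
 *		intel_runtime_pm_enable_interrupts(i915);
 *		return 0;
 *	}
 */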
5015