xref: /openbmc/linux/drivers/gpu/drm/i915/i915_irq.c (revision 58820574f1e937a1cf3eea629f1496e02560a132)
1c0e09200SDave Airlie /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
2c0e09200SDave Airlie  */
3c0e09200SDave Airlie /*
4c0e09200SDave Airlie  * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
5c0e09200SDave Airlie  * All Rights Reserved.
6c0e09200SDave Airlie  *
7c0e09200SDave Airlie  * Permission is hereby granted, free of charge, to any person obtaining a
8c0e09200SDave Airlie  * copy of this software and associated documentation files (the
9c0e09200SDave Airlie  * "Software"), to deal in the Software without restriction, including
10c0e09200SDave Airlie  * without limitation the rights to use, copy, modify, merge, publish,
11c0e09200SDave Airlie  * distribute, sub license, and/or sell copies of the Software, and to
12c0e09200SDave Airlie  * permit persons to whom the Software is furnished to do so, subject to
13c0e09200SDave Airlie  * the following conditions:
14c0e09200SDave Airlie  *
15c0e09200SDave Airlie  * The above copyright notice and this permission notice (including the
16c0e09200SDave Airlie  * next paragraph) shall be included in all copies or substantial portions
17c0e09200SDave Airlie  * of the Software.
18c0e09200SDave Airlie  *
19c0e09200SDave Airlie  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20c0e09200SDave Airlie  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21c0e09200SDave Airlie  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22c0e09200SDave Airlie  * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23c0e09200SDave Airlie  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24c0e09200SDave Airlie  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25c0e09200SDave Airlie  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26c0e09200SDave Airlie  *
27c0e09200SDave Airlie  */
28c0e09200SDave Airlie 
29a70491ccSJoe Perches #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30a70491ccSJoe Perches 
31b2c88f5bSDamien Lespiau #include <linux/circ_buf.h>
3255367a27SJani Nikula #include <linux/cpuidle.h>
3355367a27SJani Nikula #include <linux/slab.h>
3455367a27SJani Nikula #include <linux/sysrq.h>
3555367a27SJani Nikula 
36fcd70cd3SDaniel Vetter #include <drm/drm_drv.h>
3755367a27SJani Nikula #include <drm/drm_irq.h>
38760285e7SDavid Howells #include <drm/i915_drm.h>
3955367a27SJani Nikula 
40df0566a6SJani Nikula #include "display/intel_fifo_underrun.h"
41df0566a6SJani Nikula #include "display/intel_hotplug.h"
42df0566a6SJani Nikula #include "display/intel_lpe_audio.h"
43df0566a6SJani Nikula #include "display/intel_psr.h"
44df0566a6SJani Nikula 
45c0e09200SDave Airlie #include "i915_drv.h"
46440e2b3dSJani Nikula #include "i915_irq.h"
471c5d22f7SChris Wilson #include "i915_trace.h"
4879e53945SJesse Barnes #include "intel_drv.h"
49d13616dbSJani Nikula #include "intel_pm.h"
50c0e09200SDave Airlie 
51fca52a55SDaniel Vetter /**
52fca52a55SDaniel Vetter  * DOC: interrupt handling
53fca52a55SDaniel Vetter  *
54fca52a55SDaniel Vetter  * These functions provide the basic support for enabling and disabling the
55fca52a55SDaniel Vetter  * interrupt handling. There's a lot more functionality in i915_irq.c
56fca52a55SDaniel Vetter  * and related files, but that will be described in separate chapters.
57fca52a55SDaniel Vetter  */
58fca52a55SDaniel Vetter 
59e4ce95aaSVille Syrjälä static const u32 hpd_ilk[HPD_NUM_PINS] = {
60e4ce95aaSVille Syrjälä 	[HPD_PORT_A] = DE_DP_A_HOTPLUG,
61e4ce95aaSVille Syrjälä };
62e4ce95aaSVille Syrjälä 
6323bb4cb5SVille Syrjälä static const u32 hpd_ivb[HPD_NUM_PINS] = {
6423bb4cb5SVille Syrjälä 	[HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
6523bb4cb5SVille Syrjälä };
6623bb4cb5SVille Syrjälä 
673a3b3c7dSVille Syrjälä static const u32 hpd_bdw[HPD_NUM_PINS] = {
683a3b3c7dSVille Syrjälä 	[HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG,
693a3b3c7dSVille Syrjälä };
703a3b3c7dSVille Syrjälä 
717c7e10dbSVille Syrjälä static const u32 hpd_ibx[HPD_NUM_PINS] = {
72e5868a31SEgbert Eich 	[HPD_CRT] = SDE_CRT_HOTPLUG,
73e5868a31SEgbert Eich 	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
74e5868a31SEgbert Eich 	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
75e5868a31SEgbert Eich 	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
76e5868a31SEgbert Eich 	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
77e5868a31SEgbert Eich };
78e5868a31SEgbert Eich 
797c7e10dbSVille Syrjälä static const u32 hpd_cpt[HPD_NUM_PINS] = {
80e5868a31SEgbert Eich 	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
8173c352a2SDaniel Vetter 	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
82e5868a31SEgbert Eich 	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
83e5868a31SEgbert Eich 	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
84e5868a31SEgbert Eich 	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
85e5868a31SEgbert Eich };
86e5868a31SEgbert Eich 
8726951cafSXiong Zhang static const u32 hpd_spt[HPD_NUM_PINS] = {
8874c0b395SVille Syrjälä 	[HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
8926951cafSXiong Zhang 	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
9026951cafSXiong Zhang 	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
9126951cafSXiong Zhang 	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
9226951cafSXiong Zhang 	[HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT
9326951cafSXiong Zhang };
9426951cafSXiong Zhang 
957c7e10dbSVille Syrjälä static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
96e5868a31SEgbert Eich 	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
97e5868a31SEgbert Eich 	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
98e5868a31SEgbert Eich 	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
99e5868a31SEgbert Eich 	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
100e5868a31SEgbert Eich 	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
101e5868a31SEgbert Eich 	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
102e5868a31SEgbert Eich };
103e5868a31SEgbert Eich 
1047c7e10dbSVille Syrjälä static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
105e5868a31SEgbert Eich 	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
106e5868a31SEgbert Eich 	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
107e5868a31SEgbert Eich 	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
108e5868a31SEgbert Eich 	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
109e5868a31SEgbert Eich 	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
110e5868a31SEgbert Eich 	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
111e5868a31SEgbert Eich };
112e5868a31SEgbert Eich 
1134bca26d0SVille Syrjälä static const u32 hpd_status_i915[HPD_NUM_PINS] = {
114e5868a31SEgbert Eich 	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
115e5868a31SEgbert Eich 	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
116e5868a31SEgbert Eich 	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
117e5868a31SEgbert Eich 	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
118e5868a31SEgbert Eich 	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
119e5868a31SEgbert Eich 	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
120e5868a31SEgbert Eich };
121e5868a31SEgbert Eich 
122e0a20ad7SShashank Sharma /* BXT hpd list */
123e0a20ad7SShashank Sharma static const u32 hpd_bxt[HPD_NUM_PINS] = {
1247f3561beSSonika Jindal 	[HPD_PORT_A] = BXT_DE_PORT_HP_DDIA,
125e0a20ad7SShashank Sharma 	[HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
126e0a20ad7SShashank Sharma 	[HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
127e0a20ad7SShashank Sharma };
128e0a20ad7SShashank Sharma 
129b796b971SDhinakaran Pandiyan static const u32 hpd_gen11[HPD_NUM_PINS] = {
130b796b971SDhinakaran Pandiyan 	[HPD_PORT_C] = GEN11_TC1_HOTPLUG | GEN11_TBT1_HOTPLUG,
131b796b971SDhinakaran Pandiyan 	[HPD_PORT_D] = GEN11_TC2_HOTPLUG | GEN11_TBT2_HOTPLUG,
132b796b971SDhinakaran Pandiyan 	[HPD_PORT_E] = GEN11_TC3_HOTPLUG | GEN11_TBT3_HOTPLUG,
133b796b971SDhinakaran Pandiyan 	[HPD_PORT_F] = GEN11_TC4_HOTPLUG | GEN11_TBT4_HOTPLUG
134121e758eSDhinakaran Pandiyan };
135121e758eSDhinakaran Pandiyan 
13631604222SAnusha Srivatsa static const u32 hpd_icp[HPD_NUM_PINS] = {
13731604222SAnusha Srivatsa 	[HPD_PORT_A] = SDE_DDIA_HOTPLUG_ICP,
13831604222SAnusha Srivatsa 	[HPD_PORT_B] = SDE_DDIB_HOTPLUG_ICP,
13931604222SAnusha Srivatsa 	[HPD_PORT_C] = SDE_TC1_HOTPLUG_ICP,
14031604222SAnusha Srivatsa 	[HPD_PORT_D] = SDE_TC2_HOTPLUG_ICP,
14131604222SAnusha Srivatsa 	[HPD_PORT_E] = SDE_TC3_HOTPLUG_ICP,
14231604222SAnusha Srivatsa 	[HPD_PORT_F] = SDE_TC4_HOTPLUG_ICP
14331604222SAnusha Srivatsa };
14431604222SAnusha Srivatsa 
145c6f7acb8SMatt Roper static const u32 hpd_mcc[HPD_NUM_PINS] = {
146c6f7acb8SMatt Roper 	[HPD_PORT_A] = SDE_DDIA_HOTPLUG_ICP,
147c6f7acb8SMatt Roper 	[HPD_PORT_B] = SDE_DDIB_HOTPLUG_ICP,
148c6f7acb8SMatt Roper 	[HPD_PORT_C] = SDE_TC1_HOTPLUG_ICP
149c6f7acb8SMatt Roper };
150c6f7acb8SMatt Roper 
15165f42cdcSPaulo Zanoni static void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr,
15268eb49b1SPaulo Zanoni 			   i915_reg_t iir, i915_reg_t ier)
15368eb49b1SPaulo Zanoni {
15465f42cdcSPaulo Zanoni 	intel_uncore_write(uncore, imr, 0xffffffff);
15565f42cdcSPaulo Zanoni 	intel_uncore_posting_read(uncore, imr);
15668eb49b1SPaulo Zanoni 
15765f42cdcSPaulo Zanoni 	intel_uncore_write(uncore, ier, 0);
15868eb49b1SPaulo Zanoni 
1595c502442SPaulo Zanoni 	/* IIR can theoretically queue up two events. Be paranoid. */
16065f42cdcSPaulo Zanoni 	intel_uncore_write(uncore, iir, 0xffffffff);
16165f42cdcSPaulo Zanoni 	intel_uncore_posting_read(uncore, iir);
16265f42cdcSPaulo Zanoni 	intel_uncore_write(uncore, iir, 0xffffffff);
16365f42cdcSPaulo Zanoni 	intel_uncore_posting_read(uncore, iir);
16468eb49b1SPaulo Zanoni }
1655c502442SPaulo Zanoni 
16665f42cdcSPaulo Zanoni static void gen2_irq_reset(struct intel_uncore *uncore)
16768eb49b1SPaulo Zanoni {
16865f42cdcSPaulo Zanoni 	intel_uncore_write16(uncore, GEN2_IMR, 0xffff);
16965f42cdcSPaulo Zanoni 	intel_uncore_posting_read16(uncore, GEN2_IMR);
170a9d356a6SPaulo Zanoni 
17165f42cdcSPaulo Zanoni 	intel_uncore_write16(uncore, GEN2_IER, 0);
17268eb49b1SPaulo Zanoni 
17368eb49b1SPaulo Zanoni 	/* IIR can theoretically queue up two events. Be paranoid. */
17465f42cdcSPaulo Zanoni 	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
17565f42cdcSPaulo Zanoni 	intel_uncore_posting_read16(uncore, GEN2_IIR);
17665f42cdcSPaulo Zanoni 	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
17765f42cdcSPaulo Zanoni 	intel_uncore_posting_read16(uncore, GEN2_IIR);
17868eb49b1SPaulo Zanoni }
17968eb49b1SPaulo Zanoni 
180b16b2a2fSPaulo Zanoni #define GEN8_IRQ_RESET_NDX(uncore, type, which) \
18168eb49b1SPaulo Zanoni ({ \
18268eb49b1SPaulo Zanoni 	unsigned int which_ = which; \
183b16b2a2fSPaulo Zanoni 	gen3_irq_reset((uncore), GEN8_##type##_IMR(which_), \
18468eb49b1SPaulo Zanoni 		       GEN8_##type##_IIR(which_), GEN8_##type##_IER(which_)); \
18568eb49b1SPaulo Zanoni })
18668eb49b1SPaulo Zanoni 
187b16b2a2fSPaulo Zanoni #define GEN3_IRQ_RESET(uncore, type) \
188b16b2a2fSPaulo Zanoni 	gen3_irq_reset((uncore), type##IMR, type##IIR, type##IER)
18968eb49b1SPaulo Zanoni 
190b16b2a2fSPaulo Zanoni #define GEN2_IRQ_RESET(uncore) \
191b16b2a2fSPaulo Zanoni 	gen2_irq_reset(uncore)
192e9e9848aSVille Syrjälä 
193337ba017SPaulo Zanoni /*
194337ba017SPaulo Zanoni  * We should clear IMR at preinstall/uninstall, and just check at postinstall.
195337ba017SPaulo Zanoni  */
19665f42cdcSPaulo Zanoni static void gen3_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg)
197b51a2842SVille Syrjälä {
19865f42cdcSPaulo Zanoni 	u32 val = intel_uncore_read(uncore, reg);
199b51a2842SVille Syrjälä 
200b51a2842SVille Syrjälä 	if (val == 0)
201b51a2842SVille Syrjälä 		return;
202b51a2842SVille Syrjälä 
203b51a2842SVille Syrjälä 	WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
204f0f59a00SVille Syrjälä 	     i915_mmio_reg_offset(reg), val);
20565f42cdcSPaulo Zanoni 	intel_uncore_write(uncore, reg, 0xffffffff);
20665f42cdcSPaulo Zanoni 	intel_uncore_posting_read(uncore, reg);
20765f42cdcSPaulo Zanoni 	intel_uncore_write(uncore, reg, 0xffffffff);
20865f42cdcSPaulo Zanoni 	intel_uncore_posting_read(uncore, reg);
209b51a2842SVille Syrjälä }
210337ba017SPaulo Zanoni 
21165f42cdcSPaulo Zanoni static void gen2_assert_iir_is_zero(struct intel_uncore *uncore)
212e9e9848aSVille Syrjälä {
21365f42cdcSPaulo Zanoni 	u16 val = intel_uncore_read16(uncore, GEN2_IIR);
214e9e9848aSVille Syrjälä 
215e9e9848aSVille Syrjälä 	if (val == 0)
216e9e9848aSVille Syrjälä 		return;
217e9e9848aSVille Syrjälä 
218e9e9848aSVille Syrjälä 	WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
2199d9523d8SPaulo Zanoni 	     i915_mmio_reg_offset(GEN2_IIR), val);
22065f42cdcSPaulo Zanoni 	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
22165f42cdcSPaulo Zanoni 	intel_uncore_posting_read16(uncore, GEN2_IIR);
22265f42cdcSPaulo Zanoni 	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
22365f42cdcSPaulo Zanoni 	intel_uncore_posting_read16(uncore, GEN2_IIR);
224e9e9848aSVille Syrjälä }
225e9e9848aSVille Syrjälä 
22665f42cdcSPaulo Zanoni static void gen3_irq_init(struct intel_uncore *uncore,
22768eb49b1SPaulo Zanoni 			  i915_reg_t imr, u32 imr_val,
22868eb49b1SPaulo Zanoni 			  i915_reg_t ier, u32 ier_val,
22968eb49b1SPaulo Zanoni 			  i915_reg_t iir)
23068eb49b1SPaulo Zanoni {
23165f42cdcSPaulo Zanoni 	gen3_assert_iir_is_zero(uncore, iir);
23235079899SPaulo Zanoni 
23365f42cdcSPaulo Zanoni 	intel_uncore_write(uncore, ier, ier_val);
23465f42cdcSPaulo Zanoni 	intel_uncore_write(uncore, imr, imr_val);
23565f42cdcSPaulo Zanoni 	intel_uncore_posting_read(uncore, imr);
23668eb49b1SPaulo Zanoni }
23735079899SPaulo Zanoni 
23865f42cdcSPaulo Zanoni static void gen2_irq_init(struct intel_uncore *uncore,
2392918c3caSPaulo Zanoni 			  u32 imr_val, u32 ier_val)
24068eb49b1SPaulo Zanoni {
24165f42cdcSPaulo Zanoni 	gen2_assert_iir_is_zero(uncore);
24268eb49b1SPaulo Zanoni 
24365f42cdcSPaulo Zanoni 	intel_uncore_write16(uncore, GEN2_IER, ier_val);
24465f42cdcSPaulo Zanoni 	intel_uncore_write16(uncore, GEN2_IMR, imr_val);
24565f42cdcSPaulo Zanoni 	intel_uncore_posting_read16(uncore, GEN2_IMR);
24668eb49b1SPaulo Zanoni }
24768eb49b1SPaulo Zanoni 
248b16b2a2fSPaulo Zanoni #define GEN8_IRQ_INIT_NDX(uncore, type, which, imr_val, ier_val) \
24968eb49b1SPaulo Zanoni ({ \
25068eb49b1SPaulo Zanoni 	unsigned int which_ = which; \
251b16b2a2fSPaulo Zanoni 	gen3_irq_init((uncore), \
25268eb49b1SPaulo Zanoni 		      GEN8_##type##_IMR(which_), imr_val, \
25368eb49b1SPaulo Zanoni 		      GEN8_##type##_IER(which_), ier_val, \
25468eb49b1SPaulo Zanoni 		      GEN8_##type##_IIR(which_)); \
25568eb49b1SPaulo Zanoni })
25668eb49b1SPaulo Zanoni 
257b16b2a2fSPaulo Zanoni #define GEN3_IRQ_INIT(uncore, type, imr_val, ier_val) \
258b16b2a2fSPaulo Zanoni 	gen3_irq_init((uncore), \
25968eb49b1SPaulo Zanoni 		      type##IMR, imr_val, \
26068eb49b1SPaulo Zanoni 		      type##IER, ier_val, \
26168eb49b1SPaulo Zanoni 		      type##IIR)
26268eb49b1SPaulo Zanoni 
263b16b2a2fSPaulo Zanoni #define GEN2_IRQ_INIT(uncore, imr_val, ier_val) \
264b16b2a2fSPaulo Zanoni 	gen2_irq_init((uncore), imr_val, ier_val)
265e9e9848aSVille Syrjälä 
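/*
 * For illustration only -- hypothetical expansions, not invocations made in
 * this file. The token-pasting helpers above expand roughly as:
 *
 *	GEN3_IRQ_RESET(uncore, SDE)
 *		-> gen3_irq_reset(uncore, SDEIMR, SDEIIR, SDEIER);
 *	GEN8_IRQ_RESET_NDX(uncore, GT, 2)
 *		-> gen3_irq_reset(uncore, GEN8_GT_IMR(2), GEN8_GT_IIR(2), GEN8_GT_IER(2));
 *	GEN3_IRQ_INIT(uncore, SDE, ~mask, 0xffffffff)
 *		-> gen3_irq_init(uncore, SDEIMR, ~mask, SDEIER, 0xffffffff, SDEIIR);
 */
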
266c9a9a268SImre Deak static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
26726705e20SSagar Arun Kamble static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
268c9a9a268SImre Deak 
2690706f17cSEgbert Eich /* For display hotplug interrupt */
2700706f17cSEgbert Eich static inline void
2710706f17cSEgbert Eich i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
272a9c287c9SJani Nikula 				     u32 mask,
273a9c287c9SJani Nikula 				     u32 bits)
2740706f17cSEgbert Eich {
275a9c287c9SJani Nikula 	u32 val;
2760706f17cSEgbert Eich 
27767520415SChris Wilson 	lockdep_assert_held(&dev_priv->irq_lock);
2780706f17cSEgbert Eich 	WARN_ON(bits & ~mask);
2790706f17cSEgbert Eich 
2800706f17cSEgbert Eich 	val = I915_READ(PORT_HOTPLUG_EN);
2810706f17cSEgbert Eich 	val &= ~mask;
2820706f17cSEgbert Eich 	val |= bits;
2830706f17cSEgbert Eich 	I915_WRITE(PORT_HOTPLUG_EN, val);
2840706f17cSEgbert Eich }
2850706f17cSEgbert Eich 
2860706f17cSEgbert Eich /**
2870706f17cSEgbert Eich  * i915_hotplug_interrupt_update - update hotplug interrupt enable
2880706f17cSEgbert Eich  * @dev_priv: driver private
2890706f17cSEgbert Eich  * @mask: bits to update
2900706f17cSEgbert Eich  * @bits: bits to enable
2910706f17cSEgbert Eich  * NOTE: the HPD enable bits are modified both inside and outside
2920706f17cSEgbert Eich  * of an interrupt context. To avoid read-modify-write cycles
2930706f17cSEgbert Eich  * interfering, these bits are protected by a spinlock. Since this
2940706f17cSEgbert Eich  * function is usually not called from a context where the lock is
2950706f17cSEgbert Eich  * held already, this function acquires the lock itself. A non-locking
2960706f17cSEgbert Eich  * version is also available.
2970706f17cSEgbert Eich  */
2980706f17cSEgbert Eich void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
299a9c287c9SJani Nikula 				   u32 mask,
300a9c287c9SJani Nikula 				   u32 bits)
3010706f17cSEgbert Eich {
3020706f17cSEgbert Eich 	spin_lock_irq(&dev_priv->irq_lock);
3030706f17cSEgbert Eich 	i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
3040706f17cSEgbert Eich 	spin_unlock_irq(&dev_priv->irq_lock);
3050706f17cSEgbert Eich }
3060706f17cSEgbert Eich 
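/*
 * Usage sketch, for illustration only (hypothetical caller): from process
 * context the locking variant is called directly, e.g.
 *
 *	i915_hotplug_interrupt_update(dev_priv, PORTB_HOTPLUG_INT_EN,
 *				      PORTB_HOTPLUG_INT_EN);
 *
 * enables the port B hotplug interrupt, while passing 0 as @bits with the same
 * @mask disables it. Callers that already hold dev_priv->irq_lock use
 * i915_hotplug_interrupt_update_locked() instead.
 */
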
30796606f3bSOscar Mateo static u32
3089b77011eSTvrtko Ursulin gen11_gt_engine_identity(struct intel_gt *gt,
30996606f3bSOscar Mateo 			 const unsigned int bank, const unsigned int bit);
31096606f3bSOscar Mateo 
3119b77011eSTvrtko Ursulin static bool gen11_reset_one_iir(struct intel_gt *gt,
31296606f3bSOscar Mateo 				const unsigned int bank,
31396606f3bSOscar Mateo 				const unsigned int bit)
31496606f3bSOscar Mateo {
3159b77011eSTvrtko Ursulin 	void __iomem * const regs = gt->uncore->regs;
31696606f3bSOscar Mateo 	u32 dw;
31796606f3bSOscar Mateo 
3189b77011eSTvrtko Ursulin 	lockdep_assert_held(&gt->i915->irq_lock);
31996606f3bSOscar Mateo 
32096606f3bSOscar Mateo 	dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
32196606f3bSOscar Mateo 	if (dw & BIT(bit)) {
32296606f3bSOscar Mateo 		/*
32396606f3bSOscar Mateo 		 * According to the BSpec, DW_IIR bits cannot be cleared without
32496606f3bSOscar Mateo 		 * first servicing the Selector & Shared IIR registers.
32596606f3bSOscar Mateo 		 */
3269b77011eSTvrtko Ursulin 		gen11_gt_engine_identity(gt, bank, bit);
32796606f3bSOscar Mateo 
32896606f3bSOscar Mateo 		/*
32996606f3bSOscar Mateo 		 * We locked GT INT DW by reading it. If we want to (try
33096606f3bSOscar Mateo 		 * to) recover from this successfully, we need to clear
33196606f3bSOscar Mateo 		 * our bit, otherwise we are locking the register for
33296606f3bSOscar Mateo 		 * everybody.
33396606f3bSOscar Mateo 		 */
33496606f3bSOscar Mateo 		raw_reg_write(regs, GEN11_GT_INTR_DW(bank), BIT(bit));
33596606f3bSOscar Mateo 
33696606f3bSOscar Mateo 		return true;
33796606f3bSOscar Mateo 	}
33896606f3bSOscar Mateo 
33996606f3bSOscar Mateo 	return false;
34096606f3bSOscar Mateo }
34196606f3bSOscar Mateo 
342d9dc34f1SVille Syrjälä /**
343d9dc34f1SVille Syrjälä  * ilk_update_display_irq - update DEIMR
344d9dc34f1SVille Syrjälä  * @dev_priv: driver private
345d9dc34f1SVille Syrjälä  * @interrupt_mask: mask of interrupt bits to update
346d9dc34f1SVille Syrjälä  * @enabled_irq_mask: mask of interrupt bits to enable
347d9dc34f1SVille Syrjälä  */
348fbdedaeaSVille Syrjälä void ilk_update_display_irq(struct drm_i915_private *dev_priv,
349a9c287c9SJani Nikula 			    u32 interrupt_mask,
350a9c287c9SJani Nikula 			    u32 enabled_irq_mask)
351036a4a7dSZhenyu Wang {
352a9c287c9SJani Nikula 	u32 new_val;
353d9dc34f1SVille Syrjälä 
35467520415SChris Wilson 	lockdep_assert_held(&dev_priv->irq_lock);
3554bc9d430SDaniel Vetter 
356d9dc34f1SVille Syrjälä 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
357d9dc34f1SVille Syrjälä 
3589df7575fSJesse Barnes 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
359c67a470bSPaulo Zanoni 		return;
360c67a470bSPaulo Zanoni 
361d9dc34f1SVille Syrjälä 	new_val = dev_priv->irq_mask;
362d9dc34f1SVille Syrjälä 	new_val &= ~interrupt_mask;
363d9dc34f1SVille Syrjälä 	new_val |= (~enabled_irq_mask & interrupt_mask);
364d9dc34f1SVille Syrjälä 
365d9dc34f1SVille Syrjälä 	if (new_val != dev_priv->irq_mask) {
366d9dc34f1SVille Syrjälä 		dev_priv->irq_mask = new_val;
3671ec14ad3SChris Wilson 		I915_WRITE(DEIMR, dev_priv->irq_mask);
3683143a2bfSChris Wilson 		POSTING_READ(DEIMR);
369036a4a7dSZhenyu Wang 	}
370036a4a7dSZhenyu Wang }
371036a4a7dSZhenyu Wang 
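/*
 * For illustration only (hypothetical values): calling ilk_update_display_irq()
 * with @interrupt_mask = DE_GSE and @enabled_irq_mask = DE_GSE clears the
 * DE_GSE bit in DEIMR, i.e. unmasks the GSE interrupt; calling it again with
 * @enabled_irq_mask = 0 sets the bit and masks the interrupt. Bits outside
 * @interrupt_mask are left untouched. The other *_update_*_irq() helpers below
 * follow the same mask/enable convention.
 */
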
37243eaea13SPaulo Zanoni /**
37343eaea13SPaulo Zanoni  * ilk_update_gt_irq - update GTIMR
37443eaea13SPaulo Zanoni  * @dev_priv: driver private
37543eaea13SPaulo Zanoni  * @interrupt_mask: mask of interrupt bits to update
37643eaea13SPaulo Zanoni  * @enabled_irq_mask: mask of interrupt bits to enable
37743eaea13SPaulo Zanoni  */
37843eaea13SPaulo Zanoni static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
379a9c287c9SJani Nikula 			      u32 interrupt_mask,
380a9c287c9SJani Nikula 			      u32 enabled_irq_mask)
38143eaea13SPaulo Zanoni {
38267520415SChris Wilson 	lockdep_assert_held(&dev_priv->irq_lock);
38343eaea13SPaulo Zanoni 
38415a17aaeSDaniel Vetter 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
38515a17aaeSDaniel Vetter 
3869df7575fSJesse Barnes 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
387c67a470bSPaulo Zanoni 		return;
388c67a470bSPaulo Zanoni 
38943eaea13SPaulo Zanoni 	dev_priv->gt_irq_mask &= ~interrupt_mask;
39043eaea13SPaulo Zanoni 	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
39143eaea13SPaulo Zanoni 	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
39243eaea13SPaulo Zanoni }
39343eaea13SPaulo Zanoni 
394a9c287c9SJani Nikula void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, u32 mask)
39543eaea13SPaulo Zanoni {
39643eaea13SPaulo Zanoni 	ilk_update_gt_irq(dev_priv, mask, mask);
397e33a4be8STvrtko Ursulin 	intel_uncore_posting_read_fw(&dev_priv->uncore, GTIMR);
39843eaea13SPaulo Zanoni }
39943eaea13SPaulo Zanoni 
400a9c287c9SJani Nikula void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, u32 mask)
40143eaea13SPaulo Zanoni {
40243eaea13SPaulo Zanoni 	ilk_update_gt_irq(dev_priv, mask, 0);
40343eaea13SPaulo Zanoni }
40443eaea13SPaulo Zanoni 
405f0f59a00SVille Syrjälä static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv)
406b900b949SImre Deak {
407d02b98b8SOscar Mateo 	WARN_ON_ONCE(INTEL_GEN(dev_priv) >= 11);
408d02b98b8SOscar Mateo 
409bca2bf2aSPandiyan, Dhinakaran 	return INTEL_GEN(dev_priv) >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
410b900b949SImre Deak }
411b900b949SImre Deak 
412*58820574STvrtko Ursulin static void write_pm_imr(struct intel_gt *gt)
413a72fbc3aSImre Deak {
414*58820574STvrtko Ursulin 	struct drm_i915_private *i915 = gt->i915;
415*58820574STvrtko Ursulin 	struct intel_uncore *uncore = gt->uncore;
416*58820574STvrtko Ursulin 	u32 mask = gt->pm_imr;
417917dc6b5SMika Kuoppala 	i915_reg_t reg;
418917dc6b5SMika Kuoppala 
419*58820574STvrtko Ursulin 	if (INTEL_GEN(i915) >= 11) {
420917dc6b5SMika Kuoppala 		reg = GEN11_GPM_WGBOXPERF_INTR_MASK;
421917dc6b5SMika Kuoppala 		/* pm is in upper half */
422917dc6b5SMika Kuoppala 		mask = mask << 16;
423*58820574STvrtko Ursulin 	} else if (INTEL_GEN(i915) >= 8) {
424917dc6b5SMika Kuoppala 		reg = GEN8_GT_IMR(2);
425917dc6b5SMika Kuoppala 	} else {
426917dc6b5SMika Kuoppala 		reg = GEN6_PMIMR;
427a72fbc3aSImre Deak 	}
428a72fbc3aSImre Deak 
429*58820574STvrtko Ursulin 	intel_uncore_write(uncore, reg, mask);
430*58820574STvrtko Ursulin 	intel_uncore_posting_read(uncore, reg);
431917dc6b5SMika Kuoppala }
432917dc6b5SMika Kuoppala 
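/*
 * For illustration only (hypothetical value): on gen11 the PM bits live in the
 * upper half of GEN11_GPM_WGBOXPERF_INTR_MASK, so a pm_imr of 0x0000ffff is
 * written to the hardware as 0xffff0000; on gen8+ and gen6 the value goes
 * unshifted into GEN8_GT_IMR(2) and GEN6_PMIMR respectively.
 */
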
433*58820574STvrtko Ursulin static void write_pm_ier(struct intel_gt *gt)
434b900b949SImre Deak {
435*58820574STvrtko Ursulin 	struct drm_i915_private *i915 = gt->i915;
436*58820574STvrtko Ursulin 	struct intel_uncore *uncore = gt->uncore;
437*58820574STvrtko Ursulin 	u32 mask = gt->pm_ier;
438917dc6b5SMika Kuoppala 	i915_reg_t reg;
439917dc6b5SMika Kuoppala 
440*58820574STvrtko Ursulin 	if (INTEL_GEN(i915) >= 11) {
441917dc6b5SMika Kuoppala 		reg = GEN11_GPM_WGBOXPERF_INTR_ENABLE;
442917dc6b5SMika Kuoppala 		/* pm is in upper half */
443917dc6b5SMika Kuoppala 		mask = mask << 16;
444*58820574STvrtko Ursulin 	} else if (INTEL_GEN(i915) >= 8) {
445917dc6b5SMika Kuoppala 		reg = GEN8_GT_IER(2);
446917dc6b5SMika Kuoppala 	} else {
447917dc6b5SMika Kuoppala 		reg = GEN6_PMIER;
448917dc6b5SMika Kuoppala 	}
449917dc6b5SMika Kuoppala 
450*58820574STvrtko Ursulin 	intel_uncore_write(uncore, reg, mask);
451b900b949SImre Deak }
452b900b949SImre Deak 
453edbfdb45SPaulo Zanoni /**
454edbfdb45SPaulo Zanoni  * snb_update_pm_irq - update GEN6_PMIMR
455*58820574STvrtko Ursulin  * @gt: gt for the interrupts
456edbfdb45SPaulo Zanoni  * @interrupt_mask: mask of interrupt bits to update
457edbfdb45SPaulo Zanoni  * @enabled_irq_mask: mask of interrupt bits to enable
458edbfdb45SPaulo Zanoni  */
459*58820574STvrtko Ursulin static void snb_update_pm_irq(struct intel_gt *gt,
460a9c287c9SJani Nikula 			      u32 interrupt_mask,
461a9c287c9SJani Nikula 			      u32 enabled_irq_mask)
462edbfdb45SPaulo Zanoni {
463a9c287c9SJani Nikula 	u32 new_val;
464edbfdb45SPaulo Zanoni 
46515a17aaeSDaniel Vetter 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
46615a17aaeSDaniel Vetter 
467*58820574STvrtko Ursulin 	lockdep_assert_held(&gt->i915->irq_lock);
468edbfdb45SPaulo Zanoni 
469*58820574STvrtko Ursulin 	new_val = gt->pm_imr;
470f52ecbcfSPaulo Zanoni 	new_val &= ~interrupt_mask;
471f52ecbcfSPaulo Zanoni 	new_val |= (~enabled_irq_mask & interrupt_mask);
472f52ecbcfSPaulo Zanoni 
473*58820574STvrtko Ursulin 	if (new_val != gt->pm_imr) {
474*58820574STvrtko Ursulin 		gt->pm_imr = new_val;
475*58820574STvrtko Ursulin 		write_pm_imr(gt);
476edbfdb45SPaulo Zanoni 	}
477f52ecbcfSPaulo Zanoni }
478edbfdb45SPaulo Zanoni 
479*58820574STvrtko Ursulin void gen6_unmask_pm_irq(struct intel_gt *gt, u32 mask)
480edbfdb45SPaulo Zanoni {
481*58820574STvrtko Ursulin 	if (WARN_ON(!intel_irqs_enabled(gt->i915)))
4829939fba2SImre Deak 		return;
4839939fba2SImre Deak 
484*58820574STvrtko Ursulin 	snb_update_pm_irq(gt, mask, mask);
485edbfdb45SPaulo Zanoni }
486edbfdb45SPaulo Zanoni 
487*58820574STvrtko Ursulin static void __gen6_mask_pm_irq(struct intel_gt *gt, u32 mask)
4889939fba2SImre Deak {
489*58820574STvrtko Ursulin 	snb_update_pm_irq(gt, mask, 0);
4909939fba2SImre Deak }
4919939fba2SImre Deak 
492*58820574STvrtko Ursulin void gen6_mask_pm_irq(struct intel_gt *gt, u32 mask)
493edbfdb45SPaulo Zanoni {
494*58820574STvrtko Ursulin 	if (WARN_ON(!intel_irqs_enabled(gt->i915)))
4959939fba2SImre Deak 		return;
4969939fba2SImre Deak 
497*58820574STvrtko Ursulin 	__gen6_mask_pm_irq(gt, mask);
498f4e9af4fSAkash Goel }
499f4e9af4fSAkash Goel 
5003814fd77SOscar Mateo static void gen6_reset_pm_iir(struct drm_i915_private *dev_priv, u32 reset_mask)
501f4e9af4fSAkash Goel {
502f4e9af4fSAkash Goel 	i915_reg_t reg = gen6_pm_iir(dev_priv);
503f4e9af4fSAkash Goel 
50467520415SChris Wilson 	lockdep_assert_held(&dev_priv->irq_lock);
505f4e9af4fSAkash Goel 
506f4e9af4fSAkash Goel 	I915_WRITE(reg, reset_mask);
507f4e9af4fSAkash Goel 	I915_WRITE(reg, reset_mask);
508f4e9af4fSAkash Goel 	POSTING_READ(reg);
509f4e9af4fSAkash Goel }
510f4e9af4fSAkash Goel 
511*58820574STvrtko Ursulin static void gen6_enable_pm_irq(struct intel_gt *gt, u32 enable_mask)
512f4e9af4fSAkash Goel {
513*58820574STvrtko Ursulin 	lockdep_assert_held(&gt->i915->irq_lock);
514f4e9af4fSAkash Goel 
515*58820574STvrtko Ursulin 	gt->pm_ier |= enable_mask;
516*58820574STvrtko Ursulin 	write_pm_ier(gt);
517*58820574STvrtko Ursulin 	gen6_unmask_pm_irq(gt, enable_mask);
518f4e9af4fSAkash Goel 	/* unmask_pm_irq provides an implicit barrier (POSTING_READ) */
519f4e9af4fSAkash Goel }
520f4e9af4fSAkash Goel 
521*58820574STvrtko Ursulin static void gen6_disable_pm_irq(struct intel_gt *gt, u32 disable_mask)
522f4e9af4fSAkash Goel {
523*58820574STvrtko Ursulin 	lockdep_assert_held(&gt->i915->irq_lock);
524f4e9af4fSAkash Goel 
525*58820574STvrtko Ursulin 	gt->pm_ier &= ~disable_mask;
526*58820574STvrtko Ursulin 	__gen6_mask_pm_irq(gt, disable_mask);
527*58820574STvrtko Ursulin 	write_pm_ier(gt);
528f4e9af4fSAkash Goel 	/* though a barrier is missing here, we don't really need one */
529edbfdb45SPaulo Zanoni }
530edbfdb45SPaulo Zanoni 
531d02b98b8SOscar Mateo void gen11_reset_rps_interrupts(struct drm_i915_private *dev_priv)
532d02b98b8SOscar Mateo {
533d02b98b8SOscar Mateo 	spin_lock_irq(&dev_priv->irq_lock);
534d02b98b8SOscar Mateo 
5359b77011eSTvrtko Ursulin 	while (gen11_reset_one_iir(&dev_priv->gt, 0, GEN11_GTPM))
53696606f3bSOscar Mateo 		;
537d02b98b8SOscar Mateo 
538d02b98b8SOscar Mateo 	dev_priv->gt_pm.rps.pm_iir = 0;
539d02b98b8SOscar Mateo 
540d02b98b8SOscar Mateo 	spin_unlock_irq(&dev_priv->irq_lock);
541d02b98b8SOscar Mateo }
542d02b98b8SOscar Mateo 
543dc97997aSChris Wilson void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
5443cc134e3SImre Deak {
5453cc134e3SImre Deak 	spin_lock_irq(&dev_priv->irq_lock);
5464668f695SChris Wilson 	gen6_reset_pm_iir(dev_priv, GEN6_PM_RPS_EVENTS);
547562d9baeSSagar Arun Kamble 	dev_priv->gt_pm.rps.pm_iir = 0;
5483cc134e3SImre Deak 	spin_unlock_irq(&dev_priv->irq_lock);
5493cc134e3SImre Deak }
5503cc134e3SImre Deak 
55191d14251STvrtko Ursulin void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
552b900b949SImre Deak {
553*58820574STvrtko Ursulin 	struct intel_gt *gt = &dev_priv->gt;
554562d9baeSSagar Arun Kamble 	struct intel_rps *rps = &dev_priv->gt_pm.rps;
555562d9baeSSagar Arun Kamble 
556562d9baeSSagar Arun Kamble 	if (READ_ONCE(rps->interrupts_enabled))
557f2a91d1aSChris Wilson 		return;
558f2a91d1aSChris Wilson 
559b900b949SImre Deak 	spin_lock_irq(&dev_priv->irq_lock);
560562d9baeSSagar Arun Kamble 	WARN_ON_ONCE(rps->pm_iir);
56196606f3bSOscar Mateo 
562d02b98b8SOscar Mateo 	if (INTEL_GEN(dev_priv) >= 11)
563*58820574STvrtko Ursulin 		WARN_ON_ONCE(gen11_reset_one_iir(gt, 0, GEN11_GTPM));
564d02b98b8SOscar Mateo 	else
565c33d247dSChris Wilson 		WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
56696606f3bSOscar Mateo 
567562d9baeSSagar Arun Kamble 	rps->interrupts_enabled = true;
568*58820574STvrtko Ursulin 	gen6_enable_pm_irq(gt, dev_priv->pm_rps_events);
56978e68d36SImre Deak 
570b900b949SImre Deak 	spin_unlock_irq(&dev_priv->irq_lock);
571b900b949SImre Deak }
572b900b949SImre Deak 
57391d14251STvrtko Ursulin void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
574b900b949SImre Deak {
575562d9baeSSagar Arun Kamble 	struct intel_rps *rps = &dev_priv->gt_pm.rps;
576562d9baeSSagar Arun Kamble 
577562d9baeSSagar Arun Kamble 	if (!READ_ONCE(rps->interrupts_enabled))
578f2a91d1aSChris Wilson 		return;
579f2a91d1aSChris Wilson 
580d4d70aa5SImre Deak 	spin_lock_irq(&dev_priv->irq_lock);
581562d9baeSSagar Arun Kamble 	rps->interrupts_enabled = false;
5829939fba2SImre Deak 
583b20e3cfeSDave Gordon 	I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0u));
5849939fba2SImre Deak 
585*58820574STvrtko Ursulin 	gen6_disable_pm_irq(&dev_priv->gt, GEN6_PM_RPS_EVENTS);
58658072ccbSImre Deak 
58758072ccbSImre Deak 	spin_unlock_irq(&dev_priv->irq_lock);
588315ca4c4SVille Syrjälä 	intel_synchronize_irq(dev_priv);
589c33d247dSChris Wilson 
590c33d247dSChris Wilson 	/* Now that we will not be generating any more work, flush any
5913814fd77SOscar Mateo 	 * outstanding tasks. As we are called on the RPS idle path,
592c33d247dSChris Wilson 	 * we will reset the GPU to minimum frequencies, so the current
593c33d247dSChris Wilson 	 * state of the worker can be discarded.
594c33d247dSChris Wilson 	 */
595562d9baeSSagar Arun Kamble 	cancel_work_sync(&rps->work);
596d02b98b8SOscar Mateo 	if (INTEL_GEN(dev_priv) >= 11)
597d02b98b8SOscar Mateo 		gen11_reset_rps_interrupts(dev_priv);
598d02b98b8SOscar Mateo 	else
599c33d247dSChris Wilson 		gen6_reset_rps_interrupts(dev_priv);
600b900b949SImre Deak }
601b900b949SImre Deak 
60226705e20SSagar Arun Kamble void gen9_reset_guc_interrupts(struct drm_i915_private *dev_priv)
60326705e20SSagar Arun Kamble {
60487b391b9SDaniele Ceraolo Spurio 	assert_rpm_wakelock_held(&dev_priv->runtime_pm);
6051be333d3SSagar Arun Kamble 
60626705e20SSagar Arun Kamble 	spin_lock_irq(&dev_priv->irq_lock);
60726705e20SSagar Arun Kamble 	gen6_reset_pm_iir(dev_priv, dev_priv->pm_guc_events);
60826705e20SSagar Arun Kamble 	spin_unlock_irq(&dev_priv->irq_lock);
60926705e20SSagar Arun Kamble }
61026705e20SSagar Arun Kamble 
61126705e20SSagar Arun Kamble void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv)
61226705e20SSagar Arun Kamble {
61387b391b9SDaniele Ceraolo Spurio 	assert_rpm_wakelock_held(&dev_priv->runtime_pm);
6141be333d3SSagar Arun Kamble 
61526705e20SSagar Arun Kamble 	spin_lock_irq(&dev_priv->irq_lock);
6161e83e7a6SOscar Mateo 	if (!dev_priv->guc.interrupts.enabled) {
61726705e20SSagar Arun Kamble 		WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) &
61826705e20SSagar Arun Kamble 				       dev_priv->pm_guc_events);
6191e83e7a6SOscar Mateo 		dev_priv->guc.interrupts.enabled = true;
620*58820574STvrtko Ursulin 		gen6_enable_pm_irq(&dev_priv->gt, dev_priv->pm_guc_events);
62126705e20SSagar Arun Kamble 	}
62226705e20SSagar Arun Kamble 	spin_unlock_irq(&dev_priv->irq_lock);
62326705e20SSagar Arun Kamble }
62426705e20SSagar Arun Kamble 
62526705e20SSagar Arun Kamble void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv)
62626705e20SSagar Arun Kamble {
62787b391b9SDaniele Ceraolo Spurio 	assert_rpm_wakelock_held(&dev_priv->runtime_pm);
6281be333d3SSagar Arun Kamble 
62926705e20SSagar Arun Kamble 	spin_lock_irq(&dev_priv->irq_lock);
6301e83e7a6SOscar Mateo 	dev_priv->guc.interrupts.enabled = false;
63126705e20SSagar Arun Kamble 
632*58820574STvrtko Ursulin 	gen6_disable_pm_irq(&dev_priv->gt, dev_priv->pm_guc_events);
63326705e20SSagar Arun Kamble 
63426705e20SSagar Arun Kamble 	spin_unlock_irq(&dev_priv->irq_lock);
635315ca4c4SVille Syrjälä 	intel_synchronize_irq(dev_priv);
63626705e20SSagar Arun Kamble 
63726705e20SSagar Arun Kamble 	gen9_reset_guc_interrupts(dev_priv);
63826705e20SSagar Arun Kamble }
63926705e20SSagar Arun Kamble 
64054c52a84SOscar Mateo void gen11_reset_guc_interrupts(struct drm_i915_private *i915)
64154c52a84SOscar Mateo {
64254c52a84SOscar Mateo 	spin_lock_irq(&i915->irq_lock);
6439b77011eSTvrtko Ursulin 	gen11_reset_one_iir(&i915->gt, 0, GEN11_GUC);
64454c52a84SOscar Mateo 	spin_unlock_irq(&i915->irq_lock);
64554c52a84SOscar Mateo }
64654c52a84SOscar Mateo 
64754c52a84SOscar Mateo void gen11_enable_guc_interrupts(struct drm_i915_private *dev_priv)
64854c52a84SOscar Mateo {
64954c52a84SOscar Mateo 	spin_lock_irq(&dev_priv->irq_lock);
65054c52a84SOscar Mateo 	if (!dev_priv->guc.interrupts.enabled) {
65154c52a84SOscar Mateo 		u32 events = REG_FIELD_PREP(ENGINE1_MASK,
65254c52a84SOscar Mateo 					    GEN11_GUC_INTR_GUC2HOST);
65354c52a84SOscar Mateo 
6549b77011eSTvrtko Ursulin 		WARN_ON_ONCE(gen11_reset_one_iir(&dev_priv->gt, 0, GEN11_GUC));
65554c52a84SOscar Mateo 		I915_WRITE(GEN11_GUC_SG_INTR_ENABLE, events);
65654c52a84SOscar Mateo 		I915_WRITE(GEN11_GUC_SG_INTR_MASK, ~events);
65754c52a84SOscar Mateo 		dev_priv->guc.interrupts.enabled = true;
65854c52a84SOscar Mateo 	}
65954c52a84SOscar Mateo 	spin_unlock_irq(&dev_priv->irq_lock);
66054c52a84SOscar Mateo }
66154c52a84SOscar Mateo 
66254c52a84SOscar Mateo void gen11_disable_guc_interrupts(struct drm_i915_private *dev_priv)
66354c52a84SOscar Mateo {
66454c52a84SOscar Mateo 	spin_lock_irq(&dev_priv->irq_lock);
66554c52a84SOscar Mateo 	dev_priv->guc.interrupts.enabled = false;
66654c52a84SOscar Mateo 
66754c52a84SOscar Mateo 	I915_WRITE(GEN11_GUC_SG_INTR_MASK, ~0);
66854c52a84SOscar Mateo 	I915_WRITE(GEN11_GUC_SG_INTR_ENABLE, 0);
66954c52a84SOscar Mateo 
67054c52a84SOscar Mateo 	spin_unlock_irq(&dev_priv->irq_lock);
671315ca4c4SVille Syrjälä 	intel_synchronize_irq(dev_priv);
67254c52a84SOscar Mateo 
67354c52a84SOscar Mateo 	gen11_reset_guc_interrupts(dev_priv);
67454c52a84SOscar Mateo }
67554c52a84SOscar Mateo 
6760961021aSBen Widawsky /**
6773a3b3c7dSVille Syrjälä  * bdw_update_port_irq - update DE port interrupt
6783a3b3c7dSVille Syrjälä  * @dev_priv: driver private
6793a3b3c7dSVille Syrjälä  * @interrupt_mask: mask of interrupt bits to update
6803a3b3c7dSVille Syrjälä  * @enabled_irq_mask: mask of interrupt bits to enable
6813a3b3c7dSVille Syrjälä  */
6823a3b3c7dSVille Syrjälä static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
683a9c287c9SJani Nikula 				u32 interrupt_mask,
684a9c287c9SJani Nikula 				u32 enabled_irq_mask)
6853a3b3c7dSVille Syrjälä {
686a9c287c9SJani Nikula 	u32 new_val;
687a9c287c9SJani Nikula 	u32 old_val;
6883a3b3c7dSVille Syrjälä 
68967520415SChris Wilson 	lockdep_assert_held(&dev_priv->irq_lock);
6903a3b3c7dSVille Syrjälä 
6913a3b3c7dSVille Syrjälä 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
6923a3b3c7dSVille Syrjälä 
6933a3b3c7dSVille Syrjälä 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
6943a3b3c7dSVille Syrjälä 		return;
6953a3b3c7dSVille Syrjälä 
6963a3b3c7dSVille Syrjälä 	old_val = I915_READ(GEN8_DE_PORT_IMR);
6973a3b3c7dSVille Syrjälä 
6983a3b3c7dSVille Syrjälä 	new_val = old_val;
6993a3b3c7dSVille Syrjälä 	new_val &= ~interrupt_mask;
7003a3b3c7dSVille Syrjälä 	new_val |= (~enabled_irq_mask & interrupt_mask);
7013a3b3c7dSVille Syrjälä 
7023a3b3c7dSVille Syrjälä 	if (new_val != old_val) {
7033a3b3c7dSVille Syrjälä 		I915_WRITE(GEN8_DE_PORT_IMR, new_val);
7043a3b3c7dSVille Syrjälä 		POSTING_READ(GEN8_DE_PORT_IMR);
7053a3b3c7dSVille Syrjälä 	}
7063a3b3c7dSVille Syrjälä }
7073a3b3c7dSVille Syrjälä 
7083a3b3c7dSVille Syrjälä /**
709013d3752SVille Syrjälä  * bdw_update_pipe_irq - update DE pipe interrupt
710013d3752SVille Syrjälä  * @dev_priv: driver private
711013d3752SVille Syrjälä  * @pipe: pipe whose interrupt to update
712013d3752SVille Syrjälä  * @interrupt_mask: mask of interrupt bits to update
713013d3752SVille Syrjälä  * @enabled_irq_mask: mask of interrupt bits to enable
714013d3752SVille Syrjälä  */
715013d3752SVille Syrjälä void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
716013d3752SVille Syrjälä 			 enum pipe pipe,
717a9c287c9SJani Nikula 			 u32 interrupt_mask,
718a9c287c9SJani Nikula 			 u32 enabled_irq_mask)
719013d3752SVille Syrjälä {
720a9c287c9SJani Nikula 	u32 new_val;
721013d3752SVille Syrjälä 
72267520415SChris Wilson 	lockdep_assert_held(&dev_priv->irq_lock);
723013d3752SVille Syrjälä 
724013d3752SVille Syrjälä 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
725013d3752SVille Syrjälä 
726013d3752SVille Syrjälä 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
727013d3752SVille Syrjälä 		return;
728013d3752SVille Syrjälä 
729013d3752SVille Syrjälä 	new_val = dev_priv->de_irq_mask[pipe];
730013d3752SVille Syrjälä 	new_val &= ~interrupt_mask;
731013d3752SVille Syrjälä 	new_val |= (~enabled_irq_mask & interrupt_mask);
732013d3752SVille Syrjälä 
733013d3752SVille Syrjälä 	if (new_val != dev_priv->de_irq_mask[pipe]) {
734013d3752SVille Syrjälä 		dev_priv->de_irq_mask[pipe] = new_val;
735013d3752SVille Syrjälä 		I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
736013d3752SVille Syrjälä 		POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
737013d3752SVille Syrjälä 	}
738013d3752SVille Syrjälä }
739013d3752SVille Syrjälä 
740013d3752SVille Syrjälä /**
741fee884edSDaniel Vetter  * ibx_display_interrupt_update - update SDEIMR
742fee884edSDaniel Vetter  * @dev_priv: driver private
743fee884edSDaniel Vetter  * @interrupt_mask: mask of interrupt bits to update
744fee884edSDaniel Vetter  * @enabled_irq_mask: mask of interrupt bits to enable
745fee884edSDaniel Vetter  */
74647339cd9SDaniel Vetter void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
747a9c287c9SJani Nikula 				  u32 interrupt_mask,
748a9c287c9SJani Nikula 				  u32 enabled_irq_mask)
749fee884edSDaniel Vetter {
750a9c287c9SJani Nikula 	u32 sdeimr = I915_READ(SDEIMR);
751fee884edSDaniel Vetter 	sdeimr &= ~interrupt_mask;
752fee884edSDaniel Vetter 	sdeimr |= (~enabled_irq_mask & interrupt_mask);
753fee884edSDaniel Vetter 
75415a17aaeSDaniel Vetter 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
75515a17aaeSDaniel Vetter 
75667520415SChris Wilson 	lockdep_assert_held(&dev_priv->irq_lock);
757fee884edSDaniel Vetter 
7589df7575fSJesse Barnes 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
759c67a470bSPaulo Zanoni 		return;
760c67a470bSPaulo Zanoni 
761fee884edSDaniel Vetter 	I915_WRITE(SDEIMR, sdeimr);
762fee884edSDaniel Vetter 	POSTING_READ(SDEIMR);
763fee884edSDaniel Vetter }
7648664281bSPaulo Zanoni 
7656b12ca56SVille Syrjälä u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
7666b12ca56SVille Syrjälä 			      enum pipe pipe)
7677c463586SKeith Packard {
7686b12ca56SVille Syrjälä 	u32 status_mask = dev_priv->pipestat_irq_mask[pipe];
76910c59c51SImre Deak 	u32 enable_mask = status_mask << 16;
77010c59c51SImre Deak 
7716b12ca56SVille Syrjälä 	lockdep_assert_held(&dev_priv->irq_lock);
7726b12ca56SVille Syrjälä 
7736b12ca56SVille Syrjälä 	if (INTEL_GEN(dev_priv) < 5)
7746b12ca56SVille Syrjälä 		goto out;
7756b12ca56SVille Syrjälä 
77610c59c51SImre Deak 	/*
777724a6905SVille Syrjälä 	 * On pipe A we don't support the PSR interrupt yet,
778724a6905SVille Syrjälä 	 * on pipe B and C the same bit MBZ.
77910c59c51SImre Deak 	 */
78010c59c51SImre Deak 	if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
78110c59c51SImre Deak 		return 0;
782724a6905SVille Syrjälä 	/*
783724a6905SVille Syrjälä 	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
784724a6905SVille Syrjälä 	 * A the same bit is for perf counters which we don't use either.
785724a6905SVille Syrjälä 	 */
786724a6905SVille Syrjälä 	if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
787724a6905SVille Syrjälä 		return 0;
78810c59c51SImre Deak 
78910c59c51SImre Deak 	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
79010c59c51SImre Deak 			 SPRITE0_FLIP_DONE_INT_EN_VLV |
79110c59c51SImre Deak 			 SPRITE1_FLIP_DONE_INT_EN_VLV);
79210c59c51SImre Deak 	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
79310c59c51SImre Deak 		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
79410c59c51SImre Deak 	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
79510c59c51SImre Deak 		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;
79610c59c51SImre Deak 
7976b12ca56SVille Syrjälä out:
7986b12ca56SVille Syrjälä 	WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
7996b12ca56SVille Syrjälä 		  status_mask & ~PIPESTAT_INT_STATUS_MASK,
8006b12ca56SVille Syrjälä 		  "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
8016b12ca56SVille Syrjälä 		  pipe_name(pipe), enable_mask, status_mask);
8026b12ca56SVille Syrjälä 
80310c59c51SImre Deak 	return enable_mask;
80410c59c51SImre Deak }
80510c59c51SImre Deak 
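/*
 * For illustration only (hypothetical bit): PIPESTAT keeps status bits in the
 * low 16 bits and the matching enable bits 16 positions higher, hence the
 * "status_mask << 16" above. E.g. a status_mask of 0x2 (bit 1) produces an
 * enable_mask with bit 17 set, before the VLV/CHV sprite flip and PSR
 * adjustments are applied.
 */
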
8066b12ca56SVille Syrjälä void i915_enable_pipestat(struct drm_i915_private *dev_priv,
8076b12ca56SVille Syrjälä 			  enum pipe pipe, u32 status_mask)
808755e9019SImre Deak {
8096b12ca56SVille Syrjälä 	i915_reg_t reg = PIPESTAT(pipe);
810755e9019SImre Deak 	u32 enable_mask;
811755e9019SImre Deak 
8126b12ca56SVille Syrjälä 	WARN_ONCE(status_mask & ~PIPESTAT_INT_STATUS_MASK,
8136b12ca56SVille Syrjälä 		  "pipe %c: status_mask=0x%x\n",
8146b12ca56SVille Syrjälä 		  pipe_name(pipe), status_mask);
8156b12ca56SVille Syrjälä 
8166b12ca56SVille Syrjälä 	lockdep_assert_held(&dev_priv->irq_lock);
8176b12ca56SVille Syrjälä 	WARN_ON(!intel_irqs_enabled(dev_priv));
8186b12ca56SVille Syrjälä 
8196b12ca56SVille Syrjälä 	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == status_mask)
8206b12ca56SVille Syrjälä 		return;
8216b12ca56SVille Syrjälä 
8226b12ca56SVille Syrjälä 	dev_priv->pipestat_irq_mask[pipe] |= status_mask;
8236b12ca56SVille Syrjälä 	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
8246b12ca56SVille Syrjälä 
8256b12ca56SVille Syrjälä 	I915_WRITE(reg, enable_mask | status_mask);
8266b12ca56SVille Syrjälä 	POSTING_READ(reg);
827755e9019SImre Deak }
828755e9019SImre Deak 
8296b12ca56SVille Syrjälä void i915_disable_pipestat(struct drm_i915_private *dev_priv,
8306b12ca56SVille Syrjälä 			   enum pipe pipe, u32 status_mask)
831755e9019SImre Deak {
8326b12ca56SVille Syrjälä 	i915_reg_t reg = PIPESTAT(pipe);
833755e9019SImre Deak 	u32 enable_mask;
834755e9019SImre Deak 
8356b12ca56SVille Syrjälä 	WARN_ONCE(status_mask & ~PIPESTAT_INT_STATUS_MASK,
8366b12ca56SVille Syrjälä 		  "pipe %c: status_mask=0x%x\n",
8376b12ca56SVille Syrjälä 		  pipe_name(pipe), status_mask);
8386b12ca56SVille Syrjälä 
8396b12ca56SVille Syrjälä 	lockdep_assert_held(&dev_priv->irq_lock);
8406b12ca56SVille Syrjälä 	WARN_ON(!intel_irqs_enabled(dev_priv));
8416b12ca56SVille Syrjälä 
8426b12ca56SVille Syrjälä 	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == 0)
8436b12ca56SVille Syrjälä 		return;
8446b12ca56SVille Syrjälä 
8456b12ca56SVille Syrjälä 	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
8466b12ca56SVille Syrjälä 	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
8476b12ca56SVille Syrjälä 
8486b12ca56SVille Syrjälä 	I915_WRITE(reg, enable_mask | status_mask);
8496b12ca56SVille Syrjälä 	POSTING_READ(reg);
850755e9019SImre Deak }
851755e9019SImre Deak 
852f3e30485SVille Syrjälä static bool i915_has_asle(struct drm_i915_private *dev_priv)
853f3e30485SVille Syrjälä {
854f3e30485SVille Syrjälä 	if (!dev_priv->opregion.asle)
855f3e30485SVille Syrjälä 		return false;
856f3e30485SVille Syrjälä 
857f3e30485SVille Syrjälä 	return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
858f3e30485SVille Syrjälä }
859f3e30485SVille Syrjälä 
860c0e09200SDave Airlie /**
861f49e38ddSJani Nikula  * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
86214bb2c11STvrtko Ursulin  * @dev_priv: i915 device private
86301c66889SZhao Yakui  */
86491d14251STvrtko Ursulin static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
86501c66889SZhao Yakui {
866f3e30485SVille Syrjälä 	if (!i915_has_asle(dev_priv))
867f49e38ddSJani Nikula 		return;
868f49e38ddSJani Nikula 
86913321786SDaniel Vetter 	spin_lock_irq(&dev_priv->irq_lock);
87001c66889SZhao Yakui 
871755e9019SImre Deak 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
87291d14251STvrtko Ursulin 	if (INTEL_GEN(dev_priv) >= 4)
8733b6c42e8SDaniel Vetter 		i915_enable_pipestat(dev_priv, PIPE_A,
874755e9019SImre Deak 				     PIPE_LEGACY_BLC_EVENT_STATUS);
8751ec14ad3SChris Wilson 
87613321786SDaniel Vetter 	spin_unlock_irq(&dev_priv->irq_lock);
87701c66889SZhao Yakui }
87801c66889SZhao Yakui 
879f75f3746SVille Syrjälä /*
880f75f3746SVille Syrjälä  * This timing diagram depicts the video signal in and
881f75f3746SVille Syrjälä  * around the vertical blanking period.
882f75f3746SVille Syrjälä  *
883f75f3746SVille Syrjälä  * Assumptions about the fictitious mode used in this example:
884f75f3746SVille Syrjälä  *  vblank_start >= 3
885f75f3746SVille Syrjälä  *  vsync_start = vblank_start + 1
886f75f3746SVille Syrjälä  *  vsync_end = vblank_start + 2
887f75f3746SVille Syrjälä  *  vtotal = vblank_start + 3
888f75f3746SVille Syrjälä  *
889f75f3746SVille Syrjälä  *           start of vblank:
890f75f3746SVille Syrjälä  *           latch double buffered registers
891f75f3746SVille Syrjälä  *           increment frame counter (ctg+)
892f75f3746SVille Syrjälä  *           generate start of vblank interrupt (gen4+)
893f75f3746SVille Syrjälä  *           |
894f75f3746SVille Syrjälä  *           |          frame start:
895f75f3746SVille Syrjälä  *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
896f75f3746SVille Syrjälä  *           |          may be shifted forward 1-3 extra lines via PIPECONF
897f75f3746SVille Syrjälä  *           |          |
898f75f3746SVille Syrjälä  *           |          |  start of vsync:
899f75f3746SVille Syrjälä  *           |          |  generate vsync interrupt
900f75f3746SVille Syrjälä  *           |          |  |
901f75f3746SVille Syrjälä  * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
902f75f3746SVille Syrjälä  *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
903f75f3746SVille Syrjälä  * ----va---> <-----------------vb--------------------> <--------va-------------
904f75f3746SVille Syrjälä  *       |          |       <----vs----->                     |
905f75f3746SVille Syrjälä  * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
906f75f3746SVille Syrjälä  * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
907f75f3746SVille Syrjälä  * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
908f75f3746SVille Syrjälä  *       |          |                                         |
909f75f3746SVille Syrjälä  *       last visible pixel                                   first visible pixel
910f75f3746SVille Syrjälä  *                  |                                         increment frame counter (gen3/4)
911f75f3746SVille Syrjälä  *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
912f75f3746SVille Syrjälä  *
913f75f3746SVille Syrjälä  * x  = horizontal active
914f75f3746SVille Syrjälä  * _  = horizontal blanking
915f75f3746SVille Syrjälä  * hs = horizontal sync
916f75f3746SVille Syrjälä  * va = vertical active
917f75f3746SVille Syrjälä  * vb = vertical blanking
918f75f3746SVille Syrjälä  * vs = vertical sync
919f75f3746SVille Syrjälä  * vbs = vblank_start (number)
920f75f3746SVille Syrjälä  *
921f75f3746SVille Syrjälä  * Summary:
922f75f3746SVille Syrjälä  * - most events happen at the start of horizontal sync
923f75f3746SVille Syrjälä  * - frame start happens at the start of horizontal blank, 1-4 lines
924f75f3746SVille Syrjälä  *   (depending on PIPECONF settings) after the start of vblank
925f75f3746SVille Syrjälä  * - gen3/4 pixel and frame counter are synchronized with the start
926f75f3746SVille Syrjälä  *   of horizontal active on the first line of vertical active
927f75f3746SVille Syrjälä  */
928f75f3746SVille Syrjälä 
92942f52ef8SKeith Packard /* Called from drm generic code, passed a 'crtc', which
93042f52ef8SKeith Packard  * we use as a pipe index
93142f52ef8SKeith Packard  */
93208fa8fd0SVille Syrjälä u32 i915_get_vblank_counter(struct drm_crtc *crtc)
9330a3e67a4SJesse Barnes {
93408fa8fd0SVille Syrjälä 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
93508fa8fd0SVille Syrjälä 	struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[drm_crtc_index(crtc)];
93632db0b65SVille Syrjälä 	const struct drm_display_mode *mode = &vblank->hwmode;
93708fa8fd0SVille Syrjälä 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
938f0f59a00SVille Syrjälä 	i915_reg_t high_frame, low_frame;
9390b2a8e09SVille Syrjälä 	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
940694e409dSVille Syrjälä 	unsigned long irqflags;
941391f75e2SVille Syrjälä 
94232db0b65SVille Syrjälä 	/*
94332db0b65SVille Syrjälä 	 * On i965gm TV output the frame counter only works up to
94432db0b65SVille Syrjälä 	 * the point when we enable the TV encoder. After that the
94532db0b65SVille Syrjälä 	 * frame counter ceases to work and reads zero. We need a
94632db0b65SVille Syrjälä 	 * vblank wait before enabling the TV encoder and so we
94732db0b65SVille Syrjälä 	 * have to enable vblank interrupts while the frame counter
94832db0b65SVille Syrjälä 	 * is still in a working state. However the core vblank code
94932db0b65SVille Syrjälä 	 * does not like us returning non-zero frame counter values
95032db0b65SVille Syrjälä 	 * when we've told it that we don't have a working frame
95132db0b65SVille Syrjälä 	 * counter. Thus we must stop non-zero values leaking out.
95232db0b65SVille Syrjälä 	 */
95332db0b65SVille Syrjälä 	if (!vblank->max_vblank_count)
95432db0b65SVille Syrjälä 		return 0;
95532db0b65SVille Syrjälä 
9560b2a8e09SVille Syrjälä 	htotal = mode->crtc_htotal;
9570b2a8e09SVille Syrjälä 	hsync_start = mode->crtc_hsync_start;
9580b2a8e09SVille Syrjälä 	vbl_start = mode->crtc_vblank_start;
9590b2a8e09SVille Syrjälä 	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
9600b2a8e09SVille Syrjälä 		vbl_start = DIV_ROUND_UP(vbl_start, 2);
961391f75e2SVille Syrjälä 
9620b2a8e09SVille Syrjälä 	/* Convert to pixel count */
9630b2a8e09SVille Syrjälä 	vbl_start *= htotal;
9640b2a8e09SVille Syrjälä 
9650b2a8e09SVille Syrjälä 	/* Start of vblank event occurs at start of hsync */
9660b2a8e09SVille Syrjälä 	vbl_start -= htotal - hsync_start;
9670b2a8e09SVille Syrjälä 
9689db4a9c7SJesse Barnes 	high_frame = PIPEFRAME(pipe);
9699db4a9c7SJesse Barnes 	low_frame = PIPEFRAMEPIXEL(pipe);
9705eddb70bSChris Wilson 
971694e409dSVille Syrjälä 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
972694e409dSVille Syrjälä 
9730a3e67a4SJesse Barnes 	/*
9740a3e67a4SJesse Barnes 	 * High & low register fields aren't synchronized, so make sure
9750a3e67a4SJesse Barnes 	 * we get a low value that's stable across two reads of the high
9760a3e67a4SJesse Barnes 	 * register.
9770a3e67a4SJesse Barnes 	 */
9780a3e67a4SJesse Barnes 	do {
979694e409dSVille Syrjälä 		high1 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK;
980694e409dSVille Syrjälä 		low   = I915_READ_FW(low_frame);
981694e409dSVille Syrjälä 		high2 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK;
9820a3e67a4SJesse Barnes 	} while (high1 != high2);
9830a3e67a4SJesse Barnes 
984694e409dSVille Syrjälä 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
985694e409dSVille Syrjälä 
9865eddb70bSChris Wilson 	high1 >>= PIPE_FRAME_HIGH_SHIFT;
987391f75e2SVille Syrjälä 	pixel = low & PIPE_PIXEL_MASK;
9885eddb70bSChris Wilson 	low >>= PIPE_FRAME_LOW_SHIFT;
989391f75e2SVille Syrjälä 
990391f75e2SVille Syrjälä 	/*
991391f75e2SVille Syrjälä 	 * The frame counter increments at beginning of active.
992391f75e2SVille Syrjälä 	 * Cook up a vblank counter by also checking the pixel
993391f75e2SVille Syrjälä 	 * counter against vblank start.
994391f75e2SVille Syrjälä 	 */
995edc08d0aSVille Syrjälä 	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
9960a3e67a4SJesse Barnes }
9970a3e67a4SJesse Barnes 
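/*
 * Worked example with hypothetical numbers: once vbl_start has been converted
 * to a pixel count of, say, 1000, a stable read of high1 = 0x12, a frame low
 * byte of 0x34 and a pixel counter of 1200 yields
 * ((0x12 << 8) | 0x34) + (1200 >= 1000) = 0x1235, i.e. the cooked-up counter
 * is bumped by one as soon as the pixel counter passes the start of vblank.
 */
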
99808fa8fd0SVille Syrjälä u32 g4x_get_vblank_counter(struct drm_crtc *crtc)
9999880b7a5SJesse Barnes {
100008fa8fd0SVille Syrjälä 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
100108fa8fd0SVille Syrjälä 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
10029880b7a5SJesse Barnes 
1003649636efSVille Syrjälä 	return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
10049880b7a5SJesse Barnes }
10059880b7a5SJesse Barnes 
1006aec0246fSUma Shankar /*
1007aec0246fSUma Shankar  * On certain encoders on certain platforms, the pipe
1008aec0246fSUma Shankar  * scanline register will not work to get the scanline,
1009aec0246fSUma Shankar  * either because the timings are driven from the PORT or
1010aec0246fSUma Shankar  * because of issues with scanline register updates.
1011aec0246fSUma Shankar  * This function will use Framestamp and current
1012aec0246fSUma Shankar  * timestamp registers to calculate the scanline.
1013aec0246fSUma Shankar  */
1014aec0246fSUma Shankar static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc)
1015aec0246fSUma Shankar {
1016aec0246fSUma Shankar 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1017aec0246fSUma Shankar 	struct drm_vblank_crtc *vblank =
1018aec0246fSUma Shankar 		&crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
1019aec0246fSUma Shankar 	const struct drm_display_mode *mode = &vblank->hwmode;
1020aec0246fSUma Shankar 	u32 vblank_start = mode->crtc_vblank_start;
1021aec0246fSUma Shankar 	u32 vtotal = mode->crtc_vtotal;
1022aec0246fSUma Shankar 	u32 htotal = mode->crtc_htotal;
1023aec0246fSUma Shankar 	u32 clock = mode->crtc_clock;
1024aec0246fSUma Shankar 	u32 scanline, scan_prev_time, scan_curr_time, scan_post_time;
1025aec0246fSUma Shankar 
1026aec0246fSUma Shankar 	/*
1027aec0246fSUma Shankar 	 * To avoid the race condition where we might cross into the
1028aec0246fSUma Shankar 	 * next vblank just between the PIPE_FRMTMSTMP and TIMESTAMP_CTR
1029aec0246fSUma Shankar 	 * reads, we make sure we read PIPE_FRMTMSTMP and TIMESTAMP_CTR
1030aec0246fSUma Shankar 	 * during the same frame.
1031aec0246fSUma Shankar 	 */
1032aec0246fSUma Shankar 	do {
1033aec0246fSUma Shankar 		/*
1034aec0246fSUma Shankar 		 * This field provides read back of the display
1035aec0246fSUma Shankar 		 * pipe frame time stamp. The time stamp value
1036aec0246fSUma Shankar 		 * is sampled at every start of vertical blank.
1037aec0246fSUma Shankar 		 */
1038aec0246fSUma Shankar 		scan_prev_time = I915_READ_FW(PIPE_FRMTMSTMP(crtc->pipe));
1039aec0246fSUma Shankar 
1040aec0246fSUma Shankar 		/*
1041aec0246fSUma Shankar 		 * The TIMESTAMP_CTR register has the current
1042aec0246fSUma Shankar 		 * time stamp value.
1043aec0246fSUma Shankar 		 */
1044aec0246fSUma Shankar 		scan_curr_time = I915_READ_FW(IVB_TIMESTAMP_CTR);
1045aec0246fSUma Shankar 
1046aec0246fSUma Shankar 		scan_post_time = I915_READ_FW(PIPE_FRMTMSTMP(crtc->pipe));
1047aec0246fSUma Shankar 	} while (scan_post_time != scan_prev_time);
1048aec0246fSUma Shankar 
1049aec0246fSUma Shankar 	scanline = div_u64(mul_u32_u32(scan_curr_time - scan_prev_time,
1050aec0246fSUma Shankar 					clock), 1000 * htotal);
1051aec0246fSUma Shankar 	scanline = min(scanline, vtotal - 1);
1052aec0246fSUma Shankar 	scanline = (scanline + vblank_start) % vtotal;
1053aec0246fSUma Shankar 
1054aec0246fSUma Shankar 	return scanline;
1055aec0246fSUma Shankar }
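
/*
 * Rough worked example of the conversion above, assuming the frame
 * stamp and TIMESTAMP_CTR counters tick once per microsecond: with a
 * 148500 kHz pixel clock, htotal = 2200 and a delta of 100 ticks since
 * the last frame stamp, mul_u32_u32(100, 148500) / (1000 * 2200)
 * yields 6 scanlines past the start of vblank, which is then offset
 * by vblank_start and wrapped modulo vtotal.
 */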
1056aec0246fSUma Shankar 
105775aa3f63SVille Syrjälä /* I915_READ_FW: only for fast reads of the display block, no need for forcewake etc. */
1058a225f079SVille Syrjälä static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
1059a225f079SVille Syrjälä {
1060a225f079SVille Syrjälä 	struct drm_device *dev = crtc->base.dev;
1061fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
10625caa0feaSDaniel Vetter 	const struct drm_display_mode *mode;
10635caa0feaSDaniel Vetter 	struct drm_vblank_crtc *vblank;
1064a225f079SVille Syrjälä 	enum pipe pipe = crtc->pipe;
106580715b2fSVille Syrjälä 	int position, vtotal;
1066a225f079SVille Syrjälä 
106772259536SVille Syrjälä 	if (!crtc->active)
106872259536SVille Syrjälä 		return -1;
106972259536SVille Syrjälä 
10705caa0feaSDaniel Vetter 	vblank = &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
10715caa0feaSDaniel Vetter 	mode = &vblank->hwmode;
10725caa0feaSDaniel Vetter 
1073aec0246fSUma Shankar 	if (mode->private_flags & I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP)
1074aec0246fSUma Shankar 		return __intel_get_crtc_scanline_from_timestamp(crtc);
1075aec0246fSUma Shankar 
107680715b2fSVille Syrjälä 	vtotal = mode->crtc_vtotal;
1077a225f079SVille Syrjälä 	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
1078a225f079SVille Syrjälä 		vtotal /= 2;
1079a225f079SVille Syrjälä 
1080cf819effSLucas De Marchi 	if (IS_GEN(dev_priv, 2))
108175aa3f63SVille Syrjälä 		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
1082a225f079SVille Syrjälä 	else
108375aa3f63SVille Syrjälä 		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
1084a225f079SVille Syrjälä 
1085a225f079SVille Syrjälä 	/*
108641b578fbSJesse Barnes 	 * On HSW, the DSL reg (0x70000) appears to return 0 if we
108741b578fbSJesse Barnes 	 * read it just before the start of vblank.  So try it again
108841b578fbSJesse Barnes 	 * so we don't accidentally end up spanning a vblank frame
108941b578fbSJesse Barnes 	 * increment, causing the pipe_update_end() code to squawk at us.
109041b578fbSJesse Barnes 	 *
109141b578fbSJesse Barnes 	 * The nature of this problem means we can't simply check the ISR
109241b578fbSJesse Barnes 	 * bit and return the vblank start value; nor can we use the scanline
109341b578fbSJesse Barnes 	 * debug register in the transcoder as it appears to have the same
109441b578fbSJesse Barnes 	 * problem.  We may need to extend this to include other platforms,
109541b578fbSJesse Barnes 	 * but so far testing only shows the problem on HSW.
109641b578fbSJesse Barnes 	 */
109791d14251STvrtko Ursulin 	if (HAS_DDI(dev_priv) && !position) {
109841b578fbSJesse Barnes 		int i, temp;
109941b578fbSJesse Barnes 
110041b578fbSJesse Barnes 		for (i = 0; i < 100; i++) {
110141b578fbSJesse Barnes 			udelay(1);
1102707bdd3fSVille Syrjälä 			temp = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
110341b578fbSJesse Barnes 			if (temp != position) {
110441b578fbSJesse Barnes 				position = temp;
110541b578fbSJesse Barnes 				break;
110641b578fbSJesse Barnes 			}
110741b578fbSJesse Barnes 		}
110841b578fbSJesse Barnes 	}
110941b578fbSJesse Barnes 
111041b578fbSJesse Barnes 	/*
111180715b2fSVille Syrjälä 	 * See update_scanline_offset() for the details on the
111280715b2fSVille Syrjälä 	 * scanline_offset adjustment.
1113a225f079SVille Syrjälä 	 */
111480715b2fSVille Syrjälä 	return (position + crtc->scanline_offset) % vtotal;
1115a225f079SVille Syrjälä }
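
/*
 * For example, with vtotal = 1125 and a scanline_offset of 1, a raw
 * PIPEDSL reading of 1124 is reported as scanline 0 by the wrap above.
 */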
1116a225f079SVille Syrjälä 
11177d23e593SVille Syrjälä bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
11181bf6ad62SDaniel Vetter 			      bool in_vblank_irq, int *vpos, int *hpos,
11193bb403bfSVille Syrjälä 			      ktime_t *stime, ktime_t *etime,
11203bb403bfSVille Syrjälä 			      const struct drm_display_mode *mode)
11210af7e4dfSMario Kleiner {
1122fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
112398187836SVille Syrjälä 	struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv,
112498187836SVille Syrjälä 								pipe);
11253aa18df8SVille Syrjälä 	int position;
112678e8fc6bSVille Syrjälä 	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
1127ad3543edSMario Kleiner 	unsigned long irqflags;
11288a920e24SVille Syrjälä 	bool use_scanline_counter = INTEL_GEN(dev_priv) >= 5 ||
11298a920e24SVille Syrjälä 		IS_G4X(dev_priv) || IS_GEN(dev_priv, 2) ||
11308a920e24SVille Syrjälä 		mode->private_flags & I915_MODE_FLAG_USE_SCANLINE_COUNTER;
11310af7e4dfSMario Kleiner 
1132fc467a22SMaarten Lankhorst 	if (WARN_ON(!mode->crtc_clock)) {
11330af7e4dfSMario Kleiner 		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
11349db4a9c7SJesse Barnes 				 "pipe %c\n", pipe_name(pipe));
11351bf6ad62SDaniel Vetter 		return false;
11360af7e4dfSMario Kleiner 	}
11370af7e4dfSMario Kleiner 
1138c2baf4b7SVille Syrjälä 	htotal = mode->crtc_htotal;
113978e8fc6bSVille Syrjälä 	hsync_start = mode->crtc_hsync_start;
1140c2baf4b7SVille Syrjälä 	vtotal = mode->crtc_vtotal;
1141c2baf4b7SVille Syrjälä 	vbl_start = mode->crtc_vblank_start;
1142c2baf4b7SVille Syrjälä 	vbl_end = mode->crtc_vblank_end;
11430af7e4dfSMario Kleiner 
1144d31faf65SVille Syrjälä 	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
1145d31faf65SVille Syrjälä 		vbl_start = DIV_ROUND_UP(vbl_start, 2);
1146d31faf65SVille Syrjälä 		vbl_end /= 2;
1147d31faf65SVille Syrjälä 		vtotal /= 2;
1148d31faf65SVille Syrjälä 	}
1149d31faf65SVille Syrjälä 
1150ad3543edSMario Kleiner 	/*
1151ad3543edSMario Kleiner 	 * Lock uncore.lock, as we will do multiple timing critical raw
1152ad3543edSMario Kleiner 	 * register reads, potentially with preemption disabled, so the
1153ad3543edSMario Kleiner 	 * following code must not block on uncore.lock.
1154ad3543edSMario Kleiner 	 */
1155ad3543edSMario Kleiner 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
1156ad3543edSMario Kleiner 
1157ad3543edSMario Kleiner 	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
1158ad3543edSMario Kleiner 
1159ad3543edSMario Kleiner 	/* Get optional system timestamp before query. */
1160ad3543edSMario Kleiner 	if (stime)
1161ad3543edSMario Kleiner 		*stime = ktime_get();
1162ad3543edSMario Kleiner 
11638a920e24SVille Syrjälä 	if (use_scanline_counter) {
11640af7e4dfSMario Kleiner 		/* No obvious pixelcount register. Only query vertical
11650af7e4dfSMario Kleiner 		 * scanout position from Display scan line register.
11660af7e4dfSMario Kleiner 		 */
1167a225f079SVille Syrjälä 		position = __intel_get_crtc_scanline(intel_crtc);
11680af7e4dfSMario Kleiner 	} else {
11690af7e4dfSMario Kleiner 		/* Have access to pixelcount since start of frame.
11700af7e4dfSMario Kleiner 		 * We can split this into vertical and horizontal
11710af7e4dfSMario Kleiner 		 * scanout position.
11720af7e4dfSMario Kleiner 		 */
117375aa3f63SVille Syrjälä 		position = (I915_READ_FW(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
11740af7e4dfSMario Kleiner 
11753aa18df8SVille Syrjälä 		/* convert to pixel counts */
11763aa18df8SVille Syrjälä 		vbl_start *= htotal;
11773aa18df8SVille Syrjälä 		vbl_end *= htotal;
11783aa18df8SVille Syrjälä 		vtotal *= htotal;
117978e8fc6bSVille Syrjälä 
118078e8fc6bSVille Syrjälä 		/*
11817e78f1cbSVille Syrjälä 		 * In interlaced modes, the pixel counter counts all pixels,
11827e78f1cbSVille Syrjälä 		 * so one field will have htotal more pixels. In order to avoid
11837e78f1cbSVille Syrjälä 		 * the reported position from jumping backwards when the pixel
11847e78f1cbSVille Syrjälä 		 * counter is beyond the length of the shorter field, just
11857e78f1cbSVille Syrjälä 		 * clamp the position to the length of the shorter field. This
11867e78f1cbSVille Syrjälä 		 * matches how the scanline counter based position works since
11877e78f1cbSVille Syrjälä 		 * the scanline counter doesn't count the two half lines.
11887e78f1cbSVille Syrjälä 		 */
11897e78f1cbSVille Syrjälä 		if (position >= vtotal)
11907e78f1cbSVille Syrjälä 			position = vtotal - 1;
11917e78f1cbSVille Syrjälä 
11927e78f1cbSVille Syrjälä 		/*
119378e8fc6bSVille Syrjälä 		 * Start of vblank interrupt is triggered at start of hsync,
119478e8fc6bSVille Syrjälä 		 * just prior to the first active line of vblank. However we
119578e8fc6bSVille Syrjälä 		 * consider lines to start at the leading edge of horizontal
119678e8fc6bSVille Syrjälä 		 * active. So, should we get here before we've crossed into
119778e8fc6bSVille Syrjälä 		 * the horizontal active of the first line in vblank, we would
119878e8fc6bSVille Syrjälä 		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
119978e8fc6bSVille Syrjälä 		 * always add htotal-hsync_start to the current pixel position.
120078e8fc6bSVille Syrjälä 		 */
120178e8fc6bSVille Syrjälä 		position = (position + htotal - hsync_start) % vtotal;
12023aa18df8SVille Syrjälä 	}
12033aa18df8SVille Syrjälä 
1204ad3543edSMario Kleiner 	/* Get optional system timestamp after query. */
1205ad3543edSMario Kleiner 	if (etime)
1206ad3543edSMario Kleiner 		*etime = ktime_get();
1207ad3543edSMario Kleiner 
1208ad3543edSMario Kleiner 	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
1209ad3543edSMario Kleiner 
1210ad3543edSMario Kleiner 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
1211ad3543edSMario Kleiner 
12123aa18df8SVille Syrjälä 	/*
12133aa18df8SVille Syrjälä 	 * While in vblank, position will be negative
12143aa18df8SVille Syrjälä 	 * counting up towards 0 at vbl_end. And outside
12153aa18df8SVille Syrjälä 	 * vblank, position will be positive counting
12163aa18df8SVille Syrjälä 	 * up since vbl_end.
12173aa18df8SVille Syrjälä 	 */
12183aa18df8SVille Syrjälä 	if (position >= vbl_start)
12193aa18df8SVille Syrjälä 		position -= vbl_end;
12203aa18df8SVille Syrjälä 	else
12213aa18df8SVille Syrjälä 		position += vtotal - vbl_end;
12223aa18df8SVille Syrjälä 
12238a920e24SVille Syrjälä 	if (use_scanline_counter) {
12243aa18df8SVille Syrjälä 		*vpos = position;
12253aa18df8SVille Syrjälä 		*hpos = 0;
12263aa18df8SVille Syrjälä 	} else {
12270af7e4dfSMario Kleiner 		*vpos = position / htotal;
12280af7e4dfSMario Kleiner 		*hpos = position - (*vpos * htotal);
12290af7e4dfSMario Kleiner 	}
12300af7e4dfSMario Kleiner 
12311bf6ad62SDaniel Vetter 	return true;
12320af7e4dfSMario Kleiner }
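
/*
 * Rough worked example of the pixel counter branch above, using
 * 1080p-like timings purely for the arithmetic (htotal 2200,
 * hsync_start 2008, vblank_start 1080, vblank_end = vtotal = 1125):
 * a raw pixel count of 1000000 becomes
 * (1000000 + 2200 - 2008) % (1125 * 2200) = 1000192, which is below
 * vbl_start (1080 * 2200 = 2376000), so the vblank adjustment adds
 * vtotal - vbl_end = 0 and the result splits into
 * *vpos = 1000192 / 2200 = 454 and *hpos = 1000192 - 454 * 2200 = 1392.
 */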
12330af7e4dfSMario Kleiner 
1234a225f079SVille Syrjälä int intel_get_crtc_scanline(struct intel_crtc *crtc)
1235a225f079SVille Syrjälä {
1236fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1237a225f079SVille Syrjälä 	unsigned long irqflags;
1238a225f079SVille Syrjälä 	int position;
1239a225f079SVille Syrjälä 
1240a225f079SVille Syrjälä 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
1241a225f079SVille Syrjälä 	position = __intel_get_crtc_scanline(crtc);
1242a225f079SVille Syrjälä 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
1243a225f079SVille Syrjälä 
1244a225f079SVille Syrjälä 	return position;
1245a225f079SVille Syrjälä }
1246a225f079SVille Syrjälä 
124791d14251STvrtko Ursulin static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv)
1248f97108d1SJesse Barnes {
12494f5fd91fSTvrtko Ursulin 	struct intel_uncore *uncore = &dev_priv->uncore;
1250b5b72e89SMatthew Garrett 	u32 busy_up, busy_down, max_avg, min_avg;
12519270388eSDaniel Vetter 	u8 new_delay;
12529270388eSDaniel Vetter 
1253d0ecd7e2SDaniel Vetter 	spin_lock(&mchdev_lock);
1254f97108d1SJesse Barnes 
12554f5fd91fSTvrtko Ursulin 	intel_uncore_write16(uncore,
12564f5fd91fSTvrtko Ursulin 			     MEMINTRSTS,
12574f5fd91fSTvrtko Ursulin 			     intel_uncore_read(uncore, MEMINTRSTS));
125873edd18fSDaniel Vetter 
125920e4d407SDaniel Vetter 	new_delay = dev_priv->ips.cur_delay;
12609270388eSDaniel Vetter 
12614f5fd91fSTvrtko Ursulin 	intel_uncore_write16(uncore, MEMINTRSTS, MEMINT_EVAL_CHG);
12624f5fd91fSTvrtko Ursulin 	busy_up = intel_uncore_read(uncore, RCPREVBSYTUPAVG);
12634f5fd91fSTvrtko Ursulin 	busy_down = intel_uncore_read(uncore, RCPREVBSYTDNAVG);
12644f5fd91fSTvrtko Ursulin 	max_avg = intel_uncore_read(uncore, RCBMAXAVG);
12654f5fd91fSTvrtko Ursulin 	min_avg = intel_uncore_read(uncore, RCBMINAVG);
1266f97108d1SJesse Barnes 
1267f97108d1SJesse Barnes 	/* Handle RCS change request from hw */
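	/*
	 * Note that busy_up above max_avg steps new_delay down towards
	 * ips.max_delay and busy_down below min_avg steps it up towards
	 * ips.min_delay; on this (presumably inverted) scale a smaller
	 * delay value requests a higher frequency.
	 */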
1268b5b72e89SMatthew Garrett 	if (busy_up > max_avg) {
126920e4d407SDaniel Vetter 		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
127020e4d407SDaniel Vetter 			new_delay = dev_priv->ips.cur_delay - 1;
127120e4d407SDaniel Vetter 		if (new_delay < dev_priv->ips.max_delay)
127220e4d407SDaniel Vetter 			new_delay = dev_priv->ips.max_delay;
1273b5b72e89SMatthew Garrett 	} else if (busy_down < min_avg) {
127420e4d407SDaniel Vetter 		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
127520e4d407SDaniel Vetter 			new_delay = dev_priv->ips.cur_delay + 1;
127620e4d407SDaniel Vetter 		if (new_delay > dev_priv->ips.min_delay)
127720e4d407SDaniel Vetter 			new_delay = dev_priv->ips.min_delay;
1278f97108d1SJesse Barnes 	}
1279f97108d1SJesse Barnes 
128091d14251STvrtko Ursulin 	if (ironlake_set_drps(dev_priv, new_delay))
128120e4d407SDaniel Vetter 		dev_priv->ips.cur_delay = new_delay;
1282f97108d1SJesse Barnes 
1283d0ecd7e2SDaniel Vetter 	spin_unlock(&mchdev_lock);
12849270388eSDaniel Vetter 
1285f97108d1SJesse Barnes 	return;
1286f97108d1SJesse Barnes }
1287f97108d1SJesse Barnes 
128843cf3bf0SChris Wilson static void vlv_c0_read(struct drm_i915_private *dev_priv,
128943cf3bf0SChris Wilson 			struct intel_rps_ei *ei)
129031685c25SDeepak S {
1291679cb6c1SMika Kuoppala 	ei->ktime = ktime_get_raw();
129243cf3bf0SChris Wilson 	ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT);
129343cf3bf0SChris Wilson 	ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
129431685c25SDeepak S }
129531685c25SDeepak S 
129643cf3bf0SChris Wilson void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
129743cf3bf0SChris Wilson {
1298562d9baeSSagar Arun Kamble 	memset(&dev_priv->gt_pm.rps.ei, 0, sizeof(dev_priv->gt_pm.rps.ei));
129943cf3bf0SChris Wilson }
130043cf3bf0SChris Wilson 
130143cf3bf0SChris Wilson static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
130243cf3bf0SChris Wilson {
1303562d9baeSSagar Arun Kamble 	struct intel_rps *rps = &dev_priv->gt_pm.rps;
1304562d9baeSSagar Arun Kamble 	const struct intel_rps_ei *prev = &rps->ei;
130543cf3bf0SChris Wilson 	struct intel_rps_ei now;
130643cf3bf0SChris Wilson 	u32 events = 0;
130743cf3bf0SChris Wilson 
1308e0e8c7cbSChris Wilson 	if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0)
130943cf3bf0SChris Wilson 		return 0;
131043cf3bf0SChris Wilson 
131143cf3bf0SChris Wilson 	vlv_c0_read(dev_priv, &now);
131231685c25SDeepak S 
1313679cb6c1SMika Kuoppala 	if (prev->ktime) {
1314e0e8c7cbSChris Wilson 		u64 time, c0;
1315569884e3SChris Wilson 		u32 render, media;
1316e0e8c7cbSChris Wilson 
1317679cb6c1SMika Kuoppala 		time = ktime_us_delta(now.ktime, prev->ktime);
13188f68d591SChris Wilson 
1319e0e8c7cbSChris Wilson 		time *= dev_priv->czclk_freq;
1320e0e8c7cbSChris Wilson 
1321e0e8c7cbSChris Wilson 		/* Workload can be split between render + media,
1322e0e8c7cbSChris Wilson 		 * e.g. SwapBuffers being blitted in X after being rendered in
1323e0e8c7cbSChris Wilson 		 * mesa. To account for this we need to combine both engines
1324e0e8c7cbSChris Wilson 		 * into our activity counter.
1325e0e8c7cbSChris Wilson 		 */
1326569884e3SChris Wilson 		render = now.render_c0 - prev->render_c0;
1327569884e3SChris Wilson 		media = now.media_c0 - prev->media_c0;
1328569884e3SChris Wilson 		c0 = max(render, media);
13296b7f6aa7SMika Kuoppala 		c0 *= 1000 * 100 << 8; /* to usecs and scale to threshold% */
1330e0e8c7cbSChris Wilson 
133160548c55SChris Wilson 		if (c0 > time * rps->power.up_threshold)
1332e0e8c7cbSChris Wilson 			events = GEN6_PM_RP_UP_THRESHOLD;
133360548c55SChris Wilson 		else if (c0 < time * rps->power.down_threshold)
1334e0e8c7cbSChris Wilson 			events = GEN6_PM_RP_DOWN_THRESHOLD;
133531685c25SDeepak S 	}
133631685c25SDeepak S 
1337562d9baeSSagar Arun Kamble 	rps->ei = now;
133843cf3bf0SChris Wilson 	return events;
133931685c25SDeepak S }
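
/*
 * Sketch of the comparison above: assuming the C0 residency counters
 * tick once every 256 czclk cycles (hence the << 8), both sides end up
 * in the same units and the check reduces to
 *
 *	busy_cycles * 100 > total_cycles * up_threshold
 *
 * i.e. "was the busiest engine above up_threshold percent busy over
 * the evaluation interval", and likewise for down_threshold.
 */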
134031685c25SDeepak S 
13414912d041SBen Widawsky static void gen6_pm_rps_work(struct work_struct *work)
13423b8d8d91SJesse Barnes {
13432d1013ddSJani Nikula 	struct drm_i915_private *dev_priv =
1344562d9baeSSagar Arun Kamble 		container_of(work, struct drm_i915_private, gt_pm.rps.work);
1345562d9baeSSagar Arun Kamble 	struct intel_rps *rps = &dev_priv->gt_pm.rps;
13467c0a16adSChris Wilson 	bool client_boost = false;
13478d3afd7dSChris Wilson 	int new_delay, adj, min, max;
13487c0a16adSChris Wilson 	u32 pm_iir = 0;
13493b8d8d91SJesse Barnes 
135059cdb63dSDaniel Vetter 	spin_lock_irq(&dev_priv->irq_lock);
1351562d9baeSSagar Arun Kamble 	if (rps->interrupts_enabled) {
1352562d9baeSSagar Arun Kamble 		pm_iir = fetch_and_zero(&rps->pm_iir);
1353562d9baeSSagar Arun Kamble 		client_boost = atomic_read(&rps->num_waiters);
1354d4d70aa5SImre Deak 	}
135559cdb63dSDaniel Vetter 	spin_unlock_irq(&dev_priv->irq_lock);
13564912d041SBen Widawsky 
135760611c13SPaulo Zanoni 	/* Make sure we didn't queue anything we're not going to process. */
1358a6706b45SDeepak S 	WARN_ON(pm_iir & ~dev_priv->pm_rps_events);
13598d3afd7dSChris Wilson 	if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
13607c0a16adSChris Wilson 		goto out;
13613b8d8d91SJesse Barnes 
1362ebb5eb7dSChris Wilson 	mutex_lock(&rps->lock);
13637b9e0ae6SChris Wilson 
136443cf3bf0SChris Wilson 	pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);
136543cf3bf0SChris Wilson 
1366562d9baeSSagar Arun Kamble 	adj = rps->last_adj;
1367562d9baeSSagar Arun Kamble 	new_delay = rps->cur_freq;
1368562d9baeSSagar Arun Kamble 	min = rps->min_freq_softlimit;
1369562d9baeSSagar Arun Kamble 	max = rps->max_freq_softlimit;
13707b92c1bdSChris Wilson 	if (client_boost)
1371562d9baeSSagar Arun Kamble 		max = rps->max_freq;
1372562d9baeSSagar Arun Kamble 	if (client_boost && new_delay < rps->boost_freq) {
1373562d9baeSSagar Arun Kamble 		new_delay = rps->boost_freq;
13748d3afd7dSChris Wilson 		adj = 0;
13758d3afd7dSChris Wilson 	} else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
1376dd75fdc8SChris Wilson 		if (adj > 0)
1377dd75fdc8SChris Wilson 			adj *= 2;
1378edcf284bSChris Wilson 		else /* CHV needs even encode values */
1379edcf284bSChris Wilson 			adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;
13807e79a683SSagar Arun Kamble 
1381562d9baeSSagar Arun Kamble 		if (new_delay >= rps->max_freq_softlimit)
13827e79a683SSagar Arun Kamble 			adj = 0;
13837b92c1bdSChris Wilson 	} else if (client_boost) {
1384f5a4c67dSChris Wilson 		adj = 0;
1385dd75fdc8SChris Wilson 	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
1386562d9baeSSagar Arun Kamble 		if (rps->cur_freq > rps->efficient_freq)
1387562d9baeSSagar Arun Kamble 			new_delay = rps->efficient_freq;
1388562d9baeSSagar Arun Kamble 		else if (rps->cur_freq > rps->min_freq_softlimit)
1389562d9baeSSagar Arun Kamble 			new_delay = rps->min_freq_softlimit;
1390dd75fdc8SChris Wilson 		adj = 0;
1391dd75fdc8SChris Wilson 	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
1392dd75fdc8SChris Wilson 		if (adj < 0)
1393dd75fdc8SChris Wilson 			adj *= 2;
1394edcf284bSChris Wilson 		else /* CHV needs even encode values */
1395edcf284bSChris Wilson 			adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;
13967e79a683SSagar Arun Kamble 
1397562d9baeSSagar Arun Kamble 		if (new_delay <= rps->min_freq_softlimit)
13987e79a683SSagar Arun Kamble 			adj = 0;
1399dd75fdc8SChris Wilson 	} else { /* unknown event */
1400edcf284bSChris Wilson 		adj = 0;
1401dd75fdc8SChris Wilson 	}
14023b8d8d91SJesse Barnes 
1403562d9baeSSagar Arun Kamble 	rps->last_adj = adj;
1404edcf284bSChris Wilson 
14052a8862d2SChris Wilson 	/*
14062a8862d2SChris Wilson 	 * Limit deboosting and boosting to keep ourselves at the extremes
14072a8862d2SChris Wilson 	 * when in the respective power modes (i.e. slowly decrease frequencies
14082a8862d2SChris Wilson 	 * while in the HIGH_POWER zone and slowly increase frequencies while
14092a8862d2SChris Wilson 	 * in the LOW_POWER zone). On idle, we will hit the timeout and drop
14102a8862d2SChris Wilson 	 * to the next level quickly, and conversely if busy we expect to
14112a8862d2SChris Wilson 	 * hit a waitboost and rapidly switch into max power.
14122a8862d2SChris Wilson 	 */
14132a8862d2SChris Wilson 	if ((adj < 0 && rps->power.mode == HIGH_POWER) ||
14142a8862d2SChris Wilson 	    (adj > 0 && rps->power.mode == LOW_POWER))
14152a8862d2SChris Wilson 		rps->last_adj = 0;
14162a8862d2SChris Wilson 
141779249636SBen Widawsky 	/* sysfs frequency interfaces may have snuck in while servicing the
141879249636SBen Widawsky 	 * interrupt
141979249636SBen Widawsky 	 */
1420edcf284bSChris Wilson 	new_delay += adj;
14218d3afd7dSChris Wilson 	new_delay = clamp_t(int, new_delay, min, max);
142227544369SDeepak S 
14239fcee2f7SChris Wilson 	if (intel_set_rps(dev_priv, new_delay)) {
14249fcee2f7SChris Wilson 		DRM_DEBUG_DRIVER("Failed to set new GPU frequency\n");
1425562d9baeSSagar Arun Kamble 		rps->last_adj = 0;
14269fcee2f7SChris Wilson 	}
14273b8d8d91SJesse Barnes 
1428ebb5eb7dSChris Wilson 	mutex_unlock(&rps->lock);
14297c0a16adSChris Wilson 
14307c0a16adSChris Wilson out:
14317c0a16adSChris Wilson 	/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
14327c0a16adSChris Wilson 	spin_lock_irq(&dev_priv->irq_lock);
1433562d9baeSSagar Arun Kamble 	if (rps->interrupts_enabled)
1434*58820574STvrtko Ursulin 		gen6_unmask_pm_irq(&dev_priv->gt, dev_priv->pm_rps_events);
14357c0a16adSChris Wilson 	spin_unlock_irq(&dev_priv->irq_lock);
14363b8d8d91SJesse Barnes }
14373b8d8d91SJesse Barnes 
1438e3689190SBen Widawsky 
1439e3689190SBen Widawsky /**
1440e3689190SBen Widawsky  * ivybridge_parity_work - Workqueue called when a parity error interrupt
1441e3689190SBen Widawsky  * occurred.
1442e3689190SBen Widawsky  * @work: workqueue struct
1443e3689190SBen Widawsky  *
1444e3689190SBen Widawsky  * Doesn't actually do anything except notify userspace. As a consequence of
1445e3689190SBen Widawsky  * this event, userspace should try to remap the bad rows since statistically
1446e3689190SBen Widawsky  * the same row is more likely to go bad again.
1447e3689190SBen Widawsky  */
1448e3689190SBen Widawsky static void ivybridge_parity_work(struct work_struct *work)
1449e3689190SBen Widawsky {
14502d1013ddSJani Nikula 	struct drm_i915_private *dev_priv =
1451cefcff8fSJoonas Lahtinen 		container_of(work, typeof(*dev_priv), l3_parity.error_work);
1452e3689190SBen Widawsky 	u32 error_status, row, bank, subbank;
145335a85ac6SBen Widawsky 	char *parity_event[6];
1454a9c287c9SJani Nikula 	u32 misccpctl;
1455a9c287c9SJani Nikula 	u8 slice = 0;
1456e3689190SBen Widawsky 
1457e3689190SBen Widawsky 	/* We must turn off DOP level clock gating to access the L3 registers.
1458e3689190SBen Widawsky 	 * In order to prevent a get/put style interface, acquire struct mutex
1459e3689190SBen Widawsky 	 * any time we access those registers.
1460e3689190SBen Widawsky 	 */
146191c8a326SChris Wilson 	mutex_lock(&dev_priv->drm.struct_mutex);
1462e3689190SBen Widawsky 
146335a85ac6SBen Widawsky 	/* If we've screwed up tracking, just let the interrupt fire again */
146435a85ac6SBen Widawsky 	if (WARN_ON(!dev_priv->l3_parity.which_slice))
146535a85ac6SBen Widawsky 		goto out;
146635a85ac6SBen Widawsky 
1467e3689190SBen Widawsky 	misccpctl = I915_READ(GEN7_MISCCPCTL);
1468e3689190SBen Widawsky 	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
1469e3689190SBen Widawsky 	POSTING_READ(GEN7_MISCCPCTL);
1470e3689190SBen Widawsky 
147135a85ac6SBen Widawsky 	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
1472f0f59a00SVille Syrjälä 		i915_reg_t reg;
147335a85ac6SBen Widawsky 
147435a85ac6SBen Widawsky 		slice--;
14752d1fe073SJoonas Lahtinen 		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv)))
147635a85ac6SBen Widawsky 			break;
147735a85ac6SBen Widawsky 
147835a85ac6SBen Widawsky 		dev_priv->l3_parity.which_slice &= ~(1<<slice);
147935a85ac6SBen Widawsky 
14806fa1c5f1SVille Syrjälä 		reg = GEN7_L3CDERRST1(slice);
148135a85ac6SBen Widawsky 
148235a85ac6SBen Widawsky 		error_status = I915_READ(reg);
1483e3689190SBen Widawsky 		row = GEN7_PARITY_ERROR_ROW(error_status);
1484e3689190SBen Widawsky 		bank = GEN7_PARITY_ERROR_BANK(error_status);
1485e3689190SBen Widawsky 		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
1486e3689190SBen Widawsky 
148735a85ac6SBen Widawsky 		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
148835a85ac6SBen Widawsky 		POSTING_READ(reg);
1489e3689190SBen Widawsky 
1490cce723edSBen Widawsky 		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
1491e3689190SBen Widawsky 		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
1492e3689190SBen Widawsky 		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
1493e3689190SBen Widawsky 		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
149435a85ac6SBen Widawsky 		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
149535a85ac6SBen Widawsky 		parity_event[5] = NULL;
1496e3689190SBen Widawsky 
149791c8a326SChris Wilson 		kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj,
1498e3689190SBen Widawsky 				   KOBJ_CHANGE, parity_event);
1499e3689190SBen Widawsky 
150035a85ac6SBen Widawsky 		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
150135a85ac6SBen Widawsky 			  slice, row, bank, subbank);
1502e3689190SBen Widawsky 
150335a85ac6SBen Widawsky 		kfree(parity_event[4]);
1504e3689190SBen Widawsky 		kfree(parity_event[3]);
1505e3689190SBen Widawsky 		kfree(parity_event[2]);
1506e3689190SBen Widawsky 		kfree(parity_event[1]);
1507e3689190SBen Widawsky 	}
1508e3689190SBen Widawsky 
150935a85ac6SBen Widawsky 	I915_WRITE(GEN7_MISCCPCTL, misccpctl);
151035a85ac6SBen Widawsky 
151135a85ac6SBen Widawsky out:
151235a85ac6SBen Widawsky 	WARN_ON(dev_priv->l3_parity.which_slice);
15134cb21832SDaniel Vetter 	spin_lock_irq(&dev_priv->irq_lock);
15142d1fe073SJoonas Lahtinen 	gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
15154cb21832SDaniel Vetter 	spin_unlock_irq(&dev_priv->irq_lock);
151635a85ac6SBen Widawsky 
151791c8a326SChris Wilson 	mutex_unlock(&dev_priv->drm.struct_mutex);
151835a85ac6SBen Widawsky }
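
/*
 * Illustrative shape of the uevent payload sent above, with made-up
 * numbers: I915_L3_PARITY_UEVENT "=1", "ROW=5", "BANK=2", "SUBBANK=1",
 * "SLICE=0".
 */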
151935a85ac6SBen Widawsky 
1520261e40b8SVille Syrjälä static void ivybridge_parity_error_irq_handler(struct drm_i915_private *dev_priv,
1521261e40b8SVille Syrjälä 					       u32 iir)
1522e3689190SBen Widawsky {
1523261e40b8SVille Syrjälä 	if (!HAS_L3_DPF(dev_priv))
1524e3689190SBen Widawsky 		return;
1525e3689190SBen Widawsky 
1526d0ecd7e2SDaniel Vetter 	spin_lock(&dev_priv->irq_lock);
1527261e40b8SVille Syrjälä 	gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
1528d0ecd7e2SDaniel Vetter 	spin_unlock(&dev_priv->irq_lock);
1529e3689190SBen Widawsky 
1530261e40b8SVille Syrjälä 	iir &= GT_PARITY_ERROR(dev_priv);
153135a85ac6SBen Widawsky 	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
153235a85ac6SBen Widawsky 		dev_priv->l3_parity.which_slice |= 1 << 1;
153335a85ac6SBen Widawsky 
153435a85ac6SBen Widawsky 	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
153535a85ac6SBen Widawsky 		dev_priv->l3_parity.which_slice |= 1 << 0;
153635a85ac6SBen Widawsky 
1537a4da4fa4SDaniel Vetter 	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
1538e3689190SBen Widawsky }
1539e3689190SBen Widawsky 
1540261e40b8SVille Syrjälä static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv,
1541f1af8fc1SPaulo Zanoni 			       u32 gt_iir)
1542f1af8fc1SPaulo Zanoni {
1543f8973c21SChris Wilson 	if (gt_iir & GT_RENDER_USER_INTERRUPT)
15448a68d464SChris Wilson 		intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);
1545f1af8fc1SPaulo Zanoni 	if (gt_iir & ILK_BSD_USER_INTERRUPT)
15468a68d464SChris Wilson 		intel_engine_breadcrumbs_irq(dev_priv->engine[VCS0]);
1547f1af8fc1SPaulo Zanoni }
1548f1af8fc1SPaulo Zanoni 
1549261e40b8SVille Syrjälä static void snb_gt_irq_handler(struct drm_i915_private *dev_priv,
1550e7b4c6b1SDaniel Vetter 			       u32 gt_iir)
1551e7b4c6b1SDaniel Vetter {
1552f8973c21SChris Wilson 	if (gt_iir & GT_RENDER_USER_INTERRUPT)
15538a68d464SChris Wilson 		intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);
1554cc609d5dSBen Widawsky 	if (gt_iir & GT_BSD_USER_INTERRUPT)
15558a68d464SChris Wilson 		intel_engine_breadcrumbs_irq(dev_priv->engine[VCS0]);
1556cc609d5dSBen Widawsky 	if (gt_iir & GT_BLT_USER_INTERRUPT)
15578a68d464SChris Wilson 		intel_engine_breadcrumbs_irq(dev_priv->engine[BCS0]);
1558e7b4c6b1SDaniel Vetter 
1559cc609d5dSBen Widawsky 	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
1560cc609d5dSBen Widawsky 		      GT_BSD_CS_ERROR_INTERRUPT |
1561aaecdf61SDaniel Vetter 		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
1562aaecdf61SDaniel Vetter 		DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);
1563e3689190SBen Widawsky 
1564261e40b8SVille Syrjälä 	if (gt_iir & GT_PARITY_ERROR(dev_priv))
1565261e40b8SVille Syrjälä 		ivybridge_parity_error_irq_handler(dev_priv, gt_iir);
1566e7b4c6b1SDaniel Vetter }
1567e7b4c6b1SDaniel Vetter 
15685d3d69d5SChris Wilson static void
156951f6b0f9SChris Wilson gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir)
1570fbcc1a0cSNick Hoath {
157131de7350SChris Wilson 	bool tasklet = false;
1572f747026cSChris Wilson 
1573fd8526e5SChris Wilson 	if (iir & GT_CONTEXT_SWITCH_INTERRUPT)
15748ea397faSChris Wilson 		tasklet = true;
157531de7350SChris Wilson 
157651f6b0f9SChris Wilson 	if (iir & GT_RENDER_USER_INTERRUPT) {
157752c0fdb2SChris Wilson 		intel_engine_breadcrumbs_irq(engine);
15784c6ce5c9SChris Wilson 		tasklet |= intel_engine_needs_breadcrumb_tasklet(engine);
157931de7350SChris Wilson 	}
158031de7350SChris Wilson 
158131de7350SChris Wilson 	if (tasklet)
1582fd8526e5SChris Wilson 		tasklet_hi_schedule(&engine->execlists.tasklet);
1583fbcc1a0cSNick Hoath }
1584fbcc1a0cSNick Hoath 
15852e4a5b25SChris Wilson static void gen8_gt_irq_ack(struct drm_i915_private *i915,
158655ef72f2SChris Wilson 			    u32 master_ctl, u32 gt_iir[4])
1587abd58f01SBen Widawsky {
158825286aacSDaniele Ceraolo Spurio 	void __iomem * const regs = i915->uncore.regs;
15892e4a5b25SChris Wilson 
1590f0fd96f5SChris Wilson #define GEN8_GT_IRQS (GEN8_GT_RCS_IRQ | \
1591f0fd96f5SChris Wilson 		      GEN8_GT_BCS_IRQ | \
15928a68d464SChris Wilson 		      GEN8_GT_VCS0_IRQ | \
1593f0fd96f5SChris Wilson 		      GEN8_GT_VCS1_IRQ | \
1594f0fd96f5SChris Wilson 		      GEN8_GT_VECS_IRQ | \
1595f0fd96f5SChris Wilson 		      GEN8_GT_PM_IRQ | \
1596f0fd96f5SChris Wilson 		      GEN8_GT_GUC_IRQ)
1597f0fd96f5SChris Wilson 
1598abd58f01SBen Widawsky 	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
15992e4a5b25SChris Wilson 		gt_iir[0] = raw_reg_read(regs, GEN8_GT_IIR(0));
16002e4a5b25SChris Wilson 		if (likely(gt_iir[0]))
16012e4a5b25SChris Wilson 			raw_reg_write(regs, GEN8_GT_IIR(0), gt_iir[0]);
1602abd58f01SBen Widawsky 	}
1603abd58f01SBen Widawsky 
16048a68d464SChris Wilson 	if (master_ctl & (GEN8_GT_VCS0_IRQ | GEN8_GT_VCS1_IRQ)) {
16052e4a5b25SChris Wilson 		gt_iir[1] = raw_reg_read(regs, GEN8_GT_IIR(1));
16062e4a5b25SChris Wilson 		if (likely(gt_iir[1]))
16072e4a5b25SChris Wilson 			raw_reg_write(regs, GEN8_GT_IIR(1), gt_iir[1]);
160874cdb337SChris Wilson 	}
160974cdb337SChris Wilson 
161026705e20SSagar Arun Kamble 	if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
16112e4a5b25SChris Wilson 		gt_iir[2] = raw_reg_read(regs, GEN8_GT_IIR(2));
1612f4de7794SChris Wilson 		if (likely(gt_iir[2]))
1613f4de7794SChris Wilson 			raw_reg_write(regs, GEN8_GT_IIR(2), gt_iir[2]);
16140961021aSBen Widawsky 	}
16152e4a5b25SChris Wilson 
16162e4a5b25SChris Wilson 	if (master_ctl & GEN8_GT_VECS_IRQ) {
16172e4a5b25SChris Wilson 		gt_iir[3] = raw_reg_read(regs, GEN8_GT_IIR(3));
16182e4a5b25SChris Wilson 		if (likely(gt_iir[3]))
16192e4a5b25SChris Wilson 			raw_reg_write(regs, GEN8_GT_IIR(3), gt_iir[3]);
162055ef72f2SChris Wilson 	}
1621abd58f01SBen Widawsky }
1622abd58f01SBen Widawsky 
16232e4a5b25SChris Wilson static void gen8_gt_irq_handler(struct drm_i915_private *i915,
1624f0fd96f5SChris Wilson 				u32 master_ctl, u32 gt_iir[4])
1625e30e251aSVille Syrjälä {
1626f0fd96f5SChris Wilson 	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
16278a68d464SChris Wilson 		gen8_cs_irq_handler(i915->engine[RCS0],
162851f6b0f9SChris Wilson 				    gt_iir[0] >> GEN8_RCS_IRQ_SHIFT);
16298a68d464SChris Wilson 		gen8_cs_irq_handler(i915->engine[BCS0],
163051f6b0f9SChris Wilson 				    gt_iir[0] >> GEN8_BCS_IRQ_SHIFT);
1631e30e251aSVille Syrjälä 	}
1632e30e251aSVille Syrjälä 
16338a68d464SChris Wilson 	if (master_ctl & (GEN8_GT_VCS0_IRQ | GEN8_GT_VCS1_IRQ)) {
16348a68d464SChris Wilson 		gen8_cs_irq_handler(i915->engine[VCS0],
16358a68d464SChris Wilson 				    gt_iir[1] >> GEN8_VCS0_IRQ_SHIFT);
16368a68d464SChris Wilson 		gen8_cs_irq_handler(i915->engine[VCS1],
163751f6b0f9SChris Wilson 				    gt_iir[1] >> GEN8_VCS1_IRQ_SHIFT);
1638e30e251aSVille Syrjälä 	}
1639e30e251aSVille Syrjälä 
1640f0fd96f5SChris Wilson 	if (master_ctl & GEN8_GT_VECS_IRQ) {
16418a68d464SChris Wilson 		gen8_cs_irq_handler(i915->engine[VECS0],
164251f6b0f9SChris Wilson 				    gt_iir[3] >> GEN8_VECS_IRQ_SHIFT);
1643f0fd96f5SChris Wilson 	}
1644e30e251aSVille Syrjälä 
1645f0fd96f5SChris Wilson 	if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
16462e4a5b25SChris Wilson 		gen6_rps_irq_handler(i915, gt_iir[2]);
16472e4a5b25SChris Wilson 		gen9_guc_irq_handler(i915, gt_iir[2]);
1648e30e251aSVille Syrjälä 	}
1649f0fd96f5SChris Wilson }
1650e30e251aSVille Syrjälä 
1651af92058fSVille Syrjälä static bool gen11_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1652121e758eSDhinakaran Pandiyan {
1653af92058fSVille Syrjälä 	switch (pin) {
1654af92058fSVille Syrjälä 	case HPD_PORT_C:
1655121e758eSDhinakaran Pandiyan 		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC1);
1656af92058fSVille Syrjälä 	case HPD_PORT_D:
1657121e758eSDhinakaran Pandiyan 		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC2);
1658af92058fSVille Syrjälä 	case HPD_PORT_E:
1659121e758eSDhinakaran Pandiyan 		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC3);
1660af92058fSVille Syrjälä 	case HPD_PORT_F:
1661121e758eSDhinakaran Pandiyan 		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC4);
1662121e758eSDhinakaran Pandiyan 	default:
1663121e758eSDhinakaran Pandiyan 		return false;
1664121e758eSDhinakaran Pandiyan 	}
1665121e758eSDhinakaran Pandiyan }
1666121e758eSDhinakaran Pandiyan 
1667af92058fSVille Syrjälä static bool bxt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
166863c88d22SImre Deak {
1669af92058fSVille Syrjälä 	switch (pin) {
1670af92058fSVille Syrjälä 	case HPD_PORT_A:
1671195baa06SVille Syrjälä 		return val & PORTA_HOTPLUG_LONG_DETECT;
1672af92058fSVille Syrjälä 	case HPD_PORT_B:
167363c88d22SImre Deak 		return val & PORTB_HOTPLUG_LONG_DETECT;
1674af92058fSVille Syrjälä 	case HPD_PORT_C:
167563c88d22SImre Deak 		return val & PORTC_HOTPLUG_LONG_DETECT;
167663c88d22SImre Deak 	default:
167763c88d22SImre Deak 		return false;
167863c88d22SImre Deak 	}
167963c88d22SImre Deak }
168063c88d22SImre Deak 
1681af92058fSVille Syrjälä static bool icp_ddi_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
168231604222SAnusha Srivatsa {
1683af92058fSVille Syrjälä 	switch (pin) {
1684af92058fSVille Syrjälä 	case HPD_PORT_A:
168531604222SAnusha Srivatsa 		return val & ICP_DDIA_HPD_LONG_DETECT;
1686af92058fSVille Syrjälä 	case HPD_PORT_B:
168731604222SAnusha Srivatsa 		return val & ICP_DDIB_HPD_LONG_DETECT;
168831604222SAnusha Srivatsa 	default:
168931604222SAnusha Srivatsa 		return false;
169031604222SAnusha Srivatsa 	}
169131604222SAnusha Srivatsa }
169231604222SAnusha Srivatsa 
1693af92058fSVille Syrjälä static bool icp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
169431604222SAnusha Srivatsa {
1695af92058fSVille Syrjälä 	switch (pin) {
1696af92058fSVille Syrjälä 	case HPD_PORT_C:
169731604222SAnusha Srivatsa 		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC1);
1698af92058fSVille Syrjälä 	case HPD_PORT_D:
169931604222SAnusha Srivatsa 		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC2);
1700af92058fSVille Syrjälä 	case HPD_PORT_E:
170131604222SAnusha Srivatsa 		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC3);
1702af92058fSVille Syrjälä 	case HPD_PORT_F:
170331604222SAnusha Srivatsa 		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC4);
170431604222SAnusha Srivatsa 	default:
170531604222SAnusha Srivatsa 		return false;
170631604222SAnusha Srivatsa 	}
170731604222SAnusha Srivatsa }
170831604222SAnusha Srivatsa 
1709af92058fSVille Syrjälä static bool spt_port_hotplug2_long_detect(enum hpd_pin pin, u32 val)
17106dbf30ceSVille Syrjälä {
1711af92058fSVille Syrjälä 	switch (pin) {
1712af92058fSVille Syrjälä 	case HPD_PORT_E:
17136dbf30ceSVille Syrjälä 		return val & PORTE_HOTPLUG_LONG_DETECT;
17146dbf30ceSVille Syrjälä 	default:
17156dbf30ceSVille Syrjälä 		return false;
17166dbf30ceSVille Syrjälä 	}
17176dbf30ceSVille Syrjälä }
17186dbf30ceSVille Syrjälä 
1719af92058fSVille Syrjälä static bool spt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
172074c0b395SVille Syrjälä {
1721af92058fSVille Syrjälä 	switch (pin) {
1722af92058fSVille Syrjälä 	case HPD_PORT_A:
172374c0b395SVille Syrjälä 		return val & PORTA_HOTPLUG_LONG_DETECT;
1724af92058fSVille Syrjälä 	case HPD_PORT_B:
172574c0b395SVille Syrjälä 		return val & PORTB_HOTPLUG_LONG_DETECT;
1726af92058fSVille Syrjälä 	case HPD_PORT_C:
172774c0b395SVille Syrjälä 		return val & PORTC_HOTPLUG_LONG_DETECT;
1728af92058fSVille Syrjälä 	case HPD_PORT_D:
172974c0b395SVille Syrjälä 		return val & PORTD_HOTPLUG_LONG_DETECT;
173074c0b395SVille Syrjälä 	default:
173174c0b395SVille Syrjälä 		return false;
173274c0b395SVille Syrjälä 	}
173374c0b395SVille Syrjälä }
173474c0b395SVille Syrjälä 
1735af92058fSVille Syrjälä static bool ilk_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1736e4ce95aaSVille Syrjälä {
1737af92058fSVille Syrjälä 	switch (pin) {
1738af92058fSVille Syrjälä 	case HPD_PORT_A:
1739e4ce95aaSVille Syrjälä 		return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
1740e4ce95aaSVille Syrjälä 	default:
1741e4ce95aaSVille Syrjälä 		return false;
1742e4ce95aaSVille Syrjälä 	}
1743e4ce95aaSVille Syrjälä }
1744e4ce95aaSVille Syrjälä 
1745af92058fSVille Syrjälä static bool pch_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
174613cf5504SDave Airlie {
1747af92058fSVille Syrjälä 	switch (pin) {
1748af92058fSVille Syrjälä 	case HPD_PORT_B:
1749676574dfSJani Nikula 		return val & PORTB_HOTPLUG_LONG_DETECT;
1750af92058fSVille Syrjälä 	case HPD_PORT_C:
1751676574dfSJani Nikula 		return val & PORTC_HOTPLUG_LONG_DETECT;
1752af92058fSVille Syrjälä 	case HPD_PORT_D:
1753676574dfSJani Nikula 		return val & PORTD_HOTPLUG_LONG_DETECT;
1754676574dfSJani Nikula 	default:
1755676574dfSJani Nikula 		return false;
175613cf5504SDave Airlie 	}
175713cf5504SDave Airlie }
175813cf5504SDave Airlie 
1759af92058fSVille Syrjälä static bool i9xx_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
176013cf5504SDave Airlie {
1761af92058fSVille Syrjälä 	switch (pin) {
1762af92058fSVille Syrjälä 	case HPD_PORT_B:
1763676574dfSJani Nikula 		return val & PORTB_HOTPLUG_INT_LONG_PULSE;
1764af92058fSVille Syrjälä 	case HPD_PORT_C:
1765676574dfSJani Nikula 		return val & PORTC_HOTPLUG_INT_LONG_PULSE;
1766af92058fSVille Syrjälä 	case HPD_PORT_D:
1767676574dfSJani Nikula 		return val & PORTD_HOTPLUG_INT_LONG_PULSE;
1768676574dfSJani Nikula 	default:
1769676574dfSJani Nikula 		return false;
177013cf5504SDave Airlie 	}
177113cf5504SDave Airlie }
177213cf5504SDave Airlie 
177342db67d6SVille Syrjälä /*
177442db67d6SVille Syrjälä  * Get a bit mask of pins that have triggered, and which ones may be long.
177542db67d6SVille Syrjälä  * This can be called multiple times with the same masks to accumulate
177642db67d6SVille Syrjälä  * hotplug detection results from several registers.
177742db67d6SVille Syrjälä  *
177842db67d6SVille Syrjälä  * Note that the caller is expected to zero out the masks initially.
177942db67d6SVille Syrjälä  */
1780cf53902fSRodrigo Vivi static void intel_get_hpd_pins(struct drm_i915_private *dev_priv,
1781cf53902fSRodrigo Vivi 			       u32 *pin_mask, u32 *long_mask,
17828c841e57SJani Nikula 			       u32 hotplug_trigger, u32 dig_hotplug_reg,
1783fd63e2a9SImre Deak 			       const u32 hpd[HPD_NUM_PINS],
1784af92058fSVille Syrjälä 			       bool long_pulse_detect(enum hpd_pin pin, u32 val))
1785676574dfSJani Nikula {
1786e9be2850SVille Syrjälä 	enum hpd_pin pin;
1787676574dfSJani Nikula 
1788e9be2850SVille Syrjälä 	for_each_hpd_pin(pin) {
1789e9be2850SVille Syrjälä 		if ((hpd[pin] & hotplug_trigger) == 0)
17908c841e57SJani Nikula 			continue;
17918c841e57SJani Nikula 
1792e9be2850SVille Syrjälä 		*pin_mask |= BIT(pin);
1793676574dfSJani Nikula 
1794af92058fSVille Syrjälä 		if (long_pulse_detect(pin, dig_hotplug_reg))
1795e9be2850SVille Syrjälä 			*long_mask |= BIT(pin);
1796676574dfSJani Nikula 	}
1797676574dfSJani Nikula 
1798f88f0478SVille Syrjälä 	DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x, long 0x%08x\n",
1799f88f0478SVille Syrjälä 			 hotplug_trigger, dig_hotplug_reg, *pin_mask, *long_mask);
1800676574dfSJani Nikula 
1801676574dfSJani Nikula }
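
/*
 * Roughly how a PCH hotplug handler elsewhere in this file is expected
 * to use the helper above, accumulating results from two registers into
 * the same masks (the trigger/dig values here are hypothetical):
 *
 *	u32 pin_mask = 0, long_mask = 0;
 *
 *	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
 *			   hotplug_trigger, dig_hotplug_reg,
 *			   hpd_spt, spt_port_hotplug_long_detect);
 *	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
 *			   hotplug2_trigger, dig_hotplug_reg2,
 *			   hpd_spt, spt_port_hotplug2_long_detect);
 *	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
 */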
1802676574dfSJani Nikula 
180391d14251STvrtko Ursulin static void gmbus_irq_handler(struct drm_i915_private *dev_priv)
1804515ac2bbSDaniel Vetter {
180528c70f16SDaniel Vetter 	wake_up_all(&dev_priv->gmbus_wait_queue);
1806515ac2bbSDaniel Vetter }
1807515ac2bbSDaniel Vetter 
180891d14251STvrtko Ursulin static void dp_aux_irq_handler(struct drm_i915_private *dev_priv)
1809ce99c256SDaniel Vetter {
18109ee32feaSDaniel Vetter 	wake_up_all(&dev_priv->gmbus_wait_queue);
1811ce99c256SDaniel Vetter }
1812ce99c256SDaniel Vetter 
18138bf1e9f1SShuang He #if defined(CONFIG_DEBUG_FS)
181491d14251STvrtko Ursulin static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
181591d14251STvrtko Ursulin 					 enum pipe pipe,
1816a9c287c9SJani Nikula 					 u32 crc0, u32 crc1,
1817a9c287c9SJani Nikula 					 u32 crc2, u32 crc3,
1818a9c287c9SJani Nikula 					 u32 crc4)
18198bf1e9f1SShuang He {
18208bf1e9f1SShuang He 	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
18218c6b709dSTomeu Vizoso 	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
18225cee6c45SVille Syrjälä 	u32 crcs[5] = { crc0, crc1, crc2, crc3, crc4 };
18235cee6c45SVille Syrjälä 
18245cee6c45SVille Syrjälä 	trace_intel_pipe_crc(crtc, crcs);
1825b2c88f5bSDamien Lespiau 
1826d538bbdfSDamien Lespiau 	spin_lock(&pipe_crc->lock);
18278c6b709dSTomeu Vizoso 	/*
18288c6b709dSTomeu Vizoso 	 * For some not yet identified reason, the first CRC is
18298c6b709dSTomeu Vizoso 	 * bonkers. So let's just wait for the next vblank and read
18308c6b709dSTomeu Vizoso 	 * out the buggy result.
18318c6b709dSTomeu Vizoso 	 *
1832163e8aecSRodrigo Vivi 	 * On GEN8+ sometimes the second CRC is bonkers as well, so
18338c6b709dSTomeu Vizoso 	 * don't trust that one either.
18348c6b709dSTomeu Vizoso 	 */
1835033b7a23SMaarten Lankhorst 	if (pipe_crc->skipped <= 0 ||
1836163e8aecSRodrigo Vivi 	    (INTEL_GEN(dev_priv) >= 8 && pipe_crc->skipped == 1)) {
18378c6b709dSTomeu Vizoso 		pipe_crc->skipped++;
18388c6b709dSTomeu Vizoso 		spin_unlock(&pipe_crc->lock);
18398c6b709dSTomeu Vizoso 		return;
18408c6b709dSTomeu Vizoso 	}
18418c6b709dSTomeu Vizoso 	spin_unlock(&pipe_crc->lock);
18426cc42152SMaarten Lankhorst 
1843246ee524STomeu Vizoso 	drm_crtc_add_crc_entry(&crtc->base, true,
1844ca814b25SDaniel Vetter 				drm_crtc_accurate_vblank_count(&crtc->base),
1845246ee524STomeu Vizoso 				crcs);
18468c6b709dSTomeu Vizoso }
1847277de95eSDaniel Vetter #else
1848277de95eSDaniel Vetter static inline void
184991d14251STvrtko Ursulin display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
185091d14251STvrtko Ursulin 			     enum pipe pipe,
1851a9c287c9SJani Nikula 			     u32 crc0, u32 crc1,
1852a9c287c9SJani Nikula 			     u32 crc2, u32 crc3,
1853a9c287c9SJani Nikula 			     u32 crc4) {}
1854277de95eSDaniel Vetter #endif
1855eba94eb9SDaniel Vetter 
1856277de95eSDaniel Vetter 
185791d14251STvrtko Ursulin static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
185891d14251STvrtko Ursulin 				     enum pipe pipe)
18595a69b89fSDaniel Vetter {
186091d14251STvrtko Ursulin 	display_pipe_crc_irq_handler(dev_priv, pipe,
18615a69b89fSDaniel Vetter 				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
18625a69b89fSDaniel Vetter 				     0, 0, 0, 0);
18635a69b89fSDaniel Vetter }
18645a69b89fSDaniel Vetter 
186591d14251STvrtko Ursulin static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
186691d14251STvrtko Ursulin 				     enum pipe pipe)
1867eba94eb9SDaniel Vetter {
186891d14251STvrtko Ursulin 	display_pipe_crc_irq_handler(dev_priv, pipe,
1869eba94eb9SDaniel Vetter 				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1870eba94eb9SDaniel Vetter 				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
1871eba94eb9SDaniel Vetter 				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
1872eba94eb9SDaniel Vetter 				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
18738bc5e955SDaniel Vetter 				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
1874eba94eb9SDaniel Vetter }
18755b3a856bSDaniel Vetter 
187691d14251STvrtko Ursulin static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
187791d14251STvrtko Ursulin 				      enum pipe pipe)
18785b3a856bSDaniel Vetter {
1879a9c287c9SJani Nikula 	u32 res1, res2;
18800b5c5ed0SDaniel Vetter 
188191d14251STvrtko Ursulin 	if (INTEL_GEN(dev_priv) >= 3)
18820b5c5ed0SDaniel Vetter 		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
18830b5c5ed0SDaniel Vetter 	else
18840b5c5ed0SDaniel Vetter 		res1 = 0;
18850b5c5ed0SDaniel Vetter 
188691d14251STvrtko Ursulin 	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
18870b5c5ed0SDaniel Vetter 		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
18880b5c5ed0SDaniel Vetter 	else
18890b5c5ed0SDaniel Vetter 		res2 = 0;
18905b3a856bSDaniel Vetter 
189191d14251STvrtko Ursulin 	display_pipe_crc_irq_handler(dev_priv, pipe,
18920b5c5ed0SDaniel Vetter 				     I915_READ(PIPE_CRC_RES_RED(pipe)),
18930b5c5ed0SDaniel Vetter 				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
18940b5c5ed0SDaniel Vetter 				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
18950b5c5ed0SDaniel Vetter 				     res1, res2);
18965b3a856bSDaniel Vetter }
18978bf1e9f1SShuang He 
18981403c0d4SPaulo Zanoni /* The RPS events need forcewake, so we add them to a work queue and mask their
18991403c0d4SPaulo Zanoni  * IMR bits until the work is done. Other interrupts can be processed without
19001403c0d4SPaulo Zanoni  * the work queue. */
1901*58820574STvrtko Ursulin static void gen11_rps_irq_handler(struct intel_gt *gt, u32 pm_iir)
1902a087bafeSMika Kuoppala {
1903*58820574STvrtko Ursulin 	struct drm_i915_private *i915 = gt->i915;
1904a087bafeSMika Kuoppala 	struct intel_rps *rps = &i915->gt_pm.rps;
1905a087bafeSMika Kuoppala 	const u32 events = i915->pm_rps_events & pm_iir;
1906a087bafeSMika Kuoppala 
1907a087bafeSMika Kuoppala 	lockdep_assert_held(&i915->irq_lock);
1908a087bafeSMika Kuoppala 
1909a087bafeSMika Kuoppala 	if (unlikely(!events))
1910a087bafeSMika Kuoppala 		return;
1911a087bafeSMika Kuoppala 
1912*58820574STvrtko Ursulin 	gen6_mask_pm_irq(gt, events);
1913a087bafeSMika Kuoppala 
1914a087bafeSMika Kuoppala 	if (!rps->interrupts_enabled)
1915a087bafeSMika Kuoppala 		return;
1916a087bafeSMika Kuoppala 
1917a087bafeSMika Kuoppala 	rps->pm_iir |= events;
1918a087bafeSMika Kuoppala 	schedule_work(&rps->work);
1919a087bafeSMika Kuoppala }
1920a087bafeSMika Kuoppala 
19211403c0d4SPaulo Zanoni static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
1922baf02a1fSBen Widawsky {
1923562d9baeSSagar Arun Kamble 	struct intel_rps *rps = &dev_priv->gt_pm.rps;
1924562d9baeSSagar Arun Kamble 
1925a6706b45SDeepak S 	if (pm_iir & dev_priv->pm_rps_events) {
192659cdb63dSDaniel Vetter 		spin_lock(&dev_priv->irq_lock);
1927*58820574STvrtko Ursulin 		gen6_mask_pm_irq(&dev_priv->gt,
1928*58820574STvrtko Ursulin 				 pm_iir & dev_priv->pm_rps_events);
1929562d9baeSSagar Arun Kamble 		if (rps->interrupts_enabled) {
1930562d9baeSSagar Arun Kamble 			rps->pm_iir |= pm_iir & dev_priv->pm_rps_events;
1931562d9baeSSagar Arun Kamble 			schedule_work(&rps->work);
193241a05a3aSDaniel Vetter 		}
1933d4d70aa5SImre Deak 		spin_unlock(&dev_priv->irq_lock);
1934d4d70aa5SImre Deak 	}
1935baf02a1fSBen Widawsky 
1936bca2bf2aSPandiyan, Dhinakaran 	if (INTEL_GEN(dev_priv) >= 8)
1937c9a9a268SImre Deak 		return;
1938c9a9a268SImre Deak 
193912638c57SBen Widawsky 	if (pm_iir & PM_VEBOX_USER_INTERRUPT)
19408a68d464SChris Wilson 		intel_engine_breadcrumbs_irq(dev_priv->engine[VECS0]);
194112638c57SBen Widawsky 
1942aaecdf61SDaniel Vetter 	if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
1943aaecdf61SDaniel Vetter 		DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
194412638c57SBen Widawsky }
1945baf02a1fSBen Widawsky 
194626705e20SSagar Arun Kamble static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 gt_iir)
194726705e20SSagar Arun Kamble {
194893bf8096SMichal Wajdeczko 	if (gt_iir & GEN9_GUC_TO_HOST_INT_EVENT)
194993bf8096SMichal Wajdeczko 		intel_guc_to_host_event_handler(&dev_priv->guc);
195026705e20SSagar Arun Kamble }
195126705e20SSagar Arun Kamble 
195254c52a84SOscar Mateo static void gen11_guc_irq_handler(struct drm_i915_private *i915, u16 iir)
195354c52a84SOscar Mateo {
195454c52a84SOscar Mateo 	if (iir & GEN11_GUC_INTR_GUC2HOST)
195554c52a84SOscar Mateo 		intel_guc_to_host_event_handler(&i915->guc);
195654c52a84SOscar Mateo }
195754c52a84SOscar Mateo 
195844d9241eSVille Syrjälä static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
195944d9241eSVille Syrjälä {
196044d9241eSVille Syrjälä 	enum pipe pipe;
196144d9241eSVille Syrjälä 
196244d9241eSVille Syrjälä 	for_each_pipe(dev_priv, pipe) {
196344d9241eSVille Syrjälä 		I915_WRITE(PIPESTAT(pipe),
196444d9241eSVille Syrjälä 			   PIPESTAT_INT_STATUS_MASK |
196544d9241eSVille Syrjälä 			   PIPE_FIFO_UNDERRUN_STATUS);
196644d9241eSVille Syrjälä 
196744d9241eSVille Syrjälä 		dev_priv->pipestat_irq_mask[pipe] = 0;
196844d9241eSVille Syrjälä 	}
196944d9241eSVille Syrjälä }
197044d9241eSVille Syrjälä 
1971eb64343cSVille Syrjälä static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
197291d14251STvrtko Ursulin 				  u32 iir, u32 pipe_stats[I915_MAX_PIPES])
19737e231dbeSJesse Barnes {
19747e231dbeSJesse Barnes 	int pipe;
19757e231dbeSJesse Barnes 
197658ead0d7SImre Deak 	spin_lock(&dev_priv->irq_lock);
19771ca993d2SVille Syrjälä 
19781ca993d2SVille Syrjälä 	if (!dev_priv->display_irqs_enabled) {
19791ca993d2SVille Syrjälä 		spin_unlock(&dev_priv->irq_lock);
19801ca993d2SVille Syrjälä 		return;
19811ca993d2SVille Syrjälä 	}
19821ca993d2SVille Syrjälä 
1983055e393fSDamien Lespiau 	for_each_pipe(dev_priv, pipe) {
1984f0f59a00SVille Syrjälä 		i915_reg_t reg;
19856b12ca56SVille Syrjälä 		u32 status_mask, enable_mask, iir_bit = 0;
198691d181ddSImre Deak 
1987bbb5eebfSDaniel Vetter 		/*
1988bbb5eebfSDaniel Vetter 		 * PIPESTAT bits get signalled even when the interrupt is
1989bbb5eebfSDaniel Vetter 		 * disabled with the mask bits, and some of the status bits do
1990bbb5eebfSDaniel Vetter 		 * not generate interrupts at all (like the underrun bit). Hence
1991bbb5eebfSDaniel Vetter 		 * we need to be careful that we only handle what we want to
1992bbb5eebfSDaniel Vetter 		 * handle.
1993bbb5eebfSDaniel Vetter 		 */
19940f239f4cSDaniel Vetter 
19950f239f4cSDaniel Vetter 		/* fifo underruns are filtered in the underrun handler. */
19966b12ca56SVille Syrjälä 		status_mask = PIPE_FIFO_UNDERRUN_STATUS;
1997bbb5eebfSDaniel Vetter 
1998bbb5eebfSDaniel Vetter 		switch (pipe) {
1999bbb5eebfSDaniel Vetter 		case PIPE_A:
2000bbb5eebfSDaniel Vetter 			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
2001bbb5eebfSDaniel Vetter 			break;
2002bbb5eebfSDaniel Vetter 		case PIPE_B:
2003bbb5eebfSDaniel Vetter 			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
2004bbb5eebfSDaniel Vetter 			break;
20053278f67fSVille Syrjälä 		case PIPE_C:
20063278f67fSVille Syrjälä 			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
20073278f67fSVille Syrjälä 			break;
2008bbb5eebfSDaniel Vetter 		}
2009bbb5eebfSDaniel Vetter 		if (iir & iir_bit)
20106b12ca56SVille Syrjälä 			status_mask |= dev_priv->pipestat_irq_mask[pipe];
2011bbb5eebfSDaniel Vetter 
20126b12ca56SVille Syrjälä 		if (!status_mask)
201391d181ddSImre Deak 			continue;
201491d181ddSImre Deak 
201591d181ddSImre Deak 		reg = PIPESTAT(pipe);
20166b12ca56SVille Syrjälä 		pipe_stats[pipe] = I915_READ(reg) & status_mask;
20176b12ca56SVille Syrjälä 		enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
20187e231dbeSJesse Barnes 
20197e231dbeSJesse Barnes 		/*
20207e231dbeSJesse Barnes 		 * Clear the PIPE*STAT regs before the IIR
2021132c27c9SVille Syrjälä 		 *
2022132c27c9SVille Syrjälä 		 * Toggle the enable bits to make sure we get an
2023132c27c9SVille Syrjälä 		 * edge in the ISR pipe event bit if we don't clear
2024132c27c9SVille Syrjälä 		 * all the enabled status bits. Otherwise the edge
2025132c27c9SVille Syrjälä 		 * triggered IIR on i965/g4x wouldn't notice that
2026132c27c9SVille Syrjälä 		 * an interrupt is still pending.
20277e231dbeSJesse Barnes 		 */
2028132c27c9SVille Syrjälä 		if (pipe_stats[pipe]) {
2029132c27c9SVille Syrjälä 			I915_WRITE(reg, pipe_stats[pipe]);
2030132c27c9SVille Syrjälä 			I915_WRITE(reg, enable_mask);
2031132c27c9SVille Syrjälä 		}
20327e231dbeSJesse Barnes 	}
203358ead0d7SImre Deak 	spin_unlock(&dev_priv->irq_lock);
20342ecb8ca4SVille Syrjälä }
20352ecb8ca4SVille Syrjälä 
2036eb64343cSVille Syrjälä static void i8xx_pipestat_irq_handler(struct drm_i915_private *dev_priv,
2037eb64343cSVille Syrjälä 				      u16 iir, u32 pipe_stats[I915_MAX_PIPES])
2038eb64343cSVille Syrjälä {
2039eb64343cSVille Syrjälä 	enum pipe pipe;
2040eb64343cSVille Syrjälä 
2041eb64343cSVille Syrjälä 	for_each_pipe(dev_priv, pipe) {
2042eb64343cSVille Syrjälä 		if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
2043eb64343cSVille Syrjälä 			drm_handle_vblank(&dev_priv->drm, pipe);
2044eb64343cSVille Syrjälä 
2045eb64343cSVille Syrjälä 		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
2046eb64343cSVille Syrjälä 			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
2047eb64343cSVille Syrjälä 
2048eb64343cSVille Syrjälä 		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
2049eb64343cSVille Syrjälä 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2050eb64343cSVille Syrjälä 	}
2051eb64343cSVille Syrjälä }
2052eb64343cSVille Syrjälä 
2053eb64343cSVille Syrjälä static void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv,
2054eb64343cSVille Syrjälä 				      u32 iir, u32 pipe_stats[I915_MAX_PIPES])
2055eb64343cSVille Syrjälä {
2056eb64343cSVille Syrjälä 	bool blc_event = false;
2057eb64343cSVille Syrjälä 	enum pipe pipe;
2058eb64343cSVille Syrjälä 
2059eb64343cSVille Syrjälä 	for_each_pipe(dev_priv, pipe) {
2060eb64343cSVille Syrjälä 		if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
2061eb64343cSVille Syrjälä 			drm_handle_vblank(&dev_priv->drm, pipe);
2062eb64343cSVille Syrjälä 
2063eb64343cSVille Syrjälä 		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
2064eb64343cSVille Syrjälä 			blc_event = true;
2065eb64343cSVille Syrjälä 
2066eb64343cSVille Syrjälä 		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
2067eb64343cSVille Syrjälä 			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
2068eb64343cSVille Syrjälä 
2069eb64343cSVille Syrjälä 		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
2070eb64343cSVille Syrjälä 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2071eb64343cSVille Syrjälä 	}
2072eb64343cSVille Syrjälä 
2073eb64343cSVille Syrjälä 	if (blc_event || (iir & I915_ASLE_INTERRUPT))
2074eb64343cSVille Syrjälä 		intel_opregion_asle_intr(dev_priv);
2075eb64343cSVille Syrjälä }
2076eb64343cSVille Syrjälä 
2077eb64343cSVille Syrjälä static void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv,
2078eb64343cSVille Syrjälä 				      u32 iir, u32 pipe_stats[I915_MAX_PIPES])
2079eb64343cSVille Syrjälä {
2080eb64343cSVille Syrjälä 	bool blc_event = false;
2081eb64343cSVille Syrjälä 	enum pipe pipe;
2082eb64343cSVille Syrjälä 
2083eb64343cSVille Syrjälä 	for_each_pipe(dev_priv, pipe) {
2084eb64343cSVille Syrjälä 		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
2085eb64343cSVille Syrjälä 			drm_handle_vblank(&dev_priv->drm, pipe);
2086eb64343cSVille Syrjälä 
2087eb64343cSVille Syrjälä 		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
2088eb64343cSVille Syrjälä 			blc_event = true;
2089eb64343cSVille Syrjälä 
2090eb64343cSVille Syrjälä 		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
2091eb64343cSVille Syrjälä 			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
2092eb64343cSVille Syrjälä 
2093eb64343cSVille Syrjälä 		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
2094eb64343cSVille Syrjälä 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2095eb64343cSVille Syrjälä 	}
2096eb64343cSVille Syrjälä 
2097eb64343cSVille Syrjälä 	if (blc_event || (iir & I915_ASLE_INTERRUPT))
2098eb64343cSVille Syrjälä 		intel_opregion_asle_intr(dev_priv);
2099eb64343cSVille Syrjälä 
2100eb64343cSVille Syrjälä 	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
2101eb64343cSVille Syrjälä 		gmbus_irq_handler(dev_priv);
2102eb64343cSVille Syrjälä }
2103eb64343cSVille Syrjälä 
210491d14251STvrtko Ursulin static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
21052ecb8ca4SVille Syrjälä 					    u32 pipe_stats[I915_MAX_PIPES])
21062ecb8ca4SVille Syrjälä {
21072ecb8ca4SVille Syrjälä 	enum pipe pipe;
21087e231dbeSJesse Barnes 
2109055e393fSDamien Lespiau 	for_each_pipe(dev_priv, pipe) {
2110fd3a4024SDaniel Vetter 		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
2111fd3a4024SDaniel Vetter 			drm_handle_vblank(&dev_priv->drm, pipe);
21124356d586SDaniel Vetter 
21134356d586SDaniel Vetter 		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
211491d14251STvrtko Ursulin 			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
21152d9d2b0bSVille Syrjälä 
21161f7247c0SDaniel Vetter 		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
21171f7247c0SDaniel Vetter 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
211831acc7f5SJesse Barnes 	}
211931acc7f5SJesse Barnes 
2120c1874ed7SImre Deak 	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
212191d14251STvrtko Ursulin 		gmbus_irq_handler(dev_priv);
2122c1874ed7SImre Deak }
2123c1874ed7SImre Deak 
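/*
 * Read and clear the pending hotplug bits in PORT_HOTPLUG_STAT, retrying a
 * bounded number of times since clearing can race with new hotplug events.
 * The accumulated status is returned for the caller to decode later, e.g.
 * (as in the vlv/chv handlers below):
 *
 *	if (iir & I915_DISPLAY_PORT_INTERRUPT)
 *		hotplug_status = i9xx_hpd_irq_ack(dev_priv);
 *	...
 *	if (hotplug_status)
 *		i9xx_hpd_irq_handler(dev_priv, hotplug_status);
 */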
21241ae3c34cSVille Syrjälä static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
212516c6c56bSVille Syrjälä {
21260ba7c51aSVille Syrjälä 	u32 hotplug_status = 0, hotplug_status_mask;
21270ba7c51aSVille Syrjälä 	int i;
212816c6c56bSVille Syrjälä 
21290ba7c51aSVille Syrjälä 	if (IS_G4X(dev_priv) ||
21300ba7c51aSVille Syrjälä 	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
21310ba7c51aSVille Syrjälä 		hotplug_status_mask = HOTPLUG_INT_STATUS_G4X |
21320ba7c51aSVille Syrjälä 			DP_AUX_CHANNEL_MASK_INT_STATUS_G4X;
21330ba7c51aSVille Syrjälä 	else
21340ba7c51aSVille Syrjälä 		hotplug_status_mask = HOTPLUG_INT_STATUS_I915;
21350ba7c51aSVille Syrjälä 
21360ba7c51aSVille Syrjälä 	/*
21370ba7c51aSVille Syrjälä 	 * We absolutely have to clear all the pending interrupt
21380ba7c51aSVille Syrjälä 	 * bits in PORT_HOTPLUG_STAT. Otherwise the ISR port
21390ba7c51aSVille Syrjälä 	 * interrupt bit won't have an edge, and the i965/g4x
21400ba7c51aSVille Syrjälä 	 * edge triggered IIR will not notice that an interrupt
21410ba7c51aSVille Syrjälä 	 * is still pending. We can't use PORT_HOTPLUG_EN to
21420ba7c51aSVille Syrjälä 	 * guarantee the edge as the act of toggling the enable
21430ba7c51aSVille Syrjälä 	 * bits can itself generate a new hotplug interrupt :(
21440ba7c51aSVille Syrjälä 	 */
21450ba7c51aSVille Syrjälä 	for (i = 0; i < 10; i++) {
21460ba7c51aSVille Syrjälä 		u32 tmp = I915_READ(PORT_HOTPLUG_STAT) & hotplug_status_mask;
21470ba7c51aSVille Syrjälä 
21480ba7c51aSVille Syrjälä 		if (tmp == 0)
21490ba7c51aSVille Syrjälä 			return hotplug_status;
21500ba7c51aSVille Syrjälä 
21510ba7c51aSVille Syrjälä 		hotplug_status |= tmp;
21523ff60f89SOscar Mateo 		I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
21530ba7c51aSVille Syrjälä 	}
21540ba7c51aSVille Syrjälä 
21550ba7c51aSVille Syrjälä 	WARN_ONCE(1,
21560ba7c51aSVille Syrjälä 		  "PORT_HOTPLUG_STAT did not clear (0x%08x)\n",
21570ba7c51aSVille Syrjälä 		  I915_READ(PORT_HOTPLUG_STAT));
21581ae3c34cSVille Syrjälä 
21591ae3c34cSVille Syrjälä 	return hotplug_status;
21601ae3c34cSVille Syrjälä }
21611ae3c34cSVille Syrjälä 
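/*
 * Decode the hotplug status previously acked by i9xx_hpd_irq_ack() into
 * pin and long-pulse masks (using the G4X bit layout on g4x/vlv/chv and
 * the i915 layout otherwise) and dispatch them, along with any DP AUX
 * events on the G4X-style platforms.
 */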
216291d14251STvrtko Ursulin static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv,
21631ae3c34cSVille Syrjälä 				 u32 hotplug_status)
21641ae3c34cSVille Syrjälä {
21651ae3c34cSVille Syrjälä 	u32 pin_mask = 0, long_mask = 0;
21663ff60f89SOscar Mateo 
216791d14251STvrtko Ursulin 	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
216891d14251STvrtko Ursulin 	    IS_CHERRYVIEW(dev_priv)) {
216916c6c56bSVille Syrjälä 		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
217016c6c56bSVille Syrjälä 
217158f2cf24SVille Syrjälä 		if (hotplug_trigger) {
2172cf53902fSRodrigo Vivi 			intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2173cf53902fSRodrigo Vivi 					   hotplug_trigger, hotplug_trigger,
2174cf53902fSRodrigo Vivi 					   hpd_status_g4x,
2175fd63e2a9SImre Deak 					   i9xx_port_hotplug_long_detect);
217658f2cf24SVille Syrjälä 
217791d14251STvrtko Ursulin 			intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
217858f2cf24SVille Syrjälä 		}
2179369712e8SJani Nikula 
2180369712e8SJani Nikula 		if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
218191d14251STvrtko Ursulin 			dp_aux_irq_handler(dev_priv);
218216c6c56bSVille Syrjälä 	} else {
218316c6c56bSVille Syrjälä 		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
218416c6c56bSVille Syrjälä 
218558f2cf24SVille Syrjälä 		if (hotplug_trigger) {
2186cf53902fSRodrigo Vivi 			intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2187cf53902fSRodrigo Vivi 					   hotplug_trigger, hotplug_trigger,
2188cf53902fSRodrigo Vivi 					   hpd_status_i915,
2189fd63e2a9SImre Deak 					   i9xx_port_hotplug_long_detect);
219091d14251STvrtko Ursulin 			intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
219116c6c56bSVille Syrjälä 		}
21923ff60f89SOscar Mateo 	}
219358f2cf24SVille Syrjälä }
219416c6c56bSVille Syrjälä 
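/*
 * Top-level interrupt handler for Valleyview: with the master interrupt
 * and VLV_IER masked off, the GT, PM, hotplug and pipe status sources are
 * acked first; the actual handlers only run once VLV_IIR has been cleared
 * and the master interrupt re-enabled (see the "theory on interrupt
 * generation" comment in the loop body).
 */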
2195c1874ed7SImre Deak static irqreturn_t valleyview_irq_handler(int irq, void *arg)
2196c1874ed7SImre Deak {
2197b318b824SVille Syrjälä 	struct drm_i915_private *dev_priv = arg;
2198c1874ed7SImre Deak 	irqreturn_t ret = IRQ_NONE;
2199c1874ed7SImre Deak 
22002dd2a883SImre Deak 	if (!intel_irqs_enabled(dev_priv))
22012dd2a883SImre Deak 		return IRQ_NONE;
22022dd2a883SImre Deak 
22031f814dacSImre Deak 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
22049102650fSDaniele Ceraolo Spurio 	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
22051f814dacSImre Deak 
22061e1cace9SVille Syrjälä 	do {
22076e814800SVille Syrjälä 		u32 iir, gt_iir, pm_iir;
22082ecb8ca4SVille Syrjälä 		u32 pipe_stats[I915_MAX_PIPES] = {};
22091ae3c34cSVille Syrjälä 		u32 hotplug_status = 0;
2210a5e485a9SVille Syrjälä 		u32 ier = 0;
22113ff60f89SOscar Mateo 
2212c1874ed7SImre Deak 		gt_iir = I915_READ(GTIIR);
2213c1874ed7SImre Deak 		pm_iir = I915_READ(GEN6_PMIIR);
22143ff60f89SOscar Mateo 		iir = I915_READ(VLV_IIR);
2215c1874ed7SImre Deak 
2216c1874ed7SImre Deak 		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
22171e1cace9SVille Syrjälä 			break;
2218c1874ed7SImre Deak 
2219c1874ed7SImre Deak 		ret = IRQ_HANDLED;
2220c1874ed7SImre Deak 
2221a5e485a9SVille Syrjälä 		/*
2222a5e485a9SVille Syrjälä 		 * Theory on interrupt generation, based on empirical evidence:
2223a5e485a9SVille Syrjälä 		 *
2224a5e485a9SVille Syrjälä 		 * x = ((VLV_IIR & VLV_IER) ||
2225a5e485a9SVille Syrjälä 		 *      (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) &&
2226a5e485a9SVille Syrjälä 		 *       (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE)));
2227a5e485a9SVille Syrjälä 		 *
2228a5e485a9SVille Syrjälä 		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
2229a5e485a9SVille Syrjälä 		 * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to
2230a5e485a9SVille Syrjälä 		 * guarantee the CPU interrupt will be raised again even if we
2231a5e485a9SVille Syrjälä 		 * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR
2232a5e485a9SVille Syrjälä 		 * bits this time around.
2233a5e485a9SVille Syrjälä 		 */
22344a0a0202SVille Syrjälä 		I915_WRITE(VLV_MASTER_IER, 0);
2235a5e485a9SVille Syrjälä 		ier = I915_READ(VLV_IER);
2236a5e485a9SVille Syrjälä 		I915_WRITE(VLV_IER, 0);
22374a0a0202SVille Syrjälä 
22384a0a0202SVille Syrjälä 		if (gt_iir)
22394a0a0202SVille Syrjälä 			I915_WRITE(GTIIR, gt_iir);
22404a0a0202SVille Syrjälä 		if (pm_iir)
22414a0a0202SVille Syrjälä 			I915_WRITE(GEN6_PMIIR, pm_iir);
22424a0a0202SVille Syrjälä 
22437ce4d1f2SVille Syrjälä 		if (iir & I915_DISPLAY_PORT_INTERRUPT)
22441ae3c34cSVille Syrjälä 			hotplug_status = i9xx_hpd_irq_ack(dev_priv);
22457ce4d1f2SVille Syrjälä 
22463ff60f89SOscar Mateo 		/* Call regardless, as some status bits might not be
22473ff60f89SOscar Mateo 		 * signalled in iir */
2248eb64343cSVille Syrjälä 		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
22497ce4d1f2SVille Syrjälä 
2250eef57324SJerome Anand 		if (iir & (I915_LPE_PIPE_A_INTERRUPT |
2251eef57324SJerome Anand 			   I915_LPE_PIPE_B_INTERRUPT))
2252eef57324SJerome Anand 			intel_lpe_audio_irq_handler(dev_priv);
2253eef57324SJerome Anand 
22547ce4d1f2SVille Syrjälä 		/*
22557ce4d1f2SVille Syrjälä 		 * VLV_IIR is single buffered, and reflects the level
22567ce4d1f2SVille Syrjälä 		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
22577ce4d1f2SVille Syrjälä 		 */
22587ce4d1f2SVille Syrjälä 		if (iir)
22597ce4d1f2SVille Syrjälä 			I915_WRITE(VLV_IIR, iir);
22604a0a0202SVille Syrjälä 
2261a5e485a9SVille Syrjälä 		I915_WRITE(VLV_IER, ier);
22624a0a0202SVille Syrjälä 		I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
22631ae3c34cSVille Syrjälä 
226452894874SVille Syrjälä 		if (gt_iir)
2265261e40b8SVille Syrjälä 			snb_gt_irq_handler(dev_priv, gt_iir);
226652894874SVille Syrjälä 		if (pm_iir)
226752894874SVille Syrjälä 			gen6_rps_irq_handler(dev_priv, pm_iir);
226852894874SVille Syrjälä 
22691ae3c34cSVille Syrjälä 		if (hotplug_status)
227091d14251STvrtko Ursulin 			i9xx_hpd_irq_handler(dev_priv, hotplug_status);
22712ecb8ca4SVille Syrjälä 
227291d14251STvrtko Ursulin 		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
22731e1cace9SVille Syrjälä 	} while (0);
22747e231dbeSJesse Barnes 
22759102650fSDaniele Ceraolo Spurio 	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
22761f814dacSImre Deak 
22777e231dbeSJesse Barnes 	return ret;
22787e231dbeSJesse Barnes }
22797e231dbeSJesse Barnes 
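/*
 * Cherryview variant of the above: the GEN8 master interrupt control and
 * GT interrupt banks replace the VLV master/GT registers, but the same
 * ack-first, handle-after-re-enable ordering is kept for VLV_IIR.
 */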
228043f328d7SVille Syrjälä static irqreturn_t cherryview_irq_handler(int irq, void *arg)
228143f328d7SVille Syrjälä {
2282b318b824SVille Syrjälä 	struct drm_i915_private *dev_priv = arg;
228343f328d7SVille Syrjälä 	irqreturn_t ret = IRQ_NONE;
228443f328d7SVille Syrjälä 
22852dd2a883SImre Deak 	if (!intel_irqs_enabled(dev_priv))
22862dd2a883SImre Deak 		return IRQ_NONE;
22872dd2a883SImre Deak 
22881f814dacSImre Deak 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
22899102650fSDaniele Ceraolo Spurio 	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
22901f814dacSImre Deak 
2291579de73bSChris Wilson 	do {
22926e814800SVille Syrjälä 		u32 master_ctl, iir;
22932ecb8ca4SVille Syrjälä 		u32 pipe_stats[I915_MAX_PIPES] = {};
22941ae3c34cSVille Syrjälä 		u32 hotplug_status = 0;
2295f0fd96f5SChris Wilson 		u32 gt_iir[4];
2296a5e485a9SVille Syrjälä 		u32 ier = 0;
2297a5e485a9SVille Syrjälä 
22988e5fd599SVille Syrjälä 		master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
22993278f67fSVille Syrjälä 		iir = I915_READ(VLV_IIR);
23003278f67fSVille Syrjälä 
23013278f67fSVille Syrjälä 		if (master_ctl == 0 && iir == 0)
23028e5fd599SVille Syrjälä 			break;
230343f328d7SVille Syrjälä 
230427b6c122SOscar Mateo 		ret = IRQ_HANDLED;
230527b6c122SOscar Mateo 
2306a5e485a9SVille Syrjälä 		/*
2307a5e485a9SVille Syrjälä 		 * Theory on interrupt generation, based on empirical evidence:
2308a5e485a9SVille Syrjälä 		 *
2309a5e485a9SVille Syrjälä 		 * x = ((VLV_IIR & VLV_IER) ||
2310a5e485a9SVille Syrjälä 		 *      ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) &&
2311a5e485a9SVille Syrjälä 		 *       (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL)));
2312a5e485a9SVille Syrjälä 		 *
2313a5e485a9SVille Syrjälä 		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
2314a5e485a9SVille Syrjälä 		 * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to
2315a5e485a9SVille Syrjälä 		 * guarantee the CPU interrupt will be raised again even if we
2316a5e485a9SVille Syrjälä 		 * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL
2317a5e485a9SVille Syrjälä 		 * bits this time around.
2318a5e485a9SVille Syrjälä 		 */
231943f328d7SVille Syrjälä 		I915_WRITE(GEN8_MASTER_IRQ, 0);
2320a5e485a9SVille Syrjälä 		ier = I915_READ(VLV_IER);
2321a5e485a9SVille Syrjälä 		I915_WRITE(VLV_IER, 0);
232243f328d7SVille Syrjälä 
2323e30e251aSVille Syrjälä 		gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);
232427b6c122SOscar Mateo 
232527b6c122SOscar Mateo 		if (iir & I915_DISPLAY_PORT_INTERRUPT)
23261ae3c34cSVille Syrjälä 			hotplug_status = i9xx_hpd_irq_ack(dev_priv);
232743f328d7SVille Syrjälä 
232827b6c122SOscar Mateo 		/* Call regardless, as some status bits might not be
232927b6c122SOscar Mateo 		 * signalled in iir */
2330eb64343cSVille Syrjälä 		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
233143f328d7SVille Syrjälä 
2332eef57324SJerome Anand 		if (iir & (I915_LPE_PIPE_A_INTERRUPT |
2333eef57324SJerome Anand 			   I915_LPE_PIPE_B_INTERRUPT |
2334eef57324SJerome Anand 			   I915_LPE_PIPE_C_INTERRUPT))
2335eef57324SJerome Anand 			intel_lpe_audio_irq_handler(dev_priv);
2336eef57324SJerome Anand 
23377ce4d1f2SVille Syrjälä 		/*
23387ce4d1f2SVille Syrjälä 		 * VLV_IIR is single buffered, and reflects the level
23397ce4d1f2SVille Syrjälä 		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
23407ce4d1f2SVille Syrjälä 		 */
23417ce4d1f2SVille Syrjälä 		if (iir)
23427ce4d1f2SVille Syrjälä 			I915_WRITE(VLV_IIR, iir);
23437ce4d1f2SVille Syrjälä 
2344a5e485a9SVille Syrjälä 		I915_WRITE(VLV_IER, ier);
2345e5328c43SVille Syrjälä 		I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
23461ae3c34cSVille Syrjälä 
2347f0fd96f5SChris Wilson 		gen8_gt_irq_handler(dev_priv, master_ctl, gt_iir);
2348e30e251aSVille Syrjälä 
23491ae3c34cSVille Syrjälä 		if (hotplug_status)
235091d14251STvrtko Ursulin 			i9xx_hpd_irq_handler(dev_priv, hotplug_status);
23512ecb8ca4SVille Syrjälä 
235291d14251STvrtko Ursulin 		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
2353579de73bSChris Wilson 	} while (0);
23543278f67fSVille Syrjälä 
23559102650fSDaniele Ceraolo Spurio 	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
23561f814dacSImre Deak 
235743f328d7SVille Syrjälä 	return ret;
235843f328d7SVille Syrjälä }
235943f328d7SVille Syrjälä 
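/*
 * PCH (south display) hotplug handling for IBX/CPT style PCHs. Note that
 * PCH_PORT_HOTPLUG is written back even when hotplug_trigger is zero,
 * since the PCH otherwise fails to ack the interrupt to the CPU.
 */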
236091d14251STvrtko Ursulin static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
236191d14251STvrtko Ursulin 				u32 hotplug_trigger,
236240e56410SVille Syrjälä 				const u32 hpd[HPD_NUM_PINS])
2363776ad806SJesse Barnes {
236442db67d6SVille Syrjälä 	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2365776ad806SJesse Barnes 
23666a39d7c9SJani Nikula 	/*
23676a39d7c9SJani Nikula 	 * Somehow the PCH doesn't seem to really ack the interrupt to the CPU
23686a39d7c9SJani Nikula 	 * unless we touch the hotplug register, even if hotplug_trigger is
23696a39d7c9SJani Nikula 	 * zero. Not acking leads to "The master control interrupt lied (SDE)!"
23706a39d7c9SJani Nikula 	 * errors.
23716a39d7c9SJani Nikula 	 */
237213cf5504SDave Airlie 	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
23736a39d7c9SJani Nikula 	if (!hotplug_trigger) {
23746a39d7c9SJani Nikula 		u32 mask = PORTA_HOTPLUG_STATUS_MASK |
23756a39d7c9SJani Nikula 			PORTD_HOTPLUG_STATUS_MASK |
23766a39d7c9SJani Nikula 			PORTC_HOTPLUG_STATUS_MASK |
23776a39d7c9SJani Nikula 			PORTB_HOTPLUG_STATUS_MASK;
23786a39d7c9SJani Nikula 		dig_hotplug_reg &= ~mask;
23796a39d7c9SJani Nikula 	}
23806a39d7c9SJani Nikula 
238113cf5504SDave Airlie 	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
23826a39d7c9SJani Nikula 	if (!hotplug_trigger)
23836a39d7c9SJani Nikula 		return;
238413cf5504SDave Airlie 
2385cf53902fSRodrigo Vivi 	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger,
238640e56410SVille Syrjälä 			   dig_hotplug_reg, hpd,
2387fd63e2a9SImre Deak 			   pch_port_hotplug_long_detect);
238840e56410SVille Syrjälä 
238991d14251STvrtko Ursulin 	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2390aaf5ec2eSSonika Jindal }
239191d131d2SDaniel Vetter 
239291d14251STvrtko Ursulin static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
239340e56410SVille Syrjälä {
239440e56410SVille Syrjälä 	int pipe;
239540e56410SVille Syrjälä 	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
239640e56410SVille Syrjälä 
239791d14251STvrtko Ursulin 	ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ibx);
239840e56410SVille Syrjälä 
2399cfc33bf7SVille Syrjälä 	if (pch_iir & SDE_AUDIO_POWER_MASK) {
2400cfc33bf7SVille Syrjälä 		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
2401776ad806SJesse Barnes 			       SDE_AUDIO_POWER_SHIFT);
2402cfc33bf7SVille Syrjälä 		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
2403cfc33bf7SVille Syrjälä 				 port_name(port));
2404cfc33bf7SVille Syrjälä 	}
2405776ad806SJesse Barnes 
2406ce99c256SDaniel Vetter 	if (pch_iir & SDE_AUX_MASK)
240791d14251STvrtko Ursulin 		dp_aux_irq_handler(dev_priv);
2408ce99c256SDaniel Vetter 
2409776ad806SJesse Barnes 	if (pch_iir & SDE_GMBUS)
241091d14251STvrtko Ursulin 		gmbus_irq_handler(dev_priv);
2411776ad806SJesse Barnes 
2412776ad806SJesse Barnes 	if (pch_iir & SDE_AUDIO_HDCP_MASK)
2413776ad806SJesse Barnes 		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
2414776ad806SJesse Barnes 
2415776ad806SJesse Barnes 	if (pch_iir & SDE_AUDIO_TRANS_MASK)
2416776ad806SJesse Barnes 		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
2417776ad806SJesse Barnes 
2418776ad806SJesse Barnes 	if (pch_iir & SDE_POISON)
2419776ad806SJesse Barnes 		DRM_ERROR("PCH poison interrupt\n");
2420776ad806SJesse Barnes 
24219db4a9c7SJesse Barnes 	if (pch_iir & SDE_FDI_MASK)
2422055e393fSDamien Lespiau 		for_each_pipe(dev_priv, pipe)
24239db4a9c7SJesse Barnes 			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
24249db4a9c7SJesse Barnes 					 pipe_name(pipe),
24259db4a9c7SJesse Barnes 					 I915_READ(FDI_RX_IIR(pipe)));
2426776ad806SJesse Barnes 
2427776ad806SJesse Barnes 	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
2428776ad806SJesse Barnes 		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
2429776ad806SJesse Barnes 
2430776ad806SJesse Barnes 	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
2431776ad806SJesse Barnes 		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
2432776ad806SJesse Barnes 
2433776ad806SJesse Barnes 	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
2434a2196033SMatthias Kaehlcke 		intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A);
24358664281bSPaulo Zanoni 
24368664281bSPaulo Zanoni 	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
2437a2196033SMatthias Kaehlcke 		intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_B);
24388664281bSPaulo Zanoni }
24398664281bSPaulo Zanoni 
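/*
 * GEN7_ERR_INT fans out poison, per-pipe FIFO underrun and pipe CRC done
 * events; the CRC handler differs between Ivybridge and Haswell.
 */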
244091d14251STvrtko Ursulin static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
24418664281bSPaulo Zanoni {
24428664281bSPaulo Zanoni 	u32 err_int = I915_READ(GEN7_ERR_INT);
24435a69b89fSDaniel Vetter 	enum pipe pipe;
24448664281bSPaulo Zanoni 
2445de032bf4SPaulo Zanoni 	if (err_int & ERR_INT_POISON)
2446de032bf4SPaulo Zanoni 		DRM_ERROR("Poison interrupt\n");
2447de032bf4SPaulo Zanoni 
2448055e393fSDamien Lespiau 	for_each_pipe(dev_priv, pipe) {
24491f7247c0SDaniel Vetter 		if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
24501f7247c0SDaniel Vetter 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
24518664281bSPaulo Zanoni 
24525a69b89fSDaniel Vetter 		if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
245391d14251STvrtko Ursulin 			if (IS_IVYBRIDGE(dev_priv))
245491d14251STvrtko Ursulin 				ivb_pipe_crc_irq_handler(dev_priv, pipe);
24555a69b89fSDaniel Vetter 			else
245691d14251STvrtko Ursulin 				hsw_pipe_crc_irq_handler(dev_priv, pipe);
24575a69b89fSDaniel Vetter 		}
24585a69b89fSDaniel Vetter 	}
24598bf1e9f1SShuang He 
24608664281bSPaulo Zanoni 	I915_WRITE(GEN7_ERR_INT, err_int);
24618664281bSPaulo Zanoni }
24628664281bSPaulo Zanoni 
246391d14251STvrtko Ursulin static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
24648664281bSPaulo Zanoni {
24658664281bSPaulo Zanoni 	u32 serr_int = I915_READ(SERR_INT);
246645c1cd87SMika Kahola 	enum pipe pipe;
24678664281bSPaulo Zanoni 
2468de032bf4SPaulo Zanoni 	if (serr_int & SERR_INT_POISON)
2469de032bf4SPaulo Zanoni 		DRM_ERROR("PCH poison interrupt\n");
2470de032bf4SPaulo Zanoni 
247145c1cd87SMika Kahola 	for_each_pipe(dev_priv, pipe)
247245c1cd87SMika Kahola 		if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe))
247345c1cd87SMika Kahola 			intel_pch_fifo_underrun_irq_handler(dev_priv, pipe);
24748664281bSPaulo Zanoni 
24758664281bSPaulo Zanoni 	I915_WRITE(SERR_INT, serr_int);
2476776ad806SJesse Barnes }
2477776ad806SJesse Barnes 
247891d14251STvrtko Ursulin static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
247923e81d69SAdam Jackson {
248023e81d69SAdam Jackson 	int pipe;
24816dbf30ceSVille Syrjälä 	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
2482aaf5ec2eSSonika Jindal 
248391d14251STvrtko Ursulin 	ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_cpt);
248491d131d2SDaniel Vetter 
2485cfc33bf7SVille Syrjälä 	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
2486cfc33bf7SVille Syrjälä 		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
248723e81d69SAdam Jackson 			       SDE_AUDIO_POWER_SHIFT_CPT);
2488cfc33bf7SVille Syrjälä 		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
2489cfc33bf7SVille Syrjälä 				 port_name(port));
2490cfc33bf7SVille Syrjälä 	}
249123e81d69SAdam Jackson 
249223e81d69SAdam Jackson 	if (pch_iir & SDE_AUX_MASK_CPT)
249391d14251STvrtko Ursulin 		dp_aux_irq_handler(dev_priv);
249423e81d69SAdam Jackson 
249523e81d69SAdam Jackson 	if (pch_iir & SDE_GMBUS_CPT)
249691d14251STvrtko Ursulin 		gmbus_irq_handler(dev_priv);
249723e81d69SAdam Jackson 
249823e81d69SAdam Jackson 	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
249923e81d69SAdam Jackson 		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
250023e81d69SAdam Jackson 
250123e81d69SAdam Jackson 	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
250223e81d69SAdam Jackson 		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
250323e81d69SAdam Jackson 
250423e81d69SAdam Jackson 	if (pch_iir & SDE_FDI_MASK_CPT)
2505055e393fSDamien Lespiau 		for_each_pipe(dev_priv, pipe)
250623e81d69SAdam Jackson 			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
250723e81d69SAdam Jackson 					 pipe_name(pipe),
250823e81d69SAdam Jackson 					 I915_READ(FDI_RX_IIR(pipe)));
25098664281bSPaulo Zanoni 
25108664281bSPaulo Zanoni 	if (pch_iir & SDE_ERROR_CPT)
251191d14251STvrtko Ursulin 		cpt_serr_int_handler(dev_priv);
251223e81d69SAdam Jackson }
251323e81d69SAdam Jackson 
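/*
 * ICP-class PCH hotplug handling: the DDI and Type-C triggers are acked
 * and decoded separately, against a platform specific pin table supplied
 * by the caller, before being dispatched together, followed by GMBUS.
 */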
2514c6f7acb8SMatt Roper static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir,
2515c6f7acb8SMatt Roper 			    const u32 *pins)
251631604222SAnusha Srivatsa {
251731604222SAnusha Srivatsa 	u32 ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_ICP;
251831604222SAnusha Srivatsa 	u32 tc_hotplug_trigger = pch_iir & SDE_TC_MASK_ICP;
251931604222SAnusha Srivatsa 	u32 pin_mask = 0, long_mask = 0;
252031604222SAnusha Srivatsa 
252131604222SAnusha Srivatsa 	if (ddi_hotplug_trigger) {
252231604222SAnusha Srivatsa 		u32 dig_hotplug_reg;
252331604222SAnusha Srivatsa 
252431604222SAnusha Srivatsa 		dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_DDI);
252531604222SAnusha Srivatsa 		I915_WRITE(SHOTPLUG_CTL_DDI, dig_hotplug_reg);
252631604222SAnusha Srivatsa 
252731604222SAnusha Srivatsa 		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
252831604222SAnusha Srivatsa 				   ddi_hotplug_trigger,
2529c6f7acb8SMatt Roper 				   dig_hotplug_reg, pins,
253031604222SAnusha Srivatsa 				   icp_ddi_port_hotplug_long_detect);
253131604222SAnusha Srivatsa 	}
253231604222SAnusha Srivatsa 
253331604222SAnusha Srivatsa 	if (tc_hotplug_trigger) {
253431604222SAnusha Srivatsa 		u32 dig_hotplug_reg;
253531604222SAnusha Srivatsa 
253631604222SAnusha Srivatsa 		dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_TC);
253731604222SAnusha Srivatsa 		I915_WRITE(SHOTPLUG_CTL_TC, dig_hotplug_reg);
253831604222SAnusha Srivatsa 
253931604222SAnusha Srivatsa 		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
254031604222SAnusha Srivatsa 				   tc_hotplug_trigger,
2541c6f7acb8SMatt Roper 				   dig_hotplug_reg, pins,
254231604222SAnusha Srivatsa 				   icp_tc_port_hotplug_long_detect);
254331604222SAnusha Srivatsa 	}
254431604222SAnusha Srivatsa 
254531604222SAnusha Srivatsa 	if (pin_mask)
254631604222SAnusha Srivatsa 		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
254731604222SAnusha Srivatsa 
254831604222SAnusha Srivatsa 	if (pch_iir & SDE_GMBUS_ICP)
254931604222SAnusha Srivatsa 		gmbus_irq_handler(dev_priv);
255031604222SAnusha Srivatsa }
255131604222SAnusha Srivatsa 
255291d14251STvrtko Ursulin static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
25536dbf30ceSVille Syrjälä {
25546dbf30ceSVille Syrjälä 	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
25556dbf30ceSVille Syrjälä 		~SDE_PORTE_HOTPLUG_SPT;
25566dbf30ceSVille Syrjälä 	u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
25576dbf30ceSVille Syrjälä 	u32 pin_mask = 0, long_mask = 0;
25586dbf30ceSVille Syrjälä 
25596dbf30ceSVille Syrjälä 	if (hotplug_trigger) {
25606dbf30ceSVille Syrjälä 		u32 dig_hotplug_reg;
25616dbf30ceSVille Syrjälä 
25626dbf30ceSVille Syrjälä 		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
25636dbf30ceSVille Syrjälä 		I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
25646dbf30ceSVille Syrjälä 
2565cf53902fSRodrigo Vivi 		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2566cf53902fSRodrigo Vivi 				   hotplug_trigger, dig_hotplug_reg, hpd_spt,
256774c0b395SVille Syrjälä 				   spt_port_hotplug_long_detect);
25686dbf30ceSVille Syrjälä 	}
25696dbf30ceSVille Syrjälä 
25706dbf30ceSVille Syrjälä 	if (hotplug2_trigger) {
25716dbf30ceSVille Syrjälä 		u32 dig_hotplug_reg;
25726dbf30ceSVille Syrjälä 
25736dbf30ceSVille Syrjälä 		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2);
25746dbf30ceSVille Syrjälä 		I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg);
25756dbf30ceSVille Syrjälä 
2576cf53902fSRodrigo Vivi 		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2577cf53902fSRodrigo Vivi 				   hotplug2_trigger, dig_hotplug_reg, hpd_spt,
25786dbf30ceSVille Syrjälä 				   spt_port_hotplug2_long_detect);
25796dbf30ceSVille Syrjälä 	}
25806dbf30ceSVille Syrjälä 
25816dbf30ceSVille Syrjälä 	if (pin_mask)
258291d14251STvrtko Ursulin 		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
25836dbf30ceSVille Syrjälä 
25846dbf30ceSVille Syrjälä 	if (pch_iir & SDE_GMBUS_CPT)
258591d14251STvrtko Ursulin 		gmbus_irq_handler(dev_priv);
25866dbf30ceSVille Syrjälä }
25876dbf30ceSVille Syrjälä 
258891d14251STvrtko Ursulin static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv,
258991d14251STvrtko Ursulin 				u32 hotplug_trigger,
259040e56410SVille Syrjälä 				const u32 hpd[HPD_NUM_PINS])
2591c008bc6eSPaulo Zanoni {
2592e4ce95aaSVille Syrjälä 	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2593e4ce95aaSVille Syrjälä 
2594e4ce95aaSVille Syrjälä 	dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
2595e4ce95aaSVille Syrjälä 	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);
2596e4ce95aaSVille Syrjälä 
2597cf53902fSRodrigo Vivi 	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger,
259840e56410SVille Syrjälä 			   dig_hotplug_reg, hpd,
2599e4ce95aaSVille Syrjälä 			   ilk_port_hotplug_long_detect);
260040e56410SVille Syrjälä 
260191d14251STvrtko Ursulin 	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2602e4ce95aaSVille Syrjälä }
2603c008bc6eSPaulo Zanoni 
260491d14251STvrtko Ursulin static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
260591d14251STvrtko Ursulin 				    u32 de_iir)
260640e56410SVille Syrjälä {
260740e56410SVille Syrjälä 	enum pipe pipe;
260840e56410SVille Syrjälä 	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;
260940e56410SVille Syrjälä 
261040e56410SVille Syrjälä 	if (hotplug_trigger)
261191d14251STvrtko Ursulin 		ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ilk);
261240e56410SVille Syrjälä 
2613c008bc6eSPaulo Zanoni 	if (de_iir & DE_AUX_CHANNEL_A)
261491d14251STvrtko Ursulin 		dp_aux_irq_handler(dev_priv);
2615c008bc6eSPaulo Zanoni 
2616c008bc6eSPaulo Zanoni 	if (de_iir & DE_GSE)
261791d14251STvrtko Ursulin 		intel_opregion_asle_intr(dev_priv);
2618c008bc6eSPaulo Zanoni 
2619c008bc6eSPaulo Zanoni 	if (de_iir & DE_POISON)
2620c008bc6eSPaulo Zanoni 		DRM_ERROR("Poison interrupt\n");
2621c008bc6eSPaulo Zanoni 
2622055e393fSDamien Lespiau 	for_each_pipe(dev_priv, pipe) {
2623fd3a4024SDaniel Vetter 		if (de_iir & DE_PIPE_VBLANK(pipe))
2624fd3a4024SDaniel Vetter 			drm_handle_vblank(&dev_priv->drm, pipe);
2625c008bc6eSPaulo Zanoni 
262640da17c2SDaniel Vetter 		if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
26271f7247c0SDaniel Vetter 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2628c008bc6eSPaulo Zanoni 
262940da17c2SDaniel Vetter 		if (de_iir & DE_PIPE_CRC_DONE(pipe))
263091d14251STvrtko Ursulin 			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
2631c008bc6eSPaulo Zanoni 	}
2632c008bc6eSPaulo Zanoni 
2633c008bc6eSPaulo Zanoni 	/* check event from PCH */
2634c008bc6eSPaulo Zanoni 	if (de_iir & DE_PCH_EVENT) {
2635c008bc6eSPaulo Zanoni 		u32 pch_iir = I915_READ(SDEIIR);
2636c008bc6eSPaulo Zanoni 
263791d14251STvrtko Ursulin 		if (HAS_PCH_CPT(dev_priv))
263891d14251STvrtko Ursulin 			cpt_irq_handler(dev_priv, pch_iir);
2639c008bc6eSPaulo Zanoni 		else
264091d14251STvrtko Ursulin 			ibx_irq_handler(dev_priv, pch_iir);
2641c008bc6eSPaulo Zanoni 
2642c008bc6eSPaulo Zanoni 		/* should clear PCH hotplug event before clearing CPU irq */
2643c008bc6eSPaulo Zanoni 		I915_WRITE(SDEIIR, pch_iir);
2644c008bc6eSPaulo Zanoni 	}
2645c008bc6eSPaulo Zanoni 
2646cf819effSLucas De Marchi 	if (IS_GEN(dev_priv, 5) && de_iir & DE_PCU_EVENT)
264791d14251STvrtko Ursulin 		ironlake_rps_change_irq_handler(dev_priv);
2648c008bc6eSPaulo Zanoni }
2649c008bc6eSPaulo Zanoni 
265091d14251STvrtko Ursulin static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
265191d14251STvrtko Ursulin 				    u32 de_iir)
26529719fb98SPaulo Zanoni {
265307d27e20SDamien Lespiau 	enum pipe pipe;
265423bb4cb5SVille Syrjälä 	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;
265523bb4cb5SVille Syrjälä 
265640e56410SVille Syrjälä 	if (hotplug_trigger)
265791d14251STvrtko Ursulin 		ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ivb);
26589719fb98SPaulo Zanoni 
26599719fb98SPaulo Zanoni 	if (de_iir & DE_ERR_INT_IVB)
266091d14251STvrtko Ursulin 		ivb_err_int_handler(dev_priv);
26619719fb98SPaulo Zanoni 
266254fd3149SDhinakaran Pandiyan 	if (de_iir & DE_EDP_PSR_INT_HSW) {
266354fd3149SDhinakaran Pandiyan 		u32 psr_iir = I915_READ(EDP_PSR_IIR);
266454fd3149SDhinakaran Pandiyan 
266554fd3149SDhinakaran Pandiyan 		intel_psr_irq_handler(dev_priv, psr_iir);
266654fd3149SDhinakaran Pandiyan 		I915_WRITE(EDP_PSR_IIR, psr_iir);
266754fd3149SDhinakaran Pandiyan 	}
2668fc340442SDaniel Vetter 
26699719fb98SPaulo Zanoni 	if (de_iir & DE_AUX_CHANNEL_A_IVB)
267091d14251STvrtko Ursulin 		dp_aux_irq_handler(dev_priv);
26719719fb98SPaulo Zanoni 
26729719fb98SPaulo Zanoni 	if (de_iir & DE_GSE_IVB)
267391d14251STvrtko Ursulin 		intel_opregion_asle_intr(dev_priv);
26749719fb98SPaulo Zanoni 
2675055e393fSDamien Lespiau 	for_each_pipe(dev_priv, pipe) {
2676fd3a4024SDaniel Vetter 		if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)))
2677fd3a4024SDaniel Vetter 			drm_handle_vblank(&dev_priv->drm, pipe);
26789719fb98SPaulo Zanoni 	}
26799719fb98SPaulo Zanoni 
26809719fb98SPaulo Zanoni 	/* check event from PCH */
268191d14251STvrtko Ursulin 	if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) {
26829719fb98SPaulo Zanoni 		u32 pch_iir = I915_READ(SDEIIR);
26839719fb98SPaulo Zanoni 
268491d14251STvrtko Ursulin 		cpt_irq_handler(dev_priv, pch_iir);
26859719fb98SPaulo Zanoni 
26869719fb98SPaulo Zanoni 		/* clear PCH hotplug event before clearing CPU irq */
26879719fb98SPaulo Zanoni 		I915_WRITE(SDEIIR, pch_iir);
26889719fb98SPaulo Zanoni 	}
26899719fb98SPaulo Zanoni }
26909719fb98SPaulo Zanoni 
269172c90f62SOscar Mateo /*
269272c90f62SOscar Mateo  * To handle irqs with the minimum potential races with fresh interrupts, we:
269372c90f62SOscar Mateo  * 1 - Disable Master Interrupt Control.
269472c90f62SOscar Mateo  * 2 - Find the source(s) of the interrupt.
269572c90f62SOscar Mateo  * 3 - Clear the Interrupt Identity bits (IIR).
269672c90f62SOscar Mateo  * 4 - Process the interrupt(s) that had bits set in the IIRs.
269772c90f62SOscar Mateo  * 5 - Re-enable Master Interrupt Control.
269872c90f62SOscar Mateo  */
2699f1af8fc1SPaulo Zanoni static irqreturn_t ironlake_irq_handler(int irq, void *arg)
2700b1f14ad0SJesse Barnes {
2701b318b824SVille Syrjälä 	struct drm_i915_private *dev_priv = arg;
2702f1af8fc1SPaulo Zanoni 	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
27030e43406bSChris Wilson 	irqreturn_t ret = IRQ_NONE;
2704b1f14ad0SJesse Barnes 
27052dd2a883SImre Deak 	if (!intel_irqs_enabled(dev_priv))
27062dd2a883SImre Deak 		return IRQ_NONE;
27072dd2a883SImre Deak 
27081f814dacSImre Deak 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
27099102650fSDaniele Ceraolo Spurio 	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
27101f814dacSImre Deak 
2711b1f14ad0SJesse Barnes 	/* disable master interrupt before clearing iir */
2712b1f14ad0SJesse Barnes 	de_ier = I915_READ(DEIER);
2713b1f14ad0SJesse Barnes 	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
27140e43406bSChris Wilson 
271544498aeaSPaulo Zanoni 	/* Disable south interrupts. We'll only write to SDEIIR once, so further
271644498aeaSPaulo Zanoni 	 * interrupts will be stored on its back queue, and then we'll be
271744498aeaSPaulo Zanoni 	 * able to process them after we restore SDEIER (as soon as we restore
271844498aeaSPaulo Zanoni 	 * it, we'll get an interrupt if SDEIIR still has something to process
271944498aeaSPaulo Zanoni 	 * due to its back queue). */
272091d14251STvrtko Ursulin 	if (!HAS_PCH_NOP(dev_priv)) {
272144498aeaSPaulo Zanoni 		sde_ier = I915_READ(SDEIER);
272244498aeaSPaulo Zanoni 		I915_WRITE(SDEIER, 0);
2723ab5c608bSBen Widawsky 	}
272444498aeaSPaulo Zanoni 
272572c90f62SOscar Mateo 	/* Find, clear, then process each source of interrupt */
272672c90f62SOscar Mateo 
27270e43406bSChris Wilson 	gt_iir = I915_READ(GTIIR);
27280e43406bSChris Wilson 	if (gt_iir) {
272972c90f62SOscar Mateo 		I915_WRITE(GTIIR, gt_iir);
273072c90f62SOscar Mateo 		ret = IRQ_HANDLED;
273191d14251STvrtko Ursulin 		if (INTEL_GEN(dev_priv) >= 6)
2732261e40b8SVille Syrjälä 			snb_gt_irq_handler(dev_priv, gt_iir);
2733d8fc8a47SPaulo Zanoni 		else
2734261e40b8SVille Syrjälä 			ilk_gt_irq_handler(dev_priv, gt_iir);
27350e43406bSChris Wilson 	}
2736b1f14ad0SJesse Barnes 
2737b1f14ad0SJesse Barnes 	de_iir = I915_READ(DEIIR);
27380e43406bSChris Wilson 	if (de_iir) {
273972c90f62SOscar Mateo 		I915_WRITE(DEIIR, de_iir);
274072c90f62SOscar Mateo 		ret = IRQ_HANDLED;
274191d14251STvrtko Ursulin 		if (INTEL_GEN(dev_priv) >= 7)
274291d14251STvrtko Ursulin 			ivb_display_irq_handler(dev_priv, de_iir);
2743f1af8fc1SPaulo Zanoni 		else
274491d14251STvrtko Ursulin 			ilk_display_irq_handler(dev_priv, de_iir);
27450e43406bSChris Wilson 	}
27460e43406bSChris Wilson 
274791d14251STvrtko Ursulin 	if (INTEL_GEN(dev_priv) >= 6) {
2748f1af8fc1SPaulo Zanoni 		u32 pm_iir = I915_READ(GEN6_PMIIR);
27490e43406bSChris Wilson 		if (pm_iir) {
2750b1f14ad0SJesse Barnes 			I915_WRITE(GEN6_PMIIR, pm_iir);
27510e43406bSChris Wilson 			ret = IRQ_HANDLED;
275272c90f62SOscar Mateo 			gen6_rps_irq_handler(dev_priv, pm_iir);
27530e43406bSChris Wilson 		}
2754f1af8fc1SPaulo Zanoni 	}
2755b1f14ad0SJesse Barnes 
2756b1f14ad0SJesse Barnes 	I915_WRITE(DEIER, de_ier);
275774093f3eSChris Wilson 	if (!HAS_PCH_NOP(dev_priv))
275844498aeaSPaulo Zanoni 		I915_WRITE(SDEIER, sde_ier);
2759b1f14ad0SJesse Barnes 
27601f814dacSImre Deak 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
27619102650fSDaniele Ceraolo Spurio 	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
27621f814dacSImre Deak 
2763b1f14ad0SJesse Barnes 	return ret;
2764b1f14ad0SJesse Barnes }
2765b1f14ad0SJesse Barnes 
276691d14251STvrtko Ursulin static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv,
276791d14251STvrtko Ursulin 				u32 hotplug_trigger,
276840e56410SVille Syrjälä 				const u32 hpd[HPD_NUM_PINS])
2769d04a492dSShashank Sharma {
2770cebd87a0SVille Syrjälä 	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2771d04a492dSShashank Sharma 
2772a52bb15bSVille Syrjälä 	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2773a52bb15bSVille Syrjälä 	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2774d04a492dSShashank Sharma 
2775cf53902fSRodrigo Vivi 	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger,
277640e56410SVille Syrjälä 			   dig_hotplug_reg, hpd,
2777cebd87a0SVille Syrjälä 			   bxt_port_hotplug_long_detect);
277840e56410SVille Syrjälä 
277991d14251STvrtko Ursulin 	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2780d04a492dSShashank Sharma }
2781d04a492dSShashank Sharma 
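/*
 * Gen11 splits display hotplug into Type-C and Thunderbolt triggers, each
 * with its own hotplug control register, but both share the hpd_gen11 pin
 * table for long/short pulse detection.
 */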
2782121e758eSDhinakaran Pandiyan static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
2783121e758eSDhinakaran Pandiyan {
2784121e758eSDhinakaran Pandiyan 	u32 pin_mask = 0, long_mask = 0;
2785b796b971SDhinakaran Pandiyan 	u32 trigger_tc = iir & GEN11_DE_TC_HOTPLUG_MASK;
2786b796b971SDhinakaran Pandiyan 	u32 trigger_tbt = iir & GEN11_DE_TBT_HOTPLUG_MASK;
2787121e758eSDhinakaran Pandiyan 
2788121e758eSDhinakaran Pandiyan 	if (trigger_tc) {
2789b796b971SDhinakaran Pandiyan 		u32 dig_hotplug_reg;
2790b796b971SDhinakaran Pandiyan 
2791121e758eSDhinakaran Pandiyan 		dig_hotplug_reg = I915_READ(GEN11_TC_HOTPLUG_CTL);
2792121e758eSDhinakaran Pandiyan 		I915_WRITE(GEN11_TC_HOTPLUG_CTL, dig_hotplug_reg);
2793121e758eSDhinakaran Pandiyan 
2794121e758eSDhinakaran Pandiyan 		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tc,
2795b796b971SDhinakaran Pandiyan 				   dig_hotplug_reg, hpd_gen11,
2796121e758eSDhinakaran Pandiyan 				   gen11_port_hotplug_long_detect);
2797121e758eSDhinakaran Pandiyan 	}
2798b796b971SDhinakaran Pandiyan 
2799b796b971SDhinakaran Pandiyan 	if (trigger_tbt) {
2800b796b971SDhinakaran Pandiyan 		u32 dig_hotplug_reg;
2801b796b971SDhinakaran Pandiyan 
2802b796b971SDhinakaran Pandiyan 		dig_hotplug_reg = I915_READ(GEN11_TBT_HOTPLUG_CTL);
2803b796b971SDhinakaran Pandiyan 		I915_WRITE(GEN11_TBT_HOTPLUG_CTL, dig_hotplug_reg);
2804b796b971SDhinakaran Pandiyan 
2805b796b971SDhinakaran Pandiyan 		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tbt,
2806b796b971SDhinakaran Pandiyan 				   dig_hotplug_reg, hpd_gen11,
2807b796b971SDhinakaran Pandiyan 				   gen11_port_hotplug_long_detect);
2808b796b971SDhinakaran Pandiyan 	}
2809b796b971SDhinakaran Pandiyan 
2810b796b971SDhinakaran Pandiyan 	if (pin_mask)
2811b796b971SDhinakaran Pandiyan 		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2812b796b971SDhinakaran Pandiyan 	else
2813b796b971SDhinakaran Pandiyan 		DRM_ERROR("Unexpected DE HPD interrupt 0x%08x\n", iir);
2814121e758eSDhinakaran Pandiyan }
2815121e758eSDhinakaran Pandiyan 
28169d17210fSLucas De Marchi static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
28179d17210fSLucas De Marchi {
28189d17210fSLucas De Marchi 	u32 mask = GEN8_AUX_CHANNEL_A;
28199d17210fSLucas De Marchi 
28209d17210fSLucas De Marchi 	if (INTEL_GEN(dev_priv) >= 9)
28219d17210fSLucas De Marchi 		mask |= GEN9_AUX_CHANNEL_B |
28229d17210fSLucas De Marchi 			GEN9_AUX_CHANNEL_C |
28239d17210fSLucas De Marchi 			GEN9_AUX_CHANNEL_D;
28249d17210fSLucas De Marchi 
28259d17210fSLucas De Marchi 	if (IS_CNL_WITH_PORT_F(dev_priv))
28269d17210fSLucas De Marchi 		mask |= CNL_AUX_CHANNEL_F;
28279d17210fSLucas De Marchi 
28289d17210fSLucas De Marchi 	if (INTEL_GEN(dev_priv) >= 11)
28299d17210fSLucas De Marchi 		mask |= ICL_AUX_CHANNEL_E |
28309d17210fSLucas De Marchi 			CNL_AUX_CHANNEL_F;
28319d17210fSLucas De Marchi 
28329d17210fSLucas De Marchi 	return mask;
28339d17210fSLucas De Marchi }
28349d17210fSLucas De Marchi 
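/*
 * Display engine interrupt dispatch for gen8+: DE misc (ASLE/PSR), gen11
 * DE hotplug, DE port (AUX, hotplug, GMBUS), the per-pipe IIRs (vblank,
 * CRC, underrun, fault errors) and finally the south/PCH interrupts are
 * acked and handled in turn, guided by the bits set in master_ctl.
 */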
2835f11a0f46STvrtko Ursulin static irqreturn_t
2836f11a0f46STvrtko Ursulin gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
2837abd58f01SBen Widawsky {
2838abd58f01SBen Widawsky 	irqreturn_t ret = IRQ_NONE;
2839f11a0f46STvrtko Ursulin 	u32 iir;
2840c42664ccSDaniel Vetter 	enum pipe pipe;
284188e04703SJesse Barnes 
2842abd58f01SBen Widawsky 	if (master_ctl & GEN8_DE_MISC_IRQ) {
2843e32192e1STvrtko Ursulin 		iir = I915_READ(GEN8_DE_MISC_IIR);
2844e32192e1STvrtko Ursulin 		if (iir) {
2845e04f7eceSVille Syrjälä 			bool found = false;
2846e04f7eceSVille Syrjälä 
2847e32192e1STvrtko Ursulin 			I915_WRITE(GEN8_DE_MISC_IIR, iir);
2848abd58f01SBen Widawsky 			ret = IRQ_HANDLED;
2849e04f7eceSVille Syrjälä 
2850e04f7eceSVille Syrjälä 			if (iir & GEN8_DE_MISC_GSE) {
285191d14251STvrtko Ursulin 				intel_opregion_asle_intr(dev_priv);
2852e04f7eceSVille Syrjälä 				found = true;
2853e04f7eceSVille Syrjälä 			}
2854e04f7eceSVille Syrjälä 
2855e04f7eceSVille Syrjälä 			if (iir & GEN8_DE_EDP_PSR) {
285654fd3149SDhinakaran Pandiyan 				u32 psr_iir = I915_READ(EDP_PSR_IIR);
285754fd3149SDhinakaran Pandiyan 
285854fd3149SDhinakaran Pandiyan 				intel_psr_irq_handler(dev_priv, psr_iir);
285954fd3149SDhinakaran Pandiyan 				I915_WRITE(EDP_PSR_IIR, psr_iir);
2860e04f7eceSVille Syrjälä 				found = true;
2861e04f7eceSVille Syrjälä 			}
2862e04f7eceSVille Syrjälä 
2863e04f7eceSVille Syrjälä 			if (!found)
286438cc46d7SOscar Mateo 				DRM_ERROR("Unexpected DE Misc interrupt\n");
2865abd58f01SBen Widawsky 		}
286638cc46d7SOscar Mateo 		else
286738cc46d7SOscar Mateo 			DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
2868abd58f01SBen Widawsky 	}
2869abd58f01SBen Widawsky 
2870121e758eSDhinakaran Pandiyan 	if (INTEL_GEN(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) {
2871121e758eSDhinakaran Pandiyan 		iir = I915_READ(GEN11_DE_HPD_IIR);
2872121e758eSDhinakaran Pandiyan 		if (iir) {
2873121e758eSDhinakaran Pandiyan 			I915_WRITE(GEN11_DE_HPD_IIR, iir);
2874121e758eSDhinakaran Pandiyan 			ret = IRQ_HANDLED;
2875121e758eSDhinakaran Pandiyan 			gen11_hpd_irq_handler(dev_priv, iir);
2876121e758eSDhinakaran Pandiyan 		} else {
2877121e758eSDhinakaran Pandiyan 			DRM_ERROR("The master control interrupt lied, (DE HPD)!\n");
2878121e758eSDhinakaran Pandiyan 		}
2879121e758eSDhinakaran Pandiyan 	}
2880121e758eSDhinakaran Pandiyan 
28816d766f02SDaniel Vetter 	if (master_ctl & GEN8_DE_PORT_IRQ) {
2882e32192e1STvrtko Ursulin 		iir = I915_READ(GEN8_DE_PORT_IIR);
2883e32192e1STvrtko Ursulin 		if (iir) {
2884e32192e1STvrtko Ursulin 			u32 tmp_mask;
2885d04a492dSShashank Sharma 			bool found = false;
2886cebd87a0SVille Syrjälä 
2887e32192e1STvrtko Ursulin 			I915_WRITE(GEN8_DE_PORT_IIR, iir);
28886d766f02SDaniel Vetter 			ret = IRQ_HANDLED;
288988e04703SJesse Barnes 
28909d17210fSLucas De Marchi 			if (iir & gen8_de_port_aux_mask(dev_priv)) {
289191d14251STvrtko Ursulin 				dp_aux_irq_handler(dev_priv);
2892d04a492dSShashank Sharma 				found = true;
2893d04a492dSShashank Sharma 			}
2894d04a492dSShashank Sharma 
2895cc3f90f0SAnder Conselvan de Oliveira 			if (IS_GEN9_LP(dev_priv)) {
2896e32192e1STvrtko Ursulin 				tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK;
2897e32192e1STvrtko Ursulin 				if (tmp_mask) {
289891d14251STvrtko Ursulin 					bxt_hpd_irq_handler(dev_priv, tmp_mask,
289991d14251STvrtko Ursulin 							    hpd_bxt);
2900d04a492dSShashank Sharma 					found = true;
2901d04a492dSShashank Sharma 				}
2902e32192e1STvrtko Ursulin 			} else if (IS_BROADWELL(dev_priv)) {
2903e32192e1STvrtko Ursulin 				tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG;
2904e32192e1STvrtko Ursulin 				if (tmp_mask) {
290591d14251STvrtko Ursulin 					ilk_hpd_irq_handler(dev_priv,
290691d14251STvrtko Ursulin 							    tmp_mask, hpd_bdw);
2907e32192e1STvrtko Ursulin 					found = true;
2908e32192e1STvrtko Ursulin 				}
2909e32192e1STvrtko Ursulin 			}
2910d04a492dSShashank Sharma 
2911cc3f90f0SAnder Conselvan de Oliveira 			if (IS_GEN9_LP(dev_priv) && (iir & BXT_DE_PORT_GMBUS)) {
291291d14251STvrtko Ursulin 				gmbus_irq_handler(dev_priv);
29139e63743eSShashank Sharma 				found = true;
29149e63743eSShashank Sharma 			}
29159e63743eSShashank Sharma 
2916d04a492dSShashank Sharma 			if (!found)
291738cc46d7SOscar Mateo 				DRM_ERROR("Unexpected DE Port interrupt\n");
29186d766f02SDaniel Vetter 		}
291938cc46d7SOscar Mateo 		else
292038cc46d7SOscar Mateo 			DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
29216d766f02SDaniel Vetter 	}
29226d766f02SDaniel Vetter 
2923055e393fSDamien Lespiau 	for_each_pipe(dev_priv, pipe) {
2924fd3a4024SDaniel Vetter 		u32 fault_errors;
2925abd58f01SBen Widawsky 
2926c42664ccSDaniel Vetter 		if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
2927c42664ccSDaniel Vetter 			continue;
2928c42664ccSDaniel Vetter 
2929e32192e1STvrtko Ursulin 		iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
2930e32192e1STvrtko Ursulin 		if (!iir) {
2931e32192e1STvrtko Ursulin 			DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
2932e32192e1STvrtko Ursulin 			continue;
2933e32192e1STvrtko Ursulin 		}
2934770de83dSDamien Lespiau 
2935e32192e1STvrtko Ursulin 		ret = IRQ_HANDLED;
2936e32192e1STvrtko Ursulin 		I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir);
2937e32192e1STvrtko Ursulin 
2938fd3a4024SDaniel Vetter 		if (iir & GEN8_PIPE_VBLANK)
2939fd3a4024SDaniel Vetter 			drm_handle_vblank(&dev_priv->drm, pipe);
2940abd58f01SBen Widawsky 
2941e32192e1STvrtko Ursulin 		if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
294291d14251STvrtko Ursulin 			hsw_pipe_crc_irq_handler(dev_priv, pipe);
29430fbe7870SDaniel Vetter 
2944e32192e1STvrtko Ursulin 		if (iir & GEN8_PIPE_FIFO_UNDERRUN)
2945e32192e1STvrtko Ursulin 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
294638d83c96SDaniel Vetter 
2947e32192e1STvrtko Ursulin 		fault_errors = iir;
2948bca2bf2aSPandiyan, Dhinakaran 		if (INTEL_GEN(dev_priv) >= 9)
2949e32192e1STvrtko Ursulin 			fault_errors &= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
2950770de83dSDamien Lespiau 		else
2951e32192e1STvrtko Ursulin 			fault_errors &= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
2952770de83dSDamien Lespiau 
2953770de83dSDamien Lespiau 		if (fault_errors)
29541353ec38STvrtko Ursulin 			DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
295530100f2bSDaniel Vetter 				  pipe_name(pipe),
2956e32192e1STvrtko Ursulin 				  fault_errors);
2957abd58f01SBen Widawsky 	}
2958abd58f01SBen Widawsky 
295991d14251STvrtko Ursulin 	if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) &&
2960266ea3d9SShashank Sharma 	    master_ctl & GEN8_DE_PCH_IRQ) {
296192d03a80SDaniel Vetter 		/*
296292d03a80SDaniel Vetter 		 * FIXME(BDW): Assume for now that the new interrupt handling
296392d03a80SDaniel Vetter 		 * scheme also closed the SDE interrupt handling race we've seen
296492d03a80SDaniel Vetter 		 * on older pch-split platforms. But this needs testing.
296592d03a80SDaniel Vetter 		 */
2966e32192e1STvrtko Ursulin 		iir = I915_READ(SDEIIR);
2967e32192e1STvrtko Ursulin 		if (iir) {
2968e32192e1STvrtko Ursulin 			I915_WRITE(SDEIIR, iir);
296992d03a80SDaniel Vetter 			ret = IRQ_HANDLED;
29706dbf30ceSVille Syrjälä 
2971c6f7acb8SMatt Roper 			if (INTEL_PCH_TYPE(dev_priv) >= PCH_MCC)
2972c6f7acb8SMatt Roper 				icp_irq_handler(dev_priv, iir, hpd_mcc);
2973c6f7acb8SMatt Roper 			else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
2974c6f7acb8SMatt Roper 				icp_irq_handler(dev_priv, iir, hpd_icp);
2975c6c30b91SRodrigo Vivi 			else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
297691d14251STvrtko Ursulin 				spt_irq_handler(dev_priv, iir);
29776dbf30ceSVille Syrjälä 			else
297891d14251STvrtko Ursulin 				cpt_irq_handler(dev_priv, iir);
29792dfb0b81SJani Nikula 		} else {
29802dfb0b81SJani Nikula 			/*
29812dfb0b81SJani Nikula 			 * Like on previous PCH there seems to be something
29822dfb0b81SJani Nikula 			 * fishy going on with forwarding PCH interrupts.
29832dfb0b81SJani Nikula 			 */
29842dfb0b81SJani Nikula 			DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n");
29852dfb0b81SJani Nikula 		}
298692d03a80SDaniel Vetter 	}
298792d03a80SDaniel Vetter 
2988f11a0f46STvrtko Ursulin 	return ret;
2989f11a0f46STvrtko Ursulin }
2990f11a0f46STvrtko Ursulin 
29914376b9c9SMika Kuoppala static inline u32 gen8_master_intr_disable(void __iomem * const regs)
29924376b9c9SMika Kuoppala {
29934376b9c9SMika Kuoppala 	raw_reg_write(regs, GEN8_MASTER_IRQ, 0);
29944376b9c9SMika Kuoppala 
29954376b9c9SMika Kuoppala 	/*
29964376b9c9SMika Kuoppala 	 * Now with master disabled, get a sample of level indications
29974376b9c9SMika Kuoppala 	 * for this interrupt. Indications will be cleared on related acks.
29984376b9c9SMika Kuoppala 	 * New indications can and will light up during processing,
29994376b9c9SMika Kuoppala 	 * and will generate new interrupt after enabling master.
30004376b9c9SMika Kuoppala 	 */
30014376b9c9SMika Kuoppala 	return raw_reg_read(regs, GEN8_MASTER_IRQ);
30024376b9c9SMika Kuoppala }
30034376b9c9SMika Kuoppala 
30044376b9c9SMika Kuoppala static inline void gen8_master_intr_enable(void __iomem * const regs)
30054376b9c9SMika Kuoppala {
30064376b9c9SMika Kuoppala 	raw_reg_write(regs, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
30074376b9c9SMika Kuoppala }
30084376b9c9SMika Kuoppala 
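/*
 * Top-level gen8 interrupt handler: disable the master interrupt, ack the
 * GT IIRs, handle display interrupts (with runtime PM wakeref asserts
 * disabled) and re-enable the master interrupt before running the GT
 * handlers.
 */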
3009f11a0f46STvrtko Ursulin static irqreturn_t gen8_irq_handler(int irq, void *arg)
3010f11a0f46STvrtko Ursulin {
3011b318b824SVille Syrjälä 	struct drm_i915_private *dev_priv = arg;
301225286aacSDaniele Ceraolo Spurio 	void __iomem * const regs = dev_priv->uncore.regs;
3013f11a0f46STvrtko Ursulin 	u32 master_ctl;
3014f0fd96f5SChris Wilson 	u32 gt_iir[4];
3015f11a0f46STvrtko Ursulin 
3016f11a0f46STvrtko Ursulin 	if (!intel_irqs_enabled(dev_priv))
3017f11a0f46STvrtko Ursulin 		return IRQ_NONE;
3018f11a0f46STvrtko Ursulin 
30194376b9c9SMika Kuoppala 	master_ctl = gen8_master_intr_disable(regs);
30204376b9c9SMika Kuoppala 	if (!master_ctl) {
30214376b9c9SMika Kuoppala 		gen8_master_intr_enable(regs);
3022f11a0f46STvrtko Ursulin 		return IRQ_NONE;
30234376b9c9SMika Kuoppala 	}
3024f11a0f46STvrtko Ursulin 
3025f11a0f46STvrtko Ursulin 	/* Find, clear, then process each source of interrupt */
302655ef72f2SChris Wilson 	gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);
3027f0fd96f5SChris Wilson 
3028f0fd96f5SChris Wilson 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
3029f0fd96f5SChris Wilson 	if (master_ctl & ~GEN8_GT_IRQS) {
30309102650fSDaniele Ceraolo Spurio 		disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
303155ef72f2SChris Wilson 		gen8_de_irq_handler(dev_priv, master_ctl);
30329102650fSDaniele Ceraolo Spurio 		enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
3033f0fd96f5SChris Wilson 	}
3034f11a0f46STvrtko Ursulin 
30354376b9c9SMika Kuoppala 	gen8_master_intr_enable(regs);
3036abd58f01SBen Widawsky 
3037f0fd96f5SChris Wilson 	gen8_gt_irq_handler(dev_priv, master_ctl, gt_iir);
30381f814dacSImre Deak 
303955ef72f2SChris Wilson 	return IRQ_HANDLED;
3040abd58f01SBen Widawsky }
3041abd58f01SBen Widawsky 
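/*
 * Gen11 GT interrupts are reported through per-bank selector/identity
 * register pairs: the handler selects a bank bit, spin-waits (~100us) for
 * GEN11_INTR_DATA_VALID, and decodes the engine class/instance from the
 * identity register before acking it.
 */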
304251951ae7SMika Kuoppala static u32
30439b77011eSTvrtko Ursulin gen11_gt_engine_identity(struct intel_gt *gt,
304451951ae7SMika Kuoppala 			 const unsigned int bank, const unsigned int bit)
304551951ae7SMika Kuoppala {
30469b77011eSTvrtko Ursulin 	void __iomem * const regs = gt->uncore->regs;
304751951ae7SMika Kuoppala 	u32 timeout_ts;
304851951ae7SMika Kuoppala 	u32 ident;
304951951ae7SMika Kuoppala 
30509b77011eSTvrtko Ursulin 	lockdep_assert_held(&gt->i915->irq_lock);
305196606f3bSOscar Mateo 
305251951ae7SMika Kuoppala 	raw_reg_write(regs, GEN11_IIR_REG_SELECTOR(bank), BIT(bit));
305351951ae7SMika Kuoppala 
305451951ae7SMika Kuoppala 	/*
305551951ae7SMika Kuoppala 	 * NB: Specs do not specify how long to spin wait,
305651951ae7SMika Kuoppala 	 * so we do ~100us as an educated guess.
305751951ae7SMika Kuoppala 	 */
305851951ae7SMika Kuoppala 	timeout_ts = (local_clock() >> 10) + 100;
305951951ae7SMika Kuoppala 	do {
306051951ae7SMika Kuoppala 		ident = raw_reg_read(regs, GEN11_INTR_IDENTITY_REG(bank));
306151951ae7SMika Kuoppala 	} while (!(ident & GEN11_INTR_DATA_VALID) &&
306251951ae7SMika Kuoppala 		 !time_after32(local_clock() >> 10, timeout_ts));
306351951ae7SMika Kuoppala 
306451951ae7SMika Kuoppala 	if (unlikely(!(ident & GEN11_INTR_DATA_VALID))) {
306551951ae7SMika Kuoppala 		DRM_ERROR("INTR_IDENTITY_REG%u:%u 0x%08x not valid!\n",
306651951ae7SMika Kuoppala 			  bank, bit, ident);
306751951ae7SMika Kuoppala 		return 0;
306851951ae7SMika Kuoppala 	}
306951951ae7SMika Kuoppala 
307051951ae7SMika Kuoppala 	raw_reg_write(regs, GEN11_INTR_IDENTITY_REG(bank),
307151951ae7SMika Kuoppala 		      GEN11_INTR_DATA_VALID);
307251951ae7SMika Kuoppala 
3073f744dbc2SMika Kuoppala 	return ident;
3074f744dbc2SMika Kuoppala }
3075f744dbc2SMika Kuoppala 
3076f744dbc2SMika Kuoppala static void
30779b77011eSTvrtko Ursulin gen11_other_irq_handler(struct intel_gt *gt, const u8 instance,
30789b77011eSTvrtko Ursulin 			const u16 iir)
3079f744dbc2SMika Kuoppala {
30809b77011eSTvrtko Ursulin 	struct drm_i915_private *i915 = gt->i915;
30819b77011eSTvrtko Ursulin 
308254c52a84SOscar Mateo 	if (instance == OTHER_GUC_INSTANCE)
308354c52a84SOscar Mateo 		return gen11_guc_irq_handler(i915, iir);
308454c52a84SOscar Mateo 
3085d02b98b8SOscar Mateo 	if (instance == OTHER_GTPM_INSTANCE)
3086*58820574STvrtko Ursulin 		return gen11_rps_irq_handler(gt, iir);
3087d02b98b8SOscar Mateo 
3088f744dbc2SMika Kuoppala 	WARN_ONCE(1, "unhandled other interrupt instance=0x%x, iir=0x%x\n",
3089f744dbc2SMika Kuoppala 		  instance, iir);
3090f744dbc2SMika Kuoppala }
3091f744dbc2SMika Kuoppala 
3092f744dbc2SMika Kuoppala static void
30939b77011eSTvrtko Ursulin gen11_engine_irq_handler(struct intel_gt *gt, const u8 class,
30949b77011eSTvrtko Ursulin 			 const u8 instance, const u16 iir)
3095f744dbc2SMika Kuoppala {
3096f744dbc2SMika Kuoppala 	struct intel_engine_cs *engine;
3097f744dbc2SMika Kuoppala 
3098f744dbc2SMika Kuoppala 	if (instance <= MAX_ENGINE_INSTANCE)
30999b77011eSTvrtko Ursulin 		engine = gt->i915->engine_class[class][instance];
3100f744dbc2SMika Kuoppala 	else
3101f744dbc2SMika Kuoppala 		engine = NULL;
3102f744dbc2SMika Kuoppala 
3103f744dbc2SMika Kuoppala 	if (likely(engine))
3104f744dbc2SMika Kuoppala 		return gen8_cs_irq_handler(engine, iir);
3105f744dbc2SMika Kuoppala 
3106f744dbc2SMika Kuoppala 	WARN_ONCE(1, "unhandled engine interrupt class=0x%x, instance=0x%x\n",
3107f744dbc2SMika Kuoppala 		  class, instance);
3108f744dbc2SMika Kuoppala }
3109f744dbc2SMika Kuoppala 
3110f744dbc2SMika Kuoppala static void
31119b77011eSTvrtko Ursulin gen11_gt_identity_handler(struct intel_gt *gt, const u32 identity)
3112f744dbc2SMika Kuoppala {
3113f744dbc2SMika Kuoppala 	const u8 class = GEN11_INTR_ENGINE_CLASS(identity);
3114f744dbc2SMika Kuoppala 	const u8 instance = GEN11_INTR_ENGINE_INSTANCE(identity);
3115f744dbc2SMika Kuoppala 	const u16 intr = GEN11_INTR_ENGINE_INTR(identity);
3116f744dbc2SMika Kuoppala 
3117f744dbc2SMika Kuoppala 	if (unlikely(!intr))
3118f744dbc2SMika Kuoppala 		return;
3119f744dbc2SMika Kuoppala 
3120f744dbc2SMika Kuoppala 	if (class <= COPY_ENGINE_CLASS)
31219b77011eSTvrtko Ursulin 		return gen11_engine_irq_handler(gt, class, instance, intr);
3122f744dbc2SMika Kuoppala 
3123f744dbc2SMika Kuoppala 	if (class == OTHER_CLASS)
31249b77011eSTvrtko Ursulin 		return gen11_other_irq_handler(gt, instance, intr);
3125f744dbc2SMika Kuoppala 
3126f744dbc2SMika Kuoppala 	WARN_ONCE(1, "unknown interrupt class=0x%x, instance=0x%x, intr=0x%x\n",
3127f744dbc2SMika Kuoppala 		  class, instance, intr);
312851951ae7SMika Kuoppala }
312951951ae7SMika Kuoppala 
313051951ae7SMika Kuoppala static void
31319b77011eSTvrtko Ursulin gen11_gt_bank_handler(struct intel_gt *gt, const unsigned int bank)
313251951ae7SMika Kuoppala {
31339b77011eSTvrtko Ursulin 	void __iomem * const regs = gt->uncore->regs;
313451951ae7SMika Kuoppala 	unsigned long intr_dw;
313551951ae7SMika Kuoppala 	unsigned int bit;
313651951ae7SMika Kuoppala 
31379b77011eSTvrtko Ursulin 	lockdep_assert_held(&gt->i915->irq_lock);
313851951ae7SMika Kuoppala 
313951951ae7SMika Kuoppala 	intr_dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
314051951ae7SMika Kuoppala 
314151951ae7SMika Kuoppala 	for_each_set_bit(bit, &intr_dw, 32) {
31429b77011eSTvrtko Ursulin 		const u32 ident = gen11_gt_engine_identity(gt, bank, bit);
314351951ae7SMika Kuoppala 
31449b77011eSTvrtko Ursulin 		gen11_gt_identity_handler(gt, ident);
314551951ae7SMika Kuoppala 	}
314651951ae7SMika Kuoppala 
314751951ae7SMika Kuoppala 	/* Clear must be after shared has been served for engine */
314851951ae7SMika Kuoppala 	raw_reg_write(regs, GEN11_GT_INTR_DW(bank), intr_dw);
314951951ae7SMika Kuoppala }
315096606f3bSOscar Mateo 
315196606f3bSOscar Mateo static void
31529b77011eSTvrtko Ursulin gen11_gt_irq_handler(struct intel_gt *gt, const u32 master_ctl)
315396606f3bSOscar Mateo {
31549b77011eSTvrtko Ursulin 	struct drm_i915_private *i915 = gt->i915;
315596606f3bSOscar Mateo 	unsigned int bank;
315696606f3bSOscar Mateo 
315796606f3bSOscar Mateo 	spin_lock(&i915->irq_lock);
315896606f3bSOscar Mateo 
315996606f3bSOscar Mateo 	for (bank = 0; bank < 2; bank++) {
316096606f3bSOscar Mateo 		if (master_ctl & GEN11_GT_DW_IRQ(bank))
31619b77011eSTvrtko Ursulin 			gen11_gt_bank_handler(gt, bank);
316296606f3bSOscar Mateo 	}
316396606f3bSOscar Mateo 
316496606f3bSOscar Mateo 	spin_unlock(&i915->irq_lock);
316551951ae7SMika Kuoppala }
316651951ae7SMika Kuoppala 
31677a909383SChris Wilson static u32
31689b77011eSTvrtko Ursulin gen11_gu_misc_irq_ack(struct intel_gt *gt, const u32 master_ctl)
3169df0d28c1SDhinakaran Pandiyan {
31709b77011eSTvrtko Ursulin 	void __iomem * const regs = gt->uncore->regs;
31717a909383SChris Wilson 	u32 iir;
3172df0d28c1SDhinakaran Pandiyan 
3173df0d28c1SDhinakaran Pandiyan 	if (!(master_ctl & GEN11_GU_MISC_IRQ))
31747a909383SChris Wilson 		return 0;
3175df0d28c1SDhinakaran Pandiyan 
31767a909383SChris Wilson 	iir = raw_reg_read(regs, GEN11_GU_MISC_IIR);
31777a909383SChris Wilson 	if (likely(iir))
31787a909383SChris Wilson 		raw_reg_write(regs, GEN11_GU_MISC_IIR, iir);
31797a909383SChris Wilson 
31807a909383SChris Wilson 	return iir;
3181df0d28c1SDhinakaran Pandiyan }
3182df0d28c1SDhinakaran Pandiyan 
3183df0d28c1SDhinakaran Pandiyan static void
31849b77011eSTvrtko Ursulin gen11_gu_misc_irq_handler(struct intel_gt *gt, const u32 iir)
3185df0d28c1SDhinakaran Pandiyan {
3186df0d28c1SDhinakaran Pandiyan 	if (iir & GEN11_GU_MISC_GSE)
31879b77011eSTvrtko Ursulin 		intel_opregion_asle_intr(gt->i915);
3188df0d28c1SDhinakaran Pandiyan }
3189df0d28c1SDhinakaran Pandiyan 
319081067b71SMika Kuoppala static inline u32 gen11_master_intr_disable(void __iomem * const regs)
319181067b71SMika Kuoppala {
319281067b71SMika Kuoppala 	raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0);
319381067b71SMika Kuoppala 
319481067b71SMika Kuoppala 	/*
319581067b71SMika Kuoppala 	 * Now with master disabled, get a sample of level indications
319681067b71SMika Kuoppala 	 * for this interrupt. Indications will be cleared on related acks.
319781067b71SMika Kuoppala 	 * New indications can and will light up during processing,
319881067b71SMika Kuoppala 	 * and will generate a new interrupt after the master is re-enabled.
319981067b71SMika Kuoppala 	 */
320081067b71SMika Kuoppala 	return raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
320181067b71SMika Kuoppala }
320281067b71SMika Kuoppala 
320381067b71SMika Kuoppala static inline void gen11_master_intr_enable(void __iomem * const regs)
320481067b71SMika Kuoppala {
320581067b71SMika Kuoppala 	raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ);
320681067b71SMika Kuoppala }
320781067b71SMika Kuoppala 
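/*
 * The gen11 top half below brackets everything with
 * gen11_master_intr_disable()/_enable(): GT sources are ack'ed and
 * dispatched first, display sources are handled with RPM wakeref asserts
 * suppressed, and the GU_MISC IIR is ack'ed before the master re-enable
 * but only acted upon afterwards.
 */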
320851951ae7SMika Kuoppala static irqreturn_t gen11_irq_handler(int irq, void *arg)
320951951ae7SMika Kuoppala {
3210b318b824SVille Syrjälä 	struct drm_i915_private * const i915 = arg;
321125286aacSDaniele Ceraolo Spurio 	void __iomem * const regs = i915->uncore.regs;
32129b77011eSTvrtko Ursulin 	struct intel_gt *gt = &i915->gt;
321351951ae7SMika Kuoppala 	u32 master_ctl;
3214df0d28c1SDhinakaran Pandiyan 	u32 gu_misc_iir;
321551951ae7SMika Kuoppala 
321651951ae7SMika Kuoppala 	if (!intel_irqs_enabled(i915))
321751951ae7SMika Kuoppala 		return IRQ_NONE;
321851951ae7SMika Kuoppala 
321981067b71SMika Kuoppala 	master_ctl = gen11_master_intr_disable(regs);
322081067b71SMika Kuoppala 	if (!master_ctl) {
322181067b71SMika Kuoppala 		gen11_master_intr_enable(regs);
322251951ae7SMika Kuoppala 		return IRQ_NONE;
322381067b71SMika Kuoppala 	}
322451951ae7SMika Kuoppala 
322551951ae7SMika Kuoppala 	/* Find, clear, then process each source of interrupt. */
32269b77011eSTvrtko Ursulin 	gen11_gt_irq_handler(gt, master_ctl);
322751951ae7SMika Kuoppala 
322851951ae7SMika Kuoppala 	/* IRQs are synced during runtime_suspend, so we don't require a wakeref */
322951951ae7SMika Kuoppala 	if (master_ctl & GEN11_DISPLAY_IRQ) {
323051951ae7SMika Kuoppala 		const u32 disp_ctl = raw_reg_read(regs, GEN11_DISPLAY_INT_CTL);
323151951ae7SMika Kuoppala 
32329102650fSDaniele Ceraolo Spurio 		disable_rpm_wakeref_asserts(&i915->runtime_pm);
323351951ae7SMika Kuoppala 		/*
323451951ae7SMika Kuoppala 		 * GEN11_DISPLAY_INT_CTL has the same format as GEN8_MASTER_IRQ
323551951ae7SMika Kuoppala 		 * for the display related bits.
323651951ae7SMika Kuoppala 		 */
323751951ae7SMika Kuoppala 		gen8_de_irq_handler(i915, disp_ctl);
32389102650fSDaniele Ceraolo Spurio 		enable_rpm_wakeref_asserts(&i915->runtime_pm);
323951951ae7SMika Kuoppala 	}
324051951ae7SMika Kuoppala 
32419b77011eSTvrtko Ursulin 	gu_misc_iir = gen11_gu_misc_irq_ack(gt, master_ctl);
3242df0d28c1SDhinakaran Pandiyan 
324381067b71SMika Kuoppala 	gen11_master_intr_enable(regs);
324451951ae7SMika Kuoppala 
32459b77011eSTvrtko Ursulin 	gen11_gu_misc_irq_handler(gt, gu_misc_iir);
3246df0d28c1SDhinakaran Pandiyan 
324751951ae7SMika Kuoppala 	return IRQ_HANDLED;
324851951ae7SMika Kuoppala }
324951951ae7SMika Kuoppala 
325042f52ef8SKeith Packard /* Called from drm generic code, passed 'crtc' which
325142f52ef8SKeith Packard  * we use as a pipe index
325242f52ef8SKeith Packard  */
325308fa8fd0SVille Syrjälä int i8xx_enable_vblank(struct drm_crtc *crtc)
32540a3e67a4SJesse Barnes {
325508fa8fd0SVille Syrjälä 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
325608fa8fd0SVille Syrjälä 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
3257e9d21d7fSKeith Packard 	unsigned long irqflags;
325871e0ffa5SJesse Barnes 
32591ec14ad3SChris Wilson 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
326086e83e35SChris Wilson 	i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
326186e83e35SChris Wilson 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
326286e83e35SChris Wilson 
326386e83e35SChris Wilson 	return 0;
326486e83e35SChris Wilson }
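
/*
 * Usage sketch (an assumption about how these hooks are wired up elsewhere,
 * not something taken from this file): the enable/disable vblank hooks in
 * this block are plugged into struct drm_crtc_funcs, e.g.
 *
 *	.enable_vblank = i8xx_enable_vblank,
 *	.disable_vblank = i8xx_disable_vblank,
 *
 * so that drm_crtc_vblank_get()/drm_crtc_vblank_put() in generic DRM code
 * end up unmasking/masking the vblank interrupt for the crtc's pipe.
 */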
326586e83e35SChris Wilson 
326608fa8fd0SVille Syrjälä int i945gm_enable_vblank(struct drm_crtc *crtc)
3267d938da6bSVille Syrjälä {
326808fa8fd0SVille Syrjälä 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
3269d938da6bSVille Syrjälä 
3270d938da6bSVille Syrjälä 	if (dev_priv->i945gm_vblank.enabled++ == 0)
3271d938da6bSVille Syrjälä 		schedule_work(&dev_priv->i945gm_vblank.work);
3272d938da6bSVille Syrjälä 
327308fa8fd0SVille Syrjälä 	return i8xx_enable_vblank(crtc);
3274d938da6bSVille Syrjälä }
3275d938da6bSVille Syrjälä 
327608fa8fd0SVille Syrjälä int i965_enable_vblank(struct drm_crtc *crtc)
327786e83e35SChris Wilson {
327808fa8fd0SVille Syrjälä 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
327908fa8fd0SVille Syrjälä 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
328086e83e35SChris Wilson 	unsigned long irqflags;
328186e83e35SChris Wilson 
328286e83e35SChris Wilson 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
32837c463586SKeith Packard 	i915_enable_pipestat(dev_priv, pipe,
3284755e9019SImre Deak 			     PIPE_START_VBLANK_INTERRUPT_STATUS);
32851ec14ad3SChris Wilson 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
32868692d00eSChris Wilson 
32870a3e67a4SJesse Barnes 	return 0;
32880a3e67a4SJesse Barnes }
32890a3e67a4SJesse Barnes 
329008fa8fd0SVille Syrjälä int ilk_enable_vblank(struct drm_crtc *crtc)
3291f796cf8fSJesse Barnes {
329208fa8fd0SVille Syrjälä 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
329308fa8fd0SVille Syrjälä 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
3294f796cf8fSJesse Barnes 	unsigned long irqflags;
3295a9c287c9SJani Nikula 	u32 bit = INTEL_GEN(dev_priv) >= 7 ?
329686e83e35SChris Wilson 		DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
3297f796cf8fSJesse Barnes 
3298f796cf8fSJesse Barnes 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3299fbdedaeaSVille Syrjälä 	ilk_enable_display_irq(dev_priv, bit);
3300b1f14ad0SJesse Barnes 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3301b1f14ad0SJesse Barnes 
33022e8bf223SDhinakaran Pandiyan 	/* Even though there is no DMC, the frame counter can get stuck when
33032e8bf223SDhinakaran Pandiyan 	 * PSR is active, as no frames are generated.
33042e8bf223SDhinakaran Pandiyan 	 */
33052e8bf223SDhinakaran Pandiyan 	if (HAS_PSR(dev_priv))
330608fa8fd0SVille Syrjälä 		drm_crtc_vblank_restore(crtc);
33072e8bf223SDhinakaran Pandiyan 
3308b1f14ad0SJesse Barnes 	return 0;
3309b1f14ad0SJesse Barnes }
3310b1f14ad0SJesse Barnes 
331108fa8fd0SVille Syrjälä int bdw_enable_vblank(struct drm_crtc *crtc)
3312abd58f01SBen Widawsky {
331308fa8fd0SVille Syrjälä 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
331408fa8fd0SVille Syrjälä 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
3315abd58f01SBen Widawsky 	unsigned long irqflags;
3316abd58f01SBen Widawsky 
3317abd58f01SBen Widawsky 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3318013d3752SVille Syrjälä 	bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
3319abd58f01SBen Widawsky 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3320013d3752SVille Syrjälä 
33212e8bf223SDhinakaran Pandiyan 	/* Even if there is no DMC, the frame counter can get stuck when
33222e8bf223SDhinakaran Pandiyan 	 * PSR is active, as no frames are generated, so check only for PSR.
33232e8bf223SDhinakaran Pandiyan 	 */
33242e8bf223SDhinakaran Pandiyan 	if (HAS_PSR(dev_priv))
332508fa8fd0SVille Syrjälä 		drm_crtc_vblank_restore(crtc);
33262e8bf223SDhinakaran Pandiyan 
3327abd58f01SBen Widawsky 	return 0;
3328abd58f01SBen Widawsky }
3329abd58f01SBen Widawsky 
333042f52ef8SKeith Packard /* Called from drm generic code, passed 'crtc' which
333142f52ef8SKeith Packard  * we use as a pipe index
333242f52ef8SKeith Packard  */
333308fa8fd0SVille Syrjälä void i8xx_disable_vblank(struct drm_crtc *crtc)
333486e83e35SChris Wilson {
333508fa8fd0SVille Syrjälä 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
333608fa8fd0SVille Syrjälä 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
333786e83e35SChris Wilson 	unsigned long irqflags;
333886e83e35SChris Wilson 
333986e83e35SChris Wilson 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
334086e83e35SChris Wilson 	i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
334186e83e35SChris Wilson 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
334286e83e35SChris Wilson }
334386e83e35SChris Wilson 
334408fa8fd0SVille Syrjälä void i945gm_disable_vblank(struct drm_crtc *crtc)
3345d938da6bSVille Syrjälä {
334608fa8fd0SVille Syrjälä 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
3347d938da6bSVille Syrjälä 
334808fa8fd0SVille Syrjälä 	i8xx_disable_vblank(crtc);
3349d938da6bSVille Syrjälä 
3350d938da6bSVille Syrjälä 	if (--dev_priv->i945gm_vblank.enabled == 0)
3351d938da6bSVille Syrjälä 		schedule_work(&dev_priv->i945gm_vblank.work);
3352d938da6bSVille Syrjälä }
3353d938da6bSVille Syrjälä 
335408fa8fd0SVille Syrjälä void i965_disable_vblank(struct drm_crtc *crtc)
33550a3e67a4SJesse Barnes {
335608fa8fd0SVille Syrjälä 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
335708fa8fd0SVille Syrjälä 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
3358e9d21d7fSKeith Packard 	unsigned long irqflags;
33590a3e67a4SJesse Barnes 
33601ec14ad3SChris Wilson 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
33617c463586SKeith Packard 	i915_disable_pipestat(dev_priv, pipe,
3362755e9019SImre Deak 			      PIPE_START_VBLANK_INTERRUPT_STATUS);
33631ec14ad3SChris Wilson 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
33640a3e67a4SJesse Barnes }
33650a3e67a4SJesse Barnes 
336608fa8fd0SVille Syrjälä void ilk_disable_vblank(struct drm_crtc *crtc)
3367f796cf8fSJesse Barnes {
336808fa8fd0SVille Syrjälä 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
336908fa8fd0SVille Syrjälä 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
3370f796cf8fSJesse Barnes 	unsigned long irqflags;
3371a9c287c9SJani Nikula 	u32 bit = INTEL_GEN(dev_priv) >= 7 ?
337286e83e35SChris Wilson 		DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
3373f796cf8fSJesse Barnes 
3374f796cf8fSJesse Barnes 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3375fbdedaeaSVille Syrjälä 	ilk_disable_display_irq(dev_priv, bit);
3376b1f14ad0SJesse Barnes 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3377b1f14ad0SJesse Barnes }
3378b1f14ad0SJesse Barnes 
337908fa8fd0SVille Syrjälä void bdw_disable_vblank(struct drm_crtc *crtc)
3380abd58f01SBen Widawsky {
338108fa8fd0SVille Syrjälä 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
338208fa8fd0SVille Syrjälä 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
3383abd58f01SBen Widawsky 	unsigned long irqflags;
3384abd58f01SBen Widawsky 
3385abd58f01SBen Widawsky 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3386013d3752SVille Syrjälä 	bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
3387abd58f01SBen Widawsky 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3388abd58f01SBen Widawsky }
3389abd58f01SBen Widawsky 
33907218524dSChris Wilson static void i945gm_vblank_work_func(struct work_struct *work)
3391d938da6bSVille Syrjälä {
3392d938da6bSVille Syrjälä 	struct drm_i915_private *dev_priv =
3393d938da6bSVille Syrjälä 		container_of(work, struct drm_i915_private, i945gm_vblank.work);
3394d938da6bSVille Syrjälä 
3395d938da6bSVille Syrjälä 	/*
3396d938da6bSVille Syrjälä 	 * Vblank interrupts fail to wake up the device from C3,
3397d938da6bSVille Syrjälä 	 * hence we want to prevent C3 usage while vblank interrupts
3398d938da6bSVille Syrjälä 	 * are enabled.
3399d938da6bSVille Syrjälä 	 */
3400d938da6bSVille Syrjälä 	pm_qos_update_request(&dev_priv->i945gm_vblank.pm_qos,
3401d938da6bSVille Syrjälä 			      READ_ONCE(dev_priv->i945gm_vblank.enabled) ?
3402d938da6bSVille Syrjälä 			      dev_priv->i945gm_vblank.c3_disable_latency :
3403d938da6bSVille Syrjälä 			      PM_QOS_DEFAULT_VALUE);
3404d938da6bSVille Syrjälä }
3405d938da6bSVille Syrjälä 
3406d938da6bSVille Syrjälä static int cstate_disable_latency(const char *name)
3407d938da6bSVille Syrjälä {
3408d938da6bSVille Syrjälä 	const struct cpuidle_driver *drv;
3409d938da6bSVille Syrjälä 	int i;
3410d938da6bSVille Syrjälä 
3411d938da6bSVille Syrjälä 	drv = cpuidle_get_driver();
3412d938da6bSVille Syrjälä 	if (!drv)
3413d938da6bSVille Syrjälä 		return 0;
3414d938da6bSVille Syrjälä 
3415d938da6bSVille Syrjälä 	for (i = 0; i < drv->state_count; i++) {
3416d938da6bSVille Syrjälä 		const struct cpuidle_state *state = &drv->states[i];
3417d938da6bSVille Syrjälä 
3418d938da6bSVille Syrjälä 		if (!strcmp(state->name, name))
3419d938da6bSVille Syrjälä 			return state->exit_latency ?
3420d938da6bSVille Syrjälä 				state->exit_latency - 1 : 0;
3421d938da6bSVille Syrjälä 	}
3422d938da6bSVille Syrjälä 
3423d938da6bSVille Syrjälä 	return 0;
3424d938da6bSVille Syrjälä }
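
/*
 * A note on the "- 1" above (reasoning, not taken from the sources): the
 * returned value is used as a CPU DMA latency request just below the C3
 * exit latency, which presumably keeps cpuidle from selecting C3 (or any
 * deeper state) while vblank interrupts are enabled; see
 * i945gm_vblank_work_func() above.
 */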
3425d938da6bSVille Syrjälä 
3426d938da6bSVille Syrjälä static void i945gm_vblank_work_init(struct drm_i915_private *dev_priv)
3427d938da6bSVille Syrjälä {
3428d938da6bSVille Syrjälä 	INIT_WORK(&dev_priv->i945gm_vblank.work,
3429d938da6bSVille Syrjälä 		  i945gm_vblank_work_func);
3430d938da6bSVille Syrjälä 
3431d938da6bSVille Syrjälä 	dev_priv->i945gm_vblank.c3_disable_latency =
3432d938da6bSVille Syrjälä 		cstate_disable_latency("C3");
3433d938da6bSVille Syrjälä 	pm_qos_add_request(&dev_priv->i945gm_vblank.pm_qos,
3434d938da6bSVille Syrjälä 			   PM_QOS_CPU_DMA_LATENCY,
3435d938da6bSVille Syrjälä 			   PM_QOS_DEFAULT_VALUE);
3436d938da6bSVille Syrjälä }
3437d938da6bSVille Syrjälä 
3438d938da6bSVille Syrjälä static void i945gm_vblank_work_fini(struct drm_i915_private *dev_priv)
3439d938da6bSVille Syrjälä {
3440d938da6bSVille Syrjälä 	cancel_work_sync(&dev_priv->i945gm_vblank.work);
3441d938da6bSVille Syrjälä 	pm_qos_remove_request(&dev_priv->i945gm_vblank.pm_qos);
3442d938da6bSVille Syrjälä }
3443d938da6bSVille Syrjälä 
3444b243f530STvrtko Ursulin static void ibx_irq_reset(struct drm_i915_private *dev_priv)
344591738a95SPaulo Zanoni {
3446b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
3447b16b2a2fSPaulo Zanoni 
34486e266956STvrtko Ursulin 	if (HAS_PCH_NOP(dev_priv))
344991738a95SPaulo Zanoni 		return;
345091738a95SPaulo Zanoni 
3451b16b2a2fSPaulo Zanoni 	GEN3_IRQ_RESET(uncore, SDE);
3452105b122eSPaulo Zanoni 
34536e266956STvrtko Ursulin 	if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
3454105b122eSPaulo Zanoni 		I915_WRITE(SERR_INT, 0xffffffff);
3455622364b6SPaulo Zanoni }
3456105b122eSPaulo Zanoni 
345791738a95SPaulo Zanoni /*
3458622364b6SPaulo Zanoni  * SDEIER is also touched by the interrupt handler to work around missed PCH
3459622364b6SPaulo Zanoni  * interrupts. Hence we can't update it after the interrupt handler is enabled -
3460622364b6SPaulo Zanoni  * instead we unconditionally enable all PCH interrupt sources here, but then
3461622364b6SPaulo Zanoni  * only unmask them as needed with SDEIMR.
3462622364b6SPaulo Zanoni  *
3463622364b6SPaulo Zanoni  * This function needs to be called before interrupts are enabled.
346491738a95SPaulo Zanoni  */
3465b318b824SVille Syrjälä static void ibx_irq_pre_postinstall(struct drm_i915_private *dev_priv)
3466622364b6SPaulo Zanoni {
34676e266956STvrtko Ursulin 	if (HAS_PCH_NOP(dev_priv))
3468622364b6SPaulo Zanoni 		return;
3469622364b6SPaulo Zanoni 
3470622364b6SPaulo Zanoni 	WARN_ON(I915_READ(SDEIER) != 0);
347191738a95SPaulo Zanoni 	I915_WRITE(SDEIER, 0xffffffff);
347291738a95SPaulo Zanoni 	POSTING_READ(SDEIER);
347391738a95SPaulo Zanoni }
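
/*
 * Sketch of the ordering this implies (mirroring ironlake_irq_postinstall()
 * and gen8_irq_postinstall() below):
 *
 *	ibx_irq_pre_postinstall(dev_priv);	write SDEIER once, up front
 *	...					program DE/GT IER and IMR
 *	ibx_irq_postinstall(dev_priv);		unmask sources via SDEIMR
 */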
347491738a95SPaulo Zanoni 
3475b243f530STvrtko Ursulin static void gen5_gt_irq_reset(struct drm_i915_private *dev_priv)
3476d18ea1b5SDaniel Vetter {
3477b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
3478b16b2a2fSPaulo Zanoni 
3479b16b2a2fSPaulo Zanoni 	GEN3_IRQ_RESET(uncore, GT);
3480b243f530STvrtko Ursulin 	if (INTEL_GEN(dev_priv) >= 6)
3481b16b2a2fSPaulo Zanoni 		GEN3_IRQ_RESET(uncore, GEN6_PM);
3482d18ea1b5SDaniel Vetter }
3483d18ea1b5SDaniel Vetter 
348470591a41SVille Syrjälä static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
348570591a41SVille Syrjälä {
3486b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
3487b16b2a2fSPaulo Zanoni 
348871b8b41dSVille Syrjälä 	if (IS_CHERRYVIEW(dev_priv))
3489f0818984STvrtko Ursulin 		intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
349071b8b41dSVille Syrjälä 	else
3491f0818984STvrtko Ursulin 		intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK);
349271b8b41dSVille Syrjälä 
3493ad22d106SVille Syrjälä 	i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
3494f0818984STvrtko Ursulin 	intel_uncore_write(uncore, PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
349570591a41SVille Syrjälä 
349644d9241eSVille Syrjälä 	i9xx_pipestat_irq_reset(dev_priv);
349770591a41SVille Syrjälä 
3498b16b2a2fSPaulo Zanoni 	GEN3_IRQ_RESET(uncore, VLV_);
34998bd099a7SChris Wilson 	dev_priv->irq_mask = ~0u;
350070591a41SVille Syrjälä }
350170591a41SVille Syrjälä 
35028bb61306SVille Syrjälä static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
35038bb61306SVille Syrjälä {
3504b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
3505b16b2a2fSPaulo Zanoni 
35068bb61306SVille Syrjälä 	u32 pipestat_mask;
35079ab981f2SVille Syrjälä 	u32 enable_mask;
35088bb61306SVille Syrjälä 	enum pipe pipe;
35098bb61306SVille Syrjälä 
3510842ebf7aSVille Syrjälä 	pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS;
35118bb61306SVille Syrjälä 
35128bb61306SVille Syrjälä 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
35138bb61306SVille Syrjälä 	for_each_pipe(dev_priv, pipe)
35148bb61306SVille Syrjälä 		i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
35158bb61306SVille Syrjälä 
35169ab981f2SVille Syrjälä 	enable_mask = I915_DISPLAY_PORT_INTERRUPT |
35178bb61306SVille Syrjälä 		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3518ebf5f921SVille Syrjälä 		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3519ebf5f921SVille Syrjälä 		I915_LPE_PIPE_A_INTERRUPT |
3520ebf5f921SVille Syrjälä 		I915_LPE_PIPE_B_INTERRUPT;
3521ebf5f921SVille Syrjälä 
35228bb61306SVille Syrjälä 	if (IS_CHERRYVIEW(dev_priv))
3523ebf5f921SVille Syrjälä 		enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT |
3524ebf5f921SVille Syrjälä 			I915_LPE_PIPE_C_INTERRUPT;
35256b7eafc1SVille Syrjälä 
35268bd099a7SChris Wilson 	WARN_ON(dev_priv->irq_mask != ~0u);
35276b7eafc1SVille Syrjälä 
35289ab981f2SVille Syrjälä 	dev_priv->irq_mask = ~enable_mask;
35298bb61306SVille Syrjälä 
3530b16b2a2fSPaulo Zanoni 	GEN3_IRQ_INIT(uncore, VLV_, dev_priv->irq_mask, enable_mask);
35318bb61306SVille Syrjälä }
35328bb61306SVille Syrjälä 
35338bb61306SVille Syrjälä /* drm_dma.h hooks
35348bb61306SVille Syrjälä */
3535b318b824SVille Syrjälä static void ironlake_irq_reset(struct drm_i915_private *dev_priv)
35368bb61306SVille Syrjälä {
3537b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
35388bb61306SVille Syrjälä 
3539b16b2a2fSPaulo Zanoni 	GEN3_IRQ_RESET(uncore, DE);
3540cf819effSLucas De Marchi 	if (IS_GEN(dev_priv, 7))
3541f0818984STvrtko Ursulin 		intel_uncore_write(uncore, GEN7_ERR_INT, 0xffffffff);
35428bb61306SVille Syrjälä 
3543fc340442SDaniel Vetter 	if (IS_HASWELL(dev_priv)) {
3544f0818984STvrtko Ursulin 		intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
3545f0818984STvrtko Ursulin 		intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
3546fc340442SDaniel Vetter 	}
3547fc340442SDaniel Vetter 
3548b243f530STvrtko Ursulin 	gen5_gt_irq_reset(dev_priv);
35498bb61306SVille Syrjälä 
3550b243f530STvrtko Ursulin 	ibx_irq_reset(dev_priv);
35518bb61306SVille Syrjälä }
35528bb61306SVille Syrjälä 
3553b318b824SVille Syrjälä static void valleyview_irq_reset(struct drm_i915_private *dev_priv)
35547e231dbeSJesse Barnes {
355534c7b8a7SVille Syrjälä 	I915_WRITE(VLV_MASTER_IER, 0);
355634c7b8a7SVille Syrjälä 	POSTING_READ(VLV_MASTER_IER);
355734c7b8a7SVille Syrjälä 
3558b243f530STvrtko Ursulin 	gen5_gt_irq_reset(dev_priv);
35597e231dbeSJesse Barnes 
3560ad22d106SVille Syrjälä 	spin_lock_irq(&dev_priv->irq_lock);
35619918271eSVille Syrjälä 	if (dev_priv->display_irqs_enabled)
356270591a41SVille Syrjälä 		vlv_display_irq_reset(dev_priv);
3563ad22d106SVille Syrjälä 	spin_unlock_irq(&dev_priv->irq_lock);
35647e231dbeSJesse Barnes }
35657e231dbeSJesse Barnes 
3566d6e3cca3SDaniel Vetter static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
3567d6e3cca3SDaniel Vetter {
3568b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
3569b16b2a2fSPaulo Zanoni 
3570b16b2a2fSPaulo Zanoni 	GEN8_IRQ_RESET_NDX(uncore, GT, 0);
3571b16b2a2fSPaulo Zanoni 	GEN8_IRQ_RESET_NDX(uncore, GT, 1);
3572b16b2a2fSPaulo Zanoni 	GEN8_IRQ_RESET_NDX(uncore, GT, 2);
3573b16b2a2fSPaulo Zanoni 	GEN8_IRQ_RESET_NDX(uncore, GT, 3);
3574d6e3cca3SDaniel Vetter }
3575d6e3cca3SDaniel Vetter 
3576b318b824SVille Syrjälä static void gen8_irq_reset(struct drm_i915_private *dev_priv)
3577abd58f01SBen Widawsky {
3578b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
3579abd58f01SBen Widawsky 	int pipe;
3580abd58f01SBen Widawsky 
358125286aacSDaniele Ceraolo Spurio 	gen8_master_intr_disable(dev_priv->uncore.regs);
3582abd58f01SBen Widawsky 
3583d6e3cca3SDaniel Vetter 	gen8_gt_irq_reset(dev_priv);
3584abd58f01SBen Widawsky 
3585f0818984STvrtko Ursulin 	intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
3586f0818984STvrtko Ursulin 	intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
3587e04f7eceSVille Syrjälä 
3588055e393fSDamien Lespiau 	for_each_pipe(dev_priv, pipe)
3589f458ebbcSDaniel Vetter 		if (intel_display_power_is_enabled(dev_priv,
3590813bde43SPaulo Zanoni 						   POWER_DOMAIN_PIPE(pipe)))
3591b16b2a2fSPaulo Zanoni 			GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
3592abd58f01SBen Widawsky 
3593b16b2a2fSPaulo Zanoni 	GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
3594b16b2a2fSPaulo Zanoni 	GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);
3595b16b2a2fSPaulo Zanoni 	GEN3_IRQ_RESET(uncore, GEN8_PCU_);
3596abd58f01SBen Widawsky 
35976e266956STvrtko Ursulin 	if (HAS_PCH_SPLIT(dev_priv))
3598b243f530STvrtko Ursulin 		ibx_irq_reset(dev_priv);
3599abd58f01SBen Widawsky }
3600abd58f01SBen Widawsky 
36019b77011eSTvrtko Ursulin static void gen11_gt_irq_reset(struct intel_gt *gt)
360251951ae7SMika Kuoppala {
3603f0818984STvrtko Ursulin 	struct intel_uncore *uncore = gt->uncore;
36049b77011eSTvrtko Ursulin 
360551951ae7SMika Kuoppala 	/* Disable RCS, BCS, VCS and VECS class engines. */
3606f0818984STvrtko Ursulin 	intel_uncore_write(uncore, GEN11_RENDER_COPY_INTR_ENABLE, 0);
3607f0818984STvrtko Ursulin 	intel_uncore_write(uncore, GEN11_VCS_VECS_INTR_ENABLE,	  0);
360851951ae7SMika Kuoppala 
360951951ae7SMika Kuoppala 	/* Restore irq masks on RCS, BCS, VCS and VECS engines. */
3610f0818984STvrtko Ursulin 	intel_uncore_write(uncore, GEN11_RCS0_RSVD_INTR_MASK,	~0);
3611f0818984STvrtko Ursulin 	intel_uncore_write(uncore, GEN11_BCS_RSVD_INTR_MASK,	~0);
3612f0818984STvrtko Ursulin 	intel_uncore_write(uncore, GEN11_VCS0_VCS1_INTR_MASK,	~0);
3613f0818984STvrtko Ursulin 	intel_uncore_write(uncore, GEN11_VCS2_VCS3_INTR_MASK,	~0);
3614f0818984STvrtko Ursulin 	intel_uncore_write(uncore, GEN11_VECS0_VECS1_INTR_MASK,	~0);
3615d02b98b8SOscar Mateo 
3616f0818984STvrtko Ursulin 	intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
3617f0818984STvrtko Ursulin 	intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_MASK,  ~0);
3618f0818984STvrtko Ursulin 	intel_uncore_write(uncore, GEN11_GUC_SG_INTR_ENABLE, 0);
3619f0818984STvrtko Ursulin 	intel_uncore_write(uncore, GEN11_GUC_SG_INTR_MASK,  ~0);
362051951ae7SMika Kuoppala }
362151951ae7SMika Kuoppala 
3622b318b824SVille Syrjälä static void gen11_irq_reset(struct drm_i915_private *dev_priv)
362351951ae7SMika Kuoppala {
3624b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
362551951ae7SMika Kuoppala 	int pipe;
362651951ae7SMika Kuoppala 
362725286aacSDaniele Ceraolo Spurio 	gen11_master_intr_disable(dev_priv->uncore.regs);
362851951ae7SMika Kuoppala 
36299b77011eSTvrtko Ursulin 	gen11_gt_irq_reset(&dev_priv->gt);
363051951ae7SMika Kuoppala 
3631f0818984STvrtko Ursulin 	intel_uncore_write(uncore, GEN11_DISPLAY_INT_CTL, 0);
363251951ae7SMika Kuoppala 
3633f0818984STvrtko Ursulin 	intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
3634f0818984STvrtko Ursulin 	intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
363562819dfdSJosé Roberto de Souza 
363651951ae7SMika Kuoppala 	for_each_pipe(dev_priv, pipe)
363751951ae7SMika Kuoppala 		if (intel_display_power_is_enabled(dev_priv,
363851951ae7SMika Kuoppala 						   POWER_DOMAIN_PIPE(pipe)))
3639b16b2a2fSPaulo Zanoni 			GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
364051951ae7SMika Kuoppala 
3641b16b2a2fSPaulo Zanoni 	GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
3642b16b2a2fSPaulo Zanoni 	GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);
3643b16b2a2fSPaulo Zanoni 	GEN3_IRQ_RESET(uncore, GEN11_DE_HPD_);
3644b16b2a2fSPaulo Zanoni 	GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_);
3645b16b2a2fSPaulo Zanoni 	GEN3_IRQ_RESET(uncore, GEN8_PCU_);
364631604222SAnusha Srivatsa 
364729b43ae2SRodrigo Vivi 	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
3648b16b2a2fSPaulo Zanoni 		GEN3_IRQ_RESET(uncore, SDE);
364951951ae7SMika Kuoppala }
365051951ae7SMika Kuoppala 
36514c6c03beSDamien Lespiau void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
3652001bd2cbSImre Deak 				     u8 pipe_mask)
3653d49bdb0eSPaulo Zanoni {
3654b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
3655b16b2a2fSPaulo Zanoni 
3656a9c287c9SJani Nikula 	u32 extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
36576831f3e3SVille Syrjälä 	enum pipe pipe;
3658d49bdb0eSPaulo Zanoni 
365913321786SDaniel Vetter 	spin_lock_irq(&dev_priv->irq_lock);
36609dfe2e3aSImre Deak 
36619dfe2e3aSImre Deak 	if (!intel_irqs_enabled(dev_priv)) {
36629dfe2e3aSImre Deak 		spin_unlock_irq(&dev_priv->irq_lock);
36639dfe2e3aSImre Deak 		return;
36649dfe2e3aSImre Deak 	}
36659dfe2e3aSImre Deak 
36666831f3e3SVille Syrjälä 	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
3667b16b2a2fSPaulo Zanoni 		GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
36686831f3e3SVille Syrjälä 				  dev_priv->de_irq_mask[pipe],
36696831f3e3SVille Syrjälä 				  ~dev_priv->de_irq_mask[pipe] | extra_ier);
36709dfe2e3aSImre Deak 
367113321786SDaniel Vetter 	spin_unlock_irq(&dev_priv->irq_lock);
3672d49bdb0eSPaulo Zanoni }
3673d49bdb0eSPaulo Zanoni 
3674aae8ba84SVille Syrjälä void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
3675001bd2cbSImre Deak 				     u8 pipe_mask)
3676aae8ba84SVille Syrjälä {
3677b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
36786831f3e3SVille Syrjälä 	enum pipe pipe;
36796831f3e3SVille Syrjälä 
3680aae8ba84SVille Syrjälä 	spin_lock_irq(&dev_priv->irq_lock);
36819dfe2e3aSImre Deak 
36829dfe2e3aSImre Deak 	if (!intel_irqs_enabled(dev_priv)) {
36839dfe2e3aSImre Deak 		spin_unlock_irq(&dev_priv->irq_lock);
36849dfe2e3aSImre Deak 		return;
36859dfe2e3aSImre Deak 	}
36869dfe2e3aSImre Deak 
36876831f3e3SVille Syrjälä 	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
3688b16b2a2fSPaulo Zanoni 		GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
36899dfe2e3aSImre Deak 
3690aae8ba84SVille Syrjälä 	spin_unlock_irq(&dev_priv->irq_lock);
3691aae8ba84SVille Syrjälä 
3692aae8ba84SVille Syrjälä 	/* make sure we're done processing display irqs */
3693315ca4c4SVille Syrjälä 	intel_synchronize_irq(dev_priv);
3694aae8ba84SVille Syrjälä }
3695aae8ba84SVille Syrjälä 
3696b318b824SVille Syrjälä static void cherryview_irq_reset(struct drm_i915_private *dev_priv)
369743f328d7SVille Syrjälä {
3698b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
369943f328d7SVille Syrjälä 
370043f328d7SVille Syrjälä 	I915_WRITE(GEN8_MASTER_IRQ, 0);
370143f328d7SVille Syrjälä 	POSTING_READ(GEN8_MASTER_IRQ);
370243f328d7SVille Syrjälä 
3703d6e3cca3SDaniel Vetter 	gen8_gt_irq_reset(dev_priv);
370443f328d7SVille Syrjälä 
3705b16b2a2fSPaulo Zanoni 	GEN3_IRQ_RESET(uncore, GEN8_PCU_);
370643f328d7SVille Syrjälä 
3707ad22d106SVille Syrjälä 	spin_lock_irq(&dev_priv->irq_lock);
37089918271eSVille Syrjälä 	if (dev_priv->display_irqs_enabled)
370970591a41SVille Syrjälä 		vlv_display_irq_reset(dev_priv);
3710ad22d106SVille Syrjälä 	spin_unlock_irq(&dev_priv->irq_lock);
371143f328d7SVille Syrjälä }
371243f328d7SVille Syrjälä 
371391d14251STvrtko Ursulin static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv,
371487a02106SVille Syrjälä 				  const u32 hpd[HPD_NUM_PINS])
371587a02106SVille Syrjälä {
371687a02106SVille Syrjälä 	struct intel_encoder *encoder;
371787a02106SVille Syrjälä 	u32 enabled_irqs = 0;
371887a02106SVille Syrjälä 
371991c8a326SChris Wilson 	for_each_intel_encoder(&dev_priv->drm, encoder)
372087a02106SVille Syrjälä 		if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
372187a02106SVille Syrjälä 			enabled_irqs |= hpd[encoder->hpd_pin];
372287a02106SVille Syrjälä 
372387a02106SVille Syrjälä 	return enabled_irqs;
372487a02106SVille Syrjälä }
372587a02106SVille Syrjälä 
37261a56b1a2SImre Deak static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv)
37271a56b1a2SImre Deak {
37281a56b1a2SImre Deak 	u32 hotplug;
37291a56b1a2SImre Deak 
37301a56b1a2SImre Deak 	/*
37311a56b1a2SImre Deak 	 * Enable digital hotplug on the PCH, and configure the DP short pulse
37321a56b1a2SImre Deak 	 * duration to 2ms (which is the minimum in the Display Port spec).
37331a56b1a2SImre Deak 	 * The pulse duration bits are reserved on LPT+.
37341a56b1a2SImre Deak 	 */
37351a56b1a2SImre Deak 	hotplug = I915_READ(PCH_PORT_HOTPLUG);
37361a56b1a2SImre Deak 	hotplug &= ~(PORTB_PULSE_DURATION_MASK |
37371a56b1a2SImre Deak 		     PORTC_PULSE_DURATION_MASK |
37381a56b1a2SImre Deak 		     PORTD_PULSE_DURATION_MASK);
37391a56b1a2SImre Deak 	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
37401a56b1a2SImre Deak 	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
37411a56b1a2SImre Deak 	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
37421a56b1a2SImre Deak 	/*
37431a56b1a2SImre Deak 	 * When CPU and PCH are on the same package, port A
37441a56b1a2SImre Deak 	 * HPD must be enabled in both north and south.
37451a56b1a2SImre Deak 	 */
37461a56b1a2SImre Deak 	if (HAS_PCH_LPT_LP(dev_priv))
37471a56b1a2SImre Deak 		hotplug |= PORTA_HOTPLUG_ENABLE;
37481a56b1a2SImre Deak 	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
37491a56b1a2SImre Deak }
37501a56b1a2SImre Deak 
375191d14251STvrtko Ursulin static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
375282a28bcfSDaniel Vetter {
37531a56b1a2SImre Deak 	u32 hotplug_irqs, enabled_irqs;
375482a28bcfSDaniel Vetter 
375591d14251STvrtko Ursulin 	if (HAS_PCH_IBX(dev_priv)) {
3756fee884edSDaniel Vetter 		hotplug_irqs = SDE_HOTPLUG_MASK;
375791d14251STvrtko Ursulin 		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ibx);
375882a28bcfSDaniel Vetter 	} else {
3759fee884edSDaniel Vetter 		hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
376091d14251STvrtko Ursulin 		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_cpt);
376182a28bcfSDaniel Vetter 	}
376282a28bcfSDaniel Vetter 
3763fee884edSDaniel Vetter 	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
376482a28bcfSDaniel Vetter 
37651a56b1a2SImre Deak 	ibx_hpd_detection_setup(dev_priv);
37666dbf30ceSVille Syrjälä }
376726951cafSXiong Zhang 
376831604222SAnusha Srivatsa static void icp_hpd_detection_setup(struct drm_i915_private *dev_priv)
376931604222SAnusha Srivatsa {
377031604222SAnusha Srivatsa 	u32 hotplug;
377131604222SAnusha Srivatsa 
377231604222SAnusha Srivatsa 	hotplug = I915_READ(SHOTPLUG_CTL_DDI);
377331604222SAnusha Srivatsa 	hotplug |= ICP_DDIA_HPD_ENABLE |
377431604222SAnusha Srivatsa 		   ICP_DDIB_HPD_ENABLE;
377531604222SAnusha Srivatsa 	I915_WRITE(SHOTPLUG_CTL_DDI, hotplug);
377631604222SAnusha Srivatsa 
377731604222SAnusha Srivatsa 	hotplug = I915_READ(SHOTPLUG_CTL_TC);
377831604222SAnusha Srivatsa 	hotplug |= ICP_TC_HPD_ENABLE(PORT_TC1) |
377931604222SAnusha Srivatsa 		   ICP_TC_HPD_ENABLE(PORT_TC2) |
378031604222SAnusha Srivatsa 		   ICP_TC_HPD_ENABLE(PORT_TC3) |
378131604222SAnusha Srivatsa 		   ICP_TC_HPD_ENABLE(PORT_TC4);
378231604222SAnusha Srivatsa 	I915_WRITE(SHOTPLUG_CTL_TC, hotplug);
378331604222SAnusha Srivatsa }
378431604222SAnusha Srivatsa 
378531604222SAnusha Srivatsa static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv)
378631604222SAnusha Srivatsa {
378731604222SAnusha Srivatsa 	u32 hotplug_irqs, enabled_irqs;
378831604222SAnusha Srivatsa 
378931604222SAnusha Srivatsa 	hotplug_irqs = SDE_DDI_MASK_ICP | SDE_TC_MASK_ICP;
379031604222SAnusha Srivatsa 	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_icp);
379131604222SAnusha Srivatsa 
379231604222SAnusha Srivatsa 	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
379331604222SAnusha Srivatsa 
379431604222SAnusha Srivatsa 	icp_hpd_detection_setup(dev_priv);
379531604222SAnusha Srivatsa }
379631604222SAnusha Srivatsa 
3797121e758eSDhinakaran Pandiyan static void gen11_hpd_detection_setup(struct drm_i915_private *dev_priv)
3798121e758eSDhinakaran Pandiyan {
3799121e758eSDhinakaran Pandiyan 	u32 hotplug;
3800121e758eSDhinakaran Pandiyan 
3801121e758eSDhinakaran Pandiyan 	hotplug = I915_READ(GEN11_TC_HOTPLUG_CTL);
3802121e758eSDhinakaran Pandiyan 	hotplug |= GEN11_HOTPLUG_CTL_ENABLE(PORT_TC1) |
3803121e758eSDhinakaran Pandiyan 		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC2) |
3804121e758eSDhinakaran Pandiyan 		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC3) |
3805121e758eSDhinakaran Pandiyan 		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4);
3806121e758eSDhinakaran Pandiyan 	I915_WRITE(GEN11_TC_HOTPLUG_CTL, hotplug);
3807b796b971SDhinakaran Pandiyan 
3808b796b971SDhinakaran Pandiyan 	hotplug = I915_READ(GEN11_TBT_HOTPLUG_CTL);
3809b796b971SDhinakaran Pandiyan 	hotplug |= GEN11_HOTPLUG_CTL_ENABLE(PORT_TC1) |
3810b796b971SDhinakaran Pandiyan 		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC2) |
3811b796b971SDhinakaran Pandiyan 		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC3) |
3812b796b971SDhinakaran Pandiyan 		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4);
3813b796b971SDhinakaran Pandiyan 	I915_WRITE(GEN11_TBT_HOTPLUG_CTL, hotplug);
3814121e758eSDhinakaran Pandiyan }
3815121e758eSDhinakaran Pandiyan 
3816121e758eSDhinakaran Pandiyan static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
3817121e758eSDhinakaran Pandiyan {
3818121e758eSDhinakaran Pandiyan 	u32 hotplug_irqs, enabled_irqs;
3819121e758eSDhinakaran Pandiyan 	u32 val;
3820121e758eSDhinakaran Pandiyan 
3821b796b971SDhinakaran Pandiyan 	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_gen11);
3822b796b971SDhinakaran Pandiyan 	hotplug_irqs = GEN11_DE_TC_HOTPLUG_MASK | GEN11_DE_TBT_HOTPLUG_MASK;
3823121e758eSDhinakaran Pandiyan 
3824121e758eSDhinakaran Pandiyan 	val = I915_READ(GEN11_DE_HPD_IMR);
3825121e758eSDhinakaran Pandiyan 	val &= ~hotplug_irqs;
3826121e758eSDhinakaran Pandiyan 	I915_WRITE(GEN11_DE_HPD_IMR, val);
3827121e758eSDhinakaran Pandiyan 	POSTING_READ(GEN11_DE_HPD_IMR);
3828121e758eSDhinakaran Pandiyan 
3829121e758eSDhinakaran Pandiyan 	gen11_hpd_detection_setup(dev_priv);
383031604222SAnusha Srivatsa 
383129b43ae2SRodrigo Vivi 	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
383231604222SAnusha Srivatsa 		icp_hpd_irq_setup(dev_priv);
3833121e758eSDhinakaran Pandiyan }
3834121e758eSDhinakaran Pandiyan 
38352a57d9ccSImre Deak static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv)
38362a57d9ccSImre Deak {
38373b92e263SRodrigo Vivi 	u32 val, hotplug;
38383b92e263SRodrigo Vivi 
38393b92e263SRodrigo Vivi 	/* Display WA #1179 WaHardHangonHotPlug: cnp */
38403b92e263SRodrigo Vivi 	if (HAS_PCH_CNP(dev_priv)) {
38413b92e263SRodrigo Vivi 		val = I915_READ(SOUTH_CHICKEN1);
38423b92e263SRodrigo Vivi 		val &= ~CHASSIS_CLK_REQ_DURATION_MASK;
38433b92e263SRodrigo Vivi 		val |= CHASSIS_CLK_REQ_DURATION(0xf);
38443b92e263SRodrigo Vivi 		I915_WRITE(SOUTH_CHICKEN1, val);
38453b92e263SRodrigo Vivi 	}
38462a57d9ccSImre Deak 
38472a57d9ccSImre Deak 	/* Enable digital hotplug on the PCH */
38482a57d9ccSImre Deak 	hotplug = I915_READ(PCH_PORT_HOTPLUG);
38492a57d9ccSImre Deak 	hotplug |= PORTA_HOTPLUG_ENABLE |
38502a57d9ccSImre Deak 		   PORTB_HOTPLUG_ENABLE |
38512a57d9ccSImre Deak 		   PORTC_HOTPLUG_ENABLE |
38522a57d9ccSImre Deak 		   PORTD_HOTPLUG_ENABLE;
38532a57d9ccSImre Deak 	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
38542a57d9ccSImre Deak 
38552a57d9ccSImre Deak 	hotplug = I915_READ(PCH_PORT_HOTPLUG2);
38562a57d9ccSImre Deak 	hotplug |= PORTE_HOTPLUG_ENABLE;
38572a57d9ccSImre Deak 	I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
38582a57d9ccSImre Deak }
38592a57d9ccSImre Deak 
386091d14251STvrtko Ursulin static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
38616dbf30ceSVille Syrjälä {
38622a57d9ccSImre Deak 	u32 hotplug_irqs, enabled_irqs;
38636dbf30ceSVille Syrjälä 
38646dbf30ceSVille Syrjälä 	hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
386591d14251STvrtko Ursulin 	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_spt);
38666dbf30ceSVille Syrjälä 
38676dbf30ceSVille Syrjälä 	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
38686dbf30ceSVille Syrjälä 
38692a57d9ccSImre Deak 	spt_hpd_detection_setup(dev_priv);
387026951cafSXiong Zhang }
38717fe0b973SKeith Packard 
38721a56b1a2SImre Deak static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv)
38731a56b1a2SImre Deak {
38741a56b1a2SImre Deak 	u32 hotplug;
38751a56b1a2SImre Deak 
38761a56b1a2SImre Deak 	/*
38771a56b1a2SImre Deak 	 * Enable digital hotplug on the CPU, and configure the DP short pulse
38781a56b1a2SImre Deak 	 * duration to 2ms (which is the minimum in the Display Port spec).
38791a56b1a2SImre Deak 	 * The pulse duration bits are reserved on HSW+.
38801a56b1a2SImre Deak 	 */
38811a56b1a2SImre Deak 	hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
38821a56b1a2SImre Deak 	hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK;
38831a56b1a2SImre Deak 	hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE |
38841a56b1a2SImre Deak 		   DIGITAL_PORTA_PULSE_DURATION_2ms;
38851a56b1a2SImre Deak 	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
38861a56b1a2SImre Deak }
38871a56b1a2SImre Deak 
388891d14251STvrtko Ursulin static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
3889e4ce95aaSVille Syrjälä {
38901a56b1a2SImre Deak 	u32 hotplug_irqs, enabled_irqs;
3891e4ce95aaSVille Syrjälä 
389291d14251STvrtko Ursulin 	if (INTEL_GEN(dev_priv) >= 8) {
38933a3b3c7dSVille Syrjälä 		hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG;
389491d14251STvrtko Ursulin 		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bdw);
38953a3b3c7dSVille Syrjälä 
38963a3b3c7dSVille Syrjälä 		bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
389791d14251STvrtko Ursulin 	} else if (INTEL_GEN(dev_priv) >= 7) {
389823bb4cb5SVille Syrjälä 		hotplug_irqs = DE_DP_A_HOTPLUG_IVB;
389991d14251STvrtko Ursulin 		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ivb);
39003a3b3c7dSVille Syrjälä 
39013a3b3c7dSVille Syrjälä 		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
390223bb4cb5SVille Syrjälä 	} else {
3903e4ce95aaSVille Syrjälä 		hotplug_irqs = DE_DP_A_HOTPLUG;
390491d14251STvrtko Ursulin 		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ilk);
3905e4ce95aaSVille Syrjälä 
3906e4ce95aaSVille Syrjälä 		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
39073a3b3c7dSVille Syrjälä 	}
3908e4ce95aaSVille Syrjälä 
39091a56b1a2SImre Deak 	ilk_hpd_detection_setup(dev_priv);
3910e4ce95aaSVille Syrjälä 
391191d14251STvrtko Ursulin 	ibx_hpd_irq_setup(dev_priv);
3912e4ce95aaSVille Syrjälä }
3913e4ce95aaSVille Syrjälä 
39142a57d9ccSImre Deak static void __bxt_hpd_detection_setup(struct drm_i915_private *dev_priv,
39152a57d9ccSImre Deak 				      u32 enabled_irqs)
3916e0a20ad7SShashank Sharma {
39172a57d9ccSImre Deak 	u32 hotplug;
3918e0a20ad7SShashank Sharma 
3919a52bb15bSVille Syrjälä 	hotplug = I915_READ(PCH_PORT_HOTPLUG);
39202a57d9ccSImre Deak 	hotplug |= PORTA_HOTPLUG_ENABLE |
39212a57d9ccSImre Deak 		   PORTB_HOTPLUG_ENABLE |
39222a57d9ccSImre Deak 		   PORTC_HOTPLUG_ENABLE;
3923d252bf68SShubhangi Shrivastava 
3924d252bf68SShubhangi Shrivastava 	DRM_DEBUG_KMS("Invert bit setting: hp_ctl:%x hp_port:%x\n",
3925d252bf68SShubhangi Shrivastava 		      hotplug, enabled_irqs);
3926d252bf68SShubhangi Shrivastava 	hotplug &= ~BXT_DDI_HPD_INVERT_MASK;
3927d252bf68SShubhangi Shrivastava 
3928d252bf68SShubhangi Shrivastava 	/*
3929d252bf68SShubhangi Shrivastava 	 * For BXT, the invert bit has to be set based on the AOB design
3930d252bf68SShubhangi Shrivastava 	 * for HPD detection logic; update it based on the VBT fields.
3931d252bf68SShubhangi Shrivastava 	 */
3932d252bf68SShubhangi Shrivastava 	if ((enabled_irqs & BXT_DE_PORT_HP_DDIA) &&
3933d252bf68SShubhangi Shrivastava 	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_A))
3934d252bf68SShubhangi Shrivastava 		hotplug |= BXT_DDIA_HPD_INVERT;
3935d252bf68SShubhangi Shrivastava 	if ((enabled_irqs & BXT_DE_PORT_HP_DDIB) &&
3936d252bf68SShubhangi Shrivastava 	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_B))
3937d252bf68SShubhangi Shrivastava 		hotplug |= BXT_DDIB_HPD_INVERT;
3938d252bf68SShubhangi Shrivastava 	if ((enabled_irqs & BXT_DE_PORT_HP_DDIC) &&
3939d252bf68SShubhangi Shrivastava 	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_C))
3940d252bf68SShubhangi Shrivastava 		hotplug |= BXT_DDIC_HPD_INVERT;
3941d252bf68SShubhangi Shrivastava 
3942a52bb15bSVille Syrjälä 	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3943e0a20ad7SShashank Sharma }
3944e0a20ad7SShashank Sharma 
39452a57d9ccSImre Deak static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv)
39462a57d9ccSImre Deak {
39472a57d9ccSImre Deak 	__bxt_hpd_detection_setup(dev_priv, BXT_DE_PORT_HOTPLUG_MASK);
39482a57d9ccSImre Deak }
39492a57d9ccSImre Deak 
39502a57d9ccSImre Deak static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
39512a57d9ccSImre Deak {
39522a57d9ccSImre Deak 	u32 hotplug_irqs, enabled_irqs;
39532a57d9ccSImre Deak 
39542a57d9ccSImre Deak 	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bxt);
39552a57d9ccSImre Deak 	hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK;
39562a57d9ccSImre Deak 
39572a57d9ccSImre Deak 	bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
39582a57d9ccSImre Deak 
39592a57d9ccSImre Deak 	__bxt_hpd_detection_setup(dev_priv, enabled_irqs);
39602a57d9ccSImre Deak }
39612a57d9ccSImre Deak 
3962b318b824SVille Syrjälä static void ibx_irq_postinstall(struct drm_i915_private *dev_priv)
3963d46da437SPaulo Zanoni {
396482a28bcfSDaniel Vetter 	u32 mask;
3965d46da437SPaulo Zanoni 
39666e266956STvrtko Ursulin 	if (HAS_PCH_NOP(dev_priv))
3967692a04cfSDaniel Vetter 		return;
3968692a04cfSDaniel Vetter 
39696e266956STvrtko Ursulin 	if (HAS_PCH_IBX(dev_priv))
39705c673b60SDaniel Vetter 		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
39714ebc6509SDhinakaran Pandiyan 	else if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
39725c673b60SDaniel Vetter 		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
39734ebc6509SDhinakaran Pandiyan 	else
39744ebc6509SDhinakaran Pandiyan 		mask = SDE_GMBUS_CPT;
39758664281bSPaulo Zanoni 
397665f42cdcSPaulo Zanoni 	gen3_assert_iir_is_zero(&dev_priv->uncore, SDEIIR);
3977d46da437SPaulo Zanoni 	I915_WRITE(SDEIMR, ~mask);
39782a57d9ccSImre Deak 
39792a57d9ccSImre Deak 	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
39802a57d9ccSImre Deak 	    HAS_PCH_LPT(dev_priv))
39811a56b1a2SImre Deak 		ibx_hpd_detection_setup(dev_priv);
39822a57d9ccSImre Deak 	else
39832a57d9ccSImre Deak 		spt_hpd_detection_setup(dev_priv);
3984d46da437SPaulo Zanoni }
3985d46da437SPaulo Zanoni 
3986b318b824SVille Syrjälä static void gen5_gt_irq_postinstall(struct drm_i915_private *dev_priv)
39870a9a8c91SDaniel Vetter {
3988b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
39890a9a8c91SDaniel Vetter 	u32 pm_irqs, gt_irqs;
39900a9a8c91SDaniel Vetter 
39910a9a8c91SDaniel Vetter 	pm_irqs = gt_irqs = 0;
39920a9a8c91SDaniel Vetter 
39930a9a8c91SDaniel Vetter 	dev_priv->gt_irq_mask = ~0;
39943c9192bcSTvrtko Ursulin 	if (HAS_L3_DPF(dev_priv)) {
39950a9a8c91SDaniel Vetter 		/* L3 parity interrupt is always unmasked. */
3996772c2a51STvrtko Ursulin 		dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev_priv);
3997772c2a51STvrtko Ursulin 		gt_irqs |= GT_PARITY_ERROR(dev_priv);
39980a9a8c91SDaniel Vetter 	}
39990a9a8c91SDaniel Vetter 
40000a9a8c91SDaniel Vetter 	gt_irqs |= GT_RENDER_USER_INTERRUPT;
4001cf819effSLucas De Marchi 	if (IS_GEN(dev_priv, 5)) {
4002f8973c21SChris Wilson 		gt_irqs |= ILK_BSD_USER_INTERRUPT;
40030a9a8c91SDaniel Vetter 	} else {
40040a9a8c91SDaniel Vetter 		gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
40050a9a8c91SDaniel Vetter 	}
40060a9a8c91SDaniel Vetter 
4007b16b2a2fSPaulo Zanoni 	GEN3_IRQ_INIT(uncore, GT, dev_priv->gt_irq_mask, gt_irqs);
40080a9a8c91SDaniel Vetter 
4009b243f530STvrtko Ursulin 	if (INTEL_GEN(dev_priv) >= 6) {
401078e68d36SImre Deak 		/*
401178e68d36SImre Deak 		 * RPS interrupts will get enabled/disabled on demand when RPS
401278e68d36SImre Deak 		 * itself is enabled/disabled.
401378e68d36SImre Deak 		 */
40148a68d464SChris Wilson 		if (HAS_ENGINE(dev_priv, VECS0)) {
40150a9a8c91SDaniel Vetter 			pm_irqs |= PM_VEBOX_USER_INTERRUPT;
4016*58820574STvrtko Ursulin 			dev_priv->gt.pm_ier |= PM_VEBOX_USER_INTERRUPT;
4017f4e9af4fSAkash Goel 		}
40180a9a8c91SDaniel Vetter 
4019*58820574STvrtko Ursulin 		dev_priv->gt.pm_imr = 0xffffffff;
4020*58820574STvrtko Ursulin 		GEN3_IRQ_INIT(uncore, GEN6_PM, dev_priv->gt.pm_imr, pm_irqs);
40210a9a8c91SDaniel Vetter 	}
40220a9a8c91SDaniel Vetter }
40230a9a8c91SDaniel Vetter 
4024b318b824SVille Syrjälä static void ironlake_irq_postinstall(struct drm_i915_private *dev_priv)
4025036a4a7dSZhenyu Wang {
4026b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
40278e76f8dcSPaulo Zanoni 	u32 display_mask, extra_mask;
40288e76f8dcSPaulo Zanoni 
4029b243f530STvrtko Ursulin 	if (INTEL_GEN(dev_priv) >= 7) {
40308e76f8dcSPaulo Zanoni 		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
4031842ebf7aSVille Syrjälä 				DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB);
40328e76f8dcSPaulo Zanoni 		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
403323bb4cb5SVille Syrjälä 			      DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
403423bb4cb5SVille Syrjälä 			      DE_DP_A_HOTPLUG_IVB);
40358e76f8dcSPaulo Zanoni 	} else {
40368e76f8dcSPaulo Zanoni 		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
4037842ebf7aSVille Syrjälä 				DE_AUX_CHANNEL_A | DE_PIPEB_CRC_DONE |
4038842ebf7aSVille Syrjälä 				DE_PIPEA_CRC_DONE | DE_POISON);
4039e4ce95aaSVille Syrjälä 		extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
4040e4ce95aaSVille Syrjälä 			      DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
4041e4ce95aaSVille Syrjälä 			      DE_DP_A_HOTPLUG);
40428e76f8dcSPaulo Zanoni 	}
4043036a4a7dSZhenyu Wang 
4044fc340442SDaniel Vetter 	if (IS_HASWELL(dev_priv)) {
4045b16b2a2fSPaulo Zanoni 		gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
40461aeb1b5fSDhinakaran Pandiyan 		intel_psr_irq_control(dev_priv, dev_priv->psr.debug);
4047fc340442SDaniel Vetter 		display_mask |= DE_EDP_PSR_INT_HSW;
4048fc340442SDaniel Vetter 	}
4049fc340442SDaniel Vetter 
40501ec14ad3SChris Wilson 	dev_priv->irq_mask = ~display_mask;
4051036a4a7dSZhenyu Wang 
4052b318b824SVille Syrjälä 	ibx_irq_pre_postinstall(dev_priv);
4053622364b6SPaulo Zanoni 
4054b16b2a2fSPaulo Zanoni 	GEN3_IRQ_INIT(uncore, DE, dev_priv->irq_mask,
4055b16b2a2fSPaulo Zanoni 		      display_mask | extra_mask);
4056036a4a7dSZhenyu Wang 
4057b318b824SVille Syrjälä 	gen5_gt_irq_postinstall(dev_priv);
4058036a4a7dSZhenyu Wang 
40591a56b1a2SImre Deak 	ilk_hpd_detection_setup(dev_priv);
40601a56b1a2SImre Deak 
4061b318b824SVille Syrjälä 	ibx_irq_postinstall(dev_priv);
40627fe0b973SKeith Packard 
406350a0bc90STvrtko Ursulin 	if (IS_IRONLAKE_M(dev_priv)) {
40646005ce42SDaniel Vetter 		/* Enable PCU event interrupts
40656005ce42SDaniel Vetter 		 *
40666005ce42SDaniel Vetter 		 * spinlocking not required here for correctness since interrupt
40674bc9d430SDaniel Vetter 		 * setup is guaranteed to run in single-threaded context. But we
40684bc9d430SDaniel Vetter 		 * need it to make the assert_spin_locked happy. */
4069d6207435SDaniel Vetter 		spin_lock_irq(&dev_priv->irq_lock);
4070fbdedaeaSVille Syrjälä 		ilk_enable_display_irq(dev_priv, DE_PCU_EVENT);
4071d6207435SDaniel Vetter 		spin_unlock_irq(&dev_priv->irq_lock);
4072f97108d1SJesse Barnes 	}
4073036a4a7dSZhenyu Wang }
4074036a4a7dSZhenyu Wang 
4075f8b79e58SImre Deak void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
4076f8b79e58SImre Deak {
407767520415SChris Wilson 	lockdep_assert_held(&dev_priv->irq_lock);
4078f8b79e58SImre Deak 
4079f8b79e58SImre Deak 	if (dev_priv->display_irqs_enabled)
4080f8b79e58SImre Deak 		return;
4081f8b79e58SImre Deak 
4082f8b79e58SImre Deak 	dev_priv->display_irqs_enabled = true;
4083f8b79e58SImre Deak 
4084d6c69803SVille Syrjälä 	if (intel_irqs_enabled(dev_priv)) {
4085d6c69803SVille Syrjälä 		vlv_display_irq_reset(dev_priv);
4086ad22d106SVille Syrjälä 		vlv_display_irq_postinstall(dev_priv);
4087f8b79e58SImre Deak 	}
4088d6c69803SVille Syrjälä }
4089f8b79e58SImre Deak 
4090f8b79e58SImre Deak void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
4091f8b79e58SImre Deak {
409267520415SChris Wilson 	lockdep_assert_held(&dev_priv->irq_lock);
4093f8b79e58SImre Deak 
4094f8b79e58SImre Deak 	if (!dev_priv->display_irqs_enabled)
4095f8b79e58SImre Deak 		return;
4096f8b79e58SImre Deak 
4097f8b79e58SImre Deak 	dev_priv->display_irqs_enabled = false;
4098f8b79e58SImre Deak 
4099950eabafSImre Deak 	if (intel_irqs_enabled(dev_priv))
4100ad22d106SVille Syrjälä 		vlv_display_irq_reset(dev_priv);
4101f8b79e58SImre Deak }
4102f8b79e58SImre Deak 
41030e6c9a9eSVille Syrjälä 
4104b318b824SVille Syrjälä static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv)
41050e6c9a9eSVille Syrjälä {
4106b318b824SVille Syrjälä 	gen5_gt_irq_postinstall(dev_priv);
41077e231dbeSJesse Barnes 
4108ad22d106SVille Syrjälä 	spin_lock_irq(&dev_priv->irq_lock);
41099918271eSVille Syrjälä 	if (dev_priv->display_irqs_enabled)
4110ad22d106SVille Syrjälä 		vlv_display_irq_postinstall(dev_priv);
4111ad22d106SVille Syrjälä 	spin_unlock_irq(&dev_priv->irq_lock);
4112ad22d106SVille Syrjälä 
41137e231dbeSJesse Barnes 	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
411434c7b8a7SVille Syrjälä 	POSTING_READ(VLV_MASTER_IER);
411520afbda2SDaniel Vetter }
411620afbda2SDaniel Vetter 
4117*58820574STvrtko Ursulin static void gen8_gt_irq_postinstall(struct drm_i915_private *i915)
4118abd58f01SBen Widawsky {
4119*58820574STvrtko Ursulin 	struct intel_gt *gt = &i915->gt;
4120*58820574STvrtko Ursulin 	struct intel_uncore *uncore = gt->uncore;
4121b16b2a2fSPaulo Zanoni 
4122abd58f01SBen Widawsky 	/* These are interrupts we'll toggle with the ring mask register */
4123a9c287c9SJani Nikula 	u32 gt_interrupts[] = {
41248a68d464SChris Wilson 		(GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
412573d477f6SOscar Mateo 		 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
412673d477f6SOscar Mateo 		 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
41278a68d464SChris Wilson 		 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT),
41288a68d464SChris Wilson 
41298a68d464SChris Wilson 		(GT_RENDER_USER_INTERRUPT << GEN8_VCS0_IRQ_SHIFT |
41308a68d464SChris Wilson 		 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS0_IRQ_SHIFT |
4131abd58f01SBen Widawsky 		 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
41328a68d464SChris Wilson 		 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT),
41338a68d464SChris Wilson 
4134abd58f01SBen Widawsky 		0,
41358a68d464SChris Wilson 
41368a68d464SChris Wilson 		(GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
41378a68d464SChris Wilson 		 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT)
4138abd58f01SBen Widawsky 	};
4139abd58f01SBen Widawsky 
4140*58820574STvrtko Ursulin 	gt->pm_ier = 0x0;
4141*58820574STvrtko Ursulin 	gt->pm_imr = ~gt->pm_ier;
4142b16b2a2fSPaulo Zanoni 	GEN8_IRQ_INIT_NDX(uncore, GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
4143b16b2a2fSPaulo Zanoni 	GEN8_IRQ_INIT_NDX(uncore, GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
414478e68d36SImre Deak 	/*
414578e68d36SImre Deak 	 * RPS interrupts will get enabled/disabled on demand when RPS itself
414626705e20SSagar Arun Kamble 	 * is enabled/disabled. Same will be the case for GuC interrupts.
414778e68d36SImre Deak 	 */
4148*58820574STvrtko Ursulin 	GEN8_IRQ_INIT_NDX(uncore, GT, 2, gt->pm_imr, gt->pm_ier);
4149b16b2a2fSPaulo Zanoni 	GEN8_IRQ_INIT_NDX(uncore, GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
4150abd58f01SBen Widawsky }
4151abd58f01SBen Widawsky 
4152abd58f01SBen Widawsky static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
4153abd58f01SBen Widawsky {
4154b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
4155b16b2a2fSPaulo Zanoni 
4156a9c287c9SJani Nikula 	u32 de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
4157a9c287c9SJani Nikula 	u32 de_pipe_enables;
41583a3b3c7dSVille Syrjälä 	u32 de_port_masked = GEN8_AUX_CHANNEL_A;
41593a3b3c7dSVille Syrjälä 	u32 de_port_enables;
4160df0d28c1SDhinakaran Pandiyan 	u32 de_misc_masked = GEN8_DE_EDP_PSR;
41613a3b3c7dSVille Syrjälä 	enum pipe pipe;
4162770de83dSDamien Lespiau 
4163df0d28c1SDhinakaran Pandiyan 	if (INTEL_GEN(dev_priv) <= 10)
4164df0d28c1SDhinakaran Pandiyan 		de_misc_masked |= GEN8_DE_MISC_GSE;
4165df0d28c1SDhinakaran Pandiyan 
4166bca2bf2aSPandiyan, Dhinakaran 	if (INTEL_GEN(dev_priv) >= 9) {
4167842ebf7aSVille Syrjälä 		de_pipe_masked |= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
41683a3b3c7dSVille Syrjälä 		de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
416988e04703SJesse Barnes 				  GEN9_AUX_CHANNEL_D;
4170cc3f90f0SAnder Conselvan de Oliveira 		if (IS_GEN9_LP(dev_priv))
41713a3b3c7dSVille Syrjälä 			de_port_masked |= BXT_DE_PORT_GMBUS;
41723a3b3c7dSVille Syrjälä 	} else {
4173842ebf7aSVille Syrjälä 		de_pipe_masked |= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
41743a3b3c7dSVille Syrjälä 	}
4175770de83dSDamien Lespiau 
4176bb187e93SJames Ausmus 	if (INTEL_GEN(dev_priv) >= 11)
4177bb187e93SJames Ausmus 		de_port_masked |= ICL_AUX_CHANNEL_E;
4178bb187e93SJames Ausmus 
41799bb635d9SDhinakaran Pandiyan 	if (IS_CNL_WITH_PORT_F(dev_priv) || INTEL_GEN(dev_priv) >= 11)
4180a324fcacSRodrigo Vivi 		de_port_masked |= CNL_AUX_CHANNEL_F;
4181a324fcacSRodrigo Vivi 
4182770de83dSDamien Lespiau 	de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
4183770de83dSDamien Lespiau 					   GEN8_PIPE_FIFO_UNDERRUN;
4184770de83dSDamien Lespiau 
41853a3b3c7dSVille Syrjälä 	de_port_enables = de_port_masked;
4186cc3f90f0SAnder Conselvan de Oliveira 	if (IS_GEN9_LP(dev_priv))
4187a52bb15bSVille Syrjälä 		de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
4188a52bb15bSVille Syrjälä 	else if (IS_BROADWELL(dev_priv))
41893a3b3c7dSVille Syrjälä 		de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;
41903a3b3c7dSVille Syrjälä 
4191b16b2a2fSPaulo Zanoni 	gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
419254fd3149SDhinakaran Pandiyan 	intel_psr_irq_control(dev_priv, dev_priv->psr.debug);
4193e04f7eceSVille Syrjälä 
41940a195c02SMika Kahola 	for_each_pipe(dev_priv, pipe) {
41950a195c02SMika Kahola 		dev_priv->de_irq_mask[pipe] = ~de_pipe_masked;
4196abd58f01SBen Widawsky 
4197f458ebbcSDaniel Vetter 		if (intel_display_power_is_enabled(dev_priv,
4198813bde43SPaulo Zanoni 				POWER_DOMAIN_PIPE(pipe)))
4199b16b2a2fSPaulo Zanoni 			GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
4200813bde43SPaulo Zanoni 					  dev_priv->de_irq_mask[pipe],
420135079899SPaulo Zanoni 					  de_pipe_enables);
42020a195c02SMika Kahola 	}
4203abd58f01SBen Widawsky 
4204b16b2a2fSPaulo Zanoni 	GEN3_IRQ_INIT(uncore, GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
4205b16b2a2fSPaulo Zanoni 	GEN3_IRQ_INIT(uncore, GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);
42062a57d9ccSImre Deak 
4207121e758eSDhinakaran Pandiyan 	if (INTEL_GEN(dev_priv) >= 11) {
4208121e758eSDhinakaran Pandiyan 		u32 de_hpd_masked = 0;
4209b796b971SDhinakaran Pandiyan 		u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK |
4210b796b971SDhinakaran Pandiyan 				     GEN11_DE_TBT_HOTPLUG_MASK;
4211121e758eSDhinakaran Pandiyan 
4212b16b2a2fSPaulo Zanoni 		GEN3_IRQ_INIT(uncore, GEN11_DE_HPD_, ~de_hpd_masked,
4213b16b2a2fSPaulo Zanoni 			      de_hpd_enables);
4214121e758eSDhinakaran Pandiyan 		gen11_hpd_detection_setup(dev_priv);
4215121e758eSDhinakaran Pandiyan 	} else if (IS_GEN9_LP(dev_priv)) {
42162a57d9ccSImre Deak 		bxt_hpd_detection_setup(dev_priv);
4217121e758eSDhinakaran Pandiyan 	} else if (IS_BROADWELL(dev_priv)) {
42181a56b1a2SImre Deak 		ilk_hpd_detection_setup(dev_priv);
4219abd58f01SBen Widawsky 	}
4220121e758eSDhinakaran Pandiyan }
4221abd58f01SBen Widawsky 
4222b318b824SVille Syrjälä static void gen8_irq_postinstall(struct drm_i915_private *dev_priv)
4223abd58f01SBen Widawsky {
42246e266956STvrtko Ursulin 	if (HAS_PCH_SPLIT(dev_priv))
4225b318b824SVille Syrjälä 		ibx_irq_pre_postinstall(dev_priv);
4226622364b6SPaulo Zanoni 
4227abd58f01SBen Widawsky 	gen8_gt_irq_postinstall(dev_priv);
4228abd58f01SBen Widawsky 	gen8_de_irq_postinstall(dev_priv);
4229abd58f01SBen Widawsky 
42306e266956STvrtko Ursulin 	if (HAS_PCH_SPLIT(dev_priv))
4231b318b824SVille Syrjälä 		ibx_irq_postinstall(dev_priv);
4232abd58f01SBen Widawsky 
423325286aacSDaniele Ceraolo Spurio 	gen8_master_intr_enable(dev_priv->uncore.regs);
4234abd58f01SBen Widawsky }
4235abd58f01SBen Widawsky 
42369b77011eSTvrtko Ursulin static void gen11_gt_irq_postinstall(struct intel_gt *gt)
423751951ae7SMika Kuoppala {
423851951ae7SMika Kuoppala 	const u32 irqs = GT_RENDER_USER_INTERRUPT | GT_CONTEXT_SWITCH_INTERRUPT;
4239f0818984STvrtko Ursulin 	struct intel_uncore *uncore = gt->uncore;
4240f0818984STvrtko Ursulin 	const u32 dmask = irqs << 16 | irqs;
4241f0818984STvrtko Ursulin 	const u32 smask = irqs << 16;
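	/*
	 * Each GEN11 engine interrupt enable/mask register below covers two
	 * engine instances, one per 16-bit half (as the _RSVD and VCS0_VCS1
	 * register names suggest): dmask programs both halves, smask only the
	 * upper half where the lower half is reserved.
	 */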
424251951ae7SMika Kuoppala 
424351951ae7SMika Kuoppala 	BUILD_BUG_ON(irqs & 0xffff0000);
424451951ae7SMika Kuoppala 
424551951ae7SMika Kuoppala 	/* Enable RCS, BCS, VCS and VECS class interrupts. */
4246f0818984STvrtko Ursulin 	intel_uncore_write(uncore, GEN11_RENDER_COPY_INTR_ENABLE, dmask);
4247f0818984STvrtko Ursulin 	intel_uncore_write(uncore, GEN11_VCS_VECS_INTR_ENABLE, dmask);
424851951ae7SMika Kuoppala 
424951951ae7SMika Kuoppala 	/* Unmask irqs on RCS, BCS, VCS and VECS engines. */
4250f0818984STvrtko Ursulin 	intel_uncore_write(uncore, GEN11_RCS0_RSVD_INTR_MASK, ~smask);
4251f0818984STvrtko Ursulin 	intel_uncore_write(uncore, GEN11_BCS_RSVD_INTR_MASK, ~smask);
4252f0818984STvrtko Ursulin 	intel_uncore_write(uncore, GEN11_VCS0_VCS1_INTR_MASK, ~dmask);
4253f0818984STvrtko Ursulin 	intel_uncore_write(uncore, GEN11_VCS2_VCS3_INTR_MASK, ~dmask);
4254f0818984STvrtko Ursulin 	intel_uncore_write(uncore, GEN11_VECS0_VECS1_INTR_MASK, ~dmask);
425551951ae7SMika Kuoppala 
4256d02b98b8SOscar Mateo 	/*
4257d02b98b8SOscar Mateo 	 * RPS interrupts will get enabled/disabled on demand when RPS itself
4258d02b98b8SOscar Mateo 	 * is enabled/disabled.
4259d02b98b8SOscar Mateo 	 */
4260*58820574STvrtko Ursulin 	gt->pm_ier = 0x0;
4261*58820574STvrtko Ursulin 	gt->pm_imr = ~gt->pm_ier;
4262f0818984STvrtko Ursulin 	intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
4263f0818984STvrtko Ursulin 	intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_MASK,  ~0);
426454c52a84SOscar Mateo 
426554c52a84SOscar Mateo 	/* Same thing for GuC interrupts */
4266f0818984STvrtko Ursulin 	intel_uncore_write(uncore, GEN11_GUC_SG_INTR_ENABLE, 0);
4267f0818984STvrtko Ursulin 	intel_uncore_write(uncore, GEN11_GUC_SG_INTR_MASK,  ~0);
426851951ae7SMika Kuoppala }
426951951ae7SMika Kuoppala 
4270b318b824SVille Syrjälä static void icp_irq_postinstall(struct drm_i915_private *dev_priv)
427131604222SAnusha Srivatsa {
427231604222SAnusha Srivatsa 	u32 mask = SDE_GMBUS_ICP;
427331604222SAnusha Srivatsa 
427431604222SAnusha Srivatsa 	WARN_ON(I915_READ(SDEIER) != 0);
427531604222SAnusha Srivatsa 	I915_WRITE(SDEIER, 0xffffffff);
427631604222SAnusha Srivatsa 	POSTING_READ(SDEIER);
427731604222SAnusha Srivatsa 
427865f42cdcSPaulo Zanoni 	gen3_assert_iir_is_zero(&dev_priv->uncore, SDEIIR);
427931604222SAnusha Srivatsa 	I915_WRITE(SDEIMR, ~mask);
428031604222SAnusha Srivatsa 
428131604222SAnusha Srivatsa 	icp_hpd_detection_setup(dev_priv);
428231604222SAnusha Srivatsa }
428331604222SAnusha Srivatsa 
4284b318b824SVille Syrjälä static void gen11_irq_postinstall(struct drm_i915_private *dev_priv)
428551951ae7SMika Kuoppala {
4286b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
4287df0d28c1SDhinakaran Pandiyan 	u32 gu_misc_masked = GEN11_GU_MISC_GSE;
428851951ae7SMika Kuoppala 
428929b43ae2SRodrigo Vivi 	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
4290b318b824SVille Syrjälä 		icp_irq_postinstall(dev_priv);
429131604222SAnusha Srivatsa 
42929b77011eSTvrtko Ursulin 	gen11_gt_irq_postinstall(&dev_priv->gt);
429351951ae7SMika Kuoppala 	gen8_de_irq_postinstall(dev_priv);
429451951ae7SMika Kuoppala 
4295b16b2a2fSPaulo Zanoni 	GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);
4296df0d28c1SDhinakaran Pandiyan 
429751951ae7SMika Kuoppala 	I915_WRITE(GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
429851951ae7SMika Kuoppala 
42999b77011eSTvrtko Ursulin 	gen11_master_intr_enable(uncore->regs);
4300c25f0c6aSDaniele Ceraolo Spurio 	POSTING_READ(GEN11_GFX_MSTR_IRQ);
430151951ae7SMika Kuoppala }
430251951ae7SMika Kuoppala 
4303b318b824SVille Syrjälä static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv)
430443f328d7SVille Syrjälä {
430543f328d7SVille Syrjälä 	gen8_gt_irq_postinstall(dev_priv);
430643f328d7SVille Syrjälä 
4307ad22d106SVille Syrjälä 	spin_lock_irq(&dev_priv->irq_lock);
43089918271eSVille Syrjälä 	if (dev_priv->display_irqs_enabled)
4309ad22d106SVille Syrjälä 		vlv_display_irq_postinstall(dev_priv);
4310ad22d106SVille Syrjälä 	spin_unlock_irq(&dev_priv->irq_lock);
4311ad22d106SVille Syrjälä 
4312e5328c43SVille Syrjälä 	I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
431343f328d7SVille Syrjälä 	POSTING_READ(GEN8_MASTER_IRQ);
431443f328d7SVille Syrjälä }
431543f328d7SVille Syrjälä 
4316b318b824SVille Syrjälä static void i8xx_irq_reset(struct drm_i915_private *dev_priv)
4317c2798b19SChris Wilson {
4318b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
4319c2798b19SChris Wilson 
432044d9241eSVille Syrjälä 	i9xx_pipestat_irq_reset(dev_priv);
432144d9241eSVille Syrjälä 
4322b16b2a2fSPaulo Zanoni 	GEN2_IRQ_RESET(uncore);
4323c2798b19SChris Wilson }
4324c2798b19SChris Wilson 
4325b318b824SVille Syrjälä static void i8xx_irq_postinstall(struct drm_i915_private *dev_priv)
4326c2798b19SChris Wilson {
4327b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
4328e9e9848aSVille Syrjälä 	u16 enable_mask;
4329c2798b19SChris Wilson 
43304f5fd91fSTvrtko Ursulin 	intel_uncore_write16(uncore,
43314f5fd91fSTvrtko Ursulin 			     EMR,
43324f5fd91fSTvrtko Ursulin 			     ~(I915_ERROR_PAGE_TABLE |
4333045cebd2SVille Syrjälä 			       I915_ERROR_MEMORY_REFRESH));
4334c2798b19SChris Wilson 
4335c2798b19SChris Wilson 	/* Unmask the interrupts that we always want on. */
4336c2798b19SChris Wilson 	dev_priv->irq_mask =
4337c2798b19SChris Wilson 		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
433816659bc5SVille Syrjälä 		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
433916659bc5SVille Syrjälä 		  I915_MASTER_ERROR_INTERRUPT);
4340c2798b19SChris Wilson 
4341e9e9848aSVille Syrjälä 	enable_mask =
4342c2798b19SChris Wilson 		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4343c2798b19SChris Wilson 		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
434416659bc5SVille Syrjälä 		I915_MASTER_ERROR_INTERRUPT |
4345e9e9848aSVille Syrjälä 		I915_USER_INTERRUPT;
4346e9e9848aSVille Syrjälä 
4347b16b2a2fSPaulo Zanoni 	GEN2_IRQ_INIT(uncore, dev_priv->irq_mask, enable_mask);
4348c2798b19SChris Wilson 
4349379ef82dSDaniel Vetter 	/* Interrupt setup is already guaranteed to be single-threaded; this is
4350379ef82dSDaniel Vetter 	 * just to make the assert_spin_locked check happy. */
4351d6207435SDaniel Vetter 	spin_lock_irq(&dev_priv->irq_lock);
4352755e9019SImre Deak 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4353755e9019SImre Deak 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4354d6207435SDaniel Vetter 	spin_unlock_irq(&dev_priv->irq_lock);
4355c2798b19SChris Wilson }
4356c2798b19SChris Wilson 
43574f5fd91fSTvrtko Ursulin static void i8xx_error_irq_ack(struct drm_i915_private *i915,
435878c357ddSVille Syrjälä 			       u16 *eir, u16 *eir_stuck)
435978c357ddSVille Syrjälä {
43604f5fd91fSTvrtko Ursulin 	struct intel_uncore *uncore = &i915->uncore;
436178c357ddSVille Syrjälä 	u16 emr;
436278c357ddSVille Syrjälä 
43634f5fd91fSTvrtko Ursulin 	*eir = intel_uncore_read16(uncore, EIR);
436478c357ddSVille Syrjälä 
436578c357ddSVille Syrjälä 	if (*eir)
43664f5fd91fSTvrtko Ursulin 		intel_uncore_write16(uncore, EIR, *eir);
436778c357ddSVille Syrjälä 
43684f5fd91fSTvrtko Ursulin 	*eir_stuck = intel_uncore_read16(uncore, EIR);
436978c357ddSVille Syrjälä 	if (*eir_stuck == 0)
437078c357ddSVille Syrjälä 		return;
437178c357ddSVille Syrjälä 
437278c357ddSVille Syrjälä 	/*
437378c357ddSVille Syrjälä 	 * Toggle all EMR bits to make sure we get an edge
437478c357ddSVille Syrjälä 	 * in the ISR master error bit if we don't clear
437578c357ddSVille Syrjälä 	 * all the EIR bits. Otherwise the edge triggered
437678c357ddSVille Syrjälä 	 * IIR on i965/g4x wouldn't notice that an interrupt
437778c357ddSVille Syrjälä 	 * is still pending. Also some EIR bits can't be
437878c357ddSVille Syrjälä 	 * cleared except by handling the underlying error
437978c357ddSVille Syrjälä 	 * (or by a GPU reset) so we mask any bit that
438078c357ddSVille Syrjälä 	 * remains set.
438178c357ddSVille Syrjälä 	 */
43824f5fd91fSTvrtko Ursulin 	emr = intel_uncore_read16(uncore, EMR);
43834f5fd91fSTvrtko Ursulin 	intel_uncore_write16(uncore, EMR, 0xffff);
43844f5fd91fSTvrtko Ursulin 	intel_uncore_write16(uncore, EMR, emr | *eir_stuck);
438578c357ddSVille Syrjälä }
438678c357ddSVille Syrjälä 
438778c357ddSVille Syrjälä static void i8xx_error_irq_handler(struct drm_i915_private *dev_priv,
438878c357ddSVille Syrjälä 				   u16 eir, u16 eir_stuck)
438978c357ddSVille Syrjälä {
439078c357ddSVille Syrjälä 	DRM_DEBUG("Master Error: EIR 0x%04x\n", eir);
439178c357ddSVille Syrjälä 
439278c357ddSVille Syrjälä 	if (eir_stuck)
439378c357ddSVille Syrjälä 		DRM_DEBUG_DRIVER("EIR stuck: 0x%04x, masked\n", eir_stuck);
439478c357ddSVille Syrjälä }
439578c357ddSVille Syrjälä 
439678c357ddSVille Syrjälä static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv,
439778c357ddSVille Syrjälä 			       u32 *eir, u32 *eir_stuck)
439878c357ddSVille Syrjälä {
439978c357ddSVille Syrjälä 	u32 emr;
440078c357ddSVille Syrjälä 
440178c357ddSVille Syrjälä 	*eir = I915_READ(EIR);
440278c357ddSVille Syrjälä 
440378c357ddSVille Syrjälä 	I915_WRITE(EIR, *eir);
440478c357ddSVille Syrjälä 
440578c357ddSVille Syrjälä 	*eir_stuck = I915_READ(EIR);
440678c357ddSVille Syrjälä 	if (*eir_stuck == 0)
440778c357ddSVille Syrjälä 		return;
440878c357ddSVille Syrjälä 
440978c357ddSVille Syrjälä 	/*
441078c357ddSVille Syrjälä 	 * Toggle all EMR bits to make sure we get an edge
441178c357ddSVille Syrjälä 	 * in the ISR master error bit if we don't clear
441278c357ddSVille Syrjälä 	 * all the EIR bits. Otherwise the edge triggered
441378c357ddSVille Syrjälä 	 * IIR on i965/g4x wouldn't notice that an interrupt
441478c357ddSVille Syrjälä 	 * is still pending. Also some EIR bits can't be
441578c357ddSVille Syrjälä 	 * cleared except by handling the underlying error
441678c357ddSVille Syrjälä 	 * (or by a GPU reset) so we mask any bit that
441778c357ddSVille Syrjälä 	 * remains set.
441878c357ddSVille Syrjälä 	 */
441978c357ddSVille Syrjälä 	emr = I915_READ(EMR);
442078c357ddSVille Syrjälä 	I915_WRITE(EMR, 0xffffffff);
442178c357ddSVille Syrjälä 	I915_WRITE(EMR, emr | *eir_stuck);
442278c357ddSVille Syrjälä }
442378c357ddSVille Syrjälä 
442478c357ddSVille Syrjälä static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv,
442578c357ddSVille Syrjälä 				   u32 eir, u32 eir_stuck)
442678c357ddSVille Syrjälä {
442778c357ddSVille Syrjälä 	DRM_DEBUG("Master Error, EIR 0x%08x\n", eir);
442878c357ddSVille Syrjälä 
442978c357ddSVille Syrjälä 	if (eir_stuck)
443078c357ddSVille Syrjälä 		DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masked\n", eir_stuck);
443178c357ddSVille Syrjälä }
443278c357ddSVille Syrjälä 
4433ff1f525eSDaniel Vetter static irqreturn_t i8xx_irq_handler(int irq, void *arg)
4434c2798b19SChris Wilson {
4435b318b824SVille Syrjälä 	struct drm_i915_private *dev_priv = arg;
4436af722d28SVille Syrjälä 	irqreturn_t ret = IRQ_NONE;
4437c2798b19SChris Wilson 
44382dd2a883SImre Deak 	if (!intel_irqs_enabled(dev_priv))
44392dd2a883SImre Deak 		return IRQ_NONE;
44402dd2a883SImre Deak 
44411f814dacSImre Deak 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
44429102650fSDaniele Ceraolo Spurio 	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
44431f814dacSImre Deak 
4444af722d28SVille Syrjälä 	do {
4445af722d28SVille Syrjälä 		u32 pipe_stats[I915_MAX_PIPES] = {};
444678c357ddSVille Syrjälä 		u16 eir = 0, eir_stuck = 0;
4447af722d28SVille Syrjälä 		u16 iir;
4448af722d28SVille Syrjälä 
44494f5fd91fSTvrtko Ursulin 		iir = intel_uncore_read16(&dev_priv->uncore, GEN2_IIR);
4450c2798b19SChris Wilson 		if (iir == 0)
4451af722d28SVille Syrjälä 			break;
4452c2798b19SChris Wilson 
4453af722d28SVille Syrjälä 		ret = IRQ_HANDLED;
4454c2798b19SChris Wilson 
4455eb64343cSVille Syrjälä 		/* Call regardless, as some status bits might not be
4456eb64343cSVille Syrjälä 		 * signalled in iir */
4457eb64343cSVille Syrjälä 		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
4458c2798b19SChris Wilson 
445978c357ddSVille Syrjälä 		if (iir & I915_MASTER_ERROR_INTERRUPT)
446078c357ddSVille Syrjälä 			i8xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
446178c357ddSVille Syrjälä 
44624f5fd91fSTvrtko Ursulin 		intel_uncore_write16(&dev_priv->uncore, GEN2_IIR, iir);
4463c2798b19SChris Wilson 
4464c2798b19SChris Wilson 		if (iir & I915_USER_INTERRUPT)
44658a68d464SChris Wilson 			intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);
4466c2798b19SChris Wilson 
446778c357ddSVille Syrjälä 		if (iir & I915_MASTER_ERROR_INTERRUPT)
446878c357ddSVille Syrjälä 			i8xx_error_irq_handler(dev_priv, eir, eir_stuck);
4469af722d28SVille Syrjälä 
4470eb64343cSVille Syrjälä 		i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats);
4471af722d28SVille Syrjälä 	} while (0);
4472c2798b19SChris Wilson 
44739102650fSDaniele Ceraolo Spurio 	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
44741f814dacSImre Deak 
44751f814dacSImre Deak 	return ret;
4476c2798b19SChris Wilson }
4477c2798b19SChris Wilson 
4478b318b824SVille Syrjälä static void i915_irq_reset(struct drm_i915_private *dev_priv)
4479a266c7d5SChris Wilson {
4480b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
4481a266c7d5SChris Wilson 
448256b857a5STvrtko Ursulin 	if (I915_HAS_HOTPLUG(dev_priv)) {
44830706f17cSEgbert Eich 		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4484a266c7d5SChris Wilson 		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4485a266c7d5SChris Wilson 	}
4486a266c7d5SChris Wilson 
448744d9241eSVille Syrjälä 	i9xx_pipestat_irq_reset(dev_priv);
448844d9241eSVille Syrjälä 
4489b16b2a2fSPaulo Zanoni 	GEN3_IRQ_RESET(uncore, GEN2_);
4490a266c7d5SChris Wilson }
4491a266c7d5SChris Wilson 
4492b318b824SVille Syrjälä static void i915_irq_postinstall(struct drm_i915_private *dev_priv)
4493a266c7d5SChris Wilson {
4494b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
449538bde180SChris Wilson 	u32 enable_mask;
4496a266c7d5SChris Wilson 
4497045cebd2SVille Syrjälä 	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE |
4498045cebd2SVille Syrjälä 			  I915_ERROR_MEMORY_REFRESH));
449938bde180SChris Wilson 
450038bde180SChris Wilson 	/* Unmask the interrupts that we always want on. */
450138bde180SChris Wilson 	dev_priv->irq_mask =
450238bde180SChris Wilson 		~(I915_ASLE_INTERRUPT |
450338bde180SChris Wilson 		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
450416659bc5SVille Syrjälä 		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
450516659bc5SVille Syrjälä 		  I915_MASTER_ERROR_INTERRUPT);
450638bde180SChris Wilson 
450738bde180SChris Wilson 	enable_mask =
450838bde180SChris Wilson 		I915_ASLE_INTERRUPT |
450938bde180SChris Wilson 		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
451038bde180SChris Wilson 		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
451116659bc5SVille Syrjälä 		I915_MASTER_ERROR_INTERRUPT |
451238bde180SChris Wilson 		I915_USER_INTERRUPT;
451338bde180SChris Wilson 
451456b857a5STvrtko Ursulin 	if (I915_HAS_HOTPLUG(dev_priv)) {
4515a266c7d5SChris Wilson 		/* Enable in IER... */
4516a266c7d5SChris Wilson 		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
4517a266c7d5SChris Wilson 		/* and unmask in IMR */
4518a266c7d5SChris Wilson 		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
4519a266c7d5SChris Wilson 	}
4520a266c7d5SChris Wilson 
4521b16b2a2fSPaulo Zanoni 	GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);
4522a266c7d5SChris Wilson 
4523379ef82dSDaniel Vetter 	/* Interrupt setup is already guaranteed to be single-threaded; this is
4524379ef82dSDaniel Vetter 	 * just to make the assert_spin_locked check happy. */
4525d6207435SDaniel Vetter 	spin_lock_irq(&dev_priv->irq_lock);
4526755e9019SImre Deak 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4527755e9019SImre Deak 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4528d6207435SDaniel Vetter 	spin_unlock_irq(&dev_priv->irq_lock);
4529379ef82dSDaniel Vetter 
4530c30bb1fdSVille Syrjälä 	i915_enable_asle_pipestat(dev_priv);
453120afbda2SDaniel Vetter }
453220afbda2SDaniel Vetter 
4533ff1f525eSDaniel Vetter static irqreturn_t i915_irq_handler(int irq, void *arg)
4534a266c7d5SChris Wilson {
4535b318b824SVille Syrjälä 	struct drm_i915_private *dev_priv = arg;
4536af722d28SVille Syrjälä 	irqreturn_t ret = IRQ_NONE;
4537a266c7d5SChris Wilson 
45382dd2a883SImre Deak 	if (!intel_irqs_enabled(dev_priv))
45392dd2a883SImre Deak 		return IRQ_NONE;
45402dd2a883SImre Deak 
45411f814dacSImre Deak 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
45429102650fSDaniele Ceraolo Spurio 	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
45431f814dacSImre Deak 
454438bde180SChris Wilson 	do {
4545eb64343cSVille Syrjälä 		u32 pipe_stats[I915_MAX_PIPES] = {};
454678c357ddSVille Syrjälä 		u32 eir = 0, eir_stuck = 0;
4547af722d28SVille Syrjälä 		u32 hotplug_status = 0;
4548af722d28SVille Syrjälä 		u32 iir;
4549a266c7d5SChris Wilson 
45509d9523d8SPaulo Zanoni 		iir = I915_READ(GEN2_IIR);
4551af722d28SVille Syrjälä 		if (iir == 0)
4552af722d28SVille Syrjälä 			break;
4553af722d28SVille Syrjälä 
4554af722d28SVille Syrjälä 		ret = IRQ_HANDLED;
4555af722d28SVille Syrjälä 
4556af722d28SVille Syrjälä 		if (I915_HAS_HOTPLUG(dev_priv) &&
4557af722d28SVille Syrjälä 		    iir & I915_DISPLAY_PORT_INTERRUPT)
4558af722d28SVille Syrjälä 			hotplug_status = i9xx_hpd_irq_ack(dev_priv);
4559a266c7d5SChris Wilson 
4560eb64343cSVille Syrjälä 		/* Call regardless, as some status bits might not be
4561eb64343cSVille Syrjälä 		 * signalled in iir */
4562eb64343cSVille Syrjälä 		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
4563a266c7d5SChris Wilson 
456478c357ddSVille Syrjälä 		if (iir & I915_MASTER_ERROR_INTERRUPT)
456578c357ddSVille Syrjälä 			i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
456678c357ddSVille Syrjälä 
45679d9523d8SPaulo Zanoni 		I915_WRITE(GEN2_IIR, iir);
4568a266c7d5SChris Wilson 
4569a266c7d5SChris Wilson 		if (iir & I915_USER_INTERRUPT)
45708a68d464SChris Wilson 			intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);
4571a266c7d5SChris Wilson 
457278c357ddSVille Syrjälä 		if (iir & I915_MASTER_ERROR_INTERRUPT)
457378c357ddSVille Syrjälä 			i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
4574a266c7d5SChris Wilson 
4575af722d28SVille Syrjälä 		if (hotplug_status)
4576af722d28SVille Syrjälä 			i9xx_hpd_irq_handler(dev_priv, hotplug_status);
4577af722d28SVille Syrjälä 
4578af722d28SVille Syrjälä 		i915_pipestat_irq_handler(dev_priv, iir, pipe_stats);
4579af722d28SVille Syrjälä 	} while (0);
4580a266c7d5SChris Wilson 
45819102650fSDaniele Ceraolo Spurio 	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
45821f814dacSImre Deak 
4583a266c7d5SChris Wilson 	return ret;
4584a266c7d5SChris Wilson }
4585a266c7d5SChris Wilson 
4586b318b824SVille Syrjälä static void i965_irq_reset(struct drm_i915_private *dev_priv)
4587a266c7d5SChris Wilson {
4588b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
4589a266c7d5SChris Wilson 
45900706f17cSEgbert Eich 	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4591a266c7d5SChris Wilson 	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4592a266c7d5SChris Wilson 
459344d9241eSVille Syrjälä 	i9xx_pipestat_irq_reset(dev_priv);
459444d9241eSVille Syrjälä 
4595b16b2a2fSPaulo Zanoni 	GEN3_IRQ_RESET(uncore, GEN2_);
4596a266c7d5SChris Wilson }
4597a266c7d5SChris Wilson 
4598b318b824SVille Syrjälä static void i965_irq_postinstall(struct drm_i915_private *dev_priv)
4599a266c7d5SChris Wilson {
4600b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
4601bbba0a97SChris Wilson 	u32 enable_mask;
4602a266c7d5SChris Wilson 	u32 error_mask;
4603a266c7d5SChris Wilson 
4604045cebd2SVille Syrjälä 	/*
4605045cebd2SVille Syrjälä 	 * Enable some error detection, note the instruction error mask
4606045cebd2SVille Syrjälä 	 * bit is reserved, so we leave it masked.
4607045cebd2SVille Syrjälä 	 */
4608045cebd2SVille Syrjälä 	if (IS_G4X(dev_priv)) {
4609045cebd2SVille Syrjälä 		error_mask = ~(GM45_ERROR_PAGE_TABLE |
4610045cebd2SVille Syrjälä 			       GM45_ERROR_MEM_PRIV |
4611045cebd2SVille Syrjälä 			       GM45_ERROR_CP_PRIV |
4612045cebd2SVille Syrjälä 			       I915_ERROR_MEMORY_REFRESH);
4613045cebd2SVille Syrjälä 	} else {
4614045cebd2SVille Syrjälä 		error_mask = ~(I915_ERROR_PAGE_TABLE |
4615045cebd2SVille Syrjälä 			       I915_ERROR_MEMORY_REFRESH);
4616045cebd2SVille Syrjälä 	}
4617045cebd2SVille Syrjälä 	I915_WRITE(EMR, error_mask);
4618045cebd2SVille Syrjälä 
4619a266c7d5SChris Wilson 	/* Unmask the interrupts that we always want on. */
4620c30bb1fdSVille Syrjälä 	dev_priv->irq_mask =
4621c30bb1fdSVille Syrjälä 		~(I915_ASLE_INTERRUPT |
4622adca4730SChris Wilson 		  I915_DISPLAY_PORT_INTERRUPT |
4623bbba0a97SChris Wilson 		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4624bbba0a97SChris Wilson 		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
462578c357ddSVille Syrjälä 		  I915_MASTER_ERROR_INTERRUPT);
4626bbba0a97SChris Wilson 
4627c30bb1fdSVille Syrjälä 	enable_mask =
4628c30bb1fdSVille Syrjälä 		I915_ASLE_INTERRUPT |
4629c30bb1fdSVille Syrjälä 		I915_DISPLAY_PORT_INTERRUPT |
4630c30bb1fdSVille Syrjälä 		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4631c30bb1fdSVille Syrjälä 		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
463278c357ddSVille Syrjälä 		I915_MASTER_ERROR_INTERRUPT |
4633c30bb1fdSVille Syrjälä 		I915_USER_INTERRUPT;
4634bbba0a97SChris Wilson 
463591d14251STvrtko Ursulin 	if (IS_G4X(dev_priv))
4636bbba0a97SChris Wilson 		enable_mask |= I915_BSD_USER_INTERRUPT;
4637a266c7d5SChris Wilson 
4638b16b2a2fSPaulo Zanoni 	GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);
4639c30bb1fdSVille Syrjälä 
4640b79480baSDaniel Vetter 	/* Interrupt setup is already guaranteed to be single-threaded; this is
4641b79480baSDaniel Vetter 	 * just to make the assert_spin_locked check happy. */
4642d6207435SDaniel Vetter 	spin_lock_irq(&dev_priv->irq_lock);
4643755e9019SImre Deak 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
4644755e9019SImre Deak 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4645755e9019SImre Deak 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4646d6207435SDaniel Vetter 	spin_unlock_irq(&dev_priv->irq_lock);
4647a266c7d5SChris Wilson 
464891d14251STvrtko Ursulin 	i915_enable_asle_pipestat(dev_priv);
464920afbda2SDaniel Vetter }
465020afbda2SDaniel Vetter 
465191d14251STvrtko Ursulin static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
465220afbda2SDaniel Vetter {
465320afbda2SDaniel Vetter 	u32 hotplug_en;
465420afbda2SDaniel Vetter 
465567520415SChris Wilson 	lockdep_assert_held(&dev_priv->irq_lock);
4656b5ea2d56SDaniel Vetter 
4657adca4730SChris Wilson 	/* Note HDMI and DP share hotplug bits */
4658e5868a31SEgbert Eich 	/* enable bits are the same for all generations */
465991d14251STvrtko Ursulin 	hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915);
4660a266c7d5SChris Wilson 	/* Programming the CRT detection parameters tends
4661a266c7d5SChris Wilson 	   to generate a spurious hotplug event about three
4662a266c7d5SChris Wilson 	   seconds later.  So just do it once.
4663a266c7d5SChris Wilson 	*/
466491d14251STvrtko Ursulin 	if (IS_G4X(dev_priv))
4665a266c7d5SChris Wilson 		hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
4666a266c7d5SChris Wilson 	hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
4667a266c7d5SChris Wilson 
4668a266c7d5SChris Wilson 	/* Ignore TV since it's buggy */
46690706f17cSEgbert Eich 	i915_hotplug_interrupt_update_locked(dev_priv,
4670f9e3dc78SJani Nikula 					     HOTPLUG_INT_EN_MASK |
4671f9e3dc78SJani Nikula 					     CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
4672f9e3dc78SJani Nikula 					     CRT_HOTPLUG_ACTIVATION_PERIOD_64,
46730706f17cSEgbert Eich 					     hotplug_en);
4674a266c7d5SChris Wilson }
4675a266c7d5SChris Wilson 
4676ff1f525eSDaniel Vetter static irqreturn_t i965_irq_handler(int irq, void *arg)
4677a266c7d5SChris Wilson {
4678b318b824SVille Syrjälä 	struct drm_i915_private *dev_priv = arg;
4679af722d28SVille Syrjälä 	irqreturn_t ret = IRQ_NONE;
4680a266c7d5SChris Wilson 
46812dd2a883SImre Deak 	if (!intel_irqs_enabled(dev_priv))
46822dd2a883SImre Deak 		return IRQ_NONE;
46832dd2a883SImre Deak 
46841f814dacSImre Deak 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
46859102650fSDaniele Ceraolo Spurio 	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
46861f814dacSImre Deak 
4687af722d28SVille Syrjälä 	do {
4688eb64343cSVille Syrjälä 		u32 pipe_stats[I915_MAX_PIPES] = {};
468978c357ddSVille Syrjälä 		u32 eir = 0, eir_stuck = 0;
4690af722d28SVille Syrjälä 		u32 hotplug_status = 0;
4691af722d28SVille Syrjälä 		u32 iir;
46922c8ba29fSChris Wilson 
46939d9523d8SPaulo Zanoni 		iir = I915_READ(GEN2_IIR);
4694af722d28SVille Syrjälä 		if (iir == 0)
4695af722d28SVille Syrjälä 			break;
4696af722d28SVille Syrjälä 
4697af722d28SVille Syrjälä 		ret = IRQ_HANDLED;
4698af722d28SVille Syrjälä 
4699af722d28SVille Syrjälä 		if (iir & I915_DISPLAY_PORT_INTERRUPT)
4700af722d28SVille Syrjälä 			hotplug_status = i9xx_hpd_irq_ack(dev_priv);
4701a266c7d5SChris Wilson 
4702eb64343cSVille Syrjälä 		/* Call regardless, as some status bits might not be
4703eb64343cSVille Syrjälä 		 * signalled in iir */
4704eb64343cSVille Syrjälä 		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
4705a266c7d5SChris Wilson 
470678c357ddSVille Syrjälä 		if (iir & I915_MASTER_ERROR_INTERRUPT)
470778c357ddSVille Syrjälä 			i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
470878c357ddSVille Syrjälä 
47099d9523d8SPaulo Zanoni 		I915_WRITE(GEN2_IIR, iir);
4710a266c7d5SChris Wilson 
4711a266c7d5SChris Wilson 		if (iir & I915_USER_INTERRUPT)
47128a68d464SChris Wilson 			intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);
4713af722d28SVille Syrjälä 
4714a266c7d5SChris Wilson 		if (iir & I915_BSD_USER_INTERRUPT)
47158a68d464SChris Wilson 			intel_engine_breadcrumbs_irq(dev_priv->engine[VCS0]);
4716a266c7d5SChris Wilson 
471778c357ddSVille Syrjälä 		if (iir & I915_MASTER_ERROR_INTERRUPT)
471878c357ddSVille Syrjälä 			i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
4719515ac2bbSDaniel Vetter 
4720af722d28SVille Syrjälä 		if (hotplug_status)
4721af722d28SVille Syrjälä 			i9xx_hpd_irq_handler(dev_priv, hotplug_status);
4722af722d28SVille Syrjälä 
4723af722d28SVille Syrjälä 		i965_pipestat_irq_handler(dev_priv, iir, pipe_stats);
4724af722d28SVille Syrjälä 	} while (0);
4725a266c7d5SChris Wilson 
47269102650fSDaniele Ceraolo Spurio 	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
47271f814dacSImre Deak 
4728a266c7d5SChris Wilson 	return ret;
4729a266c7d5SChris Wilson }
4730a266c7d5SChris Wilson 
4731fca52a55SDaniel Vetter /**
4732fca52a55SDaniel Vetter  * intel_irq_init - initializes irq support
4733fca52a55SDaniel Vetter  * @dev_priv: i915 device instance
4734fca52a55SDaniel Vetter  *
4735fca52a55SDaniel Vetter  * This function initializes all the irq support including work items, timers
4736fca52a55SDaniel Vetter  * and all the vtables. It does not setup the interrupt itself though.
4737fca52a55SDaniel Vetter  */
4738b963291cSDaniel Vetter void intel_irq_init(struct drm_i915_private *dev_priv)
4739f71d4af4SJesse Barnes {
474091c8a326SChris Wilson 	struct drm_device *dev = &dev_priv->drm;
4741562d9baeSSagar Arun Kamble 	struct intel_rps *rps = &dev_priv->gt_pm.rps;
4742cefcff8fSJoonas Lahtinen 	int i;
47438b2e326dSChris Wilson 
4744d938da6bSVille Syrjälä 	if (IS_I945GM(dev_priv))
4745d938da6bSVille Syrjälä 		i945gm_vblank_work_init(dev_priv);
4746d938da6bSVille Syrjälä 
474777913b39SJani Nikula 	intel_hpd_init_work(dev_priv);
474877913b39SJani Nikula 
4749562d9baeSSagar Arun Kamble 	INIT_WORK(&rps->work, gen6_pm_rps_work);
4750cefcff8fSJoonas Lahtinen 
4751a4da4fa4SDaniel Vetter 	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
4752cefcff8fSJoonas Lahtinen 	for (i = 0; i < MAX_L3_SLICES; ++i)
4753cefcff8fSJoonas Lahtinen 		dev_priv->l3_parity.remap_info[i] = NULL;
47548b2e326dSChris Wilson 
475554c52a84SOscar Mateo 	if (HAS_GUC_SCHED(dev_priv) && INTEL_GEN(dev_priv) < 11)
475626705e20SSagar Arun Kamble 		dev_priv->pm_guc_events = GEN9_GUC_TO_HOST_INT_EVENT;
475726705e20SSagar Arun Kamble 
4758a6706b45SDeepak S 	/* Let's track the enabled rps events */
4759666a4537SWayne Boyer 	if (IS_VALLEYVIEW(dev_priv))
47606c65a587SVille Syrjälä 		/* WaGsvRC0ResidencyMethod:vlv */
4761e0e8c7cbSChris Wilson 		dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
476231685c25SDeepak S 	else
47634668f695SChris Wilson 		dev_priv->pm_rps_events = (GEN6_PM_RP_UP_THRESHOLD |
47644668f695SChris Wilson 					   GEN6_PM_RP_DOWN_THRESHOLD |
47654668f695SChris Wilson 					   GEN6_PM_RP_DOWN_TIMEOUT);
4766a6706b45SDeepak S 
4767917dc6b5SMika Kuoppala 	/* We share the register with another engine */
4768917dc6b5SMika Kuoppala 	if (INTEL_GEN(dev_priv) > 9)
4769917dc6b5SMika Kuoppala 		GEM_WARN_ON(dev_priv->pm_rps_events & 0xffff0000);
4770917dc6b5SMika Kuoppala 
4771562d9baeSSagar Arun Kamble 	rps->pm_intrmsk_mbz = 0;
47721800ad25SSagar Arun Kamble 
47731800ad25SSagar Arun Kamble 	/*
4774acf2dc22SMika Kuoppala 	 * SNB,IVB,HSW can, while VLV,CHV may, hard hang on a looping batchbuffer
47751800ad25SSagar Arun Kamble 	 * if GEN6_PM_UP_EI_EXPIRED is masked.
47761800ad25SSagar Arun Kamble 	 *
47771800ad25SSagar Arun Kamble 	 * TODO: verify if this can be reproduced on VLV,CHV.
47781800ad25SSagar Arun Kamble 	 */
4779bca2bf2aSPandiyan, Dhinakaran 	if (INTEL_GEN(dev_priv) <= 7)
4780562d9baeSSagar Arun Kamble 		rps->pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED;
47811800ad25SSagar Arun Kamble 
4782bca2bf2aSPandiyan, Dhinakaran 	if (INTEL_GEN(dev_priv) >= 8)
4783562d9baeSSagar Arun Kamble 		rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
47841800ad25SSagar Arun Kamble 
478521da2700SVille Syrjälä 	dev->vblank_disable_immediate = true;
478621da2700SVille Syrjälä 
4787262fd485SChris Wilson 	/* Most platforms treat the display irq block as an always-on
4788262fd485SChris Wilson 	 * power domain. vlv/chv can disable it at runtime and need
4789262fd485SChris Wilson 	 * special care to avoid writing any of the display block registers
4790262fd485SChris Wilson 	 * outside of the power domain. We defer setting up the display irqs
4791262fd485SChris Wilson 	 * in this case to the runtime pm.
4792262fd485SChris Wilson 	 */
4793262fd485SChris Wilson 	dev_priv->display_irqs_enabled = true;
4794262fd485SChris Wilson 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
4795262fd485SChris Wilson 		dev_priv->display_irqs_enabled = false;
4796262fd485SChris Wilson 
4797317eaa95SLyude 	dev_priv->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD;
47989a64c650SLyude Paul 	/* If we have MST support, we want to avoid doing short HPD IRQ storm
47999a64c650SLyude Paul 	 * detection, as short HPD storms will occur as a natural part of
48009a64c650SLyude Paul 	 * sideband messaging with MST.
48019a64c650SLyude Paul 	 * On older platforms however, IRQ storms can occur with both long and
48029a64c650SLyude Paul  * On older platforms, however, IRQ storms can occur with both long and
48039a64c650SLyude Paul 	 */
48049a64c650SLyude Paul 	dev_priv->hotplug.hpd_short_storm_enabled = !HAS_DP_MST(dev_priv);
4805317eaa95SLyude 
4806b318b824SVille Syrjälä 	if (HAS_GMCH(dev_priv)) {
4807b318b824SVille Syrjälä 		if (I915_HAS_HOTPLUG(dev_priv))
480843f328d7SVille Syrjälä 			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4809b318b824SVille Syrjälä 	} else {
4810b318b824SVille Syrjälä 		if (INTEL_GEN(dev_priv) >= 11)
4811121e758eSDhinakaran Pandiyan 			dev_priv->display.hpd_irq_setup = gen11_hpd_irq_setup;
4812b318b824SVille Syrjälä 		else if (IS_GEN9_LP(dev_priv))
4813e0a20ad7SShashank Sharma 			dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
4814c6c30b91SRodrigo Vivi 		else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
48156dbf30ceSVille Syrjälä 			dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
48166dbf30ceSVille Syrjälä 		else
48173a3b3c7dSVille Syrjälä 			dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
4818f71d4af4SJesse Barnes 	}
4819f71d4af4SJesse Barnes }
482020afbda2SDaniel Vetter 
4821fca52a55SDaniel Vetter /**
4822cefcff8fSJoonas Lahtinen  * intel_irq_fini - deinitializes IRQ support
4823cefcff8fSJoonas Lahtinen  * @i915: i915 device instance
4824cefcff8fSJoonas Lahtinen  *
4825cefcff8fSJoonas Lahtinen  * This function deinitializes all the IRQ support.
4826cefcff8fSJoonas Lahtinen  */
4827cefcff8fSJoonas Lahtinen void intel_irq_fini(struct drm_i915_private *i915)
4828cefcff8fSJoonas Lahtinen {
4829cefcff8fSJoonas Lahtinen 	int i;
4830cefcff8fSJoonas Lahtinen 
4831d938da6bSVille Syrjälä 	if (IS_I945GM(i915))
4832d938da6bSVille Syrjälä 		i945gm_vblank_work_fini(i915);
4833d938da6bSVille Syrjälä 
4834cefcff8fSJoonas Lahtinen 	for (i = 0; i < MAX_L3_SLICES; ++i)
4835cefcff8fSJoonas Lahtinen 		kfree(i915->l3_parity.remap_info[i]);
4836cefcff8fSJoonas Lahtinen }
4837cefcff8fSJoonas Lahtinen 
4838b318b824SVille Syrjälä static irq_handler_t intel_irq_handler(struct drm_i915_private *dev_priv)
4839b318b824SVille Syrjälä {
4840b318b824SVille Syrjälä 	if (HAS_GMCH(dev_priv)) {
4841b318b824SVille Syrjälä 		if (IS_CHERRYVIEW(dev_priv))
4842b318b824SVille Syrjälä 			return cherryview_irq_handler;
4843b318b824SVille Syrjälä 		else if (IS_VALLEYVIEW(dev_priv))
4844b318b824SVille Syrjälä 			return valleyview_irq_handler;
4845b318b824SVille Syrjälä 		else if (IS_GEN(dev_priv, 4))
4846b318b824SVille Syrjälä 			return i965_irq_handler;
4847b318b824SVille Syrjälä 		else if (IS_GEN(dev_priv, 3))
4848b318b824SVille Syrjälä 			return i915_irq_handler;
4849b318b824SVille Syrjälä 		else
4850b318b824SVille Syrjälä 			return i8xx_irq_handler;
4851b318b824SVille Syrjälä 	} else {
4852b318b824SVille Syrjälä 		if (INTEL_GEN(dev_priv) >= 11)
4853b318b824SVille Syrjälä 			return gen11_irq_handler;
4854b318b824SVille Syrjälä 		else if (INTEL_GEN(dev_priv) >= 8)
4855b318b824SVille Syrjälä 			return gen8_irq_handler;
4856b318b824SVille Syrjälä 		else
4857b318b824SVille Syrjälä 			return ironlake_irq_handler;
4858b318b824SVille Syrjälä 	}
4859b318b824SVille Syrjälä }
4860b318b824SVille Syrjälä 
4861b318b824SVille Syrjälä static void intel_irq_reset(struct drm_i915_private *dev_priv)
4862b318b824SVille Syrjälä {
4863b318b824SVille Syrjälä 	if (HAS_GMCH(dev_priv)) {
4864b318b824SVille Syrjälä 		if (IS_CHERRYVIEW(dev_priv))
4865b318b824SVille Syrjälä 			cherryview_irq_reset(dev_priv);
4866b318b824SVille Syrjälä 		else if (IS_VALLEYVIEW(dev_priv))
4867b318b824SVille Syrjälä 			valleyview_irq_reset(dev_priv);
4868b318b824SVille Syrjälä 		else if (IS_GEN(dev_priv, 4))
4869b318b824SVille Syrjälä 			i965_irq_reset(dev_priv);
4870b318b824SVille Syrjälä 		else if (IS_GEN(dev_priv, 3))
4871b318b824SVille Syrjälä 			i915_irq_reset(dev_priv);
4872b318b824SVille Syrjälä 		else
4873b318b824SVille Syrjälä 			i8xx_irq_reset(dev_priv);
4874b318b824SVille Syrjälä 	} else {
4875b318b824SVille Syrjälä 		if (INTEL_GEN(dev_priv) >= 11)
4876b318b824SVille Syrjälä 			gen11_irq_reset(dev_priv);
4877b318b824SVille Syrjälä 		else if (INTEL_GEN(dev_priv) >= 8)
4878b318b824SVille Syrjälä 			gen8_irq_reset(dev_priv);
4879b318b824SVille Syrjälä 		else
4880b318b824SVille Syrjälä 			ironlake_irq_reset(dev_priv);
4881b318b824SVille Syrjälä 	}
4882b318b824SVille Syrjälä }
4883b318b824SVille Syrjälä 
4884b318b824SVille Syrjälä static void intel_irq_postinstall(struct drm_i915_private *dev_priv)
4885b318b824SVille Syrjälä {
4886b318b824SVille Syrjälä 	if (HAS_GMCH(dev_priv)) {
4887b318b824SVille Syrjälä 		if (IS_CHERRYVIEW(dev_priv))
4888b318b824SVille Syrjälä 			cherryview_irq_postinstall(dev_priv);
4889b318b824SVille Syrjälä 		else if (IS_VALLEYVIEW(dev_priv))
4890b318b824SVille Syrjälä 			valleyview_irq_postinstall(dev_priv);
4891b318b824SVille Syrjälä 		else if (IS_GEN(dev_priv, 4))
4892b318b824SVille Syrjälä 			i965_irq_postinstall(dev_priv);
4893b318b824SVille Syrjälä 		else if (IS_GEN(dev_priv, 3))
4894b318b824SVille Syrjälä 			i915_irq_postinstall(dev_priv);
4895b318b824SVille Syrjälä 		else
4896b318b824SVille Syrjälä 			i8xx_irq_postinstall(dev_priv);
4897b318b824SVille Syrjälä 	} else {
4898b318b824SVille Syrjälä 		if (INTEL_GEN(dev_priv) >= 11)
4899b318b824SVille Syrjälä 			gen11_irq_postinstall(dev_priv);
4900b318b824SVille Syrjälä 		else if (INTEL_GEN(dev_priv) >= 8)
4901b318b824SVille Syrjälä 			gen8_irq_postinstall(dev_priv);
4902b318b824SVille Syrjälä 		else
4903b318b824SVille Syrjälä 			ironlake_irq_postinstall(dev_priv);
4904b318b824SVille Syrjälä 	}
4905b318b824SVille Syrjälä }
4906b318b824SVille Syrjälä 
4907cefcff8fSJoonas Lahtinen /**
4908fca52a55SDaniel Vetter  * intel_irq_install - enables the hardware interrupt
4909fca52a55SDaniel Vetter  * @dev_priv: i915 device instance
4910fca52a55SDaniel Vetter  *
4911fca52a55SDaniel Vetter  * This function enables the hardware interrupt handling, but leaves the hotplug
4912fca52a55SDaniel Vetter  * handling still disabled. It is called after intel_irq_init().
4913fca52a55SDaniel Vetter  *
4914fca52a55SDaniel Vetter  * In the driver load and resume code we need working interrupts in a few places
4915fca52a55SDaniel Vetter  * but don't want to deal with the hassle of concurrent probe and hotplug
4916fca52a55SDaniel Vetter  * workers. Hence the split into this two-stage approach.
4917fca52a55SDaniel Vetter  */
49182aeb7d3aSDaniel Vetter int intel_irq_install(struct drm_i915_private *dev_priv)
49192aeb7d3aSDaniel Vetter {
4920b318b824SVille Syrjälä 	int irq = dev_priv->drm.pdev->irq;
4921b318b824SVille Syrjälä 	int ret;
4922b318b824SVille Syrjälä 
49232aeb7d3aSDaniel Vetter 	/*
49242aeb7d3aSDaniel Vetter 	 * We enable some interrupt sources in our postinstall hooks, so mark
49252aeb7d3aSDaniel Vetter 	 * interrupts as enabled _before_ actually enabling them to avoid
49262aeb7d3aSDaniel Vetter 	 * special cases in our ordering checks.
49272aeb7d3aSDaniel Vetter 	 */
4928ad1443f0SSagar Arun Kamble 	dev_priv->runtime_pm.irqs_enabled = true;
49292aeb7d3aSDaniel Vetter 
4930b318b824SVille Syrjälä 	dev_priv->drm.irq_enabled = true;
4931b318b824SVille Syrjälä 
4932b318b824SVille Syrjälä 	intel_irq_reset(dev_priv);
4933b318b824SVille Syrjälä 
4934b318b824SVille Syrjälä 	ret = request_irq(irq, intel_irq_handler(dev_priv),
4935b318b824SVille Syrjälä 			  IRQF_SHARED, DRIVER_NAME, dev_priv);
4936b318b824SVille Syrjälä 	if (ret < 0) {
4937b318b824SVille Syrjälä 		dev_priv->drm.irq_enabled = false;
4938b318b824SVille Syrjälä 		return ret;
4939b318b824SVille Syrjälä 	}
4940b318b824SVille Syrjälä 
4941b318b824SVille Syrjälä 	intel_irq_postinstall(dev_priv);
4942b318b824SVille Syrjälä 
4943b318b824SVille Syrjälä 	return ret;
49442aeb7d3aSDaniel Vetter }
49452aeb7d3aSDaniel Vetter 
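/*
 * Illustrative sketch only (not part of this file): one way a caller could
 * sequence the two-stage bring-up and teardown described in the kernel-doc
 * above. The function name example_irq_bringup and the placement of the
 * "work needing interrupts" step are hypothetical; only the intel_irq_*()
 * calls are the interfaces documented here.
 */
static int __maybe_unused example_irq_bringup(struct drm_i915_private *i915)
{
	int ret;

	/* Stage 1: work items, timers and vtables; no interrupt requested yet. */
	intel_irq_init(i915);

	/* Stage 2: reset, request_irq() and run the postinstall hooks. */
	ret = intel_irq_install(i915);
	if (ret)
		return ret;

	/* ... probe/resume work that needs working interrupts ... */

	/* Teardown mirrors the bring-up order. */
	intel_irq_uninstall(i915);	/* reset, free_irq(), cancel hotplug work */
	intel_irq_fini(i915);		/* free the L3 parity remap buffers */

	return 0;
}
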
4946fca52a55SDaniel Vetter /**
4947fca52a55SDaniel Vetter  * intel_irq_uninstall - finalizes all irq handling
4948fca52a55SDaniel Vetter  * @dev_priv: i915 device instance
4949fca52a55SDaniel Vetter  *
4950fca52a55SDaniel Vetter  * This stops interrupt and hotplug handling and unregisters and frees all
4951fca52a55SDaniel Vetter  * resources acquired in the init functions.
4952fca52a55SDaniel Vetter  */
49532aeb7d3aSDaniel Vetter void intel_irq_uninstall(struct drm_i915_private *dev_priv)
49542aeb7d3aSDaniel Vetter {
4955b318b824SVille Syrjälä 	int irq = dev_priv->drm.pdev->irq;
4956b318b824SVille Syrjälä 
4957b318b824SVille Syrjälä 	/*
4958b318b824SVille Syrjälä 	 * FIXME we can get called twice during driver load
4959b318b824SVille Syrjälä 	 * error handling due to intel_modeset_cleanup()
4960b318b824SVille Syrjälä 	 * calling us out of sequence. Would be nice if
4961b318b824SVille Syrjälä 	 * it didn't do that...
4962b318b824SVille Syrjälä 	 */
4963b318b824SVille Syrjälä 	if (!dev_priv->drm.irq_enabled)
4964b318b824SVille Syrjälä 		return;
4965b318b824SVille Syrjälä 
4966b318b824SVille Syrjälä 	dev_priv->drm.irq_enabled = false;
4967b318b824SVille Syrjälä 
4968b318b824SVille Syrjälä 	intel_irq_reset(dev_priv);
4969b318b824SVille Syrjälä 
4970b318b824SVille Syrjälä 	free_irq(irq, dev_priv);
4971b318b824SVille Syrjälä 
49722aeb7d3aSDaniel Vetter 	intel_hpd_cancel_work(dev_priv);
4973ad1443f0SSagar Arun Kamble 	dev_priv->runtime_pm.irqs_enabled = false;
49742aeb7d3aSDaniel Vetter }
49752aeb7d3aSDaniel Vetter 
4976fca52a55SDaniel Vetter /**
4977fca52a55SDaniel Vetter  * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
4978fca52a55SDaniel Vetter  * @dev_priv: i915 device instance
4979fca52a55SDaniel Vetter  *
4980fca52a55SDaniel Vetter  * This function is used to disable interrupts at runtime, both in the runtime
4981fca52a55SDaniel Vetter  * pm and the system suspend/resume code.
4982fca52a55SDaniel Vetter  */
4983b963291cSDaniel Vetter void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
4984c67a470bSPaulo Zanoni {
4985b318b824SVille Syrjälä 	intel_irq_reset(dev_priv);
4986ad1443f0SSagar Arun Kamble 	dev_priv->runtime_pm.irqs_enabled = false;
4987315ca4c4SVille Syrjälä 	intel_synchronize_irq(dev_priv);
4988c67a470bSPaulo Zanoni }
4989c67a470bSPaulo Zanoni 
4990fca52a55SDaniel Vetter /**
4991fca52a55SDaniel Vetter  * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
4992fca52a55SDaniel Vetter  * @dev_priv: i915 device instance
4993fca52a55SDaniel Vetter  *
4994fca52a55SDaniel Vetter  * This function is used to enable interrupts at runtime, both in the runtime
4995fca52a55SDaniel Vetter  * pm and the system suspend/resume code.
4996fca52a55SDaniel Vetter  */
4997b963291cSDaniel Vetter void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
4998c67a470bSPaulo Zanoni {
4999ad1443f0SSagar Arun Kamble 	dev_priv->runtime_pm.irqs_enabled = true;
5000b318b824SVille Syrjälä 	intel_irq_reset(dev_priv);
5001b318b824SVille Syrjälä 	intel_irq_postinstall(dev_priv);
5002c67a470bSPaulo Zanoni }
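
/*
 * Illustrative sketch only (not part of this file): how the runtime
 * suspend/resume paths are expected to bracket the interrupt support, per
 * the kernel-doc above. The example_* wrappers are hypothetical stand-ins
 * for the real callers in the suspend/resume code.
 */
static void __maybe_unused example_runtime_suspend_irqs(struct drm_i915_private *i915)
{
	/* Quiesce: reset the IRQ registers and wait for handlers to finish. */
	intel_runtime_pm_disable_interrupts(i915);
}

static void __maybe_unused example_runtime_resume_irqs(struct drm_i915_private *i915)
{
	/* Re-arm: reset and re-run the postinstall hooks. */
	intel_runtime_pm_enable_interrupts(i915);
}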
5003