xref: /openbmc/linux/drivers/gpu/drm/i915/i915_irq.c (revision 8b5689d7e3ca889a7e55c79bc335b33e3f170a18)
1c0e09200SDave Airlie /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
2c0e09200SDave Airlie  */
3c0e09200SDave Airlie /*
4c0e09200SDave Airlie  * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
5c0e09200SDave Airlie  * All Rights Reserved.
6c0e09200SDave Airlie  *
7c0e09200SDave Airlie  * Permission is hereby granted, free of charge, to any person obtaining a
8c0e09200SDave Airlie  * copy of this software and associated documentation files (the
9c0e09200SDave Airlie  * "Software"), to deal in the Software without restriction, including
10c0e09200SDave Airlie  * without limitation the rights to use, copy, modify, merge, publish,
11c0e09200SDave Airlie  * distribute, sub license, and/or sell copies of the Software, and to
12c0e09200SDave Airlie  * permit persons to whom the Software is furnished to do so, subject to
13c0e09200SDave Airlie  * the following conditions:
14c0e09200SDave Airlie  *
15c0e09200SDave Airlie  * The above copyright notice and this permission notice (including the
16c0e09200SDave Airlie  * next paragraph) shall be included in all copies or substantial portions
17c0e09200SDave Airlie  * of the Software.
18c0e09200SDave Airlie  *
19c0e09200SDave Airlie  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20c0e09200SDave Airlie  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21c0e09200SDave Airlie  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22c0e09200SDave Airlie  * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23c0e09200SDave Airlie  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24c0e09200SDave Airlie  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25c0e09200SDave Airlie  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26c0e09200SDave Airlie  *
27c0e09200SDave Airlie  */
28c0e09200SDave Airlie 
29a70491ccSJoe Perches #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30a70491ccSJoe Perches 
31b2c88f5bSDamien Lespiau #include <linux/circ_buf.h>
3255367a27SJani Nikula #include <linux/cpuidle.h>
3355367a27SJani Nikula #include <linux/slab.h>
3455367a27SJani Nikula #include <linux/sysrq.h>
3555367a27SJani Nikula 
36fcd70cd3SDaniel Vetter #include <drm/drm_drv.h>
3755367a27SJani Nikula #include <drm/drm_irq.h>
38760285e7SDavid Howells #include <drm/i915_drm.h>
3955367a27SJani Nikula 
40df0566a6SJani Nikula #include "display/intel_fifo_underrun.h"
41df0566a6SJani Nikula #include "display/intel_hotplug.h"
42df0566a6SJani Nikula #include "display/intel_lpe_audio.h"
43df0566a6SJani Nikula #include "display/intel_psr.h"
44df0566a6SJani Nikula 
45c0e09200SDave Airlie #include "i915_drv.h"
46440e2b3dSJani Nikula #include "i915_irq.h"
471c5d22f7SChris Wilson #include "i915_trace.h"
4879e53945SJesse Barnes #include "intel_drv.h"
49d13616dbSJani Nikula #include "intel_pm.h"
50c0e09200SDave Airlie 
51fca52a55SDaniel Vetter /**
52fca52a55SDaniel Vetter  * DOC: interrupt handling
53fca52a55SDaniel Vetter  *
54fca52a55SDaniel Vetter  * These functions provide the basic support for enabling and disabling
55fca52a55SDaniel Vetter  * interrupt handling. There's a lot more functionality in i915_irq.c
56fca52a55SDaniel Vetter  * and related files, but that will be described in separate chapters.
57fca52a55SDaniel Vetter  */
58fca52a55SDaniel Vetter 
59e4ce95aaSVille Syrjälä static const u32 hpd_ilk[HPD_NUM_PINS] = {
60e4ce95aaSVille Syrjälä 	[HPD_PORT_A] = DE_DP_A_HOTPLUG,
61e4ce95aaSVille Syrjälä };
62e4ce95aaSVille Syrjälä 
6323bb4cb5SVille Syrjälä static const u32 hpd_ivb[HPD_NUM_PINS] = {
6423bb4cb5SVille Syrjälä 	[HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
6523bb4cb5SVille Syrjälä };
6623bb4cb5SVille Syrjälä 
673a3b3c7dSVille Syrjälä static const u32 hpd_bdw[HPD_NUM_PINS] = {
683a3b3c7dSVille Syrjälä 	[HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG,
693a3b3c7dSVille Syrjälä };
703a3b3c7dSVille Syrjälä 
717c7e10dbSVille Syrjälä static const u32 hpd_ibx[HPD_NUM_PINS] = {
72e5868a31SEgbert Eich 	[HPD_CRT] = SDE_CRT_HOTPLUG,
73e5868a31SEgbert Eich 	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
74e5868a31SEgbert Eich 	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
75e5868a31SEgbert Eich 	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
76e5868a31SEgbert Eich 	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
77e5868a31SEgbert Eich };
78e5868a31SEgbert Eich 
797c7e10dbSVille Syrjälä static const u32 hpd_cpt[HPD_NUM_PINS] = {
80e5868a31SEgbert Eich 	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
8173c352a2SDaniel Vetter 	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
82e5868a31SEgbert Eich 	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
83e5868a31SEgbert Eich 	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
84e5868a31SEgbert Eich 	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
85e5868a31SEgbert Eich };
86e5868a31SEgbert Eich 
8726951cafSXiong Zhang static const u32 hpd_spt[HPD_NUM_PINS] = {
8874c0b395SVille Syrjälä 	[HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
8926951cafSXiong Zhang 	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
9026951cafSXiong Zhang 	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
9126951cafSXiong Zhang 	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
9226951cafSXiong Zhang 	[HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT
9326951cafSXiong Zhang };
9426951cafSXiong Zhang 
957c7e10dbSVille Syrjälä static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
96e5868a31SEgbert Eich 	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
97e5868a31SEgbert Eich 	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
98e5868a31SEgbert Eich 	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
99e5868a31SEgbert Eich 	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
100e5868a31SEgbert Eich 	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
101e5868a31SEgbert Eich 	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
102e5868a31SEgbert Eich };
103e5868a31SEgbert Eich 
1047c7e10dbSVille Syrjälä static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
105e5868a31SEgbert Eich 	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
106e5868a31SEgbert Eich 	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
107e5868a31SEgbert Eich 	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
108e5868a31SEgbert Eich 	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
109e5868a31SEgbert Eich 	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
110e5868a31SEgbert Eich 	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
111e5868a31SEgbert Eich };
112e5868a31SEgbert Eich 
1134bca26d0SVille Syrjälä static const u32 hpd_status_i915[HPD_NUM_PINS] = {
114e5868a31SEgbert Eich 	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
115e5868a31SEgbert Eich 	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
116e5868a31SEgbert Eich 	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
117e5868a31SEgbert Eich 	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
118e5868a31SEgbert Eich 	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
119e5868a31SEgbert Eich 	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
120e5868a31SEgbert Eich };
121e5868a31SEgbert Eich 
122e0a20ad7SShashank Sharma /* BXT hpd list */
123e0a20ad7SShashank Sharma static const u32 hpd_bxt[HPD_NUM_PINS] = {
1247f3561beSSonika Jindal 	[HPD_PORT_A] = BXT_DE_PORT_HP_DDIA,
125e0a20ad7SShashank Sharma 	[HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
126e0a20ad7SShashank Sharma 	[HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
127e0a20ad7SShashank Sharma };
128e0a20ad7SShashank Sharma 
129b796b971SDhinakaran Pandiyan static const u32 hpd_gen11[HPD_NUM_PINS] = {
130b796b971SDhinakaran Pandiyan 	[HPD_PORT_C] = GEN11_TC1_HOTPLUG | GEN11_TBT1_HOTPLUG,
131b796b971SDhinakaran Pandiyan 	[HPD_PORT_D] = GEN11_TC2_HOTPLUG | GEN11_TBT2_HOTPLUG,
132b796b971SDhinakaran Pandiyan 	[HPD_PORT_E] = GEN11_TC3_HOTPLUG | GEN11_TBT3_HOTPLUG,
133b796b971SDhinakaran Pandiyan 	[HPD_PORT_F] = GEN11_TC4_HOTPLUG | GEN11_TBT4_HOTPLUG
134121e758eSDhinakaran Pandiyan };
135121e758eSDhinakaran Pandiyan 
13631604222SAnusha Srivatsa static const u32 hpd_icp[HPD_NUM_PINS] = {
13731604222SAnusha Srivatsa 	[HPD_PORT_A] = SDE_DDIA_HOTPLUG_ICP,
13831604222SAnusha Srivatsa 	[HPD_PORT_B] = SDE_DDIB_HOTPLUG_ICP,
13931604222SAnusha Srivatsa 	[HPD_PORT_C] = SDE_TC1_HOTPLUG_ICP,
14031604222SAnusha Srivatsa 	[HPD_PORT_D] = SDE_TC2_HOTPLUG_ICP,
14131604222SAnusha Srivatsa 	[HPD_PORT_E] = SDE_TC3_HOTPLUG_ICP,
14231604222SAnusha Srivatsa 	[HPD_PORT_F] = SDE_TC4_HOTPLUG_ICP
14331604222SAnusha Srivatsa };
14431604222SAnusha Srivatsa 
145c6f7acb8SMatt Roper static const u32 hpd_mcc[HPD_NUM_PINS] = {
146c6f7acb8SMatt Roper 	[HPD_PORT_A] = SDE_DDIA_HOTPLUG_ICP,
147c6f7acb8SMatt Roper 	[HPD_PORT_B] = SDE_DDIB_HOTPLUG_ICP,
148c6f7acb8SMatt Roper 	[HPD_PORT_C] = SDE_TC1_HOTPLUG_ICP
149c6f7acb8SMatt Roper };
150c6f7acb8SMatt Roper 
15165f42cdcSPaulo Zanoni static void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr,
15268eb49b1SPaulo Zanoni 			   i915_reg_t iir, i915_reg_t ier)
15368eb49b1SPaulo Zanoni {
15465f42cdcSPaulo Zanoni 	intel_uncore_write(uncore, imr, 0xffffffff);
15565f42cdcSPaulo Zanoni 	intel_uncore_posting_read(uncore, imr);
15668eb49b1SPaulo Zanoni 
15765f42cdcSPaulo Zanoni 	intel_uncore_write(uncore, ier, 0);
15868eb49b1SPaulo Zanoni 
1595c502442SPaulo Zanoni 	/* IIR can theoretically queue up two events. Be paranoid. */
16065f42cdcSPaulo Zanoni 	intel_uncore_write(uncore, iir, 0xffffffff);
16165f42cdcSPaulo Zanoni 	intel_uncore_posting_read(uncore, iir);
16265f42cdcSPaulo Zanoni 	intel_uncore_write(uncore, iir, 0xffffffff);
16365f42cdcSPaulo Zanoni 	intel_uncore_posting_read(uncore, iir);
16468eb49b1SPaulo Zanoni }
1655c502442SPaulo Zanoni 
16665f42cdcSPaulo Zanoni static void gen2_irq_reset(struct intel_uncore *uncore)
16768eb49b1SPaulo Zanoni {
16865f42cdcSPaulo Zanoni 	intel_uncore_write16(uncore, GEN2_IMR, 0xffff);
16965f42cdcSPaulo Zanoni 	intel_uncore_posting_read16(uncore, GEN2_IMR);
170a9d356a6SPaulo Zanoni 
17165f42cdcSPaulo Zanoni 	intel_uncore_write16(uncore, GEN2_IER, 0);
17268eb49b1SPaulo Zanoni 
17368eb49b1SPaulo Zanoni 	/* IIR can theoretically queue up two events. Be paranoid. */
17465f42cdcSPaulo Zanoni 	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
17565f42cdcSPaulo Zanoni 	intel_uncore_posting_read16(uncore, GEN2_IIR);
17665f42cdcSPaulo Zanoni 	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
17765f42cdcSPaulo Zanoni 	intel_uncore_posting_read16(uncore, GEN2_IIR);
17868eb49b1SPaulo Zanoni }
17968eb49b1SPaulo Zanoni 
180b16b2a2fSPaulo Zanoni #define GEN8_IRQ_RESET_NDX(uncore, type, which) \
18168eb49b1SPaulo Zanoni ({ \
18268eb49b1SPaulo Zanoni 	unsigned int which_ = which; \
183b16b2a2fSPaulo Zanoni 	gen3_irq_reset((uncore), GEN8_##type##_IMR(which_), \
18468eb49b1SPaulo Zanoni 		       GEN8_##type##_IIR(which_), GEN8_##type##_IER(which_)); \
18568eb49b1SPaulo Zanoni })
18668eb49b1SPaulo Zanoni 
187b16b2a2fSPaulo Zanoni #define GEN3_IRQ_RESET(uncore, type) \
188b16b2a2fSPaulo Zanoni 	gen3_irq_reset((uncore), type##IMR, type##IIR, type##IER)
18968eb49b1SPaulo Zanoni 
190b16b2a2fSPaulo Zanoni #define GEN2_IRQ_RESET(uncore) \
191b16b2a2fSPaulo Zanoni 	gen2_irq_reset(uncore)
192e9e9848aSVille Syrjälä 
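/*
 * Illustrative expansions of the reset helpers above; a sketch of the token
 * pasting only (DEIIR/DEIER are assumed to exist alongside DEIMR):
 *
 *   GEN3_IRQ_RESET(uncore, DE)
 *     -> gen3_irq_reset(uncore, DEIMR, DEIIR, DEIER);
 *
 *   GEN8_IRQ_RESET_NDX(uncore, GT, 2)
 *     -> gen3_irq_reset(uncore, GEN8_GT_IMR(2), GEN8_GT_IIR(2), GEN8_GT_IER(2));
 */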
193337ba017SPaulo Zanoni /*
194337ba017SPaulo Zanoni  * We should clear IMR at preinstall/uninstall, and just check at postinstall.
195337ba017SPaulo Zanoni  */
19665f42cdcSPaulo Zanoni static void gen3_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg)
197b51a2842SVille Syrjälä {
19865f42cdcSPaulo Zanoni 	u32 val = intel_uncore_read(uncore, reg);
199b51a2842SVille Syrjälä 
200b51a2842SVille Syrjälä 	if (val == 0)
201b51a2842SVille Syrjälä 		return;
202b51a2842SVille Syrjälä 
203b51a2842SVille Syrjälä 	WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
204f0f59a00SVille Syrjälä 	     i915_mmio_reg_offset(reg), val);
20565f42cdcSPaulo Zanoni 	intel_uncore_write(uncore, reg, 0xffffffff);
20665f42cdcSPaulo Zanoni 	intel_uncore_posting_read(uncore, reg);
20765f42cdcSPaulo Zanoni 	intel_uncore_write(uncore, reg, 0xffffffff);
20865f42cdcSPaulo Zanoni 	intel_uncore_posting_read(uncore, reg);
209b51a2842SVille Syrjälä }
210337ba017SPaulo Zanoni 
21165f42cdcSPaulo Zanoni static void gen2_assert_iir_is_zero(struct intel_uncore *uncore)
212e9e9848aSVille Syrjälä {
21365f42cdcSPaulo Zanoni 	u16 val = intel_uncore_read16(uncore, GEN2_IIR);
214e9e9848aSVille Syrjälä 
215e9e9848aSVille Syrjälä 	if (val == 0)
216e9e9848aSVille Syrjälä 		return;
217e9e9848aSVille Syrjälä 
218e9e9848aSVille Syrjälä 	WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
2199d9523d8SPaulo Zanoni 	     i915_mmio_reg_offset(GEN2_IIR), val);
22065f42cdcSPaulo Zanoni 	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
22165f42cdcSPaulo Zanoni 	intel_uncore_posting_read16(uncore, GEN2_IIR);
22265f42cdcSPaulo Zanoni 	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
22365f42cdcSPaulo Zanoni 	intel_uncore_posting_read16(uncore, GEN2_IIR);
224e9e9848aSVille Syrjälä }
225e9e9848aSVille Syrjälä 
22665f42cdcSPaulo Zanoni static void gen3_irq_init(struct intel_uncore *uncore,
22768eb49b1SPaulo Zanoni 			  i915_reg_t imr, u32 imr_val,
22868eb49b1SPaulo Zanoni 			  i915_reg_t ier, u32 ier_val,
22968eb49b1SPaulo Zanoni 			  i915_reg_t iir)
23068eb49b1SPaulo Zanoni {
23165f42cdcSPaulo Zanoni 	gen3_assert_iir_is_zero(uncore, iir);
23235079899SPaulo Zanoni 
23365f42cdcSPaulo Zanoni 	intel_uncore_write(uncore, ier, ier_val);
23465f42cdcSPaulo Zanoni 	intel_uncore_write(uncore, imr, imr_val);
23565f42cdcSPaulo Zanoni 	intel_uncore_posting_read(uncore, imr);
23668eb49b1SPaulo Zanoni }
23735079899SPaulo Zanoni 
23865f42cdcSPaulo Zanoni static void gen2_irq_init(struct intel_uncore *uncore,
2392918c3caSPaulo Zanoni 			  u32 imr_val, u32 ier_val)
24068eb49b1SPaulo Zanoni {
24165f42cdcSPaulo Zanoni 	gen2_assert_iir_is_zero(uncore);
24268eb49b1SPaulo Zanoni 
24365f42cdcSPaulo Zanoni 	intel_uncore_write16(uncore, GEN2_IER, ier_val);
24465f42cdcSPaulo Zanoni 	intel_uncore_write16(uncore, GEN2_IMR, imr_val);
24565f42cdcSPaulo Zanoni 	intel_uncore_posting_read16(uncore, GEN2_IMR);
24668eb49b1SPaulo Zanoni }
24768eb49b1SPaulo Zanoni 
248b16b2a2fSPaulo Zanoni #define GEN8_IRQ_INIT_NDX(uncore, type, which, imr_val, ier_val) \
24968eb49b1SPaulo Zanoni ({ \
25068eb49b1SPaulo Zanoni 	unsigned int which_ = which; \
251b16b2a2fSPaulo Zanoni 	gen3_irq_init((uncore), \
25268eb49b1SPaulo Zanoni 		      GEN8_##type##_IMR(which_), imr_val, \
25368eb49b1SPaulo Zanoni 		      GEN8_##type##_IER(which_), ier_val, \
25468eb49b1SPaulo Zanoni 		      GEN8_##type##_IIR(which_)); \
25568eb49b1SPaulo Zanoni })
25668eb49b1SPaulo Zanoni 
257b16b2a2fSPaulo Zanoni #define GEN3_IRQ_INIT(uncore, type, imr_val, ier_val) \
258b16b2a2fSPaulo Zanoni 	gen3_irq_init((uncore), \
25968eb49b1SPaulo Zanoni 		      type##IMR, imr_val, \
26068eb49b1SPaulo Zanoni 		      type##IER, ier_val, \
26168eb49b1SPaulo Zanoni 		      type##IIR)
26268eb49b1SPaulo Zanoni 
263b16b2a2fSPaulo Zanoni #define GEN2_IRQ_INIT(uncore, imr_val, ier_val) \
264b16b2a2fSPaulo Zanoni 	gen2_irq_init((uncore), imr_val, ier_val)
265e9e9848aSVille Syrjälä 
266c9a9a268SImre Deak static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
267633023a4SDaniele Ceraolo Spurio static void guc_irq_handler(struct intel_guc *guc, u16 guc_iir);
268c9a9a268SImre Deak 
2690706f17cSEgbert Eich /* For display hotplug interrupt */
2700706f17cSEgbert Eich static inline void
2710706f17cSEgbert Eich i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
272a9c287c9SJani Nikula 				     u32 mask,
273a9c287c9SJani Nikula 				     u32 bits)
2740706f17cSEgbert Eich {
275a9c287c9SJani Nikula 	u32 val;
2760706f17cSEgbert Eich 
27767520415SChris Wilson 	lockdep_assert_held(&dev_priv->irq_lock);
2780706f17cSEgbert Eich 	WARN_ON(bits & ~mask);
2790706f17cSEgbert Eich 
2800706f17cSEgbert Eich 	val = I915_READ(PORT_HOTPLUG_EN);
2810706f17cSEgbert Eich 	val &= ~mask;
2820706f17cSEgbert Eich 	val |= bits;
2830706f17cSEgbert Eich 	I915_WRITE(PORT_HOTPLUG_EN, val);
2840706f17cSEgbert Eich }
2850706f17cSEgbert Eich 
2860706f17cSEgbert Eich /**
2870706f17cSEgbert Eich  * i915_hotplug_interrupt_update - update hotplug interrupt enable
2880706f17cSEgbert Eich  * @dev_priv: driver private
2890706f17cSEgbert Eich  * @mask: bits to update
2900706f17cSEgbert Eich  * @bits: bits to enable
2910706f17cSEgbert Eich  * NOTE: the HPD enable bits are modified both inside and outside
2920706f17cSEgbert Eich  * of an interrupt context. To keep read-modify-write cycles from
2930706f17cSEgbert Eich  * interfering, these bits are protected by a spinlock. Since this
2940706f17cSEgbert Eich  * function is usually not called from a context where the lock is
2950706f17cSEgbert Eich  * held already, this function acquires the lock itself. A non-locking
2960706f17cSEgbert Eich  * version is also available.
2970706f17cSEgbert Eich  */
2980706f17cSEgbert Eich void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
299a9c287c9SJani Nikula 				   u32 mask,
300a9c287c9SJani Nikula 				   u32 bits)
3010706f17cSEgbert Eich {
3020706f17cSEgbert Eich 	spin_lock_irq(&dev_priv->irq_lock);
3030706f17cSEgbert Eich 	i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
3040706f17cSEgbert Eich 	spin_unlock_irq(&dev_priv->irq_lock);
3050706f17cSEgbert Eich }
3060706f17cSEgbert Eich 
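/*
 * Minimal usage sketch for the helper above (illustrative only, not a call
 * site taken from the driver): unmask the CRT hotplug detect enable bit from
 * hpd_mask_i915[] while leaving the other PORT_HOTPLUG_EN bits untouched.
 */
static inline void example_enable_crt_hotplug(struct drm_i915_private *dev_priv)
{
	/* update only the CRT bit, and set it */
	i915_hotplug_interrupt_update(dev_priv,
				      CRT_HOTPLUG_INT_EN,
				      CRT_HOTPLUG_INT_EN);
}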
30796606f3bSOscar Mateo static u32
3089b77011eSTvrtko Ursulin gen11_gt_engine_identity(struct intel_gt *gt,
30996606f3bSOscar Mateo 			 const unsigned int bank, const unsigned int bit);
31096606f3bSOscar Mateo 
3119b77011eSTvrtko Ursulin static bool gen11_reset_one_iir(struct intel_gt *gt,
31296606f3bSOscar Mateo 				const unsigned int bank,
31396606f3bSOscar Mateo 				const unsigned int bit)
31496606f3bSOscar Mateo {
3159b77011eSTvrtko Ursulin 	void __iomem * const regs = gt->uncore->regs;
31696606f3bSOscar Mateo 	u32 dw;
31796606f3bSOscar Mateo 
3189b77011eSTvrtko Ursulin 	lockdep_assert_held(&gt->i915->irq_lock);
31996606f3bSOscar Mateo 
32096606f3bSOscar Mateo 	dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
32196606f3bSOscar Mateo 	if (dw & BIT(bit)) {
32296606f3bSOscar Mateo 		/*
32396606f3bSOscar Mateo 		 * According to the BSpec, DW_IIR bits cannot be cleared without
32496606f3bSOscar Mateo 		 * first servicing the Selector & Shared IIR registers.
32596606f3bSOscar Mateo 		 */
3269b77011eSTvrtko Ursulin 		gen11_gt_engine_identity(gt, bank, bit);
32796606f3bSOscar Mateo 
32896606f3bSOscar Mateo 		/*
32996606f3bSOscar Mateo 		 * We locked GT INT DW by reading it. If we want to (try
33096606f3bSOscar Mateo 		 * to) recover from this successfully, we need to clear
33196606f3bSOscar Mateo 		 * our bit, otherwise we are locking the register for
33296606f3bSOscar Mateo 		 * everybody.
33396606f3bSOscar Mateo 		 */
33496606f3bSOscar Mateo 		raw_reg_write(regs, GEN11_GT_INTR_DW(bank), BIT(bit));
33596606f3bSOscar Mateo 
33696606f3bSOscar Mateo 		return true;
33796606f3bSOscar Mateo 	}
33896606f3bSOscar Mateo 
33996606f3bSOscar Mateo 	return false;
34096606f3bSOscar Mateo }
34196606f3bSOscar Mateo 
342d9dc34f1SVille Syrjälä /**
343d9dc34f1SVille Syrjälä  * ilk_update_display_irq - update DEIMR
344d9dc34f1SVille Syrjälä  * @dev_priv: driver private
345d9dc34f1SVille Syrjälä  * @interrupt_mask: mask of interrupt bits to update
346d9dc34f1SVille Syrjälä  * @enabled_irq_mask: mask of interrupt bits to enable
347d9dc34f1SVille Syrjälä  */
348fbdedaeaSVille Syrjälä void ilk_update_display_irq(struct drm_i915_private *dev_priv,
349a9c287c9SJani Nikula 			    u32 interrupt_mask,
350a9c287c9SJani Nikula 			    u32 enabled_irq_mask)
351036a4a7dSZhenyu Wang {
352a9c287c9SJani Nikula 	u32 new_val;
353d9dc34f1SVille Syrjälä 
35467520415SChris Wilson 	lockdep_assert_held(&dev_priv->irq_lock);
3554bc9d430SDaniel Vetter 
356d9dc34f1SVille Syrjälä 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
357d9dc34f1SVille Syrjälä 
3589df7575fSJesse Barnes 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
359c67a470bSPaulo Zanoni 		return;
360c67a470bSPaulo Zanoni 
361d9dc34f1SVille Syrjälä 	new_val = dev_priv->irq_mask;
362d9dc34f1SVille Syrjälä 	new_val &= ~interrupt_mask;
363d9dc34f1SVille Syrjälä 	new_val |= (~enabled_irq_mask & interrupt_mask);
364d9dc34f1SVille Syrjälä 
365d9dc34f1SVille Syrjälä 	if (new_val != dev_priv->irq_mask) {
366d9dc34f1SVille Syrjälä 		dev_priv->irq_mask = new_val;
3671ec14ad3SChris Wilson 		I915_WRITE(DEIMR, dev_priv->irq_mask);
3683143a2bfSChris Wilson 		POSTING_READ(DEIMR);
369036a4a7dSZhenyu Wang 	}
370036a4a7dSZhenyu Wang }
371036a4a7dSZhenyu Wang 
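/*
 * The mask/enable convention of ilk_update_display_irq() (and of the other
 * *_update_*_irq() helpers below) is easiest to see as wrappers; this is an
 * illustrative sketch, the example_* names are not part of the driver:
 */
static inline void example_ilk_enable_display_irq(struct drm_i915_private *dev_priv,
						  u32 bits)
{
	/* touch only @bits and set them -> clear them in DEIMR (unmask) */
	ilk_update_display_irq(dev_priv, bits, bits);
}

static inline void example_ilk_disable_display_irq(struct drm_i915_private *dev_priv,
						   u32 bits)
{
	/* touch only @bits and clear them -> set them in DEIMR (mask) */
	ilk_update_display_irq(dev_priv, bits, 0);
}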
37243eaea13SPaulo Zanoni /**
37343eaea13SPaulo Zanoni  * ilk_update_gt_irq - update GTIMR
37443eaea13SPaulo Zanoni  * @dev_priv: driver private
37543eaea13SPaulo Zanoni  * @interrupt_mask: mask of interrupt bits to update
37643eaea13SPaulo Zanoni  * @enabled_irq_mask: mask of interrupt bits to enable
37743eaea13SPaulo Zanoni  */
37843eaea13SPaulo Zanoni static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
379a9c287c9SJani Nikula 			      u32 interrupt_mask,
380a9c287c9SJani Nikula 			      u32 enabled_irq_mask)
38143eaea13SPaulo Zanoni {
38267520415SChris Wilson 	lockdep_assert_held(&dev_priv->irq_lock);
38343eaea13SPaulo Zanoni 
38415a17aaeSDaniel Vetter 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
38515a17aaeSDaniel Vetter 
3869df7575fSJesse Barnes 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
387c67a470bSPaulo Zanoni 		return;
388c67a470bSPaulo Zanoni 
38943eaea13SPaulo Zanoni 	dev_priv->gt_irq_mask &= ~interrupt_mask;
39043eaea13SPaulo Zanoni 	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
39143eaea13SPaulo Zanoni 	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
39243eaea13SPaulo Zanoni }
39343eaea13SPaulo Zanoni 
394a9c287c9SJani Nikula void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, u32 mask)
39543eaea13SPaulo Zanoni {
39643eaea13SPaulo Zanoni 	ilk_update_gt_irq(dev_priv, mask, mask);
397e33a4be8STvrtko Ursulin 	intel_uncore_posting_read_fw(&dev_priv->uncore, GTIMR);
39843eaea13SPaulo Zanoni }
39943eaea13SPaulo Zanoni 
400a9c287c9SJani Nikula void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, u32 mask)
40143eaea13SPaulo Zanoni {
40243eaea13SPaulo Zanoni 	ilk_update_gt_irq(dev_priv, mask, 0);
40343eaea13SPaulo Zanoni }
40443eaea13SPaulo Zanoni 
405f0f59a00SVille Syrjälä static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv)
406b900b949SImre Deak {
407d02b98b8SOscar Mateo 	WARN_ON_ONCE(INTEL_GEN(dev_priv) >= 11);
408d02b98b8SOscar Mateo 
409bca2bf2aSPandiyan, Dhinakaran 	return INTEL_GEN(dev_priv) >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
410b900b949SImre Deak }
411b900b949SImre Deak 
41258820574STvrtko Ursulin static void write_pm_imr(struct intel_gt *gt)
413a72fbc3aSImre Deak {
41458820574STvrtko Ursulin 	struct drm_i915_private *i915 = gt->i915;
41558820574STvrtko Ursulin 	struct intel_uncore *uncore = gt->uncore;
41658820574STvrtko Ursulin 	u32 mask = gt->pm_imr;
417917dc6b5SMika Kuoppala 	i915_reg_t reg;
418917dc6b5SMika Kuoppala 
41958820574STvrtko Ursulin 	if (INTEL_GEN(i915) >= 11) {
420917dc6b5SMika Kuoppala 		reg = GEN11_GPM_WGBOXPERF_INTR_MASK;
421917dc6b5SMika Kuoppala 		/* pm is in upper half */
422917dc6b5SMika Kuoppala 		mask = mask << 16;
42358820574STvrtko Ursulin 	} else if (INTEL_GEN(i915) >= 8) {
424917dc6b5SMika Kuoppala 		reg = GEN8_GT_IMR(2);
425917dc6b5SMika Kuoppala 	} else {
426917dc6b5SMika Kuoppala 		reg = GEN6_PMIMR;
427a72fbc3aSImre Deak 	}
428a72fbc3aSImre Deak 
42958820574STvrtko Ursulin 	intel_uncore_write(uncore, reg, mask);
43058820574STvrtko Ursulin 	intel_uncore_posting_read(uncore, reg);
431917dc6b5SMika Kuoppala }
432917dc6b5SMika Kuoppala 
43358820574STvrtko Ursulin static void write_pm_ier(struct intel_gt *gt)
434b900b949SImre Deak {
43558820574STvrtko Ursulin 	struct drm_i915_private *i915 = gt->i915;
43658820574STvrtko Ursulin 	struct intel_uncore *uncore = gt->uncore;
43758820574STvrtko Ursulin 	u32 mask = gt->pm_ier;
438917dc6b5SMika Kuoppala 	i915_reg_t reg;
439917dc6b5SMika Kuoppala 
44058820574STvrtko Ursulin 	if (INTEL_GEN(i915) >= 11) {
441917dc6b5SMika Kuoppala 		reg = GEN11_GPM_WGBOXPERF_INTR_ENABLE;
442917dc6b5SMika Kuoppala 		/* pm is in upper half */
443917dc6b5SMika Kuoppala 		mask = mask << 16;
44458820574STvrtko Ursulin 	} else if (INTEL_GEN(i915) >= 8) {
445917dc6b5SMika Kuoppala 		reg = GEN8_GT_IER(2);
446917dc6b5SMika Kuoppala 	} else {
447917dc6b5SMika Kuoppala 		reg = GEN6_PMIER;
448917dc6b5SMika Kuoppala 	}
449917dc6b5SMika Kuoppala 
45058820574STvrtko Ursulin 	intel_uncore_write(uncore, reg, mask);
451b900b949SImre Deak }
452b900b949SImre Deak 
453edbfdb45SPaulo Zanoni /**
454edbfdb45SPaulo Zanoni  * snb_update_pm_irq - update GEN6_PMIMR
45558820574STvrtko Ursulin  * @gt: gt for the interrupts
456edbfdb45SPaulo Zanoni  * @interrupt_mask: mask of interrupt bits to update
457edbfdb45SPaulo Zanoni  * @enabled_irq_mask: mask of interrupt bits to enable
458edbfdb45SPaulo Zanoni  */
45958820574STvrtko Ursulin static void snb_update_pm_irq(struct intel_gt *gt,
460a9c287c9SJani Nikula 			      u32 interrupt_mask,
461a9c287c9SJani Nikula 			      u32 enabled_irq_mask)
462edbfdb45SPaulo Zanoni {
463a9c287c9SJani Nikula 	u32 new_val;
464edbfdb45SPaulo Zanoni 
46515a17aaeSDaniel Vetter 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
46615a17aaeSDaniel Vetter 
46758820574STvrtko Ursulin 	lockdep_assert_held(&gt->i915->irq_lock);
468edbfdb45SPaulo Zanoni 
46958820574STvrtko Ursulin 	new_val = gt->pm_imr;
470f52ecbcfSPaulo Zanoni 	new_val &= ~interrupt_mask;
471f52ecbcfSPaulo Zanoni 	new_val |= (~enabled_irq_mask & interrupt_mask);
472f52ecbcfSPaulo Zanoni 
47358820574STvrtko Ursulin 	if (new_val != gt->pm_imr) {
47458820574STvrtko Ursulin 		gt->pm_imr = new_val;
47558820574STvrtko Ursulin 		write_pm_imr(gt);
476edbfdb45SPaulo Zanoni 	}
477f52ecbcfSPaulo Zanoni }
478edbfdb45SPaulo Zanoni 
47958820574STvrtko Ursulin void gen6_unmask_pm_irq(struct intel_gt *gt, u32 mask)
480edbfdb45SPaulo Zanoni {
48158820574STvrtko Ursulin 	if (WARN_ON(!intel_irqs_enabled(gt->i915)))
4829939fba2SImre Deak 		return;
4839939fba2SImre Deak 
48458820574STvrtko Ursulin 	snb_update_pm_irq(gt, mask, mask);
485edbfdb45SPaulo Zanoni }
486edbfdb45SPaulo Zanoni 
48758820574STvrtko Ursulin static void __gen6_mask_pm_irq(struct intel_gt *gt, u32 mask)
4889939fba2SImre Deak {
48958820574STvrtko Ursulin 	snb_update_pm_irq(gt, mask, 0);
4909939fba2SImre Deak }
4919939fba2SImre Deak 
49258820574STvrtko Ursulin void gen6_mask_pm_irq(struct intel_gt *gt, u32 mask)
493edbfdb45SPaulo Zanoni {
49458820574STvrtko Ursulin 	if (WARN_ON(!intel_irqs_enabled(gt->i915)))
4959939fba2SImre Deak 		return;
4969939fba2SImre Deak 
49758820574STvrtko Ursulin 	__gen6_mask_pm_irq(gt, mask);
498f4e9af4fSAkash Goel }
499f4e9af4fSAkash Goel 
5003814fd77SOscar Mateo static void gen6_reset_pm_iir(struct drm_i915_private *dev_priv, u32 reset_mask)
501f4e9af4fSAkash Goel {
502f4e9af4fSAkash Goel 	i915_reg_t reg = gen6_pm_iir(dev_priv);
503f4e9af4fSAkash Goel 
50467520415SChris Wilson 	lockdep_assert_held(&dev_priv->irq_lock);
505f4e9af4fSAkash Goel 
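	/*
	 * Write IIR twice: as with the gen2/gen3 reset helpers above, the
	 * IIR can theoretically queue up two events, so be paranoid and
	 * clear it again.
	 */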
506f4e9af4fSAkash Goel 	I915_WRITE(reg, reset_mask);
507f4e9af4fSAkash Goel 	I915_WRITE(reg, reset_mask);
508f4e9af4fSAkash Goel 	POSTING_READ(reg);
509f4e9af4fSAkash Goel }
510f4e9af4fSAkash Goel 
51158820574STvrtko Ursulin static void gen6_enable_pm_irq(struct intel_gt *gt, u32 enable_mask)
512f4e9af4fSAkash Goel {
51358820574STvrtko Ursulin 	lockdep_assert_held(&gt->i915->irq_lock);
514f4e9af4fSAkash Goel 
51558820574STvrtko Ursulin 	gt->pm_ier |= enable_mask;
51658820574STvrtko Ursulin 	write_pm_ier(gt);
51758820574STvrtko Ursulin 	gen6_unmask_pm_irq(gt, enable_mask);
518f4e9af4fSAkash Goel 	/* unmask_pm_irq provides an implicit barrier (POSTING_READ) */
519f4e9af4fSAkash Goel }
520f4e9af4fSAkash Goel 
52158820574STvrtko Ursulin static void gen6_disable_pm_irq(struct intel_gt *gt, u32 disable_mask)
522f4e9af4fSAkash Goel {
52358820574STvrtko Ursulin 	lockdep_assert_held(&gt->i915->irq_lock);
524f4e9af4fSAkash Goel 
52558820574STvrtko Ursulin 	gt->pm_ier &= ~disable_mask;
52658820574STvrtko Ursulin 	__gen6_mask_pm_irq(gt, disable_mask);
52758820574STvrtko Ursulin 	write_pm_ier(gt);
528f4e9af4fSAkash Goel 	/* though a barrier is missing here, we don't really need one */
529edbfdb45SPaulo Zanoni }
530edbfdb45SPaulo Zanoni 
531d02b98b8SOscar Mateo void gen11_reset_rps_interrupts(struct drm_i915_private *dev_priv)
532d02b98b8SOscar Mateo {
533d02b98b8SOscar Mateo 	spin_lock_irq(&dev_priv->irq_lock);
534d02b98b8SOscar Mateo 
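	/*
	 * Keep clearing the GTPM bit until it reads back as zero; another
	 * event may have latched behind the one just serviced.
	 */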
5359b77011eSTvrtko Ursulin 	while (gen11_reset_one_iir(&dev_priv->gt, 0, GEN11_GTPM))
53696606f3bSOscar Mateo 		;
537d02b98b8SOscar Mateo 
538d02b98b8SOscar Mateo 	dev_priv->gt_pm.rps.pm_iir = 0;
539d02b98b8SOscar Mateo 
540d02b98b8SOscar Mateo 	spin_unlock_irq(&dev_priv->irq_lock);
541d02b98b8SOscar Mateo }
542d02b98b8SOscar Mateo 
543dc97997aSChris Wilson void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
5443cc134e3SImre Deak {
5453cc134e3SImre Deak 	spin_lock_irq(&dev_priv->irq_lock);
5464668f695SChris Wilson 	gen6_reset_pm_iir(dev_priv, GEN6_PM_RPS_EVENTS);
547562d9baeSSagar Arun Kamble 	dev_priv->gt_pm.rps.pm_iir = 0;
5483cc134e3SImre Deak 	spin_unlock_irq(&dev_priv->irq_lock);
5493cc134e3SImre Deak }
5503cc134e3SImre Deak 
55191d14251STvrtko Ursulin void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
552b900b949SImre Deak {
55358820574STvrtko Ursulin 	struct intel_gt *gt = &dev_priv->gt;
554562d9baeSSagar Arun Kamble 	struct intel_rps *rps = &dev_priv->gt_pm.rps;
555562d9baeSSagar Arun Kamble 
556562d9baeSSagar Arun Kamble 	if (READ_ONCE(rps->interrupts_enabled))
557f2a91d1aSChris Wilson 		return;
558f2a91d1aSChris Wilson 
559b900b949SImre Deak 	spin_lock_irq(&dev_priv->irq_lock);
560562d9baeSSagar Arun Kamble 	WARN_ON_ONCE(rps->pm_iir);
56196606f3bSOscar Mateo 
562d02b98b8SOscar Mateo 	if (INTEL_GEN(dev_priv) >= 11)
56358820574STvrtko Ursulin 		WARN_ON_ONCE(gen11_reset_one_iir(gt, 0, GEN11_GTPM));
564d02b98b8SOscar Mateo 	else
565c33d247dSChris Wilson 		WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
56696606f3bSOscar Mateo 
567562d9baeSSagar Arun Kamble 	rps->interrupts_enabled = true;
56858820574STvrtko Ursulin 	gen6_enable_pm_irq(gt, dev_priv->pm_rps_events);
56978e68d36SImre Deak 
570b900b949SImre Deak 	spin_unlock_irq(&dev_priv->irq_lock);
571b900b949SImre Deak }
572b900b949SImre Deak 
57391d14251STvrtko Ursulin void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
574b900b949SImre Deak {
575562d9baeSSagar Arun Kamble 	struct intel_rps *rps = &dev_priv->gt_pm.rps;
576562d9baeSSagar Arun Kamble 
577562d9baeSSagar Arun Kamble 	if (!READ_ONCE(rps->interrupts_enabled))
578f2a91d1aSChris Wilson 		return;
579f2a91d1aSChris Wilson 
580d4d70aa5SImre Deak 	spin_lock_irq(&dev_priv->irq_lock);
581562d9baeSSagar Arun Kamble 	rps->interrupts_enabled = false;
5829939fba2SImre Deak 
583b20e3cfeSDave Gordon 	I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0u));
5849939fba2SImre Deak 
58558820574STvrtko Ursulin 	gen6_disable_pm_irq(&dev_priv->gt, GEN6_PM_RPS_EVENTS);
58658072ccbSImre Deak 
58758072ccbSImre Deak 	spin_unlock_irq(&dev_priv->irq_lock);
588315ca4c4SVille Syrjälä 	intel_synchronize_irq(dev_priv);
589c33d247dSChris Wilson 
590c33d247dSChris Wilson 	/* Now that we will not be generating any more work, flush any
5913814fd77SOscar Mateo 	 * outstanding tasks. As we are called on the RPS idle path,
592c33d247dSChris Wilson 	 * we will reset the GPU to minimum frequencies, so the current
593c33d247dSChris Wilson 	 * state of the worker can be discarded.
594c33d247dSChris Wilson 	 */
595562d9baeSSagar Arun Kamble 	cancel_work_sync(&rps->work);
596d02b98b8SOscar Mateo 	if (INTEL_GEN(dev_priv) >= 11)
597d02b98b8SOscar Mateo 		gen11_reset_rps_interrupts(dev_priv);
598d02b98b8SOscar Mateo 	else
599c33d247dSChris Wilson 		gen6_reset_rps_interrupts(dev_priv);
600b900b949SImre Deak }
601b900b949SImre Deak 
6029cbd51c2SDaniele Ceraolo Spurio void gen9_reset_guc_interrupts(struct intel_guc *guc)
60326705e20SSagar Arun Kamble {
6049cbd51c2SDaniele Ceraolo Spurio 	struct drm_i915_private *dev_priv = guc_to_i915(guc);
6059cbd51c2SDaniele Ceraolo Spurio 
60687b391b9SDaniele Ceraolo Spurio 	assert_rpm_wakelock_held(&dev_priv->runtime_pm);
6071be333d3SSagar Arun Kamble 
60826705e20SSagar Arun Kamble 	spin_lock_irq(&dev_priv->irq_lock);
60926705e20SSagar Arun Kamble 	gen6_reset_pm_iir(dev_priv, dev_priv->pm_guc_events);
61026705e20SSagar Arun Kamble 	spin_unlock_irq(&dev_priv->irq_lock);
61126705e20SSagar Arun Kamble }
61226705e20SSagar Arun Kamble 
6139cbd51c2SDaniele Ceraolo Spurio void gen9_enable_guc_interrupts(struct intel_guc *guc)
61426705e20SSagar Arun Kamble {
6159cbd51c2SDaniele Ceraolo Spurio 	struct drm_i915_private *dev_priv = guc_to_i915(guc);
6169cbd51c2SDaniele Ceraolo Spurio 
61787b391b9SDaniele Ceraolo Spurio 	assert_rpm_wakelock_held(&dev_priv->runtime_pm);
6181be333d3SSagar Arun Kamble 
61926705e20SSagar Arun Kamble 	spin_lock_irq(&dev_priv->irq_lock);
6209cbd51c2SDaniele Ceraolo Spurio 	if (!guc->interrupts.enabled) {
62126705e20SSagar Arun Kamble 		WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) &
62226705e20SSagar Arun Kamble 				       dev_priv->pm_guc_events);
6239cbd51c2SDaniele Ceraolo Spurio 		guc->interrupts.enabled = true;
62458820574STvrtko Ursulin 		gen6_enable_pm_irq(&dev_priv->gt, dev_priv->pm_guc_events);
62526705e20SSagar Arun Kamble 	}
62626705e20SSagar Arun Kamble 	spin_unlock_irq(&dev_priv->irq_lock);
62726705e20SSagar Arun Kamble }
62826705e20SSagar Arun Kamble 
6299cbd51c2SDaniele Ceraolo Spurio void gen9_disable_guc_interrupts(struct intel_guc *guc)
63026705e20SSagar Arun Kamble {
6319cbd51c2SDaniele Ceraolo Spurio 	struct drm_i915_private *dev_priv = guc_to_i915(guc);
6329cbd51c2SDaniele Ceraolo Spurio 
63387b391b9SDaniele Ceraolo Spurio 	assert_rpm_wakelock_held(&dev_priv->runtime_pm);
6341be333d3SSagar Arun Kamble 
63526705e20SSagar Arun Kamble 	spin_lock_irq(&dev_priv->irq_lock);
6369cbd51c2SDaniele Ceraolo Spurio 	guc->interrupts.enabled = false;
63726705e20SSagar Arun Kamble 
63858820574STvrtko Ursulin 	gen6_disable_pm_irq(&dev_priv->gt, dev_priv->pm_guc_events);
63926705e20SSagar Arun Kamble 
64026705e20SSagar Arun Kamble 	spin_unlock_irq(&dev_priv->irq_lock);
641315ca4c4SVille Syrjälä 	intel_synchronize_irq(dev_priv);
64226705e20SSagar Arun Kamble 
6439cbd51c2SDaniele Ceraolo Spurio 	gen9_reset_guc_interrupts(guc);
64426705e20SSagar Arun Kamble }
64526705e20SSagar Arun Kamble 
6469cbd51c2SDaniele Ceraolo Spurio void gen11_reset_guc_interrupts(struct intel_guc *guc)
64754c52a84SOscar Mateo {
6489cbd51c2SDaniele Ceraolo Spurio 	struct drm_i915_private *i915 = guc_to_i915(guc);
6499cbd51c2SDaniele Ceraolo Spurio 
65054c52a84SOscar Mateo 	spin_lock_irq(&i915->irq_lock);
6519b77011eSTvrtko Ursulin 	gen11_reset_one_iir(&i915->gt, 0, GEN11_GUC);
65254c52a84SOscar Mateo 	spin_unlock_irq(&i915->irq_lock);
65354c52a84SOscar Mateo }
65454c52a84SOscar Mateo 
6559cbd51c2SDaniele Ceraolo Spurio void gen11_enable_guc_interrupts(struct intel_guc *guc)
65654c52a84SOscar Mateo {
6579cbd51c2SDaniele Ceraolo Spurio 	struct drm_i915_private *dev_priv = guc_to_i915(guc);
6589cbd51c2SDaniele Ceraolo Spurio 
65954c52a84SOscar Mateo 	spin_lock_irq(&dev_priv->irq_lock);
6609cbd51c2SDaniele Ceraolo Spurio 	if (!guc->interrupts.enabled) {
661633023a4SDaniele Ceraolo Spurio 		u32 events = REG_FIELD_PREP(ENGINE1_MASK, GUC_INTR_GUC2HOST);
66254c52a84SOscar Mateo 
6639b77011eSTvrtko Ursulin 		WARN_ON_ONCE(gen11_reset_one_iir(&dev_priv->gt, 0, GEN11_GUC));
66454c52a84SOscar Mateo 		I915_WRITE(GEN11_GUC_SG_INTR_ENABLE, events);
66554c52a84SOscar Mateo 		I915_WRITE(GEN11_GUC_SG_INTR_MASK, ~events);
6669cbd51c2SDaniele Ceraolo Spurio 		guc->interrupts.enabled = true;
66754c52a84SOscar Mateo 	}
66854c52a84SOscar Mateo 	spin_unlock_irq(&dev_priv->irq_lock);
66954c52a84SOscar Mateo }
67054c52a84SOscar Mateo 
6719cbd51c2SDaniele Ceraolo Spurio void gen11_disable_guc_interrupts(struct intel_guc *guc)
67254c52a84SOscar Mateo {
6739cbd51c2SDaniele Ceraolo Spurio 	struct drm_i915_private *dev_priv = guc_to_i915(guc);
6749cbd51c2SDaniele Ceraolo Spurio 
67554c52a84SOscar Mateo 	spin_lock_irq(&dev_priv->irq_lock);
6769cbd51c2SDaniele Ceraolo Spurio 	guc->interrupts.enabled = false;
67754c52a84SOscar Mateo 
67854c52a84SOscar Mateo 	I915_WRITE(GEN11_GUC_SG_INTR_MASK, ~0);
67954c52a84SOscar Mateo 	I915_WRITE(GEN11_GUC_SG_INTR_ENABLE, 0);
68054c52a84SOscar Mateo 
68154c52a84SOscar Mateo 	spin_unlock_irq(&dev_priv->irq_lock);
682315ca4c4SVille Syrjälä 	intel_synchronize_irq(dev_priv);
68354c52a84SOscar Mateo 
6849cbd51c2SDaniele Ceraolo Spurio 	gen11_reset_guc_interrupts(guc);
68554c52a84SOscar Mateo }
68654c52a84SOscar Mateo 
6870961021aSBen Widawsky /**
6883a3b3c7dSVille Syrjälä  * bdw_update_port_irq - update DE port interrupt
6893a3b3c7dSVille Syrjälä  * @dev_priv: driver private
6903a3b3c7dSVille Syrjälä  * @interrupt_mask: mask of interrupt bits to update
6913a3b3c7dSVille Syrjälä  * @enabled_irq_mask: mask of interrupt bits to enable
6923a3b3c7dSVille Syrjälä  */
6933a3b3c7dSVille Syrjälä static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
694a9c287c9SJani Nikula 				u32 interrupt_mask,
695a9c287c9SJani Nikula 				u32 enabled_irq_mask)
6963a3b3c7dSVille Syrjälä {
697a9c287c9SJani Nikula 	u32 new_val;
698a9c287c9SJani Nikula 	u32 old_val;
6993a3b3c7dSVille Syrjälä 
70067520415SChris Wilson 	lockdep_assert_held(&dev_priv->irq_lock);
7013a3b3c7dSVille Syrjälä 
7023a3b3c7dSVille Syrjälä 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
7033a3b3c7dSVille Syrjälä 
7043a3b3c7dSVille Syrjälä 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
7053a3b3c7dSVille Syrjälä 		return;
7063a3b3c7dSVille Syrjälä 
7073a3b3c7dSVille Syrjälä 	old_val = I915_READ(GEN8_DE_PORT_IMR);
7083a3b3c7dSVille Syrjälä 
7093a3b3c7dSVille Syrjälä 	new_val = old_val;
7103a3b3c7dSVille Syrjälä 	new_val &= ~interrupt_mask;
7113a3b3c7dSVille Syrjälä 	new_val |= (~enabled_irq_mask & interrupt_mask);
7123a3b3c7dSVille Syrjälä 
7133a3b3c7dSVille Syrjälä 	if (new_val != old_val) {
7143a3b3c7dSVille Syrjälä 		I915_WRITE(GEN8_DE_PORT_IMR, new_val);
7153a3b3c7dSVille Syrjälä 		POSTING_READ(GEN8_DE_PORT_IMR);
7163a3b3c7dSVille Syrjälä 	}
7173a3b3c7dSVille Syrjälä }
7183a3b3c7dSVille Syrjälä 
7193a3b3c7dSVille Syrjälä /**
720013d3752SVille Syrjälä  * bdw_update_pipe_irq - update DE pipe interrupt
721013d3752SVille Syrjälä  * @dev_priv: driver private
722013d3752SVille Syrjälä  * @pipe: pipe whose interrupt to update
723013d3752SVille Syrjälä  * @interrupt_mask: mask of interrupt bits to update
724013d3752SVille Syrjälä  * @enabled_irq_mask: mask of interrupt bits to enable
725013d3752SVille Syrjälä  */
726013d3752SVille Syrjälä void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
727013d3752SVille Syrjälä 			 enum pipe pipe,
728a9c287c9SJani Nikula 			 u32 interrupt_mask,
729a9c287c9SJani Nikula 			 u32 enabled_irq_mask)
730013d3752SVille Syrjälä {
731a9c287c9SJani Nikula 	u32 new_val;
732013d3752SVille Syrjälä 
73367520415SChris Wilson 	lockdep_assert_held(&dev_priv->irq_lock);
734013d3752SVille Syrjälä 
735013d3752SVille Syrjälä 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
736013d3752SVille Syrjälä 
737013d3752SVille Syrjälä 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
738013d3752SVille Syrjälä 		return;
739013d3752SVille Syrjälä 
740013d3752SVille Syrjälä 	new_val = dev_priv->de_irq_mask[pipe];
741013d3752SVille Syrjälä 	new_val &= ~interrupt_mask;
742013d3752SVille Syrjälä 	new_val |= (~enabled_irq_mask & interrupt_mask);
743013d3752SVille Syrjälä 
744013d3752SVille Syrjälä 	if (new_val != dev_priv->de_irq_mask[pipe]) {
745013d3752SVille Syrjälä 		dev_priv->de_irq_mask[pipe] = new_val;
746013d3752SVille Syrjälä 		I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
747013d3752SVille Syrjälä 		POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
748013d3752SVille Syrjälä 	}
749013d3752SVille Syrjälä }
750013d3752SVille Syrjälä 
751013d3752SVille Syrjälä /**
752fee884edSDaniel Vetter  * ibx_display_interrupt_update - update SDEIMR
753fee884edSDaniel Vetter  * @dev_priv: driver private
754fee884edSDaniel Vetter  * @interrupt_mask: mask of interrupt bits to update
755fee884edSDaniel Vetter  * @enabled_irq_mask: mask of interrupt bits to enable
756fee884edSDaniel Vetter  */
75747339cd9SDaniel Vetter void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
758a9c287c9SJani Nikula 				  u32 interrupt_mask,
759a9c287c9SJani Nikula 				  u32 enabled_irq_mask)
760fee884edSDaniel Vetter {
761a9c287c9SJani Nikula 	u32 sdeimr = I915_READ(SDEIMR);
762fee884edSDaniel Vetter 	sdeimr &= ~interrupt_mask;
763fee884edSDaniel Vetter 	sdeimr |= (~enabled_irq_mask & interrupt_mask);
764fee884edSDaniel Vetter 
76515a17aaeSDaniel Vetter 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
76615a17aaeSDaniel Vetter 
76767520415SChris Wilson 	lockdep_assert_held(&dev_priv->irq_lock);
768fee884edSDaniel Vetter 
7699df7575fSJesse Barnes 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
770c67a470bSPaulo Zanoni 		return;
771c67a470bSPaulo Zanoni 
772fee884edSDaniel Vetter 	I915_WRITE(SDEIMR, sdeimr);
773fee884edSDaniel Vetter 	POSTING_READ(SDEIMR);
774fee884edSDaniel Vetter }
7758664281bSPaulo Zanoni 
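/*
 * PIPESTAT packs the interrupt enable bits in the high 16 bits and the
 * corresponding status bits in the low 16 bits, which is why the helper
 * below starts from "status_mask << 16" and then special-cases the VLV/CHV
 * sprite flip-done and PSR bits.
 */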
7766b12ca56SVille Syrjälä u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
7776b12ca56SVille Syrjälä 			      enum pipe pipe)
7787c463586SKeith Packard {
7796b12ca56SVille Syrjälä 	u32 status_mask = dev_priv->pipestat_irq_mask[pipe];
78010c59c51SImre Deak 	u32 enable_mask = status_mask << 16;
78110c59c51SImre Deak 
7826b12ca56SVille Syrjälä 	lockdep_assert_held(&dev_priv->irq_lock);
7836b12ca56SVille Syrjälä 
7846b12ca56SVille Syrjälä 	if (INTEL_GEN(dev_priv) < 5)
7856b12ca56SVille Syrjälä 		goto out;
7866b12ca56SVille Syrjälä 
78710c59c51SImre Deak 	/*
788724a6905SVille Syrjälä 	 * On pipe A we don't support the PSR interrupt yet,
789724a6905SVille Syrjälä 	 * on pipe B and C the same bit MBZ.
79010c59c51SImre Deak 	 */
79110c59c51SImre Deak 	if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
79210c59c51SImre Deak 		return 0;
793724a6905SVille Syrjälä 	/*
794724a6905SVille Syrjälä 	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
795724a6905SVille Syrjälä 	 * A the same bit is for perf counters which we don't use either.
796724a6905SVille Syrjälä 	 */
797724a6905SVille Syrjälä 	if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
798724a6905SVille Syrjälä 		return 0;
79910c59c51SImre Deak 
80010c59c51SImre Deak 	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
80110c59c51SImre Deak 			 SPRITE0_FLIP_DONE_INT_EN_VLV |
80210c59c51SImre Deak 			 SPRITE1_FLIP_DONE_INT_EN_VLV);
80310c59c51SImre Deak 	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
80410c59c51SImre Deak 		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
80510c59c51SImre Deak 	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
80610c59c51SImre Deak 		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;
80710c59c51SImre Deak 
8086b12ca56SVille Syrjälä out:
8096b12ca56SVille Syrjälä 	WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
8106b12ca56SVille Syrjälä 		  status_mask & ~PIPESTAT_INT_STATUS_MASK,
8116b12ca56SVille Syrjälä 		  "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
8126b12ca56SVille Syrjälä 		  pipe_name(pipe), enable_mask, status_mask);
8136b12ca56SVille Syrjälä 
81410c59c51SImre Deak 	return enable_mask;
81510c59c51SImre Deak }
81610c59c51SImre Deak 
8176b12ca56SVille Syrjälä void i915_enable_pipestat(struct drm_i915_private *dev_priv,
8186b12ca56SVille Syrjälä 			  enum pipe pipe, u32 status_mask)
819755e9019SImre Deak {
8206b12ca56SVille Syrjälä 	i915_reg_t reg = PIPESTAT(pipe);
821755e9019SImre Deak 	u32 enable_mask;
822755e9019SImre Deak 
8236b12ca56SVille Syrjälä 	WARN_ONCE(status_mask & ~PIPESTAT_INT_STATUS_MASK,
8246b12ca56SVille Syrjälä 		  "pipe %c: status_mask=0x%x\n",
8256b12ca56SVille Syrjälä 		  pipe_name(pipe), status_mask);
8266b12ca56SVille Syrjälä 
8276b12ca56SVille Syrjälä 	lockdep_assert_held(&dev_priv->irq_lock);
8286b12ca56SVille Syrjälä 	WARN_ON(!intel_irqs_enabled(dev_priv));
8296b12ca56SVille Syrjälä 
8306b12ca56SVille Syrjälä 	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == status_mask)
8316b12ca56SVille Syrjälä 		return;
8326b12ca56SVille Syrjälä 
8336b12ca56SVille Syrjälä 	dev_priv->pipestat_irq_mask[pipe] |= status_mask;
8346b12ca56SVille Syrjälä 	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
8356b12ca56SVille Syrjälä 
8366b12ca56SVille Syrjälä 	I915_WRITE(reg, enable_mask | status_mask);
8376b12ca56SVille Syrjälä 	POSTING_READ(reg);
838755e9019SImre Deak }
839755e9019SImre Deak 
8406b12ca56SVille Syrjälä void i915_disable_pipestat(struct drm_i915_private *dev_priv,
8416b12ca56SVille Syrjälä 			   enum pipe pipe, u32 status_mask)
842755e9019SImre Deak {
8436b12ca56SVille Syrjälä 	i915_reg_t reg = PIPESTAT(pipe);
844755e9019SImre Deak 	u32 enable_mask;
845755e9019SImre Deak 
8466b12ca56SVille Syrjälä 	WARN_ONCE(status_mask & ~PIPESTAT_INT_STATUS_MASK,
8476b12ca56SVille Syrjälä 		  "pipe %c: status_mask=0x%x\n",
8486b12ca56SVille Syrjälä 		  pipe_name(pipe), status_mask);
8496b12ca56SVille Syrjälä 
8506b12ca56SVille Syrjälä 	lockdep_assert_held(&dev_priv->irq_lock);
8516b12ca56SVille Syrjälä 	WARN_ON(!intel_irqs_enabled(dev_priv));
8526b12ca56SVille Syrjälä 
8536b12ca56SVille Syrjälä 	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == 0)
8546b12ca56SVille Syrjälä 		return;
8556b12ca56SVille Syrjälä 
8566b12ca56SVille Syrjälä 	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
8576b12ca56SVille Syrjälä 	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
8586b12ca56SVille Syrjälä 
8596b12ca56SVille Syrjälä 	I915_WRITE(reg, enable_mask | status_mask);
8606b12ca56SVille Syrjälä 	POSTING_READ(reg);
861755e9019SImre Deak }
862755e9019SImre Deak 
863f3e30485SVille Syrjälä static bool i915_has_asle(struct drm_i915_private *dev_priv)
864f3e30485SVille Syrjälä {
865f3e30485SVille Syrjälä 	if (!dev_priv->opregion.asle)
866f3e30485SVille Syrjälä 		return false;
867f3e30485SVille Syrjälä 
868f3e30485SVille Syrjälä 	return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
869f3e30485SVille Syrjälä }
870f3e30485SVille Syrjälä 
871c0e09200SDave Airlie /**
872f49e38ddSJani Nikula  * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
87314bb2c11STvrtko Ursulin  * @dev_priv: i915 device private
87401c66889SZhao Yakui  */
87591d14251STvrtko Ursulin static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
87601c66889SZhao Yakui {
877f3e30485SVille Syrjälä 	if (!i915_has_asle(dev_priv))
878f49e38ddSJani Nikula 		return;
879f49e38ddSJani Nikula 
88013321786SDaniel Vetter 	spin_lock_irq(&dev_priv->irq_lock);
88101c66889SZhao Yakui 
882755e9019SImre Deak 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
88391d14251STvrtko Ursulin 	if (INTEL_GEN(dev_priv) >= 4)
8843b6c42e8SDaniel Vetter 		i915_enable_pipestat(dev_priv, PIPE_A,
885755e9019SImre Deak 				     PIPE_LEGACY_BLC_EVENT_STATUS);
8861ec14ad3SChris Wilson 
88713321786SDaniel Vetter 	spin_unlock_irq(&dev_priv->irq_lock);
88801c66889SZhao Yakui }
88901c66889SZhao Yakui 
890f75f3746SVille Syrjälä /*
891f75f3746SVille Syrjälä  * This timing diagram depicts the video signal in and
892f75f3746SVille Syrjälä  * around the vertical blanking period.
893f75f3746SVille Syrjälä  *
894f75f3746SVille Syrjälä  * Assumptions about the fictitious mode used in this example:
895f75f3746SVille Syrjälä  *  vblank_start >= 3
896f75f3746SVille Syrjälä  *  vsync_start = vblank_start + 1
897f75f3746SVille Syrjälä  *  vsync_end = vblank_start + 2
898f75f3746SVille Syrjälä  *  vtotal = vblank_start + 3
899f75f3746SVille Syrjälä  *
900f75f3746SVille Syrjälä  *           start of vblank:
901f75f3746SVille Syrjälä  *           latch double buffered registers
902f75f3746SVille Syrjälä  *           increment frame counter (ctg+)
903f75f3746SVille Syrjälä  *           generate start of vblank interrupt (gen4+)
904f75f3746SVille Syrjälä  *           |
905f75f3746SVille Syrjälä  *           |          frame start:
906f75f3746SVille Syrjälä  *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
907f75f3746SVille Syrjälä  *           |          may be shifted forward 1-3 extra lines via PIPECONF
908f75f3746SVille Syrjälä  *           |          |
909f75f3746SVille Syrjälä  *           |          |  start of vsync:
910f75f3746SVille Syrjälä  *           |          |  generate vsync interrupt
911f75f3746SVille Syrjälä  *           |          |  |
912f75f3746SVille Syrjälä  * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
913f75f3746SVille Syrjälä  *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
914f75f3746SVille Syrjälä  * ----va---> <-----------------vb--------------------> <--------va-------------
915f75f3746SVille Syrjälä  *       |          |       <----vs----->                     |
916f75f3746SVille Syrjälä  * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
917f75f3746SVille Syrjälä  * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
918f75f3746SVille Syrjälä  * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
919f75f3746SVille Syrjälä  *       |          |                                         |
920f75f3746SVille Syrjälä  *       last visible pixel                                   first visible pixel
921f75f3746SVille Syrjälä  *                  |                                         increment frame counter (gen3/4)
922f75f3746SVille Syrjälä  *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
923f75f3746SVille Syrjälä  *
924f75f3746SVille Syrjälä  * x  = horizontal active
925f75f3746SVille Syrjälä  * _  = horizontal blanking
926f75f3746SVille Syrjälä  * hs = horizontal sync
927f75f3746SVille Syrjälä  * va = vertical active
928f75f3746SVille Syrjälä  * vb = vertical blanking
929f75f3746SVille Syrjälä  * vs = vertical sync
930f75f3746SVille Syrjälä  * vbs = vblank_start (number)
931f75f3746SVille Syrjälä  *
932f75f3746SVille Syrjälä  * Summary:
933f75f3746SVille Syrjälä  * - most events happen at the start of horizontal sync
934f75f3746SVille Syrjälä  * - frame start happens at the start of horizontal blank, 1-4 lines
935f75f3746SVille Syrjälä  *   (depending on PIPECONF settings) after the start of vblank
936f75f3746SVille Syrjälä  * - gen3/4 pixel and frame counter are synchronized with the start
937f75f3746SVille Syrjälä  *   of horizontal active on the first line of vertical active
938f75f3746SVille Syrjälä  */
939f75f3746SVille Syrjälä 
94042f52ef8SKeith Packard /* Called from drm generic code, passed a 'crtc', which
94142f52ef8SKeith Packard  * we use as a pipe index
94242f52ef8SKeith Packard  */
94308fa8fd0SVille Syrjälä u32 i915_get_vblank_counter(struct drm_crtc *crtc)
9440a3e67a4SJesse Barnes {
94508fa8fd0SVille Syrjälä 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
94608fa8fd0SVille Syrjälä 	struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[drm_crtc_index(crtc)];
94732db0b65SVille Syrjälä 	const struct drm_display_mode *mode = &vblank->hwmode;
94808fa8fd0SVille Syrjälä 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
949f0f59a00SVille Syrjälä 	i915_reg_t high_frame, low_frame;
9500b2a8e09SVille Syrjälä 	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
951694e409dSVille Syrjälä 	unsigned long irqflags;
952391f75e2SVille Syrjälä 
95332db0b65SVille Syrjälä 	/*
95432db0b65SVille Syrjälä 	 * On i965gm TV output the frame counter only works up to
95532db0b65SVille Syrjälä 	 * the point when we enable the TV encoder. After that the
95632db0b65SVille Syrjälä 	 * frame counter ceases to work and reads zero. We need a
95732db0b65SVille Syrjälä 	 * vblank wait before enabling the TV encoder and so we
95832db0b65SVille Syrjälä 	 * have to enable vblank interrupts while the frame counter
95932db0b65SVille Syrjälä 	 * is still in a working state. However the core vblank code
96032db0b65SVille Syrjälä 	 * does not like us returning non-zero frame counter values
96132db0b65SVille Syrjälä 	 * when we've told it that we don't have a working frame
96232db0b65SVille Syrjälä 	 * counter. Thus we must stop non-zero values leaking out.
96332db0b65SVille Syrjälä 	 */
96432db0b65SVille Syrjälä 	if (!vblank->max_vblank_count)
96532db0b65SVille Syrjälä 		return 0;
96632db0b65SVille Syrjälä 
9670b2a8e09SVille Syrjälä 	htotal = mode->crtc_htotal;
9680b2a8e09SVille Syrjälä 	hsync_start = mode->crtc_hsync_start;
9690b2a8e09SVille Syrjälä 	vbl_start = mode->crtc_vblank_start;
9700b2a8e09SVille Syrjälä 	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
9710b2a8e09SVille Syrjälä 		vbl_start = DIV_ROUND_UP(vbl_start, 2);
972391f75e2SVille Syrjälä 
9730b2a8e09SVille Syrjälä 	/* Convert to pixel count */
9740b2a8e09SVille Syrjälä 	vbl_start *= htotal;
9750b2a8e09SVille Syrjälä 
9760b2a8e09SVille Syrjälä 	/* Start of vblank event occurs at start of hsync */
9770b2a8e09SVille Syrjälä 	vbl_start -= htotal - hsync_start;
9780b2a8e09SVille Syrjälä 
9799db4a9c7SJesse Barnes 	high_frame = PIPEFRAME(pipe);
9809db4a9c7SJesse Barnes 	low_frame = PIPEFRAMEPIXEL(pipe);
9815eddb70bSChris Wilson 
982694e409dSVille Syrjälä 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
983694e409dSVille Syrjälä 
9840a3e67a4SJesse Barnes 	/*
9850a3e67a4SJesse Barnes 	 * High & low register fields aren't synchronized, so make sure
9860a3e67a4SJesse Barnes 	 * we get a low value that's stable across two reads of the high
9870a3e67a4SJesse Barnes 	 * register.
9880a3e67a4SJesse Barnes 	 */
9890a3e67a4SJesse Barnes 	do {
990694e409dSVille Syrjälä 		high1 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK;
991694e409dSVille Syrjälä 		low   = I915_READ_FW(low_frame);
992694e409dSVille Syrjälä 		high2 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK;
9930a3e67a4SJesse Barnes 	} while (high1 != high2);
9940a3e67a4SJesse Barnes 
995694e409dSVille Syrjälä 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
996694e409dSVille Syrjälä 
9975eddb70bSChris Wilson 	high1 >>= PIPE_FRAME_HIGH_SHIFT;
998391f75e2SVille Syrjälä 	pixel = low & PIPE_PIXEL_MASK;
9995eddb70bSChris Wilson 	low >>= PIPE_FRAME_LOW_SHIFT;
1000391f75e2SVille Syrjälä 
1001391f75e2SVille Syrjälä 	/*
1002391f75e2SVille Syrjälä 	 * The frame counter increments at beginning of active.
1003391f75e2SVille Syrjälä 	 * Cook up a vblank counter by also checking the pixel
1004391f75e2SVille Syrjälä 	 * counter against vblank start.
1005391f75e2SVille Syrjälä 	 */
1006edc08d0aSVille Syrjälä 	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
10070a3e67a4SJesse Barnes }
10080a3e67a4SJesse Barnes 
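/*
 * Worked example for the "cook up a vblank counter" step above, with made-up
 * numbers: for htotal = 100, hsync_start = 90 and vblank_start = 3 lines,
 * vbl_start becomes 3 * 100 - (100 - 90) = 290 pixels. The hardware frame
 * counter only increments at the start of active, so once the pixel counter
 * has reached 290 we are inside a vblank the hardware has not yet counted,
 * and the (pixel >= vbl_start) term supplies the missing increment.
 */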
100908fa8fd0SVille Syrjälä u32 g4x_get_vblank_counter(struct drm_crtc *crtc)
10109880b7a5SJesse Barnes {
101108fa8fd0SVille Syrjälä 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
101208fa8fd0SVille Syrjälä 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
10139880b7a5SJesse Barnes 
1014649636efSVille Syrjälä 	return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
10159880b7a5SJesse Barnes }
10169880b7a5SJesse Barnes 
1017aec0246fSUma Shankar /*
1018aec0246fSUma Shankar  * On certain encoders on certain platforms, the pipe
1019aec0246fSUma Shankar  * scanline register will not work to get the scanline,
1020aec0246fSUma Shankar  * since the timings are driven from the PORT or there are issues
1021aec0246fSUma Shankar  * with scanline register updates.
1022aec0246fSUma Shankar  * This function will use Framestamp and current
1023aec0246fSUma Shankar  * timestamp registers to calculate the scanline.
1024aec0246fSUma Shankar  */
1025aec0246fSUma Shankar static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc)
1026aec0246fSUma Shankar {
1027aec0246fSUma Shankar 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1028aec0246fSUma Shankar 	struct drm_vblank_crtc *vblank =
1029aec0246fSUma Shankar 		&crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
1030aec0246fSUma Shankar 	const struct drm_display_mode *mode = &vblank->hwmode;
1031aec0246fSUma Shankar 	u32 vblank_start = mode->crtc_vblank_start;
1032aec0246fSUma Shankar 	u32 vtotal = mode->crtc_vtotal;
1033aec0246fSUma Shankar 	u32 htotal = mode->crtc_htotal;
1034aec0246fSUma Shankar 	u32 clock = mode->crtc_clock;
1035aec0246fSUma Shankar 	u32 scanline, scan_prev_time, scan_curr_time, scan_post_time;
1036aec0246fSUma Shankar 
1037aec0246fSUma Shankar 	/*
1038aec0246fSUma Shankar 	 * To avoid the race condition where we might cross into the
1039aec0246fSUma Shankar 	 * next vblank just between the PIPE_FRMTMSTMP and TIMESTAMP_CTR
1040aec0246fSUma Shankar 	 * reads. We make sure we read PIPE_FRMTMSTMP and TIMESTAMP_CTR
1041aec0246fSUma Shankar 	 * reads, we make sure we read PIPE_FRMTMSTMP and TIMESTAMP_CTR
1042aec0246fSUma Shankar 	 */
1043aec0246fSUma Shankar 	do {
1044aec0246fSUma Shankar 		/*
1045aec0246fSUma Shankar 		 * This field provides read back of the display
1046aec0246fSUma Shankar 		 * pipe frame time stamp. The time stamp value
1047aec0246fSUma Shankar 		 * is sampled at every start of vertical blank.
1048aec0246fSUma Shankar 		 */
1049aec0246fSUma Shankar 		scan_prev_time = I915_READ_FW(PIPE_FRMTMSTMP(crtc->pipe));
1050aec0246fSUma Shankar 
1051aec0246fSUma Shankar 		/*
1052aec0246fSUma Shankar 		 * The TIMESTAMP_CTR register has the current
1053aec0246fSUma Shankar 		 * time stamp value.
1054aec0246fSUma Shankar 		 */
1055aec0246fSUma Shankar 		scan_curr_time = I915_READ_FW(IVB_TIMESTAMP_CTR);
1056aec0246fSUma Shankar 
1057aec0246fSUma Shankar 		scan_post_time = I915_READ_FW(PIPE_FRMTMSTMP(crtc->pipe));
1058aec0246fSUma Shankar 	} while (scan_post_time != scan_prev_time);
1059aec0246fSUma Shankar 
1060aec0246fSUma Shankar 	scanline = div_u64(mul_u32_u32(scan_curr_time - scan_prev_time,
1061aec0246fSUma Shankar 					clock), 1000 * htotal);
1062aec0246fSUma Shankar 	scanline = min(scanline, vtotal - 1);
1063aec0246fSUma Shankar 	scanline = (scanline + vblank_start) % vtotal;
1064aec0246fSUma Shankar 
1065aec0246fSUma Shankar 	return scanline;
1066aec0246fSUma Shankar }
1067aec0246fSUma Shankar 
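/*
 * Illustrative sketch, not part of the driver: the conversion used above
 * from "time since the last start of vblank" to a scanline number. It
 * assumes the timestamp delta is in microseconds and clock is the pixel
 * clock in kHz, which is what the 1000 * htotal divisor implies. Names
 * are hypothetical.
 */
static inline u32 example_scanline_from_timestamp(u64 us_since_vbl_start,
						   u32 clock, u32 htotal,
						   u32 vtotal, u32 vblank_start)
{
	u32 scanline;

	/* pixels scanned out since vblank start, divided down to lines */
	scanline = div_u64(us_since_vbl_start * clock, 1000 * htotal);
	/* never report a line beyond the end of the frame */
	scanline = min(scanline, vtotal - 1);
	/* the frame timestamp is latched at vblank start, so rotate */
	return (scanline + vblank_start) % vtotal;
}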
106875aa3f63SVille Syrjälä /* I915_READ_FW, only for fast reads of display block, no need for forcewake etc. */
1069a225f079SVille Syrjälä static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
1070a225f079SVille Syrjälä {
1071a225f079SVille Syrjälä 	struct drm_device *dev = crtc->base.dev;
1072fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
10735caa0feaSDaniel Vetter 	const struct drm_display_mode *mode;
10745caa0feaSDaniel Vetter 	struct drm_vblank_crtc *vblank;
1075a225f079SVille Syrjälä 	enum pipe pipe = crtc->pipe;
107680715b2fSVille Syrjälä 	int position, vtotal;
1077a225f079SVille Syrjälä 
107872259536SVille Syrjälä 	if (!crtc->active)
107972259536SVille Syrjälä 		return -1;
108072259536SVille Syrjälä 
10815caa0feaSDaniel Vetter 	vblank = &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
10825caa0feaSDaniel Vetter 	mode = &vblank->hwmode;
10835caa0feaSDaniel Vetter 
1084aec0246fSUma Shankar 	if (mode->private_flags & I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP)
1085aec0246fSUma Shankar 		return __intel_get_crtc_scanline_from_timestamp(crtc);
1086aec0246fSUma Shankar 
108780715b2fSVille Syrjälä 	vtotal = mode->crtc_vtotal;
1088a225f079SVille Syrjälä 	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
1089a225f079SVille Syrjälä 		vtotal /= 2;
1090a225f079SVille Syrjälä 
1091cf819effSLucas De Marchi 	if (IS_GEN(dev_priv, 2))
109275aa3f63SVille Syrjälä 		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
1093a225f079SVille Syrjälä 	else
109475aa3f63SVille Syrjälä 		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
1095a225f079SVille Syrjälä 
1096a225f079SVille Syrjälä 	/*
109741b578fbSJesse Barnes 	 * On HSW, the DSL reg (0x70000) appears to return 0 if we
109841b578fbSJesse Barnes 	 * read it just before the start of vblank.  So try it again
109941b578fbSJesse Barnes 	 * so we don't accidentally end up spanning a vblank frame
110041b578fbSJesse Barnes 	 * increment, causing the pipe_update_end() code to squawk at us.
110141b578fbSJesse Barnes 	 *
110241b578fbSJesse Barnes 	 * The nature of this problem means we can't simply check the ISR
110341b578fbSJesse Barnes 	 * bit and return the vblank start value; nor can we use the scanline
110441b578fbSJesse Barnes 	 * debug register in the transcoder as it appears to have the same
110541b578fbSJesse Barnes 	 * problem.  We may need to extend this to include other platforms,
110641b578fbSJesse Barnes 	 * but so far testing only shows the problem on HSW.
110741b578fbSJesse Barnes 	 */
110891d14251STvrtko Ursulin 	if (HAS_DDI(dev_priv) && !position) {
110941b578fbSJesse Barnes 		int i, temp;
111041b578fbSJesse Barnes 
111141b578fbSJesse Barnes 		for (i = 0; i < 100; i++) {
111241b578fbSJesse Barnes 			udelay(1);
1113707bdd3fSVille Syrjälä 			temp = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
111441b578fbSJesse Barnes 			if (temp != position) {
111541b578fbSJesse Barnes 				position = temp;
111641b578fbSJesse Barnes 				break;
111741b578fbSJesse Barnes 			}
111841b578fbSJesse Barnes 		}
111941b578fbSJesse Barnes 	}
112041b578fbSJesse Barnes 
112141b578fbSJesse Barnes 	/*
112280715b2fSVille Syrjälä 	 * See update_scanline_offset() for the details on the
112380715b2fSVille Syrjälä 	 * scanline_offset adjustment.
1124a225f079SVille Syrjälä 	 */
112580715b2fSVille Syrjälä 	return (position + crtc->scanline_offset) % vtotal;
1126a225f079SVille Syrjälä }
1127a225f079SVille Syrjälä 
11287d23e593SVille Syrjälä bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
11291bf6ad62SDaniel Vetter 			      bool in_vblank_irq, int *vpos, int *hpos,
11303bb403bfSVille Syrjälä 			      ktime_t *stime, ktime_t *etime,
11313bb403bfSVille Syrjälä 			      const struct drm_display_mode *mode)
11320af7e4dfSMario Kleiner {
1133fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
113498187836SVille Syrjälä 	struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv,
113598187836SVille Syrjälä 								pipe);
11363aa18df8SVille Syrjälä 	int position;
113778e8fc6bSVille Syrjälä 	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
1138ad3543edSMario Kleiner 	unsigned long irqflags;
11398a920e24SVille Syrjälä 	bool use_scanline_counter = INTEL_GEN(dev_priv) >= 5 ||
11408a920e24SVille Syrjälä 		IS_G4X(dev_priv) || IS_GEN(dev_priv, 2) ||
11418a920e24SVille Syrjälä 		mode->private_flags & I915_MODE_FLAG_USE_SCANLINE_COUNTER;
11420af7e4dfSMario Kleiner 
1143fc467a22SMaarten Lankhorst 	if (WARN_ON(!mode->crtc_clock)) {
11440af7e4dfSMario Kleiner 		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
11459db4a9c7SJesse Barnes 				 "pipe %c\n", pipe_name(pipe));
11461bf6ad62SDaniel Vetter 		return false;
11470af7e4dfSMario Kleiner 	}
11480af7e4dfSMario Kleiner 
1149c2baf4b7SVille Syrjälä 	htotal = mode->crtc_htotal;
115078e8fc6bSVille Syrjälä 	hsync_start = mode->crtc_hsync_start;
1151c2baf4b7SVille Syrjälä 	vtotal = mode->crtc_vtotal;
1152c2baf4b7SVille Syrjälä 	vbl_start = mode->crtc_vblank_start;
1153c2baf4b7SVille Syrjälä 	vbl_end = mode->crtc_vblank_end;
11540af7e4dfSMario Kleiner 
1155d31faf65SVille Syrjälä 	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
1156d31faf65SVille Syrjälä 		vbl_start = DIV_ROUND_UP(vbl_start, 2);
1157d31faf65SVille Syrjälä 		vbl_end /= 2;
1158d31faf65SVille Syrjälä 		vtotal /= 2;
1159d31faf65SVille Syrjälä 	}
1160d31faf65SVille Syrjälä 
1161ad3543edSMario Kleiner 	/*
1162ad3543edSMario Kleiner 	 * Lock uncore.lock, as we will do multiple timing critical raw
1163ad3543edSMario Kleiner 	 * register reads, potentially with preemption disabled, so the
1164ad3543edSMario Kleiner 	 * following code must not block on uncore.lock.
1165ad3543edSMario Kleiner 	 */
1166ad3543edSMario Kleiner 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
1167ad3543edSMario Kleiner 
1168ad3543edSMario Kleiner 	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
1169ad3543edSMario Kleiner 
1170ad3543edSMario Kleiner 	/* Get optional system timestamp before query. */
1171ad3543edSMario Kleiner 	if (stime)
1172ad3543edSMario Kleiner 		*stime = ktime_get();
1173ad3543edSMario Kleiner 
11748a920e24SVille Syrjälä 	if (use_scanline_counter) {
11750af7e4dfSMario Kleiner 		/* No obvious pixelcount register. Only query vertical
11760af7e4dfSMario Kleiner 		 * scanout position from Display scan line register.
11770af7e4dfSMario Kleiner 		 */
1178a225f079SVille Syrjälä 		position = __intel_get_crtc_scanline(intel_crtc);
11790af7e4dfSMario Kleiner 	} else {
11800af7e4dfSMario Kleiner 		/* Have access to pixelcount since start of frame.
11810af7e4dfSMario Kleiner 		 * We can split this into vertical and horizontal
11820af7e4dfSMario Kleiner 		 * scanout position.
11830af7e4dfSMario Kleiner 		 */
118475aa3f63SVille Syrjälä 		position = (I915_READ_FW(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
11850af7e4dfSMario Kleiner 
11863aa18df8SVille Syrjälä 		/* convert to pixel counts */
11873aa18df8SVille Syrjälä 		vbl_start *= htotal;
11883aa18df8SVille Syrjälä 		vbl_end *= htotal;
11893aa18df8SVille Syrjälä 		vtotal *= htotal;
119078e8fc6bSVille Syrjälä 
119178e8fc6bSVille Syrjälä 		/*
11927e78f1cbSVille Syrjälä 		 * In interlaced modes, the pixel counter counts all pixels,
11937e78f1cbSVille Syrjälä 		 * so one field will have htotal more pixels. In order to avoid
11947e78f1cbSVille Syrjälä 		 * the reported position from jumping backwards when the pixel
11957e78f1cbSVille Syrjälä 		 * counter is beyond the length of the shorter field, just
11967e78f1cbSVille Syrjälä 		 * clamp the position the length of the shorter field. This
11977e78f1cbSVille Syrjälä 		 * clamp the position to the length of the shorter field. This
11987e78f1cbSVille Syrjälä 		 * the scanline counter doesn't count the two half lines.
11997e78f1cbSVille Syrjälä 		 */
12007e78f1cbSVille Syrjälä 		if (position >= vtotal)
12017e78f1cbSVille Syrjälä 			position = vtotal - 1;
12027e78f1cbSVille Syrjälä 
12037e78f1cbSVille Syrjälä 		/*
120478e8fc6bSVille Syrjälä 		 * Start of vblank interrupt is triggered at start of hsync,
120578e8fc6bSVille Syrjälä 		 * just prior to the first active line of vblank. However we
120678e8fc6bSVille Syrjälä 		 * consider lines to start at the leading edge of horizontal
120778e8fc6bSVille Syrjälä 		 * active. So, should we get here before we've crossed into
120878e8fc6bSVille Syrjälä 		 * the horizontal active of the first line in vblank, we would
120978e8fc6bSVille Syrjälä 		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
121078e8fc6bSVille Syrjälä 		 * always add htotal-hsync_start to the current pixel position.
121178e8fc6bSVille Syrjälä 		 */
121278e8fc6bSVille Syrjälä 		position = (position + htotal - hsync_start) % vtotal;
12133aa18df8SVille Syrjälä 	}
12143aa18df8SVille Syrjälä 
1215ad3543edSMario Kleiner 	/* Get optional system timestamp after query. */
1216ad3543edSMario Kleiner 	if (etime)
1217ad3543edSMario Kleiner 		*etime = ktime_get();
1218ad3543edSMario Kleiner 
1219ad3543edSMario Kleiner 	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
1220ad3543edSMario Kleiner 
1221ad3543edSMario Kleiner 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
1222ad3543edSMario Kleiner 
12233aa18df8SVille Syrjälä 	/*
12243aa18df8SVille Syrjälä 	 * While in vblank, position will be negative
12253aa18df8SVille Syrjälä 	 * counting up towards 0 at vbl_end. And outside
12263aa18df8SVille Syrjälä 	 * vblank, position will be positive counting
12273aa18df8SVille Syrjälä 	 * up since vbl_end.
12283aa18df8SVille Syrjälä 	 */
12293aa18df8SVille Syrjälä 	if (position >= vbl_start)
12303aa18df8SVille Syrjälä 		position -= vbl_end;
12313aa18df8SVille Syrjälä 	else
12323aa18df8SVille Syrjälä 		position += vtotal - vbl_end;
12333aa18df8SVille Syrjälä 
12348a920e24SVille Syrjälä 	if (use_scanline_counter) {
12353aa18df8SVille Syrjälä 		*vpos = position;
12363aa18df8SVille Syrjälä 		*hpos = 0;
12373aa18df8SVille Syrjälä 	} else {
12380af7e4dfSMario Kleiner 		*vpos = position / htotal;
12390af7e4dfSMario Kleiner 		*hpos = position - (*vpos * htotal);
12400af7e4dfSMario Kleiner 	}
12410af7e4dfSMario Kleiner 
12421bf6ad62SDaniel Vetter 	return true;
12430af7e4dfSMario Kleiner }
12440af7e4dfSMario Kleiner 
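/*
 * Illustrative sketch, not part of the driver: the normalisation done at
 * the end of the function above. Inside vblank the returned position is
 * negative, counting up towards 0 at vbl_end; in active video it is
 * non-negative, counting up from the end of vblank. Names are
 * hypothetical.
 */
static inline int example_normalize_scanout_position(int position,
						      int vbl_start,
						      int vbl_end,
						      int vtotal)
{
	if (position >= vbl_start)
		return position - vbl_end;		/* inside vblank */
	else
		return position + vtotal - vbl_end;	/* active video */
}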
1245a225f079SVille Syrjälä int intel_get_crtc_scanline(struct intel_crtc *crtc)
1246a225f079SVille Syrjälä {
1247fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1248a225f079SVille Syrjälä 	unsigned long irqflags;
1249a225f079SVille Syrjälä 	int position;
1250a225f079SVille Syrjälä 
1251a225f079SVille Syrjälä 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
1252a225f079SVille Syrjälä 	position = __intel_get_crtc_scanline(crtc);
1253a225f079SVille Syrjälä 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
1254a225f079SVille Syrjälä 
1255a225f079SVille Syrjälä 	return position;
1256a225f079SVille Syrjälä }
1257a225f079SVille Syrjälä 
125891d14251STvrtko Ursulin static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv)
1259f97108d1SJesse Barnes {
12604f5fd91fSTvrtko Ursulin 	struct intel_uncore *uncore = &dev_priv->uncore;
1261b5b72e89SMatthew Garrett 	u32 busy_up, busy_down, max_avg, min_avg;
12629270388eSDaniel Vetter 	u8 new_delay;
12639270388eSDaniel Vetter 
1264d0ecd7e2SDaniel Vetter 	spin_lock(&mchdev_lock);
1265f97108d1SJesse Barnes 
12664f5fd91fSTvrtko Ursulin 	intel_uncore_write16(uncore,
12674f5fd91fSTvrtko Ursulin 			     MEMINTRSTS,
12684f5fd91fSTvrtko Ursulin 			     intel_uncore_read(uncore, MEMINTRSTS));
126973edd18fSDaniel Vetter 
127020e4d407SDaniel Vetter 	new_delay = dev_priv->ips.cur_delay;
12719270388eSDaniel Vetter 
12724f5fd91fSTvrtko Ursulin 	intel_uncore_write16(uncore, MEMINTRSTS, MEMINT_EVAL_CHG);
12734f5fd91fSTvrtko Ursulin 	busy_up = intel_uncore_read(uncore, RCPREVBSYTUPAVG);
12744f5fd91fSTvrtko Ursulin 	busy_down = intel_uncore_read(uncore, RCPREVBSYTDNAVG);
12754f5fd91fSTvrtko Ursulin 	max_avg = intel_uncore_read(uncore, RCBMAXAVG);
12764f5fd91fSTvrtko Ursulin 	min_avg = intel_uncore_read(uncore, RCBMINAVG);
1277f97108d1SJesse Barnes 
1278f97108d1SJesse Barnes 	/* Handle RCS change request from hw */
1279b5b72e89SMatthew Garrett 	if (busy_up > max_avg) {
128020e4d407SDaniel Vetter 		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
128120e4d407SDaniel Vetter 			new_delay = dev_priv->ips.cur_delay - 1;
128220e4d407SDaniel Vetter 		if (new_delay < dev_priv->ips.max_delay)
128320e4d407SDaniel Vetter 			new_delay = dev_priv->ips.max_delay;
1284b5b72e89SMatthew Garrett 	} else if (busy_down < min_avg) {
128520e4d407SDaniel Vetter 		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
128620e4d407SDaniel Vetter 			new_delay = dev_priv->ips.cur_delay + 1;
128720e4d407SDaniel Vetter 		if (new_delay > dev_priv->ips.min_delay)
128820e4d407SDaniel Vetter 			new_delay = dev_priv->ips.min_delay;
1289f97108d1SJesse Barnes 	}
1290f97108d1SJesse Barnes 
129191d14251STvrtko Ursulin 	if (ironlake_set_drps(dev_priv, new_delay))
129220e4d407SDaniel Vetter 		dev_priv->ips.cur_delay = new_delay;
1293f97108d1SJesse Barnes 
1294d0ecd7e2SDaniel Vetter 	spin_unlock(&mchdev_lock);
12959270388eSDaniel Vetter 
1296f97108d1SJesse Barnes 	return;
1297f97108d1SJesse Barnes }
1298f97108d1SJesse Barnes 
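/*
 * Illustrative sketch, not part of the driver: the single-step delay
 * adjustment performed above. Note the inverted sense: a numerically
 * smaller delay means a higher frequency, so max_delay is the lower
 * bound of the value and min_delay the upper bound. Names are
 * hypothetical.
 */
static inline u8 example_ilk_step_delay(u8 cur, u8 max_delay, u8 min_delay,
					u32 busy_up, u32 busy_down,
					u32 max_avg, u32 min_avg)
{
	u8 new_delay = cur;

	if (busy_up > max_avg) {
		/* busier than target: step towards max performance */
		if (cur != max_delay)
			new_delay = cur - 1;
		if (new_delay < max_delay)
			new_delay = max_delay;
	} else if (busy_down < min_avg) {
		/* idler than target: step towards min performance */
		if (cur != min_delay)
			new_delay = cur + 1;
		if (new_delay > min_delay)
			new_delay = min_delay;
	}

	return new_delay;
}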
129943cf3bf0SChris Wilson static void vlv_c0_read(struct drm_i915_private *dev_priv,
130043cf3bf0SChris Wilson 			struct intel_rps_ei *ei)
130131685c25SDeepak S {
1302679cb6c1SMika Kuoppala 	ei->ktime = ktime_get_raw();
130343cf3bf0SChris Wilson 	ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT);
130443cf3bf0SChris Wilson 	ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
130531685c25SDeepak S }
130631685c25SDeepak S 
130743cf3bf0SChris Wilson void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
130843cf3bf0SChris Wilson {
1309562d9baeSSagar Arun Kamble 	memset(&dev_priv->gt_pm.rps.ei, 0, sizeof(dev_priv->gt_pm.rps.ei));
131043cf3bf0SChris Wilson }
131143cf3bf0SChris Wilson 
131243cf3bf0SChris Wilson static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
131343cf3bf0SChris Wilson {
1314562d9baeSSagar Arun Kamble 	struct intel_rps *rps = &dev_priv->gt_pm.rps;
1315562d9baeSSagar Arun Kamble 	const struct intel_rps_ei *prev = &rps->ei;
131643cf3bf0SChris Wilson 	struct intel_rps_ei now;
131743cf3bf0SChris Wilson 	u32 events = 0;
131843cf3bf0SChris Wilson 
1319e0e8c7cbSChris Wilson 	if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0)
132043cf3bf0SChris Wilson 		return 0;
132143cf3bf0SChris Wilson 
132243cf3bf0SChris Wilson 	vlv_c0_read(dev_priv, &now);
132331685c25SDeepak S 
1324679cb6c1SMika Kuoppala 	if (prev->ktime) {
1325e0e8c7cbSChris Wilson 		u64 time, c0;
1326569884e3SChris Wilson 		u32 render, media;
1327e0e8c7cbSChris Wilson 
1328679cb6c1SMika Kuoppala 		time = ktime_us_delta(now.ktime, prev->ktime);
13298f68d591SChris Wilson 
1330e0e8c7cbSChris Wilson 		time *= dev_priv->czclk_freq;
1331e0e8c7cbSChris Wilson 
1332e0e8c7cbSChris Wilson 		/* Workload can be split between render + media,
1333e0e8c7cbSChris Wilson 		 * e.g. SwapBuffers being blitted in X after being rendered in
1334e0e8c7cbSChris Wilson 		 * mesa. To account for this we need to combine both engines
1335e0e8c7cbSChris Wilson 		 * into our activity counter.
1336e0e8c7cbSChris Wilson 		 */
1337569884e3SChris Wilson 		render = now.render_c0 - prev->render_c0;
1338569884e3SChris Wilson 		media = now.media_c0 - prev->media_c0;
1339569884e3SChris Wilson 		c0 = max(render, media);
13406b7f6aa7SMika Kuoppala 		c0 *= 1000 * 100 << 8; /* to usecs and scale to threshold% */
1341e0e8c7cbSChris Wilson 
134260548c55SChris Wilson 		if (c0 > time * rps->power.up_threshold)
1343e0e8c7cbSChris Wilson 			events = GEN6_PM_RP_UP_THRESHOLD;
134460548c55SChris Wilson 		else if (c0 < time * rps->power.down_threshold)
1345e0e8c7cbSChris Wilson 			events = GEN6_PM_RP_DOWN_THRESHOLD;
134631685c25SDeepak S 	}
134731685c25SDeepak S 
1348562d9baeSSagar Arun Kamble 	rps->ei = now;
134943cf3bf0SChris Wilson 	return events;
135031685c25SDeepak S }
135131685c25SDeepak S 
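/*
 * Illustrative sketch, not part of the driver: the busyness comparison
 * made above. The C0 residency delta is scaled into the same units as
 * "elapsed time * threshold%" so that a single compare decides whether to
 * fake an up- or down-clock event. Parameter names are hypothetical; the
 * scaling factor matches the handler above.
 */
static inline u32 example_vlv_c0_decide(u32 render_delta, u32 media_delta,
					u64 elapsed_us, u32 czclk_freq,
					u8 up_threshold, u8 down_threshold)
{
	u64 time = elapsed_us * czclk_freq;
	u64 c0 = max(render_delta, media_delta);

	/* to usecs and scale to threshold% */
	c0 *= 1000 * 100 << 8;

	if (c0 > time * up_threshold)
		return GEN6_PM_RP_UP_THRESHOLD;
	else if (c0 < time * down_threshold)
		return GEN6_PM_RP_DOWN_THRESHOLD;

	return 0;
}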
13524912d041SBen Widawsky static void gen6_pm_rps_work(struct work_struct *work)
13533b8d8d91SJesse Barnes {
13542d1013ddSJani Nikula 	struct drm_i915_private *dev_priv =
1355562d9baeSSagar Arun Kamble 		container_of(work, struct drm_i915_private, gt_pm.rps.work);
1356562d9baeSSagar Arun Kamble 	struct intel_rps *rps = &dev_priv->gt_pm.rps;
13577c0a16adSChris Wilson 	bool client_boost = false;
13588d3afd7dSChris Wilson 	int new_delay, adj, min, max;
13597c0a16adSChris Wilson 	u32 pm_iir = 0;
13603b8d8d91SJesse Barnes 
136159cdb63dSDaniel Vetter 	spin_lock_irq(&dev_priv->irq_lock);
1362562d9baeSSagar Arun Kamble 	if (rps->interrupts_enabled) {
1363562d9baeSSagar Arun Kamble 		pm_iir = fetch_and_zero(&rps->pm_iir);
1364562d9baeSSagar Arun Kamble 		client_boost = atomic_read(&rps->num_waiters);
1365d4d70aa5SImre Deak 	}
136659cdb63dSDaniel Vetter 	spin_unlock_irq(&dev_priv->irq_lock);
13674912d041SBen Widawsky 
136860611c13SPaulo Zanoni 	/* Make sure we didn't queue anything we're not going to process. */
1369a6706b45SDeepak S 	WARN_ON(pm_iir & ~dev_priv->pm_rps_events);
13708d3afd7dSChris Wilson 	if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
13717c0a16adSChris Wilson 		goto out;
13723b8d8d91SJesse Barnes 
1373ebb5eb7dSChris Wilson 	mutex_lock(&rps->lock);
13747b9e0ae6SChris Wilson 
137543cf3bf0SChris Wilson 	pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);
137643cf3bf0SChris Wilson 
1377562d9baeSSagar Arun Kamble 	adj = rps->last_adj;
1378562d9baeSSagar Arun Kamble 	new_delay = rps->cur_freq;
1379562d9baeSSagar Arun Kamble 	min = rps->min_freq_softlimit;
1380562d9baeSSagar Arun Kamble 	max = rps->max_freq_softlimit;
13817b92c1bdSChris Wilson 	if (client_boost)
1382562d9baeSSagar Arun Kamble 		max = rps->max_freq;
1383562d9baeSSagar Arun Kamble 	if (client_boost && new_delay < rps->boost_freq) {
1384562d9baeSSagar Arun Kamble 		new_delay = rps->boost_freq;
13858d3afd7dSChris Wilson 		adj = 0;
13868d3afd7dSChris Wilson 	} else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
1387dd75fdc8SChris Wilson 		if (adj > 0)
1388dd75fdc8SChris Wilson 			adj *= 2;
1389edcf284bSChris Wilson 		else /* CHV needs even encode values */
1390edcf284bSChris Wilson 			adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;
13917e79a683SSagar Arun Kamble 
1392562d9baeSSagar Arun Kamble 		if (new_delay >= rps->max_freq_softlimit)
13937e79a683SSagar Arun Kamble 			adj = 0;
13947b92c1bdSChris Wilson 	} else if (client_boost) {
1395f5a4c67dSChris Wilson 		adj = 0;
1396dd75fdc8SChris Wilson 	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
1397562d9baeSSagar Arun Kamble 		if (rps->cur_freq > rps->efficient_freq)
1398562d9baeSSagar Arun Kamble 			new_delay = rps->efficient_freq;
1399562d9baeSSagar Arun Kamble 		else if (rps->cur_freq > rps->min_freq_softlimit)
1400562d9baeSSagar Arun Kamble 			new_delay = rps->min_freq_softlimit;
1401dd75fdc8SChris Wilson 		adj = 0;
1402dd75fdc8SChris Wilson 	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
1403dd75fdc8SChris Wilson 		if (adj < 0)
1404dd75fdc8SChris Wilson 			adj *= 2;
1405edcf284bSChris Wilson 		else /* CHV needs even encode values */
1406edcf284bSChris Wilson 			adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;
14077e79a683SSagar Arun Kamble 
1408562d9baeSSagar Arun Kamble 		if (new_delay <= rps->min_freq_softlimit)
14097e79a683SSagar Arun Kamble 			adj = 0;
1410dd75fdc8SChris Wilson 	} else { /* unknown event */
1411edcf284bSChris Wilson 		adj = 0;
1412dd75fdc8SChris Wilson 	}
14133b8d8d91SJesse Barnes 
1414562d9baeSSagar Arun Kamble 	rps->last_adj = adj;
1415edcf284bSChris Wilson 
14162a8862d2SChris Wilson 	/*
14172a8862d2SChris Wilson 	 * Limit deboosting and boosting to keep ourselves at the extremes
14182a8862d2SChris Wilson 	 * when in the respective power modes (i.e. slowly decrease frequencies
14192a8862d2SChris Wilson 	 * while in the HIGH_POWER zone and slowly increase frequencies while
14202a8862d2SChris Wilson 	 * in the LOW_POWER zone). On idle, we will hit the timeout and drop
14212a8862d2SChris Wilson 	 * to the next level quickly, and conversely if busy we expect to
14222a8862d2SChris Wilson 	 * hit a waitboost and rapidly switch into max power.
14232a8862d2SChris Wilson 	 */
14242a8862d2SChris Wilson 	if ((adj < 0 && rps->power.mode == HIGH_POWER) ||
14252a8862d2SChris Wilson 	    (adj > 0 && rps->power.mode == LOW_POWER))
14262a8862d2SChris Wilson 		rps->last_adj = 0;
14272a8862d2SChris Wilson 
142879249636SBen Widawsky 	/* sysfs frequency interfaces may have snuck in while servicing the
142979249636SBen Widawsky 	 * interrupt
143079249636SBen Widawsky 	 */
1431edcf284bSChris Wilson 	new_delay += adj;
14328d3afd7dSChris Wilson 	new_delay = clamp_t(int, new_delay, min, max);
143327544369SDeepak S 
14349fcee2f7SChris Wilson 	if (intel_set_rps(dev_priv, new_delay)) {
14359fcee2f7SChris Wilson 		DRM_DEBUG_DRIVER("Failed to set new GPU frequency\n");
1436562d9baeSSagar Arun Kamble 		rps->last_adj = 0;
14379fcee2f7SChris Wilson 	}
14383b8d8d91SJesse Barnes 
1439ebb5eb7dSChris Wilson 	mutex_unlock(&rps->lock);
14407c0a16adSChris Wilson 
14417c0a16adSChris Wilson out:
14427c0a16adSChris Wilson 	/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
14437c0a16adSChris Wilson 	spin_lock_irq(&dev_priv->irq_lock);
1444562d9baeSSagar Arun Kamble 	if (rps->interrupts_enabled)
144558820574STvrtko Ursulin 		gen6_unmask_pm_irq(&dev_priv->gt, dev_priv->pm_rps_events);
14467c0a16adSChris Wilson 	spin_unlock_irq(&dev_priv->irq_lock);
14473b8d8d91SJesse Barnes }
14483b8d8d91SJesse Barnes 
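/*
 * Illustrative sketch, not part of the driver: the exponential step used
 * by the work handler above. Consecutive events in the same direction
 * double the step (kept even on CHV, which needs even encode values),
 * and the step restarts at +/-1 (or +/-2) after a direction change.
 * Names are hypothetical.
 */
static inline int example_rps_next_step(int last_adj, bool up, bool is_chv)
{
	int adj = last_adj;

	if (up) {
		if (adj > 0)
			adj *= 2;
		else
			adj = is_chv ? 2 : 1;
	} else {
		if (adj < 0)
			adj *= 2;
		else
			adj = is_chv ? -2 : -1;
	}

	return adj;
}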
1449e3689190SBen Widawsky 
1450e3689190SBen Widawsky /**
1451e3689190SBen Widawsky  * ivybridge_parity_work - Workqueue called when a parity error interrupt
1452e3689190SBen Widawsky  * occurred.
1453e3689190SBen Widawsky  * @work: workqueue struct
1454e3689190SBen Widawsky  *
1455e3689190SBen Widawsky  * Doesn't actually do anything except notify userspace. As a consequence of
1456e3689190SBen Widawsky  * this event, userspace should try to remap the bad rows since statistically
1457e3689190SBen Widawsky  * the same row is likely to go bad again.
1458e3689190SBen Widawsky  */
1459e3689190SBen Widawsky static void ivybridge_parity_work(struct work_struct *work)
1460e3689190SBen Widawsky {
14612d1013ddSJani Nikula 	struct drm_i915_private *dev_priv =
1462cefcff8fSJoonas Lahtinen 		container_of(work, typeof(*dev_priv), l3_parity.error_work);
1463e3689190SBen Widawsky 	u32 error_status, row, bank, subbank;
146435a85ac6SBen Widawsky 	char *parity_event[6];
1465a9c287c9SJani Nikula 	u32 misccpctl;
1466a9c287c9SJani Nikula 	u8 slice = 0;
1467e3689190SBen Widawsky 
1468e3689190SBen Widawsky 	/* We must turn off DOP level clock gating to access the L3 registers.
1469e3689190SBen Widawsky 	 * In order to prevent a get/put style interface, acquire struct mutex
1470e3689190SBen Widawsky 	 * any time we access those registers.
1471e3689190SBen Widawsky 	 */
147291c8a326SChris Wilson 	mutex_lock(&dev_priv->drm.struct_mutex);
1473e3689190SBen Widawsky 
147435a85ac6SBen Widawsky 	/* If we've screwed up tracking, just let the interrupt fire again */
147535a85ac6SBen Widawsky 	if (WARN_ON(!dev_priv->l3_parity.which_slice))
147635a85ac6SBen Widawsky 		goto out;
147735a85ac6SBen Widawsky 
1478e3689190SBen Widawsky 	misccpctl = I915_READ(GEN7_MISCCPCTL);
1479e3689190SBen Widawsky 	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
1480e3689190SBen Widawsky 	POSTING_READ(GEN7_MISCCPCTL);
1481e3689190SBen Widawsky 
148235a85ac6SBen Widawsky 	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
1483f0f59a00SVille Syrjälä 		i915_reg_t reg;
148435a85ac6SBen Widawsky 
148535a85ac6SBen Widawsky 		slice--;
14862d1fe073SJoonas Lahtinen 		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv)))
148735a85ac6SBen Widawsky 			break;
148835a85ac6SBen Widawsky 
148935a85ac6SBen Widawsky 		dev_priv->l3_parity.which_slice &= ~(1<<slice);
149035a85ac6SBen Widawsky 
14916fa1c5f1SVille Syrjälä 		reg = GEN7_L3CDERRST1(slice);
149235a85ac6SBen Widawsky 
149335a85ac6SBen Widawsky 		error_status = I915_READ(reg);
1494e3689190SBen Widawsky 		row = GEN7_PARITY_ERROR_ROW(error_status);
1495e3689190SBen Widawsky 		bank = GEN7_PARITY_ERROR_BANK(error_status);
1496e3689190SBen Widawsky 		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
1497e3689190SBen Widawsky 
149835a85ac6SBen Widawsky 		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
149935a85ac6SBen Widawsky 		POSTING_READ(reg);
1500e3689190SBen Widawsky 
1501cce723edSBen Widawsky 		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
1502e3689190SBen Widawsky 		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
1503e3689190SBen Widawsky 		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
1504e3689190SBen Widawsky 		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
150535a85ac6SBen Widawsky 		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
150635a85ac6SBen Widawsky 		parity_event[5] = NULL;
1507e3689190SBen Widawsky 
150891c8a326SChris Wilson 		kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj,
1509e3689190SBen Widawsky 				   KOBJ_CHANGE, parity_event);
1510e3689190SBen Widawsky 
151135a85ac6SBen Widawsky 		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
151235a85ac6SBen Widawsky 			  slice, row, bank, subbank);
1513e3689190SBen Widawsky 
151435a85ac6SBen Widawsky 		kfree(parity_event[4]);
1515e3689190SBen Widawsky 		kfree(parity_event[3]);
1516e3689190SBen Widawsky 		kfree(parity_event[2]);
1517e3689190SBen Widawsky 		kfree(parity_event[1]);
1518e3689190SBen Widawsky 	}
1519e3689190SBen Widawsky 
152035a85ac6SBen Widawsky 	I915_WRITE(GEN7_MISCCPCTL, misccpctl);
152135a85ac6SBen Widawsky 
152235a85ac6SBen Widawsky out:
152335a85ac6SBen Widawsky 	WARN_ON(dev_priv->l3_parity.which_slice);
15244cb21832SDaniel Vetter 	spin_lock_irq(&dev_priv->irq_lock);
15252d1fe073SJoonas Lahtinen 	gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
15264cb21832SDaniel Vetter 	spin_unlock_irq(&dev_priv->irq_lock);
152735a85ac6SBen Widawsky 
152891c8a326SChris Wilson 	mutex_unlock(&dev_priv->drm.struct_mutex);
152935a85ac6SBen Widawsky }
153035a85ac6SBen Widawsky 
1531261e40b8SVille Syrjälä static void ivybridge_parity_error_irq_handler(struct drm_i915_private *dev_priv,
1532261e40b8SVille Syrjälä 					       u32 iir)
1533e3689190SBen Widawsky {
1534261e40b8SVille Syrjälä 	if (!HAS_L3_DPF(dev_priv))
1535e3689190SBen Widawsky 		return;
1536e3689190SBen Widawsky 
1537d0ecd7e2SDaniel Vetter 	spin_lock(&dev_priv->irq_lock);
1538261e40b8SVille Syrjälä 	gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
1539d0ecd7e2SDaniel Vetter 	spin_unlock(&dev_priv->irq_lock);
1540e3689190SBen Widawsky 
1541261e40b8SVille Syrjälä 	iir &= GT_PARITY_ERROR(dev_priv);
154235a85ac6SBen Widawsky 	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
154335a85ac6SBen Widawsky 		dev_priv->l3_parity.which_slice |= 1 << 1;
154435a85ac6SBen Widawsky 
154535a85ac6SBen Widawsky 	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
154635a85ac6SBen Widawsky 		dev_priv->l3_parity.which_slice |= 1 << 0;
154735a85ac6SBen Widawsky 
1548a4da4fa4SDaniel Vetter 	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
1549e3689190SBen Widawsky }
1550e3689190SBen Widawsky 
1551261e40b8SVille Syrjälä static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv,
1552f1af8fc1SPaulo Zanoni 			       u32 gt_iir)
1553f1af8fc1SPaulo Zanoni {
1554f8973c21SChris Wilson 	if (gt_iir & GT_RENDER_USER_INTERRUPT)
15558a68d464SChris Wilson 		intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);
1556f1af8fc1SPaulo Zanoni 	if (gt_iir & ILK_BSD_USER_INTERRUPT)
15578a68d464SChris Wilson 		intel_engine_breadcrumbs_irq(dev_priv->engine[VCS0]);
1558f1af8fc1SPaulo Zanoni }
1559f1af8fc1SPaulo Zanoni 
1560261e40b8SVille Syrjälä static void snb_gt_irq_handler(struct drm_i915_private *dev_priv,
1561e7b4c6b1SDaniel Vetter 			       u32 gt_iir)
1562e7b4c6b1SDaniel Vetter {
1563f8973c21SChris Wilson 	if (gt_iir & GT_RENDER_USER_INTERRUPT)
15648a68d464SChris Wilson 		intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);
1565cc609d5dSBen Widawsky 	if (gt_iir & GT_BSD_USER_INTERRUPT)
15668a68d464SChris Wilson 		intel_engine_breadcrumbs_irq(dev_priv->engine[VCS0]);
1567cc609d5dSBen Widawsky 	if (gt_iir & GT_BLT_USER_INTERRUPT)
15688a68d464SChris Wilson 		intel_engine_breadcrumbs_irq(dev_priv->engine[BCS0]);
1569e7b4c6b1SDaniel Vetter 
1570cc609d5dSBen Widawsky 	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
1571cc609d5dSBen Widawsky 		      GT_BSD_CS_ERROR_INTERRUPT |
1572aaecdf61SDaniel Vetter 		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
1573aaecdf61SDaniel Vetter 		DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);
1574e3689190SBen Widawsky 
1575261e40b8SVille Syrjälä 	if (gt_iir & GT_PARITY_ERROR(dev_priv))
1576261e40b8SVille Syrjälä 		ivybridge_parity_error_irq_handler(dev_priv, gt_iir);
1577e7b4c6b1SDaniel Vetter }
1578e7b4c6b1SDaniel Vetter 
15795d3d69d5SChris Wilson static void
158051f6b0f9SChris Wilson gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir)
1581fbcc1a0cSNick Hoath {
158231de7350SChris Wilson 	bool tasklet = false;
1583f747026cSChris Wilson 
1584fd8526e5SChris Wilson 	if (iir & GT_CONTEXT_SWITCH_INTERRUPT)
15858ea397faSChris Wilson 		tasklet = true;
158631de7350SChris Wilson 
158751f6b0f9SChris Wilson 	if (iir & GT_RENDER_USER_INTERRUPT) {
158852c0fdb2SChris Wilson 		intel_engine_breadcrumbs_irq(engine);
15894c6ce5c9SChris Wilson 		tasklet |= intel_engine_needs_breadcrumb_tasklet(engine);
159031de7350SChris Wilson 	}
159131de7350SChris Wilson 
159231de7350SChris Wilson 	if (tasklet)
1593fd8526e5SChris Wilson 		tasklet_hi_schedule(&engine->execlists.tasklet);
1594fbcc1a0cSNick Hoath }
1595fbcc1a0cSNick Hoath 
15962e4a5b25SChris Wilson static void gen8_gt_irq_ack(struct drm_i915_private *i915,
159755ef72f2SChris Wilson 			    u32 master_ctl, u32 gt_iir[4])
1598abd58f01SBen Widawsky {
159925286aacSDaniele Ceraolo Spurio 	void __iomem * const regs = i915->uncore.regs;
16002e4a5b25SChris Wilson 
1601f0fd96f5SChris Wilson #define GEN8_GT_IRQS (GEN8_GT_RCS_IRQ | \
1602f0fd96f5SChris Wilson 		      GEN8_GT_BCS_IRQ | \
16038a68d464SChris Wilson 		      GEN8_GT_VCS0_IRQ | \
1604f0fd96f5SChris Wilson 		      GEN8_GT_VCS1_IRQ | \
1605f0fd96f5SChris Wilson 		      GEN8_GT_VECS_IRQ | \
1606f0fd96f5SChris Wilson 		      GEN8_GT_PM_IRQ | \
1607f0fd96f5SChris Wilson 		      GEN8_GT_GUC_IRQ)
1608f0fd96f5SChris Wilson 
1609abd58f01SBen Widawsky 	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
16102e4a5b25SChris Wilson 		gt_iir[0] = raw_reg_read(regs, GEN8_GT_IIR(0));
16112e4a5b25SChris Wilson 		if (likely(gt_iir[0]))
16122e4a5b25SChris Wilson 			raw_reg_write(regs, GEN8_GT_IIR(0), gt_iir[0]);
1613abd58f01SBen Widawsky 	}
1614abd58f01SBen Widawsky 
16158a68d464SChris Wilson 	if (master_ctl & (GEN8_GT_VCS0_IRQ | GEN8_GT_VCS1_IRQ)) {
16162e4a5b25SChris Wilson 		gt_iir[1] = raw_reg_read(regs, GEN8_GT_IIR(1));
16172e4a5b25SChris Wilson 		if (likely(gt_iir[1]))
16182e4a5b25SChris Wilson 			raw_reg_write(regs, GEN8_GT_IIR(1), gt_iir[1]);
161974cdb337SChris Wilson 	}
162074cdb337SChris Wilson 
162126705e20SSagar Arun Kamble 	if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
16222e4a5b25SChris Wilson 		gt_iir[2] = raw_reg_read(regs, GEN8_GT_IIR(2));
1623f4de7794SChris Wilson 		if (likely(gt_iir[2]))
1624f4de7794SChris Wilson 			raw_reg_write(regs, GEN8_GT_IIR(2), gt_iir[2]);
16250961021aSBen Widawsky 	}
16262e4a5b25SChris Wilson 
16272e4a5b25SChris Wilson 	if (master_ctl & GEN8_GT_VECS_IRQ) {
16282e4a5b25SChris Wilson 		gt_iir[3] = raw_reg_read(regs, GEN8_GT_IIR(3));
16292e4a5b25SChris Wilson 		if (likely(gt_iir[3]))
16302e4a5b25SChris Wilson 			raw_reg_write(regs, GEN8_GT_IIR(3), gt_iir[3]);
163155ef72f2SChris Wilson 	}
1632abd58f01SBen Widawsky }
1633abd58f01SBen Widawsky 
16342e4a5b25SChris Wilson static void gen8_gt_irq_handler(struct drm_i915_private *i915,
1635f0fd96f5SChris Wilson 				u32 master_ctl, u32 gt_iir[4])
1636e30e251aSVille Syrjälä {
1637f0fd96f5SChris Wilson 	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
16388a68d464SChris Wilson 		gen8_cs_irq_handler(i915->engine[RCS0],
163951f6b0f9SChris Wilson 				    gt_iir[0] >> GEN8_RCS_IRQ_SHIFT);
16408a68d464SChris Wilson 		gen8_cs_irq_handler(i915->engine[BCS0],
164151f6b0f9SChris Wilson 				    gt_iir[0] >> GEN8_BCS_IRQ_SHIFT);
1642e30e251aSVille Syrjälä 	}
1643e30e251aSVille Syrjälä 
16448a68d464SChris Wilson 	if (master_ctl & (GEN8_GT_VCS0_IRQ | GEN8_GT_VCS1_IRQ)) {
16458a68d464SChris Wilson 		gen8_cs_irq_handler(i915->engine[VCS0],
16468a68d464SChris Wilson 				    gt_iir[1] >> GEN8_VCS0_IRQ_SHIFT);
16478a68d464SChris Wilson 		gen8_cs_irq_handler(i915->engine[VCS1],
164851f6b0f9SChris Wilson 				    gt_iir[1] >> GEN8_VCS1_IRQ_SHIFT);
1649e30e251aSVille Syrjälä 	}
1650e30e251aSVille Syrjälä 
1651f0fd96f5SChris Wilson 	if (master_ctl & GEN8_GT_VECS_IRQ) {
16528a68d464SChris Wilson 		gen8_cs_irq_handler(i915->engine[VECS0],
165351f6b0f9SChris Wilson 				    gt_iir[3] >> GEN8_VECS_IRQ_SHIFT);
1654f0fd96f5SChris Wilson 	}
1655e30e251aSVille Syrjälä 
1656f0fd96f5SChris Wilson 	if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
16572e4a5b25SChris Wilson 		gen6_rps_irq_handler(i915, gt_iir[2]);
1658*8b5689d7SDaniele Ceraolo Spurio 		guc_irq_handler(&i915->gt.uc.guc, gt_iir[2] >> 16);
1659e30e251aSVille Syrjälä 	}
1660f0fd96f5SChris Wilson }
1661e30e251aSVille Syrjälä 
1662af92058fSVille Syrjälä static bool gen11_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1663121e758eSDhinakaran Pandiyan {
1664af92058fSVille Syrjälä 	switch (pin) {
1665af92058fSVille Syrjälä 	case HPD_PORT_C:
1666121e758eSDhinakaran Pandiyan 		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC1);
1667af92058fSVille Syrjälä 	case HPD_PORT_D:
1668121e758eSDhinakaran Pandiyan 		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC2);
1669af92058fSVille Syrjälä 	case HPD_PORT_E:
1670121e758eSDhinakaran Pandiyan 		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC3);
1671af92058fSVille Syrjälä 	case HPD_PORT_F:
1672121e758eSDhinakaran Pandiyan 		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC4);
1673121e758eSDhinakaran Pandiyan 	default:
1674121e758eSDhinakaran Pandiyan 		return false;
1675121e758eSDhinakaran Pandiyan 	}
1676121e758eSDhinakaran Pandiyan }
1677121e758eSDhinakaran Pandiyan 
1678af92058fSVille Syrjälä static bool bxt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
167963c88d22SImre Deak {
1680af92058fSVille Syrjälä 	switch (pin) {
1681af92058fSVille Syrjälä 	case HPD_PORT_A:
1682195baa06SVille Syrjälä 		return val & PORTA_HOTPLUG_LONG_DETECT;
1683af92058fSVille Syrjälä 	case HPD_PORT_B:
168463c88d22SImre Deak 		return val & PORTB_HOTPLUG_LONG_DETECT;
1685af92058fSVille Syrjälä 	case HPD_PORT_C:
168663c88d22SImre Deak 		return val & PORTC_HOTPLUG_LONG_DETECT;
168763c88d22SImre Deak 	default:
168863c88d22SImre Deak 		return false;
168963c88d22SImre Deak 	}
169063c88d22SImre Deak }
169163c88d22SImre Deak 
1692af92058fSVille Syrjälä static bool icp_ddi_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
169331604222SAnusha Srivatsa {
1694af92058fSVille Syrjälä 	switch (pin) {
1695af92058fSVille Syrjälä 	case HPD_PORT_A:
169631604222SAnusha Srivatsa 		return val & ICP_DDIA_HPD_LONG_DETECT;
1697af92058fSVille Syrjälä 	case HPD_PORT_B:
169831604222SAnusha Srivatsa 		return val & ICP_DDIB_HPD_LONG_DETECT;
169931604222SAnusha Srivatsa 	default:
170031604222SAnusha Srivatsa 		return false;
170131604222SAnusha Srivatsa 	}
170231604222SAnusha Srivatsa }
170331604222SAnusha Srivatsa 
1704af92058fSVille Syrjälä static bool icp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
170531604222SAnusha Srivatsa {
1706af92058fSVille Syrjälä 	switch (pin) {
1707af92058fSVille Syrjälä 	case HPD_PORT_C:
170831604222SAnusha Srivatsa 		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC1);
1709af92058fSVille Syrjälä 	case HPD_PORT_D:
171031604222SAnusha Srivatsa 		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC2);
1711af92058fSVille Syrjälä 	case HPD_PORT_E:
171231604222SAnusha Srivatsa 		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC3);
1713af92058fSVille Syrjälä 	case HPD_PORT_F:
171431604222SAnusha Srivatsa 		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC4);
171531604222SAnusha Srivatsa 	default:
171631604222SAnusha Srivatsa 		return false;
171731604222SAnusha Srivatsa 	}
171831604222SAnusha Srivatsa }
171931604222SAnusha Srivatsa 
1720af92058fSVille Syrjälä static bool spt_port_hotplug2_long_detect(enum hpd_pin pin, u32 val)
17216dbf30ceSVille Syrjälä {
1722af92058fSVille Syrjälä 	switch (pin) {
1723af92058fSVille Syrjälä 	case HPD_PORT_E:
17246dbf30ceSVille Syrjälä 		return val & PORTE_HOTPLUG_LONG_DETECT;
17256dbf30ceSVille Syrjälä 	default:
17266dbf30ceSVille Syrjälä 		return false;
17276dbf30ceSVille Syrjälä 	}
17286dbf30ceSVille Syrjälä }
17296dbf30ceSVille Syrjälä 
1730af92058fSVille Syrjälä static bool spt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
173174c0b395SVille Syrjälä {
1732af92058fSVille Syrjälä 	switch (pin) {
1733af92058fSVille Syrjälä 	case HPD_PORT_A:
173474c0b395SVille Syrjälä 		return val & PORTA_HOTPLUG_LONG_DETECT;
1735af92058fSVille Syrjälä 	case HPD_PORT_B:
173674c0b395SVille Syrjälä 		return val & PORTB_HOTPLUG_LONG_DETECT;
1737af92058fSVille Syrjälä 	case HPD_PORT_C:
173874c0b395SVille Syrjälä 		return val & PORTC_HOTPLUG_LONG_DETECT;
1739af92058fSVille Syrjälä 	case HPD_PORT_D:
174074c0b395SVille Syrjälä 		return val & PORTD_HOTPLUG_LONG_DETECT;
174174c0b395SVille Syrjälä 	default:
174274c0b395SVille Syrjälä 		return false;
174374c0b395SVille Syrjälä 	}
174474c0b395SVille Syrjälä }
174574c0b395SVille Syrjälä 
1746af92058fSVille Syrjälä static bool ilk_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1747e4ce95aaSVille Syrjälä {
1748af92058fSVille Syrjälä 	switch (pin) {
1749af92058fSVille Syrjälä 	case HPD_PORT_A:
1750e4ce95aaSVille Syrjälä 		return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
1751e4ce95aaSVille Syrjälä 	default:
1752e4ce95aaSVille Syrjälä 		return false;
1753e4ce95aaSVille Syrjälä 	}
1754e4ce95aaSVille Syrjälä }
1755e4ce95aaSVille Syrjälä 
1756af92058fSVille Syrjälä static bool pch_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
175713cf5504SDave Airlie {
1758af92058fSVille Syrjälä 	switch (pin) {
1759af92058fSVille Syrjälä 	case HPD_PORT_B:
1760676574dfSJani Nikula 		return val & PORTB_HOTPLUG_LONG_DETECT;
1761af92058fSVille Syrjälä 	case HPD_PORT_C:
1762676574dfSJani Nikula 		return val & PORTC_HOTPLUG_LONG_DETECT;
1763af92058fSVille Syrjälä 	case HPD_PORT_D:
1764676574dfSJani Nikula 		return val & PORTD_HOTPLUG_LONG_DETECT;
1765676574dfSJani Nikula 	default:
1766676574dfSJani Nikula 		return false;
176713cf5504SDave Airlie 	}
176813cf5504SDave Airlie }
176913cf5504SDave Airlie 
1770af92058fSVille Syrjälä static bool i9xx_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
177113cf5504SDave Airlie {
1772af92058fSVille Syrjälä 	switch (pin) {
1773af92058fSVille Syrjälä 	case HPD_PORT_B:
1774676574dfSJani Nikula 		return val & PORTB_HOTPLUG_INT_LONG_PULSE;
1775af92058fSVille Syrjälä 	case HPD_PORT_C:
1776676574dfSJani Nikula 		return val & PORTC_HOTPLUG_INT_LONG_PULSE;
1777af92058fSVille Syrjälä 	case HPD_PORT_D:
1778676574dfSJani Nikula 		return val & PORTD_HOTPLUG_INT_LONG_PULSE;
1779676574dfSJani Nikula 	default:
1780676574dfSJani Nikula 		return false;
178113cf5504SDave Airlie 	}
178213cf5504SDave Airlie }
178313cf5504SDave Airlie 
178442db67d6SVille Syrjälä /*
178542db67d6SVille Syrjälä  * Get a bit mask of pins that have triggered, and which ones may be long.
178642db67d6SVille Syrjälä  * This can be called multiple times with the same masks to accumulate
178742db67d6SVille Syrjälä  * hotplug detection results from several registers.
178842db67d6SVille Syrjälä  *
178942db67d6SVille Syrjälä  * Note that the caller is expected to zero out the masks initially.
179042db67d6SVille Syrjälä  */
1791cf53902fSRodrigo Vivi static void intel_get_hpd_pins(struct drm_i915_private *dev_priv,
1792cf53902fSRodrigo Vivi 			       u32 *pin_mask, u32 *long_mask,
17938c841e57SJani Nikula 			       u32 hotplug_trigger, u32 dig_hotplug_reg,
1794fd63e2a9SImre Deak 			       const u32 hpd[HPD_NUM_PINS],
1795af92058fSVille Syrjälä 			       bool long_pulse_detect(enum hpd_pin pin, u32 val))
1796676574dfSJani Nikula {
1797e9be2850SVille Syrjälä 	enum hpd_pin pin;
1798676574dfSJani Nikula 
1799e9be2850SVille Syrjälä 	for_each_hpd_pin(pin) {
1800e9be2850SVille Syrjälä 		if ((hpd[pin] & hotplug_trigger) == 0)
18018c841e57SJani Nikula 			continue;
18028c841e57SJani Nikula 
1803e9be2850SVille Syrjälä 		*pin_mask |= BIT(pin);
1804676574dfSJani Nikula 
1805af92058fSVille Syrjälä 		if (long_pulse_detect(pin, dig_hotplug_reg))
1806e9be2850SVille Syrjälä 			*long_mask |= BIT(pin);
1807676574dfSJani Nikula 	}
1808676574dfSJani Nikula 
1809f88f0478SVille Syrjälä 	DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x, long 0x%08x\n",
1810f88f0478SVille Syrjälä 			 hotplug_trigger, dig_hotplug_reg, *pin_mask, *long_mask);
1811676574dfSJani Nikula 
1812676574dfSJani Nikula }
1813676574dfSJani Nikula 
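/*
 * Illustrative sketch, not part of the driver: the accumulation pattern
 * the comment above describes. The caller zeroes the masks once and may
 * then gather results from several hotplug registers before acting on
 * them. The particular triggers, table and detect callback used here are
 * stand-ins; real callers pick the ones matching their platform.
 */
static inline void example_collect_hpd(struct drm_i915_private *dev_priv,
				       u32 trigger_1, u32 dig_reg_1,
				       u32 trigger_2, u32 dig_reg_2)
{
	u32 pin_mask = 0, long_mask = 0;

	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
			   trigger_1, dig_reg_1,
			   hpd_ilk, ilk_port_hotplug_long_detect);
	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
			   trigger_2, dig_reg_2,
			   hpd_ilk, ilk_port_hotplug_long_detect);

	/* pin_mask/long_mask now cover both registers */
	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
}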
181491d14251STvrtko Ursulin static void gmbus_irq_handler(struct drm_i915_private *dev_priv)
1815515ac2bbSDaniel Vetter {
181628c70f16SDaniel Vetter 	wake_up_all(&dev_priv->gmbus_wait_queue);
1817515ac2bbSDaniel Vetter }
1818515ac2bbSDaniel Vetter 
181991d14251STvrtko Ursulin static void dp_aux_irq_handler(struct drm_i915_private *dev_priv)
1820ce99c256SDaniel Vetter {
18219ee32feaSDaniel Vetter 	wake_up_all(&dev_priv->gmbus_wait_queue);
1822ce99c256SDaniel Vetter }
1823ce99c256SDaniel Vetter 
18248bf1e9f1SShuang He #if defined(CONFIG_DEBUG_FS)
182591d14251STvrtko Ursulin static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
182691d14251STvrtko Ursulin 					 enum pipe pipe,
1827a9c287c9SJani Nikula 					 u32 crc0, u32 crc1,
1828a9c287c9SJani Nikula 					 u32 crc2, u32 crc3,
1829a9c287c9SJani Nikula 					 u32 crc4)
18308bf1e9f1SShuang He {
18318bf1e9f1SShuang He 	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
18328c6b709dSTomeu Vizoso 	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
18335cee6c45SVille Syrjälä 	u32 crcs[5] = { crc0, crc1, crc2, crc3, crc4 };
18345cee6c45SVille Syrjälä 
18355cee6c45SVille Syrjälä 	trace_intel_pipe_crc(crtc, crcs);
1836b2c88f5bSDamien Lespiau 
1837d538bbdfSDamien Lespiau 	spin_lock(&pipe_crc->lock);
18388c6b709dSTomeu Vizoso 	/*
18398c6b709dSTomeu Vizoso 	 * For some not yet identified reason, the first CRC is
18408c6b709dSTomeu Vizoso 	 * bonkers. So let's just wait for the next vblank and read
18418c6b709dSTomeu Vizoso 	 * out the buggy result.
18428c6b709dSTomeu Vizoso 	 *
1843163e8aecSRodrigo Vivi 	 * On GEN8+ sometimes the second CRC is bonkers as well, so
18448c6b709dSTomeu Vizoso 	 * don't trust that one either.
18458c6b709dSTomeu Vizoso 	 */
1846033b7a23SMaarten Lankhorst 	if (pipe_crc->skipped <= 0 ||
1847163e8aecSRodrigo Vivi 	    (INTEL_GEN(dev_priv) >= 8 && pipe_crc->skipped == 1)) {
18488c6b709dSTomeu Vizoso 		pipe_crc->skipped++;
18498c6b709dSTomeu Vizoso 		spin_unlock(&pipe_crc->lock);
18508c6b709dSTomeu Vizoso 		return;
18518c6b709dSTomeu Vizoso 	}
18528c6b709dSTomeu Vizoso 	spin_unlock(&pipe_crc->lock);
18536cc42152SMaarten Lankhorst 
1854246ee524STomeu Vizoso 	drm_crtc_add_crc_entry(&crtc->base, true,
1855ca814b25SDaniel Vetter 				drm_crtc_accurate_vblank_count(&crtc->base),
1856246ee524STomeu Vizoso 				crcs);
18578c6b709dSTomeu Vizoso }
1858277de95eSDaniel Vetter #else
1859277de95eSDaniel Vetter static inline void
186091d14251STvrtko Ursulin display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
186191d14251STvrtko Ursulin 			     enum pipe pipe,
1862a9c287c9SJani Nikula 			     u32 crc0, u32 crc1,
1863a9c287c9SJani Nikula 			     u32 crc2, u32 crc3,
1864a9c287c9SJani Nikula 			     u32 crc4) {}
1865277de95eSDaniel Vetter #endif
1866eba94eb9SDaniel Vetter 
1867277de95eSDaniel Vetter 
186891d14251STvrtko Ursulin static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
186991d14251STvrtko Ursulin 				     enum pipe pipe)
18705a69b89fSDaniel Vetter {
187191d14251STvrtko Ursulin 	display_pipe_crc_irq_handler(dev_priv, pipe,
18725a69b89fSDaniel Vetter 				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
18735a69b89fSDaniel Vetter 				     0, 0, 0, 0);
18745a69b89fSDaniel Vetter }
18755a69b89fSDaniel Vetter 
187691d14251STvrtko Ursulin static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
187791d14251STvrtko Ursulin 				     enum pipe pipe)
1878eba94eb9SDaniel Vetter {
187991d14251STvrtko Ursulin 	display_pipe_crc_irq_handler(dev_priv, pipe,
1880eba94eb9SDaniel Vetter 				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1881eba94eb9SDaniel Vetter 				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
1882eba94eb9SDaniel Vetter 				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
1883eba94eb9SDaniel Vetter 				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
18848bc5e955SDaniel Vetter 				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
1885eba94eb9SDaniel Vetter }
18865b3a856bSDaniel Vetter 
188791d14251STvrtko Ursulin static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
188891d14251STvrtko Ursulin 				      enum pipe pipe)
18895b3a856bSDaniel Vetter {
1890a9c287c9SJani Nikula 	u32 res1, res2;
18910b5c5ed0SDaniel Vetter 
189291d14251STvrtko Ursulin 	if (INTEL_GEN(dev_priv) >= 3)
18930b5c5ed0SDaniel Vetter 		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
18940b5c5ed0SDaniel Vetter 	else
18950b5c5ed0SDaniel Vetter 		res1 = 0;
18960b5c5ed0SDaniel Vetter 
189791d14251STvrtko Ursulin 	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
18980b5c5ed0SDaniel Vetter 		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
18990b5c5ed0SDaniel Vetter 	else
19000b5c5ed0SDaniel Vetter 		res2 = 0;
19015b3a856bSDaniel Vetter 
190291d14251STvrtko Ursulin 	display_pipe_crc_irq_handler(dev_priv, pipe,
19030b5c5ed0SDaniel Vetter 				     I915_READ(PIPE_CRC_RES_RED(pipe)),
19040b5c5ed0SDaniel Vetter 				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
19050b5c5ed0SDaniel Vetter 				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
19060b5c5ed0SDaniel Vetter 				     res1, res2);
19075b3a856bSDaniel Vetter }
19088bf1e9f1SShuang He 
19091403c0d4SPaulo Zanoni /* The RPS events need forcewake, so we add them to a work queue and mask their
19101403c0d4SPaulo Zanoni  * IMR bits until the work is done. Other interrupts can be processed without
19111403c0d4SPaulo Zanoni  * the work queue. */
191258820574STvrtko Ursulin static void gen11_rps_irq_handler(struct intel_gt *gt, u32 pm_iir)
1913a087bafeSMika Kuoppala {
191458820574STvrtko Ursulin 	struct drm_i915_private *i915 = gt->i915;
1915a087bafeSMika Kuoppala 	struct intel_rps *rps = &i915->gt_pm.rps;
1916a087bafeSMika Kuoppala 	const u32 events = i915->pm_rps_events & pm_iir;
1917a087bafeSMika Kuoppala 
1918a087bafeSMika Kuoppala 	lockdep_assert_held(&i915->irq_lock);
1919a087bafeSMika Kuoppala 
1920a087bafeSMika Kuoppala 	if (unlikely(!events))
1921a087bafeSMika Kuoppala 		return;
1922a087bafeSMika Kuoppala 
192358820574STvrtko Ursulin 	gen6_mask_pm_irq(gt, events);
1924a087bafeSMika Kuoppala 
1925a087bafeSMika Kuoppala 	if (!rps->interrupts_enabled)
1926a087bafeSMika Kuoppala 		return;
1927a087bafeSMika Kuoppala 
1928a087bafeSMika Kuoppala 	rps->pm_iir |= events;
1929a087bafeSMika Kuoppala 	schedule_work(&rps->work);
1930a087bafeSMika Kuoppala }
1931a087bafeSMika Kuoppala 
19321403c0d4SPaulo Zanoni static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
1933baf02a1fSBen Widawsky {
1934562d9baeSSagar Arun Kamble 	struct intel_rps *rps = &dev_priv->gt_pm.rps;
1935562d9baeSSagar Arun Kamble 
1936a6706b45SDeepak S 	if (pm_iir & dev_priv->pm_rps_events) {
193759cdb63dSDaniel Vetter 		spin_lock(&dev_priv->irq_lock);
193858820574STvrtko Ursulin 		gen6_mask_pm_irq(&dev_priv->gt,
193958820574STvrtko Ursulin 				 pm_iir & dev_priv->pm_rps_events);
1940562d9baeSSagar Arun Kamble 		if (rps->interrupts_enabled) {
1941562d9baeSSagar Arun Kamble 			rps->pm_iir |= pm_iir & dev_priv->pm_rps_events;
1942562d9baeSSagar Arun Kamble 			schedule_work(&rps->work);
194341a05a3aSDaniel Vetter 		}
1944d4d70aa5SImre Deak 		spin_unlock(&dev_priv->irq_lock);
1945d4d70aa5SImre Deak 	}
1946baf02a1fSBen Widawsky 
1947bca2bf2aSPandiyan, Dhinakaran 	if (INTEL_GEN(dev_priv) >= 8)
1948c9a9a268SImre Deak 		return;
1949c9a9a268SImre Deak 
195012638c57SBen Widawsky 	if (pm_iir & PM_VEBOX_USER_INTERRUPT)
19518a68d464SChris Wilson 		intel_engine_breadcrumbs_irq(dev_priv->engine[VECS0]);
195212638c57SBen Widawsky 
1953aaecdf61SDaniel Vetter 	if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
1954aaecdf61SDaniel Vetter 		DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
195512638c57SBen Widawsky }
1956baf02a1fSBen Widawsky 
1957633023a4SDaniele Ceraolo Spurio static void guc_irq_handler(struct intel_guc *guc, u16 iir)
195826705e20SSagar Arun Kamble {
1959633023a4SDaniele Ceraolo Spurio 	if (iir & GUC_INTR_GUC2HOST)
1960633023a4SDaniele Ceraolo Spurio 		intel_guc_to_host_event_handler(guc);
196154c52a84SOscar Mateo }
196254c52a84SOscar Mateo 
196344d9241eSVille Syrjälä static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
196444d9241eSVille Syrjälä {
196544d9241eSVille Syrjälä 	enum pipe pipe;
196644d9241eSVille Syrjälä 
196744d9241eSVille Syrjälä 	for_each_pipe(dev_priv, pipe) {
196844d9241eSVille Syrjälä 		I915_WRITE(PIPESTAT(pipe),
196944d9241eSVille Syrjälä 			   PIPESTAT_INT_STATUS_MASK |
197044d9241eSVille Syrjälä 			   PIPE_FIFO_UNDERRUN_STATUS);
197144d9241eSVille Syrjälä 
197244d9241eSVille Syrjälä 		dev_priv->pipestat_irq_mask[pipe] = 0;
197344d9241eSVille Syrjälä 	}
197444d9241eSVille Syrjälä }
197544d9241eSVille Syrjälä 
1976eb64343cSVille Syrjälä static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
197791d14251STvrtko Ursulin 				  u32 iir, u32 pipe_stats[I915_MAX_PIPES])
19787e231dbeSJesse Barnes {
19797e231dbeSJesse Barnes 	int pipe;
19807e231dbeSJesse Barnes 
198158ead0d7SImre Deak 	spin_lock(&dev_priv->irq_lock);
19821ca993d2SVille Syrjälä 
19831ca993d2SVille Syrjälä 	if (!dev_priv->display_irqs_enabled) {
19841ca993d2SVille Syrjälä 		spin_unlock(&dev_priv->irq_lock);
19851ca993d2SVille Syrjälä 		return;
19861ca993d2SVille Syrjälä 	}
19871ca993d2SVille Syrjälä 
1988055e393fSDamien Lespiau 	for_each_pipe(dev_priv, pipe) {
1989f0f59a00SVille Syrjälä 		i915_reg_t reg;
19906b12ca56SVille Syrjälä 		u32 status_mask, enable_mask, iir_bit = 0;
199191d181ddSImre Deak 
1992bbb5eebfSDaniel Vetter 		/*
1993bbb5eebfSDaniel Vetter 		 * PIPESTAT bits get signalled even when the interrupt is
1994bbb5eebfSDaniel Vetter 		 * disabled with the mask bits, and some of the status bits do
1995bbb5eebfSDaniel Vetter 		 * not generate interrupts at all (like the underrun bit). Hence
1996bbb5eebfSDaniel Vetter 		 * we need to be careful that we only handle what we want to
1997bbb5eebfSDaniel Vetter 		 * handle.
1998bbb5eebfSDaniel Vetter 		 */
19990f239f4cSDaniel Vetter 
20000f239f4cSDaniel Vetter 		/* fifo underruns are filterered in the underrun handler. */
20016b12ca56SVille Syrjälä 		status_mask = PIPE_FIFO_UNDERRUN_STATUS;
2002bbb5eebfSDaniel Vetter 
2003bbb5eebfSDaniel Vetter 		switch (pipe) {
2004bbb5eebfSDaniel Vetter 		case PIPE_A:
2005bbb5eebfSDaniel Vetter 			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
2006bbb5eebfSDaniel Vetter 			break;
2007bbb5eebfSDaniel Vetter 		case PIPE_B:
2008bbb5eebfSDaniel Vetter 			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
2009bbb5eebfSDaniel Vetter 			break;
20103278f67fSVille Syrjälä 		case PIPE_C:
20113278f67fSVille Syrjälä 			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
20123278f67fSVille Syrjälä 			break;
2013bbb5eebfSDaniel Vetter 		}
2014bbb5eebfSDaniel Vetter 		if (iir & iir_bit)
20156b12ca56SVille Syrjälä 			status_mask |= dev_priv->pipestat_irq_mask[pipe];
2016bbb5eebfSDaniel Vetter 
20176b12ca56SVille Syrjälä 		if (!status_mask)
201891d181ddSImre Deak 			continue;
201991d181ddSImre Deak 
202091d181ddSImre Deak 		reg = PIPESTAT(pipe);
20216b12ca56SVille Syrjälä 		pipe_stats[pipe] = I915_READ(reg) & status_mask;
20226b12ca56SVille Syrjälä 		enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
20237e231dbeSJesse Barnes 
20247e231dbeSJesse Barnes 		/*
20257e231dbeSJesse Barnes 		 * Clear the PIPE*STAT regs before the IIR
2026132c27c9SVille Syrjälä 		 *
2027132c27c9SVille Syrjälä 		 * Toggle the enable bits to make sure we get an
2028132c27c9SVille Syrjälä 		 * edge in the ISR pipe event bit if we don't clear
2029132c27c9SVille Syrjälä 		 * all the enabled status bits. Otherwise the edge
2030132c27c9SVille Syrjälä 		 * triggered IIR on i965/g4x wouldn't notice that
2031132c27c9SVille Syrjälä 		 * an interrupt is still pending.
20327e231dbeSJesse Barnes 		 */
2033132c27c9SVille Syrjälä 		if (pipe_stats[pipe]) {
2034132c27c9SVille Syrjälä 			I915_WRITE(reg, pipe_stats[pipe]);
2035132c27c9SVille Syrjälä 			I915_WRITE(reg, enable_mask);
2036132c27c9SVille Syrjälä 		}
20377e231dbeSJesse Barnes 	}
203858ead0d7SImre Deak 	spin_unlock(&dev_priv->irq_lock);
20392ecb8ca4SVille Syrjälä }
20402ecb8ca4SVille Syrjälä 
2041eb64343cSVille Syrjälä static void i8xx_pipestat_irq_handler(struct drm_i915_private *dev_priv,
2042eb64343cSVille Syrjälä 				      u16 iir, u32 pipe_stats[I915_MAX_PIPES])
2043eb64343cSVille Syrjälä {
2044eb64343cSVille Syrjälä 	enum pipe pipe;
2045eb64343cSVille Syrjälä 
2046eb64343cSVille Syrjälä 	for_each_pipe(dev_priv, pipe) {
2047eb64343cSVille Syrjälä 		if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
2048eb64343cSVille Syrjälä 			drm_handle_vblank(&dev_priv->drm, pipe);
2049eb64343cSVille Syrjälä 
2050eb64343cSVille Syrjälä 		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
2051eb64343cSVille Syrjälä 			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
2052eb64343cSVille Syrjälä 
2053eb64343cSVille Syrjälä 		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
2054eb64343cSVille Syrjälä 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2055eb64343cSVille Syrjälä 	}
2056eb64343cSVille Syrjälä }
2057eb64343cSVille Syrjälä 
2058eb64343cSVille Syrjälä static void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv,
2059eb64343cSVille Syrjälä 				      u32 iir, u32 pipe_stats[I915_MAX_PIPES])
2060eb64343cSVille Syrjälä {
2061eb64343cSVille Syrjälä 	bool blc_event = false;
2062eb64343cSVille Syrjälä 	enum pipe pipe;
2063eb64343cSVille Syrjälä 
2064eb64343cSVille Syrjälä 	for_each_pipe(dev_priv, pipe) {
2065eb64343cSVille Syrjälä 		if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
2066eb64343cSVille Syrjälä 			drm_handle_vblank(&dev_priv->drm, pipe);
2067eb64343cSVille Syrjälä 
2068eb64343cSVille Syrjälä 		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
2069eb64343cSVille Syrjälä 			blc_event = true;
2070eb64343cSVille Syrjälä 
2071eb64343cSVille Syrjälä 		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
2072eb64343cSVille Syrjälä 			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
2073eb64343cSVille Syrjälä 
2074eb64343cSVille Syrjälä 		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
2075eb64343cSVille Syrjälä 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2076eb64343cSVille Syrjälä 	}
2077eb64343cSVille Syrjälä 
2078eb64343cSVille Syrjälä 	if (blc_event || (iir & I915_ASLE_INTERRUPT))
2079eb64343cSVille Syrjälä 		intel_opregion_asle_intr(dev_priv);
2080eb64343cSVille Syrjälä }
2081eb64343cSVille Syrjälä 
2082eb64343cSVille Syrjälä static void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv,
2083eb64343cSVille Syrjälä 				      u32 iir, u32 pipe_stats[I915_MAX_PIPES])
2084eb64343cSVille Syrjälä {
2085eb64343cSVille Syrjälä 	bool blc_event = false;
2086eb64343cSVille Syrjälä 	enum pipe pipe;
2087eb64343cSVille Syrjälä 
2088eb64343cSVille Syrjälä 	for_each_pipe(dev_priv, pipe) {
2089eb64343cSVille Syrjälä 		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
2090eb64343cSVille Syrjälä 			drm_handle_vblank(&dev_priv->drm, pipe);
2091eb64343cSVille Syrjälä 
2092eb64343cSVille Syrjälä 		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
2093eb64343cSVille Syrjälä 			blc_event = true;
2094eb64343cSVille Syrjälä 
2095eb64343cSVille Syrjälä 		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
2096eb64343cSVille Syrjälä 			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
2097eb64343cSVille Syrjälä 
2098eb64343cSVille Syrjälä 		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
2099eb64343cSVille Syrjälä 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2100eb64343cSVille Syrjälä 	}
2101eb64343cSVille Syrjälä 
2102eb64343cSVille Syrjälä 	if (blc_event || (iir & I915_ASLE_INTERRUPT))
2103eb64343cSVille Syrjälä 		intel_opregion_asle_intr(dev_priv);
2104eb64343cSVille Syrjälä 
2105eb64343cSVille Syrjälä 	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
2106eb64343cSVille Syrjälä 		gmbus_irq_handler(dev_priv);
2107eb64343cSVille Syrjälä }
2108eb64343cSVille Syrjälä 
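/*
 * Process PIPESTAT bits for VLV/CHV: vblank, pipe CRC done, FIFO underrun
 * and the GMBUS event (reported through pipe A's PIPESTAT).
 */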
210991d14251STvrtko Ursulin static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
21102ecb8ca4SVille Syrjälä 					    u32 pipe_stats[I915_MAX_PIPES])
21112ecb8ca4SVille Syrjälä {
21122ecb8ca4SVille Syrjälä 	enum pipe pipe;
21137e231dbeSJesse Barnes 
2114055e393fSDamien Lespiau 	for_each_pipe(dev_priv, pipe) {
2115fd3a4024SDaniel Vetter 		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
2116fd3a4024SDaniel Vetter 			drm_handle_vblank(&dev_priv->drm, pipe);
21174356d586SDaniel Vetter 
21184356d586SDaniel Vetter 		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
211991d14251STvrtko Ursulin 			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
21202d9d2b0bSVille Syrjälä 
21211f7247c0SDaniel Vetter 		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
21221f7247c0SDaniel Vetter 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
212331acc7f5SJesse Barnes 	}
212431acc7f5SJesse Barnes 
2125c1874ed7SImre Deak 	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
212691d14251STvrtko Ursulin 		gmbus_irq_handler(dev_priv);
2127c1874ed7SImre Deak }
2128c1874ed7SImre Deak 
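/*
 * Read and clear PORT_HOTPLUG_STAT, retrying a bounded number of times until
 * no enabled status bits remain set, and return the accumulated status for
 * i9xx_hpd_irq_handler().
 */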
21291ae3c34cSVille Syrjälä static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
213016c6c56bSVille Syrjälä {
21310ba7c51aSVille Syrjälä 	u32 hotplug_status = 0, hotplug_status_mask;
21320ba7c51aSVille Syrjälä 	int i;
213316c6c56bSVille Syrjälä 
21340ba7c51aSVille Syrjälä 	if (IS_G4X(dev_priv) ||
21350ba7c51aSVille Syrjälä 	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
21360ba7c51aSVille Syrjälä 		hotplug_status_mask = HOTPLUG_INT_STATUS_G4X |
21370ba7c51aSVille Syrjälä 			DP_AUX_CHANNEL_MASK_INT_STATUS_G4X;
21380ba7c51aSVille Syrjälä 	else
21390ba7c51aSVille Syrjälä 		hotplug_status_mask = HOTPLUG_INT_STATUS_I915;
21400ba7c51aSVille Syrjälä 
21410ba7c51aSVille Syrjälä 	/*
21420ba7c51aSVille Syrjälä 	 * We absolutely have to clear all the pending interrupt
21430ba7c51aSVille Syrjälä 	 * bits in PORT_HOTPLUG_STAT. Otherwise the ISR port
21440ba7c51aSVille Syrjälä 	 * interrupt bit won't have an edge, and the i965/g4x
21450ba7c51aSVille Syrjälä 	 * edge triggered IIR will not notice that an interrupt
21460ba7c51aSVille Syrjälä 	 * is still pending. We can't use PORT_HOTPLUG_EN to
21470ba7c51aSVille Syrjälä 	 * guarantee the edge as the act of toggling the enable
21480ba7c51aSVille Syrjälä 	 * bits can itself generate a new hotplug interrupt :(
21490ba7c51aSVille Syrjälä 	 */
21500ba7c51aSVille Syrjälä 	for (i = 0; i < 10; i++) {
21510ba7c51aSVille Syrjälä 		u32 tmp = I915_READ(PORT_HOTPLUG_STAT) & hotplug_status_mask;
21520ba7c51aSVille Syrjälä 
21530ba7c51aSVille Syrjälä 		if (tmp == 0)
21540ba7c51aSVille Syrjälä 			return hotplug_status;
21550ba7c51aSVille Syrjälä 
21560ba7c51aSVille Syrjälä 		hotplug_status |= tmp;
21573ff60f89SOscar Mateo 		I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
21580ba7c51aSVille Syrjälä 	}
21590ba7c51aSVille Syrjälä 
21600ba7c51aSVille Syrjälä 	WARN_ONCE(1,
21610ba7c51aSVille Syrjälä 		  "PORT_HOTPLUG_STAT did not clear (0x%08x)\n",
21620ba7c51aSVille Syrjälä 		  I915_READ(PORT_HOTPLUG_STAT));
21631ae3c34cSVille Syrjälä 
21641ae3c34cSVille Syrjälä 	return hotplug_status;
21651ae3c34cSVille Syrjälä }
21661ae3c34cSVille Syrjälä 
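/*
 * Decode the PORT_HOTPLUG_STAT value returned by i9xx_hpd_irq_ack() into HPD
 * pin events; on g4x/vlv/chv this also covers the DP AUX done interrupt.
 */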
216791d14251STvrtko Ursulin static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv,
21681ae3c34cSVille Syrjälä 				 u32 hotplug_status)
21691ae3c34cSVille Syrjälä {
21701ae3c34cSVille Syrjälä 	u32 pin_mask = 0, long_mask = 0;
21713ff60f89SOscar Mateo 
217291d14251STvrtko Ursulin 	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
217391d14251STvrtko Ursulin 	    IS_CHERRYVIEW(dev_priv)) {
217416c6c56bSVille Syrjälä 		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
217516c6c56bSVille Syrjälä 
217658f2cf24SVille Syrjälä 		if (hotplug_trigger) {
2177cf53902fSRodrigo Vivi 			intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2178cf53902fSRodrigo Vivi 					   hotplug_trigger, hotplug_trigger,
2179cf53902fSRodrigo Vivi 					   hpd_status_g4x,
2180fd63e2a9SImre Deak 					   i9xx_port_hotplug_long_detect);
218158f2cf24SVille Syrjälä 
218291d14251STvrtko Ursulin 			intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
218358f2cf24SVille Syrjälä 		}
2184369712e8SJani Nikula 
2185369712e8SJani Nikula 		if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
218691d14251STvrtko Ursulin 			dp_aux_irq_handler(dev_priv);
218716c6c56bSVille Syrjälä 	} else {
218816c6c56bSVille Syrjälä 		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
218916c6c56bSVille Syrjälä 
219058f2cf24SVille Syrjälä 		if (hotplug_trigger) {
2191cf53902fSRodrigo Vivi 			intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2192cf53902fSRodrigo Vivi 					   hotplug_trigger, hotplug_trigger,
2193cf53902fSRodrigo Vivi 					   hpd_status_i915,
2194fd63e2a9SImre Deak 					   i9xx_port_hotplug_long_detect);
219591d14251STvrtko Ursulin 			intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
219616c6c56bSVille Syrjälä 		}
21973ff60f89SOscar Mateo 	}
219858f2cf24SVille Syrjälä }
219916c6c56bSVille Syrjälä 
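/*
 * Top-level interrupt handler for VLV: ack the GT, PM and display sources
 * with the master interrupt and VLV_IER disabled, then process them once
 * those are restored.
 */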
2200c1874ed7SImre Deak static irqreturn_t valleyview_irq_handler(int irq, void *arg)
2201c1874ed7SImre Deak {
2202b318b824SVille Syrjälä 	struct drm_i915_private *dev_priv = arg;
2203c1874ed7SImre Deak 	irqreturn_t ret = IRQ_NONE;
2204c1874ed7SImre Deak 
22052dd2a883SImre Deak 	if (!intel_irqs_enabled(dev_priv))
22062dd2a883SImre Deak 		return IRQ_NONE;
22072dd2a883SImre Deak 
22081f814dacSImre Deak 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
22099102650fSDaniele Ceraolo Spurio 	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
22101f814dacSImre Deak 
22111e1cace9SVille Syrjälä 	do {
22126e814800SVille Syrjälä 		u32 iir, gt_iir, pm_iir;
22132ecb8ca4SVille Syrjälä 		u32 pipe_stats[I915_MAX_PIPES] = {};
22141ae3c34cSVille Syrjälä 		u32 hotplug_status = 0;
2215a5e485a9SVille Syrjälä 		u32 ier = 0;
22163ff60f89SOscar Mateo 
2217c1874ed7SImre Deak 		gt_iir = I915_READ(GTIIR);
2218c1874ed7SImre Deak 		pm_iir = I915_READ(GEN6_PMIIR);
22193ff60f89SOscar Mateo 		iir = I915_READ(VLV_IIR);
2220c1874ed7SImre Deak 
2221c1874ed7SImre Deak 		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
22221e1cace9SVille Syrjälä 			break;
2223c1874ed7SImre Deak 
2224c1874ed7SImre Deak 		ret = IRQ_HANDLED;
2225c1874ed7SImre Deak 
2226a5e485a9SVille Syrjälä 		/*
2227a5e485a9SVille Syrjälä 		 * Theory on interrupt generation, based on empirical evidence:
2228a5e485a9SVille Syrjälä 		 *
2229a5e485a9SVille Syrjälä 		 * x = ((VLV_IIR & VLV_IER) ||
2230a5e485a9SVille Syrjälä 		 *      (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) &&
2231a5e485a9SVille Syrjälä 		 *       (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE)));
2232a5e485a9SVille Syrjälä 		 *
2233a5e485a9SVille Syrjälä 		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
2234a5e485a9SVille Syrjälä 		 * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to
2235a5e485a9SVille Syrjälä 		 * guarantee the CPU interrupt will be raised again even if we
2236a5e485a9SVille Syrjälä 		 * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR
2237a5e485a9SVille Syrjälä 		 * bits this time around.
2238a5e485a9SVille Syrjälä 		 */
22394a0a0202SVille Syrjälä 		I915_WRITE(VLV_MASTER_IER, 0);
2240a5e485a9SVille Syrjälä 		ier = I915_READ(VLV_IER);
2241a5e485a9SVille Syrjälä 		I915_WRITE(VLV_IER, 0);
22424a0a0202SVille Syrjälä 
22434a0a0202SVille Syrjälä 		if (gt_iir)
22444a0a0202SVille Syrjälä 			I915_WRITE(GTIIR, gt_iir);
22454a0a0202SVille Syrjälä 		if (pm_iir)
22464a0a0202SVille Syrjälä 			I915_WRITE(GEN6_PMIIR, pm_iir);
22474a0a0202SVille Syrjälä 
22487ce4d1f2SVille Syrjälä 		if (iir & I915_DISPLAY_PORT_INTERRUPT)
22491ae3c34cSVille Syrjälä 			hotplug_status = i9xx_hpd_irq_ack(dev_priv);
22507ce4d1f2SVille Syrjälä 
22513ff60f89SOscar Mateo 		/* Call regardless, as some status bits might not be
22523ff60f89SOscar Mateo 		 * signalled in iir */
2253eb64343cSVille Syrjälä 		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
22547ce4d1f2SVille Syrjälä 
2255eef57324SJerome Anand 		if (iir & (I915_LPE_PIPE_A_INTERRUPT |
2256eef57324SJerome Anand 			   I915_LPE_PIPE_B_INTERRUPT))
2257eef57324SJerome Anand 			intel_lpe_audio_irq_handler(dev_priv);
2258eef57324SJerome Anand 
22597ce4d1f2SVille Syrjälä 		/*
22607ce4d1f2SVille Syrjälä 		 * VLV_IIR is single buffered, and reflects the level
22617ce4d1f2SVille Syrjälä 		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
22627ce4d1f2SVille Syrjälä 		 */
22637ce4d1f2SVille Syrjälä 		if (iir)
22647ce4d1f2SVille Syrjälä 			I915_WRITE(VLV_IIR, iir);
22654a0a0202SVille Syrjälä 
2266a5e485a9SVille Syrjälä 		I915_WRITE(VLV_IER, ier);
22674a0a0202SVille Syrjälä 		I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
22681ae3c34cSVille Syrjälä 
226952894874SVille Syrjälä 		if (gt_iir)
2270261e40b8SVille Syrjälä 			snb_gt_irq_handler(dev_priv, gt_iir);
227152894874SVille Syrjälä 		if (pm_iir)
227252894874SVille Syrjälä 			gen6_rps_irq_handler(dev_priv, pm_iir);
227352894874SVille Syrjälä 
22741ae3c34cSVille Syrjälä 		if (hotplug_status)
227591d14251STvrtko Ursulin 			i9xx_hpd_irq_handler(dev_priv, hotplug_status);
22762ecb8ca4SVille Syrjälä 
227791d14251STvrtko Ursulin 		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
22781e1cace9SVille Syrjälä 	} while (0);
22797e231dbeSJesse Barnes 
22809102650fSDaniele Ceraolo Spurio 	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
22811f814dacSImre Deak 
22827e231dbeSJesse Barnes 	return ret;
22837e231dbeSJesse Barnes }
22847e231dbeSJesse Barnes 
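/*
 * Top-level interrupt handler for CHV: same structure as the VLV handler,
 * but driven by GEN8_MASTER_IRQ and the gen8 GT interrupt helpers.
 */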
228543f328d7SVille Syrjälä static irqreturn_t cherryview_irq_handler(int irq, void *arg)
228643f328d7SVille Syrjälä {
2287b318b824SVille Syrjälä 	struct drm_i915_private *dev_priv = arg;
228843f328d7SVille Syrjälä 	irqreturn_t ret = IRQ_NONE;
228943f328d7SVille Syrjälä 
22902dd2a883SImre Deak 	if (!intel_irqs_enabled(dev_priv))
22912dd2a883SImre Deak 		return IRQ_NONE;
22922dd2a883SImre Deak 
22931f814dacSImre Deak 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
22949102650fSDaniele Ceraolo Spurio 	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
22951f814dacSImre Deak 
2296579de73bSChris Wilson 	do {
22976e814800SVille Syrjälä 		u32 master_ctl, iir;
22982ecb8ca4SVille Syrjälä 		u32 pipe_stats[I915_MAX_PIPES] = {};
22991ae3c34cSVille Syrjälä 		u32 hotplug_status = 0;
2300f0fd96f5SChris Wilson 		u32 gt_iir[4];
2301a5e485a9SVille Syrjälä 		u32 ier = 0;
2302a5e485a9SVille Syrjälä 
23038e5fd599SVille Syrjälä 		master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
23043278f67fSVille Syrjälä 		iir = I915_READ(VLV_IIR);
23053278f67fSVille Syrjälä 
23063278f67fSVille Syrjälä 		if (master_ctl == 0 && iir == 0)
23078e5fd599SVille Syrjälä 			break;
230843f328d7SVille Syrjälä 
230927b6c122SOscar Mateo 		ret = IRQ_HANDLED;
231027b6c122SOscar Mateo 
2311a5e485a9SVille Syrjälä 		/*
2312a5e485a9SVille Syrjälä 		 * Theory on interrupt generation, based on empirical evidence:
2313a5e485a9SVille Syrjälä 		 *
2314a5e485a9SVille Syrjälä 		 * x = ((VLV_IIR & VLV_IER) ||
2315a5e485a9SVille Syrjälä 		 *      ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) &&
2316a5e485a9SVille Syrjälä 		 *       (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL)));
2317a5e485a9SVille Syrjälä 		 *
2318a5e485a9SVille Syrjälä 		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
2319a5e485a9SVille Syrjälä 		 * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to
2320a5e485a9SVille Syrjälä 		 * guarantee the CPU interrupt will be raised again even if we
2321a5e485a9SVille Syrjälä 		 * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL
2322a5e485a9SVille Syrjälä 		 * bits this time around.
2323a5e485a9SVille Syrjälä 		 */
232443f328d7SVille Syrjälä 		I915_WRITE(GEN8_MASTER_IRQ, 0);
2325a5e485a9SVille Syrjälä 		ier = I915_READ(VLV_IER);
2326a5e485a9SVille Syrjälä 		I915_WRITE(VLV_IER, 0);
232743f328d7SVille Syrjälä 
2328e30e251aSVille Syrjälä 		gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);
232927b6c122SOscar Mateo 
233027b6c122SOscar Mateo 		if (iir & I915_DISPLAY_PORT_INTERRUPT)
23311ae3c34cSVille Syrjälä 			hotplug_status = i9xx_hpd_irq_ack(dev_priv);
233243f328d7SVille Syrjälä 
233327b6c122SOscar Mateo 		/* Call regardless, as some status bits might not be
233427b6c122SOscar Mateo 		 * signalled in iir */
2335eb64343cSVille Syrjälä 		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
233643f328d7SVille Syrjälä 
2337eef57324SJerome Anand 		if (iir & (I915_LPE_PIPE_A_INTERRUPT |
2338eef57324SJerome Anand 			   I915_LPE_PIPE_B_INTERRUPT |
2339eef57324SJerome Anand 			   I915_LPE_PIPE_C_INTERRUPT))
2340eef57324SJerome Anand 			intel_lpe_audio_irq_handler(dev_priv);
2341eef57324SJerome Anand 
23427ce4d1f2SVille Syrjälä 		/*
23437ce4d1f2SVille Syrjälä 		 * VLV_IIR is single buffered, and reflects the level
23447ce4d1f2SVille Syrjälä 		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
23457ce4d1f2SVille Syrjälä 		 */
23467ce4d1f2SVille Syrjälä 		if (iir)
23477ce4d1f2SVille Syrjälä 			I915_WRITE(VLV_IIR, iir);
23487ce4d1f2SVille Syrjälä 
2349a5e485a9SVille Syrjälä 		I915_WRITE(VLV_IER, ier);
2350e5328c43SVille Syrjälä 		I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
23511ae3c34cSVille Syrjälä 
2352f0fd96f5SChris Wilson 		gen8_gt_irq_handler(dev_priv, master_ctl, gt_iir);
2353e30e251aSVille Syrjälä 
23541ae3c34cSVille Syrjälä 		if (hotplug_status)
235591d14251STvrtko Ursulin 			i9xx_hpd_irq_handler(dev_priv, hotplug_status);
23562ecb8ca4SVille Syrjälä 
235791d14251STvrtko Ursulin 		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
2358579de73bSChris Wilson 	} while (0);
23593278f67fSVille Syrjälä 
23609102650fSDaniele Ceraolo Spurio 	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
23611f814dacSImre Deak 
236243f328d7SVille Syrjälä 	return ret;
236343f328d7SVille Syrjälä }
236443f328d7SVille Syrjälä 
236591d14251STvrtko Ursulin static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
236691d14251STvrtko Ursulin 				u32 hotplug_trigger,
236740e56410SVille Syrjälä 				const u32 hpd[HPD_NUM_PINS])
2368776ad806SJesse Barnes {
236942db67d6SVille Syrjälä 	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2370776ad806SJesse Barnes 
23716a39d7c9SJani Nikula 	/*
23726a39d7c9SJani Nikula 	 * Somehow the PCH doesn't seem to really ack the interrupt to the CPU
23736a39d7c9SJani Nikula 	 * unless we touch the hotplug register, even if hotplug_trigger is
23746a39d7c9SJani Nikula 	 * zero. Not acking leads to "The master control interrupt lied (SDE)!"
23756a39d7c9SJani Nikula 	 * errors.
23766a39d7c9SJani Nikula 	 */
237713cf5504SDave Airlie 	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
23786a39d7c9SJani Nikula 	if (!hotplug_trigger) {
23796a39d7c9SJani Nikula 		u32 mask = PORTA_HOTPLUG_STATUS_MASK |
23806a39d7c9SJani Nikula 			PORTD_HOTPLUG_STATUS_MASK |
23816a39d7c9SJani Nikula 			PORTC_HOTPLUG_STATUS_MASK |
23826a39d7c9SJani Nikula 			PORTB_HOTPLUG_STATUS_MASK;
23836a39d7c9SJani Nikula 		dig_hotplug_reg &= ~mask;
23846a39d7c9SJani Nikula 	}
23856a39d7c9SJani Nikula 
238613cf5504SDave Airlie 	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
23876a39d7c9SJani Nikula 	if (!hotplug_trigger)
23886a39d7c9SJani Nikula 		return;
238913cf5504SDave Airlie 
2390cf53902fSRodrigo Vivi 	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger,
239140e56410SVille Syrjälä 			   dig_hotplug_reg, hpd,
2392fd63e2a9SImre Deak 			   pch_port_hotplug_long_detect);
239340e56410SVille Syrjälä 
239491d14251STvrtko Ursulin 	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2395aaf5ec2eSSonika Jindal }
239691d131d2SDaniel Vetter 
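/*
 * South display engine (IBX PCH) interrupt handler: hotplug, DP AUX, GMBUS,
 * FDI and transcoder FIFO underrun events.
 */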
239791d14251STvrtko Ursulin static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
239840e56410SVille Syrjälä {
239940e56410SVille Syrjälä 	int pipe;
240040e56410SVille Syrjälä 	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
240140e56410SVille Syrjälä 
240291d14251STvrtko Ursulin 	ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ibx);
240340e56410SVille Syrjälä 
2404cfc33bf7SVille Syrjälä 	if (pch_iir & SDE_AUDIO_POWER_MASK) {
2405cfc33bf7SVille Syrjälä 		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
2406776ad806SJesse Barnes 			       SDE_AUDIO_POWER_SHIFT);
2407cfc33bf7SVille Syrjälä 		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
2408cfc33bf7SVille Syrjälä 				 port_name(port));
2409cfc33bf7SVille Syrjälä 	}
2410776ad806SJesse Barnes 
2411ce99c256SDaniel Vetter 	if (pch_iir & SDE_AUX_MASK)
241291d14251STvrtko Ursulin 		dp_aux_irq_handler(dev_priv);
2413ce99c256SDaniel Vetter 
2414776ad806SJesse Barnes 	if (pch_iir & SDE_GMBUS)
241591d14251STvrtko Ursulin 		gmbus_irq_handler(dev_priv);
2416776ad806SJesse Barnes 
2417776ad806SJesse Barnes 	if (pch_iir & SDE_AUDIO_HDCP_MASK)
2418776ad806SJesse Barnes 		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
2419776ad806SJesse Barnes 
2420776ad806SJesse Barnes 	if (pch_iir & SDE_AUDIO_TRANS_MASK)
2421776ad806SJesse Barnes 		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
2422776ad806SJesse Barnes 
2423776ad806SJesse Barnes 	if (pch_iir & SDE_POISON)
2424776ad806SJesse Barnes 		DRM_ERROR("PCH poison interrupt\n");
2425776ad806SJesse Barnes 
24269db4a9c7SJesse Barnes 	if (pch_iir & SDE_FDI_MASK)
2427055e393fSDamien Lespiau 		for_each_pipe(dev_priv, pipe)
24289db4a9c7SJesse Barnes 			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
24299db4a9c7SJesse Barnes 					 pipe_name(pipe),
24309db4a9c7SJesse Barnes 					 I915_READ(FDI_RX_IIR(pipe)));
2431776ad806SJesse Barnes 
2432776ad806SJesse Barnes 	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
2433776ad806SJesse Barnes 		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
2434776ad806SJesse Barnes 
2435776ad806SJesse Barnes 	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
2436776ad806SJesse Barnes 		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
2437776ad806SJesse Barnes 
2438776ad806SJesse Barnes 	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
2439a2196033SMatthias Kaehlcke 		intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A);
24408664281bSPaulo Zanoni 
24418664281bSPaulo Zanoni 	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
2442a2196033SMatthias Kaehlcke 		intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_B);
24438664281bSPaulo Zanoni }
24448664281bSPaulo Zanoni 
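/* Handle GEN7_ERR_INT: poison, per-pipe FIFO underruns and pipe CRC done. */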
244591d14251STvrtko Ursulin static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
24468664281bSPaulo Zanoni {
24478664281bSPaulo Zanoni 	u32 err_int = I915_READ(GEN7_ERR_INT);
24485a69b89fSDaniel Vetter 	enum pipe pipe;
24498664281bSPaulo Zanoni 
2450de032bf4SPaulo Zanoni 	if (err_int & ERR_INT_POISON)
2451de032bf4SPaulo Zanoni 		DRM_ERROR("Poison interrupt\n");
2452de032bf4SPaulo Zanoni 
2453055e393fSDamien Lespiau 	for_each_pipe(dev_priv, pipe) {
24541f7247c0SDaniel Vetter 		if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
24551f7247c0SDaniel Vetter 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
24568664281bSPaulo Zanoni 
24575a69b89fSDaniel Vetter 		if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
245891d14251STvrtko Ursulin 			if (IS_IVYBRIDGE(dev_priv))
245991d14251STvrtko Ursulin 				ivb_pipe_crc_irq_handler(dev_priv, pipe);
24605a69b89fSDaniel Vetter 			else
246191d14251STvrtko Ursulin 				hsw_pipe_crc_irq_handler(dev_priv, pipe);
24625a69b89fSDaniel Vetter 		}
24635a69b89fSDaniel Vetter 	}
24648bf1e9f1SShuang He 
24658664281bSPaulo Zanoni 	I915_WRITE(GEN7_ERR_INT, err_int);
24668664281bSPaulo Zanoni }
24678664281bSPaulo Zanoni 
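/* Handle SERR_INT: PCH poison and per-transcoder FIFO underruns. */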
246891d14251STvrtko Ursulin static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
24698664281bSPaulo Zanoni {
24708664281bSPaulo Zanoni 	u32 serr_int = I915_READ(SERR_INT);
247145c1cd87SMika Kahola 	enum pipe pipe;
24728664281bSPaulo Zanoni 
2473de032bf4SPaulo Zanoni 	if (serr_int & SERR_INT_POISON)
2474de032bf4SPaulo Zanoni 		DRM_ERROR("PCH poison interrupt\n");
2475de032bf4SPaulo Zanoni 
247645c1cd87SMika Kahola 	for_each_pipe(dev_priv, pipe)
247745c1cd87SMika Kahola 		if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe))
247845c1cd87SMika Kahola 			intel_pch_fifo_underrun_irq_handler(dev_priv, pipe);
24798664281bSPaulo Zanoni 
24808664281bSPaulo Zanoni 	I915_WRITE(SERR_INT, serr_int);
2481776ad806SJesse Barnes }
2482776ad806SJesse Barnes 
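/* South display engine interrupt handler for CPT-style PCHs. */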
248391d14251STvrtko Ursulin static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
248423e81d69SAdam Jackson {
248523e81d69SAdam Jackson 	int pipe;
24866dbf30ceSVille Syrjälä 	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
2487aaf5ec2eSSonika Jindal 
248891d14251STvrtko Ursulin 	ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_cpt);
248991d131d2SDaniel Vetter 
2490cfc33bf7SVille Syrjälä 	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
2491cfc33bf7SVille Syrjälä 		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
249223e81d69SAdam Jackson 			       SDE_AUDIO_POWER_SHIFT_CPT);
2493cfc33bf7SVille Syrjälä 		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
2494cfc33bf7SVille Syrjälä 				 port_name(port));
2495cfc33bf7SVille Syrjälä 	}
249623e81d69SAdam Jackson 
249723e81d69SAdam Jackson 	if (pch_iir & SDE_AUX_MASK_CPT)
249891d14251STvrtko Ursulin 		dp_aux_irq_handler(dev_priv);
249923e81d69SAdam Jackson 
250023e81d69SAdam Jackson 	if (pch_iir & SDE_GMBUS_CPT)
250191d14251STvrtko Ursulin 		gmbus_irq_handler(dev_priv);
250223e81d69SAdam Jackson 
250323e81d69SAdam Jackson 	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
250423e81d69SAdam Jackson 		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
250523e81d69SAdam Jackson 
250623e81d69SAdam Jackson 	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
250723e81d69SAdam Jackson 		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
250823e81d69SAdam Jackson 
250923e81d69SAdam Jackson 	if (pch_iir & SDE_FDI_MASK_CPT)
2510055e393fSDamien Lespiau 		for_each_pipe(dev_priv, pipe)
251123e81d69SAdam Jackson 			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
251223e81d69SAdam Jackson 					 pipe_name(pipe),
251323e81d69SAdam Jackson 					 I915_READ(FDI_RX_IIR(pipe)));
25148664281bSPaulo Zanoni 
25158664281bSPaulo Zanoni 	if (pch_iir & SDE_ERROR_CPT)
251691d14251STvrtko Ursulin 		cpt_serr_int_handler(dev_priv);
251723e81d69SAdam Jackson }
251823e81d69SAdam Jackson 
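/*
 * South display engine interrupt handler for ICP+ PCHs: DDI and Type-C
 * hotplug triggers plus GMBUS.
 */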
2519c6f7acb8SMatt Roper static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir,
2520c6f7acb8SMatt Roper 			    const u32 *pins)
252131604222SAnusha Srivatsa {
252231604222SAnusha Srivatsa 	u32 ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_ICP;
252331604222SAnusha Srivatsa 	u32 tc_hotplug_trigger = pch_iir & SDE_TC_MASK_ICP;
252431604222SAnusha Srivatsa 	u32 pin_mask = 0, long_mask = 0;
252531604222SAnusha Srivatsa 
252631604222SAnusha Srivatsa 	if (ddi_hotplug_trigger) {
252731604222SAnusha Srivatsa 		u32 dig_hotplug_reg;
252831604222SAnusha Srivatsa 
252931604222SAnusha Srivatsa 		dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_DDI);
253031604222SAnusha Srivatsa 		I915_WRITE(SHOTPLUG_CTL_DDI, dig_hotplug_reg);
253131604222SAnusha Srivatsa 
253231604222SAnusha Srivatsa 		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
253331604222SAnusha Srivatsa 				   ddi_hotplug_trigger,
2534c6f7acb8SMatt Roper 				   dig_hotplug_reg, pins,
253531604222SAnusha Srivatsa 				   icp_ddi_port_hotplug_long_detect);
253631604222SAnusha Srivatsa 	}
253731604222SAnusha Srivatsa 
253831604222SAnusha Srivatsa 	if (tc_hotplug_trigger) {
253931604222SAnusha Srivatsa 		u32 dig_hotplug_reg;
254031604222SAnusha Srivatsa 
254131604222SAnusha Srivatsa 		dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_TC);
254231604222SAnusha Srivatsa 		I915_WRITE(SHOTPLUG_CTL_TC, dig_hotplug_reg);
254331604222SAnusha Srivatsa 
254431604222SAnusha Srivatsa 		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
254531604222SAnusha Srivatsa 				   tc_hotplug_trigger,
2546c6f7acb8SMatt Roper 				   dig_hotplug_reg, pins,
254731604222SAnusha Srivatsa 				   icp_tc_port_hotplug_long_detect);
254831604222SAnusha Srivatsa 	}
254931604222SAnusha Srivatsa 
255031604222SAnusha Srivatsa 	if (pin_mask)
255131604222SAnusha Srivatsa 		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
255231604222SAnusha Srivatsa 
255331604222SAnusha Srivatsa 	if (pch_iir & SDE_GMBUS_ICP)
255431604222SAnusha Srivatsa 		gmbus_irq_handler(dev_priv);
255531604222SAnusha Srivatsa }
255631604222SAnusha Srivatsa 
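/*
 * South display engine interrupt handler for SPT and newer (pre-ICP) PCHs:
 * hotplug (including port E via PCH_PORT_HOTPLUG2) and GMBUS.
 */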
255791d14251STvrtko Ursulin static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
25586dbf30ceSVille Syrjälä {
25596dbf30ceSVille Syrjälä 	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
25606dbf30ceSVille Syrjälä 		~SDE_PORTE_HOTPLUG_SPT;
25616dbf30ceSVille Syrjälä 	u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
25626dbf30ceSVille Syrjälä 	u32 pin_mask = 0, long_mask = 0;
25636dbf30ceSVille Syrjälä 
25646dbf30ceSVille Syrjälä 	if (hotplug_trigger) {
25656dbf30ceSVille Syrjälä 		u32 dig_hotplug_reg;
25666dbf30ceSVille Syrjälä 
25676dbf30ceSVille Syrjälä 		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
25686dbf30ceSVille Syrjälä 		I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
25696dbf30ceSVille Syrjälä 
2570cf53902fSRodrigo Vivi 		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2571cf53902fSRodrigo Vivi 				   hotplug_trigger, dig_hotplug_reg, hpd_spt,
257274c0b395SVille Syrjälä 				   spt_port_hotplug_long_detect);
25736dbf30ceSVille Syrjälä 	}
25746dbf30ceSVille Syrjälä 
25756dbf30ceSVille Syrjälä 	if (hotplug2_trigger) {
25766dbf30ceSVille Syrjälä 		u32 dig_hotplug_reg;
25776dbf30ceSVille Syrjälä 
25786dbf30ceSVille Syrjälä 		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2);
25796dbf30ceSVille Syrjälä 		I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg);
25806dbf30ceSVille Syrjälä 
2581cf53902fSRodrigo Vivi 		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2582cf53902fSRodrigo Vivi 				   hotplug2_trigger, dig_hotplug_reg, hpd_spt,
25836dbf30ceSVille Syrjälä 				   spt_port_hotplug2_long_detect);
25846dbf30ceSVille Syrjälä 	}
25856dbf30ceSVille Syrjälä 
25866dbf30ceSVille Syrjälä 	if (pin_mask)
258791d14251STvrtko Ursulin 		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
25886dbf30ceSVille Syrjälä 
25896dbf30ceSVille Syrjälä 	if (pch_iir & SDE_GMBUS_CPT)
259091d14251STvrtko Ursulin 		gmbus_irq_handler(dev_priv);
25916dbf30ceSVille Syrjälä }
25926dbf30ceSVille Syrjälä 
259391d14251STvrtko Ursulin static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv,
259491d14251STvrtko Ursulin 				u32 hotplug_trigger,
259540e56410SVille Syrjälä 				const u32 hpd[HPD_NUM_PINS])
2596c008bc6eSPaulo Zanoni {
2597e4ce95aaSVille Syrjälä 	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2598e4ce95aaSVille Syrjälä 
2599e4ce95aaSVille Syrjälä 	dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
2600e4ce95aaSVille Syrjälä 	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);
2601e4ce95aaSVille Syrjälä 
2602cf53902fSRodrigo Vivi 	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger,
260340e56410SVille Syrjälä 			   dig_hotplug_reg, hpd,
2604e4ce95aaSVille Syrjälä 			   ilk_port_hotplug_long_detect);
260540e56410SVille Syrjälä 
260691d14251STvrtko Ursulin 	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2607e4ce95aaSVille Syrjälä }
2608c008bc6eSPaulo Zanoni 
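/* Main display engine interrupt handler for the pre-IVB (ilk/snb) path. */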
260991d14251STvrtko Ursulin static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
261091d14251STvrtko Ursulin 				    u32 de_iir)
261140e56410SVille Syrjälä {
261240e56410SVille Syrjälä 	enum pipe pipe;
261340e56410SVille Syrjälä 	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;
261440e56410SVille Syrjälä 
261540e56410SVille Syrjälä 	if (hotplug_trigger)
261691d14251STvrtko Ursulin 		ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ilk);
261740e56410SVille Syrjälä 
2618c008bc6eSPaulo Zanoni 	if (de_iir & DE_AUX_CHANNEL_A)
261991d14251STvrtko Ursulin 		dp_aux_irq_handler(dev_priv);
2620c008bc6eSPaulo Zanoni 
2621c008bc6eSPaulo Zanoni 	if (de_iir & DE_GSE)
262291d14251STvrtko Ursulin 		intel_opregion_asle_intr(dev_priv);
2623c008bc6eSPaulo Zanoni 
2624c008bc6eSPaulo Zanoni 	if (de_iir & DE_POISON)
2625c008bc6eSPaulo Zanoni 		DRM_ERROR("Poison interrupt\n");
2626c008bc6eSPaulo Zanoni 
2627055e393fSDamien Lespiau 	for_each_pipe(dev_priv, pipe) {
2628fd3a4024SDaniel Vetter 		if (de_iir & DE_PIPE_VBLANK(pipe))
2629fd3a4024SDaniel Vetter 			drm_handle_vblank(&dev_priv->drm, pipe);
2630c008bc6eSPaulo Zanoni 
263140da17c2SDaniel Vetter 		if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
26321f7247c0SDaniel Vetter 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2633c008bc6eSPaulo Zanoni 
263440da17c2SDaniel Vetter 		if (de_iir & DE_PIPE_CRC_DONE(pipe))
263591d14251STvrtko Ursulin 			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
2636c008bc6eSPaulo Zanoni 	}
2637c008bc6eSPaulo Zanoni 
2638c008bc6eSPaulo Zanoni 	/* check event from PCH */
2639c008bc6eSPaulo Zanoni 	if (de_iir & DE_PCH_EVENT) {
2640c008bc6eSPaulo Zanoni 		u32 pch_iir = I915_READ(SDEIIR);
2641c008bc6eSPaulo Zanoni 
264291d14251STvrtko Ursulin 		if (HAS_PCH_CPT(dev_priv))
264391d14251STvrtko Ursulin 			cpt_irq_handler(dev_priv, pch_iir);
2644c008bc6eSPaulo Zanoni 		else
264591d14251STvrtko Ursulin 			ibx_irq_handler(dev_priv, pch_iir);
2646c008bc6eSPaulo Zanoni 
2647c008bc6eSPaulo Zanoni 		/* should clear PCH hotplug event before clearing CPU irq */
2648c008bc6eSPaulo Zanoni 		I915_WRITE(SDEIIR, pch_iir);
2649c008bc6eSPaulo Zanoni 	}
2650c008bc6eSPaulo Zanoni 
2651cf819effSLucas De Marchi 	if (IS_GEN(dev_priv, 5) && de_iir & DE_PCU_EVENT)
265291d14251STvrtko Ursulin 		ironlake_rps_change_irq_handler(dev_priv);
2653c008bc6eSPaulo Zanoni }
2654c008bc6eSPaulo Zanoni 
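/* Main display engine interrupt handler for the IVB/HSW path. */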
265591d14251STvrtko Ursulin static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
265691d14251STvrtko Ursulin 				    u32 de_iir)
26579719fb98SPaulo Zanoni {
265807d27e20SDamien Lespiau 	enum pipe pipe;
265923bb4cb5SVille Syrjälä 	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;
266023bb4cb5SVille Syrjälä 
266140e56410SVille Syrjälä 	if (hotplug_trigger)
266291d14251STvrtko Ursulin 		ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ivb);
26639719fb98SPaulo Zanoni 
26649719fb98SPaulo Zanoni 	if (de_iir & DE_ERR_INT_IVB)
266591d14251STvrtko Ursulin 		ivb_err_int_handler(dev_priv);
26669719fb98SPaulo Zanoni 
266754fd3149SDhinakaran Pandiyan 	if (de_iir & DE_EDP_PSR_INT_HSW) {
266854fd3149SDhinakaran Pandiyan 		u32 psr_iir = I915_READ(EDP_PSR_IIR);
266954fd3149SDhinakaran Pandiyan 
267054fd3149SDhinakaran Pandiyan 		intel_psr_irq_handler(dev_priv, psr_iir);
267154fd3149SDhinakaran Pandiyan 		I915_WRITE(EDP_PSR_IIR, psr_iir);
267254fd3149SDhinakaran Pandiyan 	}
2673fc340442SDaniel Vetter 
26749719fb98SPaulo Zanoni 	if (de_iir & DE_AUX_CHANNEL_A_IVB)
267591d14251STvrtko Ursulin 		dp_aux_irq_handler(dev_priv);
26769719fb98SPaulo Zanoni 
26779719fb98SPaulo Zanoni 	if (de_iir & DE_GSE_IVB)
267891d14251STvrtko Ursulin 		intel_opregion_asle_intr(dev_priv);
26799719fb98SPaulo Zanoni 
2680055e393fSDamien Lespiau 	for_each_pipe(dev_priv, pipe) {
2681fd3a4024SDaniel Vetter 		if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)))
2682fd3a4024SDaniel Vetter 			drm_handle_vblank(&dev_priv->drm, pipe);
26839719fb98SPaulo Zanoni 	}
26849719fb98SPaulo Zanoni 
26859719fb98SPaulo Zanoni 	/* check event from PCH */
268691d14251STvrtko Ursulin 	if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) {
26879719fb98SPaulo Zanoni 		u32 pch_iir = I915_READ(SDEIIR);
26889719fb98SPaulo Zanoni 
268991d14251STvrtko Ursulin 		cpt_irq_handler(dev_priv, pch_iir);
26909719fb98SPaulo Zanoni 
26919719fb98SPaulo Zanoni 		/* clear PCH hotplug event before clearing CPU irq */
26929719fb98SPaulo Zanoni 		I915_WRITE(SDEIIR, pch_iir);
26939719fb98SPaulo Zanoni 	}
26949719fb98SPaulo Zanoni }
26959719fb98SPaulo Zanoni 
269672c90f62SOscar Mateo /*
269772c90f62SOscar Mateo  * To handle irqs with the minimum potential races with fresh interrupts, we:
269872c90f62SOscar Mateo  * 1 - Disable Master Interrupt Control.
269972c90f62SOscar Mateo  * 2 - Find the source(s) of the interrupt.
270072c90f62SOscar Mateo  * 3 - Clear the Interrupt Identity bits (IIR).
270172c90f62SOscar Mateo  * 4 - Process the interrupt(s) that had bits set in the IIRs.
270272c90f62SOscar Mateo  * 5 - Re-enable Master Interrupt Control.
270372c90f62SOscar Mateo  */
2704f1af8fc1SPaulo Zanoni static irqreturn_t ironlake_irq_handler(int irq, void *arg)
2705b1f14ad0SJesse Barnes {
2706b318b824SVille Syrjälä 	struct drm_i915_private *dev_priv = arg;
2707f1af8fc1SPaulo Zanoni 	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
27080e43406bSChris Wilson 	irqreturn_t ret = IRQ_NONE;
2709b1f14ad0SJesse Barnes 
27102dd2a883SImre Deak 	if (!intel_irqs_enabled(dev_priv))
27112dd2a883SImre Deak 		return IRQ_NONE;
27122dd2a883SImre Deak 
27131f814dacSImre Deak 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
27149102650fSDaniele Ceraolo Spurio 	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
27151f814dacSImre Deak 
2716b1f14ad0SJesse Barnes 	/* disable master interrupt before clearing iir  */
2717b1f14ad0SJesse Barnes 	de_ier = I915_READ(DEIER);
2718b1f14ad0SJesse Barnes 	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
27190e43406bSChris Wilson 
272044498aeaSPaulo Zanoni 	/* Disable south interrupts. We'll only write to SDEIIR once, so further
272144498aeaSPaulo Zanoni 	 * interrupts will be stored on its back queue, and then we'll be
272244498aeaSPaulo Zanoni 	 * able to process them after we restore SDEIER (as soon as we restore
272344498aeaSPaulo Zanoni 	 * it, we'll get an interrupt if SDEIIR still has something to process
272444498aeaSPaulo Zanoni 	 * due to its back queue). */
272591d14251STvrtko Ursulin 	if (!HAS_PCH_NOP(dev_priv)) {
272644498aeaSPaulo Zanoni 		sde_ier = I915_READ(SDEIER);
272744498aeaSPaulo Zanoni 		I915_WRITE(SDEIER, 0);
2728ab5c608bSBen Widawsky 	}
272944498aeaSPaulo Zanoni 
273072c90f62SOscar Mateo 	/* Find, clear, then process each source of interrupt */
273172c90f62SOscar Mateo 
27320e43406bSChris Wilson 	gt_iir = I915_READ(GTIIR);
27330e43406bSChris Wilson 	if (gt_iir) {
273472c90f62SOscar Mateo 		I915_WRITE(GTIIR, gt_iir);
273572c90f62SOscar Mateo 		ret = IRQ_HANDLED;
273691d14251STvrtko Ursulin 		if (INTEL_GEN(dev_priv) >= 6)
2737261e40b8SVille Syrjälä 			snb_gt_irq_handler(dev_priv, gt_iir);
2738d8fc8a47SPaulo Zanoni 		else
2739261e40b8SVille Syrjälä 			ilk_gt_irq_handler(dev_priv, gt_iir);
27400e43406bSChris Wilson 	}
2741b1f14ad0SJesse Barnes 
2742b1f14ad0SJesse Barnes 	de_iir = I915_READ(DEIIR);
27430e43406bSChris Wilson 	if (de_iir) {
274472c90f62SOscar Mateo 		I915_WRITE(DEIIR, de_iir);
274572c90f62SOscar Mateo 		ret = IRQ_HANDLED;
274691d14251STvrtko Ursulin 		if (INTEL_GEN(dev_priv) >= 7)
274791d14251STvrtko Ursulin 			ivb_display_irq_handler(dev_priv, de_iir);
2748f1af8fc1SPaulo Zanoni 		else
274991d14251STvrtko Ursulin 			ilk_display_irq_handler(dev_priv, de_iir);
27500e43406bSChris Wilson 	}
27510e43406bSChris Wilson 
275291d14251STvrtko Ursulin 	if (INTEL_GEN(dev_priv) >= 6) {
2753f1af8fc1SPaulo Zanoni 		u32 pm_iir = I915_READ(GEN6_PMIIR);
27540e43406bSChris Wilson 		if (pm_iir) {
2755b1f14ad0SJesse Barnes 			I915_WRITE(GEN6_PMIIR, pm_iir);
27560e43406bSChris Wilson 			ret = IRQ_HANDLED;
275772c90f62SOscar Mateo 			gen6_rps_irq_handler(dev_priv, pm_iir);
27580e43406bSChris Wilson 		}
2759f1af8fc1SPaulo Zanoni 	}
2760b1f14ad0SJesse Barnes 
2761b1f14ad0SJesse Barnes 	I915_WRITE(DEIER, de_ier);
276274093f3eSChris Wilson 	if (!HAS_PCH_NOP(dev_priv))
276344498aeaSPaulo Zanoni 		I915_WRITE(SDEIER, sde_ier);
2764b1f14ad0SJesse Barnes 
27651f814dacSImre Deak 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
27669102650fSDaniele Ceraolo Spurio 	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
27671f814dacSImre Deak 
2768b1f14ad0SJesse Barnes 	return ret;
2769b1f14ad0SJesse Barnes }
2770b1f14ad0SJesse Barnes 
277191d14251STvrtko Ursulin static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv,
277291d14251STvrtko Ursulin 				u32 hotplug_trigger,
277340e56410SVille Syrjälä 				const u32 hpd[HPD_NUM_PINS])
2774d04a492dSShashank Sharma {
2775cebd87a0SVille Syrjälä 	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2776d04a492dSShashank Sharma 
2777a52bb15bSVille Syrjälä 	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2778a52bb15bSVille Syrjälä 	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2779d04a492dSShashank Sharma 
2780cf53902fSRodrigo Vivi 	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger,
278140e56410SVille Syrjälä 			   dig_hotplug_reg, hpd,
2782cebd87a0SVille Syrjälä 			   bxt_port_hotplug_long_detect);
278340e56410SVille Syrjälä 
278491d14251STvrtko Ursulin 	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2785d04a492dSShashank Sharma }
2786d04a492dSShashank Sharma 
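/* Gen11 DE HPD handler: decode the Type-C and Thunderbolt hotplug triggers. */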
2787121e758eSDhinakaran Pandiyan static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
2788121e758eSDhinakaran Pandiyan {
2789121e758eSDhinakaran Pandiyan 	u32 pin_mask = 0, long_mask = 0;
2790b796b971SDhinakaran Pandiyan 	u32 trigger_tc = iir & GEN11_DE_TC_HOTPLUG_MASK;
2791b796b971SDhinakaran Pandiyan 	u32 trigger_tbt = iir & GEN11_DE_TBT_HOTPLUG_MASK;
2792121e758eSDhinakaran Pandiyan 
2793121e758eSDhinakaran Pandiyan 	if (trigger_tc) {
2794b796b971SDhinakaran Pandiyan 		u32 dig_hotplug_reg;
2795b796b971SDhinakaran Pandiyan 
2796121e758eSDhinakaran Pandiyan 		dig_hotplug_reg = I915_READ(GEN11_TC_HOTPLUG_CTL);
2797121e758eSDhinakaran Pandiyan 		I915_WRITE(GEN11_TC_HOTPLUG_CTL, dig_hotplug_reg);
2798121e758eSDhinakaran Pandiyan 
2799121e758eSDhinakaran Pandiyan 		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tc,
2800b796b971SDhinakaran Pandiyan 				   dig_hotplug_reg, hpd_gen11,
2801121e758eSDhinakaran Pandiyan 				   gen11_port_hotplug_long_detect);
2802121e758eSDhinakaran Pandiyan 	}
2803b796b971SDhinakaran Pandiyan 
2804b796b971SDhinakaran Pandiyan 	if (trigger_tbt) {
2805b796b971SDhinakaran Pandiyan 		u32 dig_hotplug_reg;
2806b796b971SDhinakaran Pandiyan 
2807b796b971SDhinakaran Pandiyan 		dig_hotplug_reg = I915_READ(GEN11_TBT_HOTPLUG_CTL);
2808b796b971SDhinakaran Pandiyan 		I915_WRITE(GEN11_TBT_HOTPLUG_CTL, dig_hotplug_reg);
2809b796b971SDhinakaran Pandiyan 
2810b796b971SDhinakaran Pandiyan 		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tbt,
2811b796b971SDhinakaran Pandiyan 				   dig_hotplug_reg, hpd_gen11,
2812b796b971SDhinakaran Pandiyan 				   gen11_port_hotplug_long_detect);
2813b796b971SDhinakaran Pandiyan 	}
2814b796b971SDhinakaran Pandiyan 
2815b796b971SDhinakaran Pandiyan 	if (pin_mask)
2816b796b971SDhinakaran Pandiyan 		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2817b796b971SDhinakaran Pandiyan 	else
2818b796b971SDhinakaran Pandiyan 		DRM_ERROR("Unexpected DE HPD interrupt 0x%08x\n", iir);
2819121e758eSDhinakaran Pandiyan }
2820121e758eSDhinakaran Pandiyan 
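/* Mask of the AUX channel interrupt bits present in GEN8_DE_PORT_IIR for this platform. */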
28219d17210fSLucas De Marchi static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
28229d17210fSLucas De Marchi {
28239d17210fSLucas De Marchi 	u32 mask = GEN8_AUX_CHANNEL_A;
28249d17210fSLucas De Marchi 
28259d17210fSLucas De Marchi 	if (INTEL_GEN(dev_priv) >= 9)
28269d17210fSLucas De Marchi 		mask |= GEN9_AUX_CHANNEL_B |
28279d17210fSLucas De Marchi 			GEN9_AUX_CHANNEL_C |
28289d17210fSLucas De Marchi 			GEN9_AUX_CHANNEL_D;
28299d17210fSLucas De Marchi 
28309d17210fSLucas De Marchi 	if (IS_CNL_WITH_PORT_F(dev_priv))
28319d17210fSLucas De Marchi 		mask |= CNL_AUX_CHANNEL_F;
28329d17210fSLucas De Marchi 
28339d17210fSLucas De Marchi 	if (INTEL_GEN(dev_priv) >= 11)
28349d17210fSLucas De Marchi 		mask |= ICL_AUX_CHANNEL_E |
28359d17210fSLucas De Marchi 			CNL_AUX_CHANNEL_F;
28369d17210fSLucas De Marchi 
28379d17210fSLucas De Marchi 	return mask;
28389d17210fSLucas De Marchi }
28399d17210fSLucas De Marchi 
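/*
 * Gen8+ display engine interrupt handler: ack and process the DE MISC
 * (GSE/PSR), DE HPD (gen11+), DE port (AUX, hotplug, GMBUS), per-pipe and
 * PCH (SDE) sources indicated in master_ctl.
 */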
2840f11a0f46STvrtko Ursulin static irqreturn_t
2841f11a0f46STvrtko Ursulin gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
2842abd58f01SBen Widawsky {
2843abd58f01SBen Widawsky 	irqreturn_t ret = IRQ_NONE;
2844f11a0f46STvrtko Ursulin 	u32 iir;
2845c42664ccSDaniel Vetter 	enum pipe pipe;
284688e04703SJesse Barnes 
2847abd58f01SBen Widawsky 	if (master_ctl & GEN8_DE_MISC_IRQ) {
2848e32192e1STvrtko Ursulin 		iir = I915_READ(GEN8_DE_MISC_IIR);
2849e32192e1STvrtko Ursulin 		if (iir) {
2850e04f7eceSVille Syrjälä 			bool found = false;
2851e04f7eceSVille Syrjälä 
2852e32192e1STvrtko Ursulin 			I915_WRITE(GEN8_DE_MISC_IIR, iir);
2853abd58f01SBen Widawsky 			ret = IRQ_HANDLED;
2854e04f7eceSVille Syrjälä 
2855e04f7eceSVille Syrjälä 			if (iir & GEN8_DE_MISC_GSE) {
285691d14251STvrtko Ursulin 				intel_opregion_asle_intr(dev_priv);
2857e04f7eceSVille Syrjälä 				found = true;
2858e04f7eceSVille Syrjälä 			}
2859e04f7eceSVille Syrjälä 
2860e04f7eceSVille Syrjälä 			if (iir & GEN8_DE_EDP_PSR) {
286154fd3149SDhinakaran Pandiyan 				u32 psr_iir = I915_READ(EDP_PSR_IIR);
286254fd3149SDhinakaran Pandiyan 
286354fd3149SDhinakaran Pandiyan 				intel_psr_irq_handler(dev_priv, psr_iir);
286454fd3149SDhinakaran Pandiyan 				I915_WRITE(EDP_PSR_IIR, psr_iir);
2865e04f7eceSVille Syrjälä 				found = true;
2866e04f7eceSVille Syrjälä 			}
2867e04f7eceSVille Syrjälä 
2868e04f7eceSVille Syrjälä 			if (!found)
286938cc46d7SOscar Mateo 				DRM_ERROR("Unexpected DE Misc interrupt\n");
2870abd58f01SBen Widawsky 		}
287138cc46d7SOscar Mateo 		else
287238cc46d7SOscar Mateo 			DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
2873abd58f01SBen Widawsky 	}
2874abd58f01SBen Widawsky 
2875121e758eSDhinakaran Pandiyan 	if (INTEL_GEN(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) {
2876121e758eSDhinakaran Pandiyan 		iir = I915_READ(GEN11_DE_HPD_IIR);
2877121e758eSDhinakaran Pandiyan 		if (iir) {
2878121e758eSDhinakaran Pandiyan 			I915_WRITE(GEN11_DE_HPD_IIR, iir);
2879121e758eSDhinakaran Pandiyan 			ret = IRQ_HANDLED;
2880121e758eSDhinakaran Pandiyan 			gen11_hpd_irq_handler(dev_priv, iir);
2881121e758eSDhinakaran Pandiyan 		} else {
2882121e758eSDhinakaran Pandiyan 			DRM_ERROR("The master control interrupt lied (DE HPD)!\n");
2883121e758eSDhinakaran Pandiyan 		}
2884121e758eSDhinakaran Pandiyan 	}
2885121e758eSDhinakaran Pandiyan 
28866d766f02SDaniel Vetter 	if (master_ctl & GEN8_DE_PORT_IRQ) {
2887e32192e1STvrtko Ursulin 		iir = I915_READ(GEN8_DE_PORT_IIR);
2888e32192e1STvrtko Ursulin 		if (iir) {
2889e32192e1STvrtko Ursulin 			u32 tmp_mask;
2890d04a492dSShashank Sharma 			bool found = false;
2891cebd87a0SVille Syrjälä 
2892e32192e1STvrtko Ursulin 			I915_WRITE(GEN8_DE_PORT_IIR, iir);
28936d766f02SDaniel Vetter 			ret = IRQ_HANDLED;
289488e04703SJesse Barnes 
28959d17210fSLucas De Marchi 			if (iir & gen8_de_port_aux_mask(dev_priv)) {
289691d14251STvrtko Ursulin 				dp_aux_irq_handler(dev_priv);
2897d04a492dSShashank Sharma 				found = true;
2898d04a492dSShashank Sharma 			}
2899d04a492dSShashank Sharma 
2900cc3f90f0SAnder Conselvan de Oliveira 			if (IS_GEN9_LP(dev_priv)) {
2901e32192e1STvrtko Ursulin 				tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK;
2902e32192e1STvrtko Ursulin 				if (tmp_mask) {
290391d14251STvrtko Ursulin 					bxt_hpd_irq_handler(dev_priv, tmp_mask,
290491d14251STvrtko Ursulin 							    hpd_bxt);
2905d04a492dSShashank Sharma 					found = true;
2906d04a492dSShashank Sharma 				}
2907e32192e1STvrtko Ursulin 			} else if (IS_BROADWELL(dev_priv)) {
2908e32192e1STvrtko Ursulin 				tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG;
2909e32192e1STvrtko Ursulin 				if (tmp_mask) {
291091d14251STvrtko Ursulin 					ilk_hpd_irq_handler(dev_priv,
291191d14251STvrtko Ursulin 							    tmp_mask, hpd_bdw);
2912e32192e1STvrtko Ursulin 					found = true;
2913e32192e1STvrtko Ursulin 				}
2914e32192e1STvrtko Ursulin 			}
2915d04a492dSShashank Sharma 
2916cc3f90f0SAnder Conselvan de Oliveira 			if (IS_GEN9_LP(dev_priv) && (iir & BXT_DE_PORT_GMBUS)) {
291791d14251STvrtko Ursulin 				gmbus_irq_handler(dev_priv);
29189e63743eSShashank Sharma 				found = true;
29199e63743eSShashank Sharma 			}
29209e63743eSShashank Sharma 
2921d04a492dSShashank Sharma 			if (!found)
292238cc46d7SOscar Mateo 				DRM_ERROR("Unexpected DE Port interrupt\n");
29236d766f02SDaniel Vetter 		}
292438cc46d7SOscar Mateo 		else
292538cc46d7SOscar Mateo 			DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
29266d766f02SDaniel Vetter 	}
29276d766f02SDaniel Vetter 
2928055e393fSDamien Lespiau 	for_each_pipe(dev_priv, pipe) {
2929fd3a4024SDaniel Vetter 		u32 fault_errors;
2930abd58f01SBen Widawsky 
2931c42664ccSDaniel Vetter 		if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
2932c42664ccSDaniel Vetter 			continue;
2933c42664ccSDaniel Vetter 
2934e32192e1STvrtko Ursulin 		iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
2935e32192e1STvrtko Ursulin 		if (!iir) {
2936e32192e1STvrtko Ursulin 			DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
2937e32192e1STvrtko Ursulin 			continue;
2938e32192e1STvrtko Ursulin 		}
2939770de83dSDamien Lespiau 
2940e32192e1STvrtko Ursulin 		ret = IRQ_HANDLED;
2941e32192e1STvrtko Ursulin 		I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir);
2942e32192e1STvrtko Ursulin 
2943fd3a4024SDaniel Vetter 		if (iir & GEN8_PIPE_VBLANK)
2944fd3a4024SDaniel Vetter 			drm_handle_vblank(&dev_priv->drm, pipe);
2945abd58f01SBen Widawsky 
2946e32192e1STvrtko Ursulin 		if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
294791d14251STvrtko Ursulin 			hsw_pipe_crc_irq_handler(dev_priv, pipe);
29480fbe7870SDaniel Vetter 
2949e32192e1STvrtko Ursulin 		if (iir & GEN8_PIPE_FIFO_UNDERRUN)
2950e32192e1STvrtko Ursulin 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
295138d83c96SDaniel Vetter 
2952e32192e1STvrtko Ursulin 		fault_errors = iir;
2953bca2bf2aSPandiyan, Dhinakaran 		if (INTEL_GEN(dev_priv) >= 9)
2954e32192e1STvrtko Ursulin 			fault_errors &= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
2955770de83dSDamien Lespiau 		else
2956e32192e1STvrtko Ursulin 			fault_errors &= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
2957770de83dSDamien Lespiau 
2958770de83dSDamien Lespiau 		if (fault_errors)
29591353ec38STvrtko Ursulin 			DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
296030100f2bSDaniel Vetter 				  pipe_name(pipe),
2961e32192e1STvrtko Ursulin 				  fault_errors);
2962abd58f01SBen Widawsky 	}
2963abd58f01SBen Widawsky 
296491d14251STvrtko Ursulin 	if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) &&
2965266ea3d9SShashank Sharma 	    master_ctl & GEN8_DE_PCH_IRQ) {
296692d03a80SDaniel Vetter 		/*
296792d03a80SDaniel Vetter 		 * FIXME(BDW): Assume for now that the new interrupt handling
296892d03a80SDaniel Vetter 		 * scheme also closed the SDE interrupt handling race we've seen
296992d03a80SDaniel Vetter 		 * on older pch-split platforms. But this needs testing.
297092d03a80SDaniel Vetter 		 */
2971e32192e1STvrtko Ursulin 		iir = I915_READ(SDEIIR);
2972e32192e1STvrtko Ursulin 		if (iir) {
2973e32192e1STvrtko Ursulin 			I915_WRITE(SDEIIR, iir);
297492d03a80SDaniel Vetter 			ret = IRQ_HANDLED;
29756dbf30ceSVille Syrjälä 
2976c6f7acb8SMatt Roper 			if (INTEL_PCH_TYPE(dev_priv) >= PCH_MCC)
2977c6f7acb8SMatt Roper 				icp_irq_handler(dev_priv, iir, hpd_mcc);
2978c6f7acb8SMatt Roper 			else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
2979c6f7acb8SMatt Roper 				icp_irq_handler(dev_priv, iir, hpd_icp);
2980c6c30b91SRodrigo Vivi 			else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
298191d14251STvrtko Ursulin 				spt_irq_handler(dev_priv, iir);
29826dbf30ceSVille Syrjälä 			else
298391d14251STvrtko Ursulin 				cpt_irq_handler(dev_priv, iir);
29842dfb0b81SJani Nikula 		} else {
29852dfb0b81SJani Nikula 			/*
29862dfb0b81SJani Nikula 			 * Like on previous PCH there seems to be something
29872dfb0b81SJani Nikula 			 * fishy going on with forwarding PCH interrupts.
29882dfb0b81SJani Nikula 			 */
29892dfb0b81SJani Nikula 			DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n");
29902dfb0b81SJani Nikula 		}
299192d03a80SDaniel Vetter 	}
299292d03a80SDaniel Vetter 
2993f11a0f46STvrtko Ursulin 	return ret;
2994f11a0f46STvrtko Ursulin }
2995f11a0f46STvrtko Ursulin 
29964376b9c9SMika Kuoppala static inline u32 gen8_master_intr_disable(void __iomem * const regs)
29974376b9c9SMika Kuoppala {
29984376b9c9SMika Kuoppala 	raw_reg_write(regs, GEN8_MASTER_IRQ, 0);
29994376b9c9SMika Kuoppala 
30004376b9c9SMika Kuoppala 	/*
30014376b9c9SMika Kuoppala 	 * Now with master disabled, get a sample of level indications
30024376b9c9SMika Kuoppala 	 * for this interrupt. Indications will be cleared on related acks.
30034376b9c9SMika Kuoppala 	 * New indications can and will light up during processing,
30044376b9c9SMika Kuoppala 	 * and will generate a new interrupt after enabling master.
30054376b9c9SMika Kuoppala 	 */
30064376b9c9SMika Kuoppala 	return raw_reg_read(regs, GEN8_MASTER_IRQ);
30074376b9c9SMika Kuoppala }
30084376b9c9SMika Kuoppala 
30094376b9c9SMika Kuoppala static inline void gen8_master_intr_enable(void __iomem * const regs)
30104376b9c9SMika Kuoppala {
30114376b9c9SMika Kuoppala 	raw_reg_write(regs, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
30124376b9c9SMika Kuoppala }
30134376b9c9SMika Kuoppala 
3014f11a0f46STvrtko Ursulin static irqreturn_t gen8_irq_handler(int irq, void *arg)
3015f11a0f46STvrtko Ursulin {
3016b318b824SVille Syrjälä 	struct drm_i915_private *dev_priv = arg;
301725286aacSDaniele Ceraolo Spurio 	void __iomem * const regs = dev_priv->uncore.regs;
3018f11a0f46STvrtko Ursulin 	u32 master_ctl;
3019f0fd96f5SChris Wilson 	u32 gt_iir[4];
3020f11a0f46STvrtko Ursulin 
3021f11a0f46STvrtko Ursulin 	if (!intel_irqs_enabled(dev_priv))
3022f11a0f46STvrtko Ursulin 		return IRQ_NONE;
3023f11a0f46STvrtko Ursulin 
30244376b9c9SMika Kuoppala 	master_ctl = gen8_master_intr_disable(regs);
30254376b9c9SMika Kuoppala 	if (!master_ctl) {
30264376b9c9SMika Kuoppala 		gen8_master_intr_enable(regs);
3027f11a0f46STvrtko Ursulin 		return IRQ_NONE;
30284376b9c9SMika Kuoppala 	}
3029f11a0f46STvrtko Ursulin 
3030f11a0f46STvrtko Ursulin 	/* Find, clear, then process each source of interrupt */
303155ef72f2SChris Wilson 	gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);
3032f0fd96f5SChris Wilson 
3033f0fd96f5SChris Wilson 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
3034f0fd96f5SChris Wilson 	if (master_ctl & ~GEN8_GT_IRQS) {
30359102650fSDaniele Ceraolo Spurio 		disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
303655ef72f2SChris Wilson 		gen8_de_irq_handler(dev_priv, master_ctl);
30379102650fSDaniele Ceraolo Spurio 		enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
3038f0fd96f5SChris Wilson 	}
3039f11a0f46STvrtko Ursulin 
30404376b9c9SMika Kuoppala 	gen8_master_intr_enable(regs);
3041abd58f01SBen Widawsky 
3042f0fd96f5SChris Wilson 	gen8_gt_irq_handler(dev_priv, master_ctl, gt_iir);
30431f814dacSImre Deak 
304455ef72f2SChris Wilson 	return IRQ_HANDLED;
3045abd58f01SBen Widawsky }
3046abd58f01SBen Widawsky 
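/*
 * Select the given GT interrupt bank/bit and spin-wait (~100us) for the
 * corresponding identity register to become valid; returns the identity
 * value, or 0 on timeout.
 */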
304751951ae7SMika Kuoppala static u32
30489b77011eSTvrtko Ursulin gen11_gt_engine_identity(struct intel_gt *gt,
304951951ae7SMika Kuoppala 			 const unsigned int bank, const unsigned int bit)
305051951ae7SMika Kuoppala {
30519b77011eSTvrtko Ursulin 	void __iomem * const regs = gt->uncore->regs;
305251951ae7SMika Kuoppala 	u32 timeout_ts;
305351951ae7SMika Kuoppala 	u32 ident;
305451951ae7SMika Kuoppala 
30559b77011eSTvrtko Ursulin 	lockdep_assert_held(&gt->i915->irq_lock);
305696606f3bSOscar Mateo 
305751951ae7SMika Kuoppala 	raw_reg_write(regs, GEN11_IIR_REG_SELECTOR(bank), BIT(bit));
305851951ae7SMika Kuoppala 
305951951ae7SMika Kuoppala 	/*
306051951ae7SMika Kuoppala 	 * NB: Specs do not specify how long to spin wait,
306151951ae7SMika Kuoppala 	 * so we do ~100us as an educated guess.
306251951ae7SMika Kuoppala 	 */
306351951ae7SMika Kuoppala 	timeout_ts = (local_clock() >> 10) + 100;
306451951ae7SMika Kuoppala 	do {
306551951ae7SMika Kuoppala 		ident = raw_reg_read(regs, GEN11_INTR_IDENTITY_REG(bank));
306651951ae7SMika Kuoppala 	} while (!(ident & GEN11_INTR_DATA_VALID) &&
306751951ae7SMika Kuoppala 		 !time_after32(local_clock() >> 10, timeout_ts));
306851951ae7SMika Kuoppala 
306951951ae7SMika Kuoppala 	if (unlikely(!(ident & GEN11_INTR_DATA_VALID))) {
307051951ae7SMika Kuoppala 		DRM_ERROR("INTR_IDENTITY_REG%u:%u 0x%08x not valid!\n",
307151951ae7SMika Kuoppala 			  bank, bit, ident);
307251951ae7SMika Kuoppala 		return 0;
307351951ae7SMika Kuoppala 	}
307451951ae7SMika Kuoppala 
307551951ae7SMika Kuoppala 	raw_reg_write(regs, GEN11_INTR_IDENTITY_REG(bank),
307651951ae7SMika Kuoppala 		      GEN11_INTR_DATA_VALID);
307751951ae7SMika Kuoppala 
3078f744dbc2SMika Kuoppala 	return ident;
3079f744dbc2SMika Kuoppala }
3080f744dbc2SMika Kuoppala 
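/* Route OTHER_CLASS interrupts to the GuC or GT PM (RPS) handlers. */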
3081f744dbc2SMika Kuoppala static void
30829b77011eSTvrtko Ursulin gen11_other_irq_handler(struct intel_gt *gt, const u8 instance,
30839b77011eSTvrtko Ursulin 			const u16 iir)
3084f744dbc2SMika Kuoppala {
308554c52a84SOscar Mateo 	if (instance == OTHER_GUC_INSTANCE)
3086*8b5689d7SDaniele Ceraolo Spurio 		return guc_irq_handler(&gt->uc.guc, iir);
308754c52a84SOscar Mateo 
3088d02b98b8SOscar Mateo 	if (instance == OTHER_GTPM_INSTANCE)
308958820574STvrtko Ursulin 		return gen11_rps_irq_handler(gt, iir);
3090d02b98b8SOscar Mateo 
3091f744dbc2SMika Kuoppala 	WARN_ONCE(1, "unhandled other interrupt instance=0x%x, iir=0x%x\n",
3092f744dbc2SMika Kuoppala 		  instance, iir);
3093f744dbc2SMika Kuoppala }
3094f744dbc2SMika Kuoppala 
3095f744dbc2SMika Kuoppala static void
30969b77011eSTvrtko Ursulin gen11_engine_irq_handler(struct intel_gt *gt, const u8 class,
30979b77011eSTvrtko Ursulin 			 const u8 instance, const u16 iir)
3098f744dbc2SMika Kuoppala {
3099f744dbc2SMika Kuoppala 	struct intel_engine_cs *engine;
3100f744dbc2SMika Kuoppala 
3101f744dbc2SMika Kuoppala 	if (instance <= MAX_ENGINE_INSTANCE)
31029b77011eSTvrtko Ursulin 		engine = gt->i915->engine_class[class][instance];
3103f744dbc2SMika Kuoppala 	else
3104f744dbc2SMika Kuoppala 		engine = NULL;
3105f744dbc2SMika Kuoppala 
3106f744dbc2SMika Kuoppala 	if (likely(engine))
3107f744dbc2SMika Kuoppala 		return gen8_cs_irq_handler(engine, iir);
3108f744dbc2SMika Kuoppala 
3109f744dbc2SMika Kuoppala 	WARN_ONCE(1, "unhandled engine interrupt class=0x%x, instance=0x%x\n",
3110f744dbc2SMika Kuoppala 		  class, instance);
3111f744dbc2SMika Kuoppala }
3112f744dbc2SMika Kuoppala 
3113f744dbc2SMika Kuoppala static void
31149b77011eSTvrtko Ursulin gen11_gt_identity_handler(struct intel_gt *gt, const u32 identity)
3115f744dbc2SMika Kuoppala {
3116f744dbc2SMika Kuoppala 	const u8 class = GEN11_INTR_ENGINE_CLASS(identity);
3117f744dbc2SMika Kuoppala 	const u8 instance = GEN11_INTR_ENGINE_INSTANCE(identity);
3118f744dbc2SMika Kuoppala 	const u16 intr = GEN11_INTR_ENGINE_INTR(identity);
3119f744dbc2SMika Kuoppala 
3120f744dbc2SMika Kuoppala 	if (unlikely(!intr))
3121f744dbc2SMika Kuoppala 		return;
3122f744dbc2SMika Kuoppala 
3123f744dbc2SMika Kuoppala 	if (class <= COPY_ENGINE_CLASS)
31249b77011eSTvrtko Ursulin 		return gen11_engine_irq_handler(gt, class, instance, intr);
3125f744dbc2SMika Kuoppala 
3126f744dbc2SMika Kuoppala 	if (class == OTHER_CLASS)
31279b77011eSTvrtko Ursulin 		return gen11_other_irq_handler(gt, instance, intr);
3128f744dbc2SMika Kuoppala 
3129f744dbc2SMika Kuoppala 	WARN_ONCE(1, "unknown interrupt class=0x%x, instance=0x%x, intr=0x%x\n",
3130f744dbc2SMika Kuoppala 		  class, instance, intr);
313151951ae7SMika Kuoppala }
313251951ae7SMika Kuoppala 
313351951ae7SMika Kuoppala static void
31349b77011eSTvrtko Ursulin gen11_gt_bank_handler(struct intel_gt *gt, const unsigned int bank)
313551951ae7SMika Kuoppala {
31369b77011eSTvrtko Ursulin 	void __iomem * const regs = gt->uncore->regs;
313751951ae7SMika Kuoppala 	unsigned long intr_dw;
313851951ae7SMika Kuoppala 	unsigned int bit;
313951951ae7SMika Kuoppala 
31409b77011eSTvrtko Ursulin 	lockdep_assert_held(&gt->i915->irq_lock);
314151951ae7SMika Kuoppala 
314251951ae7SMika Kuoppala 	intr_dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
314351951ae7SMika Kuoppala 
314451951ae7SMika Kuoppala 	for_each_set_bit(bit, &intr_dw, 32) {
31459b77011eSTvrtko Ursulin 		const u32 ident = gen11_gt_engine_identity(gt, bank, bit);
314651951ae7SMika Kuoppala 
31479b77011eSTvrtko Ursulin 		gen11_gt_identity_handler(gt, ident);
314851951ae7SMika Kuoppala 	}
314951951ae7SMika Kuoppala 
315051951ae7SMika Kuoppala 	/* The clear must happen after the shared IIR has been serviced for the engine */
315151951ae7SMika Kuoppala 	raw_reg_write(regs, GEN11_GT_INTR_DW(bank), intr_dw);
315251951ae7SMika Kuoppala }
315396606f3bSOscar Mateo 
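/*
 * Top-level GT dispatch: with irq_lock held, walk the (currently two) GT
 * interrupt banks flagged in master_ctl and service every identity bit
 * set in each bank via gen11_gt_bank_handler().
 */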
315496606f3bSOscar Mateo static void
31559b77011eSTvrtko Ursulin gen11_gt_irq_handler(struct intel_gt *gt, const u32 master_ctl)
315696606f3bSOscar Mateo {
31579b77011eSTvrtko Ursulin 	struct drm_i915_private *i915 = gt->i915;
315896606f3bSOscar Mateo 	unsigned int bank;
315996606f3bSOscar Mateo 
316096606f3bSOscar Mateo 	spin_lock(&i915->irq_lock);
316196606f3bSOscar Mateo 
316296606f3bSOscar Mateo 	for (bank = 0; bank < 2; bank++) {
316396606f3bSOscar Mateo 		if (master_ctl & GEN11_GT_DW_IRQ(bank))
31649b77011eSTvrtko Ursulin 			gen11_gt_bank_handler(gt, bank);
316596606f3bSOscar Mateo 	}
316696606f3bSOscar Mateo 
316796606f3bSOscar Mateo 	spin_unlock(&i915->irq_lock);
316851951ae7SMika Kuoppala }
316951951ae7SMika Kuoppala 
31707a909383SChris Wilson static u32
31719b77011eSTvrtko Ursulin gen11_gu_misc_irq_ack(struct intel_gt *gt, const u32 master_ctl)
3172df0d28c1SDhinakaran Pandiyan {
31739b77011eSTvrtko Ursulin 	void __iomem * const regs = gt->uncore->regs;
31747a909383SChris Wilson 	u32 iir;
3175df0d28c1SDhinakaran Pandiyan 
3176df0d28c1SDhinakaran Pandiyan 	if (!(master_ctl & GEN11_GU_MISC_IRQ))
31777a909383SChris Wilson 		return 0;
3178df0d28c1SDhinakaran Pandiyan 
31797a909383SChris Wilson 	iir = raw_reg_read(regs, GEN11_GU_MISC_IIR);
31807a909383SChris Wilson 	if (likely(iir))
31817a909383SChris Wilson 		raw_reg_write(regs, GEN11_GU_MISC_IIR, iir);
31827a909383SChris Wilson 
31837a909383SChris Wilson 	return iir;
3184df0d28c1SDhinakaran Pandiyan }
3185df0d28c1SDhinakaran Pandiyan 
3186df0d28c1SDhinakaran Pandiyan static void
31879b77011eSTvrtko Ursulin gen11_gu_misc_irq_handler(struct intel_gt *gt, const u32 iir)
3188df0d28c1SDhinakaran Pandiyan {
3189df0d28c1SDhinakaran Pandiyan 	if (iir & GEN11_GU_MISC_GSE)
31909b77011eSTvrtko Ursulin 		intel_opregion_asle_intr(gt->i915);
3191df0d28c1SDhinakaran Pandiyan }
3192df0d28c1SDhinakaran Pandiyan 
319381067b71SMika Kuoppala static inline u32 gen11_master_intr_disable(void __iomem * const regs)
319481067b71SMika Kuoppala {
319581067b71SMika Kuoppala 	raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0);
319681067b71SMika Kuoppala 
319781067b71SMika Kuoppala 	/*
319881067b71SMika Kuoppala 	 * Now with master disabled, get a sample of level indications
319981067b71SMika Kuoppala 	 * for this interrupt. Indications will be cleared on related acks.
320081067b71SMika Kuoppala 	 * New indications can and will light up during processing,
320181067b71SMika Kuoppala 	 * and will generate a new interrupt once the master is re-enabled.
320281067b71SMika Kuoppala 	 */
320381067b71SMika Kuoppala 	return raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
320481067b71SMika Kuoppala }
320581067b71SMika Kuoppala 
320681067b71SMika Kuoppala static inline void gen11_master_intr_enable(void __iomem * const regs)
320781067b71SMika Kuoppala {
320881067b71SMika Kuoppala 	raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ);
320981067b71SMika Kuoppala }
321081067b71SMika Kuoppala 
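/*
 * Gen11 top-level interrupt handler: disable the master interrupt, then
 * find, clear and process the GT, display and GU_MISC sources indicated
 * by the master control register before re-enabling the master interrupt.
 * Display handling only drops the runtime-pm wakeref asserts because IRQs
 * are synced during runtime suspend, so no wakeref needs to be taken here.
 */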
321151951ae7SMika Kuoppala static irqreturn_t gen11_irq_handler(int irq, void *arg)
321251951ae7SMika Kuoppala {
3213b318b824SVille Syrjälä 	struct drm_i915_private * const i915 = arg;
321425286aacSDaniele Ceraolo Spurio 	void __iomem * const regs = i915->uncore.regs;
32159b77011eSTvrtko Ursulin 	struct intel_gt *gt = &i915->gt;
321651951ae7SMika Kuoppala 	u32 master_ctl;
3217df0d28c1SDhinakaran Pandiyan 	u32 gu_misc_iir;
321851951ae7SMika Kuoppala 
321951951ae7SMika Kuoppala 	if (!intel_irqs_enabled(i915))
322051951ae7SMika Kuoppala 		return IRQ_NONE;
322151951ae7SMika Kuoppala 
322281067b71SMika Kuoppala 	master_ctl = gen11_master_intr_disable(regs);
322381067b71SMika Kuoppala 	if (!master_ctl) {
322481067b71SMika Kuoppala 		gen11_master_intr_enable(regs);
322551951ae7SMika Kuoppala 		return IRQ_NONE;
322681067b71SMika Kuoppala 	}
322751951ae7SMika Kuoppala 
322851951ae7SMika Kuoppala 	/* Find, clear, then process each source of interrupt. */
32299b77011eSTvrtko Ursulin 	gen11_gt_irq_handler(gt, master_ctl);
323051951ae7SMika Kuoppala 
323151951ae7SMika Kuoppala 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
323251951ae7SMika Kuoppala 	if (master_ctl & GEN11_DISPLAY_IRQ) {
323351951ae7SMika Kuoppala 		const u32 disp_ctl = raw_reg_read(regs, GEN11_DISPLAY_INT_CTL);
323451951ae7SMika Kuoppala 
32359102650fSDaniele Ceraolo Spurio 		disable_rpm_wakeref_asserts(&i915->runtime_pm);
323651951ae7SMika Kuoppala 		/*
323751951ae7SMika Kuoppala 		 * GEN11_DISPLAY_INT_CTL has same format as GEN8_MASTER_IRQ
323851951ae7SMika Kuoppala 		 * for the display related bits.
323951951ae7SMika Kuoppala 		 */
324051951ae7SMika Kuoppala 		gen8_de_irq_handler(i915, disp_ctl);
32419102650fSDaniele Ceraolo Spurio 		enable_rpm_wakeref_asserts(&i915->runtime_pm);
324251951ae7SMika Kuoppala 	}
324351951ae7SMika Kuoppala 
32449b77011eSTvrtko Ursulin 	gu_misc_iir = gen11_gu_misc_irq_ack(gt, master_ctl);
3245df0d28c1SDhinakaran Pandiyan 
324681067b71SMika Kuoppala 	gen11_master_intr_enable(regs);
324751951ae7SMika Kuoppala 
32489b77011eSTvrtko Ursulin 	gen11_gu_misc_irq_handler(gt, gu_misc_iir);
3249df0d28c1SDhinakaran Pandiyan 
325051951ae7SMika Kuoppala 	return IRQ_HANDLED;
325151951ae7SMika Kuoppala }
325251951ae7SMika Kuoppala 
325342f52ef8SKeith Packard /* Called from drm generic code, passed 'crtc' which
325442f52ef8SKeith Packard  * we use as a pipe index
325542f52ef8SKeith Packard  */
325608fa8fd0SVille Syrjälä int i8xx_enable_vblank(struct drm_crtc *crtc)
32570a3e67a4SJesse Barnes {
325808fa8fd0SVille Syrjälä 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
325908fa8fd0SVille Syrjälä 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
3260e9d21d7fSKeith Packard 	unsigned long irqflags;
326171e0ffa5SJesse Barnes 
32621ec14ad3SChris Wilson 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
326386e83e35SChris Wilson 	i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
326486e83e35SChris Wilson 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
326586e83e35SChris Wilson 
326686e83e35SChris Wilson 	return 0;
326786e83e35SChris Wilson }
326886e83e35SChris Wilson 
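/*
 * On i945gm the first vblank enable additionally schedules a work item
 * that raises the CPU DMA latency PM QoS request, since vblank interrupts
 * fail to wake the device from C3 (see i945gm_vblank_work_func() below).
 */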
326908fa8fd0SVille Syrjälä int i945gm_enable_vblank(struct drm_crtc *crtc)
3270d938da6bSVille Syrjälä {
327108fa8fd0SVille Syrjälä 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
3272d938da6bSVille Syrjälä 
3273d938da6bSVille Syrjälä 	if (dev_priv->i945gm_vblank.enabled++ == 0)
3274d938da6bSVille Syrjälä 		schedule_work(&dev_priv->i945gm_vblank.work);
3275d938da6bSVille Syrjälä 
327608fa8fd0SVille Syrjälä 	return i8xx_enable_vblank(crtc);
3277d938da6bSVille Syrjälä }
3278d938da6bSVille Syrjälä 
327908fa8fd0SVille Syrjälä int i965_enable_vblank(struct drm_crtc *crtc)
328086e83e35SChris Wilson {
328108fa8fd0SVille Syrjälä 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
328208fa8fd0SVille Syrjälä 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
328386e83e35SChris Wilson 	unsigned long irqflags;
328486e83e35SChris Wilson 
328586e83e35SChris Wilson 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
32867c463586SKeith Packard 	i915_enable_pipestat(dev_priv, pipe,
3287755e9019SImre Deak 			     PIPE_START_VBLANK_INTERRUPT_STATUS);
32881ec14ad3SChris Wilson 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
32898692d00eSChris Wilson 
32900a3e67a4SJesse Barnes 	return 0;
32910a3e67a4SJesse Barnes }
32920a3e67a4SJesse Barnes 
329308fa8fd0SVille Syrjälä int ilk_enable_vblank(struct drm_crtc *crtc)
3294f796cf8fSJesse Barnes {
329508fa8fd0SVille Syrjälä 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
329608fa8fd0SVille Syrjälä 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
3297f796cf8fSJesse Barnes 	unsigned long irqflags;
3298a9c287c9SJani Nikula 	u32 bit = INTEL_GEN(dev_priv) >= 7 ?
329986e83e35SChris Wilson 		DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
3300f796cf8fSJesse Barnes 
3301f796cf8fSJesse Barnes 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3302fbdedaeaSVille Syrjälä 	ilk_enable_display_irq(dev_priv, bit);
3303b1f14ad0SJesse Barnes 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3304b1f14ad0SJesse Barnes 
33052e8bf223SDhinakaran Pandiyan 	/* Even though there is no DMC, the frame counter can get stuck
33062e8bf223SDhinakaran Pandiyan 	 * when PSR is active, as no frames are generated.
33072e8bf223SDhinakaran Pandiyan 	 */
33082e8bf223SDhinakaran Pandiyan 	if (HAS_PSR(dev_priv))
330908fa8fd0SVille Syrjälä 		drm_crtc_vblank_restore(crtc);
33102e8bf223SDhinakaran Pandiyan 
3311b1f14ad0SJesse Barnes 	return 0;
3312b1f14ad0SJesse Barnes }
3313b1f14ad0SJesse Barnes 
331408fa8fd0SVille Syrjälä int bdw_enable_vblank(struct drm_crtc *crtc)
3315abd58f01SBen Widawsky {
331608fa8fd0SVille Syrjälä 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
331708fa8fd0SVille Syrjälä 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
3318abd58f01SBen Widawsky 	unsigned long irqflags;
3319abd58f01SBen Widawsky 
3320abd58f01SBen Widawsky 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3321013d3752SVille Syrjälä 	bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
3322abd58f01SBen Widawsky 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3323013d3752SVille Syrjälä 
33242e8bf223SDhinakaran Pandiyan 	/* Even if there is no DMC, the frame counter can get stuck when
33252e8bf223SDhinakaran Pandiyan 	 * PSR is active, as no frames are generated, so check only for PSR.
33262e8bf223SDhinakaran Pandiyan 	 */
33272e8bf223SDhinakaran Pandiyan 	if (HAS_PSR(dev_priv))
332808fa8fd0SVille Syrjälä 		drm_crtc_vblank_restore(crtc);
33292e8bf223SDhinakaran Pandiyan 
3330abd58f01SBen Widawsky 	return 0;
3331abd58f01SBen Widawsky }
3332abd58f01SBen Widawsky 
333342f52ef8SKeith Packard /* Called from drm generic code, passed 'crtc' which
333442f52ef8SKeith Packard  * we use as a pipe index
333542f52ef8SKeith Packard  */
333608fa8fd0SVille Syrjälä void i8xx_disable_vblank(struct drm_crtc *crtc)
333786e83e35SChris Wilson {
333808fa8fd0SVille Syrjälä 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
333908fa8fd0SVille Syrjälä 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
334086e83e35SChris Wilson 	unsigned long irqflags;
334186e83e35SChris Wilson 
334286e83e35SChris Wilson 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
334386e83e35SChris Wilson 	i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
334486e83e35SChris Wilson 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
334586e83e35SChris Wilson }
334686e83e35SChris Wilson 
334708fa8fd0SVille Syrjälä void i945gm_disable_vblank(struct drm_crtc *crtc)
3348d938da6bSVille Syrjälä {
334908fa8fd0SVille Syrjälä 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
3350d938da6bSVille Syrjälä 
335108fa8fd0SVille Syrjälä 	i8xx_disable_vblank(crtc);
3352d938da6bSVille Syrjälä 
3353d938da6bSVille Syrjälä 	if (--dev_priv->i945gm_vblank.enabled == 0)
3354d938da6bSVille Syrjälä 		schedule_work(&dev_priv->i945gm_vblank.work);
3355d938da6bSVille Syrjälä }
3356d938da6bSVille Syrjälä 
335708fa8fd0SVille Syrjälä void i965_disable_vblank(struct drm_crtc *crtc)
33580a3e67a4SJesse Barnes {
335908fa8fd0SVille Syrjälä 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
336008fa8fd0SVille Syrjälä 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
3361e9d21d7fSKeith Packard 	unsigned long irqflags;
33620a3e67a4SJesse Barnes 
33631ec14ad3SChris Wilson 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
33647c463586SKeith Packard 	i915_disable_pipestat(dev_priv, pipe,
3365755e9019SImre Deak 			      PIPE_START_VBLANK_INTERRUPT_STATUS);
33661ec14ad3SChris Wilson 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
33670a3e67a4SJesse Barnes }
33680a3e67a4SJesse Barnes 
336908fa8fd0SVille Syrjälä void ilk_disable_vblank(struct drm_crtc *crtc)
3370f796cf8fSJesse Barnes {
337108fa8fd0SVille Syrjälä 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
337208fa8fd0SVille Syrjälä 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
3373f796cf8fSJesse Barnes 	unsigned long irqflags;
3374a9c287c9SJani Nikula 	u32 bit = INTEL_GEN(dev_priv) >= 7 ?
337586e83e35SChris Wilson 		DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
3376f796cf8fSJesse Barnes 
3377f796cf8fSJesse Barnes 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3378fbdedaeaSVille Syrjälä 	ilk_disable_display_irq(dev_priv, bit);
3379b1f14ad0SJesse Barnes 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3380b1f14ad0SJesse Barnes }
3381b1f14ad0SJesse Barnes 
338208fa8fd0SVille Syrjälä void bdw_disable_vblank(struct drm_crtc *crtc)
3383abd58f01SBen Widawsky {
338408fa8fd0SVille Syrjälä 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
338508fa8fd0SVille Syrjälä 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
3386abd58f01SBen Widawsky 	unsigned long irqflags;
3387abd58f01SBen Widawsky 
3388abd58f01SBen Widawsky 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3389013d3752SVille Syrjälä 	bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
3390abd58f01SBen Widawsky 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3391abd58f01SBen Widawsky }
3392abd58f01SBen Widawsky 
33937218524dSChris Wilson static void i945gm_vblank_work_func(struct work_struct *work)
3394d938da6bSVille Syrjälä {
3395d938da6bSVille Syrjälä 	struct drm_i915_private *dev_priv =
3396d938da6bSVille Syrjälä 		container_of(work, struct drm_i915_private, i945gm_vblank.work);
3397d938da6bSVille Syrjälä 
3398d938da6bSVille Syrjälä 	/*
3399d938da6bSVille Syrjälä 	 * Vblank interrupts fail to wake up the device from C3,
3400d938da6bSVille Syrjälä 	 * hence we want to prevent C3 usage while vblank interrupts
3401d938da6bSVille Syrjälä 	 * are enabled.
3402d938da6bSVille Syrjälä 	 */
3403d938da6bSVille Syrjälä 	pm_qos_update_request(&dev_priv->i945gm_vblank.pm_qos,
3404d938da6bSVille Syrjälä 			      READ_ONCE(dev_priv->i945gm_vblank.enabled) ?
3405d938da6bSVille Syrjälä 			      dev_priv->i945gm_vblank.c3_disable_latency :
3406d938da6bSVille Syrjälä 			      PM_QOS_DEFAULT_VALUE);
3407d938da6bSVille Syrjälä }
3408d938da6bSVille Syrjälä 
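/*
 * Look up the named cpuidle state and return an exit latency just below
 * it (or 0 if the cpuidle driver or state is not found); requesting that
 * value via PM QoS keeps the CPU out of the named C-state.
 */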
3409d938da6bSVille Syrjälä static int cstate_disable_latency(const char *name)
3410d938da6bSVille Syrjälä {
3411d938da6bSVille Syrjälä 	const struct cpuidle_driver *drv;
3412d938da6bSVille Syrjälä 	int i;
3413d938da6bSVille Syrjälä 
3414d938da6bSVille Syrjälä 	drv = cpuidle_get_driver();
3415d938da6bSVille Syrjälä 	if (!drv)
3416d938da6bSVille Syrjälä 		return 0;
3417d938da6bSVille Syrjälä 
3418d938da6bSVille Syrjälä 	for (i = 0; i < drv->state_count; i++) {
3419d938da6bSVille Syrjälä 		const struct cpuidle_state *state = &drv->states[i];
3420d938da6bSVille Syrjälä 
3421d938da6bSVille Syrjälä 		if (!strcmp(state->name, name))
3422d938da6bSVille Syrjälä 			return state->exit_latency ?
3423d938da6bSVille Syrjälä 				state->exit_latency - 1 : 0;
3424d938da6bSVille Syrjälä 	}
3425d938da6bSVille Syrjälä 
3426d938da6bSVille Syrjälä 	return 0;
3427d938da6bSVille Syrjälä }
3428d938da6bSVille Syrjälä 
3429d938da6bSVille Syrjälä static void i945gm_vblank_work_init(struct drm_i915_private *dev_priv)
3430d938da6bSVille Syrjälä {
3431d938da6bSVille Syrjälä 	INIT_WORK(&dev_priv->i945gm_vblank.work,
3432d938da6bSVille Syrjälä 		  i945gm_vblank_work_func);
3433d938da6bSVille Syrjälä 
3434d938da6bSVille Syrjälä 	dev_priv->i945gm_vblank.c3_disable_latency =
3435d938da6bSVille Syrjälä 		cstate_disable_latency("C3");
3436d938da6bSVille Syrjälä 	pm_qos_add_request(&dev_priv->i945gm_vblank.pm_qos,
3437d938da6bSVille Syrjälä 			   PM_QOS_CPU_DMA_LATENCY,
3438d938da6bSVille Syrjälä 			   PM_QOS_DEFAULT_VALUE);
3439d938da6bSVille Syrjälä }
3440d938da6bSVille Syrjälä 
3441d938da6bSVille Syrjälä static void i945gm_vblank_work_fini(struct drm_i915_private *dev_priv)
3442d938da6bSVille Syrjälä {
3443d938da6bSVille Syrjälä 	cancel_work_sync(&dev_priv->i945gm_vblank.work);
3444d938da6bSVille Syrjälä 	pm_qos_remove_request(&dev_priv->i945gm_vblank.pm_qos);
3445d938da6bSVille Syrjälä }
3446d938da6bSVille Syrjälä 
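/*
 * Reset the south display engine (PCH) interrupt registers; on CPT/LPT
 * also clear SERR_INT. No-op for PCH_NOP configurations.
 */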
3447b243f530STvrtko Ursulin static void ibx_irq_reset(struct drm_i915_private *dev_priv)
344891738a95SPaulo Zanoni {
3449b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
3450b16b2a2fSPaulo Zanoni 
34516e266956STvrtko Ursulin 	if (HAS_PCH_NOP(dev_priv))
345291738a95SPaulo Zanoni 		return;
345391738a95SPaulo Zanoni 
3454b16b2a2fSPaulo Zanoni 	GEN3_IRQ_RESET(uncore, SDE);
3455105b122eSPaulo Zanoni 
34566e266956STvrtko Ursulin 	if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
3457105b122eSPaulo Zanoni 		I915_WRITE(SERR_INT, 0xffffffff);
3458622364b6SPaulo Zanoni }
3459105b122eSPaulo Zanoni 
346091738a95SPaulo Zanoni /*
3461622364b6SPaulo Zanoni  * SDEIER is also touched by the interrupt handler to work around missed PCH
3462622364b6SPaulo Zanoni  * interrupts. Hence we can't update it after the interrupt handler is enabled -
3463622364b6SPaulo Zanoni  * instead we unconditionally enable all PCH interrupt sources here, but then
3464622364b6SPaulo Zanoni  * only unmask them as needed with SDEIMR.
3465622364b6SPaulo Zanoni  *
3466622364b6SPaulo Zanoni  * This function needs to be called before interrupts are enabled.
346791738a95SPaulo Zanoni  */
3468b318b824SVille Syrjälä static void ibx_irq_pre_postinstall(struct drm_i915_private *dev_priv)
3469622364b6SPaulo Zanoni {
34706e266956STvrtko Ursulin 	if (HAS_PCH_NOP(dev_priv))
3471622364b6SPaulo Zanoni 		return;
3472622364b6SPaulo Zanoni 
3473622364b6SPaulo Zanoni 	WARN_ON(I915_READ(SDEIER) != 0);
347491738a95SPaulo Zanoni 	I915_WRITE(SDEIER, 0xffffffff);
347591738a95SPaulo Zanoni 	POSTING_READ(SDEIER);
347691738a95SPaulo Zanoni }
347791738a95SPaulo Zanoni 
3478b243f530STvrtko Ursulin static void gen5_gt_irq_reset(struct drm_i915_private *dev_priv)
3479d18ea1b5SDaniel Vetter {
3480b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
3481b16b2a2fSPaulo Zanoni 
3482b16b2a2fSPaulo Zanoni 	GEN3_IRQ_RESET(uncore, GT);
3483b243f530STvrtko Ursulin 	if (INTEL_GEN(dev_priv) >= 6)
3484b16b2a2fSPaulo Zanoni 		GEN3_IRQ_RESET(uncore, GEN6_PM);
3485d18ea1b5SDaniel Vetter }
3486d18ea1b5SDaniel Vetter 
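/*
 * Reset the VLV/CHV display interrupt state: clear the DPINVGTT status,
 * hotplug enables and status, the pipestat registers and the VLV_
 * IMR/IER/IIR triplet, leaving irq_mask fully masked.
 */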
348770591a41SVille Syrjälä static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
348870591a41SVille Syrjälä {
3489b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
3490b16b2a2fSPaulo Zanoni 
349171b8b41dSVille Syrjälä 	if (IS_CHERRYVIEW(dev_priv))
3492f0818984STvrtko Ursulin 		intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
349371b8b41dSVille Syrjälä 	else
3494f0818984STvrtko Ursulin 		intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK);
349571b8b41dSVille Syrjälä 
3496ad22d106SVille Syrjälä 	i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
3497f0818984STvrtko Ursulin 	intel_uncore_write(uncore, PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
349870591a41SVille Syrjälä 
349944d9241eSVille Syrjälä 	i9xx_pipestat_irq_reset(dev_priv);
350070591a41SVille Syrjälä 
3501b16b2a2fSPaulo Zanoni 	GEN3_IRQ_RESET(uncore, VLV_);
35028bd099a7SChris Wilson 	dev_priv->irq_mask = ~0u;
350370591a41SVille Syrjälä }
350470591a41SVille Syrjälä 
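/*
 * Enable the VLV/CHV display interrupts: the GMBUS pipestat on pipe A,
 * CRC-done pipestats on all pipes, and the display port, pipe event and
 * LPE audio interrupts (pipe C only on CHV).
 */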
35058bb61306SVille Syrjälä static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
35068bb61306SVille Syrjälä {
3507b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
3508b16b2a2fSPaulo Zanoni 
35098bb61306SVille Syrjälä 	u32 pipestat_mask;
35109ab981f2SVille Syrjälä 	u32 enable_mask;
35118bb61306SVille Syrjälä 	enum pipe pipe;
35128bb61306SVille Syrjälä 
3513842ebf7aSVille Syrjälä 	pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS;
35148bb61306SVille Syrjälä 
35158bb61306SVille Syrjälä 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
35168bb61306SVille Syrjälä 	for_each_pipe(dev_priv, pipe)
35178bb61306SVille Syrjälä 		i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
35188bb61306SVille Syrjälä 
35199ab981f2SVille Syrjälä 	enable_mask = I915_DISPLAY_PORT_INTERRUPT |
35208bb61306SVille Syrjälä 		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3521ebf5f921SVille Syrjälä 		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3522ebf5f921SVille Syrjälä 		I915_LPE_PIPE_A_INTERRUPT |
3523ebf5f921SVille Syrjälä 		I915_LPE_PIPE_B_INTERRUPT;
3524ebf5f921SVille Syrjälä 
35258bb61306SVille Syrjälä 	if (IS_CHERRYVIEW(dev_priv))
3526ebf5f921SVille Syrjälä 		enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT |
3527ebf5f921SVille Syrjälä 			I915_LPE_PIPE_C_INTERRUPT;
35286b7eafc1SVille Syrjälä 
35298bd099a7SChris Wilson 	WARN_ON(dev_priv->irq_mask != ~0u);
35306b7eafc1SVille Syrjälä 
35319ab981f2SVille Syrjälä 	dev_priv->irq_mask = ~enable_mask;
35328bb61306SVille Syrjälä 
3533b16b2a2fSPaulo Zanoni 	GEN3_IRQ_INIT(uncore, VLV_, dev_priv->irq_mask, enable_mask);
35348bb61306SVille Syrjälä }
35358bb61306SVille Syrjälä 
35368bb61306SVille Syrjälä /* drm_dma.h hooks */
3538b318b824SVille Syrjälä static void ironlake_irq_reset(struct drm_i915_private *dev_priv)
35398bb61306SVille Syrjälä {
3540b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
35418bb61306SVille Syrjälä 
3542b16b2a2fSPaulo Zanoni 	GEN3_IRQ_RESET(uncore, DE);
3543cf819effSLucas De Marchi 	if (IS_GEN(dev_priv, 7))
3544f0818984STvrtko Ursulin 		intel_uncore_write(uncore, GEN7_ERR_INT, 0xffffffff);
35458bb61306SVille Syrjälä 
3546fc340442SDaniel Vetter 	if (IS_HASWELL(dev_priv)) {
3547f0818984STvrtko Ursulin 		intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
3548f0818984STvrtko Ursulin 		intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
3549fc340442SDaniel Vetter 	}
3550fc340442SDaniel Vetter 
3551b243f530STvrtko Ursulin 	gen5_gt_irq_reset(dev_priv);
35528bb61306SVille Syrjälä 
3553b243f530STvrtko Ursulin 	ibx_irq_reset(dev_priv);
35548bb61306SVille Syrjälä }
35558bb61306SVille Syrjälä 
3556b318b824SVille Syrjälä static void valleyview_irq_reset(struct drm_i915_private *dev_priv)
35577e231dbeSJesse Barnes {
355834c7b8a7SVille Syrjälä 	I915_WRITE(VLV_MASTER_IER, 0);
355934c7b8a7SVille Syrjälä 	POSTING_READ(VLV_MASTER_IER);
356034c7b8a7SVille Syrjälä 
3561b243f530STvrtko Ursulin 	gen5_gt_irq_reset(dev_priv);
35627e231dbeSJesse Barnes 
3563ad22d106SVille Syrjälä 	spin_lock_irq(&dev_priv->irq_lock);
35649918271eSVille Syrjälä 	if (dev_priv->display_irqs_enabled)
356570591a41SVille Syrjälä 		vlv_display_irq_reset(dev_priv);
3566ad22d106SVille Syrjälä 	spin_unlock_irq(&dev_priv->irq_lock);
35677e231dbeSJesse Barnes }
35687e231dbeSJesse Barnes 
3569d6e3cca3SDaniel Vetter static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
3570d6e3cca3SDaniel Vetter {
3571b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
3572b16b2a2fSPaulo Zanoni 
3573b16b2a2fSPaulo Zanoni 	GEN8_IRQ_RESET_NDX(uncore, GT, 0);
3574b16b2a2fSPaulo Zanoni 	GEN8_IRQ_RESET_NDX(uncore, GT, 1);
3575b16b2a2fSPaulo Zanoni 	GEN8_IRQ_RESET_NDX(uncore, GT, 2);
3576b16b2a2fSPaulo Zanoni 	GEN8_IRQ_RESET_NDX(uncore, GT, 3);
3577d6e3cca3SDaniel Vetter }
3578d6e3cca3SDaniel Vetter 
3579b318b824SVille Syrjälä static void gen8_irq_reset(struct drm_i915_private *dev_priv)
3580abd58f01SBen Widawsky {
3581b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
3582abd58f01SBen Widawsky 	int pipe;
3583abd58f01SBen Widawsky 
358425286aacSDaniele Ceraolo Spurio 	gen8_master_intr_disable(dev_priv->uncore.regs);
3585abd58f01SBen Widawsky 
3586d6e3cca3SDaniel Vetter 	gen8_gt_irq_reset(dev_priv);
3587abd58f01SBen Widawsky 
3588f0818984STvrtko Ursulin 	intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
3589f0818984STvrtko Ursulin 	intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
3590e04f7eceSVille Syrjälä 
3591055e393fSDamien Lespiau 	for_each_pipe(dev_priv, pipe)
3592f458ebbcSDaniel Vetter 		if (intel_display_power_is_enabled(dev_priv,
3593813bde43SPaulo Zanoni 						   POWER_DOMAIN_PIPE(pipe)))
3594b16b2a2fSPaulo Zanoni 			GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
3595abd58f01SBen Widawsky 
3596b16b2a2fSPaulo Zanoni 	GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
3597b16b2a2fSPaulo Zanoni 	GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);
3598b16b2a2fSPaulo Zanoni 	GEN3_IRQ_RESET(uncore, GEN8_PCU_);
3599abd58f01SBen Widawsky 
36006e266956STvrtko Ursulin 	if (HAS_PCH_SPLIT(dev_priv))
3601b243f530STvrtko Ursulin 		ibx_irq_reset(dev_priv);
3602abd58f01SBen Widawsky }
3603abd58f01SBen Widawsky 
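/*
 * Quiesce the gen11 GT interrupts: zero the per-class engine enable
 * registers and fully mask the engine, GT PM and GuC interrupt masks.
 */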
36049b77011eSTvrtko Ursulin static void gen11_gt_irq_reset(struct intel_gt *gt)
360551951ae7SMika Kuoppala {
3606f0818984STvrtko Ursulin 	struct intel_uncore *uncore = gt->uncore;
36079b77011eSTvrtko Ursulin 
360851951ae7SMika Kuoppala 	/* Disable RCS, BCS, VCS and VECS class engine interrupts. */
3609f0818984STvrtko Ursulin 	intel_uncore_write(uncore, GEN11_RENDER_COPY_INTR_ENABLE, 0);
3610f0818984STvrtko Ursulin 	intel_uncore_write(uncore, GEN11_VCS_VECS_INTR_ENABLE,	  0);
361151951ae7SMika Kuoppala 
361251951ae7SMika Kuoppala 	/* Restore the irq masks on the RCS, BCS, VCS and VECS engines. */
3613f0818984STvrtko Ursulin 	intel_uncore_write(uncore, GEN11_RCS0_RSVD_INTR_MASK,	~0);
3614f0818984STvrtko Ursulin 	intel_uncore_write(uncore, GEN11_BCS_RSVD_INTR_MASK,	~0);
3615f0818984STvrtko Ursulin 	intel_uncore_write(uncore, GEN11_VCS0_VCS1_INTR_MASK,	~0);
3616f0818984STvrtko Ursulin 	intel_uncore_write(uncore, GEN11_VCS2_VCS3_INTR_MASK,	~0);
3617f0818984STvrtko Ursulin 	intel_uncore_write(uncore, GEN11_VECS0_VECS1_INTR_MASK,	~0);
3618d02b98b8SOscar Mateo 
3619f0818984STvrtko Ursulin 	intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
3620f0818984STvrtko Ursulin 	intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_MASK,  ~0);
3621f0818984STvrtko Ursulin 	intel_uncore_write(uncore, GEN11_GUC_SG_INTR_ENABLE, 0);
3622f0818984STvrtko Ursulin 	intel_uncore_write(uncore, GEN11_GUC_SG_INTR_MASK,  ~0);
362351951ae7SMika Kuoppala }
362451951ae7SMika Kuoppala 
3625b318b824SVille Syrjälä static void gen11_irq_reset(struct drm_i915_private *dev_priv)
362651951ae7SMika Kuoppala {
3627b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
362851951ae7SMika Kuoppala 	int pipe;
362951951ae7SMika Kuoppala 
363025286aacSDaniele Ceraolo Spurio 	gen11_master_intr_disable(dev_priv->uncore.regs);
363151951ae7SMika Kuoppala 
36329b77011eSTvrtko Ursulin 	gen11_gt_irq_reset(&dev_priv->gt);
363351951ae7SMika Kuoppala 
3634f0818984STvrtko Ursulin 	intel_uncore_write(uncore, GEN11_DISPLAY_INT_CTL, 0);
363551951ae7SMika Kuoppala 
3636f0818984STvrtko Ursulin 	intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
3637f0818984STvrtko Ursulin 	intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
363862819dfdSJosé Roberto de Souza 
363951951ae7SMika Kuoppala 	for_each_pipe(dev_priv, pipe)
364051951ae7SMika Kuoppala 		if (intel_display_power_is_enabled(dev_priv,
364151951ae7SMika Kuoppala 						   POWER_DOMAIN_PIPE(pipe)))
3642b16b2a2fSPaulo Zanoni 			GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
364351951ae7SMika Kuoppala 
3644b16b2a2fSPaulo Zanoni 	GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
3645b16b2a2fSPaulo Zanoni 	GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);
3646b16b2a2fSPaulo Zanoni 	GEN3_IRQ_RESET(uncore, GEN11_DE_HPD_);
3647b16b2a2fSPaulo Zanoni 	GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_);
3648b16b2a2fSPaulo Zanoni 	GEN3_IRQ_RESET(uncore, GEN8_PCU_);
364931604222SAnusha Srivatsa 
365029b43ae2SRodrigo Vivi 	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
3651b16b2a2fSPaulo Zanoni 		GEN3_IRQ_RESET(uncore, SDE);
365251951ae7SMika Kuoppala }
365351951ae7SMika Kuoppala 
36544c6c03beSDamien Lespiau void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
3655001bd2cbSImre Deak 				     u8 pipe_mask)
3656d49bdb0eSPaulo Zanoni {
3657b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
3658b16b2a2fSPaulo Zanoni 
3659a9c287c9SJani Nikula 	u32 extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
36606831f3e3SVille Syrjälä 	enum pipe pipe;
3661d49bdb0eSPaulo Zanoni 
366213321786SDaniel Vetter 	spin_lock_irq(&dev_priv->irq_lock);
36639dfe2e3aSImre Deak 
36649dfe2e3aSImre Deak 	if (!intel_irqs_enabled(dev_priv)) {
36659dfe2e3aSImre Deak 		spin_unlock_irq(&dev_priv->irq_lock);
36669dfe2e3aSImre Deak 		return;
36679dfe2e3aSImre Deak 	}
36689dfe2e3aSImre Deak 
36696831f3e3SVille Syrjälä 	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
3670b16b2a2fSPaulo Zanoni 		GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
36716831f3e3SVille Syrjälä 				  dev_priv->de_irq_mask[pipe],
36726831f3e3SVille Syrjälä 				  ~dev_priv->de_irq_mask[pipe] | extra_ier);
36739dfe2e3aSImre Deak 
367413321786SDaniel Vetter 	spin_unlock_irq(&dev_priv->irq_lock);
3675d49bdb0eSPaulo Zanoni }
3676d49bdb0eSPaulo Zanoni 
3677aae8ba84SVille Syrjälä void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
3678001bd2cbSImre Deak 				     u8 pipe_mask)
3679aae8ba84SVille Syrjälä {
3680b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
36816831f3e3SVille Syrjälä 	enum pipe pipe;
36826831f3e3SVille Syrjälä 
3683aae8ba84SVille Syrjälä 	spin_lock_irq(&dev_priv->irq_lock);
36849dfe2e3aSImre Deak 
36859dfe2e3aSImre Deak 	if (!intel_irqs_enabled(dev_priv)) {
36869dfe2e3aSImre Deak 		spin_unlock_irq(&dev_priv->irq_lock);
36879dfe2e3aSImre Deak 		return;
36889dfe2e3aSImre Deak 	}
36899dfe2e3aSImre Deak 
36906831f3e3SVille Syrjälä 	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
3691b16b2a2fSPaulo Zanoni 		GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
36929dfe2e3aSImre Deak 
3693aae8ba84SVille Syrjälä 	spin_unlock_irq(&dev_priv->irq_lock);
3694aae8ba84SVille Syrjälä 
3695aae8ba84SVille Syrjälä 	/* make sure we're done processing display irqs */
3696315ca4c4SVille Syrjälä 	intel_synchronize_irq(dev_priv);
3697aae8ba84SVille Syrjälä }
3698aae8ba84SVille Syrjälä 
3699b318b824SVille Syrjälä static void cherryview_irq_reset(struct drm_i915_private *dev_priv)
370043f328d7SVille Syrjälä {
3701b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
370243f328d7SVille Syrjälä 
370343f328d7SVille Syrjälä 	I915_WRITE(GEN8_MASTER_IRQ, 0);
370443f328d7SVille Syrjälä 	POSTING_READ(GEN8_MASTER_IRQ);
370543f328d7SVille Syrjälä 
3706d6e3cca3SDaniel Vetter 	gen8_gt_irq_reset(dev_priv);
370743f328d7SVille Syrjälä 
3708b16b2a2fSPaulo Zanoni 	GEN3_IRQ_RESET(uncore, GEN8_PCU_);
370943f328d7SVille Syrjälä 
3710ad22d106SVille Syrjälä 	spin_lock_irq(&dev_priv->irq_lock);
37119918271eSVille Syrjälä 	if (dev_priv->display_irqs_enabled)
371270591a41SVille Syrjälä 		vlv_display_irq_reset(dev_priv);
3713ad22d106SVille Syrjälä 	spin_unlock_irq(&dev_priv->irq_lock);
371443f328d7SVille Syrjälä }
371543f328d7SVille Syrjälä 
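/*
 * Build the mask of hotplug interrupt bits for every encoder whose HPD
 * pin is currently in the HPD_ENABLED state.
 */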
371691d14251STvrtko Ursulin static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv,
371787a02106SVille Syrjälä 				  const u32 hpd[HPD_NUM_PINS])
371887a02106SVille Syrjälä {
371987a02106SVille Syrjälä 	struct intel_encoder *encoder;
372087a02106SVille Syrjälä 	u32 enabled_irqs = 0;
372187a02106SVille Syrjälä 
372291c8a326SChris Wilson 	for_each_intel_encoder(&dev_priv->drm, encoder)
372387a02106SVille Syrjälä 		if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
372487a02106SVille Syrjälä 			enabled_irqs |= hpd[encoder->hpd_pin];
372587a02106SVille Syrjälä 
372687a02106SVille Syrjälä 	return enabled_irqs;
372787a02106SVille Syrjälä }
372887a02106SVille Syrjälä 
37291a56b1a2SImre Deak static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv)
37301a56b1a2SImre Deak {
37311a56b1a2SImre Deak 	u32 hotplug;
37321a56b1a2SImre Deak 
37331a56b1a2SImre Deak 	/*
37341a56b1a2SImre Deak 	 * Enable digital hotplug on the PCH, and configure the DP short pulse
37351a56b1a2SImre Deak 	 * duration to 2ms (which is the minimum in the Display Port spec).
37361a56b1a2SImre Deak 	 * The pulse duration bits are reserved on LPT+.
37371a56b1a2SImre Deak 	 */
37381a56b1a2SImre Deak 	hotplug = I915_READ(PCH_PORT_HOTPLUG);
37391a56b1a2SImre Deak 	hotplug &= ~(PORTB_PULSE_DURATION_MASK |
37401a56b1a2SImre Deak 		     PORTC_PULSE_DURATION_MASK |
37411a56b1a2SImre Deak 		     PORTD_PULSE_DURATION_MASK);
37421a56b1a2SImre Deak 	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
37431a56b1a2SImre Deak 	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
37441a56b1a2SImre Deak 	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
37451a56b1a2SImre Deak 	/*
37461a56b1a2SImre Deak 	 * When CPU and PCH are on the same package, port A
37471a56b1a2SImre Deak 	 * HPD must be enabled in both north and south.
37481a56b1a2SImre Deak 	 */
37491a56b1a2SImre Deak 	if (HAS_PCH_LPT_LP(dev_priv))
37501a56b1a2SImre Deak 		hotplug |= PORTA_HOTPLUG_ENABLE;
37511a56b1a2SImre Deak 	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
37521a56b1a2SImre Deak }
37531a56b1a2SImre Deak 
375491d14251STvrtko Ursulin static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
375582a28bcfSDaniel Vetter {
37561a56b1a2SImre Deak 	u32 hotplug_irqs, enabled_irqs;
375782a28bcfSDaniel Vetter 
375891d14251STvrtko Ursulin 	if (HAS_PCH_IBX(dev_priv)) {
3759fee884edSDaniel Vetter 		hotplug_irqs = SDE_HOTPLUG_MASK;
376091d14251STvrtko Ursulin 		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ibx);
376182a28bcfSDaniel Vetter 	} else {
3762fee884edSDaniel Vetter 		hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
376391d14251STvrtko Ursulin 		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_cpt);
376482a28bcfSDaniel Vetter 	}
376582a28bcfSDaniel Vetter 
3766fee884edSDaniel Vetter 	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
376782a28bcfSDaniel Vetter 
37681a56b1a2SImre Deak 	ibx_hpd_detection_setup(dev_priv);
37696dbf30ceSVille Syrjälä }
377026951cafSXiong Zhang 
377131604222SAnusha Srivatsa static void icp_hpd_detection_setup(struct drm_i915_private *dev_priv)
377231604222SAnusha Srivatsa {
377331604222SAnusha Srivatsa 	u32 hotplug;
377431604222SAnusha Srivatsa 
377531604222SAnusha Srivatsa 	hotplug = I915_READ(SHOTPLUG_CTL_DDI);
377631604222SAnusha Srivatsa 	hotplug |= ICP_DDIA_HPD_ENABLE |
377731604222SAnusha Srivatsa 		   ICP_DDIB_HPD_ENABLE;
377831604222SAnusha Srivatsa 	I915_WRITE(SHOTPLUG_CTL_DDI, hotplug);
377931604222SAnusha Srivatsa 
378031604222SAnusha Srivatsa 	hotplug = I915_READ(SHOTPLUG_CTL_TC);
378131604222SAnusha Srivatsa 	hotplug |= ICP_TC_HPD_ENABLE(PORT_TC1) |
378231604222SAnusha Srivatsa 		   ICP_TC_HPD_ENABLE(PORT_TC2) |
378331604222SAnusha Srivatsa 		   ICP_TC_HPD_ENABLE(PORT_TC3) |
378431604222SAnusha Srivatsa 		   ICP_TC_HPD_ENABLE(PORT_TC4);
378531604222SAnusha Srivatsa 	I915_WRITE(SHOTPLUG_CTL_TC, hotplug);
378631604222SAnusha Srivatsa }
378731604222SAnusha Srivatsa 
378831604222SAnusha Srivatsa static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv)
378931604222SAnusha Srivatsa {
379031604222SAnusha Srivatsa 	u32 hotplug_irqs, enabled_irqs;
379131604222SAnusha Srivatsa 
379231604222SAnusha Srivatsa 	hotplug_irqs = SDE_DDI_MASK_ICP | SDE_TC_MASK_ICP;
379331604222SAnusha Srivatsa 	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_icp);
379431604222SAnusha Srivatsa 
379531604222SAnusha Srivatsa 	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
379631604222SAnusha Srivatsa 
379731604222SAnusha Srivatsa 	icp_hpd_detection_setup(dev_priv);
379831604222SAnusha Srivatsa }
379931604222SAnusha Srivatsa 
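/*
 * Enable hotplug detection for the four Type-C ports in both the TC and
 * TBT hotplug control registers.
 */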
3800121e758eSDhinakaran Pandiyan static void gen11_hpd_detection_setup(struct drm_i915_private *dev_priv)
3801121e758eSDhinakaran Pandiyan {
3802121e758eSDhinakaran Pandiyan 	u32 hotplug;
3803121e758eSDhinakaran Pandiyan 
3804121e758eSDhinakaran Pandiyan 	hotplug = I915_READ(GEN11_TC_HOTPLUG_CTL);
3805121e758eSDhinakaran Pandiyan 	hotplug |= GEN11_HOTPLUG_CTL_ENABLE(PORT_TC1) |
3806121e758eSDhinakaran Pandiyan 		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC2) |
3807121e758eSDhinakaran Pandiyan 		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC3) |
3808121e758eSDhinakaran Pandiyan 		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4);
3809121e758eSDhinakaran Pandiyan 	I915_WRITE(GEN11_TC_HOTPLUG_CTL, hotplug);
3810b796b971SDhinakaran Pandiyan 
3811b796b971SDhinakaran Pandiyan 	hotplug = I915_READ(GEN11_TBT_HOTPLUG_CTL);
3812b796b971SDhinakaran Pandiyan 	hotplug |= GEN11_HOTPLUG_CTL_ENABLE(PORT_TC1) |
3813b796b971SDhinakaran Pandiyan 		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC2) |
3814b796b971SDhinakaran Pandiyan 		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC3) |
3815b796b971SDhinakaran Pandiyan 		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4);
3816b796b971SDhinakaran Pandiyan 	I915_WRITE(GEN11_TBT_HOTPLUG_CTL, hotplug);
3817121e758eSDhinakaran Pandiyan }
3818121e758eSDhinakaran Pandiyan 
3819121e758eSDhinakaran Pandiyan static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
3820121e758eSDhinakaran Pandiyan {
3821121e758eSDhinakaran Pandiyan 	u32 hotplug_irqs, enabled_irqs;
3822121e758eSDhinakaran Pandiyan 	u32 val;
3823121e758eSDhinakaran Pandiyan 
3824b796b971SDhinakaran Pandiyan 	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_gen11);
3825b796b971SDhinakaran Pandiyan 	hotplug_irqs = GEN11_DE_TC_HOTPLUG_MASK | GEN11_DE_TBT_HOTPLUG_MASK;
3826121e758eSDhinakaran Pandiyan 
3827121e758eSDhinakaran Pandiyan 	val = I915_READ(GEN11_DE_HPD_IMR);
3828121e758eSDhinakaran Pandiyan 	val &= ~hotplug_irqs;
3829121e758eSDhinakaran Pandiyan 	I915_WRITE(GEN11_DE_HPD_IMR, val);
3830121e758eSDhinakaran Pandiyan 	POSTING_READ(GEN11_DE_HPD_IMR);
3831121e758eSDhinakaran Pandiyan 
3832121e758eSDhinakaran Pandiyan 	gen11_hpd_detection_setup(dev_priv);
383331604222SAnusha Srivatsa 
383429b43ae2SRodrigo Vivi 	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
383531604222SAnusha Srivatsa 		icp_hpd_irq_setup(dev_priv);
3836121e758eSDhinakaran Pandiyan }
3837121e758eSDhinakaran Pandiyan 
38382a57d9ccSImre Deak static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv)
38392a57d9ccSImre Deak {
38403b92e263SRodrigo Vivi 	u32 val, hotplug;
38413b92e263SRodrigo Vivi 
38423b92e263SRodrigo Vivi 	/* Display WA #1179 WaHardHangonHotPlug: cnp */
38433b92e263SRodrigo Vivi 	if (HAS_PCH_CNP(dev_priv)) {
38443b92e263SRodrigo Vivi 		val = I915_READ(SOUTH_CHICKEN1);
38453b92e263SRodrigo Vivi 		val &= ~CHASSIS_CLK_REQ_DURATION_MASK;
38463b92e263SRodrigo Vivi 		val |= CHASSIS_CLK_REQ_DURATION(0xf);
38473b92e263SRodrigo Vivi 		I915_WRITE(SOUTH_CHICKEN1, val);
38483b92e263SRodrigo Vivi 	}
38492a57d9ccSImre Deak 
38502a57d9ccSImre Deak 	/* Enable digital hotplug on the PCH */
38512a57d9ccSImre Deak 	hotplug = I915_READ(PCH_PORT_HOTPLUG);
38522a57d9ccSImre Deak 	hotplug |= PORTA_HOTPLUG_ENABLE |
38532a57d9ccSImre Deak 		   PORTB_HOTPLUG_ENABLE |
38542a57d9ccSImre Deak 		   PORTC_HOTPLUG_ENABLE |
38552a57d9ccSImre Deak 		   PORTD_HOTPLUG_ENABLE;
38562a57d9ccSImre Deak 	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
38572a57d9ccSImre Deak 
38582a57d9ccSImre Deak 	hotplug = I915_READ(PCH_PORT_HOTPLUG2);
38592a57d9ccSImre Deak 	hotplug |= PORTE_HOTPLUG_ENABLE;
38602a57d9ccSImre Deak 	I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
38612a57d9ccSImre Deak }
38622a57d9ccSImre Deak 
386391d14251STvrtko Ursulin static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
38646dbf30ceSVille Syrjälä {
38652a57d9ccSImre Deak 	u32 hotplug_irqs, enabled_irqs;
38666dbf30ceSVille Syrjälä 
38676dbf30ceSVille Syrjälä 	hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
386891d14251STvrtko Ursulin 	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_spt);
38696dbf30ceSVille Syrjälä 
38706dbf30ceSVille Syrjälä 	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
38716dbf30ceSVille Syrjälä 
38722a57d9ccSImre Deak 	spt_hpd_detection_setup(dev_priv);
387326951cafSXiong Zhang }
38747fe0b973SKeith Packard 
38751a56b1a2SImre Deak static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv)
38761a56b1a2SImre Deak {
38771a56b1a2SImre Deak 	u32 hotplug;
38781a56b1a2SImre Deak 
38791a56b1a2SImre Deak 	/*
38801a56b1a2SImre Deak 	 * Enable digital hotplug on the CPU, and configure the DP short pulse
38811a56b1a2SImre Deak 	 * duration to 2ms (which is the minimum in the Display Port spec).
38821a56b1a2SImre Deak 	 * The pulse duration bits are reserved on HSW+.
38831a56b1a2SImre Deak 	 */
38841a56b1a2SImre Deak 	hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
38851a56b1a2SImre Deak 	hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK;
38861a56b1a2SImre Deak 	hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE |
38871a56b1a2SImre Deak 		   DIGITAL_PORTA_PULSE_DURATION_2ms;
38881a56b1a2SImre Deak 	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
38891a56b1a2SImre Deak }
38901a56b1a2SImre Deak 
389191d14251STvrtko Ursulin static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
3892e4ce95aaSVille Syrjälä {
38931a56b1a2SImre Deak 	u32 hotplug_irqs, enabled_irqs;
3894e4ce95aaSVille Syrjälä 
389591d14251STvrtko Ursulin 	if (INTEL_GEN(dev_priv) >= 8) {
38963a3b3c7dSVille Syrjälä 		hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG;
389791d14251STvrtko Ursulin 		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bdw);
38983a3b3c7dSVille Syrjälä 
38993a3b3c7dSVille Syrjälä 		bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
390091d14251STvrtko Ursulin 	} else if (INTEL_GEN(dev_priv) >= 7) {
390123bb4cb5SVille Syrjälä 		hotplug_irqs = DE_DP_A_HOTPLUG_IVB;
390291d14251STvrtko Ursulin 		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ivb);
39033a3b3c7dSVille Syrjälä 
39043a3b3c7dSVille Syrjälä 		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
390523bb4cb5SVille Syrjälä 	} else {
3906e4ce95aaSVille Syrjälä 		hotplug_irqs = DE_DP_A_HOTPLUG;
390791d14251STvrtko Ursulin 		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ilk);
3908e4ce95aaSVille Syrjälä 
3909e4ce95aaSVille Syrjälä 		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
39103a3b3c7dSVille Syrjälä 	}
3911e4ce95aaSVille Syrjälä 
39121a56b1a2SImre Deak 	ilk_hpd_detection_setup(dev_priv);
3913e4ce95aaSVille Syrjälä 
391491d14251STvrtko Ursulin 	ibx_hpd_irq_setup(dev_priv);
3915e4ce95aaSVille Syrjälä }
3916e4ce95aaSVille Syrjälä 
39172a57d9ccSImre Deak static void __bxt_hpd_detection_setup(struct drm_i915_private *dev_priv,
39182a57d9ccSImre Deak 				      u32 enabled_irqs)
3919e0a20ad7SShashank Sharma {
39202a57d9ccSImre Deak 	u32 hotplug;
3921e0a20ad7SShashank Sharma 
3922a52bb15bSVille Syrjälä 	hotplug = I915_READ(PCH_PORT_HOTPLUG);
39232a57d9ccSImre Deak 	hotplug |= PORTA_HOTPLUG_ENABLE |
39242a57d9ccSImre Deak 		   PORTB_HOTPLUG_ENABLE |
39252a57d9ccSImre Deak 		   PORTC_HOTPLUG_ENABLE;
3926d252bf68SShubhangi Shrivastava 
3927d252bf68SShubhangi Shrivastava 	DRM_DEBUG_KMS("Invert bit setting: hp_ctl:%x hp_port:%x\n",
3928d252bf68SShubhangi Shrivastava 		      hotplug, enabled_irqs);
3929d252bf68SShubhangi Shrivastava 	hotplug &= ~BXT_DDI_HPD_INVERT_MASK;
3930d252bf68SShubhangi Shrivastava 
3931d252bf68SShubhangi Shrivastava 	/*
3932d252bf68SShubhangi Shrivastava 	 * For BXT the invert bit has to be set based on the AOB design
3933d252bf68SShubhangi Shrivastava 	 * for the HPD detection logic; update it based on the VBT fields.
3934d252bf68SShubhangi Shrivastava 	 */
3935d252bf68SShubhangi Shrivastava 	if ((enabled_irqs & BXT_DE_PORT_HP_DDIA) &&
3936d252bf68SShubhangi Shrivastava 	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_A))
3937d252bf68SShubhangi Shrivastava 		hotplug |= BXT_DDIA_HPD_INVERT;
3938d252bf68SShubhangi Shrivastava 	if ((enabled_irqs & BXT_DE_PORT_HP_DDIB) &&
3939d252bf68SShubhangi Shrivastava 	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_B))
3940d252bf68SShubhangi Shrivastava 		hotplug |= BXT_DDIB_HPD_INVERT;
3941d252bf68SShubhangi Shrivastava 	if ((enabled_irqs & BXT_DE_PORT_HP_DDIC) &&
3942d252bf68SShubhangi Shrivastava 	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_C))
3943d252bf68SShubhangi Shrivastava 		hotplug |= BXT_DDIC_HPD_INVERT;
3944d252bf68SShubhangi Shrivastava 
3945a52bb15bSVille Syrjälä 	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3946e0a20ad7SShashank Sharma }
3947e0a20ad7SShashank Sharma 
39482a57d9ccSImre Deak static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv)
39492a57d9ccSImre Deak {
39502a57d9ccSImre Deak 	__bxt_hpd_detection_setup(dev_priv, BXT_DE_PORT_HOTPLUG_MASK);
39512a57d9ccSImre Deak }
39522a57d9ccSImre Deak 
39532a57d9ccSImre Deak static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
39542a57d9ccSImre Deak {
39552a57d9ccSImre Deak 	u32 hotplug_irqs, enabled_irqs;
39562a57d9ccSImre Deak 
39572a57d9ccSImre Deak 	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bxt);
39582a57d9ccSImre Deak 	hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK;
39592a57d9ccSImre Deak 
39602a57d9ccSImre Deak 	bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
39612a57d9ccSImre Deak 
39622a57d9ccSImre Deak 	__bxt_hpd_detection_setup(dev_priv, enabled_irqs);
39632a57d9ccSImre Deak }
39642a57d9ccSImre Deak 
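/*
 * Unmask the always-wanted PCH interrupts (GMBUS and AUX, plus POISON on
 * IBX) in SDEIMR and program the hotplug detection registers matching the
 * PCH generation.
 */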
3965b318b824SVille Syrjälä static void ibx_irq_postinstall(struct drm_i915_private *dev_priv)
3966d46da437SPaulo Zanoni {
396782a28bcfSDaniel Vetter 	u32 mask;
3968d46da437SPaulo Zanoni 
39696e266956STvrtko Ursulin 	if (HAS_PCH_NOP(dev_priv))
3970692a04cfSDaniel Vetter 		return;
3971692a04cfSDaniel Vetter 
39726e266956STvrtko Ursulin 	if (HAS_PCH_IBX(dev_priv))
39735c673b60SDaniel Vetter 		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
39744ebc6509SDhinakaran Pandiyan 	else if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
39755c673b60SDaniel Vetter 		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
39764ebc6509SDhinakaran Pandiyan 	else
39774ebc6509SDhinakaran Pandiyan 		mask = SDE_GMBUS_CPT;
39788664281bSPaulo Zanoni 
397965f42cdcSPaulo Zanoni 	gen3_assert_iir_is_zero(&dev_priv->uncore, SDEIIR);
3980d46da437SPaulo Zanoni 	I915_WRITE(SDEIMR, ~mask);
39812a57d9ccSImre Deak 
39822a57d9ccSImre Deak 	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
39832a57d9ccSImre Deak 	    HAS_PCH_LPT(dev_priv))
39841a56b1a2SImre Deak 		ibx_hpd_detection_setup(dev_priv);
39852a57d9ccSImre Deak 	else
39862a57d9ccSImre Deak 		spt_hpd_detection_setup(dev_priv);
3987d46da437SPaulo Zanoni }
3988d46da437SPaulo Zanoni 
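/*
 * Program the GT IMR/IER with the render/BSD/BLT user interrupts (plus L3
 * parity where present). On gen6+ the PM interrupt registers are also set
 * up, left fully masked except for the VEBOX user interrupt when a VECS
 * engine exists; RPS interrupts are only enabled on demand later.
 */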
3989b318b824SVille Syrjälä static void gen5_gt_irq_postinstall(struct drm_i915_private *dev_priv)
39900a9a8c91SDaniel Vetter {
3991b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
39920a9a8c91SDaniel Vetter 	u32 pm_irqs, gt_irqs;
39930a9a8c91SDaniel Vetter 
39940a9a8c91SDaniel Vetter 	pm_irqs = gt_irqs = 0;
39950a9a8c91SDaniel Vetter 
39960a9a8c91SDaniel Vetter 	dev_priv->gt_irq_mask = ~0;
39973c9192bcSTvrtko Ursulin 	if (HAS_L3_DPF(dev_priv)) {
39980a9a8c91SDaniel Vetter 		/* L3 parity interrupt is always unmasked. */
3999772c2a51STvrtko Ursulin 		dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev_priv);
4000772c2a51STvrtko Ursulin 		gt_irqs |= GT_PARITY_ERROR(dev_priv);
40010a9a8c91SDaniel Vetter 	}
40020a9a8c91SDaniel Vetter 
40030a9a8c91SDaniel Vetter 	gt_irqs |= GT_RENDER_USER_INTERRUPT;
4004cf819effSLucas De Marchi 	if (IS_GEN(dev_priv, 5)) {
4005f8973c21SChris Wilson 		gt_irqs |= ILK_BSD_USER_INTERRUPT;
40060a9a8c91SDaniel Vetter 	} else {
40070a9a8c91SDaniel Vetter 		gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
40080a9a8c91SDaniel Vetter 	}
40090a9a8c91SDaniel Vetter 
4010b16b2a2fSPaulo Zanoni 	GEN3_IRQ_INIT(uncore, GT, dev_priv->gt_irq_mask, gt_irqs);
40110a9a8c91SDaniel Vetter 
4012b243f530STvrtko Ursulin 	if (INTEL_GEN(dev_priv) >= 6) {
401378e68d36SImre Deak 		/*
401478e68d36SImre Deak 		 * RPS interrupts will get enabled/disabled on demand when RPS
401578e68d36SImre Deak 		 * itself is enabled/disabled.
401678e68d36SImre Deak 		 */
40178a68d464SChris Wilson 		if (HAS_ENGINE(dev_priv, VECS0)) {
40180a9a8c91SDaniel Vetter 			pm_irqs |= PM_VEBOX_USER_INTERRUPT;
401958820574STvrtko Ursulin 			dev_priv->gt.pm_ier |= PM_VEBOX_USER_INTERRUPT;
4020f4e9af4fSAkash Goel 		}
40210a9a8c91SDaniel Vetter 
402258820574STvrtko Ursulin 		dev_priv->gt.pm_imr = 0xffffffff;
402358820574STvrtko Ursulin 		GEN3_IRQ_INIT(uncore, GEN6_PM, dev_priv->gt.pm_imr, pm_irqs);
40240a9a8c91SDaniel Vetter 	}
40250a9a8c91SDaniel Vetter }
40260a9a8c91SDaniel Vetter 
4027b318b824SVille Syrjälä static void ironlake_irq_postinstall(struct drm_i915_private *dev_priv)
4028036a4a7dSZhenyu Wang {
4029b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
40308e76f8dcSPaulo Zanoni 	u32 display_mask, extra_mask;
40318e76f8dcSPaulo Zanoni 
4032b243f530STvrtko Ursulin 	if (INTEL_GEN(dev_priv) >= 7) {
40338e76f8dcSPaulo Zanoni 		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
4034842ebf7aSVille Syrjälä 				DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB);
40358e76f8dcSPaulo Zanoni 		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
403623bb4cb5SVille Syrjälä 			      DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
403723bb4cb5SVille Syrjälä 			      DE_DP_A_HOTPLUG_IVB);
40388e76f8dcSPaulo Zanoni 	} else {
40398e76f8dcSPaulo Zanoni 		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
4040842ebf7aSVille Syrjälä 				DE_AUX_CHANNEL_A | DE_PIPEB_CRC_DONE |
4041842ebf7aSVille Syrjälä 				DE_PIPEA_CRC_DONE | DE_POISON);
4042e4ce95aaSVille Syrjälä 		extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
4043e4ce95aaSVille Syrjälä 			      DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
4044e4ce95aaSVille Syrjälä 			      DE_DP_A_HOTPLUG);
40458e76f8dcSPaulo Zanoni 	}
4046036a4a7dSZhenyu Wang 
4047fc340442SDaniel Vetter 	if (IS_HASWELL(dev_priv)) {
4048b16b2a2fSPaulo Zanoni 		gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
40491aeb1b5fSDhinakaran Pandiyan 		intel_psr_irq_control(dev_priv, dev_priv->psr.debug);
4050fc340442SDaniel Vetter 		display_mask |= DE_EDP_PSR_INT_HSW;
4051fc340442SDaniel Vetter 	}
4052fc340442SDaniel Vetter 
40531ec14ad3SChris Wilson 	dev_priv->irq_mask = ~display_mask;
4054036a4a7dSZhenyu Wang 
4055b318b824SVille Syrjälä 	ibx_irq_pre_postinstall(dev_priv);
4056622364b6SPaulo Zanoni 
4057b16b2a2fSPaulo Zanoni 	GEN3_IRQ_INIT(uncore, DE, dev_priv->irq_mask,
4058b16b2a2fSPaulo Zanoni 		      display_mask | extra_mask);
4059036a4a7dSZhenyu Wang 
4060b318b824SVille Syrjälä 	gen5_gt_irq_postinstall(dev_priv);
4061036a4a7dSZhenyu Wang 
40621a56b1a2SImre Deak 	ilk_hpd_detection_setup(dev_priv);
40631a56b1a2SImre Deak 
4064b318b824SVille Syrjälä 	ibx_irq_postinstall(dev_priv);
40657fe0b973SKeith Packard 
406650a0bc90STvrtko Ursulin 	if (IS_IRONLAKE_M(dev_priv)) {
40676005ce42SDaniel Vetter 		/* Enable PCU event interrupts
40686005ce42SDaniel Vetter 		 *
40696005ce42SDaniel Vetter 		 * Spinlocking is not required here for correctness since interrupt
40704bc9d430SDaniel Vetter 		 * setup is guaranteed to run in a single-threaded context. But we
40714bc9d430SDaniel Vetter 		 * need it to keep the assert_spin_locked check happy. */
4072d6207435SDaniel Vetter 		spin_lock_irq(&dev_priv->irq_lock);
4073fbdedaeaSVille Syrjälä 		ilk_enable_display_irq(dev_priv, DE_PCU_EVENT);
4074d6207435SDaniel Vetter 		spin_unlock_irq(&dev_priv->irq_lock);
4075f97108d1SJesse Barnes 	}
4076036a4a7dSZhenyu Wang }
4077036a4a7dSZhenyu Wang 
4078f8b79e58SImre Deak void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
4079f8b79e58SImre Deak {
408067520415SChris Wilson 	lockdep_assert_held(&dev_priv->irq_lock);
4081f8b79e58SImre Deak 
4082f8b79e58SImre Deak 	if (dev_priv->display_irqs_enabled)
4083f8b79e58SImre Deak 		return;
4084f8b79e58SImre Deak 
4085f8b79e58SImre Deak 	dev_priv->display_irqs_enabled = true;
4086f8b79e58SImre Deak 
4087d6c69803SVille Syrjälä 	if (intel_irqs_enabled(dev_priv)) {
4088d6c69803SVille Syrjälä 		vlv_display_irq_reset(dev_priv);
4089ad22d106SVille Syrjälä 		vlv_display_irq_postinstall(dev_priv);
4090f8b79e58SImre Deak 	}
4091d6c69803SVille Syrjälä }
4092f8b79e58SImre Deak 
4093f8b79e58SImre Deak void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
4094f8b79e58SImre Deak {
409567520415SChris Wilson 	lockdep_assert_held(&dev_priv->irq_lock);
4096f8b79e58SImre Deak 
4097f8b79e58SImre Deak 	if (!dev_priv->display_irqs_enabled)
4098f8b79e58SImre Deak 		return;
4099f8b79e58SImre Deak 
4100f8b79e58SImre Deak 	dev_priv->display_irqs_enabled = false;
4101f8b79e58SImre Deak 
4102950eabafSImre Deak 	if (intel_irqs_enabled(dev_priv))
4103ad22d106SVille Syrjälä 		vlv_display_irq_reset(dev_priv);
4104f8b79e58SImre Deak }
4105f8b79e58SImre Deak
4107b318b824SVille Syrjälä static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv)
41080e6c9a9eSVille Syrjälä {
4109b318b824SVille Syrjälä 	gen5_gt_irq_postinstall(dev_priv);
41107e231dbeSJesse Barnes 
4111ad22d106SVille Syrjälä 	spin_lock_irq(&dev_priv->irq_lock);
41129918271eSVille Syrjälä 	if (dev_priv->display_irqs_enabled)
4113ad22d106SVille Syrjälä 		vlv_display_irq_postinstall(dev_priv);
4114ad22d106SVille Syrjälä 	spin_unlock_irq(&dev_priv->irq_lock);
4115ad22d106SVille Syrjälä 
41167e231dbeSJesse Barnes 	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
411734c7b8a7SVille Syrjälä 	POSTING_READ(VLV_MASTER_IER);
411820afbda2SDaniel Vetter }
411920afbda2SDaniel Vetter 
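/*
 * Program the four gen8 GT interrupt register banks with the user and
 * context-switch interrupts for each engine; bank 2 (PM) stays fully
 * masked here since RPS and GuC interrupts are enabled on demand.
 */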
412058820574STvrtko Ursulin static void gen8_gt_irq_postinstall(struct drm_i915_private *i915)
4121abd58f01SBen Widawsky {
412258820574STvrtko Ursulin 	struct intel_gt *gt = &i915->gt;
412358820574STvrtko Ursulin 	struct intel_uncore *uncore = gt->uncore;
4124b16b2a2fSPaulo Zanoni 
4125abd58f01SBen Widawsky 	/* These are interrupts we'll toggle with the ring mask register */
4126a9c287c9SJani Nikula 	u32 gt_interrupts[] = {
41278a68d464SChris Wilson 		(GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
412873d477f6SOscar Mateo 		 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
412973d477f6SOscar Mateo 		 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
41308a68d464SChris Wilson 		 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT),
41318a68d464SChris Wilson 
41328a68d464SChris Wilson 		(GT_RENDER_USER_INTERRUPT << GEN8_VCS0_IRQ_SHIFT |
41338a68d464SChris Wilson 		 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS0_IRQ_SHIFT |
4134abd58f01SBen Widawsky 		 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
41358a68d464SChris Wilson 		 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT),
41368a68d464SChris Wilson 
4137abd58f01SBen Widawsky 		0,
41388a68d464SChris Wilson 
41398a68d464SChris Wilson 		(GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
41408a68d464SChris Wilson 		 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT)
4141abd58f01SBen Widawsky 	};
4142abd58f01SBen Widawsky 
414358820574STvrtko Ursulin 	gt->pm_ier = 0x0;
414458820574STvrtko Ursulin 	gt->pm_imr = ~gt->pm_ier;
4145b16b2a2fSPaulo Zanoni 	GEN8_IRQ_INIT_NDX(uncore, GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
4146b16b2a2fSPaulo Zanoni 	GEN8_IRQ_INIT_NDX(uncore, GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
414778e68d36SImre Deak 	/*
414878e68d36SImre Deak 	 * RPS interrupts will get enabled/disabled on demand when RPS itself
414926705e20SSagar Arun Kamble 	 * is enabled/disabled. The same will be the case for GuC interrupts.
415078e68d36SImre Deak 	 */
415158820574STvrtko Ursulin 	GEN8_IRQ_INIT_NDX(uncore, GT, 2, gt->pm_imr, gt->pm_ier);
4152b16b2a2fSPaulo Zanoni 	GEN8_IRQ_INIT_NDX(uncore, GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
4153abd58f01SBen Widawsky }
4154abd58f01SBen Widawsky 
4155abd58f01SBen Widawsky static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
4156abd58f01SBen Widawsky {
4157b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
4158b16b2a2fSPaulo Zanoni 
4159a9c287c9SJani Nikula 	u32 de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
4160a9c287c9SJani Nikula 	u32 de_pipe_enables;
41613a3b3c7dSVille Syrjälä 	u32 de_port_masked = GEN8_AUX_CHANNEL_A;
41623a3b3c7dSVille Syrjälä 	u32 de_port_enables;
4163df0d28c1SDhinakaran Pandiyan 	u32 de_misc_masked = GEN8_DE_EDP_PSR;
41643a3b3c7dSVille Syrjälä 	enum pipe pipe;
4165770de83dSDamien Lespiau 
4166df0d28c1SDhinakaran Pandiyan 	if (INTEL_GEN(dev_priv) <= 10)
4167df0d28c1SDhinakaran Pandiyan 		de_misc_masked |= GEN8_DE_MISC_GSE;
4168df0d28c1SDhinakaran Pandiyan 
4169bca2bf2aSPandiyan, Dhinakaran 	if (INTEL_GEN(dev_priv) >= 9) {
4170842ebf7aSVille Syrjälä 		de_pipe_masked |= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
41713a3b3c7dSVille Syrjälä 		de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
417288e04703SJesse Barnes 				  GEN9_AUX_CHANNEL_D;
4173cc3f90f0SAnder Conselvan de Oliveira 		if (IS_GEN9_LP(dev_priv))
41743a3b3c7dSVille Syrjälä 			de_port_masked |= BXT_DE_PORT_GMBUS;
41753a3b3c7dSVille Syrjälä 	} else {
4176842ebf7aSVille Syrjälä 		de_pipe_masked |= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
41773a3b3c7dSVille Syrjälä 	}
4178770de83dSDamien Lespiau 
4179bb187e93SJames Ausmus 	if (INTEL_GEN(dev_priv) >= 11)
4180bb187e93SJames Ausmus 		de_port_masked |= ICL_AUX_CHANNEL_E;
4181bb187e93SJames Ausmus 
41829bb635d9SDhinakaran Pandiyan 	if (IS_CNL_WITH_PORT_F(dev_priv) || INTEL_GEN(dev_priv) >= 11)
4183a324fcacSRodrigo Vivi 		de_port_masked |= CNL_AUX_CHANNEL_F;
4184a324fcacSRodrigo Vivi 
4185770de83dSDamien Lespiau 	de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
4186770de83dSDamien Lespiau 					   GEN8_PIPE_FIFO_UNDERRUN;
4187770de83dSDamien Lespiau 
41883a3b3c7dSVille Syrjälä 	de_port_enables = de_port_masked;
4189cc3f90f0SAnder Conselvan de Oliveira 	if (IS_GEN9_LP(dev_priv))
4190a52bb15bSVille Syrjälä 		de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
4191a52bb15bSVille Syrjälä 	else if (IS_BROADWELL(dev_priv))
41923a3b3c7dSVille Syrjälä 		de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;
41933a3b3c7dSVille Syrjälä 
4194b16b2a2fSPaulo Zanoni 	gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
419554fd3149SDhinakaran Pandiyan 	intel_psr_irq_control(dev_priv, dev_priv->psr.debug);
4196e04f7eceSVille Syrjälä 
41970a195c02SMika Kahola 	for_each_pipe(dev_priv, pipe) {
41980a195c02SMika Kahola 		dev_priv->de_irq_mask[pipe] = ~de_pipe_masked;
4199abd58f01SBen Widawsky 
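		/*
		 * Pipes whose power domain is currently powered down are
		 * skipped here; their DE_PIPE registers are programmed once
		 * the corresponding power well is enabled again.
		 */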
4200f458ebbcSDaniel Vetter 		if (intel_display_power_is_enabled(dev_priv,
4201813bde43SPaulo Zanoni 				POWER_DOMAIN_PIPE(pipe)))
4202b16b2a2fSPaulo Zanoni 			GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
4203813bde43SPaulo Zanoni 					  dev_priv->de_irq_mask[pipe],
420435079899SPaulo Zanoni 					  de_pipe_enables);
42050a195c02SMika Kahola 	}
4206abd58f01SBen Widawsky 
4207b16b2a2fSPaulo Zanoni 	GEN3_IRQ_INIT(uncore, GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
4208b16b2a2fSPaulo Zanoni 	GEN3_IRQ_INIT(uncore, GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);
42092a57d9ccSImre Deak 
4210121e758eSDhinakaran Pandiyan 	if (INTEL_GEN(dev_priv) >= 11) {
4211121e758eSDhinakaran Pandiyan 		u32 de_hpd_masked = 0;
4212b796b971SDhinakaran Pandiyan 		u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK |
4213b796b971SDhinakaran Pandiyan 				     GEN11_DE_TBT_HOTPLUG_MASK;
4214121e758eSDhinakaran Pandiyan 
4215b16b2a2fSPaulo Zanoni 		GEN3_IRQ_INIT(uncore, GEN11_DE_HPD_, ~de_hpd_masked,
4216b16b2a2fSPaulo Zanoni 			      de_hpd_enables);
4217121e758eSDhinakaran Pandiyan 		gen11_hpd_detection_setup(dev_priv);
4218121e758eSDhinakaran Pandiyan 	} else if (IS_GEN9_LP(dev_priv)) {
42192a57d9ccSImre Deak 		bxt_hpd_detection_setup(dev_priv);
4220121e758eSDhinakaran Pandiyan 	} else if (IS_BROADWELL(dev_priv)) {
42211a56b1a2SImre Deak 		ilk_hpd_detection_setup(dev_priv);
4222abd58f01SBen Widawsky 	}
4223121e758eSDhinakaran Pandiyan }
4224abd58f01SBen Widawsky 
4225b318b824SVille Syrjälä static void gen8_irq_postinstall(struct drm_i915_private *dev_priv)
4226abd58f01SBen Widawsky {
42276e266956STvrtko Ursulin 	if (HAS_PCH_SPLIT(dev_priv))
4228b318b824SVille Syrjälä 		ibx_irq_pre_postinstall(dev_priv);
4229622364b6SPaulo Zanoni 
4230abd58f01SBen Widawsky 	gen8_gt_irq_postinstall(dev_priv);
4231abd58f01SBen Widawsky 	gen8_de_irq_postinstall(dev_priv);
4232abd58f01SBen Widawsky 
42336e266956STvrtko Ursulin 	if (HAS_PCH_SPLIT(dev_priv))
4234b318b824SVille Syrjälä 		ibx_irq_postinstall(dev_priv);
4235abd58f01SBen Widawsky 
423625286aacSDaniele Ceraolo Spurio 	gen8_master_intr_enable(dev_priv->uncore.regs);
4237abd58f01SBen Widawsky }
4238abd58f01SBen Widawsky 
42399b77011eSTvrtko Ursulin static void gen11_gt_irq_postinstall(struct intel_gt *gt)
424051951ae7SMika Kuoppala {
424151951ae7SMika Kuoppala 	const u32 irqs = GT_RENDER_USER_INTERRUPT | GT_CONTEXT_SWITCH_INTERRUPT;
4242f0818984STvrtko Ursulin 	struct intel_uncore *uncore = gt->uncore;
4243f0818984STvrtko Ursulin 	const u32 dmask = irqs << 16 | irqs;
4244f0818984STvrtko Ursulin 	const u32 smask = irqs << 16;
424551951ae7SMika Kuoppala 
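	/*
	 * Each of the enable/mask registers written below appears to pack
	 * two units, with the first-named unit in bits 31:16 and the second
	 * in bits 15:0 (hence the 16-bit limit asserted below): dmask
	 * programs both halves, smask only the upper half for registers
	 * whose lower half is reserved.
	 */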
424651951ae7SMika Kuoppala 	BUILD_BUG_ON(irqs & 0xffff0000);
424751951ae7SMika Kuoppala 
424851951ae7SMika Kuoppala 	/* Enable RCS, BCS, VCS and VECS class interrupts. */
4249f0818984STvrtko Ursulin 	intel_uncore_write(uncore, GEN11_RENDER_COPY_INTR_ENABLE, dmask);
4250f0818984STvrtko Ursulin 	intel_uncore_write(uncore, GEN11_VCS_VECS_INTR_ENABLE, dmask);
425151951ae7SMika Kuoppala 
425251951ae7SMika Kuoppala 	/* Unmask irqs on RCS, BCS, VCS and VECS engines. */
4253f0818984STvrtko Ursulin 	intel_uncore_write(uncore, GEN11_RCS0_RSVD_INTR_MASK, ~smask);
4254f0818984STvrtko Ursulin 	intel_uncore_write(uncore, GEN11_BCS_RSVD_INTR_MASK, ~smask);
4255f0818984STvrtko Ursulin 	intel_uncore_write(uncore, GEN11_VCS0_VCS1_INTR_MASK, ~dmask);
4256f0818984STvrtko Ursulin 	intel_uncore_write(uncore, GEN11_VCS2_VCS3_INTR_MASK, ~dmask);
4257f0818984STvrtko Ursulin 	intel_uncore_write(uncore, GEN11_VECS0_VECS1_INTR_MASK, ~dmask);
425851951ae7SMika Kuoppala 
4259d02b98b8SOscar Mateo 	/*
4260d02b98b8SOscar Mateo 	 * RPS interrupts will get enabled/disabled on demand when RPS itself
4261d02b98b8SOscar Mateo 	 * is enabled/disabled.
4262d02b98b8SOscar Mateo 	 */
426358820574STvrtko Ursulin 	gt->pm_ier = 0x0;
426458820574STvrtko Ursulin 	gt->pm_imr = ~gt->pm_ier;
4265f0818984STvrtko Ursulin 	intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
4266f0818984STvrtko Ursulin 	intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_MASK,  ~0);
426754c52a84SOscar Mateo 
426854c52a84SOscar Mateo 	/* Same thing for GuC interrupts */
4269f0818984STvrtko Ursulin 	intel_uncore_write(uncore, GEN11_GUC_SG_INTR_ENABLE, 0);
4270f0818984STvrtko Ursulin 	intel_uncore_write(uncore, GEN11_GUC_SG_INTR_MASK,  ~0);
427151951ae7SMika Kuoppala }
427251951ae7SMika Kuoppala 
4273b318b824SVille Syrjälä static void icp_irq_postinstall(struct drm_i915_private *dev_priv)
427431604222SAnusha Srivatsa {
427531604222SAnusha Srivatsa 	u32 mask = SDE_GMBUS_ICP;
427631604222SAnusha Srivatsa 
427731604222SAnusha Srivatsa 	WARN_ON(I915_READ(SDEIER) != 0);
427831604222SAnusha Srivatsa 	I915_WRITE(SDEIER, 0xffffffff);
427931604222SAnusha Srivatsa 	POSTING_READ(SDEIER);
428031604222SAnusha Srivatsa 
428165f42cdcSPaulo Zanoni 	gen3_assert_iir_is_zero(&dev_priv->uncore, SDEIIR);
428231604222SAnusha Srivatsa 	I915_WRITE(SDEIMR, ~mask);
428331604222SAnusha Srivatsa 
428431604222SAnusha Srivatsa 	icp_hpd_detection_setup(dev_priv);
428531604222SAnusha Srivatsa }
428631604222SAnusha Srivatsa 
4287b318b824SVille Syrjälä static void gen11_irq_postinstall(struct drm_i915_private *dev_priv)
428851951ae7SMika Kuoppala {
4289b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
4290df0d28c1SDhinakaran Pandiyan 	u32 gu_misc_masked = GEN11_GU_MISC_GSE;
429151951ae7SMika Kuoppala 
429229b43ae2SRodrigo Vivi 	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
4293b318b824SVille Syrjälä 		icp_irq_postinstall(dev_priv);
429431604222SAnusha Srivatsa 
42959b77011eSTvrtko Ursulin 	gen11_gt_irq_postinstall(&dev_priv->gt);
429651951ae7SMika Kuoppala 	gen8_de_irq_postinstall(dev_priv);
429751951ae7SMika Kuoppala 
4298b16b2a2fSPaulo Zanoni 	GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);
4299df0d28c1SDhinakaran Pandiyan 
430051951ae7SMika Kuoppala 	I915_WRITE(GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
430151951ae7SMika Kuoppala 
43029b77011eSTvrtko Ursulin 	gen11_master_intr_enable(uncore->regs);
4303c25f0c6aSDaniele Ceraolo Spurio 	POSTING_READ(GEN11_GFX_MSTR_IRQ);
430451951ae7SMika Kuoppala }
430551951ae7SMika Kuoppala 
4306b318b824SVille Syrjälä static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv)
430743f328d7SVille Syrjälä {
430843f328d7SVille Syrjälä 	gen8_gt_irq_postinstall(dev_priv);
430943f328d7SVille Syrjälä 
4310ad22d106SVille Syrjälä 	spin_lock_irq(&dev_priv->irq_lock);
43119918271eSVille Syrjälä 	if (dev_priv->display_irqs_enabled)
4312ad22d106SVille Syrjälä 		vlv_display_irq_postinstall(dev_priv);
4313ad22d106SVille Syrjälä 	spin_unlock_irq(&dev_priv->irq_lock);
4314ad22d106SVille Syrjälä 
4315e5328c43SVille Syrjälä 	I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
431643f328d7SVille Syrjälä 	POSTING_READ(GEN8_MASTER_IRQ);
431743f328d7SVille Syrjälä }
431843f328d7SVille Syrjälä 
4319b318b824SVille Syrjälä static void i8xx_irq_reset(struct drm_i915_private *dev_priv)
4320c2798b19SChris Wilson {
4321b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
4322c2798b19SChris Wilson 
432344d9241eSVille Syrjälä 	i9xx_pipestat_irq_reset(dev_priv);
432444d9241eSVille Syrjälä 
4325b16b2a2fSPaulo Zanoni 	GEN2_IRQ_RESET(uncore);
4326c2798b19SChris Wilson }
4327c2798b19SChris Wilson 
4328b318b824SVille Syrjälä static void i8xx_irq_postinstall(struct drm_i915_private *dev_priv)
4329c2798b19SChris Wilson {
4330b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
4331e9e9848aSVille Syrjälä 	u16 enable_mask;
4332c2798b19SChris Wilson 
43334f5fd91fSTvrtko Ursulin 	intel_uncore_write16(uncore,
43344f5fd91fSTvrtko Ursulin 			     EMR,
43354f5fd91fSTvrtko Ursulin 			     ~(I915_ERROR_PAGE_TABLE |
4336045cebd2SVille Syrjälä 			       I915_ERROR_MEMORY_REFRESH));
4337c2798b19SChris Wilson 
4338c2798b19SChris Wilson 	/* Unmask the interrupts that we always want on. */
4339c2798b19SChris Wilson 	dev_priv->irq_mask =
4340c2798b19SChris Wilson 		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
434116659bc5SVille Syrjälä 		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
434216659bc5SVille Syrjälä 		  I915_MASTER_ERROR_INTERRUPT);
4343c2798b19SChris Wilson 
4344e9e9848aSVille Syrjälä 	enable_mask =
4345c2798b19SChris Wilson 		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4346c2798b19SChris Wilson 		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
434716659bc5SVille Syrjälä 		I915_MASTER_ERROR_INTERRUPT |
4348e9e9848aSVille Syrjälä 		I915_USER_INTERRUPT;
4349e9e9848aSVille Syrjälä 
4350b16b2a2fSPaulo Zanoni 	GEN2_IRQ_INIT(uncore, dev_priv->irq_mask, enable_mask);
4351c2798b19SChris Wilson 
4352379ef82dSDaniel Vetter 	/* Interrupt setup is already guaranteed to be single-threaded; this is
4353379ef82dSDaniel Vetter 	 * just to make the assert_spin_locked check happy. */
4354d6207435SDaniel Vetter 	spin_lock_irq(&dev_priv->irq_lock);
4355755e9019SImre Deak 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4356755e9019SImre Deak 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4357d6207435SDaniel Vetter 	spin_unlock_irq(&dev_priv->irq_lock);
4358c2798b19SChris Wilson }
4359c2798b19SChris Wilson 
43604f5fd91fSTvrtko Ursulin static void i8xx_error_irq_ack(struct drm_i915_private *i915,
436178c357ddSVille Syrjälä 			       u16 *eir, u16 *eir_stuck)
436278c357ddSVille Syrjälä {
43634f5fd91fSTvrtko Ursulin 	struct intel_uncore *uncore = &i915->uncore;
436478c357ddSVille Syrjälä 	u16 emr;
436578c357ddSVille Syrjälä 
43664f5fd91fSTvrtko Ursulin 	*eir = intel_uncore_read16(uncore, EIR);
436778c357ddSVille Syrjälä 
436878c357ddSVille Syrjälä 	if (*eir)
43694f5fd91fSTvrtko Ursulin 		intel_uncore_write16(uncore, EIR, *eir);
437078c357ddSVille Syrjälä 
43714f5fd91fSTvrtko Ursulin 	*eir_stuck = intel_uncore_read16(uncore, EIR);
437278c357ddSVille Syrjälä 	if (*eir_stuck == 0)
437378c357ddSVille Syrjälä 		return;
437478c357ddSVille Syrjälä 
437578c357ddSVille Syrjälä 	/*
437678c357ddSVille Syrjälä 	 * Toggle all EMR bits to make sure we get an edge
437778c357ddSVille Syrjälä 	 * in the ISR master error bit if we don't clear
437878c357ddSVille Syrjälä 	 * all the EIR bits. Otherwise the edge triggered
437978c357ddSVille Syrjälä 	 * IIR on i965/g4x wouldn't notice that an interrupt
438078c357ddSVille Syrjälä 	 * is still pending. Also some EIR bits can't be
438178c357ddSVille Syrjälä 	 * cleared except by handling the underlying error
438278c357ddSVille Syrjälä 	 * (or by a GPU reset) so we mask any bit that
438378c357ddSVille Syrjälä 	 * remains set.
438478c357ddSVille Syrjälä 	 */
43854f5fd91fSTvrtko Ursulin 	emr = intel_uncore_read16(uncore, EMR);
43864f5fd91fSTvrtko Ursulin 	intel_uncore_write16(uncore, EMR, 0xffff);
43874f5fd91fSTvrtko Ursulin 	intel_uncore_write16(uncore, EMR, emr | *eir_stuck);
438878c357ddSVille Syrjälä }
438978c357ddSVille Syrjälä 
439078c357ddSVille Syrjälä static void i8xx_error_irq_handler(struct drm_i915_private *dev_priv,
439178c357ddSVille Syrjälä 				   u16 eir, u16 eir_stuck)
439278c357ddSVille Syrjälä {
439378c357ddSVille Syrjälä 	DRM_DEBUG("Master Error: EIR 0x%04x\n", eir);
439478c357ddSVille Syrjälä 
439578c357ddSVille Syrjälä 	if (eir_stuck)
439678c357ddSVille Syrjälä 		DRM_DEBUG_DRIVER("EIR stuck: 0x%04x, masked\n", eir_stuck);
439778c357ddSVille Syrjälä }
439878c357ddSVille Syrjälä 
439978c357ddSVille Syrjälä static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv,
440078c357ddSVille Syrjälä 			       u32 *eir, u32 *eir_stuck)
440178c357ddSVille Syrjälä {
440278c357ddSVille Syrjälä 	u32 emr;
440378c357ddSVille Syrjälä 
440478c357ddSVille Syrjälä 	*eir = I915_READ(EIR);
440578c357ddSVille Syrjälä 
440678c357ddSVille Syrjälä 	I915_WRITE(EIR, *eir);
440778c357ddSVille Syrjälä 
440878c357ddSVille Syrjälä 	*eir_stuck = I915_READ(EIR);
440978c357ddSVille Syrjälä 	if (*eir_stuck == 0)
441078c357ddSVille Syrjälä 		return;
441178c357ddSVille Syrjälä 
441278c357ddSVille Syrjälä 	/*
441378c357ddSVille Syrjälä 	 * Toggle all EMR bits to make sure we get an edge
441478c357ddSVille Syrjälä 	 * in the ISR master error bit if we don't clear
441578c357ddSVille Syrjälä 	 * all the EIR bits. Otherwise the edge triggered
441678c357ddSVille Syrjälä 	 * IIR on i965/g4x wouldn't notice that an interrupt
441778c357ddSVille Syrjälä 	 * is still pending. Also some EIR bits can't be
441878c357ddSVille Syrjälä 	 * cleared except by handling the underlying error
441978c357ddSVille Syrjälä 	 * (or by a GPU reset) so we mask any bit that
442078c357ddSVille Syrjälä 	 * remains set.
442178c357ddSVille Syrjälä 	 */
442278c357ddSVille Syrjälä 	emr = I915_READ(EMR);
442378c357ddSVille Syrjälä 	I915_WRITE(EMR, 0xffffffff);
442478c357ddSVille Syrjälä 	I915_WRITE(EMR, emr | *eir_stuck);
442578c357ddSVille Syrjälä }
442678c357ddSVille Syrjälä 
442778c357ddSVille Syrjälä static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv,
442878c357ddSVille Syrjälä 				   u32 eir, u32 eir_stuck)
442978c357ddSVille Syrjälä {
443078c357ddSVille Syrjälä 	DRM_DEBUG("Master Error, EIR 0x%08x\n", eir);
443178c357ddSVille Syrjälä 
443278c357ddSVille Syrjälä 	if (eir_stuck)
443378c357ddSVille Syrjälä 		DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masked\n", eir_stuck);
443478c357ddSVille Syrjälä }
443578c357ddSVille Syrjälä 
4436ff1f525eSDaniel Vetter static irqreturn_t i8xx_irq_handler(int irq, void *arg)
4437c2798b19SChris Wilson {
4438b318b824SVille Syrjälä 	struct drm_i915_private *dev_priv = arg;
4439af722d28SVille Syrjälä 	irqreturn_t ret = IRQ_NONE;
4440c2798b19SChris Wilson 
44412dd2a883SImre Deak 	if (!intel_irqs_enabled(dev_priv))
44422dd2a883SImre Deak 		return IRQ_NONE;
44432dd2a883SImre Deak 
44441f814dacSImre Deak 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
44459102650fSDaniele Ceraolo Spurio 	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
44461f814dacSImre Deak 
4447af722d28SVille Syrjälä 	do {
4448af722d28SVille Syrjälä 		u32 pipe_stats[I915_MAX_PIPES] = {};
444978c357ddSVille Syrjälä 		u16 eir = 0, eir_stuck = 0;
4450af722d28SVille Syrjälä 		u16 iir;
4451af722d28SVille Syrjälä 
44524f5fd91fSTvrtko Ursulin 		iir = intel_uncore_read16(&dev_priv->uncore, GEN2_IIR);
4453c2798b19SChris Wilson 		if (iir == 0)
4454af722d28SVille Syrjälä 			break;
4455c2798b19SChris Wilson 
4456af722d28SVille Syrjälä 		ret = IRQ_HANDLED;
4457c2798b19SChris Wilson 
4458eb64343cSVille Syrjälä 		/* Call regardless, as some status bits might not be
4459eb64343cSVille Syrjälä 		 * signalled in iir */
4460eb64343cSVille Syrjälä 		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
4461c2798b19SChris Wilson 
446278c357ddSVille Syrjälä 		if (iir & I915_MASTER_ERROR_INTERRUPT)
446378c357ddSVille Syrjälä 			i8xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
446478c357ddSVille Syrjälä 
44654f5fd91fSTvrtko Ursulin 		intel_uncore_write16(&dev_priv->uncore, GEN2_IIR, iir);
4466c2798b19SChris Wilson 
4467c2798b19SChris Wilson 		if (iir & I915_USER_INTERRUPT)
44688a68d464SChris Wilson 			intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);
4469c2798b19SChris Wilson 
447078c357ddSVille Syrjälä 		if (iir & I915_MASTER_ERROR_INTERRUPT)
447178c357ddSVille Syrjälä 			i8xx_error_irq_handler(dev_priv, eir, eir_stuck);
4472af722d28SVille Syrjälä 
4473eb64343cSVille Syrjälä 		i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats);
4474af722d28SVille Syrjälä 	} while (0);
4475c2798b19SChris Wilson 
44769102650fSDaniele Ceraolo Spurio 	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
44771f814dacSImre Deak 
44781f814dacSImre Deak 	return ret;
4479c2798b19SChris Wilson }
4480c2798b19SChris Wilson 
4481b318b824SVille Syrjälä static void i915_irq_reset(struct drm_i915_private *dev_priv)
4482a266c7d5SChris Wilson {
4483b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
4484a266c7d5SChris Wilson 
448556b857a5STvrtko Ursulin 	if (I915_HAS_HOTPLUG(dev_priv)) {
44860706f17cSEgbert Eich 		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4487a266c7d5SChris Wilson 		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4488a266c7d5SChris Wilson 	}
4489a266c7d5SChris Wilson 
449044d9241eSVille Syrjälä 	i9xx_pipestat_irq_reset(dev_priv);
449144d9241eSVille Syrjälä 
4492b16b2a2fSPaulo Zanoni 	GEN3_IRQ_RESET(uncore, GEN2_);
4493a266c7d5SChris Wilson }
4494a266c7d5SChris Wilson 
4495b318b824SVille Syrjälä static void i915_irq_postinstall(struct drm_i915_private *dev_priv)
4496a266c7d5SChris Wilson {
4497b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
449838bde180SChris Wilson 	u32 enable_mask;
4499a266c7d5SChris Wilson 
4500045cebd2SVille Syrjälä 	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE |
4501045cebd2SVille Syrjälä 			  I915_ERROR_MEMORY_REFRESH));
450238bde180SChris Wilson 
450338bde180SChris Wilson 	/* Unmask the interrupts that we always want on. */
450438bde180SChris Wilson 	dev_priv->irq_mask =
450538bde180SChris Wilson 		~(I915_ASLE_INTERRUPT |
450638bde180SChris Wilson 		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
450716659bc5SVille Syrjälä 		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
450816659bc5SVille Syrjälä 		  I915_MASTER_ERROR_INTERRUPT);
450938bde180SChris Wilson 
451038bde180SChris Wilson 	enable_mask =
451138bde180SChris Wilson 		I915_ASLE_INTERRUPT |
451238bde180SChris Wilson 		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
451338bde180SChris Wilson 		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
451416659bc5SVille Syrjälä 		I915_MASTER_ERROR_INTERRUPT |
451538bde180SChris Wilson 		I915_USER_INTERRUPT;
451638bde180SChris Wilson 
451756b857a5STvrtko Ursulin 	if (I915_HAS_HOTPLUG(dev_priv)) {
4518a266c7d5SChris Wilson 		/* Enable in IER... */
4519a266c7d5SChris Wilson 		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
4520a266c7d5SChris Wilson 		/* and unmask in IMR */
4521a266c7d5SChris Wilson 		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
4522a266c7d5SChris Wilson 	}
4523a266c7d5SChris Wilson 
4524b16b2a2fSPaulo Zanoni 	GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);
4525a266c7d5SChris Wilson 
4526379ef82dSDaniel Vetter 	/* Interrupt setup is already guaranteed to be single-threaded; this is
4527379ef82dSDaniel Vetter 	 * just to make the assert_spin_locked check happy. */
4528d6207435SDaniel Vetter 	spin_lock_irq(&dev_priv->irq_lock);
4529755e9019SImre Deak 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4530755e9019SImre Deak 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4531d6207435SDaniel Vetter 	spin_unlock_irq(&dev_priv->irq_lock);
4532379ef82dSDaniel Vetter 
4533c30bb1fdSVille Syrjälä 	i915_enable_asle_pipestat(dev_priv);
453420afbda2SDaniel Vetter }
453520afbda2SDaniel Vetter 
4536ff1f525eSDaniel Vetter static irqreturn_t i915_irq_handler(int irq, void *arg)
4537a266c7d5SChris Wilson {
4538b318b824SVille Syrjälä 	struct drm_i915_private *dev_priv = arg;
4539af722d28SVille Syrjälä 	irqreturn_t ret = IRQ_NONE;
4540a266c7d5SChris Wilson 
45412dd2a883SImre Deak 	if (!intel_irqs_enabled(dev_priv))
45422dd2a883SImre Deak 		return IRQ_NONE;
45432dd2a883SImre Deak 
45441f814dacSImre Deak 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
45459102650fSDaniele Ceraolo Spurio 	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
45461f814dacSImre Deak 
454738bde180SChris Wilson 	do {
4548eb64343cSVille Syrjälä 		u32 pipe_stats[I915_MAX_PIPES] = {};
454978c357ddSVille Syrjälä 		u32 eir = 0, eir_stuck = 0;
4550af722d28SVille Syrjälä 		u32 hotplug_status = 0;
4551af722d28SVille Syrjälä 		u32 iir;
4552a266c7d5SChris Wilson 
45539d9523d8SPaulo Zanoni 		iir = I915_READ(GEN2_IIR);
4554af722d28SVille Syrjälä 		if (iir == 0)
4555af722d28SVille Syrjälä 			break;
4556af722d28SVille Syrjälä 
4557af722d28SVille Syrjälä 		ret = IRQ_HANDLED;
4558af722d28SVille Syrjälä 
4559af722d28SVille Syrjälä 		if (I915_HAS_HOTPLUG(dev_priv) &&
4560af722d28SVille Syrjälä 		    iir & I915_DISPLAY_PORT_INTERRUPT)
4561af722d28SVille Syrjälä 			hotplug_status = i9xx_hpd_irq_ack(dev_priv);
4562a266c7d5SChris Wilson 
4563eb64343cSVille Syrjälä 		/* Call regardless, as some status bits might not be
4564eb64343cSVille Syrjälä 		 * signalled in iir */
4565eb64343cSVille Syrjälä 		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
4566a266c7d5SChris Wilson 
456778c357ddSVille Syrjälä 		if (iir & I915_MASTER_ERROR_INTERRUPT)
456878c357ddSVille Syrjälä 			i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
456978c357ddSVille Syrjälä 
45709d9523d8SPaulo Zanoni 		I915_WRITE(GEN2_IIR, iir);
4571a266c7d5SChris Wilson 
4572a266c7d5SChris Wilson 		if (iir & I915_USER_INTERRUPT)
45738a68d464SChris Wilson 			intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);
4574a266c7d5SChris Wilson 
457578c357ddSVille Syrjälä 		if (iir & I915_MASTER_ERROR_INTERRUPT)
457678c357ddSVille Syrjälä 			i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
4577a266c7d5SChris Wilson 
4578af722d28SVille Syrjälä 		if (hotplug_status)
4579af722d28SVille Syrjälä 			i9xx_hpd_irq_handler(dev_priv, hotplug_status);
4580af722d28SVille Syrjälä 
4581af722d28SVille Syrjälä 		i915_pipestat_irq_handler(dev_priv, iir, pipe_stats);
4582af722d28SVille Syrjälä 	} while (0);
4583a266c7d5SChris Wilson 
45849102650fSDaniele Ceraolo Spurio 	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
45851f814dacSImre Deak 
4586a266c7d5SChris Wilson 	return ret;
4587a266c7d5SChris Wilson }
4588a266c7d5SChris Wilson 
4589b318b824SVille Syrjälä static void i965_irq_reset(struct drm_i915_private *dev_priv)
4590a266c7d5SChris Wilson {
4591b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
4592a266c7d5SChris Wilson 
45930706f17cSEgbert Eich 	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4594a266c7d5SChris Wilson 	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4595a266c7d5SChris Wilson 
459644d9241eSVille Syrjälä 	i9xx_pipestat_irq_reset(dev_priv);
459744d9241eSVille Syrjälä 
4598b16b2a2fSPaulo Zanoni 	GEN3_IRQ_RESET(uncore, GEN2_);
4599a266c7d5SChris Wilson }
4600a266c7d5SChris Wilson 
4601b318b824SVille Syrjälä static void i965_irq_postinstall(struct drm_i915_private *dev_priv)
4602a266c7d5SChris Wilson {
4603b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
4604bbba0a97SChris Wilson 	u32 enable_mask;
4605a266c7d5SChris Wilson 	u32 error_mask;
4606a266c7d5SChris Wilson 
4607045cebd2SVille Syrjälä 	/*
4608045cebd2SVille Syrjälä 	 * Enable some error detection; note the instruction error mask
4609045cebd2SVille Syrjälä 	 * bit is reserved, so we leave it masked.
4610045cebd2SVille Syrjälä 	 */
4611045cebd2SVille Syrjälä 	if (IS_G4X(dev_priv)) {
4612045cebd2SVille Syrjälä 		error_mask = ~(GM45_ERROR_PAGE_TABLE |
4613045cebd2SVille Syrjälä 			       GM45_ERROR_MEM_PRIV |
4614045cebd2SVille Syrjälä 			       GM45_ERROR_CP_PRIV |
4615045cebd2SVille Syrjälä 			       I915_ERROR_MEMORY_REFRESH);
4616045cebd2SVille Syrjälä 	} else {
4617045cebd2SVille Syrjälä 		error_mask = ~(I915_ERROR_PAGE_TABLE |
4618045cebd2SVille Syrjälä 			       I915_ERROR_MEMORY_REFRESH);
4619045cebd2SVille Syrjälä 	}
4620045cebd2SVille Syrjälä 	I915_WRITE(EMR, error_mask);
4621045cebd2SVille Syrjälä 
4622a266c7d5SChris Wilson 	/* Unmask the interrupts that we always want on. */
4623c30bb1fdSVille Syrjälä 	dev_priv->irq_mask =
4624c30bb1fdSVille Syrjälä 		~(I915_ASLE_INTERRUPT |
4625adca4730SChris Wilson 		  I915_DISPLAY_PORT_INTERRUPT |
4626bbba0a97SChris Wilson 		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4627bbba0a97SChris Wilson 		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
462878c357ddSVille Syrjälä 		  I915_MASTER_ERROR_INTERRUPT);
4629bbba0a97SChris Wilson 
4630c30bb1fdSVille Syrjälä 	enable_mask =
4631c30bb1fdSVille Syrjälä 		I915_ASLE_INTERRUPT |
4632c30bb1fdSVille Syrjälä 		I915_DISPLAY_PORT_INTERRUPT |
4633c30bb1fdSVille Syrjälä 		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4634c30bb1fdSVille Syrjälä 		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
463578c357ddSVille Syrjälä 		I915_MASTER_ERROR_INTERRUPT |
4636c30bb1fdSVille Syrjälä 		I915_USER_INTERRUPT;
4637bbba0a97SChris Wilson 
463891d14251STvrtko Ursulin 	if (IS_G4X(dev_priv))
4639bbba0a97SChris Wilson 		enable_mask |= I915_BSD_USER_INTERRUPT;
4640a266c7d5SChris Wilson 
4641b16b2a2fSPaulo Zanoni 	GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);
4642c30bb1fdSVille Syrjälä 
4643b79480baSDaniel Vetter 	/* Interrupt setup is already guaranteed to be single-threaded; this is
4644b79480baSDaniel Vetter 	 * just to make the assert_spin_locked check happy. */
4645d6207435SDaniel Vetter 	spin_lock_irq(&dev_priv->irq_lock);
4646755e9019SImre Deak 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
4647755e9019SImre Deak 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4648755e9019SImre Deak 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4649d6207435SDaniel Vetter 	spin_unlock_irq(&dev_priv->irq_lock);
4650a266c7d5SChris Wilson 
465191d14251STvrtko Ursulin 	i915_enable_asle_pipestat(dev_priv);
465220afbda2SDaniel Vetter }
465320afbda2SDaniel Vetter 
465491d14251STvrtko Ursulin static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
465520afbda2SDaniel Vetter {
465620afbda2SDaniel Vetter 	u32 hotplug_en;
465720afbda2SDaniel Vetter 
465867520415SChris Wilson 	lockdep_assert_held(&dev_priv->irq_lock);
4659b5ea2d56SDaniel Vetter 
4660adca4730SChris Wilson 	/* Note HDMI and DP share hotplug bits */
4661e5868a31SEgbert Eich 	/* enable bits are the same for all generations */
466291d14251STvrtko Ursulin 	hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915);
4663a266c7d5SChris Wilson 	/* Programming the CRT detection parameters tends
4664a266c7d5SChris Wilson 	   to generate a spurious hotplug event about three
4665a266c7d5SChris Wilson 	   seconds later.  So just do it once.
4666a266c7d5SChris Wilson 	*/
466791d14251STvrtko Ursulin 	if (IS_G4X(dev_priv))
4668a266c7d5SChris Wilson 		hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
4669a266c7d5SChris Wilson 	hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
4670a266c7d5SChris Wilson 
4671a266c7d5SChris Wilson 	/* Ignore TV since it's buggy */
46720706f17cSEgbert Eich 	i915_hotplug_interrupt_update_locked(dev_priv,
4673f9e3dc78SJani Nikula 					     HOTPLUG_INT_EN_MASK |
4674f9e3dc78SJani Nikula 					     CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
4675f9e3dc78SJani Nikula 					     CRT_HOTPLUG_ACTIVATION_PERIOD_64,
46760706f17cSEgbert Eich 					     hotplug_en);
4677a266c7d5SChris Wilson }
4678a266c7d5SChris Wilson 
4679ff1f525eSDaniel Vetter static irqreturn_t i965_irq_handler(int irq, void *arg)
4680a266c7d5SChris Wilson {
4681b318b824SVille Syrjälä 	struct drm_i915_private *dev_priv = arg;
4682af722d28SVille Syrjälä 	irqreturn_t ret = IRQ_NONE;
4683a266c7d5SChris Wilson 
46842dd2a883SImre Deak 	if (!intel_irqs_enabled(dev_priv))
46852dd2a883SImre Deak 		return IRQ_NONE;
46862dd2a883SImre Deak 
46871f814dacSImre Deak 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
46889102650fSDaniele Ceraolo Spurio 	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
46891f814dacSImre Deak 
4690af722d28SVille Syrjälä 	do {
4691eb64343cSVille Syrjälä 		u32 pipe_stats[I915_MAX_PIPES] = {};
469278c357ddSVille Syrjälä 		u32 eir = 0, eir_stuck = 0;
4693af722d28SVille Syrjälä 		u32 hotplug_status = 0;
4694af722d28SVille Syrjälä 		u32 iir;
46952c8ba29fSChris Wilson 
46969d9523d8SPaulo Zanoni 		iir = I915_READ(GEN2_IIR);
4697af722d28SVille Syrjälä 		if (iir == 0)
4698af722d28SVille Syrjälä 			break;
4699af722d28SVille Syrjälä 
4700af722d28SVille Syrjälä 		ret = IRQ_HANDLED;
4701af722d28SVille Syrjälä 
4702af722d28SVille Syrjälä 		if (iir & I915_DISPLAY_PORT_INTERRUPT)
4703af722d28SVille Syrjälä 			hotplug_status = i9xx_hpd_irq_ack(dev_priv);
4704a266c7d5SChris Wilson 
4705eb64343cSVille Syrjälä 		/* Call regardless, as some status bits might not be
4706eb64343cSVille Syrjälä 		 * signalled in iir */
4707eb64343cSVille Syrjälä 		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
4708a266c7d5SChris Wilson 
470978c357ddSVille Syrjälä 		if (iir & I915_MASTER_ERROR_INTERRUPT)
471078c357ddSVille Syrjälä 			i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
471178c357ddSVille Syrjälä 
47129d9523d8SPaulo Zanoni 		I915_WRITE(GEN2_IIR, iir);
4713a266c7d5SChris Wilson 
4714a266c7d5SChris Wilson 		if (iir & I915_USER_INTERRUPT)
47158a68d464SChris Wilson 			intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);
4716af722d28SVille Syrjälä 
4717a266c7d5SChris Wilson 		if (iir & I915_BSD_USER_INTERRUPT)
47188a68d464SChris Wilson 			intel_engine_breadcrumbs_irq(dev_priv->engine[VCS0]);
4719a266c7d5SChris Wilson 
472078c357ddSVille Syrjälä 		if (iir & I915_MASTER_ERROR_INTERRUPT)
472178c357ddSVille Syrjälä 			i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
4722515ac2bbSDaniel Vetter 
4723af722d28SVille Syrjälä 		if (hotplug_status)
4724af722d28SVille Syrjälä 			i9xx_hpd_irq_handler(dev_priv, hotplug_status);
4725af722d28SVille Syrjälä 
4726af722d28SVille Syrjälä 		i965_pipestat_irq_handler(dev_priv, iir, pipe_stats);
4727af722d28SVille Syrjälä 	} while (0);
4728a266c7d5SChris Wilson 
47299102650fSDaniele Ceraolo Spurio 	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
47301f814dacSImre Deak 
4731a266c7d5SChris Wilson 	return ret;
4732a266c7d5SChris Wilson }
4733a266c7d5SChris Wilson 
4734fca52a55SDaniel Vetter /**
4735fca52a55SDaniel Vetter  * intel_irq_init - initializes irq support
4736fca52a55SDaniel Vetter  * @dev_priv: i915 device instance
4737fca52a55SDaniel Vetter  *
4738fca52a55SDaniel Vetter  * This function initializes all the irq support including work items, timers
4739fca52a55SDaniel Vetter  * and all the vtables. It does not setup the interrupt itself though.
4740fca52a55SDaniel Vetter  * and all the vtables. It does not set up the interrupt itself though.
4741b963291cSDaniel Vetter void intel_irq_init(struct drm_i915_private *dev_priv)
4742f71d4af4SJesse Barnes {
474391c8a326SChris Wilson 	struct drm_device *dev = &dev_priv->drm;
4744562d9baeSSagar Arun Kamble 	struct intel_rps *rps = &dev_priv->gt_pm.rps;
4745cefcff8fSJoonas Lahtinen 	int i;
47468b2e326dSChris Wilson 
4747d938da6bSVille Syrjälä 	if (IS_I945GM(dev_priv))
4748d938da6bSVille Syrjälä 		i945gm_vblank_work_init(dev_priv);
4749d938da6bSVille Syrjälä 
475077913b39SJani Nikula 	intel_hpd_init_work(dev_priv);
475177913b39SJani Nikula 
4752562d9baeSSagar Arun Kamble 	INIT_WORK(&rps->work, gen6_pm_rps_work);
4753cefcff8fSJoonas Lahtinen 
4754a4da4fa4SDaniel Vetter 	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
4755cefcff8fSJoonas Lahtinen 	for (i = 0; i < MAX_L3_SLICES; ++i)
4756cefcff8fSJoonas Lahtinen 		dev_priv->l3_parity.remap_info[i] = NULL;
47578b2e326dSChris Wilson 
4758633023a4SDaniele Ceraolo Spurio 	/* pre-gen11 the GuC irq bits are in the upper 16 bits of the pm reg */
475954c52a84SOscar Mateo 	if (HAS_GUC_SCHED(dev_priv) && INTEL_GEN(dev_priv) < 11)
4760633023a4SDaniele Ceraolo Spurio 		dev_priv->pm_guc_events = GUC_INTR_GUC2HOST << 16;
476126705e20SSagar Arun Kamble 
4762a6706b45SDeepak S 	/* Let's track the enabled rps events */
4763666a4537SWayne Boyer 	if (IS_VALLEYVIEW(dev_priv))
47646c65a587SVille Syrjälä 		/* WaGsvRC0ResidencyMethod:vlv */
4765e0e8c7cbSChris Wilson 		dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
476631685c25SDeepak S 	else
47674668f695SChris Wilson 		dev_priv->pm_rps_events = (GEN6_PM_RP_UP_THRESHOLD |
47684668f695SChris Wilson 					   GEN6_PM_RP_DOWN_THRESHOLD |
47694668f695SChris Wilson 					   GEN6_PM_RP_DOWN_TIMEOUT);
4770a6706b45SDeepak S 
4771917dc6b5SMika Kuoppala 	/* We share the register with other engines */
4772917dc6b5SMika Kuoppala 	if (INTEL_GEN(dev_priv) > 9)
4773917dc6b5SMika Kuoppala 		GEM_WARN_ON(dev_priv->pm_rps_events & 0xffff0000);
4774917dc6b5SMika Kuoppala 
4775562d9baeSSagar Arun Kamble 	rps->pm_intrmsk_mbz = 0;
47761800ad25SSagar Arun Kamble 
47771800ad25SSagar Arun Kamble 	/*
4778acf2dc22SMika Kuoppala 	 * SNB, IVB and HSW can hang, while VLV and CHV may hard hang, on a
47791800ad25SSagar Arun Kamble 	 * looping batchbuffer if GEN6_PM_UP_EI_EXPIRED is masked.
47801800ad25SSagar Arun Kamble 	 *
47811800ad25SSagar Arun Kamble 	 * TODO: verify if this can be reproduced on VLV,CHV.
47821800ad25SSagar Arun Kamble 	 */
4783bca2bf2aSPandiyan, Dhinakaran 	if (INTEL_GEN(dev_priv) <= 7)
4784562d9baeSSagar Arun Kamble 		rps->pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED;
47851800ad25SSagar Arun Kamble 
4786bca2bf2aSPandiyan, Dhinakaran 	if (INTEL_GEN(dev_priv) >= 8)
4787562d9baeSSagar Arun Kamble 		rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
47881800ad25SSagar Arun Kamble 
478921da2700SVille Syrjälä 	dev->vblank_disable_immediate = true;
479021da2700SVille Syrjälä 
4791262fd485SChris Wilson 	/* Most platforms treat the display irq block as an always-on
4792262fd485SChris Wilson 	 * power domain. vlv/chv can disable it at runtime and need
4793262fd485SChris Wilson 	 * special care to avoid writing any of the display block registers
4794262fd485SChris Wilson 	 * outside of the power domain. We defer setting up the display irqs
4795262fd485SChris Wilson 	 * in this case to the runtime pm.
4796262fd485SChris Wilson 	 */
4797262fd485SChris Wilson 	dev_priv->display_irqs_enabled = true;
4798262fd485SChris Wilson 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
4799262fd485SChris Wilson 		dev_priv->display_irqs_enabled = false;
4800262fd485SChris Wilson 
4801317eaa95SLyude 	dev_priv->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD;
48029a64c650SLyude Paul 	/* If we have MST support, we want to avoid doing short HPD IRQ storm
48039a64c650SLyude Paul 	 * detection, as short HPD storms will occur as a natural part of
48049a64c650SLyude Paul 	 * sideband messaging with MST.
48059a64c650SLyude Paul 	 * On older platforms however, IRQ storms can occur with both long and
48069a64c650SLyude Paul 	 * short pulses, as seen on some G4x systems.
48079a64c650SLyude Paul 	 */
48089a64c650SLyude Paul 	dev_priv->hotplug.hpd_short_storm_enabled = !HAS_DP_MST(dev_priv);
4809317eaa95SLyude 
4810b318b824SVille Syrjälä 	if (HAS_GMCH(dev_priv)) {
4811b318b824SVille Syrjälä 		if (I915_HAS_HOTPLUG(dev_priv))
481243f328d7SVille Syrjälä 			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4813b318b824SVille Syrjälä 	} else {
4814b318b824SVille Syrjälä 		if (INTEL_GEN(dev_priv) >= 11)
4815121e758eSDhinakaran Pandiyan 			dev_priv->display.hpd_irq_setup = gen11_hpd_irq_setup;
4816b318b824SVille Syrjälä 		else if (IS_GEN9_LP(dev_priv))
4817e0a20ad7SShashank Sharma 			dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
4818c6c30b91SRodrigo Vivi 		else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
48196dbf30ceSVille Syrjälä 			dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
48206dbf30ceSVille Syrjälä 		else
48213a3b3c7dSVille Syrjälä 			dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
4822f71d4af4SJesse Barnes 	}
4823f71d4af4SJesse Barnes }
482420afbda2SDaniel Vetter 
4825fca52a55SDaniel Vetter /**
4826cefcff8fSJoonas Lahtinen  * intel_irq_fini - deinitializes IRQ support
4827cefcff8fSJoonas Lahtinen  * @i915: i915 device instance
4828cefcff8fSJoonas Lahtinen  *
4829cefcff8fSJoonas Lahtinen  * This function deinitializes all the IRQ support.
4830cefcff8fSJoonas Lahtinen  */
4831cefcff8fSJoonas Lahtinen void intel_irq_fini(struct drm_i915_private *i915)
4832cefcff8fSJoonas Lahtinen {
4833cefcff8fSJoonas Lahtinen 	int i;
4834cefcff8fSJoonas Lahtinen 
4835d938da6bSVille Syrjälä 	if (IS_I945GM(i915))
4836d938da6bSVille Syrjälä 		i945gm_vblank_work_fini(i915);
4837d938da6bSVille Syrjälä 
4838cefcff8fSJoonas Lahtinen 	for (i = 0; i < MAX_L3_SLICES; ++i)
4839cefcff8fSJoonas Lahtinen 		kfree(i915->l3_parity.remap_info[i]);
4840cefcff8fSJoonas Lahtinen }
4841cefcff8fSJoonas Lahtinen 
4842b318b824SVille Syrjälä static irq_handler_t intel_irq_handler(struct drm_i915_private *dev_priv)
4843b318b824SVille Syrjälä {
4844b318b824SVille Syrjälä 	if (HAS_GMCH(dev_priv)) {
4845b318b824SVille Syrjälä 		if (IS_CHERRYVIEW(dev_priv))
4846b318b824SVille Syrjälä 			return cherryview_irq_handler;
4847b318b824SVille Syrjälä 		else if (IS_VALLEYVIEW(dev_priv))
4848b318b824SVille Syrjälä 			return valleyview_irq_handler;
4849b318b824SVille Syrjälä 		else if (IS_GEN(dev_priv, 4))
4850b318b824SVille Syrjälä 			return i965_irq_handler;
4851b318b824SVille Syrjälä 		else if (IS_GEN(dev_priv, 3))
4852b318b824SVille Syrjälä 			return i915_irq_handler;
4853b318b824SVille Syrjälä 		else
4854b318b824SVille Syrjälä 			return i8xx_irq_handler;
4855b318b824SVille Syrjälä 	} else {
4856b318b824SVille Syrjälä 		if (INTEL_GEN(dev_priv) >= 11)
4857b318b824SVille Syrjälä 			return gen11_irq_handler;
4858b318b824SVille Syrjälä 		else if (INTEL_GEN(dev_priv) >= 8)
4859b318b824SVille Syrjälä 			return gen8_irq_handler;
4860b318b824SVille Syrjälä 		else
4861b318b824SVille Syrjälä 			return ironlake_irq_handler;
4862b318b824SVille Syrjälä 	}
4863b318b824SVille Syrjälä }
4864b318b824SVille Syrjälä 
4865b318b824SVille Syrjälä static void intel_irq_reset(struct drm_i915_private *dev_priv)
4866b318b824SVille Syrjälä {
4867b318b824SVille Syrjälä 	if (HAS_GMCH(dev_priv)) {
4868b318b824SVille Syrjälä 		if (IS_CHERRYVIEW(dev_priv))
4869b318b824SVille Syrjälä 			cherryview_irq_reset(dev_priv);
4870b318b824SVille Syrjälä 		else if (IS_VALLEYVIEW(dev_priv))
4871b318b824SVille Syrjälä 			valleyview_irq_reset(dev_priv);
4872b318b824SVille Syrjälä 		else if (IS_GEN(dev_priv, 4))
4873b318b824SVille Syrjälä 			i965_irq_reset(dev_priv);
4874b318b824SVille Syrjälä 		else if (IS_GEN(dev_priv, 3))
4875b318b824SVille Syrjälä 			i915_irq_reset(dev_priv);
4876b318b824SVille Syrjälä 		else
4877b318b824SVille Syrjälä 			i8xx_irq_reset(dev_priv);
4878b318b824SVille Syrjälä 	} else {
4879b318b824SVille Syrjälä 		if (INTEL_GEN(dev_priv) >= 11)
4880b318b824SVille Syrjälä 			gen11_irq_reset(dev_priv);
4881b318b824SVille Syrjälä 		else if (INTEL_GEN(dev_priv) >= 8)
4882b318b824SVille Syrjälä 			gen8_irq_reset(dev_priv);
4883b318b824SVille Syrjälä 		else
4884b318b824SVille Syrjälä 			ironlake_irq_reset(dev_priv);
4885b318b824SVille Syrjälä 	}
4886b318b824SVille Syrjälä }
4887b318b824SVille Syrjälä 
4888b318b824SVille Syrjälä static void intel_irq_postinstall(struct drm_i915_private *dev_priv)
4889b318b824SVille Syrjälä {
4890b318b824SVille Syrjälä 	if (HAS_GMCH(dev_priv)) {
4891b318b824SVille Syrjälä 		if (IS_CHERRYVIEW(dev_priv))
4892b318b824SVille Syrjälä 			cherryview_irq_postinstall(dev_priv);
4893b318b824SVille Syrjälä 		else if (IS_VALLEYVIEW(dev_priv))
4894b318b824SVille Syrjälä 			valleyview_irq_postinstall(dev_priv);
4895b318b824SVille Syrjälä 		else if (IS_GEN(dev_priv, 4))
4896b318b824SVille Syrjälä 			i965_irq_postinstall(dev_priv);
4897b318b824SVille Syrjälä 		else if (IS_GEN(dev_priv, 3))
4898b318b824SVille Syrjälä 			i915_irq_postinstall(dev_priv);
4899b318b824SVille Syrjälä 		else
4900b318b824SVille Syrjälä 			i8xx_irq_postinstall(dev_priv);
4901b318b824SVille Syrjälä 	} else {
4902b318b824SVille Syrjälä 		if (INTEL_GEN(dev_priv) >= 11)
4903b318b824SVille Syrjälä 			gen11_irq_postinstall(dev_priv);
4904b318b824SVille Syrjälä 		else if (INTEL_GEN(dev_priv) >= 8)
4905b318b824SVille Syrjälä 			gen8_irq_postinstall(dev_priv);
4906b318b824SVille Syrjälä 		else
4907b318b824SVille Syrjälä 			ironlake_irq_postinstall(dev_priv);
4908b318b824SVille Syrjälä 	}
4909b318b824SVille Syrjälä }
4910b318b824SVille Syrjälä 
4911cefcff8fSJoonas Lahtinen /**
4912fca52a55SDaniel Vetter  * intel_irq_install - enables the hardware interrupt
4913fca52a55SDaniel Vetter  * @dev_priv: i915 device instance
4914fca52a55SDaniel Vetter  *
4915fca52a55SDaniel Vetter  * This function enables the hardware interrupt handling, but leaves the hotplug
4916fca52a55SDaniel Vetter  * handling still disabled. It is called after intel_irq_init().
4917fca52a55SDaniel Vetter  *
4918fca52a55SDaniel Vetter  * In the driver load and resume code we need working interrupts in a few places
4919fca52a55SDaniel Vetter  * but don't want to deal with the hassle of concurrent probe and hotplug
4920fca52a55SDaniel Vetter  * workers. Hence the split into this two-stage approach.
4921fca52a55SDaniel Vetter  */
49222aeb7d3aSDaniel Vetter int intel_irq_install(struct drm_i915_private *dev_priv)
49232aeb7d3aSDaniel Vetter {
4924b318b824SVille Syrjälä 	int irq = dev_priv->drm.pdev->irq;
4925b318b824SVille Syrjälä 	int ret;
4926b318b824SVille Syrjälä 
49272aeb7d3aSDaniel Vetter 	/*
49282aeb7d3aSDaniel Vetter 	 * We enable some interrupt sources in our postinstall hooks, so mark
49292aeb7d3aSDaniel Vetter 	 * interrupts as enabled _before_ actually enabling them to avoid
49302aeb7d3aSDaniel Vetter 	 * special cases in our ordering checks.
49312aeb7d3aSDaniel Vetter 	 */
4932ad1443f0SSagar Arun Kamble 	dev_priv->runtime_pm.irqs_enabled = true;
49332aeb7d3aSDaniel Vetter 
4934b318b824SVille Syrjälä 	dev_priv->drm.irq_enabled = true;
4935b318b824SVille Syrjälä 
4936b318b824SVille Syrjälä 	intel_irq_reset(dev_priv);
4937b318b824SVille Syrjälä 
4938b318b824SVille Syrjälä 	ret = request_irq(irq, intel_irq_handler(dev_priv),
4939b318b824SVille Syrjälä 			  IRQF_SHARED, DRIVER_NAME, dev_priv);
4940b318b824SVille Syrjälä 	if (ret < 0) {
4941b318b824SVille Syrjälä 		dev_priv->drm.irq_enabled = false;
4942b318b824SVille Syrjälä 		return ret;
4943b318b824SVille Syrjälä 	}
4944b318b824SVille Syrjälä 
4945b318b824SVille Syrjälä 	intel_irq_postinstall(dev_priv);
4946b318b824SVille Syrjälä 
4947b318b824SVille Syrjälä 	return ret;
49482aeb7d3aSDaniel Vetter }
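
/*
 * Illustrative call order for the two-stage setup described above (a
 * rough sketch only; the real sequence and error handling live in the
 * driver load/unload code):
 *
 *	intel_irq_init(dev_priv);
 *	...
 *	ret = intel_irq_install(dev_priv);
 *	if (ret)
 *		return ret;
 *	...
 *	intel_irq_uninstall(dev_priv);		(on unload)
 */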
49492aeb7d3aSDaniel Vetter 
4950fca52a55SDaniel Vetter /**
4951fca52a55SDaniel Vetter  * intel_irq_uninstall - finalizes all irq handling
4952fca52a55SDaniel Vetter  * @dev_priv: i915 device instance
4953fca52a55SDaniel Vetter  *
4954fca52a55SDaniel Vetter  * This stops interrupt and hotplug handling and unregisters and frees all
4955fca52a55SDaniel Vetter  * resources acquired in the init functions.
4956fca52a55SDaniel Vetter  */
49572aeb7d3aSDaniel Vetter void intel_irq_uninstall(struct drm_i915_private *dev_priv)
49582aeb7d3aSDaniel Vetter {
4959b318b824SVille Syrjälä 	int irq = dev_priv->drm.pdev->irq;
4960b318b824SVille Syrjälä 
4961b318b824SVille Syrjälä 	/*
4962b318b824SVille Syrjälä 	 * FIXME we can get called twice during driver load
4963b318b824SVille Syrjälä 	 * error handling due to intel_modeset_cleanup()
4964b318b824SVille Syrjälä 	 * calling us out of sequence. Would be nice if
4965b318b824SVille Syrjälä 	 * it didn't do that...
4966b318b824SVille Syrjälä 	 */
4967b318b824SVille Syrjälä 	if (!dev_priv->drm.irq_enabled)
4968b318b824SVille Syrjälä 		return;
4969b318b824SVille Syrjälä 
4970b318b824SVille Syrjälä 	dev_priv->drm.irq_enabled = false;
4971b318b824SVille Syrjälä 
4972b318b824SVille Syrjälä 	intel_irq_reset(dev_priv);
4973b318b824SVille Syrjälä 
4974b318b824SVille Syrjälä 	free_irq(irq, dev_priv);
4975b318b824SVille Syrjälä 
49762aeb7d3aSDaniel Vetter 	intel_hpd_cancel_work(dev_priv);
4977ad1443f0SSagar Arun Kamble 	dev_priv->runtime_pm.irqs_enabled = false;
49782aeb7d3aSDaniel Vetter }
49792aeb7d3aSDaniel Vetter 
4980fca52a55SDaniel Vetter /**
4981fca52a55SDaniel Vetter  * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
4982fca52a55SDaniel Vetter  * @dev_priv: i915 device instance
4983fca52a55SDaniel Vetter  *
4984fca52a55SDaniel Vetter  * This function is used to disable interrupts at runtime, both in the runtime
4985fca52a55SDaniel Vetter  * pm and the system suspend/resume code.
4986fca52a55SDaniel Vetter  */
4987b963291cSDaniel Vetter void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
4988c67a470bSPaulo Zanoni {
4989b318b824SVille Syrjälä 	intel_irq_reset(dev_priv);
4990ad1443f0SSagar Arun Kamble 	dev_priv->runtime_pm.irqs_enabled = false;
4991315ca4c4SVille Syrjälä 	intel_synchronize_irq(dev_priv);
4992c67a470bSPaulo Zanoni }
4993c67a470bSPaulo Zanoni 
4994fca52a55SDaniel Vetter /**
4995fca52a55SDaniel Vetter  * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
4996fca52a55SDaniel Vetter  * @dev_priv: i915 device instance
4997fca52a55SDaniel Vetter  *
4998fca52a55SDaniel Vetter  * This function is used to enable interrupts at runtime, both in the runtime
4999fca52a55SDaniel Vetter  * pm and the system suspend/resume code.
5000fca52a55SDaniel Vetter  */
5001b963291cSDaniel Vetter void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
5002c67a470bSPaulo Zanoni {
5003ad1443f0SSagar Arun Kamble 	dev_priv->runtime_pm.irqs_enabled = true;
5004b318b824SVille Syrjälä 	intel_irq_reset(dev_priv);
5005b318b824SVille Syrjälä 	intel_irq_postinstall(dev_priv);
5006c67a470bSPaulo Zanoni }
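
/*
 * These two runtime pm helpers are meant to be used as a pair, roughly
 * (illustrative only):
 *
 *	suspend:	intel_runtime_pm_disable_interrupts(dev_priv);
 *	resume:		intel_runtime_pm_enable_interrupts(dev_priv);
 */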
5007