xref: /openbmc/linux/drivers/gpu/drm/i915/i915_irq.c (revision 2239e6dff2067c23f0afb7fab62ef139dc957d48)
1c0e09200SDave Airlie /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
2c0e09200SDave Airlie  */
3c0e09200SDave Airlie /*
4c0e09200SDave Airlie  * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
5c0e09200SDave Airlie  * All Rights Reserved.
6c0e09200SDave Airlie  *
7c0e09200SDave Airlie  * Permission is hereby granted, free of charge, to any person obtaining a
8c0e09200SDave Airlie  * copy of this software and associated documentation files (the
9c0e09200SDave Airlie  * "Software"), to deal in the Software without restriction, including
10c0e09200SDave Airlie  * without limitation the rights to use, copy, modify, merge, publish,
11c0e09200SDave Airlie  * distribute, sub license, and/or sell copies of the Software, and to
12c0e09200SDave Airlie  * permit persons to whom the Software is furnished to do so, subject to
13c0e09200SDave Airlie  * the following conditions:
14c0e09200SDave Airlie  *
15c0e09200SDave Airlie  * The above copyright notice and this permission notice (including the
16c0e09200SDave Airlie  * next paragraph) shall be included in all copies or substantial portions
17c0e09200SDave Airlie  * of the Software.
18c0e09200SDave Airlie  *
19c0e09200SDave Airlie  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20c0e09200SDave Airlie  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21c0e09200SDave Airlie  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22c0e09200SDave Airlie  * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23c0e09200SDave Airlie  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24c0e09200SDave Airlie  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25c0e09200SDave Airlie  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26c0e09200SDave Airlie  *
27c0e09200SDave Airlie  */
28c0e09200SDave Airlie 
29a70491ccSJoe Perches #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30a70491ccSJoe Perches 
31b2c88f5bSDamien Lespiau #include <linux/circ_buf.h>
3255367a27SJani Nikula #include <linux/cpuidle.h>
3355367a27SJani Nikula #include <linux/slab.h>
3455367a27SJani Nikula #include <linux/sysrq.h>
3555367a27SJani Nikula 
36fcd70cd3SDaniel Vetter #include <drm/drm_drv.h>
3755367a27SJani Nikula #include <drm/drm_irq.h>
38760285e7SDavid Howells #include <drm/i915_drm.h>
3955367a27SJani Nikula 
40df0566a6SJani Nikula #include "display/intel_fifo_underrun.h"
41df0566a6SJani Nikula #include "display/intel_hotplug.h"
42df0566a6SJani Nikula #include "display/intel_lpe_audio.h"
43df0566a6SJani Nikula #include "display/intel_psr.h"
44df0566a6SJani Nikula 
45*2239e6dfSDaniele Ceraolo Spurio #include "gt/intel_gt.h"
46*2239e6dfSDaniele Ceraolo Spurio 
47c0e09200SDave Airlie #include "i915_drv.h"
48440e2b3dSJani Nikula #include "i915_irq.h"
491c5d22f7SChris Wilson #include "i915_trace.h"
5079e53945SJesse Barnes #include "intel_drv.h"
51d13616dbSJani Nikula #include "intel_pm.h"
52c0e09200SDave Airlie 
53fca52a55SDaniel Vetter /**
54fca52a55SDaniel Vetter  * DOC: interrupt handling
55fca52a55SDaniel Vetter  *
56fca52a55SDaniel Vetter  * These functions provide the basic support for enabling and disabling the
57fca52a55SDaniel Vetter  * interrupt handling support. There's a lot more functionality in i915_irq.c
58fca52a55SDaniel Vetter  * and related files, but that will be described in separate chapters.
59fca52a55SDaniel Vetter  */
60fca52a55SDaniel Vetter 
61e4ce95aaSVille Syrjälä static const u32 hpd_ilk[HPD_NUM_PINS] = {
62e4ce95aaSVille Syrjälä 	[HPD_PORT_A] = DE_DP_A_HOTPLUG,
63e4ce95aaSVille Syrjälä };
64e4ce95aaSVille Syrjälä 
6523bb4cb5SVille Syrjälä static const u32 hpd_ivb[HPD_NUM_PINS] = {
6623bb4cb5SVille Syrjälä 	[HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
6723bb4cb5SVille Syrjälä };
6823bb4cb5SVille Syrjälä 
693a3b3c7dSVille Syrjälä static const u32 hpd_bdw[HPD_NUM_PINS] = {
703a3b3c7dSVille Syrjälä 	[HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG,
713a3b3c7dSVille Syrjälä };
723a3b3c7dSVille Syrjälä 
737c7e10dbSVille Syrjälä static const u32 hpd_ibx[HPD_NUM_PINS] = {
74e5868a31SEgbert Eich 	[HPD_CRT] = SDE_CRT_HOTPLUG,
75e5868a31SEgbert Eich 	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
76e5868a31SEgbert Eich 	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
77e5868a31SEgbert Eich 	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
78e5868a31SEgbert Eich 	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
79e5868a31SEgbert Eich };
80e5868a31SEgbert Eich 
817c7e10dbSVille Syrjälä static const u32 hpd_cpt[HPD_NUM_PINS] = {
82e5868a31SEgbert Eich 	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
8373c352a2SDaniel Vetter 	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
84e5868a31SEgbert Eich 	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
85e5868a31SEgbert Eich 	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
86e5868a31SEgbert Eich 	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
87e5868a31SEgbert Eich };
88e5868a31SEgbert Eich 
8926951cafSXiong Zhang static const u32 hpd_spt[HPD_NUM_PINS] = {
9074c0b395SVille Syrjälä 	[HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
9126951cafSXiong Zhang 	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
9226951cafSXiong Zhang 	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
9326951cafSXiong Zhang 	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
9426951cafSXiong Zhang 	[HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT
9526951cafSXiong Zhang };
9626951cafSXiong Zhang 
977c7e10dbSVille Syrjälä static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
98e5868a31SEgbert Eich 	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
99e5868a31SEgbert Eich 	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
100e5868a31SEgbert Eich 	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
101e5868a31SEgbert Eich 	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
102e5868a31SEgbert Eich 	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
103e5868a31SEgbert Eich 	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
104e5868a31SEgbert Eich };
105e5868a31SEgbert Eich 
1067c7e10dbSVille Syrjälä static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
107e5868a31SEgbert Eich 	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
108e5868a31SEgbert Eich 	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
109e5868a31SEgbert Eich 	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
110e5868a31SEgbert Eich 	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
111e5868a31SEgbert Eich 	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
112e5868a31SEgbert Eich 	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
113e5868a31SEgbert Eich };
114e5868a31SEgbert Eich 
1154bca26d0SVille Syrjälä static const u32 hpd_status_i915[HPD_NUM_PINS] = {
116e5868a31SEgbert Eich 	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
117e5868a31SEgbert Eich 	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
118e5868a31SEgbert Eich 	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
119e5868a31SEgbert Eich 	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
120e5868a31SEgbert Eich 	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
121e5868a31SEgbert Eich 	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
122e5868a31SEgbert Eich };
123e5868a31SEgbert Eich 
124e0a20ad7SShashank Sharma /* BXT hpd list */
125e0a20ad7SShashank Sharma static const u32 hpd_bxt[HPD_NUM_PINS] = {
1267f3561beSSonika Jindal 	[HPD_PORT_A] = BXT_DE_PORT_HP_DDIA,
127e0a20ad7SShashank Sharma 	[HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
128e0a20ad7SShashank Sharma 	[HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
129e0a20ad7SShashank Sharma };
130e0a20ad7SShashank Sharma 
131b796b971SDhinakaran Pandiyan static const u32 hpd_gen11[HPD_NUM_PINS] = {
132b796b971SDhinakaran Pandiyan 	[HPD_PORT_C] = GEN11_TC1_HOTPLUG | GEN11_TBT1_HOTPLUG,
133b796b971SDhinakaran Pandiyan 	[HPD_PORT_D] = GEN11_TC2_HOTPLUG | GEN11_TBT2_HOTPLUG,
134b796b971SDhinakaran Pandiyan 	[HPD_PORT_E] = GEN11_TC3_HOTPLUG | GEN11_TBT3_HOTPLUG,
135b796b971SDhinakaran Pandiyan 	[HPD_PORT_F] = GEN11_TC4_HOTPLUG | GEN11_TBT4_HOTPLUG
136121e758eSDhinakaran Pandiyan };
137121e758eSDhinakaran Pandiyan 
13831604222SAnusha Srivatsa static const u32 hpd_icp[HPD_NUM_PINS] = {
13931604222SAnusha Srivatsa 	[HPD_PORT_A] = SDE_DDIA_HOTPLUG_ICP,
14031604222SAnusha Srivatsa 	[HPD_PORT_B] = SDE_DDIB_HOTPLUG_ICP,
14131604222SAnusha Srivatsa 	[HPD_PORT_C] = SDE_TC1_HOTPLUG_ICP,
14231604222SAnusha Srivatsa 	[HPD_PORT_D] = SDE_TC2_HOTPLUG_ICP,
14331604222SAnusha Srivatsa 	[HPD_PORT_E] = SDE_TC3_HOTPLUG_ICP,
14431604222SAnusha Srivatsa 	[HPD_PORT_F] = SDE_TC4_HOTPLUG_ICP
14531604222SAnusha Srivatsa };
14631604222SAnusha Srivatsa 
147c6f7acb8SMatt Roper static const u32 hpd_mcc[HPD_NUM_PINS] = {
148c6f7acb8SMatt Roper 	[HPD_PORT_A] = SDE_DDIA_HOTPLUG_ICP,
149c6f7acb8SMatt Roper 	[HPD_PORT_B] = SDE_DDIB_HOTPLUG_ICP,
150c6f7acb8SMatt Roper 	[HPD_PORT_C] = SDE_TC1_HOTPLUG_ICP
151c6f7acb8SMatt Roper };
152c6f7acb8SMatt Roper 
15365f42cdcSPaulo Zanoni static void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr,
15468eb49b1SPaulo Zanoni 			   i915_reg_t iir, i915_reg_t ier)
15568eb49b1SPaulo Zanoni {
15665f42cdcSPaulo Zanoni 	intel_uncore_write(uncore, imr, 0xffffffff);
15765f42cdcSPaulo Zanoni 	intel_uncore_posting_read(uncore, imr);
15868eb49b1SPaulo Zanoni 
15965f42cdcSPaulo Zanoni 	intel_uncore_write(uncore, ier, 0);
16068eb49b1SPaulo Zanoni 
1615c502442SPaulo Zanoni 	/* IIR can theoretically queue up two events. Be paranoid. */
16265f42cdcSPaulo Zanoni 	intel_uncore_write(uncore, iir, 0xffffffff);
16365f42cdcSPaulo Zanoni 	intel_uncore_posting_read(uncore, iir);
16465f42cdcSPaulo Zanoni 	intel_uncore_write(uncore, iir, 0xffffffff);
16565f42cdcSPaulo Zanoni 	intel_uncore_posting_read(uncore, iir);
16668eb49b1SPaulo Zanoni }
1675c502442SPaulo Zanoni 
16865f42cdcSPaulo Zanoni static void gen2_irq_reset(struct intel_uncore *uncore)
16968eb49b1SPaulo Zanoni {
17065f42cdcSPaulo Zanoni 	intel_uncore_write16(uncore, GEN2_IMR, 0xffff);
17165f42cdcSPaulo Zanoni 	intel_uncore_posting_read16(uncore, GEN2_IMR);
172a9d356a6SPaulo Zanoni 
17365f42cdcSPaulo Zanoni 	intel_uncore_write16(uncore, GEN2_IER, 0);
17468eb49b1SPaulo Zanoni 
17568eb49b1SPaulo Zanoni 	/* IIR can theoretically queue up two events. Be paranoid. */
17665f42cdcSPaulo Zanoni 	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
17765f42cdcSPaulo Zanoni 	intel_uncore_posting_read16(uncore, GEN2_IIR);
17865f42cdcSPaulo Zanoni 	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
17965f42cdcSPaulo Zanoni 	intel_uncore_posting_read16(uncore, GEN2_IIR);
18068eb49b1SPaulo Zanoni }
18168eb49b1SPaulo Zanoni 
182b16b2a2fSPaulo Zanoni #define GEN8_IRQ_RESET_NDX(uncore, type, which) \
18368eb49b1SPaulo Zanoni ({ \
18468eb49b1SPaulo Zanoni 	unsigned int which_ = which; \
185b16b2a2fSPaulo Zanoni 	gen3_irq_reset((uncore), GEN8_##type##_IMR(which_), \
18668eb49b1SPaulo Zanoni 		       GEN8_##type##_IIR(which_), GEN8_##type##_IER(which_)); \
18768eb49b1SPaulo Zanoni })
18868eb49b1SPaulo Zanoni 
189b16b2a2fSPaulo Zanoni #define GEN3_IRQ_RESET(uncore, type) \
190b16b2a2fSPaulo Zanoni 	gen3_irq_reset((uncore), type##IMR, type##IIR, type##IER)
19168eb49b1SPaulo Zanoni 
192b16b2a2fSPaulo Zanoni #define GEN2_IRQ_RESET(uncore) \
193b16b2a2fSPaulo Zanoni 	gen2_irq_reset(uncore)
194e9e9848aSVille Syrjälä 
195337ba017SPaulo Zanoni /*
196337ba017SPaulo Zanoni  * We should clear IMR at preinstall/uninstall, and just check at postinstall.
197337ba017SPaulo Zanoni  */
19865f42cdcSPaulo Zanoni static void gen3_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg)
199b51a2842SVille Syrjälä {
20065f42cdcSPaulo Zanoni 	u32 val = intel_uncore_read(uncore, reg);
201b51a2842SVille Syrjälä 
202b51a2842SVille Syrjälä 	if (val == 0)
203b51a2842SVille Syrjälä 		return;
204b51a2842SVille Syrjälä 
205b51a2842SVille Syrjälä 	WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
206f0f59a00SVille Syrjälä 	     i915_mmio_reg_offset(reg), val);
20765f42cdcSPaulo Zanoni 	intel_uncore_write(uncore, reg, 0xffffffff);
20865f42cdcSPaulo Zanoni 	intel_uncore_posting_read(uncore, reg);
20965f42cdcSPaulo Zanoni 	intel_uncore_write(uncore, reg, 0xffffffff);
21065f42cdcSPaulo Zanoni 	intel_uncore_posting_read(uncore, reg);
211b51a2842SVille Syrjälä }
212337ba017SPaulo Zanoni 
21365f42cdcSPaulo Zanoni static void gen2_assert_iir_is_zero(struct intel_uncore *uncore)
214e9e9848aSVille Syrjälä {
21565f42cdcSPaulo Zanoni 	u16 val = intel_uncore_read16(uncore, GEN2_IIR);
216e9e9848aSVille Syrjälä 
217e9e9848aSVille Syrjälä 	if (val == 0)
218e9e9848aSVille Syrjälä 		return;
219e9e9848aSVille Syrjälä 
220e9e9848aSVille Syrjälä 	WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
2219d9523d8SPaulo Zanoni 	     i915_mmio_reg_offset(GEN2_IIR), val);
22265f42cdcSPaulo Zanoni 	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
22365f42cdcSPaulo Zanoni 	intel_uncore_posting_read16(uncore, GEN2_IIR);
22465f42cdcSPaulo Zanoni 	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
22565f42cdcSPaulo Zanoni 	intel_uncore_posting_read16(uncore, GEN2_IIR);
226e9e9848aSVille Syrjälä }
227e9e9848aSVille Syrjälä 
22865f42cdcSPaulo Zanoni static void gen3_irq_init(struct intel_uncore *uncore,
22968eb49b1SPaulo Zanoni 			  i915_reg_t imr, u32 imr_val,
23068eb49b1SPaulo Zanoni 			  i915_reg_t ier, u32 ier_val,
23168eb49b1SPaulo Zanoni 			  i915_reg_t iir)
23268eb49b1SPaulo Zanoni {
23365f42cdcSPaulo Zanoni 	gen3_assert_iir_is_zero(uncore, iir);
23435079899SPaulo Zanoni 
23565f42cdcSPaulo Zanoni 	intel_uncore_write(uncore, ier, ier_val);
23665f42cdcSPaulo Zanoni 	intel_uncore_write(uncore, imr, imr_val);
23765f42cdcSPaulo Zanoni 	intel_uncore_posting_read(uncore, imr);
23868eb49b1SPaulo Zanoni }
23935079899SPaulo Zanoni 
24065f42cdcSPaulo Zanoni static void gen2_irq_init(struct intel_uncore *uncore,
2412918c3caSPaulo Zanoni 			  u32 imr_val, u32 ier_val)
24268eb49b1SPaulo Zanoni {
24365f42cdcSPaulo Zanoni 	gen2_assert_iir_is_zero(uncore);
24468eb49b1SPaulo Zanoni 
24565f42cdcSPaulo Zanoni 	intel_uncore_write16(uncore, GEN2_IER, ier_val);
24665f42cdcSPaulo Zanoni 	intel_uncore_write16(uncore, GEN2_IMR, imr_val);
24765f42cdcSPaulo Zanoni 	intel_uncore_posting_read16(uncore, GEN2_IMR);
24868eb49b1SPaulo Zanoni }
24968eb49b1SPaulo Zanoni 
250b16b2a2fSPaulo Zanoni #define GEN8_IRQ_INIT_NDX(uncore, type, which, imr_val, ier_val) \
25168eb49b1SPaulo Zanoni ({ \
25268eb49b1SPaulo Zanoni 	unsigned int which_ = which; \
253b16b2a2fSPaulo Zanoni 	gen3_irq_init((uncore), \
25468eb49b1SPaulo Zanoni 		      GEN8_##type##_IMR(which_), imr_val, \
25568eb49b1SPaulo Zanoni 		      GEN8_##type##_IER(which_), ier_val, \
25668eb49b1SPaulo Zanoni 		      GEN8_##type##_IIR(which_)); \
25768eb49b1SPaulo Zanoni })
25868eb49b1SPaulo Zanoni 
259b16b2a2fSPaulo Zanoni #define GEN3_IRQ_INIT(uncore, type, imr_val, ier_val) \
260b16b2a2fSPaulo Zanoni 	gen3_irq_init((uncore), \
26168eb49b1SPaulo Zanoni 		      type##IMR, imr_val, \
26268eb49b1SPaulo Zanoni 		      type##IER, ier_val, \
26368eb49b1SPaulo Zanoni 		      type##IIR)
26468eb49b1SPaulo Zanoni 
265b16b2a2fSPaulo Zanoni #define GEN2_IRQ_INIT(uncore, imr_val, ier_val) \
266b16b2a2fSPaulo Zanoni 	gen2_irq_init((uncore), imr_val, ier_val)
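
/*
 * Illustrative expansion (not part of the driver itself): the macro
 * argument is a register name prefix, so, assuming the DE* display
 * engine registers, GEN3_IRQ_RESET(uncore, DE) expands to
 *
 *	gen3_irq_reset(uncore, DEIMR, DEIIR, DEIER);
 *
 * and GEN3_IRQ_INIT() pastes the same prefix onto IMR/IER/IIR.
 */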
267e9e9848aSVille Syrjälä 
268c9a9a268SImre Deak static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
269633023a4SDaniele Ceraolo Spurio static void guc_irq_handler(struct intel_guc *guc, u16 guc_iir);
270c9a9a268SImre Deak 
2710706f17cSEgbert Eich /* For display hotplug interrupt */
2720706f17cSEgbert Eich static inline void
2730706f17cSEgbert Eich i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
274a9c287c9SJani Nikula 				     u32 mask,
275a9c287c9SJani Nikula 				     u32 bits)
2760706f17cSEgbert Eich {
277a9c287c9SJani Nikula 	u32 val;
2780706f17cSEgbert Eich 
27967520415SChris Wilson 	lockdep_assert_held(&dev_priv->irq_lock);
2800706f17cSEgbert Eich 	WARN_ON(bits & ~mask);
2810706f17cSEgbert Eich 
2820706f17cSEgbert Eich 	val = I915_READ(PORT_HOTPLUG_EN);
2830706f17cSEgbert Eich 	val &= ~mask;
2840706f17cSEgbert Eich 	val |= bits;
2850706f17cSEgbert Eich 	I915_WRITE(PORT_HOTPLUG_EN, val);
2860706f17cSEgbert Eich }
2870706f17cSEgbert Eich 
2880706f17cSEgbert Eich /**
2890706f17cSEgbert Eich  * i915_hotplug_interrupt_update - update hotplug interrupt enable
2900706f17cSEgbert Eich  * @dev_priv: driver private
2910706f17cSEgbert Eich  * @mask: bits to update
2920706f17cSEgbert Eich  * @bits: bits to enable
2930706f17cSEgbert Eich  * NOTE: the HPD enable bits are modified both inside and outside
2940706f17cSEgbert Eich  * of an interrupt context. To avoid interfering
2950706f17cSEgbert Eich  * read-modify-write cycles, these bits are protected by a spinlock. Since this
2960706f17cSEgbert Eich  * function is usually not called from a context where the lock is
2970706f17cSEgbert Eich  * held already, this function acquires the lock itself. A non-locking
2980706f17cSEgbert Eich  * version is also available.
2990706f17cSEgbert Eich  */
3000706f17cSEgbert Eich void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
301a9c287c9SJani Nikula 				   u32 mask,
302a9c287c9SJani Nikula 				   u32 bits)
3030706f17cSEgbert Eich {
3040706f17cSEgbert Eich 	spin_lock_irq(&dev_priv->irq_lock);
3050706f17cSEgbert Eich 	i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
3060706f17cSEgbert Eich 	spin_unlock_irq(&dev_priv->irq_lock);
3070706f17cSEgbert Eich }
3080706f17cSEgbert Eich 
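/*
 * Usage sketch (illustrative only): a caller that wants to enable just
 * the CRT hotplug interrupt while leaving the other HPD enable bits
 * untouched passes the same bit in both arguments:
 *
 *	i915_hotplug_interrupt_update(dev_priv, CRT_HOTPLUG_INT_EN,
 *				      CRT_HOTPLUG_INT_EN);
 *
 * Passing the bit only in @mask (with 0 in @bits) disables it instead.
 */
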
30996606f3bSOscar Mateo static u32
3109b77011eSTvrtko Ursulin gen11_gt_engine_identity(struct intel_gt *gt,
31196606f3bSOscar Mateo 			 const unsigned int bank, const unsigned int bit);
31296606f3bSOscar Mateo 
3139b77011eSTvrtko Ursulin static bool gen11_reset_one_iir(struct intel_gt *gt,
31496606f3bSOscar Mateo 				const unsigned int bank,
31596606f3bSOscar Mateo 				const unsigned int bit)
31696606f3bSOscar Mateo {
3179b77011eSTvrtko Ursulin 	void __iomem * const regs = gt->uncore->regs;
31896606f3bSOscar Mateo 	u32 dw;
31996606f3bSOscar Mateo 
3209b77011eSTvrtko Ursulin 	lockdep_assert_held(&gt->i915->irq_lock);
32196606f3bSOscar Mateo 
32296606f3bSOscar Mateo 	dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
32396606f3bSOscar Mateo 	if (dw & BIT(bit)) {
32496606f3bSOscar Mateo 		/*
32596606f3bSOscar Mateo 		 * According to the BSpec, DW_IIR bits cannot be cleared without
32696606f3bSOscar Mateo 		 * first servicing the Selector & Shared IIR registers.
32796606f3bSOscar Mateo 		 */
3289b77011eSTvrtko Ursulin 		gen11_gt_engine_identity(gt, bank, bit);
32996606f3bSOscar Mateo 
33096606f3bSOscar Mateo 		/*
33196606f3bSOscar Mateo 		 * We locked GT INT DW by reading it. If we want to (try
33296606f3bSOscar Mateo 		 * to) recover from this successfully, we need to clear
33396606f3bSOscar Mateo 		 * our bit, otherwise we are locking the register for
33496606f3bSOscar Mateo 		 * everybody.
33596606f3bSOscar Mateo 		 */
33696606f3bSOscar Mateo 		raw_reg_write(regs, GEN11_GT_INTR_DW(bank), BIT(bit));
33796606f3bSOscar Mateo 
33896606f3bSOscar Mateo 		return true;
33996606f3bSOscar Mateo 	}
34096606f3bSOscar Mateo 
34196606f3bSOscar Mateo 	return false;
34296606f3bSOscar Mateo }
34396606f3bSOscar Mateo 
344d9dc34f1SVille Syrjälä /**
345d9dc34f1SVille Syrjälä  * ilk_update_display_irq - update DEIMR
346d9dc34f1SVille Syrjälä  * @dev_priv: driver private
347d9dc34f1SVille Syrjälä  * @interrupt_mask: mask of interrupt bits to update
348d9dc34f1SVille Syrjälä  * @enabled_irq_mask: mask of interrupt bits to enable
349d9dc34f1SVille Syrjälä  */
350fbdedaeaSVille Syrjälä void ilk_update_display_irq(struct drm_i915_private *dev_priv,
351a9c287c9SJani Nikula 			    u32 interrupt_mask,
352a9c287c9SJani Nikula 			    u32 enabled_irq_mask)
353036a4a7dSZhenyu Wang {
354a9c287c9SJani Nikula 	u32 new_val;
355d9dc34f1SVille Syrjälä 
35667520415SChris Wilson 	lockdep_assert_held(&dev_priv->irq_lock);
3574bc9d430SDaniel Vetter 
358d9dc34f1SVille Syrjälä 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
359d9dc34f1SVille Syrjälä 
3609df7575fSJesse Barnes 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
361c67a470bSPaulo Zanoni 		return;
362c67a470bSPaulo Zanoni 
363d9dc34f1SVille Syrjälä 	new_val = dev_priv->irq_mask;
364d9dc34f1SVille Syrjälä 	new_val &= ~interrupt_mask;
365d9dc34f1SVille Syrjälä 	new_val |= (~enabled_irq_mask & interrupt_mask);
366d9dc34f1SVille Syrjälä 
367d9dc34f1SVille Syrjälä 	if (new_val != dev_priv->irq_mask) {
368d9dc34f1SVille Syrjälä 		dev_priv->irq_mask = new_val;
3691ec14ad3SChris Wilson 		I915_WRITE(DEIMR, dev_priv->irq_mask);
3703143a2bfSChris Wilson 		POSTING_READ(DEIMR);
371036a4a7dSZhenyu Wang 	}
372036a4a7dSZhenyu Wang }
373036a4a7dSZhenyu Wang 
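/*
 * Worked example (illustrative): DEIMR uses "1 = masked". Calling
 * ilk_update_display_irq(dev_priv, DE_DP_A_HOTPLUG, DE_DP_A_HOTPLUG)
 * clears that bit in DEIMR (interrupt unmasked), while passing 0 as
 * enabled_irq_mask sets it again (interrupt masked). Bits outside
 * interrupt_mask keep their previous DEIMR value.
 */
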
37443eaea13SPaulo Zanoni /**
37543eaea13SPaulo Zanoni  * ilk_update_gt_irq - update GTIMR
37643eaea13SPaulo Zanoni  * @dev_priv: driver private
37743eaea13SPaulo Zanoni  * @interrupt_mask: mask of interrupt bits to update
37843eaea13SPaulo Zanoni  * @enabled_irq_mask: mask of interrupt bits to enable
37943eaea13SPaulo Zanoni  */
38043eaea13SPaulo Zanoni static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
381a9c287c9SJani Nikula 			      u32 interrupt_mask,
382a9c287c9SJani Nikula 			      u32 enabled_irq_mask)
38343eaea13SPaulo Zanoni {
38467520415SChris Wilson 	lockdep_assert_held(&dev_priv->irq_lock);
38543eaea13SPaulo Zanoni 
38615a17aaeSDaniel Vetter 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
38715a17aaeSDaniel Vetter 
3889df7575fSJesse Barnes 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
389c67a470bSPaulo Zanoni 		return;
390c67a470bSPaulo Zanoni 
39143eaea13SPaulo Zanoni 	dev_priv->gt_irq_mask &= ~interrupt_mask;
39243eaea13SPaulo Zanoni 	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
39343eaea13SPaulo Zanoni 	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
39443eaea13SPaulo Zanoni }
39543eaea13SPaulo Zanoni 
396a9c287c9SJani Nikula void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, u32 mask)
39743eaea13SPaulo Zanoni {
39843eaea13SPaulo Zanoni 	ilk_update_gt_irq(dev_priv, mask, mask);
399e33a4be8STvrtko Ursulin 	intel_uncore_posting_read_fw(&dev_priv->uncore, GTIMR);
40043eaea13SPaulo Zanoni }
40143eaea13SPaulo Zanoni 
402a9c287c9SJani Nikula void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, u32 mask)
40343eaea13SPaulo Zanoni {
40443eaea13SPaulo Zanoni 	ilk_update_gt_irq(dev_priv, mask, 0);
40543eaea13SPaulo Zanoni }
40643eaea13SPaulo Zanoni 
407f0f59a00SVille Syrjälä static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv)
408b900b949SImre Deak {
409d02b98b8SOscar Mateo 	WARN_ON_ONCE(INTEL_GEN(dev_priv) >= 11);
410d02b98b8SOscar Mateo 
411bca2bf2aSPandiyan, Dhinakaran 	return INTEL_GEN(dev_priv) >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
412b900b949SImre Deak }
413b900b949SImre Deak 
41458820574STvrtko Ursulin static void write_pm_imr(struct intel_gt *gt)
415a72fbc3aSImre Deak {
41658820574STvrtko Ursulin 	struct drm_i915_private *i915 = gt->i915;
41758820574STvrtko Ursulin 	struct intel_uncore *uncore = gt->uncore;
41858820574STvrtko Ursulin 	u32 mask = gt->pm_imr;
419917dc6b5SMika Kuoppala 	i915_reg_t reg;
420917dc6b5SMika Kuoppala 
42158820574STvrtko Ursulin 	if (INTEL_GEN(i915) >= 11) {
422917dc6b5SMika Kuoppala 		reg = GEN11_GPM_WGBOXPERF_INTR_MASK;
423917dc6b5SMika Kuoppala 		/* pm is in upper half */
424917dc6b5SMika Kuoppala 		mask = mask << 16;
42558820574STvrtko Ursulin 	} else if (INTEL_GEN(i915) >= 8) {
426917dc6b5SMika Kuoppala 		reg = GEN8_GT_IMR(2);
427917dc6b5SMika Kuoppala 	} else {
428917dc6b5SMika Kuoppala 		reg = GEN6_PMIMR;
429a72fbc3aSImre Deak 	}
430a72fbc3aSImre Deak 
43158820574STvrtko Ursulin 	intel_uncore_write(uncore, reg, mask);
43258820574STvrtko Ursulin 	intel_uncore_posting_read(uncore, reg);
433917dc6b5SMika Kuoppala }
434917dc6b5SMika Kuoppala 
43558820574STvrtko Ursulin static void write_pm_ier(struct intel_gt *gt)
436b900b949SImre Deak {
43758820574STvrtko Ursulin 	struct drm_i915_private *i915 = gt->i915;
43858820574STvrtko Ursulin 	struct intel_uncore *uncore = gt->uncore;
43958820574STvrtko Ursulin 	u32 mask = gt->pm_ier;
440917dc6b5SMika Kuoppala 	i915_reg_t reg;
441917dc6b5SMika Kuoppala 
44258820574STvrtko Ursulin 	if (INTEL_GEN(i915) >= 11) {
443917dc6b5SMika Kuoppala 		reg = GEN11_GPM_WGBOXPERF_INTR_ENABLE;
444917dc6b5SMika Kuoppala 		/* pm is in upper half */
445917dc6b5SMika Kuoppala 		mask = mask << 16;
44658820574STvrtko Ursulin 	} else if (INTEL_GEN(i915) >= 8) {
447917dc6b5SMika Kuoppala 		reg = GEN8_GT_IER(2);
448917dc6b5SMika Kuoppala 	} else {
449917dc6b5SMika Kuoppala 		reg = GEN6_PMIER;
450917dc6b5SMika Kuoppala 	}
451917dc6b5SMika Kuoppala 
45258820574STvrtko Ursulin 	intel_uncore_write(uncore, reg, mask);
453b900b949SImre Deak }
454b900b949SImre Deak 
455edbfdb45SPaulo Zanoni /**
456edbfdb45SPaulo Zanoni  * snb_update_pm_irq - update GEN6_PMIMR
45758820574STvrtko Ursulin  * @gt: gt for the interrupts
458edbfdb45SPaulo Zanoni  * @interrupt_mask: mask of interrupt bits to update
459edbfdb45SPaulo Zanoni  * @enabled_irq_mask: mask of interrupt bits to enable
460edbfdb45SPaulo Zanoni  */
46158820574STvrtko Ursulin static void snb_update_pm_irq(struct intel_gt *gt,
462a9c287c9SJani Nikula 			      u32 interrupt_mask,
463a9c287c9SJani Nikula 			      u32 enabled_irq_mask)
464edbfdb45SPaulo Zanoni {
465a9c287c9SJani Nikula 	u32 new_val;
466edbfdb45SPaulo Zanoni 
46715a17aaeSDaniel Vetter 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
46815a17aaeSDaniel Vetter 
46958820574STvrtko Ursulin 	lockdep_assert_held(&gt->i915->irq_lock);
470edbfdb45SPaulo Zanoni 
47158820574STvrtko Ursulin 	new_val = gt->pm_imr;
472f52ecbcfSPaulo Zanoni 	new_val &= ~interrupt_mask;
473f52ecbcfSPaulo Zanoni 	new_val |= (~enabled_irq_mask & interrupt_mask);
474f52ecbcfSPaulo Zanoni 
47558820574STvrtko Ursulin 	if (new_val != gt->pm_imr) {
47658820574STvrtko Ursulin 		gt->pm_imr = new_val;
47758820574STvrtko Ursulin 		write_pm_imr(gt);
478edbfdb45SPaulo Zanoni 	}
479f52ecbcfSPaulo Zanoni }
480edbfdb45SPaulo Zanoni 
48158820574STvrtko Ursulin void gen6_unmask_pm_irq(struct intel_gt *gt, u32 mask)
482edbfdb45SPaulo Zanoni {
48358820574STvrtko Ursulin 	if (WARN_ON(!intel_irqs_enabled(gt->i915)))
4849939fba2SImre Deak 		return;
4859939fba2SImre Deak 
48658820574STvrtko Ursulin 	snb_update_pm_irq(gt, mask, mask);
487edbfdb45SPaulo Zanoni }
488edbfdb45SPaulo Zanoni 
48958820574STvrtko Ursulin static void __gen6_mask_pm_irq(struct intel_gt *gt, u32 mask)
4909939fba2SImre Deak {
49158820574STvrtko Ursulin 	snb_update_pm_irq(gt, mask, 0);
4929939fba2SImre Deak }
4939939fba2SImre Deak 
49458820574STvrtko Ursulin void gen6_mask_pm_irq(struct intel_gt *gt, u32 mask)
495edbfdb45SPaulo Zanoni {
49658820574STvrtko Ursulin 	if (WARN_ON(!intel_irqs_enabled(gt->i915)))
4979939fba2SImre Deak 		return;
4989939fba2SImre Deak 
49958820574STvrtko Ursulin 	__gen6_mask_pm_irq(gt, mask);
500f4e9af4fSAkash Goel }
501f4e9af4fSAkash Goel 
5023814fd77SOscar Mateo static void gen6_reset_pm_iir(struct drm_i915_private *dev_priv, u32 reset_mask)
503f4e9af4fSAkash Goel {
504f4e9af4fSAkash Goel 	i915_reg_t reg = gen6_pm_iir(dev_priv);
505f4e9af4fSAkash Goel 
50667520415SChris Wilson 	lockdep_assert_held(&dev_priv->irq_lock);
507f4e9af4fSAkash Goel 
508f4e9af4fSAkash Goel 	I915_WRITE(reg, reset_mask);
509f4e9af4fSAkash Goel 	I915_WRITE(reg, reset_mask);
510f4e9af4fSAkash Goel 	POSTING_READ(reg);
511f4e9af4fSAkash Goel }
512f4e9af4fSAkash Goel 
51358820574STvrtko Ursulin static void gen6_enable_pm_irq(struct intel_gt *gt, u32 enable_mask)
514f4e9af4fSAkash Goel {
51558820574STvrtko Ursulin 	lockdep_assert_held(&gt->i915->irq_lock);
516f4e9af4fSAkash Goel 
51758820574STvrtko Ursulin 	gt->pm_ier |= enable_mask;
51858820574STvrtko Ursulin 	write_pm_ier(gt);
51958820574STvrtko Ursulin 	gen6_unmask_pm_irq(gt, enable_mask);
520f4e9af4fSAkash Goel 	/* unmask_pm_irq provides an implicit barrier (POSTING_READ) */
521f4e9af4fSAkash Goel }
522f4e9af4fSAkash Goel 
52358820574STvrtko Ursulin static void gen6_disable_pm_irq(struct intel_gt *gt, u32 disable_mask)
524f4e9af4fSAkash Goel {
52558820574STvrtko Ursulin 	lockdep_assert_held(&gt->i915->irq_lock);
526f4e9af4fSAkash Goel 
52758820574STvrtko Ursulin 	gt->pm_ier &= ~disable_mask;
52858820574STvrtko Ursulin 	__gen6_mask_pm_irq(gt, disable_mask);
52958820574STvrtko Ursulin 	write_pm_ier(gt);
530f4e9af4fSAkash Goel 	/* though a barrier is missing here, we don't really need one */
531edbfdb45SPaulo Zanoni }
532edbfdb45SPaulo Zanoni 
533d02b98b8SOscar Mateo void gen11_reset_rps_interrupts(struct drm_i915_private *dev_priv)
534d02b98b8SOscar Mateo {
535d02b98b8SOscar Mateo 	spin_lock_irq(&dev_priv->irq_lock);
536d02b98b8SOscar Mateo 
5379b77011eSTvrtko Ursulin 	while (gen11_reset_one_iir(&dev_priv->gt, 0, GEN11_GTPM))
53896606f3bSOscar Mateo 		;
539d02b98b8SOscar Mateo 
540d02b98b8SOscar Mateo 	dev_priv->gt_pm.rps.pm_iir = 0;
541d02b98b8SOscar Mateo 
542d02b98b8SOscar Mateo 	spin_unlock_irq(&dev_priv->irq_lock);
543d02b98b8SOscar Mateo }
544d02b98b8SOscar Mateo 
545dc97997aSChris Wilson void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
5463cc134e3SImre Deak {
5473cc134e3SImre Deak 	spin_lock_irq(&dev_priv->irq_lock);
5484668f695SChris Wilson 	gen6_reset_pm_iir(dev_priv, GEN6_PM_RPS_EVENTS);
549562d9baeSSagar Arun Kamble 	dev_priv->gt_pm.rps.pm_iir = 0;
5503cc134e3SImre Deak 	spin_unlock_irq(&dev_priv->irq_lock);
5513cc134e3SImre Deak }
5523cc134e3SImre Deak 
55391d14251STvrtko Ursulin void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
554b900b949SImre Deak {
55558820574STvrtko Ursulin 	struct intel_gt *gt = &dev_priv->gt;
556562d9baeSSagar Arun Kamble 	struct intel_rps *rps = &dev_priv->gt_pm.rps;
557562d9baeSSagar Arun Kamble 
558562d9baeSSagar Arun Kamble 	if (READ_ONCE(rps->interrupts_enabled))
559f2a91d1aSChris Wilson 		return;
560f2a91d1aSChris Wilson 
561b900b949SImre Deak 	spin_lock_irq(&dev_priv->irq_lock);
562562d9baeSSagar Arun Kamble 	WARN_ON_ONCE(rps->pm_iir);
56396606f3bSOscar Mateo 
564d02b98b8SOscar Mateo 	if (INTEL_GEN(dev_priv) >= 11)
56558820574STvrtko Ursulin 		WARN_ON_ONCE(gen11_reset_one_iir(gt, 0, GEN11_GTPM));
566d02b98b8SOscar Mateo 	else
567c33d247dSChris Wilson 		WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
56896606f3bSOscar Mateo 
569562d9baeSSagar Arun Kamble 	rps->interrupts_enabled = true;
57058820574STvrtko Ursulin 	gen6_enable_pm_irq(gt, dev_priv->pm_rps_events);
57178e68d36SImre Deak 
572b900b949SImre Deak 	spin_unlock_irq(&dev_priv->irq_lock);
573b900b949SImre Deak }
574b900b949SImre Deak 
57591d14251STvrtko Ursulin void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
576b900b949SImre Deak {
577562d9baeSSagar Arun Kamble 	struct intel_rps *rps = &dev_priv->gt_pm.rps;
578562d9baeSSagar Arun Kamble 
579562d9baeSSagar Arun Kamble 	if (!READ_ONCE(rps->interrupts_enabled))
580f2a91d1aSChris Wilson 		return;
581f2a91d1aSChris Wilson 
582d4d70aa5SImre Deak 	spin_lock_irq(&dev_priv->irq_lock);
583562d9baeSSagar Arun Kamble 	rps->interrupts_enabled = false;
5849939fba2SImre Deak 
585b20e3cfeSDave Gordon 	I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0u));
5869939fba2SImre Deak 
58758820574STvrtko Ursulin 	gen6_disable_pm_irq(&dev_priv->gt, GEN6_PM_RPS_EVENTS);
58858072ccbSImre Deak 
58958072ccbSImre Deak 	spin_unlock_irq(&dev_priv->irq_lock);
590315ca4c4SVille Syrjälä 	intel_synchronize_irq(dev_priv);
591c33d247dSChris Wilson 
592c33d247dSChris Wilson 	/* Now that we will not be generating any more work, flush any
5933814fd77SOscar Mateo 	 * outstanding tasks. As we are called on the RPS idle path,
594c33d247dSChris Wilson 	 * we will reset the GPU to minimum frequencies, so the current
595c33d247dSChris Wilson 	 * state of the worker can be discarded.
596c33d247dSChris Wilson 	 */
597562d9baeSSagar Arun Kamble 	cancel_work_sync(&rps->work);
598d02b98b8SOscar Mateo 	if (INTEL_GEN(dev_priv) >= 11)
599d02b98b8SOscar Mateo 		gen11_reset_rps_interrupts(dev_priv);
600d02b98b8SOscar Mateo 	else
601c33d247dSChris Wilson 		gen6_reset_rps_interrupts(dev_priv);
602b900b949SImre Deak }
603b900b949SImre Deak 
6049cbd51c2SDaniele Ceraolo Spurio void gen9_reset_guc_interrupts(struct intel_guc *guc)
60526705e20SSagar Arun Kamble {
606*2239e6dfSDaniele Ceraolo Spurio 	struct intel_gt *gt = guc_to_gt(guc);
607*2239e6dfSDaniele Ceraolo Spurio 	struct drm_i915_private *i915 = gt->i915;
6089cbd51c2SDaniele Ceraolo Spurio 
609*2239e6dfSDaniele Ceraolo Spurio 	assert_rpm_wakelock_held(&i915->runtime_pm);
6101be333d3SSagar Arun Kamble 
611*2239e6dfSDaniele Ceraolo Spurio 	spin_lock_irq(&i915->irq_lock);
612*2239e6dfSDaniele Ceraolo Spurio 	gen6_reset_pm_iir(i915, gt->pm_guc_events);
613*2239e6dfSDaniele Ceraolo Spurio 	spin_unlock_irq(&i915->irq_lock);
61426705e20SSagar Arun Kamble }
61526705e20SSagar Arun Kamble 
6169cbd51c2SDaniele Ceraolo Spurio void gen9_enable_guc_interrupts(struct intel_guc *guc)
61726705e20SSagar Arun Kamble {
618*2239e6dfSDaniele Ceraolo Spurio 	struct intel_gt *gt = guc_to_gt(guc);
619*2239e6dfSDaniele Ceraolo Spurio 	struct drm_i915_private *i915 = gt->i915;
6209cbd51c2SDaniele Ceraolo Spurio 
621*2239e6dfSDaniele Ceraolo Spurio 	assert_rpm_wakelock_held(&i915->runtime_pm);
6221be333d3SSagar Arun Kamble 
623*2239e6dfSDaniele Ceraolo Spurio 	spin_lock_irq(&i915->irq_lock);
6249cbd51c2SDaniele Ceraolo Spurio 	if (!guc->interrupts.enabled) {
625*2239e6dfSDaniele Ceraolo Spurio 		WARN_ON_ONCE(intel_uncore_read(gt->uncore, gen6_pm_iir(i915)) &
626*2239e6dfSDaniele Ceraolo Spurio 			     gt->pm_guc_events);
6279cbd51c2SDaniele Ceraolo Spurio 		guc->interrupts.enabled = true;
628*2239e6dfSDaniele Ceraolo Spurio 		gen6_enable_pm_irq(gt, gt->pm_guc_events);
62926705e20SSagar Arun Kamble 	}
630*2239e6dfSDaniele Ceraolo Spurio 	spin_unlock_irq(&i915->irq_lock);
63126705e20SSagar Arun Kamble }
63226705e20SSagar Arun Kamble 
6339cbd51c2SDaniele Ceraolo Spurio void gen9_disable_guc_interrupts(struct intel_guc *guc)
63426705e20SSagar Arun Kamble {
635*2239e6dfSDaniele Ceraolo Spurio 	struct intel_gt *gt = guc_to_gt(guc);
636*2239e6dfSDaniele Ceraolo Spurio 	struct drm_i915_private *i915 = gt->i915;
6379cbd51c2SDaniele Ceraolo Spurio 
638*2239e6dfSDaniele Ceraolo Spurio 	assert_rpm_wakelock_held(&i915->runtime_pm);
6391be333d3SSagar Arun Kamble 
640*2239e6dfSDaniele Ceraolo Spurio 	spin_lock_irq(&i915->irq_lock);
6419cbd51c2SDaniele Ceraolo Spurio 	guc->interrupts.enabled = false;
64226705e20SSagar Arun Kamble 
643*2239e6dfSDaniele Ceraolo Spurio 	gen6_disable_pm_irq(gt, gt->pm_guc_events);
64426705e20SSagar Arun Kamble 
645*2239e6dfSDaniele Ceraolo Spurio 	spin_unlock_irq(&i915->irq_lock);
646*2239e6dfSDaniele Ceraolo Spurio 	intel_synchronize_irq(i915);
64726705e20SSagar Arun Kamble 
6489cbd51c2SDaniele Ceraolo Spurio 	gen9_reset_guc_interrupts(guc);
64926705e20SSagar Arun Kamble }
65026705e20SSagar Arun Kamble 
6519cbd51c2SDaniele Ceraolo Spurio void gen11_reset_guc_interrupts(struct intel_guc *guc)
65254c52a84SOscar Mateo {
653*2239e6dfSDaniele Ceraolo Spurio 	struct intel_gt *gt = guc_to_gt(guc);
654*2239e6dfSDaniele Ceraolo Spurio 	struct drm_i915_private *i915 = gt->i915;
6559cbd51c2SDaniele Ceraolo Spurio 
65654c52a84SOscar Mateo 	spin_lock_irq(&i915->irq_lock);
657*2239e6dfSDaniele Ceraolo Spurio 	gen11_reset_one_iir(gt, 0, GEN11_GUC);
65854c52a84SOscar Mateo 	spin_unlock_irq(&i915->irq_lock);
65954c52a84SOscar Mateo }
66054c52a84SOscar Mateo 
6619cbd51c2SDaniele Ceraolo Spurio void gen11_enable_guc_interrupts(struct intel_guc *guc)
66254c52a84SOscar Mateo {
663*2239e6dfSDaniele Ceraolo Spurio 	struct intel_gt *gt = guc_to_gt(guc);
6649cbd51c2SDaniele Ceraolo Spurio 
665*2239e6dfSDaniele Ceraolo Spurio 	spin_lock_irq(&gt->i915->irq_lock);
6669cbd51c2SDaniele Ceraolo Spurio 	if (!guc->interrupts.enabled) {
667633023a4SDaniele Ceraolo Spurio 		u32 events = REG_FIELD_PREP(ENGINE1_MASK, GUC_INTR_GUC2HOST);
66854c52a84SOscar Mateo 
669*2239e6dfSDaniele Ceraolo Spurio 		WARN_ON_ONCE(gen11_reset_one_iir(gt, 0, GEN11_GUC));
670*2239e6dfSDaniele Ceraolo Spurio 		intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_ENABLE, events);
671*2239e6dfSDaniele Ceraolo Spurio 		intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_MASK, ~events);
6729cbd51c2SDaniele Ceraolo Spurio 		guc->interrupts.enabled = true;
67354c52a84SOscar Mateo 	}
674*2239e6dfSDaniele Ceraolo Spurio 	spin_unlock_irq(&gt->i915->irq_lock);
67554c52a84SOscar Mateo }
67654c52a84SOscar Mateo 
6779cbd51c2SDaniele Ceraolo Spurio void gen11_disable_guc_interrupts(struct intel_guc *guc)
67854c52a84SOscar Mateo {
679*2239e6dfSDaniele Ceraolo Spurio 	struct intel_gt *gt = guc_to_gt(guc);
680*2239e6dfSDaniele Ceraolo Spurio 	struct drm_i915_private *i915 = gt->i915;
6819cbd51c2SDaniele Ceraolo Spurio 
682*2239e6dfSDaniele Ceraolo Spurio 	spin_lock_irq(&i915->irq_lock);
6839cbd51c2SDaniele Ceraolo Spurio 	guc->interrupts.enabled = false;
68454c52a84SOscar Mateo 
685*2239e6dfSDaniele Ceraolo Spurio 	intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_MASK, ~0);
686*2239e6dfSDaniele Ceraolo Spurio 	intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_ENABLE, 0);
68754c52a84SOscar Mateo 
688*2239e6dfSDaniele Ceraolo Spurio 	spin_unlock_irq(&i915->irq_lock);
689*2239e6dfSDaniele Ceraolo Spurio 	intel_synchronize_irq(i915);
69054c52a84SOscar Mateo 
6919cbd51c2SDaniele Ceraolo Spurio 	gen11_reset_guc_interrupts(guc);
69254c52a84SOscar Mateo }
69354c52a84SOscar Mateo 
6940961021aSBen Widawsky /**
6953a3b3c7dSVille Syrjälä  * bdw_update_port_irq - update DE port interrupt
6963a3b3c7dSVille Syrjälä  * @dev_priv: driver private
6973a3b3c7dSVille Syrjälä  * @interrupt_mask: mask of interrupt bits to update
6983a3b3c7dSVille Syrjälä  * @enabled_irq_mask: mask of interrupt bits to enable
6993a3b3c7dSVille Syrjälä  */
7003a3b3c7dSVille Syrjälä static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
701a9c287c9SJani Nikula 				u32 interrupt_mask,
702a9c287c9SJani Nikula 				u32 enabled_irq_mask)
7033a3b3c7dSVille Syrjälä {
704a9c287c9SJani Nikula 	u32 new_val;
705a9c287c9SJani Nikula 	u32 old_val;
7063a3b3c7dSVille Syrjälä 
70767520415SChris Wilson 	lockdep_assert_held(&dev_priv->irq_lock);
7083a3b3c7dSVille Syrjälä 
7093a3b3c7dSVille Syrjälä 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
7103a3b3c7dSVille Syrjälä 
7113a3b3c7dSVille Syrjälä 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
7123a3b3c7dSVille Syrjälä 		return;
7133a3b3c7dSVille Syrjälä 
7143a3b3c7dSVille Syrjälä 	old_val = I915_READ(GEN8_DE_PORT_IMR);
7153a3b3c7dSVille Syrjälä 
7163a3b3c7dSVille Syrjälä 	new_val = old_val;
7173a3b3c7dSVille Syrjälä 	new_val &= ~interrupt_mask;
7183a3b3c7dSVille Syrjälä 	new_val |= (~enabled_irq_mask & interrupt_mask);
7193a3b3c7dSVille Syrjälä 
7203a3b3c7dSVille Syrjälä 	if (new_val != old_val) {
7213a3b3c7dSVille Syrjälä 		I915_WRITE(GEN8_DE_PORT_IMR, new_val);
7223a3b3c7dSVille Syrjälä 		POSTING_READ(GEN8_DE_PORT_IMR);
7233a3b3c7dSVille Syrjälä 	}
7243a3b3c7dSVille Syrjälä }
7253a3b3c7dSVille Syrjälä 
7263a3b3c7dSVille Syrjälä /**
727013d3752SVille Syrjälä  * bdw_update_pipe_irq - update DE pipe interrupt
728013d3752SVille Syrjälä  * @dev_priv: driver private
729013d3752SVille Syrjälä  * @pipe: pipe whose interrupt to update
730013d3752SVille Syrjälä  * @interrupt_mask: mask of interrupt bits to update
731013d3752SVille Syrjälä  * @enabled_irq_mask: mask of interrupt bits to enable
732013d3752SVille Syrjälä  */
733013d3752SVille Syrjälä void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
734013d3752SVille Syrjälä 			 enum pipe pipe,
735a9c287c9SJani Nikula 			 u32 interrupt_mask,
736a9c287c9SJani Nikula 			 u32 enabled_irq_mask)
737013d3752SVille Syrjälä {
738a9c287c9SJani Nikula 	u32 new_val;
739013d3752SVille Syrjälä 
74067520415SChris Wilson 	lockdep_assert_held(&dev_priv->irq_lock);
741013d3752SVille Syrjälä 
742013d3752SVille Syrjälä 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
743013d3752SVille Syrjälä 
744013d3752SVille Syrjälä 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
745013d3752SVille Syrjälä 		return;
746013d3752SVille Syrjälä 
747013d3752SVille Syrjälä 	new_val = dev_priv->de_irq_mask[pipe];
748013d3752SVille Syrjälä 	new_val &= ~interrupt_mask;
749013d3752SVille Syrjälä 	new_val |= (~enabled_irq_mask & interrupt_mask);
750013d3752SVille Syrjälä 
751013d3752SVille Syrjälä 	if (new_val != dev_priv->de_irq_mask[pipe]) {
752013d3752SVille Syrjälä 		dev_priv->de_irq_mask[pipe] = new_val;
753013d3752SVille Syrjälä 		I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
754013d3752SVille Syrjälä 		POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
755013d3752SVille Syrjälä 	}
756013d3752SVille Syrjälä }
757013d3752SVille Syrjälä 
758013d3752SVille Syrjälä /**
759fee884edSDaniel Vetter  * ibx_display_interrupt_update - update SDEIMR
760fee884edSDaniel Vetter  * @dev_priv: driver private
761fee884edSDaniel Vetter  * @interrupt_mask: mask of interrupt bits to update
762fee884edSDaniel Vetter  * @enabled_irq_mask: mask of interrupt bits to enable
763fee884edSDaniel Vetter  */
76447339cd9SDaniel Vetter void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
765a9c287c9SJani Nikula 				  u32 interrupt_mask,
766a9c287c9SJani Nikula 				  u32 enabled_irq_mask)
767fee884edSDaniel Vetter {
768a9c287c9SJani Nikula 	u32 sdeimr = I915_READ(SDEIMR);
769fee884edSDaniel Vetter 	sdeimr &= ~interrupt_mask;
770fee884edSDaniel Vetter 	sdeimr |= (~enabled_irq_mask & interrupt_mask);
771fee884edSDaniel Vetter 
77215a17aaeSDaniel Vetter 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
77315a17aaeSDaniel Vetter 
77467520415SChris Wilson 	lockdep_assert_held(&dev_priv->irq_lock);
775fee884edSDaniel Vetter 
7769df7575fSJesse Barnes 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
777c67a470bSPaulo Zanoni 		return;
778c67a470bSPaulo Zanoni 
779fee884edSDaniel Vetter 	I915_WRITE(SDEIMR, sdeimr);
780fee884edSDaniel Vetter 	POSTING_READ(SDEIMR);
781fee884edSDaniel Vetter }
7828664281bSPaulo Zanoni 
7836b12ca56SVille Syrjälä u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
7846b12ca56SVille Syrjälä 			      enum pipe pipe)
7857c463586SKeith Packard {
7866b12ca56SVille Syrjälä 	u32 status_mask = dev_priv->pipestat_irq_mask[pipe];
78710c59c51SImre Deak 	u32 enable_mask = status_mask << 16;
78810c59c51SImre Deak 
7896b12ca56SVille Syrjälä 	lockdep_assert_held(&dev_priv->irq_lock);
7906b12ca56SVille Syrjälä 
7916b12ca56SVille Syrjälä 	if (INTEL_GEN(dev_priv) < 5)
7926b12ca56SVille Syrjälä 		goto out;
7936b12ca56SVille Syrjälä 
79410c59c51SImre Deak 	/*
795724a6905SVille Syrjälä 	 * On pipe A we don't support the PSR interrupt yet,
796724a6905SVille Syrjälä 	 * on pipe B and C the same bit is MBZ.
79710c59c51SImre Deak 	 */
79810c59c51SImre Deak 	if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
79910c59c51SImre Deak 		return 0;
800724a6905SVille Syrjälä 	/*
801724a6905SVille Syrjälä 	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
802724a6905SVille Syrjälä 	 * A the same bit is for perf counters which we don't use either.
803724a6905SVille Syrjälä 	 */
804724a6905SVille Syrjälä 	if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
805724a6905SVille Syrjälä 		return 0;
80610c59c51SImre Deak 
80710c59c51SImre Deak 	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
80810c59c51SImre Deak 			 SPRITE0_FLIP_DONE_INT_EN_VLV |
80910c59c51SImre Deak 			 SPRITE1_FLIP_DONE_INT_EN_VLV);
81010c59c51SImre Deak 	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
81110c59c51SImre Deak 		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
81210c59c51SImre Deak 	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
81310c59c51SImre Deak 		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;
81410c59c51SImre Deak 
8156b12ca56SVille Syrjälä out:
8166b12ca56SVille Syrjälä 	WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
8176b12ca56SVille Syrjälä 		  status_mask & ~PIPESTAT_INT_STATUS_MASK,
8186b12ca56SVille Syrjälä 		  "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
8196b12ca56SVille Syrjälä 		  pipe_name(pipe), enable_mask, status_mask);
8206b12ca56SVille Syrjälä 
82110c59c51SImre Deak 	return enable_mask;
82210c59c51SImre Deak }
82310c59c51SImre Deak 
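/*
 * Note (illustrative): PIPESTAT packs the interrupt enable bits in the
 * high 16 bits and the corresponding status bits in the low 16 bits,
 * which is why the default enable mask above is simply status_mask << 16,
 * with the VLV/CHV special cases patched on top.
 */
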
8246b12ca56SVille Syrjälä void i915_enable_pipestat(struct drm_i915_private *dev_priv,
8256b12ca56SVille Syrjälä 			  enum pipe pipe, u32 status_mask)
826755e9019SImre Deak {
8276b12ca56SVille Syrjälä 	i915_reg_t reg = PIPESTAT(pipe);
828755e9019SImre Deak 	u32 enable_mask;
829755e9019SImre Deak 
8306b12ca56SVille Syrjälä 	WARN_ONCE(status_mask & ~PIPESTAT_INT_STATUS_MASK,
8316b12ca56SVille Syrjälä 		  "pipe %c: status_mask=0x%x\n",
8326b12ca56SVille Syrjälä 		  pipe_name(pipe), status_mask);
8336b12ca56SVille Syrjälä 
8346b12ca56SVille Syrjälä 	lockdep_assert_held(&dev_priv->irq_lock);
8356b12ca56SVille Syrjälä 	WARN_ON(!intel_irqs_enabled(dev_priv));
8366b12ca56SVille Syrjälä 
8376b12ca56SVille Syrjälä 	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == status_mask)
8386b12ca56SVille Syrjälä 		return;
8396b12ca56SVille Syrjälä 
8406b12ca56SVille Syrjälä 	dev_priv->pipestat_irq_mask[pipe] |= status_mask;
8416b12ca56SVille Syrjälä 	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
8426b12ca56SVille Syrjälä 
8436b12ca56SVille Syrjälä 	I915_WRITE(reg, enable_mask | status_mask);
8446b12ca56SVille Syrjälä 	POSTING_READ(reg);
845755e9019SImre Deak }
846755e9019SImre Deak 
8476b12ca56SVille Syrjälä void i915_disable_pipestat(struct drm_i915_private *dev_priv,
8486b12ca56SVille Syrjälä 			   enum pipe pipe, u32 status_mask)
849755e9019SImre Deak {
8506b12ca56SVille Syrjälä 	i915_reg_t reg = PIPESTAT(pipe);
851755e9019SImre Deak 	u32 enable_mask;
852755e9019SImre Deak 
8536b12ca56SVille Syrjälä 	WARN_ONCE(status_mask & ~PIPESTAT_INT_STATUS_MASK,
8546b12ca56SVille Syrjälä 		  "pipe %c: status_mask=0x%x\n",
8556b12ca56SVille Syrjälä 		  pipe_name(pipe), status_mask);
8566b12ca56SVille Syrjälä 
8576b12ca56SVille Syrjälä 	lockdep_assert_held(&dev_priv->irq_lock);
8586b12ca56SVille Syrjälä 	WARN_ON(!intel_irqs_enabled(dev_priv));
8596b12ca56SVille Syrjälä 
8606b12ca56SVille Syrjälä 	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == 0)
8616b12ca56SVille Syrjälä 		return;
8626b12ca56SVille Syrjälä 
8636b12ca56SVille Syrjälä 	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
8646b12ca56SVille Syrjälä 	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
8656b12ca56SVille Syrjälä 
8666b12ca56SVille Syrjälä 	I915_WRITE(reg, enable_mask | status_mask);
8676b12ca56SVille Syrjälä 	POSTING_READ(reg);
868755e9019SImre Deak }
869755e9019SImre Deak 
870f3e30485SVille Syrjälä static bool i915_has_asle(struct drm_i915_private *dev_priv)
871f3e30485SVille Syrjälä {
872f3e30485SVille Syrjälä 	if (!dev_priv->opregion.asle)
873f3e30485SVille Syrjälä 		return false;
874f3e30485SVille Syrjälä 
875f3e30485SVille Syrjälä 	return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
876f3e30485SVille Syrjälä }
877f3e30485SVille Syrjälä 
878c0e09200SDave Airlie /**
879f49e38ddSJani Nikula  * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
88014bb2c11STvrtko Ursulin  * @dev_priv: i915 device private
88101c66889SZhao Yakui  */
88291d14251STvrtko Ursulin static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
88301c66889SZhao Yakui {
884f3e30485SVille Syrjälä 	if (!i915_has_asle(dev_priv))
885f49e38ddSJani Nikula 		return;
886f49e38ddSJani Nikula 
88713321786SDaniel Vetter 	spin_lock_irq(&dev_priv->irq_lock);
88801c66889SZhao Yakui 
889755e9019SImre Deak 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
89091d14251STvrtko Ursulin 	if (INTEL_GEN(dev_priv) >= 4)
8913b6c42e8SDaniel Vetter 		i915_enable_pipestat(dev_priv, PIPE_A,
892755e9019SImre Deak 				     PIPE_LEGACY_BLC_EVENT_STATUS);
8931ec14ad3SChris Wilson 
89413321786SDaniel Vetter 	spin_unlock_irq(&dev_priv->irq_lock);
89501c66889SZhao Yakui }
89601c66889SZhao Yakui 
897f75f3746SVille Syrjälä /*
898f75f3746SVille Syrjälä  * This timing diagram depicts the video signal in and
899f75f3746SVille Syrjälä  * around the vertical blanking period.
900f75f3746SVille Syrjälä  *
901f75f3746SVille Syrjälä  * Assumptions about the fictitious mode used in this example:
902f75f3746SVille Syrjälä  *  vblank_start >= 3
903f75f3746SVille Syrjälä  *  vsync_start = vblank_start + 1
904f75f3746SVille Syrjälä  *  vsync_end = vblank_start + 2
905f75f3746SVille Syrjälä  *  vtotal = vblank_start + 3
906f75f3746SVille Syrjälä  *
907f75f3746SVille Syrjälä  *           start of vblank:
908f75f3746SVille Syrjälä  *           latch double buffered registers
909f75f3746SVille Syrjälä  *           increment frame counter (ctg+)
910f75f3746SVille Syrjälä  *           generate start of vblank interrupt (gen4+)
911f75f3746SVille Syrjälä  *           |
912f75f3746SVille Syrjälä  *           |          frame start:
913f75f3746SVille Syrjälä  *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
914f75f3746SVille Syrjälä  *           |          may be shifted forward 1-3 extra lines via PIPECONF
915f75f3746SVille Syrjälä  *           |          |
916f75f3746SVille Syrjälä  *           |          |  start of vsync:
917f75f3746SVille Syrjälä  *           |          |  generate vsync interrupt
918f75f3746SVille Syrjälä  *           |          |  |
919f75f3746SVille Syrjälä  * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
920f75f3746SVille Syrjälä  *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
921f75f3746SVille Syrjälä  * ----va---> <-----------------vb--------------------> <--------va-------------
922f75f3746SVille Syrjälä  *       |          |       <----vs----->                     |
923f75f3746SVille Syrjälä  * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
924f75f3746SVille Syrjälä  * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
925f75f3746SVille Syrjälä  * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
926f75f3746SVille Syrjälä  *       |          |                                         |
927f75f3746SVille Syrjälä  *       last visible pixel                                   first visible pixel
928f75f3746SVille Syrjälä  *                  |                                         increment frame counter (gen3/4)
929f75f3746SVille Syrjälä  *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
930f75f3746SVille Syrjälä  *
931f75f3746SVille Syrjälä  * x  = horizontal active
932f75f3746SVille Syrjälä  * _  = horizontal blanking
933f75f3746SVille Syrjälä  * hs = horizontal sync
934f75f3746SVille Syrjälä  * va = vertical active
935f75f3746SVille Syrjälä  * vb = vertical blanking
936f75f3746SVille Syrjälä  * vs = vertical sync
937f75f3746SVille Syrjälä  * vbs = vblank_start (number)
938f75f3746SVille Syrjälä  *
939f75f3746SVille Syrjälä  * Summary:
940f75f3746SVille Syrjälä  * - most events happen at the start of horizontal sync
941f75f3746SVille Syrjälä  * - frame start happens at the start of horizontal blank, 1-4 lines
942f75f3746SVille Syrjälä  *   (depending on PIPECONF settings) after the start of vblank
943f75f3746SVille Syrjälä  * - gen3/4 pixel and frame counter are synchronized with the start
944f75f3746SVille Syrjälä  *   of horizontal active on the first line of vertical active
945f75f3746SVille Syrjälä  */
946f75f3746SVille Syrjälä 
94742f52ef8SKeith Packard /* Called from drm generic code, passed a 'crtc', which
94842f52ef8SKeith Packard  * we use as a pipe index
94942f52ef8SKeith Packard  */
95008fa8fd0SVille Syrjälä u32 i915_get_vblank_counter(struct drm_crtc *crtc)
9510a3e67a4SJesse Barnes {
95208fa8fd0SVille Syrjälä 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
95308fa8fd0SVille Syrjälä 	struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[drm_crtc_index(crtc)];
95432db0b65SVille Syrjälä 	const struct drm_display_mode *mode = &vblank->hwmode;
95508fa8fd0SVille Syrjälä 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
956f0f59a00SVille Syrjälä 	i915_reg_t high_frame, low_frame;
9570b2a8e09SVille Syrjälä 	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
958694e409dSVille Syrjälä 	unsigned long irqflags;
959391f75e2SVille Syrjälä 
96032db0b65SVille Syrjälä 	/*
96132db0b65SVille Syrjälä 	 * On i965gm TV output the frame counter only works up to
96232db0b65SVille Syrjälä 	 * the point when we enable the TV encoder. After that the
96332db0b65SVille Syrjälä 	 * frame counter ceases to work and reads zero. We need a
96432db0b65SVille Syrjälä 	 * vblank wait before enabling the TV encoder and so we
96532db0b65SVille Syrjälä 	 * have to enable vblank interrupts while the frame counter
96632db0b65SVille Syrjälä 	 * is still in a working state. However the core vblank code
96732db0b65SVille Syrjälä 	 * does not like us returning non-zero frame counter values
96832db0b65SVille Syrjälä 	 * when we've told it that we don't have a working frame
96932db0b65SVille Syrjälä 	 * counter. Thus we must stop non-zero values leaking out.
97032db0b65SVille Syrjälä 	 */
97132db0b65SVille Syrjälä 	if (!vblank->max_vblank_count)
97232db0b65SVille Syrjälä 		return 0;
97332db0b65SVille Syrjälä 
9740b2a8e09SVille Syrjälä 	htotal = mode->crtc_htotal;
9750b2a8e09SVille Syrjälä 	hsync_start = mode->crtc_hsync_start;
9760b2a8e09SVille Syrjälä 	vbl_start = mode->crtc_vblank_start;
9770b2a8e09SVille Syrjälä 	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
9780b2a8e09SVille Syrjälä 		vbl_start = DIV_ROUND_UP(vbl_start, 2);
979391f75e2SVille Syrjälä 
9800b2a8e09SVille Syrjälä 	/* Convert to pixel count */
9810b2a8e09SVille Syrjälä 	vbl_start *= htotal;
9820b2a8e09SVille Syrjälä 
9830b2a8e09SVille Syrjälä 	/* Start of vblank event occurs at start of hsync */
9840b2a8e09SVille Syrjälä 	vbl_start -= htotal - hsync_start;
9850b2a8e09SVille Syrjälä 
9869db4a9c7SJesse Barnes 	high_frame = PIPEFRAME(pipe);
9879db4a9c7SJesse Barnes 	low_frame = PIPEFRAMEPIXEL(pipe);
9885eddb70bSChris Wilson 
989694e409dSVille Syrjälä 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
990694e409dSVille Syrjälä 
9910a3e67a4SJesse Barnes 	/*
9920a3e67a4SJesse Barnes 	 * High & low register fields aren't synchronized, so make sure
9930a3e67a4SJesse Barnes 	 * we get a low value that's stable across two reads of the high
9940a3e67a4SJesse Barnes 	 * register.
9950a3e67a4SJesse Barnes 	 */
9960a3e67a4SJesse Barnes 	do {
997694e409dSVille Syrjälä 		high1 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK;
998694e409dSVille Syrjälä 		low   = I915_READ_FW(low_frame);
999694e409dSVille Syrjälä 		high2 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK;
10000a3e67a4SJesse Barnes 	} while (high1 != high2);
10010a3e67a4SJesse Barnes 
1002694e409dSVille Syrjälä 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
1003694e409dSVille Syrjälä 
10045eddb70bSChris Wilson 	high1 >>= PIPE_FRAME_HIGH_SHIFT;
1005391f75e2SVille Syrjälä 	pixel = low & PIPE_PIXEL_MASK;
10065eddb70bSChris Wilson 	low >>= PIPE_FRAME_LOW_SHIFT;
1007391f75e2SVille Syrjälä 
1008391f75e2SVille Syrjälä 	/*
1009391f75e2SVille Syrjälä 	 * The frame counter increments at beginning of active.
1010391f75e2SVille Syrjälä 	 * Cook up a vblank counter by also checking the pixel
1011391f75e2SVille Syrjälä 	 * counter against vblank start.
1012391f75e2SVille Syrjälä 	 */
1013edc08d0aSVille Syrjälä 	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
10140a3e67a4SJesse Barnes }
10150a3e67a4SJesse Barnes 
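/*
 * Worked example (illustrative): the hardware frame counter increments
 * at the start of active, so if it still reads N but the pixel counter
 * has already reached vbl_start, the (pixel >= vbl_start) term above
 * bumps the result to N + 1, making the returned value behave as if it
 * incremented at vblank start.
 */
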
101608fa8fd0SVille Syrjälä u32 g4x_get_vblank_counter(struct drm_crtc *crtc)
10179880b7a5SJesse Barnes {
101808fa8fd0SVille Syrjälä 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
101908fa8fd0SVille Syrjälä 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
10209880b7a5SJesse Barnes 
1021649636efSVille Syrjälä 	return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
10229880b7a5SJesse Barnes }
10239880b7a5SJesse Barnes 
1024aec0246fSUma Shankar /*
1025aec0246fSUma Shankar  * On certain encoders on certain platforms, pipe
1026aec0246fSUma Shankar  * scanline register will not work to get the scanline,
1027aec0246fSUma Shankar  * since the timings are driven from the PORT or there are
1028aec0246fSUma Shankar  * issues with scanline register updates.
1029aec0246fSUma Shankar  * This function will use Framestamp and current
1030aec0246fSUma Shankar  * timestamp registers to calculate the scanline.
1031aec0246fSUma Shankar  */
1032aec0246fSUma Shankar static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc)
1033aec0246fSUma Shankar {
1034aec0246fSUma Shankar 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1035aec0246fSUma Shankar 	struct drm_vblank_crtc *vblank =
1036aec0246fSUma Shankar 		&crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
1037aec0246fSUma Shankar 	const struct drm_display_mode *mode = &vblank->hwmode;
1038aec0246fSUma Shankar 	u32 vblank_start = mode->crtc_vblank_start;
1039aec0246fSUma Shankar 	u32 vtotal = mode->crtc_vtotal;
1040aec0246fSUma Shankar 	u32 htotal = mode->crtc_htotal;
1041aec0246fSUma Shankar 	u32 clock = mode->crtc_clock;
1042aec0246fSUma Shankar 	u32 scanline, scan_prev_time, scan_curr_time, scan_post_time;
1043aec0246fSUma Shankar 
1044aec0246fSUma Shankar 	/*
1045aec0246fSUma Shankar 	 * To avoid the race condition where we might cross into the
1046aec0246fSUma Shankar 	 * next vblank just between the PIPE_FRMTMSTMP and TIMESTAMP_CTR
1047aec0246fSUma Shankar 	 * reads, make sure both PIPE_FRMTMSTMP and TIMESTAMP_CTR are
1048aec0246fSUma Shankar 	 * read during the same frame.
1049aec0246fSUma Shankar 	 */
1050aec0246fSUma Shankar 	do {
1051aec0246fSUma Shankar 		/*
1052aec0246fSUma Shankar 		 * This field provides read back of the display
1053aec0246fSUma Shankar 		 * pipe frame time stamp. The time stamp value
1054aec0246fSUma Shankar 		 * is sampled at every start of vertical blank.
1055aec0246fSUma Shankar 		 */
1056aec0246fSUma Shankar 		scan_prev_time = I915_READ_FW(PIPE_FRMTMSTMP(crtc->pipe));
1057aec0246fSUma Shankar 
1058aec0246fSUma Shankar 		/*
1059aec0246fSUma Shankar 		 * The TIMESTAMP_CTR register has the current
1060aec0246fSUma Shankar 		 * time stamp value.
1061aec0246fSUma Shankar 		 */
1062aec0246fSUma Shankar 		scan_curr_time = I915_READ_FW(IVB_TIMESTAMP_CTR);
1063aec0246fSUma Shankar 
1064aec0246fSUma Shankar 		scan_post_time = I915_READ_FW(PIPE_FRMTMSTMP(crtc->pipe));
1065aec0246fSUma Shankar 	} while (scan_post_time != scan_prev_time);
1066aec0246fSUma Shankar 
1067aec0246fSUma Shankar 	scanline = div_u64(mul_u32_u32(scan_curr_time - scan_prev_time,
1068aec0246fSUma Shankar 					clock), 1000 * htotal);
1069aec0246fSUma Shankar 	scanline = min(scanline, vtotal - 1);
1070aec0246fSUma Shankar 	scanline = (scanline + vblank_start) % vtotal;
1071aec0246fSUma Shankar 
1072aec0246fSUma Shankar 	return scanline;
1073aec0246fSUma Shankar }
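/*
 * Worked example for the conversion above (illustrative numbers, and
 * assuming the timestamp delta is in microseconds as the divide by
 * 1000 implies): for a 1080p mode with crtc_clock == 148500 kHz,
 * htotal == 2200, vtotal == 1125 and crtc_vblank_start == 1080, a
 * delta of 8000 us since the last frame timestamp corresponds to
 * 8000 * 148500 / (1000 * 2200) == 540 lines into the frame, which is
 * then reported as scanline (540 + 1080) % 1125 == 495.
 */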
1074aec0246fSUma Shankar 
107575aa3f63SVille Syrjälä /* I915_READ_FW, only for fast reads of display block, no need for forcewake etc. */
1076a225f079SVille Syrjälä static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
1077a225f079SVille Syrjälä {
1078a225f079SVille Syrjälä 	struct drm_device *dev = crtc->base.dev;
1079fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
10805caa0feaSDaniel Vetter 	const struct drm_display_mode *mode;
10815caa0feaSDaniel Vetter 	struct drm_vblank_crtc *vblank;
1082a225f079SVille Syrjälä 	enum pipe pipe = crtc->pipe;
108380715b2fSVille Syrjälä 	int position, vtotal;
1084a225f079SVille Syrjälä 
108572259536SVille Syrjälä 	if (!crtc->active)
108672259536SVille Syrjälä 		return -1;
108772259536SVille Syrjälä 
10885caa0feaSDaniel Vetter 	vblank = &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
10895caa0feaSDaniel Vetter 	mode = &vblank->hwmode;
10905caa0feaSDaniel Vetter 
1091aec0246fSUma Shankar 	if (mode->private_flags & I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP)
1092aec0246fSUma Shankar 		return __intel_get_crtc_scanline_from_timestamp(crtc);
1093aec0246fSUma Shankar 
109480715b2fSVille Syrjälä 	vtotal = mode->crtc_vtotal;
1095a225f079SVille Syrjälä 	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
1096a225f079SVille Syrjälä 		vtotal /= 2;
1097a225f079SVille Syrjälä 
1098cf819effSLucas De Marchi 	if (IS_GEN(dev_priv, 2))
109975aa3f63SVille Syrjälä 		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
1100a225f079SVille Syrjälä 	else
110175aa3f63SVille Syrjälä 		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
1102a225f079SVille Syrjälä 
1103a225f079SVille Syrjälä 	/*
110441b578fbSJesse Barnes 	 * On HSW, the DSL reg (0x70000) appears to return 0 if we
110541b578fbSJesse Barnes 	 * read it just before the start of vblank.  So try it again
110641b578fbSJesse Barnes 	 * so we don't accidentally end up spanning a vblank frame
110741b578fbSJesse Barnes 	 * increment, causing the pipe_update_end() code to squawk at us.
110841b578fbSJesse Barnes 	 *
110941b578fbSJesse Barnes 	 * The nature of this problem means we can't simply check the ISR
111041b578fbSJesse Barnes 	 * bit and return the vblank start value; nor can we use the scanline
111141b578fbSJesse Barnes 	 * debug register in the transcoder as it appears to have the same
111241b578fbSJesse Barnes 	 * problem.  We may need to extend this to include other platforms,
111341b578fbSJesse Barnes 	 * but so far testing only shows the problem on HSW.
111441b578fbSJesse Barnes 	 */
111591d14251STvrtko Ursulin 	if (HAS_DDI(dev_priv) && !position) {
111641b578fbSJesse Barnes 		int i, temp;
111741b578fbSJesse Barnes 
111841b578fbSJesse Barnes 		for (i = 0; i < 100; i++) {
111941b578fbSJesse Barnes 			udelay(1);
1120707bdd3fSVille Syrjälä 			temp = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
112141b578fbSJesse Barnes 			if (temp != position) {
112241b578fbSJesse Barnes 				position = temp;
112341b578fbSJesse Barnes 				break;
112441b578fbSJesse Barnes 			}
112541b578fbSJesse Barnes 		}
112641b578fbSJesse Barnes 	}
112741b578fbSJesse Barnes 
112841b578fbSJesse Barnes 	/*
112980715b2fSVille Syrjälä 	 * See update_scanline_offset() for the details on the
113080715b2fSVille Syrjälä 	 * scanline_offset adjustment.
1131a225f079SVille Syrjälä 	 */
113280715b2fSVille Syrjälä 	return (position + crtc->scanline_offset) % vtotal;
1133a225f079SVille Syrjälä }
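/*
 * Note on the modulo above: with e.g. scanline_offset == 1, a raw
 * hardware readout of vtotal - 1 wraps around to line 0, keeping the
 * reported scanline within [0, vtotal). See update_scanline_offset()
 * for why a non-zero offset is needed on some platforms.
 */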
1134a225f079SVille Syrjälä 
11357d23e593SVille Syrjälä bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
11361bf6ad62SDaniel Vetter 			      bool in_vblank_irq, int *vpos, int *hpos,
11373bb403bfSVille Syrjälä 			      ktime_t *stime, ktime_t *etime,
11383bb403bfSVille Syrjälä 			      const struct drm_display_mode *mode)
11390af7e4dfSMario Kleiner {
1140fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
114198187836SVille Syrjälä 	struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv,
114298187836SVille Syrjälä 								pipe);
11433aa18df8SVille Syrjälä 	int position;
114478e8fc6bSVille Syrjälä 	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
1145ad3543edSMario Kleiner 	unsigned long irqflags;
11468a920e24SVille Syrjälä 	bool use_scanline_counter = INTEL_GEN(dev_priv) >= 5 ||
11478a920e24SVille Syrjälä 		IS_G4X(dev_priv) || IS_GEN(dev_priv, 2) ||
11488a920e24SVille Syrjälä 		mode->private_flags & I915_MODE_FLAG_USE_SCANLINE_COUNTER;
11490af7e4dfSMario Kleiner 
1150fc467a22SMaarten Lankhorst 	if (WARN_ON(!mode->crtc_clock)) {
11510af7e4dfSMario Kleiner 		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
11529db4a9c7SJesse Barnes 				 "pipe %c\n", pipe_name(pipe));
11531bf6ad62SDaniel Vetter 		return false;
11540af7e4dfSMario Kleiner 	}
11550af7e4dfSMario Kleiner 
1156c2baf4b7SVille Syrjälä 	htotal = mode->crtc_htotal;
115778e8fc6bSVille Syrjälä 	hsync_start = mode->crtc_hsync_start;
1158c2baf4b7SVille Syrjälä 	vtotal = mode->crtc_vtotal;
1159c2baf4b7SVille Syrjälä 	vbl_start = mode->crtc_vblank_start;
1160c2baf4b7SVille Syrjälä 	vbl_end = mode->crtc_vblank_end;
11610af7e4dfSMario Kleiner 
1162d31faf65SVille Syrjälä 	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
1163d31faf65SVille Syrjälä 		vbl_start = DIV_ROUND_UP(vbl_start, 2);
1164d31faf65SVille Syrjälä 		vbl_end /= 2;
1165d31faf65SVille Syrjälä 		vtotal /= 2;
1166d31faf65SVille Syrjälä 	}
1167d31faf65SVille Syrjälä 
1168ad3543edSMario Kleiner 	/*
1169ad3543edSMario Kleiner 	 * Lock uncore.lock, as we will do multiple timing critical raw
1170ad3543edSMario Kleiner 	 * register reads, potentially with preemption disabled, so the
1171ad3543edSMario Kleiner 	 * following code must not block on uncore.lock.
1172ad3543edSMario Kleiner 	 */
1173ad3543edSMario Kleiner 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
1174ad3543edSMario Kleiner 
1175ad3543edSMario Kleiner 	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
1176ad3543edSMario Kleiner 
1177ad3543edSMario Kleiner 	/* Get optional system timestamp before query. */
1178ad3543edSMario Kleiner 	if (stime)
1179ad3543edSMario Kleiner 		*stime = ktime_get();
1180ad3543edSMario Kleiner 
11818a920e24SVille Syrjälä 	if (use_scanline_counter) {
11820af7e4dfSMario Kleiner 		/* No obvious pixelcount register. Only query vertical
11830af7e4dfSMario Kleiner 		 * scanout position from Display scan line register.
11840af7e4dfSMario Kleiner 		 */
1185a225f079SVille Syrjälä 		position = __intel_get_crtc_scanline(intel_crtc);
11860af7e4dfSMario Kleiner 	} else {
11870af7e4dfSMario Kleiner 		/* Have access to pixelcount since start of frame.
11880af7e4dfSMario Kleiner 		 * We can split this into vertical and horizontal
11890af7e4dfSMario Kleiner 		 * scanout position.
11900af7e4dfSMario Kleiner 		 */
119175aa3f63SVille Syrjälä 		position = (I915_READ_FW(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
11920af7e4dfSMario Kleiner 
11933aa18df8SVille Syrjälä 		/* convert to pixel counts */
11943aa18df8SVille Syrjälä 		vbl_start *= htotal;
11953aa18df8SVille Syrjälä 		vbl_end *= htotal;
11963aa18df8SVille Syrjälä 		vtotal *= htotal;
119778e8fc6bSVille Syrjälä 
119878e8fc6bSVille Syrjälä 		/*
11997e78f1cbSVille Syrjälä 		 * In interlaced modes, the pixel counter counts all pixels,
12007e78f1cbSVille Syrjälä 		 * so one field will have htotal more pixels. In order to avoid
12017e78f1cbSVille Syrjälä 		 * the reported position from jumping backwards when the pixel
12027e78f1cbSVille Syrjälä 		 * counter is beyond the length of the shorter field, just
12037e78f1cbSVille Syrjälä 		 * clamp the position to the length of the shorter field. This
12047e78f1cbSVille Syrjälä 		 * matches how the scanline counter based position works since
12057e78f1cbSVille Syrjälä 		 * the scanline counter doesn't count the two half lines.
12067e78f1cbSVille Syrjälä 		 */
12077e78f1cbSVille Syrjälä 		if (position >= vtotal)
12087e78f1cbSVille Syrjälä 			position = vtotal - 1;
12097e78f1cbSVille Syrjälä 
12107e78f1cbSVille Syrjälä 		/*
121178e8fc6bSVille Syrjälä 		 * Start of vblank interrupt is triggered at start of hsync,
121278e8fc6bSVille Syrjälä 		 * just prior to the first active line of vblank. However we
121378e8fc6bSVille Syrjälä 		 * consider lines to start at the leading edge of horizontal
121478e8fc6bSVille Syrjälä 		 * active. So, should we get here before we've crossed into
121578e8fc6bSVille Syrjälä 		 * the horizontal active of the first line in vblank, we would
121678e8fc6bSVille Syrjälä 		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
121778e8fc6bSVille Syrjälä 		 * always add htotal-hsync_start to the current pixel position.
121878e8fc6bSVille Syrjälä 		 */
121978e8fc6bSVille Syrjälä 		position = (position + htotal - hsync_start) % vtotal;
12203aa18df8SVille Syrjälä 	}
12213aa18df8SVille Syrjälä 
1222ad3543edSMario Kleiner 	/* Get optional system timestamp after query. */
1223ad3543edSMario Kleiner 	if (etime)
1224ad3543edSMario Kleiner 		*etime = ktime_get();
1225ad3543edSMario Kleiner 
1226ad3543edSMario Kleiner 	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
1227ad3543edSMario Kleiner 
1228ad3543edSMario Kleiner 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
1229ad3543edSMario Kleiner 
12303aa18df8SVille Syrjälä 	/*
12313aa18df8SVille Syrjälä 	 * While in vblank, position will be negative
12323aa18df8SVille Syrjälä 	 * counting up towards 0 at vbl_end. And outside
12333aa18df8SVille Syrjälä 	 * vblank, position will be positive counting
12343aa18df8SVille Syrjälä 	 * up since vbl_end.
12353aa18df8SVille Syrjälä 	 */
12363aa18df8SVille Syrjälä 	if (position >= vbl_start)
12373aa18df8SVille Syrjälä 		position -= vbl_end;
12383aa18df8SVille Syrjälä 	else
12393aa18df8SVille Syrjälä 		position += vtotal - vbl_end;
12403aa18df8SVille Syrjälä 
12418a920e24SVille Syrjälä 	if (use_scanline_counter) {
12423aa18df8SVille Syrjälä 		*vpos = position;
12433aa18df8SVille Syrjälä 		*hpos = 0;
12443aa18df8SVille Syrjälä 	} else {
12450af7e4dfSMario Kleiner 		*vpos = position / htotal;
12460af7e4dfSMario Kleiner 		*hpos = position - (*vpos * htotal);
12470af7e4dfSMario Kleiner 	}
12480af7e4dfSMario Kleiner 
12491bf6ad62SDaniel Vetter 	return true;
12500af7e4dfSMario Kleiner }
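/*
 * Illustrative example of the sign convention above: with
 * vbl_start == 1080 and vbl_end == vtotal == 1125, a scanline position
 * of 1100 is reported as vpos == 1100 - 1125 == -25 (25 lines before
 * vblank ends), while a position of 100 in the active area is reported
 * as vpos == 100 + 1125 - 1125 == 100.
 */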
12510af7e4dfSMario Kleiner 
1252a225f079SVille Syrjälä int intel_get_crtc_scanline(struct intel_crtc *crtc)
1253a225f079SVille Syrjälä {
1254fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1255a225f079SVille Syrjälä 	unsigned long irqflags;
1256a225f079SVille Syrjälä 	int position;
1257a225f079SVille Syrjälä 
1258a225f079SVille Syrjälä 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
1259a225f079SVille Syrjälä 	position = __intel_get_crtc_scanline(crtc);
1260a225f079SVille Syrjälä 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
1261a225f079SVille Syrjälä 
1262a225f079SVille Syrjälä 	return position;
1263a225f079SVille Syrjälä }
1264a225f079SVille Syrjälä 
126591d14251STvrtko Ursulin static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv)
1266f97108d1SJesse Barnes {
12674f5fd91fSTvrtko Ursulin 	struct intel_uncore *uncore = &dev_priv->uncore;
1268b5b72e89SMatthew Garrett 	u32 busy_up, busy_down, max_avg, min_avg;
12699270388eSDaniel Vetter 	u8 new_delay;
12709270388eSDaniel Vetter 
1271d0ecd7e2SDaniel Vetter 	spin_lock(&mchdev_lock);
1272f97108d1SJesse Barnes 
12734f5fd91fSTvrtko Ursulin 	intel_uncore_write16(uncore,
12744f5fd91fSTvrtko Ursulin 			     MEMINTRSTS,
12754f5fd91fSTvrtko Ursulin 			     intel_uncore_read(uncore, MEMINTRSTS));
127673edd18fSDaniel Vetter 
127720e4d407SDaniel Vetter 	new_delay = dev_priv->ips.cur_delay;
12789270388eSDaniel Vetter 
12794f5fd91fSTvrtko Ursulin 	intel_uncore_write16(uncore, MEMINTRSTS, MEMINT_EVAL_CHG);
12804f5fd91fSTvrtko Ursulin 	busy_up = intel_uncore_read(uncore, RCPREVBSYTUPAVG);
12814f5fd91fSTvrtko Ursulin 	busy_down = intel_uncore_read(uncore, RCPREVBSYTDNAVG);
12824f5fd91fSTvrtko Ursulin 	max_avg = intel_uncore_read(uncore, RCBMAXAVG);
12834f5fd91fSTvrtko Ursulin 	min_avg = intel_uncore_read(uncore, RCBMINAVG);
1284f97108d1SJesse Barnes 
1285f97108d1SJesse Barnes 	/* Handle RCS change request from hw */
1286b5b72e89SMatthew Garrett 	if (busy_up > max_avg) {
128720e4d407SDaniel Vetter 		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
128820e4d407SDaniel Vetter 			new_delay = dev_priv->ips.cur_delay - 1;
128920e4d407SDaniel Vetter 		if (new_delay < dev_priv->ips.max_delay)
129020e4d407SDaniel Vetter 			new_delay = dev_priv->ips.max_delay;
1291b5b72e89SMatthew Garrett 	} else if (busy_down < min_avg) {
129220e4d407SDaniel Vetter 		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
129320e4d407SDaniel Vetter 			new_delay = dev_priv->ips.cur_delay + 1;
129420e4d407SDaniel Vetter 		if (new_delay > dev_priv->ips.min_delay)
129520e4d407SDaniel Vetter 			new_delay = dev_priv->ips.min_delay;
1296f97108d1SJesse Barnes 	}
1297f97108d1SJesse Barnes 
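	/*
	 * Note that these delay values are inversely related to
	 * frequency: stepping cur_delay down raises performance. That is
	 * why the busy_up branch above clamps against max_delay as a
	 * numeric lower bound and the busy_down branch clamps against
	 * min_delay as a numeric upper bound.
	 */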
129891d14251STvrtko Ursulin 	if (ironlake_set_drps(dev_priv, new_delay))
129920e4d407SDaniel Vetter 		dev_priv->ips.cur_delay = new_delay;
1300f97108d1SJesse Barnes 
1301d0ecd7e2SDaniel Vetter 	spin_unlock(&mchdev_lock);
13029270388eSDaniel Vetter 
1303f97108d1SJesse Barnes 	return;
1304f97108d1SJesse Barnes }
1305f97108d1SJesse Barnes 
130643cf3bf0SChris Wilson static void vlv_c0_read(struct drm_i915_private *dev_priv,
130743cf3bf0SChris Wilson 			struct intel_rps_ei *ei)
130831685c25SDeepak S {
1309679cb6c1SMika Kuoppala 	ei->ktime = ktime_get_raw();
131043cf3bf0SChris Wilson 	ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT);
131143cf3bf0SChris Wilson 	ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
131231685c25SDeepak S }
131331685c25SDeepak S 
131443cf3bf0SChris Wilson void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
131543cf3bf0SChris Wilson {
1316562d9baeSSagar Arun Kamble 	memset(&dev_priv->gt_pm.rps.ei, 0, sizeof(dev_priv->gt_pm.rps.ei));
131743cf3bf0SChris Wilson }
131843cf3bf0SChris Wilson 
131943cf3bf0SChris Wilson static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
132043cf3bf0SChris Wilson {
1321562d9baeSSagar Arun Kamble 	struct intel_rps *rps = &dev_priv->gt_pm.rps;
1322562d9baeSSagar Arun Kamble 	const struct intel_rps_ei *prev = &rps->ei;
132343cf3bf0SChris Wilson 	struct intel_rps_ei now;
132443cf3bf0SChris Wilson 	u32 events = 0;
132543cf3bf0SChris Wilson 
1326e0e8c7cbSChris Wilson 	if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0)
132743cf3bf0SChris Wilson 		return 0;
132843cf3bf0SChris Wilson 
132943cf3bf0SChris Wilson 	vlv_c0_read(dev_priv, &now);
133031685c25SDeepak S 
1331679cb6c1SMika Kuoppala 	if (prev->ktime) {
1332e0e8c7cbSChris Wilson 		u64 time, c0;
1333569884e3SChris Wilson 		u32 render, media;
1334e0e8c7cbSChris Wilson 
1335679cb6c1SMika Kuoppala 		time = ktime_us_delta(now.ktime, prev->ktime);
13368f68d591SChris Wilson 
1337e0e8c7cbSChris Wilson 		time *= dev_priv->czclk_freq;
1338e0e8c7cbSChris Wilson 
1339e0e8c7cbSChris Wilson 		/* Workload can be split between render + media,
1340e0e8c7cbSChris Wilson 		 * e.g. SwapBuffers being blitted in X after being rendered in
1341e0e8c7cbSChris Wilson 		 * mesa. To account for this we need to combine both engines
1342e0e8c7cbSChris Wilson 		 * into our activity counter.
1343e0e8c7cbSChris Wilson 		 */
1344569884e3SChris Wilson 		render = now.render_c0 - prev->render_c0;
1345569884e3SChris Wilson 		media = now.media_c0 - prev->media_c0;
1346569884e3SChris Wilson 		c0 = max(render, media);
13476b7f6aa7SMika Kuoppala 		c0 *= 1000 * 100 << 8; /* to usecs and scale to threshold% */
1348e0e8c7cbSChris Wilson 
134960548c55SChris Wilson 		if (c0 > time * rps->power.up_threshold)
1350e0e8c7cbSChris Wilson 			events = GEN6_PM_RP_UP_THRESHOLD;
135160548c55SChris Wilson 		else if (c0 < time * rps->power.down_threshold)
1352e0e8c7cbSChris Wilson 			events = GEN6_PM_RP_DOWN_THRESHOLD;
135331685c25SDeepak S 	}
135431685c25SDeepak S 
1355562d9baeSSagar Arun Kamble 	rps->ei = now;
135643cf3bf0SChris Wilson 	return events;
135731685c25SDeepak S }
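/*
 * In other words, the comparison above checks the fraction of czclk
 * cycles the busier of the render/media engines spent in C0 against
 * the RPS up/down thresholds expressed as a percentage. The "<< 8"
 * scaling suggests the C0 residency counters tick once per 256 czclk
 * cycles, though that is inferred from the arithmetic rather than
 * stated anywhere here.
 */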
135831685c25SDeepak S 
13594912d041SBen Widawsky static void gen6_pm_rps_work(struct work_struct *work)
13603b8d8d91SJesse Barnes {
13612d1013ddSJani Nikula 	struct drm_i915_private *dev_priv =
1362562d9baeSSagar Arun Kamble 		container_of(work, struct drm_i915_private, gt_pm.rps.work);
1363562d9baeSSagar Arun Kamble 	struct intel_rps *rps = &dev_priv->gt_pm.rps;
13647c0a16adSChris Wilson 	bool client_boost = false;
13658d3afd7dSChris Wilson 	int new_delay, adj, min, max;
13667c0a16adSChris Wilson 	u32 pm_iir = 0;
13673b8d8d91SJesse Barnes 
136859cdb63dSDaniel Vetter 	spin_lock_irq(&dev_priv->irq_lock);
1369562d9baeSSagar Arun Kamble 	if (rps->interrupts_enabled) {
1370562d9baeSSagar Arun Kamble 		pm_iir = fetch_and_zero(&rps->pm_iir);
1371562d9baeSSagar Arun Kamble 		client_boost = atomic_read(&rps->num_waiters);
1372d4d70aa5SImre Deak 	}
137359cdb63dSDaniel Vetter 	spin_unlock_irq(&dev_priv->irq_lock);
13744912d041SBen Widawsky 
137560611c13SPaulo Zanoni 	/* Make sure we didn't queue anything we're not going to process. */
1376a6706b45SDeepak S 	WARN_ON(pm_iir & ~dev_priv->pm_rps_events);
13778d3afd7dSChris Wilson 	if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
13787c0a16adSChris Wilson 		goto out;
13793b8d8d91SJesse Barnes 
1380ebb5eb7dSChris Wilson 	mutex_lock(&rps->lock);
13817b9e0ae6SChris Wilson 
138243cf3bf0SChris Wilson 	pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);
138343cf3bf0SChris Wilson 
1384562d9baeSSagar Arun Kamble 	adj = rps->last_adj;
1385562d9baeSSagar Arun Kamble 	new_delay = rps->cur_freq;
1386562d9baeSSagar Arun Kamble 	min = rps->min_freq_softlimit;
1387562d9baeSSagar Arun Kamble 	max = rps->max_freq_softlimit;
13887b92c1bdSChris Wilson 	if (client_boost)
1389562d9baeSSagar Arun Kamble 		max = rps->max_freq;
1390562d9baeSSagar Arun Kamble 	if (client_boost && new_delay < rps->boost_freq) {
1391562d9baeSSagar Arun Kamble 		new_delay = rps->boost_freq;
13928d3afd7dSChris Wilson 		adj = 0;
13938d3afd7dSChris Wilson 	} else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
1394dd75fdc8SChris Wilson 		if (adj > 0)
1395dd75fdc8SChris Wilson 			adj *= 2;
1396edcf284bSChris Wilson 		else /* CHV needs even encode values */
1397edcf284bSChris Wilson 			adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;
13987e79a683SSagar Arun Kamble 
1399562d9baeSSagar Arun Kamble 		if (new_delay >= rps->max_freq_softlimit)
14007e79a683SSagar Arun Kamble 			adj = 0;
14017b92c1bdSChris Wilson 	} else if (client_boost) {
1402f5a4c67dSChris Wilson 		adj = 0;
1403dd75fdc8SChris Wilson 	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
1404562d9baeSSagar Arun Kamble 		if (rps->cur_freq > rps->efficient_freq)
1405562d9baeSSagar Arun Kamble 			new_delay = rps->efficient_freq;
1406562d9baeSSagar Arun Kamble 		else if (rps->cur_freq > rps->min_freq_softlimit)
1407562d9baeSSagar Arun Kamble 			new_delay = rps->min_freq_softlimit;
1408dd75fdc8SChris Wilson 		adj = 0;
1409dd75fdc8SChris Wilson 	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
1410dd75fdc8SChris Wilson 		if (adj < 0)
1411dd75fdc8SChris Wilson 			adj *= 2;
1412edcf284bSChris Wilson 		else /* CHV needs even encode values */
1413edcf284bSChris Wilson 			adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;
14147e79a683SSagar Arun Kamble 
1415562d9baeSSagar Arun Kamble 		if (new_delay <= rps->min_freq_softlimit)
14167e79a683SSagar Arun Kamble 			adj = 0;
1417dd75fdc8SChris Wilson 	} else { /* unknown event */
1418edcf284bSChris Wilson 		adj = 0;
1419dd75fdc8SChris Wilson 	}
14203b8d8d91SJesse Barnes 
1421562d9baeSSagar Arun Kamble 	rps->last_adj = adj;
1422edcf284bSChris Wilson 
14232a8862d2SChris Wilson 	/*
14242a8862d2SChris Wilson 	 * Limit deboosting and boosting to keep ourselves at the extremes
14252a8862d2SChris Wilson 	 * when in the respective power modes (i.e. slowly decrease frequencies
14262a8862d2SChris Wilson 	 * while in the HIGH_POWER zone and slowly increase frequencies while
14272a8862d2SChris Wilson 	 * in the LOW_POWER zone). On idle, we will hit the timeout and drop
14282a8862d2SChris Wilson 	 * to the next level quickly, and conversely if busy we expect to
14292a8862d2SChris Wilson 	 * hit a waitboost and rapidly switch into max power.
14302a8862d2SChris Wilson 	 */
14312a8862d2SChris Wilson 	if ((adj < 0 && rps->power.mode == HIGH_POWER) ||
14322a8862d2SChris Wilson 	    (adj > 0 && rps->power.mode == LOW_POWER))
14332a8862d2SChris Wilson 		rps->last_adj = 0;
14342a8862d2SChris Wilson 
143579249636SBen Widawsky 	/* sysfs frequency interfaces may have snuck in while servicing the
143679249636SBen Widawsky 	 * interrupt
143779249636SBen Widawsky 	 */
1438edcf284bSChris Wilson 	new_delay += adj;
14398d3afd7dSChris Wilson 	new_delay = clamp_t(int, new_delay, min, max);
144027544369SDeepak S 
14419fcee2f7SChris Wilson 	if (intel_set_rps(dev_priv, new_delay)) {
14429fcee2f7SChris Wilson 		DRM_DEBUG_DRIVER("Failed to set new GPU frequency\n");
1443562d9baeSSagar Arun Kamble 		rps->last_adj = 0;
14449fcee2f7SChris Wilson 	}
14453b8d8d91SJesse Barnes 
1446ebb5eb7dSChris Wilson 	mutex_unlock(&rps->lock);
14477c0a16adSChris Wilson 
14487c0a16adSChris Wilson out:
14497c0a16adSChris Wilson 	/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
14507c0a16adSChris Wilson 	spin_lock_irq(&dev_priv->irq_lock);
1451562d9baeSSagar Arun Kamble 	if (rps->interrupts_enabled)
145258820574STvrtko Ursulin 		gen6_unmask_pm_irq(&dev_priv->gt, dev_priv->pm_rps_events);
14537c0a16adSChris Wilson 	spin_unlock_irq(&dev_priv->irq_lock);
14543b8d8d91SJesse Barnes }
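/*
 * Illustrative behaviour of the adjustment in gen6_pm_rps_work()
 * above: consecutive GEN6_PM_RP_UP_THRESHOLD interrupts double the
 * step (+1, +2, +4, ...; CHV starts at +2 to keep the values even)
 * until the softlimit is reached, DOWN_THRESHOLD interrupts do the
 * same in the other direction, and a DOWN_TIMEOUT or a client boost
 * resets last_adj to 0.
 */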
14553b8d8d91SJesse Barnes 
1456e3689190SBen Widawsky 
1457e3689190SBen Widawsky /**
1458e3689190SBen Widawsky  * ivybridge_parity_work - Workqueue called when a parity error interrupt
1459e3689190SBen Widawsky  * occurred.
1460e3689190SBen Widawsky  * @work: workqueue struct
1461e3689190SBen Widawsky  *
1462e3689190SBen Widawsky  * Doesn't actually do anything except notify userspace. As a consequence of
1463e3689190SBen Widawsky  * this event, userspace should try to remap the bad rows, since statistically
1464e3689190SBen Widawsky  * the same row is more likely to go bad again.
1465e3689190SBen Widawsky  */
1466e3689190SBen Widawsky static void ivybridge_parity_work(struct work_struct *work)
1467e3689190SBen Widawsky {
14682d1013ddSJani Nikula 	struct drm_i915_private *dev_priv =
1469cefcff8fSJoonas Lahtinen 		container_of(work, typeof(*dev_priv), l3_parity.error_work);
1470e3689190SBen Widawsky 	u32 error_status, row, bank, subbank;
147135a85ac6SBen Widawsky 	char *parity_event[6];
1472a9c287c9SJani Nikula 	u32 misccpctl;
1473a9c287c9SJani Nikula 	u8 slice = 0;
1474e3689190SBen Widawsky 
1475e3689190SBen Widawsky 	/* We must turn off DOP level clock gating to access the L3 registers.
1476e3689190SBen Widawsky 	 * In order to prevent a get/put style interface, acquire struct mutex
1477e3689190SBen Widawsky 	 * any time we access those registers.
1478e3689190SBen Widawsky 	 */
147991c8a326SChris Wilson 	mutex_lock(&dev_priv->drm.struct_mutex);
1480e3689190SBen Widawsky 
148135a85ac6SBen Widawsky 	/* If we've screwed up tracking, just let the interrupt fire again */
148235a85ac6SBen Widawsky 	if (WARN_ON(!dev_priv->l3_parity.which_slice))
148335a85ac6SBen Widawsky 		goto out;
148435a85ac6SBen Widawsky 
1485e3689190SBen Widawsky 	misccpctl = I915_READ(GEN7_MISCCPCTL);
1486e3689190SBen Widawsky 	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
1487e3689190SBen Widawsky 	POSTING_READ(GEN7_MISCCPCTL);
1488e3689190SBen Widawsky 
148935a85ac6SBen Widawsky 	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
1490f0f59a00SVille Syrjälä 		i915_reg_t reg;
149135a85ac6SBen Widawsky 
149235a85ac6SBen Widawsky 		slice--;
14932d1fe073SJoonas Lahtinen 		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv)))
149435a85ac6SBen Widawsky 			break;
149535a85ac6SBen Widawsky 
149635a85ac6SBen Widawsky 		dev_priv->l3_parity.which_slice &= ~(1<<slice);
149735a85ac6SBen Widawsky 
14986fa1c5f1SVille Syrjälä 		reg = GEN7_L3CDERRST1(slice);
149935a85ac6SBen Widawsky 
150035a85ac6SBen Widawsky 		error_status = I915_READ(reg);
1501e3689190SBen Widawsky 		row = GEN7_PARITY_ERROR_ROW(error_status);
1502e3689190SBen Widawsky 		bank = GEN7_PARITY_ERROR_BANK(error_status);
1503e3689190SBen Widawsky 		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
1504e3689190SBen Widawsky 
150535a85ac6SBen Widawsky 		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
150635a85ac6SBen Widawsky 		POSTING_READ(reg);
1507e3689190SBen Widawsky 
1508cce723edSBen Widawsky 		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
1509e3689190SBen Widawsky 		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
1510e3689190SBen Widawsky 		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
1511e3689190SBen Widawsky 		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
151235a85ac6SBen Widawsky 		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
151335a85ac6SBen Widawsky 		parity_event[5] = NULL;
1514e3689190SBen Widawsky 
151591c8a326SChris Wilson 		kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj,
1516e3689190SBen Widawsky 				   KOBJ_CHANGE, parity_event);
1517e3689190SBen Widawsky 
151835a85ac6SBen Widawsky 		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
151935a85ac6SBen Widawsky 			  slice, row, bank, subbank);
1520e3689190SBen Widawsky 
152135a85ac6SBen Widawsky 		kfree(parity_event[4]);
1522e3689190SBen Widawsky 		kfree(parity_event[3]);
1523e3689190SBen Widawsky 		kfree(parity_event[2]);
1524e3689190SBen Widawsky 		kfree(parity_event[1]);
1525e3689190SBen Widawsky 	}
1526e3689190SBen Widawsky 
152735a85ac6SBen Widawsky 	I915_WRITE(GEN7_MISCCPCTL, misccpctl);
152835a85ac6SBen Widawsky 
152935a85ac6SBen Widawsky out:
153035a85ac6SBen Widawsky 	WARN_ON(dev_priv->l3_parity.which_slice);
15314cb21832SDaniel Vetter 	spin_lock_irq(&dev_priv->irq_lock);
15322d1fe073SJoonas Lahtinen 	gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
15334cb21832SDaniel Vetter 	spin_unlock_irq(&dev_priv->irq_lock);
153435a85ac6SBen Widawsky 
153591c8a326SChris Wilson 	mutex_unlock(&dev_priv->drm.struct_mutex);
153635a85ac6SBen Widawsky }
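/*
 * The net effect of the work above, as seen from userspace, is a
 * KOBJ_CHANGE uevent on the primary DRM device carrying the
 * I915_L3_PARITY_UEVENT marker plus ROW=, BANK=, SUBBANK= and SLICE=
 * values identifying the failing L3 location, one event per affected
 * slice.
 */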
153735a85ac6SBen Widawsky 
1538261e40b8SVille Syrjälä static void ivybridge_parity_error_irq_handler(struct drm_i915_private *dev_priv,
1539261e40b8SVille Syrjälä 					       u32 iir)
1540e3689190SBen Widawsky {
1541261e40b8SVille Syrjälä 	if (!HAS_L3_DPF(dev_priv))
1542e3689190SBen Widawsky 		return;
1543e3689190SBen Widawsky 
1544d0ecd7e2SDaniel Vetter 	spin_lock(&dev_priv->irq_lock);
1545261e40b8SVille Syrjälä 	gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
1546d0ecd7e2SDaniel Vetter 	spin_unlock(&dev_priv->irq_lock);
1547e3689190SBen Widawsky 
1548261e40b8SVille Syrjälä 	iir &= GT_PARITY_ERROR(dev_priv);
154935a85ac6SBen Widawsky 	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
155035a85ac6SBen Widawsky 		dev_priv->l3_parity.which_slice |= 1 << 1;
155135a85ac6SBen Widawsky 
155235a85ac6SBen Widawsky 	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
155335a85ac6SBen Widawsky 		dev_priv->l3_parity.which_slice |= 1 << 0;
155435a85ac6SBen Widawsky 
1555a4da4fa4SDaniel Vetter 	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
1556e3689190SBen Widawsky }
1557e3689190SBen Widawsky 
1558261e40b8SVille Syrjälä static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv,
1559f1af8fc1SPaulo Zanoni 			       u32 gt_iir)
1560f1af8fc1SPaulo Zanoni {
1561f8973c21SChris Wilson 	if (gt_iir & GT_RENDER_USER_INTERRUPT)
15628a68d464SChris Wilson 		intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);
1563f1af8fc1SPaulo Zanoni 	if (gt_iir & ILK_BSD_USER_INTERRUPT)
15648a68d464SChris Wilson 		intel_engine_breadcrumbs_irq(dev_priv->engine[VCS0]);
1565f1af8fc1SPaulo Zanoni }
1566f1af8fc1SPaulo Zanoni 
1567261e40b8SVille Syrjälä static void snb_gt_irq_handler(struct drm_i915_private *dev_priv,
1568e7b4c6b1SDaniel Vetter 			       u32 gt_iir)
1569e7b4c6b1SDaniel Vetter {
1570f8973c21SChris Wilson 	if (gt_iir & GT_RENDER_USER_INTERRUPT)
15718a68d464SChris Wilson 		intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);
1572cc609d5dSBen Widawsky 	if (gt_iir & GT_BSD_USER_INTERRUPT)
15738a68d464SChris Wilson 		intel_engine_breadcrumbs_irq(dev_priv->engine[VCS0]);
1574cc609d5dSBen Widawsky 	if (gt_iir & GT_BLT_USER_INTERRUPT)
15758a68d464SChris Wilson 		intel_engine_breadcrumbs_irq(dev_priv->engine[BCS0]);
1576e7b4c6b1SDaniel Vetter 
1577cc609d5dSBen Widawsky 	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
1578cc609d5dSBen Widawsky 		      GT_BSD_CS_ERROR_INTERRUPT |
1579aaecdf61SDaniel Vetter 		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
1580aaecdf61SDaniel Vetter 		DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);
1581e3689190SBen Widawsky 
1582261e40b8SVille Syrjälä 	if (gt_iir & GT_PARITY_ERROR(dev_priv))
1583261e40b8SVille Syrjälä 		ivybridge_parity_error_irq_handler(dev_priv, gt_iir);
1584e7b4c6b1SDaniel Vetter }
1585e7b4c6b1SDaniel Vetter 
15865d3d69d5SChris Wilson static void
158751f6b0f9SChris Wilson gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir)
1588fbcc1a0cSNick Hoath {
158931de7350SChris Wilson 	bool tasklet = false;
1590f747026cSChris Wilson 
1591fd8526e5SChris Wilson 	if (iir & GT_CONTEXT_SWITCH_INTERRUPT)
15928ea397faSChris Wilson 		tasklet = true;
159331de7350SChris Wilson 
159451f6b0f9SChris Wilson 	if (iir & GT_RENDER_USER_INTERRUPT) {
159552c0fdb2SChris Wilson 		intel_engine_breadcrumbs_irq(engine);
15964c6ce5c9SChris Wilson 		tasklet |= intel_engine_needs_breadcrumb_tasklet(engine);
159731de7350SChris Wilson 	}
159831de7350SChris Wilson 
159931de7350SChris Wilson 	if (tasklet)
1600fd8526e5SChris Wilson 		tasklet_hi_schedule(&engine->execlists.tasklet);
1601fbcc1a0cSNick Hoath }
1602fbcc1a0cSNick Hoath 
16032e4a5b25SChris Wilson static void gen8_gt_irq_ack(struct drm_i915_private *i915,
160455ef72f2SChris Wilson 			    u32 master_ctl, u32 gt_iir[4])
1605abd58f01SBen Widawsky {
160625286aacSDaniele Ceraolo Spurio 	void __iomem * const regs = i915->uncore.regs;
16072e4a5b25SChris Wilson 
1608f0fd96f5SChris Wilson #define GEN8_GT_IRQS (GEN8_GT_RCS_IRQ | \
1609f0fd96f5SChris Wilson 		      GEN8_GT_BCS_IRQ | \
16108a68d464SChris Wilson 		      GEN8_GT_VCS0_IRQ | \
1611f0fd96f5SChris Wilson 		      GEN8_GT_VCS1_IRQ | \
1612f0fd96f5SChris Wilson 		      GEN8_GT_VECS_IRQ | \
1613f0fd96f5SChris Wilson 		      GEN8_GT_PM_IRQ | \
1614f0fd96f5SChris Wilson 		      GEN8_GT_GUC_IRQ)
1615f0fd96f5SChris Wilson 
1616abd58f01SBen Widawsky 	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
16172e4a5b25SChris Wilson 		gt_iir[0] = raw_reg_read(regs, GEN8_GT_IIR(0));
16182e4a5b25SChris Wilson 		if (likely(gt_iir[0]))
16192e4a5b25SChris Wilson 			raw_reg_write(regs, GEN8_GT_IIR(0), gt_iir[0]);
1620abd58f01SBen Widawsky 	}
1621abd58f01SBen Widawsky 
16228a68d464SChris Wilson 	if (master_ctl & (GEN8_GT_VCS0_IRQ | GEN8_GT_VCS1_IRQ)) {
16232e4a5b25SChris Wilson 		gt_iir[1] = raw_reg_read(regs, GEN8_GT_IIR(1));
16242e4a5b25SChris Wilson 		if (likely(gt_iir[1]))
16252e4a5b25SChris Wilson 			raw_reg_write(regs, GEN8_GT_IIR(1), gt_iir[1]);
162674cdb337SChris Wilson 	}
162774cdb337SChris Wilson 
162826705e20SSagar Arun Kamble 	if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
16292e4a5b25SChris Wilson 		gt_iir[2] = raw_reg_read(regs, GEN8_GT_IIR(2));
1630f4de7794SChris Wilson 		if (likely(gt_iir[2]))
1631f4de7794SChris Wilson 			raw_reg_write(regs, GEN8_GT_IIR(2), gt_iir[2]);
16320961021aSBen Widawsky 	}
16332e4a5b25SChris Wilson 
16342e4a5b25SChris Wilson 	if (master_ctl & GEN8_GT_VECS_IRQ) {
16352e4a5b25SChris Wilson 		gt_iir[3] = raw_reg_read(regs, GEN8_GT_IIR(3));
16362e4a5b25SChris Wilson 		if (likely(gt_iir[3]))
16372e4a5b25SChris Wilson 			raw_reg_write(regs, GEN8_GT_IIR(3), gt_iir[3]);
163855ef72f2SChris Wilson 	}
1639abd58f01SBen Widawsky }
1640abd58f01SBen Widawsky 
16412e4a5b25SChris Wilson static void gen8_gt_irq_handler(struct drm_i915_private *i915,
1642f0fd96f5SChris Wilson 				u32 master_ctl, u32 gt_iir[4])
1643e30e251aSVille Syrjälä {
1644f0fd96f5SChris Wilson 	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
16458a68d464SChris Wilson 		gen8_cs_irq_handler(i915->engine[RCS0],
164651f6b0f9SChris Wilson 				    gt_iir[0] >> GEN8_RCS_IRQ_SHIFT);
16478a68d464SChris Wilson 		gen8_cs_irq_handler(i915->engine[BCS0],
164851f6b0f9SChris Wilson 				    gt_iir[0] >> GEN8_BCS_IRQ_SHIFT);
1649e30e251aSVille Syrjälä 	}
1650e30e251aSVille Syrjälä 
16518a68d464SChris Wilson 	if (master_ctl & (GEN8_GT_VCS0_IRQ | GEN8_GT_VCS1_IRQ)) {
16528a68d464SChris Wilson 		gen8_cs_irq_handler(i915->engine[VCS0],
16538a68d464SChris Wilson 				    gt_iir[1] >> GEN8_VCS0_IRQ_SHIFT);
16548a68d464SChris Wilson 		gen8_cs_irq_handler(i915->engine[VCS1],
165551f6b0f9SChris Wilson 				    gt_iir[1] >> GEN8_VCS1_IRQ_SHIFT);
1656e30e251aSVille Syrjälä 	}
1657e30e251aSVille Syrjälä 
1658f0fd96f5SChris Wilson 	if (master_ctl & GEN8_GT_VECS_IRQ) {
16598a68d464SChris Wilson 		gen8_cs_irq_handler(i915->engine[VECS0],
166051f6b0f9SChris Wilson 				    gt_iir[3] >> GEN8_VECS_IRQ_SHIFT);
1661f0fd96f5SChris Wilson 	}
1662e30e251aSVille Syrjälä 
1663f0fd96f5SChris Wilson 	if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
16642e4a5b25SChris Wilson 		gen6_rps_irq_handler(i915, gt_iir[2]);
16658b5689d7SDaniele Ceraolo Spurio 		guc_irq_handler(&i915->gt.uc.guc, gt_iir[2] >> 16);
1666e30e251aSVille Syrjälä 	}
1667f0fd96f5SChris Wilson }
1668e30e251aSVille Syrjälä 
1669af92058fSVille Syrjälä static bool gen11_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1670121e758eSDhinakaran Pandiyan {
1671af92058fSVille Syrjälä 	switch (pin) {
1672af92058fSVille Syrjälä 	case HPD_PORT_C:
1673121e758eSDhinakaran Pandiyan 		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC1);
1674af92058fSVille Syrjälä 	case HPD_PORT_D:
1675121e758eSDhinakaran Pandiyan 		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC2);
1676af92058fSVille Syrjälä 	case HPD_PORT_E:
1677121e758eSDhinakaran Pandiyan 		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC3);
1678af92058fSVille Syrjälä 	case HPD_PORT_F:
1679121e758eSDhinakaran Pandiyan 		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC4);
1680121e758eSDhinakaran Pandiyan 	default:
1681121e758eSDhinakaran Pandiyan 		return false;
1682121e758eSDhinakaran Pandiyan 	}
1683121e758eSDhinakaran Pandiyan }
1684121e758eSDhinakaran Pandiyan 
1685af92058fSVille Syrjälä static bool bxt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
168663c88d22SImre Deak {
1687af92058fSVille Syrjälä 	switch (pin) {
1688af92058fSVille Syrjälä 	case HPD_PORT_A:
1689195baa06SVille Syrjälä 		return val & PORTA_HOTPLUG_LONG_DETECT;
1690af92058fSVille Syrjälä 	case HPD_PORT_B:
169163c88d22SImre Deak 		return val & PORTB_HOTPLUG_LONG_DETECT;
1692af92058fSVille Syrjälä 	case HPD_PORT_C:
169363c88d22SImre Deak 		return val & PORTC_HOTPLUG_LONG_DETECT;
169463c88d22SImre Deak 	default:
169563c88d22SImre Deak 		return false;
169663c88d22SImre Deak 	}
169763c88d22SImre Deak }
169863c88d22SImre Deak 
1699af92058fSVille Syrjälä static bool icp_ddi_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
170031604222SAnusha Srivatsa {
1701af92058fSVille Syrjälä 	switch (pin) {
1702af92058fSVille Syrjälä 	case HPD_PORT_A:
170331604222SAnusha Srivatsa 		return val & ICP_DDIA_HPD_LONG_DETECT;
1704af92058fSVille Syrjälä 	case HPD_PORT_B:
170531604222SAnusha Srivatsa 		return val & ICP_DDIB_HPD_LONG_DETECT;
170631604222SAnusha Srivatsa 	default:
170731604222SAnusha Srivatsa 		return false;
170831604222SAnusha Srivatsa 	}
170931604222SAnusha Srivatsa }
171031604222SAnusha Srivatsa 
1711af92058fSVille Syrjälä static bool icp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
171231604222SAnusha Srivatsa {
1713af92058fSVille Syrjälä 	switch (pin) {
1714af92058fSVille Syrjälä 	case HPD_PORT_C:
171531604222SAnusha Srivatsa 		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC1);
1716af92058fSVille Syrjälä 	case HPD_PORT_D:
171731604222SAnusha Srivatsa 		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC2);
1718af92058fSVille Syrjälä 	case HPD_PORT_E:
171931604222SAnusha Srivatsa 		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC3);
1720af92058fSVille Syrjälä 	case HPD_PORT_F:
172131604222SAnusha Srivatsa 		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC4);
172231604222SAnusha Srivatsa 	default:
172331604222SAnusha Srivatsa 		return false;
172431604222SAnusha Srivatsa 	}
172531604222SAnusha Srivatsa }
172631604222SAnusha Srivatsa 
1727af92058fSVille Syrjälä static bool spt_port_hotplug2_long_detect(enum hpd_pin pin, u32 val)
17286dbf30ceSVille Syrjälä {
1729af92058fSVille Syrjälä 	switch (pin) {
1730af92058fSVille Syrjälä 	case HPD_PORT_E:
17316dbf30ceSVille Syrjälä 		return val & PORTE_HOTPLUG_LONG_DETECT;
17326dbf30ceSVille Syrjälä 	default:
17336dbf30ceSVille Syrjälä 		return false;
17346dbf30ceSVille Syrjälä 	}
17356dbf30ceSVille Syrjälä }
17366dbf30ceSVille Syrjälä 
1737af92058fSVille Syrjälä static bool spt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
173874c0b395SVille Syrjälä {
1739af92058fSVille Syrjälä 	switch (pin) {
1740af92058fSVille Syrjälä 	case HPD_PORT_A:
174174c0b395SVille Syrjälä 		return val & PORTA_HOTPLUG_LONG_DETECT;
1742af92058fSVille Syrjälä 	case HPD_PORT_B:
174374c0b395SVille Syrjälä 		return val & PORTB_HOTPLUG_LONG_DETECT;
1744af92058fSVille Syrjälä 	case HPD_PORT_C:
174574c0b395SVille Syrjälä 		return val & PORTC_HOTPLUG_LONG_DETECT;
1746af92058fSVille Syrjälä 	case HPD_PORT_D:
174774c0b395SVille Syrjälä 		return val & PORTD_HOTPLUG_LONG_DETECT;
174874c0b395SVille Syrjälä 	default:
174974c0b395SVille Syrjälä 		return false;
175074c0b395SVille Syrjälä 	}
175174c0b395SVille Syrjälä }
175274c0b395SVille Syrjälä 
1753af92058fSVille Syrjälä static bool ilk_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1754e4ce95aaSVille Syrjälä {
1755af92058fSVille Syrjälä 	switch (pin) {
1756af92058fSVille Syrjälä 	case HPD_PORT_A:
1757e4ce95aaSVille Syrjälä 		return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
1758e4ce95aaSVille Syrjälä 	default:
1759e4ce95aaSVille Syrjälä 		return false;
1760e4ce95aaSVille Syrjälä 	}
1761e4ce95aaSVille Syrjälä }
1762e4ce95aaSVille Syrjälä 
1763af92058fSVille Syrjälä static bool pch_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
176413cf5504SDave Airlie {
1765af92058fSVille Syrjälä 	switch (pin) {
1766af92058fSVille Syrjälä 	case HPD_PORT_B:
1767676574dfSJani Nikula 		return val & PORTB_HOTPLUG_LONG_DETECT;
1768af92058fSVille Syrjälä 	case HPD_PORT_C:
1769676574dfSJani Nikula 		return val & PORTC_HOTPLUG_LONG_DETECT;
1770af92058fSVille Syrjälä 	case HPD_PORT_D:
1771676574dfSJani Nikula 		return val & PORTD_HOTPLUG_LONG_DETECT;
1772676574dfSJani Nikula 	default:
1773676574dfSJani Nikula 		return false;
177413cf5504SDave Airlie 	}
177513cf5504SDave Airlie }
177613cf5504SDave Airlie 
1777af92058fSVille Syrjälä static bool i9xx_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
177813cf5504SDave Airlie {
1779af92058fSVille Syrjälä 	switch (pin) {
1780af92058fSVille Syrjälä 	case HPD_PORT_B:
1781676574dfSJani Nikula 		return val & PORTB_HOTPLUG_INT_LONG_PULSE;
1782af92058fSVille Syrjälä 	case HPD_PORT_C:
1783676574dfSJani Nikula 		return val & PORTC_HOTPLUG_INT_LONG_PULSE;
1784af92058fSVille Syrjälä 	case HPD_PORT_D:
1785676574dfSJani Nikula 		return val & PORTD_HOTPLUG_INT_LONG_PULSE;
1786676574dfSJani Nikula 	default:
1787676574dfSJani Nikula 		return false;
178813cf5504SDave Airlie 	}
178913cf5504SDave Airlie }
179013cf5504SDave Airlie 
179142db67d6SVille Syrjälä /*
179242db67d6SVille Syrjälä  * Get a bit mask of pins that have triggered, and which ones may be long.
179342db67d6SVille Syrjälä  * This can be called multiple times with the same masks to accumulate
179442db67d6SVille Syrjälä  * hotplug detection results from several registers.
179542db67d6SVille Syrjälä  *
179642db67d6SVille Syrjälä  * Note that the caller is expected to zero out the masks initially.
179742db67d6SVille Syrjälä  */
1798cf53902fSRodrigo Vivi static void intel_get_hpd_pins(struct drm_i915_private *dev_priv,
1799cf53902fSRodrigo Vivi 			       u32 *pin_mask, u32 *long_mask,
18008c841e57SJani Nikula 			       u32 hotplug_trigger, u32 dig_hotplug_reg,
1801fd63e2a9SImre Deak 			       const u32 hpd[HPD_NUM_PINS],
1802af92058fSVille Syrjälä 			       bool long_pulse_detect(enum hpd_pin pin, u32 val))
1803676574dfSJani Nikula {
1804e9be2850SVille Syrjälä 	enum hpd_pin pin;
1805676574dfSJani Nikula 
1806e9be2850SVille Syrjälä 	for_each_hpd_pin(pin) {
1807e9be2850SVille Syrjälä 		if ((hpd[pin] & hotplug_trigger) == 0)
18088c841e57SJani Nikula 			continue;
18098c841e57SJani Nikula 
1810e9be2850SVille Syrjälä 		*pin_mask |= BIT(pin);
1811676574dfSJani Nikula 
1812af92058fSVille Syrjälä 		if (long_pulse_detect(pin, dig_hotplug_reg))
1813e9be2850SVille Syrjälä 			*long_mask |= BIT(pin);
1814676574dfSJani Nikula 	}
1815676574dfSJani Nikula 
1816f88f0478SVille Syrjälä 	DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x, long 0x%08x\n",
1817f88f0478SVille Syrjälä 			 hotplug_trigger, dig_hotplug_reg, *pin_mask, *long_mask);
1818676574dfSJani Nikula 
1819676574dfSJani Nikula }
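/*
 * Minimal sketch of a typical intel_get_hpd_pins() caller (names as
 * used by the i9xx/g4x hotplug paths in this file; treat the exact
 * trigger values as illustrative):
 *
 *	u32 pin_mask = 0, long_mask = 0;
 *
 *	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
 *			   hotplug_trigger, hotplug_trigger,
 *			   hpd_status_i915, i9xx_port_hotplug_long_detect);
 *	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
 */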
1820676574dfSJani Nikula 
182191d14251STvrtko Ursulin static void gmbus_irq_handler(struct drm_i915_private *dev_priv)
1822515ac2bbSDaniel Vetter {
182328c70f16SDaniel Vetter 	wake_up_all(&dev_priv->gmbus_wait_queue);
1824515ac2bbSDaniel Vetter }
1825515ac2bbSDaniel Vetter 
182691d14251STvrtko Ursulin static void dp_aux_irq_handler(struct drm_i915_private *dev_priv)
1827ce99c256SDaniel Vetter {
18289ee32feaSDaniel Vetter 	wake_up_all(&dev_priv->gmbus_wait_queue);
1829ce99c256SDaniel Vetter }
1830ce99c256SDaniel Vetter 
18318bf1e9f1SShuang He #if defined(CONFIG_DEBUG_FS)
183291d14251STvrtko Ursulin static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
183391d14251STvrtko Ursulin 					 enum pipe pipe,
1834a9c287c9SJani Nikula 					 u32 crc0, u32 crc1,
1835a9c287c9SJani Nikula 					 u32 crc2, u32 crc3,
1836a9c287c9SJani Nikula 					 u32 crc4)
18378bf1e9f1SShuang He {
18388bf1e9f1SShuang He 	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
18398c6b709dSTomeu Vizoso 	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
18405cee6c45SVille Syrjälä 	u32 crcs[5] = { crc0, crc1, crc2, crc3, crc4 };
18415cee6c45SVille Syrjälä 
18425cee6c45SVille Syrjälä 	trace_intel_pipe_crc(crtc, crcs);
1843b2c88f5bSDamien Lespiau 
1844d538bbdfSDamien Lespiau 	spin_lock(&pipe_crc->lock);
18458c6b709dSTomeu Vizoso 	/*
18468c6b709dSTomeu Vizoso 	 * For some not yet identified reason, the first CRC is
18478c6b709dSTomeu Vizoso 	 * bonkers. So let's just wait for the next vblank and read
18488c6b709dSTomeu Vizoso 	 * out the buggy result.
18498c6b709dSTomeu Vizoso 	 *
1850163e8aecSRodrigo Vivi 	 * On GEN8+ sometimes the second CRC is bonkers as well, so
18518c6b709dSTomeu Vizoso 	 * don't trust that one either.
18528c6b709dSTomeu Vizoso 	 */
1853033b7a23SMaarten Lankhorst 	if (pipe_crc->skipped <= 0 ||
1854163e8aecSRodrigo Vivi 	    (INTEL_GEN(dev_priv) >= 8 && pipe_crc->skipped == 1)) {
18558c6b709dSTomeu Vizoso 		pipe_crc->skipped++;
18568c6b709dSTomeu Vizoso 		spin_unlock(&pipe_crc->lock);
18578c6b709dSTomeu Vizoso 		return;
18588c6b709dSTomeu Vizoso 	}
18598c6b709dSTomeu Vizoso 	spin_unlock(&pipe_crc->lock);
18606cc42152SMaarten Lankhorst 
1861246ee524STomeu Vizoso 	drm_crtc_add_crc_entry(&crtc->base, true,
1862ca814b25SDaniel Vetter 				drm_crtc_accurate_vblank_count(&crtc->base),
1863246ee524STomeu Vizoso 				crcs);
18648c6b709dSTomeu Vizoso }
1865277de95eSDaniel Vetter #else
1866277de95eSDaniel Vetter static inline void
186791d14251STvrtko Ursulin display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
186891d14251STvrtko Ursulin 			     enum pipe pipe,
1869a9c287c9SJani Nikula 			     u32 crc0, u32 crc1,
1870a9c287c9SJani Nikula 			     u32 crc2, u32 crc3,
1871a9c287c9SJani Nikula 			     u32 crc4) {}
1872277de95eSDaniel Vetter #endif
1873eba94eb9SDaniel Vetter 
1874277de95eSDaniel Vetter 
187591d14251STvrtko Ursulin static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
187691d14251STvrtko Ursulin 				     enum pipe pipe)
18775a69b89fSDaniel Vetter {
187891d14251STvrtko Ursulin 	display_pipe_crc_irq_handler(dev_priv, pipe,
18795a69b89fSDaniel Vetter 				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
18805a69b89fSDaniel Vetter 				     0, 0, 0, 0);
18815a69b89fSDaniel Vetter }
18825a69b89fSDaniel Vetter 
188391d14251STvrtko Ursulin static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
188491d14251STvrtko Ursulin 				     enum pipe pipe)
1885eba94eb9SDaniel Vetter {
188691d14251STvrtko Ursulin 	display_pipe_crc_irq_handler(dev_priv, pipe,
1887eba94eb9SDaniel Vetter 				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1888eba94eb9SDaniel Vetter 				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
1889eba94eb9SDaniel Vetter 				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
1890eba94eb9SDaniel Vetter 				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
18918bc5e955SDaniel Vetter 				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
1892eba94eb9SDaniel Vetter }
18935b3a856bSDaniel Vetter 
189491d14251STvrtko Ursulin static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
189591d14251STvrtko Ursulin 				      enum pipe pipe)
18965b3a856bSDaniel Vetter {
1897a9c287c9SJani Nikula 	u32 res1, res2;
18980b5c5ed0SDaniel Vetter 
189991d14251STvrtko Ursulin 	if (INTEL_GEN(dev_priv) >= 3)
19000b5c5ed0SDaniel Vetter 		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
19010b5c5ed0SDaniel Vetter 	else
19020b5c5ed0SDaniel Vetter 		res1 = 0;
19030b5c5ed0SDaniel Vetter 
190491d14251STvrtko Ursulin 	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
19050b5c5ed0SDaniel Vetter 		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
19060b5c5ed0SDaniel Vetter 	else
19070b5c5ed0SDaniel Vetter 		res2 = 0;
19085b3a856bSDaniel Vetter 
190991d14251STvrtko Ursulin 	display_pipe_crc_irq_handler(dev_priv, pipe,
19100b5c5ed0SDaniel Vetter 				     I915_READ(PIPE_CRC_RES_RED(pipe)),
19110b5c5ed0SDaniel Vetter 				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
19120b5c5ed0SDaniel Vetter 				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
19130b5c5ed0SDaniel Vetter 				     res1, res2);
19145b3a856bSDaniel Vetter }
19158bf1e9f1SShuang He 
19161403c0d4SPaulo Zanoni /* The RPS events need forcewake, so we add them to a work queue and mask their
19171403c0d4SPaulo Zanoni  * IMR bits until the work is done. Other interrupts can be processed without
19181403c0d4SPaulo Zanoni  * the work queue. */
191958820574STvrtko Ursulin static void gen11_rps_irq_handler(struct intel_gt *gt, u32 pm_iir)
1920a087bafeSMika Kuoppala {
192158820574STvrtko Ursulin 	struct drm_i915_private *i915 = gt->i915;
1922a087bafeSMika Kuoppala 	struct intel_rps *rps = &i915->gt_pm.rps;
1923a087bafeSMika Kuoppala 	const u32 events = i915->pm_rps_events & pm_iir;
1924a087bafeSMika Kuoppala 
1925a087bafeSMika Kuoppala 	lockdep_assert_held(&i915->irq_lock);
1926a087bafeSMika Kuoppala 
1927a087bafeSMika Kuoppala 	if (unlikely(!events))
1928a087bafeSMika Kuoppala 		return;
1929a087bafeSMika Kuoppala 
193058820574STvrtko Ursulin 	gen6_mask_pm_irq(gt, events);
1931a087bafeSMika Kuoppala 
1932a087bafeSMika Kuoppala 	if (!rps->interrupts_enabled)
1933a087bafeSMika Kuoppala 		return;
1934a087bafeSMika Kuoppala 
1935a087bafeSMika Kuoppala 	rps->pm_iir |= events;
1936a087bafeSMika Kuoppala 	schedule_work(&rps->work);
1937a087bafeSMika Kuoppala }
1938a087bafeSMika Kuoppala 
19391403c0d4SPaulo Zanoni static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
1940baf02a1fSBen Widawsky {
1941562d9baeSSagar Arun Kamble 	struct intel_rps *rps = &dev_priv->gt_pm.rps;
1942562d9baeSSagar Arun Kamble 
1943a6706b45SDeepak S 	if (pm_iir & dev_priv->pm_rps_events) {
194459cdb63dSDaniel Vetter 		spin_lock(&dev_priv->irq_lock);
194558820574STvrtko Ursulin 		gen6_mask_pm_irq(&dev_priv->gt,
194658820574STvrtko Ursulin 				 pm_iir & dev_priv->pm_rps_events);
1947562d9baeSSagar Arun Kamble 		if (rps->interrupts_enabled) {
1948562d9baeSSagar Arun Kamble 			rps->pm_iir |= pm_iir & dev_priv->pm_rps_events;
1949562d9baeSSagar Arun Kamble 			schedule_work(&rps->work);
195041a05a3aSDaniel Vetter 		}
1951d4d70aa5SImre Deak 		spin_unlock(&dev_priv->irq_lock);
1952d4d70aa5SImre Deak 	}
1953baf02a1fSBen Widawsky 
1954bca2bf2aSPandiyan, Dhinakaran 	if (INTEL_GEN(dev_priv) >= 8)
1955c9a9a268SImre Deak 		return;
1956c9a9a268SImre Deak 
195712638c57SBen Widawsky 	if (pm_iir & PM_VEBOX_USER_INTERRUPT)
19588a68d464SChris Wilson 		intel_engine_breadcrumbs_irq(dev_priv->engine[VECS0]);
195912638c57SBen Widawsky 
1960aaecdf61SDaniel Vetter 	if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
1961aaecdf61SDaniel Vetter 		DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
196212638c57SBen Widawsky }
1963baf02a1fSBen Widawsky 
1964633023a4SDaniele Ceraolo Spurio static void guc_irq_handler(struct intel_guc *guc, u16 iir)
196526705e20SSagar Arun Kamble {
1966633023a4SDaniele Ceraolo Spurio 	if (iir & GUC_INTR_GUC2HOST)
1967633023a4SDaniele Ceraolo Spurio 		intel_guc_to_host_event_handler(guc);
196854c52a84SOscar Mateo }
196954c52a84SOscar Mateo 
197044d9241eSVille Syrjälä static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
197144d9241eSVille Syrjälä {
197244d9241eSVille Syrjälä 	enum pipe pipe;
197344d9241eSVille Syrjälä 
197444d9241eSVille Syrjälä 	for_each_pipe(dev_priv, pipe) {
197544d9241eSVille Syrjälä 		I915_WRITE(PIPESTAT(pipe),
197644d9241eSVille Syrjälä 			   PIPESTAT_INT_STATUS_MASK |
197744d9241eSVille Syrjälä 			   PIPE_FIFO_UNDERRUN_STATUS);
197844d9241eSVille Syrjälä 
197944d9241eSVille Syrjälä 		dev_priv->pipestat_irq_mask[pipe] = 0;
198044d9241eSVille Syrjälä 	}
198144d9241eSVille Syrjälä }
198244d9241eSVille Syrjälä 
1983eb64343cSVille Syrjälä static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
198491d14251STvrtko Ursulin 				  u32 iir, u32 pipe_stats[I915_MAX_PIPES])
19857e231dbeSJesse Barnes {
19867e231dbeSJesse Barnes 	int pipe;
19877e231dbeSJesse Barnes 
198858ead0d7SImre Deak 	spin_lock(&dev_priv->irq_lock);
19891ca993d2SVille Syrjälä 
19901ca993d2SVille Syrjälä 	if (!dev_priv->display_irqs_enabled) {
19911ca993d2SVille Syrjälä 		spin_unlock(&dev_priv->irq_lock);
19921ca993d2SVille Syrjälä 		return;
19931ca993d2SVille Syrjälä 	}
19941ca993d2SVille Syrjälä 
1995055e393fSDamien Lespiau 	for_each_pipe(dev_priv, pipe) {
1996f0f59a00SVille Syrjälä 		i915_reg_t reg;
19976b12ca56SVille Syrjälä 		u32 status_mask, enable_mask, iir_bit = 0;
199891d181ddSImre Deak 
1999bbb5eebfSDaniel Vetter 		/*
2000bbb5eebfSDaniel Vetter 		 * PIPESTAT bits get signalled even when the interrupt is
2001bbb5eebfSDaniel Vetter 		 * disabled with the mask bits, and some of the status bits do
2002bbb5eebfSDaniel Vetter 		 * not generate interrupts at all (like the underrun bit). Hence
2003bbb5eebfSDaniel Vetter 		 * we need to be careful that we only handle what we want to
2004bbb5eebfSDaniel Vetter 		 * handle.
2005bbb5eebfSDaniel Vetter 		 */
20060f239f4cSDaniel Vetter 
20070f239f4cSDaniel Vetter 		/* fifo underruns are filtered in the underrun handler. */
20086b12ca56SVille Syrjälä 		status_mask = PIPE_FIFO_UNDERRUN_STATUS;
2009bbb5eebfSDaniel Vetter 
2010bbb5eebfSDaniel Vetter 		switch (pipe) {
2011bbb5eebfSDaniel Vetter 		case PIPE_A:
2012bbb5eebfSDaniel Vetter 			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
2013bbb5eebfSDaniel Vetter 			break;
2014bbb5eebfSDaniel Vetter 		case PIPE_B:
2015bbb5eebfSDaniel Vetter 			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
2016bbb5eebfSDaniel Vetter 			break;
20173278f67fSVille Syrjälä 		case PIPE_C:
20183278f67fSVille Syrjälä 			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
20193278f67fSVille Syrjälä 			break;
2020bbb5eebfSDaniel Vetter 		}
2021bbb5eebfSDaniel Vetter 		if (iir & iir_bit)
20226b12ca56SVille Syrjälä 			status_mask |= dev_priv->pipestat_irq_mask[pipe];
2023bbb5eebfSDaniel Vetter 
20246b12ca56SVille Syrjälä 		if (!status_mask)
202591d181ddSImre Deak 			continue;
202691d181ddSImre Deak 
202791d181ddSImre Deak 		reg = PIPESTAT(pipe);
20286b12ca56SVille Syrjälä 		pipe_stats[pipe] = I915_READ(reg) & status_mask;
20296b12ca56SVille Syrjälä 		enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
20307e231dbeSJesse Barnes 
20317e231dbeSJesse Barnes 		/*
20327e231dbeSJesse Barnes 		 * Clear the PIPE*STAT regs before the IIR
2033132c27c9SVille Syrjälä 		 *
2034132c27c9SVille Syrjälä 		 * Toggle the enable bits to make sure we get an
2035132c27c9SVille Syrjälä 		 * edge in the ISR pipe event bit if we don't clear
2036132c27c9SVille Syrjälä 		 * all the enabled status bits. Otherwise the edge
2037132c27c9SVille Syrjälä 		 * triggered IIR on i965/g4x wouldn't notice that
2038132c27c9SVille Syrjälä 		 * an interrupt is still pending.
20397e231dbeSJesse Barnes 		 */
2040132c27c9SVille Syrjälä 		if (pipe_stats[pipe]) {
2041132c27c9SVille Syrjälä 			I915_WRITE(reg, pipe_stats[pipe]);
2042132c27c9SVille Syrjälä 			I915_WRITE(reg, enable_mask);
2043132c27c9SVille Syrjälä 		}
20447e231dbeSJesse Barnes 	}
204558ead0d7SImre Deak 	spin_unlock(&dev_priv->irq_lock);
20462ecb8ca4SVille Syrjälä }
20472ecb8ca4SVille Syrjälä 
2048eb64343cSVille Syrjälä static void i8xx_pipestat_irq_handler(struct drm_i915_private *dev_priv,
2049eb64343cSVille Syrjälä 				      u16 iir, u32 pipe_stats[I915_MAX_PIPES])
2050eb64343cSVille Syrjälä {
2051eb64343cSVille Syrjälä 	enum pipe pipe;
2052eb64343cSVille Syrjälä 
2053eb64343cSVille Syrjälä 	for_each_pipe(dev_priv, pipe) {
2054eb64343cSVille Syrjälä 		if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
2055eb64343cSVille Syrjälä 			drm_handle_vblank(&dev_priv->drm, pipe);
2056eb64343cSVille Syrjälä 
2057eb64343cSVille Syrjälä 		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
2058eb64343cSVille Syrjälä 			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
2059eb64343cSVille Syrjälä 
2060eb64343cSVille Syrjälä 		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
2061eb64343cSVille Syrjälä 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2062eb64343cSVille Syrjälä 	}
2063eb64343cSVille Syrjälä }
2064eb64343cSVille Syrjälä 
2065eb64343cSVille Syrjälä static void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv,
2066eb64343cSVille Syrjälä 				      u32 iir, u32 pipe_stats[I915_MAX_PIPES])
2067eb64343cSVille Syrjälä {
2068eb64343cSVille Syrjälä 	bool blc_event = false;
2069eb64343cSVille Syrjälä 	enum pipe pipe;
2070eb64343cSVille Syrjälä 
2071eb64343cSVille Syrjälä 	for_each_pipe(dev_priv, pipe) {
2072eb64343cSVille Syrjälä 		if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
2073eb64343cSVille Syrjälä 			drm_handle_vblank(&dev_priv->drm, pipe);
2074eb64343cSVille Syrjälä 
2075eb64343cSVille Syrjälä 		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
2076eb64343cSVille Syrjälä 			blc_event = true;
2077eb64343cSVille Syrjälä 
2078eb64343cSVille Syrjälä 		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
2079eb64343cSVille Syrjälä 			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
2080eb64343cSVille Syrjälä 
2081eb64343cSVille Syrjälä 		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
2082eb64343cSVille Syrjälä 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2083eb64343cSVille Syrjälä 	}
2084eb64343cSVille Syrjälä 
2085eb64343cSVille Syrjälä 	if (blc_event || (iir & I915_ASLE_INTERRUPT))
2086eb64343cSVille Syrjälä 		intel_opregion_asle_intr(dev_priv);
2087eb64343cSVille Syrjälä }
2088eb64343cSVille Syrjälä 
2089eb64343cSVille Syrjälä static void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv,
2090eb64343cSVille Syrjälä 				      u32 iir, u32 pipe_stats[I915_MAX_PIPES])
2091eb64343cSVille Syrjälä {
2092eb64343cSVille Syrjälä 	bool blc_event = false;
2093eb64343cSVille Syrjälä 	enum pipe pipe;
2094eb64343cSVille Syrjälä 
2095eb64343cSVille Syrjälä 	for_each_pipe(dev_priv, pipe) {
2096eb64343cSVille Syrjälä 		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
2097eb64343cSVille Syrjälä 			drm_handle_vblank(&dev_priv->drm, pipe);
2098eb64343cSVille Syrjälä 
2099eb64343cSVille Syrjälä 		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
2100eb64343cSVille Syrjälä 			blc_event = true;
2101eb64343cSVille Syrjälä 
2102eb64343cSVille Syrjälä 		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
2103eb64343cSVille Syrjälä 			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
2104eb64343cSVille Syrjälä 
2105eb64343cSVille Syrjälä 		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
2106eb64343cSVille Syrjälä 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2107eb64343cSVille Syrjälä 	}
2108eb64343cSVille Syrjälä 
2109eb64343cSVille Syrjälä 	if (blc_event || (iir & I915_ASLE_INTERRUPT))
2110eb64343cSVille Syrjälä 		intel_opregion_asle_intr(dev_priv);
2111eb64343cSVille Syrjälä 
2112eb64343cSVille Syrjälä 	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
2113eb64343cSVille Syrjälä 		gmbus_irq_handler(dev_priv);
2114eb64343cSVille Syrjälä }
2115eb64343cSVille Syrjälä 
211691d14251STvrtko Ursulin static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
21172ecb8ca4SVille Syrjälä 					    u32 pipe_stats[I915_MAX_PIPES])
21182ecb8ca4SVille Syrjälä {
21192ecb8ca4SVille Syrjälä 	enum pipe pipe;
21207e231dbeSJesse Barnes 
2121055e393fSDamien Lespiau 	for_each_pipe(dev_priv, pipe) {
2122fd3a4024SDaniel Vetter 		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
2123fd3a4024SDaniel Vetter 			drm_handle_vblank(&dev_priv->drm, pipe);
21244356d586SDaniel Vetter 
21254356d586SDaniel Vetter 		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
212691d14251STvrtko Ursulin 			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
21272d9d2b0bSVille Syrjälä 
21281f7247c0SDaniel Vetter 		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
21291f7247c0SDaniel Vetter 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
213031acc7f5SJesse Barnes 	}
213131acc7f5SJesse Barnes 
2132c1874ed7SImre Deak 	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
213391d14251STvrtko Ursulin 		gmbus_irq_handler(dev_priv);
2134c1874ed7SImre Deak }
2135c1874ed7SImre Deak 
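/*
 * Read and clear the latched hotplug bits in PORT_HOTPLUG_STAT, returning
 * the accumulated status for i9xx_hpd_irq_handler() to decode.
 */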
21361ae3c34cSVille Syrjälä static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
213716c6c56bSVille Syrjälä {
21380ba7c51aSVille Syrjälä 	u32 hotplug_status = 0, hotplug_status_mask;
21390ba7c51aSVille Syrjälä 	int i;
214016c6c56bSVille Syrjälä 
21410ba7c51aSVille Syrjälä 	if (IS_G4X(dev_priv) ||
21420ba7c51aSVille Syrjälä 	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
21430ba7c51aSVille Syrjälä 		hotplug_status_mask = HOTPLUG_INT_STATUS_G4X |
21440ba7c51aSVille Syrjälä 			DP_AUX_CHANNEL_MASK_INT_STATUS_G4X;
21450ba7c51aSVille Syrjälä 	else
21460ba7c51aSVille Syrjälä 		hotplug_status_mask = HOTPLUG_INT_STATUS_I915;
21470ba7c51aSVille Syrjälä 
21480ba7c51aSVille Syrjälä 	/*
21490ba7c51aSVille Syrjälä 	 * We absolutely have to clear all the pending interrupt
21500ba7c51aSVille Syrjälä 	 * bits in PORT_HOTPLUG_STAT. Otherwise the ISR port
21510ba7c51aSVille Syrjälä 	 * interrupt bit won't have an edge, and the i965/g4x
21520ba7c51aSVille Syrjälä 	 * edge triggered IIR will not notice that an interrupt
21530ba7c51aSVille Syrjälä 	 * is still pending. We can't use PORT_HOTPLUG_EN to
21540ba7c51aSVille Syrjälä 	 * guarantee the edge as the act of toggling the enable
21550ba7c51aSVille Syrjälä 	 * bits can itself generate a new hotplug interrupt :(
21560ba7c51aSVille Syrjälä 	 */
21570ba7c51aSVille Syrjälä 	for (i = 0; i < 10; i++) {
21580ba7c51aSVille Syrjälä 		u32 tmp = I915_READ(PORT_HOTPLUG_STAT) & hotplug_status_mask;
21590ba7c51aSVille Syrjälä 
21600ba7c51aSVille Syrjälä 		if (tmp == 0)
21610ba7c51aSVille Syrjälä 			return hotplug_status;
21620ba7c51aSVille Syrjälä 
21630ba7c51aSVille Syrjälä 		hotplug_status |= tmp;
21643ff60f89SOscar Mateo 		I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
21650ba7c51aSVille Syrjälä 	}
21660ba7c51aSVille Syrjälä 
21670ba7c51aSVille Syrjälä 	WARN_ONCE(1,
21680ba7c51aSVille Syrjälä 		  "PORT_HOTPLUG_STAT did not clear (0x%08x)\n",
21690ba7c51aSVille Syrjälä 		  I915_READ(PORT_HOTPLUG_STAT));
21701ae3c34cSVille Syrjälä 
21711ae3c34cSVille Syrjälä 	return hotplug_status;
21721ae3c34cSVille Syrjälä }
21731ae3c34cSVille Syrjälä 
217491d14251STvrtko Ursulin static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv,
21751ae3c34cSVille Syrjälä 				 u32 hotplug_status)
21761ae3c34cSVille Syrjälä {
21771ae3c34cSVille Syrjälä 	u32 pin_mask = 0, long_mask = 0;
21783ff60f89SOscar Mateo 
217991d14251STvrtko Ursulin 	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
218091d14251STvrtko Ursulin 	    IS_CHERRYVIEW(dev_priv)) {
218116c6c56bSVille Syrjälä 		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
218216c6c56bSVille Syrjälä 
218358f2cf24SVille Syrjälä 		if (hotplug_trigger) {
2184cf53902fSRodrigo Vivi 			intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2185cf53902fSRodrigo Vivi 					   hotplug_trigger, hotplug_trigger,
2186cf53902fSRodrigo Vivi 					   hpd_status_g4x,
2187fd63e2a9SImre Deak 					   i9xx_port_hotplug_long_detect);
218858f2cf24SVille Syrjälä 
218991d14251STvrtko Ursulin 			intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
219058f2cf24SVille Syrjälä 		}
2191369712e8SJani Nikula 
2192369712e8SJani Nikula 		if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
219391d14251STvrtko Ursulin 			dp_aux_irq_handler(dev_priv);
219416c6c56bSVille Syrjälä 	} else {
219516c6c56bSVille Syrjälä 		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
219616c6c56bSVille Syrjälä 
219758f2cf24SVille Syrjälä 		if (hotplug_trigger) {
2198cf53902fSRodrigo Vivi 			intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2199cf53902fSRodrigo Vivi 					   hotplug_trigger, hotplug_trigger,
2200cf53902fSRodrigo Vivi 					   hpd_status_i915,
2201fd63e2a9SImre Deak 					   i9xx_port_hotplug_long_detect);
220291d14251STvrtko Ursulin 			intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
220316c6c56bSVille Syrjälä 		}
22043ff60f89SOscar Mateo 	}
220558f2cf24SVille Syrjälä }
220616c6c56bSVille Syrjälä 
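/*
 * Top level interrupt handler for VLV: acks and dispatches the display
 * (VLV_IIR/PIPESTAT/hotplug), GT (GTIIR) and PM (GEN6_PMIIR) sources.
 */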
2207c1874ed7SImre Deak static irqreturn_t valleyview_irq_handler(int irq, void *arg)
2208c1874ed7SImre Deak {
2209b318b824SVille Syrjälä 	struct drm_i915_private *dev_priv = arg;
2210c1874ed7SImre Deak 	irqreturn_t ret = IRQ_NONE;
2211c1874ed7SImre Deak 
22122dd2a883SImre Deak 	if (!intel_irqs_enabled(dev_priv))
22132dd2a883SImre Deak 		return IRQ_NONE;
22142dd2a883SImre Deak 
22151f814dacSImre Deak 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
22169102650fSDaniele Ceraolo Spurio 	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
22171f814dacSImre Deak 
22181e1cace9SVille Syrjälä 	do {
22196e814800SVille Syrjälä 		u32 iir, gt_iir, pm_iir;
22202ecb8ca4SVille Syrjälä 		u32 pipe_stats[I915_MAX_PIPES] = {};
22211ae3c34cSVille Syrjälä 		u32 hotplug_status = 0;
2222a5e485a9SVille Syrjälä 		u32 ier = 0;
22233ff60f89SOscar Mateo 
2224c1874ed7SImre Deak 		gt_iir = I915_READ(GTIIR);
2225c1874ed7SImre Deak 		pm_iir = I915_READ(GEN6_PMIIR);
22263ff60f89SOscar Mateo 		iir = I915_READ(VLV_IIR);
2227c1874ed7SImre Deak 
2228c1874ed7SImre Deak 		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
22291e1cace9SVille Syrjälä 			break;
2230c1874ed7SImre Deak 
2231c1874ed7SImre Deak 		ret = IRQ_HANDLED;
2232c1874ed7SImre Deak 
2233a5e485a9SVille Syrjälä 		/*
2234a5e485a9SVille Syrjälä 		 * Theory on interrupt generation, based on empirical evidence:
2235a5e485a9SVille Syrjälä 		 *
2236a5e485a9SVille Syrjälä 		 * x = ((VLV_IIR & VLV_IER) ||
2237a5e485a9SVille Syrjälä 		 *      (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) &&
2238a5e485a9SVille Syrjälä 		 *       (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE)));
2239a5e485a9SVille Syrjälä 		 *
2240a5e485a9SVille Syrjälä 		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
2241a5e485a9SVille Syrjälä 		 * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to
2242a5e485a9SVille Syrjälä 		 * guarantee the CPU interrupt will be raised again even if we
2243a5e485a9SVille Syrjälä 		 * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR
2244a5e485a9SVille Syrjälä 		 * bits this time around.
2245a5e485a9SVille Syrjälä 		 */
22464a0a0202SVille Syrjälä 		I915_WRITE(VLV_MASTER_IER, 0);
2247a5e485a9SVille Syrjälä 		ier = I915_READ(VLV_IER);
2248a5e485a9SVille Syrjälä 		I915_WRITE(VLV_IER, 0);
22494a0a0202SVille Syrjälä 
22504a0a0202SVille Syrjälä 		if (gt_iir)
22514a0a0202SVille Syrjälä 			I915_WRITE(GTIIR, gt_iir);
22524a0a0202SVille Syrjälä 		if (pm_iir)
22534a0a0202SVille Syrjälä 			I915_WRITE(GEN6_PMIIR, pm_iir);
22544a0a0202SVille Syrjälä 
22557ce4d1f2SVille Syrjälä 		if (iir & I915_DISPLAY_PORT_INTERRUPT)
22561ae3c34cSVille Syrjälä 			hotplug_status = i9xx_hpd_irq_ack(dev_priv);
22577ce4d1f2SVille Syrjälä 
22583ff60f89SOscar Mateo 		/* Call regardless, as some status bits might not be
22593ff60f89SOscar Mateo 		 * signalled in iir */
2260eb64343cSVille Syrjälä 		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
22617ce4d1f2SVille Syrjälä 
2262eef57324SJerome Anand 		if (iir & (I915_LPE_PIPE_A_INTERRUPT |
2263eef57324SJerome Anand 			   I915_LPE_PIPE_B_INTERRUPT))
2264eef57324SJerome Anand 			intel_lpe_audio_irq_handler(dev_priv);
2265eef57324SJerome Anand 
22667ce4d1f2SVille Syrjälä 		/*
22677ce4d1f2SVille Syrjälä 		 * VLV_IIR is single buffered, and reflects the level
22687ce4d1f2SVille Syrjälä 		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
22697ce4d1f2SVille Syrjälä 		 */
22707ce4d1f2SVille Syrjälä 		if (iir)
22717ce4d1f2SVille Syrjälä 			I915_WRITE(VLV_IIR, iir);
22724a0a0202SVille Syrjälä 
2273a5e485a9SVille Syrjälä 		I915_WRITE(VLV_IER, ier);
22744a0a0202SVille Syrjälä 		I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
22751ae3c34cSVille Syrjälä 
227652894874SVille Syrjälä 		if (gt_iir)
2277261e40b8SVille Syrjälä 			snb_gt_irq_handler(dev_priv, gt_iir);
227852894874SVille Syrjälä 		if (pm_iir)
227952894874SVille Syrjälä 			gen6_rps_irq_handler(dev_priv, pm_iir);
228052894874SVille Syrjälä 
22811ae3c34cSVille Syrjälä 		if (hotplug_status)
228291d14251STvrtko Ursulin 			i9xx_hpd_irq_handler(dev_priv, hotplug_status);
22832ecb8ca4SVille Syrjälä 
228491d14251STvrtko Ursulin 		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
22851e1cace9SVille Syrjälä 	} while (0);
22867e231dbeSJesse Barnes 
22879102650fSDaniele Ceraolo Spurio 	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
22881f814dacSImre Deak 
22897e231dbeSJesse Barnes 	return ret;
22907e231dbeSJesse Barnes }
22917e231dbeSJesse Barnes 
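/*
 * Top level interrupt handler for CHV: same VLV_IIR based display handling
 * as VLV, but GT/PM interrupts are routed through GEN8_MASTER_IRQ.
 */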
229243f328d7SVille Syrjälä static irqreturn_t cherryview_irq_handler(int irq, void *arg)
229343f328d7SVille Syrjälä {
2294b318b824SVille Syrjälä 	struct drm_i915_private *dev_priv = arg;
229543f328d7SVille Syrjälä 	irqreturn_t ret = IRQ_NONE;
229643f328d7SVille Syrjälä 
22972dd2a883SImre Deak 	if (!intel_irqs_enabled(dev_priv))
22982dd2a883SImre Deak 		return IRQ_NONE;
22992dd2a883SImre Deak 
23001f814dacSImre Deak 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
23019102650fSDaniele Ceraolo Spurio 	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
23021f814dacSImre Deak 
2303579de73bSChris Wilson 	do {
23046e814800SVille Syrjälä 		u32 master_ctl, iir;
23052ecb8ca4SVille Syrjälä 		u32 pipe_stats[I915_MAX_PIPES] = {};
23061ae3c34cSVille Syrjälä 		u32 hotplug_status = 0;
2307f0fd96f5SChris Wilson 		u32 gt_iir[4];
2308a5e485a9SVille Syrjälä 		u32 ier = 0;
2309a5e485a9SVille Syrjälä 
23108e5fd599SVille Syrjälä 		master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
23113278f67fSVille Syrjälä 		iir = I915_READ(VLV_IIR);
23123278f67fSVille Syrjälä 
23133278f67fSVille Syrjälä 		if (master_ctl == 0 && iir == 0)
23148e5fd599SVille Syrjälä 			break;
231543f328d7SVille Syrjälä 
231627b6c122SOscar Mateo 		ret = IRQ_HANDLED;
231727b6c122SOscar Mateo 
2318a5e485a9SVille Syrjälä 		/*
2319a5e485a9SVille Syrjälä 		 * Theory on interrupt generation, based on empirical evidence:
2320a5e485a9SVille Syrjälä 		 *
2321a5e485a9SVille Syrjälä 		 * x = ((VLV_IIR & VLV_IER) ||
2322a5e485a9SVille Syrjälä 		 *      ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) &&
2323a5e485a9SVille Syrjälä 		 *       (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL)));
2324a5e485a9SVille Syrjälä 		 *
2325a5e485a9SVille Syrjälä 		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
2326a5e485a9SVille Syrjälä 		 * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to
2327a5e485a9SVille Syrjälä 		 * guarantee the CPU interrupt will be raised again even if we
2328a5e485a9SVille Syrjälä 		 * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL
2329a5e485a9SVille Syrjälä 		 * bits this time around.
2330a5e485a9SVille Syrjälä 		 */
233143f328d7SVille Syrjälä 		I915_WRITE(GEN8_MASTER_IRQ, 0);
2332a5e485a9SVille Syrjälä 		ier = I915_READ(VLV_IER);
2333a5e485a9SVille Syrjälä 		I915_WRITE(VLV_IER, 0);
233443f328d7SVille Syrjälä 
2335e30e251aSVille Syrjälä 		gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);
233627b6c122SOscar Mateo 
233727b6c122SOscar Mateo 		if (iir & I915_DISPLAY_PORT_INTERRUPT)
23381ae3c34cSVille Syrjälä 			hotplug_status = i9xx_hpd_irq_ack(dev_priv);
233943f328d7SVille Syrjälä 
234027b6c122SOscar Mateo 		/* Call regardless, as some status bits might not be
234127b6c122SOscar Mateo 		 * signalled in iir */
2342eb64343cSVille Syrjälä 		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
234343f328d7SVille Syrjälä 
2344eef57324SJerome Anand 		if (iir & (I915_LPE_PIPE_A_INTERRUPT |
2345eef57324SJerome Anand 			   I915_LPE_PIPE_B_INTERRUPT |
2346eef57324SJerome Anand 			   I915_LPE_PIPE_C_INTERRUPT))
2347eef57324SJerome Anand 			intel_lpe_audio_irq_handler(dev_priv);
2348eef57324SJerome Anand 
23497ce4d1f2SVille Syrjälä 		/*
23507ce4d1f2SVille Syrjälä 		 * VLV_IIR is single buffered, and reflects the level
23517ce4d1f2SVille Syrjälä 		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
23527ce4d1f2SVille Syrjälä 		 */
23537ce4d1f2SVille Syrjälä 		if (iir)
23547ce4d1f2SVille Syrjälä 			I915_WRITE(VLV_IIR, iir);
23557ce4d1f2SVille Syrjälä 
2356a5e485a9SVille Syrjälä 		I915_WRITE(VLV_IER, ier);
2357e5328c43SVille Syrjälä 		I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
23581ae3c34cSVille Syrjälä 
2359f0fd96f5SChris Wilson 		gen8_gt_irq_handler(dev_priv, master_ctl, gt_iir);
2360e30e251aSVille Syrjälä 
23611ae3c34cSVille Syrjälä 		if (hotplug_status)
236291d14251STvrtko Ursulin 			i9xx_hpd_irq_handler(dev_priv, hotplug_status);
23632ecb8ca4SVille Syrjälä 
236491d14251STvrtko Ursulin 		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
2365579de73bSChris Wilson 	} while (0);
23663278f67fSVille Syrjälä 
23679102650fSDaniele Ceraolo Spurio 	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
23681f814dacSImre Deak 
236943f328d7SVille Syrjälä 	return ret;
237043f328d7SVille Syrjälä }
237143f328d7SVille Syrjälä 
237291d14251STvrtko Ursulin static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
237391d14251STvrtko Ursulin 				u32 hotplug_trigger,
237440e56410SVille Syrjälä 				const u32 hpd[HPD_NUM_PINS])
2375776ad806SJesse Barnes {
237642db67d6SVille Syrjälä 	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2377776ad806SJesse Barnes 
23786a39d7c9SJani Nikula 	/*
23796a39d7c9SJani Nikula 	 * Somehow the PCH doesn't seem to really ack the interrupt to the CPU
23806a39d7c9SJani Nikula 	 * unless we touch the hotplug register, even if hotplug_trigger is
23816a39d7c9SJani Nikula 	 * zero. Not acking leads to "The master control interrupt lied (SDE)!"
23826a39d7c9SJani Nikula 	 * errors.
23836a39d7c9SJani Nikula 	 */
238413cf5504SDave Airlie 	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
23856a39d7c9SJani Nikula 	if (!hotplug_trigger) {
23866a39d7c9SJani Nikula 		u32 mask = PORTA_HOTPLUG_STATUS_MASK |
23876a39d7c9SJani Nikula 			PORTD_HOTPLUG_STATUS_MASK |
23886a39d7c9SJani Nikula 			PORTC_HOTPLUG_STATUS_MASK |
23896a39d7c9SJani Nikula 			PORTB_HOTPLUG_STATUS_MASK;
23906a39d7c9SJani Nikula 		dig_hotplug_reg &= ~mask;
23916a39d7c9SJani Nikula 	}
23926a39d7c9SJani Nikula 
239313cf5504SDave Airlie 	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
23946a39d7c9SJani Nikula 	if (!hotplug_trigger)
23956a39d7c9SJani Nikula 		return;
239613cf5504SDave Airlie 
2397cf53902fSRodrigo Vivi 	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger,
239840e56410SVille Syrjälä 			   dig_hotplug_reg, hpd,
2399fd63e2a9SImre Deak 			   pch_port_hotplug_long_detect);
240040e56410SVille Syrjälä 
240191d14251STvrtko Ursulin 	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2402aaf5ec2eSSonika Jindal }
240391d131d2SDaniel Vetter 
240491d14251STvrtko Ursulin static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
240540e56410SVille Syrjälä {
240640e56410SVille Syrjälä 	int pipe;
240740e56410SVille Syrjälä 	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
240840e56410SVille Syrjälä 
240991d14251STvrtko Ursulin 	ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ibx);
241040e56410SVille Syrjälä 
2411cfc33bf7SVille Syrjälä 	if (pch_iir & SDE_AUDIO_POWER_MASK) {
2412cfc33bf7SVille Syrjälä 		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
2413776ad806SJesse Barnes 			       SDE_AUDIO_POWER_SHIFT);
2414cfc33bf7SVille Syrjälä 		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
2415cfc33bf7SVille Syrjälä 				 port_name(port));
2416cfc33bf7SVille Syrjälä 	}
2417776ad806SJesse Barnes 
2418ce99c256SDaniel Vetter 	if (pch_iir & SDE_AUX_MASK)
241991d14251STvrtko Ursulin 		dp_aux_irq_handler(dev_priv);
2420ce99c256SDaniel Vetter 
2421776ad806SJesse Barnes 	if (pch_iir & SDE_GMBUS)
242291d14251STvrtko Ursulin 		gmbus_irq_handler(dev_priv);
2423776ad806SJesse Barnes 
2424776ad806SJesse Barnes 	if (pch_iir & SDE_AUDIO_HDCP_MASK)
2425776ad806SJesse Barnes 		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
2426776ad806SJesse Barnes 
2427776ad806SJesse Barnes 	if (pch_iir & SDE_AUDIO_TRANS_MASK)
2428776ad806SJesse Barnes 		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
2429776ad806SJesse Barnes 
2430776ad806SJesse Barnes 	if (pch_iir & SDE_POISON)
2431776ad806SJesse Barnes 		DRM_ERROR("PCH poison interrupt\n");
2432776ad806SJesse Barnes 
24339db4a9c7SJesse Barnes 	if (pch_iir & SDE_FDI_MASK)
2434055e393fSDamien Lespiau 		for_each_pipe(dev_priv, pipe)
24359db4a9c7SJesse Barnes 			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
24369db4a9c7SJesse Barnes 					 pipe_name(pipe),
24379db4a9c7SJesse Barnes 					 I915_READ(FDI_RX_IIR(pipe)));
2438776ad806SJesse Barnes 
2439776ad806SJesse Barnes 	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
2440776ad806SJesse Barnes 		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
2441776ad806SJesse Barnes 
2442776ad806SJesse Barnes 	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
2443776ad806SJesse Barnes 		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
2444776ad806SJesse Barnes 
2445776ad806SJesse Barnes 	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
2446a2196033SMatthias Kaehlcke 		intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A);
24478664281bSPaulo Zanoni 
24488664281bSPaulo Zanoni 	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
2449a2196033SMatthias Kaehlcke 		intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_B);
24508664281bSPaulo Zanoni }
24518664281bSPaulo Zanoni 
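/*
 * Decode GEN7_ERR_INT: poison, per-pipe FIFO underruns and pipe CRC done
 * events (the CRC events are routed differently on IVB vs. HSW+).
 */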
245291d14251STvrtko Ursulin static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
24538664281bSPaulo Zanoni {
24548664281bSPaulo Zanoni 	u32 err_int = I915_READ(GEN7_ERR_INT);
24555a69b89fSDaniel Vetter 	enum pipe pipe;
24568664281bSPaulo Zanoni 
2457de032bf4SPaulo Zanoni 	if (err_int & ERR_INT_POISON)
2458de032bf4SPaulo Zanoni 		DRM_ERROR("Poison interrupt\n");
2459de032bf4SPaulo Zanoni 
2460055e393fSDamien Lespiau 	for_each_pipe(dev_priv, pipe) {
24611f7247c0SDaniel Vetter 		if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
24621f7247c0SDaniel Vetter 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
24638664281bSPaulo Zanoni 
24645a69b89fSDaniel Vetter 		if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
246591d14251STvrtko Ursulin 			if (IS_IVYBRIDGE(dev_priv))
246691d14251STvrtko Ursulin 				ivb_pipe_crc_irq_handler(dev_priv, pipe);
24675a69b89fSDaniel Vetter 			else
246891d14251STvrtko Ursulin 				hsw_pipe_crc_irq_handler(dev_priv, pipe);
24695a69b89fSDaniel Vetter 		}
24705a69b89fSDaniel Vetter 	}
24718bf1e9f1SShuang He 
24728664281bSPaulo Zanoni 	I915_WRITE(GEN7_ERR_INT, err_int);
24738664281bSPaulo Zanoni }
24748664281bSPaulo Zanoni 
247591d14251STvrtko Ursulin static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
24768664281bSPaulo Zanoni {
24778664281bSPaulo Zanoni 	u32 serr_int = I915_READ(SERR_INT);
247845c1cd87SMika Kahola 	enum pipe pipe;
24798664281bSPaulo Zanoni 
2480de032bf4SPaulo Zanoni 	if (serr_int & SERR_INT_POISON)
2481de032bf4SPaulo Zanoni 		DRM_ERROR("PCH poison interrupt\n");
2482de032bf4SPaulo Zanoni 
248345c1cd87SMika Kahola 	for_each_pipe(dev_priv, pipe)
248445c1cd87SMika Kahola 		if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe))
248545c1cd87SMika Kahola 			intel_pch_fifo_underrun_irq_handler(dev_priv, pipe);
24868664281bSPaulo Zanoni 
24878664281bSPaulo Zanoni 	I915_WRITE(SERR_INT, serr_int);
2488776ad806SJesse Barnes }
2489776ad806SJesse Barnes 
249091d14251STvrtko Ursulin static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
249123e81d69SAdam Jackson {
249223e81d69SAdam Jackson 	int pipe;
24936dbf30ceSVille Syrjälä 	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
2494aaf5ec2eSSonika Jindal 
249591d14251STvrtko Ursulin 	ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_cpt);
249691d131d2SDaniel Vetter 
2497cfc33bf7SVille Syrjälä 	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
2498cfc33bf7SVille Syrjälä 		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
249923e81d69SAdam Jackson 			       SDE_AUDIO_POWER_SHIFT_CPT);
2500cfc33bf7SVille Syrjälä 		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
2501cfc33bf7SVille Syrjälä 				 port_name(port));
2502cfc33bf7SVille Syrjälä 	}
250323e81d69SAdam Jackson 
250423e81d69SAdam Jackson 	if (pch_iir & SDE_AUX_MASK_CPT)
250591d14251STvrtko Ursulin 		dp_aux_irq_handler(dev_priv);
250623e81d69SAdam Jackson 
250723e81d69SAdam Jackson 	if (pch_iir & SDE_GMBUS_CPT)
250891d14251STvrtko Ursulin 		gmbus_irq_handler(dev_priv);
250923e81d69SAdam Jackson 
251023e81d69SAdam Jackson 	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
251123e81d69SAdam Jackson 		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
251223e81d69SAdam Jackson 
251323e81d69SAdam Jackson 	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
251423e81d69SAdam Jackson 		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
251523e81d69SAdam Jackson 
251623e81d69SAdam Jackson 	if (pch_iir & SDE_FDI_MASK_CPT)
2517055e393fSDamien Lespiau 		for_each_pipe(dev_priv, pipe)
251823e81d69SAdam Jackson 			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
251923e81d69SAdam Jackson 					 pipe_name(pipe),
252023e81d69SAdam Jackson 					 I915_READ(FDI_RX_IIR(pipe)));
25218664281bSPaulo Zanoni 
25228664281bSPaulo Zanoni 	if (pch_iir & SDE_ERROR_CPT)
252391d14251STvrtko Ursulin 		cpt_serr_int_handler(dev_priv);
252423e81d69SAdam Jackson }
252523e81d69SAdam Jackson 
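/*
 * ICP+ PCH interrupts: hotplug arrives via separate DDI and Type-C trigger
 * banks (SHOTPLUG_CTL_DDI/SHOTPLUG_CTL_TC), plus the GMBUS event.
 */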
2526c6f7acb8SMatt Roper static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir,
2527c6f7acb8SMatt Roper 			    const u32 *pins)
252831604222SAnusha Srivatsa {
252931604222SAnusha Srivatsa 	u32 ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_ICP;
253031604222SAnusha Srivatsa 	u32 tc_hotplug_trigger = pch_iir & SDE_TC_MASK_ICP;
253131604222SAnusha Srivatsa 	u32 pin_mask = 0, long_mask = 0;
253231604222SAnusha Srivatsa 
253331604222SAnusha Srivatsa 	if (ddi_hotplug_trigger) {
253431604222SAnusha Srivatsa 		u32 dig_hotplug_reg;
253531604222SAnusha Srivatsa 
253631604222SAnusha Srivatsa 		dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_DDI);
253731604222SAnusha Srivatsa 		I915_WRITE(SHOTPLUG_CTL_DDI, dig_hotplug_reg);
253831604222SAnusha Srivatsa 
253931604222SAnusha Srivatsa 		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
254031604222SAnusha Srivatsa 				   ddi_hotplug_trigger,
2541c6f7acb8SMatt Roper 				   dig_hotplug_reg, pins,
254231604222SAnusha Srivatsa 				   icp_ddi_port_hotplug_long_detect);
254331604222SAnusha Srivatsa 	}
254431604222SAnusha Srivatsa 
254531604222SAnusha Srivatsa 	if (tc_hotplug_trigger) {
254631604222SAnusha Srivatsa 		u32 dig_hotplug_reg;
254731604222SAnusha Srivatsa 
254831604222SAnusha Srivatsa 		dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_TC);
254931604222SAnusha Srivatsa 		I915_WRITE(SHOTPLUG_CTL_TC, dig_hotplug_reg);
255031604222SAnusha Srivatsa 
255131604222SAnusha Srivatsa 		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
255231604222SAnusha Srivatsa 				   tc_hotplug_trigger,
2553c6f7acb8SMatt Roper 				   dig_hotplug_reg, pins,
255431604222SAnusha Srivatsa 				   icp_tc_port_hotplug_long_detect);
255531604222SAnusha Srivatsa 	}
255631604222SAnusha Srivatsa 
255731604222SAnusha Srivatsa 	if (pin_mask)
255831604222SAnusha Srivatsa 		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
255931604222SAnusha Srivatsa 
256031604222SAnusha Srivatsa 	if (pch_iir & SDE_GMBUS_ICP)
256131604222SAnusha Srivatsa 		gmbus_irq_handler(dev_priv);
256231604222SAnusha Srivatsa }
256331604222SAnusha Srivatsa 
256491d14251STvrtko Ursulin static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
25656dbf30ceSVille Syrjälä {
25666dbf30ceSVille Syrjälä 	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
25676dbf30ceSVille Syrjälä 		~SDE_PORTE_HOTPLUG_SPT;
25686dbf30ceSVille Syrjälä 	u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
25696dbf30ceSVille Syrjälä 	u32 pin_mask = 0, long_mask = 0;
25706dbf30ceSVille Syrjälä 
25716dbf30ceSVille Syrjälä 	if (hotplug_trigger) {
25726dbf30ceSVille Syrjälä 		u32 dig_hotplug_reg;
25736dbf30ceSVille Syrjälä 
25746dbf30ceSVille Syrjälä 		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
25756dbf30ceSVille Syrjälä 		I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
25766dbf30ceSVille Syrjälä 
2577cf53902fSRodrigo Vivi 		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2578cf53902fSRodrigo Vivi 				   hotplug_trigger, dig_hotplug_reg, hpd_spt,
257974c0b395SVille Syrjälä 				   spt_port_hotplug_long_detect);
25806dbf30ceSVille Syrjälä 	}
25816dbf30ceSVille Syrjälä 
25826dbf30ceSVille Syrjälä 	if (hotplug2_trigger) {
25836dbf30ceSVille Syrjälä 		u32 dig_hotplug_reg;
25846dbf30ceSVille Syrjälä 
25856dbf30ceSVille Syrjälä 		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2);
25866dbf30ceSVille Syrjälä 		I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg);
25876dbf30ceSVille Syrjälä 
2588cf53902fSRodrigo Vivi 		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2589cf53902fSRodrigo Vivi 				   hotplug2_trigger, dig_hotplug_reg, hpd_spt,
25906dbf30ceSVille Syrjälä 				   spt_port_hotplug2_long_detect);
25916dbf30ceSVille Syrjälä 	}
25926dbf30ceSVille Syrjälä 
25936dbf30ceSVille Syrjälä 	if (pin_mask)
259491d14251STvrtko Ursulin 		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
25956dbf30ceSVille Syrjälä 
25966dbf30ceSVille Syrjälä 	if (pch_iir & SDE_GMBUS_CPT)
259791d14251STvrtko Ursulin 		gmbus_irq_handler(dev_priv);
25986dbf30ceSVille Syrjälä }
25996dbf30ceSVille Syrjälä 
260091d14251STvrtko Ursulin static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv,
260191d14251STvrtko Ursulin 				u32 hotplug_trigger,
260240e56410SVille Syrjälä 				const u32 hpd[HPD_NUM_PINS])
2603c008bc6eSPaulo Zanoni {
2604e4ce95aaSVille Syrjälä 	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2605e4ce95aaSVille Syrjälä 
2606e4ce95aaSVille Syrjälä 	dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
2607e4ce95aaSVille Syrjälä 	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);
2608e4ce95aaSVille Syrjälä 
2609cf53902fSRodrigo Vivi 	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger,
261040e56410SVille Syrjälä 			   dig_hotplug_reg, hpd,
2611e4ce95aaSVille Syrjälä 			   ilk_port_hotplug_long_detect);
261240e56410SVille Syrjälä 
261391d14251STvrtko Ursulin 	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2614e4ce95aaSVille Syrjälä }
2615c008bc6eSPaulo Zanoni 
261691d14251STvrtko Ursulin static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
261791d14251STvrtko Ursulin 				    u32 de_iir)
261840e56410SVille Syrjälä {
261940e56410SVille Syrjälä 	enum pipe pipe;
262040e56410SVille Syrjälä 	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;
262140e56410SVille Syrjälä 
262240e56410SVille Syrjälä 	if (hotplug_trigger)
262391d14251STvrtko Ursulin 		ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ilk);
262440e56410SVille Syrjälä 
2625c008bc6eSPaulo Zanoni 	if (de_iir & DE_AUX_CHANNEL_A)
262691d14251STvrtko Ursulin 		dp_aux_irq_handler(dev_priv);
2627c008bc6eSPaulo Zanoni 
2628c008bc6eSPaulo Zanoni 	if (de_iir & DE_GSE)
262991d14251STvrtko Ursulin 		intel_opregion_asle_intr(dev_priv);
2630c008bc6eSPaulo Zanoni 
2631c008bc6eSPaulo Zanoni 	if (de_iir & DE_POISON)
2632c008bc6eSPaulo Zanoni 		DRM_ERROR("Poison interrupt\n");
2633c008bc6eSPaulo Zanoni 
2634055e393fSDamien Lespiau 	for_each_pipe(dev_priv, pipe) {
2635fd3a4024SDaniel Vetter 		if (de_iir & DE_PIPE_VBLANK(pipe))
2636fd3a4024SDaniel Vetter 			drm_handle_vblank(&dev_priv->drm, pipe);
2637c008bc6eSPaulo Zanoni 
263840da17c2SDaniel Vetter 		if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
26391f7247c0SDaniel Vetter 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2640c008bc6eSPaulo Zanoni 
264140da17c2SDaniel Vetter 		if (de_iir & DE_PIPE_CRC_DONE(pipe))
264291d14251STvrtko Ursulin 			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
2643c008bc6eSPaulo Zanoni 	}
2644c008bc6eSPaulo Zanoni 
2645c008bc6eSPaulo Zanoni 	/* check event from PCH */
2646c008bc6eSPaulo Zanoni 	if (de_iir & DE_PCH_EVENT) {
2647c008bc6eSPaulo Zanoni 		u32 pch_iir = I915_READ(SDEIIR);
2648c008bc6eSPaulo Zanoni 
264991d14251STvrtko Ursulin 		if (HAS_PCH_CPT(dev_priv))
265091d14251STvrtko Ursulin 			cpt_irq_handler(dev_priv, pch_iir);
2651c008bc6eSPaulo Zanoni 		else
265291d14251STvrtko Ursulin 			ibx_irq_handler(dev_priv, pch_iir);
2653c008bc6eSPaulo Zanoni 
2654c008bc6eSPaulo Zanoni 		/* should clear PCH hotplug event before clearing CPU irq */
2655c008bc6eSPaulo Zanoni 		I915_WRITE(SDEIIR, pch_iir);
2656c008bc6eSPaulo Zanoni 	}
2657c008bc6eSPaulo Zanoni 
2658cf819effSLucas De Marchi 	if (IS_GEN(dev_priv, 5) && de_iir & DE_PCU_EVENT)
265991d14251STvrtko Ursulin 		ironlake_rps_change_irq_handler(dev_priv);
2660c008bc6eSPaulo Zanoni }
2661c008bc6eSPaulo Zanoni 
266291d14251STvrtko Ursulin static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
266391d14251STvrtko Ursulin 				    u32 de_iir)
26649719fb98SPaulo Zanoni {
266507d27e20SDamien Lespiau 	enum pipe pipe;
266623bb4cb5SVille Syrjälä 	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;
266723bb4cb5SVille Syrjälä 
266840e56410SVille Syrjälä 	if (hotplug_trigger)
266991d14251STvrtko Ursulin 		ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ivb);
26709719fb98SPaulo Zanoni 
26719719fb98SPaulo Zanoni 	if (de_iir & DE_ERR_INT_IVB)
267291d14251STvrtko Ursulin 		ivb_err_int_handler(dev_priv);
26739719fb98SPaulo Zanoni 
267454fd3149SDhinakaran Pandiyan 	if (de_iir & DE_EDP_PSR_INT_HSW) {
267554fd3149SDhinakaran Pandiyan 		u32 psr_iir = I915_READ(EDP_PSR_IIR);
267654fd3149SDhinakaran Pandiyan 
267754fd3149SDhinakaran Pandiyan 		intel_psr_irq_handler(dev_priv, psr_iir);
267854fd3149SDhinakaran Pandiyan 		I915_WRITE(EDP_PSR_IIR, psr_iir);
267954fd3149SDhinakaran Pandiyan 	}
2680fc340442SDaniel Vetter 
26819719fb98SPaulo Zanoni 	if (de_iir & DE_AUX_CHANNEL_A_IVB)
268291d14251STvrtko Ursulin 		dp_aux_irq_handler(dev_priv);
26839719fb98SPaulo Zanoni 
26849719fb98SPaulo Zanoni 	if (de_iir & DE_GSE_IVB)
268591d14251STvrtko Ursulin 		intel_opregion_asle_intr(dev_priv);
26869719fb98SPaulo Zanoni 
2687055e393fSDamien Lespiau 	for_each_pipe(dev_priv, pipe) {
2688fd3a4024SDaniel Vetter 		if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)))
2689fd3a4024SDaniel Vetter 			drm_handle_vblank(&dev_priv->drm, pipe);
26909719fb98SPaulo Zanoni 	}
26919719fb98SPaulo Zanoni 
26929719fb98SPaulo Zanoni 	/* check event from PCH */
269391d14251STvrtko Ursulin 	if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) {
26949719fb98SPaulo Zanoni 		u32 pch_iir = I915_READ(SDEIIR);
26959719fb98SPaulo Zanoni 
269691d14251STvrtko Ursulin 		cpt_irq_handler(dev_priv, pch_iir);
26979719fb98SPaulo Zanoni 
26989719fb98SPaulo Zanoni 		/* clear PCH hotplug event before clearing CPU irq */
26999719fb98SPaulo Zanoni 		I915_WRITE(SDEIIR, pch_iir);
27009719fb98SPaulo Zanoni 	}
27019719fb98SPaulo Zanoni }
27029719fb98SPaulo Zanoni 
270372c90f62SOscar Mateo /*
270472c90f62SOscar Mateo  * To handle irqs with the minimum potential races with fresh interrupts, we:
270572c90f62SOscar Mateo  * 1 - Disable Master Interrupt Control.
270672c90f62SOscar Mateo  * 2 - Find the source(s) of the interrupt.
270772c90f62SOscar Mateo  * 3 - Clear the Interrupt Identity bits (IIR).
270872c90f62SOscar Mateo  * 4 - Process the interrupt(s) that had bits set in the IIRs.
270972c90f62SOscar Mateo  * 5 - Re-enable Master Interrupt Control.
271072c90f62SOscar Mateo  */
2711f1af8fc1SPaulo Zanoni static irqreturn_t ironlake_irq_handler(int irq, void *arg)
2712b1f14ad0SJesse Barnes {
2713b318b824SVille Syrjälä 	struct drm_i915_private *dev_priv = arg;
2714f1af8fc1SPaulo Zanoni 	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
27150e43406bSChris Wilson 	irqreturn_t ret = IRQ_NONE;
2716b1f14ad0SJesse Barnes 
27172dd2a883SImre Deak 	if (!intel_irqs_enabled(dev_priv))
27182dd2a883SImre Deak 		return IRQ_NONE;
27192dd2a883SImre Deak 
27201f814dacSImre Deak 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
27219102650fSDaniele Ceraolo Spurio 	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
27221f814dacSImre Deak 
2723b1f14ad0SJesse Barnes 	/* disable master interrupt before clearing iir  */
2724b1f14ad0SJesse Barnes 	de_ier = I915_READ(DEIER);
2725b1f14ad0SJesse Barnes 	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
27260e43406bSChris Wilson 
272744498aeaSPaulo Zanoni 	/* Disable south interrupts. We'll only write to SDEIIR once, so further
272844498aeaSPaulo Zanoni 	 * interrupts will be stored on its back queue, and then we'll be
272944498aeaSPaulo Zanoni 	 * able to process them after we restore SDEIER (as soon as we restore
273044498aeaSPaulo Zanoni 	 * it, we'll get an interrupt if SDEIIR still has something to process
273144498aeaSPaulo Zanoni 	 * due to its back queue). */
273291d14251STvrtko Ursulin 	if (!HAS_PCH_NOP(dev_priv)) {
273344498aeaSPaulo Zanoni 		sde_ier = I915_READ(SDEIER);
273444498aeaSPaulo Zanoni 		I915_WRITE(SDEIER, 0);
2735ab5c608bSBen Widawsky 	}
273644498aeaSPaulo Zanoni 
273772c90f62SOscar Mateo 	/* Find, clear, then process each source of interrupt */
273872c90f62SOscar Mateo 
27390e43406bSChris Wilson 	gt_iir = I915_READ(GTIIR);
27400e43406bSChris Wilson 	if (gt_iir) {
274172c90f62SOscar Mateo 		I915_WRITE(GTIIR, gt_iir);
274272c90f62SOscar Mateo 		ret = IRQ_HANDLED;
274391d14251STvrtko Ursulin 		if (INTEL_GEN(dev_priv) >= 6)
2744261e40b8SVille Syrjälä 			snb_gt_irq_handler(dev_priv, gt_iir);
2745d8fc8a47SPaulo Zanoni 		else
2746261e40b8SVille Syrjälä 			ilk_gt_irq_handler(dev_priv, gt_iir);
27470e43406bSChris Wilson 	}
2748b1f14ad0SJesse Barnes 
2749b1f14ad0SJesse Barnes 	de_iir = I915_READ(DEIIR);
27500e43406bSChris Wilson 	if (de_iir) {
275172c90f62SOscar Mateo 		I915_WRITE(DEIIR, de_iir);
275272c90f62SOscar Mateo 		ret = IRQ_HANDLED;
275391d14251STvrtko Ursulin 		if (INTEL_GEN(dev_priv) >= 7)
275491d14251STvrtko Ursulin 			ivb_display_irq_handler(dev_priv, de_iir);
2755f1af8fc1SPaulo Zanoni 		else
275691d14251STvrtko Ursulin 			ilk_display_irq_handler(dev_priv, de_iir);
27570e43406bSChris Wilson 	}
27580e43406bSChris Wilson 
275991d14251STvrtko Ursulin 	if (INTEL_GEN(dev_priv) >= 6) {
2760f1af8fc1SPaulo Zanoni 		u32 pm_iir = I915_READ(GEN6_PMIIR);
27610e43406bSChris Wilson 		if (pm_iir) {
2762b1f14ad0SJesse Barnes 			I915_WRITE(GEN6_PMIIR, pm_iir);
27630e43406bSChris Wilson 			ret = IRQ_HANDLED;
276472c90f62SOscar Mateo 			gen6_rps_irq_handler(dev_priv, pm_iir);
27650e43406bSChris Wilson 		}
2766f1af8fc1SPaulo Zanoni 	}
2767b1f14ad0SJesse Barnes 
2768b1f14ad0SJesse Barnes 	I915_WRITE(DEIER, de_ier);
276974093f3eSChris Wilson 	if (!HAS_PCH_NOP(dev_priv))
277044498aeaSPaulo Zanoni 		I915_WRITE(SDEIER, sde_ier);
2771b1f14ad0SJesse Barnes 
27721f814dacSImre Deak 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
27739102650fSDaniele Ceraolo Spurio 	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
27741f814dacSImre Deak 
2775b1f14ad0SJesse Barnes 	return ret;
2776b1f14ad0SJesse Barnes }
2777b1f14ad0SJesse Barnes 
277891d14251STvrtko Ursulin static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv,
277991d14251STvrtko Ursulin 				u32 hotplug_trigger,
278040e56410SVille Syrjälä 				const u32 hpd[HPD_NUM_PINS])
2781d04a492dSShashank Sharma {
2782cebd87a0SVille Syrjälä 	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2783d04a492dSShashank Sharma 
2784a52bb15bSVille Syrjälä 	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2785a52bb15bSVille Syrjälä 	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2786d04a492dSShashank Sharma 
2787cf53902fSRodrigo Vivi 	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger,
278840e56410SVille Syrjälä 			   dig_hotplug_reg, hpd,
2789cebd87a0SVille Syrjälä 			   bxt_port_hotplug_long_detect);
279040e56410SVille Syrjälä 
279191d14251STvrtko Ursulin 	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2792d04a492dSShashank Sharma }
2793d04a492dSShashank Sharma 
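/*
 * Gen11 DE hotplug: Type-C and Thunderbolt ports have separate trigger
 * masks and hotplug control registers, but share the hpd_gen11 pin table.
 */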
2794121e758eSDhinakaran Pandiyan static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
2795121e758eSDhinakaran Pandiyan {
2796121e758eSDhinakaran Pandiyan 	u32 pin_mask = 0, long_mask = 0;
2797b796b971SDhinakaran Pandiyan 	u32 trigger_tc = iir & GEN11_DE_TC_HOTPLUG_MASK;
2798b796b971SDhinakaran Pandiyan 	u32 trigger_tbt = iir & GEN11_DE_TBT_HOTPLUG_MASK;
2799121e758eSDhinakaran Pandiyan 
2800121e758eSDhinakaran Pandiyan 	if (trigger_tc) {
2801b796b971SDhinakaran Pandiyan 		u32 dig_hotplug_reg;
2802b796b971SDhinakaran Pandiyan 
2803121e758eSDhinakaran Pandiyan 		dig_hotplug_reg = I915_READ(GEN11_TC_HOTPLUG_CTL);
2804121e758eSDhinakaran Pandiyan 		I915_WRITE(GEN11_TC_HOTPLUG_CTL, dig_hotplug_reg);
2805121e758eSDhinakaran Pandiyan 
2806121e758eSDhinakaran Pandiyan 		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tc,
2807b796b971SDhinakaran Pandiyan 				   dig_hotplug_reg, hpd_gen11,
2808121e758eSDhinakaran Pandiyan 				   gen11_port_hotplug_long_detect);
2809121e758eSDhinakaran Pandiyan 	}
2810b796b971SDhinakaran Pandiyan 
2811b796b971SDhinakaran Pandiyan 	if (trigger_tbt) {
2812b796b971SDhinakaran Pandiyan 		u32 dig_hotplug_reg;
2813b796b971SDhinakaran Pandiyan 
2814b796b971SDhinakaran Pandiyan 		dig_hotplug_reg = I915_READ(GEN11_TBT_HOTPLUG_CTL);
2815b796b971SDhinakaran Pandiyan 		I915_WRITE(GEN11_TBT_HOTPLUG_CTL, dig_hotplug_reg);
2816b796b971SDhinakaran Pandiyan 
2817b796b971SDhinakaran Pandiyan 		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tbt,
2818b796b971SDhinakaran Pandiyan 				   dig_hotplug_reg, hpd_gen11,
2819b796b971SDhinakaran Pandiyan 				   gen11_port_hotplug_long_detect);
2820b796b971SDhinakaran Pandiyan 	}
2821b796b971SDhinakaran Pandiyan 
2822b796b971SDhinakaran Pandiyan 	if (pin_mask)
2823b796b971SDhinakaran Pandiyan 		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2824b796b971SDhinakaran Pandiyan 	else
2825b796b971SDhinakaran Pandiyan 		DRM_ERROR("Unexpected DE HPD interrupt 0x%08x\n", iir);
2826121e758eSDhinakaran Pandiyan }
2827121e758eSDhinakaran Pandiyan 
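/* AUX channel interrupt bits present in GEN8_DE_PORT_IIR on this platform. */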
28289d17210fSLucas De Marchi static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
28299d17210fSLucas De Marchi {
28309d17210fSLucas De Marchi 	u32 mask = GEN8_AUX_CHANNEL_A;
28319d17210fSLucas De Marchi 
28329d17210fSLucas De Marchi 	if (INTEL_GEN(dev_priv) >= 9)
28339d17210fSLucas De Marchi 		mask |= GEN9_AUX_CHANNEL_B |
28349d17210fSLucas De Marchi 			GEN9_AUX_CHANNEL_C |
28359d17210fSLucas De Marchi 			GEN9_AUX_CHANNEL_D;
28369d17210fSLucas De Marchi 
28379d17210fSLucas De Marchi 	if (IS_CNL_WITH_PORT_F(dev_priv))
28389d17210fSLucas De Marchi 		mask |= CNL_AUX_CHANNEL_F;
28399d17210fSLucas De Marchi 
28409d17210fSLucas De Marchi 	if (INTEL_GEN(dev_priv) >= 11)
28419d17210fSLucas De Marchi 		mask |= ICL_AUX_CHANNEL_E |
28429d17210fSLucas De Marchi 			CNL_AUX_CHANNEL_F;
28439d17210fSLucas De Marchi 
28449d17210fSLucas De Marchi 	return mask;
28459d17210fSLucas De Marchi }
28469d17210fSLucas De Marchi 
2847f11a0f46STvrtko Ursulin static irqreturn_t
2848f11a0f46STvrtko Ursulin gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
2849abd58f01SBen Widawsky {
2850abd58f01SBen Widawsky 	irqreturn_t ret = IRQ_NONE;
2851f11a0f46STvrtko Ursulin 	u32 iir;
2852c42664ccSDaniel Vetter 	enum pipe pipe;
285388e04703SJesse Barnes 
2854abd58f01SBen Widawsky 	if (master_ctl & GEN8_DE_MISC_IRQ) {
2855e32192e1STvrtko Ursulin 		iir = I915_READ(GEN8_DE_MISC_IIR);
2856e32192e1STvrtko Ursulin 		if (iir) {
2857e04f7eceSVille Syrjälä 			bool found = false;
2858e04f7eceSVille Syrjälä 
2859e32192e1STvrtko Ursulin 			I915_WRITE(GEN8_DE_MISC_IIR, iir);
2860abd58f01SBen Widawsky 			ret = IRQ_HANDLED;
2861e04f7eceSVille Syrjälä 
2862e04f7eceSVille Syrjälä 			if (iir & GEN8_DE_MISC_GSE) {
286391d14251STvrtko Ursulin 				intel_opregion_asle_intr(dev_priv);
2864e04f7eceSVille Syrjälä 				found = true;
2865e04f7eceSVille Syrjälä 			}
2866e04f7eceSVille Syrjälä 
2867e04f7eceSVille Syrjälä 			if (iir & GEN8_DE_EDP_PSR) {
286854fd3149SDhinakaran Pandiyan 				u32 psr_iir = I915_READ(EDP_PSR_IIR);
286954fd3149SDhinakaran Pandiyan 
287054fd3149SDhinakaran Pandiyan 				intel_psr_irq_handler(dev_priv, psr_iir);
287154fd3149SDhinakaran Pandiyan 				I915_WRITE(EDP_PSR_IIR, psr_iir);
2872e04f7eceSVille Syrjälä 				found = true;
2873e04f7eceSVille Syrjälä 			}
2874e04f7eceSVille Syrjälä 
2875e04f7eceSVille Syrjälä 			if (!found)
287638cc46d7SOscar Mateo 				DRM_ERROR("Unexpected DE Misc interrupt\n");
2877abd58f01SBen Widawsky 		} else
287938cc46d7SOscar Mateo 			DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
2880abd58f01SBen Widawsky 	}
2881abd58f01SBen Widawsky 
2882121e758eSDhinakaran Pandiyan 	if (INTEL_GEN(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) {
2883121e758eSDhinakaran Pandiyan 		iir = I915_READ(GEN11_DE_HPD_IIR);
2884121e758eSDhinakaran Pandiyan 		if (iir) {
2885121e758eSDhinakaran Pandiyan 			I915_WRITE(GEN11_DE_HPD_IIR, iir);
2886121e758eSDhinakaran Pandiyan 			ret = IRQ_HANDLED;
2887121e758eSDhinakaran Pandiyan 			gen11_hpd_irq_handler(dev_priv, iir);
2888121e758eSDhinakaran Pandiyan 		} else {
2889121e758eSDhinakaran Pandiyan 			DRM_ERROR("The master control interrupt lied (DE HPD)!\n");
2890121e758eSDhinakaran Pandiyan 		}
2891121e758eSDhinakaran Pandiyan 	}
2892121e758eSDhinakaran Pandiyan 
28936d766f02SDaniel Vetter 	if (master_ctl & GEN8_DE_PORT_IRQ) {
2894e32192e1STvrtko Ursulin 		iir = I915_READ(GEN8_DE_PORT_IIR);
2895e32192e1STvrtko Ursulin 		if (iir) {
2896e32192e1STvrtko Ursulin 			u32 tmp_mask;
2897d04a492dSShashank Sharma 			bool found = false;
2898cebd87a0SVille Syrjälä 
2899e32192e1STvrtko Ursulin 			I915_WRITE(GEN8_DE_PORT_IIR, iir);
29006d766f02SDaniel Vetter 			ret = IRQ_HANDLED;
290188e04703SJesse Barnes 
29029d17210fSLucas De Marchi 			if (iir & gen8_de_port_aux_mask(dev_priv)) {
290391d14251STvrtko Ursulin 				dp_aux_irq_handler(dev_priv);
2904d04a492dSShashank Sharma 				found = true;
2905d04a492dSShashank Sharma 			}
2906d04a492dSShashank Sharma 
2907cc3f90f0SAnder Conselvan de Oliveira 			if (IS_GEN9_LP(dev_priv)) {
2908e32192e1STvrtko Ursulin 				tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK;
2909e32192e1STvrtko Ursulin 				if (tmp_mask) {
291091d14251STvrtko Ursulin 					bxt_hpd_irq_handler(dev_priv, tmp_mask,
291191d14251STvrtko Ursulin 							    hpd_bxt);
2912d04a492dSShashank Sharma 					found = true;
2913d04a492dSShashank Sharma 				}
2914e32192e1STvrtko Ursulin 			} else if (IS_BROADWELL(dev_priv)) {
2915e32192e1STvrtko Ursulin 				tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG;
2916e32192e1STvrtko Ursulin 				if (tmp_mask) {
291791d14251STvrtko Ursulin 					ilk_hpd_irq_handler(dev_priv,
291891d14251STvrtko Ursulin 							    tmp_mask, hpd_bdw);
2919e32192e1STvrtko Ursulin 					found = true;
2920e32192e1STvrtko Ursulin 				}
2921e32192e1STvrtko Ursulin 			}
2922d04a492dSShashank Sharma 
2923cc3f90f0SAnder Conselvan de Oliveira 			if (IS_GEN9_LP(dev_priv) && (iir & BXT_DE_PORT_GMBUS)) {
292491d14251STvrtko Ursulin 				gmbus_irq_handler(dev_priv);
29259e63743eSShashank Sharma 				found = true;
29269e63743eSShashank Sharma 			}
29279e63743eSShashank Sharma 
2928d04a492dSShashank Sharma 			if (!found)
292938cc46d7SOscar Mateo 				DRM_ERROR("Unexpected DE Port interrupt\n");
29306d766f02SDaniel Vetter 		} else
293238cc46d7SOscar Mateo 			DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
29336d766f02SDaniel Vetter 	}
29346d766f02SDaniel Vetter 
2935055e393fSDamien Lespiau 	for_each_pipe(dev_priv, pipe) {
2936fd3a4024SDaniel Vetter 		u32 fault_errors;
2937abd58f01SBen Widawsky 
2938c42664ccSDaniel Vetter 		if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
2939c42664ccSDaniel Vetter 			continue;
2940c42664ccSDaniel Vetter 
2941e32192e1STvrtko Ursulin 		iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
2942e32192e1STvrtko Ursulin 		if (!iir) {
2943e32192e1STvrtko Ursulin 			DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
2944e32192e1STvrtko Ursulin 			continue;
2945e32192e1STvrtko Ursulin 		}
2946770de83dSDamien Lespiau 
2947e32192e1STvrtko Ursulin 		ret = IRQ_HANDLED;
2948e32192e1STvrtko Ursulin 		I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir);
2949e32192e1STvrtko Ursulin 
2950fd3a4024SDaniel Vetter 		if (iir & GEN8_PIPE_VBLANK)
2951fd3a4024SDaniel Vetter 			drm_handle_vblank(&dev_priv->drm, pipe);
2952abd58f01SBen Widawsky 
2953e32192e1STvrtko Ursulin 		if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
295491d14251STvrtko Ursulin 			hsw_pipe_crc_irq_handler(dev_priv, pipe);
29550fbe7870SDaniel Vetter 
2956e32192e1STvrtko Ursulin 		if (iir & GEN8_PIPE_FIFO_UNDERRUN)
2957e32192e1STvrtko Ursulin 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
295838d83c96SDaniel Vetter 
2959e32192e1STvrtko Ursulin 		fault_errors = iir;
2960bca2bf2aSPandiyan, Dhinakaran 		if (INTEL_GEN(dev_priv) >= 9)
2961e32192e1STvrtko Ursulin 			fault_errors &= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
2962770de83dSDamien Lespiau 		else
2963e32192e1STvrtko Ursulin 			fault_errors &= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
2964770de83dSDamien Lespiau 
2965770de83dSDamien Lespiau 		if (fault_errors)
29661353ec38STvrtko Ursulin 			DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
296730100f2bSDaniel Vetter 				  pipe_name(pipe),
2968e32192e1STvrtko Ursulin 				  fault_errors);
2969abd58f01SBen Widawsky 	}
2970abd58f01SBen Widawsky 
297191d14251STvrtko Ursulin 	if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) &&
2972266ea3d9SShashank Sharma 	    master_ctl & GEN8_DE_PCH_IRQ) {
297392d03a80SDaniel Vetter 		/*
297492d03a80SDaniel Vetter 		 * FIXME(BDW): Assume for now that the new interrupt handling
297592d03a80SDaniel Vetter 		 * scheme also closed the SDE interrupt handling race we've seen
297692d03a80SDaniel Vetter 		 * on older pch-split platforms. But this needs testing.
297792d03a80SDaniel Vetter 		 */
2978e32192e1STvrtko Ursulin 		iir = I915_READ(SDEIIR);
2979e32192e1STvrtko Ursulin 		if (iir) {
2980e32192e1STvrtko Ursulin 			I915_WRITE(SDEIIR, iir);
298192d03a80SDaniel Vetter 			ret = IRQ_HANDLED;
29826dbf30ceSVille Syrjälä 
2983c6f7acb8SMatt Roper 			if (INTEL_PCH_TYPE(dev_priv) >= PCH_MCC)
2984c6f7acb8SMatt Roper 				icp_irq_handler(dev_priv, iir, hpd_mcc);
2985c6f7acb8SMatt Roper 			else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
2986c6f7acb8SMatt Roper 				icp_irq_handler(dev_priv, iir, hpd_icp);
2987c6c30b91SRodrigo Vivi 			else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
298891d14251STvrtko Ursulin 				spt_irq_handler(dev_priv, iir);
29896dbf30ceSVille Syrjälä 			else
299091d14251STvrtko Ursulin 				cpt_irq_handler(dev_priv, iir);
29912dfb0b81SJani Nikula 		} else {
29922dfb0b81SJani Nikula 			/*
29932dfb0b81SJani Nikula 			 * Like on previous PCH there seems to be something
29942dfb0b81SJani Nikula 			 * fishy going on with forwarding PCH interrupts.
29952dfb0b81SJani Nikula 			 */
29962dfb0b81SJani Nikula 			DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n");
29972dfb0b81SJani Nikula 		}
299892d03a80SDaniel Vetter 	}
299992d03a80SDaniel Vetter 
3000f11a0f46STvrtko Ursulin 	return ret;
3001f11a0f46STvrtko Ursulin }
3002f11a0f46STvrtko Ursulin 
30034376b9c9SMika Kuoppala static inline u32 gen8_master_intr_disable(void __iomem * const regs)
30044376b9c9SMika Kuoppala {
30054376b9c9SMika Kuoppala 	raw_reg_write(regs, GEN8_MASTER_IRQ, 0);
30064376b9c9SMika Kuoppala 
30074376b9c9SMika Kuoppala 	/*
30084376b9c9SMika Kuoppala 	 * Now with master disabled, get a sample of level indications
30094376b9c9SMika Kuoppala 	 * for this interrupt. Indications will be cleared on related acks.
30104376b9c9SMika Kuoppala 	 * New indications can and will light up during processing,
30114376b9c9SMika Kuoppala 	 * and will generate new interrupt after enabling master.
30124376b9c9SMika Kuoppala 	 */
30134376b9c9SMika Kuoppala 	return raw_reg_read(regs, GEN8_MASTER_IRQ);
30144376b9c9SMika Kuoppala }
30154376b9c9SMika Kuoppala 
30164376b9c9SMika Kuoppala static inline void gen8_master_intr_enable(void __iomem * const regs)
30174376b9c9SMika Kuoppala {
30184376b9c9SMika Kuoppala 	raw_reg_write(regs, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
30194376b9c9SMika Kuoppala }
30204376b9c9SMika Kuoppala 
3021f11a0f46STvrtko Ursulin static irqreturn_t gen8_irq_handler(int irq, void *arg)
3022f11a0f46STvrtko Ursulin {
3023b318b824SVille Syrjälä 	struct drm_i915_private *dev_priv = arg;
302425286aacSDaniele Ceraolo Spurio 	void __iomem * const regs = dev_priv->uncore.regs;
3025f11a0f46STvrtko Ursulin 	u32 master_ctl;
3026f0fd96f5SChris Wilson 	u32 gt_iir[4];
3027f11a0f46STvrtko Ursulin 
3028f11a0f46STvrtko Ursulin 	if (!intel_irqs_enabled(dev_priv))
3029f11a0f46STvrtko Ursulin 		return IRQ_NONE;
3030f11a0f46STvrtko Ursulin 
30314376b9c9SMika Kuoppala 	master_ctl = gen8_master_intr_disable(regs);
30324376b9c9SMika Kuoppala 	if (!master_ctl) {
30334376b9c9SMika Kuoppala 		gen8_master_intr_enable(regs);
3034f11a0f46STvrtko Ursulin 		return IRQ_NONE;
30354376b9c9SMika Kuoppala 	}
3036f11a0f46STvrtko Ursulin 
3037f11a0f46STvrtko Ursulin 	/* Find, clear, then process each source of interrupt */
303855ef72f2SChris Wilson 	gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);
3039f0fd96f5SChris Wilson 
3040f0fd96f5SChris Wilson 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
3041f0fd96f5SChris Wilson 	if (master_ctl & ~GEN8_GT_IRQS) {
30429102650fSDaniele Ceraolo Spurio 		disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
304355ef72f2SChris Wilson 		gen8_de_irq_handler(dev_priv, master_ctl);
30449102650fSDaniele Ceraolo Spurio 		enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
3045f0fd96f5SChris Wilson 	}
3046f11a0f46STvrtko Ursulin 
30474376b9c9SMika Kuoppala 	gen8_master_intr_enable(regs);
3048abd58f01SBen Widawsky 
3049f0fd96f5SChris Wilson 	gen8_gt_irq_handler(dev_priv, master_ctl, gt_iir);
30501f814dacSImre Deak 
305155ef72f2SChris Wilson 	return IRQ_HANDLED;
3052abd58f01SBen Widawsky }
3053abd58f01SBen Widawsky 
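/*
 * Select the given GT interrupt bank/bit and spin briefly until the
 * identity register reports a valid engine class/instance/intr triple.
 */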
305451951ae7SMika Kuoppala static u32
30559b77011eSTvrtko Ursulin gen11_gt_engine_identity(struct intel_gt *gt,
305651951ae7SMika Kuoppala 			 const unsigned int bank, const unsigned int bit)
305751951ae7SMika Kuoppala {
30589b77011eSTvrtko Ursulin 	void __iomem * const regs = gt->uncore->regs;
305951951ae7SMika Kuoppala 	u32 timeout_ts;
306051951ae7SMika Kuoppala 	u32 ident;
306151951ae7SMika Kuoppala 
30629b77011eSTvrtko Ursulin 	lockdep_assert_held(&gt->i915->irq_lock);
306396606f3bSOscar Mateo 
306451951ae7SMika Kuoppala 	raw_reg_write(regs, GEN11_IIR_REG_SELECTOR(bank), BIT(bit));
306551951ae7SMika Kuoppala 
306651951ae7SMika Kuoppala 	/*
306751951ae7SMika Kuoppala 	 * NB: Specs do not specify how long to spin wait,
306851951ae7SMika Kuoppala 	 * so we do ~100us as an educated guess.
306951951ae7SMika Kuoppala 	 */
307051951ae7SMika Kuoppala 	timeout_ts = (local_clock() >> 10) + 100;
307151951ae7SMika Kuoppala 	do {
307251951ae7SMika Kuoppala 		ident = raw_reg_read(regs, GEN11_INTR_IDENTITY_REG(bank));
307351951ae7SMika Kuoppala 	} while (!(ident & GEN11_INTR_DATA_VALID) &&
307451951ae7SMika Kuoppala 		 !time_after32(local_clock() >> 10, timeout_ts));
307551951ae7SMika Kuoppala 
307651951ae7SMika Kuoppala 	if (unlikely(!(ident & GEN11_INTR_DATA_VALID))) {
307751951ae7SMika Kuoppala 		DRM_ERROR("INTR_IDENTITY_REG%u:%u 0x%08x not valid!\n",
307851951ae7SMika Kuoppala 			  bank, bit, ident);
307951951ae7SMika Kuoppala 		return 0;
308051951ae7SMika Kuoppala 	}
308151951ae7SMika Kuoppala 
308251951ae7SMika Kuoppala 	raw_reg_write(regs, GEN11_INTR_IDENTITY_REG(bank),
308351951ae7SMika Kuoppala 		      GEN11_INTR_DATA_VALID);
308451951ae7SMika Kuoppala 
3085f744dbc2SMika Kuoppala 	return ident;
3086f744dbc2SMika Kuoppala }
3087f744dbc2SMika Kuoppala 
3088f744dbc2SMika Kuoppala static void
30899b77011eSTvrtko Ursulin gen11_other_irq_handler(struct intel_gt *gt, const u8 instance,
30909b77011eSTvrtko Ursulin 			const u16 iir)
3091f744dbc2SMika Kuoppala {
309254c52a84SOscar Mateo 	if (instance == OTHER_GUC_INSTANCE)
30938b5689d7SDaniele Ceraolo Spurio 		return guc_irq_handler(&gt->uc.guc, iir);
309454c52a84SOscar Mateo 
3095d02b98b8SOscar Mateo 	if (instance == OTHER_GTPM_INSTANCE)
309658820574STvrtko Ursulin 		return gen11_rps_irq_handler(gt, iir);
3097d02b98b8SOscar Mateo 
3098f744dbc2SMika Kuoppala 	WARN_ONCE(1, "unhandled other interrupt instance=0x%x, iir=0x%x\n",
3099f744dbc2SMika Kuoppala 		  instance, iir);
3100f744dbc2SMika Kuoppala }
3101f744dbc2SMika Kuoppala 
3102f744dbc2SMika Kuoppala static void
31039b77011eSTvrtko Ursulin gen11_engine_irq_handler(struct intel_gt *gt, const u8 class,
31049b77011eSTvrtko Ursulin 			 const u8 instance, const u16 iir)
3105f744dbc2SMika Kuoppala {
3106f744dbc2SMika Kuoppala 	struct intel_engine_cs *engine;
3107f744dbc2SMika Kuoppala 
3108f744dbc2SMika Kuoppala 	if (instance <= MAX_ENGINE_INSTANCE)
31099b77011eSTvrtko Ursulin 		engine = gt->i915->engine_class[class][instance];
3110f744dbc2SMika Kuoppala 	else
3111f744dbc2SMika Kuoppala 		engine = NULL;
3112f744dbc2SMika Kuoppala 
3113f744dbc2SMika Kuoppala 	if (likely(engine))
3114f744dbc2SMika Kuoppala 		return gen8_cs_irq_handler(engine, iir);
3115f744dbc2SMika Kuoppala 
3116f744dbc2SMika Kuoppala 	WARN_ONCE(1, "unhandled engine interrupt class=0x%x, instance=0x%x\n",
3117f744dbc2SMika Kuoppala 		  class, instance);
3118f744dbc2SMika Kuoppala }
3119f744dbc2SMika Kuoppala 
3120f744dbc2SMika Kuoppala static void
31219b77011eSTvrtko Ursulin gen11_gt_identity_handler(struct intel_gt *gt, const u32 identity)
3122f744dbc2SMika Kuoppala {
3123f744dbc2SMika Kuoppala 	const u8 class = GEN11_INTR_ENGINE_CLASS(identity);
3124f744dbc2SMika Kuoppala 	const u8 instance = GEN11_INTR_ENGINE_INSTANCE(identity);
3125f744dbc2SMika Kuoppala 	const u16 intr = GEN11_INTR_ENGINE_INTR(identity);
3126f744dbc2SMika Kuoppala 
3127f744dbc2SMika Kuoppala 	if (unlikely(!intr))
3128f744dbc2SMika Kuoppala 		return;
3129f744dbc2SMika Kuoppala 
3130f744dbc2SMika Kuoppala 	if (class <= COPY_ENGINE_CLASS)
31319b77011eSTvrtko Ursulin 		return gen11_engine_irq_handler(gt, class, instance, intr);
3132f744dbc2SMika Kuoppala 
3133f744dbc2SMika Kuoppala 	if (class == OTHER_CLASS)
31349b77011eSTvrtko Ursulin 		return gen11_other_irq_handler(gt, instance, intr);
3135f744dbc2SMika Kuoppala 
3136f744dbc2SMika Kuoppala 	WARN_ONCE(1, "unknown interrupt class=0x%x, instance=0x%x, intr=0x%x\n",
3137f744dbc2SMika Kuoppala 		  class, instance, intr);
313851951ae7SMika Kuoppala }
313951951ae7SMika Kuoppala 
314051951ae7SMika Kuoppala static void
31419b77011eSTvrtko Ursulin gen11_gt_bank_handler(struct intel_gt *gt, const unsigned int bank)
314251951ae7SMika Kuoppala {
31439b77011eSTvrtko Ursulin 	void __iomem * const regs = gt->uncore->regs;
314451951ae7SMika Kuoppala 	unsigned long intr_dw;
314551951ae7SMika Kuoppala 	unsigned int bit;
314651951ae7SMika Kuoppala 
31479b77011eSTvrtko Ursulin 	lockdep_assert_held(&gt->i915->irq_lock);
314851951ae7SMika Kuoppala 
314951951ae7SMika Kuoppala 	intr_dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
315051951ae7SMika Kuoppala 
315151951ae7SMika Kuoppala 	for_each_set_bit(bit, &intr_dw, 32) {
31529b77011eSTvrtko Ursulin 		const u32 ident = gen11_gt_engine_identity(gt, bank, bit);
315351951ae7SMika Kuoppala 
31549b77011eSTvrtko Ursulin 		gen11_gt_identity_handler(gt, ident);
315551951ae7SMika Kuoppala 	}
315651951ae7SMika Kuoppala 
315751951ae7SMika Kuoppala 	/* The clear must come after the shared identity register has been served for each engine */
315851951ae7SMika Kuoppala 	raw_reg_write(regs, GEN11_GT_INTR_DW(bank), intr_dw);
315951951ae7SMika Kuoppala }
316096606f3bSOscar Mateo 
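/*
 * Walk both GT interrupt banks under the irq_lock; gen11_gt_bank_handler()
 * above resolves each set identity bit to an engine or "other" handler.
 */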
316196606f3bSOscar Mateo static void
31629b77011eSTvrtko Ursulin gen11_gt_irq_handler(struct intel_gt *gt, const u32 master_ctl)
316396606f3bSOscar Mateo {
31649b77011eSTvrtko Ursulin 	struct drm_i915_private *i915 = gt->i915;
316596606f3bSOscar Mateo 	unsigned int bank;
316696606f3bSOscar Mateo 
316796606f3bSOscar Mateo 	spin_lock(&i915->irq_lock);
316896606f3bSOscar Mateo 
316996606f3bSOscar Mateo 	for (bank = 0; bank < 2; bank++) {
317096606f3bSOscar Mateo 		if (master_ctl & GEN11_GT_DW_IRQ(bank))
31719b77011eSTvrtko Ursulin 			gen11_gt_bank_handler(gt, bank);
317296606f3bSOscar Mateo 	}
317396606f3bSOscar Mateo 
317496606f3bSOscar Mateo 	spin_unlock(&i915->irq_lock);
317551951ae7SMika Kuoppala }
317651951ae7SMika Kuoppala 
31777a909383SChris Wilson static u32
31789b77011eSTvrtko Ursulin gen11_gu_misc_irq_ack(struct intel_gt *gt, const u32 master_ctl)
3179df0d28c1SDhinakaran Pandiyan {
31809b77011eSTvrtko Ursulin 	void __iomem * const regs = gt->uncore->regs;
31817a909383SChris Wilson 	u32 iir;
3182df0d28c1SDhinakaran Pandiyan 
3183df0d28c1SDhinakaran Pandiyan 	if (!(master_ctl & GEN11_GU_MISC_IRQ))
31847a909383SChris Wilson 		return 0;
3185df0d28c1SDhinakaran Pandiyan 
31867a909383SChris Wilson 	iir = raw_reg_read(regs, GEN11_GU_MISC_IIR);
31877a909383SChris Wilson 	if (likely(iir))
31887a909383SChris Wilson 		raw_reg_write(regs, GEN11_GU_MISC_IIR, iir);
31897a909383SChris Wilson 
31907a909383SChris Wilson 	return iir;
3191df0d28c1SDhinakaran Pandiyan }
3192df0d28c1SDhinakaran Pandiyan 
3193df0d28c1SDhinakaran Pandiyan static void
31949b77011eSTvrtko Ursulin gen11_gu_misc_irq_handler(struct intel_gt *gt, const u32 iir)
3195df0d28c1SDhinakaran Pandiyan {
3196df0d28c1SDhinakaran Pandiyan 	if (iir & GEN11_GU_MISC_GSE)
31979b77011eSTvrtko Ursulin 		intel_opregion_asle_intr(gt->i915);
3198df0d28c1SDhinakaran Pandiyan }
3199df0d28c1SDhinakaran Pandiyan 
320081067b71SMika Kuoppala static inline u32 gen11_master_intr_disable(void __iomem * const regs)
320181067b71SMika Kuoppala {
320281067b71SMika Kuoppala 	raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0);
320381067b71SMika Kuoppala 
320481067b71SMika Kuoppala 	/*
320581067b71SMika Kuoppala 	 * Now with master disabled, get a sample of level indications
320681067b71SMika Kuoppala 	 * for this interrupt. Indications will be cleared on related acks.
320781067b71SMika Kuoppala 	 * New indications can and will light up during processing,
320881067b71SMika Kuoppala 	 * and will generate a new interrupt after the master is re-enabled.
320981067b71SMika Kuoppala 	 */
321081067b71SMika Kuoppala 	return raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
321181067b71SMika Kuoppala }
321281067b71SMika Kuoppala 
321381067b71SMika Kuoppala static inline void gen11_master_intr_enable(void __iomem * const regs)
321481067b71SMika Kuoppala {
321581067b71SMika Kuoppala 	raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ);
321681067b71SMika Kuoppala }
321781067b71SMika Kuoppala 
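/*
 * Top-level gen11 interrupt flow: disable the master interrupt and sample
 * the pending sources, service the GT banks and (if flagged) the display
 * interrupts, ack GU_MISC, re-enable the master and finally handle the
 * acked GU_MISC events.
 */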
321851951ae7SMika Kuoppala static irqreturn_t gen11_irq_handler(int irq, void *arg)
321951951ae7SMika Kuoppala {
3220b318b824SVille Syrjälä 	struct drm_i915_private * const i915 = arg;
322125286aacSDaniele Ceraolo Spurio 	void __iomem * const regs = i915->uncore.regs;
32229b77011eSTvrtko Ursulin 	struct intel_gt *gt = &i915->gt;
322351951ae7SMika Kuoppala 	u32 master_ctl;
3224df0d28c1SDhinakaran Pandiyan 	u32 gu_misc_iir;
322551951ae7SMika Kuoppala 
322651951ae7SMika Kuoppala 	if (!intel_irqs_enabled(i915))
322751951ae7SMika Kuoppala 		return IRQ_NONE;
322851951ae7SMika Kuoppala 
322981067b71SMika Kuoppala 	master_ctl = gen11_master_intr_disable(regs);
323081067b71SMika Kuoppala 	if (!master_ctl) {
323181067b71SMika Kuoppala 		gen11_master_intr_enable(regs);
323251951ae7SMika Kuoppala 		return IRQ_NONE;
323381067b71SMika Kuoppala 	}
323451951ae7SMika Kuoppala 
323551951ae7SMika Kuoppala 	/* Find, clear, then process each source of interrupt. */
32369b77011eSTvrtko Ursulin 	gen11_gt_irq_handler(gt, master_ctl);
323751951ae7SMika Kuoppala 
323851951ae7SMika Kuoppala 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
323951951ae7SMika Kuoppala 	if (master_ctl & GEN11_DISPLAY_IRQ) {
324051951ae7SMika Kuoppala 		const u32 disp_ctl = raw_reg_read(regs, GEN11_DISPLAY_INT_CTL);
324151951ae7SMika Kuoppala 
32429102650fSDaniele Ceraolo Spurio 		disable_rpm_wakeref_asserts(&i915->runtime_pm);
324351951ae7SMika Kuoppala 		/*
324451951ae7SMika Kuoppala 		 * GEN11_DISPLAY_INT_CTL has same format as GEN8_MASTER_IRQ
324551951ae7SMika Kuoppala 		 * for the display related bits.
324651951ae7SMika Kuoppala 		 */
324751951ae7SMika Kuoppala 		gen8_de_irq_handler(i915, disp_ctl);
32489102650fSDaniele Ceraolo Spurio 		enable_rpm_wakeref_asserts(&i915->runtime_pm);
324951951ae7SMika Kuoppala 	}
325051951ae7SMika Kuoppala 
32519b77011eSTvrtko Ursulin 	gu_misc_iir = gen11_gu_misc_irq_ack(gt, master_ctl);
3252df0d28c1SDhinakaran Pandiyan 
325381067b71SMika Kuoppala 	gen11_master_intr_enable(regs);
325451951ae7SMika Kuoppala 
32559b77011eSTvrtko Ursulin 	gen11_gu_misc_irq_handler(gt, gu_misc_iir);
3256df0d28c1SDhinakaran Pandiyan 
325751951ae7SMika Kuoppala 	return IRQ_HANDLED;
325851951ae7SMika Kuoppala }
325951951ae7SMika Kuoppala 
326042f52ef8SKeith Packard /* Called from drm generic code, passed 'crtc' from which
326142f52ef8SKeith Packard  * we derive the pipe index
326242f52ef8SKeith Packard  */
326308fa8fd0SVille Syrjälä int i8xx_enable_vblank(struct drm_crtc *crtc)
32640a3e67a4SJesse Barnes {
326508fa8fd0SVille Syrjälä 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
326608fa8fd0SVille Syrjälä 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
3267e9d21d7fSKeith Packard 	unsigned long irqflags;
326871e0ffa5SJesse Barnes 
32691ec14ad3SChris Wilson 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
327086e83e35SChris Wilson 	i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
327186e83e35SChris Wilson 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
327286e83e35SChris Wilson 
327386e83e35SChris Wilson 	return 0;
327486e83e35SChris Wilson }
327586e83e35SChris Wilson 
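/*
 * On i945gm vblank interrupts cannot wake the device from C3, so the
 * enable/disable hooks keep a reference count and kick a worker that
 * adjusts a CPU PM QoS request while vblank interrupts are in use
 * (see i945gm_vblank_work_func() below).
 */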
327608fa8fd0SVille Syrjälä int i945gm_enable_vblank(struct drm_crtc *crtc)
3277d938da6bSVille Syrjälä {
327808fa8fd0SVille Syrjälä 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
3279d938da6bSVille Syrjälä 
3280d938da6bSVille Syrjälä 	if (dev_priv->i945gm_vblank.enabled++ == 0)
3281d938da6bSVille Syrjälä 		schedule_work(&dev_priv->i945gm_vblank.work);
3282d938da6bSVille Syrjälä 
328308fa8fd0SVille Syrjälä 	return i8xx_enable_vblank(crtc);
3284d938da6bSVille Syrjälä }
3285d938da6bSVille Syrjälä 
328608fa8fd0SVille Syrjälä int i965_enable_vblank(struct drm_crtc *crtc)
328786e83e35SChris Wilson {
328808fa8fd0SVille Syrjälä 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
328908fa8fd0SVille Syrjälä 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
329086e83e35SChris Wilson 	unsigned long irqflags;
329186e83e35SChris Wilson 
329286e83e35SChris Wilson 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
32937c463586SKeith Packard 	i915_enable_pipestat(dev_priv, pipe,
3294755e9019SImre Deak 			     PIPE_START_VBLANK_INTERRUPT_STATUS);
32951ec14ad3SChris Wilson 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
32968692d00eSChris Wilson 
32970a3e67a4SJesse Barnes 	return 0;
32980a3e67a4SJesse Barnes }
32990a3e67a4SJesse Barnes 
330008fa8fd0SVille Syrjälä int ilk_enable_vblank(struct drm_crtc *crtc)
3301f796cf8fSJesse Barnes {
330208fa8fd0SVille Syrjälä 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
330308fa8fd0SVille Syrjälä 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
3304f796cf8fSJesse Barnes 	unsigned long irqflags;
3305a9c287c9SJani Nikula 	u32 bit = INTEL_GEN(dev_priv) >= 7 ?
330686e83e35SChris Wilson 		DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
3307f796cf8fSJesse Barnes 
3308f796cf8fSJesse Barnes 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3309fbdedaeaSVille Syrjälä 	ilk_enable_display_irq(dev_priv, bit);
3310b1f14ad0SJesse Barnes 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3311b1f14ad0SJesse Barnes 
33122e8bf223SDhinakaran Pandiyan 	/* Even though there is no DMC, frame counter can get stuck when
33132e8bf223SDhinakaran Pandiyan 	 * PSR is active as no frames are generated.
33142e8bf223SDhinakaran Pandiyan 	 */
33152e8bf223SDhinakaran Pandiyan 	if (HAS_PSR(dev_priv))
331608fa8fd0SVille Syrjälä 		drm_crtc_vblank_restore(crtc);
33172e8bf223SDhinakaran Pandiyan 
3318b1f14ad0SJesse Barnes 	return 0;
3319b1f14ad0SJesse Barnes }
3320b1f14ad0SJesse Barnes 
332108fa8fd0SVille Syrjälä int bdw_enable_vblank(struct drm_crtc *crtc)
3322abd58f01SBen Widawsky {
332308fa8fd0SVille Syrjälä 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
332408fa8fd0SVille Syrjälä 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
3325abd58f01SBen Widawsky 	unsigned long irqflags;
3326abd58f01SBen Widawsky 
3327abd58f01SBen Widawsky 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3328013d3752SVille Syrjälä 	bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
3329abd58f01SBen Widawsky 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3330013d3752SVille Syrjälä 
33312e8bf223SDhinakaran Pandiyan 	/* Even if there is no DMC, frame counter can get stuck when
33322e8bf223SDhinakaran Pandiyan 	 * PSR is active as no frames are generated, so check only for PSR.
33332e8bf223SDhinakaran Pandiyan 	 */
33342e8bf223SDhinakaran Pandiyan 	if (HAS_PSR(dev_priv))
333508fa8fd0SVille Syrjälä 		drm_crtc_vblank_restore(crtc);
33362e8bf223SDhinakaran Pandiyan 
3337abd58f01SBen Widawsky 	return 0;
3338abd58f01SBen Widawsky }
3339abd58f01SBen Widawsky 
334042f52ef8SKeith Packard /* Called from drm generic code, passed 'crtc' from which
334142f52ef8SKeith Packard  * we derive the pipe index
334242f52ef8SKeith Packard  */
334308fa8fd0SVille Syrjälä void i8xx_disable_vblank(struct drm_crtc *crtc)
334486e83e35SChris Wilson {
334508fa8fd0SVille Syrjälä 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
334608fa8fd0SVille Syrjälä 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
334786e83e35SChris Wilson 	unsigned long irqflags;
334886e83e35SChris Wilson 
334986e83e35SChris Wilson 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
335086e83e35SChris Wilson 	i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
335186e83e35SChris Wilson 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
335286e83e35SChris Wilson }
335386e83e35SChris Wilson 
335408fa8fd0SVille Syrjälä void i945gm_disable_vblank(struct drm_crtc *crtc)
3355d938da6bSVille Syrjälä {
335608fa8fd0SVille Syrjälä 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
3357d938da6bSVille Syrjälä 
335808fa8fd0SVille Syrjälä 	i8xx_disable_vblank(crtc);
3359d938da6bSVille Syrjälä 
3360d938da6bSVille Syrjälä 	if (--dev_priv->i945gm_vblank.enabled == 0)
3361d938da6bSVille Syrjälä 		schedule_work(&dev_priv->i945gm_vblank.work);
3362d938da6bSVille Syrjälä }
3363d938da6bSVille Syrjälä 
336408fa8fd0SVille Syrjälä void i965_disable_vblank(struct drm_crtc *crtc)
33650a3e67a4SJesse Barnes {
336608fa8fd0SVille Syrjälä 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
336708fa8fd0SVille Syrjälä 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
3368e9d21d7fSKeith Packard 	unsigned long irqflags;
33690a3e67a4SJesse Barnes 
33701ec14ad3SChris Wilson 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
33717c463586SKeith Packard 	i915_disable_pipestat(dev_priv, pipe,
3372755e9019SImre Deak 			      PIPE_START_VBLANK_INTERRUPT_STATUS);
33731ec14ad3SChris Wilson 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
33740a3e67a4SJesse Barnes }
33750a3e67a4SJesse Barnes 
337608fa8fd0SVille Syrjälä void ilk_disable_vblank(struct drm_crtc *crtc)
3377f796cf8fSJesse Barnes {
337808fa8fd0SVille Syrjälä 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
337908fa8fd0SVille Syrjälä 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
3380f796cf8fSJesse Barnes 	unsigned long irqflags;
3381a9c287c9SJani Nikula 	u32 bit = INTEL_GEN(dev_priv) >= 7 ?
338286e83e35SChris Wilson 		DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
3383f796cf8fSJesse Barnes 
3384f796cf8fSJesse Barnes 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3385fbdedaeaSVille Syrjälä 	ilk_disable_display_irq(dev_priv, bit);
3386b1f14ad0SJesse Barnes 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3387b1f14ad0SJesse Barnes }
3388b1f14ad0SJesse Barnes 
338908fa8fd0SVille Syrjälä void bdw_disable_vblank(struct drm_crtc *crtc)
3390abd58f01SBen Widawsky {
339108fa8fd0SVille Syrjälä 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
339208fa8fd0SVille Syrjälä 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
3393abd58f01SBen Widawsky 	unsigned long irqflags;
3394abd58f01SBen Widawsky 
3395abd58f01SBen Widawsky 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3396013d3752SVille Syrjälä 	bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
3397abd58f01SBen Widawsky 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3398abd58f01SBen Widawsky }
3399abd58f01SBen Widawsky 
34007218524dSChris Wilson static void i945gm_vblank_work_func(struct work_struct *work)
3401d938da6bSVille Syrjälä {
3402d938da6bSVille Syrjälä 	struct drm_i915_private *dev_priv =
3403d938da6bSVille Syrjälä 		container_of(work, struct drm_i915_private, i945gm_vblank.work);
3404d938da6bSVille Syrjälä 
3405d938da6bSVille Syrjälä 	/*
3406d938da6bSVille Syrjälä 	 * Vblank interrupts fail to wake up the device from C3,
3407d938da6bSVille Syrjälä 	 * hence we want to prevent C3 usage while vblank interrupts
3408d938da6bSVille Syrjälä 	 * are enabled.
3409d938da6bSVille Syrjälä 	 */
3410d938da6bSVille Syrjälä 	pm_qos_update_request(&dev_priv->i945gm_vblank.pm_qos,
3411d938da6bSVille Syrjälä 			      READ_ONCE(dev_priv->i945gm_vblank.enabled) ?
3412d938da6bSVille Syrjälä 			      dev_priv->i945gm_vblank.c3_disable_latency :
3413d938da6bSVille Syrjälä 			      PM_QOS_DEFAULT_VALUE);
3414d938da6bSVille Syrjälä }
3415d938da6bSVille Syrjälä 
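/*
 * Look up the named cpuidle state (e.g. "C3") and return an exit latency
 * just below it; requesting that value via PM QoS keeps the CPU out of
 * the state in question.
 */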
3416d938da6bSVille Syrjälä static int cstate_disable_latency(const char *name)
3417d938da6bSVille Syrjälä {
3418d938da6bSVille Syrjälä 	const struct cpuidle_driver *drv;
3419d938da6bSVille Syrjälä 	int i;
3420d938da6bSVille Syrjälä 
3421d938da6bSVille Syrjälä 	drv = cpuidle_get_driver();
3422d938da6bSVille Syrjälä 	if (!drv)
3423d938da6bSVille Syrjälä 		return 0;
3424d938da6bSVille Syrjälä 
3425d938da6bSVille Syrjälä 	for (i = 0; i < drv->state_count; i++) {
3426d938da6bSVille Syrjälä 		const struct cpuidle_state *state = &drv->states[i];
3427d938da6bSVille Syrjälä 
3428d938da6bSVille Syrjälä 		if (!strcmp(state->name, name))
3429d938da6bSVille Syrjälä 			return state->exit_latency ?
3430d938da6bSVille Syrjälä 				state->exit_latency - 1 : 0;
3431d938da6bSVille Syrjälä 	}
3432d938da6bSVille Syrjälä 
3433d938da6bSVille Syrjälä 	return 0;
3434d938da6bSVille Syrjälä }
3435d938da6bSVille Syrjälä 
3436d938da6bSVille Syrjälä static void i945gm_vblank_work_init(struct drm_i915_private *dev_priv)
3437d938da6bSVille Syrjälä {
3438d938da6bSVille Syrjälä 	INIT_WORK(&dev_priv->i945gm_vblank.work,
3439d938da6bSVille Syrjälä 		  i945gm_vblank_work_func);
3440d938da6bSVille Syrjälä 
3441d938da6bSVille Syrjälä 	dev_priv->i945gm_vblank.c3_disable_latency =
3442d938da6bSVille Syrjälä 		cstate_disable_latency("C3");
3443d938da6bSVille Syrjälä 	pm_qos_add_request(&dev_priv->i945gm_vblank.pm_qos,
3444d938da6bSVille Syrjälä 			   PM_QOS_CPU_DMA_LATENCY,
3445d938da6bSVille Syrjälä 			   PM_QOS_DEFAULT_VALUE);
3446d938da6bSVille Syrjälä }
3447d938da6bSVille Syrjälä 
3448d938da6bSVille Syrjälä static void i945gm_vblank_work_fini(struct drm_i915_private *dev_priv)
3449d938da6bSVille Syrjälä {
3450d938da6bSVille Syrjälä 	cancel_work_sync(&dev_priv->i945gm_vblank.work);
3451d938da6bSVille Syrjälä 	pm_qos_remove_request(&dev_priv->i945gm_vblank.pm_qos);
3452d938da6bSVille Syrjälä }
3453d938da6bSVille Syrjälä 
3454b243f530STvrtko Ursulin static void ibx_irq_reset(struct drm_i915_private *dev_priv)
345591738a95SPaulo Zanoni {
3456b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
3457b16b2a2fSPaulo Zanoni 
34586e266956STvrtko Ursulin 	if (HAS_PCH_NOP(dev_priv))
345991738a95SPaulo Zanoni 		return;
346091738a95SPaulo Zanoni 
3461b16b2a2fSPaulo Zanoni 	GEN3_IRQ_RESET(uncore, SDE);
3462105b122eSPaulo Zanoni 
34636e266956STvrtko Ursulin 	if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
3464105b122eSPaulo Zanoni 		I915_WRITE(SERR_INT, 0xffffffff);
3465622364b6SPaulo Zanoni }
3466105b122eSPaulo Zanoni 
346791738a95SPaulo Zanoni /*
3468622364b6SPaulo Zanoni  * SDEIER is also touched by the interrupt handler to work around missed PCH
3469622364b6SPaulo Zanoni  * interrupts. Hence we can't update it after the interrupt handler is enabled -
3470622364b6SPaulo Zanoni  * instead we unconditionally enable all PCH interrupt sources here, but then
3471622364b6SPaulo Zanoni  * only unmask them as needed with SDEIMR.
3472622364b6SPaulo Zanoni  *
3473622364b6SPaulo Zanoni  * This function needs to be called before interrupts are enabled.
347491738a95SPaulo Zanoni  */
3475b318b824SVille Syrjälä static void ibx_irq_pre_postinstall(struct drm_i915_private *dev_priv)
3476622364b6SPaulo Zanoni {
34776e266956STvrtko Ursulin 	if (HAS_PCH_NOP(dev_priv))
3478622364b6SPaulo Zanoni 		return;
3479622364b6SPaulo Zanoni 
3480622364b6SPaulo Zanoni 	WARN_ON(I915_READ(SDEIER) != 0);
348191738a95SPaulo Zanoni 	I915_WRITE(SDEIER, 0xffffffff);
348291738a95SPaulo Zanoni 	POSTING_READ(SDEIER);
348391738a95SPaulo Zanoni }
348491738a95SPaulo Zanoni 
3485b243f530STvrtko Ursulin static void gen5_gt_irq_reset(struct drm_i915_private *dev_priv)
3486d18ea1b5SDaniel Vetter {
3487b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
3488b16b2a2fSPaulo Zanoni 
3489b16b2a2fSPaulo Zanoni 	GEN3_IRQ_RESET(uncore, GT);
3490b243f530STvrtko Ursulin 	if (INTEL_GEN(dev_priv) >= 6)
3491b16b2a2fSPaulo Zanoni 		GEN3_IRQ_RESET(uncore, GEN6_PM);
3492d18ea1b5SDaniel Vetter }
3493d18ea1b5SDaniel Vetter 
349470591a41SVille Syrjälä static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
349570591a41SVille Syrjälä {
3496b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
3497b16b2a2fSPaulo Zanoni 
349871b8b41dSVille Syrjälä 	if (IS_CHERRYVIEW(dev_priv))
3499f0818984STvrtko Ursulin 		intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
350071b8b41dSVille Syrjälä 	else
3501f0818984STvrtko Ursulin 		intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK);
350271b8b41dSVille Syrjälä 
3503ad22d106SVille Syrjälä 	i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
3504f0818984STvrtko Ursulin 	intel_uncore_write(uncore, PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
350570591a41SVille Syrjälä 
350644d9241eSVille Syrjälä 	i9xx_pipestat_irq_reset(dev_priv);
350770591a41SVille Syrjälä 
3508b16b2a2fSPaulo Zanoni 	GEN3_IRQ_RESET(uncore, VLV_);
35098bd099a7SChris Wilson 	dev_priv->irq_mask = ~0u;
351070591a41SVille Syrjälä }
351170591a41SVille Syrjälä 
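/*
 * Enable the CRC-done pipestat on every pipe plus GMBUS on pipe A, then
 * program VLV_IER with the display port, pipe event and LPE audio bits
 * (pipe C as well on CHV); irq_mask is kept as the complement of the
 * enable mask.
 */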
35128bb61306SVille Syrjälä static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
35138bb61306SVille Syrjälä {
3514b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
3515b16b2a2fSPaulo Zanoni 
35168bb61306SVille Syrjälä 	u32 pipestat_mask;
35179ab981f2SVille Syrjälä 	u32 enable_mask;
35188bb61306SVille Syrjälä 	enum pipe pipe;
35198bb61306SVille Syrjälä 
3520842ebf7aSVille Syrjälä 	pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS;
35218bb61306SVille Syrjälä 
35228bb61306SVille Syrjälä 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
35238bb61306SVille Syrjälä 	for_each_pipe(dev_priv, pipe)
35248bb61306SVille Syrjälä 		i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
35258bb61306SVille Syrjälä 
35269ab981f2SVille Syrjälä 	enable_mask = I915_DISPLAY_PORT_INTERRUPT |
35278bb61306SVille Syrjälä 		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3528ebf5f921SVille Syrjälä 		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3529ebf5f921SVille Syrjälä 		I915_LPE_PIPE_A_INTERRUPT |
3530ebf5f921SVille Syrjälä 		I915_LPE_PIPE_B_INTERRUPT;
3531ebf5f921SVille Syrjälä 
35328bb61306SVille Syrjälä 	if (IS_CHERRYVIEW(dev_priv))
3533ebf5f921SVille Syrjälä 		enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT |
3534ebf5f921SVille Syrjälä 			I915_LPE_PIPE_C_INTERRUPT;
35356b7eafc1SVille Syrjälä 
35368bd099a7SChris Wilson 	WARN_ON(dev_priv->irq_mask != ~0u);
35376b7eafc1SVille Syrjälä 
35389ab981f2SVille Syrjälä 	dev_priv->irq_mask = ~enable_mask;
35398bb61306SVille Syrjälä 
3540b16b2a2fSPaulo Zanoni 	GEN3_IRQ_INIT(uncore, VLV_, dev_priv->irq_mask, enable_mask);
35418bb61306SVille Syrjälä }
35428bb61306SVille Syrjälä 
35438bb61306SVille Syrjälä /* drm_dma.h hooks */
3545b318b824SVille Syrjälä static void ironlake_irq_reset(struct drm_i915_private *dev_priv)
35468bb61306SVille Syrjälä {
3547b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
35488bb61306SVille Syrjälä 
3549b16b2a2fSPaulo Zanoni 	GEN3_IRQ_RESET(uncore, DE);
3550cf819effSLucas De Marchi 	if (IS_GEN(dev_priv, 7))
3551f0818984STvrtko Ursulin 		intel_uncore_write(uncore, GEN7_ERR_INT, 0xffffffff);
35528bb61306SVille Syrjälä 
3553fc340442SDaniel Vetter 	if (IS_HASWELL(dev_priv)) {
3554f0818984STvrtko Ursulin 		intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
3555f0818984STvrtko Ursulin 		intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
3556fc340442SDaniel Vetter 	}
3557fc340442SDaniel Vetter 
3558b243f530STvrtko Ursulin 	gen5_gt_irq_reset(dev_priv);
35598bb61306SVille Syrjälä 
3560b243f530STvrtko Ursulin 	ibx_irq_reset(dev_priv);
35618bb61306SVille Syrjälä }
35628bb61306SVille Syrjälä 
3563b318b824SVille Syrjälä static void valleyview_irq_reset(struct drm_i915_private *dev_priv)
35647e231dbeSJesse Barnes {
356534c7b8a7SVille Syrjälä 	I915_WRITE(VLV_MASTER_IER, 0);
356634c7b8a7SVille Syrjälä 	POSTING_READ(VLV_MASTER_IER);
356734c7b8a7SVille Syrjälä 
3568b243f530STvrtko Ursulin 	gen5_gt_irq_reset(dev_priv);
35697e231dbeSJesse Barnes 
3570ad22d106SVille Syrjälä 	spin_lock_irq(&dev_priv->irq_lock);
35719918271eSVille Syrjälä 	if (dev_priv->display_irqs_enabled)
357270591a41SVille Syrjälä 		vlv_display_irq_reset(dev_priv);
3573ad22d106SVille Syrjälä 	spin_unlock_irq(&dev_priv->irq_lock);
35747e231dbeSJesse Barnes }
35757e231dbeSJesse Barnes 
3576d6e3cca3SDaniel Vetter static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
3577d6e3cca3SDaniel Vetter {
3578b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
3579b16b2a2fSPaulo Zanoni 
3580b16b2a2fSPaulo Zanoni 	GEN8_IRQ_RESET_NDX(uncore, GT, 0);
3581b16b2a2fSPaulo Zanoni 	GEN8_IRQ_RESET_NDX(uncore, GT, 1);
3582b16b2a2fSPaulo Zanoni 	GEN8_IRQ_RESET_NDX(uncore, GT, 2);
3583b16b2a2fSPaulo Zanoni 	GEN8_IRQ_RESET_NDX(uncore, GT, 3);
3584d6e3cca3SDaniel Vetter }
3585d6e3cca3SDaniel Vetter 
3586b318b824SVille Syrjälä static void gen8_irq_reset(struct drm_i915_private *dev_priv)
3587abd58f01SBen Widawsky {
3588b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
3589abd58f01SBen Widawsky 	int pipe;
3590abd58f01SBen Widawsky 
359125286aacSDaniele Ceraolo Spurio 	gen8_master_intr_disable(dev_priv->uncore.regs);
3592abd58f01SBen Widawsky 
3593d6e3cca3SDaniel Vetter 	gen8_gt_irq_reset(dev_priv);
3594abd58f01SBen Widawsky 
3595f0818984STvrtko Ursulin 	intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
3596f0818984STvrtko Ursulin 	intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
3597e04f7eceSVille Syrjälä 
3598055e393fSDamien Lespiau 	for_each_pipe(dev_priv, pipe)
3599f458ebbcSDaniel Vetter 		if (intel_display_power_is_enabled(dev_priv,
3600813bde43SPaulo Zanoni 						   POWER_DOMAIN_PIPE(pipe)))
3601b16b2a2fSPaulo Zanoni 			GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
3602abd58f01SBen Widawsky 
3603b16b2a2fSPaulo Zanoni 	GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
3604b16b2a2fSPaulo Zanoni 	GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);
3605b16b2a2fSPaulo Zanoni 	GEN3_IRQ_RESET(uncore, GEN8_PCU_);
3606abd58f01SBen Widawsky 
36076e266956STvrtko Ursulin 	if (HAS_PCH_SPLIT(dev_priv))
3608b243f530STvrtko Ursulin 		ibx_irq_reset(dev_priv);
3609abd58f01SBen Widawsky }
3610abd58f01SBen Widawsky 
36119b77011eSTvrtko Ursulin static void gen11_gt_irq_reset(struct intel_gt *gt)
361251951ae7SMika Kuoppala {
3613f0818984STvrtko Ursulin 	struct intel_uncore *uncore = gt->uncore;
36149b77011eSTvrtko Ursulin 
361551951ae7SMika Kuoppala 	/* Disable RCS, BCS, VCS and VECS class engine interrupts. */
3616f0818984STvrtko Ursulin 	intel_uncore_write(uncore, GEN11_RENDER_COPY_INTR_ENABLE, 0);
3617f0818984STvrtko Ursulin 	intel_uncore_write(uncore, GEN11_VCS_VECS_INTR_ENABLE,	  0);
361851951ae7SMika Kuoppala 
361951951ae7SMika Kuoppala 	/* Restore the irq masks on RCS, BCS, VCS and VECS engines. */
3620f0818984STvrtko Ursulin 	intel_uncore_write(uncore, GEN11_RCS0_RSVD_INTR_MASK,	~0);
3621f0818984STvrtko Ursulin 	intel_uncore_write(uncore, GEN11_BCS_RSVD_INTR_MASK,	~0);
3622f0818984STvrtko Ursulin 	intel_uncore_write(uncore, GEN11_VCS0_VCS1_INTR_MASK,	~0);
3623f0818984STvrtko Ursulin 	intel_uncore_write(uncore, GEN11_VCS2_VCS3_INTR_MASK,	~0);
3624f0818984STvrtko Ursulin 	intel_uncore_write(uncore, GEN11_VECS0_VECS1_INTR_MASK,	~0);
3625d02b98b8SOscar Mateo 
3626f0818984STvrtko Ursulin 	intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
3627f0818984STvrtko Ursulin 	intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_MASK,  ~0);
3628f0818984STvrtko Ursulin 	intel_uncore_write(uncore, GEN11_GUC_SG_INTR_ENABLE, 0);
3629f0818984STvrtko Ursulin 	intel_uncore_write(uncore, GEN11_GUC_SG_INTR_MASK,  ~0);
363051951ae7SMika Kuoppala }
363151951ae7SMika Kuoppala 
3632b318b824SVille Syrjälä static void gen11_irq_reset(struct drm_i915_private *dev_priv)
363351951ae7SMika Kuoppala {
3634b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
363551951ae7SMika Kuoppala 	int pipe;
363651951ae7SMika Kuoppala 
363725286aacSDaniele Ceraolo Spurio 	gen11_master_intr_disable(dev_priv->uncore.regs);
363851951ae7SMika Kuoppala 
36399b77011eSTvrtko Ursulin 	gen11_gt_irq_reset(&dev_priv->gt);
364051951ae7SMika Kuoppala 
3641f0818984STvrtko Ursulin 	intel_uncore_write(uncore, GEN11_DISPLAY_INT_CTL, 0);
364251951ae7SMika Kuoppala 
3643f0818984STvrtko Ursulin 	intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
3644f0818984STvrtko Ursulin 	intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
364562819dfdSJosé Roberto de Souza 
364651951ae7SMika Kuoppala 	for_each_pipe(dev_priv, pipe)
364751951ae7SMika Kuoppala 		if (intel_display_power_is_enabled(dev_priv,
364851951ae7SMika Kuoppala 						   POWER_DOMAIN_PIPE(pipe)))
3649b16b2a2fSPaulo Zanoni 			GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
365051951ae7SMika Kuoppala 
3651b16b2a2fSPaulo Zanoni 	GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
3652b16b2a2fSPaulo Zanoni 	GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);
3653b16b2a2fSPaulo Zanoni 	GEN3_IRQ_RESET(uncore, GEN11_DE_HPD_);
3654b16b2a2fSPaulo Zanoni 	GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_);
3655b16b2a2fSPaulo Zanoni 	GEN3_IRQ_RESET(uncore, GEN8_PCU_);
365631604222SAnusha Srivatsa 
365729b43ae2SRodrigo Vivi 	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
3658b16b2a2fSPaulo Zanoni 		GEN3_IRQ_RESET(uncore, SDE);
365951951ae7SMika Kuoppala }
366051951ae7SMika Kuoppala 
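/*
 * The per-pipe DE interrupt registers are lost when the pipe's power well
 * goes down, so they are re-initialized here after the well comes back up
 * and reset (with irqs synchronized) before it is powered down.
 */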
36614c6c03beSDamien Lespiau void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
3662001bd2cbSImre Deak 				     u8 pipe_mask)
3663d49bdb0eSPaulo Zanoni {
3664b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
3665b16b2a2fSPaulo Zanoni 
3666a9c287c9SJani Nikula 	u32 extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
36676831f3e3SVille Syrjälä 	enum pipe pipe;
3668d49bdb0eSPaulo Zanoni 
366913321786SDaniel Vetter 	spin_lock_irq(&dev_priv->irq_lock);
36709dfe2e3aSImre Deak 
36719dfe2e3aSImre Deak 	if (!intel_irqs_enabled(dev_priv)) {
36729dfe2e3aSImre Deak 		spin_unlock_irq(&dev_priv->irq_lock);
36739dfe2e3aSImre Deak 		return;
36749dfe2e3aSImre Deak 	}
36759dfe2e3aSImre Deak 
36766831f3e3SVille Syrjälä 	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
3677b16b2a2fSPaulo Zanoni 		GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
36786831f3e3SVille Syrjälä 				  dev_priv->de_irq_mask[pipe],
36796831f3e3SVille Syrjälä 				  ~dev_priv->de_irq_mask[pipe] | extra_ier);
36809dfe2e3aSImre Deak 
368113321786SDaniel Vetter 	spin_unlock_irq(&dev_priv->irq_lock);
3682d49bdb0eSPaulo Zanoni }
3683d49bdb0eSPaulo Zanoni 
3684aae8ba84SVille Syrjälä void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
3685001bd2cbSImre Deak 				     u8 pipe_mask)
3686aae8ba84SVille Syrjälä {
3687b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
36886831f3e3SVille Syrjälä 	enum pipe pipe;
36896831f3e3SVille Syrjälä 
3690aae8ba84SVille Syrjälä 	spin_lock_irq(&dev_priv->irq_lock);
36919dfe2e3aSImre Deak 
36929dfe2e3aSImre Deak 	if (!intel_irqs_enabled(dev_priv)) {
36939dfe2e3aSImre Deak 		spin_unlock_irq(&dev_priv->irq_lock);
36949dfe2e3aSImre Deak 		return;
36959dfe2e3aSImre Deak 	}
36969dfe2e3aSImre Deak 
36976831f3e3SVille Syrjälä 	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
3698b16b2a2fSPaulo Zanoni 		GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
36999dfe2e3aSImre Deak 
3700aae8ba84SVille Syrjälä 	spin_unlock_irq(&dev_priv->irq_lock);
3701aae8ba84SVille Syrjälä 
3702aae8ba84SVille Syrjälä 	/* make sure we're done processing display irqs */
3703315ca4c4SVille Syrjälä 	intel_synchronize_irq(dev_priv);
3704aae8ba84SVille Syrjälä }
3705aae8ba84SVille Syrjälä 
3706b318b824SVille Syrjälä static void cherryview_irq_reset(struct drm_i915_private *dev_priv)
370743f328d7SVille Syrjälä {
3708b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
370943f328d7SVille Syrjälä 
371043f328d7SVille Syrjälä 	I915_WRITE(GEN8_MASTER_IRQ, 0);
371143f328d7SVille Syrjälä 	POSTING_READ(GEN8_MASTER_IRQ);
371243f328d7SVille Syrjälä 
3713d6e3cca3SDaniel Vetter 	gen8_gt_irq_reset(dev_priv);
371443f328d7SVille Syrjälä 
3715b16b2a2fSPaulo Zanoni 	GEN3_IRQ_RESET(uncore, GEN8_PCU_);
371643f328d7SVille Syrjälä 
3717ad22d106SVille Syrjälä 	spin_lock_irq(&dev_priv->irq_lock);
37189918271eSVille Syrjälä 	if (dev_priv->display_irqs_enabled)
371970591a41SVille Syrjälä 		vlv_display_irq_reset(dev_priv);
3720ad22d106SVille Syrjälä 	spin_unlock_irq(&dev_priv->irq_lock);
372143f328d7SVille Syrjälä }
372243f328d7SVille Syrjälä 
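/*
 * Collect, for every encoder whose HPD pin is currently HPD_ENABLED, the
 * matching bit from the platform hpd[] table; the *_hpd_irq_setup()
 * routines use the result to unmask only those hotplug lines.
 */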
372391d14251STvrtko Ursulin static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv,
372487a02106SVille Syrjälä 				  const u32 hpd[HPD_NUM_PINS])
372587a02106SVille Syrjälä {
372687a02106SVille Syrjälä 	struct intel_encoder *encoder;
372787a02106SVille Syrjälä 	u32 enabled_irqs = 0;
372887a02106SVille Syrjälä 
372991c8a326SChris Wilson 	for_each_intel_encoder(&dev_priv->drm, encoder)
373087a02106SVille Syrjälä 		if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
373187a02106SVille Syrjälä 			enabled_irqs |= hpd[encoder->hpd_pin];
373287a02106SVille Syrjälä 
373387a02106SVille Syrjälä 	return enabled_irqs;
373487a02106SVille Syrjälä }
373587a02106SVille Syrjälä 
37361a56b1a2SImre Deak static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv)
37371a56b1a2SImre Deak {
37381a56b1a2SImre Deak 	u32 hotplug;
37391a56b1a2SImre Deak 
37401a56b1a2SImre Deak 	/*
37411a56b1a2SImre Deak 	 * Enable digital hotplug on the PCH, and configure the DP short pulse
37421a56b1a2SImre Deak 	 * duration to 2ms (which is the minimum in the Display Port spec).
37431a56b1a2SImre Deak 	 * The pulse duration bits are reserved on LPT+.
37441a56b1a2SImre Deak 	 */
37451a56b1a2SImre Deak 	hotplug = I915_READ(PCH_PORT_HOTPLUG);
37461a56b1a2SImre Deak 	hotplug &= ~(PORTB_PULSE_DURATION_MASK |
37471a56b1a2SImre Deak 		     PORTC_PULSE_DURATION_MASK |
37481a56b1a2SImre Deak 		     PORTD_PULSE_DURATION_MASK);
37491a56b1a2SImre Deak 	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
37501a56b1a2SImre Deak 	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
37511a56b1a2SImre Deak 	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
37521a56b1a2SImre Deak 	/*
37531a56b1a2SImre Deak 	 * When CPU and PCH are on the same package, port A
37541a56b1a2SImre Deak 	 * HPD must be enabled in both north and south.
37551a56b1a2SImre Deak 	 */
37561a56b1a2SImre Deak 	if (HAS_PCH_LPT_LP(dev_priv))
37571a56b1a2SImre Deak 		hotplug |= PORTA_HOTPLUG_ENABLE;
37581a56b1a2SImre Deak 	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
37591a56b1a2SImre Deak }
37601a56b1a2SImre Deak 
376191d14251STvrtko Ursulin static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
376282a28bcfSDaniel Vetter {
37631a56b1a2SImre Deak 	u32 hotplug_irqs, enabled_irqs;
376482a28bcfSDaniel Vetter 
376591d14251STvrtko Ursulin 	if (HAS_PCH_IBX(dev_priv)) {
3766fee884edSDaniel Vetter 		hotplug_irqs = SDE_HOTPLUG_MASK;
376791d14251STvrtko Ursulin 		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ibx);
376882a28bcfSDaniel Vetter 	} else {
3769fee884edSDaniel Vetter 		hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
377091d14251STvrtko Ursulin 		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_cpt);
377182a28bcfSDaniel Vetter 	}
377282a28bcfSDaniel Vetter 
3773fee884edSDaniel Vetter 	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
377482a28bcfSDaniel Vetter 
37751a56b1a2SImre Deak 	ibx_hpd_detection_setup(dev_priv);
37766dbf30ceSVille Syrjälä }
377726951cafSXiong Zhang 
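/*
 * ICP-based PCHs split the hotplug enables between DDI and Type-C ports:
 * enable detection on DDI A/B and TC1-TC4.
 */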
377831604222SAnusha Srivatsa static void icp_hpd_detection_setup(struct drm_i915_private *dev_priv)
377931604222SAnusha Srivatsa {
378031604222SAnusha Srivatsa 	u32 hotplug;
378131604222SAnusha Srivatsa 
378231604222SAnusha Srivatsa 	hotplug = I915_READ(SHOTPLUG_CTL_DDI);
378331604222SAnusha Srivatsa 	hotplug |= ICP_DDIA_HPD_ENABLE |
378431604222SAnusha Srivatsa 		   ICP_DDIB_HPD_ENABLE;
378531604222SAnusha Srivatsa 	I915_WRITE(SHOTPLUG_CTL_DDI, hotplug);
378631604222SAnusha Srivatsa 
378731604222SAnusha Srivatsa 	hotplug = I915_READ(SHOTPLUG_CTL_TC);
378831604222SAnusha Srivatsa 	hotplug |= ICP_TC_HPD_ENABLE(PORT_TC1) |
378931604222SAnusha Srivatsa 		   ICP_TC_HPD_ENABLE(PORT_TC2) |
379031604222SAnusha Srivatsa 		   ICP_TC_HPD_ENABLE(PORT_TC3) |
379131604222SAnusha Srivatsa 		   ICP_TC_HPD_ENABLE(PORT_TC4);
379231604222SAnusha Srivatsa 	I915_WRITE(SHOTPLUG_CTL_TC, hotplug);
379331604222SAnusha Srivatsa }
379431604222SAnusha Srivatsa 
379531604222SAnusha Srivatsa static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv)
379631604222SAnusha Srivatsa {
379731604222SAnusha Srivatsa 	u32 hotplug_irqs, enabled_irqs;
379831604222SAnusha Srivatsa 
379931604222SAnusha Srivatsa 	hotplug_irqs = SDE_DDI_MASK_ICP | SDE_TC_MASK_ICP;
380031604222SAnusha Srivatsa 	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_icp);
380131604222SAnusha Srivatsa 
380231604222SAnusha Srivatsa 	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
380331604222SAnusha Srivatsa 
380431604222SAnusha Srivatsa 	icp_hpd_detection_setup(dev_priv);
380531604222SAnusha Srivatsa }
380631604222SAnusha Srivatsa 
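/*
 * Gen11 has parallel hotplug control registers for the Type-C and
 * Thunderbolt flavours of ports TC1-TC4; enable detection in both so a
 * hotplug on either connection type is reported.
 */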
3807121e758eSDhinakaran Pandiyan static void gen11_hpd_detection_setup(struct drm_i915_private *dev_priv)
3808121e758eSDhinakaran Pandiyan {
3809121e758eSDhinakaran Pandiyan 	u32 hotplug;
3810121e758eSDhinakaran Pandiyan 
3811121e758eSDhinakaran Pandiyan 	hotplug = I915_READ(GEN11_TC_HOTPLUG_CTL);
3812121e758eSDhinakaran Pandiyan 	hotplug |= GEN11_HOTPLUG_CTL_ENABLE(PORT_TC1) |
3813121e758eSDhinakaran Pandiyan 		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC2) |
3814121e758eSDhinakaran Pandiyan 		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC3) |
3815121e758eSDhinakaran Pandiyan 		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4);
3816121e758eSDhinakaran Pandiyan 	I915_WRITE(GEN11_TC_HOTPLUG_CTL, hotplug);
3817b796b971SDhinakaran Pandiyan 
3818b796b971SDhinakaran Pandiyan 	hotplug = I915_READ(GEN11_TBT_HOTPLUG_CTL);
3819b796b971SDhinakaran Pandiyan 	hotplug |= GEN11_HOTPLUG_CTL_ENABLE(PORT_TC1) |
3820b796b971SDhinakaran Pandiyan 		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC2) |
3821b796b971SDhinakaran Pandiyan 		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC3) |
3822b796b971SDhinakaran Pandiyan 		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4);
3823b796b971SDhinakaran Pandiyan 	I915_WRITE(GEN11_TBT_HOTPLUG_CTL, hotplug);
3824121e758eSDhinakaran Pandiyan }
3825121e758eSDhinakaran Pandiyan 
3826121e758eSDhinakaran Pandiyan static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
3827121e758eSDhinakaran Pandiyan {
3828121e758eSDhinakaran Pandiyan 	u32 hotplug_irqs, enabled_irqs;
3829121e758eSDhinakaran Pandiyan 	u32 val;
3830121e758eSDhinakaran Pandiyan 
3831b796b971SDhinakaran Pandiyan 	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_gen11);
3832b796b971SDhinakaran Pandiyan 	hotplug_irqs = GEN11_DE_TC_HOTPLUG_MASK | GEN11_DE_TBT_HOTPLUG_MASK;
3833121e758eSDhinakaran Pandiyan 
3834121e758eSDhinakaran Pandiyan 	val = I915_READ(GEN11_DE_HPD_IMR);
3835121e758eSDhinakaran Pandiyan 	val &= ~hotplug_irqs;
3836121e758eSDhinakaran Pandiyan 	I915_WRITE(GEN11_DE_HPD_IMR, val);
3837121e758eSDhinakaran Pandiyan 	POSTING_READ(GEN11_DE_HPD_IMR);
3838121e758eSDhinakaran Pandiyan 
3839121e758eSDhinakaran Pandiyan 	gen11_hpd_detection_setup(dev_priv);
384031604222SAnusha Srivatsa 
384129b43ae2SRodrigo Vivi 	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
384231604222SAnusha Srivatsa 		icp_hpd_irq_setup(dev_priv);
3843121e758eSDhinakaran Pandiyan }
3844121e758eSDhinakaran Pandiyan 
38452a57d9ccSImre Deak static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv)
38462a57d9ccSImre Deak {
38473b92e263SRodrigo Vivi 	u32 val, hotplug;
38483b92e263SRodrigo Vivi 
38493b92e263SRodrigo Vivi 	/* Display WA #1179 WaHardHangonHotPlug: cnp */
38503b92e263SRodrigo Vivi 	if (HAS_PCH_CNP(dev_priv)) {
38513b92e263SRodrigo Vivi 		val = I915_READ(SOUTH_CHICKEN1);
38523b92e263SRodrigo Vivi 		val &= ~CHASSIS_CLK_REQ_DURATION_MASK;
38533b92e263SRodrigo Vivi 		val |= CHASSIS_CLK_REQ_DURATION(0xf);
38543b92e263SRodrigo Vivi 		I915_WRITE(SOUTH_CHICKEN1, val);
38553b92e263SRodrigo Vivi 	}
38562a57d9ccSImre Deak 
38572a57d9ccSImre Deak 	/* Enable digital hotplug on the PCH */
38582a57d9ccSImre Deak 	hotplug = I915_READ(PCH_PORT_HOTPLUG);
38592a57d9ccSImre Deak 	hotplug |= PORTA_HOTPLUG_ENABLE |
38602a57d9ccSImre Deak 		   PORTB_HOTPLUG_ENABLE |
38612a57d9ccSImre Deak 		   PORTC_HOTPLUG_ENABLE |
38622a57d9ccSImre Deak 		   PORTD_HOTPLUG_ENABLE;
38632a57d9ccSImre Deak 	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
38642a57d9ccSImre Deak 
38652a57d9ccSImre Deak 	hotplug = I915_READ(PCH_PORT_HOTPLUG2);
38662a57d9ccSImre Deak 	hotplug |= PORTE_HOTPLUG_ENABLE;
38672a57d9ccSImre Deak 	I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
38682a57d9ccSImre Deak }
38692a57d9ccSImre Deak 
387091d14251STvrtko Ursulin static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
38716dbf30ceSVille Syrjälä {
38722a57d9ccSImre Deak 	u32 hotplug_irqs, enabled_irqs;
38736dbf30ceSVille Syrjälä 
38746dbf30ceSVille Syrjälä 	hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
387591d14251STvrtko Ursulin 	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_spt);
38766dbf30ceSVille Syrjälä 
38776dbf30ceSVille Syrjälä 	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
38786dbf30ceSVille Syrjälä 
38792a57d9ccSImre Deak 	spt_hpd_detection_setup(dev_priv);
388026951cafSXiong Zhang }
38817fe0b973SKeith Packard 
38821a56b1a2SImre Deak static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv)
38831a56b1a2SImre Deak {
38841a56b1a2SImre Deak 	u32 hotplug;
38851a56b1a2SImre Deak 
38861a56b1a2SImre Deak 	/*
38871a56b1a2SImre Deak 	 * Enable digital hotplug on the CPU, and configure the DP short pulse
38891a56b1a2SImre Deak 	 * duration to 2ms (which is the minimum in the Display Port spec).
38891a56b1a2SImre Deak 	 * The pulse duration bits are reserved on HSW+.
38901a56b1a2SImre Deak 	 */
38911a56b1a2SImre Deak 	hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
38921a56b1a2SImre Deak 	hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK;
38931a56b1a2SImre Deak 	hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE |
38941a56b1a2SImre Deak 		   DIGITAL_PORTA_PULSE_DURATION_2ms;
38951a56b1a2SImre Deak 	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
38961a56b1a2SImre Deak }
38971a56b1a2SImre Deak 
389891d14251STvrtko Ursulin static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
3899e4ce95aaSVille Syrjälä {
39001a56b1a2SImre Deak 	u32 hotplug_irqs, enabled_irqs;
3901e4ce95aaSVille Syrjälä 
390291d14251STvrtko Ursulin 	if (INTEL_GEN(dev_priv) >= 8) {
39033a3b3c7dSVille Syrjälä 		hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG;
390491d14251STvrtko Ursulin 		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bdw);
39053a3b3c7dSVille Syrjälä 
39063a3b3c7dSVille Syrjälä 		bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
390791d14251STvrtko Ursulin 	} else if (INTEL_GEN(dev_priv) >= 7) {
390823bb4cb5SVille Syrjälä 		hotplug_irqs = DE_DP_A_HOTPLUG_IVB;
390991d14251STvrtko Ursulin 		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ivb);
39103a3b3c7dSVille Syrjälä 
39113a3b3c7dSVille Syrjälä 		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
391223bb4cb5SVille Syrjälä 	} else {
3913e4ce95aaSVille Syrjälä 		hotplug_irqs = DE_DP_A_HOTPLUG;
391491d14251STvrtko Ursulin 		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ilk);
3915e4ce95aaSVille Syrjälä 
3916e4ce95aaSVille Syrjälä 		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
39173a3b3c7dSVille Syrjälä 	}
3918e4ce95aaSVille Syrjälä 
39191a56b1a2SImre Deak 	ilk_hpd_detection_setup(dev_priv);
3920e4ce95aaSVille Syrjälä 
392191d14251STvrtko Ursulin 	ibx_hpd_irq_setup(dev_priv);
3922e4ce95aaSVille Syrjälä }
3923e4ce95aaSVille Syrjälä 
39242a57d9ccSImre Deak static void __bxt_hpd_detection_setup(struct drm_i915_private *dev_priv,
39252a57d9ccSImre Deak 				      u32 enabled_irqs)
3926e0a20ad7SShashank Sharma {
39272a57d9ccSImre Deak 	u32 hotplug;
3928e0a20ad7SShashank Sharma 
3929a52bb15bSVille Syrjälä 	hotplug = I915_READ(PCH_PORT_HOTPLUG);
39302a57d9ccSImre Deak 	hotplug |= PORTA_HOTPLUG_ENABLE |
39312a57d9ccSImre Deak 		   PORTB_HOTPLUG_ENABLE |
39322a57d9ccSImre Deak 		   PORTC_HOTPLUG_ENABLE;
3933d252bf68SShubhangi Shrivastava 
3934d252bf68SShubhangi Shrivastava 	DRM_DEBUG_KMS("Invert bit setting: hp_ctl:%x hp_port:%x\n",
3935d252bf68SShubhangi Shrivastava 		      hotplug, enabled_irqs);
3936d252bf68SShubhangi Shrivastava 	hotplug &= ~BXT_DDI_HPD_INVERT_MASK;
3937d252bf68SShubhangi Shrivastava 
3938d252bf68SShubhangi Shrivastava 	/*
3939d252bf68SShubhangi Shrivastava 	 * On BXT the invert bit has to be set according to the AOB design
3940d252bf68SShubhangi Shrivastava 	 * for the HPD detection logic; update it from the VBT fields.
3941d252bf68SShubhangi Shrivastava 	 */
3942d252bf68SShubhangi Shrivastava 	if ((enabled_irqs & BXT_DE_PORT_HP_DDIA) &&
3943d252bf68SShubhangi Shrivastava 	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_A))
3944d252bf68SShubhangi Shrivastava 		hotplug |= BXT_DDIA_HPD_INVERT;
3945d252bf68SShubhangi Shrivastava 	if ((enabled_irqs & BXT_DE_PORT_HP_DDIB) &&
3946d252bf68SShubhangi Shrivastava 	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_B))
3947d252bf68SShubhangi Shrivastava 		hotplug |= BXT_DDIB_HPD_INVERT;
3948d252bf68SShubhangi Shrivastava 	if ((enabled_irqs & BXT_DE_PORT_HP_DDIC) &&
3949d252bf68SShubhangi Shrivastava 	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_C))
3950d252bf68SShubhangi Shrivastava 		hotplug |= BXT_DDIC_HPD_INVERT;
3951d252bf68SShubhangi Shrivastava 
3952a52bb15bSVille Syrjälä 	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3953e0a20ad7SShashank Sharma }
3954e0a20ad7SShashank Sharma 
39552a57d9ccSImre Deak static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv)
39562a57d9ccSImre Deak {
39572a57d9ccSImre Deak 	__bxt_hpd_detection_setup(dev_priv, BXT_DE_PORT_HOTPLUG_MASK);
39582a57d9ccSImre Deak }
39592a57d9ccSImre Deak 
39602a57d9ccSImre Deak static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
39612a57d9ccSImre Deak {
39622a57d9ccSImre Deak 	u32 hotplug_irqs, enabled_irqs;
39632a57d9ccSImre Deak 
39642a57d9ccSImre Deak 	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bxt);
39652a57d9ccSImre Deak 	hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK;
39662a57d9ccSImre Deak 
39672a57d9ccSImre Deak 	bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
39682a57d9ccSImre Deak 
39692a57d9ccSImre Deak 	__bxt_hpd_detection_setup(dev_priv, enabled_irqs);
39702a57d9ccSImre Deak }
39712a57d9ccSImre Deak 
3972b318b824SVille Syrjälä static void ibx_irq_postinstall(struct drm_i915_private *dev_priv)
3973d46da437SPaulo Zanoni {
397482a28bcfSDaniel Vetter 	u32 mask;
3975d46da437SPaulo Zanoni 
39766e266956STvrtko Ursulin 	if (HAS_PCH_NOP(dev_priv))
3977692a04cfSDaniel Vetter 		return;
3978692a04cfSDaniel Vetter 
39796e266956STvrtko Ursulin 	if (HAS_PCH_IBX(dev_priv))
39805c673b60SDaniel Vetter 		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
39814ebc6509SDhinakaran Pandiyan 	else if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
39825c673b60SDaniel Vetter 		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
39834ebc6509SDhinakaran Pandiyan 	else
39844ebc6509SDhinakaran Pandiyan 		mask = SDE_GMBUS_CPT;
39858664281bSPaulo Zanoni 
398665f42cdcSPaulo Zanoni 	gen3_assert_iir_is_zero(&dev_priv->uncore, SDEIIR);
3987d46da437SPaulo Zanoni 	I915_WRITE(SDEIMR, ~mask);
39882a57d9ccSImre Deak 
39892a57d9ccSImre Deak 	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
39902a57d9ccSImre Deak 	    HAS_PCH_LPT(dev_priv))
39911a56b1a2SImre Deak 		ibx_hpd_detection_setup(dev_priv);
39922a57d9ccSImre Deak 	else
39932a57d9ccSImre Deak 		spt_hpd_detection_setup(dev_priv);
3994d46da437SPaulo Zanoni }
3995d46da437SPaulo Zanoni 
3996b318b824SVille Syrjälä static void gen5_gt_irq_postinstall(struct drm_i915_private *dev_priv)
39970a9a8c91SDaniel Vetter {
3998b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
39990a9a8c91SDaniel Vetter 	u32 pm_irqs, gt_irqs;
40000a9a8c91SDaniel Vetter 
40010a9a8c91SDaniel Vetter 	pm_irqs = gt_irqs = 0;
40020a9a8c91SDaniel Vetter 
40030a9a8c91SDaniel Vetter 	dev_priv->gt_irq_mask = ~0;
40043c9192bcSTvrtko Ursulin 	if (HAS_L3_DPF(dev_priv)) {
40050a9a8c91SDaniel Vetter 		/* L3 parity interrupt is always unmasked. */
4006772c2a51STvrtko Ursulin 		dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev_priv);
4007772c2a51STvrtko Ursulin 		gt_irqs |= GT_PARITY_ERROR(dev_priv);
40080a9a8c91SDaniel Vetter 	}
40090a9a8c91SDaniel Vetter 
40100a9a8c91SDaniel Vetter 	gt_irqs |= GT_RENDER_USER_INTERRUPT;
4011cf819effSLucas De Marchi 	if (IS_GEN(dev_priv, 5)) {
4012f8973c21SChris Wilson 		gt_irqs |= ILK_BSD_USER_INTERRUPT;
40130a9a8c91SDaniel Vetter 	} else {
40140a9a8c91SDaniel Vetter 		gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
40150a9a8c91SDaniel Vetter 	}
40160a9a8c91SDaniel Vetter 
4017b16b2a2fSPaulo Zanoni 	GEN3_IRQ_INIT(uncore, GT, dev_priv->gt_irq_mask, gt_irqs);
40180a9a8c91SDaniel Vetter 
4019b243f530STvrtko Ursulin 	if (INTEL_GEN(dev_priv) >= 6) {
402078e68d36SImre Deak 		/*
402178e68d36SImre Deak 		 * RPS interrupts will get enabled/disabled on demand when RPS
402278e68d36SImre Deak 		 * itself is enabled/disabled.
402378e68d36SImre Deak 		 */
40248a68d464SChris Wilson 		if (HAS_ENGINE(dev_priv, VECS0)) {
40250a9a8c91SDaniel Vetter 			pm_irqs |= PM_VEBOX_USER_INTERRUPT;
402658820574STvrtko Ursulin 			dev_priv->gt.pm_ier |= PM_VEBOX_USER_INTERRUPT;
4027f4e9af4fSAkash Goel 		}
40280a9a8c91SDaniel Vetter 
402958820574STvrtko Ursulin 		dev_priv->gt.pm_imr = 0xffffffff;
403058820574STvrtko Ursulin 		GEN3_IRQ_INIT(uncore, GEN6_PM, dev_priv->gt.pm_imr, pm_irqs);
40310a9a8c91SDaniel Vetter 	}
40320a9a8c91SDaniel Vetter }
40330a9a8c91SDaniel Vetter 
4034b318b824SVille Syrjälä static void ironlake_irq_postinstall(struct drm_i915_private *dev_priv)
4035036a4a7dSZhenyu Wang {
4036b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
40378e76f8dcSPaulo Zanoni 	u32 display_mask, extra_mask;
40388e76f8dcSPaulo Zanoni 
4039b243f530STvrtko Ursulin 	if (INTEL_GEN(dev_priv) >= 7) {
40408e76f8dcSPaulo Zanoni 		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
4041842ebf7aSVille Syrjälä 				DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB);
40428e76f8dcSPaulo Zanoni 		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
404323bb4cb5SVille Syrjälä 			      DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
404423bb4cb5SVille Syrjälä 			      DE_DP_A_HOTPLUG_IVB);
40458e76f8dcSPaulo Zanoni 	} else {
40468e76f8dcSPaulo Zanoni 		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
4047842ebf7aSVille Syrjälä 				DE_AUX_CHANNEL_A | DE_PIPEB_CRC_DONE |
4048842ebf7aSVille Syrjälä 				DE_PIPEA_CRC_DONE | DE_POISON);
4049e4ce95aaSVille Syrjälä 		extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
4050e4ce95aaSVille Syrjälä 			      DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
4051e4ce95aaSVille Syrjälä 			      DE_DP_A_HOTPLUG);
40528e76f8dcSPaulo Zanoni 	}
4053036a4a7dSZhenyu Wang 
4054fc340442SDaniel Vetter 	if (IS_HASWELL(dev_priv)) {
4055b16b2a2fSPaulo Zanoni 		gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
40561aeb1b5fSDhinakaran Pandiyan 		intel_psr_irq_control(dev_priv, dev_priv->psr.debug);
4057fc340442SDaniel Vetter 		display_mask |= DE_EDP_PSR_INT_HSW;
4058fc340442SDaniel Vetter 	}
4059fc340442SDaniel Vetter 
40601ec14ad3SChris Wilson 	dev_priv->irq_mask = ~display_mask;
4061036a4a7dSZhenyu Wang 
4062b318b824SVille Syrjälä 	ibx_irq_pre_postinstall(dev_priv);
4063622364b6SPaulo Zanoni 
4064b16b2a2fSPaulo Zanoni 	GEN3_IRQ_INIT(uncore, DE, dev_priv->irq_mask,
4065b16b2a2fSPaulo Zanoni 		      display_mask | extra_mask);
4066036a4a7dSZhenyu Wang 
4067b318b824SVille Syrjälä 	gen5_gt_irq_postinstall(dev_priv);
4068036a4a7dSZhenyu Wang 
40691a56b1a2SImre Deak 	ilk_hpd_detection_setup(dev_priv);
40701a56b1a2SImre Deak 
4071b318b824SVille Syrjälä 	ibx_irq_postinstall(dev_priv);
40727fe0b973SKeith Packard 
407350a0bc90STvrtko Ursulin 	if (IS_IRONLAKE_M(dev_priv)) {
40746005ce42SDaniel Vetter 		/* Enable PCU event interrupts
40756005ce42SDaniel Vetter 		 *
40766005ce42SDaniel Vetter 		 * spinlocking not required here for correctness since interrupt
40774bc9d430SDaniel Vetter 		 * setup is guaranteed to run in single-threaded context. But we
40784bc9d430SDaniel Vetter 		 * need it to make the assert_spin_locked happy. */
4079d6207435SDaniel Vetter 		spin_lock_irq(&dev_priv->irq_lock);
4080fbdedaeaSVille Syrjälä 		ilk_enable_display_irq(dev_priv, DE_PCU_EVENT);
4081d6207435SDaniel Vetter 		spin_unlock_irq(&dev_priv->irq_lock);
4082f97108d1SJesse Barnes 	}
4083036a4a7dSZhenyu Wang }
4084036a4a7dSZhenyu Wang 
4085f8b79e58SImre Deak void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
4086f8b79e58SImre Deak {
408767520415SChris Wilson 	lockdep_assert_held(&dev_priv->irq_lock);
4088f8b79e58SImre Deak 
4089f8b79e58SImre Deak 	if (dev_priv->display_irqs_enabled)
4090f8b79e58SImre Deak 		return;
4091f8b79e58SImre Deak 
4092f8b79e58SImre Deak 	dev_priv->display_irqs_enabled = true;
4093f8b79e58SImre Deak 
4094d6c69803SVille Syrjälä 	if (intel_irqs_enabled(dev_priv)) {
4095d6c69803SVille Syrjälä 		vlv_display_irq_reset(dev_priv);
4096ad22d106SVille Syrjälä 		vlv_display_irq_postinstall(dev_priv);
4097f8b79e58SImre Deak 	}
4098d6c69803SVille Syrjälä }
4099f8b79e58SImre Deak 
4100f8b79e58SImre Deak void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
4101f8b79e58SImre Deak {
410267520415SChris Wilson 	lockdep_assert_held(&dev_priv->irq_lock);
4103f8b79e58SImre Deak 
4104f8b79e58SImre Deak 	if (!dev_priv->display_irqs_enabled)
4105f8b79e58SImre Deak 		return;
4106f8b79e58SImre Deak 
4107f8b79e58SImre Deak 	dev_priv->display_irqs_enabled = false;
4108f8b79e58SImre Deak 
4109950eabafSImre Deak 	if (intel_irqs_enabled(dev_priv))
4110ad22d106SVille Syrjälä 		vlv_display_irq_reset(dev_priv);
4111f8b79e58SImre Deak }
4112f8b79e58SImre Deak 
4114b318b824SVille Syrjälä static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv)
41150e6c9a9eSVille Syrjälä {
4116b318b824SVille Syrjälä 	gen5_gt_irq_postinstall(dev_priv);
41177e231dbeSJesse Barnes 
4118ad22d106SVille Syrjälä 	spin_lock_irq(&dev_priv->irq_lock);
41199918271eSVille Syrjälä 	if (dev_priv->display_irqs_enabled)
4120ad22d106SVille Syrjälä 		vlv_display_irq_postinstall(dev_priv);
4121ad22d106SVille Syrjälä 	spin_unlock_irq(&dev_priv->irq_lock);
4122ad22d106SVille Syrjälä 
41237e231dbeSJesse Barnes 	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
412434c7b8a7SVille Syrjälä 	POSTING_READ(VLV_MASTER_IER);
412520afbda2SDaniel Vetter }
412620afbda2SDaniel Vetter 
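/*
 * gt_interrupts[] below maps onto GEN8_GT_IER(0..3): user-interrupt and
 * context-switch bits for the render/blitter pair in bank 0 and the two
 * video engines in bank 1, the PM/RPS bank 2 left fully masked (enabled
 * on demand), and the VECS bits in bank 3.
 */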
412758820574STvrtko Ursulin static void gen8_gt_irq_postinstall(struct drm_i915_private *i915)
4128abd58f01SBen Widawsky {
412958820574STvrtko Ursulin 	struct intel_gt *gt = &i915->gt;
413058820574STvrtko Ursulin 	struct intel_uncore *uncore = gt->uncore;
4131b16b2a2fSPaulo Zanoni 
4132abd58f01SBen Widawsky 	/* These are interrupts we'll toggle with the ring mask register */
4133a9c287c9SJani Nikula 	u32 gt_interrupts[] = {
41348a68d464SChris Wilson 		(GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
413573d477f6SOscar Mateo 		 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
413673d477f6SOscar Mateo 		 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
41378a68d464SChris Wilson 		 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT),
41388a68d464SChris Wilson 
41398a68d464SChris Wilson 		(GT_RENDER_USER_INTERRUPT << GEN8_VCS0_IRQ_SHIFT |
41408a68d464SChris Wilson 		 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS0_IRQ_SHIFT |
4141abd58f01SBen Widawsky 		 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
41428a68d464SChris Wilson 		 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT),
41438a68d464SChris Wilson 
4144abd58f01SBen Widawsky 		0,
41458a68d464SChris Wilson 
41468a68d464SChris Wilson 		(GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
41478a68d464SChris Wilson 		 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT)
4148abd58f01SBen Widawsky 	};
4149abd58f01SBen Widawsky 
415058820574STvrtko Ursulin 	gt->pm_ier = 0x0;
415158820574STvrtko Ursulin 	gt->pm_imr = ~gt->pm_ier;
4152b16b2a2fSPaulo Zanoni 	GEN8_IRQ_INIT_NDX(uncore, GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
4153b16b2a2fSPaulo Zanoni 	GEN8_IRQ_INIT_NDX(uncore, GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
415478e68d36SImre Deak 	/*
415578e68d36SImre Deak 	 * RPS interrupts will get enabled/disabled on demand when RPS itself
415626705e20SSagar Arun Kamble 	 * is enabled/disabled. Same will be the case for GuC interrupts.
415778e68d36SImre Deak 	 */
415858820574STvrtko Ursulin 	GEN8_IRQ_INIT_NDX(uncore, GT, 2, gt->pm_imr, gt->pm_ier);
4159b16b2a2fSPaulo Zanoni 	GEN8_IRQ_INIT_NDX(uncore, GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
4160abd58f01SBen Widawsky }
4161abd58f01SBen Widawsky 
4162abd58f01SBen Widawsky static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
4163abd58f01SBen Widawsky {
4164b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
4165b16b2a2fSPaulo Zanoni 
4166a9c287c9SJani Nikula 	u32 de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
4167a9c287c9SJani Nikula 	u32 de_pipe_enables;
41683a3b3c7dSVille Syrjälä 	u32 de_port_masked = GEN8_AUX_CHANNEL_A;
41693a3b3c7dSVille Syrjälä 	u32 de_port_enables;
4170df0d28c1SDhinakaran Pandiyan 	u32 de_misc_masked = GEN8_DE_EDP_PSR;
41713a3b3c7dSVille Syrjälä 	enum pipe pipe;
4172770de83dSDamien Lespiau 
4173df0d28c1SDhinakaran Pandiyan 	if (INTEL_GEN(dev_priv) <= 10)
4174df0d28c1SDhinakaran Pandiyan 		de_misc_masked |= GEN8_DE_MISC_GSE;
4175df0d28c1SDhinakaran Pandiyan 
4176bca2bf2aSPandiyan, Dhinakaran 	if (INTEL_GEN(dev_priv) >= 9) {
4177842ebf7aSVille Syrjälä 		de_pipe_masked |= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
41783a3b3c7dSVille Syrjälä 		de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
417988e04703SJesse Barnes 				  GEN9_AUX_CHANNEL_D;
4180cc3f90f0SAnder Conselvan de Oliveira 		if (IS_GEN9_LP(dev_priv))
41813a3b3c7dSVille Syrjälä 			de_port_masked |= BXT_DE_PORT_GMBUS;
41823a3b3c7dSVille Syrjälä 	} else {
4183842ebf7aSVille Syrjälä 		de_pipe_masked |= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
41843a3b3c7dSVille Syrjälä 	}
4185770de83dSDamien Lespiau 
4186bb187e93SJames Ausmus 	if (INTEL_GEN(dev_priv) >= 11)
4187bb187e93SJames Ausmus 		de_port_masked |= ICL_AUX_CHANNEL_E;
4188bb187e93SJames Ausmus 
41899bb635d9SDhinakaran Pandiyan 	if (IS_CNL_WITH_PORT_F(dev_priv) || INTEL_GEN(dev_priv) >= 11)
4190a324fcacSRodrigo Vivi 		de_port_masked |= CNL_AUX_CHANNEL_F;
4191a324fcacSRodrigo Vivi 
4192770de83dSDamien Lespiau 	de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
4193770de83dSDamien Lespiau 					   GEN8_PIPE_FIFO_UNDERRUN;
4194770de83dSDamien Lespiau 
41953a3b3c7dSVille Syrjälä 	de_port_enables = de_port_masked;
4196cc3f90f0SAnder Conselvan de Oliveira 	if (IS_GEN9_LP(dev_priv))
4197a52bb15bSVille Syrjälä 		de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
4198a52bb15bSVille Syrjälä 	else if (IS_BROADWELL(dev_priv))
41993a3b3c7dSVille Syrjälä 		de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;
42003a3b3c7dSVille Syrjälä 
4201b16b2a2fSPaulo Zanoni 	gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
420254fd3149SDhinakaran Pandiyan 	intel_psr_irq_control(dev_priv, dev_priv->psr.debug);
4203e04f7eceSVille Syrjälä 
42040a195c02SMika Kahola 	for_each_pipe(dev_priv, pipe) {
42050a195c02SMika Kahola 		dev_priv->de_irq_mask[pipe] = ~de_pipe_masked;
4206abd58f01SBen Widawsky 
4207f458ebbcSDaniel Vetter 		if (intel_display_power_is_enabled(dev_priv,
4208813bde43SPaulo Zanoni 				POWER_DOMAIN_PIPE(pipe)))
4209b16b2a2fSPaulo Zanoni 			GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
4210813bde43SPaulo Zanoni 					  dev_priv->de_irq_mask[pipe],
421135079899SPaulo Zanoni 					  de_pipe_enables);
42120a195c02SMika Kahola 	}
4213abd58f01SBen Widawsky 
4214b16b2a2fSPaulo Zanoni 	GEN3_IRQ_INIT(uncore, GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
4215b16b2a2fSPaulo Zanoni 	GEN3_IRQ_INIT(uncore, GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);
42162a57d9ccSImre Deak 
4217121e758eSDhinakaran Pandiyan 	if (INTEL_GEN(dev_priv) >= 11) {
4218121e758eSDhinakaran Pandiyan 		u32 de_hpd_masked = 0;
4219b796b971SDhinakaran Pandiyan 		u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK |
4220b796b971SDhinakaran Pandiyan 				     GEN11_DE_TBT_HOTPLUG_MASK;
4221121e758eSDhinakaran Pandiyan 
4222b16b2a2fSPaulo Zanoni 		GEN3_IRQ_INIT(uncore, GEN11_DE_HPD_, ~de_hpd_masked,
4223b16b2a2fSPaulo Zanoni 			      de_hpd_enables);
4224121e758eSDhinakaran Pandiyan 		gen11_hpd_detection_setup(dev_priv);
4225121e758eSDhinakaran Pandiyan 	} else if (IS_GEN9_LP(dev_priv)) {
42262a57d9ccSImre Deak 		bxt_hpd_detection_setup(dev_priv);
4227121e758eSDhinakaran Pandiyan 	} else if (IS_BROADWELL(dev_priv)) {
42281a56b1a2SImre Deak 		ilk_hpd_detection_setup(dev_priv);
4229abd58f01SBen Widawsky 	}
4230121e758eSDhinakaran Pandiyan }
4231abd58f01SBen Widawsky 
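/*
 * A brief sketch of the masked/enables split used above, assuming the
 * usual IMR/IER semantics of GEN8_IRQ_INIT_NDX(): the *_masked value
 * is what is left unmasked in IMR, while the *_enables value also
 * turns on bits in IER that stay masked until they are wanted:
 *
 *	de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
 *					   GEN8_PIPE_FIFO_UNDERRUN;
 *
 * so vblank and FIFO underrun interrupts can later be unmasked per
 * pipe on demand without having to touch IER again.
 */
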
4232b318b824SVille Syrjälä static void gen8_irq_postinstall(struct drm_i915_private *dev_priv)
4233abd58f01SBen Widawsky {
42346e266956STvrtko Ursulin 	if (HAS_PCH_SPLIT(dev_priv))
4235b318b824SVille Syrjälä 		ibx_irq_pre_postinstall(dev_priv);
4236622364b6SPaulo Zanoni 
4237abd58f01SBen Widawsky 	gen8_gt_irq_postinstall(dev_priv);
4238abd58f01SBen Widawsky 	gen8_de_irq_postinstall(dev_priv);
4239abd58f01SBen Widawsky 
42406e266956STvrtko Ursulin 	if (HAS_PCH_SPLIT(dev_priv))
4241b318b824SVille Syrjälä 		ibx_irq_postinstall(dev_priv);
4242abd58f01SBen Widawsky 
424325286aacSDaniele Ceraolo Spurio 	gen8_master_intr_enable(dev_priv->uncore.regs);
4244abd58f01SBen Widawsky }
4245abd58f01SBen Widawsky 
42469b77011eSTvrtko Ursulin static void gen11_gt_irq_postinstall(struct intel_gt *gt)
424751951ae7SMika Kuoppala {
424851951ae7SMika Kuoppala 	const u32 irqs = GT_RENDER_USER_INTERRUPT | GT_CONTEXT_SWITCH_INTERRUPT;
4249f0818984STvrtko Ursulin 	struct intel_uncore *uncore = gt->uncore;
4250f0818984STvrtko Ursulin 	const u32 dmask = irqs << 16 | irqs;
4251f0818984STvrtko Ursulin 	const u32 smask = irqs << 16;
425251951ae7SMika Kuoppala 
425351951ae7SMika Kuoppala 	BUILD_BUG_ON(irqs & 0xffff0000);
425451951ae7SMika Kuoppala 
425551951ae7SMika Kuoppala 	/* Enable RCS, BCS, VCS and VECS class interrupts. */
4256f0818984STvrtko Ursulin 	intel_uncore_write(uncore, GEN11_RENDER_COPY_INTR_ENABLE, dmask);
4257f0818984STvrtko Ursulin 	intel_uncore_write(uncore, GEN11_VCS_VECS_INTR_ENABLE, dmask);
425851951ae7SMika Kuoppala 
425951951ae7SMika Kuoppala 	/* Unmask irqs on RCS, BCS, VCS and VECS engines. */
4260f0818984STvrtko Ursulin 	intel_uncore_write(uncore, GEN11_RCS0_RSVD_INTR_MASK, ~smask);
4261f0818984STvrtko Ursulin 	intel_uncore_write(uncore, GEN11_BCS_RSVD_INTR_MASK, ~smask);
4262f0818984STvrtko Ursulin 	intel_uncore_write(uncore, GEN11_VCS0_VCS1_INTR_MASK, ~dmask);
4263f0818984STvrtko Ursulin 	intel_uncore_write(uncore, GEN11_VCS2_VCS3_INTR_MASK, ~dmask);
4264f0818984STvrtko Ursulin 	intel_uncore_write(uncore, GEN11_VECS0_VECS1_INTR_MASK, ~dmask);
426551951ae7SMika Kuoppala 
4266d02b98b8SOscar Mateo 	/*
4267d02b98b8SOscar Mateo 	 * RPS interrupts will get enabled/disabled on demand when RPS itself
4268d02b98b8SOscar Mateo 	 * is enabled/disabled.
4269d02b98b8SOscar Mateo 	 */
427058820574STvrtko Ursulin 	gt->pm_ier = 0x0;
427158820574STvrtko Ursulin 	gt->pm_imr = ~gt->pm_ier;
4272f0818984STvrtko Ursulin 	intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
4273f0818984STvrtko Ursulin 	intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_MASK,  ~0);
427454c52a84SOscar Mateo 
427554c52a84SOscar Mateo 	/* Same thing for GuC interrupts */
4276f0818984STvrtko Ursulin 	intel_uncore_write(uncore, GEN11_GUC_SG_INTR_ENABLE, 0);
4277f0818984STvrtko Ursulin 	intel_uncore_write(uncore, GEN11_GUC_SG_INTR_MASK,  ~0);
427851951ae7SMika Kuoppala }
427951951ae7SMika Kuoppala 
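/*
 * A small illustration of the dmask/smask packing used above: each
 * GEN11 *_INTR_ENABLE/*_INTR_MASK register carries two engines, one
 * per 16-bit half, so
 *
 *	dmask = irqs << 16 | irqs;	(both halves, two engines)
 *	smask = irqs << 16;		(upper half only, one engine)
 *
 * which is why the BUILD_BUG_ON() above insists that "irqs" fits in
 * the low 16 bits.
 */
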
4280b318b824SVille Syrjälä static void icp_irq_postinstall(struct drm_i915_private *dev_priv)
428131604222SAnusha Srivatsa {
428231604222SAnusha Srivatsa 	u32 mask = SDE_GMBUS_ICP;
428331604222SAnusha Srivatsa 
428431604222SAnusha Srivatsa 	WARN_ON(I915_READ(SDEIER) != 0);
428531604222SAnusha Srivatsa 	I915_WRITE(SDEIER, 0xffffffff);
428631604222SAnusha Srivatsa 	POSTING_READ(SDEIER);
428731604222SAnusha Srivatsa 
428865f42cdcSPaulo Zanoni 	gen3_assert_iir_is_zero(&dev_priv->uncore, SDEIIR);
428931604222SAnusha Srivatsa 	I915_WRITE(SDEIMR, ~mask);
429031604222SAnusha Srivatsa 
429131604222SAnusha Srivatsa 	icp_hpd_detection_setup(dev_priv);
429231604222SAnusha Srivatsa }
429331604222SAnusha Srivatsa 
4294b318b824SVille Syrjälä static void gen11_irq_postinstall(struct drm_i915_private *dev_priv)
429551951ae7SMika Kuoppala {
4296b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
4297df0d28c1SDhinakaran Pandiyan 	u32 gu_misc_masked = GEN11_GU_MISC_GSE;
429851951ae7SMika Kuoppala 
429929b43ae2SRodrigo Vivi 	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
4300b318b824SVille Syrjälä 		icp_irq_postinstall(dev_priv);
430131604222SAnusha Srivatsa 
43029b77011eSTvrtko Ursulin 	gen11_gt_irq_postinstall(&dev_priv->gt);
430351951ae7SMika Kuoppala 	gen8_de_irq_postinstall(dev_priv);
430451951ae7SMika Kuoppala 
4305b16b2a2fSPaulo Zanoni 	GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);
4306df0d28c1SDhinakaran Pandiyan 
430751951ae7SMika Kuoppala 	I915_WRITE(GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
430851951ae7SMika Kuoppala 
43099b77011eSTvrtko Ursulin 	gen11_master_intr_enable(uncore->regs);
4310c25f0c6aSDaniele Ceraolo Spurio 	POSTING_READ(GEN11_GFX_MSTR_IRQ);
431151951ae7SMika Kuoppala }
431251951ae7SMika Kuoppala 
4313b318b824SVille Syrjälä static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv)
431443f328d7SVille Syrjälä {
431543f328d7SVille Syrjälä 	gen8_gt_irq_postinstall(dev_priv);
431643f328d7SVille Syrjälä 
4317ad22d106SVille Syrjälä 	spin_lock_irq(&dev_priv->irq_lock);
43189918271eSVille Syrjälä 	if (dev_priv->display_irqs_enabled)
4319ad22d106SVille Syrjälä 		vlv_display_irq_postinstall(dev_priv);
4320ad22d106SVille Syrjälä 	spin_unlock_irq(&dev_priv->irq_lock);
4321ad22d106SVille Syrjälä 
4322e5328c43SVille Syrjälä 	I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
432343f328d7SVille Syrjälä 	POSTING_READ(GEN8_MASTER_IRQ);
432443f328d7SVille Syrjälä }
432543f328d7SVille Syrjälä 
4326b318b824SVille Syrjälä static void i8xx_irq_reset(struct drm_i915_private *dev_priv)
4327c2798b19SChris Wilson {
4328b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
4329c2798b19SChris Wilson 
433044d9241eSVille Syrjälä 	i9xx_pipestat_irq_reset(dev_priv);
433144d9241eSVille Syrjälä 
4332b16b2a2fSPaulo Zanoni 	GEN2_IRQ_RESET(uncore);
4333c2798b19SChris Wilson }
4334c2798b19SChris Wilson 
4335b318b824SVille Syrjälä static void i8xx_irq_postinstall(struct drm_i915_private *dev_priv)
4336c2798b19SChris Wilson {
4337b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
4338e9e9848aSVille Syrjälä 	u16 enable_mask;
4339c2798b19SChris Wilson 
43404f5fd91fSTvrtko Ursulin 	intel_uncore_write16(uncore,
43414f5fd91fSTvrtko Ursulin 			     EMR,
43424f5fd91fSTvrtko Ursulin 			     ~(I915_ERROR_PAGE_TABLE |
4343045cebd2SVille Syrjälä 			       I915_ERROR_MEMORY_REFRESH));
4344c2798b19SChris Wilson 
4345c2798b19SChris Wilson 	/* Unmask the interrupts that we always want on. */
4346c2798b19SChris Wilson 	dev_priv->irq_mask =
4347c2798b19SChris Wilson 		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
434816659bc5SVille Syrjälä 		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
434916659bc5SVille Syrjälä 		  I915_MASTER_ERROR_INTERRUPT);
4350c2798b19SChris Wilson 
4351e9e9848aSVille Syrjälä 	enable_mask =
4352c2798b19SChris Wilson 		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4353c2798b19SChris Wilson 		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
435416659bc5SVille Syrjälä 		I915_MASTER_ERROR_INTERRUPT |
4355e9e9848aSVille Syrjälä 		I915_USER_INTERRUPT;
4356e9e9848aSVille Syrjälä 
4357b16b2a2fSPaulo Zanoni 	GEN2_IRQ_INIT(uncore, dev_priv->irq_mask, enable_mask);
4358c2798b19SChris Wilson 
4359379ef82dSDaniel Vetter 	/* Interrupt setup is already guaranteed to be single-threaded; this is
4360379ef82dSDaniel Vetter 	 * just to make the assert_spin_locked check happy. */
4361d6207435SDaniel Vetter 	spin_lock_irq(&dev_priv->irq_lock);
4362755e9019SImre Deak 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4363755e9019SImre Deak 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4364d6207435SDaniel Vetter 	spin_unlock_irq(&dev_priv->irq_lock);
4365c2798b19SChris Wilson }
4366c2798b19SChris Wilson 
43674f5fd91fSTvrtko Ursulin static void i8xx_error_irq_ack(struct drm_i915_private *i915,
436878c357ddSVille Syrjälä 			       u16 *eir, u16 *eir_stuck)
436978c357ddSVille Syrjälä {
43704f5fd91fSTvrtko Ursulin 	struct intel_uncore *uncore = &i915->uncore;
437178c357ddSVille Syrjälä 	u16 emr;
437278c357ddSVille Syrjälä 
43734f5fd91fSTvrtko Ursulin 	*eir = intel_uncore_read16(uncore, EIR);
437478c357ddSVille Syrjälä 
437578c357ddSVille Syrjälä 	if (*eir)
43764f5fd91fSTvrtko Ursulin 		intel_uncore_write16(uncore, EIR, *eir);
437778c357ddSVille Syrjälä 
43784f5fd91fSTvrtko Ursulin 	*eir_stuck = intel_uncore_read16(uncore, EIR);
437978c357ddSVille Syrjälä 	if (*eir_stuck == 0)
438078c357ddSVille Syrjälä 		return;
438178c357ddSVille Syrjälä 
438278c357ddSVille Syrjälä 	/*
438378c357ddSVille Syrjälä 	 * Toggle all EMR bits to make sure we get an edge
438478c357ddSVille Syrjälä 	 * in the ISR master error bit if we don't clear
438578c357ddSVille Syrjälä 	 * all the EIR bits. Otherwise the edge triggered
438678c357ddSVille Syrjälä 	 * IIR on i965/g4x wouldn't notice that an interrupt
438778c357ddSVille Syrjälä 	 * is still pending. Also some EIR bits can't be
438878c357ddSVille Syrjälä 	 * cleared except by handling the underlying error
438978c357ddSVille Syrjälä 	 * (or by a GPU reset) so we mask any bit that
439078c357ddSVille Syrjälä 	 * remains set.
439178c357ddSVille Syrjälä 	 */
43924f5fd91fSTvrtko Ursulin 	emr = intel_uncore_read16(uncore, EMR);
43934f5fd91fSTvrtko Ursulin 	intel_uncore_write16(uncore, EMR, 0xffff);
43944f5fd91fSTvrtko Ursulin 	intel_uncore_write16(uncore, EMR, emr | *eir_stuck);
439578c357ddSVille Syrjälä }
439678c357ddSVille Syrjälä 
439778c357ddSVille Syrjälä static void i8xx_error_irq_handler(struct drm_i915_private *dev_priv,
439878c357ddSVille Syrjälä 				   u16 eir, u16 eir_stuck)
439978c357ddSVille Syrjälä {
440078c357ddSVille Syrjälä 	DRM_DEBUG("Master Error: EIR 0x%04x\n", eir);
440178c357ddSVille Syrjälä 
440278c357ddSVille Syrjälä 	if (eir_stuck)
440378c357ddSVille Syrjälä 		DRM_DEBUG_DRIVER("EIR stuck: 0x%04x, masked\n", eir_stuck);
440478c357ddSVille Syrjälä }
440578c357ddSVille Syrjälä 
440678c357ddSVille Syrjälä static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv,
440778c357ddSVille Syrjälä 			       u32 *eir, u32 *eir_stuck)
440878c357ddSVille Syrjälä {
440978c357ddSVille Syrjälä 	u32 emr;
441078c357ddSVille Syrjälä 
441178c357ddSVille Syrjälä 	*eir = I915_READ(EIR);
441278c357ddSVille Syrjälä 
441378c357ddSVille Syrjälä 	I915_WRITE(EIR, *eir);
441478c357ddSVille Syrjälä 
441578c357ddSVille Syrjälä 	*eir_stuck = I915_READ(EIR);
441678c357ddSVille Syrjälä 	if (*eir_stuck == 0)
441778c357ddSVille Syrjälä 		return;
441878c357ddSVille Syrjälä 
441978c357ddSVille Syrjälä 	/*
442078c357ddSVille Syrjälä 	 * Toggle all EMR bits to make sure we get an edge
442178c357ddSVille Syrjälä 	 * in the ISR master error bit if we don't clear
442278c357ddSVille Syrjälä 	 * all the EIR bits. Otherwise the edge triggered
442378c357ddSVille Syrjälä 	 * IIR on i965/g4x wouldn't notice that an interrupt
442478c357ddSVille Syrjälä 	 * is still pending. Also some EIR bits can't be
442578c357ddSVille Syrjälä 	 * cleared except by handling the underlying error
442678c357ddSVille Syrjälä 	 * (or by a GPU reset) so we mask any bit that
442778c357ddSVille Syrjälä 	 * remains set.
442878c357ddSVille Syrjälä 	 */
442978c357ddSVille Syrjälä 	emr = I915_READ(EMR);
443078c357ddSVille Syrjälä 	I915_WRITE(EMR, 0xffffffff);
443178c357ddSVille Syrjälä 	I915_WRITE(EMR, emr | *eir_stuck);
443278c357ddSVille Syrjälä }
443378c357ddSVille Syrjälä 
443478c357ddSVille Syrjälä static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv,
443578c357ddSVille Syrjälä 				   u32 eir, u32 eir_stuck)
443678c357ddSVille Syrjälä {
443778c357ddSVille Syrjälä 	DRM_DEBUG("Master Error, EIR 0x%08x\n", eir);
443878c357ddSVille Syrjälä 
443978c357ddSVille Syrjälä 	if (eir_stuck)
444078c357ddSVille Syrjälä 		DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masked\n", eir_stuck);
444178c357ddSVille Syrjälä }
444278c357ddSVille Syrjälä 
4443ff1f525eSDaniel Vetter static irqreturn_t i8xx_irq_handler(int irq, void *arg)
4444c2798b19SChris Wilson {
4445b318b824SVille Syrjälä 	struct drm_i915_private *dev_priv = arg;
4446af722d28SVille Syrjälä 	irqreturn_t ret = IRQ_NONE;
4447c2798b19SChris Wilson 
44482dd2a883SImre Deak 	if (!intel_irqs_enabled(dev_priv))
44492dd2a883SImre Deak 		return IRQ_NONE;
44502dd2a883SImre Deak 
44511f814dacSImre Deak 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
44529102650fSDaniele Ceraolo Spurio 	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
44531f814dacSImre Deak 
4454af722d28SVille Syrjälä 	do {
4455af722d28SVille Syrjälä 		u32 pipe_stats[I915_MAX_PIPES] = {};
445678c357ddSVille Syrjälä 		u16 eir = 0, eir_stuck = 0;
4457af722d28SVille Syrjälä 		u16 iir;
4458af722d28SVille Syrjälä 
44594f5fd91fSTvrtko Ursulin 		iir = intel_uncore_read16(&dev_priv->uncore, GEN2_IIR);
4460c2798b19SChris Wilson 		if (iir == 0)
4461af722d28SVille Syrjälä 			break;
4462c2798b19SChris Wilson 
4463af722d28SVille Syrjälä 		ret = IRQ_HANDLED;
4464c2798b19SChris Wilson 
4465eb64343cSVille Syrjälä 		/* Call regardless, as some status bits might not be
4466eb64343cSVille Syrjälä 		 * signalled in iir */
4467eb64343cSVille Syrjälä 		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
4468c2798b19SChris Wilson 
446978c357ddSVille Syrjälä 		if (iir & I915_MASTER_ERROR_INTERRUPT)
447078c357ddSVille Syrjälä 			i8xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
447178c357ddSVille Syrjälä 
44724f5fd91fSTvrtko Ursulin 		intel_uncore_write16(&dev_priv->uncore, GEN2_IIR, iir);
4473c2798b19SChris Wilson 
4474c2798b19SChris Wilson 		if (iir & I915_USER_INTERRUPT)
44758a68d464SChris Wilson 			intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);
4476c2798b19SChris Wilson 
447778c357ddSVille Syrjälä 		if (iir & I915_MASTER_ERROR_INTERRUPT)
447878c357ddSVille Syrjälä 			i8xx_error_irq_handler(dev_priv, eir, eir_stuck);
4479af722d28SVille Syrjälä 
4480eb64343cSVille Syrjälä 		i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats);
4481af722d28SVille Syrjälä 	} while (0);
4482c2798b19SChris Wilson 
44839102650fSDaniele Ceraolo Spurio 	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
44841f814dacSImre Deak 
44851f814dacSImre Deak 	return ret;
4486c2798b19SChris Wilson }
4487c2798b19SChris Wilson 
4488b318b824SVille Syrjälä static void i915_irq_reset(struct drm_i915_private *dev_priv)
4489a266c7d5SChris Wilson {
4490b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
4491a266c7d5SChris Wilson 
449256b857a5STvrtko Ursulin 	if (I915_HAS_HOTPLUG(dev_priv)) {
44930706f17cSEgbert Eich 		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4494a266c7d5SChris Wilson 		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4495a266c7d5SChris Wilson 	}
4496a266c7d5SChris Wilson 
449744d9241eSVille Syrjälä 	i9xx_pipestat_irq_reset(dev_priv);
449844d9241eSVille Syrjälä 
4499b16b2a2fSPaulo Zanoni 	GEN3_IRQ_RESET(uncore, GEN2_);
4500a266c7d5SChris Wilson }
4501a266c7d5SChris Wilson 
4502b318b824SVille Syrjälä static void i915_irq_postinstall(struct drm_i915_private *dev_priv)
4503a266c7d5SChris Wilson {
4504b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
450538bde180SChris Wilson 	u32 enable_mask;
4506a266c7d5SChris Wilson 
4507045cebd2SVille Syrjälä 	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE |
4508045cebd2SVille Syrjälä 			  I915_ERROR_MEMORY_REFRESH));
450938bde180SChris Wilson 
451038bde180SChris Wilson 	/* Unmask the interrupts that we always want on. */
451138bde180SChris Wilson 	dev_priv->irq_mask =
451238bde180SChris Wilson 		~(I915_ASLE_INTERRUPT |
451338bde180SChris Wilson 		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
451416659bc5SVille Syrjälä 		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
451516659bc5SVille Syrjälä 		  I915_MASTER_ERROR_INTERRUPT);
451638bde180SChris Wilson 
451738bde180SChris Wilson 	enable_mask =
451838bde180SChris Wilson 		I915_ASLE_INTERRUPT |
451938bde180SChris Wilson 		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
452038bde180SChris Wilson 		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
452116659bc5SVille Syrjälä 		I915_MASTER_ERROR_INTERRUPT |
452238bde180SChris Wilson 		I915_USER_INTERRUPT;
452338bde180SChris Wilson 
452456b857a5STvrtko Ursulin 	if (I915_HAS_HOTPLUG(dev_priv)) {
4525a266c7d5SChris Wilson 		/* Enable in IER... */
4526a266c7d5SChris Wilson 		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
4527a266c7d5SChris Wilson 		/* and unmask in IMR */
4528a266c7d5SChris Wilson 		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
4529a266c7d5SChris Wilson 	}
4530a266c7d5SChris Wilson 
4531b16b2a2fSPaulo Zanoni 	GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);
4532a266c7d5SChris Wilson 
4533379ef82dSDaniel Vetter 	/* Interrupt setup is already guaranteed to be single-threaded; this is
4534379ef82dSDaniel Vetter 	 * just to make the assert_spin_locked check happy. */
4535d6207435SDaniel Vetter 	spin_lock_irq(&dev_priv->irq_lock);
4536755e9019SImre Deak 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4537755e9019SImre Deak 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4538d6207435SDaniel Vetter 	spin_unlock_irq(&dev_priv->irq_lock);
4539379ef82dSDaniel Vetter 
4540c30bb1fdSVille Syrjälä 	i915_enable_asle_pipestat(dev_priv);
454120afbda2SDaniel Vetter }
454220afbda2SDaniel Vetter 
4543ff1f525eSDaniel Vetter static irqreturn_t i915_irq_handler(int irq, void *arg)
4544a266c7d5SChris Wilson {
4545b318b824SVille Syrjälä 	struct drm_i915_private *dev_priv = arg;
4546af722d28SVille Syrjälä 	irqreturn_t ret = IRQ_NONE;
4547a266c7d5SChris Wilson 
45482dd2a883SImre Deak 	if (!intel_irqs_enabled(dev_priv))
45492dd2a883SImre Deak 		return IRQ_NONE;
45502dd2a883SImre Deak 
45511f814dacSImre Deak 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
45529102650fSDaniele Ceraolo Spurio 	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
45531f814dacSImre Deak 
455438bde180SChris Wilson 	do {
4555eb64343cSVille Syrjälä 		u32 pipe_stats[I915_MAX_PIPES] = {};
455678c357ddSVille Syrjälä 		u32 eir = 0, eir_stuck = 0;
4557af722d28SVille Syrjälä 		u32 hotplug_status = 0;
4558af722d28SVille Syrjälä 		u32 iir;
4559a266c7d5SChris Wilson 
45609d9523d8SPaulo Zanoni 		iir = I915_READ(GEN2_IIR);
4561af722d28SVille Syrjälä 		if (iir == 0)
4562af722d28SVille Syrjälä 			break;
4563af722d28SVille Syrjälä 
4564af722d28SVille Syrjälä 		ret = IRQ_HANDLED;
4565af722d28SVille Syrjälä 
4566af722d28SVille Syrjälä 		if (I915_HAS_HOTPLUG(dev_priv) &&
4567af722d28SVille Syrjälä 		    iir & I915_DISPLAY_PORT_INTERRUPT)
4568af722d28SVille Syrjälä 			hotplug_status = i9xx_hpd_irq_ack(dev_priv);
4569a266c7d5SChris Wilson 
4570eb64343cSVille Syrjälä 		/* Call regardless, as some status bits might not be
4571eb64343cSVille Syrjälä 		 * signalled in iir */
4572eb64343cSVille Syrjälä 		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
4573a266c7d5SChris Wilson 
457478c357ddSVille Syrjälä 		if (iir & I915_MASTER_ERROR_INTERRUPT)
457578c357ddSVille Syrjälä 			i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
457678c357ddSVille Syrjälä 
45779d9523d8SPaulo Zanoni 		I915_WRITE(GEN2_IIR, iir);
4578a266c7d5SChris Wilson 
4579a266c7d5SChris Wilson 		if (iir & I915_USER_INTERRUPT)
45808a68d464SChris Wilson 			intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);
4581a266c7d5SChris Wilson 
458278c357ddSVille Syrjälä 		if (iir & I915_MASTER_ERROR_INTERRUPT)
458378c357ddSVille Syrjälä 			i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
4584a266c7d5SChris Wilson 
4585af722d28SVille Syrjälä 		if (hotplug_status)
4586af722d28SVille Syrjälä 			i9xx_hpd_irq_handler(dev_priv, hotplug_status);
4587af722d28SVille Syrjälä 
4588af722d28SVille Syrjälä 		i915_pipestat_irq_handler(dev_priv, iir, pipe_stats);
4589af722d28SVille Syrjälä 	} while (0);
4590a266c7d5SChris Wilson 
45919102650fSDaniele Ceraolo Spurio 	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
45921f814dacSImre Deak 
4593a266c7d5SChris Wilson 	return ret;
4594a266c7d5SChris Wilson }
4595a266c7d5SChris Wilson 
4596b318b824SVille Syrjälä static void i965_irq_reset(struct drm_i915_private *dev_priv)
4597a266c7d5SChris Wilson {
4598b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
4599a266c7d5SChris Wilson 
46000706f17cSEgbert Eich 	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4601a266c7d5SChris Wilson 	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4602a266c7d5SChris Wilson 
460344d9241eSVille Syrjälä 	i9xx_pipestat_irq_reset(dev_priv);
460444d9241eSVille Syrjälä 
4605b16b2a2fSPaulo Zanoni 	GEN3_IRQ_RESET(uncore, GEN2_);
4606a266c7d5SChris Wilson }
4607a266c7d5SChris Wilson 
4608b318b824SVille Syrjälä static void i965_irq_postinstall(struct drm_i915_private *dev_priv)
4609a266c7d5SChris Wilson {
4610b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
4611bbba0a97SChris Wilson 	u32 enable_mask;
4612a266c7d5SChris Wilson 	u32 error_mask;
4613a266c7d5SChris Wilson 
4614045cebd2SVille Syrjälä 	/*
4615045cebd2SVille Syrjälä 	 * Enable some error detection; note that the instruction error mask
4616045cebd2SVille Syrjälä 	 * bit is reserved, so we leave it masked.
4617045cebd2SVille Syrjälä 	 */
4618045cebd2SVille Syrjälä 	if (IS_G4X(dev_priv)) {
4619045cebd2SVille Syrjälä 		error_mask = ~(GM45_ERROR_PAGE_TABLE |
4620045cebd2SVille Syrjälä 			       GM45_ERROR_MEM_PRIV |
4621045cebd2SVille Syrjälä 			       GM45_ERROR_CP_PRIV |
4622045cebd2SVille Syrjälä 			       I915_ERROR_MEMORY_REFRESH);
4623045cebd2SVille Syrjälä 	} else {
4624045cebd2SVille Syrjälä 		error_mask = ~(I915_ERROR_PAGE_TABLE |
4625045cebd2SVille Syrjälä 			       I915_ERROR_MEMORY_REFRESH);
4626045cebd2SVille Syrjälä 	}
4627045cebd2SVille Syrjälä 	I915_WRITE(EMR, error_mask);
4628045cebd2SVille Syrjälä 
4629a266c7d5SChris Wilson 	/* Unmask the interrupts that we always want on. */
4630c30bb1fdSVille Syrjälä 	dev_priv->irq_mask =
4631c30bb1fdSVille Syrjälä 		~(I915_ASLE_INTERRUPT |
4632adca4730SChris Wilson 		  I915_DISPLAY_PORT_INTERRUPT |
4633bbba0a97SChris Wilson 		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4634bbba0a97SChris Wilson 		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
463578c357ddSVille Syrjälä 		  I915_MASTER_ERROR_INTERRUPT);
4636bbba0a97SChris Wilson 
4637c30bb1fdSVille Syrjälä 	enable_mask =
4638c30bb1fdSVille Syrjälä 		I915_ASLE_INTERRUPT |
4639c30bb1fdSVille Syrjälä 		I915_DISPLAY_PORT_INTERRUPT |
4640c30bb1fdSVille Syrjälä 		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4641c30bb1fdSVille Syrjälä 		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
464278c357ddSVille Syrjälä 		I915_MASTER_ERROR_INTERRUPT |
4643c30bb1fdSVille Syrjälä 		I915_USER_INTERRUPT;
4644bbba0a97SChris Wilson 
464591d14251STvrtko Ursulin 	if (IS_G4X(dev_priv))
4646bbba0a97SChris Wilson 		enable_mask |= I915_BSD_USER_INTERRUPT;
4647a266c7d5SChris Wilson 
4648b16b2a2fSPaulo Zanoni 	GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);
4649c30bb1fdSVille Syrjälä 
4650b79480baSDaniel Vetter 	/* Interrupt setup is already guaranteed to be single-threaded; this is
4651b79480baSDaniel Vetter 	 * just to make the assert_spin_locked check happy. */
4652d6207435SDaniel Vetter 	spin_lock_irq(&dev_priv->irq_lock);
4653755e9019SImre Deak 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
4654755e9019SImre Deak 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4655755e9019SImre Deak 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4656d6207435SDaniel Vetter 	spin_unlock_irq(&dev_priv->irq_lock);
4657a266c7d5SChris Wilson 
465891d14251STvrtko Ursulin 	i915_enable_asle_pipestat(dev_priv);
465920afbda2SDaniel Vetter }
466020afbda2SDaniel Vetter 
466191d14251STvrtko Ursulin static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
466220afbda2SDaniel Vetter {
466320afbda2SDaniel Vetter 	u32 hotplug_en;
466420afbda2SDaniel Vetter 
466567520415SChris Wilson 	lockdep_assert_held(&dev_priv->irq_lock);
4666b5ea2d56SDaniel Vetter 
4667adca4730SChris Wilson 	/* Note HDMI and DP share hotplug bits */
4668e5868a31SEgbert Eich 	/* enable bits are the same for all generations */
466991d14251STvrtko Ursulin 	hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915);
4670a266c7d5SChris Wilson 	/* Programming the CRT detection parameters tends
4671a266c7d5SChris Wilson 	   to generate a spurious hotplug event about three
4672a266c7d5SChris Wilson 	   seconds later.  So just do it once.
4673a266c7d5SChris Wilson 	*/
467491d14251STvrtko Ursulin 	if (IS_G4X(dev_priv))
4675a266c7d5SChris Wilson 		hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
4676a266c7d5SChris Wilson 	hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
4677a266c7d5SChris Wilson 
4678a266c7d5SChris Wilson 	/* Ignore TV since it's buggy */
46790706f17cSEgbert Eich 	i915_hotplug_interrupt_update_locked(dev_priv,
4680f9e3dc78SJani Nikula 					     HOTPLUG_INT_EN_MASK |
4681f9e3dc78SJani Nikula 					     CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
4682f9e3dc78SJani Nikula 					     CRT_HOTPLUG_ACTIVATION_PERIOD_64,
46830706f17cSEgbert Eich 					     hotplug_en);
4684a266c7d5SChris Wilson }
4685a266c7d5SChris Wilson 
4686ff1f525eSDaniel Vetter static irqreturn_t i965_irq_handler(int irq, void *arg)
4687a266c7d5SChris Wilson {
4688b318b824SVille Syrjälä 	struct drm_i915_private *dev_priv = arg;
4689af722d28SVille Syrjälä 	irqreturn_t ret = IRQ_NONE;
4690a266c7d5SChris Wilson 
46912dd2a883SImre Deak 	if (!intel_irqs_enabled(dev_priv))
46922dd2a883SImre Deak 		return IRQ_NONE;
46932dd2a883SImre Deak 
46941f814dacSImre Deak 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
46959102650fSDaniele Ceraolo Spurio 	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
46961f814dacSImre Deak 
4697af722d28SVille Syrjälä 	do {
4698eb64343cSVille Syrjälä 		u32 pipe_stats[I915_MAX_PIPES] = {};
469978c357ddSVille Syrjälä 		u32 eir = 0, eir_stuck = 0;
4700af722d28SVille Syrjälä 		u32 hotplug_status = 0;
4701af722d28SVille Syrjälä 		u32 iir;
47022c8ba29fSChris Wilson 
47039d9523d8SPaulo Zanoni 		iir = I915_READ(GEN2_IIR);
4704af722d28SVille Syrjälä 		if (iir == 0)
4705af722d28SVille Syrjälä 			break;
4706af722d28SVille Syrjälä 
4707af722d28SVille Syrjälä 		ret = IRQ_HANDLED;
4708af722d28SVille Syrjälä 
4709af722d28SVille Syrjälä 		if (iir & I915_DISPLAY_PORT_INTERRUPT)
4710af722d28SVille Syrjälä 			hotplug_status = i9xx_hpd_irq_ack(dev_priv);
4711a266c7d5SChris Wilson 
4712eb64343cSVille Syrjälä 		/* Call regardless, as some status bits might not be
4713eb64343cSVille Syrjälä 		 * signalled in iir */
4714eb64343cSVille Syrjälä 		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
4715a266c7d5SChris Wilson 
471678c357ddSVille Syrjälä 		if (iir & I915_MASTER_ERROR_INTERRUPT)
471778c357ddSVille Syrjälä 			i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
471878c357ddSVille Syrjälä 
47199d9523d8SPaulo Zanoni 		I915_WRITE(GEN2_IIR, iir);
4720a266c7d5SChris Wilson 
4721a266c7d5SChris Wilson 		if (iir & I915_USER_INTERRUPT)
47228a68d464SChris Wilson 			intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);
4723af722d28SVille Syrjälä 
4724a266c7d5SChris Wilson 		if (iir & I915_BSD_USER_INTERRUPT)
47258a68d464SChris Wilson 			intel_engine_breadcrumbs_irq(dev_priv->engine[VCS0]);
4726a266c7d5SChris Wilson 
472778c357ddSVille Syrjälä 		if (iir & I915_MASTER_ERROR_INTERRUPT)
472878c357ddSVille Syrjälä 			i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
4729515ac2bbSDaniel Vetter 
4730af722d28SVille Syrjälä 		if (hotplug_status)
4731af722d28SVille Syrjälä 			i9xx_hpd_irq_handler(dev_priv, hotplug_status);
4732af722d28SVille Syrjälä 
4733af722d28SVille Syrjälä 		i965_pipestat_irq_handler(dev_priv, iir, pipe_stats);
4734af722d28SVille Syrjälä 	} while (0);
4735a266c7d5SChris Wilson 
47369102650fSDaniele Ceraolo Spurio 	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
47371f814dacSImre Deak 
4738a266c7d5SChris Wilson 	return ret;
4739a266c7d5SChris Wilson }
4740a266c7d5SChris Wilson 
4741fca52a55SDaniel Vetter /**
4742fca52a55SDaniel Vetter  * intel_irq_init - initializes irq support
4743fca52a55SDaniel Vetter  * @dev_priv: i915 device instance
4744fca52a55SDaniel Vetter  *
4745fca52a55SDaniel Vetter  * This function initializes all the irq support including work items, timers
4746fca52a55SDaniel Vetter  * and all the vtables. It does not setup the interrupt itself though.
4747fca52a55SDaniel Vetter  */
4748b963291cSDaniel Vetter void intel_irq_init(struct drm_i915_private *dev_priv)
4749f71d4af4SJesse Barnes {
475091c8a326SChris Wilson 	struct drm_device *dev = &dev_priv->drm;
4751562d9baeSSagar Arun Kamble 	struct intel_rps *rps = &dev_priv->gt_pm.rps;
4752cefcff8fSJoonas Lahtinen 	int i;
47538b2e326dSChris Wilson 
4754d938da6bSVille Syrjälä 	if (IS_I945GM(dev_priv))
4755d938da6bSVille Syrjälä 		i945gm_vblank_work_init(dev_priv);
4756d938da6bSVille Syrjälä 
475777913b39SJani Nikula 	intel_hpd_init_work(dev_priv);
475877913b39SJani Nikula 
4759562d9baeSSagar Arun Kamble 	INIT_WORK(&rps->work, gen6_pm_rps_work);
4760cefcff8fSJoonas Lahtinen 
4761a4da4fa4SDaniel Vetter 	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
4762cefcff8fSJoonas Lahtinen 	for (i = 0; i < MAX_L3_SLICES; ++i)
4763cefcff8fSJoonas Lahtinen 		dev_priv->l3_parity.remap_info[i] = NULL;
47648b2e326dSChris Wilson 
4765633023a4SDaniele Ceraolo Spurio 	/* pre-gen11 the GuC irq bits are in the upper 16 bits of the pm reg */
476654c52a84SOscar Mateo 	if (HAS_GUC_SCHED(dev_priv) && INTEL_GEN(dev_priv) < 11)
4767*2239e6dfSDaniele Ceraolo Spurio 		dev_priv->gt.pm_guc_events = GUC_INTR_GUC2HOST << 16;
476826705e20SSagar Arun Kamble 
4769a6706b45SDeepak S 	/* Let's track the enabled rps events */
4770666a4537SWayne Boyer 	if (IS_VALLEYVIEW(dev_priv))
47716c65a587SVille Syrjälä 		/* WaGsvRC0ResidencyMethod:vlv */
4772e0e8c7cbSChris Wilson 		dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
477331685c25SDeepak S 	else
47744668f695SChris Wilson 		dev_priv->pm_rps_events = (GEN6_PM_RP_UP_THRESHOLD |
47754668f695SChris Wilson 					   GEN6_PM_RP_DOWN_THRESHOLD |
47764668f695SChris Wilson 					   GEN6_PM_RP_DOWN_TIMEOUT);
4777a6706b45SDeepak S 
4778917dc6b5SMika Kuoppala 	/* We share the register with other engine */
4779917dc6b5SMika Kuoppala 	if (INTEL_GEN(dev_priv) > 9)
4780917dc6b5SMika Kuoppala 		GEM_WARN_ON(dev_priv->pm_rps_events & 0xffff0000);
4781917dc6b5SMika Kuoppala 
4782562d9baeSSagar Arun Kamble 	rps->pm_intrmsk_mbz = 0;
47831800ad25SSagar Arun Kamble 
47841800ad25SSagar Arun Kamble 	/*
4785acf2dc22SMika Kuoppala 	 * SNB, IVB and HSW can, while VLV and CHV may, hard hang on a looping batchbuffer
47861800ad25SSagar Arun Kamble 	 * if GEN6_PM_UP_EI_EXPIRED is masked.
47871800ad25SSagar Arun Kamble 	 *
47881800ad25SSagar Arun Kamble 	 * TODO: verify if this can be reproduced on VLV,CHV.
47891800ad25SSagar Arun Kamble 	 */
4790bca2bf2aSPandiyan, Dhinakaran 	if (INTEL_GEN(dev_priv) <= 7)
4791562d9baeSSagar Arun Kamble 		rps->pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED;
47921800ad25SSagar Arun Kamble 
4793bca2bf2aSPandiyan, Dhinakaran 	if (INTEL_GEN(dev_priv) >= 8)
4794562d9baeSSagar Arun Kamble 		rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
47951800ad25SSagar Arun Kamble 
479621da2700SVille Syrjälä 	dev->vblank_disable_immediate = true;
479721da2700SVille Syrjälä 
4798262fd485SChris Wilson 	/* Most platforms treat the display irq block as an always-on
4799262fd485SChris Wilson 	 * power domain. vlv/chv can disable it at runtime and need
4800262fd485SChris Wilson 	 * special care to avoid writing any of the display block registers
4801262fd485SChris Wilson 	 * outside of the power domain. We defer setting up the display irqs
4802262fd485SChris Wilson 	 * in this case to the runtime pm.
4803262fd485SChris Wilson 	 */
4804262fd485SChris Wilson 	dev_priv->display_irqs_enabled = true;
4805262fd485SChris Wilson 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
4806262fd485SChris Wilson 		dev_priv->display_irqs_enabled = false;
4807262fd485SChris Wilson 
4808317eaa95SLyude 	dev_priv->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD;
48099a64c650SLyude Paul 	/* If we have MST support, we want to avoid doing short HPD IRQ storm
48109a64c650SLyude Paul 	 * detection, as short HPD storms will occur as a natural part of
48119a64c650SLyude Paul 	 * sideband messaging with MST.
48129a64c650SLyude Paul 	 * On older platforms however, IRQ storms can occur with both long and
48139a64c650SLyude Paul 	 * short pulses, as seen on some G4x systems.
48149a64c650SLyude Paul 	 */
48159a64c650SLyude Paul 	dev_priv->hotplug.hpd_short_storm_enabled = !HAS_DP_MST(dev_priv);
4816317eaa95SLyude 
4817b318b824SVille Syrjälä 	if (HAS_GMCH(dev_priv)) {
4818b318b824SVille Syrjälä 		if (I915_HAS_HOTPLUG(dev_priv))
481943f328d7SVille Syrjälä 			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4820b318b824SVille Syrjälä 	} else {
4821b318b824SVille Syrjälä 		if (INTEL_GEN(dev_priv) >= 11)
4822121e758eSDhinakaran Pandiyan 			dev_priv->display.hpd_irq_setup = gen11_hpd_irq_setup;
4823b318b824SVille Syrjälä 		else if (IS_GEN9_LP(dev_priv))
4824e0a20ad7SShashank Sharma 			dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
4825c6c30b91SRodrigo Vivi 		else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
48266dbf30ceSVille Syrjälä 			dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
48276dbf30ceSVille Syrjälä 		else
48283a3b3c7dSVille Syrjälä 			dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
4829f71d4af4SJesse Barnes 	}
4830f71d4af4SJesse Barnes }
483120afbda2SDaniel Vetter 
4832fca52a55SDaniel Vetter /**
4833cefcff8fSJoonas Lahtinen  * intel_irq_fini - deinitializes IRQ support
4834cefcff8fSJoonas Lahtinen  * @i915: i915 device instance
4835cefcff8fSJoonas Lahtinen  *
4836cefcff8fSJoonas Lahtinen  * This function deinitializes all the IRQ support.
4837cefcff8fSJoonas Lahtinen  */
4838cefcff8fSJoonas Lahtinen void intel_irq_fini(struct drm_i915_private *i915)
4839cefcff8fSJoonas Lahtinen {
4840cefcff8fSJoonas Lahtinen 	int i;
4841cefcff8fSJoonas Lahtinen 
4842d938da6bSVille Syrjälä 	if (IS_I945GM(i915))
4843d938da6bSVille Syrjälä 		i945gm_vblank_work_fini(i915);
4844d938da6bSVille Syrjälä 
4845cefcff8fSJoonas Lahtinen 	for (i = 0; i < MAX_L3_SLICES; ++i)
4846cefcff8fSJoonas Lahtinen 		kfree(i915->l3_parity.remap_info[i]);
4847cefcff8fSJoonas Lahtinen }
4848cefcff8fSJoonas Lahtinen 
4849b318b824SVille Syrjälä static irq_handler_t intel_irq_handler(struct drm_i915_private *dev_priv)
4850b318b824SVille Syrjälä {
4851b318b824SVille Syrjälä 	if (HAS_GMCH(dev_priv)) {
4852b318b824SVille Syrjälä 		if (IS_CHERRYVIEW(dev_priv))
4853b318b824SVille Syrjälä 			return cherryview_irq_handler;
4854b318b824SVille Syrjälä 		else if (IS_VALLEYVIEW(dev_priv))
4855b318b824SVille Syrjälä 			return valleyview_irq_handler;
4856b318b824SVille Syrjälä 		else if (IS_GEN(dev_priv, 4))
4857b318b824SVille Syrjälä 			return i965_irq_handler;
4858b318b824SVille Syrjälä 		else if (IS_GEN(dev_priv, 3))
4859b318b824SVille Syrjälä 			return i915_irq_handler;
4860b318b824SVille Syrjälä 		else
4861b318b824SVille Syrjälä 			return i8xx_irq_handler;
4862b318b824SVille Syrjälä 	} else {
4863b318b824SVille Syrjälä 		if (INTEL_GEN(dev_priv) >= 11)
4864b318b824SVille Syrjälä 			return gen11_irq_handler;
4865b318b824SVille Syrjälä 		else if (INTEL_GEN(dev_priv) >= 8)
4866b318b824SVille Syrjälä 			return gen8_irq_handler;
4867b318b824SVille Syrjälä 		else
4868b318b824SVille Syrjälä 			return ironlake_irq_handler;
4869b318b824SVille Syrjälä 	}
4870b318b824SVille Syrjälä }
4871b318b824SVille Syrjälä 
4872b318b824SVille Syrjälä static void intel_irq_reset(struct drm_i915_private *dev_priv)
4873b318b824SVille Syrjälä {
4874b318b824SVille Syrjälä 	if (HAS_GMCH(dev_priv)) {
4875b318b824SVille Syrjälä 		if (IS_CHERRYVIEW(dev_priv))
4876b318b824SVille Syrjälä 			cherryview_irq_reset(dev_priv);
4877b318b824SVille Syrjälä 		else if (IS_VALLEYVIEW(dev_priv))
4878b318b824SVille Syrjälä 			valleyview_irq_reset(dev_priv);
4879b318b824SVille Syrjälä 		else if (IS_GEN(dev_priv, 4))
4880b318b824SVille Syrjälä 			i965_irq_reset(dev_priv);
4881b318b824SVille Syrjälä 		else if (IS_GEN(dev_priv, 3))
4882b318b824SVille Syrjälä 			i915_irq_reset(dev_priv);
4883b318b824SVille Syrjälä 		else
4884b318b824SVille Syrjälä 			i8xx_irq_reset(dev_priv);
4885b318b824SVille Syrjälä 	} else {
4886b318b824SVille Syrjälä 		if (INTEL_GEN(dev_priv) >= 11)
4887b318b824SVille Syrjälä 			gen11_irq_reset(dev_priv);
4888b318b824SVille Syrjälä 		else if (INTEL_GEN(dev_priv) >= 8)
4889b318b824SVille Syrjälä 			gen8_irq_reset(dev_priv);
4890b318b824SVille Syrjälä 		else
4891b318b824SVille Syrjälä 			ironlake_irq_reset(dev_priv);
4892b318b824SVille Syrjälä 	}
4893b318b824SVille Syrjälä }
4894b318b824SVille Syrjälä 
4895b318b824SVille Syrjälä static void intel_irq_postinstall(struct drm_i915_private *dev_priv)
4896b318b824SVille Syrjälä {
4897b318b824SVille Syrjälä 	if (HAS_GMCH(dev_priv)) {
4898b318b824SVille Syrjälä 		if (IS_CHERRYVIEW(dev_priv))
4899b318b824SVille Syrjälä 			cherryview_irq_postinstall(dev_priv);
4900b318b824SVille Syrjälä 		else if (IS_VALLEYVIEW(dev_priv))
4901b318b824SVille Syrjälä 			valleyview_irq_postinstall(dev_priv);
4902b318b824SVille Syrjälä 		else if (IS_GEN(dev_priv, 4))
4903b318b824SVille Syrjälä 			i965_irq_postinstall(dev_priv);
4904b318b824SVille Syrjälä 		else if (IS_GEN(dev_priv, 3))
4905b318b824SVille Syrjälä 			i915_irq_postinstall(dev_priv);
4906b318b824SVille Syrjälä 		else
4907b318b824SVille Syrjälä 			i8xx_irq_postinstall(dev_priv);
4908b318b824SVille Syrjälä 	} else {
4909b318b824SVille Syrjälä 		if (INTEL_GEN(dev_priv) >= 11)
4910b318b824SVille Syrjälä 			gen11_irq_postinstall(dev_priv);
4911b318b824SVille Syrjälä 		else if (INTEL_GEN(dev_priv) >= 8)
4912b318b824SVille Syrjälä 			gen8_irq_postinstall(dev_priv);
4913b318b824SVille Syrjälä 		else
4914b318b824SVille Syrjälä 			ironlake_irq_postinstall(dev_priv);
4915b318b824SVille Syrjälä 	}
4916b318b824SVille Syrjälä }
4917b318b824SVille Syrjälä 
4918cefcff8fSJoonas Lahtinen /**
4919fca52a55SDaniel Vetter  * intel_irq_install - enables the hardware interrupt
4920fca52a55SDaniel Vetter  * @dev_priv: i915 device instance
4921fca52a55SDaniel Vetter  *
4922fca52a55SDaniel Vetter  * This function enables the hardware interrupt handling, but leaves the hotplug
4923fca52a55SDaniel Vetter  * handling still disabled. It is called after intel_irq_init().
4924fca52a55SDaniel Vetter  *
4925fca52a55SDaniel Vetter  * In the driver load and resume code we need working interrupts in a few places
4926fca52a55SDaniel Vetter  * but don't want to deal with the hassle of concurrent probe and hotplug
4927fca52a55SDaniel Vetter  * workers. Hence the split into this two-stage approach.
4928fca52a55SDaniel Vetter  */
49292aeb7d3aSDaniel Vetter int intel_irq_install(struct drm_i915_private *dev_priv)
49302aeb7d3aSDaniel Vetter {
4931b318b824SVille Syrjälä 	int irq = dev_priv->drm.pdev->irq;
4932b318b824SVille Syrjälä 	int ret;
4933b318b824SVille Syrjälä 
49342aeb7d3aSDaniel Vetter 	/*
49352aeb7d3aSDaniel Vetter 	 * We enable some interrupt sources in our postinstall hooks, so mark
49362aeb7d3aSDaniel Vetter 	 * interrupts as enabled _before_ actually enabling them to avoid
49372aeb7d3aSDaniel Vetter 	 * special cases in our ordering checks.
49382aeb7d3aSDaniel Vetter 	 */
4939ad1443f0SSagar Arun Kamble 	dev_priv->runtime_pm.irqs_enabled = true;
49402aeb7d3aSDaniel Vetter 
4941b318b824SVille Syrjälä 	dev_priv->drm.irq_enabled = true;
4942b318b824SVille Syrjälä 
4943b318b824SVille Syrjälä 	intel_irq_reset(dev_priv);
4944b318b824SVille Syrjälä 
4945b318b824SVille Syrjälä 	ret = request_irq(irq, intel_irq_handler(dev_priv),
4946b318b824SVille Syrjälä 			  IRQF_SHARED, DRIVER_NAME, dev_priv);
4947b318b824SVille Syrjälä 	if (ret < 0) {
4948b318b824SVille Syrjälä 		dev_priv->drm.irq_enabled = false;
4949b318b824SVille Syrjälä 		return ret;
4950b318b824SVille Syrjälä 	}
4951b318b824SVille Syrjälä 
4952b318b824SVille Syrjälä 	intel_irq_postinstall(dev_priv);
4953b318b824SVille Syrjälä 
4954b318b824SVille Syrjälä 	return ret;
49552aeb7d3aSDaniel Vetter }
49562aeb7d3aSDaniel Vetter 
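/*
 * A minimal usage sketch of the two-stage approach described above
 * (the call order is the intended one; the error handling around it
 * is illustrative only):
 *
 *	intel_irq_init(i915);		// work items, timers, vtables
 *	ret = intel_irq_install(i915);	// request_irq() + postinstall
 *	if (ret)
 *		return ret;
 *	...
 *	intel_irq_uninstall(i915);	// reset + free_irq() on teardown
 */
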
4957fca52a55SDaniel Vetter /**
4958fca52a55SDaniel Vetter  * intel_irq_uninstall - finalizes all irq handling
4959fca52a55SDaniel Vetter  * @dev_priv: i915 device instance
4960fca52a55SDaniel Vetter  *
4961fca52a55SDaniel Vetter  * This stops interrupt and hotplug handling and unregisters and frees all
4962fca52a55SDaniel Vetter  * resources acquired in the init functions.
4963fca52a55SDaniel Vetter  */
49642aeb7d3aSDaniel Vetter void intel_irq_uninstall(struct drm_i915_private *dev_priv)
49652aeb7d3aSDaniel Vetter {
4966b318b824SVille Syrjälä 	int irq = dev_priv->drm.pdev->irq;
4967b318b824SVille Syrjälä 
4968b318b824SVille Syrjälä 	/*
4969b318b824SVille Syrjälä 	 * FIXME we can get called twice during driver load
4970b318b824SVille Syrjälä 	 * error handling due to intel_modeset_cleanup()
4971b318b824SVille Syrjälä 	 * calling us out of sequence. Would be nice if
4972b318b824SVille Syrjälä 	 * it didn't do that...
4973b318b824SVille Syrjälä 	 */
4974b318b824SVille Syrjälä 	if (!dev_priv->drm.irq_enabled)
4975b318b824SVille Syrjälä 		return;
4976b318b824SVille Syrjälä 
4977b318b824SVille Syrjälä 	dev_priv->drm.irq_enabled = false;
4978b318b824SVille Syrjälä 
4979b318b824SVille Syrjälä 	intel_irq_reset(dev_priv);
4980b318b824SVille Syrjälä 
4981b318b824SVille Syrjälä 	free_irq(irq, dev_priv);
4982b318b824SVille Syrjälä 
49832aeb7d3aSDaniel Vetter 	intel_hpd_cancel_work(dev_priv);
4984ad1443f0SSagar Arun Kamble 	dev_priv->runtime_pm.irqs_enabled = false;
49852aeb7d3aSDaniel Vetter }
49862aeb7d3aSDaniel Vetter 
4987fca52a55SDaniel Vetter /**
4988fca52a55SDaniel Vetter  * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
4989fca52a55SDaniel Vetter  * @dev_priv: i915 device instance
4990fca52a55SDaniel Vetter  *
4991fca52a55SDaniel Vetter  * This function is used to disable interrupts at runtime, both in the runtime
4992fca52a55SDaniel Vetter  * pm and the system suspend/resume code.
4993fca52a55SDaniel Vetter  */
4994b963291cSDaniel Vetter void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
4995c67a470bSPaulo Zanoni {
4996b318b824SVille Syrjälä 	intel_irq_reset(dev_priv);
4997ad1443f0SSagar Arun Kamble 	dev_priv->runtime_pm.irqs_enabled = false;
4998315ca4c4SVille Syrjälä 	intel_synchronize_irq(dev_priv);
4999c67a470bSPaulo Zanoni }
5000c67a470bSPaulo Zanoni 
5001fca52a55SDaniel Vetter /**
5002fca52a55SDaniel Vetter  * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
5003fca52a55SDaniel Vetter  * @dev_priv: i915 device instance
5004fca52a55SDaniel Vetter  *
5005fca52a55SDaniel Vetter  * This function is used to enable interrupts at runtime, both in the runtime
5006fca52a55SDaniel Vetter  * pm and the system suspend/resume code.
5007fca52a55SDaniel Vetter  */
5008b963291cSDaniel Vetter void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
5009c67a470bSPaulo Zanoni {
5010ad1443f0SSagar Arun Kamble 	dev_priv->runtime_pm.irqs_enabled = true;
5011b318b824SVille Syrjälä 	intel_irq_reset(dev_priv);
5012b318b824SVille Syrjälä 	intel_irq_postinstall(dev_priv);
5013c67a470bSPaulo Zanoni }
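
/*
 * The two runtime-pm helpers above are meant to be used as a pair; a
 * minimal sketch of the suspend/resume ordering (the actual callers
 * live in the runtime-pm and system suspend/resume paths, not here):
 *
 *	intel_runtime_pm_disable_interrupts(i915);
 *	... device goes to a low-power state ...
 *	intel_runtime_pm_enable_interrupts(i915);
 */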
5014