xref: /openbmc/linux/drivers/gpu/drm/i915/i915_irq.c (revision 54c52a8412501fe84bccc28bd443a29cdd3f84a1)
1c0e09200SDave Airlie /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
2c0e09200SDave Airlie  */
3c0e09200SDave Airlie /*
4c0e09200SDave Airlie  * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
5c0e09200SDave Airlie  * All Rights Reserved.
6c0e09200SDave Airlie  *
7c0e09200SDave Airlie  * Permission is hereby granted, free of charge, to any person obtaining a
8c0e09200SDave Airlie  * copy of this software and associated documentation files (the
9c0e09200SDave Airlie  * "Software"), to deal in the Software without restriction, including
10c0e09200SDave Airlie  * without limitation the rights to use, copy, modify, merge, publish,
11c0e09200SDave Airlie  * distribute, sub license, and/or sell copies of the Software, and to
12c0e09200SDave Airlie  * permit persons to whom the Software is furnished to do so, subject to
13c0e09200SDave Airlie  * the following conditions:
14c0e09200SDave Airlie  *
15c0e09200SDave Airlie  * The above copyright notice and this permission notice (including the
16c0e09200SDave Airlie  * next paragraph) shall be included in all copies or substantial portions
17c0e09200SDave Airlie  * of the Software.
18c0e09200SDave Airlie  *
19c0e09200SDave Airlie  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20c0e09200SDave Airlie  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21c0e09200SDave Airlie  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22c0e09200SDave Airlie  * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23c0e09200SDave Airlie  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24c0e09200SDave Airlie  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25c0e09200SDave Airlie  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26c0e09200SDave Airlie  *
27c0e09200SDave Airlie  */
28c0e09200SDave Airlie 
29a70491ccSJoe Perches #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30a70491ccSJoe Perches 
31b2c88f5bSDamien Lespiau #include <linux/circ_buf.h>
3255367a27SJani Nikula #include <linux/cpuidle.h>
3355367a27SJani Nikula #include <linux/slab.h>
3455367a27SJani Nikula #include <linux/sysrq.h>
3555367a27SJani Nikula 
36fcd70cd3SDaniel Vetter #include <drm/drm_drv.h>
3755367a27SJani Nikula #include <drm/drm_irq.h>
38760285e7SDavid Howells #include <drm/i915_drm.h>
3955367a27SJani Nikula 
40c0e09200SDave Airlie #include "i915_drv.h"
41440e2b3dSJani Nikula #include "i915_irq.h"
421c5d22f7SChris Wilson #include "i915_trace.h"
4379e53945SJesse Barnes #include "intel_drv.h"
448834e365SJani Nikula #include "intel_fifo_underrun.h"
45dbeb38d9SJani Nikula #include "intel_hotplug.h"
46a2649b34SJani Nikula #include "intel_lpe_audio.h"
4755367a27SJani Nikula #include "intel_psr.h"
48c0e09200SDave Airlie 
49fca52a55SDaniel Vetter /**
50fca52a55SDaniel Vetter  * DOC: interrupt handling
51fca52a55SDaniel Vetter  *
52fca52a55SDaniel Vetter  * These functions provide the basic support for enabling and disabling
53fca52a55SDaniel Vetter  * interrupt handling. There's a lot more functionality in i915_irq.c
54fca52a55SDaniel Vetter  * and related files, but that will be described in separate chapters.
55fca52a55SDaniel Vetter  */
56fca52a55SDaniel Vetter 
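/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * the IRQ helpers further down all follow the same three-register model per
 * interrupt block -- IMR masks, IER enables, IIR latches events. Teardown
 * and bring-up look roughly like this for a hypothetical block "FOO" with
 * FOOIMR/FOOIIR/FOOIER registers:
 *
 *	gen3_irq_reset(uncore, FOOIMR, FOOIIR, FOOIER);	// mask all, ack IIR twice
 *	...
 *	gen3_irq_init(uncore, FOOIMR, ~enable_mask,	// assert IIR == 0, write IER,
 *		      FOOIER, enable_mask, FOOIIR);	// then unmask the wanted bits
 */
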
57e4ce95aaSVille Syrjälä static const u32 hpd_ilk[HPD_NUM_PINS] = {
58e4ce95aaSVille Syrjälä 	[HPD_PORT_A] = DE_DP_A_HOTPLUG,
59e4ce95aaSVille Syrjälä };
60e4ce95aaSVille Syrjälä 
6123bb4cb5SVille Syrjälä static const u32 hpd_ivb[HPD_NUM_PINS] = {
6223bb4cb5SVille Syrjälä 	[HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
6323bb4cb5SVille Syrjälä };
6423bb4cb5SVille Syrjälä 
653a3b3c7dSVille Syrjälä static const u32 hpd_bdw[HPD_NUM_PINS] = {
663a3b3c7dSVille Syrjälä 	[HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG,
673a3b3c7dSVille Syrjälä };
683a3b3c7dSVille Syrjälä 
697c7e10dbSVille Syrjälä static const u32 hpd_ibx[HPD_NUM_PINS] = {
70e5868a31SEgbert Eich 	[HPD_CRT] = SDE_CRT_HOTPLUG,
71e5868a31SEgbert Eich 	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
72e5868a31SEgbert Eich 	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
73e5868a31SEgbert Eich 	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
74e5868a31SEgbert Eich 	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
75e5868a31SEgbert Eich };
76e5868a31SEgbert Eich 
777c7e10dbSVille Syrjälä static const u32 hpd_cpt[HPD_NUM_PINS] = {
78e5868a31SEgbert Eich 	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
7973c352a2SDaniel Vetter 	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
80e5868a31SEgbert Eich 	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
81e5868a31SEgbert Eich 	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
82e5868a31SEgbert Eich 	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
83e5868a31SEgbert Eich };
84e5868a31SEgbert Eich 
8526951cafSXiong Zhang static const u32 hpd_spt[HPD_NUM_PINS] = {
8674c0b395SVille Syrjälä 	[HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
8726951cafSXiong Zhang 	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
8826951cafSXiong Zhang 	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
8926951cafSXiong Zhang 	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
9026951cafSXiong Zhang 	[HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT
9126951cafSXiong Zhang };
9226951cafSXiong Zhang 
937c7e10dbSVille Syrjälä static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
94e5868a31SEgbert Eich 	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
95e5868a31SEgbert Eich 	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
96e5868a31SEgbert Eich 	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
97e5868a31SEgbert Eich 	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
98e5868a31SEgbert Eich 	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
99e5868a31SEgbert Eich 	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
100e5868a31SEgbert Eich };
101e5868a31SEgbert Eich 
1027c7e10dbSVille Syrjälä static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
103e5868a31SEgbert Eich 	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
104e5868a31SEgbert Eich 	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
105e5868a31SEgbert Eich 	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
106e5868a31SEgbert Eich 	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
107e5868a31SEgbert Eich 	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
108e5868a31SEgbert Eich 	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
109e5868a31SEgbert Eich };
110e5868a31SEgbert Eich 
1114bca26d0SVille Syrjälä static const u32 hpd_status_i915[HPD_NUM_PINS] = {
112e5868a31SEgbert Eich 	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
113e5868a31SEgbert Eich 	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
114e5868a31SEgbert Eich 	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
115e5868a31SEgbert Eich 	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
116e5868a31SEgbert Eich 	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
117e5868a31SEgbert Eich 	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
118e5868a31SEgbert Eich };
119e5868a31SEgbert Eich 
120e0a20ad7SShashank Sharma /* BXT hpd list */
121e0a20ad7SShashank Sharma static const u32 hpd_bxt[HPD_NUM_PINS] = {
1227f3561beSSonika Jindal 	[HPD_PORT_A] = BXT_DE_PORT_HP_DDIA,
123e0a20ad7SShashank Sharma 	[HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
124e0a20ad7SShashank Sharma 	[HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
125e0a20ad7SShashank Sharma };
126e0a20ad7SShashank Sharma 
127b796b971SDhinakaran Pandiyan static const u32 hpd_gen11[HPD_NUM_PINS] = {
128b796b971SDhinakaran Pandiyan 	[HPD_PORT_C] = GEN11_TC1_HOTPLUG | GEN11_TBT1_HOTPLUG,
129b796b971SDhinakaran Pandiyan 	[HPD_PORT_D] = GEN11_TC2_HOTPLUG | GEN11_TBT2_HOTPLUG,
130b796b971SDhinakaran Pandiyan 	[HPD_PORT_E] = GEN11_TC3_HOTPLUG | GEN11_TBT3_HOTPLUG,
131b796b971SDhinakaran Pandiyan 	[HPD_PORT_F] = GEN11_TC4_HOTPLUG | GEN11_TBT4_HOTPLUG
132121e758eSDhinakaran Pandiyan };
133121e758eSDhinakaran Pandiyan 
13431604222SAnusha Srivatsa static const u32 hpd_icp[HPD_NUM_PINS] = {
13531604222SAnusha Srivatsa 	[HPD_PORT_A] = SDE_DDIA_HOTPLUG_ICP,
13631604222SAnusha Srivatsa 	[HPD_PORT_B] = SDE_DDIB_HOTPLUG_ICP,
13731604222SAnusha Srivatsa 	[HPD_PORT_C] = SDE_TC1_HOTPLUG_ICP,
13831604222SAnusha Srivatsa 	[HPD_PORT_D] = SDE_TC2_HOTPLUG_ICP,
13931604222SAnusha Srivatsa 	[HPD_PORT_E] = SDE_TC3_HOTPLUG_ICP,
14031604222SAnusha Srivatsa 	[HPD_PORT_F] = SDE_TC4_HOTPLUG_ICP
14131604222SAnusha Srivatsa };
14231604222SAnusha Srivatsa 
14365f42cdcSPaulo Zanoni static void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr,
14468eb49b1SPaulo Zanoni 			   i915_reg_t iir, i915_reg_t ier)
14568eb49b1SPaulo Zanoni {
14665f42cdcSPaulo Zanoni 	intel_uncore_write(uncore, imr, 0xffffffff);
14765f42cdcSPaulo Zanoni 	intel_uncore_posting_read(uncore, imr);
14868eb49b1SPaulo Zanoni 
14965f42cdcSPaulo Zanoni 	intel_uncore_write(uncore, ier, 0);
15068eb49b1SPaulo Zanoni 
1515c502442SPaulo Zanoni 	/* IIR can theoretically queue up two events. Be paranoid. */
15265f42cdcSPaulo Zanoni 	intel_uncore_write(uncore, iir, 0xffffffff);
15365f42cdcSPaulo Zanoni 	intel_uncore_posting_read(uncore, iir);
15465f42cdcSPaulo Zanoni 	intel_uncore_write(uncore, iir, 0xffffffff);
15565f42cdcSPaulo Zanoni 	intel_uncore_posting_read(uncore, iir);
15668eb49b1SPaulo Zanoni }
1575c502442SPaulo Zanoni 
15865f42cdcSPaulo Zanoni static void gen2_irq_reset(struct intel_uncore *uncore)
15968eb49b1SPaulo Zanoni {
16065f42cdcSPaulo Zanoni 	intel_uncore_write16(uncore, GEN2_IMR, 0xffff);
16165f42cdcSPaulo Zanoni 	intel_uncore_posting_read16(uncore, GEN2_IMR);
162a9d356a6SPaulo Zanoni 
16365f42cdcSPaulo Zanoni 	intel_uncore_write16(uncore, GEN2_IER, 0);
16468eb49b1SPaulo Zanoni 
16568eb49b1SPaulo Zanoni 	/* IIR can theoretically queue up two events. Be paranoid. */
16665f42cdcSPaulo Zanoni 	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
16765f42cdcSPaulo Zanoni 	intel_uncore_posting_read16(uncore, GEN2_IIR);
16865f42cdcSPaulo Zanoni 	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
16965f42cdcSPaulo Zanoni 	intel_uncore_posting_read16(uncore, GEN2_IIR);
17068eb49b1SPaulo Zanoni }
17168eb49b1SPaulo Zanoni 
172b16b2a2fSPaulo Zanoni #define GEN8_IRQ_RESET_NDX(uncore, type, which) \
17368eb49b1SPaulo Zanoni ({ \
17468eb49b1SPaulo Zanoni 	unsigned int which_ = which; \
175b16b2a2fSPaulo Zanoni 	gen3_irq_reset((uncore), GEN8_##type##_IMR(which_), \
17668eb49b1SPaulo Zanoni 		       GEN8_##type##_IIR(which_), GEN8_##type##_IER(which_)); \
17768eb49b1SPaulo Zanoni })
17868eb49b1SPaulo Zanoni 
179b16b2a2fSPaulo Zanoni #define GEN3_IRQ_RESET(uncore, type) \
180b16b2a2fSPaulo Zanoni 	gen3_irq_reset((uncore), type##IMR, type##IIR, type##IER)
18168eb49b1SPaulo Zanoni 
182b16b2a2fSPaulo Zanoni #define GEN2_IRQ_RESET(uncore) \
183b16b2a2fSPaulo Zanoni 	gen2_irq_reset(uncore)
184e9e9848aSVille Syrjälä 
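/*
 * For example (illustrative only): since the macros paste "IMR"/"IIR"/"IER"
 * onto the type prefix, GEN3_IRQ_RESET(uncore, DE) expands to
 * gen3_irq_reset(uncore, DEIMR, DEIIR, DEIER), while
 * GEN8_IRQ_RESET_NDX(uncore, GT, 0) resets GEN8_GT_IMR(0), GEN8_GT_IIR(0)
 * and GEN8_GT_IER(0).
 */
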
185337ba017SPaulo Zanoni /*
186337ba017SPaulo Zanoni  * We should clear IMR at preinstall/uninstall, and just check at postinstall.
187337ba017SPaulo Zanoni  */
18865f42cdcSPaulo Zanoni static void gen3_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg)
189b51a2842SVille Syrjälä {
19065f42cdcSPaulo Zanoni 	u32 val = intel_uncore_read(uncore, reg);
191b51a2842SVille Syrjälä 
192b51a2842SVille Syrjälä 	if (val == 0)
193b51a2842SVille Syrjälä 		return;
194b51a2842SVille Syrjälä 
195b51a2842SVille Syrjälä 	WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
196f0f59a00SVille Syrjälä 	     i915_mmio_reg_offset(reg), val);
19765f42cdcSPaulo Zanoni 	intel_uncore_write(uncore, reg, 0xffffffff);
19865f42cdcSPaulo Zanoni 	intel_uncore_posting_read(uncore, reg);
19965f42cdcSPaulo Zanoni 	intel_uncore_write(uncore, reg, 0xffffffff);
20065f42cdcSPaulo Zanoni 	intel_uncore_posting_read(uncore, reg);
201b51a2842SVille Syrjälä }
202337ba017SPaulo Zanoni 
20365f42cdcSPaulo Zanoni static void gen2_assert_iir_is_zero(struct intel_uncore *uncore)
204e9e9848aSVille Syrjälä {
20565f42cdcSPaulo Zanoni 	u16 val = intel_uncore_read16(uncore, GEN2_IIR);
206e9e9848aSVille Syrjälä 
207e9e9848aSVille Syrjälä 	if (val == 0)
208e9e9848aSVille Syrjälä 		return;
209e9e9848aSVille Syrjälä 
210e9e9848aSVille Syrjälä 	WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
2119d9523d8SPaulo Zanoni 	     i915_mmio_reg_offset(GEN2_IIR), val);
21265f42cdcSPaulo Zanoni 	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
21365f42cdcSPaulo Zanoni 	intel_uncore_posting_read16(uncore, GEN2_IIR);
21465f42cdcSPaulo Zanoni 	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
21565f42cdcSPaulo Zanoni 	intel_uncore_posting_read16(uncore, GEN2_IIR);
216e9e9848aSVille Syrjälä }
217e9e9848aSVille Syrjälä 
21865f42cdcSPaulo Zanoni static void gen3_irq_init(struct intel_uncore *uncore,
21968eb49b1SPaulo Zanoni 			  i915_reg_t imr, u32 imr_val,
22068eb49b1SPaulo Zanoni 			  i915_reg_t ier, u32 ier_val,
22168eb49b1SPaulo Zanoni 			  i915_reg_t iir)
22268eb49b1SPaulo Zanoni {
22365f42cdcSPaulo Zanoni 	gen3_assert_iir_is_zero(uncore, iir);
22435079899SPaulo Zanoni 
22565f42cdcSPaulo Zanoni 	intel_uncore_write(uncore, ier, ier_val);
22665f42cdcSPaulo Zanoni 	intel_uncore_write(uncore, imr, imr_val);
22765f42cdcSPaulo Zanoni 	intel_uncore_posting_read(uncore, imr);
22868eb49b1SPaulo Zanoni }
22935079899SPaulo Zanoni 
23065f42cdcSPaulo Zanoni static void gen2_irq_init(struct intel_uncore *uncore,
2312918c3caSPaulo Zanoni 			  u32 imr_val, u32 ier_val)
23268eb49b1SPaulo Zanoni {
23365f42cdcSPaulo Zanoni 	gen2_assert_iir_is_zero(uncore);
23468eb49b1SPaulo Zanoni 
23565f42cdcSPaulo Zanoni 	intel_uncore_write16(uncore, GEN2_IER, ier_val);
23665f42cdcSPaulo Zanoni 	intel_uncore_write16(uncore, GEN2_IMR, imr_val);
23765f42cdcSPaulo Zanoni 	intel_uncore_posting_read16(uncore, GEN2_IMR);
23868eb49b1SPaulo Zanoni }
23968eb49b1SPaulo Zanoni 
240b16b2a2fSPaulo Zanoni #define GEN8_IRQ_INIT_NDX(uncore, type, which, imr_val, ier_val) \
24168eb49b1SPaulo Zanoni ({ \
24268eb49b1SPaulo Zanoni 	unsigned int which_ = which; \
243b16b2a2fSPaulo Zanoni 	gen3_irq_init((uncore), \
24468eb49b1SPaulo Zanoni 		      GEN8_##type##_IMR(which_), imr_val, \
24568eb49b1SPaulo Zanoni 		      GEN8_##type##_IER(which_), ier_val, \
24668eb49b1SPaulo Zanoni 		      GEN8_##type##_IIR(which_)); \
24768eb49b1SPaulo Zanoni })
24868eb49b1SPaulo Zanoni 
249b16b2a2fSPaulo Zanoni #define GEN3_IRQ_INIT(uncore, type, imr_val, ier_val) \
250b16b2a2fSPaulo Zanoni 	gen3_irq_init((uncore), \
25168eb49b1SPaulo Zanoni 		      type##IMR, imr_val, \
25268eb49b1SPaulo Zanoni 		      type##IER, ier_val, \
25368eb49b1SPaulo Zanoni 		      type##IIR)
25468eb49b1SPaulo Zanoni 
255b16b2a2fSPaulo Zanoni #define GEN2_IRQ_INIT(uncore, imr_val, ier_val) \
256b16b2a2fSPaulo Zanoni 	gen2_irq_init((uncore), imr_val, ier_val)
257e9e9848aSVille Syrjälä 
258c9a9a268SImre Deak static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
25926705e20SSagar Arun Kamble static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
260c9a9a268SImre Deak 
2610706f17cSEgbert Eich /* For display hotplug interrupt */
2620706f17cSEgbert Eich static inline void
2630706f17cSEgbert Eich i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
264a9c287c9SJani Nikula 				     u32 mask,
265a9c287c9SJani Nikula 				     u32 bits)
2660706f17cSEgbert Eich {
267a9c287c9SJani Nikula 	u32 val;
2680706f17cSEgbert Eich 
26967520415SChris Wilson 	lockdep_assert_held(&dev_priv->irq_lock);
2700706f17cSEgbert Eich 	WARN_ON(bits & ~mask);
2710706f17cSEgbert Eich 
2720706f17cSEgbert Eich 	val = I915_READ(PORT_HOTPLUG_EN);
2730706f17cSEgbert Eich 	val &= ~mask;
2740706f17cSEgbert Eich 	val |= bits;
2750706f17cSEgbert Eich 	I915_WRITE(PORT_HOTPLUG_EN, val);
2760706f17cSEgbert Eich }
2770706f17cSEgbert Eich 
2780706f17cSEgbert Eich /**
2790706f17cSEgbert Eich  * i915_hotplug_interrupt_update - update hotplug interrupt enable
2800706f17cSEgbert Eich  * @dev_priv: driver private
2810706f17cSEgbert Eich  * @mask: bits to update
2820706f17cSEgbert Eich  * @bits: bits to enable
2830706f17cSEgbert Eich  * NOTE: the HPD enable bits are modified both inside and outside
2840706f17cSEgbert Eich  * of an interrupt context. To keep concurrent read-modify-write cycles
2850706f17cSEgbert Eich  * from interfering, these bits are protected by a spinlock. Since this
2860706f17cSEgbert Eich  * function is usually not called from a context where the lock is
2870706f17cSEgbert Eich  * held already, this function acquires the lock itself. A non-locking
2880706f17cSEgbert Eich  * version is also available.
2890706f17cSEgbert Eich  */
2900706f17cSEgbert Eich void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
291a9c287c9SJani Nikula 				   u32 mask,
292a9c287c9SJani Nikula 				   u32 bits)
2930706f17cSEgbert Eich {
2940706f17cSEgbert Eich 	spin_lock_irq(&dev_priv->irq_lock);
2950706f17cSEgbert Eich 	i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
2960706f17cSEgbert Eich 	spin_unlock_irq(&dev_priv->irq_lock);
2970706f17cSEgbert Eich }
2980706f17cSEgbert Eich 
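/*
 * Usage sketch for i915_hotplug_interrupt_update() above (illustrative, not
 * from the original file): @mask selects which HPD enable bits to touch and
 * @bits the subset of those to set. For example:
 *
 *	i915_hotplug_interrupt_update(dev_priv, PORTB_HOTPLUG_INT_EN,
 *				      PORTB_HOTPLUG_INT_EN);	// enable port B
 *	i915_hotplug_interrupt_update(dev_priv, PORTB_HOTPLUG_INT_EN, 0); // disable it
 */
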
29996606f3bSOscar Mateo static u32
30096606f3bSOscar Mateo gen11_gt_engine_identity(struct drm_i915_private * const i915,
30196606f3bSOscar Mateo 			 const unsigned int bank, const unsigned int bit);
30296606f3bSOscar Mateo 
30360a94324SChris Wilson static bool gen11_reset_one_iir(struct drm_i915_private * const i915,
30496606f3bSOscar Mateo 				const unsigned int bank,
30596606f3bSOscar Mateo 				const unsigned int bit)
30696606f3bSOscar Mateo {
30725286aacSDaniele Ceraolo Spurio 	void __iomem * const regs = i915->uncore.regs;
30896606f3bSOscar Mateo 	u32 dw;
30996606f3bSOscar Mateo 
31096606f3bSOscar Mateo 	lockdep_assert_held(&i915->irq_lock);
31196606f3bSOscar Mateo 
31296606f3bSOscar Mateo 	dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
31396606f3bSOscar Mateo 	if (dw & BIT(bit)) {
31496606f3bSOscar Mateo 		/*
31596606f3bSOscar Mateo 		 * According to the BSpec, DW_IIR bits cannot be cleared without
31696606f3bSOscar Mateo 		 * first servicing the Selector & Shared IIR registers.
31796606f3bSOscar Mateo 		 */
31896606f3bSOscar Mateo 		gen11_gt_engine_identity(i915, bank, bit);
31996606f3bSOscar Mateo 
32096606f3bSOscar Mateo 		/*
32196606f3bSOscar Mateo 		 * We locked GT INT DW by reading it. If we want to (try
32296606f3bSOscar Mateo 		 * to) recover from this successfully, we need to clear
32396606f3bSOscar Mateo 		 * our bit, otherwise we are locking the register for
32496606f3bSOscar Mateo 		 * everybody.
32596606f3bSOscar Mateo 		 */
32696606f3bSOscar Mateo 		raw_reg_write(regs, GEN11_GT_INTR_DW(bank), BIT(bit));
32796606f3bSOscar Mateo 
32896606f3bSOscar Mateo 		return true;
32996606f3bSOscar Mateo 	}
33096606f3bSOscar Mateo 
33196606f3bSOscar Mateo 	return false;
33296606f3bSOscar Mateo }
33396606f3bSOscar Mateo 
334d9dc34f1SVille Syrjälä /**
335d9dc34f1SVille Syrjälä  * ilk_update_display_irq - update DEIMR
336d9dc34f1SVille Syrjälä  * @dev_priv: driver private
337d9dc34f1SVille Syrjälä  * @interrupt_mask: mask of interrupt bits to update
338d9dc34f1SVille Syrjälä  * @enabled_irq_mask: mask of interrupt bits to enable
339d9dc34f1SVille Syrjälä  */
340fbdedaeaSVille Syrjälä void ilk_update_display_irq(struct drm_i915_private *dev_priv,
341a9c287c9SJani Nikula 			    u32 interrupt_mask,
342a9c287c9SJani Nikula 			    u32 enabled_irq_mask)
343036a4a7dSZhenyu Wang {
344a9c287c9SJani Nikula 	u32 new_val;
345d9dc34f1SVille Syrjälä 
34667520415SChris Wilson 	lockdep_assert_held(&dev_priv->irq_lock);
3474bc9d430SDaniel Vetter 
348d9dc34f1SVille Syrjälä 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
349d9dc34f1SVille Syrjälä 
3509df7575fSJesse Barnes 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
351c67a470bSPaulo Zanoni 		return;
352c67a470bSPaulo Zanoni 
353d9dc34f1SVille Syrjälä 	new_val = dev_priv->irq_mask;
354d9dc34f1SVille Syrjälä 	new_val &= ~interrupt_mask;
355d9dc34f1SVille Syrjälä 	new_val |= (~enabled_irq_mask & interrupt_mask);
356d9dc34f1SVille Syrjälä 
357d9dc34f1SVille Syrjälä 	if (new_val != dev_priv->irq_mask) {
358d9dc34f1SVille Syrjälä 		dev_priv->irq_mask = new_val;
3591ec14ad3SChris Wilson 		I915_WRITE(DEIMR, dev_priv->irq_mask);
3603143a2bfSChris Wilson 		POSTING_READ(DEIMR);
361036a4a7dSZhenyu Wang 	}
362036a4a7dSZhenyu Wang }
363036a4a7dSZhenyu Wang 
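/*
 * Worked example (editorial addition) of the @interrupt_mask /
 * @enabled_irq_mask convention used by ilk_update_display_irq() above and
 * the similar *_update_*_irq() helpers below: IMR is a mask register, so an
 * enabled interrupt is a cleared bit. With interrupt_mask = 0b1100 and
 * enabled_irq_mask = 0b0100, bit 2 of the cached mask is cleared (unmasked),
 * bit 3 is set (masked), and bits 1:0 keep their previous value.
 */
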
36443eaea13SPaulo Zanoni /**
36543eaea13SPaulo Zanoni  * ilk_update_gt_irq - update GTIMR
36643eaea13SPaulo Zanoni  * @dev_priv: driver private
36743eaea13SPaulo Zanoni  * @interrupt_mask: mask of interrupt bits to update
36843eaea13SPaulo Zanoni  * @enabled_irq_mask: mask of interrupt bits to enable
36943eaea13SPaulo Zanoni  */
37043eaea13SPaulo Zanoni static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
371a9c287c9SJani Nikula 			      u32 interrupt_mask,
372a9c287c9SJani Nikula 			      u32 enabled_irq_mask)
37343eaea13SPaulo Zanoni {
37467520415SChris Wilson 	lockdep_assert_held(&dev_priv->irq_lock);
37543eaea13SPaulo Zanoni 
37615a17aaeSDaniel Vetter 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
37715a17aaeSDaniel Vetter 
3789df7575fSJesse Barnes 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
379c67a470bSPaulo Zanoni 		return;
380c67a470bSPaulo Zanoni 
38143eaea13SPaulo Zanoni 	dev_priv->gt_irq_mask &= ~interrupt_mask;
38243eaea13SPaulo Zanoni 	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
38343eaea13SPaulo Zanoni 	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
38443eaea13SPaulo Zanoni }
38543eaea13SPaulo Zanoni 
386a9c287c9SJani Nikula void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, u32 mask)
38743eaea13SPaulo Zanoni {
38843eaea13SPaulo Zanoni 	ilk_update_gt_irq(dev_priv, mask, mask);
38931bb59ccSChris Wilson 	POSTING_READ_FW(GTIMR);
39043eaea13SPaulo Zanoni }
39143eaea13SPaulo Zanoni 
392a9c287c9SJani Nikula void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, u32 mask)
39343eaea13SPaulo Zanoni {
39443eaea13SPaulo Zanoni 	ilk_update_gt_irq(dev_priv, mask, 0);
39543eaea13SPaulo Zanoni }
39643eaea13SPaulo Zanoni 
397f0f59a00SVille Syrjälä static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv)
398b900b949SImre Deak {
399d02b98b8SOscar Mateo 	WARN_ON_ONCE(INTEL_GEN(dev_priv) >= 11);
400d02b98b8SOscar Mateo 
401bca2bf2aSPandiyan, Dhinakaran 	return INTEL_GEN(dev_priv) >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
402b900b949SImre Deak }
403b900b949SImre Deak 
404917dc6b5SMika Kuoppala static void write_pm_imr(struct drm_i915_private *dev_priv)
405a72fbc3aSImre Deak {
406917dc6b5SMika Kuoppala 	i915_reg_t reg;
407917dc6b5SMika Kuoppala 	u32 mask = dev_priv->pm_imr;
408917dc6b5SMika Kuoppala 
409917dc6b5SMika Kuoppala 	if (INTEL_GEN(dev_priv) >= 11) {
410917dc6b5SMika Kuoppala 		reg = GEN11_GPM_WGBOXPERF_INTR_MASK;
411917dc6b5SMika Kuoppala 		/* pm is in upper half */
412917dc6b5SMika Kuoppala 		mask = mask << 16;
413917dc6b5SMika Kuoppala 	} else if (INTEL_GEN(dev_priv) >= 8) {
414917dc6b5SMika Kuoppala 		reg = GEN8_GT_IMR(2);
415917dc6b5SMika Kuoppala 	} else {
416917dc6b5SMika Kuoppala 		reg = GEN6_PMIMR;
417a72fbc3aSImre Deak 	}
418a72fbc3aSImre Deak 
419917dc6b5SMika Kuoppala 	I915_WRITE(reg, mask);
420917dc6b5SMika Kuoppala 	POSTING_READ(reg);
421917dc6b5SMika Kuoppala }
422917dc6b5SMika Kuoppala 
423917dc6b5SMika Kuoppala static void write_pm_ier(struct drm_i915_private *dev_priv)
424b900b949SImre Deak {
425917dc6b5SMika Kuoppala 	i915_reg_t reg;
426917dc6b5SMika Kuoppala 	u32 mask = dev_priv->pm_ier;
427917dc6b5SMika Kuoppala 
428917dc6b5SMika Kuoppala 	if (INTEL_GEN(dev_priv) >= 11) {
429917dc6b5SMika Kuoppala 		reg = GEN11_GPM_WGBOXPERF_INTR_ENABLE;
430917dc6b5SMika Kuoppala 		/* pm is in upper half */
431917dc6b5SMika Kuoppala 		mask = mask << 16;
432917dc6b5SMika Kuoppala 	} else if (INTEL_GEN(dev_priv) >= 8) {
433917dc6b5SMika Kuoppala 		reg = GEN8_GT_IER(2);
434917dc6b5SMika Kuoppala 	} else {
435917dc6b5SMika Kuoppala 		reg = GEN6_PMIER;
436917dc6b5SMika Kuoppala 	}
437917dc6b5SMika Kuoppala 
438917dc6b5SMika Kuoppala 	I915_WRITE(reg, mask);
439b900b949SImre Deak }
440b900b949SImre Deak 
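/*
 * Illustrative note on the "pm is in upper half" handling above: on gen 11
 * the PM bits occupy the upper 16 bits of GEN11_GPM_WGBOXPERF_INTR_MASK /
 * GEN11_GPM_WGBOXPERF_INTR_ENABLE, hence the mask << 16 -- e.g. a cached
 * pm_imr of 0x0000ffff is written out as 0xffff0000.
 */
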
441edbfdb45SPaulo Zanoni /**
442edbfdb45SPaulo Zanoni  * snb_update_pm_irq - update GEN6_PMIMR
443edbfdb45SPaulo Zanoni  * @dev_priv: driver private
444edbfdb45SPaulo Zanoni  * @interrupt_mask: mask of interrupt bits to update
445edbfdb45SPaulo Zanoni  * @enabled_irq_mask: mask of interrupt bits to enable
446edbfdb45SPaulo Zanoni  */
447edbfdb45SPaulo Zanoni static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
448a9c287c9SJani Nikula 			      u32 interrupt_mask,
449a9c287c9SJani Nikula 			      u32 enabled_irq_mask)
450edbfdb45SPaulo Zanoni {
451a9c287c9SJani Nikula 	u32 new_val;
452edbfdb45SPaulo Zanoni 
45315a17aaeSDaniel Vetter 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
45415a17aaeSDaniel Vetter 
45567520415SChris Wilson 	lockdep_assert_held(&dev_priv->irq_lock);
456edbfdb45SPaulo Zanoni 
457f4e9af4fSAkash Goel 	new_val = dev_priv->pm_imr;
458f52ecbcfSPaulo Zanoni 	new_val &= ~interrupt_mask;
459f52ecbcfSPaulo Zanoni 	new_val |= (~enabled_irq_mask & interrupt_mask);
460f52ecbcfSPaulo Zanoni 
461f4e9af4fSAkash Goel 	if (new_val != dev_priv->pm_imr) {
462f4e9af4fSAkash Goel 		dev_priv->pm_imr = new_val;
463917dc6b5SMika Kuoppala 		write_pm_imr(dev_priv);
464edbfdb45SPaulo Zanoni 	}
465f52ecbcfSPaulo Zanoni }
466edbfdb45SPaulo Zanoni 
467f4e9af4fSAkash Goel void gen6_unmask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
468edbfdb45SPaulo Zanoni {
4699939fba2SImre Deak 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
4709939fba2SImre Deak 		return;
4719939fba2SImre Deak 
472edbfdb45SPaulo Zanoni 	snb_update_pm_irq(dev_priv, mask, mask);
473edbfdb45SPaulo Zanoni }
474edbfdb45SPaulo Zanoni 
475f4e9af4fSAkash Goel static void __gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
4769939fba2SImre Deak {
4779939fba2SImre Deak 	snb_update_pm_irq(dev_priv, mask, 0);
4789939fba2SImre Deak }
4799939fba2SImre Deak 
480f4e9af4fSAkash Goel void gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
481edbfdb45SPaulo Zanoni {
4829939fba2SImre Deak 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
4839939fba2SImre Deak 		return;
4849939fba2SImre Deak 
485f4e9af4fSAkash Goel 	__gen6_mask_pm_irq(dev_priv, mask);
486f4e9af4fSAkash Goel }
487f4e9af4fSAkash Goel 
4883814fd77SOscar Mateo static void gen6_reset_pm_iir(struct drm_i915_private *dev_priv, u32 reset_mask)
489f4e9af4fSAkash Goel {
490f4e9af4fSAkash Goel 	i915_reg_t reg = gen6_pm_iir(dev_priv);
491f4e9af4fSAkash Goel 
49267520415SChris Wilson 	lockdep_assert_held(&dev_priv->irq_lock);
493f4e9af4fSAkash Goel 
494f4e9af4fSAkash Goel 	I915_WRITE(reg, reset_mask);
495f4e9af4fSAkash Goel 	I915_WRITE(reg, reset_mask);
496f4e9af4fSAkash Goel 	POSTING_READ(reg);
497f4e9af4fSAkash Goel }
498f4e9af4fSAkash Goel 
4993814fd77SOscar Mateo static void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, u32 enable_mask)
500f4e9af4fSAkash Goel {
50167520415SChris Wilson 	lockdep_assert_held(&dev_priv->irq_lock);
502f4e9af4fSAkash Goel 
503f4e9af4fSAkash Goel 	dev_priv->pm_ier |= enable_mask;
504917dc6b5SMika Kuoppala 	write_pm_ier(dev_priv);
505f4e9af4fSAkash Goel 	gen6_unmask_pm_irq(dev_priv, enable_mask);
506f4e9af4fSAkash Goel 	/* unmask_pm_irq provides an implicit barrier (POSTING_READ) */
507f4e9af4fSAkash Goel }
508f4e9af4fSAkash Goel 
5093814fd77SOscar Mateo static void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, u32 disable_mask)
510f4e9af4fSAkash Goel {
51167520415SChris Wilson 	lockdep_assert_held(&dev_priv->irq_lock);
512f4e9af4fSAkash Goel 
513f4e9af4fSAkash Goel 	dev_priv->pm_ier &= ~disable_mask;
514f4e9af4fSAkash Goel 	__gen6_mask_pm_irq(dev_priv, disable_mask);
515917dc6b5SMika Kuoppala 	write_pm_ier(dev_priv);
516f4e9af4fSAkash Goel 	/* though a barrier is missing here, we don't really need one */
517edbfdb45SPaulo Zanoni }
518edbfdb45SPaulo Zanoni 
519d02b98b8SOscar Mateo void gen11_reset_rps_interrupts(struct drm_i915_private *dev_priv)
520d02b98b8SOscar Mateo {
521d02b98b8SOscar Mateo 	spin_lock_irq(&dev_priv->irq_lock);
522d02b98b8SOscar Mateo 
52396606f3bSOscar Mateo 	while (gen11_reset_one_iir(dev_priv, 0, GEN11_GTPM))
52496606f3bSOscar Mateo 		;
525d02b98b8SOscar Mateo 
526d02b98b8SOscar Mateo 	dev_priv->gt_pm.rps.pm_iir = 0;
527d02b98b8SOscar Mateo 
528d02b98b8SOscar Mateo 	spin_unlock_irq(&dev_priv->irq_lock);
529d02b98b8SOscar Mateo }
530d02b98b8SOscar Mateo 
531dc97997aSChris Wilson void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
5323cc134e3SImre Deak {
5333cc134e3SImre Deak 	spin_lock_irq(&dev_priv->irq_lock);
5344668f695SChris Wilson 	gen6_reset_pm_iir(dev_priv, GEN6_PM_RPS_EVENTS);
535562d9baeSSagar Arun Kamble 	dev_priv->gt_pm.rps.pm_iir = 0;
5363cc134e3SImre Deak 	spin_unlock_irq(&dev_priv->irq_lock);
5373cc134e3SImre Deak }
5383cc134e3SImre Deak 
53991d14251STvrtko Ursulin void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
540b900b949SImre Deak {
541562d9baeSSagar Arun Kamble 	struct intel_rps *rps = &dev_priv->gt_pm.rps;
542562d9baeSSagar Arun Kamble 
543562d9baeSSagar Arun Kamble 	if (READ_ONCE(rps->interrupts_enabled))
544f2a91d1aSChris Wilson 		return;
545f2a91d1aSChris Wilson 
546b900b949SImre Deak 	spin_lock_irq(&dev_priv->irq_lock);
547562d9baeSSagar Arun Kamble 	WARN_ON_ONCE(rps->pm_iir);
54896606f3bSOscar Mateo 
549d02b98b8SOscar Mateo 	if (INTEL_GEN(dev_priv) >= 11)
55096606f3bSOscar Mateo 		WARN_ON_ONCE(gen11_reset_one_iir(dev_priv, 0, GEN11_GTPM));
551d02b98b8SOscar Mateo 	else
552c33d247dSChris Wilson 		WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
55396606f3bSOscar Mateo 
554562d9baeSSagar Arun Kamble 	rps->interrupts_enabled = true;
555b900b949SImre Deak 	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
55678e68d36SImre Deak 
557b900b949SImre Deak 	spin_unlock_irq(&dev_priv->irq_lock);
558b900b949SImre Deak }
559b900b949SImre Deak 
56091d14251STvrtko Ursulin void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
561b900b949SImre Deak {
562562d9baeSSagar Arun Kamble 	struct intel_rps *rps = &dev_priv->gt_pm.rps;
563562d9baeSSagar Arun Kamble 
564562d9baeSSagar Arun Kamble 	if (!READ_ONCE(rps->interrupts_enabled))
565f2a91d1aSChris Wilson 		return;
566f2a91d1aSChris Wilson 
567d4d70aa5SImre Deak 	spin_lock_irq(&dev_priv->irq_lock);
568562d9baeSSagar Arun Kamble 	rps->interrupts_enabled = false;
5699939fba2SImre Deak 
570b20e3cfeSDave Gordon 	I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0u));
5719939fba2SImre Deak 
5724668f695SChris Wilson 	gen6_disable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS);
57358072ccbSImre Deak 
57458072ccbSImre Deak 	spin_unlock_irq(&dev_priv->irq_lock);
57591c8a326SChris Wilson 	synchronize_irq(dev_priv->drm.irq);
576c33d247dSChris Wilson 
577c33d247dSChris Wilson 	/* Now that we will not be generating any more work, flush any
5783814fd77SOscar Mateo 	 * outstanding tasks. As we are called on the RPS idle path,
579c33d247dSChris Wilson 	 * we will reset the GPU to minimum frequencies, so the current
580c33d247dSChris Wilson 	 * state of the worker can be discarded.
581c33d247dSChris Wilson 	 */
582562d9baeSSagar Arun Kamble 	cancel_work_sync(&rps->work);
583d02b98b8SOscar Mateo 	if (INTEL_GEN(dev_priv) >= 11)
584d02b98b8SOscar Mateo 		gen11_reset_rps_interrupts(dev_priv);
585d02b98b8SOscar Mateo 	else
586c33d247dSChris Wilson 		gen6_reset_rps_interrupts(dev_priv);
587b900b949SImre Deak }
588b900b949SImre Deak 
58926705e20SSagar Arun Kamble void gen9_reset_guc_interrupts(struct drm_i915_private *dev_priv)
59026705e20SSagar Arun Kamble {
5911be333d3SSagar Arun Kamble 	assert_rpm_wakelock_held(dev_priv);
5921be333d3SSagar Arun Kamble 
59326705e20SSagar Arun Kamble 	spin_lock_irq(&dev_priv->irq_lock);
59426705e20SSagar Arun Kamble 	gen6_reset_pm_iir(dev_priv, dev_priv->pm_guc_events);
59526705e20SSagar Arun Kamble 	spin_unlock_irq(&dev_priv->irq_lock);
59626705e20SSagar Arun Kamble }
59726705e20SSagar Arun Kamble 
59826705e20SSagar Arun Kamble void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv)
59926705e20SSagar Arun Kamble {
6001be333d3SSagar Arun Kamble 	assert_rpm_wakelock_held(dev_priv);
6011be333d3SSagar Arun Kamble 
60226705e20SSagar Arun Kamble 	spin_lock_irq(&dev_priv->irq_lock);
6031e83e7a6SOscar Mateo 	if (!dev_priv->guc.interrupts.enabled) {
60426705e20SSagar Arun Kamble 		WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) &
60526705e20SSagar Arun Kamble 				       dev_priv->pm_guc_events);
6061e83e7a6SOscar Mateo 		dev_priv->guc.interrupts.enabled = true;
60726705e20SSagar Arun Kamble 		gen6_enable_pm_irq(dev_priv, dev_priv->pm_guc_events);
60826705e20SSagar Arun Kamble 	}
60926705e20SSagar Arun Kamble 	spin_unlock_irq(&dev_priv->irq_lock);
61026705e20SSagar Arun Kamble }
61126705e20SSagar Arun Kamble 
61226705e20SSagar Arun Kamble void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv)
61326705e20SSagar Arun Kamble {
6141be333d3SSagar Arun Kamble 	assert_rpm_wakelock_held(dev_priv);
6151be333d3SSagar Arun Kamble 
61626705e20SSagar Arun Kamble 	spin_lock_irq(&dev_priv->irq_lock);
6171e83e7a6SOscar Mateo 	dev_priv->guc.interrupts.enabled = false;
61826705e20SSagar Arun Kamble 
61926705e20SSagar Arun Kamble 	gen6_disable_pm_irq(dev_priv, dev_priv->pm_guc_events);
62026705e20SSagar Arun Kamble 
62126705e20SSagar Arun Kamble 	spin_unlock_irq(&dev_priv->irq_lock);
62226705e20SSagar Arun Kamble 	synchronize_irq(dev_priv->drm.irq);
62326705e20SSagar Arun Kamble 
62426705e20SSagar Arun Kamble 	gen9_reset_guc_interrupts(dev_priv);
62526705e20SSagar Arun Kamble }
62626705e20SSagar Arun Kamble 
627*54c52a84SOscar Mateo void gen11_reset_guc_interrupts(struct drm_i915_private *i915)
628*54c52a84SOscar Mateo {
629*54c52a84SOscar Mateo 	spin_lock_irq(&i915->irq_lock);
630*54c52a84SOscar Mateo 	gen11_reset_one_iir(i915, 0, GEN11_GUC);
631*54c52a84SOscar Mateo 	spin_unlock_irq(&i915->irq_lock);
632*54c52a84SOscar Mateo }
633*54c52a84SOscar Mateo 
634*54c52a84SOscar Mateo void gen11_enable_guc_interrupts(struct drm_i915_private *dev_priv)
635*54c52a84SOscar Mateo {
636*54c52a84SOscar Mateo 	spin_lock_irq(&dev_priv->irq_lock);
637*54c52a84SOscar Mateo 	if (!dev_priv->guc.interrupts.enabled) {
638*54c52a84SOscar Mateo 		u32 events = REG_FIELD_PREP(ENGINE1_MASK,
639*54c52a84SOscar Mateo 					    GEN11_GUC_INTR_GUC2HOST);
640*54c52a84SOscar Mateo 
641*54c52a84SOscar Mateo 		WARN_ON_ONCE(gen11_reset_one_iir(dev_priv, 0, GEN11_GUC));
642*54c52a84SOscar Mateo 		I915_WRITE(GEN11_GUC_SG_INTR_ENABLE, events);
643*54c52a84SOscar Mateo 		I915_WRITE(GEN11_GUC_SG_INTR_MASK, ~events);
644*54c52a84SOscar Mateo 		dev_priv->guc.interrupts.enabled = true;
645*54c52a84SOscar Mateo 	}
646*54c52a84SOscar Mateo 	spin_unlock_irq(&dev_priv->irq_lock);
647*54c52a84SOscar Mateo }
648*54c52a84SOscar Mateo 
649*54c52a84SOscar Mateo void gen11_disable_guc_interrupts(struct drm_i915_private *dev_priv)
650*54c52a84SOscar Mateo {
651*54c52a84SOscar Mateo 	spin_lock_irq(&dev_priv->irq_lock);
652*54c52a84SOscar Mateo 	dev_priv->guc.interrupts.enabled = false;
653*54c52a84SOscar Mateo 
654*54c52a84SOscar Mateo 	I915_WRITE(GEN11_GUC_SG_INTR_MASK, ~0);
655*54c52a84SOscar Mateo 	I915_WRITE(GEN11_GUC_SG_INTR_ENABLE, 0);
656*54c52a84SOscar Mateo 
657*54c52a84SOscar Mateo 	spin_unlock_irq(&dev_priv->irq_lock);
658*54c52a84SOscar Mateo 	synchronize_irq(dev_priv->drm.irq);
659*54c52a84SOscar Mateo 
660*54c52a84SOscar Mateo 	gen11_reset_guc_interrupts(dev_priv);
661*54c52a84SOscar Mateo }
662*54c52a84SOscar Mateo 
6630961021aSBen Widawsky /**
6643a3b3c7dSVille Syrjälä  * bdw_update_port_irq - update DE port interrupt
6653a3b3c7dSVille Syrjälä  * @dev_priv: driver private
6663a3b3c7dSVille Syrjälä  * @interrupt_mask: mask of interrupt bits to update
6673a3b3c7dSVille Syrjälä  * @enabled_irq_mask: mask of interrupt bits to enable
6683a3b3c7dSVille Syrjälä  */
6693a3b3c7dSVille Syrjälä static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
670a9c287c9SJani Nikula 				u32 interrupt_mask,
671a9c287c9SJani Nikula 				u32 enabled_irq_mask)
6723a3b3c7dSVille Syrjälä {
673a9c287c9SJani Nikula 	u32 new_val;
674a9c287c9SJani Nikula 	u32 old_val;
6753a3b3c7dSVille Syrjälä 
67667520415SChris Wilson 	lockdep_assert_held(&dev_priv->irq_lock);
6773a3b3c7dSVille Syrjälä 
6783a3b3c7dSVille Syrjälä 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
6793a3b3c7dSVille Syrjälä 
6803a3b3c7dSVille Syrjälä 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
6813a3b3c7dSVille Syrjälä 		return;
6823a3b3c7dSVille Syrjälä 
6833a3b3c7dSVille Syrjälä 	old_val = I915_READ(GEN8_DE_PORT_IMR);
6843a3b3c7dSVille Syrjälä 
6853a3b3c7dSVille Syrjälä 	new_val = old_val;
6863a3b3c7dSVille Syrjälä 	new_val &= ~interrupt_mask;
6873a3b3c7dSVille Syrjälä 	new_val |= (~enabled_irq_mask & interrupt_mask);
6883a3b3c7dSVille Syrjälä 
6893a3b3c7dSVille Syrjälä 	if (new_val != old_val) {
6903a3b3c7dSVille Syrjälä 		I915_WRITE(GEN8_DE_PORT_IMR, new_val);
6913a3b3c7dSVille Syrjälä 		POSTING_READ(GEN8_DE_PORT_IMR);
6923a3b3c7dSVille Syrjälä 	}
6933a3b3c7dSVille Syrjälä }
6943a3b3c7dSVille Syrjälä 
6953a3b3c7dSVille Syrjälä /**
696013d3752SVille Syrjälä  * bdw_update_pipe_irq - update DE pipe interrupt
697013d3752SVille Syrjälä  * @dev_priv: driver private
698013d3752SVille Syrjälä  * @pipe: pipe whose interrupt to update
699013d3752SVille Syrjälä  * @interrupt_mask: mask of interrupt bits to update
700013d3752SVille Syrjälä  * @enabled_irq_mask: mask of interrupt bits to enable
701013d3752SVille Syrjälä  */
702013d3752SVille Syrjälä void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
703013d3752SVille Syrjälä 			 enum pipe pipe,
704a9c287c9SJani Nikula 			 u32 interrupt_mask,
705a9c287c9SJani Nikula 			 u32 enabled_irq_mask)
706013d3752SVille Syrjälä {
707a9c287c9SJani Nikula 	u32 new_val;
708013d3752SVille Syrjälä 
70967520415SChris Wilson 	lockdep_assert_held(&dev_priv->irq_lock);
710013d3752SVille Syrjälä 
711013d3752SVille Syrjälä 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
712013d3752SVille Syrjälä 
713013d3752SVille Syrjälä 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
714013d3752SVille Syrjälä 		return;
715013d3752SVille Syrjälä 
716013d3752SVille Syrjälä 	new_val = dev_priv->de_irq_mask[pipe];
717013d3752SVille Syrjälä 	new_val &= ~interrupt_mask;
718013d3752SVille Syrjälä 	new_val |= (~enabled_irq_mask & interrupt_mask);
719013d3752SVille Syrjälä 
720013d3752SVille Syrjälä 	if (new_val != dev_priv->de_irq_mask[pipe]) {
721013d3752SVille Syrjälä 		dev_priv->de_irq_mask[pipe] = new_val;
722013d3752SVille Syrjälä 		I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
723013d3752SVille Syrjälä 		POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
724013d3752SVille Syrjälä 	}
725013d3752SVille Syrjälä }
726013d3752SVille Syrjälä 
727013d3752SVille Syrjälä /**
728fee884edSDaniel Vetter  * ibx_display_interrupt_update - update SDEIMR
729fee884edSDaniel Vetter  * @dev_priv: driver private
730fee884edSDaniel Vetter  * @interrupt_mask: mask of interrupt bits to update
731fee884edSDaniel Vetter  * @enabled_irq_mask: mask of interrupt bits to enable
732fee884edSDaniel Vetter  */
73347339cd9SDaniel Vetter void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
734a9c287c9SJani Nikula 				  u32 interrupt_mask,
735a9c287c9SJani Nikula 				  u32 enabled_irq_mask)
736fee884edSDaniel Vetter {
737a9c287c9SJani Nikula 	u32 sdeimr = I915_READ(SDEIMR);
738fee884edSDaniel Vetter 	sdeimr &= ~interrupt_mask;
739fee884edSDaniel Vetter 	sdeimr |= (~enabled_irq_mask & interrupt_mask);
740fee884edSDaniel Vetter 
74115a17aaeSDaniel Vetter 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
74215a17aaeSDaniel Vetter 
74367520415SChris Wilson 	lockdep_assert_held(&dev_priv->irq_lock);
744fee884edSDaniel Vetter 
7459df7575fSJesse Barnes 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
746c67a470bSPaulo Zanoni 		return;
747c67a470bSPaulo Zanoni 
748fee884edSDaniel Vetter 	I915_WRITE(SDEIMR, sdeimr);
749fee884edSDaniel Vetter 	POSTING_READ(SDEIMR);
750fee884edSDaniel Vetter }
7518664281bSPaulo Zanoni 
7526b12ca56SVille Syrjälä u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
7536b12ca56SVille Syrjälä 			      enum pipe pipe)
7547c463586SKeith Packard {
7556b12ca56SVille Syrjälä 	u32 status_mask = dev_priv->pipestat_irq_mask[pipe];
75610c59c51SImre Deak 	u32 enable_mask = status_mask << 16;
75710c59c51SImre Deak 
7586b12ca56SVille Syrjälä 	lockdep_assert_held(&dev_priv->irq_lock);
7596b12ca56SVille Syrjälä 
7606b12ca56SVille Syrjälä 	if (INTEL_GEN(dev_priv) < 5)
7616b12ca56SVille Syrjälä 		goto out;
7626b12ca56SVille Syrjälä 
76310c59c51SImre Deak 	/*
764724a6905SVille Syrjälä 	 * On pipe A we don't support the PSR interrupt yet,
765724a6905SVille Syrjälä 	 * on pipe B and C the same bit MBZ.
76610c59c51SImre Deak 	 */
76710c59c51SImre Deak 	if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
76810c59c51SImre Deak 		return 0;
769724a6905SVille Syrjälä 	/*
770724a6905SVille Syrjälä 	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
771724a6905SVille Syrjälä 	 * A the same bit is for perf counters which we don't use either.
772724a6905SVille Syrjälä 	 */
773724a6905SVille Syrjälä 	if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
774724a6905SVille Syrjälä 		return 0;
77510c59c51SImre Deak 
77610c59c51SImre Deak 	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
77710c59c51SImre Deak 			 SPRITE0_FLIP_DONE_INT_EN_VLV |
77810c59c51SImre Deak 			 SPRITE1_FLIP_DONE_INT_EN_VLV);
77910c59c51SImre Deak 	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
78010c59c51SImre Deak 		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
78110c59c51SImre Deak 	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
78210c59c51SImre Deak 		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;
78310c59c51SImre Deak 
7846b12ca56SVille Syrjälä out:
7856b12ca56SVille Syrjälä 	WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
7866b12ca56SVille Syrjälä 		  status_mask & ~PIPESTAT_INT_STATUS_MASK,
7876b12ca56SVille Syrjälä 		  "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
7886b12ca56SVille Syrjälä 		  pipe_name(pipe), enable_mask, status_mask);
7896b12ca56SVille Syrjälä 
79010c59c51SImre Deak 	return enable_mask;
79110c59c51SImre Deak }
79210c59c51SImre Deak 
7936b12ca56SVille Syrjälä void i915_enable_pipestat(struct drm_i915_private *dev_priv,
7946b12ca56SVille Syrjälä 			  enum pipe pipe, u32 status_mask)
795755e9019SImre Deak {
7966b12ca56SVille Syrjälä 	i915_reg_t reg = PIPESTAT(pipe);
797755e9019SImre Deak 	u32 enable_mask;
798755e9019SImre Deak 
7996b12ca56SVille Syrjälä 	WARN_ONCE(status_mask & ~PIPESTAT_INT_STATUS_MASK,
8006b12ca56SVille Syrjälä 		  "pipe %c: status_mask=0x%x\n",
8016b12ca56SVille Syrjälä 		  pipe_name(pipe), status_mask);
8026b12ca56SVille Syrjälä 
8036b12ca56SVille Syrjälä 	lockdep_assert_held(&dev_priv->irq_lock);
8046b12ca56SVille Syrjälä 	WARN_ON(!intel_irqs_enabled(dev_priv));
8056b12ca56SVille Syrjälä 
8066b12ca56SVille Syrjälä 	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == status_mask)
8076b12ca56SVille Syrjälä 		return;
8086b12ca56SVille Syrjälä 
8096b12ca56SVille Syrjälä 	dev_priv->pipestat_irq_mask[pipe] |= status_mask;
8106b12ca56SVille Syrjälä 	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
8116b12ca56SVille Syrjälä 
8126b12ca56SVille Syrjälä 	I915_WRITE(reg, enable_mask | status_mask);
8136b12ca56SVille Syrjälä 	POSTING_READ(reg);
814755e9019SImre Deak }
815755e9019SImre Deak 
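/*
 * Usage sketch (illustrative): PIPESTAT packs status bits in the low 16 bits
 * with the matching enable bits 16 positions higher, which is why
 * i915_pipestat_enable_mask() starts from "status_mask << 16". A typical
 * caller (see i915_enable_asle_pipestat() below) looks like:
 *
 *	spin_lock_irq(&dev_priv->irq_lock);
 *	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
 *	spin_unlock_irq(&dev_priv->irq_lock);
 */
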
8166b12ca56SVille Syrjälä void i915_disable_pipestat(struct drm_i915_private *dev_priv,
8176b12ca56SVille Syrjälä 			   enum pipe pipe, u32 status_mask)
818755e9019SImre Deak {
8196b12ca56SVille Syrjälä 	i915_reg_t reg = PIPESTAT(pipe);
820755e9019SImre Deak 	u32 enable_mask;
821755e9019SImre Deak 
8226b12ca56SVille Syrjälä 	WARN_ONCE(status_mask & ~PIPESTAT_INT_STATUS_MASK,
8236b12ca56SVille Syrjälä 		  "pipe %c: status_mask=0x%x\n",
8246b12ca56SVille Syrjälä 		  pipe_name(pipe), status_mask);
8256b12ca56SVille Syrjälä 
8266b12ca56SVille Syrjälä 	lockdep_assert_held(&dev_priv->irq_lock);
8276b12ca56SVille Syrjälä 	WARN_ON(!intel_irqs_enabled(dev_priv));
8286b12ca56SVille Syrjälä 
8296b12ca56SVille Syrjälä 	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == 0)
8306b12ca56SVille Syrjälä 		return;
8316b12ca56SVille Syrjälä 
8326b12ca56SVille Syrjälä 	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
8336b12ca56SVille Syrjälä 	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
8346b12ca56SVille Syrjälä 
8356b12ca56SVille Syrjälä 	I915_WRITE(reg, enable_mask | status_mask);
8366b12ca56SVille Syrjälä 	POSTING_READ(reg);
837755e9019SImre Deak }
838755e9019SImre Deak 
839f3e30485SVille Syrjälä static bool i915_has_asle(struct drm_i915_private *dev_priv)
840f3e30485SVille Syrjälä {
841f3e30485SVille Syrjälä 	if (!dev_priv->opregion.asle)
842f3e30485SVille Syrjälä 		return false;
843f3e30485SVille Syrjälä 
844f3e30485SVille Syrjälä 	return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
845f3e30485SVille Syrjälä }
846f3e30485SVille Syrjälä 
847c0e09200SDave Airlie /**
848f49e38ddSJani Nikula  * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
84914bb2c11STvrtko Ursulin  * @dev_priv: i915 device private
85001c66889SZhao Yakui  */
85191d14251STvrtko Ursulin static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
85201c66889SZhao Yakui {
853f3e30485SVille Syrjälä 	if (!i915_has_asle(dev_priv))
854f49e38ddSJani Nikula 		return;
855f49e38ddSJani Nikula 
85613321786SDaniel Vetter 	spin_lock_irq(&dev_priv->irq_lock);
85701c66889SZhao Yakui 
858755e9019SImre Deak 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
85991d14251STvrtko Ursulin 	if (INTEL_GEN(dev_priv) >= 4)
8603b6c42e8SDaniel Vetter 		i915_enable_pipestat(dev_priv, PIPE_A,
861755e9019SImre Deak 				     PIPE_LEGACY_BLC_EVENT_STATUS);
8621ec14ad3SChris Wilson 
86313321786SDaniel Vetter 	spin_unlock_irq(&dev_priv->irq_lock);
86401c66889SZhao Yakui }
86501c66889SZhao Yakui 
866f75f3746SVille Syrjälä /*
867f75f3746SVille Syrjälä  * This timing diagram depicts the video signal in and
868f75f3746SVille Syrjälä  * around the vertical blanking period.
869f75f3746SVille Syrjälä  *
870f75f3746SVille Syrjälä  * Assumptions about the fictitious mode used in this example:
871f75f3746SVille Syrjälä  *  vblank_start >= 3
872f75f3746SVille Syrjälä  *  vsync_start = vblank_start + 1
873f75f3746SVille Syrjälä  *  vsync_end = vblank_start + 2
874f75f3746SVille Syrjälä  *  vtotal = vblank_start + 3
875f75f3746SVille Syrjälä  *
876f75f3746SVille Syrjälä  *           start of vblank:
877f75f3746SVille Syrjälä  *           latch double buffered registers
878f75f3746SVille Syrjälä  *           increment frame counter (ctg+)
879f75f3746SVille Syrjälä  *           generate start of vblank interrupt (gen4+)
880f75f3746SVille Syrjälä  *           |
881f75f3746SVille Syrjälä  *           |          frame start:
882f75f3746SVille Syrjälä  *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
883f75f3746SVille Syrjälä  *           |          may be shifted forward 1-3 extra lines via PIPECONF
884f75f3746SVille Syrjälä  *           |          |
885f75f3746SVille Syrjälä  *           |          |  start of vsync:
886f75f3746SVille Syrjälä  *           |          |  generate vsync interrupt
887f75f3746SVille Syrjälä  *           |          |  |
888f75f3746SVille Syrjälä  * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
889f75f3746SVille Syrjälä  *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
890f75f3746SVille Syrjälä  * ----va---> <-----------------vb--------------------> <--------va-------------
891f75f3746SVille Syrjälä  *       |          |       <----vs----->                     |
892f75f3746SVille Syrjälä  * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
893f75f3746SVille Syrjälä  * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
894f75f3746SVille Syrjälä  * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
895f75f3746SVille Syrjälä  *       |          |                                         |
896f75f3746SVille Syrjälä  *       last visible pixel                                   first visible pixel
897f75f3746SVille Syrjälä  *                  |                                         increment frame counter (gen3/4)
898f75f3746SVille Syrjälä  *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
899f75f3746SVille Syrjälä  *
900f75f3746SVille Syrjälä  * x  = horizontal active
901f75f3746SVille Syrjälä  * _  = horizontal blanking
902f75f3746SVille Syrjälä  * hs = horizontal sync
903f75f3746SVille Syrjälä  * va = vertical active
904f75f3746SVille Syrjälä  * vb = vertical blanking
905f75f3746SVille Syrjälä  * vs = vertical sync
906f75f3746SVille Syrjälä  * vbs = vblank_start (number)
907f75f3746SVille Syrjälä  *
908f75f3746SVille Syrjälä  * Summary:
909f75f3746SVille Syrjälä  * - most events happen at the start of horizontal sync
910f75f3746SVille Syrjälä  * - frame start happens at the start of horizontal blank, 1-4 lines
911f75f3746SVille Syrjälä  *   (depending on PIPECONF settings) after the start of vblank
912f75f3746SVille Syrjälä  * - gen3/4 pixel and frame counter are synchronized with the start
913f75f3746SVille Syrjälä  *   of horizontal active on the first line of vertical active
914f75f3746SVille Syrjälä  */
915f75f3746SVille Syrjälä 
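/*
 * Worked example (editorial addition) for the frame-counter fixup in
 * i915_get_vblank_counter() below: on gen3/4 the hardware frame counter
 * increments at the start of active rather than at vblank_start, so the code
 * compares the pixel counter against the hsync-adjusted vblank start and adds
 * one when the pixel counter has already passed it, i.e.
 * (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff -- the value the
 * caller sees therefore flips at the start of vblank, as expected.
 */
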
91642f52ef8SKeith Packard /* Called from drm generic code, passed a 'crtc', which
91742f52ef8SKeith Packard  * we use as a pipe index
91842f52ef8SKeith Packard  */
91988e72717SThierry Reding static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
9200a3e67a4SJesse Barnes {
921fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
92232db0b65SVille Syrjälä 	struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
92332db0b65SVille Syrjälä 	const struct drm_display_mode *mode = &vblank->hwmode;
924f0f59a00SVille Syrjälä 	i915_reg_t high_frame, low_frame;
9250b2a8e09SVille Syrjälä 	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
926694e409dSVille Syrjälä 	unsigned long irqflags;
927391f75e2SVille Syrjälä 
92832db0b65SVille Syrjälä 	/*
92932db0b65SVille Syrjälä 	 * On i965gm TV output the frame counter only works up to
93032db0b65SVille Syrjälä 	 * the point when we enable the TV encoder. After that the
93132db0b65SVille Syrjälä 	 * frame counter ceases to work and reads zero. We need a
93232db0b65SVille Syrjälä 	 * vblank wait before enabling the TV encoder and so we
93332db0b65SVille Syrjälä 	 * have to enable vblank interrupts while the frame counter
93432db0b65SVille Syrjälä 	 * is still in a working state. However the core vblank code
93532db0b65SVille Syrjälä 	 * does not like us returning non-zero frame counter values
93632db0b65SVille Syrjälä 	 * when we've told it that we don't have a working frame
93732db0b65SVille Syrjälä 	 * counter. Thus we must stop non-zero values leaking out.
93832db0b65SVille Syrjälä 	 */
93932db0b65SVille Syrjälä 	if (!vblank->max_vblank_count)
94032db0b65SVille Syrjälä 		return 0;
94132db0b65SVille Syrjälä 
9420b2a8e09SVille Syrjälä 	htotal = mode->crtc_htotal;
9430b2a8e09SVille Syrjälä 	hsync_start = mode->crtc_hsync_start;
9440b2a8e09SVille Syrjälä 	vbl_start = mode->crtc_vblank_start;
9450b2a8e09SVille Syrjälä 	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
9460b2a8e09SVille Syrjälä 		vbl_start = DIV_ROUND_UP(vbl_start, 2);
947391f75e2SVille Syrjälä 
9480b2a8e09SVille Syrjälä 	/* Convert to pixel count */
9490b2a8e09SVille Syrjälä 	vbl_start *= htotal;
9500b2a8e09SVille Syrjälä 
9510b2a8e09SVille Syrjälä 	/* Start of vblank event occurs at start of hsync */
9520b2a8e09SVille Syrjälä 	vbl_start -= htotal - hsync_start;
9530b2a8e09SVille Syrjälä 
9549db4a9c7SJesse Barnes 	high_frame = PIPEFRAME(pipe);
9559db4a9c7SJesse Barnes 	low_frame = PIPEFRAMEPIXEL(pipe);
9565eddb70bSChris Wilson 
957694e409dSVille Syrjälä 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
958694e409dSVille Syrjälä 
9590a3e67a4SJesse Barnes 	/*
9600a3e67a4SJesse Barnes 	 * High & low register fields aren't synchronized, so make sure
9610a3e67a4SJesse Barnes 	 * we get a low value that's stable across two reads of the high
9620a3e67a4SJesse Barnes 	 * register.
9630a3e67a4SJesse Barnes 	 */
9640a3e67a4SJesse Barnes 	do {
965694e409dSVille Syrjälä 		high1 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK;
966694e409dSVille Syrjälä 		low   = I915_READ_FW(low_frame);
967694e409dSVille Syrjälä 		high2 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK;
9680a3e67a4SJesse Barnes 	} while (high1 != high2);
9690a3e67a4SJesse Barnes 
970694e409dSVille Syrjälä 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
971694e409dSVille Syrjälä 
9725eddb70bSChris Wilson 	high1 >>= PIPE_FRAME_HIGH_SHIFT;
973391f75e2SVille Syrjälä 	pixel = low & PIPE_PIXEL_MASK;
9745eddb70bSChris Wilson 	low >>= PIPE_FRAME_LOW_SHIFT;
975391f75e2SVille Syrjälä 
976391f75e2SVille Syrjälä 	/*
977391f75e2SVille Syrjälä 	 * The frame counter increments at beginning of active.
978391f75e2SVille Syrjälä 	 * Cook up a vblank counter by also checking the pixel
979391f75e2SVille Syrjälä 	 * counter against vblank start.
980391f75e2SVille Syrjälä 	 */
981edc08d0aSVille Syrjälä 	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
9820a3e67a4SJesse Barnes }
9830a3e67a4SJesse Barnes 
984974e59baSDave Airlie static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
9859880b7a5SJesse Barnes {
986fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
9879880b7a5SJesse Barnes 
988649636efSVille Syrjälä 	return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
9899880b7a5SJesse Barnes }
9909880b7a5SJesse Barnes 
991aec0246fSUma Shankar /*
992aec0246fSUma Shankar  * On certain encoders on certain platforms, the pipe
993aec0246fSUma Shankar  * scanline register cannot be used to get the scanline,
994aec0246fSUma Shankar  * either because the timings are driven from the PORT or
995aec0246fSUma Shankar  * because of issues with scanline register updates.
996aec0246fSUma Shankar  * This function will use Framestamp and current
997aec0246fSUma Shankar  * timestamp registers to calculate the scanline.
998aec0246fSUma Shankar  */
999aec0246fSUma Shankar static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc)
1000aec0246fSUma Shankar {
1001aec0246fSUma Shankar 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1002aec0246fSUma Shankar 	struct drm_vblank_crtc *vblank =
1003aec0246fSUma Shankar 		&crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
1004aec0246fSUma Shankar 	const struct drm_display_mode *mode = &vblank->hwmode;
1005aec0246fSUma Shankar 	u32 vblank_start = mode->crtc_vblank_start;
1006aec0246fSUma Shankar 	u32 vtotal = mode->crtc_vtotal;
1007aec0246fSUma Shankar 	u32 htotal = mode->crtc_htotal;
1008aec0246fSUma Shankar 	u32 clock = mode->crtc_clock;
1009aec0246fSUma Shankar 	u32 scanline, scan_prev_time, scan_curr_time, scan_post_time;
1010aec0246fSUma Shankar 
1011aec0246fSUma Shankar 	/*
1012aec0246fSUma Shankar 	 * To avoid the race condition where we might cross into the
1013aec0246fSUma Shankar 	 * next vblank just between the PIPE_FRMTMSTMP and TIMESTAMP_CTR
1014aec0246fSUma Shankar 	 * reads, we make sure we read PIPE_FRMTMSTMP and TIMESTAMP_CTR
1015aec0246fSUma Shankar 	 * during the same frame.
1016aec0246fSUma Shankar 	 */
1017aec0246fSUma Shankar 	do {
1018aec0246fSUma Shankar 		/*
1019aec0246fSUma Shankar 		 * This field provides read back of the display
1020aec0246fSUma Shankar 		 * pipe frame time stamp. The time stamp value
1021aec0246fSUma Shankar 		 * is sampled at every start of vertical blank.
1022aec0246fSUma Shankar 		 */
1023aec0246fSUma Shankar 		scan_prev_time = I915_READ_FW(PIPE_FRMTMSTMP(crtc->pipe));
1024aec0246fSUma Shankar 
1025aec0246fSUma Shankar 		/*
1026aec0246fSUma Shankar 		 * The TIMESTAMP_CTR register has the current
1027aec0246fSUma Shankar 		 * time stamp value.
1028aec0246fSUma Shankar 		 */
1029aec0246fSUma Shankar 		scan_curr_time = I915_READ_FW(IVB_TIMESTAMP_CTR);
1030aec0246fSUma Shankar 
1031aec0246fSUma Shankar 		scan_post_time = I915_READ_FW(PIPE_FRMTMSTMP(crtc->pipe));
1032aec0246fSUma Shankar 	} while (scan_post_time != scan_prev_time);
1033aec0246fSUma Shankar 
1034aec0246fSUma Shankar 	scanline = div_u64(mul_u32_u32(scan_curr_time - scan_prev_time,
1035aec0246fSUma Shankar 					clock), 1000 * htotal);
1036aec0246fSUma Shankar 	scanline = min(scanline, vtotal - 1);
1037aec0246fSUma Shankar 	scanline = (scanline + vblank_start) % vtotal;
1038aec0246fSUma Shankar 
1039aec0246fSUma Shankar 	return scanline;
1040aec0246fSUma Shankar }
1041aec0246fSUma Shankar 
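/*
 * Worked example for the timestamp-to-scanline conversion above (illustrative
 * numbers; this assumes the timestamp delta is in microseconds and crtc_clock
 * in kHz, which is what the 1000 * htotal divisor implies): with
 * crtc_clock = 148500 and crtc_htotal = 2200 one line lasts ~14.8 us, so a
 * delta of ~1481 us gives div_u64(mul_u32_u32(1481, 148500), 1000 * 2200)
 * ~= 100, i.e. the pipe is ~100 lines past crtc_vblank_start (wrapped
 * modulo vtotal).
 */
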
104275aa3f63SVille Syrjälä /* I915_READ_FW, only for fast reads of display block, no need for forcewake etc. */
1043a225f079SVille Syrjälä static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
1044a225f079SVille Syrjälä {
1045a225f079SVille Syrjälä 	struct drm_device *dev = crtc->base.dev;
1046fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
10475caa0feaSDaniel Vetter 	const struct drm_display_mode *mode;
10485caa0feaSDaniel Vetter 	struct drm_vblank_crtc *vblank;
1049a225f079SVille Syrjälä 	enum pipe pipe = crtc->pipe;
105080715b2fSVille Syrjälä 	int position, vtotal;
1051a225f079SVille Syrjälä 
105272259536SVille Syrjälä 	if (!crtc->active)
105372259536SVille Syrjälä 		return -1;
105472259536SVille Syrjälä 
10555caa0feaSDaniel Vetter 	vblank = &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
10565caa0feaSDaniel Vetter 	mode = &vblank->hwmode;
10575caa0feaSDaniel Vetter 
1058aec0246fSUma Shankar 	if (mode->private_flags & I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP)
1059aec0246fSUma Shankar 		return __intel_get_crtc_scanline_from_timestamp(crtc);
1060aec0246fSUma Shankar 
106180715b2fSVille Syrjälä 	vtotal = mode->crtc_vtotal;
1062a225f079SVille Syrjälä 	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
1063a225f079SVille Syrjälä 		vtotal /= 2;
1064a225f079SVille Syrjälä 
1065cf819effSLucas De Marchi 	if (IS_GEN(dev_priv, 2))
106675aa3f63SVille Syrjälä 		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
1067a225f079SVille Syrjälä 	else
106875aa3f63SVille Syrjälä 		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
1069a225f079SVille Syrjälä 
1070a225f079SVille Syrjälä 	/*
107141b578fbSJesse Barnes 	 * On HSW, the DSL reg (0x70000) appears to return 0 if we
107241b578fbSJesse Barnes 	 * read it just before the start of vblank.  So try it again
107341b578fbSJesse Barnes 	 * so we don't accidentally end up spanning a vblank frame
107441b578fbSJesse Barnes 	 * increment, causing the pipe_update_end() code to squawk at us.
107541b578fbSJesse Barnes 	 *
107641b578fbSJesse Barnes 	 * The nature of this problem means we can't simply check the ISR
107741b578fbSJesse Barnes 	 * bit and return the vblank start value; nor can we use the scanline
107841b578fbSJesse Barnes 	 * debug register in the transcoder as it appears to have the same
107941b578fbSJesse Barnes 	 * problem.  We may need to extend this to include other platforms,
108041b578fbSJesse Barnes 	 * but so far testing only shows the problem on HSW.
108141b578fbSJesse Barnes 	 */
108291d14251STvrtko Ursulin 	if (HAS_DDI(dev_priv) && !position) {
108341b578fbSJesse Barnes 		int i, temp;
108441b578fbSJesse Barnes 
108541b578fbSJesse Barnes 		for (i = 0; i < 100; i++) {
108641b578fbSJesse Barnes 			udelay(1);
1087707bdd3fSVille Syrjälä 			temp = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
108841b578fbSJesse Barnes 			if (temp != position) {
108941b578fbSJesse Barnes 				position = temp;
109041b578fbSJesse Barnes 				break;
109141b578fbSJesse Barnes 			}
109241b578fbSJesse Barnes 		}
109341b578fbSJesse Barnes 	}
109441b578fbSJesse Barnes 
109541b578fbSJesse Barnes 	/*
109680715b2fSVille Syrjälä 	 * See update_scanline_offset() for the details on the
109780715b2fSVille Syrjälä 	 * scanline_offset adjustment.
1098a225f079SVille Syrjälä 	 */
109980715b2fSVille Syrjälä 	return (position + crtc->scanline_offset) % vtotal;
1100a225f079SVille Syrjälä }
1101a225f079SVille Syrjälä 
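/*
 * Query the current scanout position for the given pipe. A sketch of the
 * assumed wiring (the actual assignment lives in the irq init code, not
 * shown here): this is plugged into drm's high precision vblank
 * timestamping, roughly as
 *
 *	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
 *	dev->driver->get_vblank_timestamp =
 *		drm_calc_vbltimestamp_from_scanoutpos;
 *
 * so that the drm core can convert the returned position into a precise
 * vblank timestamp using the optional stime/etime samples.
 */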
11021bf6ad62SDaniel Vetter static bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
11031bf6ad62SDaniel Vetter 				     bool in_vblank_irq, int *vpos, int *hpos,
11043bb403bfSVille Syrjälä 				     ktime_t *stime, ktime_t *etime,
11053bb403bfSVille Syrjälä 				     const struct drm_display_mode *mode)
11060af7e4dfSMario Kleiner {
1107fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
110898187836SVille Syrjälä 	struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv,
110998187836SVille Syrjälä 								pipe);
11103aa18df8SVille Syrjälä 	int position;
111178e8fc6bSVille Syrjälä 	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
1112ad3543edSMario Kleiner 	unsigned long irqflags;
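	/*
	 * gen2, g4x and gen5+ (or modes that explicitly ask for it) read
	 * the scanline (DSL) counter below; the remaining platforms read
	 * the frame pixel counter instead, which also yields a horizontal
	 * position.
	 */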
11138a920e24SVille Syrjälä 	bool use_scanline_counter = INTEL_GEN(dev_priv) >= 5 ||
11148a920e24SVille Syrjälä 		IS_G4X(dev_priv) || IS_GEN(dev_priv, 2) ||
11158a920e24SVille Syrjälä 		mode->private_flags & I915_MODE_FLAG_USE_SCANLINE_COUNTER;
11160af7e4dfSMario Kleiner 
1117fc467a22SMaarten Lankhorst 	if (WARN_ON(!mode->crtc_clock)) {
11180af7e4dfSMario Kleiner 		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
11199db4a9c7SJesse Barnes 				 "pipe %c\n", pipe_name(pipe));
11201bf6ad62SDaniel Vetter 		return false;
11210af7e4dfSMario Kleiner 	}
11220af7e4dfSMario Kleiner 
1123c2baf4b7SVille Syrjälä 	htotal = mode->crtc_htotal;
112478e8fc6bSVille Syrjälä 	hsync_start = mode->crtc_hsync_start;
1125c2baf4b7SVille Syrjälä 	vtotal = mode->crtc_vtotal;
1126c2baf4b7SVille Syrjälä 	vbl_start = mode->crtc_vblank_start;
1127c2baf4b7SVille Syrjälä 	vbl_end = mode->crtc_vblank_end;
11280af7e4dfSMario Kleiner 
1129d31faf65SVille Syrjälä 	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
1130d31faf65SVille Syrjälä 		vbl_start = DIV_ROUND_UP(vbl_start, 2);
1131d31faf65SVille Syrjälä 		vbl_end /= 2;
1132d31faf65SVille Syrjälä 		vtotal /= 2;
1133d31faf65SVille Syrjälä 	}
1134d31faf65SVille Syrjälä 
1135ad3543edSMario Kleiner 	/*
1136ad3543edSMario Kleiner 	 * Lock uncore.lock, as we will do multiple timing critical raw
1137ad3543edSMario Kleiner 	 * register reads, potentially with preemption disabled, so the
1138ad3543edSMario Kleiner 	 * following code must not block on uncore.lock.
1139ad3543edSMario Kleiner 	 */
1140ad3543edSMario Kleiner 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
1141ad3543edSMario Kleiner 
1142ad3543edSMario Kleiner 	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
1143ad3543edSMario Kleiner 
1144ad3543edSMario Kleiner 	/* Get optional system timestamp before query. */
1145ad3543edSMario Kleiner 	if (stime)
1146ad3543edSMario Kleiner 		*stime = ktime_get();
1147ad3543edSMario Kleiner 
11488a920e24SVille Syrjälä 	if (use_scanline_counter) {
11490af7e4dfSMario Kleiner 		/* No obvious pixelcount register. Only query vertical
11500af7e4dfSMario Kleiner 		 * scanout position from Display scan line register.
11510af7e4dfSMario Kleiner 		 */
1152a225f079SVille Syrjälä 		position = __intel_get_crtc_scanline(intel_crtc);
11530af7e4dfSMario Kleiner 	} else {
11540af7e4dfSMario Kleiner 		/* Have access to pixelcount since start of frame.
11550af7e4dfSMario Kleiner 		 * We can split this into vertical and horizontal
11560af7e4dfSMario Kleiner 		 * scanout position.
11570af7e4dfSMario Kleiner 		 */
115875aa3f63SVille Syrjälä 		position = (I915_READ_FW(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
11590af7e4dfSMario Kleiner 
11603aa18df8SVille Syrjälä 		/* convert to pixel counts */
11613aa18df8SVille Syrjälä 		vbl_start *= htotal;
11623aa18df8SVille Syrjälä 		vbl_end *= htotal;
11633aa18df8SVille Syrjälä 		vtotal *= htotal;
116478e8fc6bSVille Syrjälä 
116578e8fc6bSVille Syrjälä 		/*
11667e78f1cbSVille Syrjälä 		 * In interlaced modes, the pixel counter counts all pixels,
11677e78f1cbSVille Syrjälä 		 * so one field will have htotal more pixels. In order to avoid
11687e78f1cbSVille Syrjälä 		 * the reported position from jumping backwards when the pixel
11697e78f1cbSVille Syrjälä 		 * counter is beyond the length of the shorter field, just
11707e78f1cbSVille Syrjälä 		 * clamp the position the length of the shorter field. This
11717e78f1cbSVille Syrjälä 		 * clamp the position to the length of the shorter field. This
11727e78f1cbSVille Syrjälä 		 * the scanline counter doesn't count the two half lines.
11737e78f1cbSVille Syrjälä 		 */
11747e78f1cbSVille Syrjälä 		if (position >= vtotal)
11757e78f1cbSVille Syrjälä 			position = vtotal - 1;
11767e78f1cbSVille Syrjälä 
11777e78f1cbSVille Syrjälä 		/*
117878e8fc6bSVille Syrjälä 		 * Start of vblank interrupt is triggered at start of hsync,
117978e8fc6bSVille Syrjälä 		 * just prior to the first active line of vblank. However we
118078e8fc6bSVille Syrjälä 		 * consider lines to start at the leading edge of horizontal
118178e8fc6bSVille Syrjälä 		 * active. So, should we get here before we've crossed into
118278e8fc6bSVille Syrjälä 		 * the horizontal active of the first line in vblank, we would
118378e8fc6bSVille Syrjälä 		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
118478e8fc6bSVille Syrjälä 		 * always add htotal-hsync_start to the current pixel position.
118578e8fc6bSVille Syrjälä 		 */
118678e8fc6bSVille Syrjälä 		position = (position + htotal - hsync_start) % vtotal;
11873aa18df8SVille Syrjälä 	}
11883aa18df8SVille Syrjälä 
1189ad3543edSMario Kleiner 	/* Get optional system timestamp after query. */
1190ad3543edSMario Kleiner 	if (etime)
1191ad3543edSMario Kleiner 		*etime = ktime_get();
1192ad3543edSMario Kleiner 
1193ad3543edSMario Kleiner 	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
1194ad3543edSMario Kleiner 
1195ad3543edSMario Kleiner 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
1196ad3543edSMario Kleiner 
11973aa18df8SVille Syrjälä 	/*
11983aa18df8SVille Syrjälä 	 * While in vblank, position will be negative,
11993aa18df8SVille Syrjälä 	 * counting up and reaching 0 at vbl_end. Outside
12003aa18df8SVille Syrjälä 	 * vblank, position will be positive, counting
12013aa18df8SVille Syrjälä 	 * up since vbl_end.
12023aa18df8SVille Syrjälä 	 */
12033aa18df8SVille Syrjälä 	if (position >= vbl_start)
12043aa18df8SVille Syrjälä 		position -= vbl_end;
12053aa18df8SVille Syrjälä 	else
12063aa18df8SVille Syrjälä 		position += vtotal - vbl_end;
12073aa18df8SVille Syrjälä 
12088a920e24SVille Syrjälä 	if (use_scanline_counter) {
12093aa18df8SVille Syrjälä 		*vpos = position;
12103aa18df8SVille Syrjälä 		*hpos = 0;
12113aa18df8SVille Syrjälä 	} else {
12120af7e4dfSMario Kleiner 		*vpos = position / htotal;
12130af7e4dfSMario Kleiner 		*hpos = position - (*vpos * htotal);
12140af7e4dfSMario Kleiner 	}
12150af7e4dfSMario Kleiner 
12161bf6ad62SDaniel Vetter 	return true;
12170af7e4dfSMario Kleiner }
12180af7e4dfSMario Kleiner 
1219a225f079SVille Syrjälä int intel_get_crtc_scanline(struct intel_crtc *crtc)
1220a225f079SVille Syrjälä {
1221fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1222a225f079SVille Syrjälä 	unsigned long irqflags;
1223a225f079SVille Syrjälä 	int position;
1224a225f079SVille Syrjälä 
1225a225f079SVille Syrjälä 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
1226a225f079SVille Syrjälä 	position = __intel_get_crtc_scanline(crtc);
1227a225f079SVille Syrjälä 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
1228a225f079SVille Syrjälä 
1229a225f079SVille Syrjälä 	return position;
1230a225f079SVille Syrjälä }
1231a225f079SVille Syrjälä 
123291d14251STvrtko Ursulin static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv)
1233f97108d1SJesse Barnes {
1234b5b72e89SMatthew Garrett 	u32 busy_up, busy_down, max_avg, min_avg;
12359270388eSDaniel Vetter 	u8 new_delay;
12369270388eSDaniel Vetter 
1237d0ecd7e2SDaniel Vetter 	spin_lock(&mchdev_lock);
1238f97108d1SJesse Barnes 
123973edd18fSDaniel Vetter 	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
124073edd18fSDaniel Vetter 
124120e4d407SDaniel Vetter 	new_delay = dev_priv->ips.cur_delay;
12429270388eSDaniel Vetter 
12437648fa99SJesse Barnes 	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
1244b5b72e89SMatthew Garrett 	busy_up = I915_READ(RCPREVBSYTUPAVG);
1245b5b72e89SMatthew Garrett 	busy_down = I915_READ(RCPREVBSYTDNAVG);
1246f97108d1SJesse Barnes 	max_avg = I915_READ(RCBMAXAVG);
1247f97108d1SJesse Barnes 	min_avg = I915_READ(RCBMINAVG);
1248f97108d1SJesse Barnes 
1249f97108d1SJesse Barnes 	/* Handle RCS change request from hw */
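	/*
	 * Note that smaller delay values mean higher frequencies here:
	 * stepping cur_delay down (bounded by ips.max_delay) speeds the
	 * GPU up, stepping it up (bounded by ips.min_delay) slows it down.
	 */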
1250b5b72e89SMatthew Garrett 	if (busy_up > max_avg) {
125120e4d407SDaniel Vetter 		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
125220e4d407SDaniel Vetter 			new_delay = dev_priv->ips.cur_delay - 1;
125320e4d407SDaniel Vetter 		if (new_delay < dev_priv->ips.max_delay)
125420e4d407SDaniel Vetter 			new_delay = dev_priv->ips.max_delay;
1255b5b72e89SMatthew Garrett 	} else if (busy_down < min_avg) {
125620e4d407SDaniel Vetter 		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
125720e4d407SDaniel Vetter 			new_delay = dev_priv->ips.cur_delay + 1;
125820e4d407SDaniel Vetter 		if (new_delay > dev_priv->ips.min_delay)
125920e4d407SDaniel Vetter 			new_delay = dev_priv->ips.min_delay;
1260f97108d1SJesse Barnes 	}
1261f97108d1SJesse Barnes 
126291d14251STvrtko Ursulin 	if (ironlake_set_drps(dev_priv, new_delay))
126320e4d407SDaniel Vetter 		dev_priv->ips.cur_delay = new_delay;
1264f97108d1SJesse Barnes 
1265d0ecd7e2SDaniel Vetter 	spin_unlock(&mchdev_lock);
12669270388eSDaniel Vetter 
1267f97108d1SJesse Barnes 	return;
1268f97108d1SJesse Barnes }
1269f97108d1SJesse Barnes 
127043cf3bf0SChris Wilson static void vlv_c0_read(struct drm_i915_private *dev_priv,
127143cf3bf0SChris Wilson 			struct intel_rps_ei *ei)
127231685c25SDeepak S {
1273679cb6c1SMika Kuoppala 	ei->ktime = ktime_get_raw();
127443cf3bf0SChris Wilson 	ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT);
127543cf3bf0SChris Wilson 	ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
127631685c25SDeepak S }
127731685c25SDeepak S 
127843cf3bf0SChris Wilson void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
127943cf3bf0SChris Wilson {
1280562d9baeSSagar Arun Kamble 	memset(&dev_priv->gt_pm.rps.ei, 0, sizeof(dev_priv->gt_pm.rps.ei));
128143cf3bf0SChris Wilson }
128243cf3bf0SChris Wilson 
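/*
 * Turn VLV/CHV C0 residency counters into RPS up/down threshold events:
 * take the max of render and media busyness over the last evaluation
 * interval and compare it against the configured up/down thresholds.
 * The "wa" in the name hints that this stands in for the hardware
 * threshold interrupts on these platforms.
 */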
128343cf3bf0SChris Wilson static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
128443cf3bf0SChris Wilson {
1285562d9baeSSagar Arun Kamble 	struct intel_rps *rps = &dev_priv->gt_pm.rps;
1286562d9baeSSagar Arun Kamble 	const struct intel_rps_ei *prev = &rps->ei;
128743cf3bf0SChris Wilson 	struct intel_rps_ei now;
128843cf3bf0SChris Wilson 	u32 events = 0;
128943cf3bf0SChris Wilson 
1290e0e8c7cbSChris Wilson 	if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0)
129143cf3bf0SChris Wilson 		return 0;
129243cf3bf0SChris Wilson 
129343cf3bf0SChris Wilson 	vlv_c0_read(dev_priv, &now);
129431685c25SDeepak S 
1295679cb6c1SMika Kuoppala 	if (prev->ktime) {
1296e0e8c7cbSChris Wilson 		u64 time, c0;
1297569884e3SChris Wilson 		u32 render, media;
1298e0e8c7cbSChris Wilson 
1299679cb6c1SMika Kuoppala 		time = ktime_us_delta(now.ktime, prev->ktime);
13008f68d591SChris Wilson 
1301e0e8c7cbSChris Wilson 		time *= dev_priv->czclk_freq;
1302e0e8c7cbSChris Wilson 
1303e0e8c7cbSChris Wilson 		/* Workload can be split between render + media,
1304e0e8c7cbSChris Wilson 		 * e.g. SwapBuffers being blitted in X after being rendered in
1305e0e8c7cbSChris Wilson 		 * mesa. To account for this we need to combine both engines
1306e0e8c7cbSChris Wilson 		 * into our activity counter.
1307e0e8c7cbSChris Wilson 		 */
1308569884e3SChris Wilson 		render = now.render_c0 - prev->render_c0;
1309569884e3SChris Wilson 		media = now.media_c0 - prev->media_c0;
1310569884e3SChris Wilson 		c0 = max(render, media);
13116b7f6aa7SMika Kuoppala 		c0 *= 1000 * 100 << 8; /* to usecs and scale to threshold% */
1312e0e8c7cbSChris Wilson 
131360548c55SChris Wilson 		if (c0 > time * rps->power.up_threshold)
1314e0e8c7cbSChris Wilson 			events = GEN6_PM_RP_UP_THRESHOLD;
131560548c55SChris Wilson 		else if (c0 < time * rps->power.down_threshold)
1316e0e8c7cbSChris Wilson 			events = GEN6_PM_RP_DOWN_THRESHOLD;
131731685c25SDeepak S 	}
131831685c25SDeepak S 
1319562d9baeSSagar Arun Kamble 	rps->ei = now;
132043cf3bf0SChris Wilson 	return events;
132131685c25SDeepak S }
132231685c25SDeepak S 
13234912d041SBen Widawsky static void gen6_pm_rps_work(struct work_struct *work)
13243b8d8d91SJesse Barnes {
13252d1013ddSJani Nikula 	struct drm_i915_private *dev_priv =
1326562d9baeSSagar Arun Kamble 		container_of(work, struct drm_i915_private, gt_pm.rps.work);
1327562d9baeSSagar Arun Kamble 	struct intel_rps *rps = &dev_priv->gt_pm.rps;
13287c0a16adSChris Wilson 	bool client_boost = false;
13298d3afd7dSChris Wilson 	int new_delay, adj, min, max;
13307c0a16adSChris Wilson 	u32 pm_iir = 0;
13313b8d8d91SJesse Barnes 
133259cdb63dSDaniel Vetter 	spin_lock_irq(&dev_priv->irq_lock);
1333562d9baeSSagar Arun Kamble 	if (rps->interrupts_enabled) {
1334562d9baeSSagar Arun Kamble 		pm_iir = fetch_and_zero(&rps->pm_iir);
1335562d9baeSSagar Arun Kamble 		client_boost = atomic_read(&rps->num_waiters);
1336d4d70aa5SImre Deak 	}
133759cdb63dSDaniel Vetter 	spin_unlock_irq(&dev_priv->irq_lock);
13384912d041SBen Widawsky 
133960611c13SPaulo Zanoni 	/* Make sure we didn't queue anything we're not going to process. */
1340a6706b45SDeepak S 	WARN_ON(pm_iir & ~dev_priv->pm_rps_events);
13418d3afd7dSChris Wilson 	if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
13427c0a16adSChris Wilson 		goto out;
13433b8d8d91SJesse Barnes 
1344ebb5eb7dSChris Wilson 	mutex_lock(&rps->lock);
13457b9e0ae6SChris Wilson 
134643cf3bf0SChris Wilson 	pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);
134743cf3bf0SChris Wilson 
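	/*
	 * Pick the next frequency: start from the current frequency and
	 * apply an adjustment step that grows (doubles) on consecutive
	 * up/down events, is reset on client boosts, timeouts and unknown
	 * events, and is zeroed once the soft limits are reached.
	 */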
1348562d9baeSSagar Arun Kamble 	adj = rps->last_adj;
1349562d9baeSSagar Arun Kamble 	new_delay = rps->cur_freq;
1350562d9baeSSagar Arun Kamble 	min = rps->min_freq_softlimit;
1351562d9baeSSagar Arun Kamble 	max = rps->max_freq_softlimit;
13527b92c1bdSChris Wilson 	if (client_boost)
1353562d9baeSSagar Arun Kamble 		max = rps->max_freq;
1354562d9baeSSagar Arun Kamble 	if (client_boost && new_delay < rps->boost_freq) {
1355562d9baeSSagar Arun Kamble 		new_delay = rps->boost_freq;
13568d3afd7dSChris Wilson 		adj = 0;
13578d3afd7dSChris Wilson 	} else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
1358dd75fdc8SChris Wilson 		if (adj > 0)
1359dd75fdc8SChris Wilson 			adj *= 2;
1360edcf284bSChris Wilson 		else /* CHV needs even encode values */
1361edcf284bSChris Wilson 			adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;
13627e79a683SSagar Arun Kamble 
1363562d9baeSSagar Arun Kamble 		if (new_delay >= rps->max_freq_softlimit)
13647e79a683SSagar Arun Kamble 			adj = 0;
13657b92c1bdSChris Wilson 	} else if (client_boost) {
1366f5a4c67dSChris Wilson 		adj = 0;
1367dd75fdc8SChris Wilson 	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
1368562d9baeSSagar Arun Kamble 		if (rps->cur_freq > rps->efficient_freq)
1369562d9baeSSagar Arun Kamble 			new_delay = rps->efficient_freq;
1370562d9baeSSagar Arun Kamble 		else if (rps->cur_freq > rps->min_freq_softlimit)
1371562d9baeSSagar Arun Kamble 			new_delay = rps->min_freq_softlimit;
1372dd75fdc8SChris Wilson 		adj = 0;
1373dd75fdc8SChris Wilson 	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
1374dd75fdc8SChris Wilson 		if (adj < 0)
1375dd75fdc8SChris Wilson 			adj *= 2;
1376edcf284bSChris Wilson 		else /* CHV needs even encode values */
1377edcf284bSChris Wilson 			adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;
13787e79a683SSagar Arun Kamble 
1379562d9baeSSagar Arun Kamble 		if (new_delay <= rps->min_freq_softlimit)
13807e79a683SSagar Arun Kamble 			adj = 0;
1381dd75fdc8SChris Wilson 	} else { /* unknown event */
1382edcf284bSChris Wilson 		adj = 0;
1383dd75fdc8SChris Wilson 	}
13843b8d8d91SJesse Barnes 
1385562d9baeSSagar Arun Kamble 	rps->last_adj = adj;
1386edcf284bSChris Wilson 
13872a8862d2SChris Wilson 	/*
13882a8862d2SChris Wilson 	 * Limit deboosting and boosting to keep ourselves at the extremes
13892a8862d2SChris Wilson 	 * when in the respective power modes (i.e. slowly decrease frequencies
13902a8862d2SChris Wilson 	 * while in the HIGH_POWER zone and slowly increase frequencies while
13912a8862d2SChris Wilson 	 * in the LOW_POWER zone). On idle, we will hit the timeout and drop
13922a8862d2SChris Wilson 	 * to the next level quickly, and conversely if busy we expect to
13932a8862d2SChris Wilson 	 * hit a waitboost and rapidly switch into max power.
13942a8862d2SChris Wilson 	 */
13952a8862d2SChris Wilson 	if ((adj < 0 && rps->power.mode == HIGH_POWER) ||
13962a8862d2SChris Wilson 	    (adj > 0 && rps->power.mode == LOW_POWER))
13972a8862d2SChris Wilson 		rps->last_adj = 0;
13982a8862d2SChris Wilson 
139979249636SBen Widawsky 	/* sysfs frequency interfaces may have snuck in while servicing the
140079249636SBen Widawsky 	 * interrupt
140179249636SBen Widawsky 	 */
1402edcf284bSChris Wilson 	new_delay += adj;
14038d3afd7dSChris Wilson 	new_delay = clamp_t(int, new_delay, min, max);
140427544369SDeepak S 
14059fcee2f7SChris Wilson 	if (intel_set_rps(dev_priv, new_delay)) {
14069fcee2f7SChris Wilson 		DRM_DEBUG_DRIVER("Failed to set new GPU frequency\n");
1407562d9baeSSagar Arun Kamble 		rps->last_adj = 0;
14089fcee2f7SChris Wilson 	}
14093b8d8d91SJesse Barnes 
1410ebb5eb7dSChris Wilson 	mutex_unlock(&rps->lock);
14117c0a16adSChris Wilson 
14127c0a16adSChris Wilson out:
14137c0a16adSChris Wilson 	/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
14147c0a16adSChris Wilson 	spin_lock_irq(&dev_priv->irq_lock);
1415562d9baeSSagar Arun Kamble 	if (rps->interrupts_enabled)
14167c0a16adSChris Wilson 		gen6_unmask_pm_irq(dev_priv, dev_priv->pm_rps_events);
14177c0a16adSChris Wilson 	spin_unlock_irq(&dev_priv->irq_lock);
14183b8d8d91SJesse Barnes }
14193b8d8d91SJesse Barnes 
1420e3689190SBen Widawsky 
1421e3689190SBen Widawsky /**
1422e3689190SBen Widawsky  * ivybridge_parity_work - Workqueue called when a parity error interrupt
1423e3689190SBen Widawsky  * occurred.
1424e3689190SBen Widawsky  * @work: workqueue struct
1425e3689190SBen Widawsky  *
1426e3689190SBen Widawsky  * Doesn't actually do anything except notify userspace. As a consequence of
1427e3689190SBen Widawsky  * this event, userspace should try to remap the bad rows since statistically
1428e3689190SBen Widawsky  * the same row is more likely to go bad again.
1429e3689190SBen Widawsky  */
1430e3689190SBen Widawsky static void ivybridge_parity_work(struct work_struct *work)
1431e3689190SBen Widawsky {
14322d1013ddSJani Nikula 	struct drm_i915_private *dev_priv =
1433cefcff8fSJoonas Lahtinen 		container_of(work, typeof(*dev_priv), l3_parity.error_work);
1434e3689190SBen Widawsky 	u32 error_status, row, bank, subbank;
143535a85ac6SBen Widawsky 	char *parity_event[6];
1436a9c287c9SJani Nikula 	u32 misccpctl;
1437a9c287c9SJani Nikula 	u8 slice = 0;
1438e3689190SBen Widawsky 
1439e3689190SBen Widawsky 	/* We must turn off DOP level clock gating to access the L3 registers.
1440e3689190SBen Widawsky 	 * In order to prevent a get/put style interface, acquire struct mutex
1441e3689190SBen Widawsky 	 * any time we access those registers.
1442e3689190SBen Widawsky 	 */
144391c8a326SChris Wilson 	mutex_lock(&dev_priv->drm.struct_mutex);
1444e3689190SBen Widawsky 
144535a85ac6SBen Widawsky 	/* If we've screwed up tracking, just let the interrupt fire again */
144635a85ac6SBen Widawsky 	if (WARN_ON(!dev_priv->l3_parity.which_slice))
144735a85ac6SBen Widawsky 		goto out;
144835a85ac6SBen Widawsky 
1449e3689190SBen Widawsky 	misccpctl = I915_READ(GEN7_MISCCPCTL);
1450e3689190SBen Widawsky 	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
1451e3689190SBen Widawsky 	POSTING_READ(GEN7_MISCCPCTL);
1452e3689190SBen Widawsky 
145335a85ac6SBen Widawsky 	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
1454f0f59a00SVille Syrjälä 		i915_reg_t reg;
145535a85ac6SBen Widawsky 
145635a85ac6SBen Widawsky 		slice--;
14572d1fe073SJoonas Lahtinen 		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv)))
145835a85ac6SBen Widawsky 			break;
145935a85ac6SBen Widawsky 
146035a85ac6SBen Widawsky 		dev_priv->l3_parity.which_slice &= ~(1<<slice);
146135a85ac6SBen Widawsky 
14626fa1c5f1SVille Syrjälä 		reg = GEN7_L3CDERRST1(slice);
146335a85ac6SBen Widawsky 
146435a85ac6SBen Widawsky 		error_status = I915_READ(reg);
1465e3689190SBen Widawsky 		row = GEN7_PARITY_ERROR_ROW(error_status);
1466e3689190SBen Widawsky 		bank = GEN7_PARITY_ERROR_BANK(error_status);
1467e3689190SBen Widawsky 		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
1468e3689190SBen Widawsky 
146935a85ac6SBen Widawsky 		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
147035a85ac6SBen Widawsky 		POSTING_READ(reg);
1471e3689190SBen Widawsky 
1472cce723edSBen Widawsky 		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
1473e3689190SBen Widawsky 		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
1474e3689190SBen Widawsky 		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
1475e3689190SBen Widawsky 		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
147635a85ac6SBen Widawsky 		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
147735a85ac6SBen Widawsky 		parity_event[5] = NULL;
1478e3689190SBen Widawsky 
147991c8a326SChris Wilson 		kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj,
1480e3689190SBen Widawsky 				   KOBJ_CHANGE, parity_event);
1481e3689190SBen Widawsky 
148235a85ac6SBen Widawsky 		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
148335a85ac6SBen Widawsky 			  slice, row, bank, subbank);
1484e3689190SBen Widawsky 
148535a85ac6SBen Widawsky 		kfree(parity_event[4]);
1486e3689190SBen Widawsky 		kfree(parity_event[3]);
1487e3689190SBen Widawsky 		kfree(parity_event[2]);
1488e3689190SBen Widawsky 		kfree(parity_event[1]);
1489e3689190SBen Widawsky 	}
1490e3689190SBen Widawsky 
149135a85ac6SBen Widawsky 	I915_WRITE(GEN7_MISCCPCTL, misccpctl);
149235a85ac6SBen Widawsky 
149335a85ac6SBen Widawsky out:
149435a85ac6SBen Widawsky 	WARN_ON(dev_priv->l3_parity.which_slice);
14954cb21832SDaniel Vetter 	spin_lock_irq(&dev_priv->irq_lock);
14962d1fe073SJoonas Lahtinen 	gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
14974cb21832SDaniel Vetter 	spin_unlock_irq(&dev_priv->irq_lock);
149835a85ac6SBen Widawsky 
149991c8a326SChris Wilson 	mutex_unlock(&dev_priv->drm.struct_mutex);
150035a85ac6SBen Widawsky }
150135a85ac6SBen Widawsky 
1502261e40b8SVille Syrjälä static void ivybridge_parity_error_irq_handler(struct drm_i915_private *dev_priv,
1503261e40b8SVille Syrjälä 					       u32 iir)
1504e3689190SBen Widawsky {
1505261e40b8SVille Syrjälä 	if (!HAS_L3_DPF(dev_priv))
1506e3689190SBen Widawsky 		return;
1507e3689190SBen Widawsky 
1508d0ecd7e2SDaniel Vetter 	spin_lock(&dev_priv->irq_lock);
1509261e40b8SVille Syrjälä 	gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
1510d0ecd7e2SDaniel Vetter 	spin_unlock(&dev_priv->irq_lock);
1511e3689190SBen Widawsky 
1512261e40b8SVille Syrjälä 	iir &= GT_PARITY_ERROR(dev_priv);
151335a85ac6SBen Widawsky 	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
151435a85ac6SBen Widawsky 		dev_priv->l3_parity.which_slice |= 1 << 1;
151535a85ac6SBen Widawsky 
151635a85ac6SBen Widawsky 	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
151735a85ac6SBen Widawsky 		dev_priv->l3_parity.which_slice |= 1 << 0;
151835a85ac6SBen Widawsky 
1519a4da4fa4SDaniel Vetter 	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
1520e3689190SBen Widawsky }
1521e3689190SBen Widawsky 
1522261e40b8SVille Syrjälä static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv,
1523f1af8fc1SPaulo Zanoni 			       u32 gt_iir)
1524f1af8fc1SPaulo Zanoni {
1525f8973c21SChris Wilson 	if (gt_iir & GT_RENDER_USER_INTERRUPT)
15268a68d464SChris Wilson 		intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);
1527f1af8fc1SPaulo Zanoni 	if (gt_iir & ILK_BSD_USER_INTERRUPT)
15288a68d464SChris Wilson 		intel_engine_breadcrumbs_irq(dev_priv->engine[VCS0]);
1529f1af8fc1SPaulo Zanoni }
1530f1af8fc1SPaulo Zanoni 
1531261e40b8SVille Syrjälä static void snb_gt_irq_handler(struct drm_i915_private *dev_priv,
1532e7b4c6b1SDaniel Vetter 			       u32 gt_iir)
1533e7b4c6b1SDaniel Vetter {
1534f8973c21SChris Wilson 	if (gt_iir & GT_RENDER_USER_INTERRUPT)
15358a68d464SChris Wilson 		intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);
1536cc609d5dSBen Widawsky 	if (gt_iir & GT_BSD_USER_INTERRUPT)
15378a68d464SChris Wilson 		intel_engine_breadcrumbs_irq(dev_priv->engine[VCS0]);
1538cc609d5dSBen Widawsky 	if (gt_iir & GT_BLT_USER_INTERRUPT)
15398a68d464SChris Wilson 		intel_engine_breadcrumbs_irq(dev_priv->engine[BCS0]);
1540e7b4c6b1SDaniel Vetter 
1541cc609d5dSBen Widawsky 	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
1542cc609d5dSBen Widawsky 		      GT_BSD_CS_ERROR_INTERRUPT |
1543aaecdf61SDaniel Vetter 		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
1544aaecdf61SDaniel Vetter 		DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);
1545e3689190SBen Widawsky 
1546261e40b8SVille Syrjälä 	if (gt_iir & GT_PARITY_ERROR(dev_priv))
1547261e40b8SVille Syrjälä 		ivybridge_parity_error_irq_handler(dev_priv, gt_iir);
1548e7b4c6b1SDaniel Vetter }
1549e7b4c6b1SDaniel Vetter 
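/*
 * Handle the per-engine interrupt bits on gen8+: a context switch
 * interrupt schedules the execlists tasklet, while a user interrupt
 * signals breadcrumb waiters (and may also require the tasklet).
 */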
15505d3d69d5SChris Wilson static void
155151f6b0f9SChris Wilson gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir)
1552fbcc1a0cSNick Hoath {
155331de7350SChris Wilson 	bool tasklet = false;
1554f747026cSChris Wilson 
1555fd8526e5SChris Wilson 	if (iir & GT_CONTEXT_SWITCH_INTERRUPT)
15568ea397faSChris Wilson 		tasklet = true;
155731de7350SChris Wilson 
155851f6b0f9SChris Wilson 	if (iir & GT_RENDER_USER_INTERRUPT) {
155952c0fdb2SChris Wilson 		intel_engine_breadcrumbs_irq(engine);
15604c6ce5c9SChris Wilson 		tasklet |= intel_engine_needs_breadcrumb_tasklet(engine);
156131de7350SChris Wilson 	}
156231de7350SChris Wilson 
156331de7350SChris Wilson 	if (tasklet)
1564fd8526e5SChris Wilson 		tasklet_hi_schedule(&engine->execlists.tasklet);
1565fbcc1a0cSNick Hoath }
1566fbcc1a0cSNick Hoath 
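/*
 * Read and clear (ack) the GEN8 GT IIR registers selected by master_ctl,
 * stashing the values in gt_iir[] so that gen8_gt_irq_handler() below can
 * process them after the ack, outside of this raw register access path.
 */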
15672e4a5b25SChris Wilson static void gen8_gt_irq_ack(struct drm_i915_private *i915,
156855ef72f2SChris Wilson 			    u32 master_ctl, u32 gt_iir[4])
1569abd58f01SBen Widawsky {
157025286aacSDaniele Ceraolo Spurio 	void __iomem * const regs = i915->uncore.regs;
15712e4a5b25SChris Wilson 
1572f0fd96f5SChris Wilson #define GEN8_GT_IRQS (GEN8_GT_RCS_IRQ | \
1573f0fd96f5SChris Wilson 		      GEN8_GT_BCS_IRQ | \
15748a68d464SChris Wilson 		      GEN8_GT_VCS0_IRQ | \
1575f0fd96f5SChris Wilson 		      GEN8_GT_VCS1_IRQ | \
1576f0fd96f5SChris Wilson 		      GEN8_GT_VECS_IRQ | \
1577f0fd96f5SChris Wilson 		      GEN8_GT_PM_IRQ | \
1578f0fd96f5SChris Wilson 		      GEN8_GT_GUC_IRQ)
1579f0fd96f5SChris Wilson 
1580abd58f01SBen Widawsky 	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
15812e4a5b25SChris Wilson 		gt_iir[0] = raw_reg_read(regs, GEN8_GT_IIR(0));
15822e4a5b25SChris Wilson 		if (likely(gt_iir[0]))
15832e4a5b25SChris Wilson 			raw_reg_write(regs, GEN8_GT_IIR(0), gt_iir[0]);
1584abd58f01SBen Widawsky 	}
1585abd58f01SBen Widawsky 
15868a68d464SChris Wilson 	if (master_ctl & (GEN8_GT_VCS0_IRQ | GEN8_GT_VCS1_IRQ)) {
15872e4a5b25SChris Wilson 		gt_iir[1] = raw_reg_read(regs, GEN8_GT_IIR(1));
15882e4a5b25SChris Wilson 		if (likely(gt_iir[1]))
15892e4a5b25SChris Wilson 			raw_reg_write(regs, GEN8_GT_IIR(1), gt_iir[1]);
159074cdb337SChris Wilson 	}
159174cdb337SChris Wilson 
159226705e20SSagar Arun Kamble 	if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
15932e4a5b25SChris Wilson 		gt_iir[2] = raw_reg_read(regs, GEN8_GT_IIR(2));
1594f4de7794SChris Wilson 		if (likely(gt_iir[2]))
1595f4de7794SChris Wilson 			raw_reg_write(regs, GEN8_GT_IIR(2), gt_iir[2]);
15960961021aSBen Widawsky 	}
15972e4a5b25SChris Wilson 
15982e4a5b25SChris Wilson 	if (master_ctl & GEN8_GT_VECS_IRQ) {
15992e4a5b25SChris Wilson 		gt_iir[3] = raw_reg_read(regs, GEN8_GT_IIR(3));
16002e4a5b25SChris Wilson 		if (likely(gt_iir[3]))
16012e4a5b25SChris Wilson 			raw_reg_write(regs, GEN8_GT_IIR(3), gt_iir[3]);
160255ef72f2SChris Wilson 	}
1603abd58f01SBen Widawsky }
1604abd58f01SBen Widawsky 
16052e4a5b25SChris Wilson static void gen8_gt_irq_handler(struct drm_i915_private *i915,
1606f0fd96f5SChris Wilson 				u32 master_ctl, u32 gt_iir[4])
1607e30e251aSVille Syrjälä {
1608f0fd96f5SChris Wilson 	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
16098a68d464SChris Wilson 		gen8_cs_irq_handler(i915->engine[RCS0],
161051f6b0f9SChris Wilson 				    gt_iir[0] >> GEN8_RCS_IRQ_SHIFT);
16118a68d464SChris Wilson 		gen8_cs_irq_handler(i915->engine[BCS0],
161251f6b0f9SChris Wilson 				    gt_iir[0] >> GEN8_BCS_IRQ_SHIFT);
1613e30e251aSVille Syrjälä 	}
1614e30e251aSVille Syrjälä 
16158a68d464SChris Wilson 	if (master_ctl & (GEN8_GT_VCS0_IRQ | GEN8_GT_VCS1_IRQ)) {
16168a68d464SChris Wilson 		gen8_cs_irq_handler(i915->engine[VCS0],
16178a68d464SChris Wilson 				    gt_iir[1] >> GEN8_VCS0_IRQ_SHIFT);
16188a68d464SChris Wilson 		gen8_cs_irq_handler(i915->engine[VCS1],
161951f6b0f9SChris Wilson 				    gt_iir[1] >> GEN8_VCS1_IRQ_SHIFT);
1620e30e251aSVille Syrjälä 	}
1621e30e251aSVille Syrjälä 
1622f0fd96f5SChris Wilson 	if (master_ctl & GEN8_GT_VECS_IRQ) {
16238a68d464SChris Wilson 		gen8_cs_irq_handler(i915->engine[VECS0],
162451f6b0f9SChris Wilson 				    gt_iir[3] >> GEN8_VECS_IRQ_SHIFT);
1625f0fd96f5SChris Wilson 	}
1626e30e251aSVille Syrjälä 
1627f0fd96f5SChris Wilson 	if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
16282e4a5b25SChris Wilson 		gen6_rps_irq_handler(i915, gt_iir[2]);
16292e4a5b25SChris Wilson 		gen9_guc_irq_handler(i915, gt_iir[2]);
1630e30e251aSVille Syrjälä 	}
1631f0fd96f5SChris Wilson }
1632e30e251aSVille Syrjälä 
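/*
 * The *_port_hotplug_long_detect() helpers below map an HPD pin to the
 * platform specific "long pulse detected" bit in the hotplug register
 * value; they are passed to intel_get_hpd_pins() as the
 * long_pulse_detect() callback.
 */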
1633af92058fSVille Syrjälä static bool gen11_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1634121e758eSDhinakaran Pandiyan {
1635af92058fSVille Syrjälä 	switch (pin) {
1636af92058fSVille Syrjälä 	case HPD_PORT_C:
1637121e758eSDhinakaran Pandiyan 		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC1);
1638af92058fSVille Syrjälä 	case HPD_PORT_D:
1639121e758eSDhinakaran Pandiyan 		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC2);
1640af92058fSVille Syrjälä 	case HPD_PORT_E:
1641121e758eSDhinakaran Pandiyan 		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC3);
1642af92058fSVille Syrjälä 	case HPD_PORT_F:
1643121e758eSDhinakaran Pandiyan 		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC4);
1644121e758eSDhinakaran Pandiyan 	default:
1645121e758eSDhinakaran Pandiyan 		return false;
1646121e758eSDhinakaran Pandiyan 	}
1647121e758eSDhinakaran Pandiyan }
1648121e758eSDhinakaran Pandiyan 
1649af92058fSVille Syrjälä static bool bxt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
165063c88d22SImre Deak {
1651af92058fSVille Syrjälä 	switch (pin) {
1652af92058fSVille Syrjälä 	case HPD_PORT_A:
1653195baa06SVille Syrjälä 		return val & PORTA_HOTPLUG_LONG_DETECT;
1654af92058fSVille Syrjälä 	case HPD_PORT_B:
165563c88d22SImre Deak 		return val & PORTB_HOTPLUG_LONG_DETECT;
1656af92058fSVille Syrjälä 	case HPD_PORT_C:
165763c88d22SImre Deak 		return val & PORTC_HOTPLUG_LONG_DETECT;
165863c88d22SImre Deak 	default:
165963c88d22SImre Deak 		return false;
166063c88d22SImre Deak 	}
166163c88d22SImre Deak }
166263c88d22SImre Deak 
1663af92058fSVille Syrjälä static bool icp_ddi_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
166431604222SAnusha Srivatsa {
1665af92058fSVille Syrjälä 	switch (pin) {
1666af92058fSVille Syrjälä 	case HPD_PORT_A:
166731604222SAnusha Srivatsa 		return val & ICP_DDIA_HPD_LONG_DETECT;
1668af92058fSVille Syrjälä 	case HPD_PORT_B:
166931604222SAnusha Srivatsa 		return val & ICP_DDIB_HPD_LONG_DETECT;
167031604222SAnusha Srivatsa 	default:
167131604222SAnusha Srivatsa 		return false;
167231604222SAnusha Srivatsa 	}
167331604222SAnusha Srivatsa }
167431604222SAnusha Srivatsa 
1675af92058fSVille Syrjälä static bool icp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
167631604222SAnusha Srivatsa {
1677af92058fSVille Syrjälä 	switch (pin) {
1678af92058fSVille Syrjälä 	case HPD_PORT_C:
167931604222SAnusha Srivatsa 		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC1);
1680af92058fSVille Syrjälä 	case HPD_PORT_D:
168131604222SAnusha Srivatsa 		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC2);
1682af92058fSVille Syrjälä 	case HPD_PORT_E:
168331604222SAnusha Srivatsa 		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC3);
1684af92058fSVille Syrjälä 	case HPD_PORT_F:
168531604222SAnusha Srivatsa 		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC4);
168631604222SAnusha Srivatsa 	default:
168731604222SAnusha Srivatsa 		return false;
168831604222SAnusha Srivatsa 	}
168931604222SAnusha Srivatsa }
169031604222SAnusha Srivatsa 
1691af92058fSVille Syrjälä static bool spt_port_hotplug2_long_detect(enum hpd_pin pin, u32 val)
16926dbf30ceSVille Syrjälä {
1693af92058fSVille Syrjälä 	switch (pin) {
1694af92058fSVille Syrjälä 	case HPD_PORT_E:
16956dbf30ceSVille Syrjälä 		return val & PORTE_HOTPLUG_LONG_DETECT;
16966dbf30ceSVille Syrjälä 	default:
16976dbf30ceSVille Syrjälä 		return false;
16986dbf30ceSVille Syrjälä 	}
16996dbf30ceSVille Syrjälä }
17006dbf30ceSVille Syrjälä 
1701af92058fSVille Syrjälä static bool spt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
170274c0b395SVille Syrjälä {
1703af92058fSVille Syrjälä 	switch (pin) {
1704af92058fSVille Syrjälä 	case HPD_PORT_A:
170574c0b395SVille Syrjälä 		return val & PORTA_HOTPLUG_LONG_DETECT;
1706af92058fSVille Syrjälä 	case HPD_PORT_B:
170774c0b395SVille Syrjälä 		return val & PORTB_HOTPLUG_LONG_DETECT;
1708af92058fSVille Syrjälä 	case HPD_PORT_C:
170974c0b395SVille Syrjälä 		return val & PORTC_HOTPLUG_LONG_DETECT;
1710af92058fSVille Syrjälä 	case HPD_PORT_D:
171174c0b395SVille Syrjälä 		return val & PORTD_HOTPLUG_LONG_DETECT;
171274c0b395SVille Syrjälä 	default:
171374c0b395SVille Syrjälä 		return false;
171474c0b395SVille Syrjälä 	}
171574c0b395SVille Syrjälä }
171674c0b395SVille Syrjälä 
1717af92058fSVille Syrjälä static bool ilk_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1718e4ce95aaSVille Syrjälä {
1719af92058fSVille Syrjälä 	switch (pin) {
1720af92058fSVille Syrjälä 	case HPD_PORT_A:
1721e4ce95aaSVille Syrjälä 		return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
1722e4ce95aaSVille Syrjälä 	default:
1723e4ce95aaSVille Syrjälä 		return false;
1724e4ce95aaSVille Syrjälä 	}
1725e4ce95aaSVille Syrjälä }
1726e4ce95aaSVille Syrjälä 
1727af92058fSVille Syrjälä static bool pch_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
172813cf5504SDave Airlie {
1729af92058fSVille Syrjälä 	switch (pin) {
1730af92058fSVille Syrjälä 	case HPD_PORT_B:
1731676574dfSJani Nikula 		return val & PORTB_HOTPLUG_LONG_DETECT;
1732af92058fSVille Syrjälä 	case HPD_PORT_C:
1733676574dfSJani Nikula 		return val & PORTC_HOTPLUG_LONG_DETECT;
1734af92058fSVille Syrjälä 	case HPD_PORT_D:
1735676574dfSJani Nikula 		return val & PORTD_HOTPLUG_LONG_DETECT;
1736676574dfSJani Nikula 	default:
1737676574dfSJani Nikula 		return false;
173813cf5504SDave Airlie 	}
173913cf5504SDave Airlie }
174013cf5504SDave Airlie 
1741af92058fSVille Syrjälä static bool i9xx_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
174213cf5504SDave Airlie {
1743af92058fSVille Syrjälä 	switch (pin) {
1744af92058fSVille Syrjälä 	case HPD_PORT_B:
1745676574dfSJani Nikula 		return val & PORTB_HOTPLUG_INT_LONG_PULSE;
1746af92058fSVille Syrjälä 	case HPD_PORT_C:
1747676574dfSJani Nikula 		return val & PORTC_HOTPLUG_INT_LONG_PULSE;
1748af92058fSVille Syrjälä 	case HPD_PORT_D:
1749676574dfSJani Nikula 		return val & PORTD_HOTPLUG_INT_LONG_PULSE;
1750676574dfSJani Nikula 	default:
1751676574dfSJani Nikula 		return false;
175213cf5504SDave Airlie 	}
175313cf5504SDave Airlie }
175413cf5504SDave Airlie 
175542db67d6SVille Syrjälä /*
175642db67d6SVille Syrjälä  * Get a bit mask of pins that have triggered, and which ones may be long.
175742db67d6SVille Syrjälä  * This can be called multiple times with the same masks to accumulate
175842db67d6SVille Syrjälä  * hotplug detection results from several registers.
175942db67d6SVille Syrjälä  *
176042db67d6SVille Syrjälä  * Note that the caller is expected to zero out the masks initially.
176142db67d6SVille Syrjälä  */
1762cf53902fSRodrigo Vivi static void intel_get_hpd_pins(struct drm_i915_private *dev_priv,
1763cf53902fSRodrigo Vivi 			       u32 *pin_mask, u32 *long_mask,
17648c841e57SJani Nikula 			       u32 hotplug_trigger, u32 dig_hotplug_reg,
1765fd63e2a9SImre Deak 			       const u32 hpd[HPD_NUM_PINS],
1766af92058fSVille Syrjälä 			       bool long_pulse_detect(enum hpd_pin pin, u32 val))
1767676574dfSJani Nikula {
1768e9be2850SVille Syrjälä 	enum hpd_pin pin;
1769676574dfSJani Nikula 
1770e9be2850SVille Syrjälä 	for_each_hpd_pin(pin) {
1771e9be2850SVille Syrjälä 		if ((hpd[pin] & hotplug_trigger) == 0)
17728c841e57SJani Nikula 			continue;
17738c841e57SJani Nikula 
1774e9be2850SVille Syrjälä 		*pin_mask |= BIT(pin);
1775676574dfSJani Nikula 
1776af92058fSVille Syrjälä 		if (long_pulse_detect(pin, dig_hotplug_reg))
1777e9be2850SVille Syrjälä 			*long_mask |= BIT(pin);
1778676574dfSJani Nikula 	}
1779676574dfSJani Nikula 
1780f88f0478SVille Syrjälä 	DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x, long 0x%08x\n",
1781f88f0478SVille Syrjälä 			 hotplug_trigger, dig_hotplug_reg, *pin_mask, *long_mask);
1782676574dfSJani Nikula 
1783676574dfSJani Nikula }
1784676574dfSJani Nikula 
178591d14251STvrtko Ursulin static void gmbus_irq_handler(struct drm_i915_private *dev_priv)
1786515ac2bbSDaniel Vetter {
178728c70f16SDaniel Vetter 	wake_up_all(&dev_priv->gmbus_wait_queue);
1788515ac2bbSDaniel Vetter }
1789515ac2bbSDaniel Vetter 
179091d14251STvrtko Ursulin static void dp_aux_irq_handler(struct drm_i915_private *dev_priv)
1791ce99c256SDaniel Vetter {
17929ee32feaSDaniel Vetter 	wake_up_all(&dev_priv->gmbus_wait_queue);
1793ce99c256SDaniel Vetter }
1794ce99c256SDaniel Vetter 
17958bf1e9f1SShuang He #if defined(CONFIG_DEBUG_FS)
179691d14251STvrtko Ursulin static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
179791d14251STvrtko Ursulin 					 enum pipe pipe,
1798a9c287c9SJani Nikula 					 u32 crc0, u32 crc1,
1799a9c287c9SJani Nikula 					 u32 crc2, u32 crc3,
1800a9c287c9SJani Nikula 					 u32 crc4)
18018bf1e9f1SShuang He {
18028bf1e9f1SShuang He 	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
18038c6b709dSTomeu Vizoso 	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
18045cee6c45SVille Syrjälä 	u32 crcs[5] = { crc0, crc1, crc2, crc3, crc4 };
18055cee6c45SVille Syrjälä 
18065cee6c45SVille Syrjälä 	trace_intel_pipe_crc(crtc, crcs);
1807b2c88f5bSDamien Lespiau 
1808d538bbdfSDamien Lespiau 	spin_lock(&pipe_crc->lock);
18098c6b709dSTomeu Vizoso 	/*
18108c6b709dSTomeu Vizoso 	 * For some not yet identified reason, the first CRC is
18118c6b709dSTomeu Vizoso 	 * bonkers. So let's just wait for the next vblank and read
18128c6b709dSTomeu Vizoso 	 * out the buggy result.
18138c6b709dSTomeu Vizoso 	 *
1814163e8aecSRodrigo Vivi 	 * On GEN8+ sometimes the second CRC is bonkers as well, so
18158c6b709dSTomeu Vizoso 	 * don't trust that one either.
18168c6b709dSTomeu Vizoso 	 */
1817033b7a23SMaarten Lankhorst 	if (pipe_crc->skipped <= 0 ||
1818163e8aecSRodrigo Vivi 	    (INTEL_GEN(dev_priv) >= 8 && pipe_crc->skipped == 1)) {
18198c6b709dSTomeu Vizoso 		pipe_crc->skipped++;
18208c6b709dSTomeu Vizoso 		spin_unlock(&pipe_crc->lock);
18218c6b709dSTomeu Vizoso 		return;
18228c6b709dSTomeu Vizoso 	}
18238c6b709dSTomeu Vizoso 	spin_unlock(&pipe_crc->lock);
18246cc42152SMaarten Lankhorst 
1825246ee524STomeu Vizoso 	drm_crtc_add_crc_entry(&crtc->base, true,
1826ca814b25SDaniel Vetter 				drm_crtc_accurate_vblank_count(&crtc->base),
1827246ee524STomeu Vizoso 				crcs);
18288c6b709dSTomeu Vizoso }
1829277de95eSDaniel Vetter #else
1830277de95eSDaniel Vetter static inline void
183191d14251STvrtko Ursulin display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
183291d14251STvrtko Ursulin 			     enum pipe pipe,
1833a9c287c9SJani Nikula 			     u32 crc0, u32 crc1,
1834a9c287c9SJani Nikula 			     u32 crc2, u32 crc3,
1835a9c287c9SJani Nikula 			     u32 crc4) {}
1836277de95eSDaniel Vetter #endif
1837eba94eb9SDaniel Vetter 
1838277de95eSDaniel Vetter 
183991d14251STvrtko Ursulin static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
184091d14251STvrtko Ursulin 				     enum pipe pipe)
18415a69b89fSDaniel Vetter {
184291d14251STvrtko Ursulin 	display_pipe_crc_irq_handler(dev_priv, pipe,
18435a69b89fSDaniel Vetter 				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
18445a69b89fSDaniel Vetter 				     0, 0, 0, 0);
18455a69b89fSDaniel Vetter }
18465a69b89fSDaniel Vetter 
184791d14251STvrtko Ursulin static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
184891d14251STvrtko Ursulin 				     enum pipe pipe)
1849eba94eb9SDaniel Vetter {
185091d14251STvrtko Ursulin 	display_pipe_crc_irq_handler(dev_priv, pipe,
1851eba94eb9SDaniel Vetter 				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1852eba94eb9SDaniel Vetter 				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
1853eba94eb9SDaniel Vetter 				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
1854eba94eb9SDaniel Vetter 				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
18558bc5e955SDaniel Vetter 				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
1856eba94eb9SDaniel Vetter }
18575b3a856bSDaniel Vetter 
185891d14251STvrtko Ursulin static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
185991d14251STvrtko Ursulin 				      enum pipe pipe)
18605b3a856bSDaniel Vetter {
1861a9c287c9SJani Nikula 	u32 res1, res2;
18620b5c5ed0SDaniel Vetter 
186391d14251STvrtko Ursulin 	if (INTEL_GEN(dev_priv) >= 3)
18640b5c5ed0SDaniel Vetter 		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
18650b5c5ed0SDaniel Vetter 	else
18660b5c5ed0SDaniel Vetter 		res1 = 0;
18670b5c5ed0SDaniel Vetter 
186891d14251STvrtko Ursulin 	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
18690b5c5ed0SDaniel Vetter 		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
18700b5c5ed0SDaniel Vetter 	else
18710b5c5ed0SDaniel Vetter 		res2 = 0;
18725b3a856bSDaniel Vetter 
187391d14251STvrtko Ursulin 	display_pipe_crc_irq_handler(dev_priv, pipe,
18740b5c5ed0SDaniel Vetter 				     I915_READ(PIPE_CRC_RES_RED(pipe)),
18750b5c5ed0SDaniel Vetter 				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
18760b5c5ed0SDaniel Vetter 				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
18770b5c5ed0SDaniel Vetter 				     res1, res2);
18785b3a856bSDaniel Vetter }
18798bf1e9f1SShuang He 
18801403c0d4SPaulo Zanoni /* The RPS events need forcewake, so we add them to a work queue and mask their
18811403c0d4SPaulo Zanoni  * IMR bits until the work is done. Other interrupts can be processed without
18821403c0d4SPaulo Zanoni  * the work queue. */
1883a087bafeSMika Kuoppala static void gen11_rps_irq_handler(struct drm_i915_private *i915, u32 pm_iir)
1884a087bafeSMika Kuoppala {
1885a087bafeSMika Kuoppala 	struct intel_rps *rps = &i915->gt_pm.rps;
1886a087bafeSMika Kuoppala 	const u32 events = i915->pm_rps_events & pm_iir;
1887a087bafeSMika Kuoppala 
1888a087bafeSMika Kuoppala 	lockdep_assert_held(&i915->irq_lock);
1889a087bafeSMika Kuoppala 
1890a087bafeSMika Kuoppala 	if (unlikely(!events))
1891a087bafeSMika Kuoppala 		return;
1892a087bafeSMika Kuoppala 
1893a087bafeSMika Kuoppala 	gen6_mask_pm_irq(i915, events);
1894a087bafeSMika Kuoppala 
1895a087bafeSMika Kuoppala 	if (!rps->interrupts_enabled)
1896a087bafeSMika Kuoppala 		return;
1897a087bafeSMika Kuoppala 
1898a087bafeSMika Kuoppala 	rps->pm_iir |= events;
1899a087bafeSMika Kuoppala 	schedule_work(&rps->work);
1900a087bafeSMika Kuoppala }
1901a087bafeSMika Kuoppala 
19021403c0d4SPaulo Zanoni static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
1903baf02a1fSBen Widawsky {
1904562d9baeSSagar Arun Kamble 	struct intel_rps *rps = &dev_priv->gt_pm.rps;
1905562d9baeSSagar Arun Kamble 
1906a6706b45SDeepak S 	if (pm_iir & dev_priv->pm_rps_events) {
190759cdb63dSDaniel Vetter 		spin_lock(&dev_priv->irq_lock);
1908f4e9af4fSAkash Goel 		gen6_mask_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
1909562d9baeSSagar Arun Kamble 		if (rps->interrupts_enabled) {
1910562d9baeSSagar Arun Kamble 			rps->pm_iir |= pm_iir & dev_priv->pm_rps_events;
1911562d9baeSSagar Arun Kamble 			schedule_work(&rps->work);
191241a05a3aSDaniel Vetter 		}
1913d4d70aa5SImre Deak 		spin_unlock(&dev_priv->irq_lock);
1914d4d70aa5SImre Deak 	}
1915baf02a1fSBen Widawsky 
1916bca2bf2aSPandiyan, Dhinakaran 	if (INTEL_GEN(dev_priv) >= 8)
1917c9a9a268SImre Deak 		return;
1918c9a9a268SImre Deak 
191912638c57SBen Widawsky 	if (pm_iir & PM_VEBOX_USER_INTERRUPT)
19208a68d464SChris Wilson 		intel_engine_breadcrumbs_irq(dev_priv->engine[VECS0]);
192112638c57SBen Widawsky 
1922aaecdf61SDaniel Vetter 	if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
1923aaecdf61SDaniel Vetter 		DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
192412638c57SBen Widawsky }
1925baf02a1fSBen Widawsky 
192626705e20SSagar Arun Kamble static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 gt_iir)
192726705e20SSagar Arun Kamble {
192893bf8096SMichal Wajdeczko 	if (gt_iir & GEN9_GUC_TO_HOST_INT_EVENT)
192993bf8096SMichal Wajdeczko 		intel_guc_to_host_event_handler(&dev_priv->guc);
193026705e20SSagar Arun Kamble }
193126705e20SSagar Arun Kamble 
1932*54c52a84SOscar Mateo static void gen11_guc_irq_handler(struct drm_i915_private *i915, u16 iir)
1933*54c52a84SOscar Mateo {
1934*54c52a84SOscar Mateo 	if (iir & GEN11_GUC_INTR_GUC2HOST)
1935*54c52a84SOscar Mateo 		intel_guc_to_host_event_handler(&i915->guc);
1936*54c52a84SOscar Mateo }
1937*54c52a84SOscar Mateo 
193844d9241eSVille Syrjälä static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
193944d9241eSVille Syrjälä {
194044d9241eSVille Syrjälä 	enum pipe pipe;
194144d9241eSVille Syrjälä 
194244d9241eSVille Syrjälä 	for_each_pipe(dev_priv, pipe) {
194344d9241eSVille Syrjälä 		I915_WRITE(PIPESTAT(pipe),
194444d9241eSVille Syrjälä 			   PIPESTAT_INT_STATUS_MASK |
194544d9241eSVille Syrjälä 			   PIPE_FIFO_UNDERRUN_STATUS);
194644d9241eSVille Syrjälä 
194744d9241eSVille Syrjälä 		dev_priv->pipestat_irq_mask[pipe] = 0;
194844d9241eSVille Syrjälä 	}
194944d9241eSVille Syrjälä }
195044d9241eSVille Syrjälä 
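/*
 * Latch and clear the PIPESTAT bits selected by IIR and the enabled
 * status mask into pipe_stats[], under irq_lock. The per-platform
 * *_pipestat_irq_handler() helpers below then act on the latched bits.
 */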
1951eb64343cSVille Syrjälä static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
195291d14251STvrtko Ursulin 				  u32 iir, u32 pipe_stats[I915_MAX_PIPES])
19537e231dbeSJesse Barnes {
19547e231dbeSJesse Barnes 	int pipe;
19557e231dbeSJesse Barnes 
195658ead0d7SImre Deak 	spin_lock(&dev_priv->irq_lock);
19571ca993d2SVille Syrjälä 
19581ca993d2SVille Syrjälä 	if (!dev_priv->display_irqs_enabled) {
19591ca993d2SVille Syrjälä 		spin_unlock(&dev_priv->irq_lock);
19601ca993d2SVille Syrjälä 		return;
19611ca993d2SVille Syrjälä 	}
19621ca993d2SVille Syrjälä 
1963055e393fSDamien Lespiau 	for_each_pipe(dev_priv, pipe) {
1964f0f59a00SVille Syrjälä 		i915_reg_t reg;
19656b12ca56SVille Syrjälä 		u32 status_mask, enable_mask, iir_bit = 0;
196691d181ddSImre Deak 
1967bbb5eebfSDaniel Vetter 		/*
1968bbb5eebfSDaniel Vetter 		 * PIPESTAT bits get signalled even when the interrupt is
1969bbb5eebfSDaniel Vetter 		 * disabled with the mask bits, and some of the status bits do
1970bbb5eebfSDaniel Vetter 		 * not generate interrupts at all (like the underrun bit). Hence
1971bbb5eebfSDaniel Vetter 		 * we need to be careful that we only handle what we want to
1972bbb5eebfSDaniel Vetter 		 * handle.
1973bbb5eebfSDaniel Vetter 		 */
19740f239f4cSDaniel Vetter 
19750f239f4cSDaniel Vetter 		/* fifo underruns are filtered in the underrun handler. */
19766b12ca56SVille Syrjälä 		status_mask = PIPE_FIFO_UNDERRUN_STATUS;
1977bbb5eebfSDaniel Vetter 
1978bbb5eebfSDaniel Vetter 		switch (pipe) {
1979bbb5eebfSDaniel Vetter 		case PIPE_A:
1980bbb5eebfSDaniel Vetter 			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
1981bbb5eebfSDaniel Vetter 			break;
1982bbb5eebfSDaniel Vetter 		case PIPE_B:
1983bbb5eebfSDaniel Vetter 			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
1984bbb5eebfSDaniel Vetter 			break;
19853278f67fSVille Syrjälä 		case PIPE_C:
19863278f67fSVille Syrjälä 			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
19873278f67fSVille Syrjälä 			break;
1988bbb5eebfSDaniel Vetter 		}
1989bbb5eebfSDaniel Vetter 		if (iir & iir_bit)
19906b12ca56SVille Syrjälä 			status_mask |= dev_priv->pipestat_irq_mask[pipe];
1991bbb5eebfSDaniel Vetter 
19926b12ca56SVille Syrjälä 		if (!status_mask)
199391d181ddSImre Deak 			continue;
199491d181ddSImre Deak 
199591d181ddSImre Deak 		reg = PIPESTAT(pipe);
19966b12ca56SVille Syrjälä 		pipe_stats[pipe] = I915_READ(reg) & status_mask;
19976b12ca56SVille Syrjälä 		enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
19987e231dbeSJesse Barnes 
19997e231dbeSJesse Barnes 		/*
20007e231dbeSJesse Barnes 		 * Clear the PIPE*STAT regs before the IIR
2001132c27c9SVille Syrjälä 		 *
2002132c27c9SVille Syrjälä 		 * Toggle the enable bits to make sure we get an
2003132c27c9SVille Syrjälä 		 * edge in the ISR pipe event bit if we don't clear
2004132c27c9SVille Syrjälä 		 * all the enabled status bits. Otherwise the edge
2005132c27c9SVille Syrjälä 		 * triggered IIR on i965/g4x wouldn't notice that
2006132c27c9SVille Syrjälä 		 * an interrupt is still pending.
20077e231dbeSJesse Barnes 		 */
2008132c27c9SVille Syrjälä 		if (pipe_stats[pipe]) {
2009132c27c9SVille Syrjälä 			I915_WRITE(reg, pipe_stats[pipe]);
2010132c27c9SVille Syrjälä 			I915_WRITE(reg, enable_mask);
2011132c27c9SVille Syrjälä 		}
20127e231dbeSJesse Barnes 	}
201358ead0d7SImre Deak 	spin_unlock(&dev_priv->irq_lock);
20142ecb8ca4SVille Syrjälä }
20152ecb8ca4SVille Syrjälä 
2016eb64343cSVille Syrjälä static void i8xx_pipestat_irq_handler(struct drm_i915_private *dev_priv,
2017eb64343cSVille Syrjälä 				      u16 iir, u32 pipe_stats[I915_MAX_PIPES])
2018eb64343cSVille Syrjälä {
2019eb64343cSVille Syrjälä 	enum pipe pipe;
2020eb64343cSVille Syrjälä 
2021eb64343cSVille Syrjälä 	for_each_pipe(dev_priv, pipe) {
2022eb64343cSVille Syrjälä 		if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
2023eb64343cSVille Syrjälä 			drm_handle_vblank(&dev_priv->drm, pipe);
2024eb64343cSVille Syrjälä 
2025eb64343cSVille Syrjälä 		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
2026eb64343cSVille Syrjälä 			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
2027eb64343cSVille Syrjälä 
2028eb64343cSVille Syrjälä 		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
2029eb64343cSVille Syrjälä 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2030eb64343cSVille Syrjälä 	}
2031eb64343cSVille Syrjälä }
2032eb64343cSVille Syrjälä 
2033eb64343cSVille Syrjälä static void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv,
2034eb64343cSVille Syrjälä 				      u32 iir, u32 pipe_stats[I915_MAX_PIPES])
2035eb64343cSVille Syrjälä {
2036eb64343cSVille Syrjälä 	bool blc_event = false;
2037eb64343cSVille Syrjälä 	enum pipe pipe;
2038eb64343cSVille Syrjälä 
2039eb64343cSVille Syrjälä 	for_each_pipe(dev_priv, pipe) {
2040eb64343cSVille Syrjälä 		if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
2041eb64343cSVille Syrjälä 			drm_handle_vblank(&dev_priv->drm, pipe);
2042eb64343cSVille Syrjälä 
2043eb64343cSVille Syrjälä 		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
2044eb64343cSVille Syrjälä 			blc_event = true;
2045eb64343cSVille Syrjälä 
2046eb64343cSVille Syrjälä 		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
2047eb64343cSVille Syrjälä 			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
2048eb64343cSVille Syrjälä 
2049eb64343cSVille Syrjälä 		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
2050eb64343cSVille Syrjälä 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2051eb64343cSVille Syrjälä 	}
2052eb64343cSVille Syrjälä 
2053eb64343cSVille Syrjälä 	if (blc_event || (iir & I915_ASLE_INTERRUPT))
2054eb64343cSVille Syrjälä 		intel_opregion_asle_intr(dev_priv);
2055eb64343cSVille Syrjälä }
2056eb64343cSVille Syrjälä 
2057eb64343cSVille Syrjälä static void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv,
2058eb64343cSVille Syrjälä 				      u32 iir, u32 pipe_stats[I915_MAX_PIPES])
2059eb64343cSVille Syrjälä {
2060eb64343cSVille Syrjälä 	bool blc_event = false;
2061eb64343cSVille Syrjälä 	enum pipe pipe;
2062eb64343cSVille Syrjälä 
2063eb64343cSVille Syrjälä 	for_each_pipe(dev_priv, pipe) {
2064eb64343cSVille Syrjälä 		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
2065eb64343cSVille Syrjälä 			drm_handle_vblank(&dev_priv->drm, pipe);
2066eb64343cSVille Syrjälä 
2067eb64343cSVille Syrjälä 		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
2068eb64343cSVille Syrjälä 			blc_event = true;
2069eb64343cSVille Syrjälä 
2070eb64343cSVille Syrjälä 		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
2071eb64343cSVille Syrjälä 			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
2072eb64343cSVille Syrjälä 
2073eb64343cSVille Syrjälä 		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
2074eb64343cSVille Syrjälä 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2075eb64343cSVille Syrjälä 	}
2076eb64343cSVille Syrjälä 
2077eb64343cSVille Syrjälä 	if (blc_event || (iir & I915_ASLE_INTERRUPT))
2078eb64343cSVille Syrjälä 		intel_opregion_asle_intr(dev_priv);
2079eb64343cSVille Syrjälä 
2080eb64343cSVille Syrjälä 	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
2081eb64343cSVille Syrjälä 		gmbus_irq_handler(dev_priv);
2082eb64343cSVille Syrjälä }
2083eb64343cSVille Syrjälä 
208491d14251STvrtko Ursulin static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
20852ecb8ca4SVille Syrjälä 					    u32 pipe_stats[I915_MAX_PIPES])
20862ecb8ca4SVille Syrjälä {
20872ecb8ca4SVille Syrjälä 	enum pipe pipe;
20887e231dbeSJesse Barnes 
2089055e393fSDamien Lespiau 	for_each_pipe(dev_priv, pipe) {
2090fd3a4024SDaniel Vetter 		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
2091fd3a4024SDaniel Vetter 			drm_handle_vblank(&dev_priv->drm, pipe);
20924356d586SDaniel Vetter 
20934356d586SDaniel Vetter 		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
209491d14251STvrtko Ursulin 			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
20952d9d2b0bSVille Syrjälä 
20961f7247c0SDaniel Vetter 		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
20971f7247c0SDaniel Vetter 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
209831acc7f5SJesse Barnes 	}
209931acc7f5SJesse Barnes 
2100c1874ed7SImre Deak 	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
210191d14251STvrtko Ursulin 		gmbus_irq_handler(dev_priv);
2102c1874ed7SImre Deak }
2103c1874ed7SImre Deak 
21041ae3c34cSVille Syrjälä static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
210516c6c56bSVille Syrjälä {
21060ba7c51aSVille Syrjälä 	u32 hotplug_status = 0, hotplug_status_mask;
21070ba7c51aSVille Syrjälä 	int i;
210816c6c56bSVille Syrjälä 
21090ba7c51aSVille Syrjälä 	if (IS_G4X(dev_priv) ||
21100ba7c51aSVille Syrjälä 	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
21110ba7c51aSVille Syrjälä 		hotplug_status_mask = HOTPLUG_INT_STATUS_G4X |
21120ba7c51aSVille Syrjälä 			DP_AUX_CHANNEL_MASK_INT_STATUS_G4X;
21130ba7c51aSVille Syrjälä 	else
21140ba7c51aSVille Syrjälä 		hotplug_status_mask = HOTPLUG_INT_STATUS_I915;
21150ba7c51aSVille Syrjälä 
21160ba7c51aSVille Syrjälä 	/*
21170ba7c51aSVille Syrjälä 	 * We absolutely have to clear all the pending interrupt
21180ba7c51aSVille Syrjälä 	 * bits in PORT_HOTPLUG_STAT. Otherwise the ISR port
21190ba7c51aSVille Syrjälä 	 * interrupt bit won't have an edge, and the i965/g4x
21200ba7c51aSVille Syrjälä 	 * edge triggered IIR will not notice that an interrupt
21210ba7c51aSVille Syrjälä 	 * is still pending. We can't use PORT_HOTPLUG_EN to
21220ba7c51aSVille Syrjälä 	 * guarantee the edge as the act of toggling the enable
21230ba7c51aSVille Syrjälä 	 * bits can itself generate a new hotplug interrupt :(
21240ba7c51aSVille Syrjälä 	 */
21250ba7c51aSVille Syrjälä 	for (i = 0; i < 10; i++) {
21260ba7c51aSVille Syrjälä 		u32 tmp = I915_READ(PORT_HOTPLUG_STAT) & hotplug_status_mask;
21270ba7c51aSVille Syrjälä 
21280ba7c51aSVille Syrjälä 		if (tmp == 0)
21290ba7c51aSVille Syrjälä 			return hotplug_status;
21300ba7c51aSVille Syrjälä 
21310ba7c51aSVille Syrjälä 		hotplug_status |= tmp;
21323ff60f89SOscar Mateo 		I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
21330ba7c51aSVille Syrjälä 	}
21340ba7c51aSVille Syrjälä 
21350ba7c51aSVille Syrjälä 	WARN_ONCE(1,
21360ba7c51aSVille Syrjälä 		  "PORT_HOTPLUG_STAT did not clear (0x%08x)\n",
21370ba7c51aSVille Syrjälä 		  I915_READ(PORT_HOTPLUG_STAT));
21381ae3c34cSVille Syrjälä 
21391ae3c34cSVille Syrjälä 	return hotplug_status;
21401ae3c34cSVille Syrjälä }
21411ae3c34cSVille Syrjälä 
214291d14251STvrtko Ursulin static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv,
21431ae3c34cSVille Syrjälä 				 u32 hotplug_status)
21441ae3c34cSVille Syrjälä {
21451ae3c34cSVille Syrjälä 	u32 pin_mask = 0, long_mask = 0;
21463ff60f89SOscar Mateo 
214791d14251STvrtko Ursulin 	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
214891d14251STvrtko Ursulin 	    IS_CHERRYVIEW(dev_priv)) {
214916c6c56bSVille Syrjälä 		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
215016c6c56bSVille Syrjälä 
215158f2cf24SVille Syrjälä 		if (hotplug_trigger) {
2152cf53902fSRodrigo Vivi 			intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2153cf53902fSRodrigo Vivi 					   hotplug_trigger, hotplug_trigger,
2154cf53902fSRodrigo Vivi 					   hpd_status_g4x,
2155fd63e2a9SImre Deak 					   i9xx_port_hotplug_long_detect);
215658f2cf24SVille Syrjälä 
215791d14251STvrtko Ursulin 			intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
215858f2cf24SVille Syrjälä 		}
2159369712e8SJani Nikula 
2160369712e8SJani Nikula 		if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
216191d14251STvrtko Ursulin 			dp_aux_irq_handler(dev_priv);
216216c6c56bSVille Syrjälä 	} else {
216316c6c56bSVille Syrjälä 		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
216416c6c56bSVille Syrjälä 
216558f2cf24SVille Syrjälä 		if (hotplug_trigger) {
2166cf53902fSRodrigo Vivi 			intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2167cf53902fSRodrigo Vivi 					   hotplug_trigger, hotplug_trigger,
2168cf53902fSRodrigo Vivi 					   hpd_status_i915,
2169fd63e2a9SImre Deak 					   i9xx_port_hotplug_long_detect);
217091d14251STvrtko Ursulin 			intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
217116c6c56bSVille Syrjälä 		}
21723ff60f89SOscar Mateo 	}
217358f2cf24SVille Syrjälä }
217416c6c56bSVille Syrjälä 
2175c1874ed7SImre Deak static irqreturn_t valleyview_irq_handler(int irq, void *arg)
2176c1874ed7SImre Deak {
217745a83f84SDaniel Vetter 	struct drm_device *dev = arg;
2178fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
2179c1874ed7SImre Deak 	irqreturn_t ret = IRQ_NONE;
2180c1874ed7SImre Deak 
21812dd2a883SImre Deak 	if (!intel_irqs_enabled(dev_priv))
21822dd2a883SImre Deak 		return IRQ_NONE;
21832dd2a883SImre Deak 
21841f814dacSImre Deak 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
21851f814dacSImre Deak 	disable_rpm_wakeref_asserts(dev_priv);
21861f814dacSImre Deak 
21871e1cace9SVille Syrjälä 	do {
21886e814800SVille Syrjälä 		u32 iir, gt_iir, pm_iir;
21892ecb8ca4SVille Syrjälä 		u32 pipe_stats[I915_MAX_PIPES] = {};
21901ae3c34cSVille Syrjälä 		u32 hotplug_status = 0;
2191a5e485a9SVille Syrjälä 		u32 ier = 0;
21923ff60f89SOscar Mateo 
2193c1874ed7SImre Deak 		gt_iir = I915_READ(GTIIR);
2194c1874ed7SImre Deak 		pm_iir = I915_READ(GEN6_PMIIR);
21953ff60f89SOscar Mateo 		iir = I915_READ(VLV_IIR);
2196c1874ed7SImre Deak 
2197c1874ed7SImre Deak 		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
21981e1cace9SVille Syrjälä 			break;
2199c1874ed7SImre Deak 
2200c1874ed7SImre Deak 		ret = IRQ_HANDLED;
2201c1874ed7SImre Deak 
2202a5e485a9SVille Syrjälä 		/*
2203a5e485a9SVille Syrjälä 		 * Theory on interrupt generation, based on empirical evidence:
2204a5e485a9SVille Syrjälä 		 *
2205a5e485a9SVille Syrjälä 		 * x = ((VLV_IIR & VLV_IER) ||
2206a5e485a9SVille Syrjälä 		 *      (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) &&
2207a5e485a9SVille Syrjälä 		 *       (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE)));
2208a5e485a9SVille Syrjälä 		 *
2209a5e485a9SVille Syrjälä 		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
2210a5e485a9SVille Syrjälä 		 * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to
2211a5e485a9SVille Syrjälä 		 * guarantee the CPU interrupt will be raised again even if we
2212a5e485a9SVille Syrjälä 		 * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR
2213a5e485a9SVille Syrjälä 		 * bits this time around.
2214a5e485a9SVille Syrjälä 		 */
22154a0a0202SVille Syrjälä 		I915_WRITE(VLV_MASTER_IER, 0);
2216a5e485a9SVille Syrjälä 		ier = I915_READ(VLV_IER);
2217a5e485a9SVille Syrjälä 		I915_WRITE(VLV_IER, 0);
22184a0a0202SVille Syrjälä 
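		/*
		 * Ack the GT and PM interrupts now; they are processed
		 * further down, after the master interrupt has been
		 * re-enabled.
		 */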
22194a0a0202SVille Syrjälä 		if (gt_iir)
22204a0a0202SVille Syrjälä 			I915_WRITE(GTIIR, gt_iir);
22214a0a0202SVille Syrjälä 		if (pm_iir)
22224a0a0202SVille Syrjälä 			I915_WRITE(GEN6_PMIIR, pm_iir);
22234a0a0202SVille Syrjälä 
22247ce4d1f2SVille Syrjälä 		if (iir & I915_DISPLAY_PORT_INTERRUPT)
22251ae3c34cSVille Syrjälä 			hotplug_status = i9xx_hpd_irq_ack(dev_priv);
22267ce4d1f2SVille Syrjälä 
22273ff60f89SOscar Mateo 		/* Call regardless, as some status bits might not be
22283ff60f89SOscar Mateo 		 * signalled in iir */
2229eb64343cSVille Syrjälä 		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
22307ce4d1f2SVille Syrjälä 
2231eef57324SJerome Anand 		if (iir & (I915_LPE_PIPE_A_INTERRUPT |
2232eef57324SJerome Anand 			   I915_LPE_PIPE_B_INTERRUPT))
2233eef57324SJerome Anand 			intel_lpe_audio_irq_handler(dev_priv);
2234eef57324SJerome Anand 
22357ce4d1f2SVille Syrjälä 		/*
22367ce4d1f2SVille Syrjälä 		 * VLV_IIR is single buffered, and reflects the level
22377ce4d1f2SVille Syrjälä 		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
22387ce4d1f2SVille Syrjälä 		 */
22397ce4d1f2SVille Syrjälä 		if (iir)
22407ce4d1f2SVille Syrjälä 			I915_WRITE(VLV_IIR, iir);
22414a0a0202SVille Syrjälä 
2242a5e485a9SVille Syrjälä 		I915_WRITE(VLV_IER, ier);
22434a0a0202SVille Syrjälä 		I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
22441ae3c34cSVille Syrjälä 
224552894874SVille Syrjälä 		if (gt_iir)
2246261e40b8SVille Syrjälä 			snb_gt_irq_handler(dev_priv, gt_iir);
224752894874SVille Syrjälä 		if (pm_iir)
224852894874SVille Syrjälä 			gen6_rps_irq_handler(dev_priv, pm_iir);
224952894874SVille Syrjälä 
22501ae3c34cSVille Syrjälä 		if (hotplug_status)
225191d14251STvrtko Ursulin 			i9xx_hpd_irq_handler(dev_priv, hotplug_status);
22522ecb8ca4SVille Syrjälä 
225391d14251STvrtko Ursulin 		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
22541e1cace9SVille Syrjälä 	} while (0);
22557e231dbeSJesse Barnes 
22561f814dacSImre Deak 	enable_rpm_wakeref_asserts(dev_priv);
22571f814dacSImre Deak 
22587e231dbeSJesse Barnes 	return ret;
22597e231dbeSJesse Barnes }
22607e231dbeSJesse Barnes 
226143f328d7SVille Syrjälä static irqreturn_t cherryview_irq_handler(int irq, void *arg)
226243f328d7SVille Syrjälä {
226345a83f84SDaniel Vetter 	struct drm_device *dev = arg;
2264fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
226543f328d7SVille Syrjälä 	irqreturn_t ret = IRQ_NONE;
226643f328d7SVille Syrjälä 
22672dd2a883SImre Deak 	if (!intel_irqs_enabled(dev_priv))
22682dd2a883SImre Deak 		return IRQ_NONE;
22692dd2a883SImre Deak 
22701f814dacSImre Deak 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
22711f814dacSImre Deak 	disable_rpm_wakeref_asserts(dev_priv);
22721f814dacSImre Deak 
2273579de73bSChris Wilson 	do {
22746e814800SVille Syrjälä 		u32 master_ctl, iir;
22752ecb8ca4SVille Syrjälä 		u32 pipe_stats[I915_MAX_PIPES] = {};
22761ae3c34cSVille Syrjälä 		u32 hotplug_status = 0;
2277f0fd96f5SChris Wilson 		u32 gt_iir[4];
2278a5e485a9SVille Syrjälä 		u32 ier = 0;
2279a5e485a9SVille Syrjälä 
22808e5fd599SVille Syrjälä 		master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
22813278f67fSVille Syrjälä 		iir = I915_READ(VLV_IIR);
22823278f67fSVille Syrjälä 
22833278f67fSVille Syrjälä 		if (master_ctl == 0 && iir == 0)
22848e5fd599SVille Syrjälä 			break;
228543f328d7SVille Syrjälä 
228627b6c122SOscar Mateo 		ret = IRQ_HANDLED;
228727b6c122SOscar Mateo 
2288a5e485a9SVille Syrjälä 		/*
2289a5e485a9SVille Syrjälä 		 * Theory on interrupt generation, based on empirical evidence:
2290a5e485a9SVille Syrjälä 		 *
2291a5e485a9SVille Syrjälä 		 * x = ((VLV_IIR & VLV_IER) ||
2292a5e485a9SVille Syrjälä 		 *      ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) &&
2293a5e485a9SVille Syrjälä 		 *       (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL)));
2294a5e485a9SVille Syrjälä 		 *
2295a5e485a9SVille Syrjälä 		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
2296a5e485a9SVille Syrjälä 		 * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to
2297a5e485a9SVille Syrjälä 		 * guarantee the CPU interrupt will be raised again even if we
2298a5e485a9SVille Syrjälä 		 * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL
2299a5e485a9SVille Syrjälä 		 * bits this time around.
2300a5e485a9SVille Syrjälä 		 */
230143f328d7SVille Syrjälä 		I915_WRITE(GEN8_MASTER_IRQ, 0);
2302a5e485a9SVille Syrjälä 		ier = I915_READ(VLV_IER);
2303a5e485a9SVille Syrjälä 		I915_WRITE(VLV_IER, 0);
230443f328d7SVille Syrjälä 
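		/*
		 * Ack the GT interrupts now; they are processed further
		 * down, after the master interrupt has been re-enabled.
		 */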
2305e30e251aSVille Syrjälä 		gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);
230627b6c122SOscar Mateo 
230727b6c122SOscar Mateo 		if (iir & I915_DISPLAY_PORT_INTERRUPT)
23081ae3c34cSVille Syrjälä 			hotplug_status = i9xx_hpd_irq_ack(dev_priv);
230943f328d7SVille Syrjälä 
231027b6c122SOscar Mateo 		/* Call regardless, as some status bits might not be
231127b6c122SOscar Mateo 		 * signalled in iir */
2312eb64343cSVille Syrjälä 		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
231343f328d7SVille Syrjälä 
2314eef57324SJerome Anand 		if (iir & (I915_LPE_PIPE_A_INTERRUPT |
2315eef57324SJerome Anand 			   I915_LPE_PIPE_B_INTERRUPT |
2316eef57324SJerome Anand 			   I915_LPE_PIPE_C_INTERRUPT))
2317eef57324SJerome Anand 			intel_lpe_audio_irq_handler(dev_priv);
2318eef57324SJerome Anand 
23197ce4d1f2SVille Syrjälä 		/*
23207ce4d1f2SVille Syrjälä 		 * VLV_IIR is single buffered, and reflects the level
23217ce4d1f2SVille Syrjälä 		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
23227ce4d1f2SVille Syrjälä 		 */
23237ce4d1f2SVille Syrjälä 		if (iir)
23247ce4d1f2SVille Syrjälä 			I915_WRITE(VLV_IIR, iir);
23257ce4d1f2SVille Syrjälä 
2326a5e485a9SVille Syrjälä 		I915_WRITE(VLV_IER, ier);
2327e5328c43SVille Syrjälä 		I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
23281ae3c34cSVille Syrjälä 
2329f0fd96f5SChris Wilson 		gen8_gt_irq_handler(dev_priv, master_ctl, gt_iir);
2330e30e251aSVille Syrjälä 
23311ae3c34cSVille Syrjälä 		if (hotplug_status)
233291d14251STvrtko Ursulin 			i9xx_hpd_irq_handler(dev_priv, hotplug_status);
23332ecb8ca4SVille Syrjälä 
233491d14251STvrtko Ursulin 		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
2335579de73bSChris Wilson 	} while (0);
23363278f67fSVille Syrjälä 
23371f814dacSImre Deak 	enable_rpm_wakeref_asserts(dev_priv);
23381f814dacSImre Deak 
233943f328d7SVille Syrjälä 	return ret;
234043f328d7SVille Syrjälä }
234143f328d7SVille Syrjälä 
234291d14251STvrtko Ursulin static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
234391d14251STvrtko Ursulin 				u32 hotplug_trigger,
234440e56410SVille Syrjälä 				const u32 hpd[HPD_NUM_PINS])
2345776ad806SJesse Barnes {
234642db67d6SVille Syrjälä 	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2347776ad806SJesse Barnes 
23486a39d7c9SJani Nikula 	/*
23496a39d7c9SJani Nikula 	 * Somehow the PCH doesn't seem to really ack the interrupt to the CPU
23506a39d7c9SJani Nikula 	 * unless we touch the hotplug register, even if hotplug_trigger is
23516a39d7c9SJani Nikula 	 * zero. Not acking leads to "The master control interrupt lied (SDE)!"
23526a39d7c9SJani Nikula 	 * errors.
23536a39d7c9SJani Nikula 	 */
235413cf5504SDave Airlie 	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
23556a39d7c9SJani Nikula 	if (!hotplug_trigger) {
23566a39d7c9SJani Nikula 		u32 mask = PORTA_HOTPLUG_STATUS_MASK |
23576a39d7c9SJani Nikula 			PORTD_HOTPLUG_STATUS_MASK |
23586a39d7c9SJani Nikula 			PORTC_HOTPLUG_STATUS_MASK |
23596a39d7c9SJani Nikula 			PORTB_HOTPLUG_STATUS_MASK;
23606a39d7c9SJani Nikula 		dig_hotplug_reg &= ~mask;
23616a39d7c9SJani Nikula 	}
23626a39d7c9SJani Nikula 
236313cf5504SDave Airlie 	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
23646a39d7c9SJani Nikula 	if (!hotplug_trigger)
23656a39d7c9SJani Nikula 		return;
236613cf5504SDave Airlie 
2367cf53902fSRodrigo Vivi 	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger,
236840e56410SVille Syrjälä 			   dig_hotplug_reg, hpd,
2369fd63e2a9SImre Deak 			   pch_port_hotplug_long_detect);
237040e56410SVille Syrjälä 
237191d14251STvrtko Ursulin 	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2372aaf5ec2eSSonika Jindal }
237391d131d2SDaniel Vetter 
237491d14251STvrtko Ursulin static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
237540e56410SVille Syrjälä {
237640e56410SVille Syrjälä 	int pipe;
237740e56410SVille Syrjälä 	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
237840e56410SVille Syrjälä 
237991d14251STvrtko Ursulin 	ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ibx);
238040e56410SVille Syrjälä 
2381cfc33bf7SVille Syrjälä 	if (pch_iir & SDE_AUDIO_POWER_MASK) {
2382cfc33bf7SVille Syrjälä 		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
2383776ad806SJesse Barnes 			       SDE_AUDIO_POWER_SHIFT);
2384cfc33bf7SVille Syrjälä 		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
2385cfc33bf7SVille Syrjälä 				 port_name(port));
2386cfc33bf7SVille Syrjälä 	}
2387776ad806SJesse Barnes 
2388ce99c256SDaniel Vetter 	if (pch_iir & SDE_AUX_MASK)
238991d14251STvrtko Ursulin 		dp_aux_irq_handler(dev_priv);
2390ce99c256SDaniel Vetter 
2391776ad806SJesse Barnes 	if (pch_iir & SDE_GMBUS)
239291d14251STvrtko Ursulin 		gmbus_irq_handler(dev_priv);
2393776ad806SJesse Barnes 
2394776ad806SJesse Barnes 	if (pch_iir & SDE_AUDIO_HDCP_MASK)
2395776ad806SJesse Barnes 		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
2396776ad806SJesse Barnes 
2397776ad806SJesse Barnes 	if (pch_iir & SDE_AUDIO_TRANS_MASK)
2398776ad806SJesse Barnes 		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
2399776ad806SJesse Barnes 
2400776ad806SJesse Barnes 	if (pch_iir & SDE_POISON)
2401776ad806SJesse Barnes 		DRM_ERROR("PCH poison interrupt\n");
2402776ad806SJesse Barnes 
24039db4a9c7SJesse Barnes 	if (pch_iir & SDE_FDI_MASK)
2404055e393fSDamien Lespiau 		for_each_pipe(dev_priv, pipe)
24059db4a9c7SJesse Barnes 			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
24069db4a9c7SJesse Barnes 					 pipe_name(pipe),
24079db4a9c7SJesse Barnes 					 I915_READ(FDI_RX_IIR(pipe)));
2408776ad806SJesse Barnes 
2409776ad806SJesse Barnes 	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
2410776ad806SJesse Barnes 		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
2411776ad806SJesse Barnes 
2412776ad806SJesse Barnes 	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
2413776ad806SJesse Barnes 		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
2414776ad806SJesse Barnes 
2415776ad806SJesse Barnes 	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
2416a2196033SMatthias Kaehlcke 		intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A);
24178664281bSPaulo Zanoni 
24188664281bSPaulo Zanoni 	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
2419a2196033SMatthias Kaehlcke 		intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_B);
24208664281bSPaulo Zanoni }
24218664281bSPaulo Zanoni 
242291d14251STvrtko Ursulin static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
24238664281bSPaulo Zanoni {
24248664281bSPaulo Zanoni 	u32 err_int = I915_READ(GEN7_ERR_INT);
24255a69b89fSDaniel Vetter 	enum pipe pipe;
24268664281bSPaulo Zanoni 
2427de032bf4SPaulo Zanoni 	if (err_int & ERR_INT_POISON)
2428de032bf4SPaulo Zanoni 		DRM_ERROR("Poison interrupt\n");
2429de032bf4SPaulo Zanoni 
2430055e393fSDamien Lespiau 	for_each_pipe(dev_priv, pipe) {
24311f7247c0SDaniel Vetter 		if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
24321f7247c0SDaniel Vetter 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
24338664281bSPaulo Zanoni 
24345a69b89fSDaniel Vetter 		if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
243591d14251STvrtko Ursulin 			if (IS_IVYBRIDGE(dev_priv))
243691d14251STvrtko Ursulin 				ivb_pipe_crc_irq_handler(dev_priv, pipe);
24375a69b89fSDaniel Vetter 			else
243891d14251STvrtko Ursulin 				hsw_pipe_crc_irq_handler(dev_priv, pipe);
24395a69b89fSDaniel Vetter 		}
24405a69b89fSDaniel Vetter 	}
24418bf1e9f1SShuang He 
24428664281bSPaulo Zanoni 	I915_WRITE(GEN7_ERR_INT, err_int);
24438664281bSPaulo Zanoni }
24448664281bSPaulo Zanoni 
244591d14251STvrtko Ursulin static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
24468664281bSPaulo Zanoni {
24478664281bSPaulo Zanoni 	u32 serr_int = I915_READ(SERR_INT);
244845c1cd87SMika Kahola 	enum pipe pipe;
24498664281bSPaulo Zanoni 
2450de032bf4SPaulo Zanoni 	if (serr_int & SERR_INT_POISON)
2451de032bf4SPaulo Zanoni 		DRM_ERROR("PCH poison interrupt\n");
2452de032bf4SPaulo Zanoni 
245345c1cd87SMika Kahola 	for_each_pipe(dev_priv, pipe)
245445c1cd87SMika Kahola 		if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe))
245545c1cd87SMika Kahola 			intel_pch_fifo_underrun_irq_handler(dev_priv, pipe);
24568664281bSPaulo Zanoni 
24578664281bSPaulo Zanoni 	I915_WRITE(SERR_INT, serr_int);
2458776ad806SJesse Barnes }
2459776ad806SJesse Barnes 
246091d14251STvrtko Ursulin static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
246123e81d69SAdam Jackson {
246223e81d69SAdam Jackson 	int pipe;
24636dbf30ceSVille Syrjälä 	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
2464aaf5ec2eSSonika Jindal 
246591d14251STvrtko Ursulin 	ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_cpt);
246691d131d2SDaniel Vetter 
2467cfc33bf7SVille Syrjälä 	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
2468cfc33bf7SVille Syrjälä 		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
246923e81d69SAdam Jackson 			       SDE_AUDIO_POWER_SHIFT_CPT);
2470cfc33bf7SVille Syrjälä 		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
2471cfc33bf7SVille Syrjälä 				 port_name(port));
2472cfc33bf7SVille Syrjälä 	}
247323e81d69SAdam Jackson 
247423e81d69SAdam Jackson 	if (pch_iir & SDE_AUX_MASK_CPT)
247591d14251STvrtko Ursulin 		dp_aux_irq_handler(dev_priv);
247623e81d69SAdam Jackson 
247723e81d69SAdam Jackson 	if (pch_iir & SDE_GMBUS_CPT)
247891d14251STvrtko Ursulin 		gmbus_irq_handler(dev_priv);
247923e81d69SAdam Jackson 
248023e81d69SAdam Jackson 	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
248123e81d69SAdam Jackson 		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
248223e81d69SAdam Jackson 
248323e81d69SAdam Jackson 	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
248423e81d69SAdam Jackson 		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
248523e81d69SAdam Jackson 
248623e81d69SAdam Jackson 	if (pch_iir & SDE_FDI_MASK_CPT)
2487055e393fSDamien Lespiau 		for_each_pipe(dev_priv, pipe)
248823e81d69SAdam Jackson 			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
248923e81d69SAdam Jackson 					 pipe_name(pipe),
249023e81d69SAdam Jackson 					 I915_READ(FDI_RX_IIR(pipe)));
24918664281bSPaulo Zanoni 
24928664281bSPaulo Zanoni 	if (pch_iir & SDE_ERROR_CPT)
249391d14251STvrtko Ursulin 		cpt_serr_int_handler(dev_priv);
249423e81d69SAdam Jackson }
249523e81d69SAdam Jackson 
249631604222SAnusha Srivatsa static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
249731604222SAnusha Srivatsa {
249831604222SAnusha Srivatsa 	u32 ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_ICP;
249931604222SAnusha Srivatsa 	u32 tc_hotplug_trigger = pch_iir & SDE_TC_MASK_ICP;
250031604222SAnusha Srivatsa 	u32 pin_mask = 0, long_mask = 0;
250131604222SAnusha Srivatsa 
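	/*
	 * DDI and Type-C hotplug triggers are acked and decoded separately,
	 * each with its own long pulse detection helper.
	 */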
250231604222SAnusha Srivatsa 	if (ddi_hotplug_trigger) {
250331604222SAnusha Srivatsa 		u32 dig_hotplug_reg;
250431604222SAnusha Srivatsa 
250531604222SAnusha Srivatsa 		dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_DDI);
250631604222SAnusha Srivatsa 		I915_WRITE(SHOTPLUG_CTL_DDI, dig_hotplug_reg);
250731604222SAnusha Srivatsa 
250831604222SAnusha Srivatsa 		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
250931604222SAnusha Srivatsa 				   ddi_hotplug_trigger,
251031604222SAnusha Srivatsa 				   dig_hotplug_reg, hpd_icp,
251131604222SAnusha Srivatsa 				   icp_ddi_port_hotplug_long_detect);
251231604222SAnusha Srivatsa 	}
251331604222SAnusha Srivatsa 
251431604222SAnusha Srivatsa 	if (tc_hotplug_trigger) {
251531604222SAnusha Srivatsa 		u32 dig_hotplug_reg;
251631604222SAnusha Srivatsa 
251731604222SAnusha Srivatsa 		dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_TC);
251831604222SAnusha Srivatsa 		I915_WRITE(SHOTPLUG_CTL_TC, dig_hotplug_reg);
251931604222SAnusha Srivatsa 
252031604222SAnusha Srivatsa 		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
252131604222SAnusha Srivatsa 				   tc_hotplug_trigger,
252231604222SAnusha Srivatsa 				   dig_hotplug_reg, hpd_icp,
252331604222SAnusha Srivatsa 				   icp_tc_port_hotplug_long_detect);
252431604222SAnusha Srivatsa 	}
252531604222SAnusha Srivatsa 
252631604222SAnusha Srivatsa 	if (pin_mask)
252731604222SAnusha Srivatsa 		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
252831604222SAnusha Srivatsa 
252931604222SAnusha Srivatsa 	if (pch_iir & SDE_GMBUS_ICP)
253031604222SAnusha Srivatsa 		gmbus_irq_handler(dev_priv);
253131604222SAnusha Srivatsa }
253231604222SAnusha Srivatsa 
253391d14251STvrtko Ursulin static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
25346dbf30ceSVille Syrjälä {
25356dbf30ceSVille Syrjälä 	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
25366dbf30ceSVille Syrjälä 		~SDE_PORTE_HOTPLUG_SPT;
25376dbf30ceSVille Syrjälä 	u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
25386dbf30ceSVille Syrjälä 	u32 pin_mask = 0, long_mask = 0;
25396dbf30ceSVille Syrjälä 
25406dbf30ceSVille Syrjälä 	if (hotplug_trigger) {
25416dbf30ceSVille Syrjälä 		u32 dig_hotplug_reg;
25426dbf30ceSVille Syrjälä 
25436dbf30ceSVille Syrjälä 		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
25446dbf30ceSVille Syrjälä 		I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
25456dbf30ceSVille Syrjälä 
2546cf53902fSRodrigo Vivi 		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2547cf53902fSRodrigo Vivi 				   hotplug_trigger, dig_hotplug_reg, hpd_spt,
254874c0b395SVille Syrjälä 				   spt_port_hotplug_long_detect);
25496dbf30ceSVille Syrjälä 	}
25506dbf30ceSVille Syrjälä 
25516dbf30ceSVille Syrjälä 	if (hotplug2_trigger) {
25526dbf30ceSVille Syrjälä 		u32 dig_hotplug_reg;
25536dbf30ceSVille Syrjälä 
25546dbf30ceSVille Syrjälä 		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2);
25556dbf30ceSVille Syrjälä 		I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg);
25566dbf30ceSVille Syrjälä 
2557cf53902fSRodrigo Vivi 		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2558cf53902fSRodrigo Vivi 				   hotplug2_trigger, dig_hotplug_reg, hpd_spt,
25596dbf30ceSVille Syrjälä 				   spt_port_hotplug2_long_detect);
25606dbf30ceSVille Syrjälä 	}
25616dbf30ceSVille Syrjälä 
25626dbf30ceSVille Syrjälä 	if (pin_mask)
256391d14251STvrtko Ursulin 		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
25646dbf30ceSVille Syrjälä 
25656dbf30ceSVille Syrjälä 	if (pch_iir & SDE_GMBUS_CPT)
256691d14251STvrtko Ursulin 		gmbus_irq_handler(dev_priv);
25676dbf30ceSVille Syrjälä }
25686dbf30ceSVille Syrjälä 
256991d14251STvrtko Ursulin static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv,
257091d14251STvrtko Ursulin 				u32 hotplug_trigger,
257140e56410SVille Syrjälä 				const u32 hpd[HPD_NUM_PINS])
2572c008bc6eSPaulo Zanoni {
2573e4ce95aaSVille Syrjälä 	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2574e4ce95aaSVille Syrjälä 
2575e4ce95aaSVille Syrjälä 	dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
2576e4ce95aaSVille Syrjälä 	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);
2577e4ce95aaSVille Syrjälä 
2578cf53902fSRodrigo Vivi 	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger,
257940e56410SVille Syrjälä 			   dig_hotplug_reg, hpd,
2580e4ce95aaSVille Syrjälä 			   ilk_port_hotplug_long_detect);
258140e56410SVille Syrjälä 
258291d14251STvrtko Ursulin 	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2583e4ce95aaSVille Syrjälä }
2584c008bc6eSPaulo Zanoni 
258591d14251STvrtko Ursulin static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
258691d14251STvrtko Ursulin 				    u32 de_iir)
258740e56410SVille Syrjälä {
258840e56410SVille Syrjälä 	enum pipe pipe;
258940e56410SVille Syrjälä 	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;
259040e56410SVille Syrjälä 
259140e56410SVille Syrjälä 	if (hotplug_trigger)
259291d14251STvrtko Ursulin 		ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ilk);
259340e56410SVille Syrjälä 
2594c008bc6eSPaulo Zanoni 	if (de_iir & DE_AUX_CHANNEL_A)
259591d14251STvrtko Ursulin 		dp_aux_irq_handler(dev_priv);
2596c008bc6eSPaulo Zanoni 
2597c008bc6eSPaulo Zanoni 	if (de_iir & DE_GSE)
259891d14251STvrtko Ursulin 		intel_opregion_asle_intr(dev_priv);
2599c008bc6eSPaulo Zanoni 
2600c008bc6eSPaulo Zanoni 	if (de_iir & DE_POISON)
2601c008bc6eSPaulo Zanoni 		DRM_ERROR("Poison interrupt\n");
2602c008bc6eSPaulo Zanoni 
2603055e393fSDamien Lespiau 	for_each_pipe(dev_priv, pipe) {
2604fd3a4024SDaniel Vetter 		if (de_iir & DE_PIPE_VBLANK(pipe))
2605fd3a4024SDaniel Vetter 			drm_handle_vblank(&dev_priv->drm, pipe);
2606c008bc6eSPaulo Zanoni 
260740da17c2SDaniel Vetter 		if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
26081f7247c0SDaniel Vetter 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2609c008bc6eSPaulo Zanoni 
261040da17c2SDaniel Vetter 		if (de_iir & DE_PIPE_CRC_DONE(pipe))
261191d14251STvrtko Ursulin 			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
2612c008bc6eSPaulo Zanoni 	}
2613c008bc6eSPaulo Zanoni 
2614c008bc6eSPaulo Zanoni 	/* check event from PCH */
2615c008bc6eSPaulo Zanoni 	if (de_iir & DE_PCH_EVENT) {
2616c008bc6eSPaulo Zanoni 		u32 pch_iir = I915_READ(SDEIIR);
2617c008bc6eSPaulo Zanoni 
261891d14251STvrtko Ursulin 		if (HAS_PCH_CPT(dev_priv))
261991d14251STvrtko Ursulin 			cpt_irq_handler(dev_priv, pch_iir);
2620c008bc6eSPaulo Zanoni 		else
262191d14251STvrtko Ursulin 			ibx_irq_handler(dev_priv, pch_iir);
2622c008bc6eSPaulo Zanoni 
2623c008bc6eSPaulo Zanoni 		/* should clear the PCH hotplug event before clearing the CPU irq */
2624c008bc6eSPaulo Zanoni 		I915_WRITE(SDEIIR, pch_iir);
2625c008bc6eSPaulo Zanoni 	}
2626c008bc6eSPaulo Zanoni 
2627cf819effSLucas De Marchi 	if (IS_GEN(dev_priv, 5) && de_iir & DE_PCU_EVENT)
262891d14251STvrtko Ursulin 		ironlake_rps_change_irq_handler(dev_priv);
2629c008bc6eSPaulo Zanoni }
2630c008bc6eSPaulo Zanoni 
263191d14251STvrtko Ursulin static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
263291d14251STvrtko Ursulin 				    u32 de_iir)
26339719fb98SPaulo Zanoni {
263407d27e20SDamien Lespiau 	enum pipe pipe;
263523bb4cb5SVille Syrjälä 	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;
263623bb4cb5SVille Syrjälä 
263740e56410SVille Syrjälä 	if (hotplug_trigger)
263891d14251STvrtko Ursulin 		ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ivb);
26399719fb98SPaulo Zanoni 
26409719fb98SPaulo Zanoni 	if (de_iir & DE_ERR_INT_IVB)
264191d14251STvrtko Ursulin 		ivb_err_int_handler(dev_priv);
26429719fb98SPaulo Zanoni 
264354fd3149SDhinakaran Pandiyan 	if (de_iir & DE_EDP_PSR_INT_HSW) {
264454fd3149SDhinakaran Pandiyan 		u32 psr_iir = I915_READ(EDP_PSR_IIR);
264554fd3149SDhinakaran Pandiyan 
264654fd3149SDhinakaran Pandiyan 		intel_psr_irq_handler(dev_priv, psr_iir);
264754fd3149SDhinakaran Pandiyan 		I915_WRITE(EDP_PSR_IIR, psr_iir);
264854fd3149SDhinakaran Pandiyan 	}
2649fc340442SDaniel Vetter 
26509719fb98SPaulo Zanoni 	if (de_iir & DE_AUX_CHANNEL_A_IVB)
265191d14251STvrtko Ursulin 		dp_aux_irq_handler(dev_priv);
26529719fb98SPaulo Zanoni 
26539719fb98SPaulo Zanoni 	if (de_iir & DE_GSE_IVB)
265491d14251STvrtko Ursulin 		intel_opregion_asle_intr(dev_priv);
26559719fb98SPaulo Zanoni 
2656055e393fSDamien Lespiau 	for_each_pipe(dev_priv, pipe) {
2657fd3a4024SDaniel Vetter 		if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)))
2658fd3a4024SDaniel Vetter 			drm_handle_vblank(&dev_priv->drm, pipe);
26599719fb98SPaulo Zanoni 	}
26609719fb98SPaulo Zanoni 
26619719fb98SPaulo Zanoni 	/* check event from PCH */
266291d14251STvrtko Ursulin 	if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) {
26639719fb98SPaulo Zanoni 		u32 pch_iir = I915_READ(SDEIIR);
26649719fb98SPaulo Zanoni 
266591d14251STvrtko Ursulin 		cpt_irq_handler(dev_priv, pch_iir);
26669719fb98SPaulo Zanoni 
26679719fb98SPaulo Zanoni 		/* clear PCH hotplug event before clear CPU irq */
26689719fb98SPaulo Zanoni 		/* clear the PCH hotplug event before clearing the CPU irq */
26699719fb98SPaulo Zanoni 	}
26709719fb98SPaulo Zanoni }
26719719fb98SPaulo Zanoni 
267272c90f62SOscar Mateo /*
267372c90f62SOscar Mateo  * To handle irqs with the minimum potential races with fresh interrupts, we:
267472c90f62SOscar Mateo  * 1 - Disable Master Interrupt Control.
267572c90f62SOscar Mateo  * 2 - Find the source(s) of the interrupt.
267672c90f62SOscar Mateo  * 3 - Clear the Interrupt Identity bits (IIR).
267772c90f62SOscar Mateo  * 4 - Process the interrupt(s) that had bits set in the IIRs.
267872c90f62SOscar Mateo  * 5 - Re-enable Master Interrupt Control.
267972c90f62SOscar Mateo  */
2680f1af8fc1SPaulo Zanoni static irqreturn_t ironlake_irq_handler(int irq, void *arg)
2681b1f14ad0SJesse Barnes {
268245a83f84SDaniel Vetter 	struct drm_device *dev = arg;
2683fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
2684f1af8fc1SPaulo Zanoni 	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
26850e43406bSChris Wilson 	irqreturn_t ret = IRQ_NONE;
2686b1f14ad0SJesse Barnes 
26872dd2a883SImre Deak 	if (!intel_irqs_enabled(dev_priv))
26882dd2a883SImre Deak 		return IRQ_NONE;
26892dd2a883SImre Deak 
26901f814dacSImre Deak 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
26911f814dacSImre Deak 	disable_rpm_wakeref_asserts(dev_priv);
26921f814dacSImre Deak 
2693b1f14ad0SJesse Barnes 	/* disable master interrupt before clearing iir  */
2694b1f14ad0SJesse Barnes 	de_ier = I915_READ(DEIER);
2695b1f14ad0SJesse Barnes 	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
26960e43406bSChris Wilson 
269744498aeaSPaulo Zanoni 	/* Disable south interrupts. We'll only write to SDEIIR once, so further
269944498aeaSPaulo Zanoni 	 * interrupts will be stored on its back queue, and then we'll be
269944498aeaSPaulo Zanoni 	 * able to process them after we restore SDEIER (as soon as we restore
270044498aeaSPaulo Zanoni 	 * it, we'll get an interrupt if SDEIIR still has something to process
270144498aeaSPaulo Zanoni 	 * due to its back queue). */
270291d14251STvrtko Ursulin 	if (!HAS_PCH_NOP(dev_priv)) {
270344498aeaSPaulo Zanoni 		sde_ier = I915_READ(SDEIER);
270444498aeaSPaulo Zanoni 		I915_WRITE(SDEIER, 0);
2705ab5c608bSBen Widawsky 	}
270644498aeaSPaulo Zanoni 
270772c90f62SOscar Mateo 	/* Find, clear, then process each source of interrupt */
270872c90f62SOscar Mateo 
27090e43406bSChris Wilson 	gt_iir = I915_READ(GTIIR);
27100e43406bSChris Wilson 	if (gt_iir) {
271172c90f62SOscar Mateo 		I915_WRITE(GTIIR, gt_iir);
271272c90f62SOscar Mateo 		ret = IRQ_HANDLED;
271391d14251STvrtko Ursulin 		if (INTEL_GEN(dev_priv) >= 6)
2714261e40b8SVille Syrjälä 			snb_gt_irq_handler(dev_priv, gt_iir);
2715d8fc8a47SPaulo Zanoni 		else
2716261e40b8SVille Syrjälä 			ilk_gt_irq_handler(dev_priv, gt_iir);
27170e43406bSChris Wilson 	}
2718b1f14ad0SJesse Barnes 
2719b1f14ad0SJesse Barnes 	de_iir = I915_READ(DEIIR);
27200e43406bSChris Wilson 	if (de_iir) {
272172c90f62SOscar Mateo 		I915_WRITE(DEIIR, de_iir);
272272c90f62SOscar Mateo 		ret = IRQ_HANDLED;
272391d14251STvrtko Ursulin 		if (INTEL_GEN(dev_priv) >= 7)
272491d14251STvrtko Ursulin 			ivb_display_irq_handler(dev_priv, de_iir);
2725f1af8fc1SPaulo Zanoni 		else
272691d14251STvrtko Ursulin 			ilk_display_irq_handler(dev_priv, de_iir);
27270e43406bSChris Wilson 	}
27280e43406bSChris Wilson 
272991d14251STvrtko Ursulin 	if (INTEL_GEN(dev_priv) >= 6) {
2730f1af8fc1SPaulo Zanoni 		u32 pm_iir = I915_READ(GEN6_PMIIR);
27310e43406bSChris Wilson 		if (pm_iir) {
2732b1f14ad0SJesse Barnes 			I915_WRITE(GEN6_PMIIR, pm_iir);
27330e43406bSChris Wilson 			ret = IRQ_HANDLED;
273472c90f62SOscar Mateo 			gen6_rps_irq_handler(dev_priv, pm_iir);
27350e43406bSChris Wilson 		}
2736f1af8fc1SPaulo Zanoni 	}
2737b1f14ad0SJesse Barnes 
2738b1f14ad0SJesse Barnes 	I915_WRITE(DEIER, de_ier);
273974093f3eSChris Wilson 	if (!HAS_PCH_NOP(dev_priv))
274044498aeaSPaulo Zanoni 		I915_WRITE(SDEIER, sde_ier);
2741b1f14ad0SJesse Barnes 
27421f814dacSImre Deak 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
27431f814dacSImre Deak 	enable_rpm_wakeref_asserts(dev_priv);
27441f814dacSImre Deak 
2745b1f14ad0SJesse Barnes 	return ret;
2746b1f14ad0SJesse Barnes }
2747b1f14ad0SJesse Barnes 
274891d14251STvrtko Ursulin static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv,
274991d14251STvrtko Ursulin 				u32 hotplug_trigger,
275040e56410SVille Syrjälä 				const u32 hpd[HPD_NUM_PINS])
2751d04a492dSShashank Sharma {
2752cebd87a0SVille Syrjälä 	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2753d04a492dSShashank Sharma 
2754a52bb15bSVille Syrjälä 	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2755a52bb15bSVille Syrjälä 	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2756d04a492dSShashank Sharma 
2757cf53902fSRodrigo Vivi 	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger,
275840e56410SVille Syrjälä 			   dig_hotplug_reg, hpd,
2759cebd87a0SVille Syrjälä 			   bxt_port_hotplug_long_detect);
276040e56410SVille Syrjälä 
276191d14251STvrtko Ursulin 	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2762d04a492dSShashank Sharma }
2763d04a492dSShashank Sharma 
2764121e758eSDhinakaran Pandiyan static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
2765121e758eSDhinakaran Pandiyan {
2766121e758eSDhinakaran Pandiyan 	u32 pin_mask = 0, long_mask = 0;
2767b796b971SDhinakaran Pandiyan 	u32 trigger_tc = iir & GEN11_DE_TC_HOTPLUG_MASK;
2768b796b971SDhinakaran Pandiyan 	u32 trigger_tbt = iir & GEN11_DE_TBT_HOTPLUG_MASK;
2769121e758eSDhinakaran Pandiyan 
2770121e758eSDhinakaran Pandiyan 	if (trigger_tc) {
2771b796b971SDhinakaran Pandiyan 		u32 dig_hotplug_reg;
2772b796b971SDhinakaran Pandiyan 
2773121e758eSDhinakaran Pandiyan 		dig_hotplug_reg = I915_READ(GEN11_TC_HOTPLUG_CTL);
2774121e758eSDhinakaran Pandiyan 		I915_WRITE(GEN11_TC_HOTPLUG_CTL, dig_hotplug_reg);
2775121e758eSDhinakaran Pandiyan 
2776121e758eSDhinakaran Pandiyan 		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tc,
2777b796b971SDhinakaran Pandiyan 				   dig_hotplug_reg, hpd_gen11,
2778121e758eSDhinakaran Pandiyan 				   gen11_port_hotplug_long_detect);
2779121e758eSDhinakaran Pandiyan 	}
2780b796b971SDhinakaran Pandiyan 
2781b796b971SDhinakaran Pandiyan 	if (trigger_tbt) {
2782b796b971SDhinakaran Pandiyan 		u32 dig_hotplug_reg;
2783b796b971SDhinakaran Pandiyan 
2784b796b971SDhinakaran Pandiyan 		dig_hotplug_reg = I915_READ(GEN11_TBT_HOTPLUG_CTL);
2785b796b971SDhinakaran Pandiyan 		I915_WRITE(GEN11_TBT_HOTPLUG_CTL, dig_hotplug_reg);
2786b796b971SDhinakaran Pandiyan 
2787b796b971SDhinakaran Pandiyan 		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tbt,
2788b796b971SDhinakaran Pandiyan 				   dig_hotplug_reg, hpd_gen11,
2789b796b971SDhinakaran Pandiyan 				   gen11_port_hotplug_long_detect);
2790b796b971SDhinakaran Pandiyan 	}
2791b796b971SDhinakaran Pandiyan 
2792b796b971SDhinakaran Pandiyan 	if (pin_mask)
2793b796b971SDhinakaran Pandiyan 		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2794b796b971SDhinakaran Pandiyan 	else
2795b796b971SDhinakaran Pandiyan 		DRM_ERROR("Unexpected DE HPD interrupt 0x%08x\n", iir);
2796121e758eSDhinakaran Pandiyan }
2797121e758eSDhinakaran Pandiyan 
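/*
 * AUX channel interrupt bits present in GEN8_DE_PORT_IIR on the
 * current platform.
 */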
27989d17210fSLucas De Marchi static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
27999d17210fSLucas De Marchi {
28009d17210fSLucas De Marchi 	u32 mask = GEN8_AUX_CHANNEL_A;
28019d17210fSLucas De Marchi 
28029d17210fSLucas De Marchi 	if (INTEL_GEN(dev_priv) >= 9)
28039d17210fSLucas De Marchi 		mask |= GEN9_AUX_CHANNEL_B |
28049d17210fSLucas De Marchi 			GEN9_AUX_CHANNEL_C |
28059d17210fSLucas De Marchi 			GEN9_AUX_CHANNEL_D;
28069d17210fSLucas De Marchi 
28079d17210fSLucas De Marchi 	if (IS_CNL_WITH_PORT_F(dev_priv))
28089d17210fSLucas De Marchi 		mask |= CNL_AUX_CHANNEL_F;
28099d17210fSLucas De Marchi 
28109d17210fSLucas De Marchi 	if (INTEL_GEN(dev_priv) >= 11)
28119d17210fSLucas De Marchi 		mask |= ICL_AUX_CHANNEL_E |
28129d17210fSLucas De Marchi 			CNL_AUX_CHANNEL_F;
28139d17210fSLucas De Marchi 
28149d17210fSLucas De Marchi 	return mask;
28159d17210fSLucas De Marchi }
28169d17210fSLucas De Marchi 
2817f11a0f46STvrtko Ursulin static irqreturn_t
2818f11a0f46STvrtko Ursulin gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
2819abd58f01SBen Widawsky {
2820abd58f01SBen Widawsky 	irqreturn_t ret = IRQ_NONE;
2821f11a0f46STvrtko Ursulin 	u32 iir;
2822c42664ccSDaniel Vetter 	enum pipe pipe;
282388e04703SJesse Barnes 
2824abd58f01SBen Widawsky 	if (master_ctl & GEN8_DE_MISC_IRQ) {
2825e32192e1STvrtko Ursulin 		iir = I915_READ(GEN8_DE_MISC_IIR);
2826e32192e1STvrtko Ursulin 		if (iir) {
2827e04f7eceSVille Syrjälä 			bool found = false;
2828e04f7eceSVille Syrjälä 
2829e32192e1STvrtko Ursulin 			I915_WRITE(GEN8_DE_MISC_IIR, iir);
2830abd58f01SBen Widawsky 			ret = IRQ_HANDLED;
2831e04f7eceSVille Syrjälä 
2832e04f7eceSVille Syrjälä 			if (iir & GEN8_DE_MISC_GSE) {
283391d14251STvrtko Ursulin 				intel_opregion_asle_intr(dev_priv);
2834e04f7eceSVille Syrjälä 				found = true;
2835e04f7eceSVille Syrjälä 			}
2836e04f7eceSVille Syrjälä 
2837e04f7eceSVille Syrjälä 			if (iir & GEN8_DE_EDP_PSR) {
283854fd3149SDhinakaran Pandiyan 				u32 psr_iir = I915_READ(EDP_PSR_IIR);
283954fd3149SDhinakaran Pandiyan 
284054fd3149SDhinakaran Pandiyan 				intel_psr_irq_handler(dev_priv, psr_iir);
284154fd3149SDhinakaran Pandiyan 				I915_WRITE(EDP_PSR_IIR, psr_iir);
2842e04f7eceSVille Syrjälä 				found = true;
2843e04f7eceSVille Syrjälä 			}
2844e04f7eceSVille Syrjälä 
2845e04f7eceSVille Syrjälä 			if (!found)
284638cc46d7SOscar Mateo 				DRM_ERROR("Unexpected DE Misc interrupt\n");
2847abd58f01SBen Widawsky 		} else
284938cc46d7SOscar Mateo 			DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
2850abd58f01SBen Widawsky 	}
2851abd58f01SBen Widawsky 
2852121e758eSDhinakaran Pandiyan 	if (INTEL_GEN(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) {
2853121e758eSDhinakaran Pandiyan 		iir = I915_READ(GEN11_DE_HPD_IIR);
2854121e758eSDhinakaran Pandiyan 		if (iir) {
2855121e758eSDhinakaran Pandiyan 			I915_WRITE(GEN11_DE_HPD_IIR, iir);
2856121e758eSDhinakaran Pandiyan 			ret = IRQ_HANDLED;
2857121e758eSDhinakaran Pandiyan 			gen11_hpd_irq_handler(dev_priv, iir);
2858121e758eSDhinakaran Pandiyan 		} else {
2859121e758eSDhinakaran Pandiyan 			DRM_ERROR("The master control interrupt lied (DE HPD)!\n");
2860121e758eSDhinakaran Pandiyan 		}
2861121e758eSDhinakaran Pandiyan 	}
2862121e758eSDhinakaran Pandiyan 
28636d766f02SDaniel Vetter 	if (master_ctl & GEN8_DE_PORT_IRQ) {
2864e32192e1STvrtko Ursulin 		iir = I915_READ(GEN8_DE_PORT_IIR);
2865e32192e1STvrtko Ursulin 		if (iir) {
2866e32192e1STvrtko Ursulin 			u32 tmp_mask;
2867d04a492dSShashank Sharma 			bool found = false;
2868cebd87a0SVille Syrjälä 
2869e32192e1STvrtko Ursulin 			I915_WRITE(GEN8_DE_PORT_IIR, iir);
28706d766f02SDaniel Vetter 			ret = IRQ_HANDLED;
287188e04703SJesse Barnes 
28729d17210fSLucas De Marchi 			if (iir & gen8_de_port_aux_mask(dev_priv)) {
287391d14251STvrtko Ursulin 				dp_aux_irq_handler(dev_priv);
2874d04a492dSShashank Sharma 				found = true;
2875d04a492dSShashank Sharma 			}
2876d04a492dSShashank Sharma 
2877cc3f90f0SAnder Conselvan de Oliveira 			if (IS_GEN9_LP(dev_priv)) {
2878e32192e1STvrtko Ursulin 				tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK;
2879e32192e1STvrtko Ursulin 				if (tmp_mask) {
288091d14251STvrtko Ursulin 					bxt_hpd_irq_handler(dev_priv, tmp_mask,
288191d14251STvrtko Ursulin 							    hpd_bxt);
2882d04a492dSShashank Sharma 					found = true;
2883d04a492dSShashank Sharma 				}
2884e32192e1STvrtko Ursulin 			} else if (IS_BROADWELL(dev_priv)) {
2885e32192e1STvrtko Ursulin 				tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG;
2886e32192e1STvrtko Ursulin 				if (tmp_mask) {
288791d14251STvrtko Ursulin 					ilk_hpd_irq_handler(dev_priv,
288891d14251STvrtko Ursulin 							    tmp_mask, hpd_bdw);
2889e32192e1STvrtko Ursulin 					found = true;
2890e32192e1STvrtko Ursulin 				}
2891e32192e1STvrtko Ursulin 			}
2892d04a492dSShashank Sharma 
2893cc3f90f0SAnder Conselvan de Oliveira 			if (IS_GEN9_LP(dev_priv) && (iir & BXT_DE_PORT_GMBUS)) {
289491d14251STvrtko Ursulin 				gmbus_irq_handler(dev_priv);
28959e63743eSShashank Sharma 				found = true;
28969e63743eSShashank Sharma 			}
28979e63743eSShashank Sharma 
2898d04a492dSShashank Sharma 			if (!found)
289938cc46d7SOscar Mateo 				DRM_ERROR("Unexpected DE Port interrupt\n");
29006d766f02SDaniel Vetter 		}
290138cc46d7SOscar Mateo 		else
290238cc46d7SOscar Mateo 			DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
29036d766f02SDaniel Vetter 	}
29046d766f02SDaniel Vetter 
2905055e393fSDamien Lespiau 	for_each_pipe(dev_priv, pipe) {
2906fd3a4024SDaniel Vetter 		u32 fault_errors;
2907abd58f01SBen Widawsky 
2908c42664ccSDaniel Vetter 		if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
2909c42664ccSDaniel Vetter 			continue;
2910c42664ccSDaniel Vetter 
2911e32192e1STvrtko Ursulin 		iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
2912e32192e1STvrtko Ursulin 		if (!iir) {
2913e32192e1STvrtko Ursulin 			DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
2914e32192e1STvrtko Ursulin 			continue;
2915e32192e1STvrtko Ursulin 		}
2916770de83dSDamien Lespiau 
2917e32192e1STvrtko Ursulin 		ret = IRQ_HANDLED;
2918e32192e1STvrtko Ursulin 		I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir);
2919e32192e1STvrtko Ursulin 
2920fd3a4024SDaniel Vetter 		if (iir & GEN8_PIPE_VBLANK)
2921fd3a4024SDaniel Vetter 			drm_handle_vblank(&dev_priv->drm, pipe);
2922abd58f01SBen Widawsky 
2923e32192e1STvrtko Ursulin 		if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
292491d14251STvrtko Ursulin 			hsw_pipe_crc_irq_handler(dev_priv, pipe);
29250fbe7870SDaniel Vetter 
2926e32192e1STvrtko Ursulin 		if (iir & GEN8_PIPE_FIFO_UNDERRUN)
2927e32192e1STvrtko Ursulin 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
292838d83c96SDaniel Vetter 
2929e32192e1STvrtko Ursulin 		fault_errors = iir;
2930bca2bf2aSPandiyan, Dhinakaran 		if (INTEL_GEN(dev_priv) >= 9)
2931e32192e1STvrtko Ursulin 			fault_errors &= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
2932770de83dSDamien Lespiau 		else
2933e32192e1STvrtko Ursulin 			fault_errors &= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
2934770de83dSDamien Lespiau 
2935770de83dSDamien Lespiau 		if (fault_errors)
29361353ec38STvrtko Ursulin 			DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
293730100f2bSDaniel Vetter 				  pipe_name(pipe),
2938e32192e1STvrtko Ursulin 				  fault_errors);
2939abd58f01SBen Widawsky 	}
2940abd58f01SBen Widawsky 
294191d14251STvrtko Ursulin 	if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) &&
2942266ea3d9SShashank Sharma 	    master_ctl & GEN8_DE_PCH_IRQ) {
294392d03a80SDaniel Vetter 		/*
294492d03a80SDaniel Vetter 		 * FIXME(BDW): Assume for now that the new interrupt handling
294592d03a80SDaniel Vetter 		 * scheme also closed the SDE interrupt handling race we've seen
294692d03a80SDaniel Vetter 		 * on older pch-split platforms. But this needs testing.
294792d03a80SDaniel Vetter 		 */
2948e32192e1STvrtko Ursulin 		iir = I915_READ(SDEIIR);
2949e32192e1STvrtko Ursulin 		if (iir) {
2950e32192e1STvrtko Ursulin 			I915_WRITE(SDEIIR, iir);
295192d03a80SDaniel Vetter 			ret = IRQ_HANDLED;
29526dbf30ceSVille Syrjälä 
295329b43ae2SRodrigo Vivi 			if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
295431604222SAnusha Srivatsa 				icp_irq_handler(dev_priv, iir);
2955c6c30b91SRodrigo Vivi 			else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
295691d14251STvrtko Ursulin 				spt_irq_handler(dev_priv, iir);
29576dbf30ceSVille Syrjälä 			else
295891d14251STvrtko Ursulin 				cpt_irq_handler(dev_priv, iir);
29592dfb0b81SJani Nikula 		} else {
29602dfb0b81SJani Nikula 			/*
29612dfb0b81SJani Nikula 			 * Like on previous PCH there seems to be something
29622dfb0b81SJani Nikula 			 * fishy going on with forwarding PCH interrupts.
29632dfb0b81SJani Nikula 			 */
29642dfb0b81SJani Nikula 			DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n");
29652dfb0b81SJani Nikula 		}
296692d03a80SDaniel Vetter 	}
296792d03a80SDaniel Vetter 
2968f11a0f46STvrtko Ursulin 	return ret;
2969f11a0f46STvrtko Ursulin }
2970f11a0f46STvrtko Ursulin 
29714376b9c9SMika Kuoppala static inline u32 gen8_master_intr_disable(void __iomem * const regs)
29724376b9c9SMika Kuoppala {
29734376b9c9SMika Kuoppala 	raw_reg_write(regs, GEN8_MASTER_IRQ, 0);
29744376b9c9SMika Kuoppala 
29754376b9c9SMika Kuoppala 	/*
29764376b9c9SMika Kuoppala 	 * Now with master disabled, get a sample of level indications
29774376b9c9SMika Kuoppala 	 * for this interrupt. Indications will be cleared on related acks.
29784376b9c9SMika Kuoppala 	 * New indications can and will light up during processing,
29794376b9c9SMika Kuoppala 	 * and will generate a new interrupt after enabling the master.
29804376b9c9SMika Kuoppala 	 */
29814376b9c9SMika Kuoppala 	return raw_reg_read(regs, GEN8_MASTER_IRQ);
29824376b9c9SMika Kuoppala }
29834376b9c9SMika Kuoppala 
29844376b9c9SMika Kuoppala static inline void gen8_master_intr_enable(void __iomem * const regs)
29854376b9c9SMika Kuoppala {
29864376b9c9SMika Kuoppala 	raw_reg_write(regs, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
29874376b9c9SMika Kuoppala }
29884376b9c9SMika Kuoppala 
2989f11a0f46STvrtko Ursulin static irqreturn_t gen8_irq_handler(int irq, void *arg)
2990f11a0f46STvrtko Ursulin {
2991f0fd96f5SChris Wilson 	struct drm_i915_private *dev_priv = to_i915(arg);
299225286aacSDaniele Ceraolo Spurio 	void __iomem * const regs = dev_priv->uncore.regs;
2993f11a0f46STvrtko Ursulin 	u32 master_ctl;
2994f0fd96f5SChris Wilson 	u32 gt_iir[4];
2995f11a0f46STvrtko Ursulin 
2996f11a0f46STvrtko Ursulin 	if (!intel_irqs_enabled(dev_priv))
2997f11a0f46STvrtko Ursulin 		return IRQ_NONE;
2998f11a0f46STvrtko Ursulin 
29994376b9c9SMika Kuoppala 	master_ctl = gen8_master_intr_disable(regs);
30004376b9c9SMika Kuoppala 	if (!master_ctl) {
30014376b9c9SMika Kuoppala 		gen8_master_intr_enable(regs);
3002f11a0f46STvrtko Ursulin 		return IRQ_NONE;
30034376b9c9SMika Kuoppala 	}
3004f11a0f46STvrtko Ursulin 
3005f11a0f46STvrtko Ursulin 	/* Find, clear, then process each source of interrupt */
300655ef72f2SChris Wilson 	gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);
3007f0fd96f5SChris Wilson 
3008f0fd96f5SChris Wilson 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
3009f0fd96f5SChris Wilson 	if (master_ctl & ~GEN8_GT_IRQS) {
3010f0fd96f5SChris Wilson 		disable_rpm_wakeref_asserts(dev_priv);
301155ef72f2SChris Wilson 		gen8_de_irq_handler(dev_priv, master_ctl);
3012f0fd96f5SChris Wilson 		enable_rpm_wakeref_asserts(dev_priv);
3013f0fd96f5SChris Wilson 	}
3014f11a0f46STvrtko Ursulin 
30154376b9c9SMika Kuoppala 	gen8_master_intr_enable(regs);
3016abd58f01SBen Widawsky 
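	/* GT interrupts were acked above; process them with the master interrupt re-enabled */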
3017f0fd96f5SChris Wilson 	gen8_gt_irq_handler(dev_priv, master_ctl, gt_iir);
30181f814dacSImre Deak 
301955ef72f2SChris Wilson 	return IRQ_HANDLED;
3020abd58f01SBen Widawsky }
3021abd58f01SBen Widawsky 
302251951ae7SMika Kuoppala static u32
3023f744dbc2SMika Kuoppala gen11_gt_engine_identity(struct drm_i915_private * const i915,
302451951ae7SMika Kuoppala 			 const unsigned int bank, const unsigned int bit)
302551951ae7SMika Kuoppala {
302625286aacSDaniele Ceraolo Spurio 	void __iomem * const regs = i915->uncore.regs;
302751951ae7SMika Kuoppala 	u32 timeout_ts;
302851951ae7SMika Kuoppala 	u32 ident;
302951951ae7SMika Kuoppala 
303096606f3bSOscar Mateo 	lockdep_assert_held(&i915->irq_lock);
303196606f3bSOscar Mateo 
303251951ae7SMika Kuoppala 	raw_reg_write(regs, GEN11_IIR_REG_SELECTOR(bank), BIT(bit));
303351951ae7SMika Kuoppala 
303451951ae7SMika Kuoppala 	/*
303551951ae7SMika Kuoppala 	 * NB: Specs do not specify how long to spin wait,
303651951ae7SMika Kuoppala 	 * so we do ~100us as an educated guess.
303751951ae7SMika Kuoppala 	 */
303851951ae7SMika Kuoppala 	timeout_ts = (local_clock() >> 10) + 100;
303951951ae7SMika Kuoppala 	do {
304051951ae7SMika Kuoppala 		ident = raw_reg_read(regs, GEN11_INTR_IDENTITY_REG(bank));
304151951ae7SMika Kuoppala 	} while (!(ident & GEN11_INTR_DATA_VALID) &&
304251951ae7SMika Kuoppala 		 !time_after32(local_clock() >> 10, timeout_ts));
304351951ae7SMika Kuoppala 
304451951ae7SMika Kuoppala 	if (unlikely(!(ident & GEN11_INTR_DATA_VALID))) {
304551951ae7SMika Kuoppala 		DRM_ERROR("INTR_IDENTITY_REG%u:%u 0x%08x not valid!\n",
304651951ae7SMika Kuoppala 			  bank, bit, ident);
304751951ae7SMika Kuoppala 		return 0;
304851951ae7SMika Kuoppala 	}
304951951ae7SMika Kuoppala 
305051951ae7SMika Kuoppala 	raw_reg_write(regs, GEN11_INTR_IDENTITY_REG(bank),
305151951ae7SMika Kuoppala 		      GEN11_INTR_DATA_VALID);
305251951ae7SMika Kuoppala 
3053f744dbc2SMika Kuoppala 	return ident;
3054f744dbc2SMika Kuoppala }
3055f744dbc2SMika Kuoppala 
3056f744dbc2SMika Kuoppala static void
3057f744dbc2SMika Kuoppala gen11_other_irq_handler(struct drm_i915_private * const i915,
3058f744dbc2SMika Kuoppala 			const u8 instance, const u16 iir)
3059f744dbc2SMika Kuoppala {
3060*54c52a84SOscar Mateo 	if (instance == OTHER_GUC_INSTANCE)
3061*54c52a84SOscar Mateo 		return gen11_guc_irq_handler(i915, iir);
3062*54c52a84SOscar Mateo 
3063d02b98b8SOscar Mateo 	if (instance == OTHER_GTPM_INSTANCE)
3064a087bafeSMika Kuoppala 		return gen11_rps_irq_handler(i915, iir);
3065d02b98b8SOscar Mateo 
3066f744dbc2SMika Kuoppala 	WARN_ONCE(1, "unhandled other interrupt instance=0x%x, iir=0x%x\n",
3067f744dbc2SMika Kuoppala 		  instance, iir);
3068f744dbc2SMika Kuoppala }
3069f744dbc2SMika Kuoppala 
3070f744dbc2SMika Kuoppala static void
3071f744dbc2SMika Kuoppala gen11_engine_irq_handler(struct drm_i915_private * const i915,
3072f744dbc2SMika Kuoppala 			 const u8 class, const u8 instance, const u16 iir)
3073f744dbc2SMika Kuoppala {
3074f744dbc2SMika Kuoppala 	struct intel_engine_cs *engine;
3075f744dbc2SMika Kuoppala 
3076f744dbc2SMika Kuoppala 	if (instance <= MAX_ENGINE_INSTANCE)
3077f744dbc2SMika Kuoppala 		engine = i915->engine_class[class][instance];
3078f744dbc2SMika Kuoppala 	else
3079f744dbc2SMika Kuoppala 		engine = NULL;
3080f744dbc2SMika Kuoppala 
3081f744dbc2SMika Kuoppala 	if (likely(engine))
3082f744dbc2SMika Kuoppala 		return gen8_cs_irq_handler(engine, iir);
3083f744dbc2SMika Kuoppala 
3084f744dbc2SMika Kuoppala 	WARN_ONCE(1, "unhandled engine interrupt class=0x%x, instance=0x%x\n",
3085f744dbc2SMika Kuoppala 		  class, instance);
3086f744dbc2SMika Kuoppala }
3087f744dbc2SMika Kuoppala 
3088f744dbc2SMika Kuoppala static void
3089f744dbc2SMika Kuoppala gen11_gt_identity_handler(struct drm_i915_private * const i915,
3090f744dbc2SMika Kuoppala 			  const u32 identity)
3091f744dbc2SMika Kuoppala {
3092f744dbc2SMika Kuoppala 	const u8 class = GEN11_INTR_ENGINE_CLASS(identity);
3093f744dbc2SMika Kuoppala 	const u8 instance = GEN11_INTR_ENGINE_INSTANCE(identity);
3094f744dbc2SMika Kuoppala 	const u16 intr = GEN11_INTR_ENGINE_INTR(identity);
3095f744dbc2SMika Kuoppala 
3096f744dbc2SMika Kuoppala 	if (unlikely(!intr))
3097f744dbc2SMika Kuoppala 		return;
3098f744dbc2SMika Kuoppala 
3099f744dbc2SMika Kuoppala 	if (class <= COPY_ENGINE_CLASS)
3100f744dbc2SMika Kuoppala 		return gen11_engine_irq_handler(i915, class, instance, intr);
3101f744dbc2SMika Kuoppala 
3102f744dbc2SMika Kuoppala 	if (class == OTHER_CLASS)
3103f744dbc2SMika Kuoppala 		return gen11_other_irq_handler(i915, instance, intr);
3104f744dbc2SMika Kuoppala 
3105f744dbc2SMika Kuoppala 	WARN_ONCE(1, "unknown interrupt class=0x%x, instance=0x%x, intr=0x%x\n",
3106f744dbc2SMika Kuoppala 		  class, instance, intr);
310751951ae7SMika Kuoppala }
310851951ae7SMika Kuoppala 
310951951ae7SMika Kuoppala static void
311096606f3bSOscar Mateo gen11_gt_bank_handler(struct drm_i915_private * const i915,
311196606f3bSOscar Mateo 		      const unsigned int bank)
311251951ae7SMika Kuoppala {
311325286aacSDaniele Ceraolo Spurio 	void __iomem * const regs = i915->uncore.regs;
311451951ae7SMika Kuoppala 	unsigned long intr_dw;
311551951ae7SMika Kuoppala 	unsigned int bit;
311651951ae7SMika Kuoppala 
311796606f3bSOscar Mateo 	lockdep_assert_held(&i915->irq_lock);
311851951ae7SMika Kuoppala 
311951951ae7SMika Kuoppala 	intr_dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
312051951ae7SMika Kuoppala 
312151951ae7SMika Kuoppala 	for_each_set_bit(bit, &intr_dw, 32) {
31228455dad7SMika Kuoppala 		const u32 ident = gen11_gt_engine_identity(i915, bank, bit);
312351951ae7SMika Kuoppala 
3124f744dbc2SMika Kuoppala 		gen11_gt_identity_handler(i915, ident);
312551951ae7SMika Kuoppala 	}
312651951ae7SMika Kuoppala 
312751951ae7SMika Kuoppala 	/* Clear must be after shared has been served for engine */
312851951ae7SMika Kuoppala 	raw_reg_write(regs, GEN11_GT_INTR_DW(bank), intr_dw);
312951951ae7SMika Kuoppala }
313096606f3bSOscar Mateo 
313196606f3bSOscar Mateo static void
313296606f3bSOscar Mateo gen11_gt_irq_handler(struct drm_i915_private * const i915,
313396606f3bSOscar Mateo 		     const u32 master_ctl)
313496606f3bSOscar Mateo {
313596606f3bSOscar Mateo 	unsigned int bank;
313696606f3bSOscar Mateo 
313796606f3bSOscar Mateo 	spin_lock(&i915->irq_lock);
313896606f3bSOscar Mateo 
313996606f3bSOscar Mateo 	for (bank = 0; bank < 2; bank++) {
314096606f3bSOscar Mateo 		if (master_ctl & GEN11_GT_DW_IRQ(bank))
314196606f3bSOscar Mateo 			gen11_gt_bank_handler(i915, bank);
314296606f3bSOscar Mateo 	}
314396606f3bSOscar Mateo 
314496606f3bSOscar Mateo 	spin_unlock(&i915->irq_lock);
314551951ae7SMika Kuoppala }
314651951ae7SMika Kuoppala 
31477a909383SChris Wilson static u32
31487a909383SChris Wilson gen11_gu_misc_irq_ack(struct drm_i915_private *dev_priv, const u32 master_ctl)
3149df0d28c1SDhinakaran Pandiyan {
315025286aacSDaniele Ceraolo Spurio 	void __iomem * const regs = dev_priv->uncore.regs;
31517a909383SChris Wilson 	u32 iir;
3152df0d28c1SDhinakaran Pandiyan 
3153df0d28c1SDhinakaran Pandiyan 	if (!(master_ctl & GEN11_GU_MISC_IRQ))
31547a909383SChris Wilson 		return 0;
3155df0d28c1SDhinakaran Pandiyan 
31567a909383SChris Wilson 	iir = raw_reg_read(regs, GEN11_GU_MISC_IIR);
31577a909383SChris Wilson 	if (likely(iir))
31587a909383SChris Wilson 		raw_reg_write(regs, GEN11_GU_MISC_IIR, iir);
31597a909383SChris Wilson 
31607a909383SChris Wilson 	return iir;
3161df0d28c1SDhinakaran Pandiyan }
3162df0d28c1SDhinakaran Pandiyan 
3163df0d28c1SDhinakaran Pandiyan static void
31647a909383SChris Wilson gen11_gu_misc_irq_handler(struct drm_i915_private *dev_priv, const u32 iir)
3165df0d28c1SDhinakaran Pandiyan {
3166df0d28c1SDhinakaran Pandiyan 	if (iir & GEN11_GU_MISC_GSE)
3167df0d28c1SDhinakaran Pandiyan 		intel_opregion_asle_intr(dev_priv);
3168df0d28c1SDhinakaran Pandiyan }
3169df0d28c1SDhinakaran Pandiyan 
317081067b71SMika Kuoppala static inline u32 gen11_master_intr_disable(void __iomem * const regs)
317181067b71SMika Kuoppala {
317281067b71SMika Kuoppala 	raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0);
317381067b71SMika Kuoppala 
317481067b71SMika Kuoppala 	/*
317581067b71SMika Kuoppala 	 * Now, with the master disabled, take a sample of the level indications
317681067b71SMika Kuoppala 	 * for this interrupt. Indications will be cleared by the related acks.
317781067b71SMika Kuoppala 	 * New indications can and will light up during processing,
317881067b71SMika Kuoppala 	 * and will generate a new interrupt once the master is re-enabled.
317981067b71SMika Kuoppala 	 */
318081067b71SMika Kuoppala 	return raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
318181067b71SMika Kuoppala }
318281067b71SMika Kuoppala 
318381067b71SMika Kuoppala static inline void gen11_master_intr_enable(void __iomem * const regs)
318481067b71SMika Kuoppala {
318581067b71SMika Kuoppala 	raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ);
318681067b71SMika Kuoppala }
318781067b71SMika Kuoppala 
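/*
 * Top level gen11 interrupt handler: disable and sample the master IRQ,
 * service the GT banks, service the display engine (with the RPM wakeref
 * asserts suppressed), ack GU_MISC, re-enable the master and finally
 * process the GU_MISC bits that were acked earlier.
 */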
318851951ae7SMika Kuoppala static irqreturn_t gen11_irq_handler(int irq, void *arg)
318951951ae7SMika Kuoppala {
319051951ae7SMika Kuoppala 	struct drm_i915_private * const i915 = to_i915(arg);
319125286aacSDaniele Ceraolo Spurio 	void __iomem * const regs = i915->uncore.regs;
319251951ae7SMika Kuoppala 	u32 master_ctl;
3193df0d28c1SDhinakaran Pandiyan 	u32 gu_misc_iir;
319451951ae7SMika Kuoppala 
319551951ae7SMika Kuoppala 	if (!intel_irqs_enabled(i915))
319651951ae7SMika Kuoppala 		return IRQ_NONE;
319751951ae7SMika Kuoppala 
319881067b71SMika Kuoppala 	master_ctl = gen11_master_intr_disable(regs);
319981067b71SMika Kuoppala 	if (!master_ctl) {
320081067b71SMika Kuoppala 		gen11_master_intr_enable(regs);
320151951ae7SMika Kuoppala 		return IRQ_NONE;
320281067b71SMika Kuoppala 	}
320351951ae7SMika Kuoppala 
320451951ae7SMika Kuoppala 	/* Find, clear, then process each source of interrupt. */
320551951ae7SMika Kuoppala 	gen11_gt_irq_handler(i915, master_ctl);
320651951ae7SMika Kuoppala 
320751951ae7SMika Kuoppala 	/* IRQs are synced during runtime_suspend, so we don't require a wakeref */
320851951ae7SMika Kuoppala 	if (master_ctl & GEN11_DISPLAY_IRQ) {
320951951ae7SMika Kuoppala 		const u32 disp_ctl = raw_reg_read(regs, GEN11_DISPLAY_INT_CTL);
321051951ae7SMika Kuoppala 
321151951ae7SMika Kuoppala 		disable_rpm_wakeref_asserts(i915);
321251951ae7SMika Kuoppala 		/*
321351951ae7SMika Kuoppala 		 * GEN11_DISPLAY_INT_CTL has the same format as GEN8_MASTER_IRQ
321451951ae7SMika Kuoppala 		 * for the display-related bits.
321551951ae7SMika Kuoppala 		 */
321651951ae7SMika Kuoppala 		gen8_de_irq_handler(i915, disp_ctl);
321751951ae7SMika Kuoppala 		enable_rpm_wakeref_asserts(i915);
321851951ae7SMika Kuoppala 	}
321951951ae7SMika Kuoppala 
32207a909383SChris Wilson 	gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl);
3221df0d28c1SDhinakaran Pandiyan 
322281067b71SMika Kuoppala 	gen11_master_intr_enable(regs);
322351951ae7SMika Kuoppala 
32247a909383SChris Wilson 	gen11_gu_misc_irq_handler(i915, gu_misc_iir);
3225df0d28c1SDhinakaran Pandiyan 
322651951ae7SMika Kuoppala 	return IRQ_HANDLED;
322751951ae7SMika Kuoppala }
322851951ae7SMika Kuoppala 
322942f52ef8SKeith Packard /* Called from the generic drm code, passed 'crtc', which
323042f52ef8SKeith Packard  * we use as a pipe index
323142f52ef8SKeith Packard  */
323286e83e35SChris Wilson static int i8xx_enable_vblank(struct drm_device *dev, unsigned int pipe)
32330a3e67a4SJesse Barnes {
3234fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
3235e9d21d7fSKeith Packard 	unsigned long irqflags;
323671e0ffa5SJesse Barnes 
32371ec14ad3SChris Wilson 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
323886e83e35SChris Wilson 	i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
323986e83e35SChris Wilson 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
324086e83e35SChris Wilson 
324186e83e35SChris Wilson 	return 0;
324286e83e35SChris Wilson }
324386e83e35SChris Wilson 
3244d938da6bSVille Syrjälä static int i945gm_enable_vblank(struct drm_device *dev, unsigned int pipe)
3245d938da6bSVille Syrjälä {
3246d938da6bSVille Syrjälä 	struct drm_i915_private *dev_priv = to_i915(dev);
3247d938da6bSVille Syrjälä 
3248d938da6bSVille Syrjälä 	if (dev_priv->i945gm_vblank.enabled++ == 0)
3249d938da6bSVille Syrjälä 		schedule_work(&dev_priv->i945gm_vblank.work);
3250d938da6bSVille Syrjälä 
3251d938da6bSVille Syrjälä 	return i8xx_enable_vblank(dev, pipe);
3252d938da6bSVille Syrjälä }
3253d938da6bSVille Syrjälä 
325486e83e35SChris Wilson static int i965_enable_vblank(struct drm_device *dev, unsigned int pipe)
325586e83e35SChris Wilson {
325686e83e35SChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
325786e83e35SChris Wilson 	unsigned long irqflags;
325886e83e35SChris Wilson 
325986e83e35SChris Wilson 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
32607c463586SKeith Packard 	i915_enable_pipestat(dev_priv, pipe,
3261755e9019SImre Deak 			     PIPE_START_VBLANK_INTERRUPT_STATUS);
32621ec14ad3SChris Wilson 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
32638692d00eSChris Wilson 
32640a3e67a4SJesse Barnes 	return 0;
32650a3e67a4SJesse Barnes }
32660a3e67a4SJesse Barnes 
326788e72717SThierry Reding static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
3268f796cf8fSJesse Barnes {
3269fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
3270f796cf8fSJesse Barnes 	unsigned long irqflags;
3271a9c287c9SJani Nikula 	u32 bit = INTEL_GEN(dev_priv) >= 7 ?
327286e83e35SChris Wilson 		DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
3273f796cf8fSJesse Barnes 
3274f796cf8fSJesse Barnes 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3275fbdedaeaSVille Syrjälä 	ilk_enable_display_irq(dev_priv, bit);
3276b1f14ad0SJesse Barnes 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3277b1f14ad0SJesse Barnes 
32782e8bf223SDhinakaran Pandiyan 	/* Even though there is no DMC, the frame counter can get stuck when
32792e8bf223SDhinakaran Pandiyan 	 * PSR is active, as no frames are generated.
32802e8bf223SDhinakaran Pandiyan 	 */
32812e8bf223SDhinakaran Pandiyan 	if (HAS_PSR(dev_priv))
32822e8bf223SDhinakaran Pandiyan 		drm_vblank_restore(dev, pipe);
32832e8bf223SDhinakaran Pandiyan 
3284b1f14ad0SJesse Barnes 	return 0;
3285b1f14ad0SJesse Barnes }
3286b1f14ad0SJesse Barnes 
328788e72717SThierry Reding static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe)
3288abd58f01SBen Widawsky {
3289fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
3290abd58f01SBen Widawsky 	unsigned long irqflags;
3291abd58f01SBen Widawsky 
3292abd58f01SBen Widawsky 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3293013d3752SVille Syrjälä 	bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
3294abd58f01SBen Widawsky 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3295013d3752SVille Syrjälä 
32962e8bf223SDhinakaran Pandiyan 	/* Even if there is no DMC, the frame counter can get stuck when
32972e8bf223SDhinakaran Pandiyan 	 * PSR is active, as no frames are generated, so check only for PSR.
32982e8bf223SDhinakaran Pandiyan 	 */
32992e8bf223SDhinakaran Pandiyan 	if (HAS_PSR(dev_priv))
33002e8bf223SDhinakaran Pandiyan 		drm_vblank_restore(dev, pipe);
33012e8bf223SDhinakaran Pandiyan 
3302abd58f01SBen Widawsky 	return 0;
3303abd58f01SBen Widawsky }
3304abd58f01SBen Widawsky 
330542f52ef8SKeith Packard /* Called from the generic drm code, passed 'crtc', which
330642f52ef8SKeith Packard  * we use as a pipe index
330742f52ef8SKeith Packard  */
330886e83e35SChris Wilson static void i8xx_disable_vblank(struct drm_device *dev, unsigned int pipe)
330986e83e35SChris Wilson {
331086e83e35SChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
331186e83e35SChris Wilson 	unsigned long irqflags;
331286e83e35SChris Wilson 
331386e83e35SChris Wilson 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
331486e83e35SChris Wilson 	i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
331586e83e35SChris Wilson 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
331686e83e35SChris Wilson }
331786e83e35SChris Wilson 
3318d938da6bSVille Syrjälä static void i945gm_disable_vblank(struct drm_device *dev, unsigned int pipe)
3319d938da6bSVille Syrjälä {
3320d938da6bSVille Syrjälä 	struct drm_i915_private *dev_priv = to_i915(dev);
3321d938da6bSVille Syrjälä 
3322d938da6bSVille Syrjälä 	i8xx_disable_vblank(dev, pipe);
3323d938da6bSVille Syrjälä 
3324d938da6bSVille Syrjälä 	if (--dev_priv->i945gm_vblank.enabled == 0)
3325d938da6bSVille Syrjälä 		schedule_work(&dev_priv->i945gm_vblank.work);
3326d938da6bSVille Syrjälä }
3327d938da6bSVille Syrjälä 
332886e83e35SChris Wilson static void i965_disable_vblank(struct drm_device *dev, unsigned int pipe)
33290a3e67a4SJesse Barnes {
3330fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
3331e9d21d7fSKeith Packard 	unsigned long irqflags;
33320a3e67a4SJesse Barnes 
33331ec14ad3SChris Wilson 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
33347c463586SKeith Packard 	i915_disable_pipestat(dev_priv, pipe,
3335755e9019SImre Deak 			      PIPE_START_VBLANK_INTERRUPT_STATUS);
33361ec14ad3SChris Wilson 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
33370a3e67a4SJesse Barnes }
33380a3e67a4SJesse Barnes 
333988e72717SThierry Reding static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe)
3340f796cf8fSJesse Barnes {
3341fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
3342f796cf8fSJesse Barnes 	unsigned long irqflags;
3343a9c287c9SJani Nikula 	u32 bit = INTEL_GEN(dev_priv) >= 7 ?
334486e83e35SChris Wilson 		DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
3345f796cf8fSJesse Barnes 
3346f796cf8fSJesse Barnes 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3347fbdedaeaSVille Syrjälä 	ilk_disable_display_irq(dev_priv, bit);
3348b1f14ad0SJesse Barnes 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3349b1f14ad0SJesse Barnes }
3350b1f14ad0SJesse Barnes 
335188e72717SThierry Reding static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
3352abd58f01SBen Widawsky {
3353fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
3354abd58f01SBen Widawsky 	unsigned long irqflags;
3355abd58f01SBen Widawsky 
3356abd58f01SBen Widawsky 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3357013d3752SVille Syrjälä 	bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
3358abd58f01SBen Widawsky 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3359abd58f01SBen Widawsky }
3360abd58f01SBen Widawsky 
3361d938da6bSVille Syrjälä static void i945gm_vblank_work_func(struct work_struct *work)
3362d938da6bSVille Syrjälä {
3363d938da6bSVille Syrjälä 	struct drm_i915_private *dev_priv =
3364d938da6bSVille Syrjälä 		container_of(work, struct drm_i915_private, i945gm_vblank.work);
3365d938da6bSVille Syrjälä 
3366d938da6bSVille Syrjälä 	/*
3367d938da6bSVille Syrjälä 	 * Vblank interrupts fail to wake up the device from C3,
3368d938da6bSVille Syrjälä 	 * hence we want to prevent C3 usage while vblank interrupts
3369d938da6bSVille Syrjälä 	 * are enabled.
3370d938da6bSVille Syrjälä 	 */
3371d938da6bSVille Syrjälä 	pm_qos_update_request(&dev_priv->i945gm_vblank.pm_qos,
3372d938da6bSVille Syrjälä 			      READ_ONCE(dev_priv->i945gm_vblank.enabled) ?
3373d938da6bSVille Syrjälä 			      dev_priv->i945gm_vblank.c3_disable_latency :
3374d938da6bSVille Syrjälä 			      PM_QOS_DEFAULT_VALUE);
3375d938da6bSVille Syrjälä }
3376d938da6bSVille Syrjälä 
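/*
 * Return a CPU latency constraint just below the exit latency of the
 * named cpuidle state, so that requesting it via PM QoS keeps the
 * governor from selecting that state (or anything deeper). Returns 0 if
 * the cpuidle driver or the state cannot be found.
 */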
3377d938da6bSVille Syrjälä static int cstate_disable_latency(const char *name)
3378d938da6bSVille Syrjälä {
3379d938da6bSVille Syrjälä 	const struct cpuidle_driver *drv;
3380d938da6bSVille Syrjälä 	int i;
3381d938da6bSVille Syrjälä 
3382d938da6bSVille Syrjälä 	drv = cpuidle_get_driver();
3383d938da6bSVille Syrjälä 	if (!drv)
3384d938da6bSVille Syrjälä 		return 0;
3385d938da6bSVille Syrjälä 
3386d938da6bSVille Syrjälä 	for (i = 0; i < drv->state_count; i++) {
3387d938da6bSVille Syrjälä 		const struct cpuidle_state *state = &drv->states[i];
3388d938da6bSVille Syrjälä 
3389d938da6bSVille Syrjälä 		if (!strcmp(state->name, name))
3390d938da6bSVille Syrjälä 			return state->exit_latency ?
3391d938da6bSVille Syrjälä 				state->exit_latency - 1 : 0;
3392d938da6bSVille Syrjälä 	}
3393d938da6bSVille Syrjälä 
3394d938da6bSVille Syrjälä 	return 0;
3395d938da6bSVille Syrjälä }
3396d938da6bSVille Syrjälä 
3397d938da6bSVille Syrjälä static void i945gm_vblank_work_init(struct drm_i915_private *dev_priv)
3398d938da6bSVille Syrjälä {
3399d938da6bSVille Syrjälä 	INIT_WORK(&dev_priv->i945gm_vblank.work,
3400d938da6bSVille Syrjälä 		  i945gm_vblank_work_func);
3401d938da6bSVille Syrjälä 
3402d938da6bSVille Syrjälä 	dev_priv->i945gm_vblank.c3_disable_latency =
3403d938da6bSVille Syrjälä 		cstate_disable_latency("C3");
3404d938da6bSVille Syrjälä 	pm_qos_add_request(&dev_priv->i945gm_vblank.pm_qos,
3405d938da6bSVille Syrjälä 			   PM_QOS_CPU_DMA_LATENCY,
3406d938da6bSVille Syrjälä 			   PM_QOS_DEFAULT_VALUE);
3407d938da6bSVille Syrjälä }
3408d938da6bSVille Syrjälä 
3409d938da6bSVille Syrjälä static void i945gm_vblank_work_fini(struct drm_i915_private *dev_priv)
3410d938da6bSVille Syrjälä {
3411d938da6bSVille Syrjälä 	cancel_work_sync(&dev_priv->i945gm_vblank.work);
3412d938da6bSVille Syrjälä 	pm_qos_remove_request(&dev_priv->i945gm_vblank.pm_qos);
3413d938da6bSVille Syrjälä }
3414d938da6bSVille Syrjälä 
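/*
 * The GEN3_IRQ_RESET()/GEN3_IRQ_INIT() and GEN8_IRQ_*_NDX() helpers used
 * below are defined elsewhere in the driver; roughly, a reset masks the
 * block's IMR, disables its IER and clears its IIR, while an init checks
 * that the IIR is clear and then programs the requested IMR/IER values.
 */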
3415b243f530STvrtko Ursulin static void ibx_irq_reset(struct drm_i915_private *dev_priv)
341691738a95SPaulo Zanoni {
3417b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
3418b16b2a2fSPaulo Zanoni 
34196e266956STvrtko Ursulin 	if (HAS_PCH_NOP(dev_priv))
342091738a95SPaulo Zanoni 		return;
342191738a95SPaulo Zanoni 
3422b16b2a2fSPaulo Zanoni 	GEN3_IRQ_RESET(uncore, SDE);
3423105b122eSPaulo Zanoni 
34246e266956STvrtko Ursulin 	if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
3425105b122eSPaulo Zanoni 		I915_WRITE(SERR_INT, 0xffffffff);
3426622364b6SPaulo Zanoni }
3427105b122eSPaulo Zanoni 
342891738a95SPaulo Zanoni /*
3429622364b6SPaulo Zanoni  * SDEIER is also touched by the interrupt handler to work around missed PCH
3430622364b6SPaulo Zanoni  * interrupts. Hence we can't update it after the interrupt handler is enabled -
3431622364b6SPaulo Zanoni  * instead we unconditionally enable all PCH interrupt sources here, but then
3432622364b6SPaulo Zanoni  * only unmask them as needed with SDEIMR.
3433622364b6SPaulo Zanoni  *
3434622364b6SPaulo Zanoni  * This function needs to be called before interrupts are enabled.
343591738a95SPaulo Zanoni  */
3436622364b6SPaulo Zanoni static void ibx_irq_pre_postinstall(struct drm_device *dev)
3437622364b6SPaulo Zanoni {
3438fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
3439622364b6SPaulo Zanoni 
34406e266956STvrtko Ursulin 	if (HAS_PCH_NOP(dev_priv))
3441622364b6SPaulo Zanoni 		return;
3442622364b6SPaulo Zanoni 
3443622364b6SPaulo Zanoni 	WARN_ON(I915_READ(SDEIER) != 0);
344491738a95SPaulo Zanoni 	I915_WRITE(SDEIER, 0xffffffff);
344591738a95SPaulo Zanoni 	POSTING_READ(SDEIER);
344691738a95SPaulo Zanoni }
344791738a95SPaulo Zanoni 
3448b243f530STvrtko Ursulin static void gen5_gt_irq_reset(struct drm_i915_private *dev_priv)
3449d18ea1b5SDaniel Vetter {
3450b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
3451b16b2a2fSPaulo Zanoni 
3452b16b2a2fSPaulo Zanoni 	GEN3_IRQ_RESET(uncore, GT);
3453b243f530STvrtko Ursulin 	if (INTEL_GEN(dev_priv) >= 6)
3454b16b2a2fSPaulo Zanoni 		GEN3_IRQ_RESET(uncore, GEN6_PM);
3455d18ea1b5SDaniel Vetter }
3456d18ea1b5SDaniel Vetter 
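/*
 * Quiesce the VLV/CHV display interrupts: clear the DPINVGTT status,
 * drop all hotplug enables, reset the pipestat logic and the VLV_
 * IMR/IER/IIR block, and leave dev_priv->irq_mask fully masked.
 */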
345770591a41SVille Syrjälä static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
345870591a41SVille Syrjälä {
3459b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
3460b16b2a2fSPaulo Zanoni 
346171b8b41dSVille Syrjälä 	if (IS_CHERRYVIEW(dev_priv))
346271b8b41dSVille Syrjälä 		I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
346371b8b41dSVille Syrjälä 	else
346471b8b41dSVille Syrjälä 		I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
346571b8b41dSVille Syrjälä 
3466ad22d106SVille Syrjälä 	i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
346770591a41SVille Syrjälä 	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
346870591a41SVille Syrjälä 
346944d9241eSVille Syrjälä 	i9xx_pipestat_irq_reset(dev_priv);
347070591a41SVille Syrjälä 
3471b16b2a2fSPaulo Zanoni 	GEN3_IRQ_RESET(uncore, VLV_);
34728bd099a7SChris Wilson 	dev_priv->irq_mask = ~0u;
347370591a41SVille Syrjälä }
347470591a41SVille Syrjälä 
34758bb61306SVille Syrjälä static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
34768bb61306SVille Syrjälä {
3477b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
3478b16b2a2fSPaulo Zanoni 
34798bb61306SVille Syrjälä 	u32 pipestat_mask;
34809ab981f2SVille Syrjälä 	u32 enable_mask;
34818bb61306SVille Syrjälä 	enum pipe pipe;
34828bb61306SVille Syrjälä 
3483842ebf7aSVille Syrjälä 	pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS;
34848bb61306SVille Syrjälä 
34858bb61306SVille Syrjälä 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
34868bb61306SVille Syrjälä 	for_each_pipe(dev_priv, pipe)
34878bb61306SVille Syrjälä 		i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
34888bb61306SVille Syrjälä 
34899ab981f2SVille Syrjälä 	enable_mask = I915_DISPLAY_PORT_INTERRUPT |
34908bb61306SVille Syrjälä 		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3491ebf5f921SVille Syrjälä 		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3492ebf5f921SVille Syrjälä 		I915_LPE_PIPE_A_INTERRUPT |
3493ebf5f921SVille Syrjälä 		I915_LPE_PIPE_B_INTERRUPT;
3494ebf5f921SVille Syrjälä 
34958bb61306SVille Syrjälä 	if (IS_CHERRYVIEW(dev_priv))
3496ebf5f921SVille Syrjälä 		enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT |
3497ebf5f921SVille Syrjälä 			I915_LPE_PIPE_C_INTERRUPT;
34986b7eafc1SVille Syrjälä 
34998bd099a7SChris Wilson 	WARN_ON(dev_priv->irq_mask != ~0u);
35006b7eafc1SVille Syrjälä 
35019ab981f2SVille Syrjälä 	dev_priv->irq_mask = ~enable_mask;
35028bb61306SVille Syrjälä 
3503b16b2a2fSPaulo Zanoni 	GEN3_IRQ_INIT(uncore, VLV_, dev_priv->irq_mask, enable_mask);
35048bb61306SVille Syrjälä }
35058bb61306SVille Syrjälä 
35068bb61306SVille Syrjälä /* drm_dma.h hooks */
35088bb61306SVille Syrjälä static void ironlake_irq_reset(struct drm_device *dev)
35098bb61306SVille Syrjälä {
3510fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
3511b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
35128bb61306SVille Syrjälä 
3513b16b2a2fSPaulo Zanoni 	GEN3_IRQ_RESET(uncore, DE);
3514cf819effSLucas De Marchi 	if (IS_GEN(dev_priv, 7))
35158bb61306SVille Syrjälä 		I915_WRITE(GEN7_ERR_INT, 0xffffffff);
35168bb61306SVille Syrjälä 
3517fc340442SDaniel Vetter 	if (IS_HASWELL(dev_priv)) {
3518fc340442SDaniel Vetter 		I915_WRITE(EDP_PSR_IMR, 0xffffffff);
3519fc340442SDaniel Vetter 		I915_WRITE(EDP_PSR_IIR, 0xffffffff);
3520fc340442SDaniel Vetter 	}
3521fc340442SDaniel Vetter 
3522b243f530STvrtko Ursulin 	gen5_gt_irq_reset(dev_priv);
35238bb61306SVille Syrjälä 
3524b243f530STvrtko Ursulin 	ibx_irq_reset(dev_priv);
35258bb61306SVille Syrjälä }
35268bb61306SVille Syrjälä 
35276bcdb1c8SVille Syrjälä static void valleyview_irq_reset(struct drm_device *dev)
35287e231dbeSJesse Barnes {
3529fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
35307e231dbeSJesse Barnes 
353134c7b8a7SVille Syrjälä 	I915_WRITE(VLV_MASTER_IER, 0);
353234c7b8a7SVille Syrjälä 	POSTING_READ(VLV_MASTER_IER);
353334c7b8a7SVille Syrjälä 
3534b243f530STvrtko Ursulin 	gen5_gt_irq_reset(dev_priv);
35357e231dbeSJesse Barnes 
3536ad22d106SVille Syrjälä 	spin_lock_irq(&dev_priv->irq_lock);
35379918271eSVille Syrjälä 	if (dev_priv->display_irqs_enabled)
353870591a41SVille Syrjälä 		vlv_display_irq_reset(dev_priv);
3539ad22d106SVille Syrjälä 	spin_unlock_irq(&dev_priv->irq_lock);
35407e231dbeSJesse Barnes }
35417e231dbeSJesse Barnes 
3542d6e3cca3SDaniel Vetter static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
3543d6e3cca3SDaniel Vetter {
3544b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
3545b16b2a2fSPaulo Zanoni 
3546b16b2a2fSPaulo Zanoni 	GEN8_IRQ_RESET_NDX(uncore, GT, 0);
3547b16b2a2fSPaulo Zanoni 	GEN8_IRQ_RESET_NDX(uncore, GT, 1);
3548b16b2a2fSPaulo Zanoni 	GEN8_IRQ_RESET_NDX(uncore, GT, 2);
3549b16b2a2fSPaulo Zanoni 	GEN8_IRQ_RESET_NDX(uncore, GT, 3);
3550d6e3cca3SDaniel Vetter }
3551d6e3cca3SDaniel Vetter 
3552823f6b38SPaulo Zanoni static void gen8_irq_reset(struct drm_device *dev)
3553abd58f01SBen Widawsky {
3554fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
3555b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
3556abd58f01SBen Widawsky 	int pipe;
3557abd58f01SBen Widawsky 
355825286aacSDaniele Ceraolo Spurio 	gen8_master_intr_disable(dev_priv->uncore.regs);
3559abd58f01SBen Widawsky 
3560d6e3cca3SDaniel Vetter 	gen8_gt_irq_reset(dev_priv);
3561abd58f01SBen Widawsky 
3562e04f7eceSVille Syrjälä 	I915_WRITE(EDP_PSR_IMR, 0xffffffff);
3563e04f7eceSVille Syrjälä 	I915_WRITE(EDP_PSR_IIR, 0xffffffff);
3564e04f7eceSVille Syrjälä 
3565055e393fSDamien Lespiau 	for_each_pipe(dev_priv, pipe)
3566f458ebbcSDaniel Vetter 		if (intel_display_power_is_enabled(dev_priv,
3567813bde43SPaulo Zanoni 						   POWER_DOMAIN_PIPE(pipe)))
3568b16b2a2fSPaulo Zanoni 			GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
3569abd58f01SBen Widawsky 
3570b16b2a2fSPaulo Zanoni 	GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
3571b16b2a2fSPaulo Zanoni 	GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);
3572b16b2a2fSPaulo Zanoni 	GEN3_IRQ_RESET(uncore, GEN8_PCU_);
3573abd58f01SBen Widawsky 
35746e266956STvrtko Ursulin 	if (HAS_PCH_SPLIT(dev_priv))
3575b243f530STvrtko Ursulin 		ibx_irq_reset(dev_priv);
3576abd58f01SBen Widawsky }
3577abd58f01SBen Widawsky 
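/*
 * Gen11 GT interrupts are controlled through per-class enable registers
 * and paired per-engine mask registers; reset disables every class and
 * masks every engine, including the GPM/WGBOXPERF and GuC blocks.
 */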
357851951ae7SMika Kuoppala static void gen11_gt_irq_reset(struct drm_i915_private *dev_priv)
357951951ae7SMika Kuoppala {
358051951ae7SMika Kuoppala 	/* Disable the RCS, BCS, VCS and VECS class engine interrupts. */
358151951ae7SMika Kuoppala 	I915_WRITE(GEN11_RENDER_COPY_INTR_ENABLE, 0);
358251951ae7SMika Kuoppala 	I915_WRITE(GEN11_VCS_VECS_INTR_ENABLE,	  0);
358351951ae7SMika Kuoppala 
358451951ae7SMika Kuoppala 	/* Restore the masks (mask all irqs) on the RCS, BCS, VCS and VECS engines. */
358551951ae7SMika Kuoppala 	I915_WRITE(GEN11_RCS0_RSVD_INTR_MASK,	~0);
358651951ae7SMika Kuoppala 	I915_WRITE(GEN11_BCS_RSVD_INTR_MASK,	~0);
358751951ae7SMika Kuoppala 	I915_WRITE(GEN11_VCS0_VCS1_INTR_MASK,	~0);
358851951ae7SMika Kuoppala 	I915_WRITE(GEN11_VCS2_VCS3_INTR_MASK,	~0);
358951951ae7SMika Kuoppala 	I915_WRITE(GEN11_VECS0_VECS1_INTR_MASK,	~0);
3590d02b98b8SOscar Mateo 
3591d02b98b8SOscar Mateo 	I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
3592d02b98b8SOscar Mateo 	I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_MASK,  ~0);
3593*54c52a84SOscar Mateo 	I915_WRITE(GEN11_GUC_SG_INTR_ENABLE, 0);
3594*54c52a84SOscar Mateo 	I915_WRITE(GEN11_GUC_SG_INTR_MASK,  ~0);
359551951ae7SMika Kuoppala }
359651951ae7SMika Kuoppala 
359751951ae7SMika Kuoppala static void gen11_irq_reset(struct drm_device *dev)
359851951ae7SMika Kuoppala {
359951951ae7SMika Kuoppala 	struct drm_i915_private *dev_priv = dev->dev_private;
3600b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
360151951ae7SMika Kuoppala 	int pipe;
360251951ae7SMika Kuoppala 
360325286aacSDaniele Ceraolo Spurio 	gen11_master_intr_disable(dev_priv->uncore.regs);
360451951ae7SMika Kuoppala 
360551951ae7SMika Kuoppala 	gen11_gt_irq_reset(dev_priv);
360651951ae7SMika Kuoppala 
360751951ae7SMika Kuoppala 	I915_WRITE(GEN11_DISPLAY_INT_CTL, 0);
360851951ae7SMika Kuoppala 
360962819dfdSJosé Roberto de Souza 	I915_WRITE(EDP_PSR_IMR, 0xffffffff);
361062819dfdSJosé Roberto de Souza 	I915_WRITE(EDP_PSR_IIR, 0xffffffff);
361162819dfdSJosé Roberto de Souza 
361251951ae7SMika Kuoppala 	for_each_pipe(dev_priv, pipe)
361351951ae7SMika Kuoppala 		if (intel_display_power_is_enabled(dev_priv,
361451951ae7SMika Kuoppala 						   POWER_DOMAIN_PIPE(pipe)))
3615b16b2a2fSPaulo Zanoni 			GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
361651951ae7SMika Kuoppala 
3617b16b2a2fSPaulo Zanoni 	GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
3618b16b2a2fSPaulo Zanoni 	GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);
3619b16b2a2fSPaulo Zanoni 	GEN3_IRQ_RESET(uncore, GEN11_DE_HPD_);
3620b16b2a2fSPaulo Zanoni 	GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_);
3621b16b2a2fSPaulo Zanoni 	GEN3_IRQ_RESET(uncore, GEN8_PCU_);
362231604222SAnusha Srivatsa 
362329b43ae2SRodrigo Vivi 	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
3624b16b2a2fSPaulo Zanoni 		GEN3_IRQ_RESET(uncore, SDE);
362551951ae7SMika Kuoppala }
362651951ae7SMika Kuoppala 
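/*
 * Called after a display power well has been enabled: reprogram the DE
 * pipe interrupt registers for the pipes in @pipe_mask, since their
 * contents are lost while the power well is down.
 */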
36274c6c03beSDamien Lespiau void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
3628001bd2cbSImre Deak 				     u8 pipe_mask)
3629d49bdb0eSPaulo Zanoni {
3630b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
3631b16b2a2fSPaulo Zanoni 
3632a9c287c9SJani Nikula 	u32 extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
36336831f3e3SVille Syrjälä 	enum pipe pipe;
3634d49bdb0eSPaulo Zanoni 
363513321786SDaniel Vetter 	spin_lock_irq(&dev_priv->irq_lock);
36369dfe2e3aSImre Deak 
36379dfe2e3aSImre Deak 	if (!intel_irqs_enabled(dev_priv)) {
36389dfe2e3aSImre Deak 		spin_unlock_irq(&dev_priv->irq_lock);
36399dfe2e3aSImre Deak 		return;
36409dfe2e3aSImre Deak 	}
36419dfe2e3aSImre Deak 
36426831f3e3SVille Syrjälä 	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
3643b16b2a2fSPaulo Zanoni 		GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
36446831f3e3SVille Syrjälä 				  dev_priv->de_irq_mask[pipe],
36456831f3e3SVille Syrjälä 				  ~dev_priv->de_irq_mask[pipe] | extra_ier);
36469dfe2e3aSImre Deak 
364713321786SDaniel Vetter 	spin_unlock_irq(&dev_priv->irq_lock);
3648d49bdb0eSPaulo Zanoni }
3649d49bdb0eSPaulo Zanoni 
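/*
 * Called before a display power well is disabled: mask and reset the DE
 * pipe interrupts for the pipes in @pipe_mask and wait for any display
 * irq processing still in flight to finish.
 */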
3650aae8ba84SVille Syrjälä void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
3651001bd2cbSImre Deak 				     u8 pipe_mask)
3652aae8ba84SVille Syrjälä {
3653b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
36546831f3e3SVille Syrjälä 	enum pipe pipe;
36556831f3e3SVille Syrjälä 
3656aae8ba84SVille Syrjälä 	spin_lock_irq(&dev_priv->irq_lock);
36579dfe2e3aSImre Deak 
36589dfe2e3aSImre Deak 	if (!intel_irqs_enabled(dev_priv)) {
36599dfe2e3aSImre Deak 		spin_unlock_irq(&dev_priv->irq_lock);
36609dfe2e3aSImre Deak 		return;
36619dfe2e3aSImre Deak 	}
36629dfe2e3aSImre Deak 
36636831f3e3SVille Syrjälä 	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
3664b16b2a2fSPaulo Zanoni 		GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
36659dfe2e3aSImre Deak 
3666aae8ba84SVille Syrjälä 	spin_unlock_irq(&dev_priv->irq_lock);
3667aae8ba84SVille Syrjälä 
3668aae8ba84SVille Syrjälä 	/* make sure we're done processing display irqs */
366991c8a326SChris Wilson 	synchronize_irq(dev_priv->drm.irq);
3670aae8ba84SVille Syrjälä }
3671aae8ba84SVille Syrjälä 
36726bcdb1c8SVille Syrjälä static void cherryview_irq_reset(struct drm_device *dev)
367343f328d7SVille Syrjälä {
3674fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
3675b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
367643f328d7SVille Syrjälä 
367743f328d7SVille Syrjälä 	I915_WRITE(GEN8_MASTER_IRQ, 0);
367843f328d7SVille Syrjälä 	POSTING_READ(GEN8_MASTER_IRQ);
367943f328d7SVille Syrjälä 
3680d6e3cca3SDaniel Vetter 	gen8_gt_irq_reset(dev_priv);
368143f328d7SVille Syrjälä 
3682b16b2a2fSPaulo Zanoni 	GEN3_IRQ_RESET(uncore, GEN8_PCU_);
368343f328d7SVille Syrjälä 
3684ad22d106SVille Syrjälä 	spin_lock_irq(&dev_priv->irq_lock);
36859918271eSVille Syrjälä 	if (dev_priv->display_irqs_enabled)
368670591a41SVille Syrjälä 		vlv_display_irq_reset(dev_priv);
3687ad22d106SVille Syrjälä 	spin_unlock_irq(&dev_priv->irq_lock);
368843f328d7SVille Syrjälä }
368943f328d7SVille Syrjälä 
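/*
 * Collect the hotplug interrupt bits for every encoder whose HPD pin is
 * currently marked HPD_ENABLED, using the given pin-to-bit table.
 */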
369091d14251STvrtko Ursulin static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv,
369187a02106SVille Syrjälä 				  const u32 hpd[HPD_NUM_PINS])
369287a02106SVille Syrjälä {
369387a02106SVille Syrjälä 	struct intel_encoder *encoder;
369487a02106SVille Syrjälä 	u32 enabled_irqs = 0;
369587a02106SVille Syrjälä 
369691c8a326SChris Wilson 	for_each_intel_encoder(&dev_priv->drm, encoder)
369787a02106SVille Syrjälä 		if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
369887a02106SVille Syrjälä 			enabled_irqs |= hpd[encoder->hpd_pin];
369987a02106SVille Syrjälä 
370087a02106SVille Syrjälä 	return enabled_irqs;
370187a02106SVille Syrjälä }
370287a02106SVille Syrjälä 
37031a56b1a2SImre Deak static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv)
37041a56b1a2SImre Deak {
37051a56b1a2SImre Deak 	u32 hotplug;
37061a56b1a2SImre Deak 
37071a56b1a2SImre Deak 	/*
37081a56b1a2SImre Deak 	 * Enable digital hotplug on the PCH, and configure the DP short pulse
37091a56b1a2SImre Deak 	 * duration to 2ms (which is the minimum in the Display Port spec).
37101a56b1a2SImre Deak 	 * The pulse duration bits are reserved on LPT+.
37111a56b1a2SImre Deak 	 */
37121a56b1a2SImre Deak 	hotplug = I915_READ(PCH_PORT_HOTPLUG);
37131a56b1a2SImre Deak 	hotplug &= ~(PORTB_PULSE_DURATION_MASK |
37141a56b1a2SImre Deak 		     PORTC_PULSE_DURATION_MASK |
37151a56b1a2SImre Deak 		     PORTD_PULSE_DURATION_MASK);
37161a56b1a2SImre Deak 	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
37171a56b1a2SImre Deak 	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
37181a56b1a2SImre Deak 	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
37191a56b1a2SImre Deak 	/*
37201a56b1a2SImre Deak 	 * When CPU and PCH are on the same package, port A
37211a56b1a2SImre Deak 	 * HPD must be enabled in both north and south.
37221a56b1a2SImre Deak 	 */
37231a56b1a2SImre Deak 	if (HAS_PCH_LPT_LP(dev_priv))
37241a56b1a2SImre Deak 		hotplug |= PORTA_HOTPLUG_ENABLE;
37251a56b1a2SImre Deak 	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
37261a56b1a2SImre Deak }
37271a56b1a2SImre Deak 
372891d14251STvrtko Ursulin static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
372982a28bcfSDaniel Vetter {
37301a56b1a2SImre Deak 	u32 hotplug_irqs, enabled_irqs;
373182a28bcfSDaniel Vetter 
373291d14251STvrtko Ursulin 	if (HAS_PCH_IBX(dev_priv)) {
3733fee884edSDaniel Vetter 		hotplug_irqs = SDE_HOTPLUG_MASK;
373491d14251STvrtko Ursulin 		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ibx);
373582a28bcfSDaniel Vetter 	} else {
3736fee884edSDaniel Vetter 		hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
373791d14251STvrtko Ursulin 		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_cpt);
373882a28bcfSDaniel Vetter 	}
373982a28bcfSDaniel Vetter 
3740fee884edSDaniel Vetter 	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
374182a28bcfSDaniel Vetter 
37421a56b1a2SImre Deak 	ibx_hpd_detection_setup(dev_priv);
37436dbf30ceSVille Syrjälä }
374426951cafSXiong Zhang 
374531604222SAnusha Srivatsa static void icp_hpd_detection_setup(struct drm_i915_private *dev_priv)
374631604222SAnusha Srivatsa {
374731604222SAnusha Srivatsa 	u32 hotplug;
374831604222SAnusha Srivatsa 
374931604222SAnusha Srivatsa 	hotplug = I915_READ(SHOTPLUG_CTL_DDI);
375031604222SAnusha Srivatsa 	hotplug |= ICP_DDIA_HPD_ENABLE |
375131604222SAnusha Srivatsa 		   ICP_DDIB_HPD_ENABLE;
375231604222SAnusha Srivatsa 	I915_WRITE(SHOTPLUG_CTL_DDI, hotplug);
375331604222SAnusha Srivatsa 
375431604222SAnusha Srivatsa 	hotplug = I915_READ(SHOTPLUG_CTL_TC);
375531604222SAnusha Srivatsa 	hotplug |= ICP_TC_HPD_ENABLE(PORT_TC1) |
375631604222SAnusha Srivatsa 		   ICP_TC_HPD_ENABLE(PORT_TC2) |
375731604222SAnusha Srivatsa 		   ICP_TC_HPD_ENABLE(PORT_TC3) |
375831604222SAnusha Srivatsa 		   ICP_TC_HPD_ENABLE(PORT_TC4);
375931604222SAnusha Srivatsa 	I915_WRITE(SHOTPLUG_CTL_TC, hotplug);
376031604222SAnusha Srivatsa }
376131604222SAnusha Srivatsa 
376231604222SAnusha Srivatsa static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv)
376331604222SAnusha Srivatsa {
376431604222SAnusha Srivatsa 	u32 hotplug_irqs, enabled_irqs;
376531604222SAnusha Srivatsa 
376631604222SAnusha Srivatsa 	hotplug_irqs = SDE_DDI_MASK_ICP | SDE_TC_MASK_ICP;
376731604222SAnusha Srivatsa 	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_icp);
376831604222SAnusha Srivatsa 
376931604222SAnusha Srivatsa 	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
377031604222SAnusha Srivatsa 
377131604222SAnusha Srivatsa 	icp_hpd_detection_setup(dev_priv);
377231604222SAnusha Srivatsa }
377331604222SAnusha Srivatsa 
3774121e758eSDhinakaran Pandiyan static void gen11_hpd_detection_setup(struct drm_i915_private *dev_priv)
3775121e758eSDhinakaran Pandiyan {
3776121e758eSDhinakaran Pandiyan 	u32 hotplug;
3777121e758eSDhinakaran Pandiyan 
3778121e758eSDhinakaran Pandiyan 	hotplug = I915_READ(GEN11_TC_HOTPLUG_CTL);
3779121e758eSDhinakaran Pandiyan 	hotplug |= GEN11_HOTPLUG_CTL_ENABLE(PORT_TC1) |
3780121e758eSDhinakaran Pandiyan 		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC2) |
3781121e758eSDhinakaran Pandiyan 		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC3) |
3782121e758eSDhinakaran Pandiyan 		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4);
3783121e758eSDhinakaran Pandiyan 	I915_WRITE(GEN11_TC_HOTPLUG_CTL, hotplug);
3784b796b971SDhinakaran Pandiyan 
3785b796b971SDhinakaran Pandiyan 	hotplug = I915_READ(GEN11_TBT_HOTPLUG_CTL);
3786b796b971SDhinakaran Pandiyan 	hotplug |= GEN11_HOTPLUG_CTL_ENABLE(PORT_TC1) |
3787b796b971SDhinakaran Pandiyan 		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC2) |
3788b796b971SDhinakaran Pandiyan 		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC3) |
3789b796b971SDhinakaran Pandiyan 		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4);
3790b796b971SDhinakaran Pandiyan 	I915_WRITE(GEN11_TBT_HOTPLUG_CTL, hotplug);
3791121e758eSDhinakaran Pandiyan }
3792121e758eSDhinakaran Pandiyan 
3793121e758eSDhinakaran Pandiyan static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
3794121e758eSDhinakaran Pandiyan {
3795121e758eSDhinakaran Pandiyan 	u32 hotplug_irqs, enabled_irqs;
3796121e758eSDhinakaran Pandiyan 	u32 val;
3797121e758eSDhinakaran Pandiyan 
3798b796b971SDhinakaran Pandiyan 	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_gen11);
3799b796b971SDhinakaran Pandiyan 	hotplug_irqs = GEN11_DE_TC_HOTPLUG_MASK | GEN11_DE_TBT_HOTPLUG_MASK;
3800121e758eSDhinakaran Pandiyan 
3801121e758eSDhinakaran Pandiyan 	val = I915_READ(GEN11_DE_HPD_IMR);
3802121e758eSDhinakaran Pandiyan 	val &= ~hotplug_irqs;
3803121e758eSDhinakaran Pandiyan 	I915_WRITE(GEN11_DE_HPD_IMR, val);
3804121e758eSDhinakaran Pandiyan 	POSTING_READ(GEN11_DE_HPD_IMR);
3805121e758eSDhinakaran Pandiyan 
3806121e758eSDhinakaran Pandiyan 	gen11_hpd_detection_setup(dev_priv);
380731604222SAnusha Srivatsa 
380829b43ae2SRodrigo Vivi 	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
380931604222SAnusha Srivatsa 		icp_hpd_irq_setup(dev_priv);
3810121e758eSDhinakaran Pandiyan }
3811121e758eSDhinakaran Pandiyan 
38122a57d9ccSImre Deak static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv)
38132a57d9ccSImre Deak {
38143b92e263SRodrigo Vivi 	u32 val, hotplug;
38153b92e263SRodrigo Vivi 
38163b92e263SRodrigo Vivi 	/* Display WA #1179 WaHardHangonHotPlug: cnp */
38173b92e263SRodrigo Vivi 	if (HAS_PCH_CNP(dev_priv)) {
38183b92e263SRodrigo Vivi 		val = I915_READ(SOUTH_CHICKEN1);
38193b92e263SRodrigo Vivi 		val &= ~CHASSIS_CLK_REQ_DURATION_MASK;
38203b92e263SRodrigo Vivi 		val |= CHASSIS_CLK_REQ_DURATION(0xf);
38213b92e263SRodrigo Vivi 		I915_WRITE(SOUTH_CHICKEN1, val);
38223b92e263SRodrigo Vivi 	}
38232a57d9ccSImre Deak 
38242a57d9ccSImre Deak 	/* Enable digital hotplug on the PCH */
38252a57d9ccSImre Deak 	hotplug = I915_READ(PCH_PORT_HOTPLUG);
38262a57d9ccSImre Deak 	hotplug |= PORTA_HOTPLUG_ENABLE |
38272a57d9ccSImre Deak 		   PORTB_HOTPLUG_ENABLE |
38282a57d9ccSImre Deak 		   PORTC_HOTPLUG_ENABLE |
38292a57d9ccSImre Deak 		   PORTD_HOTPLUG_ENABLE;
38302a57d9ccSImre Deak 	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
38312a57d9ccSImre Deak 
38322a57d9ccSImre Deak 	hotplug = I915_READ(PCH_PORT_HOTPLUG2);
38332a57d9ccSImre Deak 	hotplug |= PORTE_HOTPLUG_ENABLE;
38342a57d9ccSImre Deak 	I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
38352a57d9ccSImre Deak }
38362a57d9ccSImre Deak 
383791d14251STvrtko Ursulin static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
38386dbf30ceSVille Syrjälä {
38392a57d9ccSImre Deak 	u32 hotplug_irqs, enabled_irqs;
38406dbf30ceSVille Syrjälä 
38416dbf30ceSVille Syrjälä 	hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
384291d14251STvrtko Ursulin 	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_spt);
38436dbf30ceSVille Syrjälä 
38446dbf30ceSVille Syrjälä 	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
38456dbf30ceSVille Syrjälä 
38462a57d9ccSImre Deak 	spt_hpd_detection_setup(dev_priv);
384726951cafSXiong Zhang }
38487fe0b973SKeith Packard 
38491a56b1a2SImre Deak static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv)
38501a56b1a2SImre Deak {
38511a56b1a2SImre Deak 	u32 hotplug;
38521a56b1a2SImre Deak 
38531a56b1a2SImre Deak 	/*
38541a56b1a2SImre Deak 	 * Enable digital hotplug on the CPU, and configure the DP short pulse
38551a56b1a2SImre Deak 	 * duration to 2ms (which is the minimum in the Display Port spec).
38561a56b1a2SImre Deak 	 * The pulse duration bits are reserved on HSW+.
38571a56b1a2SImre Deak 	 */
38581a56b1a2SImre Deak 	hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
38591a56b1a2SImre Deak 	hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK;
38601a56b1a2SImre Deak 	hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE |
38611a56b1a2SImre Deak 		   DIGITAL_PORTA_PULSE_DURATION_2ms;
38621a56b1a2SImre Deak 	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
38631a56b1a2SImre Deak }
38641a56b1a2SImre Deak 
386591d14251STvrtko Ursulin static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
3866e4ce95aaSVille Syrjälä {
38671a56b1a2SImre Deak 	u32 hotplug_irqs, enabled_irqs;
3868e4ce95aaSVille Syrjälä 
386991d14251STvrtko Ursulin 	if (INTEL_GEN(dev_priv) >= 8) {
38703a3b3c7dSVille Syrjälä 		hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG;
387191d14251STvrtko Ursulin 		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bdw);
38723a3b3c7dSVille Syrjälä 
38733a3b3c7dSVille Syrjälä 		bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
387491d14251STvrtko Ursulin 	} else if (INTEL_GEN(dev_priv) >= 7) {
387523bb4cb5SVille Syrjälä 		hotplug_irqs = DE_DP_A_HOTPLUG_IVB;
387691d14251STvrtko Ursulin 		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ivb);
38773a3b3c7dSVille Syrjälä 
38783a3b3c7dSVille Syrjälä 		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
387923bb4cb5SVille Syrjälä 	} else {
3880e4ce95aaSVille Syrjälä 		hotplug_irqs = DE_DP_A_HOTPLUG;
388191d14251STvrtko Ursulin 		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ilk);
3882e4ce95aaSVille Syrjälä 
3883e4ce95aaSVille Syrjälä 		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
38843a3b3c7dSVille Syrjälä 	}
3885e4ce95aaSVille Syrjälä 
38861a56b1a2SImre Deak 	ilk_hpd_detection_setup(dev_priv);
3887e4ce95aaSVille Syrjälä 
388891d14251STvrtko Ursulin 	ibx_hpd_irq_setup(dev_priv);
3889e4ce95aaSVille Syrjälä }
3890e4ce95aaSVille Syrjälä 
38912a57d9ccSImre Deak static void __bxt_hpd_detection_setup(struct drm_i915_private *dev_priv,
38922a57d9ccSImre Deak 				      u32 enabled_irqs)
3893e0a20ad7SShashank Sharma {
38942a57d9ccSImre Deak 	u32 hotplug;
3895e0a20ad7SShashank Sharma 
3896a52bb15bSVille Syrjälä 	hotplug = I915_READ(PCH_PORT_HOTPLUG);
38972a57d9ccSImre Deak 	hotplug |= PORTA_HOTPLUG_ENABLE |
38982a57d9ccSImre Deak 		   PORTB_HOTPLUG_ENABLE |
38992a57d9ccSImre Deak 		   PORTC_HOTPLUG_ENABLE;
3900d252bf68SShubhangi Shrivastava 
3901d252bf68SShubhangi Shrivastava 	DRM_DEBUG_KMS("Invert bit setting: hp_ctl:%x hp_port:%x\n",
3902d252bf68SShubhangi Shrivastava 		      hotplug, enabled_irqs);
3903d252bf68SShubhangi Shrivastava 	hotplug &= ~BXT_DDI_HPD_INVERT_MASK;
3904d252bf68SShubhangi Shrivastava 
3905d252bf68SShubhangi Shrivastava 	/*
3906d252bf68SShubhangi Shrivastava 	 * For BXT, the invert bit has to be set based on the AOB design
3907d252bf68SShubhangi Shrivastava 	 * for the HPD detection logic; update it based on the VBT fields.
3908d252bf68SShubhangi Shrivastava 	 */
3909d252bf68SShubhangi Shrivastava 	if ((enabled_irqs & BXT_DE_PORT_HP_DDIA) &&
3910d252bf68SShubhangi Shrivastava 	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_A))
3911d252bf68SShubhangi Shrivastava 		hotplug |= BXT_DDIA_HPD_INVERT;
3912d252bf68SShubhangi Shrivastava 	if ((enabled_irqs & BXT_DE_PORT_HP_DDIB) &&
3913d252bf68SShubhangi Shrivastava 	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_B))
3914d252bf68SShubhangi Shrivastava 		hotplug |= BXT_DDIB_HPD_INVERT;
3915d252bf68SShubhangi Shrivastava 	if ((enabled_irqs & BXT_DE_PORT_HP_DDIC) &&
3916d252bf68SShubhangi Shrivastava 	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_C))
3917d252bf68SShubhangi Shrivastava 		hotplug |= BXT_DDIC_HPD_INVERT;
3918d252bf68SShubhangi Shrivastava 
3919a52bb15bSVille Syrjälä 	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3920e0a20ad7SShashank Sharma }
3921e0a20ad7SShashank Sharma 
39222a57d9ccSImre Deak static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv)
39232a57d9ccSImre Deak {
39242a57d9ccSImre Deak 	__bxt_hpd_detection_setup(dev_priv, BXT_DE_PORT_HOTPLUG_MASK);
39252a57d9ccSImre Deak }
39262a57d9ccSImre Deak 
39272a57d9ccSImre Deak static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
39282a57d9ccSImre Deak {
39292a57d9ccSImre Deak 	u32 hotplug_irqs, enabled_irqs;
39302a57d9ccSImre Deak 
39312a57d9ccSImre Deak 	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bxt);
39322a57d9ccSImre Deak 	hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK;
39332a57d9ccSImre Deak 
39342a57d9ccSImre Deak 	bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
39352a57d9ccSImre Deak 
39362a57d9ccSImre Deak 	__bxt_hpd_detection_setup(dev_priv, enabled_irqs);
39372a57d9ccSImre Deak }
39382a57d9ccSImre Deak 
3939d46da437SPaulo Zanoni static void ibx_irq_postinstall(struct drm_device *dev)
3940d46da437SPaulo Zanoni {
3941fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
394282a28bcfSDaniel Vetter 	u32 mask;
3943d46da437SPaulo Zanoni 
39446e266956STvrtko Ursulin 	if (HAS_PCH_NOP(dev_priv))
3945692a04cfSDaniel Vetter 		return;
3946692a04cfSDaniel Vetter 
39476e266956STvrtko Ursulin 	if (HAS_PCH_IBX(dev_priv))
39485c673b60SDaniel Vetter 		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
39494ebc6509SDhinakaran Pandiyan 	else if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
39505c673b60SDaniel Vetter 		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
39514ebc6509SDhinakaran Pandiyan 	else
39524ebc6509SDhinakaran Pandiyan 		mask = SDE_GMBUS_CPT;
39538664281bSPaulo Zanoni 
395465f42cdcSPaulo Zanoni 	gen3_assert_iir_is_zero(&dev_priv->uncore, SDEIIR);
3955d46da437SPaulo Zanoni 	I915_WRITE(SDEIMR, ~mask);
39562a57d9ccSImre Deak 
39572a57d9ccSImre Deak 	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
39582a57d9ccSImre Deak 	    HAS_PCH_LPT(dev_priv))
39591a56b1a2SImre Deak 		ibx_hpd_detection_setup(dev_priv);
39602a57d9ccSImre Deak 	else
39612a57d9ccSImre Deak 		spt_hpd_detection_setup(dev_priv);
3962d46da437SPaulo Zanoni }
3963d46da437SPaulo Zanoni 
39640a9a8c91SDaniel Vetter static void gen5_gt_irq_postinstall(struct drm_device *dev)
39650a9a8c91SDaniel Vetter {
3966fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
3967b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
39680a9a8c91SDaniel Vetter 	u32 pm_irqs, gt_irqs;
39690a9a8c91SDaniel Vetter 
39700a9a8c91SDaniel Vetter 	pm_irqs = gt_irqs = 0;
39710a9a8c91SDaniel Vetter 
39720a9a8c91SDaniel Vetter 	dev_priv->gt_irq_mask = ~0;
39733c9192bcSTvrtko Ursulin 	if (HAS_L3_DPF(dev_priv)) {
39740a9a8c91SDaniel Vetter 		/* L3 parity interrupt is always unmasked. */
3975772c2a51STvrtko Ursulin 		dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev_priv);
3976772c2a51STvrtko Ursulin 		gt_irqs |= GT_PARITY_ERROR(dev_priv);
39770a9a8c91SDaniel Vetter 	}
39780a9a8c91SDaniel Vetter 
39790a9a8c91SDaniel Vetter 	gt_irqs |= GT_RENDER_USER_INTERRUPT;
3980cf819effSLucas De Marchi 	if (IS_GEN(dev_priv, 5)) {
3981f8973c21SChris Wilson 		gt_irqs |= ILK_BSD_USER_INTERRUPT;
39820a9a8c91SDaniel Vetter 	} else {
39830a9a8c91SDaniel Vetter 		gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
39840a9a8c91SDaniel Vetter 	}
39850a9a8c91SDaniel Vetter 
3986b16b2a2fSPaulo Zanoni 	GEN3_IRQ_INIT(uncore, GT, dev_priv->gt_irq_mask, gt_irqs);
39870a9a8c91SDaniel Vetter 
3988b243f530STvrtko Ursulin 	if (INTEL_GEN(dev_priv) >= 6) {
398978e68d36SImre Deak 		/*
399078e68d36SImre Deak 		 * RPS interrupts will get enabled/disabled on demand when RPS
399178e68d36SImre Deak 		 * itself is enabled/disabled.
399278e68d36SImre Deak 		 */
39938a68d464SChris Wilson 		if (HAS_ENGINE(dev_priv, VECS0)) {
39940a9a8c91SDaniel Vetter 			pm_irqs |= PM_VEBOX_USER_INTERRUPT;
3995f4e9af4fSAkash Goel 			dev_priv->pm_ier |= PM_VEBOX_USER_INTERRUPT;
3996f4e9af4fSAkash Goel 		}
39970a9a8c91SDaniel Vetter 
3998f4e9af4fSAkash Goel 		dev_priv->pm_imr = 0xffffffff;
3999b16b2a2fSPaulo Zanoni 		GEN3_IRQ_INIT(uncore, GEN6_PM, dev_priv->pm_imr, pm_irqs);
40000a9a8c91SDaniel Vetter 	}
40010a9a8c91SDaniel Vetter }
40020a9a8c91SDaniel Vetter 
4003f71d4af4SJesse Barnes static int ironlake_irq_postinstall(struct drm_device *dev)
4004036a4a7dSZhenyu Wang {
4005fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
4006b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
40078e76f8dcSPaulo Zanoni 	u32 display_mask, extra_mask;
40088e76f8dcSPaulo Zanoni 
4009b243f530STvrtko Ursulin 	if (INTEL_GEN(dev_priv) >= 7) {
40108e76f8dcSPaulo Zanoni 		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
4011842ebf7aSVille Syrjälä 				DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB);
40128e76f8dcSPaulo Zanoni 		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
401323bb4cb5SVille Syrjälä 			      DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
401423bb4cb5SVille Syrjälä 			      DE_DP_A_HOTPLUG_IVB);
40158e76f8dcSPaulo Zanoni 	} else {
40168e76f8dcSPaulo Zanoni 		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
4017842ebf7aSVille Syrjälä 				DE_AUX_CHANNEL_A | DE_PIPEB_CRC_DONE |
4018842ebf7aSVille Syrjälä 				DE_PIPEA_CRC_DONE | DE_POISON);
4019e4ce95aaSVille Syrjälä 		extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
4020e4ce95aaSVille Syrjälä 			      DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
4021e4ce95aaSVille Syrjälä 			      DE_DP_A_HOTPLUG);
40228e76f8dcSPaulo Zanoni 	}
4023036a4a7dSZhenyu Wang 
4024fc340442SDaniel Vetter 	if (IS_HASWELL(dev_priv)) {
4025b16b2a2fSPaulo Zanoni 		gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
40261aeb1b5fSDhinakaran Pandiyan 		intel_psr_irq_control(dev_priv, dev_priv->psr.debug);
4027fc340442SDaniel Vetter 		display_mask |= DE_EDP_PSR_INT_HSW;
4028fc340442SDaniel Vetter 	}
4029fc340442SDaniel Vetter 
40301ec14ad3SChris Wilson 	dev_priv->irq_mask = ~display_mask;
4031036a4a7dSZhenyu Wang 
4032622364b6SPaulo Zanoni 	ibx_irq_pre_postinstall(dev);
4033622364b6SPaulo Zanoni 
4034b16b2a2fSPaulo Zanoni 	GEN3_IRQ_INIT(uncore, DE, dev_priv->irq_mask,
4035b16b2a2fSPaulo Zanoni 		      display_mask | extra_mask);
4036036a4a7dSZhenyu Wang 
40370a9a8c91SDaniel Vetter 	gen5_gt_irq_postinstall(dev);
4038036a4a7dSZhenyu Wang 
40391a56b1a2SImre Deak 	ilk_hpd_detection_setup(dev_priv);
40401a56b1a2SImre Deak 
4041d46da437SPaulo Zanoni 	ibx_irq_postinstall(dev);
40427fe0b973SKeith Packard 
404350a0bc90STvrtko Ursulin 	if (IS_IRONLAKE_M(dev_priv)) {
40446005ce42SDaniel Vetter 		/* Enable PCU event interrupts
40456005ce42SDaniel Vetter 		 *
40466005ce42SDaniel Vetter 		 * spinlocking is not required here for correctness since interrupt
40474bc9d430SDaniel Vetter 		 * setup is guaranteed to run in a single-threaded context. But we
40484bc9d430SDaniel Vetter 		 * need it to keep the assert_spin_locked check happy. */
4049d6207435SDaniel Vetter 		spin_lock_irq(&dev_priv->irq_lock);
4050fbdedaeaSVille Syrjälä 		ilk_enable_display_irq(dev_priv, DE_PCU_EVENT);
4051d6207435SDaniel Vetter 		spin_unlock_irq(&dev_priv->irq_lock);
4052f97108d1SJesse Barnes 	}
4053f97108d1SJesse Barnes 
4054036a4a7dSZhenyu Wang 	return 0;
4055036a4a7dSZhenyu Wang }
4056036a4a7dSZhenyu Wang 
4057f8b79e58SImre Deak void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
4058f8b79e58SImre Deak {
405967520415SChris Wilson 	lockdep_assert_held(&dev_priv->irq_lock);
4060f8b79e58SImre Deak 
4061f8b79e58SImre Deak 	if (dev_priv->display_irqs_enabled)
4062f8b79e58SImre Deak 		return;
4063f8b79e58SImre Deak 
4064f8b79e58SImre Deak 	dev_priv->display_irqs_enabled = true;
4065f8b79e58SImre Deak 
4066d6c69803SVille Syrjälä 	if (intel_irqs_enabled(dev_priv)) {
4067d6c69803SVille Syrjälä 		vlv_display_irq_reset(dev_priv);
4068ad22d106SVille Syrjälä 		vlv_display_irq_postinstall(dev_priv);
4069f8b79e58SImre Deak 	}
4070d6c69803SVille Syrjälä }
4071f8b79e58SImre Deak 
4072f8b79e58SImre Deak void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
4073f8b79e58SImre Deak {
407467520415SChris Wilson 	lockdep_assert_held(&dev_priv->irq_lock);
4075f8b79e58SImre Deak 
4076f8b79e58SImre Deak 	if (!dev_priv->display_irqs_enabled)
4077f8b79e58SImre Deak 		return;
4078f8b79e58SImre Deak 
4079f8b79e58SImre Deak 	dev_priv->display_irqs_enabled = false;
4080f8b79e58SImre Deak 
4081950eabafSImre Deak 	if (intel_irqs_enabled(dev_priv))
4082ad22d106SVille Syrjälä 		vlv_display_irq_reset(dev_priv);
4083f8b79e58SImre Deak }
4084f8b79e58SImre Deak 
40850e6c9a9eSVille Syrjälä 
40860e6c9a9eSVille Syrjälä static int valleyview_irq_postinstall(struct drm_device *dev)
40870e6c9a9eSVille Syrjälä {
4088fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
40890e6c9a9eSVille Syrjälä 
40900a9a8c91SDaniel Vetter 	gen5_gt_irq_postinstall(dev);
40917e231dbeSJesse Barnes 
4092ad22d106SVille Syrjälä 	spin_lock_irq(&dev_priv->irq_lock);
40939918271eSVille Syrjälä 	if (dev_priv->display_irqs_enabled)
4094ad22d106SVille Syrjälä 		vlv_display_irq_postinstall(dev_priv);
4095ad22d106SVille Syrjälä 	spin_unlock_irq(&dev_priv->irq_lock);
4096ad22d106SVille Syrjälä 
40977e231dbeSJesse Barnes 	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
409834c7b8a7SVille Syrjälä 	POSTING_READ(VLV_MASTER_IER);
409920afbda2SDaniel Vetter 
410020afbda2SDaniel Vetter 	return 0;
410120afbda2SDaniel Vetter }
410220afbda2SDaniel Vetter 
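/*
 * gt_interrupts[] below holds one enable value per GT interrupt bank:
 * bank 0 covers RCS/BCS, bank 1 covers VCS0/VCS1, bank 2 is the PM/RPS
 * bank (left disabled here and managed by the RPS code) and bank 3
 * covers VECS.
 */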
4103abd58f01SBen Widawsky static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
4104abd58f01SBen Widawsky {
4105b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
4106b16b2a2fSPaulo Zanoni 
4107abd58f01SBen Widawsky 	/* These are interrupts we'll toggle with the ring mask register */
4108a9c287c9SJani Nikula 	u32 gt_interrupts[] = {
41098a68d464SChris Wilson 		(GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
411073d477f6SOscar Mateo 		 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
411173d477f6SOscar Mateo 		 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
41128a68d464SChris Wilson 		 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT),
41138a68d464SChris Wilson 
41148a68d464SChris Wilson 		(GT_RENDER_USER_INTERRUPT << GEN8_VCS0_IRQ_SHIFT |
41158a68d464SChris Wilson 		 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS0_IRQ_SHIFT |
4116abd58f01SBen Widawsky 		 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
41178a68d464SChris Wilson 		 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT),
41188a68d464SChris Wilson 
4119abd58f01SBen Widawsky 		0,
41208a68d464SChris Wilson 
41218a68d464SChris Wilson 		(GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
41228a68d464SChris Wilson 		 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT)
4123abd58f01SBen Widawsky 	};
4124abd58f01SBen Widawsky 
4125f4e9af4fSAkash Goel 	dev_priv->pm_ier = 0x0;
4126f4e9af4fSAkash Goel 	dev_priv->pm_imr = ~dev_priv->pm_ier;
4127b16b2a2fSPaulo Zanoni 	GEN8_IRQ_INIT_NDX(uncore, GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
4128b16b2a2fSPaulo Zanoni 	GEN8_IRQ_INIT_NDX(uncore, GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
412978e68d36SImre Deak 	/*
413078e68d36SImre Deak 	 * RPS interrupts will get enabled/disabled on demand when RPS itself
413126705e20SSagar Arun Kamble 	 * is enabled/disabled. The same will be the case for GuC interrupts.
413278e68d36SImre Deak 	 */
4133b16b2a2fSPaulo Zanoni 	GEN8_IRQ_INIT_NDX(uncore, GT, 2, dev_priv->pm_imr, dev_priv->pm_ier);
4134b16b2a2fSPaulo Zanoni 	GEN8_IRQ_INIT_NDX(uncore, GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
4135abd58f01SBen Widawsky }
4136abd58f01SBen Widawsky 
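/*
 * Program the display engine interrupts: per-pipe CRC-done/fault, vblank
 * and FIFO underrun bits, the AUX channel and hotplug bits in the DE_PORT
 * block, plus the MISC (GSE/PSR) bits, with the exact set depending on
 * the platform generation. Pipes whose power wells are currently off are
 * skipped here and programmed later from gen8_irq_power_well_post_enable().
 */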
4137abd58f01SBen Widawsky static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
4138abd58f01SBen Widawsky {
4139b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
4140b16b2a2fSPaulo Zanoni 
4141a9c287c9SJani Nikula 	u32 de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
4142a9c287c9SJani Nikula 	u32 de_pipe_enables;
41433a3b3c7dSVille Syrjälä 	u32 de_port_masked = GEN8_AUX_CHANNEL_A;
41443a3b3c7dSVille Syrjälä 	u32 de_port_enables;
4145df0d28c1SDhinakaran Pandiyan 	u32 de_misc_masked = GEN8_DE_EDP_PSR;
41463a3b3c7dSVille Syrjälä 	enum pipe pipe;
4147770de83dSDamien Lespiau 
4148df0d28c1SDhinakaran Pandiyan 	if (INTEL_GEN(dev_priv) <= 10)
4149df0d28c1SDhinakaran Pandiyan 		de_misc_masked |= GEN8_DE_MISC_GSE;
4150df0d28c1SDhinakaran Pandiyan 
4151bca2bf2aSPandiyan, Dhinakaran 	if (INTEL_GEN(dev_priv) >= 9) {
4152842ebf7aSVille Syrjälä 		de_pipe_masked |= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
41533a3b3c7dSVille Syrjälä 		de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
415488e04703SJesse Barnes 				  GEN9_AUX_CHANNEL_D;
4155cc3f90f0SAnder Conselvan de Oliveira 		if (IS_GEN9_LP(dev_priv))
41563a3b3c7dSVille Syrjälä 			de_port_masked |= BXT_DE_PORT_GMBUS;
41573a3b3c7dSVille Syrjälä 	} else {
4158842ebf7aSVille Syrjälä 		de_pipe_masked |= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
41593a3b3c7dSVille Syrjälä 	}
4160770de83dSDamien Lespiau 
4161bb187e93SJames Ausmus 	if (INTEL_GEN(dev_priv) >= 11)
4162bb187e93SJames Ausmus 		de_port_masked |= ICL_AUX_CHANNEL_E;
4163bb187e93SJames Ausmus 
41649bb635d9SDhinakaran Pandiyan 	if (IS_CNL_WITH_PORT_F(dev_priv) || INTEL_GEN(dev_priv) >= 11)
4165a324fcacSRodrigo Vivi 		de_port_masked |= CNL_AUX_CHANNEL_F;
4166a324fcacSRodrigo Vivi 
4167770de83dSDamien Lespiau 	de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
4168770de83dSDamien Lespiau 					   GEN8_PIPE_FIFO_UNDERRUN;
4169770de83dSDamien Lespiau 
41703a3b3c7dSVille Syrjälä 	de_port_enables = de_port_masked;
4171cc3f90f0SAnder Conselvan de Oliveira 	if (IS_GEN9_LP(dev_priv))
4172a52bb15bSVille Syrjälä 		de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
4173a52bb15bSVille Syrjälä 	else if (IS_BROADWELL(dev_priv))
41743a3b3c7dSVille Syrjälä 		de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;
41753a3b3c7dSVille Syrjälä 
4176b16b2a2fSPaulo Zanoni 	gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
417754fd3149SDhinakaran Pandiyan 	intel_psr_irq_control(dev_priv, dev_priv->psr.debug);
4178e04f7eceSVille Syrjälä 
41790a195c02SMika Kahola 	for_each_pipe(dev_priv, pipe) {
41800a195c02SMika Kahola 		dev_priv->de_irq_mask[pipe] = ~de_pipe_masked;
4181abd58f01SBen Widawsky 
4182f458ebbcSDaniel Vetter 		if (intel_display_power_is_enabled(dev_priv,
4183813bde43SPaulo Zanoni 				POWER_DOMAIN_PIPE(pipe)))
4184b16b2a2fSPaulo Zanoni 			GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
4185813bde43SPaulo Zanoni 					  dev_priv->de_irq_mask[pipe],
418635079899SPaulo Zanoni 					  de_pipe_enables);
41870a195c02SMika Kahola 	}
4188abd58f01SBen Widawsky 
4189b16b2a2fSPaulo Zanoni 	GEN3_IRQ_INIT(uncore, GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
4190b16b2a2fSPaulo Zanoni 	GEN3_IRQ_INIT(uncore, GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);
41912a57d9ccSImre Deak 
4192121e758eSDhinakaran Pandiyan 	if (INTEL_GEN(dev_priv) >= 11) {
4193121e758eSDhinakaran Pandiyan 		u32 de_hpd_masked = 0;
4194b796b971SDhinakaran Pandiyan 		u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK |
4195b796b971SDhinakaran Pandiyan 				     GEN11_DE_TBT_HOTPLUG_MASK;
4196121e758eSDhinakaran Pandiyan 
4197b16b2a2fSPaulo Zanoni 		GEN3_IRQ_INIT(uncore, GEN11_DE_HPD_, ~de_hpd_masked,
4198b16b2a2fSPaulo Zanoni 			      de_hpd_enables);
4199121e758eSDhinakaran Pandiyan 		gen11_hpd_detection_setup(dev_priv);
4200121e758eSDhinakaran Pandiyan 	} else if (IS_GEN9_LP(dev_priv)) {
42012a57d9ccSImre Deak 		bxt_hpd_detection_setup(dev_priv);
4202121e758eSDhinakaran Pandiyan 	} else if (IS_BROADWELL(dev_priv)) {
42031a56b1a2SImre Deak 		ilk_hpd_detection_setup(dev_priv);
4204abd58f01SBen Widawsky 	}
4205121e758eSDhinakaran Pandiyan }
4206abd58f01SBen Widawsky 
4207abd58f01SBen Widawsky static int gen8_irq_postinstall(struct drm_device *dev)
4208abd58f01SBen Widawsky {
4209fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
4210abd58f01SBen Widawsky 
42116e266956STvrtko Ursulin 	if (HAS_PCH_SPLIT(dev_priv))
4212622364b6SPaulo Zanoni 		ibx_irq_pre_postinstall(dev);
4213622364b6SPaulo Zanoni 
4214abd58f01SBen Widawsky 	gen8_gt_irq_postinstall(dev_priv);
4215abd58f01SBen Widawsky 	gen8_de_irq_postinstall(dev_priv);
4216abd58f01SBen Widawsky 
42176e266956STvrtko Ursulin 	if (HAS_PCH_SPLIT(dev_priv))
4218abd58f01SBen Widawsky 		ibx_irq_postinstall(dev);
4219abd58f01SBen Widawsky 
422025286aacSDaniele Ceraolo Spurio 	gen8_master_intr_enable(dev_priv->uncore.regs);
4221abd58f01SBen Widawsky 
4222abd58f01SBen Widawsky 	return 0;
4223abd58f01SBen Widawsky }
4224abd58f01SBen Widawsky 
422551951ae7SMika Kuoppala static void gen11_gt_irq_postinstall(struct drm_i915_private *dev_priv)
422651951ae7SMika Kuoppala {
422751951ae7SMika Kuoppala 	const u32 irqs = GT_RENDER_USER_INTERRUPT | GT_CONTEXT_SWITCH_INTERRUPT;
422851951ae7SMika Kuoppala 
422951951ae7SMika Kuoppala 	BUILD_BUG_ON(irqs & 0xffff0000);
423051951ae7SMika Kuoppala 
423151951ae7SMika Kuoppala 	/* Enable RCS, BCS, VCS and VECS class interrupts. */
423251951ae7SMika Kuoppala 	I915_WRITE(GEN11_RENDER_COPY_INTR_ENABLE, irqs << 16 | irqs);
423351951ae7SMika Kuoppala 	I915_WRITE(GEN11_VCS_VECS_INTR_ENABLE,	  irqs << 16 | irqs);
423451951ae7SMika Kuoppala 
423551951ae7SMika Kuoppala 	/* Unmask irqs on RCS, BCS, VCS and VECS engines. */
423651951ae7SMika Kuoppala 	I915_WRITE(GEN11_RCS0_RSVD_INTR_MASK,	~(irqs << 16));
423751951ae7SMika Kuoppala 	I915_WRITE(GEN11_BCS_RSVD_INTR_MASK,	~(irqs << 16));
423851951ae7SMika Kuoppala 	I915_WRITE(GEN11_VCS0_VCS1_INTR_MASK,	~(irqs | irqs << 16));
423951951ae7SMika Kuoppala 	I915_WRITE(GEN11_VCS2_VCS3_INTR_MASK,	~(irqs | irqs << 16));
424051951ae7SMika Kuoppala 	I915_WRITE(GEN11_VECS0_VECS1_INTR_MASK,	~(irqs | irqs << 16));
424151951ae7SMika Kuoppala 
4242d02b98b8SOscar Mateo 	/*
4243d02b98b8SOscar Mateo 	 * RPS interrupts will get enabled/disabled on demand when RPS itself
4244d02b98b8SOscar Mateo 	 * is enabled/disabled.
4245d02b98b8SOscar Mateo 	 */
4246d02b98b8SOscar Mateo 	dev_priv->pm_ier = 0x0;
4247d02b98b8SOscar Mateo 	dev_priv->pm_imr = ~dev_priv->pm_ier;
4248d02b98b8SOscar Mateo 	I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
4249d02b98b8SOscar Mateo 	I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_MASK,  ~0);
4250*54c52a84SOscar Mateo 
4251*54c52a84SOscar Mateo 	/* Same thing for GuC interrupts */
4252*54c52a84SOscar Mateo 	I915_WRITE(GEN11_GUC_SG_INTR_ENABLE, 0);
4253*54c52a84SOscar Mateo 	I915_WRITE(GEN11_GUC_SG_INTR_MASK,  ~0);
425451951ae7SMika Kuoppala }
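
/*
 * Illustrative sketch only, not part of the driver: the arithmetic behind the
 * paired 16-bit register images written above. EXAMPLE_IRQS is a hypothetical
 * stand-in for irqs, which the BUILD_BUG_ON() above guarantees fits in the
 * low 16 bits.
 */
#if 0
static void gen11_gt_irq_packing_example(void)
{
#define EXAMPLE_IRQS 0x0101u	/* hypothetical low-half interrupt mask */

	/* Dual-engine images program the same bits into both 16-bit halves. */
	BUILD_BUG_ON((EXAMPLE_IRQS << 16 | EXAMPLE_IRQS) != 0x01010101);
	BUILD_BUG_ON(~(EXAMPLE_IRQS | EXAMPLE_IRQS << 16) != 0xfefefefe);

	/*
	 * Single-engine images (e.g. GEN11_RCS0_RSVD_INTR_MASK above) unmask
	 * only the upper half; the lower half stays fully masked.
	 */
	BUILD_BUG_ON(~(EXAMPLE_IRQS << 16) != 0xfefeffff);

#undef EXAMPLE_IRQS
}
#endif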
425551951ae7SMika Kuoppala 
425631604222SAnusha Srivatsa static void icp_irq_postinstall(struct drm_device *dev)
425731604222SAnusha Srivatsa {
425831604222SAnusha Srivatsa 	struct drm_i915_private *dev_priv = to_i915(dev);
425931604222SAnusha Srivatsa 	u32 mask = SDE_GMBUS_ICP;
426031604222SAnusha Srivatsa 
426131604222SAnusha Srivatsa 	WARN_ON(I915_READ(SDEIER) != 0);
426231604222SAnusha Srivatsa 	I915_WRITE(SDEIER, 0xffffffff);
426331604222SAnusha Srivatsa 	POSTING_READ(SDEIER);
426431604222SAnusha Srivatsa 
426565f42cdcSPaulo Zanoni 	gen3_assert_iir_is_zero(&dev_priv->uncore, SDEIIR);
426631604222SAnusha Srivatsa 	I915_WRITE(SDEIMR, ~mask);
426731604222SAnusha Srivatsa 
426831604222SAnusha Srivatsa 	icp_hpd_detection_setup(dev_priv);
426931604222SAnusha Srivatsa }
427031604222SAnusha Srivatsa 
427151951ae7SMika Kuoppala static int gen11_irq_postinstall(struct drm_device *dev)
427251951ae7SMika Kuoppala {
427351951ae7SMika Kuoppala 	struct drm_i915_private *dev_priv = dev->dev_private;
4274b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
4275df0d28c1SDhinakaran Pandiyan 	u32 gu_misc_masked = GEN11_GU_MISC_GSE;
427651951ae7SMika Kuoppala 
427729b43ae2SRodrigo Vivi 	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
427831604222SAnusha Srivatsa 		icp_irq_postinstall(dev);
427931604222SAnusha Srivatsa 
428051951ae7SMika Kuoppala 	gen11_gt_irq_postinstall(dev_priv);
428151951ae7SMika Kuoppala 	gen8_de_irq_postinstall(dev_priv);
428251951ae7SMika Kuoppala 
4283b16b2a2fSPaulo Zanoni 	GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);
4284df0d28c1SDhinakaran Pandiyan 
428551951ae7SMika Kuoppala 	I915_WRITE(GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
428651951ae7SMika Kuoppala 
428725286aacSDaniele Ceraolo Spurio 	gen11_master_intr_enable(dev_priv->uncore.regs);
4288c25f0c6aSDaniele Ceraolo Spurio 	POSTING_READ(GEN11_GFX_MSTR_IRQ);
428951951ae7SMika Kuoppala 
429051951ae7SMika Kuoppala 	return 0;
429151951ae7SMika Kuoppala }
429251951ae7SMika Kuoppala 
429343f328d7SVille Syrjälä static int cherryview_irq_postinstall(struct drm_device *dev)
429443f328d7SVille Syrjälä {
4295fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
429643f328d7SVille Syrjälä 
429743f328d7SVille Syrjälä 	gen8_gt_irq_postinstall(dev_priv);
429843f328d7SVille Syrjälä 
4299ad22d106SVille Syrjälä 	spin_lock_irq(&dev_priv->irq_lock);
43009918271eSVille Syrjälä 	if (dev_priv->display_irqs_enabled)
4301ad22d106SVille Syrjälä 		vlv_display_irq_postinstall(dev_priv);
4302ad22d106SVille Syrjälä 	spin_unlock_irq(&dev_priv->irq_lock);
4303ad22d106SVille Syrjälä 
4304e5328c43SVille Syrjälä 	I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
430543f328d7SVille Syrjälä 	POSTING_READ(GEN8_MASTER_IRQ);
430643f328d7SVille Syrjälä 
430743f328d7SVille Syrjälä 	return 0;
430843f328d7SVille Syrjälä }
430943f328d7SVille Syrjälä 
43106bcdb1c8SVille Syrjälä static void i8xx_irq_reset(struct drm_device *dev)
4311c2798b19SChris Wilson {
4312fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
4313b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
4314c2798b19SChris Wilson 
431544d9241eSVille Syrjälä 	i9xx_pipestat_irq_reset(dev_priv);
431644d9241eSVille Syrjälä 
4317b16b2a2fSPaulo Zanoni 	GEN2_IRQ_RESET(uncore);
4318c2798b19SChris Wilson }
4319c2798b19SChris Wilson 
4320c2798b19SChris Wilson static int i8xx_irq_postinstall(struct drm_device *dev)
4321c2798b19SChris Wilson {
4322fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
4323b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
4324e9e9848aSVille Syrjälä 	u16 enable_mask;
4325c2798b19SChris Wilson 
4326045cebd2SVille Syrjälä 	I915_WRITE16(EMR, ~(I915_ERROR_PAGE_TABLE |
4327045cebd2SVille Syrjälä 			    I915_ERROR_MEMORY_REFRESH));
4328c2798b19SChris Wilson 
4329c2798b19SChris Wilson 	/* Unmask the interrupts that we always want on. */
4330c2798b19SChris Wilson 	dev_priv->irq_mask =
4331c2798b19SChris Wilson 		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
433216659bc5SVille Syrjälä 		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
433316659bc5SVille Syrjälä 		  I915_MASTER_ERROR_INTERRUPT);
4334c2798b19SChris Wilson 
4335e9e9848aSVille Syrjälä 	enable_mask =
4336c2798b19SChris Wilson 		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4337c2798b19SChris Wilson 		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
433816659bc5SVille Syrjälä 		I915_MASTER_ERROR_INTERRUPT |
4339e9e9848aSVille Syrjälä 		I915_USER_INTERRUPT;
4340e9e9848aSVille Syrjälä 
4341b16b2a2fSPaulo Zanoni 	GEN2_IRQ_INIT(uncore, dev_priv->irq_mask, enable_mask);
4342c2798b19SChris Wilson 
4343379ef82dSDaniel Vetter 	/* Interrupt setup is already guaranteed to be single-threaded; this is
4344379ef82dSDaniel Vetter 	 * just to make the assert_spin_locked check happy. */
4345d6207435SDaniel Vetter 	spin_lock_irq(&dev_priv->irq_lock);
4346755e9019SImre Deak 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4347755e9019SImre Deak 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4348d6207435SDaniel Vetter 	spin_unlock_irq(&dev_priv->irq_lock);
4349379ef82dSDaniel Vetter 
4350c2798b19SChris Wilson 	return 0;
4351c2798b19SChris Wilson }
4352c2798b19SChris Wilson 
435378c357ddSVille Syrjälä static void i8xx_error_irq_ack(struct drm_i915_private *dev_priv,
435478c357ddSVille Syrjälä 			       u16 *eir, u16 *eir_stuck)
435578c357ddSVille Syrjälä {
435678c357ddSVille Syrjälä 	u16 emr;
435778c357ddSVille Syrjälä 
435878c357ddSVille Syrjälä 	*eir = I915_READ16(EIR);
435978c357ddSVille Syrjälä 
436078c357ddSVille Syrjälä 	if (*eir)
436178c357ddSVille Syrjälä 		I915_WRITE16(EIR, *eir);
436278c357ddSVille Syrjälä 
436378c357ddSVille Syrjälä 	*eir_stuck = I915_READ16(EIR);
436478c357ddSVille Syrjälä 	if (*eir_stuck == 0)
436578c357ddSVille Syrjälä 		return;
436678c357ddSVille Syrjälä 
436778c357ddSVille Syrjälä 	/*
436878c357ddSVille Syrjälä 	 * Toggle all EMR bits to make sure we get an edge
436978c357ddSVille Syrjälä 	 * in the ISR master error bit if we don't clear
437078c357ddSVille Syrjälä 	 * all the EIR bits. Otherwise the edge triggered
437178c357ddSVille Syrjälä 	 * IIR on i965/g4x wouldn't notice that an interrupt
437278c357ddSVille Syrjälä 	 * is still pending. Also some EIR bits can't be
437378c357ddSVille Syrjälä 	 * cleared except by handling the underlying error
437478c357ddSVille Syrjälä 	 * (or by a GPU reset) so we mask any bit that
437578c357ddSVille Syrjälä 	 * remains set.
437678c357ddSVille Syrjälä 	 */
437778c357ddSVille Syrjälä 	emr = I915_READ16(EMR);
437878c357ddSVille Syrjälä 	I915_WRITE16(EMR, 0xffff);
437978c357ddSVille Syrjälä 	I915_WRITE16(EMR, emr | *eir_stuck);
438078c357ddSVille Syrjälä }
438178c357ddSVille Syrjälä 
438278c357ddSVille Syrjälä static void i8xx_error_irq_handler(struct drm_i915_private *dev_priv,
438378c357ddSVille Syrjälä 				   u16 eir, u16 eir_stuck)
438478c357ddSVille Syrjälä {
438578c357ddSVille Syrjälä 	DRM_DEBUG("Master Error: EIR 0x%04x\n", eir);
438678c357ddSVille Syrjälä 
438778c357ddSVille Syrjälä 	if (eir_stuck)
438878c357ddSVille Syrjälä 		DRM_DEBUG_DRIVER("EIR stuck: 0x%04x, masked\n", eir_stuck);
438978c357ddSVille Syrjälä }
439078c357ddSVille Syrjälä 
439178c357ddSVille Syrjälä static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv,
439278c357ddSVille Syrjälä 			       u32 *eir, u32 *eir_stuck)
439378c357ddSVille Syrjälä {
439478c357ddSVille Syrjälä 	u32 emr;
439578c357ddSVille Syrjälä 
439678c357ddSVille Syrjälä 	*eir = I915_READ(EIR);
439778c357ddSVille Syrjälä 
439878c357ddSVille Syrjälä 	I915_WRITE(EIR, *eir);
439978c357ddSVille Syrjälä 
440078c357ddSVille Syrjälä 	*eir_stuck = I915_READ(EIR);
440178c357ddSVille Syrjälä 	if (*eir_stuck == 0)
440278c357ddSVille Syrjälä 		return;
440378c357ddSVille Syrjälä 
440478c357ddSVille Syrjälä 	/*
440578c357ddSVille Syrjälä 	 * Toggle all EMR bits to make sure we get an edge
440678c357ddSVille Syrjälä 	 * in the ISR master error bit if we don't clear
440778c357ddSVille Syrjälä 	 * all the EIR bits. Otherwise the edge triggered
440878c357ddSVille Syrjälä 	 * IIR on i965/g4x wouldn't notice that an interrupt
440978c357ddSVille Syrjälä 	 * is still pending. Also some EIR bits can't be
441078c357ddSVille Syrjälä 	 * cleared except by handling the underlying error
441178c357ddSVille Syrjälä 	 * (or by a GPU reset) so we mask any bit that
441278c357ddSVille Syrjälä 	 * remains set.
441378c357ddSVille Syrjälä 	 */
441478c357ddSVille Syrjälä 	emr = I915_READ(EMR);
441578c357ddSVille Syrjälä 	I915_WRITE(EMR, 0xffffffff);
441678c357ddSVille Syrjälä 	I915_WRITE(EMR, emr | *eir_stuck);
441778c357ddSVille Syrjälä }
441878c357ddSVille Syrjälä 
441978c357ddSVille Syrjälä static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv,
442078c357ddSVille Syrjälä 				   u32 eir, u32 eir_stuck)
442178c357ddSVille Syrjälä {
442278c357ddSVille Syrjälä 	DRM_DEBUG("Master Error, EIR 0x%08x\n", eir);
442378c357ddSVille Syrjälä 
442478c357ddSVille Syrjälä 	if (eir_stuck)
442578c357ddSVille Syrjälä 		DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masked\n", eir_stuck);
442678c357ddSVille Syrjälä }
442778c357ddSVille Syrjälä 
4428ff1f525eSDaniel Vetter static irqreturn_t i8xx_irq_handler(int irq, void *arg)
4429c2798b19SChris Wilson {
443045a83f84SDaniel Vetter 	struct drm_device *dev = arg;
4431fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
4432af722d28SVille Syrjälä 	irqreturn_t ret = IRQ_NONE;
4433c2798b19SChris Wilson 
44342dd2a883SImre Deak 	if (!intel_irqs_enabled(dev_priv))
44352dd2a883SImre Deak 		return IRQ_NONE;
44362dd2a883SImre Deak 
44371f814dacSImre Deak 	/* IRQs are synced during runtime_suspend, so we don't require a wakeref */
44381f814dacSImre Deak 	disable_rpm_wakeref_asserts(dev_priv);
44391f814dacSImre Deak 
4440af722d28SVille Syrjälä 	do {
4441af722d28SVille Syrjälä 		u32 pipe_stats[I915_MAX_PIPES] = {};
444278c357ddSVille Syrjälä 		u16 eir = 0, eir_stuck = 0;
4443af722d28SVille Syrjälä 		u16 iir;
4444af722d28SVille Syrjälä 
44459d9523d8SPaulo Zanoni 		iir = I915_READ16(GEN2_IIR);
4446c2798b19SChris Wilson 		if (iir == 0)
4447af722d28SVille Syrjälä 			break;
4448c2798b19SChris Wilson 
4449af722d28SVille Syrjälä 		ret = IRQ_HANDLED;
4450c2798b19SChris Wilson 
4451eb64343cSVille Syrjälä 		/* Call regardless, as some status bits might not be
4452eb64343cSVille Syrjälä 		 * signalled in iir */
4453eb64343cSVille Syrjälä 		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
4454c2798b19SChris Wilson 
445578c357ddSVille Syrjälä 		if (iir & I915_MASTER_ERROR_INTERRUPT)
445678c357ddSVille Syrjälä 			i8xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
445778c357ddSVille Syrjälä 
44589d9523d8SPaulo Zanoni 		I915_WRITE16(GEN2_IIR, iir);
4459c2798b19SChris Wilson 
4460c2798b19SChris Wilson 		if (iir & I915_USER_INTERRUPT)
44618a68d464SChris Wilson 			intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);
4462c2798b19SChris Wilson 
446378c357ddSVille Syrjälä 		if (iir & I915_MASTER_ERROR_INTERRUPT)
446478c357ddSVille Syrjälä 			i8xx_error_irq_handler(dev_priv, eir, eir_stuck);
4465af722d28SVille Syrjälä 
4466eb64343cSVille Syrjälä 		i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats);
4467af722d28SVille Syrjälä 	} while (0);
4468c2798b19SChris Wilson 
44691f814dacSImre Deak 	enable_rpm_wakeref_asserts(dev_priv);
44701f814dacSImre Deak 
44711f814dacSImre Deak 	return ret;
4472c2798b19SChris Wilson }
4473c2798b19SChris Wilson 
44746bcdb1c8SVille Syrjälä static void i915_irq_reset(struct drm_device *dev)
4475a266c7d5SChris Wilson {
4476fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
4477b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
4478a266c7d5SChris Wilson 
447956b857a5STvrtko Ursulin 	if (I915_HAS_HOTPLUG(dev_priv)) {
44800706f17cSEgbert Eich 		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4481a266c7d5SChris Wilson 		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4482a266c7d5SChris Wilson 	}
4483a266c7d5SChris Wilson 
448444d9241eSVille Syrjälä 	i9xx_pipestat_irq_reset(dev_priv);
448544d9241eSVille Syrjälä 
4486b16b2a2fSPaulo Zanoni 	GEN3_IRQ_RESET(uncore, GEN2_);
4487a266c7d5SChris Wilson }
4488a266c7d5SChris Wilson 
4489a266c7d5SChris Wilson static int i915_irq_postinstall(struct drm_device *dev)
4490a266c7d5SChris Wilson {
4491fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
4492b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
449338bde180SChris Wilson 	u32 enable_mask;
4494a266c7d5SChris Wilson 
4495045cebd2SVille Syrjälä 	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE |
4496045cebd2SVille Syrjälä 			  I915_ERROR_MEMORY_REFRESH));
449738bde180SChris Wilson 
449838bde180SChris Wilson 	/* Unmask the interrupts that we always want on. */
449938bde180SChris Wilson 	dev_priv->irq_mask =
450038bde180SChris Wilson 		~(I915_ASLE_INTERRUPT |
450138bde180SChris Wilson 		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
450216659bc5SVille Syrjälä 		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
450316659bc5SVille Syrjälä 		  I915_MASTER_ERROR_INTERRUPT);
450438bde180SChris Wilson 
450538bde180SChris Wilson 	enable_mask =
450638bde180SChris Wilson 		I915_ASLE_INTERRUPT |
450738bde180SChris Wilson 		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
450838bde180SChris Wilson 		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
450916659bc5SVille Syrjälä 		I915_MASTER_ERROR_INTERRUPT |
451038bde180SChris Wilson 		I915_USER_INTERRUPT;
451138bde180SChris Wilson 
451256b857a5STvrtko Ursulin 	if (I915_HAS_HOTPLUG(dev_priv)) {
4513a266c7d5SChris Wilson 		/* Enable in IER... */
4514a266c7d5SChris Wilson 		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
4515a266c7d5SChris Wilson 		/* and unmask in IMR */
4516a266c7d5SChris Wilson 		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
4517a266c7d5SChris Wilson 	}
4518a266c7d5SChris Wilson 
4519b16b2a2fSPaulo Zanoni 	GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);
4520a266c7d5SChris Wilson 
4521379ef82dSDaniel Vetter 	/* Interrupt setup is already guaranteed to be single-threaded; this is
4522379ef82dSDaniel Vetter 	 * just to make the assert_spin_locked check happy. */
4523d6207435SDaniel Vetter 	spin_lock_irq(&dev_priv->irq_lock);
4524755e9019SImre Deak 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4525755e9019SImre Deak 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4526d6207435SDaniel Vetter 	spin_unlock_irq(&dev_priv->irq_lock);
4527379ef82dSDaniel Vetter 
4528c30bb1fdSVille Syrjälä 	i915_enable_asle_pipestat(dev_priv);
4529c30bb1fdSVille Syrjälä 
453020afbda2SDaniel Vetter 	return 0;
453120afbda2SDaniel Vetter }
453220afbda2SDaniel Vetter 
4533ff1f525eSDaniel Vetter static irqreturn_t i915_irq_handler(int irq, void *arg)
4534a266c7d5SChris Wilson {
453545a83f84SDaniel Vetter 	struct drm_device *dev = arg;
4536fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
4537af722d28SVille Syrjälä 	irqreturn_t ret = IRQ_NONE;
4538a266c7d5SChris Wilson 
45392dd2a883SImre Deak 	if (!intel_irqs_enabled(dev_priv))
45402dd2a883SImre Deak 		return IRQ_NONE;
45412dd2a883SImre Deak 
45421f814dacSImre Deak 	/* IRQs are synced during runtime_suspend, so we don't require a wakeref */
45431f814dacSImre Deak 	disable_rpm_wakeref_asserts(dev_priv);
45441f814dacSImre Deak 
454538bde180SChris Wilson 	do {
4546eb64343cSVille Syrjälä 		u32 pipe_stats[I915_MAX_PIPES] = {};
454778c357ddSVille Syrjälä 		u32 eir = 0, eir_stuck = 0;
4548af722d28SVille Syrjälä 		u32 hotplug_status = 0;
4549af722d28SVille Syrjälä 		u32 iir;
4550a266c7d5SChris Wilson 
45519d9523d8SPaulo Zanoni 		iir = I915_READ(GEN2_IIR);
4552af722d28SVille Syrjälä 		if (iir == 0)
4553af722d28SVille Syrjälä 			break;
4554af722d28SVille Syrjälä 
4555af722d28SVille Syrjälä 		ret = IRQ_HANDLED;
4556af722d28SVille Syrjälä 
4557af722d28SVille Syrjälä 		if (I915_HAS_HOTPLUG(dev_priv) &&
4558af722d28SVille Syrjälä 		    iir & I915_DISPLAY_PORT_INTERRUPT)
4559af722d28SVille Syrjälä 			hotplug_status = i9xx_hpd_irq_ack(dev_priv);
4560a266c7d5SChris Wilson 
4561eb64343cSVille Syrjälä 		/* Call regardless, as some status bits might not be
4562eb64343cSVille Syrjälä 		 * signalled in iir */
4563eb64343cSVille Syrjälä 		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
4564a266c7d5SChris Wilson 
456578c357ddSVille Syrjälä 		if (iir & I915_MASTER_ERROR_INTERRUPT)
456678c357ddSVille Syrjälä 			i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
456778c357ddSVille Syrjälä 
45689d9523d8SPaulo Zanoni 		I915_WRITE(GEN2_IIR, iir);
4569a266c7d5SChris Wilson 
4570a266c7d5SChris Wilson 		if (iir & I915_USER_INTERRUPT)
45718a68d464SChris Wilson 			intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);
4572a266c7d5SChris Wilson 
457378c357ddSVille Syrjälä 		if (iir & I915_MASTER_ERROR_INTERRUPT)
457478c357ddSVille Syrjälä 			i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
4575a266c7d5SChris Wilson 
4576af722d28SVille Syrjälä 		if (hotplug_status)
4577af722d28SVille Syrjälä 			i9xx_hpd_irq_handler(dev_priv, hotplug_status);
4578af722d28SVille Syrjälä 
4579af722d28SVille Syrjälä 		i915_pipestat_irq_handler(dev_priv, iir, pipe_stats);
4580af722d28SVille Syrjälä 	} while (0);
4581a266c7d5SChris Wilson 
45821f814dacSImre Deak 	enable_rpm_wakeref_asserts(dev_priv);
45831f814dacSImre Deak 
4584a266c7d5SChris Wilson 	return ret;
4585a266c7d5SChris Wilson }
4586a266c7d5SChris Wilson 
45876bcdb1c8SVille Syrjälä static void i965_irq_reset(struct drm_device *dev)
4588a266c7d5SChris Wilson {
4589fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
4590b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
4591a266c7d5SChris Wilson 
45920706f17cSEgbert Eich 	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4593a266c7d5SChris Wilson 	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4594a266c7d5SChris Wilson 
459544d9241eSVille Syrjälä 	i9xx_pipestat_irq_reset(dev_priv);
459644d9241eSVille Syrjälä 
4597b16b2a2fSPaulo Zanoni 	GEN3_IRQ_RESET(uncore, GEN2_);
4598a266c7d5SChris Wilson }
4599a266c7d5SChris Wilson 
4600a266c7d5SChris Wilson static int i965_irq_postinstall(struct drm_device *dev)
4601a266c7d5SChris Wilson {
4602fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
4603b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
4604bbba0a97SChris Wilson 	u32 enable_mask;
4605a266c7d5SChris Wilson 	u32 error_mask;
4606a266c7d5SChris Wilson 
4607045cebd2SVille Syrjälä 	/*
4608045cebd2SVille Syrjälä 	 * Enable some error detection, note the instruction error mask
4609045cebd2SVille Syrjälä 	 * bit is reserved, so we leave it masked.
4610045cebd2SVille Syrjälä 	 */
4611045cebd2SVille Syrjälä 	if (IS_G4X(dev_priv)) {
4612045cebd2SVille Syrjälä 		error_mask = ~(GM45_ERROR_PAGE_TABLE |
4613045cebd2SVille Syrjälä 			       GM45_ERROR_MEM_PRIV |
4614045cebd2SVille Syrjälä 			       GM45_ERROR_CP_PRIV |
4615045cebd2SVille Syrjälä 			       I915_ERROR_MEMORY_REFRESH);
4616045cebd2SVille Syrjälä 	} else {
4617045cebd2SVille Syrjälä 		error_mask = ~(I915_ERROR_PAGE_TABLE |
4618045cebd2SVille Syrjälä 			       I915_ERROR_MEMORY_REFRESH);
4619045cebd2SVille Syrjälä 	}
4620045cebd2SVille Syrjälä 	I915_WRITE(EMR, error_mask);
4621045cebd2SVille Syrjälä 
4622a266c7d5SChris Wilson 	/* Unmask the interrupts that we always want on. */
4623c30bb1fdSVille Syrjälä 	dev_priv->irq_mask =
4624c30bb1fdSVille Syrjälä 		~(I915_ASLE_INTERRUPT |
4625adca4730SChris Wilson 		  I915_DISPLAY_PORT_INTERRUPT |
4626bbba0a97SChris Wilson 		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4627bbba0a97SChris Wilson 		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
462878c357ddSVille Syrjälä 		  I915_MASTER_ERROR_INTERRUPT);
4629bbba0a97SChris Wilson 
4630c30bb1fdSVille Syrjälä 	enable_mask =
4631c30bb1fdSVille Syrjälä 		I915_ASLE_INTERRUPT |
4632c30bb1fdSVille Syrjälä 		I915_DISPLAY_PORT_INTERRUPT |
4633c30bb1fdSVille Syrjälä 		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4634c30bb1fdSVille Syrjälä 		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
463578c357ddSVille Syrjälä 		I915_MASTER_ERROR_INTERRUPT |
4636c30bb1fdSVille Syrjälä 		I915_USER_INTERRUPT;
4637bbba0a97SChris Wilson 
463891d14251STvrtko Ursulin 	if (IS_G4X(dev_priv))
4639bbba0a97SChris Wilson 		enable_mask |= I915_BSD_USER_INTERRUPT;
4640a266c7d5SChris Wilson 
4641b16b2a2fSPaulo Zanoni 	GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);
4642c30bb1fdSVille Syrjälä 
4643b79480baSDaniel Vetter 	/* Interrupt setup is already guaranteed to be single-threaded; this is
4644b79480baSDaniel Vetter 	 * just to make the assert_spin_locked check happy. */
4645d6207435SDaniel Vetter 	spin_lock_irq(&dev_priv->irq_lock);
4646755e9019SImre Deak 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
4647755e9019SImre Deak 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4648755e9019SImre Deak 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4649d6207435SDaniel Vetter 	spin_unlock_irq(&dev_priv->irq_lock);
4650a266c7d5SChris Wilson 
465191d14251STvrtko Ursulin 	i915_enable_asle_pipestat(dev_priv);
465220afbda2SDaniel Vetter 
465320afbda2SDaniel Vetter 	return 0;
465420afbda2SDaniel Vetter }
465520afbda2SDaniel Vetter 
465691d14251STvrtko Ursulin static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
465720afbda2SDaniel Vetter {
465820afbda2SDaniel Vetter 	u32 hotplug_en;
465920afbda2SDaniel Vetter 
466067520415SChris Wilson 	lockdep_assert_held(&dev_priv->irq_lock);
4661b5ea2d56SDaniel Vetter 
4662adca4730SChris Wilson 	/* Note HDMI and DP share hotplug bits */
4663e5868a31SEgbert Eich 	/* enable bits are the same for all generations */
466491d14251STvrtko Ursulin 	hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915);
4665a266c7d5SChris Wilson 	/* Programming the CRT detection parameters tends
4666a266c7d5SChris Wilson 	 * to generate a spurious hotplug event about three
4667a266c7d5SChris Wilson 	 * seconds later. So just do it once.
4668a266c7d5SChris Wilson 	 */
466991d14251STvrtko Ursulin 	if (IS_G4X(dev_priv))
4670a266c7d5SChris Wilson 		hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
4671a266c7d5SChris Wilson 	hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
4672a266c7d5SChris Wilson 
4673a266c7d5SChris Wilson 	/* Ignore TV since it's buggy */
46740706f17cSEgbert Eich 	i915_hotplug_interrupt_update_locked(dev_priv,
4675f9e3dc78SJani Nikula 					     HOTPLUG_INT_EN_MASK |
4676f9e3dc78SJani Nikula 					     CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
4677f9e3dc78SJani Nikula 					     CRT_HOTPLUG_ACTIVATION_PERIOD_64,
46780706f17cSEgbert Eich 					     hotplug_en);
4679a266c7d5SChris Wilson }
4680a266c7d5SChris Wilson 
4681ff1f525eSDaniel Vetter static irqreturn_t i965_irq_handler(int irq, void *arg)
4682a266c7d5SChris Wilson {
468345a83f84SDaniel Vetter 	struct drm_device *dev = arg;
4684fac5e23eSChris Wilson 	struct drm_i915_private *dev_priv = to_i915(dev);
4685af722d28SVille Syrjälä 	irqreturn_t ret = IRQ_NONE;
4686a266c7d5SChris Wilson 
46872dd2a883SImre Deak 	if (!intel_irqs_enabled(dev_priv))
46882dd2a883SImre Deak 		return IRQ_NONE;
46892dd2a883SImre Deak 
46901f814dacSImre Deak 	/* IRQs are synced during runtime_suspend, so we don't require a wakeref */
46911f814dacSImre Deak 	disable_rpm_wakeref_asserts(dev_priv);
46921f814dacSImre Deak 
4693af722d28SVille Syrjälä 	do {
4694eb64343cSVille Syrjälä 		u32 pipe_stats[I915_MAX_PIPES] = {};
469578c357ddSVille Syrjälä 		u32 eir = 0, eir_stuck = 0;
4696af722d28SVille Syrjälä 		u32 hotplug_status = 0;
4697af722d28SVille Syrjälä 		u32 iir;
46982c8ba29fSChris Wilson 
46999d9523d8SPaulo Zanoni 		iir = I915_READ(GEN2_IIR);
4700af722d28SVille Syrjälä 		if (iir == 0)
4701af722d28SVille Syrjälä 			break;
4702af722d28SVille Syrjälä 
4703af722d28SVille Syrjälä 		ret = IRQ_HANDLED;
4704af722d28SVille Syrjälä 
4705af722d28SVille Syrjälä 		if (iir & I915_DISPLAY_PORT_INTERRUPT)
4706af722d28SVille Syrjälä 			hotplug_status = i9xx_hpd_irq_ack(dev_priv);
4707a266c7d5SChris Wilson 
4708eb64343cSVille Syrjälä 		/* Call regardless, as some status bits might not be
4709eb64343cSVille Syrjälä 		 * signalled in iir */
4710eb64343cSVille Syrjälä 		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
4711a266c7d5SChris Wilson 
471278c357ddSVille Syrjälä 		if (iir & I915_MASTER_ERROR_INTERRUPT)
471378c357ddSVille Syrjälä 			i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
471478c357ddSVille Syrjälä 
47159d9523d8SPaulo Zanoni 		I915_WRITE(GEN2_IIR, iir);
4716a266c7d5SChris Wilson 
4717a266c7d5SChris Wilson 		if (iir & I915_USER_INTERRUPT)
47188a68d464SChris Wilson 			intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);
4719af722d28SVille Syrjälä 
4720a266c7d5SChris Wilson 		if (iir & I915_BSD_USER_INTERRUPT)
47218a68d464SChris Wilson 			intel_engine_breadcrumbs_irq(dev_priv->engine[VCS0]);
4722a266c7d5SChris Wilson 
472378c357ddSVille Syrjälä 		if (iir & I915_MASTER_ERROR_INTERRUPT)
472478c357ddSVille Syrjälä 			i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
4725515ac2bbSDaniel Vetter 
4726af722d28SVille Syrjälä 		if (hotplug_status)
4727af722d28SVille Syrjälä 			i9xx_hpd_irq_handler(dev_priv, hotplug_status);
4728af722d28SVille Syrjälä 
4729af722d28SVille Syrjälä 		i965_pipestat_irq_handler(dev_priv, iir, pipe_stats);
4730af722d28SVille Syrjälä 	} while (0);
4731a266c7d5SChris Wilson 
47321f814dacSImre Deak 	enable_rpm_wakeref_asserts(dev_priv);
47331f814dacSImre Deak 
4734a266c7d5SChris Wilson 	return ret;
4735a266c7d5SChris Wilson }
4736a266c7d5SChris Wilson 
4737fca52a55SDaniel Vetter /**
4738fca52a55SDaniel Vetter  * intel_irq_init - initializes irq support
4739fca52a55SDaniel Vetter  * @dev_priv: i915 device instance
4740fca52a55SDaniel Vetter  *
4741fca52a55SDaniel Vetter  * This function initializes all the irq support including work items, timers
4742fca52a55SDaniel Vetter  * and all the vtables. It does not set up the interrupt itself, though.
4743fca52a55SDaniel Vetter  */
4744b963291cSDaniel Vetter void intel_irq_init(struct drm_i915_private *dev_priv)
4745f71d4af4SJesse Barnes {
474691c8a326SChris Wilson 	struct drm_device *dev = &dev_priv->drm;
4747562d9baeSSagar Arun Kamble 	struct intel_rps *rps = &dev_priv->gt_pm.rps;
4748cefcff8fSJoonas Lahtinen 	int i;
47498b2e326dSChris Wilson 
4750d938da6bSVille Syrjälä 	if (IS_I945GM(dev_priv))
4751d938da6bSVille Syrjälä 		i945gm_vblank_work_init(dev_priv);
4752d938da6bSVille Syrjälä 
475377913b39SJani Nikula 	intel_hpd_init_work(dev_priv);
475477913b39SJani Nikula 
4755562d9baeSSagar Arun Kamble 	INIT_WORK(&rps->work, gen6_pm_rps_work);
4756cefcff8fSJoonas Lahtinen 
4757a4da4fa4SDaniel Vetter 	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
4758cefcff8fSJoonas Lahtinen 	for (i = 0; i < MAX_L3_SLICES; ++i)
4759cefcff8fSJoonas Lahtinen 		dev_priv->l3_parity.remap_info[i] = NULL;
47608b2e326dSChris Wilson 
4761*54c52a84SOscar Mateo 	if (HAS_GUC_SCHED(dev_priv) && INTEL_GEN(dev_priv) < 11)
476226705e20SSagar Arun Kamble 		dev_priv->pm_guc_events = GEN9_GUC_TO_HOST_INT_EVENT;
476326705e20SSagar Arun Kamble 
4764a6706b45SDeepak S 	/* Let's track the enabled rps events */
4765666a4537SWayne Boyer 	if (IS_VALLEYVIEW(dev_priv))
47666c65a587SVille Syrjälä 		/* WaGsvRC0ResidencyMethod:vlv */
4767e0e8c7cbSChris Wilson 		dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
476831685c25SDeepak S 	else
47694668f695SChris Wilson 		dev_priv->pm_rps_events = (GEN6_PM_RP_UP_THRESHOLD |
47704668f695SChris Wilson 					   GEN6_PM_RP_DOWN_THRESHOLD |
47714668f695SChris Wilson 					   GEN6_PM_RP_DOWN_TIMEOUT);
4772a6706b45SDeepak S 
4773917dc6b5SMika Kuoppala 	/* We share the register with other engine */
4774917dc6b5SMika Kuoppala 	if (INTEL_GEN(dev_priv) > 9)
4775917dc6b5SMika Kuoppala 		GEM_WARN_ON(dev_priv->pm_rps_events & 0xffff0000);
4776917dc6b5SMika Kuoppala 
4777562d9baeSSagar Arun Kamble 	rps->pm_intrmsk_mbz = 0;
47781800ad25SSagar Arun Kamble 
47791800ad25SSagar Arun Kamble 	/*
4780acf2dc22SMika Kuoppala 	 * SNB,IVB,HSW can hang, while VLV,CHV may hard hang, on a looping batchbuffer
47811800ad25SSagar Arun Kamble 	 * if GEN6_PM_UP_EI_EXPIRED is masked.
47821800ad25SSagar Arun Kamble 	 *
47831800ad25SSagar Arun Kamble 	 * TODO: verify if this can be reproduced on VLV,CHV.
47841800ad25SSagar Arun Kamble 	 */
4785bca2bf2aSPandiyan, Dhinakaran 	if (INTEL_GEN(dev_priv) <= 7)
4786562d9baeSSagar Arun Kamble 		rps->pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED;
47871800ad25SSagar Arun Kamble 
4788bca2bf2aSPandiyan, Dhinakaran 	if (INTEL_GEN(dev_priv) >= 8)
4789562d9baeSSagar Arun Kamble 		rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
47901800ad25SSagar Arun Kamble 
479132db0b65SVille Syrjälä 	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
4792fd8f507cSVille Syrjälä 		dev->driver->get_vblank_counter = g4x_get_vblank_counter;
479332db0b65SVille Syrjälä 	else if (INTEL_GEN(dev_priv) >= 3)
4794391f75e2SVille Syrjälä 		dev->driver->get_vblank_counter = i915_get_vblank_counter;
4795f71d4af4SJesse Barnes 
479621da2700SVille Syrjälä 	dev->vblank_disable_immediate = true;
479721da2700SVille Syrjälä 
4798262fd485SChris Wilson 	/* Most platforms treat the display irq block as an always-on
4799262fd485SChris Wilson 	 * power domain. vlv/chv can disable it at runtime and need
4800262fd485SChris Wilson 	 * special care to avoid writing any of the display block registers
4801262fd485SChris Wilson 	 * outside of the power domain. We defer setting up the display irqs
4802262fd485SChris Wilson 	 * in this case to the runtime pm.
4803262fd485SChris Wilson 	 */
4804262fd485SChris Wilson 	dev_priv->display_irqs_enabled = true;
4805262fd485SChris Wilson 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
4806262fd485SChris Wilson 		dev_priv->display_irqs_enabled = false;
4807262fd485SChris Wilson 
4808317eaa95SLyude 	dev_priv->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD;
48099a64c650SLyude Paul 	/* If we have MST support, we want to avoid doing short HPD IRQ storm
48109a64c650SLyude Paul 	 * detection, as short HPD storms will occur as a natural part of
48119a64c650SLyude Paul 	 * sideband messaging with MST.
48129a64c650SLyude Paul 	 * On older platforms however, IRQ storms can occur with both long and
48139a64c650SLyude Paul 	 * short pulses, as seen on some G4x systems.
48149a64c650SLyude Paul 	 */
48159a64c650SLyude Paul 	dev_priv->hotplug.hpd_short_storm_enabled = !HAS_DP_MST(dev_priv);
4816317eaa95SLyude 
48171bf6ad62SDaniel Vetter 	dev->driver->get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos;
4818f71d4af4SJesse Barnes 	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
4819f71d4af4SJesse Barnes 
4820b963291cSDaniel Vetter 	if (IS_CHERRYVIEW(dev_priv)) {
482143f328d7SVille Syrjälä 		dev->driver->irq_handler = cherryview_irq_handler;
48226bcdb1c8SVille Syrjälä 		dev->driver->irq_preinstall = cherryview_irq_reset;
482343f328d7SVille Syrjälä 		dev->driver->irq_postinstall = cherryview_irq_postinstall;
48246bcdb1c8SVille Syrjälä 		dev->driver->irq_uninstall = cherryview_irq_reset;
482586e83e35SChris Wilson 		dev->driver->enable_vblank = i965_enable_vblank;
482686e83e35SChris Wilson 		dev->driver->disable_vblank = i965_disable_vblank;
482743f328d7SVille Syrjälä 		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4828b963291cSDaniel Vetter 	} else if (IS_VALLEYVIEW(dev_priv)) {
48297e231dbeSJesse Barnes 		dev->driver->irq_handler = valleyview_irq_handler;
48306bcdb1c8SVille Syrjälä 		dev->driver->irq_preinstall = valleyview_irq_reset;
48317e231dbeSJesse Barnes 		dev->driver->irq_postinstall = valleyview_irq_postinstall;
48326bcdb1c8SVille Syrjälä 		dev->driver->irq_uninstall = valleyview_irq_reset;
483386e83e35SChris Wilson 		dev->driver->enable_vblank = i965_enable_vblank;
483486e83e35SChris Wilson 		dev->driver->disable_vblank = i965_disable_vblank;
4835fa00abe0SEgbert Eich 		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
483651951ae7SMika Kuoppala 	} else if (INTEL_GEN(dev_priv) >= 11) {
483751951ae7SMika Kuoppala 		dev->driver->irq_handler = gen11_irq_handler;
483851951ae7SMika Kuoppala 		dev->driver->irq_preinstall = gen11_irq_reset;
483951951ae7SMika Kuoppala 		dev->driver->irq_postinstall = gen11_irq_postinstall;
484051951ae7SMika Kuoppala 		dev->driver->irq_uninstall = gen11_irq_reset;
484151951ae7SMika Kuoppala 		dev->driver->enable_vblank = gen8_enable_vblank;
484251951ae7SMika Kuoppala 		dev->driver->disable_vblank = gen8_disable_vblank;
4843121e758eSDhinakaran Pandiyan 		dev_priv->display.hpd_irq_setup = gen11_hpd_irq_setup;
4844bca2bf2aSPandiyan, Dhinakaran 	} else if (INTEL_GEN(dev_priv) >= 8) {
4845abd58f01SBen Widawsky 		dev->driver->irq_handler = gen8_irq_handler;
4846723761b8SDaniel Vetter 		dev->driver->irq_preinstall = gen8_irq_reset;
4847abd58f01SBen Widawsky 		dev->driver->irq_postinstall = gen8_irq_postinstall;
48486bcdb1c8SVille Syrjälä 		dev->driver->irq_uninstall = gen8_irq_reset;
4849abd58f01SBen Widawsky 		dev->driver->enable_vblank = gen8_enable_vblank;
4850abd58f01SBen Widawsky 		dev->driver->disable_vblank = gen8_disable_vblank;
4851cc3f90f0SAnder Conselvan de Oliveira 		if (IS_GEN9_LP(dev_priv))
4852e0a20ad7SShashank Sharma 			dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
4853c6c30b91SRodrigo Vivi 		else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
48546dbf30ceSVille Syrjälä 			dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
48556dbf30ceSVille Syrjälä 		else
48563a3b3c7dSVille Syrjälä 			dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
48576e266956STvrtko Ursulin 	} else if (HAS_PCH_SPLIT(dev_priv)) {
4858f71d4af4SJesse Barnes 		dev->driver->irq_handler = ironlake_irq_handler;
4859723761b8SDaniel Vetter 		dev->driver->irq_preinstall = ironlake_irq_reset;
4860f71d4af4SJesse Barnes 		dev->driver->irq_postinstall = ironlake_irq_postinstall;
48616bcdb1c8SVille Syrjälä 		dev->driver->irq_uninstall = ironlake_irq_reset;
4862f71d4af4SJesse Barnes 		dev->driver->enable_vblank = ironlake_enable_vblank;
4863f71d4af4SJesse Barnes 		dev->driver->disable_vblank = ironlake_disable_vblank;
4864e4ce95aaSVille Syrjälä 		dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
4865f71d4af4SJesse Barnes 	} else {
4866cf819effSLucas De Marchi 		if (IS_GEN(dev_priv, 2)) {
48676bcdb1c8SVille Syrjälä 			dev->driver->irq_preinstall = i8xx_irq_reset;
4868c2798b19SChris Wilson 			dev->driver->irq_postinstall = i8xx_irq_postinstall;
4869c2798b19SChris Wilson 			dev->driver->irq_handler = i8xx_irq_handler;
48706bcdb1c8SVille Syrjälä 			dev->driver->irq_uninstall = i8xx_irq_reset;
487186e83e35SChris Wilson 			dev->driver->enable_vblank = i8xx_enable_vblank;
487286e83e35SChris Wilson 			dev->driver->disable_vblank = i8xx_disable_vblank;
4873d938da6bSVille Syrjälä 		} else if (IS_I945GM(dev_priv)) {
4874d938da6bSVille Syrjälä 			dev->driver->irq_preinstall = i915_irq_reset;
4875d938da6bSVille Syrjälä 			dev->driver->irq_postinstall = i915_irq_postinstall;
4876d938da6bSVille Syrjälä 			dev->driver->irq_uninstall = i915_irq_reset;
4877d938da6bSVille Syrjälä 			dev->driver->irq_handler = i915_irq_handler;
4878d938da6bSVille Syrjälä 			dev->driver->enable_vblank = i945gm_enable_vblank;
4879d938da6bSVille Syrjälä 			dev->driver->disable_vblank = i945gm_disable_vblank;
4880cf819effSLucas De Marchi 		} else if (IS_GEN(dev_priv, 3)) {
48816bcdb1c8SVille Syrjälä 			dev->driver->irq_preinstall = i915_irq_reset;
4882a266c7d5SChris Wilson 			dev->driver->irq_postinstall = i915_irq_postinstall;
48836bcdb1c8SVille Syrjälä 			dev->driver->irq_uninstall = i915_irq_reset;
4884a266c7d5SChris Wilson 			dev->driver->irq_handler = i915_irq_handler;
488586e83e35SChris Wilson 			dev->driver->enable_vblank = i8xx_enable_vblank;
488686e83e35SChris Wilson 			dev->driver->disable_vblank = i8xx_disable_vblank;
4887c2798b19SChris Wilson 		} else {
48886bcdb1c8SVille Syrjälä 			dev->driver->irq_preinstall = i965_irq_reset;
4889a266c7d5SChris Wilson 			dev->driver->irq_postinstall = i965_irq_postinstall;
48906bcdb1c8SVille Syrjälä 			dev->driver->irq_uninstall = i965_irq_reset;
4891a266c7d5SChris Wilson 			dev->driver->irq_handler = i965_irq_handler;
489286e83e35SChris Wilson 			dev->driver->enable_vblank = i965_enable_vblank;
489386e83e35SChris Wilson 			dev->driver->disable_vblank = i965_disable_vblank;
4894c2798b19SChris Wilson 		}
4895778eb334SVille Syrjälä 		if (I915_HAS_HOTPLUG(dev_priv))
4896778eb334SVille Syrjälä 			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4897f71d4af4SJesse Barnes 	}
4898f71d4af4SJesse Barnes }
489920afbda2SDaniel Vetter 
4900fca52a55SDaniel Vetter /**
4901cefcff8fSJoonas Lahtinen  * intel_irq_fini - deinitializes IRQ support
4902cefcff8fSJoonas Lahtinen  * @i915: i915 device instance
4903cefcff8fSJoonas Lahtinen  *
4904cefcff8fSJoonas Lahtinen  * This function deinitializes all the IRQ support.
4905cefcff8fSJoonas Lahtinen  */
4906cefcff8fSJoonas Lahtinen void intel_irq_fini(struct drm_i915_private *i915)
4907cefcff8fSJoonas Lahtinen {
4908cefcff8fSJoonas Lahtinen 	int i;
4909cefcff8fSJoonas Lahtinen 
4910d938da6bSVille Syrjälä 	if (IS_I945GM(i915))
4911d938da6bSVille Syrjälä 		i945gm_vblank_work_fini(i915);
4912d938da6bSVille Syrjälä 
4913cefcff8fSJoonas Lahtinen 	for (i = 0; i < MAX_L3_SLICES; ++i)
4914cefcff8fSJoonas Lahtinen 		kfree(i915->l3_parity.remap_info[i]);
4915cefcff8fSJoonas Lahtinen }
4916cefcff8fSJoonas Lahtinen 
4917cefcff8fSJoonas Lahtinen /**
4918fca52a55SDaniel Vetter  * intel_irq_install - enables the hardware interrupt
4919fca52a55SDaniel Vetter  * @dev_priv: i915 device instance
4920fca52a55SDaniel Vetter  *
4921fca52a55SDaniel Vetter  * This function enables the hardware interrupt handling, but leaves the hotplug
4922fca52a55SDaniel Vetter  * handling still disabled. It is called after intel_irq_init().
4923fca52a55SDaniel Vetter  *
4924fca52a55SDaniel Vetter  * In the driver load and resume code we need working interrupts in a few places
4925fca52a55SDaniel Vetter  * but don't want to deal with the hassle of concurrent probe and hotplug
4926fca52a55SDaniel Vetter  * workers. Hence the split into this two-stage approach.
4927fca52a55SDaniel Vetter  */
49282aeb7d3aSDaniel Vetter int intel_irq_install(struct drm_i915_private *dev_priv)
49292aeb7d3aSDaniel Vetter {
49302aeb7d3aSDaniel Vetter 	/*
49312aeb7d3aSDaniel Vetter 	 * We enable some interrupt sources in our postinstall hooks, so mark
49322aeb7d3aSDaniel Vetter 	 * interrupts as enabled _before_ actually enabling them to avoid
49332aeb7d3aSDaniel Vetter 	 * special cases in our ordering checks.
49342aeb7d3aSDaniel Vetter 	 */
4935ad1443f0SSagar Arun Kamble 	dev_priv->runtime_pm.irqs_enabled = true;
49362aeb7d3aSDaniel Vetter 
493791c8a326SChris Wilson 	return drm_irq_install(&dev_priv->drm, dev_priv->drm.pdev->irq);
49382aeb7d3aSDaniel Vetter }
49392aeb7d3aSDaniel Vetter 
4940fca52a55SDaniel Vetter /**
4941fca52a55SDaniel Vetter  * intel_irq_uninstall - finalizes all irq handling
4942fca52a55SDaniel Vetter  * @dev_priv: i915 device instance
4943fca52a55SDaniel Vetter  *
4944fca52a55SDaniel Vetter  * This stops interrupt and hotplug handling and unregisters and frees all
4945fca52a55SDaniel Vetter  * resources acquired in the init functions.
4946fca52a55SDaniel Vetter  */
49472aeb7d3aSDaniel Vetter void intel_irq_uninstall(struct drm_i915_private *dev_priv)
49482aeb7d3aSDaniel Vetter {
494991c8a326SChris Wilson 	drm_irq_uninstall(&dev_priv->drm);
49502aeb7d3aSDaniel Vetter 	intel_hpd_cancel_work(dev_priv);
4951ad1443f0SSagar Arun Kamble 	dev_priv->runtime_pm.irqs_enabled = false;
49522aeb7d3aSDaniel Vetter }
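
/*
 * Illustrative sketch only, not part of the driver: the intended ordering of
 * the entry points above during driver load and unload, following the
 * kerneldoc for intel_irq_init() and intel_irq_install(). The wrapper name
 * below is hypothetical.
 */
#if 0
static int example_irq_bringup(struct drm_i915_private *dev_priv)
{
	int ret;

	/* Stage one: work items, timers and vtables; no interrupt yet. */
	intel_irq_init(dev_priv);

	/* Stage two: request the irq line, which runs the postinstall hooks. */
	ret = intel_irq_install(dev_priv);
	if (ret)
		return ret;

	/* ... normal device operation ... */

	/* Teardown mirrors the two stages. */
	intel_irq_uninstall(dev_priv);
	intel_irq_fini(dev_priv);

	return 0;
}
#endif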
49532aeb7d3aSDaniel Vetter 
4954fca52a55SDaniel Vetter /**
4955fca52a55SDaniel Vetter  * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
4956fca52a55SDaniel Vetter  * @dev_priv: i915 device instance
4957fca52a55SDaniel Vetter  *
4958fca52a55SDaniel Vetter  * This function is used to disable interrupts at runtime, both in the runtime
4959fca52a55SDaniel Vetter  * pm and the system suspend/resume code.
4960fca52a55SDaniel Vetter  */
4961b963291cSDaniel Vetter void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
4962c67a470bSPaulo Zanoni {
496391c8a326SChris Wilson 	dev_priv->drm.driver->irq_uninstall(&dev_priv->drm);
4964ad1443f0SSagar Arun Kamble 	dev_priv->runtime_pm.irqs_enabled = false;
496591c8a326SChris Wilson 	synchronize_irq(dev_priv->drm.irq);
4966c67a470bSPaulo Zanoni }
4967c67a470bSPaulo Zanoni 
4968fca52a55SDaniel Vetter /**
4969fca52a55SDaniel Vetter  * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
4970fca52a55SDaniel Vetter  * @dev_priv: i915 device instance
4971fca52a55SDaniel Vetter  *
4972fca52a55SDaniel Vetter  * This function is used to enable interrupts at runtime, both in the runtime
4973fca52a55SDaniel Vetter  * pm and the system suspend/resume code.
4974fca52a55SDaniel Vetter  */
4975b963291cSDaniel Vetter void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
4976c67a470bSPaulo Zanoni {
4977ad1443f0SSagar Arun Kamble 	dev_priv->runtime_pm.irqs_enabled = true;
497891c8a326SChris Wilson 	dev_priv->drm.driver->irq_preinstall(&dev_priv->drm);
497991c8a326SChris Wilson 	dev_priv->drm.driver->irq_postinstall(&dev_priv->drm);
4980c67a470bSPaulo Zanoni }
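
/*
 * Illustrative sketch only, not part of the driver: how the two runtime pm
 * helpers above are meant to pair up in the suspend/resume paths. The handler
 * names below are hypothetical.
 */
#if 0
static int example_runtime_suspend(struct drm_i915_private *dev_priv)
{
	/* Quiesce and tear down interrupt handling before powering down. */
	intel_runtime_pm_disable_interrupts(dev_priv);

	/* ... power down the device ... */

	return 0;
}

static int example_runtime_resume(struct drm_i915_private *dev_priv)
{
	/* ... power the device back up ... */

	/* Re-run the preinstall/postinstall hooks to restore interrupts. */
	intel_runtime_pm_enable_interrupts(dev_priv);

	return 0;
}
#endif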
4981