xref: /openbmc/linux/drivers/gpu/drm/i915/i915_irq.c (revision 2b874a027810d50b627408f51c59b9648f778a19)
1c0e09200SDave Airlie /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
2c0e09200SDave Airlie  */
3c0e09200SDave Airlie /*
4c0e09200SDave Airlie  * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
5c0e09200SDave Airlie  * All Rights Reserved.
6c0e09200SDave Airlie  *
7c0e09200SDave Airlie  * Permission is hereby granted, free of charge, to any person obtaining a
8c0e09200SDave Airlie  * copy of this software and associated documentation files (the
9c0e09200SDave Airlie  * "Software"), to deal in the Software without restriction, including
10c0e09200SDave Airlie  * without limitation the rights to use, copy, modify, merge, publish,
11c0e09200SDave Airlie  * distribute, sub license, and/or sell copies of the Software, and to
12c0e09200SDave Airlie  * permit persons to whom the Software is furnished to do so, subject to
13c0e09200SDave Airlie  * the following conditions:
14c0e09200SDave Airlie  *
15c0e09200SDave Airlie  * The above copyright notice and this permission notice (including the
16c0e09200SDave Airlie  * next paragraph) shall be included in all copies or substantial portions
17c0e09200SDave Airlie  * of the Software.
18c0e09200SDave Airlie  *
19c0e09200SDave Airlie  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20c0e09200SDave Airlie  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21c0e09200SDave Airlie  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22c0e09200SDave Airlie  * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23c0e09200SDave Airlie  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24c0e09200SDave Airlie  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25c0e09200SDave Airlie  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26c0e09200SDave Airlie  *
27c0e09200SDave Airlie  */
28c0e09200SDave Airlie 
29a70491ccSJoe Perches #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30a70491ccSJoe Perches 
3155367a27SJani Nikula #include <linux/slab.h>
3255367a27SJani Nikula #include <linux/sysrq.h>
3355367a27SJani Nikula 
34fcd70cd3SDaniel Vetter #include <drm/drm_drv.h>
3555367a27SJani Nikula 
36*2b874a02SJani Nikula #include "display/intel_display_irq.h"
371d455f8dSJani Nikula #include "display/intel_display_types.h"
38df0566a6SJani Nikula #include "display/intel_hotplug.h"
39da38ba98SJani Nikula #include "display/intel_hotplug_irq.h"
40df0566a6SJani Nikula #include "display/intel_lpe_audio.h"
417f6947fdSJani Nikula #include "display/intel_psr_regs.h"
42df0566a6SJani Nikula 
43b3786b29SChris Wilson #include "gt/intel_breadcrumbs.h"
442239e6dfSDaniele Ceraolo Spurio #include "gt/intel_gt.h"
45cf1c97dcSAndi Shyti #include "gt/intel_gt_irq.h"
46d762043fSAndi Shyti #include "gt/intel_gt_pm_irq.h"
470d6419e9SMatt Roper #include "gt/intel_gt_regs.h"
483e7abf81SAndi Shyti #include "gt/intel_rps.h"
492239e6dfSDaniele Ceraolo Spurio 
5024524e3fSJani Nikula #include "i915_driver.h"
51c0e09200SDave Airlie #include "i915_drv.h"
52440e2b3dSJani Nikula #include "i915_irq.h"
53476f62b8SJani Nikula #include "i915_reg.h"
54c0e09200SDave Airlie 
55fca52a55SDaniel Vetter /**
56fca52a55SDaniel Vetter  * DOC: interrupt handling
57fca52a55SDaniel Vetter  *
58fca52a55SDaniel Vetter  * These functions provide the basic support for enabling and disabling the
59fca52a55SDaniel Vetter  * interrupt handling support. There's a lot more functionality in i915_irq.c
60fca52a55SDaniel Vetter  * and related files, but that will be described in separate chapters.
61fca52a55SDaniel Vetter  */
62fca52a55SDaniel Vetter 
/*
 * Interrupt statistic for PMU. Increments the counter only if the
 * interrupt originated from the GPU so interrupts from a device which
 * shares the interrupt line are not accounted.
 */
static inline void pmu_irq_stats(struct drm_i915_private *i915,
				 irqreturn_t res)
{
	/* Only count interrupts this driver actually handled. */
	if (unlikely(res != IRQ_HANDLED))
		return;

	/*
	 * A clever compiler translates that into INC. A not so clever one
	 * should at least prevent store tearing.
	 */
	WRITE_ONCE(i915->pmu.irq_count, i915->pmu.irq_count + 1);
}
809c6508b9SThomas Gleixner 
/*
 * Reset a gen3+ style IMR/IER/IIR interrupt register triplet: mask
 * everything, disable all enables, then clear any latched events.
 */
void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr,
		    i915_reg_t iir, i915_reg_t ier)
{
	/* Mask all interrupts first, with a posting read to flush the write. */
	intel_uncore_write(uncore, imr, 0xffffffff);
	intel_uncore_posting_read(uncore, imr);

	/* Disable all interrupt enables. */
	intel_uncore_write(uncore, ier, 0);

	/* IIR can theoretically queue up two events. Be paranoid. */
	intel_uncore_write(uncore, iir, 0xffffffff);
	intel_uncore_posting_read(uncore, iir);
	intel_uncore_write(uncore, iir, 0xffffffff);
	intel_uncore_posting_read(uncore, iir);
}
955c502442SPaulo Zanoni 
/*
 * Gen2 variant of gen3_irq_reset(): same mask/disable/clear sequence,
 * but gen2 interrupt registers are 16 bits wide at fixed offsets.
 */
static void gen2_irq_reset(struct intel_uncore *uncore)
{
	/* Mask all interrupts first, with a posting read to flush the write. */
	intel_uncore_write16(uncore, GEN2_IMR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IMR);

	/* Disable all interrupt enables. */
	intel_uncore_write16(uncore, GEN2_IER, 0);

	/* IIR can theoretically queue up two events. Be paranoid. */
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
}
10968eb49b1SPaulo Zanoni 
/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
void gen3_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg)
{
	u32 val = intel_uncore_read(uncore, reg);

	/* Expected case: nothing was left pending by the reset path. */
	if (val == 0)
		return;

	/*
	 * Warn about the leftover bits, then clear IIR twice with posting
	 * reads - IIR can queue up two events, so one clear may not suffice.
	 */
	drm_WARN(&uncore->i915->drm, 1,
		 "Interrupt register 0x%x is not zero: 0x%08x\n",
		 i915_mmio_reg_offset(reg), val);
	intel_uncore_write(uncore, reg, 0xffffffff);
	intel_uncore_posting_read(uncore, reg);
	intel_uncore_write(uncore, reg, 0xffffffff);
	intel_uncore_posting_read(uncore, reg);
}
128337ba017SPaulo Zanoni 
/* Gen2 (16-bit register) variant of gen3_assert_iir_is_zero(). */
static void gen2_assert_iir_is_zero(struct intel_uncore *uncore)
{
	u16 val = intel_uncore_read16(uncore, GEN2_IIR);

	/* Expected case: nothing was left pending by the reset path. */
	if (val == 0)
		return;

	/*
	 * Warn about the leftover bits, then clear IIR twice with posting
	 * reads - IIR can queue up two events, so one clear may not suffice.
	 */
	drm_WARN(&uncore->i915->drm, 1,
		 "Interrupt register 0x%x is not zero: 0x%08x\n",
		 i915_mmio_reg_offset(GEN2_IIR), val);
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
}
144e9e9848aSVille Syrjälä 
/*
 * Program a gen3+ style IMR/IER/IIR triplet: verify IIR was left clear by
 * the preceding reset, then install the enable and mask values.
 */
void gen3_irq_init(struct intel_uncore *uncore,
		   i915_reg_t imr, u32 imr_val,
		   i915_reg_t ier, u32 ier_val,
		   i915_reg_t iir)
{
	/* Complain (and clear) if the reset path left events pending. */
	gen3_assert_iir_is_zero(uncore, iir);

	intel_uncore_write(uncore, ier, ier_val);
	intel_uncore_write(uncore, imr, imr_val);
	/* Posting read flushes the IMR write. */
	intel_uncore_posting_read(uncore, imr);
}
15635079899SPaulo Zanoni 
/* Gen2 (16-bit register) variant of gen3_irq_init(). */
static void gen2_irq_init(struct intel_uncore *uncore,
			  u32 imr_val, u32 ier_val)
{
	/* Complain (and clear) if the reset path left events pending. */
	gen2_assert_iir_is_zero(uncore);

	intel_uncore_write16(uncore, GEN2_IER, ier_val);
	intel_uncore_write16(uncore, GEN2_IMR, imr_val);
	/* Posting read flushes the IMR write. */
	intel_uncore_posting_read16(uncore, GEN2_IMR);
}
16668eb49b1SPaulo Zanoni 
/**
 * ivb_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * it is likely the same row is more likely to go bad again.
 */
static void ivb_parity_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), l3_parity.error_work);
	struct intel_gt *gt = to_gt(dev_priv);
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	u32 misccpctl;
	u8 slice = 0;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->drm.struct_mutex);

	/* If we've screwed up tracking, just let the interrupt fire again */
	if (drm_WARN_ON(&dev_priv->drm, !dev_priv->l3_parity.which_slice))
		goto out;

	/* Turn off DOP clock gating, remembering the old value for restore. */
	misccpctl = intel_uncore_rmw(&dev_priv->uncore, GEN7_MISCCPCTL,
				     GEN7_DOP_CLOCK_GATE_ENABLE, 0);
	intel_uncore_posting_read(&dev_priv->uncore, GEN7_MISCCPCTL);

	/* Handle every slice that has a pending parity error recorded. */
	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		i915_reg_t reg;

		/* ffs() is 1-based; convert to a 0-based slice index. */
		slice--;
		if (drm_WARN_ON_ONCE(&dev_priv->drm,
				     slice >= NUM_L3_SLICES(dev_priv)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1<<slice);

		reg = GEN7_L3CDERRST1(slice);

		/* Extract the failing row/bank/subbank from the error status. */
		error_status = intel_uncore_read(&dev_priv->uncore, reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		/* Ack the error and re-arm detection for this slice. */
		intel_uncore_write(&dev_priv->uncore, reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		intel_uncore_posting_read(&dev_priv->uncore, reg);

		/* Notify userspace via uevent so it can remap the bad row. */
		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

		kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj,
				   KOBJ_CHANGE, parity_event);

		drm_dbg(&dev_priv->drm,
			"Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			slice, row, bank, subbank);

		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
	}

	/* Restore the original DOP clock gating state. */
	intel_uncore_write(&dev_priv->uncore, GEN7_MISCCPCTL, misccpctl);

out:
	drm_WARN_ON(&dev_priv->drm, dev_priv->l3_parity.which_slice);
	/* Re-enable the parity interrupt that was masked when it fired. */
	spin_lock_irq(gt->irq_lock);
	gen5_gt_enable_irq(gt, GT_PARITY_ERROR(dev_priv));
	spin_unlock_irq(gt->irq_lock);

	mutex_unlock(&dev_priv->drm.struct_mutex);
}
25035a85ac6SBen Widawsky 
/*
 * Top-level interrupt handler for Valleyview: reads GT, PM and display IIRs,
 * brackets the acking with a master-disable/enable so re-raised interrupts
 * are not lost, then dispatches to the GT/RPS/hotplug/pipestat handlers.
 */
static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	irqreturn_t ret = IRQ_NONE;

	/* Interrupt line may be shared; bail if ours are disabled. */
	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 iir, gt_iir, pm_iir;
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 hotplug_status = 0;
		u32 ier = 0;

		gt_iir = intel_uncore_read(&dev_priv->uncore, GTIIR);
		pm_iir = intel_uncore_read(&dev_priv->uncore, GEN6_PMIIR);
		iir = intel_uncore_read(&dev_priv->uncore, VLV_IIR);

		/* Nothing pending - not our interrupt. */
		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			break;

		ret = IRQ_HANDLED;

		/*
		 * Theory on interrupt generation, based on empirical evidence:
		 *
		 * x = ((VLV_IIR & VLV_IER) ||
		 *      (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) &&
		 *       (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE)));
		 *
		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
		 * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to
		 * guarantee the CPU interrupt will be raised again even if we
		 * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR
		 * bits this time around.
		 */
		intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, 0);
		ier = intel_uncore_rmw(&dev_priv->uncore, VLV_IER, ~0, 0);

		/* Ack GT and PM events before processing them below. */
		if (gt_iir)
			intel_uncore_write(&dev_priv->uncore, GTIIR, gt_iir);
		if (pm_iir)
			intel_uncore_write(&dev_priv->uncore, GEN6_PMIIR, pm_iir);

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & (I915_LPE_PIPE_A_INTERRUPT |
			   I915_LPE_PIPE_B_INTERRUPT))
			intel_lpe_audio_irq_handler(dev_priv);

		/*
		 * VLV_IIR is single buffered, and reflects the level
		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
		 */
		if (iir)
			intel_uncore_write(&dev_priv->uncore, VLV_IIR, iir);

		/* Restore the enables saved above; master last. */
		intel_uncore_write(&dev_priv->uncore, VLV_IER, ier);
		intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);

		/* Dispatch the acked events to their handlers. */
		if (gt_iir)
			gen6_gt_irq_handler(to_gt(dev_priv), gt_iir);
		if (pm_iir)
			gen6_rps_irq_handler(&to_gt(dev_priv)->rps, pm_iir);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
	} while (0);

	pmu_irq_stats(dev_priv, ret);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}
3367e231dbeSJesse Barnes 
/*
 * Top-level interrupt handler for Cherryview: like valleyview_irq_handler()
 * but GT interrupts arrive via the gen8-style GEN8_MASTER_IRQ summary
 * register instead of GTIIR/GEN6_PMIIR.
 */
static irqreturn_t cherryview_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	irqreturn_t ret = IRQ_NONE;

	/* Interrupt line may be shared; bail if ours are disabled. */
	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 master_ctl, iir;
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 hotplug_status = 0;
		u32 ier = 0;

		master_ctl = intel_uncore_read(&dev_priv->uncore, GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
		iir = intel_uncore_read(&dev_priv->uncore, VLV_IIR);

		/* Nothing pending - not our interrupt. */
		if (master_ctl == 0 && iir == 0)
			break;

		ret = IRQ_HANDLED;

		/*
		 * Theory on interrupt generation, based on empirical evidence:
		 *
		 * x = ((VLV_IIR & VLV_IER) ||
		 *      ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) &&
		 *       (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL)));
		 *
		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
		 * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to
		 * guarantee the CPU interrupt will be raised again even if we
		 * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL
		 * bits this time around.
		 */
		intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, 0);
		ier = intel_uncore_rmw(&dev_priv->uncore, VLV_IER, ~0, 0);

		gen8_gt_irq_handler(to_gt(dev_priv), master_ctl);

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & (I915_LPE_PIPE_A_INTERRUPT |
			   I915_LPE_PIPE_B_INTERRUPT |
			   I915_LPE_PIPE_C_INTERRUPT))
			intel_lpe_audio_irq_handler(dev_priv);

		/*
		 * VLV_IIR is single buffered, and reflects the level
		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
		 */
		if (iir)
			intel_uncore_write(&dev_priv->uncore, VLV_IIR, iir);

		/* Restore the enables saved above; master last. */
		intel_uncore_write(&dev_priv->uncore, VLV_IER, ier);
		intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
	} while (0);

	pmu_irq_stats(dev_priv, ret);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}
41443f328d7SVille Syrjälä 
/*
 * To handle irqs with the minimum potential races with fresh interrupts, we:
 * 1 - Disable Master Interrupt Control.
 * 2 - Find the source(s) of the interrupt.
 * 3 - Clear the Interrupt Identity bits (IIR).
 * 4 - Process the interrupt(s) that had bits set in the IIRs.
 * 5 - Re-enable Master Interrupt Control.
 */
static irqreturn_t ilk_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *i915 = arg;
	void __iomem * const regs = i915->uncore.regs;
	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
	irqreturn_t ret = IRQ_NONE;

	/* Interrupt line may be shared; bail if ours are disabled. */
	if (unlikely(!intel_irqs_enabled(i915)))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&i915->runtime_pm);

	/* disable master interrupt before clearing iir  */
	de_ier = raw_reg_read(regs, DEIER);
	raw_reg_write(regs, DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);

	/* Disable south interrupts. We'll only write to SDEIIR once, so further
	 * interrupts will be stored on its back queue, and then we'll be
	 * able to process them after we restore SDEIER (as soon as we restore
	 * it, we'll get an interrupt if SDEIIR still has something to process
	 * due to its back queue). */
	if (!HAS_PCH_NOP(i915)) {
		sde_ier = raw_reg_read(regs, SDEIER);
		raw_reg_write(regs, SDEIER, 0);
	}

	/* Find, clear, then process each source of interrupt */

	gt_iir = raw_reg_read(regs, GTIIR);
	if (gt_iir) {
		raw_reg_write(regs, GTIIR, gt_iir);
		if (GRAPHICS_VER(i915) >= 6)
			gen6_gt_irq_handler(to_gt(i915), gt_iir);
		else
			gen5_gt_irq_handler(to_gt(i915), gt_iir);
		ret = IRQ_HANDLED;
	}

	de_iir = raw_reg_read(regs, DEIIR);
	if (de_iir) {
		raw_reg_write(regs, DEIIR, de_iir);
		if (DISPLAY_VER(i915) >= 7)
			ivb_display_irq_handler(i915, de_iir);
		else
			ilk_display_irq_handler(i915, de_iir);
		ret = IRQ_HANDLED;
	}

	/* PM interrupts only exist on gen6+. */
	if (GRAPHICS_VER(i915) >= 6) {
		u32 pm_iir = raw_reg_read(regs, GEN6_PMIIR);
		if (pm_iir) {
			raw_reg_write(regs, GEN6_PMIIR, pm_iir);
			gen6_rps_irq_handler(&to_gt(i915)->rps, pm_iir);
			ret = IRQ_HANDLED;
		}
	}

	/* Restore master and (if saved) south interrupt enables. */
	raw_reg_write(regs, DEIER, de_ier);
	if (sde_ier)
		raw_reg_write(regs, SDEIER, sde_ier);

	pmu_irq_stats(i915, ret);

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	enable_rpm_wakeref_asserts(&i915->runtime_pm);

	return ret;
}
492b1f14ad0SJesse Barnes 
/* Disable the gen8 master interrupt and return the pending summary bits. */
static inline u32 gen8_master_intr_disable(void __iomem * const regs)
{
	raw_reg_write(regs, GEN8_MASTER_IRQ, 0);

	/*
	 * Now with master disabled, get a sample of level indications
	 * for this interrupt. Indications will be cleared on related acks.
	 * New indications can and will light up during processing,
	 * and will generate new interrupt after enabling master.
	 */
	return raw_reg_read(regs, GEN8_MASTER_IRQ);
}
5054376b9c9SMika Kuoppala 
/* Re-enable the gen8 master interrupt after handling. */
static inline void gen8_master_intr_enable(void __iomem * const regs)
{
	raw_reg_write(regs, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
}
5104376b9c9SMika Kuoppala 
/*
 * Top-level interrupt handler for gen8: sample and disable the master
 * interrupt, dispatch GT and (if flagged) display sources, then re-enable.
 */
static irqreturn_t gen8_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	void __iomem * const regs = dev_priv->uncore.regs;
	u32 master_ctl;

	/* Interrupt line may be shared; bail if ours are disabled. */
	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	master_ctl = gen8_master_intr_disable(regs);
	if (!master_ctl) {
		/* Nothing pending - not our interrupt. */
		gen8_master_intr_enable(regs);
		return IRQ_NONE;
	}

	/* Find, queue (onto bottom-halves), then clear each source */
	gen8_gt_irq_handler(to_gt(dev_priv), master_ctl);

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	if (master_ctl & ~GEN8_GT_IRQS) {
		disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
		gen8_de_irq_handler(dev_priv, master_ctl);
		enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
	}

	gen8_master_intr_enable(regs);

	pmu_irq_stats(dev_priv, IRQ_HANDLED);

	return IRQ_HANDLED;
}
542abd58f01SBen Widawsky 
/* Disable the gen11 master interrupt and return the pending summary bits. */
static inline u32 gen11_master_intr_disable(void __iomem * const regs)
{
	raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0);

	/*
	 * Now with master disabled, get a sample of level indications
	 * for this interrupt. Indications will be cleared on related acks.
	 * New indications can and will light up during processing,
	 * and will generate new interrupt after enabling master.
	 */
	return raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
}
55581067b71SMika Kuoppala 
/* Re-enable the gen11 master interrupt after handling. */
static inline void gen11_master_intr_enable(void __iomem * const regs)
{
	raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ);
}
56081067b71SMika Kuoppala 
/*
 * Top-level interrupt handler for gen11: sample and disable the master
 * interrupt, dispatch GT, display and GU misc sources, then re-enable.
 */
static irqreturn_t gen11_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *i915 = arg;
	void __iomem * const regs = i915->uncore.regs;
	struct intel_gt *gt = to_gt(i915);
	u32 master_ctl;
	u32 gu_misc_iir;

	/* Interrupt line may be shared; bail if ours are disabled. */
	if (!intel_irqs_enabled(i915))
		return IRQ_NONE;

	master_ctl = gen11_master_intr_disable(regs);
	if (!master_ctl) {
		/* Nothing pending - not our interrupt. */
		gen11_master_intr_enable(regs);
		return IRQ_NONE;
	}

	/* Find, queue (onto bottom-halves), then clear each source */
	gen11_gt_irq_handler(gt, master_ctl);

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	if (master_ctl & GEN11_DISPLAY_IRQ)
		gen11_display_irq_handler(i915);

	/* Ack GU misc before re-enabling the master; handle it after. */
	gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl);

	gen11_master_intr_enable(regs);

	gen11_gu_misc_irq_handler(i915, gu_misc_iir);

	pmu_irq_stats(i915, IRQ_HANDLED);

	return IRQ_HANDLED;
}
59551951ae7SMika Kuoppala 
/*
 * Disable the DG1 master tile interrupt and return the pending tile
 * indications (0 if none). Unlike gen11, the tile-level indications
 * are also acked here by writing the sampled value back.
 */
static inline u32 dg1_master_intr_disable(void __iomem * const regs)
{
	u32 val;

	/* First disable interrupts */
	raw_reg_write(regs, DG1_MSTR_TILE_INTR, 0);

	/* Get the indication levels and ack the master unit */
	val = raw_reg_read(regs, DG1_MSTR_TILE_INTR);
	if (unlikely(!val))
		return 0;

	raw_reg_write(regs, DG1_MSTR_TILE_INTR, val);

	return val;
}
61297b492f5SLucas De Marchi 
/* Re-enable the DG1 master tile interrupt after handling. */
static inline void dg1_master_intr_enable(void __iomem * const regs)
{
	raw_reg_write(regs, DG1_MSTR_TILE_INTR, DG1_MSTR_IRQ);
}
61797b492f5SLucas De Marchi 
/*
 * Top-level interrupt handler for DG1: an extra master-tile level sits
 * above the gen11-style GFX master IRQ. Only tile 0 is supported; any
 * other tile indication is logged and dropped.
 */
static irqreturn_t dg1_irq_handler(int irq, void *arg)
{
	struct drm_i915_private * const i915 = arg;
	struct intel_gt *gt = to_gt(i915);
	void __iomem * const regs = gt->uncore->regs;
	u32 master_tile_ctl, master_ctl;
	u32 gu_misc_iir;

	if (!intel_irqs_enabled(i915))
		return IRQ_NONE;

	master_tile_ctl = dg1_master_intr_disable(regs);
	if (!master_tile_ctl) {
		/* Nothing pending for us (e.g. shared interrupt line). */
		dg1_master_intr_enable(regs);
		return IRQ_NONE;
	}

	/* FIXME: we only support tile 0 for now. */
	if (master_tile_ctl & DG1_MSTR_TILE(0)) {
		/* Sample and ack the per-tile GFX master indications. */
		master_ctl = raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
		raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, master_ctl);
	} else {
		drm_err(&i915->drm, "Tile not supported: 0x%08x\n",
			master_tile_ctl);
		dg1_master_intr_enable(regs);
		return IRQ_NONE;
	}

	gen11_gt_irq_handler(gt, master_ctl);

	if (master_ctl & GEN11_DISPLAY_IRQ)
		gen11_display_irq_handler(i915);

	gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl);

	dg1_master_intr_enable(regs);

	gen11_gu_misc_irq_handler(i915, gu_misc_iir);

	pmu_irq_stats(i915, IRQ_HANDLED);

	return IRQ_HANDLED;
}
66197b492f5SLucas De Marchi 
/*
 * Reset the south display engine (PCH/SDE) interrupts. A no-op on
 * PCH_NOP parts which have no south display engine. On CPT/LPT the
 * south error interrupt register is also cleared.
 */
static void ibx_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	if (HAS_PCH_NOP(dev_priv))
		return;

	GEN3_IRQ_RESET(uncore, SDE);

	if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
		intel_uncore_write(&dev_priv->uncore, SERR_INT, 0xffffffff);
}
674105b122eSPaulo Zanoni 
/* drm_dma.h hooks */
/*
 * Reset all Ironlake-era (ILK/SNB/IVB/HSW) display engine interrupts,
 * then the GT and PCH interrupts. Clears the sticky error and (on HSW)
 * PSR interrupt registers along the way.
 */
static void ilk_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	GEN3_IRQ_RESET(uncore, DE);
	/* ~0u == everything masked; matches the reset state. */
	dev_priv->irq_mask = ~0u;

	if (GRAPHICS_VER(dev_priv) == 7)
		intel_uncore_write(uncore, GEN7_ERR_INT, 0xffffffff);

	if (IS_HASWELL(dev_priv)) {
		intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
		intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
	}

	gen5_gt_irq_reset(to_gt(dev_priv));

	ibx_irq_reset(dev_priv);
}
6968bb61306SVille Syrjälä 
/*
 * Reset Valleyview interrupts: master enable first, then GT, then the
 * display interrupts (only if they are currently enabled; protected by
 * irq_lock since display IRQ state is toggled at runtime).
 */
static void valleyview_irq_reset(struct drm_i915_private *dev_priv)
{
	intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, 0);
	intel_uncore_posting_read(&dev_priv->uncore, VLV_MASTER_IER);

	gen5_gt_irq_reset(to_gt(dev_priv));

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_reset(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);
}
7097e231dbeSJesse Barnes 
710a844cfbeSJosé Roberto de Souza static void gen8_irq_reset(struct drm_i915_private *dev_priv)
711a844cfbeSJosé Roberto de Souza {
712a844cfbeSJosé Roberto de Souza 	struct intel_uncore *uncore = &dev_priv->uncore;
713a844cfbeSJosé Roberto de Souza 
714e58c2cacSAndrzej Hajda 	gen8_master_intr_disable(uncore->regs);
715a844cfbeSJosé Roberto de Souza 
7162cbc876dSMichał Winiarski 	gen8_gt_irq_reset(to_gt(dev_priv));
717a844cfbeSJosé Roberto de Souza 	gen8_display_irq_reset(dev_priv);
718b16b2a2fSPaulo Zanoni 	GEN3_IRQ_RESET(uncore, GEN8_PCU_);
719abd58f01SBen Widawsky 
7206e266956STvrtko Ursulin 	if (HAS_PCH_SPLIT(dev_priv))
721b243f530STvrtko Ursulin 		ibx_irq_reset(dev_priv);
72259b7cb44STejas Upadhyay 
723abd58f01SBen Widawsky }
724abd58f01SBen Widawsky 
/*
 * Reset all gen11 interrupts: disable the master interrupt, reset GT
 * and display, then the GU_MISC and PCU interrupt registers.
 */
static void gen11_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_gt *gt = to_gt(dev_priv);
	struct intel_uncore *uncore = gt->uncore;

	gen11_master_intr_disable(dev_priv->uncore.regs);

	gen11_gt_irq_reset(gt);
	gen11_display_irq_reset(dev_priv);

	GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_);
	GEN3_IRQ_RESET(uncore, GEN8_PCU_);
}
738a3265d85SMatt Roper 
/*
 * Reset all DG1 interrupts: disable the master tile interrupt, then
 * reset GT, display, GU_MISC and PCU interrupt registers (same layout
 * as gen11 below the tile level).
 */
static void dg1_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_gt *gt = to_gt(dev_priv);
	struct intel_uncore *uncore = gt->uncore;

	dg1_master_intr_disable(dev_priv->uncore.regs);

	gen11_gt_irq_reset(gt);
	gen11_display_irq_reset(dev_priv);

	GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_);
	GEN3_IRQ_RESET(uncore, GEN8_PCU_);
}
75222e26af7SPaulo Zanoni 
753b318b824SVille Syrjälä static void cherryview_irq_reset(struct drm_i915_private *dev_priv)
75443f328d7SVille Syrjälä {
755b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
75643f328d7SVille Syrjälä 
757e58c2cacSAndrzej Hajda 	intel_uncore_write(uncore, GEN8_MASTER_IRQ, 0);
7582939eb06SJani Nikula 	intel_uncore_posting_read(&dev_priv->uncore, GEN8_MASTER_IRQ);
75943f328d7SVille Syrjälä 
7602cbc876dSMichał Winiarski 	gen8_gt_irq_reset(to_gt(dev_priv));
76143f328d7SVille Syrjälä 
762b16b2a2fSPaulo Zanoni 	GEN3_IRQ_RESET(uncore, GEN8_PCU_);
76343f328d7SVille Syrjälä 
764ad22d106SVille Syrjälä 	spin_lock_irq(&dev_priv->irq_lock);
7659918271eSVille Syrjälä 	if (dev_priv->display_irqs_enabled)
76670591a41SVille Syrjälä 		vlv_display_irq_reset(dev_priv);
767ad22d106SVille Syrjälä 	spin_unlock_irq(&dev_priv->irq_lock);
76843f328d7SVille Syrjälä }
76943f328d7SVille Syrjälä 
/*
 * Enable Ironlake-era display engine interrupts. display_mask holds
 * the sources we unmask in IMR; extra_mask holds additional sources
 * enabled only in IER (left masked until explicitly unmasked, e.g.
 * vblank). The IVB/HSW bit layout differs from ILK/SNB, hence the
 * GRAPHICS_VER split.
 */
static void ilk_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 display_mask, extra_mask;

	if (GRAPHICS_VER(dev_priv) >= 7) {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
				DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB);
		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
			      DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
			      DE_PLANE_FLIP_DONE_IVB(PLANE_C) |
			      DE_PLANE_FLIP_DONE_IVB(PLANE_B) |
			      DE_PLANE_FLIP_DONE_IVB(PLANE_A) |
			      DE_DP_A_HOTPLUG_IVB);
	} else {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
				DE_AUX_CHANNEL_A | DE_PIPEB_CRC_DONE |
				DE_PIPEA_CRC_DONE | DE_POISON);
		extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK |
			      DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
			      DE_PLANE_FLIP_DONE(PLANE_A) |
			      DE_PLANE_FLIP_DONE(PLANE_B) |
			      DE_DP_A_HOTPLUG);
	}

	if (IS_HASWELL(dev_priv)) {
		/* PSR IIR must be clear before enabling the PSR interrupt. */
		gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
		display_mask |= DE_EDP_PSR_INT_HSW;
	}

	if (IS_IRONLAKE_M(dev_priv))
		extra_mask |= DE_PCU_EVENT;

	dev_priv->irq_mask = ~display_mask;

	ibx_irq_postinstall(dev_priv);

	gen5_gt_irq_postinstall(to_gt(dev_priv));

	GEN3_IRQ_INIT(uncore, DE, dev_priv->irq_mask,
		      display_mask | extra_mask);
}
812036a4a7dSZhenyu Wang 
/*
 * Enable Valleyview interrupts: GT first, then display (only if
 * display IRQs are enabled; protected by irq_lock), and finally the
 * master enable with a posting read to flush the write.
 */
static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv)
{
	gen5_gt_irq_postinstall(to_gt(dev_priv));

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_postinstall(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
	intel_uncore_posting_read(&dev_priv->uncore, VLV_MASTER_IER);
}
82520afbda2SDaniel Vetter 
/*
 * Enable gen8 interrupts: PCH (ICP-style or legacy IBX) first, then
 * GT and display engine, and finally the master interrupt enable.
 */
static void gen8_irq_postinstall(struct drm_i915_private *dev_priv)
{
	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
		icp_irq_postinstall(dev_priv);
	else if (HAS_PCH_SPLIT(dev_priv))
		ibx_irq_postinstall(dev_priv);

	gen8_gt_irq_postinstall(to_gt(dev_priv));
	gen8_de_irq_postinstall(dev_priv);

	gen8_master_intr_enable(dev_priv->uncore.regs);
}
838abd58f01SBen Widawsky 
/*
 * Enable gen11 interrupts: PCH, GT, display and GU_MISC (only the GSE
 * event is unmasked), then the master enable with a posting read to
 * flush the write.
 */
static void gen11_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_gt *gt = to_gt(dev_priv);
	struct intel_uncore *uncore = gt->uncore;
	u32 gu_misc_masked = GEN11_GU_MISC_GSE;

	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
		icp_irq_postinstall(dev_priv);

	gen11_gt_irq_postinstall(gt);
	gen11_de_irq_postinstall(dev_priv);

	GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);

	gen11_master_intr_enable(uncore->regs);
	intel_uncore_posting_read(&dev_priv->uncore, GEN11_GFX_MSTR_IRQ);
}
85622e26af7SPaulo Zanoni 
/*
 * Enable DG1 interrupts: GT and GU_MISC first, display only when
 * present (MTP-style PCH on display version >= 14, ICP-style
 * otherwise), then the master tile enable with a posting read.
 */
static void dg1_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_gt *gt = to_gt(dev_priv);
	struct intel_uncore *uncore = gt->uncore;
	u32 gu_misc_masked = GEN11_GU_MISC_GSE;

	gen11_gt_irq_postinstall(gt);

	GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);

	if (HAS_DISPLAY(dev_priv)) {
		if (DISPLAY_VER(dev_priv) >= 14)
			mtp_irq_postinstall(dev_priv);
		else
			icp_irq_postinstall(dev_priv);

		gen8_de_irq_postinstall(dev_priv);
		intel_uncore_write(&dev_priv->uncore, GEN11_DISPLAY_INT_CTL,
				   GEN11_DISPLAY_IRQ_ENABLE);
	}

	dg1_master_intr_enable(uncore->regs);
	intel_uncore_posting_read(uncore, DG1_MSTR_TILE_INTR);
}
88151951ae7SMika Kuoppala 
/*
 * Enable Cherryview interrupts: gen8-style GT enable plus VLV-style
 * display enable (irq_lock protected), then the gen8 master enable
 * with a posting read to flush the write.
 */
static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv)
{
	gen8_gt_irq_postinstall(to_gt(dev_priv));

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_postinstall(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
	intel_uncore_posting_read(&dev_priv->uncore, GEN8_MASTER_IRQ);
}
89443f328d7SVille Syrjälä 
/* Reset gen2 (i8xx) interrupts: pipestats first, then the IMR/IER/IIR set. */
static void i8xx_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	i9xx_pipestat_irq_reset(dev_priv);

	gen2_irq_reset(uncore);
	/* ~0u == everything masked; matches the reset state. */
	dev_priv->irq_mask = ~0u;
}
904c2798b19SChris Wilson 
/*
 * Compute the EMR (error mask) value for gen2-4: which error sources
 * are masked off because they fire spuriously or cannot be handled.
 */
static u32 i9xx_error_mask(struct drm_i915_private *i915)
{
	/*
	 * On gen2/3 FBC generates (seemingly spurious)
	 * display INVALID_GTT/INVALID_GTT_PTE table errors.
	 *
	 * Also gen3 bspec has this to say:
	 * "DISPA_INVALID_GTT_PTE
	 *  [DevNapa] : Reserved. This bit does not reflect the page
	 *              table error for the display plane A."
	 *
	 * Unfortunately we can't mask off individual PGTBL_ER bits,
	 * so we just have to mask off all page table errors via EMR.
	 */
	if (HAS_FBC(i915))
		return ~I915_ERROR_MEMORY_REFRESH;
	else
		return ~(I915_ERROR_PAGE_TABLE |
			 I915_ERROR_MEMORY_REFRESH);
}
9253687ce75SVille Syrjälä 
/*
 * Enable gen2 (i8xx) interrupts: program the error mask, unmask the
 * always-on sources in IMR, enable them (plus user interrupts) in IER,
 * and turn on the pipe CRC pipestat events.
 */
static void i8xx_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u16 enable_mask;

	intel_uncore_write16(uncore, EMR, i9xx_error_mask(dev_priv));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_MASTER_ERROR_INTERRUPT);

	enable_mask =
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_MASTER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	gen2_irq_init(uncore, dev_priv->irq_mask, enable_mask);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);
}
954c2798b19SChris Wilson 
/*
 * Ack the gen2 error interrupt: read and clear EIR, returning the
 * original value in @eir and any bits that refuse to clear in
 * @eir_stuck. Stuck bits get masked in EMR so they stop retriggering.
 */
static void i8xx_error_irq_ack(struct drm_i915_private *i915,
			       u16 *eir, u16 *eir_stuck)
{
	struct intel_uncore *uncore = &i915->uncore;
	u16 emr;

	*eir = intel_uncore_read16(uncore, EIR);
	intel_uncore_write16(uncore, EIR, *eir);

	*eir_stuck = intel_uncore_read16(uncore, EIR);
	if (*eir_stuck == 0)
		return;

	/*
	 * Toggle all EMR bits to make sure we get an edge
	 * in the ISR master error bit if we don't clear
	 * all the EIR bits. Otherwise the edge triggered
	 * IIR on i965/g4x wouldn't notice that an interrupt
	 * is still pending. Also some EIR bits can't be
	 * cleared except by handling the underlying error
	 * (or by a GPU reset) so we mask any bit that
	 * remains set.
	 */
	emr = intel_uncore_read16(uncore, EMR);
	intel_uncore_write16(uncore, EMR, 0xffff);
	intel_uncore_write16(uncore, EMR, emr | *eir_stuck);
}
98278c357ddSVille Syrjälä 
/* Log the gen2 master error state (EIR, stuck bits, page table error). */
static void i8xx_error_irq_handler(struct drm_i915_private *dev_priv,
				   u16 eir, u16 eir_stuck)
{
	drm_dbg(&dev_priv->drm, "Master Error: EIR 0x%04x\n", eir);

	if (eir_stuck)
		drm_dbg(&dev_priv->drm, "EIR stuck: 0x%04x, masked\n",
			eir_stuck);

	drm_dbg(&dev_priv->drm, "PGTBL_ER: 0x%08x\n",
		intel_uncore_read(&dev_priv->uncore, PGTBL_ER));
}
99578c357ddSVille Syrjälä 
/*
 * Ack the gen3+ error interrupt: 32-bit counterpart of
 * i8xx_error_irq_ack(). Returns the cleared EIR value in @eir and the
 * bits that refuse to clear in @eir_stuck; stuck bits are masked in
 * EMR so they stop retriggering.
 */
static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv,
			       u32 *eir, u32 *eir_stuck)
{
	u32 emr;

	*eir = intel_uncore_read(&dev_priv->uncore, EIR);
	intel_uncore_write(&dev_priv->uncore, EIR, *eir);

	*eir_stuck = intel_uncore_read(&dev_priv->uncore, EIR);
	if (*eir_stuck == 0)
		return;

	/*
	 * Toggle all EMR bits to make sure we get an edge
	 * in the ISR master error bit if we don't clear
	 * all the EIR bits. Otherwise the edge triggered
	 * IIR on i965/g4x wouldn't notice that an interrupt
	 * is still pending. Also some EIR bits can't be
	 * cleared except by handling the underlying error
	 * (or by a GPU reset) so we mask any bit that
	 * remains set.
	 */
	emr = intel_uncore_read(&dev_priv->uncore, EMR);
	intel_uncore_write(&dev_priv->uncore, EMR, 0xffffffff);
	intel_uncore_write(&dev_priv->uncore, EMR, emr | *eir_stuck);
}
102278c357ddSVille Syrjälä 
/* Log the gen3+ master error state (EIR, stuck bits, page table error). */
static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv,
				   u32 eir, u32 eir_stuck)
{
	drm_dbg(&dev_priv->drm, "Master Error, EIR 0x%08x\n", eir);

	if (eir_stuck)
		drm_dbg(&dev_priv->drm, "EIR stuck: 0x%08x, masked\n",
			eir_stuck);

	drm_dbg(&dev_priv->drm, "PGTBL_ER: 0x%08x\n",
		intel_uncore_read(&dev_priv->uncore, PGTBL_ER));
}
103578c357ddSVille Syrjälä 
/*
 * Interrupt handler for gen2 (i8xx): ack pipestats and errors before
 * clearing IIR, then dispatch engine, error and pipestat events.
 * The do/while (0) wrapping keeps the single-pass ack/handle sequence
 * breakable on an empty IIR.
 */
static irqreturn_t i8xx_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u16 eir = 0, eir_stuck = 0;
		u16 iir;

		iir = intel_uncore_read16(&dev_priv->uncore, GEN2_IIR);
		if (iir == 0)
			break;

		ret = IRQ_HANDLED;

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i8xx_error_irq_ack(dev_priv, &eir, &eir_stuck);

		intel_uncore_write16(&dev_priv->uncore, GEN2_IIR, iir);

		if (iir & I915_USER_INTERRUPT)
			intel_engine_cs_irq(to_gt(dev_priv)->engine[RCS0], iir);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i8xx_error_irq_handler(dev_priv, eir, eir_stuck);

		i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats);
	} while (0);

	pmu_irq_stats(dev_priv, ret);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}
1082c2798b19SChris Wilson 
/*
 * Reset gen3 (i915) interrupts: clear hotplug state (where supported),
 * pipestats, and the GEN2_ IMR/IER/IIR register set.
 */
static void i915_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	if (I915_HAS_HOTPLUG(dev_priv)) {
		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
		/* 0,0 rmw == write back the (write-1-to-clear) status bits. */
		intel_uncore_rmw(&dev_priv->uncore, PORT_HOTPLUG_STAT, 0, 0);
	}

	i9xx_pipestat_irq_reset(dev_priv);

	GEN3_IRQ_RESET(uncore, GEN2_);
	/* ~0u == everything masked; matches the reset state. */
	dev_priv->irq_mask = ~0u;
}
1097a266c7d5SChris Wilson 
/*
 * Enable gen3 (i915) interrupts: program the error mask, unmask the
 * always-on sources (plus hotplug where supported), enable them in
 * IER, and turn on the pipe CRC pipestat and ASLE events.
 */
static void i915_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 enable_mask;

	intel_uncore_write(uncore, EMR, i9xx_error_mask(dev_priv));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_MASTER_ERROR_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_MASTER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	if (I915_HAS_HOTPLUG(dev_priv)) {
		/* Enable in IER... */
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
		/* and unmask in IMR */
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
	}

	GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	i915_enable_asle_pipestat(dev_priv);
}
113720afbda2SDaniel Vetter 
/*
 * Interrupt handler for gen3 (i915): like i8xx_irq_handler() but with
 * 32-bit IIR, hotplug support and i915-specific pipestat handling.
 * All sources are acked before IIR is cleared, then dispatched.
 */
static irqreturn_t i915_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 eir = 0, eir_stuck = 0;
		u32 hotplug_status = 0;
		u32 iir;

		iir = intel_uncore_read(&dev_priv->uncore, GEN2_IIR);
		if (iir == 0)
			break;

		ret = IRQ_HANDLED;

		if (I915_HAS_HOTPLUG(dev_priv) &&
		    iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);

		intel_uncore_write(&dev_priv->uncore, GEN2_IIR, iir);

		if (iir & I915_USER_INTERRUPT)
			intel_engine_cs_irq(to_gt(dev_priv)->engine[RCS0], iir);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_handler(dev_priv, eir, eir_stuck);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		i915_pipestat_irq_handler(dev_priv, iir, pipe_stats);
	} while (0);

	pmu_irq_stats(dev_priv, ret);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}
1192a266c7d5SChris Wilson 
/*
 * Quiesce all i965 interrupt sources: turn off hotplug detection,
 * clear latched hotplug and pipe status bits, reset the GEN2_*
 * IMR/IER/IIR block, and leave irq_mask with everything masked.
 */
static void i965_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
	/* rmw with clear=0/set=0 writes back the value just read;
	 * presumably this acks the W1C status bits in PORT_HOTPLUG_STAT
	 * — confirm against the register definition. */
	intel_uncore_rmw(uncore, PORT_HOTPLUG_STAT, 0, 0);

	i9xx_pipestat_irq_reset(dev_priv);

	GEN3_IRQ_RESET(uncore, GEN2_);
	dev_priv->irq_mask = ~0u; /* all sources masked */
}
1205a266c7d5SChris Wilson 
12063687ce75SVille Syrjälä static u32 i965_error_mask(struct drm_i915_private *i915)
1207a266c7d5SChris Wilson {
1208045cebd2SVille Syrjälä 	/*
1209045cebd2SVille Syrjälä 	 * Enable some error detection, note the instruction error mask
1210045cebd2SVille Syrjälä 	 * bit is reserved, so we leave it masked.
1211e7e12f6eSVille Syrjälä 	 *
1212e7e12f6eSVille Syrjälä 	 * i965 FBC no longer generates spurious GTT errors,
1213e7e12f6eSVille Syrjälä 	 * so we can always enable the page table errors.
1214045cebd2SVille Syrjälä 	 */
12153687ce75SVille Syrjälä 	if (IS_G4X(i915))
12163687ce75SVille Syrjälä 		return ~(GM45_ERROR_PAGE_TABLE |
1217045cebd2SVille Syrjälä 			 GM45_ERROR_MEM_PRIV |
1218045cebd2SVille Syrjälä 			 GM45_ERROR_CP_PRIV |
1219045cebd2SVille Syrjälä 			 I915_ERROR_MEMORY_REFRESH);
12203687ce75SVille Syrjälä 	else
12213687ce75SVille Syrjälä 		return ~(I915_ERROR_PAGE_TABLE |
1222045cebd2SVille Syrjälä 			 I915_ERROR_MEMORY_REFRESH);
1223045cebd2SVille Syrjälä }
12243687ce75SVille Syrjälä 
/*
 * Program the i965 interrupt enables: error mask first, then the
 * always-on display/error sources via IMR/IER, and finally the
 * per-pipe status bits (GMBUS, CRC) and ASLE.
 */
static void i965_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 enable_mask;

	intel_uncore_write(uncore, EMR, i965_error_mask(dev_priv));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PORT_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_MASTER_ERROR_INTERRUPT);

	/* Everything unmasked above, plus the render ring user interrupt. */
	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PORT_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_MASTER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	/* G4x additionally has the BSD (video) ring user interrupt. */
	if (IS_G4X(dev_priv))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	i915_enable_asle_pipestat(dev_priv);
}
126320afbda2SDaniel Vetter 
1264ff1f525eSDaniel Vetter static irqreturn_t i965_irq_handler(int irq, void *arg)
1265a266c7d5SChris Wilson {
1266b318b824SVille Syrjälä 	struct drm_i915_private *dev_priv = arg;
1267af722d28SVille Syrjälä 	irqreturn_t ret = IRQ_NONE;
1268a266c7d5SChris Wilson 
12692dd2a883SImre Deak 	if (!intel_irqs_enabled(dev_priv))
12702dd2a883SImre Deak 		return IRQ_NONE;
12712dd2a883SImre Deak 
12721f814dacSImre Deak 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
12739102650fSDaniele Ceraolo Spurio 	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
12741f814dacSImre Deak 
1275af722d28SVille Syrjälä 	do {
1276eb64343cSVille Syrjälä 		u32 pipe_stats[I915_MAX_PIPES] = {};
127778c357ddSVille Syrjälä 		u32 eir = 0, eir_stuck = 0;
1278af722d28SVille Syrjälä 		u32 hotplug_status = 0;
1279af722d28SVille Syrjälä 		u32 iir;
12802c8ba29fSChris Wilson 
12812939eb06SJani Nikula 		iir = intel_uncore_read(&dev_priv->uncore, GEN2_IIR);
1282af722d28SVille Syrjälä 		if (iir == 0)
1283af722d28SVille Syrjälä 			break;
1284af722d28SVille Syrjälä 
1285af722d28SVille Syrjälä 		ret = IRQ_HANDLED;
1286af722d28SVille Syrjälä 
1287af722d28SVille Syrjälä 		if (iir & I915_DISPLAY_PORT_INTERRUPT)
1288af722d28SVille Syrjälä 			hotplug_status = i9xx_hpd_irq_ack(dev_priv);
1289a266c7d5SChris Wilson 
1290eb64343cSVille Syrjälä 		/* Call regardless, as some status bits might not be
1291eb64343cSVille Syrjälä 		 * signalled in iir */
1292eb64343cSVille Syrjälä 		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
1293a266c7d5SChris Wilson 
129478c357ddSVille Syrjälä 		if (iir & I915_MASTER_ERROR_INTERRUPT)
129578c357ddSVille Syrjälä 			i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
129678c357ddSVille Syrjälä 
12972939eb06SJani Nikula 		intel_uncore_write(&dev_priv->uncore, GEN2_IIR, iir);
1298a266c7d5SChris Wilson 
1299a266c7d5SChris Wilson 		if (iir & I915_USER_INTERRUPT)
13002cbc876dSMichał Winiarski 			intel_engine_cs_irq(to_gt(dev_priv)->engine[RCS0],
13010669a6e1SChris Wilson 					    iir);
1302af722d28SVille Syrjälä 
1303a266c7d5SChris Wilson 		if (iir & I915_BSD_USER_INTERRUPT)
13042cbc876dSMichał Winiarski 			intel_engine_cs_irq(to_gt(dev_priv)->engine[VCS0],
13050669a6e1SChris Wilson 					    iir >> 25);
1306a266c7d5SChris Wilson 
130778c357ddSVille Syrjälä 		if (iir & I915_MASTER_ERROR_INTERRUPT)
130878c357ddSVille Syrjälä 			i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
1309515ac2bbSDaniel Vetter 
1310af722d28SVille Syrjälä 		if (hotplug_status)
1311af722d28SVille Syrjälä 			i9xx_hpd_irq_handler(dev_priv, hotplug_status);
1312af722d28SVille Syrjälä 
1313af722d28SVille Syrjälä 		i965_pipestat_irq_handler(dev_priv, iir, pipe_stats);
1314af722d28SVille Syrjälä 	} while (0);
1315a266c7d5SChris Wilson 
13169c6508b9SThomas Gleixner 	pmu_irq_stats(dev_priv, IRQ_HANDLED);
13179c6508b9SThomas Gleixner 
13189102650fSDaniele Ceraolo Spurio 	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
13191f814dacSImre Deak 
1320a266c7d5SChris Wilson 	return ret;
1321a266c7d5SChris Wilson }
1322a266c7d5SChris Wilson 
1323fca52a55SDaniel Vetter /**
1324fca52a55SDaniel Vetter  * intel_irq_init - initializes irq support
1325fca52a55SDaniel Vetter  * @dev_priv: i915 device instance
1326fca52a55SDaniel Vetter  *
1327fca52a55SDaniel Vetter  * This function initializes all the irq support including work items, timers
1328fca52a55SDaniel Vetter  * and all the vtables. It does not setup the interrupt itself though.
1329fca52a55SDaniel Vetter  */
1330b963291cSDaniel Vetter void intel_irq_init(struct drm_i915_private *dev_priv)
1331f71d4af4SJesse Barnes {
1332cefcff8fSJoonas Lahtinen 	int i;
13338b2e326dSChris Wilson 
133474bb98baSLucas De Marchi 	INIT_WORK(&dev_priv->l3_parity.error_work, ivb_parity_work);
1335cefcff8fSJoonas Lahtinen 	for (i = 0; i < MAX_L3_SLICES; ++i)
1336cefcff8fSJoonas Lahtinen 		dev_priv->l3_parity.remap_info[i] = NULL;
13378b2e326dSChris Wilson 
1338633023a4SDaniele Ceraolo Spurio 	/* pre-gen11 the guc irqs bits are in the upper 16 bits of the pm reg */
1339651e7d48SLucas De Marchi 	if (HAS_GT_UC(dev_priv) && GRAPHICS_VER(dev_priv) < 11)
13402cbc876dSMichał Winiarski 		to_gt(dev_priv)->pm_guc_events = GUC_INTR_GUC2HOST << 16;
134126705e20SSagar Arun Kamble 
13429a450b68SLucas De Marchi 	if (!HAS_DISPLAY(dev_priv))
13439a450b68SLucas De Marchi 		return;
13449a450b68SLucas De Marchi 
13453703060dSAndrzej Hajda 	dev_priv->drm.vblank_disable_immediate = true;
134621da2700SVille Syrjälä 
1347262fd485SChris Wilson 	/* Most platforms treat the display irq block as an always-on
1348262fd485SChris Wilson 	 * power domain. vlv/chv can disable it at runtime and need
1349262fd485SChris Wilson 	 * special care to avoid writing any of the display block registers
1350262fd485SChris Wilson 	 * outside of the power domain. We defer setting up the display irqs
1351262fd485SChris Wilson 	 * in this case to the runtime pm.
1352262fd485SChris Wilson 	 */
1353262fd485SChris Wilson 	dev_priv->display_irqs_enabled = true;
1354262fd485SChris Wilson 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1355262fd485SChris Wilson 		dev_priv->display_irqs_enabled = false;
1356262fd485SChris Wilson 
1357da38ba98SJani Nikula 	intel_hotplug_irq_init(dev_priv);
13582ccf2e03SChris Wilson }
135920afbda2SDaniel Vetter 
1360fca52a55SDaniel Vetter /**
1361cefcff8fSJoonas Lahtinen  * intel_irq_fini - deinitializes IRQ support
1362cefcff8fSJoonas Lahtinen  * @i915: i915 device instance
1363cefcff8fSJoonas Lahtinen  *
1364cefcff8fSJoonas Lahtinen  * This function deinitializes all the IRQ support.
1365cefcff8fSJoonas Lahtinen  */
1366cefcff8fSJoonas Lahtinen void intel_irq_fini(struct drm_i915_private *i915)
1367cefcff8fSJoonas Lahtinen {
1368cefcff8fSJoonas Lahtinen 	int i;
1369cefcff8fSJoonas Lahtinen 
1370cefcff8fSJoonas Lahtinen 	for (i = 0; i < MAX_L3_SLICES; ++i)
1371cefcff8fSJoonas Lahtinen 		kfree(i915->l3_parity.remap_info[i]);
1372cefcff8fSJoonas Lahtinen }
1373cefcff8fSJoonas Lahtinen 
1374b318b824SVille Syrjälä static irq_handler_t intel_irq_handler(struct drm_i915_private *dev_priv)
1375b318b824SVille Syrjälä {
1376b318b824SVille Syrjälä 	if (HAS_GMCH(dev_priv)) {
1377b318b824SVille Syrjälä 		if (IS_CHERRYVIEW(dev_priv))
1378b318b824SVille Syrjälä 			return cherryview_irq_handler;
1379b318b824SVille Syrjälä 		else if (IS_VALLEYVIEW(dev_priv))
1380b318b824SVille Syrjälä 			return valleyview_irq_handler;
1381651e7d48SLucas De Marchi 		else if (GRAPHICS_VER(dev_priv) == 4)
1382b318b824SVille Syrjälä 			return i965_irq_handler;
1383651e7d48SLucas De Marchi 		else if (GRAPHICS_VER(dev_priv) == 3)
1384b318b824SVille Syrjälä 			return i915_irq_handler;
1385b318b824SVille Syrjälä 		else
1386b318b824SVille Syrjälä 			return i8xx_irq_handler;
1387b318b824SVille Syrjälä 	} else {
138822e26af7SPaulo Zanoni 		if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
138997b492f5SLucas De Marchi 			return dg1_irq_handler;
139022e26af7SPaulo Zanoni 		else if (GRAPHICS_VER(dev_priv) >= 11)
1391b318b824SVille Syrjälä 			return gen11_irq_handler;
1392651e7d48SLucas De Marchi 		else if (GRAPHICS_VER(dev_priv) >= 8)
1393b318b824SVille Syrjälä 			return gen8_irq_handler;
1394b318b824SVille Syrjälä 		else
13959eae5e27SLucas De Marchi 			return ilk_irq_handler;
1396b318b824SVille Syrjälä 	}
1397b318b824SVille Syrjälä }
1398b318b824SVille Syrjälä 
/*
 * Disable and quiesce all interrupt sources, dispatching to the
 * platform-specific reset routine (mirrors intel_irq_handler()).
 */
static void intel_irq_reset(struct drm_i915_private *dev_priv)
{
	if (!HAS_GMCH(dev_priv)) {
		if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
			dg1_irq_reset(dev_priv);
		else if (GRAPHICS_VER(dev_priv) >= 11)
			gen11_irq_reset(dev_priv);
		else if (GRAPHICS_VER(dev_priv) >= 8)
			gen8_irq_reset(dev_priv);
		else
			ilk_irq_reset(dev_priv);
		return;
	}

	if (IS_CHERRYVIEW(dev_priv))
		cherryview_irq_reset(dev_priv);
	else if (IS_VALLEYVIEW(dev_priv))
		valleyview_irq_reset(dev_priv);
	else if (GRAPHICS_VER(dev_priv) == 4)
		i965_irq_reset(dev_priv);
	else if (GRAPHICS_VER(dev_priv) == 3)
		i915_irq_reset(dev_priv);
	else
		i8xx_irq_reset(dev_priv);
}
1423b318b824SVille Syrjälä 
/*
 * Enable the interrupt sources after a reset, dispatching to the
 * platform-specific postinstall routine (mirrors intel_irq_reset()).
 */
static void intel_irq_postinstall(struct drm_i915_private *dev_priv)
{
	if (!HAS_GMCH(dev_priv)) {
		if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
			dg1_irq_postinstall(dev_priv);
		else if (GRAPHICS_VER(dev_priv) >= 11)
			gen11_irq_postinstall(dev_priv);
		else if (GRAPHICS_VER(dev_priv) >= 8)
			gen8_irq_postinstall(dev_priv);
		else
			ilk_irq_postinstall(dev_priv);
		return;
	}

	if (IS_CHERRYVIEW(dev_priv))
		cherryview_irq_postinstall(dev_priv);
	else if (IS_VALLEYVIEW(dev_priv))
		valleyview_irq_postinstall(dev_priv);
	else if (GRAPHICS_VER(dev_priv) == 4)
		i965_irq_postinstall(dev_priv);
	else if (GRAPHICS_VER(dev_priv) == 3)
		i915_irq_postinstall(dev_priv);
	else
		i8xx_irq_postinstall(dev_priv);
}
1448b318b824SVille Syrjälä 
1449cefcff8fSJoonas Lahtinen /**
1450fca52a55SDaniel Vetter  * intel_irq_install - enables the hardware interrupt
1451fca52a55SDaniel Vetter  * @dev_priv: i915 device instance
1452fca52a55SDaniel Vetter  *
1453fca52a55SDaniel Vetter  * This function enables the hardware interrupt handling, but leaves the hotplug
1454fca52a55SDaniel Vetter  * handling still disabled. It is called after intel_irq_init().
1455fca52a55SDaniel Vetter  *
1456fca52a55SDaniel Vetter  * In the driver load and resume code we need working interrupts in a few places
1457fca52a55SDaniel Vetter  * but don't want to deal with the hassle of concurrent probe and hotplug
1458fca52a55SDaniel Vetter  * workers. Hence the split into this two-stage approach.
1459fca52a55SDaniel Vetter  */
int intel_irq_install(struct drm_i915_private *dev_priv)
{
	int irq = to_pci_dev(dev_priv->drm.dev)->irq;
	int ret;

	/*
	 * We enable some interrupt sources in our postinstall hooks, so mark
	 * interrupts as enabled _before_ actually enabling them to avoid
	 * special cases in our ordering checks.
	 */
	dev_priv->runtime_pm.irqs_enabled = true;

	dev_priv->irq_enabled = true;

	/* Quiesce the hardware before the handler can be invoked. */
	intel_irq_reset(dev_priv);

	ret = request_irq(irq, intel_irq_handler(dev_priv),
			  IRQF_SHARED, DRIVER_NAME, dev_priv);
	if (ret < 0) {
		/* Roll back the sw-enabled state on failure. */
		dev_priv->irq_enabled = false;
		return ret;
	}

	/* Unmask the interrupt sources only once the handler is in place. */
	intel_irq_postinstall(dev_priv);

	return ret;
}
14872aeb7d3aSDaniel Vetter 
1488fca52a55SDaniel Vetter /**
 * intel_irq_uninstall - finalizes all irq handling
1490fca52a55SDaniel Vetter  * @dev_priv: i915 device instance
1491fca52a55SDaniel Vetter  *
1492fca52a55SDaniel Vetter  * This stops interrupt and hotplug handling and unregisters and frees all
1493fca52a55SDaniel Vetter  * resources acquired in the init functions.
1494fca52a55SDaniel Vetter  */
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
{
	int irq = to_pci_dev(dev_priv->drm.dev)->irq;

	/*
	 * FIXME we can get called twice during driver probe
	 * error handling as well as during driver remove due to
	 * intel_display_driver_remove() calling us out of sequence.
	 * Would be nice if it didn't do that...
	 */
	if (!dev_priv->irq_enabled)
		return;

	dev_priv->irq_enabled = false;

	/* Mask/disable everything before giving the line back. */
	intel_irq_reset(dev_priv);

	free_irq(irq, dev_priv);

	intel_hpd_cancel_work(dev_priv);
	dev_priv->runtime_pm.irqs_enabled = false;
}
15172aeb7d3aSDaniel Vetter 
1518fca52a55SDaniel Vetter /**
1519fca52a55SDaniel Vetter  * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
1520fca52a55SDaniel Vetter  * @dev_priv: i915 device instance
1521fca52a55SDaniel Vetter  *
1522fca52a55SDaniel Vetter  * This function is used to disable interrupts at runtime, both in the runtime
1523fca52a55SDaniel Vetter  * pm and the system suspend/resume code.
1524fca52a55SDaniel Vetter  */
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
{
	intel_irq_reset(dev_priv);
	dev_priv->runtime_pm.irqs_enabled = false;
	/* Wait for any handler instance already running to complete. */
	intel_synchronize_irq(dev_priv);
}
1531c67a470bSPaulo Zanoni 
1532fca52a55SDaniel Vetter /**
1533fca52a55SDaniel Vetter  * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
1534fca52a55SDaniel Vetter  * @dev_priv: i915 device instance
1535fca52a55SDaniel Vetter  *
1536fca52a55SDaniel Vetter  * This function is used to enable interrupts at runtime, both in the runtime
1537fca52a55SDaniel Vetter  * pm and the system suspend/resume code.
1538fca52a55SDaniel Vetter  */
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
{
	/* Mark enabled first; the postinstall hooks enable sources and the
	 * ordering checks expect this flag to already be set (same reason
	 * as in intel_irq_install()). */
	dev_priv->runtime_pm.irqs_enabled = true;
	intel_irq_reset(dev_priv);
	intel_irq_postinstall(dev_priv);
}
1545d64575eeSJani Nikula 
/* Report the software irq-enabled state tracked by install/uninstall
 * and the runtime-pm enable/disable helpers above. */
bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
{
	return dev_priv->runtime_pm.irqs_enabled;
}
1550d64575eeSJani Nikula 
/* Wait until any in-flight invocation of our irq handler has finished. */
void intel_synchronize_irq(struct drm_i915_private *i915)
{
	synchronize_irq(to_pci_dev(i915->drm.dev)->irq);
}
1555320ad343SThomas Zimmermann 
/* Like intel_synchronize_irq(), but only waits for the hard irq part
 * of the handler (see synchronize_hardirq() semantics). */
void intel_synchronize_hardirq(struct drm_i915_private *i915)
{
	synchronize_hardirq(to_pci_dev(i915->drm.dev)->irq);
}
1560