xref: /openbmc/linux/drivers/gpu/drm/i915/i915_irq.c (revision fcc02c754f313e244cbecfa057ba27978f3b09ce)
1c0e09200SDave Airlie /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
2c0e09200SDave Airlie  */
3c0e09200SDave Airlie /*
4c0e09200SDave Airlie  * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
5c0e09200SDave Airlie  * All Rights Reserved.
6c0e09200SDave Airlie  *
7c0e09200SDave Airlie  * Permission is hereby granted, free of charge, to any person obtaining a
8c0e09200SDave Airlie  * copy of this software and associated documentation files (the
9c0e09200SDave Airlie  * "Software"), to deal in the Software without restriction, including
10c0e09200SDave Airlie  * without limitation the rights to use, copy, modify, merge, publish,
11c0e09200SDave Airlie  * distribute, sub license, and/or sell copies of the Software, and to
12c0e09200SDave Airlie  * permit persons to whom the Software is furnished to do so, subject to
13c0e09200SDave Airlie  * the following conditions:
14c0e09200SDave Airlie  *
15c0e09200SDave Airlie  * The above copyright notice and this permission notice (including the
16c0e09200SDave Airlie  * next paragraph) shall be included in all copies or substantial portions
17c0e09200SDave Airlie  * of the Software.
18c0e09200SDave Airlie  *
19c0e09200SDave Airlie  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20c0e09200SDave Airlie  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21c0e09200SDave Airlie  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22c0e09200SDave Airlie  * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23c0e09200SDave Airlie  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24c0e09200SDave Airlie  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25c0e09200SDave Airlie  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26c0e09200SDave Airlie  *
27c0e09200SDave Airlie  */
28c0e09200SDave Airlie 
29a70491ccSJoe Perches #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30a70491ccSJoe Perches 
3155367a27SJani Nikula #include <linux/slab.h>
3255367a27SJani Nikula #include <linux/sysrq.h>
3355367a27SJani Nikula 
34fcd70cd3SDaniel Vetter #include <drm/drm_drv.h>
3555367a27SJani Nikula 
362b874a02SJani Nikula #include "display/intel_display_irq.h"
371d455f8dSJani Nikula #include "display/intel_display_types.h"
38df0566a6SJani Nikula #include "display/intel_hotplug.h"
39da38ba98SJani Nikula #include "display/intel_hotplug_irq.h"
40df0566a6SJani Nikula #include "display/intel_lpe_audio.h"
417f6947fdSJani Nikula #include "display/intel_psr_regs.h"
42df0566a6SJani Nikula 
43b3786b29SChris Wilson #include "gt/intel_breadcrumbs.h"
442239e6dfSDaniele Ceraolo Spurio #include "gt/intel_gt.h"
45cf1c97dcSAndi Shyti #include "gt/intel_gt_irq.h"
46d762043fSAndi Shyti #include "gt/intel_gt_pm_irq.h"
470d6419e9SMatt Roper #include "gt/intel_gt_regs.h"
483e7abf81SAndi Shyti #include "gt/intel_rps.h"
492239e6dfSDaniele Ceraolo Spurio 
5024524e3fSJani Nikula #include "i915_driver.h"
51c0e09200SDave Airlie #include "i915_drv.h"
52440e2b3dSJani Nikula #include "i915_irq.h"
53476f62b8SJani Nikula #include "i915_reg.h"
54c0e09200SDave Airlie 
55fca52a55SDaniel Vetter /**
56fca52a55SDaniel Vetter  * DOC: interrupt handling
57fca52a55SDaniel Vetter  *
58fca52a55SDaniel Vetter  * These functions provide the basic support for enabling and disabling the
59fca52a55SDaniel Vetter  * interrupt handling support. There's a lot more functionality in i915_irq.c
60fca52a55SDaniel Vetter  * and related files, but that will be described in separate chapters.
61fca52a55SDaniel Vetter  */
62fca52a55SDaniel Vetter 
639c6508b9SThomas Gleixner /*
649c6508b9SThomas Gleixner  * Interrupt statistic for PMU. Increments the counter only if the
6578f48aa6SBo Liu  * interrupt originated from the GPU, so interrupts from a device which
669c6508b9SThomas Gleixner  * shares the interrupt line are not accounted.
679c6508b9SThomas Gleixner  */
689c6508b9SThomas Gleixner static inline void pmu_irq_stats(struct drm_i915_private *i915,
699c6508b9SThomas Gleixner 				 irqreturn_t res)
709c6508b9SThomas Gleixner {
	/* Not handled by us (e.g. a device sharing the IRQ line) - don't count it. */
719c6508b9SThomas Gleixner 	if (unlikely(res != IRQ_HANDLED))
729c6508b9SThomas Gleixner 		return;
739c6508b9SThomas Gleixner 
749c6508b9SThomas Gleixner 	/*
759c6508b9SThomas Gleixner 	 * A clever compiler translates that into INC. A not so clever one
769c6508b9SThomas Gleixner 	 * should at least prevent store tearing.
779c6508b9SThomas Gleixner 	 */
789c6508b9SThomas Gleixner 	WRITE_ONCE(i915->pmu.irq_count, i915->pmu.irq_count + 1);
799c6508b9SThomas Gleixner }
809c6508b9SThomas Gleixner 
/*
 * Reset a GEN3+ interrupt register triplet: mask everything in IMR,
 * disable all enables in IER, then clear any pending bits in IIR.
 * The posting reads flush each write out to the hardware.
 */
81cf1c97dcSAndi Shyti void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr,
8268eb49b1SPaulo Zanoni 		    i915_reg_t iir, i915_reg_t ier)
8368eb49b1SPaulo Zanoni {
8465f42cdcSPaulo Zanoni 	intel_uncore_write(uncore, imr, 0xffffffff);
8565f42cdcSPaulo Zanoni 	intel_uncore_posting_read(uncore, imr);
8668eb49b1SPaulo Zanoni 
8765f42cdcSPaulo Zanoni 	intel_uncore_write(uncore, ier, 0);
8868eb49b1SPaulo Zanoni 
895c502442SPaulo Zanoni 	/* IIR can theoretically queue up two events. Be paranoid. */
9065f42cdcSPaulo Zanoni 	intel_uncore_write(uncore, iir, 0xffffffff);
9165f42cdcSPaulo Zanoni 	intel_uncore_posting_read(uncore, iir);
9265f42cdcSPaulo Zanoni 	intel_uncore_write(uncore, iir, 0xffffffff);
9365f42cdcSPaulo Zanoni 	intel_uncore_posting_read(uncore, iir);
9468eb49b1SPaulo Zanoni }
955c502442SPaulo Zanoni 
/*
 * GEN2 variant of gen3_irq_reset(): same IMR/IER/IIR reset sequence, but
 * GEN2 interrupt registers are fixed and only 16 bits wide.
 */
96ad7632ffSJani Nikula static void gen2_irq_reset(struct intel_uncore *uncore)
9768eb49b1SPaulo Zanoni {
9865f42cdcSPaulo Zanoni 	intel_uncore_write16(uncore, GEN2_IMR, 0xffff);
9965f42cdcSPaulo Zanoni 	intel_uncore_posting_read16(uncore, GEN2_IMR);
100a9d356a6SPaulo Zanoni 
10165f42cdcSPaulo Zanoni 	intel_uncore_write16(uncore, GEN2_IER, 0);
10268eb49b1SPaulo Zanoni 
10368eb49b1SPaulo Zanoni 	/* IIR can theoretically queue up two events. Be paranoid. */
10465f42cdcSPaulo Zanoni 	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
10565f42cdcSPaulo Zanoni 	intel_uncore_posting_read16(uncore, GEN2_IIR);
10665f42cdcSPaulo Zanoni 	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
10765f42cdcSPaulo Zanoni 	intel_uncore_posting_read16(uncore, GEN2_IIR);
10868eb49b1SPaulo Zanoni }
10968eb49b1SPaulo Zanoni 
110337ba017SPaulo Zanoni /*
111337ba017SPaulo Zanoni  * We should clear IMR at preinstall/uninstall, and just check at postinstall.
112337ba017SPaulo Zanoni  */
/*
 * Verify that an IIR register reads back as zero before (re)enabling
 * interrupts. If it doesn't, warn and clear it twice (IIR can queue up
 * two events) so we start from a known-clean state.
 */
1132b874a02SJani Nikula void gen3_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg)
114b51a2842SVille Syrjälä {
11565f42cdcSPaulo Zanoni 	u32 val = intel_uncore_read(uncore, reg);
116b51a2842SVille Syrjälä 
117b51a2842SVille Syrjälä 	if (val == 0)
118b51a2842SVille Syrjälä 		return;
119b51a2842SVille Syrjälä 
120a9f236d1SPankaj Bharadiya 	drm_WARN(&uncore->i915->drm, 1,
121a9f236d1SPankaj Bharadiya 		 "Interrupt register 0x%x is not zero: 0x%08x\n",
122f0f59a00SVille Syrjälä 		 i915_mmio_reg_offset(reg), val);
12365f42cdcSPaulo Zanoni 	intel_uncore_write(uncore, reg, 0xffffffff);
12465f42cdcSPaulo Zanoni 	intel_uncore_posting_read(uncore, reg);
12565f42cdcSPaulo Zanoni 	intel_uncore_write(uncore, reg, 0xffffffff);
12665f42cdcSPaulo Zanoni 	intel_uncore_posting_read(uncore, reg);
127b51a2842SVille Syrjälä }
128337ba017SPaulo Zanoni 
/*
 * GEN2 variant of gen3_assert_iir_is_zero(): check the 16-bit GEN2_IIR is
 * clear, warning and double-clearing it if not.
 */
12965f42cdcSPaulo Zanoni static void gen2_assert_iir_is_zero(struct intel_uncore *uncore)
130e9e9848aSVille Syrjälä {
13165f42cdcSPaulo Zanoni 	u16 val = intel_uncore_read16(uncore, GEN2_IIR);
132e9e9848aSVille Syrjälä 
133e9e9848aSVille Syrjälä 	if (val == 0)
134e9e9848aSVille Syrjälä 		return;
135e9e9848aSVille Syrjälä 
136a9f236d1SPankaj Bharadiya 	drm_WARN(&uncore->i915->drm, 1,
137a9f236d1SPankaj Bharadiya 		 "Interrupt register 0x%x is not zero: 0x%08x\n",
1389d9523d8SPaulo Zanoni 		 i915_mmio_reg_offset(GEN2_IIR), val);
13965f42cdcSPaulo Zanoni 	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
14065f42cdcSPaulo Zanoni 	intel_uncore_posting_read16(uncore, GEN2_IIR);
14165f42cdcSPaulo Zanoni 	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
14265f42cdcSPaulo Zanoni 	intel_uncore_posting_read16(uncore, GEN2_IIR);
143e9e9848aSVille Syrjälä }
144e9e9848aSVille Syrjälä 
/*
 * Program a GEN3+ interrupt register triplet: assert IIR is already clear,
 * then install the enable (IER) and mask (IMR) values. The posting read on
 * IMR flushes the configuration to the hardware.
 */
145cf1c97dcSAndi Shyti void gen3_irq_init(struct intel_uncore *uncore,
14668eb49b1SPaulo Zanoni 		   i915_reg_t imr, u32 imr_val,
14768eb49b1SPaulo Zanoni 		   i915_reg_t ier, u32 ier_val,
14868eb49b1SPaulo Zanoni 		   i915_reg_t iir)
14968eb49b1SPaulo Zanoni {
15065f42cdcSPaulo Zanoni 	gen3_assert_iir_is_zero(uncore, iir);
15135079899SPaulo Zanoni 
15265f42cdcSPaulo Zanoni 	intel_uncore_write(uncore, ier, ier_val);
15365f42cdcSPaulo Zanoni 	intel_uncore_write(uncore, imr, imr_val);
15465f42cdcSPaulo Zanoni 	intel_uncore_posting_read(uncore, imr);
15568eb49b1SPaulo Zanoni }
15635079899SPaulo Zanoni 
/*
 * GEN2 variant of gen3_irq_init(): program the fixed 16-bit GEN2 IER/IMR
 * registers after checking GEN2_IIR is clear.
 */
157ad7632ffSJani Nikula static void gen2_irq_init(struct intel_uncore *uncore,
1582918c3caSPaulo Zanoni 			  u32 imr_val, u32 ier_val)
15968eb49b1SPaulo Zanoni {
16065f42cdcSPaulo Zanoni 	gen2_assert_iir_is_zero(uncore);
16168eb49b1SPaulo Zanoni 
16265f42cdcSPaulo Zanoni 	intel_uncore_write16(uncore, GEN2_IER, ier_val);
16365f42cdcSPaulo Zanoni 	intel_uncore_write16(uncore, GEN2_IMR, imr_val);
16465f42cdcSPaulo Zanoni 	intel_uncore_posting_read16(uncore, GEN2_IMR);
16568eb49b1SPaulo Zanoni }
16668eb49b1SPaulo Zanoni 
167d9dc34f1SVille Syrjälä /**
16874bb98baSLucas De Marchi  * ivb_parity_work - Workqueue called when a parity error interrupt
169e3689190SBen Widawsky  * occurred.
170e3689190SBen Widawsky  * @work: workqueue struct
171e3689190SBen Widawsky  *
172e3689190SBen Widawsky  * Doesn't actually do anything except notify userspace. As a consequence of
173e3689190SBen Widawsky  * this event, userspace should try to remap the bad rows since statistically
174e3689190SBen Widawsky  * it is likely that the same row will go bad again.
175e3689190SBen Widawsky  */
17674bb98baSLucas De Marchi static void ivb_parity_work(struct work_struct *work)
177e3689190SBen Widawsky {
1782d1013ddSJani Nikula 	struct drm_i915_private *dev_priv =
179cefcff8fSJoonas Lahtinen 		container_of(work, typeof(*dev_priv), l3_parity.error_work);
1802cbc876dSMichał Winiarski 	struct intel_gt *gt = to_gt(dev_priv);
181e3689190SBen Widawsky 	u32 error_status, row, bank, subbank;
18235a85ac6SBen Widawsky 	char *parity_event[6];
183a9c287c9SJani Nikula 	u32 misccpctl;
184a9c287c9SJani Nikula 	u8 slice = 0;
185e3689190SBen Widawsky 
186e3689190SBen Widawsky 	/* We must turn off DOP level clock gating to access the L3 registers.
187e3689190SBen Widawsky 	 * In order to prevent a get/put style interface, acquire struct mutex
188e3689190SBen Widawsky 	 * any time we access those registers.
189e3689190SBen Widawsky 	 */
19091c8a326SChris Wilson 	mutex_lock(&dev_priv->drm.struct_mutex);
191e3689190SBen Widawsky 
19235a85ac6SBen Widawsky 	/* If we've screwed up tracking, just let the interrupt fire again */
19348a1b8d4SPankaj Bharadiya 	if (drm_WARN_ON(&dev_priv->drm, !dev_priv->l3_parity.which_slice))
19435a85ac6SBen Widawsky 		goto out;
19535a85ac6SBen Widawsky 
196f7435467SAndrzej Hajda 	misccpctl = intel_uncore_rmw(&dev_priv->uncore, GEN7_MISCCPCTL,
197f7435467SAndrzej Hajda 				     GEN7_DOP_CLOCK_GATE_ENABLE, 0);
1982939eb06SJani Nikula 	intel_uncore_posting_read(&dev_priv->uncore, GEN7_MISCCPCTL);
199e3689190SBen Widawsky 
	/* Process every slice with a pending parity error, one uevent each. */
20035a85ac6SBen Widawsky 	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
201f0f59a00SVille Syrjälä 		i915_reg_t reg;
20235a85ac6SBen Widawsky 
20335a85ac6SBen Widawsky 		slice--;
20448a1b8d4SPankaj Bharadiya 		if (drm_WARN_ON_ONCE(&dev_priv->drm,
20548a1b8d4SPankaj Bharadiya 				     slice >= NUM_L3_SLICES(dev_priv)))
20635a85ac6SBen Widawsky 			break;
20735a85ac6SBen Widawsky 
20835a85ac6SBen Widawsky 		dev_priv->l3_parity.which_slice &= ~(1<<slice);
20935a85ac6SBen Widawsky 
2106fa1c5f1SVille Syrjälä 		reg = GEN7_L3CDERRST1(slice);
21135a85ac6SBen Widawsky 
2122939eb06SJani Nikula 		error_status = intel_uncore_read(&dev_priv->uncore, reg);
213e3689190SBen Widawsky 		row = GEN7_PARITY_ERROR_ROW(error_status);
214e3689190SBen Widawsky 		bank = GEN7_PARITY_ERROR_BANK(error_status);
215e3689190SBen Widawsky 		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
216e3689190SBen Widawsky 
2172939eb06SJani Nikula 		intel_uncore_write(&dev_priv->uncore, reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
2182939eb06SJani Nikula 		intel_uncore_posting_read(&dev_priv->uncore, reg);
219e3689190SBen Widawsky 
220cce723edSBen Widawsky 		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
221e3689190SBen Widawsky 		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
222e3689190SBen Widawsky 		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
223e3689190SBen Widawsky 		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
22435a85ac6SBen Widawsky 		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
22535a85ac6SBen Widawsky 		parity_event[5] = NULL;
226e3689190SBen Widawsky 
22791c8a326SChris Wilson 		kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj,
228e3689190SBen Widawsky 				   KOBJ_CHANGE, parity_event);
229e3689190SBen Widawsky 
230a10234fdSTvrtko Ursulin 		drm_dbg(&dev_priv->drm,
231a10234fdSTvrtko Ursulin 			"Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
23235a85ac6SBen Widawsky 			slice, row, bank, subbank);
233e3689190SBen Widawsky 
23435a85ac6SBen Widawsky 		kfree(parity_event[4]);
235e3689190SBen Widawsky 		kfree(parity_event[3]);
236e3689190SBen Widawsky 		kfree(parity_event[2]);
237e3689190SBen Widawsky 		kfree(parity_event[1]);
238e3689190SBen Widawsky 	}
239e3689190SBen Widawsky 
2402939eb06SJani Nikula 	intel_uncore_write(&dev_priv->uncore, GEN7_MISCCPCTL, misccpctl);
24135a85ac6SBen Widawsky 
24235a85ac6SBen Widawsky out:
24348a1b8d4SPankaj Bharadiya 	drm_WARN_ON(&dev_priv->drm, dev_priv->l3_parity.which_slice);
	/* Re-enable the parity interrupt now that all pending slices are handled. */
24403d2c54dSMatt Roper 	spin_lock_irq(gt->irq_lock);
245cf1c97dcSAndi Shyti 	gen5_gt_enable_irq(gt, GT_PARITY_ERROR(dev_priv));
24603d2c54dSMatt Roper 	spin_unlock_irq(gt->irq_lock);
24735a85ac6SBen Widawsky 
24891c8a326SChris Wilson 	mutex_unlock(&dev_priv->drm.struct_mutex);
24935a85ac6SBen Widawsky }
25035a85ac6SBen Widawsky 
/*
 * Top-level interrupt handler for VLV. With the master interrupt disabled,
 * reads and acks the GT (GTIIR), PM (GEN6_PMIIR) and display (VLV_IIR)
 * identity registers, then dispatches to the per-source handlers.
 */
251c1874ed7SImre Deak static irqreturn_t valleyview_irq_handler(int irq, void *arg)
252c1874ed7SImre Deak {
253b318b824SVille Syrjälä 	struct drm_i915_private *dev_priv = arg;
254c1874ed7SImre Deak 	irqreturn_t ret = IRQ_NONE;
255c1874ed7SImre Deak 
2562dd2a883SImre Deak 	if (!intel_irqs_enabled(dev_priv))
2572dd2a883SImre Deak 		return IRQ_NONE;
2582dd2a883SImre Deak 
2591f814dacSImre Deak 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
2609102650fSDaniele Ceraolo Spurio 	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
2611f814dacSImre Deak 
2621e1cace9SVille Syrjälä 	do {
2636e814800SVille Syrjälä 		u32 iir, gt_iir, pm_iir;
2642ecb8ca4SVille Syrjälä 		u32 pipe_stats[I915_MAX_PIPES] = {};
2651ae3c34cSVille Syrjälä 		u32 hotplug_status = 0;
266a5e485a9SVille Syrjälä 		u32 ier = 0;
2673ff60f89SOscar Mateo 
2682939eb06SJani Nikula 		gt_iir = intel_uncore_read(&dev_priv->uncore, GTIIR);
2692939eb06SJani Nikula 		pm_iir = intel_uncore_read(&dev_priv->uncore, GEN6_PMIIR);
2702939eb06SJani Nikula 		iir = intel_uncore_read(&dev_priv->uncore, VLV_IIR);
271c1874ed7SImre Deak 
272c1874ed7SImre Deak 		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
2731e1cace9SVille Syrjälä 			break;
274c1874ed7SImre Deak 
275c1874ed7SImre Deak 		ret = IRQ_HANDLED;
276c1874ed7SImre Deak 
277a5e485a9SVille Syrjälä 		/*
278a5e485a9SVille Syrjälä 		 * Theory on interrupt generation, based on empirical evidence:
279a5e485a9SVille Syrjälä 		 *
280a5e485a9SVille Syrjälä 		 * x = ((VLV_IIR & VLV_IER) ||
281a5e485a9SVille Syrjälä 		 *      (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) &&
282a5e485a9SVille Syrjälä 		 *       (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE)));
283a5e485a9SVille Syrjälä 		 *
284a5e485a9SVille Syrjälä 		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
285a5e485a9SVille Syrjälä 		 * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to
286a5e485a9SVille Syrjälä 		 * guarantee the CPU interrupt will be raised again even if we
287a5e485a9SVille Syrjälä 		 * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR
288a5e485a9SVille Syrjälä 		 * bits this time around.
289a5e485a9SVille Syrjälä 		 */
2902939eb06SJani Nikula 		intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, 0);
2918cee664dSAndrzej Hajda 		ier = intel_uncore_rmw(&dev_priv->uncore, VLV_IER, ~0, 0);
2924a0a0202SVille Syrjälä 
2934a0a0202SVille Syrjälä 		if (gt_iir)
2942939eb06SJani Nikula 			intel_uncore_write(&dev_priv->uncore, GTIIR, gt_iir);
2954a0a0202SVille Syrjälä 		if (pm_iir)
2962939eb06SJani Nikula 			intel_uncore_write(&dev_priv->uncore, GEN6_PMIIR, pm_iir);
2974a0a0202SVille Syrjälä 
2987ce4d1f2SVille Syrjälä 		if (iir & I915_DISPLAY_PORT_INTERRUPT)
2991ae3c34cSVille Syrjälä 			hotplug_status = i9xx_hpd_irq_ack(dev_priv);
3007ce4d1f2SVille Syrjälä 
3013ff60f89SOscar Mateo 		/* Call regardless, as some status bits might not be
3023ff60f89SOscar Mateo 		 * signalled in iir */
303eb64343cSVille Syrjälä 		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
3047ce4d1f2SVille Syrjälä 
305eef57324SJerome Anand 		if (iir & (I915_LPE_PIPE_A_INTERRUPT |
306eef57324SJerome Anand 			   I915_LPE_PIPE_B_INTERRUPT))
307eef57324SJerome Anand 			intel_lpe_audio_irq_handler(dev_priv);
308eef57324SJerome Anand 
3097ce4d1f2SVille Syrjälä 		/*
3107ce4d1f2SVille Syrjälä 		 * VLV_IIR is single buffered, and reflects the level
3117ce4d1f2SVille Syrjälä 		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
3127ce4d1f2SVille Syrjälä 		 */
3137ce4d1f2SVille Syrjälä 		if (iir)
3142939eb06SJani Nikula 			intel_uncore_write(&dev_priv->uncore, VLV_IIR, iir);
3154a0a0202SVille Syrjälä 
3162939eb06SJani Nikula 		intel_uncore_write(&dev_priv->uncore, VLV_IER, ier);
3172939eb06SJani Nikula 		intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
3181ae3c34cSVille Syrjälä 
31952894874SVille Syrjälä 		if (gt_iir)
3202cbc876dSMichał Winiarski 			gen6_gt_irq_handler(to_gt(dev_priv), gt_iir);
32152894874SVille Syrjälä 		if (pm_iir)
3222cbc876dSMichał Winiarski 			gen6_rps_irq_handler(&to_gt(dev_priv)->rps, pm_iir);
32352894874SVille Syrjälä 
3241ae3c34cSVille Syrjälä 		if (hotplug_status)
32591d14251STvrtko Ursulin 			i9xx_hpd_irq_handler(dev_priv, hotplug_status);
3262ecb8ca4SVille Syrjälä 
32791d14251STvrtko Ursulin 		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
3281e1cace9SVille Syrjälä 	} while (0);
3297e231dbeSJesse Barnes 
3309c6508b9SThomas Gleixner 	pmu_irq_stats(dev_priv, ret);
3319c6508b9SThomas Gleixner 
3329102650fSDaniele Ceraolo Spurio 	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
3331f814dacSImre Deak 
3347e231dbeSJesse Barnes 	return ret;
3357e231dbeSJesse Barnes }
3367e231dbeSJesse Barnes 
/*
 * Top-level interrupt handler for CHV. Like valleyview_irq_handler(), but
 * the GT/PM sources are summarized in GEN8_MASTER_IRQ rather than read from
 * individual GT IIRs; display sources still come through VLV_IIR.
 */
33743f328d7SVille Syrjälä static irqreturn_t cherryview_irq_handler(int irq, void *arg)
33843f328d7SVille Syrjälä {
339b318b824SVille Syrjälä 	struct drm_i915_private *dev_priv = arg;
34043f328d7SVille Syrjälä 	irqreturn_t ret = IRQ_NONE;
34143f328d7SVille Syrjälä 
3422dd2a883SImre Deak 	if (!intel_irqs_enabled(dev_priv))
3432dd2a883SImre Deak 		return IRQ_NONE;
3442dd2a883SImre Deak 
3451f814dacSImre Deak 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
3469102650fSDaniele Ceraolo Spurio 	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
3471f814dacSImre Deak 
348579de73bSChris Wilson 	do {
3496e814800SVille Syrjälä 		u32 master_ctl, iir;
3502ecb8ca4SVille Syrjälä 		u32 pipe_stats[I915_MAX_PIPES] = {};
3511ae3c34cSVille Syrjälä 		u32 hotplug_status = 0;
352a5e485a9SVille Syrjälä 		u32 ier = 0;
353a5e485a9SVille Syrjälä 
3542939eb06SJani Nikula 		master_ctl = intel_uncore_read(&dev_priv->uncore, GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
3552939eb06SJani Nikula 		iir = intel_uncore_read(&dev_priv->uncore, VLV_IIR);
3563278f67fSVille Syrjälä 
3573278f67fSVille Syrjälä 		if (master_ctl == 0 && iir == 0)
3588e5fd599SVille Syrjälä 			break;
35943f328d7SVille Syrjälä 
36027b6c122SOscar Mateo 		ret = IRQ_HANDLED;
36127b6c122SOscar Mateo 
362a5e485a9SVille Syrjälä 		/*
363a5e485a9SVille Syrjälä 		 * Theory on interrupt generation, based on empirical evidence:
364a5e485a9SVille Syrjälä 		 *
365a5e485a9SVille Syrjälä 		 * x = ((VLV_IIR & VLV_IER) ||
366a5e485a9SVille Syrjälä 		 *      ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) &&
367a5e485a9SVille Syrjälä 		 *       (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL)));
368a5e485a9SVille Syrjälä 		 *
369a5e485a9SVille Syrjälä 		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
370a5e485a9SVille Syrjälä 		 * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to
371a5e485a9SVille Syrjälä 		 * guarantee the CPU interrupt will be raised again even if we
372a5e485a9SVille Syrjälä 		 * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL
373a5e485a9SVille Syrjälä 		 * bits this time around.
374a5e485a9SVille Syrjälä 		 */
3752939eb06SJani Nikula 		intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, 0);
3768cee664dSAndrzej Hajda 		ier = intel_uncore_rmw(&dev_priv->uncore, VLV_IER, ~0, 0);
37743f328d7SVille Syrjälä 
3782cbc876dSMichał Winiarski 		gen8_gt_irq_handler(to_gt(dev_priv), master_ctl);
37927b6c122SOscar Mateo 
38027b6c122SOscar Mateo 		if (iir & I915_DISPLAY_PORT_INTERRUPT)
3811ae3c34cSVille Syrjälä 			hotplug_status = i9xx_hpd_irq_ack(dev_priv);
38243f328d7SVille Syrjälä 
38327b6c122SOscar Mateo 		/* Call regardless, as some status bits might not be
38427b6c122SOscar Mateo 		 * signalled in iir */
385eb64343cSVille Syrjälä 		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
38643f328d7SVille Syrjälä 
387eef57324SJerome Anand 		if (iir & (I915_LPE_PIPE_A_INTERRUPT |
388eef57324SJerome Anand 			   I915_LPE_PIPE_B_INTERRUPT |
389eef57324SJerome Anand 			   I915_LPE_PIPE_C_INTERRUPT))
390eef57324SJerome Anand 			intel_lpe_audio_irq_handler(dev_priv);
391eef57324SJerome Anand 
3927ce4d1f2SVille Syrjälä 		/*
3937ce4d1f2SVille Syrjälä 		 * VLV_IIR is single buffered, and reflects the level
3947ce4d1f2SVille Syrjälä 		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
3957ce4d1f2SVille Syrjälä 		 */
3967ce4d1f2SVille Syrjälä 		if (iir)
3972939eb06SJani Nikula 			intel_uncore_write(&dev_priv->uncore, VLV_IIR, iir);
3987ce4d1f2SVille Syrjälä 
3992939eb06SJani Nikula 		intel_uncore_write(&dev_priv->uncore, VLV_IER, ier);
4002939eb06SJani Nikula 		intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
4011ae3c34cSVille Syrjälä 
4021ae3c34cSVille Syrjälä 		if (hotplug_status)
40391d14251STvrtko Ursulin 			i9xx_hpd_irq_handler(dev_priv, hotplug_status);
4042ecb8ca4SVille Syrjälä 
40591d14251STvrtko Ursulin 		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
406579de73bSChris Wilson 	} while (0);
4073278f67fSVille Syrjälä 
4089c6508b9SThomas Gleixner 	pmu_irq_stats(dev_priv, ret);
4099c6508b9SThomas Gleixner 
4109102650fSDaniele Ceraolo Spurio 	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
4111f814dacSImre Deak 
41243f328d7SVille Syrjälä 	return ret;
41343f328d7SVille Syrjälä }
41443f328d7SVille Syrjälä 
41572c90f62SOscar Mateo /*
41672c90f62SOscar Mateo  * To handle irqs with the minimum potential races with fresh interrupts, we:
41772c90f62SOscar Mateo  * 1 - Disable Master Interrupt Control.
41872c90f62SOscar Mateo  * 2 - Find the source(s) of the interrupt.
41972c90f62SOscar Mateo  * 3 - Clear the Interrupt Identity bits (IIR).
42072c90f62SOscar Mateo  * 4 - Process the interrupt(s) that had bits set in the IIRs.
42172c90f62SOscar Mateo  * 5 - Re-enable Master Interrupt Control.
42272c90f62SOscar Mateo  */
4239eae5e27SLucas De Marchi static irqreturn_t ilk_irq_handler(int irq, void *arg)
424b1f14ad0SJesse Barnes {
425c48a798aSChris Wilson 	struct drm_i915_private *i915 = arg;
42672e9abc3SJani Nikula 	void __iomem * const regs = intel_uncore_regs(&i915->uncore);
427f1af8fc1SPaulo Zanoni 	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
4280e43406bSChris Wilson 	irqreturn_t ret = IRQ_NONE;
429b1f14ad0SJesse Barnes 
430c48a798aSChris Wilson 	if (unlikely(!intel_irqs_enabled(i915)))
4312dd2a883SImre Deak 		return IRQ_NONE;
4322dd2a883SImre Deak 
4331f814dacSImre Deak 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
434c48a798aSChris Wilson 	disable_rpm_wakeref_asserts(&i915->runtime_pm);
4351f814dacSImre Deak 
436b1f14ad0SJesse Barnes 	/* disable master interrupt before clearing iir  */
437c48a798aSChris Wilson 	de_ier = raw_reg_read(regs, DEIER);
438c48a798aSChris Wilson 	raw_reg_write(regs, DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
4390e43406bSChris Wilson 
44044498aeaSPaulo Zanoni 	/* Disable south interrupts. We'll only write to SDEIIR once, so further
44144498aeaSPaulo Zanoni 	 * interrupts will be stored on its back queue, and then we'll be
44244498aeaSPaulo Zanoni 	 * able to process them after we restore SDEIER (as soon as we restore
44344498aeaSPaulo Zanoni 	 * it, we'll get an interrupt if SDEIIR still has something to process
44444498aeaSPaulo Zanoni 	 * due to its back queue). */
445c48a798aSChris Wilson 	if (!HAS_PCH_NOP(i915)) {
446c48a798aSChris Wilson 		sde_ier = raw_reg_read(regs, SDEIER);
447c48a798aSChris Wilson 		raw_reg_write(regs, SDEIER, 0);
448ab5c608bSBen Widawsky 	}
44944498aeaSPaulo Zanoni 
45072c90f62SOscar Mateo 	/* Find, clear, then process each source of interrupt */
45172c90f62SOscar Mateo 
452c48a798aSChris Wilson 	gt_iir = raw_reg_read(regs, GTIIR);
4530e43406bSChris Wilson 	if (gt_iir) {
454c48a798aSChris Wilson 		raw_reg_write(regs, GTIIR, gt_iir);
455651e7d48SLucas De Marchi 		if (GRAPHICS_VER(i915) >= 6)
4562cbc876dSMichał Winiarski 			gen6_gt_irq_handler(to_gt(i915), gt_iir);
457d8fc8a47SPaulo Zanoni 		else
4582cbc876dSMichał Winiarski 			gen5_gt_irq_handler(to_gt(i915), gt_iir);
459c48a798aSChris Wilson 		ret = IRQ_HANDLED;
4600e43406bSChris Wilson 	}
461b1f14ad0SJesse Barnes 
462c48a798aSChris Wilson 	de_iir = raw_reg_read(regs, DEIIR);
4630e43406bSChris Wilson 	if (de_iir) {
464c48a798aSChris Wilson 		raw_reg_write(regs, DEIIR, de_iir);
465373abf1aSMatt Roper 		if (DISPLAY_VER(i915) >= 7)
466c48a798aSChris Wilson 			ivb_display_irq_handler(i915, de_iir);
467f1af8fc1SPaulo Zanoni 		else
468c48a798aSChris Wilson 			ilk_display_irq_handler(i915, de_iir);
4690e43406bSChris Wilson 		ret = IRQ_HANDLED;
470c48a798aSChris Wilson 	}
471c48a798aSChris Wilson 
472651e7d48SLucas De Marchi 	if (GRAPHICS_VER(i915) >= 6) {
473c48a798aSChris Wilson 		u32 pm_iir = raw_reg_read(regs, GEN6_PMIIR);
474c48a798aSChris Wilson 		if (pm_iir) {
475c48a798aSChris Wilson 			raw_reg_write(regs, GEN6_PMIIR, pm_iir);
4762cbc876dSMichał Winiarski 			gen6_rps_irq_handler(&to_gt(i915)->rps, pm_iir);
477c48a798aSChris Wilson 			ret = IRQ_HANDLED;
4780e43406bSChris Wilson 		}
479f1af8fc1SPaulo Zanoni 	}
480b1f14ad0SJesse Barnes 
481c48a798aSChris Wilson 	raw_reg_write(regs, DEIER, de_ier);
482c48a798aSChris Wilson 	if (sde_ier)
483c48a798aSChris Wilson 		raw_reg_write(regs, SDEIER, sde_ier);
484b1f14ad0SJesse Barnes 
4859c6508b9SThomas Gleixner 	pmu_irq_stats(i915, ret);
4869c6508b9SThomas Gleixner 
4871f814dacSImre Deak 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
488c48a798aSChris Wilson 	enable_rpm_wakeref_asserts(&i915->runtime_pm);
4891f814dacSImre Deak 
490b1f14ad0SJesse Barnes 	return ret;
491b1f14ad0SJesse Barnes }
492b1f14ad0SJesse Barnes 
/* Disable the GEN8 master interrupt control and sample the pending sources. */
4934376b9c9SMika Kuoppala static inline u32 gen8_master_intr_disable(void __iomem * const regs)
4944376b9c9SMika Kuoppala {
4954376b9c9SMika Kuoppala 	raw_reg_write(regs, GEN8_MASTER_IRQ, 0);
4964376b9c9SMika Kuoppala 
4974376b9c9SMika Kuoppala 	/*
4984376b9c9SMika Kuoppala 	 * Now with master disabled, get a sample of level indications
4994376b9c9SMika Kuoppala 	 * for this interrupt. Indications will be cleared on related acks.
5004376b9c9SMika Kuoppala 	 * New indications can and will light up during processing,
5014376b9c9SMika Kuoppala 	 * and will generate new interrupt after enabling master.
5024376b9c9SMika Kuoppala 	 */
5034376b9c9SMika Kuoppala 	return raw_reg_read(regs, GEN8_MASTER_IRQ);
5044376b9c9SMika Kuoppala }
5054376b9c9SMika Kuoppala 
/* Re-arm the GEN8 master interrupt control after handling is done. */
5064376b9c9SMika Kuoppala static inline void gen8_master_intr_enable(void __iomem * const regs)
5074376b9c9SMika Kuoppala {
5084376b9c9SMika Kuoppala 	raw_reg_write(regs, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
5094376b9c9SMika Kuoppala }
5104376b9c9SMika Kuoppala 
/*
 * Top-level interrupt handler for GEN8+. Disables the master interrupt,
 * dispatches GT sources, then display/DE sources (under an RPM wakeref
 * assertion window), and re-enables the master interrupt.
 */
511f11a0f46STvrtko Ursulin static irqreturn_t gen8_irq_handler(int irq, void *arg)
512f11a0f46STvrtko Ursulin {
513b318b824SVille Syrjälä 	struct drm_i915_private *dev_priv = arg;
51472e9abc3SJani Nikula 	void __iomem * const regs = intel_uncore_regs(&dev_priv->uncore);
515f11a0f46STvrtko Ursulin 	u32 master_ctl;
516f11a0f46STvrtko Ursulin 
517f11a0f46STvrtko Ursulin 	if (!intel_irqs_enabled(dev_priv))
518f11a0f46STvrtko Ursulin 		return IRQ_NONE;
519f11a0f46STvrtko Ursulin 
5204376b9c9SMika Kuoppala 	master_ctl = gen8_master_intr_disable(regs);
5214376b9c9SMika Kuoppala 	if (!master_ctl) {
5224376b9c9SMika Kuoppala 		gen8_master_intr_enable(regs);
523f11a0f46STvrtko Ursulin 		return IRQ_NONE;
5244376b9c9SMika Kuoppala 	}
525f11a0f46STvrtko Ursulin 
5266cc32f15SChris Wilson 	/* Find, queue (onto bottom-halves), then clear each source */
5272cbc876dSMichał Winiarski 	gen8_gt_irq_handler(to_gt(dev_priv), master_ctl);
528f0fd96f5SChris Wilson 
529f0fd96f5SChris Wilson 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
530f0fd96f5SChris Wilson 	if (master_ctl & ~GEN8_GT_IRQS) {
5319102650fSDaniele Ceraolo Spurio 		disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
53255ef72f2SChris Wilson 		gen8_de_irq_handler(dev_priv, master_ctl);
5339102650fSDaniele Ceraolo Spurio 		enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
534f0fd96f5SChris Wilson 	}
535f11a0f46STvrtko Ursulin 
5364376b9c9SMika Kuoppala 	gen8_master_intr_enable(regs);
537abd58f01SBen Widawsky 
5389c6508b9SThomas Gleixner 	pmu_irq_stats(dev_priv, IRQ_HANDLED);
5399c6508b9SThomas Gleixner 
54055ef72f2SChris Wilson 	return IRQ_HANDLED;
541abd58f01SBen Widawsky }
542abd58f01SBen Widawsky 
/* Disable the GEN11 master interrupt and sample the pending sources. */
54381067b71SMika Kuoppala static inline u32 gen11_master_intr_disable(void __iomem * const regs)
54481067b71SMika Kuoppala {
54581067b71SMika Kuoppala 	raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0);
54681067b71SMika Kuoppala 
54781067b71SMika Kuoppala 	/*
54881067b71SMika Kuoppala 	 * Now with master disabled, get a sample of level indications
54981067b71SMika Kuoppala 	 * for this interrupt. Indications will be cleared on related acks.
55081067b71SMika Kuoppala 	 * New indications can and will light up during processing,
55181067b71SMika Kuoppala 	 * and will generate new interrupt after enabling master.
55281067b71SMika Kuoppala 	 */
55381067b71SMika Kuoppala 	return raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
55481067b71SMika Kuoppala }
55581067b71SMika Kuoppala 
/* Re-arm the GEN11 master interrupt after handling is done. */
55681067b71SMika Kuoppala static inline void gen11_master_intr_enable(void __iomem * const regs)
55781067b71SMika Kuoppala {
55881067b71SMika Kuoppala 	raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ);
55981067b71SMika Kuoppala }
56081067b71SMika Kuoppala 
/*
 * Top-level interrupt handler for GEN11+. Disables the master interrupt,
 * dispatches GT sources, the display interrupt (if GEN11_DISPLAY_IRQ is
 * set), and GU misc sources, then re-enables the master interrupt.
 */
56122e26af7SPaulo Zanoni static irqreturn_t gen11_irq_handler(int irq, void *arg)
56251951ae7SMika Kuoppala {
56322e26af7SPaulo Zanoni 	struct drm_i915_private *i915 = arg;
56472e9abc3SJani Nikula 	void __iomem * const regs = intel_uncore_regs(&i915->uncore);
5652cbc876dSMichał Winiarski 	struct intel_gt *gt = to_gt(i915);
56651951ae7SMika Kuoppala 	u32 master_ctl;
567df0d28c1SDhinakaran Pandiyan 	u32 gu_misc_iir;
56851951ae7SMika Kuoppala 
56951951ae7SMika Kuoppala 	if (!intel_irqs_enabled(i915))
57051951ae7SMika Kuoppala 		return IRQ_NONE;
57151951ae7SMika Kuoppala 
57222e26af7SPaulo Zanoni 	master_ctl = gen11_master_intr_disable(regs);
57381067b71SMika Kuoppala 	if (!master_ctl) {
57422e26af7SPaulo Zanoni 		gen11_master_intr_enable(regs);
57551951ae7SMika Kuoppala 		return IRQ_NONE;
57681067b71SMika Kuoppala 	}
57751951ae7SMika Kuoppala 
5786cc32f15SChris Wilson 	/* Find, queue (onto bottom-halves), then clear each source */
5799b77011eSTvrtko Ursulin 	gen11_gt_irq_handler(gt, master_ctl);
58051951ae7SMika Kuoppala 
58151951ae7SMika Kuoppala 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
582a3265d85SMatt Roper 	if (master_ctl & GEN11_DISPLAY_IRQ)
583a3265d85SMatt Roper 		gen11_display_irq_handler(i915);
58451951ae7SMika Kuoppala 
585ddcf980fSAnusha Srivatsa 	gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl);
586df0d28c1SDhinakaran Pandiyan 
58722e26af7SPaulo Zanoni 	gen11_master_intr_enable(regs);
58851951ae7SMika Kuoppala 
589ddcf980fSAnusha Srivatsa 	gen11_gu_misc_irq_handler(i915, gu_misc_iir);
590df0d28c1SDhinakaran Pandiyan 
5919c6508b9SThomas Gleixner 	pmu_irq_stats(i915, IRQ_HANDLED);
5929c6508b9SThomas Gleixner 
59351951ae7SMika Kuoppala 	return IRQ_HANDLED;
59451951ae7SMika Kuoppala }
59551951ae7SMika Kuoppala 
/*
 * Disable the DG1 tile-level master interrupt, sample which tiles have
 * pending interrupts, and ack them at the master unit. Returns 0 when
 * nothing is pending (master is left disabled; caller re-enables).
 */
59622e26af7SPaulo Zanoni static inline u32 dg1_master_intr_disable(void __iomem * const regs)
59797b492f5SLucas De Marchi {
59897b492f5SLucas De Marchi 	u32 val;
59997b492f5SLucas De Marchi 
60097b492f5SLucas De Marchi 	/* First disable interrupts */
60122e26af7SPaulo Zanoni 	raw_reg_write(regs, DG1_MSTR_TILE_INTR, 0);
60297b492f5SLucas De Marchi 
60397b492f5SLucas De Marchi 	/* Get the indication levels and ack the master unit */
60422e26af7SPaulo Zanoni 	val = raw_reg_read(regs, DG1_MSTR_TILE_INTR);
60597b492f5SLucas De Marchi 	if (unlikely(!val))
60697b492f5SLucas De Marchi 		return 0;
60797b492f5SLucas De Marchi 
	/* Write-back of the read value acks the indicated tile bits. */
60822e26af7SPaulo Zanoni 	raw_reg_write(regs, DG1_MSTR_TILE_INTR, val);
60997b492f5SLucas De Marchi 
61097b492f5SLucas De Marchi 	return val;
61197b492f5SLucas De Marchi }
61297b492f5SLucas De Marchi 
/* Re-enable the DG1 tile-level master interrupt. */
61397b492f5SLucas De Marchi static inline void dg1_master_intr_enable(void __iomem * const regs)
61497b492f5SLucas De Marchi {
61522e26af7SPaulo Zanoni 	raw_reg_write(regs, DG1_MSTR_TILE_INTR, DG1_MSTR_IRQ);
61697b492f5SLucas De Marchi }
61797b492f5SLucas De Marchi 
/*
 * Top-level interrupt handler for DG1-style multi-tile platforms.
 * Adds a tile-indirection layer on top of the gen11 flow: first the tile
 * master register says which tile fired, then that tile's GFX_MSTR_IRQ is
 * read and acked before dispatching GT/display/GU_MISC sources.
 */
61897b492f5SLucas De Marchi static irqreturn_t dg1_irq_handler(int irq, void *arg)
61997b492f5SLucas De Marchi {
62022e26af7SPaulo Zanoni 	struct drm_i915_private * const i915 = arg;
6212cbc876dSMichał Winiarski 	struct intel_gt *gt = to_gt(i915);
62272e9abc3SJani Nikula 	void __iomem * const regs = intel_uncore_regs(gt->uncore);
62322e26af7SPaulo Zanoni 	u32 master_tile_ctl, master_ctl;
62422e26af7SPaulo Zanoni 	u32 gu_misc_iir;
62522e26af7SPaulo Zanoni 
62622e26af7SPaulo Zanoni 	if (!intel_irqs_enabled(i915))
62722e26af7SPaulo Zanoni 		return IRQ_NONE;
62822e26af7SPaulo Zanoni 
62922e26af7SPaulo Zanoni 	master_tile_ctl = dg1_master_intr_disable(regs);
63022e26af7SPaulo Zanoni 	if (!master_tile_ctl) {
63122e26af7SPaulo Zanoni 		dg1_master_intr_enable(regs);
63222e26af7SPaulo Zanoni 		return IRQ_NONE;
63322e26af7SPaulo Zanoni 	}
63422e26af7SPaulo Zanoni 
63522e26af7SPaulo Zanoni 	/* FIXME: we only support tile 0 for now. */
63622e26af7SPaulo Zanoni 	if (master_tile_ctl & DG1_MSTR_TILE(0)) {
		/* Read-then-writeback acks tile 0's per-source master bits. */
63722e26af7SPaulo Zanoni 		master_ctl = raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
63822e26af7SPaulo Zanoni 		raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, master_ctl);
63922e26af7SPaulo Zanoni 	} else {
640a10234fdSTvrtko Ursulin 		drm_err(&i915->drm, "Tile not supported: 0x%08x\n",
641a10234fdSTvrtko Ursulin 			master_tile_ctl);
64222e26af7SPaulo Zanoni 		dg1_master_intr_enable(regs);
64322e26af7SPaulo Zanoni 		return IRQ_NONE;
64422e26af7SPaulo Zanoni 	}
64522e26af7SPaulo Zanoni 
64622e26af7SPaulo Zanoni 	gen11_gt_irq_handler(gt, master_ctl);
64722e26af7SPaulo Zanoni 
64822e26af7SPaulo Zanoni 	if (master_ctl & GEN11_DISPLAY_IRQ)
64922e26af7SPaulo Zanoni 		gen11_display_irq_handler(i915);
65022e26af7SPaulo Zanoni 
651ddcf980fSAnusha Srivatsa 	gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl);
65222e26af7SPaulo Zanoni 
65322e26af7SPaulo Zanoni 	dg1_master_intr_enable(regs);
65422e26af7SPaulo Zanoni 
	/* GU_MISC was already acked above; handling may run after re-enable. */
655ddcf980fSAnusha Srivatsa 	gen11_gu_misc_irq_handler(i915, gu_misc_iir);
65622e26af7SPaulo Zanoni 
65722e26af7SPaulo Zanoni 	pmu_irq_stats(i915, IRQ_HANDLED);
65822e26af7SPaulo Zanoni 
65922e26af7SPaulo Zanoni 	return IRQ_HANDLED;
66097b492f5SLucas De Marchi }
66197b492f5SLucas De Marchi 
/*
 * Reset the south display engine (PCH / SDE) interrupt registers.
 * No-op on PCH_NOP platforms; on CPT/LPT also clears the sticky SERR_INT
 * transcoder error bits by writing all-ones.
 */
662b243f530STvrtko Ursulin static void ibx_irq_reset(struct drm_i915_private *dev_priv)
66391738a95SPaulo Zanoni {
664b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
665b16b2a2fSPaulo Zanoni 
6666e266956STvrtko Ursulin 	if (HAS_PCH_NOP(dev_priv))
66791738a95SPaulo Zanoni 		return;
66891738a95SPaulo Zanoni 
669b16b2a2fSPaulo Zanoni 	GEN3_IRQ_RESET(uncore, SDE);
670105b122eSPaulo Zanoni 
6716e266956STvrtko Ursulin 	if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
6722939eb06SJani Nikula 		intel_uncore_write(&dev_priv->uncore, SERR_INT, 0xffffffff);
673622364b6SPaulo Zanoni }
674105b122eSPaulo Zanoni 
6758bb61306SVille Syrjälä /* drm_dma.h hooks
6768bb61306SVille Syrjälä */
/*
 * Full interrupt reset for Ironlake-era (gen5-7) platforms: display engine
 * (DE), per-gen error registers, Haswell PSR, GT, and the PCH (SDE) block.
 */
6779eae5e27SLucas De Marchi static void ilk_irq_reset(struct drm_i915_private *dev_priv)
6788bb61306SVille Syrjälä {
679b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
6808bb61306SVille Syrjälä 
681b16b2a2fSPaulo Zanoni 	GEN3_IRQ_RESET(uncore, DE);
	/* All sources masked until postinstall unmasks the wanted ones. */
682e44adb5dSChris Wilson 	dev_priv->irq_mask = ~0u;
683e44adb5dSChris Wilson 
684651e7d48SLucas De Marchi 	if (GRAPHICS_VER(dev_priv) == 7)
685f0818984STvrtko Ursulin 		intel_uncore_write(uncore, GEN7_ERR_INT, 0xffffffff);
6868bb61306SVille Syrjälä 
	/* HSW has a dedicated PSR interrupt pair; mask and clear it. */
687fc340442SDaniel Vetter 	if (IS_HASWELL(dev_priv)) {
688f0818984STvrtko Ursulin 		intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
689f0818984STvrtko Ursulin 		intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
690fc340442SDaniel Vetter 	}
691fc340442SDaniel Vetter 
6922cbc876dSMichał Winiarski 	gen5_gt_irq_reset(to_gt(dev_priv));
6938bb61306SVille Syrjälä 
694b243f530STvrtko Ursulin 	ibx_irq_reset(dev_priv);
6958bb61306SVille Syrjälä }
6968bb61306SVille Syrjälä 
/*
 * Interrupt reset for Valleyview: kill the master enable first (posting
 * read flushes the write), then reset GT and - under irq_lock, and only
 * if display irqs were enabled - the VLV display interrupt state.
 */
697b318b824SVille Syrjälä static void valleyview_irq_reset(struct drm_i915_private *dev_priv)
6987e231dbeSJesse Barnes {
6992939eb06SJani Nikula 	intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, 0);
7002939eb06SJani Nikula 	intel_uncore_posting_read(&dev_priv->uncore, VLV_MASTER_IER);
70134c7b8a7SVille Syrjälä 
7022cbc876dSMichał Winiarski 	gen5_gt_irq_reset(to_gt(dev_priv));
7037e231dbeSJesse Barnes 
704ad22d106SVille Syrjälä 	spin_lock_irq(&dev_priv->irq_lock);
7059918271eSVille Syrjälä 	if (dev_priv->display_irqs_enabled)
70670591a41SVille Syrjälä 		vlv_display_irq_reset(dev_priv);
707ad22d106SVille Syrjälä 	spin_unlock_irq(&dev_priv->irq_lock);
7087e231dbeSJesse Barnes }
7097e231dbeSJesse Barnes 
/*
 * Full interrupt reset for gen8 (Broadwell-class): disable the master
 * interrupt, then reset GT, display, PCU, and - on PCH platforms - the
 * south display engine.
 */
710a844cfbeSJosé Roberto de Souza static void gen8_irq_reset(struct drm_i915_private *dev_priv)
711a844cfbeSJosé Roberto de Souza {
712a844cfbeSJosé Roberto de Souza 	struct intel_uncore *uncore = &dev_priv->uncore;
713a844cfbeSJosé Roberto de Souza 
71472e9abc3SJani Nikula 	gen8_master_intr_disable(intel_uncore_regs(uncore));
715a844cfbeSJosé Roberto de Souza 
7162cbc876dSMichał Winiarski 	gen8_gt_irq_reset(to_gt(dev_priv));
717a844cfbeSJosé Roberto de Souza 	gen8_display_irq_reset(dev_priv);
718b16b2a2fSPaulo Zanoni 	GEN3_IRQ_RESET(uncore, GEN8_PCU_);
719abd58f01SBen Widawsky 
7206e266956STvrtko Ursulin 	if (HAS_PCH_SPLIT(dev_priv))
721b243f530STvrtko Ursulin 		ibx_irq_reset(dev_priv);
72259b7cb44STejas Upadhyay 
723abd58f01SBen Widawsky }
724abd58f01SBen Widawsky 
/*
 * Full interrupt reset for gen11 (Icelake-class): master disable, then
 * GT, display, GU_MISC and PCU register banks.
 * NOTE(review): gt->uncore and &dev_priv->uncore are used interchangeably
 * here - presumably they alias on single-GT gen11 parts; confirm.
 */
725a3265d85SMatt Roper static void gen11_irq_reset(struct drm_i915_private *dev_priv)
726a3265d85SMatt Roper {
7272cbc876dSMichał Winiarski 	struct intel_gt *gt = to_gt(dev_priv);
728fd4d7904SPaulo Zanoni 	struct intel_uncore *uncore = gt->uncore;
729a3265d85SMatt Roper 
73072e9abc3SJani Nikula 	gen11_master_intr_disable(intel_uncore_regs(&dev_priv->uncore));
731a3265d85SMatt Roper 
732fd4d7904SPaulo Zanoni 	gen11_gt_irq_reset(gt);
733a3265d85SMatt Roper 	gen11_display_irq_reset(dev_priv);
734a3265d85SMatt Roper 
735a3265d85SMatt Roper 	GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_);
736a3265d85SMatt Roper 	GEN3_IRQ_RESET(uncore, GEN8_PCU_);
737a3265d85SMatt Roper }
738a3265d85SMatt Roper 
/*
 * Full interrupt reset for DG1-style multi-tile platforms: tile master
 * disable, per-GT reset across all tiles, then display, GU_MISC and PCU.
 */
73922e26af7SPaulo Zanoni static void dg1_irq_reset(struct drm_i915_private *dev_priv)
74022e26af7SPaulo Zanoni {
741d1f3b5e9SAndi Shyti 	struct intel_uncore *uncore = &dev_priv->uncore;
742d1f3b5e9SAndi Shyti 	struct intel_gt *gt;
743d1f3b5e9SAndi Shyti 	unsigned int i;
74422e26af7SPaulo Zanoni 
74572e9abc3SJani Nikula 	dg1_master_intr_disable(intel_uncore_regs(&dev_priv->uncore));
74622e26af7SPaulo Zanoni 
	/* Every GT (tile) gets the gen11-style GT interrupt reset. */
747d1f3b5e9SAndi Shyti 	for_each_gt(gt, dev_priv, i)
748fd4d7904SPaulo Zanoni 		gen11_gt_irq_reset(gt);
749d1f3b5e9SAndi Shyti 
75022e26af7SPaulo Zanoni 	gen11_display_irq_reset(dev_priv);
75122e26af7SPaulo Zanoni 
75222e26af7SPaulo Zanoni 	GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_);
75322e26af7SPaulo Zanoni 	GEN3_IRQ_RESET(uncore, GEN8_PCU_);
75422e26af7SPaulo Zanoni }
75522e26af7SPaulo Zanoni 
/*
 * Interrupt reset for Cherryview: gen8-style master disable and GT reset
 * combined with VLV-style display interrupt reset under irq_lock.
 */
756b318b824SVille Syrjälä static void cherryview_irq_reset(struct drm_i915_private *dev_priv)
75743f328d7SVille Syrjälä {
758b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
75943f328d7SVille Syrjälä 
760e58c2cacSAndrzej Hajda 	intel_uncore_write(uncore, GEN8_MASTER_IRQ, 0);
	/* Posting read flushes the master-disable before touching sources. */
7612939eb06SJani Nikula 	intel_uncore_posting_read(&dev_priv->uncore, GEN8_MASTER_IRQ);
76243f328d7SVille Syrjälä 
7632cbc876dSMichał Winiarski 	gen8_gt_irq_reset(to_gt(dev_priv));
76443f328d7SVille Syrjälä 
765b16b2a2fSPaulo Zanoni 	GEN3_IRQ_RESET(uncore, GEN8_PCU_);
76643f328d7SVille Syrjälä 
767ad22d106SVille Syrjälä 	spin_lock_irq(&dev_priv->irq_lock);
7689918271eSVille Syrjälä 	if (dev_priv->display_irqs_enabled)
76970591a41SVille Syrjälä 		vlv_display_irq_reset(dev_priv);
770ad22d106SVille Syrjälä 	spin_unlock_irq(&dev_priv->irq_lock);
77143f328d7SVille Syrjälä }
77243f328d7SVille Syrjälä 
/* Ironlake interrupt postinstall: enable GT then display engine irqs. */
7739eae5e27SLucas De Marchi static void ilk_irq_postinstall(struct drm_i915_private *dev_priv)
774036a4a7dSZhenyu Wang {
7752cbc876dSMichał Winiarski 	gen5_gt_irq_postinstall(to_gt(dev_priv));
776a9922912SVille Syrjälä 
777*fcc02c75SJani Nikula 	ilk_de_irq_postinstall(dev_priv);
778036a4a7dSZhenyu Wang }
779036a4a7dSZhenyu Wang 
/*
 * Valleyview interrupt postinstall: GT first, then display (under
 * irq_lock, only when display irqs are enabled), and finally the master
 * enable - master must go last so no source fires half-configured.
 */
780b318b824SVille Syrjälä static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv)
7810e6c9a9eSVille Syrjälä {
7822cbc876dSMichał Winiarski 	gen5_gt_irq_postinstall(to_gt(dev_priv));
7837e231dbeSJesse Barnes 
784ad22d106SVille Syrjälä 	spin_lock_irq(&dev_priv->irq_lock);
7859918271eSVille Syrjälä 	if (dev_priv->display_irqs_enabled)
786ad22d106SVille Syrjälä 		vlv_display_irq_postinstall(dev_priv);
787ad22d106SVille Syrjälä 	spin_unlock_irq(&dev_priv->irq_lock);
788ad22d106SVille Syrjälä 
7892939eb06SJani Nikula 	intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
7902939eb06SJani Nikula 	intel_uncore_posting_read(&dev_priv->uncore, VLV_MASTER_IER);
79120afbda2SDaniel Vetter }
79220afbda2SDaniel Vetter 
/*
 * Gen8 interrupt postinstall: PCH (ICP or IBX flavour), GT, display, and
 * master enable last.
 */
793b318b824SVille Syrjälä static void gen8_irq_postinstall(struct drm_i915_private *dev_priv)
794abd58f01SBen Widawsky {
79559b7cb44STejas Upadhyay 	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
79659b7cb44STejas Upadhyay 		icp_irq_postinstall(dev_priv);
79759b7cb44STejas Upadhyay 	else if (HAS_PCH_SPLIT(dev_priv))
798a0a6d8cbSVille Syrjälä 		ibx_irq_postinstall(dev_priv);
799622364b6SPaulo Zanoni 
8002cbc876dSMichał Winiarski 	gen8_gt_irq_postinstall(to_gt(dev_priv));
801abd58f01SBen Widawsky 	gen8_de_irq_postinstall(dev_priv);
802abd58f01SBen Widawsky 
80372e9abc3SJani Nikula 	gen8_master_intr_enable(intel_uncore_regs(&dev_priv->uncore));
804abd58f01SBen Widawsky }
805abd58f01SBen Widawsky 
/*
 * Gen11 interrupt postinstall: ICP PCH (if present), GT, display, GU_MISC
 * (only GSE unmasked), then master enable with a posting read to flush.
 */
806b318b824SVille Syrjälä static void gen11_irq_postinstall(struct drm_i915_private *dev_priv)
80751951ae7SMika Kuoppala {
8082cbc876dSMichał Winiarski 	struct intel_gt *gt = to_gt(dev_priv);
809fd4d7904SPaulo Zanoni 	struct intel_uncore *uncore = gt->uncore;
810df0d28c1SDhinakaran Pandiyan 	u32 gu_misc_masked = GEN11_GU_MISC_GSE;
81151951ae7SMika Kuoppala 
81229b43ae2SRodrigo Vivi 	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
813b318b824SVille Syrjälä 		icp_irq_postinstall(dev_priv);
81431604222SAnusha Srivatsa 
815fd4d7904SPaulo Zanoni 	gen11_gt_irq_postinstall(gt);
816a844cfbeSJosé Roberto de Souza 	gen11_de_irq_postinstall(dev_priv);
81751951ae7SMika Kuoppala 
818b16b2a2fSPaulo Zanoni 	GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);
819df0d28c1SDhinakaran Pandiyan 
82072e9abc3SJani Nikula 	gen11_master_intr_enable(intel_uncore_regs(uncore));
8212939eb06SJani Nikula 	intel_uncore_posting_read(&dev_priv->uncore, GEN11_GFX_MSTR_IRQ);
82251951ae7SMika Kuoppala }
82322e26af7SPaulo Zanoni 
/*
 * DG1 interrupt postinstall: per-tile GT enable, GU_MISC (GSE only),
 * display, then the tile-level master enable with a flushing posting read.
 */
82422e26af7SPaulo Zanoni static void dg1_irq_postinstall(struct drm_i915_private *dev_priv)
82522e26af7SPaulo Zanoni {
826d1f3b5e9SAndi Shyti 	struct intel_uncore *uncore = &dev_priv->uncore;
82722e26af7SPaulo Zanoni 	u32 gu_misc_masked = GEN11_GU_MISC_GSE;
828d1f3b5e9SAndi Shyti 	struct intel_gt *gt;
829d1f3b5e9SAndi Shyti 	unsigned int i;
83022e26af7SPaulo Zanoni 
831d1f3b5e9SAndi Shyti 	for_each_gt(gt, dev_priv, i)
832fd4d7904SPaulo Zanoni 		gen11_gt_irq_postinstall(gt);
83322e26af7SPaulo Zanoni 
83422e26af7SPaulo Zanoni 	GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);
83522e26af7SPaulo Zanoni 
8361007337fSJani Nikula 	dg1_de_irq_postinstall(dev_priv);
83722e26af7SPaulo Zanoni 
83872e9abc3SJani Nikula 	dg1_master_intr_enable(intel_uncore_regs(uncore));
839fd4d7904SPaulo Zanoni 	intel_uncore_posting_read(uncore, DG1_MSTR_TILE_INTR);
84097b492f5SLucas De Marchi }
84151951ae7SMika Kuoppala 
/*
 * Cherryview interrupt postinstall: gen8-style GT enable, VLV-style
 * display enable under irq_lock, then the gen8 master enable (flushed by
 * a posting read).
 */
842b318b824SVille Syrjälä static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv)
84343f328d7SVille Syrjälä {
8442cbc876dSMichał Winiarski 	gen8_gt_irq_postinstall(to_gt(dev_priv));
84543f328d7SVille Syrjälä 
846ad22d106SVille Syrjälä 	spin_lock_irq(&dev_priv->irq_lock);
8479918271eSVille Syrjälä 	if (dev_priv->display_irqs_enabled)
848ad22d106SVille Syrjälä 		vlv_display_irq_postinstall(dev_priv);
849ad22d106SVille Syrjälä 	spin_unlock_irq(&dev_priv->irq_lock);
850ad22d106SVille Syrjälä 
8512939eb06SJani Nikula 	intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
8522939eb06SJani Nikula 	intel_uncore_posting_read(&dev_priv->uncore, GEN8_MASTER_IRQ);
85343f328d7SVille Syrjälä }
85443f328d7SVille Syrjälä 
/* Interrupt reset for gen2 (i8xx): pipestat then the gen2 IMR/IER/IIR set. */
855b318b824SVille Syrjälä static void i8xx_irq_reset(struct drm_i915_private *dev_priv)
856c2798b19SChris Wilson {
857b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
858c2798b19SChris Wilson 
85944d9241eSVille Syrjälä 	i9xx_pipestat_irq_reset(dev_priv);
86044d9241eSVille Syrjälä 
861ad7632ffSJani Nikula 	gen2_irq_reset(uncore);
	/* All sources masked until postinstall unmasks the wanted ones. */
862e44adb5dSChris Wilson 	dev_priv->irq_mask = ~0u;
863c2798b19SChris Wilson }
864c2798b19SChris Wilson 
/*
 * Return the EMR (error mask register) value for gen2/3: which hardware
 * error sources to leave unmasked. FBC platforms must mask all page table
 * errors (see comment below); others only keep refresh + page table errors.
 */
8653687ce75SVille Syrjälä static u32 i9xx_error_mask(struct drm_i915_private *i915)
8663687ce75SVille Syrjälä {
867e7e12f6eSVille Syrjälä 	/*
868e7e12f6eSVille Syrjälä 	 * On gen2/3 FBC generates (seemingly spurious)
869e7e12f6eSVille Syrjälä 	 * display INVALID_GTT/INVALID_GTT_PTE table errors.
870e7e12f6eSVille Syrjälä 	 *
871e7e12f6eSVille Syrjälä 	 * Also gen3 bspec has this to say:
872e7e12f6eSVille Syrjälä 	 * "DISPA_INVALID_GTT_PTE
873e7e12f6eSVille Syrjälä 	 "  [DevNapa] : Reserved. This bit does not reflect the page
874e7e12f6eSVille Syrjälä 	 "              table error for the display plane A."
875e7e12f6eSVille Syrjälä 	 *
876e7e12f6eSVille Syrjälä 	 * Unfortunately we can't mask off individual PGTBL_ER bits,
877e7e12f6eSVille Syrjälä 	 * so we just have to mask off all page table errors via EMR.
878e7e12f6eSVille Syrjälä 	 */
879e7e12f6eSVille Syrjälä 	if (HAS_FBC(i915))
880e7e12f6eSVille Syrjälä 		return ~I915_ERROR_MEMORY_REFRESH;
881e7e12f6eSVille Syrjälä 	else
8823687ce75SVille Syrjälä 		return ~(I915_ERROR_PAGE_TABLE |
8833687ce75SVille Syrjälä 			 I915_ERROR_MEMORY_REFRESH);
8843687ce75SVille Syrjälä }
8853687ce75SVille Syrjälä 
/*
 * Gen2 (i8xx) interrupt postinstall: program the 16-bit EMR, unmask the
 * always-wanted sources (pipe events + master error) in IMR, enable those
 * plus user interrupts in IER, and turn on CRC-done pipestats.
 */
886b318b824SVille Syrjälä static void i8xx_irq_postinstall(struct drm_i915_private *dev_priv)
887c2798b19SChris Wilson {
888b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
889e9e9848aSVille Syrjälä 	u16 enable_mask;
890c2798b19SChris Wilson 
8913687ce75SVille Syrjälä 	intel_uncore_write16(uncore, EMR, i9xx_error_mask(dev_priv));
892c2798b19SChris Wilson 
893c2798b19SChris Wilson 	/* Unmask the interrupts that we always want on. */
894c2798b19SChris Wilson 	dev_priv->irq_mask =
895c2798b19SChris Wilson 		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
89616659bc5SVille Syrjälä 		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
89716659bc5SVille Syrjälä 		  I915_MASTER_ERROR_INTERRUPT);
898c2798b19SChris Wilson 
899e9e9848aSVille Syrjälä 	enable_mask =
900c2798b19SChris Wilson 		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
901c2798b19SChris Wilson 		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
90216659bc5SVille Syrjälä 		I915_MASTER_ERROR_INTERRUPT |
903e9e9848aSVille Syrjälä 		I915_USER_INTERRUPT;
904e9e9848aSVille Syrjälä 
905ad7632ffSJani Nikula 	gen2_irq_init(uncore, dev_priv->irq_mask, enable_mask);
906c2798b19SChris Wilson 
907379ef82dSDaniel Vetter 	/* Interrupt setup is already guaranteed to be single-threaded, this is
908379ef82dSDaniel Vetter 	 * just to make the assert_spin_locked check happy. */
909d6207435SDaniel Vetter 	spin_lock_irq(&dev_priv->irq_lock);
910755e9019SImre Deak 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
911755e9019SImre Deak 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
912d6207435SDaniel Vetter 	spin_unlock_irq(&dev_priv->irq_lock);
913c2798b19SChris Wilson }
914c2798b19SChris Wilson 
/*
 * Read and clear the 16-bit EIR, reporting both the raw error bits (*eir)
 * and any bits that refused to clear (*eir_stuck). Stuck bits get masked
 * in EMR so they stop retriggering the master error interrupt.
 */
9154f5fd91fSTvrtko Ursulin static void i8xx_error_irq_ack(struct drm_i915_private *i915,
91678c357ddSVille Syrjälä 			       u16 *eir, u16 *eir_stuck)
91778c357ddSVille Syrjälä {
9184f5fd91fSTvrtko Ursulin 	struct intel_uncore *uncore = &i915->uncore;
91978c357ddSVille Syrjälä 	u16 emr;
92078c357ddSVille Syrjälä 
	/* Writing the read value back clears the clearable EIR bits. */
9214f5fd91fSTvrtko Ursulin 	*eir = intel_uncore_read16(uncore, EIR);
9224f5fd91fSTvrtko Ursulin 	intel_uncore_write16(uncore, EIR, *eir);
92378c357ddSVille Syrjälä 
9244f5fd91fSTvrtko Ursulin 	*eir_stuck = intel_uncore_read16(uncore, EIR);
92578c357ddSVille Syrjälä 	if (*eir_stuck == 0)
92678c357ddSVille Syrjälä 		return;
92778c357ddSVille Syrjälä 
92878c357ddSVille Syrjälä 	/*
92978c357ddSVille Syrjälä 	 * Toggle all EMR bits to make sure we get an edge
93078c357ddSVille Syrjälä 	 * in the ISR master error bit if we don't clear
93178c357ddSVille Syrjälä 	 * all the EIR bits. Otherwise the edge triggered
93278c357ddSVille Syrjälä 	 * IIR on i965/g4x wouldn't notice that an interrupt
93378c357ddSVille Syrjälä 	 * is still pending. Also some EIR bits can't be
93478c357ddSVille Syrjälä 	 * cleared except by handling the underlying error
93578c357ddSVille Syrjälä 	 * (or by a GPU reset) so we mask any bit that
93678c357ddSVille Syrjälä 	 * remains set.
93778c357ddSVille Syrjälä 	 */
9384f5fd91fSTvrtko Ursulin 	emr = intel_uncore_read16(uncore, EMR);
9394f5fd91fSTvrtko Ursulin 	intel_uncore_write16(uncore, EMR, 0xffff);
9404f5fd91fSTvrtko Ursulin 	intel_uncore_write16(uncore, EMR, emr | *eir_stuck);
94178c357ddSVille Syrjälä }
94278c357ddSVille Syrjälä 
/* Log a gen2 master error: EIR contents, stuck bits, and PGTBL_ER. */
94378c357ddSVille Syrjälä static void i8xx_error_irq_handler(struct drm_i915_private *dev_priv,
94478c357ddSVille Syrjälä 				   u16 eir, u16 eir_stuck)
94578c357ddSVille Syrjälä {
946a10234fdSTvrtko Ursulin 	drm_dbg(&dev_priv->drm, "Master Error: EIR 0x%04x\n", eir);
94778c357ddSVille Syrjälä 
94878c357ddSVille Syrjälä 	if (eir_stuck)
94900376ccfSWambui Karuga 		drm_dbg(&dev_priv->drm, "EIR stuck: 0x%04x, masked\n",
95000376ccfSWambui Karuga 			eir_stuck);
951d1e89592SVille Syrjälä 
952d1e89592SVille Syrjälä 	drm_dbg(&dev_priv->drm, "PGTBL_ER: 0x%08x\n",
953d1e89592SVille Syrjälä 		intel_uncore_read(&dev_priv->uncore, PGTBL_ER));
95478c357ddSVille Syrjälä }
95578c357ddSVille Syrjälä 
/*
 * 32-bit variant of i8xx_error_irq_ack(): read/clear EIR, report raw and
 * stuck bits, and mask stuck bits in EMR (with a full toggle to force an
 * ISR edge for the edge-triggered i965/g4x IIR).
 */
95678c357ddSVille Syrjälä static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv,
95778c357ddSVille Syrjälä 			       u32 *eir, u32 *eir_stuck)
95878c357ddSVille Syrjälä {
95978c357ddSVille Syrjälä 	u32 emr;
96078c357ddSVille Syrjälä 
961839259b8SVille Syrjälä 	*eir = intel_uncore_read(&dev_priv->uncore, EIR);
962839259b8SVille Syrjälä 	intel_uncore_write(&dev_priv->uncore, EIR, *eir);
96378c357ddSVille Syrjälä 
9642939eb06SJani Nikula 	*eir_stuck = intel_uncore_read(&dev_priv->uncore, EIR);
96578c357ddSVille Syrjälä 	if (*eir_stuck == 0)
96678c357ddSVille Syrjälä 		return;
96778c357ddSVille Syrjälä 
96878c357ddSVille Syrjälä 	/*
96978c357ddSVille Syrjälä 	 * Toggle all EMR bits to make sure we get an edge
97078c357ddSVille Syrjälä 	 * in the ISR master error bit if we don't clear
97178c357ddSVille Syrjälä 	 * all the EIR bits. Otherwise the edge triggered
97278c357ddSVille Syrjälä 	 * IIR on i965/g4x wouldn't notice that an interrupt
97378c357ddSVille Syrjälä 	 * is still pending. Also some EIR bits can't be
97478c357ddSVille Syrjälä 	 * cleared except by handling the underlying error
97578c357ddSVille Syrjälä 	 * (or by a GPU reset) so we mask any bit that
97678c357ddSVille Syrjälä 	 * remains set.
97778c357ddSVille Syrjälä 	 */
978839259b8SVille Syrjälä 	emr = intel_uncore_read(&dev_priv->uncore, EMR);
979839259b8SVille Syrjälä 	intel_uncore_write(&dev_priv->uncore, EMR, 0xffffffff);
9802939eb06SJani Nikula 	intel_uncore_write(&dev_priv->uncore, EMR, emr | *eir_stuck);
98178c357ddSVille Syrjälä }
98278c357ddSVille Syrjälä 
/* Log a gen3+ master error: EIR contents, stuck bits, and PGTBL_ER. */
98378c357ddSVille Syrjälä static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv,
98478c357ddSVille Syrjälä 				   u32 eir, u32 eir_stuck)
98578c357ddSVille Syrjälä {
986a10234fdSTvrtko Ursulin 	drm_dbg(&dev_priv->drm, "Master Error, EIR 0x%08x\n", eir);
98778c357ddSVille Syrjälä 
98878c357ddSVille Syrjälä 	if (eir_stuck)
98900376ccfSWambui Karuga 		drm_dbg(&dev_priv->drm, "EIR stuck: 0x%08x, masked\n",
99000376ccfSWambui Karuga 			eir_stuck);
991d1e89592SVille Syrjälä 
992d1e89592SVille Syrjälä 	drm_dbg(&dev_priv->drm, "PGTBL_ER: 0x%08x\n",
993d1e89592SVille Syrjälä 		intel_uncore_read(&dev_priv->uncore, PGTBL_ER));
99478c357ddSVille Syrjälä }
99578c357ddSVille Syrjälä 
/*
 * Gen2 (i8xx) interrupt handler. Single IIR pass (the do/while runs once):
 * ack pipestats and errors before clearing IIR, then run the per-source
 * handlers. Ack-before-handle ordering avoids losing events that arrive
 * while processing.
 */
996ff1f525eSDaniel Vetter static irqreturn_t i8xx_irq_handler(int irq, void *arg)
997c2798b19SChris Wilson {
998b318b824SVille Syrjälä 	struct drm_i915_private *dev_priv = arg;
999af722d28SVille Syrjälä 	irqreturn_t ret = IRQ_NONE;
1000c2798b19SChris Wilson 
10012dd2a883SImre Deak 	if (!intel_irqs_enabled(dev_priv))
10022dd2a883SImre Deak 		return IRQ_NONE;
10032dd2a883SImre Deak 
10041f814dacSImre Deak 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
10059102650fSDaniele Ceraolo Spurio 	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
10061f814dacSImre Deak 
1007af722d28SVille Syrjälä 	do {
1008af722d28SVille Syrjälä 		u32 pipe_stats[I915_MAX_PIPES] = {};
100978c357ddSVille Syrjälä 		u16 eir = 0, eir_stuck = 0;
1010af722d28SVille Syrjälä 		u16 iir;
1011af722d28SVille Syrjälä 
10124f5fd91fSTvrtko Ursulin 		iir = intel_uncore_read16(&dev_priv->uncore, GEN2_IIR);
1013c2798b19SChris Wilson 		if (iir == 0)
1014af722d28SVille Syrjälä 			break;
1015c2798b19SChris Wilson 
1016af722d28SVille Syrjälä 		ret = IRQ_HANDLED;
1017c2798b19SChris Wilson 
1018eb64343cSVille Syrjälä 		/* Call regardless, as some status bits might not be
1019eb64343cSVille Syrjälä 		 * signalled in iir */
1020eb64343cSVille Syrjälä 		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
1021c2798b19SChris Wilson 
102278c357ddSVille Syrjälä 		if (iir & I915_MASTER_ERROR_INTERRUPT)
102378c357ddSVille Syrjälä 			i8xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
102478c357ddSVille Syrjälä 
10254f5fd91fSTvrtko Ursulin 		intel_uncore_write16(&dev_priv->uncore, GEN2_IIR, iir);
1026c2798b19SChris Wilson 
1027c2798b19SChris Wilson 		if (iir & I915_USER_INTERRUPT)
10282cbc876dSMichał Winiarski 			intel_engine_cs_irq(to_gt(dev_priv)->engine[RCS0], iir);
1029c2798b19SChris Wilson 
103078c357ddSVille Syrjälä 		if (iir & I915_MASTER_ERROR_INTERRUPT)
103178c357ddSVille Syrjälä 			i8xx_error_irq_handler(dev_priv, eir, eir_stuck);
1032af722d28SVille Syrjälä 
1033eb64343cSVille Syrjälä 		i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats);
1034af722d28SVille Syrjälä 	} while (0);
1035c2798b19SChris Wilson 
10369c6508b9SThomas Gleixner 	pmu_irq_stats(dev_priv, ret);
10379c6508b9SThomas Gleixner 
10389102650fSDaniele Ceraolo Spurio 	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
10391f814dacSImre Deak 
10401f814dacSImre Deak 	return ret;
1041c2798b19SChris Wilson }
1042c2798b19SChris Wilson 
/*
 * Gen3 (i915-class) interrupt reset: clear hotplug state (when the
 * platform has hotplug), pipestats, and the GEN2_ IMR/IER/IIR bank.
 */
1043b318b824SVille Syrjälä static void i915_irq_reset(struct drm_i915_private *dev_priv)
1044a266c7d5SChris Wilson {
1045b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
1046a266c7d5SChris Wilson 
104756b857a5STvrtko Ursulin 	if (I915_HAS_HOTPLUG(dev_priv)) {
10480706f17cSEgbert Eich 		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
		/* RMW with no bits changed: clears the sticky status bits. */
10498cee664dSAndrzej Hajda 		intel_uncore_rmw(&dev_priv->uncore, PORT_HOTPLUG_STAT, 0, 0);
1050a266c7d5SChris Wilson 	}
1051a266c7d5SChris Wilson 
105244d9241eSVille Syrjälä 	i9xx_pipestat_irq_reset(dev_priv);
105344d9241eSVille Syrjälä 
1054b16b2a2fSPaulo Zanoni 	GEN3_IRQ_RESET(uncore, GEN2_);
1055e44adb5dSChris Wilson 	dev_priv->irq_mask = ~0u;
1056a266c7d5SChris Wilson }
1057a266c7d5SChris Wilson 
/*
 * Gen3 (i915-class) interrupt postinstall: program EMR, unmask the
 * always-wanted sources (ASLE, pipe events, master error, and - when the
 * platform has hotplug - display port), enable them in IER, then turn on
 * CRC-done pipestats and ASLE.
 */
1058b318b824SVille Syrjälä static void i915_irq_postinstall(struct drm_i915_private *dev_priv)
1059a266c7d5SChris Wilson {
1060b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
106138bde180SChris Wilson 	u32 enable_mask;
1062a266c7d5SChris Wilson 
10633687ce75SVille Syrjälä 	intel_uncore_write(uncore, EMR, i9xx_error_mask(dev_priv));
106438bde180SChris Wilson 
106538bde180SChris Wilson 	/* Unmask the interrupts that we always want on. */
106638bde180SChris Wilson 	dev_priv->irq_mask =
106738bde180SChris Wilson 		~(I915_ASLE_INTERRUPT |
106838bde180SChris Wilson 		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
106916659bc5SVille Syrjälä 		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
107016659bc5SVille Syrjälä 		  I915_MASTER_ERROR_INTERRUPT);
107138bde180SChris Wilson 
107238bde180SChris Wilson 	enable_mask =
107338bde180SChris Wilson 		I915_ASLE_INTERRUPT |
107438bde180SChris Wilson 		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
107538bde180SChris Wilson 		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
107616659bc5SVille Syrjälä 		I915_MASTER_ERROR_INTERRUPT |
107738bde180SChris Wilson 		I915_USER_INTERRUPT;
107838bde180SChris Wilson 
107956b857a5STvrtko Ursulin 	if (I915_HAS_HOTPLUG(dev_priv)) {
1080a266c7d5SChris Wilson 		/* Enable in IER... */
1081a266c7d5SChris Wilson 		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
1082a266c7d5SChris Wilson 		/* and unmask in IMR */
1083a266c7d5SChris Wilson 		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
1084a266c7d5SChris Wilson 	}
1085a266c7d5SChris Wilson 
1086b16b2a2fSPaulo Zanoni 	GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);
1087a266c7d5SChris Wilson 
1088379ef82dSDaniel Vetter 	/* Interrupt setup is already guaranteed to be single-threaded, this is
1089379ef82dSDaniel Vetter 	 * just to make the assert_spin_locked check happy. */
1090d6207435SDaniel Vetter 	spin_lock_irq(&dev_priv->irq_lock);
1091755e9019SImre Deak 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
1092755e9019SImre Deak 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
1093d6207435SDaniel Vetter 	spin_unlock_irq(&dev_priv->irq_lock);
1094379ef82dSDaniel Vetter 
1095c30bb1fdSVille Syrjälä 	i915_enable_asle_pipestat(dev_priv);
109620afbda2SDaniel Vetter }
109720afbda2SDaniel Vetter 
/*
 * Gen3 (i915-class) interrupt handler. Same single-pass ack-then-handle
 * structure as i8xx_irq_handler(), plus hotplug acknowledgement/handling
 * on platforms that have it.
 */
1098ff1f525eSDaniel Vetter static irqreturn_t i915_irq_handler(int irq, void *arg)
1099a266c7d5SChris Wilson {
1100b318b824SVille Syrjälä 	struct drm_i915_private *dev_priv = arg;
1101af722d28SVille Syrjälä 	irqreturn_t ret = IRQ_NONE;
1102a266c7d5SChris Wilson 
11032dd2a883SImre Deak 	if (!intel_irqs_enabled(dev_priv))
11042dd2a883SImre Deak 		return IRQ_NONE;
11052dd2a883SImre Deak 
11061f814dacSImre Deak 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
11079102650fSDaniele Ceraolo Spurio 	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
11081f814dacSImre Deak 
110938bde180SChris Wilson 	do {
1110eb64343cSVille Syrjälä 		u32 pipe_stats[I915_MAX_PIPES] = {};
111178c357ddSVille Syrjälä 		u32 eir = 0, eir_stuck = 0;
1112af722d28SVille Syrjälä 		u32 hotplug_status = 0;
1113af722d28SVille Syrjälä 		u32 iir;
1114a266c7d5SChris Wilson 
11152939eb06SJani Nikula 		iir = intel_uncore_read(&dev_priv->uncore, GEN2_IIR);
1116af722d28SVille Syrjälä 		if (iir == 0)
1117af722d28SVille Syrjälä 			break;
1118af722d28SVille Syrjälä 
1119af722d28SVille Syrjälä 		ret = IRQ_HANDLED;
1120af722d28SVille Syrjälä 
		/* Ack hotplug first; the handler runs after IIR is cleared. */
1121af722d28SVille Syrjälä 		if (I915_HAS_HOTPLUG(dev_priv) &&
1122af722d28SVille Syrjälä 		    iir & I915_DISPLAY_PORT_INTERRUPT)
1123af722d28SVille Syrjälä 			hotplug_status = i9xx_hpd_irq_ack(dev_priv);
1124a266c7d5SChris Wilson 
1125eb64343cSVille Syrjälä 		/* Call regardless, as some status bits might not be
1126eb64343cSVille Syrjälä 		 * signalled in iir */
1127eb64343cSVille Syrjälä 		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
1128a266c7d5SChris Wilson 
112978c357ddSVille Syrjälä 		if (iir & I915_MASTER_ERROR_INTERRUPT)
113078c357ddSVille Syrjälä 			i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
113178c357ddSVille Syrjälä 
11322939eb06SJani Nikula 		intel_uncore_write(&dev_priv->uncore, GEN2_IIR, iir);
1133a266c7d5SChris Wilson 
1134a266c7d5SChris Wilson 		if (iir & I915_USER_INTERRUPT)
11352cbc876dSMichał Winiarski 			intel_engine_cs_irq(to_gt(dev_priv)->engine[RCS0], iir);
1136a266c7d5SChris Wilson 
113778c357ddSVille Syrjälä 		if (iir & I915_MASTER_ERROR_INTERRUPT)
113878c357ddSVille Syrjälä 			i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
1139a266c7d5SChris Wilson 
1140af722d28SVille Syrjälä 		if (hotplug_status)
1141af722d28SVille Syrjälä 			i9xx_hpd_irq_handler(dev_priv, hotplug_status);
1142af722d28SVille Syrjälä 
1143af722d28SVille Syrjälä 		i915_pipestat_irq_handler(dev_priv, iir, pipe_stats);
1144af722d28SVille Syrjälä 	} while (0);
1145a266c7d5SChris Wilson 
11469c6508b9SThomas Gleixner 	pmu_irq_stats(dev_priv, ret);
11479c6508b9SThomas Gleixner 
11489102650fSDaniele Ceraolo Spurio 	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
11491f814dacSImre Deak 
1150a266c7d5SChris Wilson 	return ret;
1151a266c7d5SChris Wilson }
1152a266c7d5SChris Wilson 
/*
 * Gen4 (i965/g4x) interrupt reset: hotplug is unconditional here (all
 * gen4 parts have it), then pipestats and the GEN2_ register bank.
 */
1153b318b824SVille Syrjälä static void i965_irq_reset(struct drm_i915_private *dev_priv)
1154a266c7d5SChris Wilson {
1155b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
1156a266c7d5SChris Wilson 
11570706f17cSEgbert Eich 	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
	/* RMW with no bits changed: clears the sticky status bits. */
11588cee664dSAndrzej Hajda 	intel_uncore_rmw(uncore, PORT_HOTPLUG_STAT, 0, 0);
1159a266c7d5SChris Wilson 
116044d9241eSVille Syrjälä 	i9xx_pipestat_irq_reset(dev_priv);
116144d9241eSVille Syrjälä 
1162b16b2a2fSPaulo Zanoni 	GEN3_IRQ_RESET(uncore, GEN2_);
1163e44adb5dSChris Wilson 	dev_priv->irq_mask = ~0u;
1164a266c7d5SChris Wilson }
1165a266c7d5SChris Wilson 
/*
 * Return the EMR value for gen4 (i965/g4x): unlike gen2/3, page table
 * errors can always stay enabled; G4X additionally unmasks its
 * memory-privilege and command-parser-privilege error bits.
 */
11663687ce75SVille Syrjälä static u32 i965_error_mask(struct drm_i915_private *i915)
1167a266c7d5SChris Wilson {
1168045cebd2SVille Syrjälä 	/*
1169045cebd2SVille Syrjälä 	 * Enable some error detection, note the instruction error mask
1170045cebd2SVille Syrjälä 	 * bit is reserved, so we leave it masked.
1171e7e12f6eSVille Syrjälä 	 *
1172e7e12f6eSVille Syrjälä 	 * i965 FBC no longer generates spurious GTT errors,
1173e7e12f6eSVille Syrjälä 	 * so we can always enable the page table errors.
1174045cebd2SVille Syrjälä 	 */
11753687ce75SVille Syrjälä 	if (IS_G4X(i915))
11763687ce75SVille Syrjälä 		return ~(GM45_ERROR_PAGE_TABLE |
1177045cebd2SVille Syrjälä 			 GM45_ERROR_MEM_PRIV |
1178045cebd2SVille Syrjälä 			 GM45_ERROR_CP_PRIV |
1179045cebd2SVille Syrjälä 			 I915_ERROR_MEMORY_REFRESH);
11803687ce75SVille Syrjälä 	else
11813687ce75SVille Syrjälä 		return ~(I915_ERROR_PAGE_TABLE |
1182045cebd2SVille Syrjälä 			 I915_ERROR_MEMORY_REFRESH);
1183045cebd2SVille Syrjälä }
11843687ce75SVille Syrjälä 
/* Enable the always-on gen4 interrupt sources after request_irq(). */
static void i965_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 enable_mask;

	/* Program the error mask before any error interrupt can fire. */
	intel_uncore_write(uncore, EMR, i965_error_mask(dev_priv));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PORT_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_MASTER_ERROR_INTERRUPT);

	/* IER additionally enables the render ring user interrupt. */
	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PORT_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_MASTER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	/* G4x has a second (BSD) ring with its own user interrupt bit. */
	if (IS_G4X(dev_priv))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	i915_enable_asle_pipestat(dev_priv);
}
122320afbda2SDaniel Vetter 
1224ff1f525eSDaniel Vetter static irqreturn_t i965_irq_handler(int irq, void *arg)
1225a266c7d5SChris Wilson {
1226b318b824SVille Syrjälä 	struct drm_i915_private *dev_priv = arg;
1227af722d28SVille Syrjälä 	irqreturn_t ret = IRQ_NONE;
1228a266c7d5SChris Wilson 
12292dd2a883SImre Deak 	if (!intel_irqs_enabled(dev_priv))
12302dd2a883SImre Deak 		return IRQ_NONE;
12312dd2a883SImre Deak 
12321f814dacSImre Deak 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
12339102650fSDaniele Ceraolo Spurio 	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
12341f814dacSImre Deak 
1235af722d28SVille Syrjälä 	do {
1236eb64343cSVille Syrjälä 		u32 pipe_stats[I915_MAX_PIPES] = {};
123778c357ddSVille Syrjälä 		u32 eir = 0, eir_stuck = 0;
1238af722d28SVille Syrjälä 		u32 hotplug_status = 0;
1239af722d28SVille Syrjälä 		u32 iir;
12402c8ba29fSChris Wilson 
12412939eb06SJani Nikula 		iir = intel_uncore_read(&dev_priv->uncore, GEN2_IIR);
1242af722d28SVille Syrjälä 		if (iir == 0)
1243af722d28SVille Syrjälä 			break;
1244af722d28SVille Syrjälä 
1245af722d28SVille Syrjälä 		ret = IRQ_HANDLED;
1246af722d28SVille Syrjälä 
1247af722d28SVille Syrjälä 		if (iir & I915_DISPLAY_PORT_INTERRUPT)
1248af722d28SVille Syrjälä 			hotplug_status = i9xx_hpd_irq_ack(dev_priv);
1249a266c7d5SChris Wilson 
1250eb64343cSVille Syrjälä 		/* Call regardless, as some status bits might not be
1251eb64343cSVille Syrjälä 		 * signalled in iir */
1252eb64343cSVille Syrjälä 		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
1253a266c7d5SChris Wilson 
125478c357ddSVille Syrjälä 		if (iir & I915_MASTER_ERROR_INTERRUPT)
125578c357ddSVille Syrjälä 			i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
125678c357ddSVille Syrjälä 
12572939eb06SJani Nikula 		intel_uncore_write(&dev_priv->uncore, GEN2_IIR, iir);
1258a266c7d5SChris Wilson 
1259a266c7d5SChris Wilson 		if (iir & I915_USER_INTERRUPT)
12602cbc876dSMichał Winiarski 			intel_engine_cs_irq(to_gt(dev_priv)->engine[RCS0],
12610669a6e1SChris Wilson 					    iir);
1262af722d28SVille Syrjälä 
1263a266c7d5SChris Wilson 		if (iir & I915_BSD_USER_INTERRUPT)
12642cbc876dSMichał Winiarski 			intel_engine_cs_irq(to_gt(dev_priv)->engine[VCS0],
12650669a6e1SChris Wilson 					    iir >> 25);
1266a266c7d5SChris Wilson 
126778c357ddSVille Syrjälä 		if (iir & I915_MASTER_ERROR_INTERRUPT)
126878c357ddSVille Syrjälä 			i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
1269515ac2bbSDaniel Vetter 
1270af722d28SVille Syrjälä 		if (hotplug_status)
1271af722d28SVille Syrjälä 			i9xx_hpd_irq_handler(dev_priv, hotplug_status);
1272af722d28SVille Syrjälä 
1273af722d28SVille Syrjälä 		i965_pipestat_irq_handler(dev_priv, iir, pipe_stats);
1274af722d28SVille Syrjälä 	} while (0);
1275a266c7d5SChris Wilson 
12769c6508b9SThomas Gleixner 	pmu_irq_stats(dev_priv, IRQ_HANDLED);
12779c6508b9SThomas Gleixner 
12789102650fSDaniele Ceraolo Spurio 	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
12791f814dacSImre Deak 
1280a266c7d5SChris Wilson 	return ret;
1281a266c7d5SChris Wilson }
1282a266c7d5SChris Wilson 
1283fca52a55SDaniel Vetter /**
1284fca52a55SDaniel Vetter  * intel_irq_init - initializes irq support
1285fca52a55SDaniel Vetter  * @dev_priv: i915 device instance
1286fca52a55SDaniel Vetter  *
1287fca52a55SDaniel Vetter  * This function initializes all the irq support including work items, timers
1288fca52a55SDaniel Vetter  * and all the vtables. It does not setup the interrupt itself though.
1289fca52a55SDaniel Vetter  */
1290b963291cSDaniel Vetter void intel_irq_init(struct drm_i915_private *dev_priv)
1291f71d4af4SJesse Barnes {
1292cefcff8fSJoonas Lahtinen 	int i;
12938b2e326dSChris Wilson 
129474bb98baSLucas De Marchi 	INIT_WORK(&dev_priv->l3_parity.error_work, ivb_parity_work);
1295cefcff8fSJoonas Lahtinen 	for (i = 0; i < MAX_L3_SLICES; ++i)
1296cefcff8fSJoonas Lahtinen 		dev_priv->l3_parity.remap_info[i] = NULL;
12978b2e326dSChris Wilson 
1298633023a4SDaniele Ceraolo Spurio 	/* pre-gen11 the guc irqs bits are in the upper 16 bits of the pm reg */
1299651e7d48SLucas De Marchi 	if (HAS_GT_UC(dev_priv) && GRAPHICS_VER(dev_priv) < 11)
13002cbc876dSMichał Winiarski 		to_gt(dev_priv)->pm_guc_events = GUC_INTR_GUC2HOST << 16;
13012ccf2e03SChris Wilson }
130220afbda2SDaniel Vetter 
1303fca52a55SDaniel Vetter /**
1304cefcff8fSJoonas Lahtinen  * intel_irq_fini - deinitializes IRQ support
1305cefcff8fSJoonas Lahtinen  * @i915: i915 device instance
1306cefcff8fSJoonas Lahtinen  *
1307cefcff8fSJoonas Lahtinen  * This function deinitializes all the IRQ support.
1308cefcff8fSJoonas Lahtinen  */
1309cefcff8fSJoonas Lahtinen void intel_irq_fini(struct drm_i915_private *i915)
1310cefcff8fSJoonas Lahtinen {
1311cefcff8fSJoonas Lahtinen 	int i;
1312cefcff8fSJoonas Lahtinen 
1313cefcff8fSJoonas Lahtinen 	for (i = 0; i < MAX_L3_SLICES; ++i)
1314cefcff8fSJoonas Lahtinen 		kfree(i915->l3_parity.remap_info[i]);
1315cefcff8fSJoonas Lahtinen }
1316cefcff8fSJoonas Lahtinen 
1317b318b824SVille Syrjälä static irq_handler_t intel_irq_handler(struct drm_i915_private *dev_priv)
1318b318b824SVille Syrjälä {
1319b318b824SVille Syrjälä 	if (HAS_GMCH(dev_priv)) {
1320b318b824SVille Syrjälä 		if (IS_CHERRYVIEW(dev_priv))
1321b318b824SVille Syrjälä 			return cherryview_irq_handler;
1322b318b824SVille Syrjälä 		else if (IS_VALLEYVIEW(dev_priv))
1323b318b824SVille Syrjälä 			return valleyview_irq_handler;
1324651e7d48SLucas De Marchi 		else if (GRAPHICS_VER(dev_priv) == 4)
1325b318b824SVille Syrjälä 			return i965_irq_handler;
1326651e7d48SLucas De Marchi 		else if (GRAPHICS_VER(dev_priv) == 3)
1327b318b824SVille Syrjälä 			return i915_irq_handler;
1328b318b824SVille Syrjälä 		else
1329b318b824SVille Syrjälä 			return i8xx_irq_handler;
1330b318b824SVille Syrjälä 	} else {
133122e26af7SPaulo Zanoni 		if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
133297b492f5SLucas De Marchi 			return dg1_irq_handler;
133322e26af7SPaulo Zanoni 		else if (GRAPHICS_VER(dev_priv) >= 11)
1334b318b824SVille Syrjälä 			return gen11_irq_handler;
1335651e7d48SLucas De Marchi 		else if (GRAPHICS_VER(dev_priv) >= 8)
1336b318b824SVille Syrjälä 			return gen8_irq_handler;
1337b318b824SVille Syrjälä 		else
13389eae5e27SLucas De Marchi 			return ilk_irq_handler;
1339b318b824SVille Syrjälä 	}
1340b318b824SVille Syrjälä }
1341b318b824SVille Syrjälä 
/* Dispatch to the platform-appropriate irq reset routine. */
static void intel_irq_reset(struct drm_i915_private *dev_priv)
{
	if (!HAS_GMCH(dev_priv)) {
		if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
			dg1_irq_reset(dev_priv);
		else if (GRAPHICS_VER(dev_priv) >= 11)
			gen11_irq_reset(dev_priv);
		else if (GRAPHICS_VER(dev_priv) >= 8)
			gen8_irq_reset(dev_priv);
		else
			ilk_irq_reset(dev_priv);
		return;
	}

	/* GMCH platforms: check the display-derived variants first. */
	if (IS_CHERRYVIEW(dev_priv))
		cherryview_irq_reset(dev_priv);
	else if (IS_VALLEYVIEW(dev_priv))
		valleyview_irq_reset(dev_priv);
	else if (GRAPHICS_VER(dev_priv) == 4)
		i965_irq_reset(dev_priv);
	else if (GRAPHICS_VER(dev_priv) == 3)
		i915_irq_reset(dev_priv);
	else
		i8xx_irq_reset(dev_priv);
}
1366b318b824SVille Syrjälä 
/* Dispatch to the platform-appropriate irq postinstall routine. */
static void intel_irq_postinstall(struct drm_i915_private *dev_priv)
{
	if (!HAS_GMCH(dev_priv)) {
		if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
			dg1_irq_postinstall(dev_priv);
		else if (GRAPHICS_VER(dev_priv) >= 11)
			gen11_irq_postinstall(dev_priv);
		else if (GRAPHICS_VER(dev_priv) >= 8)
			gen8_irq_postinstall(dev_priv);
		else
			ilk_irq_postinstall(dev_priv);
		return;
	}

	/* GMCH platforms: check the display-derived variants first. */
	if (IS_CHERRYVIEW(dev_priv))
		cherryview_irq_postinstall(dev_priv);
	else if (IS_VALLEYVIEW(dev_priv))
		valleyview_irq_postinstall(dev_priv);
	else if (GRAPHICS_VER(dev_priv) == 4)
		i965_irq_postinstall(dev_priv);
	else if (GRAPHICS_VER(dev_priv) == 3)
		i915_irq_postinstall(dev_priv);
	else
		i8xx_irq_postinstall(dev_priv);
}
1391b318b824SVille Syrjälä 
/**
 * intel_irq_install - enables the hardware interrupt
 * @dev_priv: i915 device instance
 *
 * This function enables the hardware interrupt handling, but leaves the hotplug
 * handling still disabled. It is called after intel_irq_init().
 *
 * In the driver load and resume code we need working interrupts in a few places
 * but don't want to deal with the hassle of concurrent probe and hotplug
 * workers. Hence the split into this two-stage approach.
 *
 * Returns: 0 on success, or the negative error code from request_irq().
 */
int intel_irq_install(struct drm_i915_private *dev_priv)
{
	int irq = to_pci_dev(dev_priv->drm.dev)->irq;
	int ret;

	/*
	 * We enable some interrupt sources in our postinstall hooks, so mark
	 * interrupts as enabled _before_ actually enabling them to avoid
	 * special cases in our ordering checks.
	 */
	dev_priv->runtime_pm.irqs_enabled = true;

	dev_priv->irq_enabled = true;

	/* Quiesce all sources before the (shared) handler can run. */
	intel_irq_reset(dev_priv);

	ret = request_irq(irq, intel_irq_handler(dev_priv),
			  IRQF_SHARED, DRIVER_NAME, dev_priv);
	if (ret < 0) {
		/* Roll back so intel_irq_uninstall() stays a no-op. */
		dev_priv->irq_enabled = false;
		return ret;
	}

	/* Only unmask interrupt sources once the handler is in place. */
	intel_irq_postinstall(dev_priv);

	return ret;
}
14302aeb7d3aSDaniel Vetter 
/**
 * intel_irq_uninstall - finalizes all irq handling
 * @dev_priv: i915 device instance
 *
 * This stops interrupt and hotplug handling and unregisters and frees all
 * resources acquired in the init functions.
 */
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
{
	int irq = to_pci_dev(dev_priv->drm.dev)->irq;

	/*
	 * FIXME we can get called twice during driver probe
	 * error handling as well as during driver remove due to
	 * intel_display_driver_remove() calling us out of sequence.
	 * Would be nice if it didn't do that...
	 */
	if (!dev_priv->irq_enabled)
		return;

	dev_priv->irq_enabled = false;

	/* Mask/ack everything before handing the line back. */
	intel_irq_reset(dev_priv);

	free_irq(irq, dev_priv);

	intel_hpd_cancel_work(dev_priv);
	dev_priv->runtime_pm.irqs_enabled = false;
}
14602aeb7d3aSDaniel Vetter 
/**
 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
 * @dev_priv: i915 device instance
 *
 * This function is used to disable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
{
	intel_irq_reset(dev_priv);
	dev_priv->runtime_pm.irqs_enabled = false;
	/* Wait for any in-flight handler invocation to finish. */
	intel_synchronize_irq(dev_priv);
}
1474c67a470bSPaulo Zanoni 
/**
 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
 * @dev_priv: i915 device instance
 *
 * This function is used to enable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
{
	/* Mark enabled first; postinstall hooks assert on this state. */
	dev_priv->runtime_pm.irqs_enabled = true;
	intel_irq_reset(dev_priv);
	intel_irq_postinstall(dev_priv);
}
1488d64575eeSJani Nikula 
/* Report whether driver interrupt handling is currently enabled. */
bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
{
	return dev_priv->runtime_pm.irqs_enabled;
}
1493d64575eeSJani Nikula 
/* Wait for the device's irq handler (including threaded part) to complete. */
void intel_synchronize_irq(struct drm_i915_private *i915)
{
	synchronize_irq(to_pci_dev(i915->drm.dev)->irq);
}
1498320ad343SThomas Zimmermann 
/* Wait only for the hard-irq part of the device's handler to complete. */
void intel_synchronize_hardirq(struct drm_i915_private *i915)
{
	synchronize_hardirq(to_pci_dev(i915->drm.dev)->irq);
}
1503