xref: /openbmc/linux/drivers/gpu/drm/i915/i915_irq.c (revision c900529f3d9161bfde5cca0754f83b4d3c3e0220)
1c0e09200SDave Airlie /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
2c0e09200SDave Airlie  */
3c0e09200SDave Airlie /*
4c0e09200SDave Airlie  * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
5c0e09200SDave Airlie  * All Rights Reserved.
6c0e09200SDave Airlie  *
7c0e09200SDave Airlie  * Permission is hereby granted, free of charge, to any person obtaining a
8c0e09200SDave Airlie  * copy of this software and associated documentation files (the
9c0e09200SDave Airlie  * "Software"), to deal in the Software without restriction, including
10c0e09200SDave Airlie  * without limitation the rights to use, copy, modify, merge, publish,
11c0e09200SDave Airlie  * distribute, sub license, and/or sell copies of the Software, and to
12c0e09200SDave Airlie  * permit persons to whom the Software is furnished to do so, subject to
13c0e09200SDave Airlie  * the following conditions:
14c0e09200SDave Airlie  *
15c0e09200SDave Airlie  * The above copyright notice and this permission notice (including the
16c0e09200SDave Airlie  * next paragraph) shall be included in all copies or substantial portions
17c0e09200SDave Airlie  * of the Software.
18c0e09200SDave Airlie  *
19c0e09200SDave Airlie  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20c0e09200SDave Airlie  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21c0e09200SDave Airlie  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22c0e09200SDave Airlie  * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23c0e09200SDave Airlie  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24c0e09200SDave Airlie  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25c0e09200SDave Airlie  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26c0e09200SDave Airlie  *
27c0e09200SDave Airlie  */
28c0e09200SDave Airlie 
29a70491ccSJoe Perches #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30a70491ccSJoe Perches 
3155367a27SJani Nikula #include <linux/slab.h>
3255367a27SJani Nikula #include <linux/sysrq.h>
3355367a27SJani Nikula 
34fcd70cd3SDaniel Vetter #include <drm/drm_drv.h>
3555367a27SJani Nikula 
362b874a02SJani Nikula #include "display/intel_display_irq.h"
371d455f8dSJani Nikula #include "display/intel_display_types.h"
38df0566a6SJani Nikula #include "display/intel_hotplug.h"
39da38ba98SJani Nikula #include "display/intel_hotplug_irq.h"
40df0566a6SJani Nikula #include "display/intel_lpe_audio.h"
417f6947fdSJani Nikula #include "display/intel_psr_regs.h"
42df0566a6SJani Nikula 
43b3786b29SChris Wilson #include "gt/intel_breadcrumbs.h"
442239e6dfSDaniele Ceraolo Spurio #include "gt/intel_gt.h"
45cf1c97dcSAndi Shyti #include "gt/intel_gt_irq.h"
46d762043fSAndi Shyti #include "gt/intel_gt_pm_irq.h"
470d6419e9SMatt Roper #include "gt/intel_gt_regs.h"
483e7abf81SAndi Shyti #include "gt/intel_rps.h"
492239e6dfSDaniele Ceraolo Spurio 
5024524e3fSJani Nikula #include "i915_driver.h"
51c0e09200SDave Airlie #include "i915_drv.h"
52440e2b3dSJani Nikula #include "i915_irq.h"
53476f62b8SJani Nikula #include "i915_reg.h"
54c0e09200SDave Airlie 
55fca52a55SDaniel Vetter /**
56fca52a55SDaniel Vetter  * DOC: interrupt handling
57fca52a55SDaniel Vetter  *
58fca52a55SDaniel Vetter  * These functions provide the basic support for enabling and disabling the
59fca52a55SDaniel Vetter  * interrupt handling support. There's a lot more functionality in i915_irq.c
60fca52a55SDaniel Vetter  * and related files, but that will be described in separate chapters.
61fca52a55SDaniel Vetter  */
62fca52a55SDaniel Vetter 
639c6508b9SThomas Gleixner /*
649c6508b9SThomas Gleixner  * Interrupt statistic for PMU. Increments the counter only if the
6578f48aa6SBo Liu  * interrupt originated from the GPU so interrupts from a device which
669c6508b9SThomas Gleixner  * shares the interrupt line are not accounted.
679c6508b9SThomas Gleixner  */
pmu_irq_stats(struct drm_i915_private * i915,irqreturn_t res)689c6508b9SThomas Gleixner static inline void pmu_irq_stats(struct drm_i915_private *i915,
699c6508b9SThomas Gleixner 				 irqreturn_t res)
709c6508b9SThomas Gleixner {
719c6508b9SThomas Gleixner 	if (unlikely(res != IRQ_HANDLED))
729c6508b9SThomas Gleixner 		return;
739c6508b9SThomas Gleixner 
749c6508b9SThomas Gleixner 	/*
759c6508b9SThomas Gleixner 	 * A clever compiler translates that into INC. A not so clever one
769c6508b9SThomas Gleixner 	 * should at least prevent store tearing.
779c6508b9SThomas Gleixner 	 */
789c6508b9SThomas Gleixner 	WRITE_ONCE(i915->pmu.irq_count, i915->pmu.irq_count + 1);
799c6508b9SThomas Gleixner }
809c6508b9SThomas Gleixner 
/*
 * gen3_irq_reset - quiesce a GEN3+ style IMR/IER/IIR register triplet.
 * @uncore: uncore structure used for the MMIO accesses
 * @imr: interrupt mask register
 * @iir: interrupt identity register
 * @ier: interrupt enable register
 *
 * Masks all interrupts (IMR), disables all sources (IER) and then drains
 * any events still latched in IIR. Posting reads flush each write to the
 * hardware before the next step.
 */
void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr,
		    i915_reg_t iir, i915_reg_t ier)
{
	/* Mask everything first so no new events are raised while we reset. */
	intel_uncore_write(uncore, imr, 0xffffffff);
	intel_uncore_posting_read(uncore, imr);

	intel_uncore_write(uncore, ier, 0);

	/* IIR can theoretically queue up two events. Be paranoid. */
	intel_uncore_write(uncore, iir, 0xffffffff);
	intel_uncore_posting_read(uncore, iir);
	intel_uncore_write(uncore, iir, 0xffffffff);
	intel_uncore_posting_read(uncore, iir);
}
955c502442SPaulo Zanoni 
/*
 * gen2_irq_reset - quiesce the 16bit GEN2 interrupt registers.
 * @uncore: uncore structure used for the MMIO accesses
 *
 * GEN2 counterpart of gen3_irq_reset(): mask (IMR), disable (IER) and
 * drain pending events (IIR) using the 16bit register accessors.
 */
static void gen2_irq_reset(struct intel_uncore *uncore)
{
	/* Mask everything first so no new events are raised while we reset. */
	intel_uncore_write16(uncore, GEN2_IMR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IMR);

	intel_uncore_write16(uncore, GEN2_IER, 0);

	/* IIR can theoretically queue up two events. Be paranoid. */
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
}
10968eb49b1SPaulo Zanoni 
/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
/*
 * gen3_assert_iir_is_zero - verify an IIR register has no pending events.
 * @uncore: uncore structure used for the MMIO accesses
 * @reg: the IIR register to check
 *
 * Warns if anything is still latched in @reg and then force-clears it so
 * the subsequent IER/IMR programming starts from a clean state.
 */
void gen3_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg)
{
	u32 val = intel_uncore_read(uncore, reg);

	if (val == 0)
		return;

	/* Something was left pending - complain loudly, then clean up. */
	drm_WARN(&uncore->i915->drm, 1,
		 "Interrupt register 0x%x is not zero: 0x%08x\n",
		 i915_mmio_reg_offset(reg), val);
	/* IIR can queue up two events - write/flush twice to drain both. */
	intel_uncore_write(uncore, reg, 0xffffffff);
	intel_uncore_posting_read(uncore, reg);
	intel_uncore_write(uncore, reg, 0xffffffff);
	intel_uncore_posting_read(uncore, reg);
}
128337ba017SPaulo Zanoni 
/*
 * gen2_assert_iir_is_zero - verify the 16bit GEN2 IIR has no pending events.
 * @uncore: uncore structure used for the MMIO accesses
 *
 * GEN2 counterpart of gen3_assert_iir_is_zero(): warn if anything is still
 * latched in GEN2_IIR and force-clear it.
 */
static void gen2_assert_iir_is_zero(struct intel_uncore *uncore)
{
	u16 val = intel_uncore_read16(uncore, GEN2_IIR);

	if (val == 0)
		return;

	/* Something was left pending - complain loudly, then clean up. */
	drm_WARN(&uncore->i915->drm, 1,
		 "Interrupt register 0x%x is not zero: 0x%08x\n",
		 i915_mmio_reg_offset(GEN2_IIR), val);
	/* IIR can queue up two events - write/flush twice to drain both. */
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
}
144e9e9848aSVille Syrjälä 
/*
 * gen3_irq_init - program a GEN3+ style IMR/IER/IIR register triplet.
 * @uncore: uncore structure used for the MMIO accesses
 * @imr: interrupt mask register
 * @imr_val: value to program into @imr
 * @ier: interrupt enable register
 * @ier_val: value to program into @ier
 * @iir: interrupt identity register, asserted to be zero
 *
 * IIR must already be clear (it was reset at preinstall/uninstall); IER is
 * programmed before unmasking via IMR, and a posting read flushes the
 * sequence to the hardware.
 */
void gen3_irq_init(struct intel_uncore *uncore,
		   i915_reg_t imr, u32 imr_val,
		   i915_reg_t ier, u32 ier_val,
		   i915_reg_t iir)
{
	gen3_assert_iir_is_zero(uncore, iir);

	intel_uncore_write(uncore, ier, ier_val);
	intel_uncore_write(uncore, imr, imr_val);
	intel_uncore_posting_read(uncore, imr);
}
15635079899SPaulo Zanoni 
/*
 * gen2_irq_init - program the 16bit GEN2 interrupt registers.
 * @uncore: uncore structure used for the MMIO accesses
 * @imr_val: value to program into GEN2_IMR
 * @ier_val: value to program into GEN2_IER
 *
 * GEN2 counterpart of gen3_irq_init(): assert IIR is clean, enable the
 * requested sources, then unmask, flushing with a posting read.
 */
static void gen2_irq_init(struct intel_uncore *uncore,
			  u32 imr_val, u32 ier_val)
{
	gen2_assert_iir_is_zero(uncore);

	intel_uncore_write16(uncore, GEN2_IER, ier_val);
	intel_uncore_write16(uncore, GEN2_IMR, imr_val);
	intel_uncore_posting_read16(uncore, GEN2_IMR);
}
16668eb49b1SPaulo Zanoni 
/**
 * ivb_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * it is likely the same row is more likely to go bad again.
 */
static void ivb_parity_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), l3_parity.error_work);
	struct intel_gt *gt = to_gt(dev_priv);
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	u32 misccpctl;
	u8 slice = 0;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->drm.struct_mutex);

	/* If we've screwed up tracking, just let the interrupt fire again */
	if (drm_WARN_ON(&dev_priv->drm, !dev_priv->l3_parity.which_slice))
		goto out;

	/* Disable DOP clock gating; the old value is restored below. */
	misccpctl = intel_uncore_rmw(&dev_priv->uncore, GEN7_MISCCPCTL,
				     GEN7_DOP_CLOCK_GATE_ENABLE, 0);
	intel_uncore_posting_read(&dev_priv->uncore, GEN7_MISCCPCTL);

	/* Drain the per-slice error bits accumulated by the interrupt path. */
	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		i915_reg_t reg;

		/* ffs() is 1-based; convert to a 0-based slice index. */
		slice--;
		if (drm_WARN_ON_ONCE(&dev_priv->drm,
				     slice >= NUM_L3_SLICES(dev_priv)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1<<slice);

		reg = GEN7_L3CDERRST1(slice);

		error_status = intel_uncore_read(&dev_priv->uncore, reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		/* Ack the error and re-arm detection for this slice. */
		intel_uncore_write(&dev_priv->uncore, reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		intel_uncore_posting_read(&dev_priv->uncore, reg);

		/*
		 * Notify userspace with the error location so it can remap
		 * the bad row.
		 * NOTE(review): kasprintf() results are not NULL-checked; on
		 * allocation failure the env array is terminated early -
		 * confirm this is acceptable best-effort behaviour.
		 */
		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

		kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj,
				   KOBJ_CHANGE, parity_event);

		drm_dbg(&dev_priv->drm,
			"Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			slice, row, bank, subbank);

		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
	}

	/* Restore the original DOP clock gating configuration. */
	intel_uncore_write(&dev_priv->uncore, GEN7_MISCCPCTL, misccpctl);

out:
	drm_WARN_ON(&dev_priv->drm, dev_priv->l3_parity.which_slice);
	/* Re-enable the parity interrupt now that all slices are drained. */
	spin_lock_irq(gt->irq_lock);
	gen5_gt_enable_irq(gt, GT_PARITY_ERROR(dev_priv));
	spin_unlock_irq(gt->irq_lock);

	mutex_unlock(&dev_priv->drm.struct_mutex);
}
25035a85ac6SBen Widawsky 
/*
 * valleyview_irq_handler - top-level interrupt handler for VLV.
 * @irq: the IRQ number (unused)
 * @arg: our struct drm_i915_private, as registered with request_irq()
 *
 * Samples GTIIR/GEN6_PMIIR/VLV_IIR, acks each source, then dispatches to
 * the GT, RPS, hotplug and pipestat sub-handlers. Returns IRQ_HANDLED if
 * any source had an event pending, IRQ_NONE otherwise (e.g. shared line).
 */
static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	/* do { } while (0) only so 'break' can bail out of the body early. */
	do {
		u32 iir, gt_iir, pm_iir;
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 hotplug_status = 0;
		u32 ier = 0;

		gt_iir = intel_uncore_read(&dev_priv->uncore, GTIIR);
		pm_iir = intel_uncore_read(&dev_priv->uncore, GEN6_PMIIR);
		iir = intel_uncore_read(&dev_priv->uncore, VLV_IIR);

		/* Nothing pending - not our interrupt (shared line?). */
		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			break;

		ret = IRQ_HANDLED;

		/*
		 * Theory on interrupt generation, based on empirical evidence:
		 *
		 * x = ((VLV_IIR & VLV_IER) ||
		 *      (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) &&
		 *       (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE)));
		 *
		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
		 * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to
		 * guarantee the CPU interrupt will be raised again even if we
		 * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR
		 * bits this time around.
		 */
		intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, 0);
		ier = intel_uncore_rmw(&dev_priv->uncore, VLV_IER, ~0, 0);

		/* Ack the GT/PM sources before processing them. */
		if (gt_iir)
			intel_uncore_write(&dev_priv->uncore, GTIIR, gt_iir);
		if (pm_iir)
			intel_uncore_write(&dev_priv->uncore, GEN6_PMIIR, pm_iir);

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & (I915_LPE_PIPE_A_INTERRUPT |
			   I915_LPE_PIPE_B_INTERRUPT))
			intel_lpe_audio_irq_handler(dev_priv);

		/*
		 * VLV_IIR is single buffered, and reflects the level
		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
		 */
		if (iir)
			intel_uncore_write(&dev_priv->uncore, VLV_IIR, iir);

		/* Re-enable interrupt delivery before the (slow) handlers. */
		intel_uncore_write(&dev_priv->uncore, VLV_IER, ier);
		intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);

		if (gt_iir)
			gen6_gt_irq_handler(to_gt(dev_priv), gt_iir);
		if (pm_iir)
			gen6_rps_irq_handler(&to_gt(dev_priv)->rps, pm_iir);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
	} while (0);

	pmu_irq_stats(dev_priv, ret);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}
3367e231dbeSJesse Barnes 
/*
 * cherryview_irq_handler - top-level interrupt handler for CHV.
 * @irq: the IRQ number (unused)
 * @arg: our struct drm_i915_private, as registered with request_irq()
 *
 * Like valleyview_irq_handler(), but GT/PM interrupts are routed through
 * the GEN8 master IRQ register instead of GTIIR/GEN6_PMIIR. Returns
 * IRQ_HANDLED if any source had an event pending, IRQ_NONE otherwise.
 */
static irqreturn_t cherryview_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	/* do { } while (0) only so 'break' can bail out of the body early. */
	do {
		u32 master_ctl, iir;
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 hotplug_status = 0;
		u32 ier = 0;

		master_ctl = intel_uncore_read(&dev_priv->uncore, GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
		iir = intel_uncore_read(&dev_priv->uncore, VLV_IIR);

		/* Nothing pending - not our interrupt (shared line?). */
		if (master_ctl == 0 && iir == 0)
			break;

		ret = IRQ_HANDLED;

		/*
		 * Theory on interrupt generation, based on empirical evidence:
		 *
		 * x = ((VLV_IIR & VLV_IER) ||
		 *      ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) &&
		 *       (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL)));
		 *
		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
		 * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to
		 * guarantee the CPU interrupt will be raised again even if we
		 * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL
		 * bits this time around.
		 */
		intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, 0);
		ier = intel_uncore_rmw(&dev_priv->uncore, VLV_IER, ~0, 0);

		gen8_gt_irq_handler(to_gt(dev_priv), master_ctl);

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & (I915_LPE_PIPE_A_INTERRUPT |
			   I915_LPE_PIPE_B_INTERRUPT |
			   I915_LPE_PIPE_C_INTERRUPT))
			intel_lpe_audio_irq_handler(dev_priv);

		/*
		 * VLV_IIR is single buffered, and reflects the level
		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
		 */
		if (iir)
			intel_uncore_write(&dev_priv->uncore, VLV_IIR, iir);

		/* Re-enable interrupt delivery before the (slow) handlers. */
		intel_uncore_write(&dev_priv->uncore, VLV_IER, ier);
		intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
	} while (0);

	pmu_irq_stats(dev_priv, ret);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}
41443f328d7SVille Syrjälä 
/*
 * To handle irqs with the minimum potential races with fresh interrupts, we:
 * 1 - Disable Master Interrupt Control.
 * 2 - Find the source(s) of the interrupt.
 * 3 - Clear the Interrupt Identity bits (IIR).
 * 4 - Process the interrupt(s) that had bits set in the IIRs.
 * 5 - Re-enable Master Interrupt Control.
 */
/*
 * ilk_irq_handler - top-level interrupt handler for ILK-HSW (pre-GEN8).
 * @irq: the IRQ number (unused)
 * @arg: our struct drm_i915_private, as registered with request_irq()
 *
 * Walks the GT, display engine (DE) and, on GEN6+, PM IIR registers,
 * acking and dispatching each. Returns IRQ_HANDLED if any source had an
 * event pending, IRQ_NONE otherwise.
 */
static irqreturn_t ilk_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *i915 = arg;
	void __iomem * const regs = intel_uncore_regs(&i915->uncore);
	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
	irqreturn_t ret = IRQ_NONE;

	if (unlikely(!intel_irqs_enabled(i915)))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&i915->runtime_pm);

	/* disable master interrupt before clearing iir  */
	de_ier = raw_reg_read(regs, DEIER);
	raw_reg_write(regs, DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);

	/* Disable south interrupts. We'll only write to SDEIIR once, so further
	 * interrupts will be stored on its back queue, and then we'll be
	 * able to process them after we restore SDEIER (as soon as we restore
	 * it, we'll get an interrupt if SDEIIR still has something to process
	 * due to its back queue). */
	if (!HAS_PCH_NOP(i915)) {
		sde_ier = raw_reg_read(regs, SDEIER);
		raw_reg_write(regs, SDEIER, 0);
	}

	/* Find, clear, then process each source of interrupt */

	gt_iir = raw_reg_read(regs, GTIIR);
	if (gt_iir) {
		raw_reg_write(regs, GTIIR, gt_iir);
		if (GRAPHICS_VER(i915) >= 6)
			gen6_gt_irq_handler(to_gt(i915), gt_iir);
		else
			gen5_gt_irq_handler(to_gt(i915), gt_iir);
		ret = IRQ_HANDLED;
	}

	de_iir = raw_reg_read(regs, DEIIR);
	if (de_iir) {
		raw_reg_write(regs, DEIIR, de_iir);
		if (DISPLAY_VER(i915) >= 7)
			ivb_display_irq_handler(i915, de_iir);
		else
			ilk_display_irq_handler(i915, de_iir);
		ret = IRQ_HANDLED;
	}

	/* PM interrupts only exist on GEN6+. */
	if (GRAPHICS_VER(i915) >= 6) {
		u32 pm_iir = raw_reg_read(regs, GEN6_PMIIR);
		if (pm_iir) {
			raw_reg_write(regs, GEN6_PMIIR, pm_iir);
			gen6_rps_irq_handler(&to_gt(i915)->rps, pm_iir);
			ret = IRQ_HANDLED;
		}
	}

	/* Restore master and (if present) south interrupt enables. */
	raw_reg_write(regs, DEIER, de_ier);
	if (sde_ier)
		raw_reg_write(regs, SDEIER, sde_ier);

	pmu_irq_stats(i915, ret);

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	enable_rpm_wakeref_asserts(&i915->runtime_pm);

	return ret;
}
492b1f14ad0SJesse Barnes 
/*
 * Disable the GEN8 master interrupt control and return the latched
 * master IRQ status for processing.
 */
static inline u32 gen8_master_intr_disable(void __iomem * const regs)
{
	raw_reg_write(regs, GEN8_MASTER_IRQ, 0);

	/*
	 * Now with master disabled, get a sample of level indications
	 * for this interrupt. Indications will be cleared on related acks.
	 * New indications can and will light up during processing,
	 * and will generate new interrupt after enabling master.
	 */
	return raw_reg_read(regs, GEN8_MASTER_IRQ);
}
5054376b9c9SMika Kuoppala 
/* Re-enable the GEN8 master interrupt control after processing. */
static inline void gen8_master_intr_enable(void __iomem * const regs)
{
	raw_reg_write(regs, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
}
5104376b9c9SMika Kuoppala 
/*
 * gen8_irq_handler - top-level interrupt handler for GEN8/BDW+.
 * @irq: the IRQ number (unused)
 * @arg: our struct drm_i915_private, as registered with request_irq()
 *
 * Disables the master interrupt, dispatches the sampled sources to the GT
 * and display engine handlers, then re-enables the master. Returns
 * IRQ_NONE when no source was pending (e.g. shared interrupt line).
 */
static irqreturn_t gen8_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	void __iomem * const regs = intel_uncore_regs(&dev_priv->uncore);
	u32 master_ctl;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	master_ctl = gen8_master_intr_disable(regs);
	if (!master_ctl) {
		/* Not ours - restore the master enable and bail. */
		gen8_master_intr_enable(regs);
		return IRQ_NONE;
	}

	/* Find, queue (onto bottom-halves), then clear each source */
	gen8_gt_irq_handler(to_gt(dev_priv), master_ctl);

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	if (master_ctl & ~GEN8_GT_IRQS) {
		disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
		gen8_de_irq_handler(dev_priv, master_ctl);
		enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
	}

	gen8_master_intr_enable(regs);

	pmu_irq_stats(dev_priv, IRQ_HANDLED);

	return IRQ_HANDLED;
}
542abd58f01SBen Widawsky 
/*
 * Disable the GEN11 graphics master interrupt and return the latched
 * master IRQ status for processing.
 */
static inline u32 gen11_master_intr_disable(void __iomem * const regs)
{
	raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0);

	/*
	 * Now with master disabled, get a sample of level indications
	 * for this interrupt. Indications will be cleared on related acks.
	 * New indications can and will light up during processing,
	 * and will generate new interrupt after enabling master.
	 */
	return raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
}
55581067b71SMika Kuoppala 
/* Re-enable the GEN11 graphics master interrupt after processing. */
static inline void gen11_master_intr_enable(void __iomem * const regs)
{
	raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ);
}
56081067b71SMika Kuoppala 
/*
 * Top-level interrupt handler for gen11 platforms. Disables the master
 * interrupt, fans out to the GT, display and GU-misc sub-handlers based
 * on the sampled master control bits, then re-enables the master so a
 * new edge can be generated for any indications that lit up meanwhile.
 */
gen11_irq_handler(int irq,void * arg)56122e26af7SPaulo Zanoni static irqreturn_t gen11_irq_handler(int irq, void *arg)
56251951ae7SMika Kuoppala {
56322e26af7SPaulo Zanoni 	struct drm_i915_private *i915 = arg;
56472e9abc3SJani Nikula 	void __iomem * const regs = intel_uncore_regs(&i915->uncore);
5652cbc876dSMichał Winiarski 	struct intel_gt *gt = to_gt(i915);
56651951ae7SMika Kuoppala 	u32 master_ctl;
567df0d28c1SDhinakaran Pandiyan 	u32 gu_misc_iir;
56851951ae7SMika Kuoppala 
56951951ae7SMika Kuoppala 	if (!intel_irqs_enabled(i915))
57051951ae7SMika Kuoppala 		return IRQ_NONE;
57151951ae7SMika Kuoppala 
	/* Nothing pending: restore the master enable and bail out. */
57222e26af7SPaulo Zanoni 	master_ctl = gen11_master_intr_disable(regs);
57381067b71SMika Kuoppala 	if (!master_ctl) {
57422e26af7SPaulo Zanoni 		gen11_master_intr_enable(regs);
57551951ae7SMika Kuoppala 		return IRQ_NONE;
57681067b71SMika Kuoppala 	}
57751951ae7SMika Kuoppala 
5786cc32f15SChris Wilson 	/* Find, queue (onto bottom-halves), then clear each source */
5799b77011eSTvrtko Ursulin 	gen11_gt_irq_handler(gt, master_ctl);
58051951ae7SMika Kuoppala 
58151951ae7SMika Kuoppala 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
582a3265d85SMatt Roper 	if (master_ctl & GEN11_DISPLAY_IRQ)
583a3265d85SMatt Roper 		gen11_display_irq_handler(i915);
58451951ae7SMika Kuoppala 
	/* Ack GU misc before re-enabling master; handle it afterwards. */
585ddcf980fSAnusha Srivatsa 	gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl);
586df0d28c1SDhinakaran Pandiyan 
58722e26af7SPaulo Zanoni 	gen11_master_intr_enable(regs);
58851951ae7SMika Kuoppala 
589ddcf980fSAnusha Srivatsa 	gen11_gu_misc_irq_handler(i915, gu_misc_iir);
590df0d28c1SDhinakaran Pandiyan 
5919c6508b9SThomas Gleixner 	pmu_irq_stats(i915, IRQ_HANDLED);
5929c6508b9SThomas Gleixner 
59351951ae7SMika Kuoppala 	return IRQ_HANDLED;
59451951ae7SMika Kuoppala }
59551951ae7SMika Kuoppala 
/*
 * Disable the DG1 tile-level master interrupt, then read and ack the
 * per-tile indication bits. Returns the sampled tile bits, or 0 if no
 * tile had an interrupt pending.
 */
dg1_master_intr_disable(void __iomem * const regs)59622e26af7SPaulo Zanoni static inline u32 dg1_master_intr_disable(void __iomem * const regs)
59797b492f5SLucas De Marchi {
59897b492f5SLucas De Marchi 	u32 val;
59997b492f5SLucas De Marchi 
60097b492f5SLucas De Marchi 	/* First disable interrupts */
60122e26af7SPaulo Zanoni 	raw_reg_write(regs, DG1_MSTR_TILE_INTR, 0);
60297b492f5SLucas De Marchi 
60397b492f5SLucas De Marchi 	/* Get the indication levels and ack the master unit */
60422e26af7SPaulo Zanoni 	val = raw_reg_read(regs, DG1_MSTR_TILE_INTR);
60597b492f5SLucas De Marchi 	if (unlikely(!val))
60697b492f5SLucas De Marchi 		return 0;
60797b492f5SLucas De Marchi 
60822e26af7SPaulo Zanoni 	raw_reg_write(regs, DG1_MSTR_TILE_INTR, val);
60997b492f5SLucas De Marchi 
61097b492f5SLucas De Marchi 	return val;
61197b492f5SLucas De Marchi }
61297b492f5SLucas De Marchi 
/* Re-enable the DG1 tile-level master interrupt. */
dg1_master_intr_enable(void __iomem * const regs)61397b492f5SLucas De Marchi static inline void dg1_master_intr_enable(void __iomem * const regs)
61497b492f5SLucas De Marchi {
61522e26af7SPaulo Zanoni 	raw_reg_write(regs, DG1_MSTR_TILE_INTR, DG1_MSTR_IRQ);
61697b492f5SLucas De Marchi }
61797b492f5SLucas De Marchi 
/*
 * Top-level interrupt handler for DG1. Like gen11, but interrupts are
 * first routed through a per-tile master register; only tile 0 is
 * serviced here (see FIXME below). After reading and acking the tile-0
 * graphics master bits, delegates to the gen11 GT/display/GU-misc paths.
 */
dg1_irq_handler(int irq,void * arg)61897b492f5SLucas De Marchi static irqreturn_t dg1_irq_handler(int irq, void *arg)
61997b492f5SLucas De Marchi {
62022e26af7SPaulo Zanoni 	struct drm_i915_private * const i915 = arg;
6212cbc876dSMichał Winiarski 	struct intel_gt *gt = to_gt(i915);
62272e9abc3SJani Nikula 	void __iomem * const regs = intel_uncore_regs(gt->uncore);
62322e26af7SPaulo Zanoni 	u32 master_tile_ctl, master_ctl;
62422e26af7SPaulo Zanoni 	u32 gu_misc_iir;
62522e26af7SPaulo Zanoni 
62622e26af7SPaulo Zanoni 	if (!intel_irqs_enabled(i915))
62722e26af7SPaulo Zanoni 		return IRQ_NONE;
62822e26af7SPaulo Zanoni 
62922e26af7SPaulo Zanoni 	master_tile_ctl = dg1_master_intr_disable(regs);
63022e26af7SPaulo Zanoni 	if (!master_tile_ctl) {
63122e26af7SPaulo Zanoni 		dg1_master_intr_enable(regs);
63222e26af7SPaulo Zanoni 		return IRQ_NONE;
63322e26af7SPaulo Zanoni 	}
63422e26af7SPaulo Zanoni 
63522e26af7SPaulo Zanoni 	/* FIXME: we only support tile 0 for now. */
63622e26af7SPaulo Zanoni 	if (master_tile_ctl & DG1_MSTR_TILE(0)) {
		/* Read the graphics master bits and ack them in one go. */
63722e26af7SPaulo Zanoni 		master_ctl = raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
63822e26af7SPaulo Zanoni 		raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, master_ctl);
63922e26af7SPaulo Zanoni 	} else {
640a10234fdSTvrtko Ursulin 		drm_err(&i915->drm, "Tile not supported: 0x%08x\n",
641a10234fdSTvrtko Ursulin 			master_tile_ctl);
64222e26af7SPaulo Zanoni 		dg1_master_intr_enable(regs);
64322e26af7SPaulo Zanoni 		return IRQ_NONE;
64422e26af7SPaulo Zanoni 	}
64522e26af7SPaulo Zanoni 
64622e26af7SPaulo Zanoni 	gen11_gt_irq_handler(gt, master_ctl);
64722e26af7SPaulo Zanoni 
64822e26af7SPaulo Zanoni 	if (master_ctl & GEN11_DISPLAY_IRQ)
64922e26af7SPaulo Zanoni 		gen11_display_irq_handler(i915);
65022e26af7SPaulo Zanoni 
	/* Ack GU misc before re-enabling master; handle it afterwards. */
651ddcf980fSAnusha Srivatsa 	gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl);
65222e26af7SPaulo Zanoni 
65322e26af7SPaulo Zanoni 	dg1_master_intr_enable(regs);
65422e26af7SPaulo Zanoni 
655ddcf980fSAnusha Srivatsa 	gen11_gu_misc_irq_handler(i915, gu_misc_iir);
65622e26af7SPaulo Zanoni 
65722e26af7SPaulo Zanoni 	pmu_irq_stats(i915, IRQ_HANDLED);
65822e26af7SPaulo Zanoni 
65922e26af7SPaulo Zanoni 	return IRQ_HANDLED;
66097b492f5SLucas De Marchi }
66197b492f5SLucas De Marchi 
/*
 * Reset the south display engine (PCH/SDE) interrupt registers.
 * No-op on PCH_NOP platforms; CPT/LPT additionally clear SERR_INT.
 */
ibx_irq_reset(struct drm_i915_private * dev_priv)662b243f530STvrtko Ursulin static void ibx_irq_reset(struct drm_i915_private *dev_priv)
66391738a95SPaulo Zanoni {
664b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
665b16b2a2fSPaulo Zanoni 
6666e266956STvrtko Ursulin 	if (HAS_PCH_NOP(dev_priv))
66791738a95SPaulo Zanoni 		return;
66891738a95SPaulo Zanoni 
669b16b2a2fSPaulo Zanoni 	GEN3_IRQ_RESET(uncore, SDE);
670105b122eSPaulo Zanoni 
6716e266956STvrtko Ursulin 	if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
6722939eb06SJani Nikula 		intel_uncore_write(&dev_priv->uncore, SERR_INT, 0xffffffff);
673622364b6SPaulo Zanoni }
674105b122eSPaulo Zanoni 
6758bb61306SVille Syrjälä /* drm_dma.h hooks
6768bb61306SVille Syrjälä */
/*
 * Ironlake-era IRQ reset: clear the north display (DE) interrupt
 * registers, error/PSR state where the hardware has it, then reset the
 * GT and the PCH (south) interrupts.
 */
ilk_irq_reset(struct drm_i915_private * dev_priv)6779eae5e27SLucas De Marchi static void ilk_irq_reset(struct drm_i915_private *dev_priv)
6788bb61306SVille Syrjälä {
679b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
6808bb61306SVille Syrjälä 
681b16b2a2fSPaulo Zanoni 	GEN3_IRQ_RESET(uncore, DE);
	/* Mask everything until postinstall unmasks what we want. */
682e44adb5dSChris Wilson 	dev_priv->irq_mask = ~0u;
683e44adb5dSChris Wilson 
684651e7d48SLucas De Marchi 	if (GRAPHICS_VER(dev_priv) == 7)
685f0818984STvrtko Ursulin 		intel_uncore_write(uncore, GEN7_ERR_INT, 0xffffffff);
6868bb61306SVille Syrjälä 
687fc340442SDaniel Vetter 	if (IS_HASWELL(dev_priv)) {
688f0818984STvrtko Ursulin 		intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
689f0818984STvrtko Ursulin 		intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
690fc340442SDaniel Vetter 	}
691fc340442SDaniel Vetter 
6922cbc876dSMichał Winiarski 	gen5_gt_irq_reset(to_gt(dev_priv));
6938bb61306SVille Syrjälä 
694b243f530STvrtko Ursulin 	ibx_irq_reset(dev_priv);
6958bb61306SVille Syrjälä }
6968bb61306SVille Syrjälä 
/*
 * Valleyview IRQ reset: kill the master enable first, reset the GT,
 * then reset display interrupts under irq_lock (display IRQ enabling is
 * tracked separately via display_irqs_enabled).
 */
valleyview_irq_reset(struct drm_i915_private * dev_priv)697b318b824SVille Syrjälä static void valleyview_irq_reset(struct drm_i915_private *dev_priv)
6987e231dbeSJesse Barnes {
6992939eb06SJani Nikula 	intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, 0);
7002939eb06SJani Nikula 	intel_uncore_posting_read(&dev_priv->uncore, VLV_MASTER_IER);
70134c7b8a7SVille Syrjälä 
7022cbc876dSMichał Winiarski 	gen5_gt_irq_reset(to_gt(dev_priv));
7037e231dbeSJesse Barnes 
704ad22d106SVille Syrjälä 	spin_lock_irq(&dev_priv->irq_lock);
7059918271eSVille Syrjälä 	if (dev_priv->display_irqs_enabled)
70670591a41SVille Syrjälä 		vlv_display_irq_reset(dev_priv);
707ad22d106SVille Syrjälä 	spin_unlock_irq(&dev_priv->irq_lock);
7087e231dbeSJesse Barnes }
7097e231dbeSJesse Barnes 
/*
 * Gen8 IRQ reset: disable the master interrupt, reset GT, display and
 * PCU interrupt registers, and the PCH interrupts on split-PCH parts.
 */
gen8_irq_reset(struct drm_i915_private * dev_priv)710a844cfbeSJosé Roberto de Souza static void gen8_irq_reset(struct drm_i915_private *dev_priv)
711a844cfbeSJosé Roberto de Souza {
712a844cfbeSJosé Roberto de Souza 	struct intel_uncore *uncore = &dev_priv->uncore;
713a844cfbeSJosé Roberto de Souza 
71472e9abc3SJani Nikula 	gen8_master_intr_disable(intel_uncore_regs(uncore));
715a844cfbeSJosé Roberto de Souza 
7162cbc876dSMichał Winiarski 	gen8_gt_irq_reset(to_gt(dev_priv));
717a844cfbeSJosé Roberto de Souza 	gen8_display_irq_reset(dev_priv);
718b16b2a2fSPaulo Zanoni 	GEN3_IRQ_RESET(uncore, GEN8_PCU_);
719abd58f01SBen Widawsky 
7206e266956STvrtko Ursulin 	if (HAS_PCH_SPLIT(dev_priv))
721b243f530STvrtko Ursulin 		ibx_irq_reset(dev_priv);
72259b7cb44STejas Upadhyay 
723abd58f01SBen Widawsky }
724abd58f01SBen Widawsky 
/*
 * Gen11 IRQ reset: disable the graphics master interrupt, then reset
 * GT, display, GU-misc and PCU interrupt registers.
 */
gen11_irq_reset(struct drm_i915_private * dev_priv)725a3265d85SMatt Roper static void gen11_irq_reset(struct drm_i915_private *dev_priv)
726a3265d85SMatt Roper {
7272cbc876dSMichał Winiarski 	struct intel_gt *gt = to_gt(dev_priv);
728fd4d7904SPaulo Zanoni 	struct intel_uncore *uncore = gt->uncore;
729a3265d85SMatt Roper 
73072e9abc3SJani Nikula 	gen11_master_intr_disable(intel_uncore_regs(&dev_priv->uncore));
731a3265d85SMatt Roper 
732fd4d7904SPaulo Zanoni 	gen11_gt_irq_reset(gt);
733a3265d85SMatt Roper 	gen11_display_irq_reset(dev_priv);
734a3265d85SMatt Roper 
735a3265d85SMatt Roper 	GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_);
736a3265d85SMatt Roper 	GEN3_IRQ_RESET(uncore, GEN8_PCU_);
737a3265d85SMatt Roper }
738a3265d85SMatt Roper 
/*
 * DG1 IRQ reset: disable the tile master interrupt, reset the GT
 * interrupts on every GT (multi-tile aware), then display, GU-misc and
 * PCU interrupt registers.
 */
dg1_irq_reset(struct drm_i915_private * dev_priv)73922e26af7SPaulo Zanoni static void dg1_irq_reset(struct drm_i915_private *dev_priv)
74022e26af7SPaulo Zanoni {
741d1f3b5e9SAndi Shyti 	struct intel_uncore *uncore = &dev_priv->uncore;
742d1f3b5e9SAndi Shyti 	struct intel_gt *gt;
743d1f3b5e9SAndi Shyti 	unsigned int i;
74422e26af7SPaulo Zanoni 
74572e9abc3SJani Nikula 	dg1_master_intr_disable(intel_uncore_regs(&dev_priv->uncore));
74622e26af7SPaulo Zanoni 
747d1f3b5e9SAndi Shyti 	for_each_gt(gt, dev_priv, i)
748fd4d7904SPaulo Zanoni 		gen11_gt_irq_reset(gt);
749d1f3b5e9SAndi Shyti 
75022e26af7SPaulo Zanoni 	gen11_display_irq_reset(dev_priv);
75122e26af7SPaulo Zanoni 
75222e26af7SPaulo Zanoni 	GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_);
75322e26af7SPaulo Zanoni 	GEN3_IRQ_RESET(uncore, GEN8_PCU_);
75422e26af7SPaulo Zanoni }
75522e26af7SPaulo Zanoni 
/*
 * Cherryview IRQ reset: clear the gen8-style master enable, reset GT
 * and PCU interrupts, then the VLV-style display interrupts under
 * irq_lock if they were enabled.
 */
cherryview_irq_reset(struct drm_i915_private * dev_priv)756b318b824SVille Syrjälä static void cherryview_irq_reset(struct drm_i915_private *dev_priv)
75743f328d7SVille Syrjälä {
758b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
75943f328d7SVille Syrjälä 
760e58c2cacSAndrzej Hajda 	intel_uncore_write(uncore, GEN8_MASTER_IRQ, 0);
7612939eb06SJani Nikula 	intel_uncore_posting_read(&dev_priv->uncore, GEN8_MASTER_IRQ);
76243f328d7SVille Syrjälä 
7632cbc876dSMichał Winiarski 	gen8_gt_irq_reset(to_gt(dev_priv));
76443f328d7SVille Syrjälä 
765b16b2a2fSPaulo Zanoni 	GEN3_IRQ_RESET(uncore, GEN8_PCU_);
76643f328d7SVille Syrjälä 
767ad22d106SVille Syrjälä 	spin_lock_irq(&dev_priv->irq_lock);
7689918271eSVille Syrjälä 	if (dev_priv->display_irqs_enabled)
76970591a41SVille Syrjälä 		vlv_display_irq_reset(dev_priv);
770ad22d106SVille Syrjälä 	spin_unlock_irq(&dev_priv->irq_lock);
77143f328d7SVille Syrjälä }
77243f328d7SVille Syrjälä 
/* Ironlake IRQ postinstall: program GT then display engine interrupts. */
ilk_irq_postinstall(struct drm_i915_private * dev_priv)7739eae5e27SLucas De Marchi static void ilk_irq_postinstall(struct drm_i915_private *dev_priv)
774036a4a7dSZhenyu Wang {
7752cbc876dSMichał Winiarski 	gen5_gt_irq_postinstall(to_gt(dev_priv));
776a9922912SVille Syrjälä 
777*fcc02c75SJani Nikula 	ilk_de_irq_postinstall(dev_priv);
778036a4a7dSZhenyu Wang }
779036a4a7dSZhenyu Wang 
/*
 * Valleyview IRQ postinstall: set up GT and (if enabled) display
 * interrupts, then turn on the master enable last so nothing fires
 * before the sub-units are programmed.
 */
valleyview_irq_postinstall(struct drm_i915_private * dev_priv)780b318b824SVille Syrjälä static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv)
7810e6c9a9eSVille Syrjälä {
7822cbc876dSMichał Winiarski 	gen5_gt_irq_postinstall(to_gt(dev_priv));
7837e231dbeSJesse Barnes 
784ad22d106SVille Syrjälä 	spin_lock_irq(&dev_priv->irq_lock);
7859918271eSVille Syrjälä 	if (dev_priv->display_irqs_enabled)
786ad22d106SVille Syrjälä 		vlv_display_irq_postinstall(dev_priv);
787ad22d106SVille Syrjälä 	spin_unlock_irq(&dev_priv->irq_lock);
788ad22d106SVille Syrjälä 
7892939eb06SJani Nikula 	intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
7902939eb06SJani Nikula 	intel_uncore_posting_read(&dev_priv->uncore, VLV_MASTER_IER);
79120afbda2SDaniel Vetter }
79220afbda2SDaniel Vetter 
/* Gen8 IRQ postinstall: program GT and display, then enable the master. */
gen8_irq_postinstall(struct drm_i915_private * dev_priv)793b318b824SVille Syrjälä static void gen8_irq_postinstall(struct drm_i915_private *dev_priv)
794abd58f01SBen Widawsky {
7952cbc876dSMichał Winiarski 	gen8_gt_irq_postinstall(to_gt(dev_priv));
796abd58f01SBen Widawsky 	gen8_de_irq_postinstall(dev_priv);
797abd58f01SBen Widawsky 
79872e9abc3SJani Nikula 	gen8_master_intr_enable(intel_uncore_regs(&dev_priv->uncore));
799abd58f01SBen Widawsky }
800abd58f01SBen Widawsky 
/*
 * Gen11 IRQ postinstall: program GT, display and GU-misc (GSE only)
 * interrupts, then enable the graphics master and flush with a posting
 * read.
 */
gen11_irq_postinstall(struct drm_i915_private * dev_priv)801b318b824SVille Syrjälä static void gen11_irq_postinstall(struct drm_i915_private *dev_priv)
80251951ae7SMika Kuoppala {
8032cbc876dSMichał Winiarski 	struct intel_gt *gt = to_gt(dev_priv);
804fd4d7904SPaulo Zanoni 	struct intel_uncore *uncore = gt->uncore;
805df0d28c1SDhinakaran Pandiyan 	u32 gu_misc_masked = GEN11_GU_MISC_GSE;
80651951ae7SMika Kuoppala 
807fd4d7904SPaulo Zanoni 	gen11_gt_irq_postinstall(gt);
808a844cfbeSJosé Roberto de Souza 	gen11_de_irq_postinstall(dev_priv);
80951951ae7SMika Kuoppala 
810b16b2a2fSPaulo Zanoni 	GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);
811df0d28c1SDhinakaran Pandiyan 
81272e9abc3SJani Nikula 	gen11_master_intr_enable(intel_uncore_regs(uncore));
8132939eb06SJani Nikula 	intel_uncore_posting_read(&dev_priv->uncore, GEN11_GFX_MSTR_IRQ);
81451951ae7SMika Kuoppala }
81522e26af7SPaulo Zanoni 
/*
 * DG1 IRQ postinstall: program GT interrupts on every GT (multi-tile
 * aware), GU-misc (GSE only) and display interrupts, then enable the
 * tile master and flush with a posting read.
 */
dg1_irq_postinstall(struct drm_i915_private * dev_priv)81622e26af7SPaulo Zanoni static void dg1_irq_postinstall(struct drm_i915_private *dev_priv)
81722e26af7SPaulo Zanoni {
818d1f3b5e9SAndi Shyti 	struct intel_uncore *uncore = &dev_priv->uncore;
81922e26af7SPaulo Zanoni 	u32 gu_misc_masked = GEN11_GU_MISC_GSE;
820d1f3b5e9SAndi Shyti 	struct intel_gt *gt;
821d1f3b5e9SAndi Shyti 	unsigned int i;
82222e26af7SPaulo Zanoni 
823d1f3b5e9SAndi Shyti 	for_each_gt(gt, dev_priv, i)
824fd4d7904SPaulo Zanoni 		gen11_gt_irq_postinstall(gt);
82522e26af7SPaulo Zanoni 
82622e26af7SPaulo Zanoni 	GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);
82722e26af7SPaulo Zanoni 
8281007337fSJani Nikula 	dg1_de_irq_postinstall(dev_priv);
82922e26af7SPaulo Zanoni 
83072e9abc3SJani Nikula 	dg1_master_intr_enable(intel_uncore_regs(uncore));
831fd4d7904SPaulo Zanoni 	intel_uncore_posting_read(uncore, DG1_MSTR_TILE_INTR);
83297b492f5SLucas De Marchi }
83351951ae7SMika Kuoppala 
/*
 * Cherryview IRQ postinstall: gen8-style GT setup, VLV-style display
 * setup under irq_lock, master enable last with a posting read.
 */
cherryview_irq_postinstall(struct drm_i915_private * dev_priv)834b318b824SVille Syrjälä static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv)
83543f328d7SVille Syrjälä {
8362cbc876dSMichał Winiarski 	gen8_gt_irq_postinstall(to_gt(dev_priv));
83743f328d7SVille Syrjälä 
838ad22d106SVille Syrjälä 	spin_lock_irq(&dev_priv->irq_lock);
8399918271eSVille Syrjälä 	if (dev_priv->display_irqs_enabled)
840ad22d106SVille Syrjälä 		vlv_display_irq_postinstall(dev_priv);
841ad22d106SVille Syrjälä 	spin_unlock_irq(&dev_priv->irq_lock);
842ad22d106SVille Syrjälä 
8432939eb06SJani Nikula 	intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
8442939eb06SJani Nikula 	intel_uncore_posting_read(&dev_priv->uncore, GEN8_MASTER_IRQ);
84543f328d7SVille Syrjälä }
84643f328d7SVille Syrjälä 
/* Gen2 IRQ reset: clear pipestat state and the GEN2 IMR/IER/IIR set. */
i8xx_irq_reset(struct drm_i915_private * dev_priv)847b318b824SVille Syrjälä static void i8xx_irq_reset(struct drm_i915_private *dev_priv)
848c2798b19SChris Wilson {
849b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
850c2798b19SChris Wilson 
85144d9241eSVille Syrjälä 	i9xx_pipestat_irq_reset(dev_priv);
85244d9241eSVille Syrjälä 
853ad7632ffSJani Nikula 	gen2_irq_reset(uncore);
	/* Mask everything until postinstall unmasks what we want. */
854e44adb5dSChris Wilson 	dev_priv->irq_mask = ~0u;
855c2798b19SChris Wilson }
856c2798b19SChris Wilson 
/*
 * Build the EMR (error mask) value for gen2/3: mask off page table
 * errors on FBC-capable parts where they are spuriously generated
 * (details in the comment below). Returns the value to write to EMR.
 */
i9xx_error_mask(struct drm_i915_private * i915)8573687ce75SVille Syrjälä static u32 i9xx_error_mask(struct drm_i915_private *i915)
8583687ce75SVille Syrjälä {
859e7e12f6eSVille Syrjälä 	/*
860e7e12f6eSVille Syrjälä 	 * On gen2/3 FBC generates (seemingly spurious)
861e7e12f6eSVille Syrjälä 	 * display INVALID_GTT/INVALID_GTT_PTE table errors.
862e7e12f6eSVille Syrjälä 	 *
863e7e12f6eSVille Syrjälä 	 * Also gen3 bspec has this to say:
864e7e12f6eSVille Syrjälä 	 * "DISPA_INVALID_GTT_PTE
865e7e12f6eSVille Syrjälä 	 "  [DevNapa] : Reserved. This bit does not reflect the page
866e7e12f6eSVille Syrjälä 	 "              table error for the display plane A."
867e7e12f6eSVille Syrjälä 	 *
868e7e12f6eSVille Syrjälä 	 * Unfortunately we can't mask off individual PGTBL_ER bits,
869e7e12f6eSVille Syrjälä 	 * so we just have to mask off all page table errors via EMR.
870e7e12f6eSVille Syrjälä 	 */
871e7e12f6eSVille Syrjälä 	if (HAS_FBC(i915))
872e7e12f6eSVille Syrjälä 		return ~I915_ERROR_MEMORY_REFRESH;
873e7e12f6eSVille Syrjälä 	else
8743687ce75SVille Syrjälä 		return ~(I915_ERROR_PAGE_TABLE |
8753687ce75SVille Syrjälä 			 I915_ERROR_MEMORY_REFRESH);
8763687ce75SVille Syrjälä }
8773687ce75SVille Syrjälä 
/*
 * Gen2 IRQ postinstall: program the error mask, unmask the always-on
 * interrupts (pipe events, master error), enable the user interrupt in
 * IER, and enable CRC-done pipestat events on both pipes.
 */
i8xx_irq_postinstall(struct drm_i915_private * dev_priv)878b318b824SVille Syrjälä static void i8xx_irq_postinstall(struct drm_i915_private *dev_priv)
879c2798b19SChris Wilson {
880b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
881e9e9848aSVille Syrjälä 	u16 enable_mask;
882c2798b19SChris Wilson 
8833687ce75SVille Syrjälä 	intel_uncore_write16(uncore, EMR, i9xx_error_mask(dev_priv));
884c2798b19SChris Wilson 
885c2798b19SChris Wilson 	/* Unmask the interrupts that we always want on. */
886c2798b19SChris Wilson 	dev_priv->irq_mask =
887c2798b19SChris Wilson 		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
88816659bc5SVille Syrjälä 		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
88916659bc5SVille Syrjälä 		  I915_MASTER_ERROR_INTERRUPT);
890c2798b19SChris Wilson 
891e9e9848aSVille Syrjälä 	enable_mask =
892c2798b19SChris Wilson 		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
893c2798b19SChris Wilson 		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
89416659bc5SVille Syrjälä 		I915_MASTER_ERROR_INTERRUPT |
895e9e9848aSVille Syrjälä 		I915_USER_INTERRUPT;
896e9e9848aSVille Syrjälä 
897ad7632ffSJani Nikula 	gen2_irq_init(uncore, dev_priv->irq_mask, enable_mask);
898c2798b19SChris Wilson 
899379ef82dSDaniel Vetter 	/* Interrupt setup is already guaranteed to be single-threaded, this is
900379ef82dSDaniel Vetter 	 * just to make the assert_spin_locked check happy. */
901d6207435SDaniel Vetter 	spin_lock_irq(&dev_priv->irq_lock);
902755e9019SImre Deak 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
903755e9019SImre Deak 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
904d6207435SDaniel Vetter 	spin_unlock_irq(&dev_priv->irq_lock);
905c2798b19SChris Wilson }
906c2798b19SChris Wilson 
/*
 * Read and ack the 16-bit EIR. @eir gets the raw error bits; @eir_stuck
 * gets the bits that survived the ack (errors the hardware won't clear
 * until the underlying condition is handled). Stuck bits are masked in
 * EMR via a toggle so the ISR master error bit can edge again later.
 */
i8xx_error_irq_ack(struct drm_i915_private * i915,u16 * eir,u16 * eir_stuck)9074f5fd91fSTvrtko Ursulin static void i8xx_error_irq_ack(struct drm_i915_private *i915,
90878c357ddSVille Syrjälä 			       u16 *eir, u16 *eir_stuck)
90978c357ddSVille Syrjälä {
9104f5fd91fSTvrtko Ursulin 	struct intel_uncore *uncore = &i915->uncore;
91178c357ddSVille Syrjälä 	u16 emr;
91278c357ddSVille Syrjälä 
9134f5fd91fSTvrtko Ursulin 	*eir = intel_uncore_read16(uncore, EIR);
9144f5fd91fSTvrtko Ursulin 	intel_uncore_write16(uncore, EIR, *eir);
91578c357ddSVille Syrjälä 
9164f5fd91fSTvrtko Ursulin 	*eir_stuck = intel_uncore_read16(uncore, EIR);
91778c357ddSVille Syrjälä 	if (*eir_stuck == 0)
91878c357ddSVille Syrjälä 		return;
91978c357ddSVille Syrjälä 
92078c357ddSVille Syrjälä 	/*
92178c357ddSVille Syrjälä 	 * Toggle all EMR bits to make sure we get an edge
92278c357ddSVille Syrjälä 	 * in the ISR master error bit if we don't clear
92378c357ddSVille Syrjälä 	 * all the EIR bits. Otherwise the edge triggered
92478c357ddSVille Syrjälä 	 * IIR on i965/g4x wouldn't notice that an interrupt
92578c357ddSVille Syrjälä 	 * is still pending. Also some EIR bits can't be
92678c357ddSVille Syrjälä 	 * cleared except by handling the underlying error
92778c357ddSVille Syrjälä 	 * (or by a GPU reset) so we mask any bit that
92878c357ddSVille Syrjälä 	 * remains set.
92978c357ddSVille Syrjälä 	 */
9304f5fd91fSTvrtko Ursulin 	emr = intel_uncore_read16(uncore, EMR);
9314f5fd91fSTvrtko Ursulin 	intel_uncore_write16(uncore, EMR, 0xffff);
9324f5fd91fSTvrtko Ursulin 	intel_uncore_write16(uncore, EMR, emr | *eir_stuck);
93378c357ddSVille Syrjälä }
93478c357ddSVille Syrjälä 
/*
 * Log the 16-bit master error state captured by i8xx_error_irq_ack():
 * the raw EIR, any stuck (now-masked) bits, and the page table error
 * register for diagnosis. Debug-level only; no recovery is attempted.
 */
i8xx_error_irq_handler(struct drm_i915_private * dev_priv,u16 eir,u16 eir_stuck)93578c357ddSVille Syrjälä static void i8xx_error_irq_handler(struct drm_i915_private *dev_priv,
93678c357ddSVille Syrjälä 				   u16 eir, u16 eir_stuck)
93778c357ddSVille Syrjälä {
938a10234fdSTvrtko Ursulin 	drm_dbg(&dev_priv->drm, "Master Error: EIR 0x%04x\n", eir);
93978c357ddSVille Syrjälä 
94078c357ddSVille Syrjälä 	if (eir_stuck)
94100376ccfSWambui Karuga 		drm_dbg(&dev_priv->drm, "EIR stuck: 0x%04x, masked\n",
94200376ccfSWambui Karuga 			eir_stuck);
943d1e89592SVille Syrjälä 
944d1e89592SVille Syrjälä 	drm_dbg(&dev_priv->drm, "PGTBL_ER: 0x%08x\n",
945d1e89592SVille Syrjälä 		intel_uncore_read(&dev_priv->uncore, PGTBL_ER));
94678c357ddSVille Syrjälä }
94778c357ddSVille Syrjälä 
/*
 * 32-bit variant of i8xx_error_irq_ack(): read/ack EIR, report the raw
 * bits in @eir and the un-clearable bits in @eir_stuck, and mask stuck
 * bits in EMR via a full toggle so the ISR master error bit can edge
 * again for future errors.
 */
i9xx_error_irq_ack(struct drm_i915_private * dev_priv,u32 * eir,u32 * eir_stuck)94878c357ddSVille Syrjälä static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv,
94978c357ddSVille Syrjälä 			       u32 *eir, u32 *eir_stuck)
95078c357ddSVille Syrjälä {
95178c357ddSVille Syrjälä 	u32 emr;
95278c357ddSVille Syrjälä 
953839259b8SVille Syrjälä 	*eir = intel_uncore_read(&dev_priv->uncore, EIR);
954839259b8SVille Syrjälä 	intel_uncore_write(&dev_priv->uncore, EIR, *eir);
95578c357ddSVille Syrjälä 
9562939eb06SJani Nikula 	*eir_stuck = intel_uncore_read(&dev_priv->uncore, EIR);
95778c357ddSVille Syrjälä 	if (*eir_stuck == 0)
95878c357ddSVille Syrjälä 		return;
95978c357ddSVille Syrjälä 
96078c357ddSVille Syrjälä 	/*
96178c357ddSVille Syrjälä 	 * Toggle all EMR bits to make sure we get an edge
96278c357ddSVille Syrjälä 	 * in the ISR master error bit if we don't clear
96378c357ddSVille Syrjälä 	 * all the EIR bits. Otherwise the edge triggered
96478c357ddSVille Syrjälä 	 * IIR on i965/g4x wouldn't notice that an interrupt
96578c357ddSVille Syrjälä 	 * is still pending. Also some EIR bits can't be
96678c357ddSVille Syrjälä 	 * cleared except by handling the underlying error
96778c357ddSVille Syrjälä 	 * (or by a GPU reset) so we mask any bit that
96878c357ddSVille Syrjälä 	 * remains set.
96978c357ddSVille Syrjälä 	 */
970839259b8SVille Syrjälä 	emr = intel_uncore_read(&dev_priv->uncore, EMR);
971839259b8SVille Syrjälä 	intel_uncore_write(&dev_priv->uncore, EMR, 0xffffffff);
9722939eb06SJani Nikula 	intel_uncore_write(&dev_priv->uncore, EMR, emr | *eir_stuck);
97378c357ddSVille Syrjälä }
97478c357ddSVille Syrjälä 
/*
 * Log the 32-bit master error state captured by i9xx_error_irq_ack():
 * raw EIR, stuck (now-masked) bits, and PGTBL_ER. Debug-level only.
 */
i9xx_error_irq_handler(struct drm_i915_private * dev_priv,u32 eir,u32 eir_stuck)97578c357ddSVille Syrjälä static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv,
97678c357ddSVille Syrjälä 				   u32 eir, u32 eir_stuck)
97778c357ddSVille Syrjälä {
978a10234fdSTvrtko Ursulin 	drm_dbg(&dev_priv->drm, "Master Error, EIR 0x%08x\n", eir);
97978c357ddSVille Syrjälä 
98078c357ddSVille Syrjälä 	if (eir_stuck)
98100376ccfSWambui Karuga 		drm_dbg(&dev_priv->drm, "EIR stuck: 0x%08x, masked\n",
98200376ccfSWambui Karuga 			eir_stuck);
983d1e89592SVille Syrjälä 
984d1e89592SVille Syrjälä 	drm_dbg(&dev_priv->drm, "PGTBL_ER: 0x%08x\n",
985d1e89592SVille Syrjälä 		intel_uncore_read(&dev_priv->uncore, PGTBL_ER));
98678c357ddSVille Syrjälä }
98778c357ddSVille Syrjälä 
/*
 * Gen2 interrupt handler. Single pass over the 16-bit IIR: ack pipestat
 * and error sources before clearing IIR, then dispatch user interrupts
 * to the render engine and handle error/pipestat events afterwards.
 */
i8xx_irq_handler(int irq,void * arg)988ff1f525eSDaniel Vetter static irqreturn_t i8xx_irq_handler(int irq, void *arg)
989c2798b19SChris Wilson {
990b318b824SVille Syrjälä 	struct drm_i915_private *dev_priv = arg;
991af722d28SVille Syrjälä 	irqreturn_t ret = IRQ_NONE;
992c2798b19SChris Wilson 
9932dd2a883SImre Deak 	if (!intel_irqs_enabled(dev_priv))
9942dd2a883SImre Deak 		return IRQ_NONE;
9952dd2a883SImre Deak 
9961f814dacSImre Deak 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
9979102650fSDaniele Ceraolo Spurio 	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
9981f814dacSImre Deak 
	/* do/while(0): single iteration, structured like the looping i915/i965 handlers. */
999af722d28SVille Syrjälä 	do {
1000af722d28SVille Syrjälä 		u32 pipe_stats[I915_MAX_PIPES] = {};
100178c357ddSVille Syrjälä 		u16 eir = 0, eir_stuck = 0;
1002af722d28SVille Syrjälä 		u16 iir;
1003af722d28SVille Syrjälä 
10044f5fd91fSTvrtko Ursulin 		iir = intel_uncore_read16(&dev_priv->uncore, GEN2_IIR);
1005c2798b19SChris Wilson 		if (iir == 0)
1006af722d28SVille Syrjälä 			break;
1007c2798b19SChris Wilson 
1008af722d28SVille Syrjälä 		ret = IRQ_HANDLED;
1009c2798b19SChris Wilson 
1010eb64343cSVille Syrjälä 		/* Call regardless, as some status bits might not be
1011eb64343cSVille Syrjälä 		 * signalled in iir */
1012eb64343cSVille Syrjälä 		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
1013c2798b19SChris Wilson 
101478c357ddSVille Syrjälä 		if (iir & I915_MASTER_ERROR_INTERRUPT)
101578c357ddSVille Syrjälä 			i8xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
101678c357ddSVille Syrjälä 
10174f5fd91fSTvrtko Ursulin 		intel_uncore_write16(&dev_priv->uncore, GEN2_IIR, iir);
1018c2798b19SChris Wilson 
1019c2798b19SChris Wilson 		if (iir & I915_USER_INTERRUPT)
10202cbc876dSMichał Winiarski 			intel_engine_cs_irq(to_gt(dev_priv)->engine[RCS0], iir);
1021c2798b19SChris Wilson 
102278c357ddSVille Syrjälä 		if (iir & I915_MASTER_ERROR_INTERRUPT)
102378c357ddSVille Syrjälä 			i8xx_error_irq_handler(dev_priv, eir, eir_stuck);
1024af722d28SVille Syrjälä 
1025eb64343cSVille Syrjälä 		i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats);
1026af722d28SVille Syrjälä 	} while (0);
1027c2798b19SChris Wilson 
10289c6508b9SThomas Gleixner 	pmu_irq_stats(dev_priv, ret);
10299c6508b9SThomas Gleixner 
10309102650fSDaniele Ceraolo Spurio 	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
10311f814dacSImre Deak 
10321f814dacSImre Deak 	return ret;
1033c2798b19SChris Wilson }
1034c2798b19SChris Wilson 
/*
 * Gen3 IRQ reset: clear hotplug state (where present), pipestat state
 * and the GEN2-style IMR/IER/IIR registers.
 */
i915_irq_reset(struct drm_i915_private * dev_priv)1035b318b824SVille Syrjälä static void i915_irq_reset(struct drm_i915_private *dev_priv)
1036a266c7d5SChris Wilson {
1037b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
1038a266c7d5SChris Wilson 
103956b857a5STvrtko Ursulin 	if (I915_HAS_HOTPLUG(dev_priv)) {
10400706f17cSEgbert Eich 		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
		/* rmw with 0/0: write-back clears the sticky status bits. */
10418cee664dSAndrzej Hajda 		intel_uncore_rmw(&dev_priv->uncore, PORT_HOTPLUG_STAT, 0, 0);
1042a266c7d5SChris Wilson 	}
1043a266c7d5SChris Wilson 
104444d9241eSVille Syrjälä 	i9xx_pipestat_irq_reset(dev_priv);
104544d9241eSVille Syrjälä 
1046b16b2a2fSPaulo Zanoni 	GEN3_IRQ_RESET(uncore, GEN2_);
	/* Mask everything until postinstall unmasks what we want. */
1047e44adb5dSChris Wilson 	dev_priv->irq_mask = ~0u;
1048a266c7d5SChris Wilson }
1049a266c7d5SChris Wilson 
/*
 * Gen3 IRQ postinstall: program the error mask, unmask the always-on
 * interrupts (ASLE, pipe events, master error), add the display port
 * interrupt on hotplug-capable parts, then enable CRC-done pipestat
 * events and the ASLE pipestat.
 */
i915_irq_postinstall(struct drm_i915_private * dev_priv)1050b318b824SVille Syrjälä static void i915_irq_postinstall(struct drm_i915_private *dev_priv)
1051a266c7d5SChris Wilson {
1052b16b2a2fSPaulo Zanoni 	struct intel_uncore *uncore = &dev_priv->uncore;
105338bde180SChris Wilson 	u32 enable_mask;
1054a266c7d5SChris Wilson 
10553687ce75SVille Syrjälä 	intel_uncore_write(uncore, EMR, i9xx_error_mask(dev_priv));
105638bde180SChris Wilson 
105738bde180SChris Wilson 	/* Unmask the interrupts that we always want on. */
105838bde180SChris Wilson 	dev_priv->irq_mask =
105938bde180SChris Wilson 		~(I915_ASLE_INTERRUPT |
106038bde180SChris Wilson 		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
106116659bc5SVille Syrjälä 		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
106216659bc5SVille Syrjälä 		  I915_MASTER_ERROR_INTERRUPT);
106338bde180SChris Wilson 
106438bde180SChris Wilson 	enable_mask =
106538bde180SChris Wilson 		I915_ASLE_INTERRUPT |
106638bde180SChris Wilson 		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
106738bde180SChris Wilson 		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
106816659bc5SVille Syrjälä 		I915_MASTER_ERROR_INTERRUPT |
106938bde180SChris Wilson 		I915_USER_INTERRUPT;
107038bde180SChris Wilson 
107156b857a5STvrtko Ursulin 	if (I915_HAS_HOTPLUG(dev_priv)) {
1072a266c7d5SChris Wilson 		/* Enable in IER... */
1073a266c7d5SChris Wilson 		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
1074a266c7d5SChris Wilson 		/* and unmask in IMR */
1075a266c7d5SChris Wilson 		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
1076a266c7d5SChris Wilson 	}
1077a266c7d5SChris Wilson 
1078b16b2a2fSPaulo Zanoni 	GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);
1079a266c7d5SChris Wilson 
1080379ef82dSDaniel Vetter 	/* Interrupt setup is already guaranteed to be single-threaded, this is
1081379ef82dSDaniel Vetter 	 * just to make the assert_spin_locked check happy. */
1082d6207435SDaniel Vetter 	spin_lock_irq(&dev_priv->irq_lock);
1083755e9019SImre Deak 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
1084755e9019SImre Deak 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
1085d6207435SDaniel Vetter 	spin_unlock_irq(&dev_priv->irq_lock);
1086379ef82dSDaniel Vetter 
1087c30bb1fdSVille Syrjälä 	i915_enable_asle_pipestat(dev_priv);
108820afbda2SDaniel Vetter }
108920afbda2SDaniel Vetter 
/*
 * Gen3 interrupt handler. Single pass over the 32-bit IIR: ack hotplug,
 * pipestat and error sources before clearing IIR, then dispatch user
 * interrupts to the render engine and handle error/hotplug/pipestat
 * events afterwards.
 */
i915_irq_handler(int irq,void * arg)1090ff1f525eSDaniel Vetter static irqreturn_t i915_irq_handler(int irq, void *arg)
1091a266c7d5SChris Wilson {
1092b318b824SVille Syrjälä 	struct drm_i915_private *dev_priv = arg;
1093af722d28SVille Syrjälä 	irqreturn_t ret = IRQ_NONE;
1094a266c7d5SChris Wilson 
10952dd2a883SImre Deak 	if (!intel_irqs_enabled(dev_priv))
10962dd2a883SImre Deak 		return IRQ_NONE;
10972dd2a883SImre Deak 
10981f814dacSImre Deak 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
10999102650fSDaniele Ceraolo Spurio 	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
11001f814dacSImre Deak 
	/* do/while(0): single iteration, keeps the ack/handle flow in one scope. */
110138bde180SChris Wilson 	do {
1102eb64343cSVille Syrjälä 		u32 pipe_stats[I915_MAX_PIPES] = {};
110378c357ddSVille Syrjälä 		u32 eir = 0, eir_stuck = 0;
1104af722d28SVille Syrjälä 		u32 hotplug_status = 0;
1105af722d28SVille Syrjälä 		u32 iir;
1106a266c7d5SChris Wilson 
11072939eb06SJani Nikula 		iir = intel_uncore_read(&dev_priv->uncore, GEN2_IIR);
1108af722d28SVille Syrjälä 		if (iir == 0)
1109af722d28SVille Syrjälä 			break;
1110af722d28SVille Syrjälä 
1111af722d28SVille Syrjälä 		ret = IRQ_HANDLED;
1112af722d28SVille Syrjälä 
1113af722d28SVille Syrjälä 		if (I915_HAS_HOTPLUG(dev_priv) &&
1114af722d28SVille Syrjälä 		    iir & I915_DISPLAY_PORT_INTERRUPT)
1115af722d28SVille Syrjälä 			hotplug_status = i9xx_hpd_irq_ack(dev_priv);
1116a266c7d5SChris Wilson 
1117eb64343cSVille Syrjälä 		/* Call regardless, as some status bits might not be
1118eb64343cSVille Syrjälä 		 * signalled in iir */
1119eb64343cSVille Syrjälä 		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
1120a266c7d5SChris Wilson 
112178c357ddSVille Syrjälä 		if (iir & I915_MASTER_ERROR_INTERRUPT)
112278c357ddSVille Syrjälä 			i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
112378c357ddSVille Syrjälä 
11242939eb06SJani Nikula 		intel_uncore_write(&dev_priv->uncore, GEN2_IIR, iir);
1125a266c7d5SChris Wilson 
1126a266c7d5SChris Wilson 		if (iir & I915_USER_INTERRUPT)
11272cbc876dSMichał Winiarski 			intel_engine_cs_irq(to_gt(dev_priv)->engine[RCS0], iir);
1128a266c7d5SChris Wilson 
112978c357ddSVille Syrjälä 		if (iir & I915_MASTER_ERROR_INTERRUPT)
113078c357ddSVille Syrjälä 			i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
1131a266c7d5SChris Wilson 
1132af722d28SVille Syrjälä 		if (hotplug_status)
1133af722d28SVille Syrjälä 			i9xx_hpd_irq_handler(dev_priv, hotplug_status);
1134af722d28SVille Syrjälä 
1135af722d28SVille Syrjälä 		i915_pipestat_irq_handler(dev_priv, iir, pipe_stats);
1136af722d28SVille Syrjälä 	} while (0);
1137a266c7d5SChris Wilson 
11389c6508b9SThomas Gleixner 	pmu_irq_stats(dev_priv, ret);
11399c6508b9SThomas Gleixner 
11409102650fSDaniele Ceraolo Spurio 	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
11411f814dacSImre Deak 
1142a266c7d5SChris Wilson 	return ret;
1143a266c7d5SChris Wilson }
1144a266c7d5SChris Wilson 
static void i965_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	/* Mask out all hotplug detection enables. */
	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
	/*
	 * Read-modify-write with no bits changed; presumably writing the
	 * latched bits back clears the sticky hotplug status — TODO confirm
	 * against the hardware spec.
	 */
	intel_uncore_rmw(uncore, PORT_HOTPLUG_STAT, 0, 0);

	i9xx_pipestat_irq_reset(dev_priv);

	/* Disable (IMR/IER) and ack (IIR) everything behind the GEN2_ registers. */
	GEN3_IRQ_RESET(uncore, GEN2_);
	/* Keep every source masked until i965_irq_postinstall() runs. */
	dev_priv->irq_mask = ~0u;
}
1157a266c7d5SChris Wilson 
i965_error_mask(struct drm_i915_private * i915)11583687ce75SVille Syrjälä static u32 i965_error_mask(struct drm_i915_private *i915)
1159a266c7d5SChris Wilson {
1160045cebd2SVille Syrjälä 	/*
1161045cebd2SVille Syrjälä 	 * Enable some error detection, note the instruction error mask
1162045cebd2SVille Syrjälä 	 * bit is reserved, so we leave it masked.
1163e7e12f6eSVille Syrjälä 	 *
1164e7e12f6eSVille Syrjälä 	 * i965 FBC no longer generates spurious GTT errors,
1165e7e12f6eSVille Syrjälä 	 * so we can always enable the page table errors.
1166045cebd2SVille Syrjälä 	 */
11673687ce75SVille Syrjälä 	if (IS_G4X(i915))
11683687ce75SVille Syrjälä 		return ~(GM45_ERROR_PAGE_TABLE |
1169045cebd2SVille Syrjälä 			 GM45_ERROR_MEM_PRIV |
1170045cebd2SVille Syrjälä 			 GM45_ERROR_CP_PRIV |
1171045cebd2SVille Syrjälä 			 I915_ERROR_MEMORY_REFRESH);
11723687ce75SVille Syrjälä 	else
11733687ce75SVille Syrjälä 		return ~(I915_ERROR_PAGE_TABLE |
1174045cebd2SVille Syrjälä 			 I915_ERROR_MEMORY_REFRESH);
1175045cebd2SVille Syrjälä }
11763687ce75SVille Syrjälä 
static void i965_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 enable_mask;

	/* Program error detection (EMR) before unmasking the error interrupt. */
	intel_uncore_write(uncore, EMR, i965_error_mask(dev_priv));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PORT_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_MASTER_ERROR_INTERRUPT);

	/* Everything unmasked above, plus the render user interrupt. */
	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PORT_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_MASTER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	/* G4x additionally has a BSD (video) ring. */
	if (IS_G4X(dev_priv))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	/* Write IMR/IER and ack any stale IIR bits in one go. */
	GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	i915_enable_asle_pipestat(dev_priv);
}
121520afbda2SDaniel Vetter 
i965_irq_handler(int irq,void * arg)1216ff1f525eSDaniel Vetter static irqreturn_t i965_irq_handler(int irq, void *arg)
1217a266c7d5SChris Wilson {
1218b318b824SVille Syrjälä 	struct drm_i915_private *dev_priv = arg;
1219af722d28SVille Syrjälä 	irqreturn_t ret = IRQ_NONE;
1220a266c7d5SChris Wilson 
12212dd2a883SImre Deak 	if (!intel_irqs_enabled(dev_priv))
12222dd2a883SImre Deak 		return IRQ_NONE;
12232dd2a883SImre Deak 
12241f814dacSImre Deak 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
12259102650fSDaniele Ceraolo Spurio 	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
12261f814dacSImre Deak 
1227af722d28SVille Syrjälä 	do {
1228eb64343cSVille Syrjälä 		u32 pipe_stats[I915_MAX_PIPES] = {};
122978c357ddSVille Syrjälä 		u32 eir = 0, eir_stuck = 0;
1230af722d28SVille Syrjälä 		u32 hotplug_status = 0;
1231af722d28SVille Syrjälä 		u32 iir;
12322c8ba29fSChris Wilson 
12332939eb06SJani Nikula 		iir = intel_uncore_read(&dev_priv->uncore, GEN2_IIR);
1234af722d28SVille Syrjälä 		if (iir == 0)
1235af722d28SVille Syrjälä 			break;
1236af722d28SVille Syrjälä 
1237af722d28SVille Syrjälä 		ret = IRQ_HANDLED;
1238af722d28SVille Syrjälä 
1239af722d28SVille Syrjälä 		if (iir & I915_DISPLAY_PORT_INTERRUPT)
1240af722d28SVille Syrjälä 			hotplug_status = i9xx_hpd_irq_ack(dev_priv);
1241a266c7d5SChris Wilson 
1242eb64343cSVille Syrjälä 		/* Call regardless, as some status bits might not be
1243eb64343cSVille Syrjälä 		 * signalled in iir */
1244eb64343cSVille Syrjälä 		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
1245a266c7d5SChris Wilson 
124678c357ddSVille Syrjälä 		if (iir & I915_MASTER_ERROR_INTERRUPT)
124778c357ddSVille Syrjälä 			i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
124878c357ddSVille Syrjälä 
12492939eb06SJani Nikula 		intel_uncore_write(&dev_priv->uncore, GEN2_IIR, iir);
1250a266c7d5SChris Wilson 
1251a266c7d5SChris Wilson 		if (iir & I915_USER_INTERRUPT)
12522cbc876dSMichał Winiarski 			intel_engine_cs_irq(to_gt(dev_priv)->engine[RCS0],
12530669a6e1SChris Wilson 					    iir);
1254af722d28SVille Syrjälä 
1255a266c7d5SChris Wilson 		if (iir & I915_BSD_USER_INTERRUPT)
12562cbc876dSMichał Winiarski 			intel_engine_cs_irq(to_gt(dev_priv)->engine[VCS0],
12570669a6e1SChris Wilson 					    iir >> 25);
1258a266c7d5SChris Wilson 
125978c357ddSVille Syrjälä 		if (iir & I915_MASTER_ERROR_INTERRUPT)
126078c357ddSVille Syrjälä 			i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
1261515ac2bbSDaniel Vetter 
1262af722d28SVille Syrjälä 		if (hotplug_status)
1263af722d28SVille Syrjälä 			i9xx_hpd_irq_handler(dev_priv, hotplug_status);
1264af722d28SVille Syrjälä 
1265af722d28SVille Syrjälä 		i965_pipestat_irq_handler(dev_priv, iir, pipe_stats);
1266af722d28SVille Syrjälä 	} while (0);
1267a266c7d5SChris Wilson 
12689c6508b9SThomas Gleixner 	pmu_irq_stats(dev_priv, IRQ_HANDLED);
12699c6508b9SThomas Gleixner 
12709102650fSDaniele Ceraolo Spurio 	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
12711f814dacSImre Deak 
1272a266c7d5SChris Wilson 	return ret;
1273a266c7d5SChris Wilson }
1274a266c7d5SChris Wilson 
1275fca52a55SDaniel Vetter /**
1276fca52a55SDaniel Vetter  * intel_irq_init - initializes irq support
1277fca52a55SDaniel Vetter  * @dev_priv: i915 device instance
1278fca52a55SDaniel Vetter  *
1279fca52a55SDaniel Vetter  * This function initializes all the irq support including work items, timers
1280fca52a55SDaniel Vetter  * and all the vtables. It does not setup the interrupt itself though.
1281fca52a55SDaniel Vetter  */
intel_irq_init(struct drm_i915_private * dev_priv)1282b963291cSDaniel Vetter void intel_irq_init(struct drm_i915_private *dev_priv)
1283f71d4af4SJesse Barnes {
1284cefcff8fSJoonas Lahtinen 	int i;
12858b2e326dSChris Wilson 
128674bb98baSLucas De Marchi 	INIT_WORK(&dev_priv->l3_parity.error_work, ivb_parity_work);
1287cefcff8fSJoonas Lahtinen 	for (i = 0; i < MAX_L3_SLICES; ++i)
1288cefcff8fSJoonas Lahtinen 		dev_priv->l3_parity.remap_info[i] = NULL;
12898b2e326dSChris Wilson 
1290633023a4SDaniele Ceraolo Spurio 	/* pre-gen11 the guc irqs bits are in the upper 16 bits of the pm reg */
1291651e7d48SLucas De Marchi 	if (HAS_GT_UC(dev_priv) && GRAPHICS_VER(dev_priv) < 11)
12922cbc876dSMichał Winiarski 		to_gt(dev_priv)->pm_guc_events = GUC_INTR_GUC2HOST << 16;
12932ccf2e03SChris Wilson }
129420afbda2SDaniel Vetter 
1295fca52a55SDaniel Vetter /**
1296cefcff8fSJoonas Lahtinen  * intel_irq_fini - deinitializes IRQ support
1297cefcff8fSJoonas Lahtinen  * @i915: i915 device instance
1298cefcff8fSJoonas Lahtinen  *
1299cefcff8fSJoonas Lahtinen  * This function deinitializes all the IRQ support.
1300cefcff8fSJoonas Lahtinen  */
intel_irq_fini(struct drm_i915_private * i915)1301cefcff8fSJoonas Lahtinen void intel_irq_fini(struct drm_i915_private *i915)
1302cefcff8fSJoonas Lahtinen {
1303cefcff8fSJoonas Lahtinen 	int i;
1304cefcff8fSJoonas Lahtinen 
1305cefcff8fSJoonas Lahtinen 	for (i = 0; i < MAX_L3_SLICES; ++i)
1306cefcff8fSJoonas Lahtinen 		kfree(i915->l3_parity.remap_info[i]);
1307cefcff8fSJoonas Lahtinen }
1308cefcff8fSJoonas Lahtinen 
intel_irq_handler(struct drm_i915_private * dev_priv)1309b318b824SVille Syrjälä static irq_handler_t intel_irq_handler(struct drm_i915_private *dev_priv)
1310b318b824SVille Syrjälä {
1311b318b824SVille Syrjälä 	if (HAS_GMCH(dev_priv)) {
1312b318b824SVille Syrjälä 		if (IS_CHERRYVIEW(dev_priv))
1313b318b824SVille Syrjälä 			return cherryview_irq_handler;
1314b318b824SVille Syrjälä 		else if (IS_VALLEYVIEW(dev_priv))
1315b318b824SVille Syrjälä 			return valleyview_irq_handler;
1316651e7d48SLucas De Marchi 		else if (GRAPHICS_VER(dev_priv) == 4)
1317b318b824SVille Syrjälä 			return i965_irq_handler;
1318651e7d48SLucas De Marchi 		else if (GRAPHICS_VER(dev_priv) == 3)
1319b318b824SVille Syrjälä 			return i915_irq_handler;
1320b318b824SVille Syrjälä 		else
1321b318b824SVille Syrjälä 			return i8xx_irq_handler;
1322b318b824SVille Syrjälä 	} else {
132322e26af7SPaulo Zanoni 		if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
132497b492f5SLucas De Marchi 			return dg1_irq_handler;
132522e26af7SPaulo Zanoni 		else if (GRAPHICS_VER(dev_priv) >= 11)
1326b318b824SVille Syrjälä 			return gen11_irq_handler;
1327651e7d48SLucas De Marchi 		else if (GRAPHICS_VER(dev_priv) >= 8)
1328b318b824SVille Syrjälä 			return gen8_irq_handler;
1329b318b824SVille Syrjälä 		else
13309eae5e27SLucas De Marchi 			return ilk_irq_handler;
1331b318b824SVille Syrjälä 	}
1332b318b824SVille Syrjälä }
1333b318b824SVille Syrjälä 
/* Dispatch to the platform-appropriate IRQ reset routine. */
static void intel_irq_reset(struct drm_i915_private *dev_priv)
{
	if (!HAS_GMCH(dev_priv)) {
		if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
			dg1_irq_reset(dev_priv);
		else if (GRAPHICS_VER(dev_priv) >= 11)
			gen11_irq_reset(dev_priv);
		else if (GRAPHICS_VER(dev_priv) >= 8)
			gen8_irq_reset(dev_priv);
		else
			ilk_irq_reset(dev_priv);
		return;
	}

	if (IS_CHERRYVIEW(dev_priv))
		cherryview_irq_reset(dev_priv);
	else if (IS_VALLEYVIEW(dev_priv))
		valleyview_irq_reset(dev_priv);
	else if (GRAPHICS_VER(dev_priv) == 4)
		i965_irq_reset(dev_priv);
	else if (GRAPHICS_VER(dev_priv) == 3)
		i915_irq_reset(dev_priv);
	else
		i8xx_irq_reset(dev_priv);
}
1358b318b824SVille Syrjälä 
/* Dispatch to the platform-appropriate IRQ postinstall routine. */
static void intel_irq_postinstall(struct drm_i915_private *dev_priv)
{
	if (!HAS_GMCH(dev_priv)) {
		if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
			dg1_irq_postinstall(dev_priv);
		else if (GRAPHICS_VER(dev_priv) >= 11)
			gen11_irq_postinstall(dev_priv);
		else if (GRAPHICS_VER(dev_priv) >= 8)
			gen8_irq_postinstall(dev_priv);
		else
			ilk_irq_postinstall(dev_priv);
		return;
	}

	if (IS_CHERRYVIEW(dev_priv))
		cherryview_irq_postinstall(dev_priv);
	else if (IS_VALLEYVIEW(dev_priv))
		valleyview_irq_postinstall(dev_priv);
	else if (GRAPHICS_VER(dev_priv) == 4)
		i965_irq_postinstall(dev_priv);
	else if (GRAPHICS_VER(dev_priv) == 3)
		i915_irq_postinstall(dev_priv);
	else
		i8xx_irq_postinstall(dev_priv);
}
1383b318b824SVille Syrjälä 
1384cefcff8fSJoonas Lahtinen /**
1385fca52a55SDaniel Vetter  * intel_irq_install - enables the hardware interrupt
1386fca52a55SDaniel Vetter  * @dev_priv: i915 device instance
1387fca52a55SDaniel Vetter  *
1388fca52a55SDaniel Vetter  * This function enables the hardware interrupt handling, but leaves the hotplug
1389fca52a55SDaniel Vetter  * handling still disabled. It is called after intel_irq_init().
1390fca52a55SDaniel Vetter  *
1391fca52a55SDaniel Vetter  * In the driver load and resume code we need working interrupts in a few places
1392fca52a55SDaniel Vetter  * but don't want to deal with the hassle of concurrent probe and hotplug
1393fca52a55SDaniel Vetter  * workers. Hence the split into this two-stage approach.
1394fca52a55SDaniel Vetter  */
int intel_irq_install(struct drm_i915_private *dev_priv)
{
	int irq = to_pci_dev(dev_priv->drm.dev)->irq;
	int ret;

	/*
	 * We enable some interrupt sources in our postinstall hooks, so mark
	 * interrupts as enabled _before_ actually enabling them to avoid
	 * special cases in our ordering checks.
	 */
	dev_priv->runtime_pm.irqs_enabled = true;

	dev_priv->irq_enabled = true;

	/* Quiesce the hardware before the handler can be invoked. */
	intel_irq_reset(dev_priv);

	/* The line may be shared with other devices, hence IRQF_SHARED. */
	ret = request_irq(irq, intel_irq_handler(dev_priv),
			  IRQF_SHARED, DRIVER_NAME, dev_priv);
	if (ret < 0) {
		/* Roll back the flag set above; return the request_irq error. */
		dev_priv->irq_enabled = false;
		return ret;
	}

	/* Unmask/enable interrupt sources now that the handler is live. */
	intel_irq_postinstall(dev_priv);

	return ret;
}
14222aeb7d3aSDaniel Vetter 
1423fca52a55SDaniel Vetter /**
1424fca52a55SDaniel Vetter  * intel_irq_uninstall - finilizes all irq handling
1425fca52a55SDaniel Vetter  * @dev_priv: i915 device instance
1426fca52a55SDaniel Vetter  *
1427fca52a55SDaniel Vetter  * This stops interrupt and hotplug handling and unregisters and frees all
1428fca52a55SDaniel Vetter  * resources acquired in the init functions.
1429fca52a55SDaniel Vetter  */
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
{
	int irq = to_pci_dev(dev_priv->drm.dev)->irq;

	/*
	 * FIXME we can get called twice during driver probe
	 * error handling as well as during driver remove due to
	 * intel_display_driver_remove() calling us out of sequence.
	 * Would be nice if it didn't do that...
	 */
	if (!dev_priv->irq_enabled)
		return;

	dev_priv->irq_enabled = false;

	/* Mask/disable everything before giving the line back. */
	intel_irq_reset(dev_priv);

	free_irq(irq, dev_priv);

	/* The last interrupt may have queued hotplug work; cancel it. */
	intel_hpd_cancel_work(dev_priv);
	dev_priv->runtime_pm.irqs_enabled = false;
}
14522aeb7d3aSDaniel Vetter 
1453fca52a55SDaniel Vetter /**
1454fca52a55SDaniel Vetter  * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
1455fca52a55SDaniel Vetter  * @dev_priv: i915 device instance
1456fca52a55SDaniel Vetter  *
1457fca52a55SDaniel Vetter  * This function is used to disable interrupts at runtime, both in the runtime
1458fca52a55SDaniel Vetter  * pm and the system suspend/resume code.
1459fca52a55SDaniel Vetter  */
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
{
	intel_irq_reset(dev_priv);
	dev_priv->runtime_pm.irqs_enabled = false;
	/* Wait for any in-flight handler invocation to finish. */
	intel_synchronize_irq(dev_priv);
}
1466c67a470bSPaulo Zanoni 
1467fca52a55SDaniel Vetter /**
1468fca52a55SDaniel Vetter  * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
1469fca52a55SDaniel Vetter  * @dev_priv: i915 device instance
1470fca52a55SDaniel Vetter  *
1471fca52a55SDaniel Vetter  * This function is used to enable interrupts at runtime, both in the runtime
1472fca52a55SDaniel Vetter  * pm and the system suspend/resume code.
1473fca52a55SDaniel Vetter  */
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->runtime_pm.irqs_enabled = true;
	/* Reset to a known state first, then re-enable the interrupt sources. */
	intel_irq_reset(dev_priv);
	intel_irq_postinstall(dev_priv);
}
1480d64575eeSJani Nikula 
/* True between interrupt install/runtime-enable and the matching disable. */
bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
{
	return dev_priv->runtime_pm.irqs_enabled;
}
1485d64575eeSJani Nikula 
/* Wait for pending handlers of our PCI device's IRQ line to complete. */
void intel_synchronize_irq(struct drm_i915_private *i915)
{
	synchronize_irq(to_pci_dev(i915->drm.dev)->irq);
}
1490320ad343SThomas Zimmermann 
/*
 * Like intel_synchronize_irq(), but only waits for the hard-IRQ part of
 * the handler (does not wait for threaded handlers).
 */
void intel_synchronize_hardirq(struct drm_i915_private *i915)
{
	synchronize_hardirq(to_pci_dev(i915->drm.dev)->irq);
}
1495