xref: /openbmc/linux/drivers/gpu/drm/i915/i915_irq.c (revision 9a32dd32)
1 /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
2  */
3 /*
4  * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
5  * All Rights Reserved.
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a
8  * copy of this software and associated documentation files (the
9  * "Software"), to deal in the Software without restriction, including
10  * without limitation the rights to use, copy, modify, merge, publish,
11  * distribute, sub license, and/or sell copies of the Software, and to
12  * permit persons to whom the Software is furnished to do so, subject to
13  * the following conditions:
14  *
15  * The above copyright notice and this permission notice (including the
16  * next paragraph) shall be included in all copies or substantial portions
17  * of the Software.
18  *
19  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22  * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26  *
27  */
28 
29 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30 
31 #include <linux/slab.h>
32 #include <linux/sysrq.h>
33 
34 #include <drm/drm_drv.h>
35 
36 #include "display/icl_dsi_regs.h"
37 #include "display/intel_de.h"
38 #include "display/intel_display_trace.h"
39 #include "display/intel_display_types.h"
40 #include "display/intel_fdi_regs.h"
41 #include "display/intel_fifo_underrun.h"
42 #include "display/intel_hotplug.h"
43 #include "display/intel_lpe_audio.h"
44 #include "display/intel_psr.h"
45 #include "display/intel_psr_regs.h"
46 
47 #include "gt/intel_breadcrumbs.h"
48 #include "gt/intel_gt.h"
49 #include "gt/intel_gt_irq.h"
50 #include "gt/intel_gt_pm_irq.h"
51 #include "gt/intel_gt_regs.h"
52 #include "gt/intel_rps.h"
53 
54 #include "i915_driver.h"
55 #include "i915_drv.h"
56 #include "i915_irq.h"
57 
58 /**
59  * DOC: interrupt handling
60  *
61  * These functions provide the basic support for enabling and disabling the
62  * interrupt handling support. There's a lot more functionality in i915_irq.c
63  * and related files, but that will be described in separate chapters.
64  */
65 
66 /*
67  * Interrupt statistic for PMU. Increments the counter only if the
68  * interrupt originated from the GPU so interrupts from a device which
69  * shares the interrupt line are not accounted.
70  */
71 static inline void pmu_irq_stats(struct drm_i915_private *i915,
72 				 irqreturn_t res)
73 {
74 	if (unlikely(res != IRQ_HANDLED))
75 		return;
76 
77 	/*
78 	 * A clever compiler translates that into INC. A not so clever one
79 	 * should at least prevent store tearing.
80 	 */
81 	WRITE_ONCE(i915->pmu.irq_count, i915->pmu.irq_count + 1);
82 }
83 
84 typedef bool (*long_pulse_detect_func)(enum hpd_pin pin, u32 val);
85 typedef u32 (*hotplug_enables_func)(struct intel_encoder *encoder);
86 
87 static const u32 hpd_ilk[HPD_NUM_PINS] = {
88 	[HPD_PORT_A] = DE_DP_A_HOTPLUG,
89 };
90 
91 static const u32 hpd_ivb[HPD_NUM_PINS] = {
92 	[HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
93 };
94 
95 static const u32 hpd_bdw[HPD_NUM_PINS] = {
96 	[HPD_PORT_A] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_A),
97 };
98 
99 static const u32 hpd_ibx[HPD_NUM_PINS] = {
100 	[HPD_CRT] = SDE_CRT_HOTPLUG,
101 	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
102 	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
103 	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
104 	[HPD_PORT_D] = SDE_PORTD_HOTPLUG,
105 };
106 
107 static const u32 hpd_cpt[HPD_NUM_PINS] = {
108 	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
109 	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
110 	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
111 	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
112 	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
113 };
114 
115 static const u32 hpd_spt[HPD_NUM_PINS] = {
116 	[HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
117 	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
118 	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
119 	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
120 	[HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT,
121 };
122 
123 static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
124 	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
125 	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
126 	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
127 	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
128 	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
129 	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN,
130 };
131 
132 static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
133 	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
134 	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
135 	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
136 	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
137 	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
138 	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS,
139 };
140 
141 static const u32 hpd_status_i915[HPD_NUM_PINS] = {
142 	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
143 	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
144 	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
145 	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
146 	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
147 	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS,
148 };
149 
150 static const u32 hpd_bxt[HPD_NUM_PINS] = {
151 	[HPD_PORT_A] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_A),
152 	[HPD_PORT_B] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_B),
153 	[HPD_PORT_C] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_C),
154 };
155 
156 static const u32 hpd_gen11[HPD_NUM_PINS] = {
157 	[HPD_PORT_TC1] = GEN11_TC_HOTPLUG(HPD_PORT_TC1) | GEN11_TBT_HOTPLUG(HPD_PORT_TC1),
158 	[HPD_PORT_TC2] = GEN11_TC_HOTPLUG(HPD_PORT_TC2) | GEN11_TBT_HOTPLUG(HPD_PORT_TC2),
159 	[HPD_PORT_TC3] = GEN11_TC_HOTPLUG(HPD_PORT_TC3) | GEN11_TBT_HOTPLUG(HPD_PORT_TC3),
160 	[HPD_PORT_TC4] = GEN11_TC_HOTPLUG(HPD_PORT_TC4) | GEN11_TBT_HOTPLUG(HPD_PORT_TC4),
161 	[HPD_PORT_TC5] = GEN11_TC_HOTPLUG(HPD_PORT_TC5) | GEN11_TBT_HOTPLUG(HPD_PORT_TC5),
162 	[HPD_PORT_TC6] = GEN11_TC_HOTPLUG(HPD_PORT_TC6) | GEN11_TBT_HOTPLUG(HPD_PORT_TC6),
163 };
164 
165 static const u32 hpd_icp[HPD_NUM_PINS] = {
166 	[HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_A),
167 	[HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_B),
168 	[HPD_PORT_C] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_C),
169 	[HPD_PORT_TC1] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC1),
170 	[HPD_PORT_TC2] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC2),
171 	[HPD_PORT_TC3] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC3),
172 	[HPD_PORT_TC4] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC4),
173 	[HPD_PORT_TC5] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC5),
174 	[HPD_PORT_TC6] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC6),
175 };
176 
177 static const u32 hpd_sde_dg1[HPD_NUM_PINS] = {
178 	[HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_A),
179 	[HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_B),
180 	[HPD_PORT_C] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_C),
181 	[HPD_PORT_D] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_D),
182 	[HPD_PORT_TC1] = SDE_TC_HOTPLUG_DG2(HPD_PORT_TC1),
183 };
184 
185 static void intel_hpd_init_pins(struct drm_i915_private *dev_priv)
186 {
187 	struct intel_hotplug *hpd = &dev_priv->display.hotplug;
188 
189 	if (HAS_GMCH(dev_priv)) {
190 		if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
191 		    IS_CHERRYVIEW(dev_priv))
192 			hpd->hpd = hpd_status_g4x;
193 		else
194 			hpd->hpd = hpd_status_i915;
195 		return;
196 	}
197 
198 	if (DISPLAY_VER(dev_priv) >= 11)
199 		hpd->hpd = hpd_gen11;
200 	else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
201 		hpd->hpd = hpd_bxt;
202 	else if (DISPLAY_VER(dev_priv) == 9)
203 		hpd->hpd = NULL; /* no north HPD on SKL */
204 	else if (DISPLAY_VER(dev_priv) >= 8)
205 		hpd->hpd = hpd_bdw;
206 	else if (DISPLAY_VER(dev_priv) >= 7)
207 		hpd->hpd = hpd_ivb;
208 	else
209 		hpd->hpd = hpd_ilk;
210 
211 	if ((INTEL_PCH_TYPE(dev_priv) < PCH_DG1) &&
212 	    (!HAS_PCH_SPLIT(dev_priv) || HAS_PCH_NOP(dev_priv)))
213 		return;
214 
215 	if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1)
216 		hpd->pch_hpd = hpd_sde_dg1;
217 	else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
218 		hpd->pch_hpd = hpd_icp;
219 	else if (HAS_PCH_CNP(dev_priv) || HAS_PCH_SPT(dev_priv))
220 		hpd->pch_hpd = hpd_spt;
221 	else if (HAS_PCH_LPT(dev_priv) || HAS_PCH_CPT(dev_priv))
222 		hpd->pch_hpd = hpd_cpt;
223 	else if (HAS_PCH_IBX(dev_priv))
224 		hpd->pch_hpd = hpd_ibx;
225 	else
226 		MISSING_CASE(INTEL_PCH_TYPE(dev_priv));
227 }
228 
229 static void
230 intel_handle_vblank(struct drm_i915_private *dev_priv, enum pipe pipe)
231 {
232 	struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);
233 
234 	drm_crtc_handle_vblank(&crtc->base);
235 }
236 
237 void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr,
238 		    i915_reg_t iir, i915_reg_t ier)
239 {
240 	intel_uncore_write(uncore, imr, 0xffffffff);
241 	intel_uncore_posting_read(uncore, imr);
242 
243 	intel_uncore_write(uncore, ier, 0);
244 
245 	/* IIR can theoretically queue up two events. Be paranoid. */
246 	intel_uncore_write(uncore, iir, 0xffffffff);
247 	intel_uncore_posting_read(uncore, iir);
248 	intel_uncore_write(uncore, iir, 0xffffffff);
249 	intel_uncore_posting_read(uncore, iir);
250 }
251 
252 static void gen2_irq_reset(struct intel_uncore *uncore)
253 {
254 	intel_uncore_write16(uncore, GEN2_IMR, 0xffff);
255 	intel_uncore_posting_read16(uncore, GEN2_IMR);
256 
257 	intel_uncore_write16(uncore, GEN2_IER, 0);
258 
259 	/* IIR can theoretically queue up two events. Be paranoid. */
260 	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
261 	intel_uncore_posting_read16(uncore, GEN2_IIR);
262 	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
263 	intel_uncore_posting_read16(uncore, GEN2_IIR);
264 }
265 
266 /*
267  * We should clear IMR at preinstall/uninstall, and just check at postinstall.
268  */
269 static void gen3_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg)
270 {
271 	u32 val = intel_uncore_read(uncore, reg);
272 
273 	if (val == 0)
274 		return;
275 
276 	drm_WARN(&uncore->i915->drm, 1,
277 		 "Interrupt register 0x%x is not zero: 0x%08x\n",
278 		 i915_mmio_reg_offset(reg), val);
279 	intel_uncore_write(uncore, reg, 0xffffffff);
280 	intel_uncore_posting_read(uncore, reg);
281 	intel_uncore_write(uncore, reg, 0xffffffff);
282 	intel_uncore_posting_read(uncore, reg);
283 }
284 
285 static void gen2_assert_iir_is_zero(struct intel_uncore *uncore)
286 {
287 	u16 val = intel_uncore_read16(uncore, GEN2_IIR);
288 
289 	if (val == 0)
290 		return;
291 
292 	drm_WARN(&uncore->i915->drm, 1,
293 		 "Interrupt register 0x%x is not zero: 0x%08x\n",
294 		 i915_mmio_reg_offset(GEN2_IIR), val);
295 	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
296 	intel_uncore_posting_read16(uncore, GEN2_IIR);
297 	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
298 	intel_uncore_posting_read16(uncore, GEN2_IIR);
299 }
300 
301 void gen3_irq_init(struct intel_uncore *uncore,
302 		   i915_reg_t imr, u32 imr_val,
303 		   i915_reg_t ier, u32 ier_val,
304 		   i915_reg_t iir)
305 {
306 	gen3_assert_iir_is_zero(uncore, iir);
307 
308 	intel_uncore_write(uncore, ier, ier_val);
309 	intel_uncore_write(uncore, imr, imr_val);
310 	intel_uncore_posting_read(uncore, imr);
311 }
312 
313 static void gen2_irq_init(struct intel_uncore *uncore,
314 			  u32 imr_val, u32 ier_val)
315 {
316 	gen2_assert_iir_is_zero(uncore);
317 
318 	intel_uncore_write16(uncore, GEN2_IER, ier_val);
319 	intel_uncore_write16(uncore, GEN2_IMR, imr_val);
320 	intel_uncore_posting_read16(uncore, GEN2_IMR);
321 }
322 
323 /* For display hotplug interrupt */
324 static inline void
325 i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
326 				     u32 mask,
327 				     u32 bits)
328 {
329 	lockdep_assert_held(&dev_priv->irq_lock);
330 	drm_WARN_ON(&dev_priv->drm, bits & ~mask);
331 
332 	intel_uncore_rmw(&dev_priv->uncore, PORT_HOTPLUG_EN, mask, bits);
333 }
334 
335 /**
336  * i915_hotplug_interrupt_update - update hotplug interrupt enable
337  * @dev_priv: driver private
338  * @mask: bits to update
339  * @bits: bits to enable
340  * NOTE: the HPD enable bits are modified both inside and outside
341  * of an interrupt context. To avoid that read-modify-write cycles
342  * interfer, these bits are protected by a spinlock. Since this
343  * function is usually not called from a context where the lock is
344  * held already, this function acquires the lock itself. A non-locking
345  * version is also available.
346  */
347 void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
348 				   u32 mask,
349 				   u32 bits)
350 {
351 	spin_lock_irq(&dev_priv->irq_lock);
352 	i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
353 	spin_unlock_irq(&dev_priv->irq_lock);
354 }
355 
356 /**
357  * ilk_update_display_irq - update DEIMR
358  * @dev_priv: driver private
359  * @interrupt_mask: mask of interrupt bits to update
360  * @enabled_irq_mask: mask of interrupt bits to enable
361  */
362 static void ilk_update_display_irq(struct drm_i915_private *dev_priv,
363 				   u32 interrupt_mask, u32 enabled_irq_mask)
364 {
365 	u32 new_val;
366 
367 	lockdep_assert_held(&dev_priv->irq_lock);
368 	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);
369 
370 	new_val = dev_priv->irq_mask;
371 	new_val &= ~interrupt_mask;
372 	new_val |= (~enabled_irq_mask & interrupt_mask);
373 
374 	if (new_val != dev_priv->irq_mask &&
375 	    !drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv))) {
376 		dev_priv->irq_mask = new_val;
377 		intel_uncore_write(&dev_priv->uncore, DEIMR, dev_priv->irq_mask);
378 		intel_uncore_posting_read(&dev_priv->uncore, DEIMR);
379 	}
380 }
381 
382 void ilk_enable_display_irq(struct drm_i915_private *i915, u32 bits)
383 {
384 	ilk_update_display_irq(i915, bits, bits);
385 }
386 
387 void ilk_disable_display_irq(struct drm_i915_private *i915, u32 bits)
388 {
389 	ilk_update_display_irq(i915, bits, 0);
390 }
391 
392 /**
393  * bdw_update_port_irq - update DE port interrupt
394  * @dev_priv: driver private
395  * @interrupt_mask: mask of interrupt bits to update
396  * @enabled_irq_mask: mask of interrupt bits to enable
397  */
398 static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
399 				u32 interrupt_mask,
400 				u32 enabled_irq_mask)
401 {
402 	u32 new_val;
403 	u32 old_val;
404 
405 	lockdep_assert_held(&dev_priv->irq_lock);
406 
407 	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);
408 
409 	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
410 		return;
411 
412 	old_val = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PORT_IMR);
413 
414 	new_val = old_val;
415 	new_val &= ~interrupt_mask;
416 	new_val |= (~enabled_irq_mask & interrupt_mask);
417 
418 	if (new_val != old_val) {
419 		intel_uncore_write(&dev_priv->uncore, GEN8_DE_PORT_IMR, new_val);
420 		intel_uncore_posting_read(&dev_priv->uncore, GEN8_DE_PORT_IMR);
421 	}
422 }
423 
424 /**
425  * bdw_update_pipe_irq - update DE pipe interrupt
426  * @dev_priv: driver private
427  * @pipe: pipe whose interrupt to update
428  * @interrupt_mask: mask of interrupt bits to update
429  * @enabled_irq_mask: mask of interrupt bits to enable
430  */
431 static void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
432 				enum pipe pipe, u32 interrupt_mask,
433 				u32 enabled_irq_mask)
434 {
435 	u32 new_val;
436 
437 	lockdep_assert_held(&dev_priv->irq_lock);
438 
439 	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);
440 
441 	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
442 		return;
443 
444 	new_val = dev_priv->de_irq_mask[pipe];
445 	new_val &= ~interrupt_mask;
446 	new_val |= (~enabled_irq_mask & interrupt_mask);
447 
448 	if (new_val != dev_priv->de_irq_mask[pipe]) {
449 		dev_priv->de_irq_mask[pipe] = new_val;
450 		intel_uncore_write(&dev_priv->uncore, GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
451 		intel_uncore_posting_read(&dev_priv->uncore, GEN8_DE_PIPE_IMR(pipe));
452 	}
453 }
454 
455 void bdw_enable_pipe_irq(struct drm_i915_private *i915,
456 			 enum pipe pipe, u32 bits)
457 {
458 	bdw_update_pipe_irq(i915, pipe, bits, bits);
459 }
460 
461 void bdw_disable_pipe_irq(struct drm_i915_private *i915,
462 			  enum pipe pipe, u32 bits)
463 {
464 	bdw_update_pipe_irq(i915, pipe, bits, 0);
465 }
466 
467 /**
468  * ibx_display_interrupt_update - update SDEIMR
469  * @dev_priv: driver private
470  * @interrupt_mask: mask of interrupt bits to update
471  * @enabled_irq_mask: mask of interrupt bits to enable
472  */
473 static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
474 					 u32 interrupt_mask,
475 					 u32 enabled_irq_mask)
476 {
477 	u32 sdeimr = intel_uncore_read(&dev_priv->uncore, SDEIMR);
478 	sdeimr &= ~interrupt_mask;
479 	sdeimr |= (~enabled_irq_mask & interrupt_mask);
480 
481 	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);
482 
483 	lockdep_assert_held(&dev_priv->irq_lock);
484 
485 	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
486 		return;
487 
488 	intel_uncore_write(&dev_priv->uncore, SDEIMR, sdeimr);
489 	intel_uncore_posting_read(&dev_priv->uncore, SDEIMR);
490 }
491 
492 void ibx_enable_display_interrupt(struct drm_i915_private *i915, u32 bits)
493 {
494 	ibx_display_interrupt_update(i915, bits, bits);
495 }
496 
497 void ibx_disable_display_interrupt(struct drm_i915_private *i915, u32 bits)
498 {
499 	ibx_display_interrupt_update(i915, bits, 0);
500 }
501 
502 u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
503 			      enum pipe pipe)
504 {
505 	u32 status_mask = dev_priv->pipestat_irq_mask[pipe];
506 	u32 enable_mask = status_mask << 16;
507 
508 	lockdep_assert_held(&dev_priv->irq_lock);
509 
510 	if (DISPLAY_VER(dev_priv) < 5)
511 		goto out;
512 
513 	/*
514 	 * On pipe A we don't support the PSR interrupt yet,
515 	 * on pipe B and C the same bit MBZ.
516 	 */
517 	if (drm_WARN_ON_ONCE(&dev_priv->drm,
518 			     status_mask & PIPE_A_PSR_STATUS_VLV))
519 		return 0;
520 	/*
521 	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
522 	 * A the same bit is for perf counters which we don't use either.
523 	 */
524 	if (drm_WARN_ON_ONCE(&dev_priv->drm,
525 			     status_mask & PIPE_B_PSR_STATUS_VLV))
526 		return 0;
527 
528 	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
529 			 SPRITE0_FLIP_DONE_INT_EN_VLV |
530 			 SPRITE1_FLIP_DONE_INT_EN_VLV);
531 	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
532 		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
533 	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
534 		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;
535 
536 out:
537 	drm_WARN_ONCE(&dev_priv->drm,
538 		      enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
539 		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
540 		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
541 		      pipe_name(pipe), enable_mask, status_mask);
542 
543 	return enable_mask;
544 }
545 
546 void i915_enable_pipestat(struct drm_i915_private *dev_priv,
547 			  enum pipe pipe, u32 status_mask)
548 {
549 	i915_reg_t reg = PIPESTAT(pipe);
550 	u32 enable_mask;
551 
552 	drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
553 		      "pipe %c: status_mask=0x%x\n",
554 		      pipe_name(pipe), status_mask);
555 
556 	lockdep_assert_held(&dev_priv->irq_lock);
557 	drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));
558 
559 	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == status_mask)
560 		return;
561 
562 	dev_priv->pipestat_irq_mask[pipe] |= status_mask;
563 	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
564 
565 	intel_uncore_write(&dev_priv->uncore, reg, enable_mask | status_mask);
566 	intel_uncore_posting_read(&dev_priv->uncore, reg);
567 }
568 
569 void i915_disable_pipestat(struct drm_i915_private *dev_priv,
570 			   enum pipe pipe, u32 status_mask)
571 {
572 	i915_reg_t reg = PIPESTAT(pipe);
573 	u32 enable_mask;
574 
575 	drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
576 		      "pipe %c: status_mask=0x%x\n",
577 		      pipe_name(pipe), status_mask);
578 
579 	lockdep_assert_held(&dev_priv->irq_lock);
580 	drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));
581 
582 	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == 0)
583 		return;
584 
585 	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
586 	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
587 
588 	intel_uncore_write(&dev_priv->uncore, reg, enable_mask | status_mask);
589 	intel_uncore_posting_read(&dev_priv->uncore, reg);
590 }
591 
592 static bool i915_has_asle(struct drm_i915_private *dev_priv)
593 {
594 	if (!dev_priv->display.opregion.asle)
595 		return false;
596 
597 	return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
598 }
599 
600 /**
601  * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
602  * @dev_priv: i915 device private
603  */
604 static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
605 {
606 	if (!i915_has_asle(dev_priv))
607 		return;
608 
609 	spin_lock_irq(&dev_priv->irq_lock);
610 
611 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
612 	if (DISPLAY_VER(dev_priv) >= 4)
613 		i915_enable_pipestat(dev_priv, PIPE_A,
614 				     PIPE_LEGACY_BLC_EVENT_STATUS);
615 
616 	spin_unlock_irq(&dev_priv->irq_lock);
617 }
618 
619 /**
620  * ivb_parity_work - Workqueue called when a parity error interrupt
621  * occurred.
622  * @work: workqueue struct
623  *
624  * Doesn't actually do anything except notify userspace. As a consequence of
625  * this event, userspace should try to remap the bad rows since statistically
626  * it is likely the same row is more likely to go bad again.
627  */
628 static void ivb_parity_work(struct work_struct *work)
629 {
630 	struct drm_i915_private *dev_priv =
631 		container_of(work, typeof(*dev_priv), l3_parity.error_work);
632 	struct intel_gt *gt = to_gt(dev_priv);
633 	u32 error_status, row, bank, subbank;
634 	char *parity_event[6];
635 	u32 misccpctl;
636 	u8 slice = 0;
637 
638 	/* We must turn off DOP level clock gating to access the L3 registers.
639 	 * In order to prevent a get/put style interface, acquire struct mutex
640 	 * any time we access those registers.
641 	 */
642 	mutex_lock(&dev_priv->drm.struct_mutex);
643 
644 	/* If we've screwed up tracking, just let the interrupt fire again */
645 	if (drm_WARN_ON(&dev_priv->drm, !dev_priv->l3_parity.which_slice))
646 		goto out;
647 
648 	misccpctl = intel_uncore_rmw(&dev_priv->uncore, GEN7_MISCCPCTL,
649 				     GEN7_DOP_CLOCK_GATE_ENABLE, 0);
650 	intel_uncore_posting_read(&dev_priv->uncore, GEN7_MISCCPCTL);
651 
652 	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
653 		i915_reg_t reg;
654 
655 		slice--;
656 		if (drm_WARN_ON_ONCE(&dev_priv->drm,
657 				     slice >= NUM_L3_SLICES(dev_priv)))
658 			break;
659 
660 		dev_priv->l3_parity.which_slice &= ~(1<<slice);
661 
662 		reg = GEN7_L3CDERRST1(slice);
663 
664 		error_status = intel_uncore_read(&dev_priv->uncore, reg);
665 		row = GEN7_PARITY_ERROR_ROW(error_status);
666 		bank = GEN7_PARITY_ERROR_BANK(error_status);
667 		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
668 
669 		intel_uncore_write(&dev_priv->uncore, reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
670 		intel_uncore_posting_read(&dev_priv->uncore, reg);
671 
672 		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
673 		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
674 		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
675 		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
676 		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
677 		parity_event[5] = NULL;
678 
679 		kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj,
680 				   KOBJ_CHANGE, parity_event);
681 
682 		drm_dbg(&dev_priv->drm,
683 			"Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
684 			slice, row, bank, subbank);
685 
686 		kfree(parity_event[4]);
687 		kfree(parity_event[3]);
688 		kfree(parity_event[2]);
689 		kfree(parity_event[1]);
690 	}
691 
692 	intel_uncore_write(&dev_priv->uncore, GEN7_MISCCPCTL, misccpctl);
693 
694 out:
695 	drm_WARN_ON(&dev_priv->drm, dev_priv->l3_parity.which_slice);
696 	spin_lock_irq(gt->irq_lock);
697 	gen5_gt_enable_irq(gt, GT_PARITY_ERROR(dev_priv));
698 	spin_unlock_irq(gt->irq_lock);
699 
700 	mutex_unlock(&dev_priv->drm.struct_mutex);
701 }
702 
703 static bool gen11_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
704 {
705 	switch (pin) {
706 	case HPD_PORT_TC1:
707 	case HPD_PORT_TC2:
708 	case HPD_PORT_TC3:
709 	case HPD_PORT_TC4:
710 	case HPD_PORT_TC5:
711 	case HPD_PORT_TC6:
712 		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(pin);
713 	default:
714 		return false;
715 	}
716 }
717 
718 static bool bxt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
719 {
720 	switch (pin) {
721 	case HPD_PORT_A:
722 		return val & PORTA_HOTPLUG_LONG_DETECT;
723 	case HPD_PORT_B:
724 		return val & PORTB_HOTPLUG_LONG_DETECT;
725 	case HPD_PORT_C:
726 		return val & PORTC_HOTPLUG_LONG_DETECT;
727 	default:
728 		return false;
729 	}
730 }
731 
732 static bool icp_ddi_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
733 {
734 	switch (pin) {
735 	case HPD_PORT_A:
736 	case HPD_PORT_B:
737 	case HPD_PORT_C:
738 	case HPD_PORT_D:
739 		return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(pin);
740 	default:
741 		return false;
742 	}
743 }
744 
745 static bool icp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
746 {
747 	switch (pin) {
748 	case HPD_PORT_TC1:
749 	case HPD_PORT_TC2:
750 	case HPD_PORT_TC3:
751 	case HPD_PORT_TC4:
752 	case HPD_PORT_TC5:
753 	case HPD_PORT_TC6:
754 		return val & ICP_TC_HPD_LONG_DETECT(pin);
755 	default:
756 		return false;
757 	}
758 }
759 
760 static bool spt_port_hotplug2_long_detect(enum hpd_pin pin, u32 val)
761 {
762 	switch (pin) {
763 	case HPD_PORT_E:
764 		return val & PORTE_HOTPLUG_LONG_DETECT;
765 	default:
766 		return false;
767 	}
768 }
769 
770 static bool spt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
771 {
772 	switch (pin) {
773 	case HPD_PORT_A:
774 		return val & PORTA_HOTPLUG_LONG_DETECT;
775 	case HPD_PORT_B:
776 		return val & PORTB_HOTPLUG_LONG_DETECT;
777 	case HPD_PORT_C:
778 		return val & PORTC_HOTPLUG_LONG_DETECT;
779 	case HPD_PORT_D:
780 		return val & PORTD_HOTPLUG_LONG_DETECT;
781 	default:
782 		return false;
783 	}
784 }
785 
786 static bool ilk_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
787 {
788 	switch (pin) {
789 	case HPD_PORT_A:
790 		return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
791 	default:
792 		return false;
793 	}
794 }
795 
796 static bool pch_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
797 {
798 	switch (pin) {
799 	case HPD_PORT_B:
800 		return val & PORTB_HOTPLUG_LONG_DETECT;
801 	case HPD_PORT_C:
802 		return val & PORTC_HOTPLUG_LONG_DETECT;
803 	case HPD_PORT_D:
804 		return val & PORTD_HOTPLUG_LONG_DETECT;
805 	default:
806 		return false;
807 	}
808 }
809 
810 static bool i9xx_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
811 {
812 	switch (pin) {
813 	case HPD_PORT_B:
814 		return val & PORTB_HOTPLUG_INT_LONG_PULSE;
815 	case HPD_PORT_C:
816 		return val & PORTC_HOTPLUG_INT_LONG_PULSE;
817 	case HPD_PORT_D:
818 		return val & PORTD_HOTPLUG_INT_LONG_PULSE;
819 	default:
820 		return false;
821 	}
822 }
823 
824 /*
825  * Get a bit mask of pins that have triggered, and which ones may be long.
826  * This can be called multiple times with the same masks to accumulate
827  * hotplug detection results from several registers.
828  *
829  * Note that the caller is expected to zero out the masks initially.
830  */
831 static void intel_get_hpd_pins(struct drm_i915_private *dev_priv,
832 			       u32 *pin_mask, u32 *long_mask,
833 			       u32 hotplug_trigger, u32 dig_hotplug_reg,
834 			       const u32 hpd[HPD_NUM_PINS],
835 			       bool long_pulse_detect(enum hpd_pin pin, u32 val))
836 {
837 	enum hpd_pin pin;
838 
839 	BUILD_BUG_ON(BITS_PER_TYPE(*pin_mask) < HPD_NUM_PINS);
840 
841 	for_each_hpd_pin(pin) {
842 		if ((hpd[pin] & hotplug_trigger) == 0)
843 			continue;
844 
845 		*pin_mask |= BIT(pin);
846 
847 		if (long_pulse_detect(pin, dig_hotplug_reg))
848 			*long_mask |= BIT(pin);
849 	}
850 
851 	drm_dbg(&dev_priv->drm,
852 		"hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x, long 0x%08x\n",
853 		hotplug_trigger, dig_hotplug_reg, *pin_mask, *long_mask);
854 
855 }
856 
857 static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv,
858 				  const u32 hpd[HPD_NUM_PINS])
859 {
860 	struct intel_encoder *encoder;
861 	u32 enabled_irqs = 0;
862 
863 	for_each_intel_encoder(&dev_priv->drm, encoder)
864 		if (dev_priv->display.hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
865 			enabled_irqs |= hpd[encoder->hpd_pin];
866 
867 	return enabled_irqs;
868 }
869 
870 static u32 intel_hpd_hotplug_irqs(struct drm_i915_private *dev_priv,
871 				  const u32 hpd[HPD_NUM_PINS])
872 {
873 	struct intel_encoder *encoder;
874 	u32 hotplug_irqs = 0;
875 
876 	for_each_intel_encoder(&dev_priv->drm, encoder)
877 		hotplug_irqs |= hpd[encoder->hpd_pin];
878 
879 	return hotplug_irqs;
880 }
881 
882 static u32 intel_hpd_hotplug_enables(struct drm_i915_private *i915,
883 				     hotplug_enables_func hotplug_enables)
884 {
885 	struct intel_encoder *encoder;
886 	u32 hotplug = 0;
887 
888 	for_each_intel_encoder(&i915->drm, encoder)
889 		hotplug |= hotplug_enables(encoder);
890 
891 	return hotplug;
892 }
893 
894 static void gmbus_irq_handler(struct drm_i915_private *dev_priv)
895 {
896 	wake_up_all(&dev_priv->display.gmbus.wait_queue);
897 }
898 
899 static void dp_aux_irq_handler(struct drm_i915_private *dev_priv)
900 {
901 	wake_up_all(&dev_priv->display.gmbus.wait_queue);
902 }
903 
904 #if defined(CONFIG_DEBUG_FS)
905 static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
906 					 enum pipe pipe,
907 					 u32 crc0, u32 crc1,
908 					 u32 crc2, u32 crc3,
909 					 u32 crc4)
910 {
911 	struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);
912 	struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc;
913 	u32 crcs[5] = { crc0, crc1, crc2, crc3, crc4 };
914 
915 	trace_intel_pipe_crc(crtc, crcs);
916 
917 	spin_lock(&pipe_crc->lock);
918 	/*
919 	 * For some not yet identified reason, the first CRC is
920 	 * bonkers. So let's just wait for the next vblank and read
921 	 * out the buggy result.
922 	 *
923 	 * On GEN8+ sometimes the second CRC is bonkers as well, so
924 	 * don't trust that one either.
925 	 */
926 	if (pipe_crc->skipped <= 0 ||
927 	    (DISPLAY_VER(dev_priv) >= 8 && pipe_crc->skipped == 1)) {
928 		pipe_crc->skipped++;
929 		spin_unlock(&pipe_crc->lock);
930 		return;
931 	}
932 	spin_unlock(&pipe_crc->lock);
933 
934 	drm_crtc_add_crc_entry(&crtc->base, true,
935 				drm_crtc_accurate_vblank_count(&crtc->base),
936 				crcs);
937 }
938 #else
939 static inline void
940 display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
941 			     enum pipe pipe,
942 			     u32 crc0, u32 crc1,
943 			     u32 crc2, u32 crc3,
944 			     u32 crc4) {}
945 #endif
946 
947 static void flip_done_handler(struct drm_i915_private *i915,
948 			      enum pipe pipe)
949 {
950 	struct intel_crtc *crtc = intel_crtc_for_pipe(i915, pipe);
951 	struct drm_crtc_state *crtc_state = crtc->base.state;
952 	struct drm_pending_vblank_event *e = crtc_state->event;
953 	struct drm_device *dev = &i915->drm;
954 	unsigned long irqflags;
955 
956 	spin_lock_irqsave(&dev->event_lock, irqflags);
957 
958 	crtc_state->event = NULL;
959 
960 	drm_crtc_send_vblank_event(&crtc->base, e);
961 
962 	spin_unlock_irqrestore(&dev->event_lock, irqflags);
963 }
964 
965 static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
966 				     enum pipe pipe)
967 {
968 	display_pipe_crc_irq_handler(dev_priv, pipe,
969 				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_1_IVB(pipe)),
970 				     0, 0, 0, 0);
971 }
972 
973 static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
974 				     enum pipe pipe)
975 {
976 	display_pipe_crc_irq_handler(dev_priv, pipe,
977 				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_1_IVB(pipe)),
978 				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_2_IVB(pipe)),
979 				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_3_IVB(pipe)),
980 				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_4_IVB(pipe)),
981 				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_5_IVB(pipe)));
982 }
983 
984 static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
985 				      enum pipe pipe)
986 {
987 	u32 res1, res2;
988 
989 	if (DISPLAY_VER(dev_priv) >= 3)
990 		res1 = intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RES1_I915(pipe));
991 	else
992 		res1 = 0;
993 
994 	if (DISPLAY_VER(dev_priv) >= 5 || IS_G4X(dev_priv))
995 		res2 = intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RES2_G4X(pipe));
996 	else
997 		res2 = 0;
998 
999 	display_pipe_crc_irq_handler(dev_priv, pipe,
1000 				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RED(pipe)),
1001 				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_GREEN(pipe)),
1002 				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_BLUE(pipe)),
1003 				     res1, res2);
1004 }
1005 
1006 static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
1007 {
1008 	enum pipe pipe;
1009 
1010 	for_each_pipe(dev_priv, pipe) {
1011 		intel_uncore_write(&dev_priv->uncore, PIPESTAT(pipe),
1012 			   PIPESTAT_INT_STATUS_MASK |
1013 			   PIPE_FIFO_UNDERRUN_STATUS);
1014 
1015 		dev_priv->pipestat_irq_mask[pipe] = 0;
1016 	}
1017 }
1018 
1019 static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
1020 				  u32 iir, u32 pipe_stats[I915_MAX_PIPES])
1021 {
1022 	enum pipe pipe;
1023 
1024 	spin_lock(&dev_priv->irq_lock);
1025 
1026 	if (!dev_priv->display_irqs_enabled) {
1027 		spin_unlock(&dev_priv->irq_lock);
1028 		return;
1029 	}
1030 
1031 	for_each_pipe(dev_priv, pipe) {
1032 		i915_reg_t reg;
1033 		u32 status_mask, enable_mask, iir_bit = 0;
1034 
1035 		/*
1036 		 * PIPESTAT bits get signalled even when the interrupt is
1037 		 * disabled with the mask bits, and some of the status bits do
1038 		 * not generate interrupts at all (like the underrun bit). Hence
1039 		 * we need to be careful that we only handle what we want to
1040 		 * handle.
1041 		 */
1042 
1043 		/* fifo underruns are filterered in the underrun handler. */
1044 		status_mask = PIPE_FIFO_UNDERRUN_STATUS;
1045 
1046 		switch (pipe) {
1047 		default:
1048 		case PIPE_A:
1049 			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
1050 			break;
1051 		case PIPE_B:
1052 			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
1053 			break;
1054 		case PIPE_C:
1055 			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
1056 			break;
1057 		}
1058 		if (iir & iir_bit)
1059 			status_mask |= dev_priv->pipestat_irq_mask[pipe];
1060 
1061 		if (!status_mask)
1062 			continue;
1063 
1064 		reg = PIPESTAT(pipe);
1065 		pipe_stats[pipe] = intel_uncore_read(&dev_priv->uncore, reg) & status_mask;
1066 		enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
1067 
1068 		/*
1069 		 * Clear the PIPE*STAT regs before the IIR
1070 		 *
1071 		 * Toggle the enable bits to make sure we get an
1072 		 * edge in the ISR pipe event bit if we don't clear
1073 		 * all the enabled status bits. Otherwise the edge
1074 		 * triggered IIR on i965/g4x wouldn't notice that
1075 		 * an interrupt is still pending.
1076 		 */
1077 		if (pipe_stats[pipe]) {
1078 			intel_uncore_write(&dev_priv->uncore, reg, pipe_stats[pipe]);
1079 			intel_uncore_write(&dev_priv->uncore, reg, enable_mask);
1080 		}
1081 	}
1082 	spin_unlock(&dev_priv->irq_lock);
1083 }
1084 
1085 static void i8xx_pipestat_irq_handler(struct drm_i915_private *dev_priv,
1086 				      u16 iir, u32 pipe_stats[I915_MAX_PIPES])
1087 {
1088 	enum pipe pipe;
1089 
1090 	for_each_pipe(dev_priv, pipe) {
1091 		if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
1092 			intel_handle_vblank(dev_priv, pipe);
1093 
1094 		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1095 			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1096 
1097 		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1098 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1099 	}
1100 }
1101 
1102 static void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv,
1103 				      u32 iir, u32 pipe_stats[I915_MAX_PIPES])
1104 {
1105 	bool blc_event = false;
1106 	enum pipe pipe;
1107 
1108 	for_each_pipe(dev_priv, pipe) {
1109 		if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
1110 			intel_handle_vblank(dev_priv, pipe);
1111 
1112 		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
1113 			blc_event = true;
1114 
1115 		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1116 			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1117 
1118 		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1119 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1120 	}
1121 
1122 	if (blc_event || (iir & I915_ASLE_INTERRUPT))
1123 		intel_opregion_asle_intr(dev_priv);
1124 }
1125 
1126 static void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv,
1127 				      u32 iir, u32 pipe_stats[I915_MAX_PIPES])
1128 {
1129 	bool blc_event = false;
1130 	enum pipe pipe;
1131 
1132 	for_each_pipe(dev_priv, pipe) {
1133 		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
1134 			intel_handle_vblank(dev_priv, pipe);
1135 
1136 		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
1137 			blc_event = true;
1138 
1139 		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1140 			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1141 
1142 		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1143 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1144 	}
1145 
1146 	if (blc_event || (iir & I915_ASLE_INTERRUPT))
1147 		intel_opregion_asle_intr(dev_priv);
1148 
1149 	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
1150 		gmbus_irq_handler(dev_priv);
1151 }
1152 
1153 static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
1154 					    u32 pipe_stats[I915_MAX_PIPES])
1155 {
1156 	enum pipe pipe;
1157 
1158 	for_each_pipe(dev_priv, pipe) {
1159 		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
1160 			intel_handle_vblank(dev_priv, pipe);
1161 
1162 		if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV)
1163 			flip_done_handler(dev_priv, pipe);
1164 
1165 		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1166 			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1167 
1168 		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1169 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1170 	}
1171 
1172 	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
1173 		gmbus_irq_handler(dev_priv);
1174 }
1175 
1176 static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
1177 {
1178 	u32 hotplug_status = 0, hotplug_status_mask;
1179 	int i;
1180 
1181 	if (IS_G4X(dev_priv) ||
1182 	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1183 		hotplug_status_mask = HOTPLUG_INT_STATUS_G4X |
1184 			DP_AUX_CHANNEL_MASK_INT_STATUS_G4X;
1185 	else
1186 		hotplug_status_mask = HOTPLUG_INT_STATUS_I915;
1187 
1188 	/*
1189 	 * We absolutely have to clear all the pending interrupt
1190 	 * bits in PORT_HOTPLUG_STAT. Otherwise the ISR port
1191 	 * interrupt bit won't have an edge, and the i965/g4x
1192 	 * edge triggered IIR will not notice that an interrupt
1193 	 * is still pending. We can't use PORT_HOTPLUG_EN to
1194 	 * guarantee the edge as the act of toggling the enable
1195 	 * bits can itself generate a new hotplug interrupt :(
1196 	 */
1197 	for (i = 0; i < 10; i++) {
1198 		u32 tmp = intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT) & hotplug_status_mask;
1199 
1200 		if (tmp == 0)
1201 			return hotplug_status;
1202 
1203 		hotplug_status |= tmp;
1204 		intel_uncore_write(&dev_priv->uncore, PORT_HOTPLUG_STAT, hotplug_status);
1205 	}
1206 
1207 	drm_WARN_ONCE(&dev_priv->drm, 1,
1208 		      "PORT_HOTPLUG_STAT did not clear (0x%08x)\n",
1209 		      intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT));
1210 
1211 	return hotplug_status;
1212 }
1213 
1214 static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv,
1215 				 u32 hotplug_status)
1216 {
1217 	u32 pin_mask = 0, long_mask = 0;
1218 	u32 hotplug_trigger;
1219 
1220 	if (IS_G4X(dev_priv) ||
1221 	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1222 		hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
1223 	else
1224 		hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
1225 
1226 	if (hotplug_trigger) {
1227 		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1228 				   hotplug_trigger, hotplug_trigger,
1229 				   dev_priv->display.hotplug.hpd,
1230 				   i9xx_port_hotplug_long_detect);
1231 
1232 		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1233 	}
1234 
1235 	if ((IS_G4X(dev_priv) ||
1236 	     IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
1237 	    hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
1238 		dp_aux_irq_handler(dev_priv);
1239 }
1240 
1241 static irqreturn_t valleyview_irq_handler(int irq, void *arg)
1242 {
1243 	struct drm_i915_private *dev_priv = arg;
1244 	irqreturn_t ret = IRQ_NONE;
1245 
1246 	if (!intel_irqs_enabled(dev_priv))
1247 		return IRQ_NONE;
1248 
1249 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
1250 	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1251 
1252 	do {
1253 		u32 iir, gt_iir, pm_iir;
1254 		u32 pipe_stats[I915_MAX_PIPES] = {};
1255 		u32 hotplug_status = 0;
1256 		u32 ier = 0;
1257 
1258 		gt_iir = intel_uncore_read(&dev_priv->uncore, GTIIR);
1259 		pm_iir = intel_uncore_read(&dev_priv->uncore, GEN6_PMIIR);
1260 		iir = intel_uncore_read(&dev_priv->uncore, VLV_IIR);
1261 
1262 		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
1263 			break;
1264 
1265 		ret = IRQ_HANDLED;
1266 
1267 		/*
1268 		 * Theory on interrupt generation, based on empirical evidence:
1269 		 *
1270 		 * x = ((VLV_IIR & VLV_IER) ||
1271 		 *      (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) &&
1272 		 *       (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE)));
1273 		 *
1274 		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
1275 		 * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to
1276 		 * guarantee the CPU interrupt will be raised again even if we
1277 		 * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR
1278 		 * bits this time around.
1279 		 */
1280 		intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, 0);
1281 		ier = intel_uncore_rmw(&dev_priv->uncore, VLV_IER, ~0, 0);
1282 
1283 		if (gt_iir)
1284 			intel_uncore_write(&dev_priv->uncore, GTIIR, gt_iir);
1285 		if (pm_iir)
1286 			intel_uncore_write(&dev_priv->uncore, GEN6_PMIIR, pm_iir);
1287 
1288 		if (iir & I915_DISPLAY_PORT_INTERRUPT)
1289 			hotplug_status = i9xx_hpd_irq_ack(dev_priv);
1290 
1291 		/* Call regardless, as some status bits might not be
1292 		 * signalled in iir */
1293 		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
1294 
1295 		if (iir & (I915_LPE_PIPE_A_INTERRUPT |
1296 			   I915_LPE_PIPE_B_INTERRUPT))
1297 			intel_lpe_audio_irq_handler(dev_priv);
1298 
1299 		/*
1300 		 * VLV_IIR is single buffered, and reflects the level
1301 		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
1302 		 */
1303 		if (iir)
1304 			intel_uncore_write(&dev_priv->uncore, VLV_IIR, iir);
1305 
1306 		intel_uncore_write(&dev_priv->uncore, VLV_IER, ier);
1307 		intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
1308 
1309 		if (gt_iir)
1310 			gen6_gt_irq_handler(to_gt(dev_priv), gt_iir);
1311 		if (pm_iir)
1312 			gen6_rps_irq_handler(&to_gt(dev_priv)->rps, pm_iir);
1313 
1314 		if (hotplug_status)
1315 			i9xx_hpd_irq_handler(dev_priv, hotplug_status);
1316 
1317 		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
1318 	} while (0);
1319 
1320 	pmu_irq_stats(dev_priv, ret);
1321 
1322 	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1323 
1324 	return ret;
1325 }
1326 
1327 static irqreturn_t cherryview_irq_handler(int irq, void *arg)
1328 {
1329 	struct drm_i915_private *dev_priv = arg;
1330 	irqreturn_t ret = IRQ_NONE;
1331 
1332 	if (!intel_irqs_enabled(dev_priv))
1333 		return IRQ_NONE;
1334 
1335 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
1336 	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1337 
1338 	do {
1339 		u32 master_ctl, iir;
1340 		u32 pipe_stats[I915_MAX_PIPES] = {};
1341 		u32 hotplug_status = 0;
1342 		u32 ier = 0;
1343 
1344 		master_ctl = intel_uncore_read(&dev_priv->uncore, GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
1345 		iir = intel_uncore_read(&dev_priv->uncore, VLV_IIR);
1346 
1347 		if (master_ctl == 0 && iir == 0)
1348 			break;
1349 
1350 		ret = IRQ_HANDLED;
1351 
1352 		/*
1353 		 * Theory on interrupt generation, based on empirical evidence:
1354 		 *
1355 		 * x = ((VLV_IIR & VLV_IER) ||
1356 		 *      ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) &&
1357 		 *       (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL)));
1358 		 *
1359 		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
1360 		 * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to
1361 		 * guarantee the CPU interrupt will be raised again even if we
1362 		 * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL
1363 		 * bits this time around.
1364 		 */
1365 		intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, 0);
1366 		ier = intel_uncore_rmw(&dev_priv->uncore, VLV_IER, ~0, 0);
1367 
1368 		gen8_gt_irq_handler(to_gt(dev_priv), master_ctl);
1369 
1370 		if (iir & I915_DISPLAY_PORT_INTERRUPT)
1371 			hotplug_status = i9xx_hpd_irq_ack(dev_priv);
1372 
1373 		/* Call regardless, as some status bits might not be
1374 		 * signalled in iir */
1375 		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
1376 
1377 		if (iir & (I915_LPE_PIPE_A_INTERRUPT |
1378 			   I915_LPE_PIPE_B_INTERRUPT |
1379 			   I915_LPE_PIPE_C_INTERRUPT))
1380 			intel_lpe_audio_irq_handler(dev_priv);
1381 
1382 		/*
1383 		 * VLV_IIR is single buffered, and reflects the level
1384 		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
1385 		 */
1386 		if (iir)
1387 			intel_uncore_write(&dev_priv->uncore, VLV_IIR, iir);
1388 
1389 		intel_uncore_write(&dev_priv->uncore, VLV_IER, ier);
1390 		intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
1391 
1392 		if (hotplug_status)
1393 			i9xx_hpd_irq_handler(dev_priv, hotplug_status);
1394 
1395 		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
1396 	} while (0);
1397 
1398 	pmu_irq_stats(dev_priv, ret);
1399 
1400 	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1401 
1402 	return ret;
1403 }
1404 
1405 static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
1406 				u32 hotplug_trigger)
1407 {
1408 	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
1409 
1410 	/*
1411 	 * Somehow the PCH doesn't seem to really ack the interrupt to the CPU
1412 	 * unless we touch the hotplug register, even if hotplug_trigger is
1413 	 * zero. Not acking leads to "The master control interrupt lied (SDE)!"
1414 	 * errors.
1415 	 */
1416 	dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
1417 	if (!hotplug_trigger) {
1418 		u32 mask = PORTA_HOTPLUG_STATUS_MASK |
1419 			PORTD_HOTPLUG_STATUS_MASK |
1420 			PORTC_HOTPLUG_STATUS_MASK |
1421 			PORTB_HOTPLUG_STATUS_MASK;
1422 		dig_hotplug_reg &= ~mask;
1423 	}
1424 
1425 	intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, dig_hotplug_reg);
1426 	if (!hotplug_trigger)
1427 		return;
1428 
1429 	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1430 			   hotplug_trigger, dig_hotplug_reg,
1431 			   dev_priv->display.hotplug.pch_hpd,
1432 			   pch_port_hotplug_long_detect);
1433 
1434 	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1435 }
1436 
1437 static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
1438 {
1439 	enum pipe pipe;
1440 	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
1441 
1442 	ibx_hpd_irq_handler(dev_priv, hotplug_trigger);
1443 
1444 	if (pch_iir & SDE_AUDIO_POWER_MASK) {
1445 		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
1446 			       SDE_AUDIO_POWER_SHIFT);
1447 		drm_dbg(&dev_priv->drm, "PCH audio power change on port %d\n",
1448 			port_name(port));
1449 	}
1450 
1451 	if (pch_iir & SDE_AUX_MASK)
1452 		dp_aux_irq_handler(dev_priv);
1453 
1454 	if (pch_iir & SDE_GMBUS)
1455 		gmbus_irq_handler(dev_priv);
1456 
1457 	if (pch_iir & SDE_AUDIO_HDCP_MASK)
1458 		drm_dbg(&dev_priv->drm, "PCH HDCP audio interrupt\n");
1459 
1460 	if (pch_iir & SDE_AUDIO_TRANS_MASK)
1461 		drm_dbg(&dev_priv->drm, "PCH transcoder audio interrupt\n");
1462 
1463 	if (pch_iir & SDE_POISON)
1464 		drm_err(&dev_priv->drm, "PCH poison interrupt\n");
1465 
1466 	if (pch_iir & SDE_FDI_MASK) {
1467 		for_each_pipe(dev_priv, pipe)
1468 			drm_dbg(&dev_priv->drm, "  pipe %c FDI IIR: 0x%08x\n",
1469 				pipe_name(pipe),
1470 				intel_uncore_read(&dev_priv->uncore, FDI_RX_IIR(pipe)));
1471 	}
1472 
1473 	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
1474 		drm_dbg(&dev_priv->drm, "PCH transcoder CRC done interrupt\n");
1475 
1476 	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
1477 		drm_dbg(&dev_priv->drm,
1478 			"PCH transcoder CRC error interrupt\n");
1479 
1480 	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
1481 		intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A);
1482 
1483 	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
1484 		intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_B);
1485 }
1486 
1487 static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
1488 {
1489 	u32 err_int = intel_uncore_read(&dev_priv->uncore, GEN7_ERR_INT);
1490 	enum pipe pipe;
1491 
1492 	if (err_int & ERR_INT_POISON)
1493 		drm_err(&dev_priv->drm, "Poison interrupt\n");
1494 
1495 	for_each_pipe(dev_priv, pipe) {
1496 		if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
1497 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1498 
1499 		if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
1500 			if (IS_IVYBRIDGE(dev_priv))
1501 				ivb_pipe_crc_irq_handler(dev_priv, pipe);
1502 			else
1503 				hsw_pipe_crc_irq_handler(dev_priv, pipe);
1504 		}
1505 	}
1506 
1507 	intel_uncore_write(&dev_priv->uncore, GEN7_ERR_INT, err_int);
1508 }
1509 
1510 static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
1511 {
1512 	u32 serr_int = intel_uncore_read(&dev_priv->uncore, SERR_INT);
1513 	enum pipe pipe;
1514 
1515 	if (serr_int & SERR_INT_POISON)
1516 		drm_err(&dev_priv->drm, "PCH poison interrupt\n");
1517 
1518 	for_each_pipe(dev_priv, pipe)
1519 		if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe))
1520 			intel_pch_fifo_underrun_irq_handler(dev_priv, pipe);
1521 
1522 	intel_uncore_write(&dev_priv->uncore, SERR_INT, serr_int);
1523 }
1524 
1525 static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
1526 {
1527 	enum pipe pipe;
1528 	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
1529 
1530 	ibx_hpd_irq_handler(dev_priv, hotplug_trigger);
1531 
1532 	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
1533 		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
1534 			       SDE_AUDIO_POWER_SHIFT_CPT);
1535 		drm_dbg(&dev_priv->drm, "PCH audio power change on port %c\n",
1536 			port_name(port));
1537 	}
1538 
1539 	if (pch_iir & SDE_AUX_MASK_CPT)
1540 		dp_aux_irq_handler(dev_priv);
1541 
1542 	if (pch_iir & SDE_GMBUS_CPT)
1543 		gmbus_irq_handler(dev_priv);
1544 
1545 	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
1546 		drm_dbg(&dev_priv->drm, "Audio CP request interrupt\n");
1547 
1548 	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
1549 		drm_dbg(&dev_priv->drm, "Audio CP change interrupt\n");
1550 
1551 	if (pch_iir & SDE_FDI_MASK_CPT) {
1552 		for_each_pipe(dev_priv, pipe)
1553 			drm_dbg(&dev_priv->drm, "  pipe %c FDI IIR: 0x%08x\n",
1554 				pipe_name(pipe),
1555 				intel_uncore_read(&dev_priv->uncore, FDI_RX_IIR(pipe)));
1556 	}
1557 
1558 	if (pch_iir & SDE_ERROR_CPT)
1559 		cpt_serr_int_handler(dev_priv);
1560 }
1561 
1562 static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
1563 {
1564 	u32 ddi_hotplug_trigger = pch_iir & SDE_DDI_HOTPLUG_MASK_ICP;
1565 	u32 tc_hotplug_trigger = pch_iir & SDE_TC_HOTPLUG_MASK_ICP;
1566 	u32 pin_mask = 0, long_mask = 0;
1567 
1568 	if (ddi_hotplug_trigger) {
1569 		u32 dig_hotplug_reg;
1570 
1571 		/* Locking due to DSI native GPIO sequences */
1572 		spin_lock(&dev_priv->irq_lock);
1573 		dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, SHOTPLUG_CTL_DDI, 0, 0);
1574 		spin_unlock(&dev_priv->irq_lock);
1575 
1576 		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1577 				   ddi_hotplug_trigger, dig_hotplug_reg,
1578 				   dev_priv->display.hotplug.pch_hpd,
1579 				   icp_ddi_port_hotplug_long_detect);
1580 	}
1581 
1582 	if (tc_hotplug_trigger) {
1583 		u32 dig_hotplug_reg;
1584 
1585 		dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, SHOTPLUG_CTL_TC, 0, 0);
1586 
1587 		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1588 				   tc_hotplug_trigger, dig_hotplug_reg,
1589 				   dev_priv->display.hotplug.pch_hpd,
1590 				   icp_tc_port_hotplug_long_detect);
1591 	}
1592 
1593 	if (pin_mask)
1594 		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1595 
1596 	if (pch_iir & SDE_GMBUS_ICP)
1597 		gmbus_irq_handler(dev_priv);
1598 }
1599 
1600 static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
1601 {
1602 	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
1603 		~SDE_PORTE_HOTPLUG_SPT;
1604 	u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
1605 	u32 pin_mask = 0, long_mask = 0;
1606 
1607 	if (hotplug_trigger) {
1608 		u32 dig_hotplug_reg;
1609 
1610 		dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG, 0, 0);
1611 
1612 		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1613 				   hotplug_trigger, dig_hotplug_reg,
1614 				   dev_priv->display.hotplug.pch_hpd,
1615 				   spt_port_hotplug_long_detect);
1616 	}
1617 
1618 	if (hotplug2_trigger) {
1619 		u32 dig_hotplug_reg;
1620 
1621 		dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG2, 0, 0);
1622 
1623 		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1624 				   hotplug2_trigger, dig_hotplug_reg,
1625 				   dev_priv->display.hotplug.pch_hpd,
1626 				   spt_port_hotplug2_long_detect);
1627 	}
1628 
1629 	if (pin_mask)
1630 		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1631 
1632 	if (pch_iir & SDE_GMBUS_CPT)
1633 		gmbus_irq_handler(dev_priv);
1634 }
1635 
1636 static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv,
1637 				u32 hotplug_trigger)
1638 {
1639 	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
1640 
1641 	dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL, 0, 0);
1642 
1643 	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1644 			   hotplug_trigger, dig_hotplug_reg,
1645 			   dev_priv->display.hotplug.hpd,
1646 			   ilk_port_hotplug_long_detect);
1647 
1648 	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1649 }
1650 
1651 static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
1652 				    u32 de_iir)
1653 {
1654 	enum pipe pipe;
1655 	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;
1656 
1657 	if (hotplug_trigger)
1658 		ilk_hpd_irq_handler(dev_priv, hotplug_trigger);
1659 
1660 	if (de_iir & DE_AUX_CHANNEL_A)
1661 		dp_aux_irq_handler(dev_priv);
1662 
1663 	if (de_iir & DE_GSE)
1664 		intel_opregion_asle_intr(dev_priv);
1665 
1666 	if (de_iir & DE_POISON)
1667 		drm_err(&dev_priv->drm, "Poison interrupt\n");
1668 
1669 	for_each_pipe(dev_priv, pipe) {
1670 		if (de_iir & DE_PIPE_VBLANK(pipe))
1671 			intel_handle_vblank(dev_priv, pipe);
1672 
1673 		if (de_iir & DE_PLANE_FLIP_DONE(pipe))
1674 			flip_done_handler(dev_priv, pipe);
1675 
1676 		if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
1677 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1678 
1679 		if (de_iir & DE_PIPE_CRC_DONE(pipe))
1680 			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1681 	}
1682 
1683 	/* check event from PCH */
1684 	if (de_iir & DE_PCH_EVENT) {
1685 		u32 pch_iir = intel_uncore_read(&dev_priv->uncore, SDEIIR);
1686 
1687 		if (HAS_PCH_CPT(dev_priv))
1688 			cpt_irq_handler(dev_priv, pch_iir);
1689 		else
1690 			ibx_irq_handler(dev_priv, pch_iir);
1691 
1692 		/* should clear PCH hotplug event before clear CPU irq */
1693 		intel_uncore_write(&dev_priv->uncore, SDEIIR, pch_iir);
1694 	}
1695 
1696 	if (DISPLAY_VER(dev_priv) == 5 && de_iir & DE_PCU_EVENT)
1697 		gen5_rps_irq_handler(&to_gt(dev_priv)->rps);
1698 }
1699 
1700 static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
1701 				    u32 de_iir)
1702 {
1703 	enum pipe pipe;
1704 	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;
1705 
1706 	if (hotplug_trigger)
1707 		ilk_hpd_irq_handler(dev_priv, hotplug_trigger);
1708 
1709 	if (de_iir & DE_ERR_INT_IVB)
1710 		ivb_err_int_handler(dev_priv);
1711 
1712 	if (de_iir & DE_AUX_CHANNEL_A_IVB)
1713 		dp_aux_irq_handler(dev_priv);
1714 
1715 	if (de_iir & DE_GSE_IVB)
1716 		intel_opregion_asle_intr(dev_priv);
1717 
1718 	for_each_pipe(dev_priv, pipe) {
1719 		if (de_iir & DE_PIPE_VBLANK_IVB(pipe))
1720 			intel_handle_vblank(dev_priv, pipe);
1721 
1722 		if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe))
1723 			flip_done_handler(dev_priv, pipe);
1724 	}
1725 
1726 	/* check event from PCH */
1727 	if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) {
1728 		u32 pch_iir = intel_uncore_read(&dev_priv->uncore, SDEIIR);
1729 
1730 		cpt_irq_handler(dev_priv, pch_iir);
1731 
1732 		/* clear PCH hotplug event before clear CPU irq */
1733 		intel_uncore_write(&dev_priv->uncore, SDEIIR, pch_iir);
1734 	}
1735 }
1736 
1737 /*
1738  * To handle irqs with the minimum potential races with fresh interrupts, we:
1739  * 1 - Disable Master Interrupt Control.
1740  * 2 - Find the source(s) of the interrupt.
1741  * 3 - Clear the Interrupt Identity bits (IIR).
1742  * 4 - Process the interrupt(s) that had bits set in the IIRs.
1743  * 5 - Re-enable Master Interrupt Control.
1744  */
1745 static irqreturn_t ilk_irq_handler(int irq, void *arg)
1746 {
1747 	struct drm_i915_private *i915 = arg;
1748 	void __iomem * const regs = i915->uncore.regs;
1749 	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
1750 	irqreturn_t ret = IRQ_NONE;
1751 
1752 	if (unlikely(!intel_irqs_enabled(i915)))
1753 		return IRQ_NONE;
1754 
1755 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
1756 	disable_rpm_wakeref_asserts(&i915->runtime_pm);
1757 
1758 	/* disable master interrupt before clearing iir  */
1759 	de_ier = raw_reg_read(regs, DEIER);
1760 	raw_reg_write(regs, DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
1761 
1762 	/* Disable south interrupts. We'll only write to SDEIIR once, so further
1763 	 * interrupts will will be stored on its back queue, and then we'll be
1764 	 * able to process them after we restore SDEIER (as soon as we restore
1765 	 * it, we'll get an interrupt if SDEIIR still has something to process
1766 	 * due to its back queue). */
1767 	if (!HAS_PCH_NOP(i915)) {
1768 		sde_ier = raw_reg_read(regs, SDEIER);
1769 		raw_reg_write(regs, SDEIER, 0);
1770 	}
1771 
1772 	/* Find, clear, then process each source of interrupt */
1773 
1774 	gt_iir = raw_reg_read(regs, GTIIR);
1775 	if (gt_iir) {
1776 		raw_reg_write(regs, GTIIR, gt_iir);
1777 		if (GRAPHICS_VER(i915) >= 6)
1778 			gen6_gt_irq_handler(to_gt(i915), gt_iir);
1779 		else
1780 			gen5_gt_irq_handler(to_gt(i915), gt_iir);
1781 		ret = IRQ_HANDLED;
1782 	}
1783 
1784 	de_iir = raw_reg_read(regs, DEIIR);
1785 	if (de_iir) {
1786 		raw_reg_write(regs, DEIIR, de_iir);
1787 		if (DISPLAY_VER(i915) >= 7)
1788 			ivb_display_irq_handler(i915, de_iir);
1789 		else
1790 			ilk_display_irq_handler(i915, de_iir);
1791 		ret = IRQ_HANDLED;
1792 	}
1793 
1794 	if (GRAPHICS_VER(i915) >= 6) {
1795 		u32 pm_iir = raw_reg_read(regs, GEN6_PMIIR);
1796 		if (pm_iir) {
1797 			raw_reg_write(regs, GEN6_PMIIR, pm_iir);
1798 			gen6_rps_irq_handler(&to_gt(i915)->rps, pm_iir);
1799 			ret = IRQ_HANDLED;
1800 		}
1801 	}
1802 
1803 	raw_reg_write(regs, DEIER, de_ier);
1804 	if (sde_ier)
1805 		raw_reg_write(regs, SDEIER, sde_ier);
1806 
1807 	pmu_irq_stats(i915, ret);
1808 
1809 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
1810 	enable_rpm_wakeref_asserts(&i915->runtime_pm);
1811 
1812 	return ret;
1813 }
1814 
1815 static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv,
1816 				u32 hotplug_trigger)
1817 {
1818 	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
1819 
1820 	dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG, 0, 0);
1821 
1822 	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1823 			   hotplug_trigger, dig_hotplug_reg,
1824 			   dev_priv->display.hotplug.hpd,
1825 			   bxt_port_hotplug_long_detect);
1826 
1827 	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1828 }
1829 
1830 static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
1831 {
1832 	u32 pin_mask = 0, long_mask = 0;
1833 	u32 trigger_tc = iir & GEN11_DE_TC_HOTPLUG_MASK;
1834 	u32 trigger_tbt = iir & GEN11_DE_TBT_HOTPLUG_MASK;
1835 
1836 	if (trigger_tc) {
1837 		u32 dig_hotplug_reg;
1838 
1839 		dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL, 0, 0);
1840 
1841 		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1842 				   trigger_tc, dig_hotplug_reg,
1843 				   dev_priv->display.hotplug.hpd,
1844 				   gen11_port_hotplug_long_detect);
1845 	}
1846 
1847 	if (trigger_tbt) {
1848 		u32 dig_hotplug_reg;
1849 
1850 		dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL, 0, 0);
1851 
1852 		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1853 				   trigger_tbt, dig_hotplug_reg,
1854 				   dev_priv->display.hotplug.hpd,
1855 				   gen11_port_hotplug_long_detect);
1856 	}
1857 
1858 	if (pin_mask)
1859 		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1860 	else
1861 		drm_err(&dev_priv->drm,
1862 			"Unexpected DE HPD interrupt 0x%08x\n", iir);
1863 }
1864 
1865 static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
1866 {
1867 	u32 mask;
1868 
1869 	if (DISPLAY_VER(dev_priv) >= 13)
1870 		return TGL_DE_PORT_AUX_DDIA |
1871 			TGL_DE_PORT_AUX_DDIB |
1872 			TGL_DE_PORT_AUX_DDIC |
1873 			XELPD_DE_PORT_AUX_DDID |
1874 			XELPD_DE_PORT_AUX_DDIE |
1875 			TGL_DE_PORT_AUX_USBC1 |
1876 			TGL_DE_PORT_AUX_USBC2 |
1877 			TGL_DE_PORT_AUX_USBC3 |
1878 			TGL_DE_PORT_AUX_USBC4;
1879 	else if (DISPLAY_VER(dev_priv) >= 12)
1880 		return TGL_DE_PORT_AUX_DDIA |
1881 			TGL_DE_PORT_AUX_DDIB |
1882 			TGL_DE_PORT_AUX_DDIC |
1883 			TGL_DE_PORT_AUX_USBC1 |
1884 			TGL_DE_PORT_AUX_USBC2 |
1885 			TGL_DE_PORT_AUX_USBC3 |
1886 			TGL_DE_PORT_AUX_USBC4 |
1887 			TGL_DE_PORT_AUX_USBC5 |
1888 			TGL_DE_PORT_AUX_USBC6;
1889 
1890 
1891 	mask = GEN8_AUX_CHANNEL_A;
1892 	if (DISPLAY_VER(dev_priv) >= 9)
1893 		mask |= GEN9_AUX_CHANNEL_B |
1894 			GEN9_AUX_CHANNEL_C |
1895 			GEN9_AUX_CHANNEL_D;
1896 
1897 	if (DISPLAY_VER(dev_priv) == 11) {
1898 		mask |= ICL_AUX_CHANNEL_F;
1899 		mask |= ICL_AUX_CHANNEL_E;
1900 	}
1901 
1902 	return mask;
1903 }
1904 
1905 static u32 gen8_de_pipe_fault_mask(struct drm_i915_private *dev_priv)
1906 {
1907 	if (DISPLAY_VER(dev_priv) >= 13 || HAS_D12_PLANE_MINIMIZATION(dev_priv))
1908 		return RKL_DE_PIPE_IRQ_FAULT_ERRORS;
1909 	else if (DISPLAY_VER(dev_priv) >= 11)
1910 		return GEN11_DE_PIPE_IRQ_FAULT_ERRORS;
1911 	else if (DISPLAY_VER(dev_priv) >= 9)
1912 		return GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
1913 	else
1914 		return GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
1915 }
1916 
1917 static void
1918 gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
1919 {
1920 	bool found = false;
1921 
1922 	if (iir & GEN8_DE_MISC_GSE) {
1923 		intel_opregion_asle_intr(dev_priv);
1924 		found = true;
1925 	}
1926 
1927 	if (iir & GEN8_DE_EDP_PSR) {
1928 		struct intel_encoder *encoder;
1929 		u32 psr_iir;
1930 		i915_reg_t iir_reg;
1931 
1932 		for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
1933 			struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1934 
1935 			if (DISPLAY_VER(dev_priv) >= 12)
1936 				iir_reg = TRANS_PSR_IIR(intel_dp->psr.transcoder);
1937 			else
1938 				iir_reg = EDP_PSR_IIR;
1939 
1940 			psr_iir = intel_uncore_rmw(&dev_priv->uncore, iir_reg, 0, 0);
1941 
1942 			if (psr_iir)
1943 				found = true;
1944 
1945 			intel_psr_irq_handler(intel_dp, psr_iir);
1946 
1947 			/* prior GEN12 only have one EDP PSR */
1948 			if (DISPLAY_VER(dev_priv) < 12)
1949 				break;
1950 		}
1951 	}
1952 
1953 	if (!found)
1954 		drm_err(&dev_priv->drm, "Unexpected DE Misc interrupt\n");
1955 }
1956 
1957 static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv,
1958 					   u32 te_trigger)
1959 {
1960 	enum pipe pipe = INVALID_PIPE;
1961 	enum transcoder dsi_trans;
1962 	enum port port;
1963 	u32 val, tmp;
1964 
1965 	/*
1966 	 * Incase of dual link, TE comes from DSI_1
1967 	 * this is to check if dual link is enabled
1968 	 */
1969 	val = intel_uncore_read(&dev_priv->uncore, TRANS_DDI_FUNC_CTL2(TRANSCODER_DSI_0));
1970 	val &= PORT_SYNC_MODE_ENABLE;
1971 
1972 	/*
1973 	 * if dual link is enabled, then read DSI_0
1974 	 * transcoder registers
1975 	 */
1976 	port = ((te_trigger & DSI1_TE && val) || (te_trigger & DSI0_TE)) ?
1977 						  PORT_A : PORT_B;
1978 	dsi_trans = (port == PORT_A) ? TRANSCODER_DSI_0 : TRANSCODER_DSI_1;
1979 
1980 	/* Check if DSI configured in command mode */
1981 	val = intel_uncore_read(&dev_priv->uncore, DSI_TRANS_FUNC_CONF(dsi_trans));
1982 	val = val & OP_MODE_MASK;
1983 
1984 	if (val != CMD_MODE_NO_GATE && val != CMD_MODE_TE_GATE) {
1985 		drm_err(&dev_priv->drm, "DSI trancoder not configured in command mode\n");
1986 		return;
1987 	}
1988 
1989 	/* Get PIPE for handling VBLANK event */
1990 	val = intel_uncore_read(&dev_priv->uncore, TRANS_DDI_FUNC_CTL(dsi_trans));
1991 	switch (val & TRANS_DDI_EDP_INPUT_MASK) {
1992 	case TRANS_DDI_EDP_INPUT_A_ON:
1993 		pipe = PIPE_A;
1994 		break;
1995 	case TRANS_DDI_EDP_INPUT_B_ONOFF:
1996 		pipe = PIPE_B;
1997 		break;
1998 	case TRANS_DDI_EDP_INPUT_C_ONOFF:
1999 		pipe = PIPE_C;
2000 		break;
2001 	default:
2002 		drm_err(&dev_priv->drm, "Invalid PIPE\n");
2003 		return;
2004 	}
2005 
2006 	intel_handle_vblank(dev_priv, pipe);
2007 
2008 	/* clear TE in dsi IIR */
2009 	port = (te_trigger & DSI1_TE) ? PORT_B : PORT_A;
2010 	tmp = intel_uncore_rmw(&dev_priv->uncore, DSI_INTR_IDENT_REG(port), 0, 0);
2011 }
2012 
2013 static u32 gen8_de_pipe_flip_done_mask(struct drm_i915_private *i915)
2014 {
2015 	if (DISPLAY_VER(i915) >= 9)
2016 		return GEN9_PIPE_PLANE1_FLIP_DONE;
2017 	else
2018 		return GEN8_PIPE_PRIMARY_FLIP_DONE;
2019 }
2020 
2021 u32 gen8_de_pipe_underrun_mask(struct drm_i915_private *dev_priv)
2022 {
2023 	u32 mask = GEN8_PIPE_FIFO_UNDERRUN;
2024 
2025 	if (DISPLAY_VER(dev_priv) >= 13)
2026 		mask |= XELPD_PIPE_SOFT_UNDERRUN |
2027 			XELPD_PIPE_HARD_UNDERRUN;
2028 
2029 	return mask;
2030 }
2031 
2032 static irqreturn_t
2033 gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
2034 {
2035 	irqreturn_t ret = IRQ_NONE;
2036 	u32 iir;
2037 	enum pipe pipe;
2038 
2039 	drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_DISPLAY(dev_priv));
2040 
2041 	if (master_ctl & GEN8_DE_MISC_IRQ) {
2042 		iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_MISC_IIR);
2043 		if (iir) {
2044 			intel_uncore_write(&dev_priv->uncore, GEN8_DE_MISC_IIR, iir);
2045 			ret = IRQ_HANDLED;
2046 			gen8_de_misc_irq_handler(dev_priv, iir);
2047 		} else {
2048 			drm_err_ratelimited(&dev_priv->drm,
2049 					    "The master control interrupt lied (DE MISC)!\n");
2050 		}
2051 	}
2052 
2053 	if (DISPLAY_VER(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) {
2054 		iir = intel_uncore_read(&dev_priv->uncore, GEN11_DE_HPD_IIR);
2055 		if (iir) {
2056 			intel_uncore_write(&dev_priv->uncore, GEN11_DE_HPD_IIR, iir);
2057 			ret = IRQ_HANDLED;
2058 			gen11_hpd_irq_handler(dev_priv, iir);
2059 		} else {
2060 			drm_err_ratelimited(&dev_priv->drm,
2061 					    "The master control interrupt lied, (DE HPD)!\n");
2062 		}
2063 	}
2064 
2065 	if (master_ctl & GEN8_DE_PORT_IRQ) {
2066 		iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PORT_IIR);
2067 		if (iir) {
2068 			bool found = false;
2069 
2070 			intel_uncore_write(&dev_priv->uncore, GEN8_DE_PORT_IIR, iir);
2071 			ret = IRQ_HANDLED;
2072 
2073 			if (iir & gen8_de_port_aux_mask(dev_priv)) {
2074 				dp_aux_irq_handler(dev_priv);
2075 				found = true;
2076 			}
2077 
2078 			if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
2079 				u32 hotplug_trigger = iir & BXT_DE_PORT_HOTPLUG_MASK;
2080 
2081 				if (hotplug_trigger) {
2082 					bxt_hpd_irq_handler(dev_priv, hotplug_trigger);
2083 					found = true;
2084 				}
2085 			} else if (IS_BROADWELL(dev_priv)) {
2086 				u32 hotplug_trigger = iir & BDW_DE_PORT_HOTPLUG_MASK;
2087 
2088 				if (hotplug_trigger) {
2089 					ilk_hpd_irq_handler(dev_priv, hotplug_trigger);
2090 					found = true;
2091 				}
2092 			}
2093 
2094 			if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
2095 			    (iir & BXT_DE_PORT_GMBUS)) {
2096 				gmbus_irq_handler(dev_priv);
2097 				found = true;
2098 			}
2099 
2100 			if (DISPLAY_VER(dev_priv) >= 11) {
2101 				u32 te_trigger = iir & (DSI0_TE | DSI1_TE);
2102 
2103 				if (te_trigger) {
2104 					gen11_dsi_te_interrupt_handler(dev_priv, te_trigger);
2105 					found = true;
2106 				}
2107 			}
2108 
2109 			if (!found)
2110 				drm_err_ratelimited(&dev_priv->drm,
2111 						    "Unexpected DE Port interrupt\n");
2112 		}
2113 		else
2114 			drm_err_ratelimited(&dev_priv->drm,
2115 					    "The master control interrupt lied (DE PORT)!\n");
2116 	}
2117 
2118 	for_each_pipe(dev_priv, pipe) {
2119 		u32 fault_errors;
2120 
2121 		if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
2122 			continue;
2123 
2124 		iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PIPE_IIR(pipe));
2125 		if (!iir) {
2126 			drm_err_ratelimited(&dev_priv->drm,
2127 					    "The master control interrupt lied (DE PIPE)!\n");
2128 			continue;
2129 		}
2130 
2131 		ret = IRQ_HANDLED;
2132 		intel_uncore_write(&dev_priv->uncore, GEN8_DE_PIPE_IIR(pipe), iir);
2133 
2134 		if (iir & GEN8_PIPE_VBLANK)
2135 			intel_handle_vblank(dev_priv, pipe);
2136 
2137 		if (iir & gen8_de_pipe_flip_done_mask(dev_priv))
2138 			flip_done_handler(dev_priv, pipe);
2139 
2140 		if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
2141 			hsw_pipe_crc_irq_handler(dev_priv, pipe);
2142 
2143 		if (iir & gen8_de_pipe_underrun_mask(dev_priv))
2144 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2145 
2146 		fault_errors = iir & gen8_de_pipe_fault_mask(dev_priv);
2147 		if (fault_errors)
2148 			drm_err_ratelimited(&dev_priv->drm,
2149 					    "Fault errors on pipe %c: 0x%08x\n",
2150 					    pipe_name(pipe),
2151 					    fault_errors);
2152 	}
2153 
2154 	if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) &&
2155 	    master_ctl & GEN8_DE_PCH_IRQ) {
2156 		/*
2157 		 * FIXME(BDW): Assume for now that the new interrupt handling
2158 		 * scheme also closed the SDE interrupt handling race we've seen
2159 		 * on older pch-split platforms. But this needs testing.
2160 		 */
2161 		iir = intel_uncore_read(&dev_priv->uncore, SDEIIR);
2162 		if (iir) {
2163 			intel_uncore_write(&dev_priv->uncore, SDEIIR, iir);
2164 			ret = IRQ_HANDLED;
2165 
2166 			if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
2167 				icp_irq_handler(dev_priv, iir);
2168 			else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
2169 				spt_irq_handler(dev_priv, iir);
2170 			else
2171 				cpt_irq_handler(dev_priv, iir);
2172 		} else {
2173 			/*
2174 			 * Like on previous PCH there seems to be something
2175 			 * fishy going on with forwarding PCH interrupts.
2176 			 */
2177 			drm_dbg(&dev_priv->drm,
2178 				"The master control interrupt lied (SDE)!\n");
2179 		}
2180 	}
2181 
2182 	return ret;
2183 }
2184 
2185 static inline u32 gen8_master_intr_disable(void __iomem * const regs)
2186 {
2187 	raw_reg_write(regs, GEN8_MASTER_IRQ, 0);
2188 
2189 	/*
2190 	 * Now with master disabled, get a sample of level indications
2191 	 * for this interrupt. Indications will be cleared on related acks.
2192 	 * New indications can and will light up during processing,
2193 	 * and will generate new interrupt after enabling master.
2194 	 */
2195 	return raw_reg_read(regs, GEN8_MASTER_IRQ);
2196 }
2197 
2198 static inline void gen8_master_intr_enable(void __iomem * const regs)
2199 {
2200 	raw_reg_write(regs, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
2201 }
2202 
2203 static irqreturn_t gen8_irq_handler(int irq, void *arg)
2204 {
2205 	struct drm_i915_private *dev_priv = arg;
2206 	void __iomem * const regs = dev_priv->uncore.regs;
2207 	u32 master_ctl;
2208 
2209 	if (!intel_irqs_enabled(dev_priv))
2210 		return IRQ_NONE;
2211 
2212 	master_ctl = gen8_master_intr_disable(regs);
2213 	if (!master_ctl) {
2214 		gen8_master_intr_enable(regs);
2215 		return IRQ_NONE;
2216 	}
2217 
2218 	/* Find, queue (onto bottom-halves), then clear each source */
2219 	gen8_gt_irq_handler(to_gt(dev_priv), master_ctl);
2220 
2221 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
2222 	if (master_ctl & ~GEN8_GT_IRQS) {
2223 		disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
2224 		gen8_de_irq_handler(dev_priv, master_ctl);
2225 		enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
2226 	}
2227 
2228 	gen8_master_intr_enable(regs);
2229 
2230 	pmu_irq_stats(dev_priv, IRQ_HANDLED);
2231 
2232 	return IRQ_HANDLED;
2233 }
2234 
2235 static u32
2236 gen11_gu_misc_irq_ack(struct drm_i915_private *i915, const u32 master_ctl)
2237 {
2238 	void __iomem * const regs = i915->uncore.regs;
2239 	u32 iir;
2240 
2241 	if (!(master_ctl & GEN11_GU_MISC_IRQ))
2242 		return 0;
2243 
2244 	iir = raw_reg_read(regs, GEN11_GU_MISC_IIR);
2245 	if (likely(iir))
2246 		raw_reg_write(regs, GEN11_GU_MISC_IIR, iir);
2247 
2248 	return iir;
2249 }
2250 
2251 static void
2252 gen11_gu_misc_irq_handler(struct drm_i915_private *i915, const u32 iir)
2253 {
2254 	if (iir & GEN11_GU_MISC_GSE)
2255 		intel_opregion_asle_intr(i915);
2256 }
2257 
2258 static inline u32 gen11_master_intr_disable(void __iomem * const regs)
2259 {
2260 	raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0);
2261 
2262 	/*
2263 	 * Now with master disabled, get a sample of level indications
2264 	 * for this interrupt. Indications will be cleared on related acks.
2265 	 * New indications can and will light up during processing,
2266 	 * and will generate new interrupt after enabling master.
2267 	 */
2268 	return raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
2269 }
2270 
2271 static inline void gen11_master_intr_enable(void __iomem * const regs)
2272 {
2273 	raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ);
2274 }
2275 
2276 static void
2277 gen11_display_irq_handler(struct drm_i915_private *i915)
2278 {
2279 	void __iomem * const regs = i915->uncore.regs;
2280 	const u32 disp_ctl = raw_reg_read(regs, GEN11_DISPLAY_INT_CTL);
2281 
2282 	disable_rpm_wakeref_asserts(&i915->runtime_pm);
2283 	/*
2284 	 * GEN11_DISPLAY_INT_CTL has same format as GEN8_MASTER_IRQ
2285 	 * for the display related bits.
2286 	 */
2287 	raw_reg_write(regs, GEN11_DISPLAY_INT_CTL, 0x0);
2288 	gen8_de_irq_handler(i915, disp_ctl);
2289 	raw_reg_write(regs, GEN11_DISPLAY_INT_CTL,
2290 		      GEN11_DISPLAY_IRQ_ENABLE);
2291 
2292 	enable_rpm_wakeref_asserts(&i915->runtime_pm);
2293 }
2294 
2295 static irqreturn_t gen11_irq_handler(int irq, void *arg)
2296 {
2297 	struct drm_i915_private *i915 = arg;
2298 	void __iomem * const regs = i915->uncore.regs;
2299 	struct intel_gt *gt = to_gt(i915);
2300 	u32 master_ctl;
2301 	u32 gu_misc_iir;
2302 
2303 	if (!intel_irqs_enabled(i915))
2304 		return IRQ_NONE;
2305 
2306 	master_ctl = gen11_master_intr_disable(regs);
2307 	if (!master_ctl) {
2308 		gen11_master_intr_enable(regs);
2309 		return IRQ_NONE;
2310 	}
2311 
2312 	/* Find, queue (onto bottom-halves), then clear each source */
2313 	gen11_gt_irq_handler(gt, master_ctl);
2314 
2315 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
2316 	if (master_ctl & GEN11_DISPLAY_IRQ)
2317 		gen11_display_irq_handler(i915);
2318 
2319 	gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl);
2320 
2321 	gen11_master_intr_enable(regs);
2322 
2323 	gen11_gu_misc_irq_handler(i915, gu_misc_iir);
2324 
2325 	pmu_irq_stats(i915, IRQ_HANDLED);
2326 
2327 	return IRQ_HANDLED;
2328 }
2329 
2330 static inline u32 dg1_master_intr_disable(void __iomem * const regs)
2331 {
2332 	u32 val;
2333 
2334 	/* First disable interrupts */
2335 	raw_reg_write(regs, DG1_MSTR_TILE_INTR, 0);
2336 
2337 	/* Get the indication levels and ack the master unit */
2338 	val = raw_reg_read(regs, DG1_MSTR_TILE_INTR);
2339 	if (unlikely(!val))
2340 		return 0;
2341 
2342 	raw_reg_write(regs, DG1_MSTR_TILE_INTR, val);
2343 
2344 	return val;
2345 }
2346 
2347 static inline void dg1_master_intr_enable(void __iomem * const regs)
2348 {
2349 	raw_reg_write(regs, DG1_MSTR_TILE_INTR, DG1_MSTR_IRQ);
2350 }
2351 
2352 static irqreturn_t dg1_irq_handler(int irq, void *arg)
2353 {
2354 	struct drm_i915_private * const i915 = arg;
2355 	struct intel_gt *gt = to_gt(i915);
2356 	void __iomem * const regs = gt->uncore->regs;
2357 	u32 master_tile_ctl, master_ctl;
2358 	u32 gu_misc_iir;
2359 
2360 	if (!intel_irqs_enabled(i915))
2361 		return IRQ_NONE;
2362 
2363 	master_tile_ctl = dg1_master_intr_disable(regs);
2364 	if (!master_tile_ctl) {
2365 		dg1_master_intr_enable(regs);
2366 		return IRQ_NONE;
2367 	}
2368 
2369 	/* FIXME: we only support tile 0 for now. */
2370 	if (master_tile_ctl & DG1_MSTR_TILE(0)) {
2371 		master_ctl = raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
2372 		raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, master_ctl);
2373 	} else {
2374 		drm_err(&i915->drm, "Tile not supported: 0x%08x\n",
2375 			master_tile_ctl);
2376 		dg1_master_intr_enable(regs);
2377 		return IRQ_NONE;
2378 	}
2379 
2380 	gen11_gt_irq_handler(gt, master_ctl);
2381 
2382 	if (master_ctl & GEN11_DISPLAY_IRQ)
2383 		gen11_display_irq_handler(i915);
2384 
2385 	gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl);
2386 
2387 	dg1_master_intr_enable(regs);
2388 
2389 	gen11_gu_misc_irq_handler(i915, gu_misc_iir);
2390 
2391 	pmu_irq_stats(i915, IRQ_HANDLED);
2392 
2393 	return IRQ_HANDLED;
2394 }
2395 
2396 /* Called from drm generic code, passed 'crtc' which
2397  * we use as a pipe index
2398  */
2399 int i8xx_enable_vblank(struct drm_crtc *crtc)
2400 {
2401 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2402 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
2403 	unsigned long irqflags;
2404 
2405 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2406 	i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
2407 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2408 
2409 	return 0;
2410 }
2411 
2412 int i915gm_enable_vblank(struct drm_crtc *crtc)
2413 {
2414 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2415 
2416 	/*
2417 	 * Vblank interrupts fail to wake the device up from C2+.
2418 	 * Disabling render clock gating during C-states avoids
2419 	 * the problem. There is a small power cost so we do this
2420 	 * only when vblank interrupts are actually enabled.
2421 	 */
2422 	if (dev_priv->vblank_enabled++ == 0)
2423 		intel_uncore_write(&dev_priv->uncore, SCPD0, _MASKED_BIT_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
2424 
2425 	return i8xx_enable_vblank(crtc);
2426 }
2427 
2428 int i965_enable_vblank(struct drm_crtc *crtc)
2429 {
2430 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2431 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
2432 	unsigned long irqflags;
2433 
2434 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2435 	i915_enable_pipestat(dev_priv, pipe,
2436 			     PIPE_START_VBLANK_INTERRUPT_STATUS);
2437 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2438 
2439 	return 0;
2440 }
2441 
2442 int ilk_enable_vblank(struct drm_crtc *crtc)
2443 {
2444 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2445 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
2446 	unsigned long irqflags;
2447 	u32 bit = DISPLAY_VER(dev_priv) >= 7 ?
2448 		DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
2449 
2450 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2451 	ilk_enable_display_irq(dev_priv, bit);
2452 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2453 
2454 	/* Even though there is no DMC, frame counter can get stuck when
2455 	 * PSR is active as no frames are generated.
2456 	 */
2457 	if (HAS_PSR(dev_priv))
2458 		drm_crtc_vblank_restore(crtc);
2459 
2460 	return 0;
2461 }
2462 
2463 static bool gen11_dsi_configure_te(struct intel_crtc *intel_crtc,
2464 				   bool enable)
2465 {
2466 	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
2467 	enum port port;
2468 
2469 	if (!(intel_crtc->mode_flags &
2470 	    (I915_MODE_FLAG_DSI_USE_TE1 | I915_MODE_FLAG_DSI_USE_TE0)))
2471 		return false;
2472 
2473 	/* for dual link cases we consider TE from slave */
2474 	if (intel_crtc->mode_flags & I915_MODE_FLAG_DSI_USE_TE1)
2475 		port = PORT_B;
2476 	else
2477 		port = PORT_A;
2478 
2479 	intel_uncore_rmw(&dev_priv->uncore, DSI_INTR_MASK_REG(port), DSI_TE_EVENT,
2480 			 enable ? 0 : DSI_TE_EVENT);
2481 
2482 	intel_uncore_rmw(&dev_priv->uncore, DSI_INTR_IDENT_REG(port), 0, 0);
2483 
2484 	return true;
2485 }
2486 
2487 int bdw_enable_vblank(struct drm_crtc *_crtc)
2488 {
2489 	struct intel_crtc *crtc = to_intel_crtc(_crtc);
2490 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2491 	enum pipe pipe = crtc->pipe;
2492 	unsigned long irqflags;
2493 
2494 	if (gen11_dsi_configure_te(crtc, true))
2495 		return 0;
2496 
2497 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2498 	bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
2499 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2500 
2501 	/* Even if there is no DMC, frame counter can get stuck when
2502 	 * PSR is active as no frames are generated, so check only for PSR.
2503 	 */
2504 	if (HAS_PSR(dev_priv))
2505 		drm_crtc_vblank_restore(&crtc->base);
2506 
2507 	return 0;
2508 }
2509 
2510 /* Called from drm generic code, passed 'crtc' which
2511  * we use as a pipe index
2512  */
2513 void i8xx_disable_vblank(struct drm_crtc *crtc)
2514 {
2515 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2516 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
2517 	unsigned long irqflags;
2518 
2519 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2520 	i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
2521 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2522 }
2523 
2524 void i915gm_disable_vblank(struct drm_crtc *crtc)
2525 {
2526 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2527 
2528 	i8xx_disable_vblank(crtc);
2529 
2530 	if (--dev_priv->vblank_enabled == 0)
2531 		intel_uncore_write(&dev_priv->uncore, SCPD0, _MASKED_BIT_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
2532 }
2533 
2534 void i965_disable_vblank(struct drm_crtc *crtc)
2535 {
2536 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2537 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
2538 	unsigned long irqflags;
2539 
2540 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2541 	i915_disable_pipestat(dev_priv, pipe,
2542 			      PIPE_START_VBLANK_INTERRUPT_STATUS);
2543 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2544 }
2545 
2546 void ilk_disable_vblank(struct drm_crtc *crtc)
2547 {
2548 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2549 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
2550 	unsigned long irqflags;
2551 	u32 bit = DISPLAY_VER(dev_priv) >= 7 ?
2552 		DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
2553 
2554 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2555 	ilk_disable_display_irq(dev_priv, bit);
2556 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2557 }
2558 
2559 void bdw_disable_vblank(struct drm_crtc *_crtc)
2560 {
2561 	struct intel_crtc *crtc = to_intel_crtc(_crtc);
2562 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2563 	enum pipe pipe = crtc->pipe;
2564 	unsigned long irqflags;
2565 
2566 	if (gen11_dsi_configure_te(crtc, false))
2567 		return;
2568 
2569 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2570 	bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
2571 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2572 }
2573 
2574 static void ibx_irq_reset(struct drm_i915_private *dev_priv)
2575 {
2576 	struct intel_uncore *uncore = &dev_priv->uncore;
2577 
2578 	if (HAS_PCH_NOP(dev_priv))
2579 		return;
2580 
2581 	GEN3_IRQ_RESET(uncore, SDE);
2582 
2583 	if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
2584 		intel_uncore_write(&dev_priv->uncore, SERR_INT, 0xffffffff);
2585 }
2586 
2587 static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
2588 {
2589 	struct intel_uncore *uncore = &dev_priv->uncore;
2590 
2591 	if (IS_CHERRYVIEW(dev_priv))
2592 		intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
2593 	else
2594 		intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_VLV);
2595 
2596 	i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
2597 	intel_uncore_rmw(uncore, PORT_HOTPLUG_STAT, 0, 0);
2598 
2599 	i9xx_pipestat_irq_reset(dev_priv);
2600 
2601 	GEN3_IRQ_RESET(uncore, VLV_);
2602 	dev_priv->irq_mask = ~0u;
2603 }
2604 
2605 static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
2606 {
2607 	struct intel_uncore *uncore = &dev_priv->uncore;
2608 
2609 	u32 pipestat_mask;
2610 	u32 enable_mask;
2611 	enum pipe pipe;
2612 
2613 	pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS;
2614 
2615 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
2616 	for_each_pipe(dev_priv, pipe)
2617 		i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
2618 
2619 	enable_mask = I915_DISPLAY_PORT_INTERRUPT |
2620 		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2621 		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2622 		I915_LPE_PIPE_A_INTERRUPT |
2623 		I915_LPE_PIPE_B_INTERRUPT;
2624 
2625 	if (IS_CHERRYVIEW(dev_priv))
2626 		enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT |
2627 			I915_LPE_PIPE_C_INTERRUPT;
2628 
2629 	drm_WARN_ON(&dev_priv->drm, dev_priv->irq_mask != ~0u);
2630 
2631 	dev_priv->irq_mask = ~enable_mask;
2632 
2633 	GEN3_IRQ_INIT(uncore, VLV_, dev_priv->irq_mask, enable_mask);
2634 }
2635 
2636 /* drm_dma.h hooks
2637 */
2638 static void ilk_irq_reset(struct drm_i915_private *dev_priv)
2639 {
2640 	struct intel_uncore *uncore = &dev_priv->uncore;
2641 
2642 	GEN3_IRQ_RESET(uncore, DE);
2643 	dev_priv->irq_mask = ~0u;
2644 
2645 	if (GRAPHICS_VER(dev_priv) == 7)
2646 		intel_uncore_write(uncore, GEN7_ERR_INT, 0xffffffff);
2647 
2648 	if (IS_HASWELL(dev_priv)) {
2649 		intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
2650 		intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
2651 	}
2652 
2653 	gen5_gt_irq_reset(to_gt(dev_priv));
2654 
2655 	ibx_irq_reset(dev_priv);
2656 }
2657 
2658 static void valleyview_irq_reset(struct drm_i915_private *dev_priv)
2659 {
2660 	intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, 0);
2661 	intel_uncore_posting_read(&dev_priv->uncore, VLV_MASTER_IER);
2662 
2663 	gen5_gt_irq_reset(to_gt(dev_priv));
2664 
2665 	spin_lock_irq(&dev_priv->irq_lock);
2666 	if (dev_priv->display_irqs_enabled)
2667 		vlv_display_irq_reset(dev_priv);
2668 	spin_unlock_irq(&dev_priv->irq_lock);
2669 }
2670 
2671 static void gen8_display_irq_reset(struct drm_i915_private *dev_priv)
2672 {
2673 	struct intel_uncore *uncore = &dev_priv->uncore;
2674 	enum pipe pipe;
2675 
2676 	if (!HAS_DISPLAY(dev_priv))
2677 		return;
2678 
2679 	intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
2680 	intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
2681 
2682 	for_each_pipe(dev_priv, pipe)
2683 		if (intel_display_power_is_enabled(dev_priv,
2684 						   POWER_DOMAIN_PIPE(pipe)))
2685 			GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
2686 
2687 	GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
2688 	GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);
2689 }
2690 
2691 static void gen8_irq_reset(struct drm_i915_private *dev_priv)
2692 {
2693 	struct intel_uncore *uncore = &dev_priv->uncore;
2694 
2695 	gen8_master_intr_disable(uncore->regs);
2696 
2697 	gen8_gt_irq_reset(to_gt(dev_priv));
2698 	gen8_display_irq_reset(dev_priv);
2699 	GEN3_IRQ_RESET(uncore, GEN8_PCU_);
2700 
2701 	if (HAS_PCH_SPLIT(dev_priv))
2702 		ibx_irq_reset(dev_priv);
2703 
2704 }
2705 
2706 static void gen11_display_irq_reset(struct drm_i915_private *dev_priv)
2707 {
2708 	struct intel_uncore *uncore = &dev_priv->uncore;
2709 	enum pipe pipe;
2710 	u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
2711 		BIT(TRANSCODER_C) | BIT(TRANSCODER_D);
2712 
2713 	if (!HAS_DISPLAY(dev_priv))
2714 		return;
2715 
2716 	intel_uncore_write(uncore, GEN11_DISPLAY_INT_CTL, 0);
2717 
2718 	if (DISPLAY_VER(dev_priv) >= 12) {
2719 		enum transcoder trans;
2720 
2721 		for_each_cpu_transcoder_masked(dev_priv, trans, trans_mask) {
2722 			enum intel_display_power_domain domain;
2723 
2724 			domain = POWER_DOMAIN_TRANSCODER(trans);
2725 			if (!intel_display_power_is_enabled(dev_priv, domain))
2726 				continue;
2727 
2728 			intel_uncore_write(uncore, TRANS_PSR_IMR(trans), 0xffffffff);
2729 			intel_uncore_write(uncore, TRANS_PSR_IIR(trans), 0xffffffff);
2730 		}
2731 	} else {
2732 		intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
2733 		intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
2734 	}
2735 
2736 	for_each_pipe(dev_priv, pipe)
2737 		if (intel_display_power_is_enabled(dev_priv,
2738 						   POWER_DOMAIN_PIPE(pipe)))
2739 			GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
2740 
2741 	GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
2742 	GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);
2743 	GEN3_IRQ_RESET(uncore, GEN11_DE_HPD_);
2744 
2745 	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
2746 		GEN3_IRQ_RESET(uncore, SDE);
2747 }
2748 
2749 static void gen11_irq_reset(struct drm_i915_private *dev_priv)
2750 {
2751 	struct intel_gt *gt = to_gt(dev_priv);
2752 	struct intel_uncore *uncore = gt->uncore;
2753 
2754 	gen11_master_intr_disable(dev_priv->uncore.regs);
2755 
2756 	gen11_gt_irq_reset(gt);
2757 	gen11_display_irq_reset(dev_priv);
2758 
2759 	GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_);
2760 	GEN3_IRQ_RESET(uncore, GEN8_PCU_);
2761 }
2762 
2763 static void dg1_irq_reset(struct drm_i915_private *dev_priv)
2764 {
2765 	struct intel_uncore *uncore = &dev_priv->uncore;
2766 	struct intel_gt *gt;
2767 	unsigned int i;
2768 
2769 	dg1_master_intr_disable(dev_priv->uncore.regs);
2770 
2771 	for_each_gt(gt, dev_priv, i)
2772 		gen11_gt_irq_reset(gt);
2773 
2774 	gen11_display_irq_reset(dev_priv);
2775 
2776 	GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_);
2777 	GEN3_IRQ_RESET(uncore, GEN8_PCU_);
2778 }
2779 
2780 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
2781 				     u8 pipe_mask)
2782 {
2783 	struct intel_uncore *uncore = &dev_priv->uncore;
2784 	u32 extra_ier = GEN8_PIPE_VBLANK |
2785 		gen8_de_pipe_underrun_mask(dev_priv) |
2786 		gen8_de_pipe_flip_done_mask(dev_priv);
2787 	enum pipe pipe;
2788 
2789 	spin_lock_irq(&dev_priv->irq_lock);
2790 
2791 	if (!intel_irqs_enabled(dev_priv)) {
2792 		spin_unlock_irq(&dev_priv->irq_lock);
2793 		return;
2794 	}
2795 
2796 	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
2797 		GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
2798 				  dev_priv->de_irq_mask[pipe],
2799 				  ~dev_priv->de_irq_mask[pipe] | extra_ier);
2800 
2801 	spin_unlock_irq(&dev_priv->irq_lock);
2802 }
2803 
2804 void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
2805 				     u8 pipe_mask)
2806 {
2807 	struct intel_uncore *uncore = &dev_priv->uncore;
2808 	enum pipe pipe;
2809 
2810 	spin_lock_irq(&dev_priv->irq_lock);
2811 
2812 	if (!intel_irqs_enabled(dev_priv)) {
2813 		spin_unlock_irq(&dev_priv->irq_lock);
2814 		return;
2815 	}
2816 
2817 	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
2818 		GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
2819 
2820 	spin_unlock_irq(&dev_priv->irq_lock);
2821 
2822 	/* make sure we're done processing display irqs */
2823 	intel_synchronize_irq(dev_priv);
2824 }
2825 
2826 static void cherryview_irq_reset(struct drm_i915_private *dev_priv)
2827 {
2828 	struct intel_uncore *uncore = &dev_priv->uncore;
2829 
2830 	intel_uncore_write(uncore, GEN8_MASTER_IRQ, 0);
2831 	intel_uncore_posting_read(&dev_priv->uncore, GEN8_MASTER_IRQ);
2832 
2833 	gen8_gt_irq_reset(to_gt(dev_priv));
2834 
2835 	GEN3_IRQ_RESET(uncore, GEN8_PCU_);
2836 
2837 	spin_lock_irq(&dev_priv->irq_lock);
2838 	if (dev_priv->display_irqs_enabled)
2839 		vlv_display_irq_reset(dev_priv);
2840 	spin_unlock_irq(&dev_priv->irq_lock);
2841 }
2842 
2843 static u32 ibx_hotplug_enables(struct intel_encoder *encoder)
2844 {
2845 	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
2846 
2847 	switch (encoder->hpd_pin) {
2848 	case HPD_PORT_A:
2849 		/*
2850 		 * When CPU and PCH are on the same package, port A
2851 		 * HPD must be enabled in both north and south.
2852 		 */
2853 		return HAS_PCH_LPT_LP(i915) ?
2854 			PORTA_HOTPLUG_ENABLE : 0;
2855 	case HPD_PORT_B:
2856 		return PORTB_HOTPLUG_ENABLE |
2857 			PORTB_PULSE_DURATION_2ms;
2858 	case HPD_PORT_C:
2859 		return PORTC_HOTPLUG_ENABLE |
2860 			PORTC_PULSE_DURATION_2ms;
2861 	case HPD_PORT_D:
2862 		return PORTD_HOTPLUG_ENABLE |
2863 			PORTD_PULSE_DURATION_2ms;
2864 	default:
2865 		return 0;
2866 	}
2867 }
2868 
2869 static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv)
2870 {
2871 	/*
2872 	 * Enable digital hotplug on the PCH, and configure the DP short pulse
2873 	 * duration to 2ms (which is the minimum in the Display Port spec).
2874 	 * The pulse duration bits are reserved on LPT+.
2875 	 */
2876 	intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG,
2877 			 PORTA_HOTPLUG_ENABLE |
2878 			 PORTB_HOTPLUG_ENABLE |
2879 			 PORTC_HOTPLUG_ENABLE |
2880 			 PORTD_HOTPLUG_ENABLE |
2881 			 PORTB_PULSE_DURATION_MASK |
2882 			 PORTC_PULSE_DURATION_MASK |
2883 			 PORTD_PULSE_DURATION_MASK,
2884 			 intel_hpd_hotplug_enables(dev_priv, ibx_hotplug_enables));
2885 }
2886 
2887 static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
2888 {
2889 	u32 hotplug_irqs, enabled_irqs;
2890 
2891 	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
2892 	hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
2893 
2894 	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
2895 
2896 	ibx_hpd_detection_setup(dev_priv);
2897 }
2898 
2899 static u32 icp_ddi_hotplug_enables(struct intel_encoder *encoder)
2900 {
2901 	switch (encoder->hpd_pin) {
2902 	case HPD_PORT_A:
2903 	case HPD_PORT_B:
2904 	case HPD_PORT_C:
2905 	case HPD_PORT_D:
2906 		return SHOTPLUG_CTL_DDI_HPD_ENABLE(encoder->hpd_pin);
2907 	default:
2908 		return 0;
2909 	}
2910 }
2911 
2912 static u32 icp_tc_hotplug_enables(struct intel_encoder *encoder)
2913 {
2914 	switch (encoder->hpd_pin) {
2915 	case HPD_PORT_TC1:
2916 	case HPD_PORT_TC2:
2917 	case HPD_PORT_TC3:
2918 	case HPD_PORT_TC4:
2919 	case HPD_PORT_TC5:
2920 	case HPD_PORT_TC6:
2921 		return ICP_TC_HPD_ENABLE(encoder->hpd_pin);
2922 	default:
2923 		return 0;
2924 	}
2925 }
2926 
2927 static void icp_ddi_hpd_detection_setup(struct drm_i915_private *dev_priv)
2928 {
2929 	intel_uncore_rmw(&dev_priv->uncore, SHOTPLUG_CTL_DDI,
2930 			 SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_A) |
2931 			 SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_B) |
2932 			 SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_C) |
2933 			 SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_D),
2934 			 intel_hpd_hotplug_enables(dev_priv, icp_ddi_hotplug_enables));
2935 }
2936 
2937 static void icp_tc_hpd_detection_setup(struct drm_i915_private *dev_priv)
2938 {
2939 	intel_uncore_rmw(&dev_priv->uncore, SHOTPLUG_CTL_TC,
2940 			 ICP_TC_HPD_ENABLE(HPD_PORT_TC1) |
2941 			 ICP_TC_HPD_ENABLE(HPD_PORT_TC2) |
2942 			 ICP_TC_HPD_ENABLE(HPD_PORT_TC3) |
2943 			 ICP_TC_HPD_ENABLE(HPD_PORT_TC4) |
2944 			 ICP_TC_HPD_ENABLE(HPD_PORT_TC5) |
2945 			 ICP_TC_HPD_ENABLE(HPD_PORT_TC6),
2946 			 intel_hpd_hotplug_enables(dev_priv, icp_tc_hotplug_enables));
2947 }
2948 
2949 static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv)
2950 {
2951 	u32 hotplug_irqs, enabled_irqs;
2952 
2953 	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
2954 	hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
2955 
2956 	if (INTEL_PCH_TYPE(dev_priv) <= PCH_TGP)
2957 		intel_uncore_write(&dev_priv->uncore, SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
2958 
2959 	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
2960 
2961 	icp_ddi_hpd_detection_setup(dev_priv);
2962 	icp_tc_hpd_detection_setup(dev_priv);
2963 }
2964 
2965 static u32 gen11_hotplug_enables(struct intel_encoder *encoder)
2966 {
2967 	switch (encoder->hpd_pin) {
2968 	case HPD_PORT_TC1:
2969 	case HPD_PORT_TC2:
2970 	case HPD_PORT_TC3:
2971 	case HPD_PORT_TC4:
2972 	case HPD_PORT_TC5:
2973 	case HPD_PORT_TC6:
2974 		return GEN11_HOTPLUG_CTL_ENABLE(encoder->hpd_pin);
2975 	default:
2976 		return 0;
2977 	}
2978 }
2979 
2980 static void dg1_hpd_invert(struct drm_i915_private *i915)
2981 {
2982 	u32 val = (INVERT_DDIA_HPD |
2983 		   INVERT_DDIB_HPD |
2984 		   INVERT_DDIC_HPD |
2985 		   INVERT_DDID_HPD);
2986 	intel_uncore_rmw(&i915->uncore, SOUTH_CHICKEN1, 0, val);
2987 }
2988 
2989 static void dg1_hpd_irq_setup(struct drm_i915_private *dev_priv)
2990 {
2991 	dg1_hpd_invert(dev_priv);
2992 	icp_hpd_irq_setup(dev_priv);
2993 }
2994 
2995 static void gen11_tc_hpd_detection_setup(struct drm_i915_private *dev_priv)
2996 {
2997 	intel_uncore_rmw(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL,
2998 			 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC1) |
2999 			 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC2) |
3000 			 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC3) |
3001 			 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC4) |
3002 			 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC5) |
3003 			 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC6),
3004 			 intel_hpd_hotplug_enables(dev_priv, gen11_hotplug_enables));
3005 }
3006 
3007 static void gen11_tbt_hpd_detection_setup(struct drm_i915_private *dev_priv)
3008 {
3009 	intel_uncore_rmw(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL,
3010 			 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC1) |
3011 			 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC2) |
3012 			 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC3) |
3013 			 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC4) |
3014 			 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC5) |
3015 			 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC6),
3016 			 intel_hpd_hotplug_enables(dev_priv, gen11_hotplug_enables));
3017 }
3018 
3019 static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
3020 {
3021 	u32 hotplug_irqs, enabled_irqs;
3022 
3023 	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.hpd);
3024 	hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.hpd);
3025 
3026 	intel_uncore_rmw(&dev_priv->uncore, GEN11_DE_HPD_IMR, hotplug_irqs,
3027 			 ~enabled_irqs & hotplug_irqs);
3028 	intel_uncore_posting_read(&dev_priv->uncore, GEN11_DE_HPD_IMR);
3029 
3030 	gen11_tc_hpd_detection_setup(dev_priv);
3031 	gen11_tbt_hpd_detection_setup(dev_priv);
3032 
3033 	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
3034 		icp_hpd_irq_setup(dev_priv);
3035 }
3036 
3037 static u32 spt_hotplug_enables(struct intel_encoder *encoder)
3038 {
3039 	switch (encoder->hpd_pin) {
3040 	case HPD_PORT_A:
3041 		return PORTA_HOTPLUG_ENABLE;
3042 	case HPD_PORT_B:
3043 		return PORTB_HOTPLUG_ENABLE;
3044 	case HPD_PORT_C:
3045 		return PORTC_HOTPLUG_ENABLE;
3046 	case HPD_PORT_D:
3047 		return PORTD_HOTPLUG_ENABLE;
3048 	default:
3049 		return 0;
3050 	}
3051 }
3052 
3053 static u32 spt_hotplug2_enables(struct intel_encoder *encoder)
3054 {
3055 	switch (encoder->hpd_pin) {
3056 	case HPD_PORT_E:
3057 		return PORTE_HOTPLUG_ENABLE;
3058 	default:
3059 		return 0;
3060 	}
3061 }
3062 
3063 static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv)
3064 {
3065 	/* Display WA #1179 WaHardHangonHotPlug: cnp */
3066 	if (HAS_PCH_CNP(dev_priv)) {
3067 		intel_uncore_rmw(&dev_priv->uncore, SOUTH_CHICKEN1, CHASSIS_CLK_REQ_DURATION_MASK,
3068 				 CHASSIS_CLK_REQ_DURATION(0xf));
3069 	}
3070 
3071 	/* Enable digital hotplug on the PCH */
3072 	intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG,
3073 			 PORTA_HOTPLUG_ENABLE |
3074 			 PORTB_HOTPLUG_ENABLE |
3075 			 PORTC_HOTPLUG_ENABLE |
3076 			 PORTD_HOTPLUG_ENABLE,
3077 			 intel_hpd_hotplug_enables(dev_priv, spt_hotplug_enables));
3078 
3079 	intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG2, PORTE_HOTPLUG_ENABLE,
3080 			 intel_hpd_hotplug_enables(dev_priv, spt_hotplug2_enables));
3081 }
3082 
3083 static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
3084 {
3085 	u32 hotplug_irqs, enabled_irqs;
3086 
3087 	if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
3088 		intel_uncore_write(&dev_priv->uncore, SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
3089 
3090 	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
3091 	hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
3092 
3093 	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3094 
3095 	spt_hpd_detection_setup(dev_priv);
3096 }
3097 
3098 static u32 ilk_hotplug_enables(struct intel_encoder *encoder)
3099 {
3100 	switch (encoder->hpd_pin) {
3101 	case HPD_PORT_A:
3102 		return DIGITAL_PORTA_HOTPLUG_ENABLE |
3103 			DIGITAL_PORTA_PULSE_DURATION_2ms;
3104 	default:
3105 		return 0;
3106 	}
3107 }
3108 
3109 static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv)
3110 {
3111 	/*
3112 	 * Enable digital hotplug on the CPU, and configure the DP short pulse
3113 	 * duration to 2ms (which is the minimum in the Display Port spec)
3114 	 * The pulse duration bits are reserved on HSW+.
3115 	 */
3116 	intel_uncore_rmw(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL,
3117 			 DIGITAL_PORTA_HOTPLUG_ENABLE | DIGITAL_PORTA_PULSE_DURATION_MASK,
3118 			 intel_hpd_hotplug_enables(dev_priv, ilk_hotplug_enables));
3119 }
3120 
3121 static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
3122 {
3123 	u32 hotplug_irqs, enabled_irqs;
3124 
3125 	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.hpd);
3126 	hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.hpd);
3127 
3128 	if (DISPLAY_VER(dev_priv) >= 8)
3129 		bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3130 	else
3131 		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
3132 
3133 	ilk_hpd_detection_setup(dev_priv);
3134 
3135 	ibx_hpd_irq_setup(dev_priv);
3136 }
3137 
3138 static u32 bxt_hotplug_enables(struct intel_encoder *encoder)
3139 {
3140 	u32 hotplug;
3141 
3142 	switch (encoder->hpd_pin) {
3143 	case HPD_PORT_A:
3144 		hotplug = PORTA_HOTPLUG_ENABLE;
3145 		if (intel_bios_encoder_hpd_invert(encoder->devdata))
3146 			hotplug |= BXT_DDIA_HPD_INVERT;
3147 		return hotplug;
3148 	case HPD_PORT_B:
3149 		hotplug = PORTB_HOTPLUG_ENABLE;
3150 		if (intel_bios_encoder_hpd_invert(encoder->devdata))
3151 			hotplug |= BXT_DDIB_HPD_INVERT;
3152 		return hotplug;
3153 	case HPD_PORT_C:
3154 		hotplug = PORTC_HOTPLUG_ENABLE;
3155 		if (intel_bios_encoder_hpd_invert(encoder->devdata))
3156 			hotplug |= BXT_DDIC_HPD_INVERT;
3157 		return hotplug;
3158 	default:
3159 		return 0;
3160 	}
3161 }
3162 
3163 static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv)
3164 {
3165 	intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG,
3166 			 PORTA_HOTPLUG_ENABLE |
3167 			 PORTB_HOTPLUG_ENABLE |
3168 			 PORTC_HOTPLUG_ENABLE |
3169 			 BXT_DDI_HPD_INVERT_MASK,
3170 			 intel_hpd_hotplug_enables(dev_priv, bxt_hotplug_enables));
3171 }
3172 
3173 static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
3174 {
3175 	u32 hotplug_irqs, enabled_irqs;
3176 
3177 	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.hpd);
3178 	hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.hpd);
3179 
3180 	bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3181 
3182 	bxt_hpd_detection_setup(dev_priv);
3183 }
3184 
3185 /*
3186  * SDEIER is also touched by the interrupt handler to work around missed PCH
3187  * interrupts. Hence we can't update it after the interrupt handler is enabled -
3188  * instead we unconditionally enable all PCH interrupt sources here, but then
3189  * only unmask them as needed with SDEIMR.
3190  *
3191  * Note that we currently do this after installing the interrupt handler,
3192  * but before we enable the master interrupt. That should be sufficient
3193  * to avoid races with the irq handler, assuming we have MSI. Shared legacy
3194  * interrupts could still race.
3195  */
3196 static void ibx_irq_postinstall(struct drm_i915_private *dev_priv)
3197 {
3198 	struct intel_uncore *uncore = &dev_priv->uncore;
3199 	u32 mask;
3200 
3201 	if (HAS_PCH_NOP(dev_priv))
3202 		return;
3203 
3204 	if (HAS_PCH_IBX(dev_priv))
3205 		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
3206 	else if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
3207 		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
3208 	else
3209 		mask = SDE_GMBUS_CPT;
3210 
3211 	GEN3_IRQ_INIT(uncore, SDE, ~mask, 0xffffffff);
3212 }
3213 
3214 static void ilk_irq_postinstall(struct drm_i915_private *dev_priv)
3215 {
3216 	struct intel_uncore *uncore = &dev_priv->uncore;
3217 	u32 display_mask, extra_mask;
3218 
3219 	if (GRAPHICS_VER(dev_priv) >= 7) {
3220 		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
3221 				DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB);
3222 		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
3223 			      DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
3224 			      DE_PLANE_FLIP_DONE_IVB(PLANE_C) |
3225 			      DE_PLANE_FLIP_DONE_IVB(PLANE_B) |
3226 			      DE_PLANE_FLIP_DONE_IVB(PLANE_A) |
3227 			      DE_DP_A_HOTPLUG_IVB);
3228 	} else {
3229 		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
3230 				DE_AUX_CHANNEL_A | DE_PIPEB_CRC_DONE |
3231 				DE_PIPEA_CRC_DONE | DE_POISON);
3232 		extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK |
3233 			      DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
3234 			      DE_PLANE_FLIP_DONE(PLANE_A) |
3235 			      DE_PLANE_FLIP_DONE(PLANE_B) |
3236 			      DE_DP_A_HOTPLUG);
3237 	}
3238 
3239 	if (IS_HASWELL(dev_priv)) {
3240 		gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
3241 		display_mask |= DE_EDP_PSR_INT_HSW;
3242 	}
3243 
3244 	if (IS_IRONLAKE_M(dev_priv))
3245 		extra_mask |= DE_PCU_EVENT;
3246 
3247 	dev_priv->irq_mask = ~display_mask;
3248 
3249 	ibx_irq_postinstall(dev_priv);
3250 
3251 	gen5_gt_irq_postinstall(to_gt(dev_priv));
3252 
3253 	GEN3_IRQ_INIT(uncore, DE, dev_priv->irq_mask,
3254 		      display_mask | extra_mask);
3255 }
3256 
3257 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
3258 {
3259 	lockdep_assert_held(&dev_priv->irq_lock);
3260 
3261 	if (dev_priv->display_irqs_enabled)
3262 		return;
3263 
3264 	dev_priv->display_irqs_enabled = true;
3265 
3266 	if (intel_irqs_enabled(dev_priv)) {
3267 		vlv_display_irq_reset(dev_priv);
3268 		vlv_display_irq_postinstall(dev_priv);
3269 	}
3270 }
3271 
3272 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
3273 {
3274 	lockdep_assert_held(&dev_priv->irq_lock);
3275 
3276 	if (!dev_priv->display_irqs_enabled)
3277 		return;
3278 
3279 	dev_priv->display_irqs_enabled = false;
3280 
3281 	if (intel_irqs_enabled(dev_priv))
3282 		vlv_display_irq_reset(dev_priv);
3283 }
3284 
3285 
3286 static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv)
3287 {
3288 	gen5_gt_irq_postinstall(to_gt(dev_priv));
3289 
3290 	spin_lock_irq(&dev_priv->irq_lock);
3291 	if (dev_priv->display_irqs_enabled)
3292 		vlv_display_irq_postinstall(dev_priv);
3293 	spin_unlock_irq(&dev_priv->irq_lock);
3294 
3295 	intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
3296 	intel_uncore_posting_read(&dev_priv->uncore, VLV_MASTER_IER);
3297 }
3298 
3299 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
3300 {
3301 	struct intel_uncore *uncore = &dev_priv->uncore;
3302 
3303 	u32 de_pipe_masked = gen8_de_pipe_fault_mask(dev_priv) |
3304 		GEN8_PIPE_CDCLK_CRC_DONE;
3305 	u32 de_pipe_enables;
3306 	u32 de_port_masked = gen8_de_port_aux_mask(dev_priv);
3307 	u32 de_port_enables;
3308 	u32 de_misc_masked = GEN8_DE_EDP_PSR;
3309 	u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
3310 		BIT(TRANSCODER_C) | BIT(TRANSCODER_D);
3311 	enum pipe pipe;
3312 
3313 	if (!HAS_DISPLAY(dev_priv))
3314 		return;
3315 
3316 	if (DISPLAY_VER(dev_priv) <= 10)
3317 		de_misc_masked |= GEN8_DE_MISC_GSE;
3318 
3319 	if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
3320 		de_port_masked |= BXT_DE_PORT_GMBUS;
3321 
3322 	if (DISPLAY_VER(dev_priv) >= 11) {
3323 		enum port port;
3324 
3325 		if (intel_bios_is_dsi_present(dev_priv, &port))
3326 			de_port_masked |= DSI0_TE | DSI1_TE;
3327 	}
3328 
3329 	de_pipe_enables = de_pipe_masked |
3330 		GEN8_PIPE_VBLANK |
3331 		gen8_de_pipe_underrun_mask(dev_priv) |
3332 		gen8_de_pipe_flip_done_mask(dev_priv);
3333 
3334 	de_port_enables = de_port_masked;
3335 	if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
3336 		de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
3337 	else if (IS_BROADWELL(dev_priv))
3338 		de_port_enables |= BDW_DE_PORT_HOTPLUG_MASK;
3339 
3340 	if (DISPLAY_VER(dev_priv) >= 12) {
3341 		enum transcoder trans;
3342 
3343 		for_each_cpu_transcoder_masked(dev_priv, trans, trans_mask) {
3344 			enum intel_display_power_domain domain;
3345 
3346 			domain = POWER_DOMAIN_TRANSCODER(trans);
3347 			if (!intel_display_power_is_enabled(dev_priv, domain))
3348 				continue;
3349 
3350 			gen3_assert_iir_is_zero(uncore, TRANS_PSR_IIR(trans));
3351 		}
3352 	} else {
3353 		gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
3354 	}
3355 
3356 	for_each_pipe(dev_priv, pipe) {
3357 		dev_priv->de_irq_mask[pipe] = ~de_pipe_masked;
3358 
3359 		if (intel_display_power_is_enabled(dev_priv,
3360 				POWER_DOMAIN_PIPE(pipe)))
3361 			GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
3362 					  dev_priv->de_irq_mask[pipe],
3363 					  de_pipe_enables);
3364 	}
3365 
3366 	GEN3_IRQ_INIT(uncore, GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
3367 	GEN3_IRQ_INIT(uncore, GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);
3368 
3369 	if (DISPLAY_VER(dev_priv) >= 11) {
3370 		u32 de_hpd_masked = 0;
3371 		u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK |
3372 				     GEN11_DE_TBT_HOTPLUG_MASK;
3373 
3374 		GEN3_IRQ_INIT(uncore, GEN11_DE_HPD_, ~de_hpd_masked,
3375 			      de_hpd_enables);
3376 	}
3377 }
3378 
3379 static void icp_irq_postinstall(struct drm_i915_private *dev_priv)
3380 {
3381 	struct intel_uncore *uncore = &dev_priv->uncore;
3382 	u32 mask = SDE_GMBUS_ICP;
3383 
3384 	GEN3_IRQ_INIT(uncore, SDE, ~mask, 0xffffffff);
3385 }
3386 
3387 static void gen8_irq_postinstall(struct drm_i915_private *dev_priv)
3388 {
3389 	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
3390 		icp_irq_postinstall(dev_priv);
3391 	else if (HAS_PCH_SPLIT(dev_priv))
3392 		ibx_irq_postinstall(dev_priv);
3393 
3394 	gen8_gt_irq_postinstall(to_gt(dev_priv));
3395 	gen8_de_irq_postinstall(dev_priv);
3396 
3397 	gen8_master_intr_enable(dev_priv->uncore.regs);
3398 }
3399 
3400 static void gen11_de_irq_postinstall(struct drm_i915_private *dev_priv)
3401 {
3402 	if (!HAS_DISPLAY(dev_priv))
3403 		return;
3404 
3405 	gen8_de_irq_postinstall(dev_priv);
3406 
3407 	intel_uncore_write(&dev_priv->uncore, GEN11_DISPLAY_INT_CTL,
3408 			   GEN11_DISPLAY_IRQ_ENABLE);
3409 }
3410 
3411 static void gen11_irq_postinstall(struct drm_i915_private *dev_priv)
3412 {
3413 	struct intel_gt *gt = to_gt(dev_priv);
3414 	struct intel_uncore *uncore = gt->uncore;
3415 	u32 gu_misc_masked = GEN11_GU_MISC_GSE;
3416 
3417 	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
3418 		icp_irq_postinstall(dev_priv);
3419 
3420 	gen11_gt_irq_postinstall(gt);
3421 	gen11_de_irq_postinstall(dev_priv);
3422 
3423 	GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);
3424 
3425 	gen11_master_intr_enable(uncore->regs);
3426 	intel_uncore_posting_read(&dev_priv->uncore, GEN11_GFX_MSTR_IRQ);
3427 }
3428 
3429 static void dg1_irq_postinstall(struct drm_i915_private *dev_priv)
3430 {
3431 	struct intel_uncore *uncore = &dev_priv->uncore;
3432 	u32 gu_misc_masked = GEN11_GU_MISC_GSE;
3433 	struct intel_gt *gt;
3434 	unsigned int i;
3435 
3436 	for_each_gt(gt, dev_priv, i)
3437 		gen11_gt_irq_postinstall(gt);
3438 
3439 	GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);
3440 
3441 	if (HAS_DISPLAY(dev_priv)) {
3442 		icp_irq_postinstall(dev_priv);
3443 		gen8_de_irq_postinstall(dev_priv);
3444 		intel_uncore_write(&dev_priv->uncore, GEN11_DISPLAY_INT_CTL,
3445 				   GEN11_DISPLAY_IRQ_ENABLE);
3446 	}
3447 
3448 	dg1_master_intr_enable(uncore->regs);
3449 	intel_uncore_posting_read(uncore, DG1_MSTR_TILE_INTR);
3450 }
3451 
3452 static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv)
3453 {
3454 	gen8_gt_irq_postinstall(to_gt(dev_priv));
3455 
3456 	spin_lock_irq(&dev_priv->irq_lock);
3457 	if (dev_priv->display_irqs_enabled)
3458 		vlv_display_irq_postinstall(dev_priv);
3459 	spin_unlock_irq(&dev_priv->irq_lock);
3460 
3461 	intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
3462 	intel_uncore_posting_read(&dev_priv->uncore, GEN8_MASTER_IRQ);
3463 }
3464 
3465 static void i8xx_irq_reset(struct drm_i915_private *dev_priv)
3466 {
3467 	struct intel_uncore *uncore = &dev_priv->uncore;
3468 
3469 	i9xx_pipestat_irq_reset(dev_priv);
3470 
3471 	gen2_irq_reset(uncore);
3472 	dev_priv->irq_mask = ~0u;
3473 }
3474 
3475 static u32 i9xx_error_mask(struct drm_i915_private *i915)
3476 {
3477 	/*
3478 	 * On gen2/3 FBC generates (seemingly spurious)
3479 	 * display INVALID_GTT/INVALID_GTT_PTE table errors.
3480 	 *
3481 	 * Also gen3 bspec has this to say:
3482 	 * "DISPA_INVALID_GTT_PTE
3483 	 "  [DevNapa] : Reserved. This bit does not reflect the page
3484 	 "              table error for the display plane A."
3485 	 *
3486 	 * Unfortunately we can't mask off individual PGTBL_ER bits,
3487 	 * so we just have to mask off all page table errors via EMR.
3488 	 */
3489 	if (HAS_FBC(i915))
3490 		return ~I915_ERROR_MEMORY_REFRESH;
3491 	else
3492 		return ~(I915_ERROR_PAGE_TABLE |
3493 			 I915_ERROR_MEMORY_REFRESH);
3494 }
3495 
3496 static void i8xx_irq_postinstall(struct drm_i915_private *dev_priv)
3497 {
3498 	struct intel_uncore *uncore = &dev_priv->uncore;
3499 	u16 enable_mask;
3500 
3501 	intel_uncore_write16(uncore, EMR, i9xx_error_mask(dev_priv));
3502 
3503 	/* Unmask the interrupts that we always want on. */
3504 	dev_priv->irq_mask =
3505 		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3506 		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3507 		  I915_MASTER_ERROR_INTERRUPT);
3508 
3509 	enable_mask =
3510 		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3511 		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3512 		I915_MASTER_ERROR_INTERRUPT |
3513 		I915_USER_INTERRUPT;
3514 
3515 	gen2_irq_init(uncore, dev_priv->irq_mask, enable_mask);
3516 
3517 	/* Interrupt setup is already guaranteed to be single-threaded, this is
3518 	 * just to make the assert_spin_locked check happy. */
3519 	spin_lock_irq(&dev_priv->irq_lock);
3520 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3521 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
3522 	spin_unlock_irq(&dev_priv->irq_lock);
3523 }
3524 
3525 static void i8xx_error_irq_ack(struct drm_i915_private *i915,
3526 			       u16 *eir, u16 *eir_stuck)
3527 {
3528 	struct intel_uncore *uncore = &i915->uncore;
3529 	u16 emr;
3530 
3531 	*eir = intel_uncore_read16(uncore, EIR);
3532 	intel_uncore_write16(uncore, EIR, *eir);
3533 
3534 	*eir_stuck = intel_uncore_read16(uncore, EIR);
3535 	if (*eir_stuck == 0)
3536 		return;
3537 
3538 	/*
3539 	 * Toggle all EMR bits to make sure we get an edge
3540 	 * in the ISR master error bit if we don't clear
3541 	 * all the EIR bits. Otherwise the edge triggered
3542 	 * IIR on i965/g4x wouldn't notice that an interrupt
3543 	 * is still pending. Also some EIR bits can't be
3544 	 * cleared except by handling the underlying error
3545 	 * (or by a GPU reset) so we mask any bit that
3546 	 * remains set.
3547 	 */
3548 	emr = intel_uncore_read16(uncore, EMR);
3549 	intel_uncore_write16(uncore, EMR, 0xffff);
3550 	intel_uncore_write16(uncore, EMR, emr | *eir_stuck);
3551 }
3552 
3553 static void i8xx_error_irq_handler(struct drm_i915_private *dev_priv,
3554 				   u16 eir, u16 eir_stuck)
3555 {
3556 	drm_dbg(&dev_priv->drm, "Master Error: EIR 0x%04x\n", eir);
3557 
3558 	if (eir_stuck)
3559 		drm_dbg(&dev_priv->drm, "EIR stuck: 0x%04x, masked\n",
3560 			eir_stuck);
3561 
3562 	drm_dbg(&dev_priv->drm, "PGTBL_ER: 0x%08x\n",
3563 		intel_uncore_read(&dev_priv->uncore, PGTBL_ER));
3564 }
3565 
3566 static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv,
3567 			       u32 *eir, u32 *eir_stuck)
3568 {
3569 	u32 emr;
3570 
3571 	*eir = intel_uncore_read(&dev_priv->uncore, EIR);
3572 	intel_uncore_write(&dev_priv->uncore, EIR, *eir);
3573 
3574 	*eir_stuck = intel_uncore_read(&dev_priv->uncore, EIR);
3575 	if (*eir_stuck == 0)
3576 		return;
3577 
3578 	/*
3579 	 * Toggle all EMR bits to make sure we get an edge
3580 	 * in the ISR master error bit if we don't clear
3581 	 * all the EIR bits. Otherwise the edge triggered
3582 	 * IIR on i965/g4x wouldn't notice that an interrupt
3583 	 * is still pending. Also some EIR bits can't be
3584 	 * cleared except by handling the underlying error
3585 	 * (or by a GPU reset) so we mask any bit that
3586 	 * remains set.
3587 	 */
3588 	emr = intel_uncore_read(&dev_priv->uncore, EMR);
3589 	intel_uncore_write(&dev_priv->uncore, EMR, 0xffffffff);
3590 	intel_uncore_write(&dev_priv->uncore, EMR, emr | *eir_stuck);
3591 }
3592 
3593 static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv,
3594 				   u32 eir, u32 eir_stuck)
3595 {
3596 	drm_dbg(&dev_priv->drm, "Master Error, EIR 0x%08x\n", eir);
3597 
3598 	if (eir_stuck)
3599 		drm_dbg(&dev_priv->drm, "EIR stuck: 0x%08x, masked\n",
3600 			eir_stuck);
3601 
3602 	drm_dbg(&dev_priv->drm, "PGTBL_ER: 0x%08x\n",
3603 		intel_uncore_read(&dev_priv->uncore, PGTBL_ER));
3604 }
3605 
3606 static irqreturn_t i8xx_irq_handler(int irq, void *arg)
3607 {
3608 	struct drm_i915_private *dev_priv = arg;
3609 	irqreturn_t ret = IRQ_NONE;
3610 
3611 	if (!intel_irqs_enabled(dev_priv))
3612 		return IRQ_NONE;
3613 
3614 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
3615 	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
3616 
3617 	do {
3618 		u32 pipe_stats[I915_MAX_PIPES] = {};
3619 		u16 eir = 0, eir_stuck = 0;
3620 		u16 iir;
3621 
3622 		iir = intel_uncore_read16(&dev_priv->uncore, GEN2_IIR);
3623 		if (iir == 0)
3624 			break;
3625 
3626 		ret = IRQ_HANDLED;
3627 
3628 		/* Call regardless, as some status bits might not be
3629 		 * signalled in iir */
3630 		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
3631 
3632 		if (iir & I915_MASTER_ERROR_INTERRUPT)
3633 			i8xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
3634 
3635 		intel_uncore_write16(&dev_priv->uncore, GEN2_IIR, iir);
3636 
3637 		if (iir & I915_USER_INTERRUPT)
3638 			intel_engine_cs_irq(to_gt(dev_priv)->engine[RCS0], iir);
3639 
3640 		if (iir & I915_MASTER_ERROR_INTERRUPT)
3641 			i8xx_error_irq_handler(dev_priv, eir, eir_stuck);
3642 
3643 		i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats);
3644 	} while (0);
3645 
3646 	pmu_irq_stats(dev_priv, ret);
3647 
3648 	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
3649 
3650 	return ret;
3651 }
3652 
3653 static void i915_irq_reset(struct drm_i915_private *dev_priv)
3654 {
3655 	struct intel_uncore *uncore = &dev_priv->uncore;
3656 
3657 	if (I915_HAS_HOTPLUG(dev_priv)) {
3658 		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
3659 		intel_uncore_rmw(&dev_priv->uncore, PORT_HOTPLUG_STAT, 0, 0);
3660 	}
3661 
3662 	i9xx_pipestat_irq_reset(dev_priv);
3663 
3664 	GEN3_IRQ_RESET(uncore, GEN2_);
3665 	dev_priv->irq_mask = ~0u;
3666 }
3667 
3668 static void i915_irq_postinstall(struct drm_i915_private *dev_priv)
3669 {
3670 	struct intel_uncore *uncore = &dev_priv->uncore;
3671 	u32 enable_mask;
3672 
3673 	intel_uncore_write(uncore, EMR, i9xx_error_mask(dev_priv));
3674 
3675 	/* Unmask the interrupts that we always want on. */
3676 	dev_priv->irq_mask =
3677 		~(I915_ASLE_INTERRUPT |
3678 		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3679 		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3680 		  I915_MASTER_ERROR_INTERRUPT);
3681 
3682 	enable_mask =
3683 		I915_ASLE_INTERRUPT |
3684 		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3685 		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3686 		I915_MASTER_ERROR_INTERRUPT |
3687 		I915_USER_INTERRUPT;
3688 
3689 	if (I915_HAS_HOTPLUG(dev_priv)) {
3690 		/* Enable in IER... */
3691 		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
3692 		/* and unmask in IMR */
3693 		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
3694 	}
3695 
3696 	GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);
3697 
3698 	/* Interrupt setup is already guaranteed to be single-threaded, this is
3699 	 * just to make the assert_spin_locked check happy. */
3700 	spin_lock_irq(&dev_priv->irq_lock);
3701 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3702 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
3703 	spin_unlock_irq(&dev_priv->irq_lock);
3704 
3705 	i915_enable_asle_pipestat(dev_priv);
3706 }
3707 
3708 static irqreturn_t i915_irq_handler(int irq, void *arg)
3709 {
3710 	struct drm_i915_private *dev_priv = arg;
3711 	irqreturn_t ret = IRQ_NONE;
3712 
3713 	if (!intel_irqs_enabled(dev_priv))
3714 		return IRQ_NONE;
3715 
3716 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
3717 	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
3718 
3719 	do {
3720 		u32 pipe_stats[I915_MAX_PIPES] = {};
3721 		u32 eir = 0, eir_stuck = 0;
3722 		u32 hotplug_status = 0;
3723 		u32 iir;
3724 
3725 		iir = intel_uncore_read(&dev_priv->uncore, GEN2_IIR);
3726 		if (iir == 0)
3727 			break;
3728 
3729 		ret = IRQ_HANDLED;
3730 
3731 		if (I915_HAS_HOTPLUG(dev_priv) &&
3732 		    iir & I915_DISPLAY_PORT_INTERRUPT)
3733 			hotplug_status = i9xx_hpd_irq_ack(dev_priv);
3734 
3735 		/* Call regardless, as some status bits might not be
3736 		 * signalled in iir */
3737 		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
3738 
3739 		if (iir & I915_MASTER_ERROR_INTERRUPT)
3740 			i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
3741 
3742 		intel_uncore_write(&dev_priv->uncore, GEN2_IIR, iir);
3743 
3744 		if (iir & I915_USER_INTERRUPT)
3745 			intel_engine_cs_irq(to_gt(dev_priv)->engine[RCS0], iir);
3746 
3747 		if (iir & I915_MASTER_ERROR_INTERRUPT)
3748 			i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
3749 
3750 		if (hotplug_status)
3751 			i9xx_hpd_irq_handler(dev_priv, hotplug_status);
3752 
3753 		i915_pipestat_irq_handler(dev_priv, iir, pipe_stats);
3754 	} while (0);
3755 
3756 	pmu_irq_stats(dev_priv, ret);
3757 
3758 	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
3759 
3760 	return ret;
3761 }
3762 
3763 static void i965_irq_reset(struct drm_i915_private *dev_priv)
3764 {
3765 	struct intel_uncore *uncore = &dev_priv->uncore;
3766 
3767 	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
3768 	intel_uncore_rmw(uncore, PORT_HOTPLUG_STAT, 0, 0);
3769 
3770 	i9xx_pipestat_irq_reset(dev_priv);
3771 
3772 	GEN3_IRQ_RESET(uncore, GEN2_);
3773 	dev_priv->irq_mask = ~0u;
3774 }
3775 
3776 static u32 i965_error_mask(struct drm_i915_private *i915)
3777 {
3778 	/*
3779 	 * Enable some error detection, note the instruction error mask
3780 	 * bit is reserved, so we leave it masked.
3781 	 *
3782 	 * i965 FBC no longer generates spurious GTT errors,
3783 	 * so we can always enable the page table errors.
3784 	 */
3785 	if (IS_G4X(i915))
3786 		return ~(GM45_ERROR_PAGE_TABLE |
3787 			 GM45_ERROR_MEM_PRIV |
3788 			 GM45_ERROR_CP_PRIV |
3789 			 I915_ERROR_MEMORY_REFRESH);
3790 	else
3791 		return ~(I915_ERROR_PAGE_TABLE |
3792 			 I915_ERROR_MEMORY_REFRESH);
3793 }
3794 
3795 static void i965_irq_postinstall(struct drm_i915_private *dev_priv)
3796 {
3797 	struct intel_uncore *uncore = &dev_priv->uncore;
3798 	u32 enable_mask;
3799 
3800 	intel_uncore_write(uncore, EMR, i965_error_mask(dev_priv));
3801 
3802 	/* Unmask the interrupts that we always want on. */
3803 	dev_priv->irq_mask =
3804 		~(I915_ASLE_INTERRUPT |
3805 		  I915_DISPLAY_PORT_INTERRUPT |
3806 		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3807 		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3808 		  I915_MASTER_ERROR_INTERRUPT);
3809 
3810 	enable_mask =
3811 		I915_ASLE_INTERRUPT |
3812 		I915_DISPLAY_PORT_INTERRUPT |
3813 		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3814 		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3815 		I915_MASTER_ERROR_INTERRUPT |
3816 		I915_USER_INTERRUPT;
3817 
3818 	if (IS_G4X(dev_priv))
3819 		enable_mask |= I915_BSD_USER_INTERRUPT;
3820 
3821 	GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);
3822 
3823 	/* Interrupt setup is already guaranteed to be single-threaded, this is
3824 	 * just to make the assert_spin_locked check happy. */
3825 	spin_lock_irq(&dev_priv->irq_lock);
3826 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3827 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3828 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
3829 	spin_unlock_irq(&dev_priv->irq_lock);
3830 
3831 	i915_enable_asle_pipestat(dev_priv);
3832 }
3833 
3834 static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
3835 {
3836 	u32 hotplug_en;
3837 
3838 	lockdep_assert_held(&dev_priv->irq_lock);
3839 
3840 	/* Note HDMI and DP share hotplug bits */
3841 	/* enable bits are the same for all generations */
3842 	hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915);
3843 	/* Programming the CRT detection parameters tends
3844 	   to generate a spurious hotplug event about three
3845 	   seconds later.  So just do it once.
3846 	*/
3847 	if (IS_G4X(dev_priv))
3848 		hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
3849 	hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
3850 
3851 	/* Ignore TV since it's buggy */
3852 	i915_hotplug_interrupt_update_locked(dev_priv,
3853 					     HOTPLUG_INT_EN_MASK |
3854 					     CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
3855 					     CRT_HOTPLUG_ACTIVATION_PERIOD_64,
3856 					     hotplug_en);
3857 }
3858 
3859 static irqreturn_t i965_irq_handler(int irq, void *arg)
3860 {
3861 	struct drm_i915_private *dev_priv = arg;
3862 	irqreturn_t ret = IRQ_NONE;
3863 
3864 	if (!intel_irqs_enabled(dev_priv))
3865 		return IRQ_NONE;
3866 
3867 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
3868 	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
3869 
3870 	do {
3871 		u32 pipe_stats[I915_MAX_PIPES] = {};
3872 		u32 eir = 0, eir_stuck = 0;
3873 		u32 hotplug_status = 0;
3874 		u32 iir;
3875 
3876 		iir = intel_uncore_read(&dev_priv->uncore, GEN2_IIR);
3877 		if (iir == 0)
3878 			break;
3879 
3880 		ret = IRQ_HANDLED;
3881 
3882 		if (iir & I915_DISPLAY_PORT_INTERRUPT)
3883 			hotplug_status = i9xx_hpd_irq_ack(dev_priv);
3884 
3885 		/* Call regardless, as some status bits might not be
3886 		 * signalled in iir */
3887 		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
3888 
3889 		if (iir & I915_MASTER_ERROR_INTERRUPT)
3890 			i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
3891 
3892 		intel_uncore_write(&dev_priv->uncore, GEN2_IIR, iir);
3893 
3894 		if (iir & I915_USER_INTERRUPT)
3895 			intel_engine_cs_irq(to_gt(dev_priv)->engine[RCS0],
3896 					    iir);
3897 
3898 		if (iir & I915_BSD_USER_INTERRUPT)
3899 			intel_engine_cs_irq(to_gt(dev_priv)->engine[VCS0],
3900 					    iir >> 25);
3901 
3902 		if (iir & I915_MASTER_ERROR_INTERRUPT)
3903 			i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
3904 
3905 		if (hotplug_status)
3906 			i9xx_hpd_irq_handler(dev_priv, hotplug_status);
3907 
3908 		i965_pipestat_irq_handler(dev_priv, iir, pipe_stats);
3909 	} while (0);
3910 
3911 	pmu_irq_stats(dev_priv, IRQ_HANDLED);
3912 
3913 	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
3914 
3915 	return ret;
3916 }
3917 
3918 struct intel_hotplug_funcs {
3919 	void (*hpd_irq_setup)(struct drm_i915_private *i915);
3920 };
3921 
3922 #define HPD_FUNCS(platform)					 \
3923 static const struct intel_hotplug_funcs platform##_hpd_funcs = { \
3924 	.hpd_irq_setup = platform##_hpd_irq_setup,		 \
3925 }
3926 
3927 HPD_FUNCS(i915);
3928 HPD_FUNCS(dg1);
3929 HPD_FUNCS(gen11);
3930 HPD_FUNCS(bxt);
3931 HPD_FUNCS(icp);
3932 HPD_FUNCS(spt);
3933 HPD_FUNCS(ilk);
3934 #undef HPD_FUNCS
3935 
3936 void intel_hpd_irq_setup(struct drm_i915_private *i915)
3937 {
3938 	if (i915->display_irqs_enabled && i915->display.funcs.hotplug)
3939 		i915->display.funcs.hotplug->hpd_irq_setup(i915);
3940 }
3941 
3942 /**
3943  * intel_irq_init - initializes irq support
3944  * @dev_priv: i915 device instance
3945  *
3946  * This function initializes all the irq support including work items, timers
3947  * and all the vtables. It does not setup the interrupt itself though.
3948  */
3949 void intel_irq_init(struct drm_i915_private *dev_priv)
3950 {
3951 	int i;
3952 
3953 	INIT_WORK(&dev_priv->l3_parity.error_work, ivb_parity_work);
3954 	for (i = 0; i < MAX_L3_SLICES; ++i)
3955 		dev_priv->l3_parity.remap_info[i] = NULL;
3956 
3957 	/* pre-gen11 the guc irqs bits are in the upper 16 bits of the pm reg */
3958 	if (HAS_GT_UC(dev_priv) && GRAPHICS_VER(dev_priv) < 11)
3959 		to_gt(dev_priv)->pm_guc_events = GUC_INTR_GUC2HOST << 16;
3960 
3961 	if (!HAS_DISPLAY(dev_priv))
3962 		return;
3963 
3964 	intel_hpd_init_pins(dev_priv);
3965 
3966 	intel_hpd_init_early(dev_priv);
3967 
3968 	dev_priv->drm.vblank_disable_immediate = true;
3969 
3970 	/* Most platforms treat the display irq block as an always-on
3971 	 * power domain. vlv/chv can disable it at runtime and need
3972 	 * special care to avoid writing any of the display block registers
3973 	 * outside of the power domain. We defer setting up the display irqs
3974 	 * in this case to the runtime pm.
3975 	 */
3976 	dev_priv->display_irqs_enabled = true;
3977 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
3978 		dev_priv->display_irqs_enabled = false;
3979 
3980 	if (HAS_GMCH(dev_priv)) {
3981 		if (I915_HAS_HOTPLUG(dev_priv))
3982 			dev_priv->display.funcs.hotplug = &i915_hpd_funcs;
3983 	} else {
3984 		if (HAS_PCH_DG2(dev_priv))
3985 			dev_priv->display.funcs.hotplug = &icp_hpd_funcs;
3986 		else if (HAS_PCH_DG1(dev_priv))
3987 			dev_priv->display.funcs.hotplug = &dg1_hpd_funcs;
3988 		else if (DISPLAY_VER(dev_priv) >= 11)
3989 			dev_priv->display.funcs.hotplug = &gen11_hpd_funcs;
3990 		else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
3991 			dev_priv->display.funcs.hotplug = &bxt_hpd_funcs;
3992 		else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
3993 			dev_priv->display.funcs.hotplug = &icp_hpd_funcs;
3994 		else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
3995 			dev_priv->display.funcs.hotplug = &spt_hpd_funcs;
3996 		else
3997 			dev_priv->display.funcs.hotplug = &ilk_hpd_funcs;
3998 	}
3999 }
4000 
4001 /**
4002  * intel_irq_fini - deinitializes IRQ support
4003  * @i915: i915 device instance
4004  *
4005  * This function deinitializes all the IRQ support.
4006  */
4007 void intel_irq_fini(struct drm_i915_private *i915)
4008 {
4009 	int i;
4010 
4011 	for (i = 0; i < MAX_L3_SLICES; ++i)
4012 		kfree(i915->l3_parity.remap_info[i]);
4013 }
4014 
4015 static irq_handler_t intel_irq_handler(struct drm_i915_private *dev_priv)
4016 {
4017 	if (HAS_GMCH(dev_priv)) {
4018 		if (IS_CHERRYVIEW(dev_priv))
4019 			return cherryview_irq_handler;
4020 		else if (IS_VALLEYVIEW(dev_priv))
4021 			return valleyview_irq_handler;
4022 		else if (GRAPHICS_VER(dev_priv) == 4)
4023 			return i965_irq_handler;
4024 		else if (GRAPHICS_VER(dev_priv) == 3)
4025 			return i915_irq_handler;
4026 		else
4027 			return i8xx_irq_handler;
4028 	} else {
4029 		if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
4030 			return dg1_irq_handler;
4031 		else if (GRAPHICS_VER(dev_priv) >= 11)
4032 			return gen11_irq_handler;
4033 		else if (GRAPHICS_VER(dev_priv) >= 8)
4034 			return gen8_irq_handler;
4035 		else
4036 			return ilk_irq_handler;
4037 	}
4038 }
4039 
4040 static void intel_irq_reset(struct drm_i915_private *dev_priv)
4041 {
4042 	if (HAS_GMCH(dev_priv)) {
4043 		if (IS_CHERRYVIEW(dev_priv))
4044 			cherryview_irq_reset(dev_priv);
4045 		else if (IS_VALLEYVIEW(dev_priv))
4046 			valleyview_irq_reset(dev_priv);
4047 		else if (GRAPHICS_VER(dev_priv) == 4)
4048 			i965_irq_reset(dev_priv);
4049 		else if (GRAPHICS_VER(dev_priv) == 3)
4050 			i915_irq_reset(dev_priv);
4051 		else
4052 			i8xx_irq_reset(dev_priv);
4053 	} else {
4054 		if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
4055 			dg1_irq_reset(dev_priv);
4056 		else if (GRAPHICS_VER(dev_priv) >= 11)
4057 			gen11_irq_reset(dev_priv);
4058 		else if (GRAPHICS_VER(dev_priv) >= 8)
4059 			gen8_irq_reset(dev_priv);
4060 		else
4061 			ilk_irq_reset(dev_priv);
4062 	}
4063 }
4064 
4065 static void intel_irq_postinstall(struct drm_i915_private *dev_priv)
4066 {
4067 	if (HAS_GMCH(dev_priv)) {
4068 		if (IS_CHERRYVIEW(dev_priv))
4069 			cherryview_irq_postinstall(dev_priv);
4070 		else if (IS_VALLEYVIEW(dev_priv))
4071 			valleyview_irq_postinstall(dev_priv);
4072 		else if (GRAPHICS_VER(dev_priv) == 4)
4073 			i965_irq_postinstall(dev_priv);
4074 		else if (GRAPHICS_VER(dev_priv) == 3)
4075 			i915_irq_postinstall(dev_priv);
4076 		else
4077 			i8xx_irq_postinstall(dev_priv);
4078 	} else {
4079 		if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
4080 			dg1_irq_postinstall(dev_priv);
4081 		else if (GRAPHICS_VER(dev_priv) >= 11)
4082 			gen11_irq_postinstall(dev_priv);
4083 		else if (GRAPHICS_VER(dev_priv) >= 8)
4084 			gen8_irq_postinstall(dev_priv);
4085 		else
4086 			ilk_irq_postinstall(dev_priv);
4087 	}
4088 }
4089 
4090 /**
4091  * intel_irq_install - enables the hardware interrupt
4092  * @dev_priv: i915 device instance
4093  *
4094  * This function enables the hardware interrupt handling, but leaves the hotplug
4095  * handling still disabled. It is called after intel_irq_init().
4096  *
4097  * In the driver load and resume code we need working interrupts in a few places
4098  * but don't want to deal with the hassle of concurrent probe and hotplug
4099  * workers. Hence the split into this two-stage approach.
4100  */
4101 int intel_irq_install(struct drm_i915_private *dev_priv)
4102 {
4103 	int irq = to_pci_dev(dev_priv->drm.dev)->irq;
4104 	int ret;
4105 
4106 	/*
4107 	 * We enable some interrupt sources in our postinstall hooks, so mark
4108 	 * interrupts as enabled _before_ actually enabling them to avoid
4109 	 * special cases in our ordering checks.
4110 	 */
4111 	dev_priv->runtime_pm.irqs_enabled = true;
4112 
4113 	dev_priv->irq_enabled = true;
4114 
4115 	intel_irq_reset(dev_priv);
4116 
4117 	ret = request_irq(irq, intel_irq_handler(dev_priv),
4118 			  IRQF_SHARED, DRIVER_NAME, dev_priv);
4119 	if (ret < 0) {
4120 		dev_priv->irq_enabled = false;
4121 		return ret;
4122 	}
4123 
4124 	intel_irq_postinstall(dev_priv);
4125 
4126 	return ret;
4127 }
4128 
4129 /**
4130  * intel_irq_uninstall - finilizes all irq handling
4131  * @dev_priv: i915 device instance
4132  *
4133  * This stops interrupt and hotplug handling and unregisters and frees all
4134  * resources acquired in the init functions.
4135  */
4136 void intel_irq_uninstall(struct drm_i915_private *dev_priv)
4137 {
4138 	int irq = to_pci_dev(dev_priv->drm.dev)->irq;
4139 
4140 	/*
4141 	 * FIXME we can get called twice during driver probe
4142 	 * error handling as well as during driver remove due to
4143 	 * intel_modeset_driver_remove() calling us out of sequence.
4144 	 * Would be nice if it didn't do that...
4145 	 */
4146 	if (!dev_priv->irq_enabled)
4147 		return;
4148 
4149 	dev_priv->irq_enabled = false;
4150 
4151 	intel_irq_reset(dev_priv);
4152 
4153 	free_irq(irq, dev_priv);
4154 
4155 	intel_hpd_cancel_work(dev_priv);
4156 	dev_priv->runtime_pm.irqs_enabled = false;
4157 }
4158 
4159 /**
4160  * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
4161  * @dev_priv: i915 device instance
4162  *
4163  * This function is used to disable interrupts at runtime, both in the runtime
4164  * pm and the system suspend/resume code.
4165  */
4166 void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
4167 {
4168 	intel_irq_reset(dev_priv);
4169 	dev_priv->runtime_pm.irqs_enabled = false;
4170 	intel_synchronize_irq(dev_priv);
4171 }
4172 
4173 /**
4174  * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
4175  * @dev_priv: i915 device instance
4176  *
4177  * This function is used to enable interrupts at runtime, both in the runtime
4178  * pm and the system suspend/resume code.
4179  */
4180 void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
4181 {
4182 	dev_priv->runtime_pm.irqs_enabled = true;
4183 	intel_irq_reset(dev_priv);
4184 	intel_irq_postinstall(dev_priv);
4185 }
4186 
4187 bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
4188 {
4189 	return dev_priv->runtime_pm.irqs_enabled;
4190 }
4191 
4192 void intel_synchronize_irq(struct drm_i915_private *i915)
4193 {
4194 	synchronize_irq(to_pci_dev(i915->drm.dev)->irq);
4195 }
4196 
4197 void intel_synchronize_hardirq(struct drm_i915_private *i915)
4198 {
4199 	synchronize_hardirq(to_pci_dev(i915->drm.dev)->irq);
4200 }
4201