/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/sysrq.h>

#include <drm/drm_drv.h>

#include "display/icl_dsi_regs.h"
#include "display/intel_de.h"
#include "display/intel_display_trace.h"
#include "display/intel_display_types.h"
#include "display/intel_fifo_underrun.h"
#include "display/intel_hotplug.h"
#include "display/intel_lpe_audio.h"
#include "display/intel_psr.h"

#include "gt/intel_breadcrumbs.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_irq.h"
#include "gt/intel_gt_pm_irq.h"
#include "gt/intel_gt_regs.h"
#include "gt/intel_rps.h"

#include "i915_driver.h"
#include "i915_drv.h"
#include "i915_irq.h"
#include "intel_pm.h"

/**
 * DOC: interrupt handling
 *
 * These functions provide the basic support for enabling and disabling
 * interrupt handling. There's a lot more functionality in i915_irq.c and
 * related files, but that will be described in separate chapters.
 */

/*
 * Interrupt statistics for the PMU. Increment the counter only if the
 * interrupt originated from the GPU, so that interrupts from another
 * device sharing the interrupt line are not counted.
 */
static inline void pmu_irq_stats(struct drm_i915_private *i915,
				 irqreturn_t res)
{
	if (unlikely(res != IRQ_HANDLED))
		return;

	/*
	 * A clever compiler translates that into INC. A not so clever one
	 * should at least prevent store tearing.
	 */
	WRITE_ONCE(i915->pmu.irq_count, i915->pmu.irq_count + 1);
}

typedef bool (*long_pulse_detect_func)(enum hpd_pin pin, u32 val);
typedef u32 (*hotplug_enables_func)(struct drm_i915_private *i915,
				    enum hpd_pin pin);

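/*
 * Per-platform hotplug pin tables. Each entry maps an hpd_pin to the
 * trigger/enable bit(s) for that pin in the relevant hotplug register;
 * a zero entry never matches a trigger, i.e. the pin is not used on
 * that platform. The right tables are picked at init time by
 * intel_hpd_init_pins() below.
 */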
static const u32 hpd_ilk[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG,
};

static const u32 hpd_ivb[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
};

static const u32 hpd_bdw[HPD_NUM_PINS] = {
	[HPD_PORT_A] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_A),
};

static const u32 hpd_ibx[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG,
};

static const u32 hpd_cpt[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
};

static const u32 hpd_spt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
	[HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT,
};

static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN,
};

static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS,
};

static const u32 hpd_status_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS,
};

static const u32 hpd_bxt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_A),
	[HPD_PORT_B] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_B),
	[HPD_PORT_C] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_C),
};

static const u32 hpd_gen11[HPD_NUM_PINS] = {
	[HPD_PORT_TC1] = GEN11_TC_HOTPLUG(HPD_PORT_TC1) | GEN11_TBT_HOTPLUG(HPD_PORT_TC1),
	[HPD_PORT_TC2] = GEN11_TC_HOTPLUG(HPD_PORT_TC2) | GEN11_TBT_HOTPLUG(HPD_PORT_TC2),
	[HPD_PORT_TC3] = GEN11_TC_HOTPLUG(HPD_PORT_TC3) | GEN11_TBT_HOTPLUG(HPD_PORT_TC3),
	[HPD_PORT_TC4] = GEN11_TC_HOTPLUG(HPD_PORT_TC4) | GEN11_TBT_HOTPLUG(HPD_PORT_TC4),
	[HPD_PORT_TC5] = GEN11_TC_HOTPLUG(HPD_PORT_TC5) | GEN11_TBT_HOTPLUG(HPD_PORT_TC5),
	[HPD_PORT_TC6] = GEN11_TC_HOTPLUG(HPD_PORT_TC6) | GEN11_TBT_HOTPLUG(HPD_PORT_TC6),
};

static const u32 hpd_icp[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_A),
	[HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_B),
	[HPD_PORT_C] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_C),
	[HPD_PORT_TC1] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC1),
	[HPD_PORT_TC2] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC2),
	[HPD_PORT_TC3] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC3),
	[HPD_PORT_TC4] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC4),
	[HPD_PORT_TC5] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC5),
	[HPD_PORT_TC6] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC6),
};

static const u32 hpd_sde_dg1[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_A),
	[HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_B),
	[HPD_PORT_C] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_C),
	[HPD_PORT_D] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_D),
	[HPD_PORT_TC1] = SDE_TC_HOTPLUG_DG2(HPD_PORT_TC1),
};

static void intel_hpd_init_pins(struct drm_i915_private *dev_priv)
{
	struct intel_hotplug *hpd = &dev_priv->display.hotplug;

	if (HAS_GMCH(dev_priv)) {
		if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
		    IS_CHERRYVIEW(dev_priv))
			hpd->hpd = hpd_status_g4x;
		else
			hpd->hpd = hpd_status_i915;
		return;
	}

	if (DISPLAY_VER(dev_priv) >= 11)
		hpd->hpd = hpd_gen11;
	else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
		hpd->hpd = hpd_bxt;
	else if (DISPLAY_VER(dev_priv) >= 8)
		hpd->hpd = hpd_bdw;
	else if (DISPLAY_VER(dev_priv) >= 7)
		hpd->hpd = hpd_ivb;
	else
		hpd->hpd = hpd_ilk;

	if ((INTEL_PCH_TYPE(dev_priv) < PCH_DG1) &&
	    (!HAS_PCH_SPLIT(dev_priv) || HAS_PCH_NOP(dev_priv)))
		return;

	if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1)
		hpd->pch_hpd = hpd_sde_dg1;
	else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
		hpd->pch_hpd = hpd_icp;
	else if (HAS_PCH_CNP(dev_priv) || HAS_PCH_SPT(dev_priv))
		hpd->pch_hpd = hpd_spt;
	else if (HAS_PCH_LPT(dev_priv) || HAS_PCH_CPT(dev_priv))
		hpd->pch_hpd = hpd_cpt;
	else if (HAS_PCH_IBX(dev_priv))
		hpd->pch_hpd = hpd_ibx;
	else
		MISSING_CASE(INTEL_PCH_TYPE(dev_priv));
}

static void
intel_handle_vblank(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);

	drm_crtc_handle_vblank(&crtc->base);
}

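/*
 * The interrupt banks on these parts are a triplet of IMR (mask),
 * IER (enable) and IIR (identity/status) registers. The reset helpers
 * below quiesce a bank by masking and disabling everything and then
 * clearing IIR twice, since the hardware can queue up two events per
 * IIR bit.
 */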
void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr,
		    i915_reg_t iir, i915_reg_t ier)
{
	intel_uncore_write(uncore, imr, 0xffffffff);
	intel_uncore_posting_read(uncore, imr);

	intel_uncore_write(uncore, ier, 0);

	/* IIR can theoretically queue up two events. Be paranoid. */
	intel_uncore_write(uncore, iir, 0xffffffff);
	intel_uncore_posting_read(uncore, iir);
	intel_uncore_write(uncore, iir, 0xffffffff);
	intel_uncore_posting_read(uncore, iir);
}

static void gen2_irq_reset(struct intel_uncore *uncore)
{
	intel_uncore_write16(uncore, GEN2_IMR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IMR);

	intel_uncore_write16(uncore, GEN2_IER, 0);

	/* IIR can theoretically queue up two events. Be paranoid. */
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
}

/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
static void gen3_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg)
{
	u32 val = intel_uncore_read(uncore, reg);

	if (val == 0)
		return;

	drm_WARN(&uncore->i915->drm, 1,
		 "Interrupt register 0x%x is not zero: 0x%08x\n",
		 i915_mmio_reg_offset(reg), val);
	intel_uncore_write(uncore, reg, 0xffffffff);
	intel_uncore_posting_read(uncore, reg);
	intel_uncore_write(uncore, reg, 0xffffffff);
	intel_uncore_posting_read(uncore, reg);
}

static void gen2_assert_iir_is_zero(struct intel_uncore *uncore)
{
	u16 val = intel_uncore_read16(uncore, GEN2_IIR);

	if (val == 0)
		return;

	drm_WARN(&uncore->i915->drm, 1,
		 "Interrupt register 0x%x is not zero: 0x%08x\n",
		 i915_mmio_reg_offset(GEN2_IIR), val);
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
}

void gen3_irq_init(struct intel_uncore *uncore,
		   i915_reg_t imr, u32 imr_val,
		   i915_reg_t ier, u32 ier_val,
		   i915_reg_t iir)
{
	gen3_assert_iir_is_zero(uncore, iir);

	intel_uncore_write(uncore, ier, ier_val);
	intel_uncore_write(uncore, imr, imr_val);
	intel_uncore_posting_read(uncore, imr);
}

static void gen2_irq_init(struct intel_uncore *uncore,
			  u32 imr_val, u32 ier_val)
{
	gen2_assert_iir_is_zero(uncore);

	intel_uncore_write16(uncore, GEN2_IER, ier_val);
	intel_uncore_write16(uncore, GEN2_IMR, imr_val);
	intel_uncore_posting_read16(uncore, GEN2_IMR);
}

/* For display hotplug interrupt */
static inline void
i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
				     u32 mask,
				     u32 bits)
{
	lockdep_assert_held(&dev_priv->irq_lock);
	drm_WARN_ON(&dev_priv->drm, bits & ~mask);

	intel_uncore_rmw(&dev_priv->uncore, PORT_HOTPLUG_EN, mask, bits);
}

/**
 * i915_hotplug_interrupt_update - update hotplug interrupt enable
 * @dev_priv: driver private
 * @mask: bits to update
 * @bits: bits to enable
 *
 * NOTE: the HPD enable bits are modified both inside and outside
 * of an interrupt context. To keep the read-modify-write cycles from
 * interfering, these bits are protected by a spinlock. Since this
 * function is usually not called from a context where the lock is
 * already held, it acquires the lock itself. A non-locking version is
 * also available.
 */
void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
				   u32 mask,
				   u32 bits)
{
	spin_lock_irq(&dev_priv->irq_lock);
	i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
	spin_unlock_irq(&dev_priv->irq_lock);
}
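
/*
 * Illustrative sketch (not a real call site): unmasking and later
 * re-masking a single pin's enable bit, here the CRT bit from
 * hpd_mask_i915, would look like
 *
 *	i915_hotplug_interrupt_update(i915, CRT_HOTPLUG_INT_EN,
 *				      CRT_HOTPLUG_INT_EN);
 *	...
 *	i915_hotplug_interrupt_update(i915, CRT_HOTPLUG_INT_EN, 0);
 */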

/**
 * ilk_update_display_irq - update DEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_display_irq(struct drm_i915_private *dev_priv,
				   u32 interrupt_mask, u32 enabled_irq_mask)
{
	u32 new_val;

	lockdep_assert_held(&dev_priv->irq_lock);
	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

	new_val = dev_priv->irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->irq_mask &&
	    !drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv))) {
		dev_priv->irq_mask = new_val;
		intel_uncore_write(&dev_priv->uncore, DEIMR, dev_priv->irq_mask);
		intel_uncore_posting_read(&dev_priv->uncore, DEIMR);
	}
}

void ilk_enable_display_irq(struct drm_i915_private *i915, u32 bits)
{
	ilk_update_display_irq(i915, bits, bits);
}

void ilk_disable_display_irq(struct drm_i915_private *i915, u32 bits)
{
	ilk_update_display_irq(i915, bits, 0);
}
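
/*
 * Note the IMR polarity used by all the *_update_*_irq() helpers in
 * this file: a set bit in the mask register disables the interrupt,
 * so the enable wrappers pass @bits for both arguments and the
 * disable wrappers pass 0 as the enabled set.
 */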

/**
 * bdw_update_port_irq - update DE port interrupt
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
				u32 interrupt_mask,
				u32 enabled_irq_mask)
{
	u32 new_val;
	u32 old_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
		return;

	old_val = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PORT_IMR);

	new_val = old_val;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != old_val) {
		intel_uncore_write(&dev_priv->uncore, GEN8_DE_PORT_IMR, new_val);
		intel_uncore_posting_read(&dev_priv->uncore, GEN8_DE_PORT_IMR);
	}
}

/**
 * bdw_update_pipe_irq - update DE pipe interrupt
 * @dev_priv: driver private
 * @pipe: pipe whose interrupt to update
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
				enum pipe pipe, u32 interrupt_mask,
				u32 enabled_irq_mask)
{
	u32 new_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->de_irq_mask[pipe];
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->de_irq_mask[pipe]) {
		dev_priv->de_irq_mask[pipe] = new_val;
		intel_uncore_write(&dev_priv->uncore, GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
		intel_uncore_posting_read(&dev_priv->uncore, GEN8_DE_PIPE_IMR(pipe));
	}
}

void bdw_enable_pipe_irq(struct drm_i915_private *i915,
			 enum pipe pipe, u32 bits)
{
	bdw_update_pipe_irq(i915, pipe, bits, bits);
}

void bdw_disable_pipe_irq(struct drm_i915_private *i915,
			  enum pipe pipe, u32 bits)
{
	bdw_update_pipe_irq(i915, pipe, bits, 0);
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
					 u32 interrupt_mask,
					 u32 enabled_irq_mask)
{
	u32 sdeimr = intel_uncore_read(&dev_priv->uncore, SDEIMR);

	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

	lockdep_assert_held(&dev_priv->irq_lock);

	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
		return;

	intel_uncore_write(&dev_priv->uncore, SDEIMR, sdeimr);
	intel_uncore_posting_read(&dev_priv->uncore, SDEIMR);
}

void ibx_enable_display_interrupt(struct drm_i915_private *i915, u32 bits)
{
	ibx_display_interrupt_update(i915, bits, bits);
}

void ibx_disable_display_interrupt(struct drm_i915_private *i915, u32 bits)
{
	ibx_display_interrupt_update(i915, bits, 0);
}

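/*
 * PIPESTAT keeps the interrupt enable bits in the upper 16 bits of the
 * register and the corresponding status bits in the lower 16, hence the
 * "status_mask << 16" starting point below. A few VLV/CHV bits don't
 * follow that 1:1 layout, which is what the fixups in this function
 * handle.
 */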
u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
			      enum pipe pipe)
{
	u32 status_mask = dev_priv->pipestat_irq_mask[pipe];
	u32 enable_mask = status_mask << 16;

	lockdep_assert_held(&dev_priv->irq_lock);

	if (DISPLAY_VER(dev_priv) < 5)
		goto out;

	/*
	 * On pipe A we don't support the PSR interrupt yet,
	 * on pipe B and C the same bit MBZ.
	 */
	if (drm_WARN_ON_ONCE(&dev_priv->drm,
			     status_mask & PIPE_A_PSR_STATUS_VLV))
		return 0;
	/*
	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
	 * A the same bit is for perf counters which we don't use either.
	 */
	if (drm_WARN_ON_ONCE(&dev_priv->drm,
			     status_mask & PIPE_B_PSR_STATUS_VLV))
		return 0;

	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

out:
	drm_WARN_ONCE(&dev_priv->drm,
		      enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask);

	return enable_mask;
}

void i915_enable_pipestat(struct drm_i915_private *dev_priv,
			  enum pipe pipe, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 enable_mask;

	drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: status_mask=0x%x\n",
		      pipe_name(pipe), status_mask);

	lockdep_assert_held(&dev_priv->irq_lock);
	drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));

	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == status_mask)
		return;

	dev_priv->pipestat_irq_mask[pipe] |= status_mask;
	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

	intel_uncore_write(&dev_priv->uncore, reg, enable_mask | status_mask);
	intel_uncore_posting_read(&dev_priv->uncore, reg);
}

void i915_disable_pipestat(struct drm_i915_private *dev_priv,
			   enum pipe pipe, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 enable_mask;

	drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: status_mask=0x%x\n",
		      pipe_name(pipe), status_mask);

	lockdep_assert_held(&dev_priv->irq_lock);
	drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));

	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == 0)
		return;

	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

	intel_uncore_write(&dev_priv->uncore, reg, enable_mask | status_mask);
	intel_uncore_posting_read(&dev_priv->uncore, reg);
}
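
/*
 * Minimal usage sketch for the pipestat helpers above (both require
 * the irq_lock, see the lockdep assertions):
 *
 *	spin_lock_irq(&dev_priv->irq_lock);
 *	i915_enable_pipestat(dev_priv, PIPE_A,
 *			     PIPE_CRC_DONE_INTERRUPT_STATUS);
 *	spin_unlock_irq(&dev_priv->irq_lock);
 *
 * i915_enable_asle_pipestat() below is a real caller following this
 * pattern.
 */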

static bool i915_has_asle(struct drm_i915_private *dev_priv)
{
	if (!dev_priv->display.opregion.asle)
		return false;

	return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
}

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 * @dev_priv: i915 device private
 */
static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
{
	if (!i915_has_asle(dev_priv))
		return;

	spin_lock_irq(&dev_priv->irq_lock);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
	if (DISPLAY_VER(dev_priv) >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_STATUS);

	spin_unlock_irq(&dev_priv->irq_lock);
}

/**
 * ivb_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows, since statistically
 * the same row is more likely to go bad again.
 */
static void ivb_parity_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), l3_parity.error_work);
	struct intel_gt *gt = to_gt(dev_priv);
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	u32 misccpctl;
	u8 slice = 0;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->drm.struct_mutex);

	/* If we've screwed up tracking, just let the interrupt fire again */
	if (drm_WARN_ON(&dev_priv->drm, !dev_priv->l3_parity.which_slice))
		goto out;

	misccpctl = intel_uncore_rmw(&dev_priv->uncore, GEN7_MISCCPCTL,
				     GEN7_DOP_CLOCK_GATE_ENABLE, 0);
	intel_uncore_posting_read(&dev_priv->uncore, GEN7_MISCCPCTL);

	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		i915_reg_t reg;

		slice--;
		if (drm_WARN_ON_ONCE(&dev_priv->drm,
				     slice >= NUM_L3_SLICES(dev_priv)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1<<slice);

		reg = GEN7_L3CDERRST1(slice);

		error_status = intel_uncore_read(&dev_priv->uncore, reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		intel_uncore_write(&dev_priv->uncore, reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		intel_uncore_posting_read(&dev_priv->uncore, reg);

		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

		kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj,
				   KOBJ_CHANGE, parity_event);

		drm_dbg(&dev_priv->drm,
			"Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			slice, row, bank, subbank);

		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
	}

	intel_uncore_write(&dev_priv->uncore, GEN7_MISCCPCTL, misccpctl);

out:
	drm_WARN_ON(&dev_priv->drm, dev_priv->l3_parity.which_slice);
	spin_lock_irq(gt->irq_lock);
	gen5_gt_enable_irq(gt, GT_PARITY_ERROR(dev_priv));
	spin_unlock_irq(gt->irq_lock);

	mutex_unlock(&dev_priv->drm.struct_mutex);
}

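/*
 * The *_long_detect() helpers below decode a platform's hotplug
 * control register value and report whether a given pin saw a "long"
 * pulse (typically an actual plug/unplug) rather than a short one.
 * They are used as the long_pulse_detect callback of
 * intel_get_hpd_pins().
 */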
static bool gen11_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_TC1:
	case HPD_PORT_TC2:
	case HPD_PORT_TC3:
	case HPD_PORT_TC4:
	case HPD_PORT_TC5:
	case HPD_PORT_TC6:
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(pin);
	default:
		return false;
	}
}

static bool bxt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_A:
		return val & PORTA_HOTPLUG_LONG_DETECT;
	case HPD_PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case HPD_PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool icp_ddi_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_A:
	case HPD_PORT_B:
	case HPD_PORT_C:
	case HPD_PORT_D:
		return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(pin);
	default:
		return false;
	}
}

static bool icp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_TC1:
	case HPD_PORT_TC2:
	case HPD_PORT_TC3:
	case HPD_PORT_TC4:
	case HPD_PORT_TC5:
	case HPD_PORT_TC6:
		return val & ICP_TC_HPD_LONG_DETECT(pin);
	default:
		return false;
	}
}

static bool spt_port_hotplug2_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_E:
		return val & PORTE_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool spt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_A:
		return val & PORTA_HOTPLUG_LONG_DETECT;
	case HPD_PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case HPD_PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	case HPD_PORT_D:
		return val & PORTD_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool ilk_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_A:
		return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool pch_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case HPD_PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	case HPD_PORT_D:
		return val & PORTD_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool i9xx_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_B:
		return val & PORTB_HOTPLUG_INT_LONG_PULSE;
	case HPD_PORT_C:
		return val & PORTC_HOTPLUG_INT_LONG_PULSE;
	case HPD_PORT_D:
		return val & PORTD_HOTPLUG_INT_LONG_PULSE;
	default:
		return false;
	}
}

/*
 * Get a bit mask of pins that have triggered, and which ones may be long.
 * This can be called multiple times with the same masks to accumulate
 * hotplug detection results from several registers.
 *
 * Note that the caller is expected to zero out the masks initially.
 */
static void intel_get_hpd_pins(struct drm_i915_private *dev_priv,
			       u32 *pin_mask, u32 *long_mask,
			       u32 hotplug_trigger, u32 dig_hotplug_reg,
			       const u32 hpd[HPD_NUM_PINS],
			       bool long_pulse_detect(enum hpd_pin pin, u32 val))
{
	enum hpd_pin pin;

	BUILD_BUG_ON(BITS_PER_TYPE(*pin_mask) < HPD_NUM_PINS);

	for_each_hpd_pin(pin) {
		if ((hpd[pin] & hotplug_trigger) == 0)
			continue;

		*pin_mask |= BIT(pin);

		if (long_pulse_detect(pin, dig_hotplug_reg))
			*long_mask |= BIT(pin);
	}

	drm_dbg(&dev_priv->drm,
		"hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x, long 0x%08x\n",
		hotplug_trigger, dig_hotplug_reg, *pin_mask, *long_mask);
}
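
/*
 * Sketch of the accumulation pattern described above: zero the masks
 * once, then call intel_get_hpd_pins() for each register that fired,
 * e.g.
 *
 *	u32 pin_mask = 0, long_mask = 0;
 *
 *	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
 *			   tc_hotplug_trigger, dig_hotplug_reg,
 *			   dev_priv->display.hotplug.pch_hpd,
 *			   icp_tc_port_hotplug_long_detect);
 *	if (pin_mask)
 *		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
 *
 * as done by icp_irq_handler() further down.
 */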

static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv,
				  const u32 hpd[HPD_NUM_PINS])
{
	struct intel_encoder *encoder;
	u32 enabled_irqs = 0;

	for_each_intel_encoder(&dev_priv->drm, encoder)
		if (dev_priv->display.hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
			enabled_irqs |= hpd[encoder->hpd_pin];

	return enabled_irqs;
}

static u32 intel_hpd_hotplug_irqs(struct drm_i915_private *dev_priv,
				  const u32 hpd[HPD_NUM_PINS])
{
	struct intel_encoder *encoder;
	u32 hotplug_irqs = 0;

	for_each_intel_encoder(&dev_priv->drm, encoder)
		hotplug_irqs |= hpd[encoder->hpd_pin];

	return hotplug_irqs;
}

static u32 intel_hpd_hotplug_enables(struct drm_i915_private *i915,
				     hotplug_enables_func hotplug_enables)
{
	struct intel_encoder *encoder;
	u32 hotplug = 0;

	for_each_intel_encoder(&i915->drm, encoder)
		hotplug |= hotplug_enables(i915, encoder->hpd_pin);

	return hotplug;
}

static void gmbus_irq_handler(struct drm_i915_private *dev_priv)
{
	wake_up_all(&dev_priv->display.gmbus.wait_queue);
}

static void dp_aux_irq_handler(struct drm_i915_private *dev_priv)
{
	wake_up_all(&dev_priv->display.gmbus.wait_queue);
}
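
/*
 * GMBUS and DP AUX completion share a single wait queue: both handlers
 * simply wake all waiters, and each waiter then re-checks the hardware
 * status to see whether its own transaction has finished.
 */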

#if defined(CONFIG_DEBUG_FS)
static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
					 enum pipe pipe,
					 u32 crc0, u32 crc1,
					 u32 crc2, u32 crc3,
					 u32 crc4)
{
	struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);
	struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc;
	u32 crcs[5] = { crc0, crc1, crc2, crc3, crc4 };

	trace_intel_pipe_crc(crtc, crcs);

	spin_lock(&pipe_crc->lock);
	/*
	 * For some not yet identified reason, the first CRC is
	 * bonkers. So let's just wait for the next vblank and read
	 * out the buggy result.
	 *
	 * On GEN8+ sometimes the second CRC is bonkers as well, so
	 * don't trust that one either.
	 */
	if (pipe_crc->skipped <= 0 ||
	    (DISPLAY_VER(dev_priv) >= 8 && pipe_crc->skipped == 1)) {
		pipe_crc->skipped++;
		spin_unlock(&pipe_crc->lock);
		return;
	}
	spin_unlock(&pipe_crc->lock);

	drm_crtc_add_crc_entry(&crtc->base, true,
				drm_crtc_accurate_vblank_count(&crtc->base),
				crcs);
}
#else
static inline void
display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
			     enum pipe pipe,
			     u32 crc0, u32 crc1,
			     u32 crc2, u32 crc3,
			     u32 crc4) {}
#endif

static void flip_done_handler(struct drm_i915_private *i915,
			      enum pipe pipe)
{
	struct intel_crtc *crtc = intel_crtc_for_pipe(i915, pipe);
	struct drm_crtc_state *crtc_state = crtc->base.state;
	struct drm_pending_vblank_event *e = crtc_state->event;
	struct drm_device *dev = &i915->drm;
	unsigned long irqflags;

	spin_lock_irqsave(&dev->event_lock, irqflags);

	crtc_state->event = NULL;

	drm_crtc_send_vblank_event(&crtc->base, e);

	spin_unlock_irqrestore(&dev->event_lock, irqflags);
}

static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	display_pipe_crc_irq_handler(dev_priv, pipe,
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_1_IVB(pipe)),
				     0, 0, 0, 0);
}

static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	display_pipe_crc_irq_handler(dev_priv, pipe,
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_1_IVB(pipe)),
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_2_IVB(pipe)),
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_3_IVB(pipe)),
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_4_IVB(pipe)),
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_5_IVB(pipe)));
}

static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	u32 res1, res2;

	if (DISPLAY_VER(dev_priv) >= 3)
		res1 = intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RES1_I915(pipe));
	else
		res1 = 0;

	if (DISPLAY_VER(dev_priv) >= 5 || IS_G4X(dev_priv))
		res2 = intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RES2_G4X(pipe));
	else
		res2 = 0;

	display_pipe_crc_irq_handler(dev_priv, pipe,
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RED(pipe)),
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_GREEN(pipe)),
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_BLUE(pipe)),
				     res1, res2);
}

static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		intel_uncore_write(&dev_priv->uncore, PIPESTAT(pipe),
			   PIPESTAT_INT_STATUS_MASK |
			   PIPE_FIFO_UNDERRUN_STATUS);

		dev_priv->pipestat_irq_mask[pipe] = 0;
	}
}

static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
				  u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	enum pipe pipe;

	spin_lock(&dev_priv->irq_lock);

	if (!dev_priv->display_irqs_enabled) {
		spin_unlock(&dev_priv->irq_lock);
		return;
	}

	for_each_pipe(dev_priv, pipe) {
		i915_reg_t reg;
		u32 status_mask, enable_mask, iir_bit = 0;

		/*
		 * PIPESTAT bits get signalled even when the interrupt is
		 * disabled with the mask bits, and some of the status bits do
		 * not generate interrupts at all (like the underrun bit). Hence
		 * we need to be careful that we only handle what we want to
		 * handle.
		 */

		/* fifo underruns are filtered in the underrun handler. */
		status_mask = PIPE_FIFO_UNDERRUN_STATUS;

		switch (pipe) {
		default:
		case PIPE_A:
			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
			break;
		case PIPE_B:
			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
			break;
		case PIPE_C:
			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
			break;
		}
		if (iir & iir_bit)
			status_mask |= dev_priv->pipestat_irq_mask[pipe];

		if (!status_mask)
			continue;

		reg = PIPESTAT(pipe);
		pipe_stats[pipe] = intel_uncore_read(&dev_priv->uncore, reg) & status_mask;
		enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

		/*
		 * Clear the PIPE*STAT regs before the IIR
		 *
		 * Toggle the enable bits to make sure we get an
		 * edge in the ISR pipe event bit if we don't clear
		 * all the enabled status bits. Otherwise the edge
		 * triggered IIR on i965/g4x wouldn't notice that
		 * an interrupt is still pending.
		 */
		if (pipe_stats[pipe]) {
			intel_uncore_write(&dev_priv->uncore, reg, pipe_stats[pipe]);
			intel_uncore_write(&dev_priv->uncore, reg, enable_mask);
		}
	}
	spin_unlock(&dev_priv->irq_lock);
}

static void i8xx_pipestat_irq_handler(struct drm_i915_private *dev_priv,
				      u16 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
			intel_handle_vblank(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}
}

static void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv,
				      u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	bool blc_event = false;
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
			intel_handle_vblank(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
			blc_event = true;

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (blc_event || (iir & I915_ASLE_INTERRUPT))
		intel_opregion_asle_intr(dev_priv);
}

static void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv,
				      u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	bool blc_event = false;
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
			intel_handle_vblank(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
			blc_event = true;

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (blc_event || (iir & I915_ASLE_INTERRUPT))
		intel_opregion_asle_intr(dev_priv);

	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		gmbus_irq_handler(dev_priv);
}

static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
					    u32 pipe_stats[I915_MAX_PIPES])
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
			intel_handle_vblank(dev_priv, pipe);

		if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV)
			flip_done_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		gmbus_irq_handler(dev_priv);
}

static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
{
	u32 hotplug_status = 0, hotplug_status_mask;
	int i;

	if (IS_G4X(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		hotplug_status_mask = HOTPLUG_INT_STATUS_G4X |
			DP_AUX_CHANNEL_MASK_INT_STATUS_G4X;
	else
		hotplug_status_mask = HOTPLUG_INT_STATUS_I915;

	/*
	 * We absolutely have to clear all the pending interrupt
	 * bits in PORT_HOTPLUG_STAT. Otherwise the ISR port
	 * interrupt bit won't have an edge, and the i965/g4x
	 * edge triggered IIR will not notice that an interrupt
	 * is still pending. We can't use PORT_HOTPLUG_EN to
	 * guarantee the edge as the act of toggling the enable
	 * bits can itself generate a new hotplug interrupt :(
	 */
	for (i = 0; i < 10; i++) {
		u32 tmp = intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT) & hotplug_status_mask;

		if (tmp == 0)
			return hotplug_status;

		hotplug_status |= tmp;
		intel_uncore_write(&dev_priv->uncore, PORT_HOTPLUG_STAT, hotplug_status);
	}

	drm_WARN_ONCE(&dev_priv->drm, 1,
		      "PORT_HOTPLUG_STAT did not clear (0x%08x)\n",
		      intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT));

	return hotplug_status;
}

static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv,
				 u32 hotplug_status)
{
	u32 pin_mask = 0, long_mask = 0;
	u32 hotplug_trigger;

	if (IS_G4X(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
	else
		hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

	if (hotplug_trigger) {
		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
				   hotplug_trigger, hotplug_trigger,
				   dev_priv->display.hotplug.hpd,
				   i9xx_port_hotplug_long_detect);

		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
	}

	if ((IS_G4X(dev_priv) ||
	     IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
		dp_aux_irq_handler(dev_priv);
}

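/*
 * The top-level interrupt handlers that follow all share broadly the
 * same shape: bail out early if interrupts are disabled, drop the
 * runtime-pm wakeref assertions (IRQs are synced during runtime
 * suspend), disable the master interrupt, then find, clear and process
 * each source before re-enabling the master interrupt.
 */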
static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 iir, gt_iir, pm_iir;
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 hotplug_status = 0;
		u32 ier = 0;

		gt_iir = intel_uncore_read(&dev_priv->uncore, GTIIR);
		pm_iir = intel_uncore_read(&dev_priv->uncore, GEN6_PMIIR);
		iir = intel_uncore_read(&dev_priv->uncore, VLV_IIR);

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			break;

		ret = IRQ_HANDLED;

		/*
		 * Theory on interrupt generation, based on empirical evidence:
		 *
		 * x = ((VLV_IIR & VLV_IER) ||
		 *      (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) &&
		 *       (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE)));
		 *
		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
		 * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to
		 * guarantee the CPU interrupt will be raised again even if we
		 * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR
		 * bits this time around.
		 */
		intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, 0);
		ier = intel_uncore_rmw(&dev_priv->uncore, VLV_IER, ~0, 0);

		if (gt_iir)
			intel_uncore_write(&dev_priv->uncore, GTIIR, gt_iir);
		if (pm_iir)
			intel_uncore_write(&dev_priv->uncore, GEN6_PMIIR, pm_iir);

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & (I915_LPE_PIPE_A_INTERRUPT |
			   I915_LPE_PIPE_B_INTERRUPT))
			intel_lpe_audio_irq_handler(dev_priv);

		/*
		 * VLV_IIR is single buffered, and reflects the level
		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
		 */
		if (iir)
			intel_uncore_write(&dev_priv->uncore, VLV_IIR, iir);

		intel_uncore_write(&dev_priv->uncore, VLV_IER, ier);
		intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);

		if (gt_iir)
			gen6_gt_irq_handler(to_gt(dev_priv), gt_iir);
		if (pm_iir)
			gen6_rps_irq_handler(&to_gt(dev_priv)->rps, pm_iir);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
	} while (0);

	pmu_irq_stats(dev_priv, ret);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}

static irqreturn_t cherryview_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 master_ctl, iir;
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 hotplug_status = 0;
		u32 ier = 0;

		master_ctl = intel_uncore_read(&dev_priv->uncore, GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
		iir = intel_uncore_read(&dev_priv->uncore, VLV_IIR);

		if (master_ctl == 0 && iir == 0)
			break;

		ret = IRQ_HANDLED;

		/*
		 * Theory on interrupt generation, based on empirical evidence:
		 *
		 * x = ((VLV_IIR & VLV_IER) ||
		 *      ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) &&
		 *       (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL)));
		 *
		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
		 * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to
		 * guarantee the CPU interrupt will be raised again even if we
		 * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL
		 * bits this time around.
		 */
		intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, 0);
		ier = intel_uncore_rmw(&dev_priv->uncore, VLV_IER, ~0, 0);

		gen8_gt_irq_handler(to_gt(dev_priv), master_ctl);

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & (I915_LPE_PIPE_A_INTERRUPT |
			   I915_LPE_PIPE_B_INTERRUPT |
			   I915_LPE_PIPE_C_INTERRUPT))
			intel_lpe_audio_irq_handler(dev_priv);

		/*
		 * VLV_IIR is single buffered, and reflects the level
		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
		 */
		if (iir)
			intel_uncore_write(&dev_priv->uncore, VLV_IIR, iir);

		intel_uncore_write(&dev_priv->uncore, VLV_IER, ier);
		intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
	} while (0);

	pmu_irq_stats(dev_priv, ret);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}

static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
				u32 hotplug_trigger)
{
	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;

	/*
	 * Somehow the PCH doesn't seem to really ack the interrupt to the CPU
	 * unless we touch the hotplug register, even if hotplug_trigger is
	 * zero. Not acking leads to "The master control interrupt lied (SDE)!"
	 * errors.
	 */
	dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
	if (!hotplug_trigger) {
		u32 mask = PORTA_HOTPLUG_STATUS_MASK |
			PORTD_HOTPLUG_STATUS_MASK |
			PORTC_HOTPLUG_STATUS_MASK |
			PORTB_HOTPLUG_STATUS_MASK;
		dig_hotplug_reg &= ~mask;
	}

	intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, dig_hotplug_reg);
	if (!hotplug_trigger)
		return;

	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
			   hotplug_trigger, dig_hotplug_reg,
			   dev_priv->display.hotplug.pch_hpd,
			   pch_port_hotplug_long_detect);

	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
}

static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
	enum pipe pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;

	ibx_hpd_irq_handler(dev_priv, hotplug_trigger);

	if (pch_iir & SDE_AUDIO_POWER_MASK) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
			       SDE_AUDIO_POWER_SHIFT);
		drm_dbg(&dev_priv->drm, "PCH audio power change on port %d\n",
			port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK)
		dp_aux_irq_handler(dev_priv);

	if (pch_iir & SDE_GMBUS)
		gmbus_irq_handler(dev_priv);

	if (pch_iir & SDE_AUDIO_HDCP_MASK)
		drm_dbg(&dev_priv->drm, "PCH HDCP audio interrupt\n");

	if (pch_iir & SDE_AUDIO_TRANS_MASK)
		drm_dbg(&dev_priv->drm, "PCH transcoder audio interrupt\n");

	if (pch_iir & SDE_POISON)
		drm_err(&dev_priv->drm, "PCH poison interrupt\n");

	if (pch_iir & SDE_FDI_MASK) {
		for_each_pipe(dev_priv, pipe)
			drm_dbg(&dev_priv->drm, "  pipe %c FDI IIR: 0x%08x\n",
				pipe_name(pipe),
				intel_uncore_read(&dev_priv->uncore, FDI_RX_IIR(pipe)));
	}

	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
		drm_dbg(&dev_priv->drm, "PCH transcoder CRC done interrupt\n");

	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
		drm_dbg(&dev_priv->drm,
			"PCH transcoder CRC error interrupt\n");

	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
		intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A);

	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
		intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_B);
}

static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
{
	u32 err_int = intel_uncore_read(&dev_priv->uncore, GEN7_ERR_INT);
	enum pipe pipe;

	if (err_int & ERR_INT_POISON)
		drm_err(&dev_priv->drm, "Poison interrupt\n");

	for_each_pipe(dev_priv, pipe) {
		if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
			if (IS_IVYBRIDGE(dev_priv))
				ivb_pipe_crc_irq_handler(dev_priv, pipe);
			else
				hsw_pipe_crc_irq_handler(dev_priv, pipe);
		}
	}

	intel_uncore_write(&dev_priv->uncore, GEN7_ERR_INT, err_int);
}

static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
{
	u32 serr_int = intel_uncore_read(&dev_priv->uncore, SERR_INT);
	enum pipe pipe;

	if (serr_int & SERR_INT_POISON)
		drm_err(&dev_priv->drm, "PCH poison interrupt\n");

	for_each_pipe(dev_priv, pipe)
		if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe))
			intel_pch_fifo_underrun_irq_handler(dev_priv, pipe);

	intel_uncore_write(&dev_priv->uncore, SERR_INT, serr_int);
}

static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
	enum pipe pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;

	ibx_hpd_irq_handler(dev_priv, hotplug_trigger);

	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
			       SDE_AUDIO_POWER_SHIFT_CPT);
		drm_dbg(&dev_priv->drm, "PCH audio power change on port %c\n",
			port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK_CPT)
		dp_aux_irq_handler(dev_priv);

	if (pch_iir & SDE_GMBUS_CPT)
		gmbus_irq_handler(dev_priv);

	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
		drm_dbg(&dev_priv->drm, "Audio CP request interrupt\n");

	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
		drm_dbg(&dev_priv->drm, "Audio CP change interrupt\n");

	if (pch_iir & SDE_FDI_MASK_CPT) {
		for_each_pipe(dev_priv, pipe)
			drm_dbg(&dev_priv->drm, "  pipe %c FDI IIR: 0x%08x\n",
				pipe_name(pipe),
				intel_uncore_read(&dev_priv->uncore, FDI_RX_IIR(pipe)));
	}

	if (pch_iir & SDE_ERROR_CPT)
		cpt_serr_int_handler(dev_priv);
}

static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
	u32 ddi_hotplug_trigger = pch_iir & SDE_DDI_HOTPLUG_MASK_ICP;
	u32 tc_hotplug_trigger = pch_iir & SDE_TC_HOTPLUG_MASK_ICP;
	u32 pin_mask = 0, long_mask = 0;

	if (ddi_hotplug_trigger) {
		u32 dig_hotplug_reg;

		/* Locking due to DSI native GPIO sequences */
		spin_lock(&dev_priv->irq_lock);
		dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, SHOTPLUG_CTL_DDI, 0, 0);
		spin_unlock(&dev_priv->irq_lock);

		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
				   ddi_hotplug_trigger, dig_hotplug_reg,
				   dev_priv->display.hotplug.pch_hpd,
				   icp_ddi_port_hotplug_long_detect);
	}

	if (tc_hotplug_trigger) {
		u32 dig_hotplug_reg;

		dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, SHOTPLUG_CTL_TC, 0, 0);

		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
				   tc_hotplug_trigger, dig_hotplug_reg,
				   dev_priv->display.hotplug.pch_hpd,
				   icp_tc_port_hotplug_long_detect);
	}

	if (pin_mask)
		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);

	if (pch_iir & SDE_GMBUS_ICP)
		gmbus_irq_handler(dev_priv);
}

static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
		~SDE_PORTE_HOTPLUG_SPT;
	u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
	u32 pin_mask = 0, long_mask = 0;

	if (hotplug_trigger) {
		u32 dig_hotplug_reg;

		dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG, 0, 0);

		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
				   hotplug_trigger, dig_hotplug_reg,
				   dev_priv->display.hotplug.pch_hpd,
				   spt_port_hotplug_long_detect);
	}

	if (hotplug2_trigger) {
		u32 dig_hotplug_reg;

		dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG2, 0, 0);

		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
				   hotplug2_trigger, dig_hotplug_reg,
				   dev_priv->display.hotplug.pch_hpd,
				   spt_port_hotplug2_long_detect);
	}

	if (pin_mask)
		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);

	if (pch_iir & SDE_GMBUS_CPT)
		gmbus_irq_handler(dev_priv);
}

static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv,
				u32 hotplug_trigger)
{
	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;

	dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL, 0, 0);

	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
			   hotplug_trigger, dig_hotplug_reg,
			   dev_priv->display.hotplug.hpd,
			   ilk_port_hotplug_long_detect);

	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
}

static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
				    u32 de_iir)
{
	enum pipe pipe;
	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;

	if (hotplug_trigger)
		ilk_hpd_irq_handler(dev_priv, hotplug_trigger);

	if (de_iir & DE_AUX_CHANNEL_A)
		dp_aux_irq_handler(dev_priv);

	if (de_iir & DE_GSE)
		intel_opregion_asle_intr(dev_priv);

	if (de_iir & DE_POISON)
		drm_err(&dev_priv->drm, "Poison interrupt\n");

	for_each_pipe(dev_priv, pipe) {
		if (de_iir & DE_PIPE_VBLANK(pipe))
			intel_handle_vblank(dev_priv, pipe);

		if (de_iir & DE_PLANE_FLIP_DONE(pipe))
			flip_done_handler(dev_priv, pipe);

		if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		if (de_iir & DE_PIPE_CRC_DONE(pipe))
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
	}

	/* check event from PCH */
	if (de_iir & DE_PCH_EVENT) {
		u32 pch_iir = intel_uncore_read(&dev_priv->uncore, SDEIIR);

		if (HAS_PCH_CPT(dev_priv))
			cpt_irq_handler(dev_priv, pch_iir);
		else
			ibx_irq_handler(dev_priv, pch_iir);

		/* should clear the PCH hotplug event before clearing the CPU irq */
		intel_uncore_write(&dev_priv->uncore, SDEIIR, pch_iir);
	}

	if (DISPLAY_VER(dev_priv) == 5 && de_iir & DE_PCU_EVENT)
		gen5_rps_irq_handler(&to_gt(dev_priv)->rps);
}

static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
				    u32 de_iir)
{
	enum pipe pipe;
	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;

	if (hotplug_trigger)
		ilk_hpd_irq_handler(dev_priv, hotplug_trigger);

	if (de_iir & DE_ERR_INT_IVB)
		ivb_err_int_handler(dev_priv);

	if (de_iir & DE_AUX_CHANNEL_A_IVB)
		dp_aux_irq_handler(dev_priv);

	if (de_iir & DE_GSE_IVB)
		intel_opregion_asle_intr(dev_priv);

	for_each_pipe(dev_priv, pipe) {
		if (de_iir & DE_PIPE_VBLANK_IVB(pipe))
			intel_handle_vblank(dev_priv, pipe);

		if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe))
			flip_done_handler(dev_priv, pipe);
	}

	/* check event from PCH */
	if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) {
		u32 pch_iir = intel_uncore_read(&dev_priv->uncore, SDEIIR);

		cpt_irq_handler(dev_priv, pch_iir);

1730 		/* clear the PCH hotplug event before clearing the CPU irq */
1731 		intel_uncore_write(&dev_priv->uncore, SDEIIR, pch_iir);
1732 	}
1733 }
1734 
1735 /*
1736  * To handle irqs with the minimum potential for races with fresh interrupts, we:
1737  * 1 - Disable Master Interrupt Control.
1738  * 2 - Find the source(s) of the interrupt.
1739  * 3 - Clear the Interrupt Identity bits (IIR).
1740  * 4 - Process the interrupt(s) that had bits set in the IIRs.
1741  * 5 - Re-enable Master Interrupt Control.
1742  */
1743 static irqreturn_t ilk_irq_handler(int irq, void *arg)
1744 {
1745 	struct drm_i915_private *i915 = arg;
1746 	void __iomem * const regs = i915->uncore.regs;
1747 	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
1748 	irqreturn_t ret = IRQ_NONE;
1749 
1750 	if (unlikely(!intel_irqs_enabled(i915)))
1751 		return IRQ_NONE;
1752 
1753 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
1754 	disable_rpm_wakeref_asserts(&i915->runtime_pm);
1755 
1756 	/* disable master interrupt before clearing iir  */
1757 	de_ier = raw_reg_read(regs, DEIER);
1758 	raw_reg_write(regs, DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
1759 
1760 	/* Disable south interrupts. We'll only write to SDEIIR once, so further
1761  * interrupts will be stored on its back queue, and then we'll be
1762 	 * able to process them after we restore SDEIER (as soon as we restore
1763 	 * it, we'll get an interrupt if SDEIIR still has something to process
1764 	 * due to its back queue). */
1765 	if (!HAS_PCH_NOP(i915)) {
1766 		sde_ier = raw_reg_read(regs, SDEIER);
1767 		raw_reg_write(regs, SDEIER, 0);
1768 	}
1769 
1770 	/* Find, clear, then process each source of interrupt */
1771 
1772 	gt_iir = raw_reg_read(regs, GTIIR);
1773 	if (gt_iir) {
1774 		raw_reg_write(regs, GTIIR, gt_iir);
1775 		if (GRAPHICS_VER(i915) >= 6)
1776 			gen6_gt_irq_handler(to_gt(i915), gt_iir);
1777 		else
1778 			gen5_gt_irq_handler(to_gt(i915), gt_iir);
1779 		ret = IRQ_HANDLED;
1780 	}
1781 
1782 	de_iir = raw_reg_read(regs, DEIIR);
1783 	if (de_iir) {
1784 		raw_reg_write(regs, DEIIR, de_iir);
1785 		if (DISPLAY_VER(i915) >= 7)
1786 			ivb_display_irq_handler(i915, de_iir);
1787 		else
1788 			ilk_display_irq_handler(i915, de_iir);
1789 		ret = IRQ_HANDLED;
1790 	}
1791 
1792 	if (GRAPHICS_VER(i915) >= 6) {
1793 		u32 pm_iir = raw_reg_read(regs, GEN6_PMIIR);
1794 		if (pm_iir) {
1795 			raw_reg_write(regs, GEN6_PMIIR, pm_iir);
1796 			gen6_rps_irq_handler(&to_gt(i915)->rps, pm_iir);
1797 			ret = IRQ_HANDLED;
1798 		}
1799 	}
1800 
1801 	raw_reg_write(regs, DEIER, de_ier);
1802 	if (sde_ier)
1803 		raw_reg_write(regs, SDEIER, sde_ier);
1804 
1805 	pmu_irq_stats(i915, ret);
1806 
1807 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
1808 	enable_rpm_wakeref_asserts(&i915->runtime_pm);
1809 
1810 	return ret;
1811 }
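
/*
 * Illustrative sketch (not driver code): the five steps listed above
 * ilk_irq_handler(), reduced to a skeleton. The register names below are
 * placeholders, not real hardware offsets.
 *
 *	master = read(MASTER_IER);
 *	write(MASTER_IER, master & ~MASTER_ENABLE);	<- 1: gate the top level
 *	iir = read(UNIT_IIR);				<- 2: find the source(s)
 *	write(UNIT_IIR, iir);				<- 3: ack; new events re-latch
 *	handle(iir);					<- 4: process what was latched
 *	write(MASTER_IER, master);			<- 5: unmask; anything pending re-fires
 */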
1812 
1813 static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv,
1814 				u32 hotplug_trigger)
1815 {
1816 	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
1817 
1818 	dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG, 0, 0);
1819 
1820 	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1821 			   hotplug_trigger, dig_hotplug_reg,
1822 			   dev_priv->display.hotplug.hpd,
1823 			   bxt_port_hotplug_long_detect);
1824 
1825 	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1826 }
1827 
1828 static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
1829 {
1830 	u32 pin_mask = 0, long_mask = 0;
1831 	u32 trigger_tc = iir & GEN11_DE_TC_HOTPLUG_MASK;
1832 	u32 trigger_tbt = iir & GEN11_DE_TBT_HOTPLUG_MASK;
1833 
1834 	if (trigger_tc) {
1835 		u32 dig_hotplug_reg;
1836 
1837 		dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL, 0, 0);
1838 
1839 		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1840 				   trigger_tc, dig_hotplug_reg,
1841 				   dev_priv->display.hotplug.hpd,
1842 				   gen11_port_hotplug_long_detect);
1843 	}
1844 
1845 	if (trigger_tbt) {
1846 		u32 dig_hotplug_reg;
1847 
1848 		dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL, 0, 0);
1849 
1850 		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1851 				   trigger_tbt, dig_hotplug_reg,
1852 				   dev_priv->display.hotplug.hpd,
1853 				   gen11_port_hotplug_long_detect);
1854 	}
1855 
1856 	if (pin_mask)
1857 		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1858 	else
1859 		drm_err(&dev_priv->drm,
1860 			"Unexpected DE HPD interrupt 0x%08x\n", iir);
1861 }
1862 
1863 static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
1864 {
1865 	u32 mask;
1866 
1867 	if (DISPLAY_VER(dev_priv) >= 13)
1868 		return TGL_DE_PORT_AUX_DDIA |
1869 			TGL_DE_PORT_AUX_DDIB |
1870 			TGL_DE_PORT_AUX_DDIC |
1871 			XELPD_DE_PORT_AUX_DDID |
1872 			XELPD_DE_PORT_AUX_DDIE |
1873 			TGL_DE_PORT_AUX_USBC1 |
1874 			TGL_DE_PORT_AUX_USBC2 |
1875 			TGL_DE_PORT_AUX_USBC3 |
1876 			TGL_DE_PORT_AUX_USBC4;
1877 	else if (DISPLAY_VER(dev_priv) >= 12)
1878 		return TGL_DE_PORT_AUX_DDIA |
1879 			TGL_DE_PORT_AUX_DDIB |
1880 			TGL_DE_PORT_AUX_DDIC |
1881 			TGL_DE_PORT_AUX_USBC1 |
1882 			TGL_DE_PORT_AUX_USBC2 |
1883 			TGL_DE_PORT_AUX_USBC3 |
1884 			TGL_DE_PORT_AUX_USBC4 |
1885 			TGL_DE_PORT_AUX_USBC5 |
1886 			TGL_DE_PORT_AUX_USBC6;
1887 
1889 	mask = GEN8_AUX_CHANNEL_A;
1890 	if (DISPLAY_VER(dev_priv) >= 9)
1891 		mask |= GEN9_AUX_CHANNEL_B |
1892 			GEN9_AUX_CHANNEL_C |
1893 			GEN9_AUX_CHANNEL_D;
1894 
1895 	if (DISPLAY_VER(dev_priv) == 11) {
1896 		mask |= ICL_AUX_CHANNEL_F;
1897 		mask |= ICL_AUX_CHANNEL_E;
1898 	}
1899 
1900 	return mask;
1901 }
1902 
1903 static u32 gen8_de_pipe_fault_mask(struct drm_i915_private *dev_priv)
1904 {
1905 	if (DISPLAY_VER(dev_priv) >= 13 || HAS_D12_PLANE_MINIMIZATION(dev_priv))
1906 		return RKL_DE_PIPE_IRQ_FAULT_ERRORS;
1907 	else if (DISPLAY_VER(dev_priv) >= 11)
1908 		return GEN11_DE_PIPE_IRQ_FAULT_ERRORS;
1909 	else if (DISPLAY_VER(dev_priv) >= 9)
1910 		return GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
1911 	else
1912 		return GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
1913 }
1914 
1915 static void
1916 gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
1917 {
1918 	bool found = false;
1919 
1920 	if (iir & GEN8_DE_MISC_GSE) {
1921 		intel_opregion_asle_intr(dev_priv);
1922 		found = true;
1923 	}
1924 
1925 	if (iir & GEN8_DE_EDP_PSR) {
1926 		struct intel_encoder *encoder;
1927 		u32 psr_iir;
1928 		i915_reg_t iir_reg;
1929 
1930 		for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
1931 			struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1932 
1933 			if (DISPLAY_VER(dev_priv) >= 12)
1934 				iir_reg = TRANS_PSR_IIR(intel_dp->psr.transcoder);
1935 			else
1936 				iir_reg = EDP_PSR_IIR;
1937 
1938 			psr_iir = intel_uncore_rmw(&dev_priv->uncore, iir_reg, 0, 0);
1939 
1940 			if (psr_iir)
1941 				found = true;
1942 
1943 			intel_psr_irq_handler(intel_dp, psr_iir);
1944 
1945 			/* prior to GEN12 there is only one EDP PSR */
1946 			if (DISPLAY_VER(dev_priv) < 12)
1947 				break;
1948 		}
1949 	}
1950 
1951 	if (!found)
1952 		drm_err(&dev_priv->drm, "Unexpected DE Misc interrupt\n");
1953 }
1954 
1955 static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv,
1956 					   u32 te_trigger)
1957 {
1958 	enum pipe pipe = INVALID_PIPE;
1959 	enum transcoder dsi_trans;
1960 	enum port port;
1961 	u32 val, tmp;
1962 
1963 	/*
1964 	 * In case of dual link, TE comes from DSI_1;
1965 	 * this checks whether dual link is enabled.
1966 	 */
1967 	val = intel_uncore_read(&dev_priv->uncore, TRANS_DDI_FUNC_CTL2(TRANSCODER_DSI_0));
1968 	val &= PORT_SYNC_MODE_ENABLE;
1969 
1970 	/*
1971 	 * If dual link is enabled, read the DSI_0
1972 	 * transcoder registers.
1973 	 */
1974 	port = ((te_trigger & DSI1_TE && val) || (te_trigger & DSI0_TE)) ?
1975 						  PORT_A : PORT_B;
1976 	dsi_trans = (port == PORT_A) ? TRANSCODER_DSI_0 : TRANSCODER_DSI_1;
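
	/*
	 * The selection above as a truth table (illustrative; "dual" is the
	 * PORT_SYNC_MODE_ENABLE state read above):
	 *
	 *	trigger    dual    read from
	 *	DSI0_TE    any     TRANSCODER_DSI_0 (PORT_A)
	 *	DSI1_TE    yes     TRANSCODER_DSI_0 (PORT_A), state lives in DSI_0
	 *	DSI1_TE    no      TRANSCODER_DSI_1 (PORT_B)
	 */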
1977 
1978 	/* Check if DSI configured in command mode */
1979 	val = intel_uncore_read(&dev_priv->uncore, DSI_TRANS_FUNC_CONF(dsi_trans));
1980 	val = val & OP_MODE_MASK;
1981 
1982 	if (val != CMD_MODE_NO_GATE && val != CMD_MODE_TE_GATE) {
1983 		drm_err(&dev_priv->drm, "DSI transcoder not configured in command mode\n");
1984 		return;
1985 	}
1986 
1987 	/* Get PIPE for handling VBLANK event */
1988 	val = intel_uncore_read(&dev_priv->uncore, TRANS_DDI_FUNC_CTL(dsi_trans));
1989 	switch (val & TRANS_DDI_EDP_INPUT_MASK) {
1990 	case TRANS_DDI_EDP_INPUT_A_ON:
1991 		pipe = PIPE_A;
1992 		break;
1993 	case TRANS_DDI_EDP_INPUT_B_ONOFF:
1994 		pipe = PIPE_B;
1995 		break;
1996 	case TRANS_DDI_EDP_INPUT_C_ONOFF:
1997 		pipe = PIPE_C;
1998 		break;
1999 	default:
2000 		drm_err(&dev_priv->drm, "Invalid PIPE\n");
2001 		return;
2002 	}
2003 
2004 	intel_handle_vblank(dev_priv, pipe);
2005 
2006 	/* clear TE in dsi IIR */
2007 	port = (te_trigger & DSI1_TE) ? PORT_B : PORT_A;
2008 	tmp = intel_uncore_rmw(&dev_priv->uncore, DSI_INTR_IDENT_REG(port), 0, 0);
2009 }
2010 
2011 static u32 gen8_de_pipe_flip_done_mask(struct drm_i915_private *i915)
2012 {
2013 	if (DISPLAY_VER(i915) >= 9)
2014 		return GEN9_PIPE_PLANE1_FLIP_DONE;
2015 	else
2016 		return GEN8_PIPE_PRIMARY_FLIP_DONE;
2017 }
2018 
2019 u32 gen8_de_pipe_underrun_mask(struct drm_i915_private *dev_priv)
2020 {
2021 	u32 mask = GEN8_PIPE_FIFO_UNDERRUN;
2022 
2023 	if (DISPLAY_VER(dev_priv) >= 13)
2024 		mask |= XELPD_PIPE_SOFT_UNDERRUN |
2025 			XELPD_PIPE_HARD_UNDERRUN;
2026 
2027 	return mask;
2028 }
2029 
2030 static irqreturn_t
2031 gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
2032 {
2033 	irqreturn_t ret = IRQ_NONE;
2034 	u32 iir;
2035 	enum pipe pipe;
2036 
2037 	drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_DISPLAY(dev_priv));
2038 
2039 	if (master_ctl & GEN8_DE_MISC_IRQ) {
2040 		iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_MISC_IIR);
2041 		if (iir) {
2042 			intel_uncore_write(&dev_priv->uncore, GEN8_DE_MISC_IIR, iir);
2043 			ret = IRQ_HANDLED;
2044 			gen8_de_misc_irq_handler(dev_priv, iir);
2045 		} else {
2046 			drm_err_ratelimited(&dev_priv->drm,
2047 					    "The master control interrupt lied (DE MISC)!\n");
2048 		}
2049 	}
2050 
2051 	if (DISPLAY_VER(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) {
2052 		iir = intel_uncore_read(&dev_priv->uncore, GEN11_DE_HPD_IIR);
2053 		if (iir) {
2054 			intel_uncore_write(&dev_priv->uncore, GEN11_DE_HPD_IIR, iir);
2055 			ret = IRQ_HANDLED;
2056 			gen11_hpd_irq_handler(dev_priv, iir);
2057 		} else {
2058 			drm_err_ratelimited(&dev_priv->drm,
2059 					    "The master control interrupt lied, (DE HPD)!\n");
2060 		}
2061 	}
2062 
2063 	if (master_ctl & GEN8_DE_PORT_IRQ) {
2064 		iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PORT_IIR);
2065 		if (iir) {
2066 			bool found = false;
2067 
2068 			intel_uncore_write(&dev_priv->uncore, GEN8_DE_PORT_IIR, iir);
2069 			ret = IRQ_HANDLED;
2070 
2071 			if (iir & gen8_de_port_aux_mask(dev_priv)) {
2072 				dp_aux_irq_handler(dev_priv);
2073 				found = true;
2074 			}
2075 
2076 			if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
2077 				u32 hotplug_trigger = iir & BXT_DE_PORT_HOTPLUG_MASK;
2078 
2079 				if (hotplug_trigger) {
2080 					bxt_hpd_irq_handler(dev_priv, hotplug_trigger);
2081 					found = true;
2082 				}
2083 			} else if (IS_BROADWELL(dev_priv)) {
2084 				u32 hotplug_trigger = iir & BDW_DE_PORT_HOTPLUG_MASK;
2085 
2086 				if (hotplug_trigger) {
2087 					ilk_hpd_irq_handler(dev_priv, hotplug_trigger);
2088 					found = true;
2089 				}
2090 			}
2091 
2092 			if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
2093 			    (iir & BXT_DE_PORT_GMBUS)) {
2094 				gmbus_irq_handler(dev_priv);
2095 				found = true;
2096 			}
2097 
2098 			if (DISPLAY_VER(dev_priv) >= 11) {
2099 				u32 te_trigger = iir & (DSI0_TE | DSI1_TE);
2100 
2101 				if (te_trigger) {
2102 					gen11_dsi_te_interrupt_handler(dev_priv, te_trigger);
2103 					found = true;
2104 				}
2105 			}
2106 
2107 			if (!found)
2108 				drm_err_ratelimited(&dev_priv->drm,
2109 						    "Unexpected DE Port interrupt\n");
2110 		} else
2112 			drm_err_ratelimited(&dev_priv->drm,
2113 					    "The master control interrupt lied (DE PORT)!\n");
2114 	}
2115 
2116 	for_each_pipe(dev_priv, pipe) {
2117 		u32 fault_errors;
2118 
2119 		if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
2120 			continue;
2121 
2122 		iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PIPE_IIR(pipe));
2123 		if (!iir) {
2124 			drm_err_ratelimited(&dev_priv->drm,
2125 					    "The master control interrupt lied (DE PIPE)!\n");
2126 			continue;
2127 		}
2128 
2129 		ret = IRQ_HANDLED;
2130 		intel_uncore_write(&dev_priv->uncore, GEN8_DE_PIPE_IIR(pipe), iir);
2131 
2132 		if (iir & GEN8_PIPE_VBLANK)
2133 			intel_handle_vblank(dev_priv, pipe);
2134 
2135 		if (iir & gen8_de_pipe_flip_done_mask(dev_priv))
2136 			flip_done_handler(dev_priv, pipe);
2137 
2138 		if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
2139 			hsw_pipe_crc_irq_handler(dev_priv, pipe);
2140 
2141 		if (iir & gen8_de_pipe_underrun_mask(dev_priv))
2142 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2143 
2144 		fault_errors = iir & gen8_de_pipe_fault_mask(dev_priv);
2145 		if (fault_errors)
2146 			drm_err_ratelimited(&dev_priv->drm,
2147 					    "Fault errors on pipe %c: 0x%08x\n",
2148 					    pipe_name(pipe),
2149 					    fault_errors);
2150 	}
2151 
2152 	if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) &&
2153 	    master_ctl & GEN8_DE_PCH_IRQ) {
2154 		/*
2155 		 * FIXME(BDW): Assume for now that the new interrupt handling
2156 		 * scheme also closed the SDE interrupt handling race we've seen
2157 		 * on older pch-split platforms. But this needs testing.
2158 		 */
2159 		iir = intel_uncore_read(&dev_priv->uncore, SDEIIR);
2160 		if (iir) {
2161 			intel_uncore_write(&dev_priv->uncore, SDEIIR, iir);
2162 			ret = IRQ_HANDLED;
2163 
2164 			if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
2165 				icp_irq_handler(dev_priv, iir);
2166 			else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
2167 				spt_irq_handler(dev_priv, iir);
2168 			else
2169 				cpt_irq_handler(dev_priv, iir);
2170 		} else {
2171 			/*
2172 			 * Like on previous PCH there seems to be something
2173 			 * fishy going on with forwarding PCH interrupts.
2174 			 */
2175 			drm_dbg(&dev_priv->drm,
2176 				"The master control interrupt lied (SDE)!\n");
2177 		}
2178 	}
2179 
2180 	return ret;
2181 }
2182 
2183 static inline u32 gen8_master_intr_disable(void __iomem * const regs)
2184 {
2185 	raw_reg_write(regs, GEN8_MASTER_IRQ, 0);
2186 
2187 	/*
2188 	 * Now with master disabled, get a sample of level indications
2189 	 * for this interrupt. Indications will be cleared on related acks.
2190 	 * New indications can and will light up during processing,
2191 	 * and will generate a new interrupt after enabling the master.
2192 	 */
2193 	return raw_reg_read(regs, GEN8_MASTER_IRQ);
2194 }
2195 
2196 static inline void gen8_master_intr_enable(void __iomem * const regs)
2197 {
2198 	raw_reg_write(regs, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
2199 }
2200 
2201 static irqreturn_t gen8_irq_handler(int irq, void *arg)
2202 {
2203 	struct drm_i915_private *dev_priv = arg;
2204 	void __iomem * const regs = dev_priv->uncore.regs;
2205 	u32 master_ctl;
2206 
2207 	if (!intel_irqs_enabled(dev_priv))
2208 		return IRQ_NONE;
2209 
2210 	master_ctl = gen8_master_intr_disable(regs);
2211 	if (!master_ctl) {
2212 		gen8_master_intr_enable(regs);
2213 		return IRQ_NONE;
2214 	}
2215 
2216 	/* Find, queue (onto bottom-halves), then clear each source */
2217 	gen8_gt_irq_handler(to_gt(dev_priv), master_ctl);
2218 
2219 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
2220 	if (master_ctl & ~GEN8_GT_IRQS) {
2221 		disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
2222 		gen8_de_irq_handler(dev_priv, master_ctl);
2223 		enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
2224 	}
2225 
2226 	gen8_master_intr_enable(regs);
2227 
2228 	pmu_irq_stats(dev_priv, IRQ_HANDLED);
2229 
2230 	return IRQ_HANDLED;
2231 }
2232 
2233 static u32
2234 gen11_gu_misc_irq_ack(struct drm_i915_private *i915, const u32 master_ctl)
2235 {
2236 	void __iomem * const regs = i915->uncore.regs;
2237 	u32 iir;
2238 
2239 	if (!(master_ctl & GEN11_GU_MISC_IRQ))
2240 		return 0;
2241 
2242 	iir = raw_reg_read(regs, GEN11_GU_MISC_IIR);
2243 	if (likely(iir))
2244 		raw_reg_write(regs, GEN11_GU_MISC_IIR, iir);
2245 
2246 	return iir;
2247 }
2248 
2249 static void
2250 gen11_gu_misc_irq_handler(struct drm_i915_private *i915, const u32 iir)
2251 {
2252 	if (iir & GEN11_GU_MISC_GSE)
2253 		intel_opregion_asle_intr(i915);
2254 }
2255 
2256 static inline u32 gen11_master_intr_disable(void __iomem * const regs)
2257 {
2258 	raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0);
2259 
2260 	/*
2261 	 * Now with master disabled, get a sample of level indications
2262 	 * for this interrupt. Indications will be cleared on related acks.
2263 	 * New indications can and will light up during processing,
2264 	 * and will generate a new interrupt after enabling the master.
2265 	 */
2266 	return raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
2267 }
2268 
2269 static inline void gen11_master_intr_enable(void __iomem * const regs)
2270 {
2271 	raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ);
2272 }
2273 
2274 static void
2275 gen11_display_irq_handler(struct drm_i915_private *i915)
2276 {
2277 	void __iomem * const regs = i915->uncore.regs;
2278 	const u32 disp_ctl = raw_reg_read(regs, GEN11_DISPLAY_INT_CTL);
2279 
2280 	disable_rpm_wakeref_asserts(&i915->runtime_pm);
2281 	/*
2282 	 * GEN11_DISPLAY_INT_CTL has same format as GEN8_MASTER_IRQ
2283 	 * for the display related bits.
2284 	 */
2285 	raw_reg_write(regs, GEN11_DISPLAY_INT_CTL, 0x0);
2286 	gen8_de_irq_handler(i915, disp_ctl);
2287 	raw_reg_write(regs, GEN11_DISPLAY_INT_CTL,
2288 		      GEN11_DISPLAY_IRQ_ENABLE);
2289 
2290 	enable_rpm_wakeref_asserts(&i915->runtime_pm);
2291 }
2292 
2293 static irqreturn_t gen11_irq_handler(int irq, void *arg)
2294 {
2295 	struct drm_i915_private *i915 = arg;
2296 	void __iomem * const regs = i915->uncore.regs;
2297 	struct intel_gt *gt = to_gt(i915);
2298 	u32 master_ctl;
2299 	u32 gu_misc_iir;
2300 
2301 	if (!intel_irqs_enabled(i915))
2302 		return IRQ_NONE;
2303 
2304 	master_ctl = gen11_master_intr_disable(regs);
2305 	if (!master_ctl) {
2306 		gen11_master_intr_enable(regs);
2307 		return IRQ_NONE;
2308 	}
2309 
2310 	/* Find, queue (onto bottom-halves), then clear each source */
2311 	gen11_gt_irq_handler(gt, master_ctl);
2312 
2313 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
2314 	if (master_ctl & GEN11_DISPLAY_IRQ)
2315 		gen11_display_irq_handler(i915);
2316 
2317 	gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl);
2318 
2319 	gen11_master_intr_enable(regs);
2320 
2321 	gen11_gu_misc_irq_handler(i915, gu_misc_iir);
2322 
2323 	pmu_irq_stats(i915, IRQ_HANDLED);
2324 
2325 	return IRQ_HANDLED;
2326 }
2327 
2328 static inline u32 dg1_master_intr_disable(void __iomem * const regs)
2329 {
2330 	u32 val;
2331 
2332 	/* First disable interrupts */
2333 	raw_reg_write(regs, DG1_MSTR_TILE_INTR, 0);
2334 
2335 	/* Get the indication levels and ack the master unit */
2336 	val = raw_reg_read(regs, DG1_MSTR_TILE_INTR);
2337 	if (unlikely(!val))
2338 		return 0;
2339 
2340 	raw_reg_write(regs, DG1_MSTR_TILE_INTR, val);
2341 
2342 	return val;
2343 }
2344 
2345 static inline void dg1_master_intr_enable(void __iomem * const regs)
2346 {
2347 	raw_reg_write(regs, DG1_MSTR_TILE_INTR, DG1_MSTR_IRQ);
2348 }
2349 
2350 static irqreturn_t dg1_irq_handler(int irq, void *arg)
2351 {
2352 	struct drm_i915_private * const i915 = arg;
2353 	struct intel_gt *gt = to_gt(i915);
2354 	void __iomem * const regs = gt->uncore->regs;
2355 	u32 master_tile_ctl, master_ctl;
2356 	u32 gu_misc_iir;
2357 
2358 	if (!intel_irqs_enabled(i915))
2359 		return IRQ_NONE;
2360 
2361 	master_tile_ctl = dg1_master_intr_disable(regs);
2362 	if (!master_tile_ctl) {
2363 		dg1_master_intr_enable(regs);
2364 		return IRQ_NONE;
2365 	}
2366 
2367 	/* FIXME: we only support tile 0 for now. */
2368 	if (master_tile_ctl & DG1_MSTR_TILE(0)) {
2369 		master_ctl = raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
2370 		raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, master_ctl);
2371 	} else {
2372 		drm_err(&i915->drm, "Tile not supported: 0x%08x\n",
2373 			master_tile_ctl);
2374 		dg1_master_intr_enable(regs);
2375 		return IRQ_NONE;
2376 	}
2377 
2378 	gen11_gt_irq_handler(gt, master_ctl);
2379 
2380 	if (master_ctl & GEN11_DISPLAY_IRQ)
2381 		gen11_display_irq_handler(i915);
2382 
2383 	gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl);
2384 
2385 	dg1_master_intr_enable(regs);
2386 
2387 	gen11_gu_misc_irq_handler(i915, gu_misc_iir);
2388 
2389 	pmu_irq_stats(i915, IRQ_HANDLED);
2390 
2391 	return IRQ_HANDLED;
2392 }
2393 
2394 /* Called from drm generic code, passed 'crtc' which
2395  * we use as a pipe index
2396  */
2397 int i8xx_enable_vblank(struct drm_crtc *crtc)
2398 {
2399 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2400 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
2401 	unsigned long irqflags;
2402 
2403 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2404 	i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
2405 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2406 
2407 	return 0;
2408 }
2409 
2410 int i915gm_enable_vblank(struct drm_crtc *crtc)
2411 {
2412 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2413 
2414 	/*
2415 	 * Vblank interrupts fail to wake the device up from C2+.
2416 	 * Disabling render clock gating during C-states avoids
2417 	 * the problem. There is a small power cost so we do this
2418 	 * only when vblank interrupts are actually enabled.
2419 	 */
2420 	if (dev_priv->vblank_enabled++ == 0)
2421 		intel_uncore_write(&dev_priv->uncore, SCPD0, _MASKED_BIT_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
2422 
2423 	return i8xx_enable_vblank(crtc);
2424 }
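
/*
 * Aside on the SCPD0 write above (a sketch of the existing convention; see
 * the register definitions for the real macros): masked registers carry a
 * write-enable mask in the upper 16 bits, so no read-modify-write cycle is
 * needed. Roughly:
 *
 *	_MASKED_BIT_ENABLE(bit)  ~= ((bit) << 16) | (bit)
 *	_MASKED_BIT_DISABLE(bit) ~= ((bit) << 16)
 */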
2425 
2426 int i965_enable_vblank(struct drm_crtc *crtc)
2427 {
2428 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2429 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
2430 	unsigned long irqflags;
2431 
2432 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2433 	i915_enable_pipestat(dev_priv, pipe,
2434 			     PIPE_START_VBLANK_INTERRUPT_STATUS);
2435 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2436 
2437 	return 0;
2438 }
2439 
2440 int ilk_enable_vblank(struct drm_crtc *crtc)
2441 {
2442 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2443 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
2444 	unsigned long irqflags;
2445 	u32 bit = DISPLAY_VER(dev_priv) >= 7 ?
2446 		DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
2447 
2448 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2449 	ilk_enable_display_irq(dev_priv, bit);
2450 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2451 
2452 	/* Even though there is no DMC, the frame counter can get stuck when
2453 	 * PSR is active, as no frames are generated.
2454 	 */
2455 	if (HAS_PSR(dev_priv))
2456 		drm_crtc_vblank_restore(crtc);
2457 
2458 	return 0;
2459 }
2460 
2461 static bool gen11_dsi_configure_te(struct intel_crtc *intel_crtc,
2462 				   bool enable)
2463 {
2464 	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
2465 	enum port port;
2466 
2467 	if (!(intel_crtc->mode_flags &
2468 	    (I915_MODE_FLAG_DSI_USE_TE1 | I915_MODE_FLAG_DSI_USE_TE0)))
2469 		return false;
2470 
2471 	/* for dual link cases we consider TE from slave */
2472 	if (intel_crtc->mode_flags & I915_MODE_FLAG_DSI_USE_TE1)
2473 		port = PORT_B;
2474 	else
2475 		port = PORT_A;
2476 
2477 	intel_uncore_rmw(&dev_priv->uncore, DSI_INTR_MASK_REG(port), DSI_TE_EVENT,
2478 			 enable ? 0 : DSI_TE_EVENT);
2479 
2480 	intel_uncore_rmw(&dev_priv->uncore, DSI_INTR_IDENT_REG(port), 0, 0);
2481 
2482 	return true;
2483 }
2484 
2485 int bdw_enable_vblank(struct drm_crtc *_crtc)
2486 {
2487 	struct intel_crtc *crtc = to_intel_crtc(_crtc);
2488 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2489 	enum pipe pipe = crtc->pipe;
2490 	unsigned long irqflags;
2491 
2492 	if (gen11_dsi_configure_te(crtc, true))
2493 		return 0;
2494 
2495 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2496 	bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
2497 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2498 
2499 	/* Even if there is no DMC, the frame counter can get stuck when
2500 	 * PSR is active, as no frames are generated, so check only for PSR.
2501 	 */
2502 	if (HAS_PSR(dev_priv))
2503 		drm_crtc_vblank_restore(&crtc->base);
2504 
2505 	return 0;
2506 }
2507 
2508 /* Called from drm generic code, passed 'crtc' which
2509  * we use as a pipe index
2510  */
2511 void i8xx_disable_vblank(struct drm_crtc *crtc)
2512 {
2513 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2514 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
2515 	unsigned long irqflags;
2516 
2517 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2518 	i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
2519 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2520 }
2521 
2522 void i915gm_disable_vblank(struct drm_crtc *crtc)
2523 {
2524 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2525 
2526 	i8xx_disable_vblank(crtc);
2527 
2528 	if (--dev_priv->vblank_enabled == 0)
2529 		intel_uncore_write(&dev_priv->uncore, SCPD0, _MASKED_BIT_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
2530 }
2531 
2532 void i965_disable_vblank(struct drm_crtc *crtc)
2533 {
2534 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2535 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
2536 	unsigned long irqflags;
2537 
2538 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2539 	i915_disable_pipestat(dev_priv, pipe,
2540 			      PIPE_START_VBLANK_INTERRUPT_STATUS);
2541 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2542 }
2543 
2544 void ilk_disable_vblank(struct drm_crtc *crtc)
2545 {
2546 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2547 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
2548 	unsigned long irqflags;
2549 	u32 bit = DISPLAY_VER(dev_priv) >= 7 ?
2550 		DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
2551 
2552 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2553 	ilk_disable_display_irq(dev_priv, bit);
2554 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2555 }
2556 
2557 void bdw_disable_vblank(struct drm_crtc *_crtc)
2558 {
2559 	struct intel_crtc *crtc = to_intel_crtc(_crtc);
2560 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2561 	enum pipe pipe = crtc->pipe;
2562 	unsigned long irqflags;
2563 
2564 	if (gen11_dsi_configure_te(crtc, false))
2565 		return;
2566 
2567 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2568 	bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
2569 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2570 }
2571 
2572 static void ibx_irq_reset(struct drm_i915_private *dev_priv)
2573 {
2574 	struct intel_uncore *uncore = &dev_priv->uncore;
2575 
2576 	if (HAS_PCH_NOP(dev_priv))
2577 		return;
2578 
2579 	GEN3_IRQ_RESET(uncore, SDE);
2580 
2581 	if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
2582 		intel_uncore_write(&dev_priv->uncore, SERR_INT, 0xffffffff);
2583 }
2584 
2585 static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
2586 {
2587 	struct intel_uncore *uncore = &dev_priv->uncore;
2588 
2589 	if (IS_CHERRYVIEW(dev_priv))
2590 		intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
2591 	else
2592 		intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_VLV);
2593 
2594 	i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
2595 	intel_uncore_rmw(uncore, PORT_HOTPLUG_STAT, 0, 0);
2596 
2597 	i9xx_pipestat_irq_reset(dev_priv);
2598 
2599 	GEN3_IRQ_RESET(uncore, VLV_);
2600 	dev_priv->irq_mask = ~0u;
2601 }
2602 
2603 static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
2604 {
2605 	struct intel_uncore *uncore = &dev_priv->uncore;
2606 
2607 	u32 pipestat_mask;
2608 	u32 enable_mask;
2609 	enum pipe pipe;
2610 
2611 	pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS;
2612 
2613 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
2614 	for_each_pipe(dev_priv, pipe)
2615 		i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
2616 
2617 	enable_mask = I915_DISPLAY_PORT_INTERRUPT |
2618 		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2619 		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2620 		I915_LPE_PIPE_A_INTERRUPT |
2621 		I915_LPE_PIPE_B_INTERRUPT;
2622 
2623 	if (IS_CHERRYVIEW(dev_priv))
2624 		enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT |
2625 			I915_LPE_PIPE_C_INTERRUPT;
2626 
2627 	drm_WARN_ON(&dev_priv->drm, dev_priv->irq_mask != ~0u);
2628 
2629 	dev_priv->irq_mask = ~enable_mask;
2630 
2631 	GEN3_IRQ_INIT(uncore, VLV_, dev_priv->irq_mask, enable_mask);
2632 }
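
/*
 * Note on the convention behind the WARN_ON above: dev_priv->irq_mask is
 * kept as the bitwise complement of the enabled sources, so ~0u means
 * "everything masked", i.e. the reset state with no interrupts installed.
 */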
2633 
2634 /* drm_dma.h hooks */
2636 static void ilk_irq_reset(struct drm_i915_private *dev_priv)
2637 {
2638 	struct intel_uncore *uncore = &dev_priv->uncore;
2639 
2640 	GEN3_IRQ_RESET(uncore, DE);
2641 	dev_priv->irq_mask = ~0u;
2642 
2643 	if (GRAPHICS_VER(dev_priv) == 7)
2644 		intel_uncore_write(uncore, GEN7_ERR_INT, 0xffffffff);
2645 
2646 	if (IS_HASWELL(dev_priv)) {
2647 		intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
2648 		intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
2649 	}
2650 
2651 	gen5_gt_irq_reset(to_gt(dev_priv));
2652 
2653 	ibx_irq_reset(dev_priv);
2654 }
2655 
2656 static void valleyview_irq_reset(struct drm_i915_private *dev_priv)
2657 {
2658 	intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, 0);
2659 	intel_uncore_posting_read(&dev_priv->uncore, VLV_MASTER_IER);
2660 
2661 	gen5_gt_irq_reset(to_gt(dev_priv));
2662 
2663 	spin_lock_irq(&dev_priv->irq_lock);
2664 	if (dev_priv->display_irqs_enabled)
2665 		vlv_display_irq_reset(dev_priv);
2666 	spin_unlock_irq(&dev_priv->irq_lock);
2667 }
2668 
2669 static void gen8_display_irq_reset(struct drm_i915_private *dev_priv)
2670 {
2671 	struct intel_uncore *uncore = &dev_priv->uncore;
2672 	enum pipe pipe;
2673 
2674 	if (!HAS_DISPLAY(dev_priv))
2675 		return;
2676 
2677 	intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
2678 	intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
2679 
2680 	for_each_pipe(dev_priv, pipe)
2681 		if (intel_display_power_is_enabled(dev_priv,
2682 						   POWER_DOMAIN_PIPE(pipe)))
2683 			GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
2684 
2685 	GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
2686 	GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);
2687 }
2688 
2689 static void gen8_irq_reset(struct drm_i915_private *dev_priv)
2690 {
2691 	struct intel_uncore *uncore = &dev_priv->uncore;
2692 
2693 	gen8_master_intr_disable(uncore->regs);
2694 
2695 	gen8_gt_irq_reset(to_gt(dev_priv));
2696 	gen8_display_irq_reset(dev_priv);
2697 	GEN3_IRQ_RESET(uncore, GEN8_PCU_);
2698 
2699 	if (HAS_PCH_SPLIT(dev_priv))
2700 		ibx_irq_reset(dev_priv);
2702 }
2703 
2704 static void gen11_display_irq_reset(struct drm_i915_private *dev_priv)
2705 {
2706 	struct intel_uncore *uncore = &dev_priv->uncore;
2707 	enum pipe pipe;
2708 	u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
2709 		BIT(TRANSCODER_C) | BIT(TRANSCODER_D);
2710 
2711 	if (!HAS_DISPLAY(dev_priv))
2712 		return;
2713 
2714 	intel_uncore_write(uncore, GEN11_DISPLAY_INT_CTL, 0);
2715 
2716 	if (DISPLAY_VER(dev_priv) >= 12) {
2717 		enum transcoder trans;
2718 
2719 		for_each_cpu_transcoder_masked(dev_priv, trans, trans_mask) {
2720 			enum intel_display_power_domain domain;
2721 
2722 			domain = POWER_DOMAIN_TRANSCODER(trans);
2723 			if (!intel_display_power_is_enabled(dev_priv, domain))
2724 				continue;
2725 
2726 			intel_uncore_write(uncore, TRANS_PSR_IMR(trans), 0xffffffff);
2727 			intel_uncore_write(uncore, TRANS_PSR_IIR(trans), 0xffffffff);
2728 		}
2729 	} else {
2730 		intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
2731 		intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
2732 	}
2733 
2734 	for_each_pipe(dev_priv, pipe)
2735 		if (intel_display_power_is_enabled(dev_priv,
2736 						   POWER_DOMAIN_PIPE(pipe)))
2737 			GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
2738 
2739 	GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
2740 	GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);
2741 	GEN3_IRQ_RESET(uncore, GEN11_DE_HPD_);
2742 
2743 	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
2744 		GEN3_IRQ_RESET(uncore, SDE);
2745 }
2746 
2747 static void gen11_irq_reset(struct drm_i915_private *dev_priv)
2748 {
2749 	struct intel_gt *gt = to_gt(dev_priv);
2750 	struct intel_uncore *uncore = gt->uncore;
2751 
2752 	gen11_master_intr_disable(dev_priv->uncore.regs);
2753 
2754 	gen11_gt_irq_reset(gt);
2755 	gen11_display_irq_reset(dev_priv);
2756 
2757 	GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_);
2758 	GEN3_IRQ_RESET(uncore, GEN8_PCU_);
2759 }
2760 
2761 static void dg1_irq_reset(struct drm_i915_private *dev_priv)
2762 {
2763 	struct intel_gt *gt = to_gt(dev_priv);
2764 	struct intel_uncore *uncore = gt->uncore;
2765 
2766 	dg1_master_intr_disable(dev_priv->uncore.regs);
2767 
2768 	gen11_gt_irq_reset(gt);
2769 	gen11_display_irq_reset(dev_priv);
2770 
2771 	GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_);
2772 	GEN3_IRQ_RESET(uncore, GEN8_PCU_);
2773 }
2774 
2775 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
2776 				     u8 pipe_mask)
2777 {
2778 	struct intel_uncore *uncore = &dev_priv->uncore;
2779 	u32 extra_ier = GEN8_PIPE_VBLANK |
2780 		gen8_de_pipe_underrun_mask(dev_priv) |
2781 		gen8_de_pipe_flip_done_mask(dev_priv);
2782 	enum pipe pipe;
2783 
2784 	spin_lock_irq(&dev_priv->irq_lock);
2785 
2786 	if (!intel_irqs_enabled(dev_priv)) {
2787 		spin_unlock_irq(&dev_priv->irq_lock);
2788 		return;
2789 	}
2790 
2791 	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
2792 		GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
2793 				  dev_priv->de_irq_mask[pipe],
2794 				  ~dev_priv->de_irq_mask[pipe] | extra_ier);
2795 
2796 	spin_unlock_irq(&dev_priv->irq_lock);
2797 }
2798 
2799 void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
2800 				     u8 pipe_mask)
2801 {
2802 	struct intel_uncore *uncore = &dev_priv->uncore;
2803 	enum pipe pipe;
2804 
2805 	spin_lock_irq(&dev_priv->irq_lock);
2806 
2807 	if (!intel_irqs_enabled(dev_priv)) {
2808 		spin_unlock_irq(&dev_priv->irq_lock);
2809 		return;
2810 	}
2811 
2812 	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
2813 		GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
2814 
2815 	spin_unlock_irq(&dev_priv->irq_lock);
2816 
2817 	/* make sure we're done processing display irqs */
2818 	intel_synchronize_irq(dev_priv);
2819 }
2820 
2821 static void cherryview_irq_reset(struct drm_i915_private *dev_priv)
2822 {
2823 	struct intel_uncore *uncore = &dev_priv->uncore;
2824 
2825 	intel_uncore_write(uncore, GEN8_MASTER_IRQ, 0);
2826 	intel_uncore_posting_read(&dev_priv->uncore, GEN8_MASTER_IRQ);
2827 
2828 	gen8_gt_irq_reset(to_gt(dev_priv));
2829 
2830 	GEN3_IRQ_RESET(uncore, GEN8_PCU_);
2831 
2832 	spin_lock_irq(&dev_priv->irq_lock);
2833 	if (dev_priv->display_irqs_enabled)
2834 		vlv_display_irq_reset(dev_priv);
2835 	spin_unlock_irq(&dev_priv->irq_lock);
2836 }
2837 
2838 static u32 ibx_hotplug_enables(struct drm_i915_private *i915,
2839 			       enum hpd_pin pin)
2840 {
2841 	switch (pin) {
2842 	case HPD_PORT_A:
2843 		/*
2844 		 * When CPU and PCH are on the same package, port A
2845 		 * HPD must be enabled in both north and south.
2846 		 */
2847 		return HAS_PCH_LPT_LP(i915) ?
2848 			PORTA_HOTPLUG_ENABLE : 0;
2849 	case HPD_PORT_B:
2850 		return PORTB_HOTPLUG_ENABLE |
2851 			PORTB_PULSE_DURATION_2ms;
2852 	case HPD_PORT_C:
2853 		return PORTC_HOTPLUG_ENABLE |
2854 			PORTC_PULSE_DURATION_2ms;
2855 	case HPD_PORT_D:
2856 		return PORTD_HOTPLUG_ENABLE |
2857 			PORTD_PULSE_DURATION_2ms;
2858 	default:
2859 		return 0;
2860 	}
2861 }
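
/*
 * The per-pin callbacks above are consumed via intel_hpd_hotplug_enables(),
 * which (assumed here) ORs the callback's result over the HPD pin of every
 * registered encoder, roughly:
 *
 *	u32 enables = 0;
 *	for_each_intel_encoder(&i915->drm, encoder)
 *		enables |= hotplug_enables(i915, encoder->hpd_pin);
 *	return enables;
 */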
2862 
2863 static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv)
2864 {
2865 	/*
2866 	 * Enable digital hotplug on the PCH, and configure the DP short pulse
2867 	 * duration to 2ms (which is the minimum in the Display Port spec).
2868 	 * The pulse duration bits are reserved on LPT+.
2869 	 */
2870 	intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG,
2871 			 PORTA_HOTPLUG_ENABLE |
2872 			 PORTB_HOTPLUG_ENABLE |
2873 			 PORTC_HOTPLUG_ENABLE |
2874 			 PORTD_HOTPLUG_ENABLE |
2875 			 PORTB_PULSE_DURATION_MASK |
2876 			 PORTC_PULSE_DURATION_MASK |
2877 			 PORTD_PULSE_DURATION_MASK,
2878 			 intel_hpd_hotplug_enables(dev_priv, ibx_hotplug_enables));
2879 }
2880 
2881 static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
2882 {
2883 	u32 hotplug_irqs, enabled_irqs;
2884 
2885 	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
2886 	hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
2887 
2888 	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
2889 
2890 	ibx_hpd_detection_setup(dev_priv);
2891 }
2892 
2893 static u32 icp_ddi_hotplug_enables(struct drm_i915_private *i915,
2894 				   enum hpd_pin pin)
2895 {
2896 	switch (pin) {
2897 	case HPD_PORT_A:
2898 	case HPD_PORT_B:
2899 	case HPD_PORT_C:
2900 	case HPD_PORT_D:
2901 		return SHOTPLUG_CTL_DDI_HPD_ENABLE(pin);
2902 	default:
2903 		return 0;
2904 	}
2905 }
2906 
2907 static u32 icp_tc_hotplug_enables(struct drm_i915_private *i915,
2908 				  enum hpd_pin pin)
2909 {
2910 	switch (pin) {
2911 	case HPD_PORT_TC1:
2912 	case HPD_PORT_TC2:
2913 	case HPD_PORT_TC3:
2914 	case HPD_PORT_TC4:
2915 	case HPD_PORT_TC5:
2916 	case HPD_PORT_TC6:
2917 		return ICP_TC_HPD_ENABLE(pin);
2918 	default:
2919 		return 0;
2920 	}
2921 }
2922 
2923 static void icp_ddi_hpd_detection_setup(struct drm_i915_private *dev_priv)
2924 {
2925 	intel_uncore_rmw(&dev_priv->uncore, SHOTPLUG_CTL_DDI,
2926 			 SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_A) |
2927 			 SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_B) |
2928 			 SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_C) |
2929 			 SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_D),
2930 			 intel_hpd_hotplug_enables(dev_priv, icp_ddi_hotplug_enables));
2931 }
2932 
2933 static void icp_tc_hpd_detection_setup(struct drm_i915_private *dev_priv)
2934 {
2935 	intel_uncore_rmw(&dev_priv->uncore, SHOTPLUG_CTL_TC,
2936 			 ICP_TC_HPD_ENABLE(HPD_PORT_TC1) |
2937 			 ICP_TC_HPD_ENABLE(HPD_PORT_TC2) |
2938 			 ICP_TC_HPD_ENABLE(HPD_PORT_TC3) |
2939 			 ICP_TC_HPD_ENABLE(HPD_PORT_TC4) |
2940 			 ICP_TC_HPD_ENABLE(HPD_PORT_TC5) |
2941 			 ICP_TC_HPD_ENABLE(HPD_PORT_TC6),
2942 			 intel_hpd_hotplug_enables(dev_priv, icp_tc_hotplug_enables));
2943 }
2944 
2945 static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv)
2946 {
2947 	u32 hotplug_irqs, enabled_irqs;
2948 
2949 	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
2950 	hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
2951 
2952 	if (INTEL_PCH_TYPE(dev_priv) <= PCH_TGP)
2953 		intel_uncore_write(&dev_priv->uncore, SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
2954 
2955 	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
2956 
2957 	icp_ddi_hpd_detection_setup(dev_priv);
2958 	icp_tc_hpd_detection_setup(dev_priv);
2959 }
2960 
2961 static u32 gen11_hotplug_enables(struct drm_i915_private *i915,
2962 				 enum hpd_pin pin)
2963 {
2964 	switch (pin) {
2965 	case HPD_PORT_TC1:
2966 	case HPD_PORT_TC2:
2967 	case HPD_PORT_TC3:
2968 	case HPD_PORT_TC4:
2969 	case HPD_PORT_TC5:
2970 	case HPD_PORT_TC6:
2971 		return GEN11_HOTPLUG_CTL_ENABLE(pin);
2972 	default:
2973 		return 0;
2974 	}
2975 }
2976 
2977 static void dg1_hpd_invert(struct drm_i915_private *i915)
2978 {
2979 	u32 val = (INVERT_DDIA_HPD |
2980 		   INVERT_DDIB_HPD |
2981 		   INVERT_DDIC_HPD |
2982 		   INVERT_DDID_HPD);
2983 	intel_uncore_rmw(&i915->uncore, SOUTH_CHICKEN1, 0, val);
2984 }
2985 
2986 static void dg1_hpd_irq_setup(struct drm_i915_private *dev_priv)
2987 {
2988 	dg1_hpd_invert(dev_priv);
2989 	icp_hpd_irq_setup(dev_priv);
2990 }
2991 
2992 static void gen11_tc_hpd_detection_setup(struct drm_i915_private *dev_priv)
2993 {
2994 	intel_uncore_rmw(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL,
2995 			 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC1) |
2996 			 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC2) |
2997 			 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC3) |
2998 			 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC4) |
2999 			 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC5) |
3000 			 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC6),
3001 			 intel_hpd_hotplug_enables(dev_priv, gen11_hotplug_enables));
3002 }
3003 
3004 static void gen11_tbt_hpd_detection_setup(struct drm_i915_private *dev_priv)
3005 {
3006 	intel_uncore_rmw(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL,
3007 			 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC1) |
3008 			 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC2) |
3009 			 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC3) |
3010 			 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC4) |
3011 			 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC5) |
3012 			 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC6),
3013 			 intel_hpd_hotplug_enables(dev_priv, gen11_hotplug_enables));
3014 }
3015 
3016 static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
3017 {
3018 	u32 hotplug_irqs, enabled_irqs;
3019 
3020 	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.hpd);
3021 	hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.hpd);
3022 
3023 	intel_uncore_rmw(&dev_priv->uncore, GEN11_DE_HPD_IMR, hotplug_irqs,
3024 			 ~enabled_irqs & hotplug_irqs);
3025 	intel_uncore_posting_read(&dev_priv->uncore, GEN11_DE_HPD_IMR);
3026 
3027 	gen11_tc_hpd_detection_setup(dev_priv);
3028 	gen11_tbt_hpd_detection_setup(dev_priv);
3029 
3030 	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
3031 		icp_hpd_irq_setup(dev_priv);
3032 }
3033 
3034 static u32 spt_hotplug_enables(struct drm_i915_private *i915,
3035 			       enum hpd_pin pin)
3036 {
3037 	switch (pin) {
3038 	case HPD_PORT_A:
3039 		return PORTA_HOTPLUG_ENABLE;
3040 	case HPD_PORT_B:
3041 		return PORTB_HOTPLUG_ENABLE;
3042 	case HPD_PORT_C:
3043 		return PORTC_HOTPLUG_ENABLE;
3044 	case HPD_PORT_D:
3045 		return PORTD_HOTPLUG_ENABLE;
3046 	default:
3047 		return 0;
3048 	}
3049 }
3050 
3051 static u32 spt_hotplug2_enables(struct drm_i915_private *i915,
3052 				enum hpd_pin pin)
3053 {
3054 	switch (pin) {
3055 	case HPD_PORT_E:
3056 		return PORTE_HOTPLUG_ENABLE;
3057 	default:
3058 		return 0;
3059 	}
3060 }
3061 
3062 static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv)
3063 {
3064 	/* Display WA #1179 WaHardHangonHotPlug: cnp */
3065 	if (HAS_PCH_CNP(dev_priv)) {
3066 		intel_uncore_rmw(&dev_priv->uncore, SOUTH_CHICKEN1, CHASSIS_CLK_REQ_DURATION_MASK,
3067 				 CHASSIS_CLK_REQ_DURATION(0xf));
3068 	}
3069 
3070 	/* Enable digital hotplug on the PCH */
3071 	intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG,
3072 			 PORTA_HOTPLUG_ENABLE |
3073 			 PORTB_HOTPLUG_ENABLE |
3074 			 PORTC_HOTPLUG_ENABLE |
3075 			 PORTD_HOTPLUG_ENABLE,
3076 			 intel_hpd_hotplug_enables(dev_priv, spt_hotplug_enables));
3077 
3078 	intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG2, PORTE_HOTPLUG_ENABLE,
3079 			 intel_hpd_hotplug_enables(dev_priv, spt_hotplug2_enables));
3080 }
3081 
3082 static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
3083 {
3084 	u32 hotplug_irqs, enabled_irqs;
3085 
3086 	if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
3087 		intel_uncore_write(&dev_priv->uncore, SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
3088 
3089 	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
3090 	hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
3091 
3092 	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3093 
3094 	spt_hpd_detection_setup(dev_priv);
3095 }
3096 
3097 static u32 ilk_hotplug_enables(struct drm_i915_private *i915,
3098 			       enum hpd_pin pin)
3099 {
3100 	switch (pin) {
3101 	case HPD_PORT_A:
3102 		return DIGITAL_PORTA_HOTPLUG_ENABLE |
3103 			DIGITAL_PORTA_PULSE_DURATION_2ms;
3104 	default:
3105 		return 0;
3106 	}
3107 }
3108 
3109 static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv)
3110 {
3111 	/*
3112 	 * Enable digital hotplug on the CPU, and configure the DP short pulse
3113 	 * duration to 2ms (which is the minimum in the Display Port spec).
3114 	 * The pulse duration bits are reserved on HSW+.
3115 	 */
3116 	intel_uncore_rmw(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL,
3117 			 DIGITAL_PORTA_HOTPLUG_ENABLE | DIGITAL_PORTA_PULSE_DURATION_MASK,
3118 			 intel_hpd_hotplug_enables(dev_priv, ilk_hotplug_enables));
3119 }
3120 
3121 static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
3122 {
3123 	u32 hotplug_irqs, enabled_irqs;
3124 
3125 	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.hpd);
3126 	hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.hpd);
3127 
3128 	if (DISPLAY_VER(dev_priv) >= 8)
3129 		bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3130 	else
3131 		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
3132 
3133 	ilk_hpd_detection_setup(dev_priv);
3134 
3135 	ibx_hpd_irq_setup(dev_priv);
3136 }
3137 
3138 static u32 bxt_hotplug_enables(struct drm_i915_private *i915,
3139 			       enum hpd_pin pin)
3140 {
3141 	u32 hotplug;
3142 
3143 	switch (pin) {
3144 	case HPD_PORT_A:
3145 		hotplug = PORTA_HOTPLUG_ENABLE;
3146 		if (intel_bios_is_port_hpd_inverted(i915, PORT_A))
3147 			hotplug |= BXT_DDIA_HPD_INVERT;
3148 		return hotplug;
3149 	case HPD_PORT_B:
3150 		hotplug = PORTB_HOTPLUG_ENABLE;
3151 		if (intel_bios_is_port_hpd_inverted(i915, PORT_B))
3152 			hotplug |= BXT_DDIB_HPD_INVERT;
3153 		return hotplug;
3154 	case HPD_PORT_C:
3155 		hotplug = PORTC_HOTPLUG_ENABLE;
3156 		if (intel_bios_is_port_hpd_inverted(i915, PORT_C))
3157 			hotplug |= BXT_DDIC_HPD_INVERT;
3158 		return hotplug;
3159 	default:
3160 		return 0;
3161 	}
3162 }
3163 
3164 static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv)
3165 {
3166 	intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG,
3167 			 PORTA_HOTPLUG_ENABLE |
3168 			 PORTB_HOTPLUG_ENABLE |
3169 			 PORTC_HOTPLUG_ENABLE |
3170 			 BXT_DDI_HPD_INVERT_MASK,
3171 			 intel_hpd_hotplug_enables(dev_priv, bxt_hotplug_enables));
3172 }
3173 
3174 static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
3175 {
3176 	u32 hotplug_irqs, enabled_irqs;
3177 
3178 	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.hpd);
3179 	hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.hpd);
3180 
3181 	bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3182 
3183 	bxt_hpd_detection_setup(dev_priv);
3184 }
3185 
3186 /*
3187  * SDEIER is also touched by the interrupt handler to work around missed PCH
3188  * interrupts. Hence we can't update it after the interrupt handler is enabled -
3189  * instead we unconditionally enable all PCH interrupt sources here, but then
3190  * only unmask them as needed with SDEIMR.
3191  *
3192  * Note that we currently do this after installing the interrupt handler,
3193  * but before we enable the master interrupt. That should be sufficient
3194  * to avoid races with the irq handler, assuming we have MSI. Shared legacy
3195  * interrupts could still race.
3196  */
3197 static void ibx_irq_postinstall(struct drm_i915_private *dev_priv)
3198 {
3199 	struct intel_uncore *uncore = &dev_priv->uncore;
3200 	u32 mask;
3201 
3202 	if (HAS_PCH_NOP(dev_priv))
3203 		return;
3204 
3205 	if (HAS_PCH_IBX(dev_priv))
3206 		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
3207 	else if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
3208 		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
3209 	else
3210 		mask = SDE_GMBUS_CPT;
3211 
3212 	GEN3_IRQ_INIT(uncore, SDE, ~mask, 0xffffffff);
3213 }
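
/*
 * Sketch of what the GEN3_IRQ_INIT() call above amounts to (assumed
 * expansion; see the gen3_irq_init() helper for the exact sequence):
 *
 *	gen3_assert_iir_is_zero(uncore, SDEIIR);  <- no stale latched events
 *	write(SDEIER, 0xffffffff);  <- enable all sources, per the comment above
 *	write(SDEIMR, ~mask);       <- ... but only unmask the ones we handle
 *	posting_read(SDEIMR);
 */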
3214 
3215 static void ilk_irq_postinstall(struct drm_i915_private *dev_priv)
3216 {
3217 	struct intel_uncore *uncore = &dev_priv->uncore;
3218 	u32 display_mask, extra_mask;
3219 
3220 	if (GRAPHICS_VER(dev_priv) >= 7) {
3221 		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
3222 				DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB);
3223 		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
3224 			      DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
3225 			      DE_PLANE_FLIP_DONE_IVB(PLANE_C) |
3226 			      DE_PLANE_FLIP_DONE_IVB(PLANE_B) |
3227 			      DE_PLANE_FLIP_DONE_IVB(PLANE_A) |
3228 			      DE_DP_A_HOTPLUG_IVB);
3229 	} else {
3230 		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
3231 				DE_AUX_CHANNEL_A | DE_PIPEB_CRC_DONE |
3232 				DE_PIPEA_CRC_DONE | DE_POISON);
3233 		extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK |
3234 			      DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
3235 			      DE_PLANE_FLIP_DONE(PLANE_A) |
3236 			      DE_PLANE_FLIP_DONE(PLANE_B) |
3237 			      DE_DP_A_HOTPLUG);
3238 	}
3239 
3240 	if (IS_HASWELL(dev_priv)) {
3241 		gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
3242 		display_mask |= DE_EDP_PSR_INT_HSW;
3243 	}
3244 
3245 	if (IS_IRONLAKE_M(dev_priv))
3246 		extra_mask |= DE_PCU_EVENT;
3247 
3248 	dev_priv->irq_mask = ~display_mask;
3249 
3250 	ibx_irq_postinstall(dev_priv);
3251 
3252 	gen5_gt_irq_postinstall(to_gt(dev_priv));
3253 
3254 	GEN3_IRQ_INIT(uncore, DE, dev_priv->irq_mask,
3255 		      display_mask | extra_mask);
3256 }
3257 
3258 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
3259 {
3260 	lockdep_assert_held(&dev_priv->irq_lock);
3261 
3262 	if (dev_priv->display_irqs_enabled)
3263 		return;
3264 
3265 	dev_priv->display_irqs_enabled = true;
3266 
3267 	if (intel_irqs_enabled(dev_priv)) {
3268 		vlv_display_irq_reset(dev_priv);
3269 		vlv_display_irq_postinstall(dev_priv);
3270 	}
3271 }
3272 
3273 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
3274 {
3275 	lockdep_assert_held(&dev_priv->irq_lock);
3276 
3277 	if (!dev_priv->display_irqs_enabled)
3278 		return;
3279 
3280 	dev_priv->display_irqs_enabled = false;
3281 
3282 	if (intel_irqs_enabled(dev_priv))
3283 		vlv_display_irq_reset(dev_priv);
3284 }
3285 
3287 static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv)
3288 {
3289 	gen5_gt_irq_postinstall(to_gt(dev_priv));
3290 
3291 	spin_lock_irq(&dev_priv->irq_lock);
3292 	if (dev_priv->display_irqs_enabled)
3293 		vlv_display_irq_postinstall(dev_priv);
3294 	spin_unlock_irq(&dev_priv->irq_lock);
3295 
3296 	intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
3297 	intel_uncore_posting_read(&dev_priv->uncore, VLV_MASTER_IER);
3298 }
3299 
3300 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
3301 {
3302 	struct intel_uncore *uncore = &dev_priv->uncore;
3303 
3304 	u32 de_pipe_masked = gen8_de_pipe_fault_mask(dev_priv) |
3305 		GEN8_PIPE_CDCLK_CRC_DONE;
3306 	u32 de_pipe_enables;
3307 	u32 de_port_masked = gen8_de_port_aux_mask(dev_priv);
3308 	u32 de_port_enables;
3309 	u32 de_misc_masked = GEN8_DE_EDP_PSR;
3310 	u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
3311 		BIT(TRANSCODER_C) | BIT(TRANSCODER_D);
3312 	enum pipe pipe;
3313 
3314 	if (!HAS_DISPLAY(dev_priv))
3315 		return;
3316 
3317 	if (DISPLAY_VER(dev_priv) <= 10)
3318 		de_misc_masked |= GEN8_DE_MISC_GSE;
3319 
3320 	if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
3321 		de_port_masked |= BXT_DE_PORT_GMBUS;
3322 
3323 	if (DISPLAY_VER(dev_priv) >= 11) {
3324 		enum port port;
3325 
3326 		if (intel_bios_is_dsi_present(dev_priv, &port))
3327 			de_port_masked |= DSI0_TE | DSI1_TE;
3328 	}
3329 
3330 	de_pipe_enables = de_pipe_masked |
3331 		GEN8_PIPE_VBLANK |
3332 		gen8_de_pipe_underrun_mask(dev_priv) |
3333 		gen8_de_pipe_flip_done_mask(dev_priv);
3334 
3335 	de_port_enables = de_port_masked;
3336 	if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
3337 		de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
3338 	else if (IS_BROADWELL(dev_priv))
3339 		de_port_enables |= BDW_DE_PORT_HOTPLUG_MASK;
3340 
3341 	if (DISPLAY_VER(dev_priv) >= 12) {
3342 		enum transcoder trans;
3343 
3344 		for_each_cpu_transcoder_masked(dev_priv, trans, trans_mask) {
3345 			enum intel_display_power_domain domain;
3346 
3347 			domain = POWER_DOMAIN_TRANSCODER(trans);
3348 			if (!intel_display_power_is_enabled(dev_priv, domain))
3349 				continue;
3350 
3351 			gen3_assert_iir_is_zero(uncore, TRANS_PSR_IIR(trans));
3352 		}
3353 	} else {
3354 		gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
3355 	}
3356 
3357 	for_each_pipe(dev_priv, pipe) {
3358 		dev_priv->de_irq_mask[pipe] = ~de_pipe_masked;
3359 
3360 		if (intel_display_power_is_enabled(dev_priv,
3361 				POWER_DOMAIN_PIPE(pipe)))
3362 			GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
3363 					  dev_priv->de_irq_mask[pipe],
3364 					  de_pipe_enables);
3365 	}
3366 
3367 	GEN3_IRQ_INIT(uncore, GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
3368 	GEN3_IRQ_INIT(uncore, GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);
3369 
3370 	if (DISPLAY_VER(dev_priv) >= 11) {
3371 		u32 de_hpd_masked = 0;
3372 		u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK |
3373 				     GEN11_DE_TBT_HOTPLUG_MASK;
3374 
3375 		GEN3_IRQ_INIT(uncore, GEN11_DE_HPD_, ~de_hpd_masked,
3376 			      de_hpd_enables);
3377 	}
3378 }
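
/*
 * Note the two-level convention above (inferred from the arguments to
 * GEN3_IRQ_INIT()/GEN8_IRQ_INIT_NDX()): the *_masked values are sources
 * both enabled in IER and unmasked in IMR, so they interrupt immediately;
 * the extra bits in de_pipe_enables (vblank, underrun, flip done) are set
 * in IER but left masked in IMR, and are only unmasked on demand, e.g. by
 * bdw_enable_pipe_irq() when vblank interrupts are requested.
 */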
3379 
3380 static void icp_irq_postinstall(struct drm_i915_private *dev_priv)
3381 {
3382 	struct intel_uncore *uncore = &dev_priv->uncore;
3383 	u32 mask = SDE_GMBUS_ICP;
3384 
	GEN3_IRQ_INIT(uncore, SDE, ~mask, 0xffffffff);
}

static void gen8_irq_postinstall(struct drm_i915_private *dev_priv)
{
	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
		icp_irq_postinstall(dev_priv);
	else if (HAS_PCH_SPLIT(dev_priv))
		ibx_irq_postinstall(dev_priv);

	gen8_gt_irq_postinstall(to_gt(dev_priv));
	gen8_de_irq_postinstall(dev_priv);

	gen8_master_intr_enable(dev_priv->uncore.regs);
}

static void gen11_de_irq_postinstall(struct drm_i915_private *dev_priv)
{
	if (!HAS_DISPLAY(dev_priv))
		return;

	gen8_de_irq_postinstall(dev_priv);

	intel_uncore_write(&dev_priv->uncore, GEN11_DISPLAY_INT_CTL,
			   GEN11_DISPLAY_IRQ_ENABLE);
}

static void gen11_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_gt *gt = to_gt(dev_priv);
	struct intel_uncore *uncore = gt->uncore;
	u32 gu_misc_masked = GEN11_GU_MISC_GSE;

	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
		icp_irq_postinstall(dev_priv);

	gen11_gt_irq_postinstall(gt);
	gen11_de_irq_postinstall(dev_priv);

	GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);

	gen11_master_intr_enable(uncore->regs);
	intel_uncore_posting_read(&dev_priv->uncore, GEN11_GFX_MSTR_IRQ);
}

static void dg1_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_gt *gt = to_gt(dev_priv);
	struct intel_uncore *uncore = gt->uncore;
	u32 gu_misc_masked = GEN11_GU_MISC_GSE;

	gen11_gt_irq_postinstall(gt);

	GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);

	if (HAS_DISPLAY(dev_priv)) {
		icp_irq_postinstall(dev_priv);
		gen8_de_irq_postinstall(dev_priv);
		intel_uncore_write(&dev_priv->uncore, GEN11_DISPLAY_INT_CTL,
				   GEN11_DISPLAY_IRQ_ENABLE);
	}

	dg1_master_intr_enable(uncore->regs);
	intel_uncore_posting_read(uncore, DG1_MSTR_TILE_INTR);
}

static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv)
{
	gen8_gt_irq_postinstall(to_gt(dev_priv));

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_postinstall(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
	intel_uncore_posting_read(&dev_priv->uncore, GEN8_MASTER_IRQ);
}

static void i8xx_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	i9xx_pipestat_irq_reset(dev_priv);

	gen2_irq_reset(uncore);
	dev_priv->irq_mask = ~0u;
}

static void i8xx_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u16 enable_mask;

	intel_uncore_write16(uncore,
			     EMR,
			     ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_MASTER_ERROR_INTERRUPT);

	enable_mask =
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_MASTER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

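	/*
	 * Note that I915_USER_INTERRUPT is enabled in IER but left masked in
	 * IMR for now; the ring submission code is expected to unmask it on
	 * demand whenever breadcrumb interrupts are actually wanted.
	 */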
	gen2_irq_init(uncore, dev_priv->irq_mask, enable_mask);

	/*
	 * Interrupt setup is already guaranteed to be single-threaded; this
	 * is just to make the assert_spin_locked check happy.
	 */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);
}

static void i8xx_error_irq_ack(struct drm_i915_private *i915,
			       u16 *eir, u16 *eir_stuck)
{
	struct intel_uncore *uncore = &i915->uncore;
	u16 emr;

	*eir = intel_uncore_read16(uncore, EIR);

	if (*eir)
		intel_uncore_write16(uncore, EIR, *eir);

	*eir_stuck = intel_uncore_read16(uncore, EIR);
	if (*eir_stuck == 0)
		return;

	/*
	 * Toggle all EMR bits to make sure we get an edge
	 * in the ISR master error bit if we don't clear
	 * all the EIR bits. Otherwise the edge triggered
	 * IIR on i965/g4x wouldn't notice that an interrupt
	 * is still pending. Also some EIR bits can't be
	 * cleared except by handling the underlying error
	 * (or by a GPU reset) so we mask any bit that
	 * remains set.
	 */
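	/*
	 * E.g. with a stuck 0x0010 bit: EMR briefly goes to 0xffff (the
	 * master error condition deasserts), then to emr | 0x0010, so any
	 * other unmasked EIR bit still pending produces a fresh edge while
	 * the stuck bit stays masked.
	 */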
	emr = intel_uncore_read16(uncore, EMR);
	intel_uncore_write16(uncore, EMR, 0xffff);
	intel_uncore_write16(uncore, EMR, emr | *eir_stuck);
}

static void i8xx_error_irq_handler(struct drm_i915_private *dev_priv,
				   u16 eir, u16 eir_stuck)
{
	drm_dbg(&dev_priv->drm, "Master Error: EIR 0x%04x\n", eir);

	if (eir_stuck)
		drm_dbg(&dev_priv->drm, "EIR stuck: 0x%04x, masked\n",
			eir_stuck);
}

static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv,
			       u32 *eir, u32 *eir_stuck)
{
	u32 emr;

	*eir = intel_uncore_rmw(&dev_priv->uncore, EIR, 0, 0);

	*eir_stuck = intel_uncore_read(&dev_priv->uncore, EIR);
	if (*eir_stuck == 0)
		return;

	/*
	 * Toggle all EMR bits to make sure we get an edge
	 * in the ISR master error bit if we don't clear
	 * all the EIR bits. Otherwise the edge triggered
	 * IIR on i965/g4x wouldn't notice that an interrupt
	 * is still pending. Also some EIR bits can't be
	 * cleared except by handling the underlying error
	 * (or by a GPU reset) so we mask any bit that
	 * remains set.
	 */
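	/* The rmw returns the old EMR contents while writing all ones to it. */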
	emr = intel_uncore_rmw(&dev_priv->uncore, EMR, ~0, 0xffffffff);
	intel_uncore_write(&dev_priv->uncore, EMR, emr | *eir_stuck);
}

static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv,
				   u32 eir, u32 eir_stuck)
{
	drm_dbg(&dev_priv->drm, "Master Error, EIR 0x%08x\n", eir);

	if (eir_stuck)
		drm_dbg(&dev_priv->drm, "EIR stuck: 0x%08x, masked\n",
			eir_stuck);
}

static irqreturn_t i8xx_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend; we don't need a wakeref. */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

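	/*
	 * Single pass only; the do {} while (0) merely allows "break" to
	 * bail out early when no IIR bits are set.
	 */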
	do {
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u16 eir = 0, eir_stuck = 0;
		u16 iir;

		iir = intel_uncore_read16(&dev_priv->uncore, GEN2_IIR);
		if (iir == 0)
			break;

		ret = IRQ_HANDLED;

		/*
		 * Call regardless, as some status bits might not be
		 * signalled in iir.
		 */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i8xx_error_irq_ack(dev_priv, &eir, &eir_stuck);

		intel_uncore_write16(&dev_priv->uncore, GEN2_IIR, iir);

		if (iir & I915_USER_INTERRUPT)
			intel_engine_cs_irq(to_gt(dev_priv)->engine[RCS0], iir);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i8xx_error_irq_handler(dev_priv, eir, eir_stuck);

		i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats);
	} while (0);

	pmu_irq_stats(dev_priv, ret);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}

static void i915_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	if (I915_HAS_HOTPLUG(dev_priv)) {
		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
		intel_uncore_rmw(&dev_priv->uncore, PORT_HOTPLUG_STAT, 0, 0);
	}

	i9xx_pipestat_irq_reset(dev_priv);

	GEN3_IRQ_RESET(uncore, GEN2_);
	dev_priv->irq_mask = ~0u;
}

static void i915_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 enable_mask;

	intel_uncore_write(uncore, EMR, ~(I915_ERROR_PAGE_TABLE |
					  I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_MASTER_ERROR_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_MASTER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	if (I915_HAS_HOTPLUG(dev_priv)) {
		/* Enable in IER... */
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
		/* and unmask in IMR */
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
	}

	GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);

	/*
	 * Interrupt setup is already guaranteed to be single-threaded; this
	 * is just to make the assert_spin_locked check happy.
	 */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	i915_enable_asle_pipestat(dev_priv);
}

static irqreturn_t i915_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend; we don't need a wakeref. */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 eir = 0, eir_stuck = 0;
		u32 hotplug_status = 0;
		u32 iir;

		iir = intel_uncore_read(&dev_priv->uncore, GEN2_IIR);
		if (iir == 0)
			break;

		ret = IRQ_HANDLED;

		if (I915_HAS_HOTPLUG(dev_priv) &&
		    iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/*
		 * Call regardless, as some status bits might not be
		 * signalled in iir.
		 */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);

		intel_uncore_write(&dev_priv->uncore, GEN2_IIR, iir);

		if (iir & I915_USER_INTERRUPT)
			intel_engine_cs_irq(to_gt(dev_priv)->engine[RCS0], iir);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_handler(dev_priv, eir, eir_stuck);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		i915_pipestat_irq_handler(dev_priv, iir, pipe_stats);
	} while (0);

	pmu_irq_stats(dev_priv, ret);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}

static void i965_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
	intel_uncore_rmw(uncore, PORT_HOTPLUG_STAT, 0, 0);

	i9xx_pipestat_irq_reset(dev_priv);

	GEN3_IRQ_RESET(uncore, GEN2_);
	dev_priv->irq_mask = ~0u;
}

static void i965_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 enable_mask;
	u32 error_mask;

	/*
	 * Enable some error detection; note that the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev_priv)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	intel_uncore_write(uncore, EMR, error_mask);

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PORT_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_MASTER_ERROR_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PORT_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_MASTER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	if (IS_G4X(dev_priv))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);

	/*
	 * Interrupt setup is already guaranteed to be single-threaded; this
	 * is just to make the assert_spin_locked check happy.
	 */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	i915_enable_asle_pipestat(dev_priv);
}

static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_en;

	lockdep_assert_held(&dev_priv->irq_lock);

	/*
	 * Note that HDMI and DP share hotplug bits, and the enable bits are
	 * the same for all generations.
	 */
	hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915);
	/*
	 * Programming the CRT detection parameters tends to generate a
	 * spurious hotplug event about three seconds later. So just do
	 * it once.
	 */
	if (IS_G4X(dev_priv))
		hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
	hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;

	/* Ignore TV since it's buggy */
	i915_hotplug_interrupt_update_locked(dev_priv,
					     HOTPLUG_INT_EN_MASK |
					     CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
					     CRT_HOTPLUG_ACTIVATION_PERIOD_64,
					     hotplug_en);
}

static irqreturn_t i965_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend; we don't need a wakeref. */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 eir = 0, eir_stuck = 0;
		u32 hotplug_status = 0;
		u32 iir;

		iir = intel_uncore_read(&dev_priv->uncore, GEN2_IIR);
		if (iir == 0)
			break;

		ret = IRQ_HANDLED;

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/*
		 * Call regardless, as some status bits might not be
		 * signalled in iir.
		 */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);

		intel_uncore_write(&dev_priv->uncore, GEN2_IIR, iir);

		if (iir & I915_USER_INTERRUPT)
			intel_engine_cs_irq(to_gt(dev_priv)->engine[RCS0],
					    iir);

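		/*
		 * I915_BSD_USER_INTERRUPT is bit 25, so the shift lines the
		 * BSD bit up with GT_RENDER_USER_INTERRUPT (bit 0), which is
		 * the bit intel_engine_cs_irq() looks at.
		 */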
		if (iir & I915_BSD_USER_INTERRUPT)
			intel_engine_cs_irq(to_gt(dev_priv)->engine[VCS0],
					    iir >> 25);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_handler(dev_priv, eir, eir_stuck);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		i965_pipestat_irq_handler(dev_priv, iir, pipe_stats);
	} while (0);

	pmu_irq_stats(dev_priv, ret);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}

struct intel_hotplug_funcs {
	void (*hpd_irq_setup)(struct drm_i915_private *i915);
};

#define HPD_FUNCS(platform)					 \
static const struct intel_hotplug_funcs platform##_hpd_funcs = { \
	.hpd_irq_setup = platform##_hpd_irq_setup,		 \
}
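
/*
 * For example, HPD_FUNCS(i915); expands to
 *
 *	static const struct intel_hotplug_funcs i915_hpd_funcs = {
 *		.hpd_irq_setup = i915_hpd_irq_setup,
 *	};
 *
 * so every invocation below relies on a matching
 * <platform>_hpd_irq_setup() implementation existing above.
 */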

HPD_FUNCS(i915);
HPD_FUNCS(dg1);
HPD_FUNCS(gen11);
HPD_FUNCS(bxt);
HPD_FUNCS(icp);
HPD_FUNCS(spt);
HPD_FUNCS(ilk);
#undef HPD_FUNCS

void intel_hpd_irq_setup(struct drm_i915_private *i915)
{
	if (i915->display_irqs_enabled && i915->display.funcs.hotplug)
		i915->display.funcs.hotplug->hpd_irq_setup(i915);
}

/**
 * intel_irq_init - initializes irq support
 * @dev_priv: i915 device instance
 *
 * This function initializes all the irq support including work items, timers
 * and all the vtables. It does not set up the interrupt itself though.
 */
void intel_irq_init(struct drm_i915_private *dev_priv)
{
	int i;

	INIT_WORK(&dev_priv->l3_parity.error_work, ivb_parity_work);
	for (i = 0; i < MAX_L3_SLICES; ++i)
		dev_priv->l3_parity.remap_info[i] = NULL;

	/* Pre-gen11 the GuC irq bits are in the upper 16 bits of the PM reg. */
	if (HAS_GT_UC(dev_priv) && GRAPHICS_VER(dev_priv) < 11)
		to_gt(dev_priv)->pm_guc_events = GUC_INTR_GUC2HOST << 16;

	if (!HAS_DISPLAY(dev_priv))
		return;

	intel_hpd_init_pins(dev_priv);

	intel_hpd_init_early(dev_priv);

	dev_priv->drm.vblank_disable_immediate = true;

	/*
	 * Most platforms treat the display irq block as an always-on power
	 * domain. vlv/chv can disable it at runtime and need special care to
	 * avoid writing any of the display block registers outside of the
	 * power domain. We defer setting up the display irqs in this case to
	 * the runtime pm.
	 */
	dev_priv->display_irqs_enabled = true;
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		dev_priv->display_irqs_enabled = false;

	if (HAS_GMCH(dev_priv)) {
		if (I915_HAS_HOTPLUG(dev_priv))
			dev_priv->display.funcs.hotplug = &i915_hpd_funcs;
	} else {
		if (HAS_PCH_DG2(dev_priv))
			dev_priv->display.funcs.hotplug = &icp_hpd_funcs;
		else if (HAS_PCH_DG1(dev_priv))
			dev_priv->display.funcs.hotplug = &dg1_hpd_funcs;
		else if (DISPLAY_VER(dev_priv) >= 11)
			dev_priv->display.funcs.hotplug = &gen11_hpd_funcs;
		else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
			dev_priv->display.funcs.hotplug = &bxt_hpd_funcs;
		else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
			dev_priv->display.funcs.hotplug = &icp_hpd_funcs;
		else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
			dev_priv->display.funcs.hotplug = &spt_hpd_funcs;
		else
			dev_priv->display.funcs.hotplug = &ilk_hpd_funcs;
	}
}

/**
 * intel_irq_fini - deinitializes IRQ support
 * @i915: i915 device instance
 *
 * This function deinitializes all the IRQ support.
 */
void intel_irq_fini(struct drm_i915_private *i915)
{
	int i;

	for (i = 0; i < MAX_L3_SLICES; ++i)
		kfree(i915->l3_parity.remap_info[i]);
}

static irq_handler_t intel_irq_handler(struct drm_i915_private *dev_priv)
{
	if (HAS_GMCH(dev_priv)) {
		if (IS_CHERRYVIEW(dev_priv))
			return cherryview_irq_handler;
		else if (IS_VALLEYVIEW(dev_priv))
			return valleyview_irq_handler;
		else if (GRAPHICS_VER(dev_priv) == 4)
			return i965_irq_handler;
		else if (GRAPHICS_VER(dev_priv) == 3)
			return i915_irq_handler;
		else
			return i8xx_irq_handler;
	} else {
		if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
			return dg1_irq_handler;
		else if (GRAPHICS_VER(dev_priv) >= 11)
			return gen11_irq_handler;
		else if (GRAPHICS_VER(dev_priv) >= 8)
			return gen8_irq_handler;
		else
			return ilk_irq_handler;
	}
}

static void intel_irq_reset(struct drm_i915_private *dev_priv)
{
	if (HAS_GMCH(dev_priv)) {
		if (IS_CHERRYVIEW(dev_priv))
			cherryview_irq_reset(dev_priv);
		else if (IS_VALLEYVIEW(dev_priv))
			valleyview_irq_reset(dev_priv);
		else if (GRAPHICS_VER(dev_priv) == 4)
			i965_irq_reset(dev_priv);
		else if (GRAPHICS_VER(dev_priv) == 3)
			i915_irq_reset(dev_priv);
		else
			i8xx_irq_reset(dev_priv);
	} else {
		if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
			dg1_irq_reset(dev_priv);
		else if (GRAPHICS_VER(dev_priv) >= 11)
			gen11_irq_reset(dev_priv);
		else if (GRAPHICS_VER(dev_priv) >= 8)
			gen8_irq_reset(dev_priv);
		else
			ilk_irq_reset(dev_priv);
	}
}

static void intel_irq_postinstall(struct drm_i915_private *dev_priv)
{
	if (HAS_GMCH(dev_priv)) {
		if (IS_CHERRYVIEW(dev_priv))
			cherryview_irq_postinstall(dev_priv);
		else if (IS_VALLEYVIEW(dev_priv))
			valleyview_irq_postinstall(dev_priv);
		else if (GRAPHICS_VER(dev_priv) == 4)
			i965_irq_postinstall(dev_priv);
		else if (GRAPHICS_VER(dev_priv) == 3)
			i915_irq_postinstall(dev_priv);
		else
			i8xx_irq_postinstall(dev_priv);
	} else {
		if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
			dg1_irq_postinstall(dev_priv);
		else if (GRAPHICS_VER(dev_priv) >= 11)
			gen11_irq_postinstall(dev_priv);
		else if (GRAPHICS_VER(dev_priv) >= 8)
			gen8_irq_postinstall(dev_priv);
		else
			ilk_irq_postinstall(dev_priv);
	}
}

/**
 * intel_irq_install - enables the hardware interrupt
 * @dev_priv: i915 device instance
 *
 * This function enables the hardware interrupt handling, but leaves hotplug
 * handling disabled. It is called after intel_irq_init().
 *
 * In the driver load and resume code we need working interrupts in a few
 * places but don't want to deal with the hassle of concurrent probe and
 * hotplug workers. Hence the split into this two-stage approach.
 */
int intel_irq_install(struct drm_i915_private *dev_priv)
{
	int irq = to_pci_dev(dev_priv->drm.dev)->irq;
	int ret;

	/*
	 * We enable some interrupt sources in our postinstall hooks, so mark
	 * interrupts as enabled _before_ actually enabling them to avoid
	 * special cases in our ordering checks.
	 */
	dev_priv->runtime_pm.irqs_enabled = true;

	dev_priv->irq_enabled = true;

	intel_irq_reset(dev_priv);

	ret = request_irq(irq, intel_irq_handler(dev_priv),
			  IRQF_SHARED, DRIVER_NAME, dev_priv);
	if (ret < 0) {
		dev_priv->irq_enabled = false;
		return ret;
	}

	intel_irq_postinstall(dev_priv);

	return ret;
}

/**
 * intel_irq_uninstall - finalizes all irq handling
 * @dev_priv: i915 device instance
 *
 * This stops interrupt and hotplug handling and unregisters and frees all
 * resources acquired in the init functions.
 */
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
{
	int irq = to_pci_dev(dev_priv->drm.dev)->irq;

	/*
	 * FIXME we can get called twice during driver probe
	 * error handling as well as during driver remove due to
	 * intel_modeset_driver_remove() calling us out of sequence.
	 * Would be nice if it didn't do that...
	 */
	if (!dev_priv->irq_enabled)
		return;

	dev_priv->irq_enabled = false;

	intel_irq_reset(dev_priv);

	free_irq(irq, dev_priv);

	intel_hpd_cancel_work(dev_priv);
	dev_priv->runtime_pm.irqs_enabled = false;
}

/**
 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
 * @dev_priv: i915 device instance
 *
 * This function is used to disable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
{
	intel_irq_reset(dev_priv);
	dev_priv->runtime_pm.irqs_enabled = false;
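	/* Wait for any handler that is still running to finish. */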
	intel_synchronize_irq(dev_priv);
}

/**
 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
 * @dev_priv: i915 device instance
 *
 * This function is used to enable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
{
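	/* Mark irqs as enabled first, then reinstall them from scratch. */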
	dev_priv->runtime_pm.irqs_enabled = true;
	intel_irq_reset(dev_priv);
	intel_irq_postinstall(dev_priv);
}

bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
{
	return dev_priv->runtime_pm.irqs_enabled;
}

void intel_synchronize_irq(struct drm_i915_private *i915)
{
	synchronize_irq(to_pci_dev(i915->drm.dev)->irq);
}

void intel_synchronize_hardirq(struct drm_i915_private *i915)
{
	synchronize_hardirq(to_pci_dev(i915->drm.dev)->irq);
}