xref: /openbmc/linux/drivers/gpu/drm/i915/display/intel_display_irq.c (revision 901bdf5ea1a836400ee69aa32b04e9c209271ec7)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2023 Intel Corporation
4  */
5 
6 #include "gt/intel_rps.h"
7 #include "i915_drv.h"
8 #include "i915_irq.h"
9 #include "i915_reg.h"
10 #include "icl_dsi_regs.h"
11 #include "intel_crtc.h"
12 #include "intel_de.h"
13 #include "intel_display_irq.h"
14 #include "intel_display_trace.h"
15 #include "intel_display_types.h"
16 #include "intel_dp_aux.h"
17 #include "intel_fdi_regs.h"
18 #include "intel_fifo_underrun.h"
19 #include "intel_gmbus.h"
20 #include "intel_hotplug_irq.h"
21 #include "intel_psr.h"
22 #include "intel_psr_regs.h"
23 
/*
 * Forward a hardware vblank interrupt for @pipe to the DRM core's
 * vblank machinery via the pipe's CRTC.
 */
static void
intel_handle_vblank(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);

	drm_crtc_handle_vblank(&crtc->base);
}
31 
/**
 * ilk_update_display_irq - update DEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 *
 * Recomputes the DEIMR bits covered by @interrupt_mask: bits set in
 * @enabled_irq_mask are unmasked (interrupt enabled), the remainder are
 * masked. Caller must hold dev_priv->irq_lock.
 */
void ilk_update_display_irq(struct drm_i915_private *dev_priv,
			    u32 interrupt_mask, u32 enabled_irq_mask)
{
	u32 new_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	/* enabled bits must be a subset of the bits being updated */
	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

	new_val = dev_priv->irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	/* only touch the hardware if the cached mask actually changes */
	if (new_val != dev_priv->irq_mask &&
	    !drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv))) {
		dev_priv->irq_mask = new_val;
		intel_uncore_write(&dev_priv->uncore, DEIMR, dev_priv->irq_mask);
		/* posting read flushes the write to hardware */
		intel_uncore_posting_read(&dev_priv->uncore, DEIMR);
	}
}
57 
/* Unmask (enable) the given DEIMR bits. Caller must hold irq_lock. */
void ilk_enable_display_irq(struct drm_i915_private *i915, u32 bits)
{
	ilk_update_display_irq(i915, bits, bits);
}
62 
/* Mask (disable) the given DEIMR bits. Caller must hold irq_lock. */
void ilk_disable_display_irq(struct drm_i915_private *i915, u32 bits)
{
	ilk_update_display_irq(i915, bits, 0);
}
67 
/**
 * bdw_update_port_irq - update DE port interrupt
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 *
 * Read-modify-write of GEN8_DE_PORT_IMR: bits in @enabled_irq_mask are
 * unmasked, the other bits of @interrupt_mask are masked. Unlike the
 * DEIMR path there is no cached copy, so the current value is read back
 * from the hardware. Caller must hold dev_priv->irq_lock.
 */
void bdw_update_port_irq(struct drm_i915_private *dev_priv,
			 u32 interrupt_mask, u32 enabled_irq_mask)
{
	u32 new_val;
	u32 old_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	/* enabled bits must be a subset of the bits being updated */
	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
		return;

	old_val = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PORT_IMR);

	new_val = old_val;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	/* avoid a redundant register write when nothing changed */
	if (new_val != old_val) {
		intel_uncore_write(&dev_priv->uncore, GEN8_DE_PORT_IMR, new_val);
		intel_uncore_posting_read(&dev_priv->uncore, GEN8_DE_PORT_IMR);
	}
}
98 
/**
 * bdw_update_pipe_irq - update DE pipe interrupt
 * @dev_priv: driver private
 * @pipe: pipe whose interrupt to update
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 *
 * Updates GEN8_DE_PIPE_IMR(@pipe) through the cached per-pipe copy in
 * dev_priv->de_irq_mask[]. Caller must hold dev_priv->irq_lock.
 */
static void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
				enum pipe pipe, u32 interrupt_mask,
				u32 enabled_irq_mask)
{
	u32 new_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	/* enabled bits must be a subset of the bits being updated */
	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->de_irq_mask[pipe];
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	/* only write the hardware when the cached mask actually changes */
	if (new_val != dev_priv->de_irq_mask[pipe]) {
		dev_priv->de_irq_mask[pipe] = new_val;
		intel_uncore_write(&dev_priv->uncore, GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
		intel_uncore_posting_read(&dev_priv->uncore, GEN8_DE_PIPE_IMR(pipe));
	}
}
129 
/* Unmask (enable) the given DE pipe IMR bits. Caller must hold irq_lock. */
void bdw_enable_pipe_irq(struct drm_i915_private *i915,
			 enum pipe pipe, u32 bits)
{
	bdw_update_pipe_irq(i915, pipe, bits, bits);
}
135 
/* Mask (disable) the given DE pipe IMR bits. Caller must hold irq_lock. */
void bdw_disable_pipe_irq(struct drm_i915_private *i915,
			  enum pipe pipe, u32 bits)
{
	bdw_update_pipe_irq(i915, pipe, bits, 0);
}
141 
/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 *
 * Read-modify-write of the south display engine interrupt mask
 * (SDEIMR): bits in @enabled_irq_mask get unmasked, the rest of
 * @interrupt_mask gets masked. Caller must hold dev_priv->irq_lock.
 */
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
				  u32 interrupt_mask,
				  u32 enabled_irq_mask)
{
	u32 sdeimr = intel_uncore_read(&dev_priv->uncore, SDEIMR);

	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	/* enabled bits must be a subset of the bits being updated */
	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

	lockdep_assert_held(&dev_priv->irq_lock);

	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
		return;

	intel_uncore_write(&dev_priv->uncore, SDEIMR, sdeimr);
	/* posting read flushes the write to hardware */
	intel_uncore_posting_read(&dev_priv->uncore, SDEIMR);
}
167 
/* Unmask (enable) the given SDEIMR bits. Caller must hold irq_lock. */
void ibx_enable_display_interrupt(struct drm_i915_private *i915, u32 bits)
{
	ibx_display_interrupt_update(i915, bits, bits);
}
172 
/* Mask (disable) the given SDEIMR bits. Caller must hold irq_lock. */
void ibx_disable_display_interrupt(struct drm_i915_private *i915, u32 bits)
{
	ibx_display_interrupt_update(i915, bits, 0);
}
177 
/*
 * Derive the PIPESTAT enable (high half) bits from the cached status
 * (low half) bits for @pipe. By default each enable bit sits 16 above
 * its status bit; a few bits need special handling below. Returns 0 if
 * an unsupported status bit is requested. Caller must hold irq_lock.
 */
u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
			      enum pipe pipe)
{
	u32 status_mask = dev_priv->pipestat_irq_mask[pipe];
	u32 enable_mask = status_mask << 16;

	lockdep_assert_held(&dev_priv->irq_lock);

	/* the special cases below only apply to display ver 5+ (VLV/CHV bits) */
	if (DISPLAY_VER(dev_priv) < 5)
		goto out;

	/*
	 * On pipe A we don't support the PSR interrupt yet,
	 * on pipe B and C the same bit MBZ.
	 */
	if (drm_WARN_ON_ONCE(&dev_priv->drm,
			     status_mask & PIPE_A_PSR_STATUS_VLV))
		return 0;
	/*
	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
	 * A the same bit is for perf counters which we don't use either.
	 */
	if (drm_WARN_ON_ONCE(&dev_priv->drm,
			     status_mask & PIPE_B_PSR_STATUS_VLV))
		return 0;

	/* these enable bits are not at status_bit << 16; patch them up */
	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

out:
	drm_WARN_ONCE(&dev_priv->drm,
		      enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask);

	return enable_mask;
}
221 
/*
 * Enable the given PIPESTAT status bits for @pipe: update the cached
 * mask and write both the derived enable bits and the status bits (to
 * ack any stale status) to the PIPESTAT register. Caller must hold
 * irq_lock.
 */
void i915_enable_pipestat(struct drm_i915_private *dev_priv,
			  enum pipe pipe, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 enable_mask;

	drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: status_mask=0x%x\n",
		      pipe_name(pipe), status_mask);

	lockdep_assert_held(&dev_priv->irq_lock);
	drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));

	/* nothing to do if all requested bits are already enabled */
	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == status_mask)
		return;

	dev_priv->pipestat_irq_mask[pipe] |= status_mask;
	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

	intel_uncore_write(&dev_priv->uncore, reg, enable_mask | status_mask);
	intel_uncore_posting_read(&dev_priv->uncore, reg);
}
244 
/*
 * Disable the given PIPESTAT status bits for @pipe: clear them from the
 * cached mask and write the recomputed enable bits (plus the status
 * bits, to ack any stale status) to the PIPESTAT register. Caller must
 * hold irq_lock.
 */
void i915_disable_pipestat(struct drm_i915_private *dev_priv,
			   enum pipe pipe, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 enable_mask;

	drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: status_mask=0x%x\n",
		      pipe_name(pipe), status_mask);

	lockdep_assert_held(&dev_priv->irq_lock);
	drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));

	/* nothing to do if none of the requested bits are enabled */
	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == 0)
		return;

	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

	intel_uncore_write(&dev_priv->uncore, reg, enable_mask | status_mask);
	intel_uncore_posting_read(&dev_priv->uncore, reg);
}
267 
268 static bool i915_has_asle(struct drm_i915_private *dev_priv)
269 {
270 	if (!dev_priv->display.opregion.asle)
271 		return false;
272 
273 	return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
274 }
275 
/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 * @dev_priv: i915 device private
 *
 * Enables the legacy backlight (BLC) event status on pipe B (and also
 * pipe A on display ver 4+) so OpRegion ASLE notifications are raised.
 * No-op when the platform has no usable ASLE support.
 */
void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
{
	if (!i915_has_asle(dev_priv))
		return;

	spin_lock_irq(&dev_priv->irq_lock);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
	if (DISPLAY_VER(dev_priv) >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_STATUS);

	spin_unlock_irq(&dev_priv->irq_lock);
}
294 
#if defined(CONFIG_DEBUG_FS)
/*
 * Record a set of pipe CRC values into the CRTC's CRC entry list for
 * debugfs consumers, skipping the first CRC(s) after enabling which are
 * known to be unreliable. Compiled out when debugfs is disabled.
 */
static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
					 enum pipe pipe,
					 u32 crc0, u32 crc1,
					 u32 crc2, u32 crc3,
					 u32 crc4)
{
	struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);
	struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc;
	u32 crcs[5] = { crc0, crc1, crc2, crc3, crc4 };

	trace_intel_pipe_crc(crtc, crcs);

	spin_lock(&pipe_crc->lock);
	/*
	 * For some not yet identified reason, the first CRC is
	 * bonkers. So let's just wait for the next vblank and read
	 * out the buggy result.
	 *
	 * On GEN8+ sometimes the second CRC is bonkers as well, so
	 * don't trust that one either.
	 */
	if (pipe_crc->skipped <= 0 ||
	    (DISPLAY_VER(dev_priv) >= 8 && pipe_crc->skipped == 1)) {
		pipe_crc->skipped++;
		spin_unlock(&pipe_crc->lock);
		return;
	}
	spin_unlock(&pipe_crc->lock);

	drm_crtc_add_crc_entry(&crtc->base, true,
			       drm_crtc_accurate_vblank_count(&crtc->base),
			       crcs);
}
#else
/* No debugfs: CRC reporting is a no-op. */
static inline void
display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
			     enum pipe pipe,
			     u32 crc0, u32 crc1,
			     u32 crc2, u32 crc3,
			     u32 crc4) {}
#endif
337 
/*
 * Deliver the pending page-flip completion event for @pipe's CRTC, if
 * any. Consumes crtc_state->event under the DRM event_lock.
 */
static void flip_done_handler(struct drm_i915_private *i915,
			      enum pipe pipe)
{
	struct intel_crtc *crtc = intel_crtc_for_pipe(i915, pipe);
	struct drm_crtc_state *crtc_state = crtc->base.state;
	struct drm_pending_vblank_event *e = crtc_state->event;
	struct drm_device *dev = &i915->drm;
	unsigned long irqflags;

	spin_lock_irqsave(&dev->event_lock, irqflags);

	/* clear before sending so the event is only delivered once */
	crtc_state->event = NULL;

	drm_crtc_send_vblank_event(&crtc->base, e);

	spin_unlock_irqrestore(&dev->event_lock, irqflags);
}
355 
/* HSW+: only one CRC result register per pipe; the rest are reported as 0. */
static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	display_pipe_crc_irq_handler(dev_priv, pipe,
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_1_IVB(pipe)),
				     0, 0, 0, 0);
}
363 
/* IVB: read all five per-pipe CRC result registers and record them. */
static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	display_pipe_crc_irq_handler(dev_priv, pipe,
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_1_IVB(pipe)),
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_2_IVB(pipe)),
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_3_IVB(pipe)),
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_4_IVB(pipe)),
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_5_IVB(pipe)));
}
374 
/*
 * Pre-IVB: record the RGB CRC results, plus RES1/RES2 where the
 * hardware generation provides them (RES1 on display ver 3+, RES2 on
 * ver 5+ and G4X); missing results are reported as 0.
 */
static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	u32 res1, res2;

	if (DISPLAY_VER(dev_priv) >= 3)
		res1 = intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RES1_I915(pipe));
	else
		res1 = 0;

	if (DISPLAY_VER(dev_priv) >= 5 || IS_G4X(dev_priv))
		res2 = intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RES2_G4X(pipe));
	else
		res2 = 0;

	display_pipe_crc_irq_handler(dev_priv, pipe,
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RED(pipe)),
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_GREEN(pipe)),
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_BLUE(pipe)),
				     res1, res2);
}
396 
/*
 * Reset all PIPESTAT registers: ack every status bit (including FIFO
 * underrun) and clear the cached per-pipe status masks.
 */
void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		intel_uncore_write(&dev_priv->uncore, PIPESTAT(pipe),
				   PIPESTAT_INT_STATUS_MASK |
				   PIPE_FIFO_UNDERRUN_STATUS);

		dev_priv->pipestat_irq_mask[pipe] = 0;
	}
}
409 
/*
 * Read and acknowledge the per-pipe PIPESTAT status bits indicated by
 * @iir, storing the handled bits into @pipe_stats for the caller's
 * handler. Only bits we actually enabled (plus FIFO underrun) are
 * collected. The register write order below is deliberate — do not
 * reorder.
 */
void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
			   u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	enum pipe pipe;

	spin_lock(&dev_priv->irq_lock);

	/* bail if display interrupts are currently disabled */
	if (!dev_priv->display_irqs_enabled) {
		spin_unlock(&dev_priv->irq_lock);
		return;
	}

	for_each_pipe(dev_priv, pipe) {
		i915_reg_t reg;
		u32 status_mask, enable_mask, iir_bit = 0;

		/*
		 * PIPESTAT bits get signalled even when the interrupt is
		 * disabled with the mask bits, and some of the status bits do
		 * not generate interrupts at all (like the underrun bit). Hence
		 * we need to be careful that we only handle what we want to
		 * handle.
		 */

		/* fifo underruns are filterered in the underrun handler. */
		status_mask = PIPE_FIFO_UNDERRUN_STATUS;

		/* map this pipe to its event bit in the master IIR */
		switch (pipe) {
		default:
		case PIPE_A:
			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
			break;
		case PIPE_B:
			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
			break;
		case PIPE_C:
			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
			break;
		}
		if (iir & iir_bit)
			status_mask |= dev_priv->pipestat_irq_mask[pipe];

		if (!status_mask)
			continue;

		reg = PIPESTAT(pipe);
		pipe_stats[pipe] = intel_uncore_read(&dev_priv->uncore, reg) & status_mask;
		enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

		/*
		 * Clear the PIPE*STAT regs before the IIR
		 *
		 * Toggle the enable bits to make sure we get an
		 * edge in the ISR pipe event bit if we don't clear
		 * all the enabled status bits. Otherwise the edge
		 * triggered IIR on i965/g4x wouldn't notice that
		 * an interrupt is still pending.
		 */
		if (pipe_stats[pipe]) {
			intel_uncore_write(&dev_priv->uncore, reg, pipe_stats[pipe]);
			intel_uncore_write(&dev_priv->uncore, reg, enable_mask);
		}
	}
	spin_unlock(&dev_priv->irq_lock);
}
475 
/*
 * Gen2: dispatch previously acked per-pipe status bits — vblank, CRC
 * done, and FIFO underrun events.
 */
void i8xx_pipestat_irq_handler(struct drm_i915_private *dev_priv,
			       u16 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
			intel_handle_vblank(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}
}
492 
/*
 * Gen3/4-style dispatch of acked per-pipe status bits: vblank, CRC
 * done, FIFO underrun, plus OpRegion ASLE notification on a backlight
 * (BLC) event or an ASLE interrupt bit in @iir.
 */
void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv,
			       u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	bool blc_event = false;
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
			intel_handle_vblank(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
			blc_event = true;

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (blc_event || (iir & I915_ASLE_INTERRUPT))
		intel_opregion_asle_intr(dev_priv);
}
516 
/*
 * i965/g4x dispatch of acked per-pipe status bits. Differs from the
 * i915 variant by using the start-of-vblank status bit and by handling
 * the GMBUS interrupt (reported via pipe A's PIPESTAT).
 */
void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv,
			       u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	bool blc_event = false;
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
			intel_handle_vblank(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
			blc_event = true;

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (blc_event || (iir & I915_ASLE_INTERRUPT))
		intel_opregion_asle_intr(dev_priv);

	/* GMBUS status is reported via pipe A's PIPESTAT */
	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		intel_gmbus_irq_handler(dev_priv);
}
543 
/*
 * VLV/CHV dispatch of acked per-pipe status bits: vblank, plane flip
 * done, CRC done, FIFO underrun, and GMBUS (reported via pipe A).
 */
void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
				     u32 pipe_stats[I915_MAX_PIPES])
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
			intel_handle_vblank(dev_priv, pipe);

		if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV)
			flip_done_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	/* GMBUS status is reported via pipe A's PIPESTAT */
	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		intel_gmbus_irq_handler(dev_priv);
}
566 
567 static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
568 {
569 	enum pipe pipe;
570 	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
571 
572 	ibx_hpd_irq_handler(dev_priv, hotplug_trigger);
573 
574 	if (pch_iir & SDE_AUDIO_POWER_MASK) {
575 		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
576 			       SDE_AUDIO_POWER_SHIFT);
577 		drm_dbg(&dev_priv->drm, "PCH audio power change on port %d\n",
578 			port_name(port));
579 	}
580 
581 	if (pch_iir & SDE_AUX_MASK)
582 		intel_dp_aux_irq_handler(dev_priv);
583 
584 	if (pch_iir & SDE_GMBUS)
585 		intel_gmbus_irq_handler(dev_priv);
586 
587 	if (pch_iir & SDE_AUDIO_HDCP_MASK)
588 		drm_dbg(&dev_priv->drm, "PCH HDCP audio interrupt\n");
589 
590 	if (pch_iir & SDE_AUDIO_TRANS_MASK)
591 		drm_dbg(&dev_priv->drm, "PCH transcoder audio interrupt\n");
592 
593 	if (pch_iir & SDE_POISON)
594 		drm_err(&dev_priv->drm, "PCH poison interrupt\n");
595 
596 	if (pch_iir & SDE_FDI_MASK) {
597 		for_each_pipe(dev_priv, pipe)
598 			drm_dbg(&dev_priv->drm, "  pipe %c FDI IIR: 0x%08x\n",
599 				pipe_name(pipe),
600 				intel_uncore_read(&dev_priv->uncore, FDI_RX_IIR(pipe)));
601 	}
602 
603 	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
604 		drm_dbg(&dev_priv->drm, "PCH transcoder CRC done interrupt\n");
605 
606 	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
607 		drm_dbg(&dev_priv->drm,
608 			"PCH transcoder CRC error interrupt\n");
609 
610 	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
611 		intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A);
612 
613 	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
614 		intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_B);
615 }
616 
/*
 * Handle IVB/HSW error interrupts (GEN7_ERR_INT): poison, per-pipe FIFO
 * underrun, and per-pipe CRC done events. The register is write-1-to-
 * clear and is acked at the end with the bits that were read.
 */
static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
{
	u32 err_int = intel_uncore_read(&dev_priv->uncore, GEN7_ERR_INT);
	enum pipe pipe;

	if (err_int & ERR_INT_POISON)
		drm_err(&dev_priv->drm, "Poison interrupt\n");

	for_each_pipe(dev_priv, pipe) {
		if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
			/* IVB and HSW latch CRC results in different registers */
			if (IS_IVYBRIDGE(dev_priv))
				ivb_pipe_crc_irq_handler(dev_priv, pipe);
			else
				hsw_pipe_crc_irq_handler(dev_priv, pipe);
		}
	}

	intel_uncore_write(&dev_priv->uncore, GEN7_ERR_INT, err_int);
}
639 
/*
 * Handle CPT south error interrupts (SERR_INT): PCH poison and
 * per-transcoder FIFO underruns; ack the bits that were read.
 */
static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
{
	u32 serr_int = intel_uncore_read(&dev_priv->uncore, SERR_INT);
	enum pipe pipe;

	if (serr_int & SERR_INT_POISON)
		drm_err(&dev_priv->drm, "PCH poison interrupt\n");

	for_each_pipe(dev_priv, pipe)
		if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe))
			intel_pch_fifo_underrun_irq_handler(dev_priv, pipe);

	intel_uncore_write(&dev_priv->uncore, SERR_INT, serr_int);
}
654 
/*
 * Handle south display engine (CPT+ PCH) interrupts: hotplug, AUX,
 * GMBUS, audio content protection, FDI and south error events reported
 * in @pch_iir.
 */
static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
	enum pipe pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;

	ibx_hpd_irq_handler(dev_priv, hotplug_trigger);

	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
			       SDE_AUDIO_POWER_SHIFT_CPT);
		drm_dbg(&dev_priv->drm, "PCH audio power change on port %c\n",
			port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK_CPT)
		intel_dp_aux_irq_handler(dev_priv);

	if (pch_iir & SDE_GMBUS_CPT)
		intel_gmbus_irq_handler(dev_priv);

	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
		drm_dbg(&dev_priv->drm, "Audio CP request interrupt\n");

	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
		drm_dbg(&dev_priv->drm, "Audio CP change interrupt\n");

	if (pch_iir & SDE_FDI_MASK_CPT) {
		for_each_pipe(dev_priv, pipe)
			drm_dbg(&dev_priv->drm, "  pipe %c FDI IIR: 0x%08x\n",
				pipe_name(pipe),
				intel_uncore_read(&dev_priv->uncore, FDI_RX_IIR(pipe)));
	}

	if (pch_iir & SDE_ERROR_CPT)
		cpt_serr_int_handler(dev_priv);
}
691 
/*
 * Top-level ILK/SNB display interrupt handler for the DE IIR bits:
 * hotplug, AUX, GSE (opregion), poison, per-pipe vblank / flip done /
 * underrun / CRC events, chained PCH interrupts and (on ILK) PCU RPS
 * events.
 */
void ilk_display_irq_handler(struct drm_i915_private *dev_priv, u32 de_iir)
{
	enum pipe pipe;
	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;

	if (hotplug_trigger)
		ilk_hpd_irq_handler(dev_priv, hotplug_trigger);

	if (de_iir & DE_AUX_CHANNEL_A)
		intel_dp_aux_irq_handler(dev_priv);

	if (de_iir & DE_GSE)
		intel_opregion_asle_intr(dev_priv);

	if (de_iir & DE_POISON)
		drm_err(&dev_priv->drm, "Poison interrupt\n");

	for_each_pipe(dev_priv, pipe) {
		if (de_iir & DE_PIPE_VBLANK(pipe))
			intel_handle_vblank(dev_priv, pipe);

		if (de_iir & DE_PLANE_FLIP_DONE(pipe))
			flip_done_handler(dev_priv, pipe);

		if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		if (de_iir & DE_PIPE_CRC_DONE(pipe))
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
	}

	/* check event from PCH */
	if (de_iir & DE_PCH_EVENT) {
		u32 pch_iir = intel_uncore_read(&dev_priv->uncore, SDEIIR);

		if (HAS_PCH_CPT(dev_priv))
			cpt_irq_handler(dev_priv, pch_iir);
		else
			ibx_irq_handler(dev_priv, pch_iir);

		/* should clear PCH hotplug event before clear CPU irq */
		intel_uncore_write(&dev_priv->uncore, SDEIIR, pch_iir);
	}

	/* ILK only: PCU events feed the gen5 RPS (GPU frequency) logic */
	if (DISPLAY_VER(dev_priv) == 5 && de_iir & DE_PCU_EVENT)
		gen5_rps_irq_handler(&to_gt(dev_priv)->rps);
}
739 
/*
 * Top-level IVB/HSW display interrupt handler for the DE IIR bits:
 * hotplug, error interrupts, AUX, GSE (opregion), per-pipe vblank and
 * flip done events, and chained PCH interrupts.
 */
void ivb_display_irq_handler(struct drm_i915_private *dev_priv, u32 de_iir)
{
	enum pipe pipe;
	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;

	if (hotplug_trigger)
		ilk_hpd_irq_handler(dev_priv, hotplug_trigger);

	if (de_iir & DE_ERR_INT_IVB)
		ivb_err_int_handler(dev_priv);

	if (de_iir & DE_AUX_CHANNEL_A_IVB)
		intel_dp_aux_irq_handler(dev_priv);

	if (de_iir & DE_GSE_IVB)
		intel_opregion_asle_intr(dev_priv);

	for_each_pipe(dev_priv, pipe) {
		if (de_iir & DE_PIPE_VBLANK_IVB(pipe))
			intel_handle_vblank(dev_priv, pipe);

		if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe))
			flip_done_handler(dev_priv, pipe);
	}

	/* check event from PCH */
	if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) {
		u32 pch_iir = intel_uncore_read(&dev_priv->uncore, SDEIIR);

		cpt_irq_handler(dev_priv, pch_iir);

		/* clear PCH hotplug event before clear CPU irq */
		intel_uncore_write(&dev_priv->uncore, SDEIIR, pch_iir);
	}
}
775 
/*
 * Return the DE port IIR bits that correspond to AUX channel interrupts
 * for this platform's display version.
 *
 * NOTE(review): on display ver >= 14 only DDIA/DDIB are listed —
 * presumably the type-C AUX interrupts are routed elsewhere on those
 * platforms; confirm against the interrupt routing before extending.
 */
static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
{
	u32 mask;

	if (DISPLAY_VER(dev_priv) >= 14)
		return TGL_DE_PORT_AUX_DDIA |
			TGL_DE_PORT_AUX_DDIB;
	else if (DISPLAY_VER(dev_priv) >= 13)
		return TGL_DE_PORT_AUX_DDIA |
			TGL_DE_PORT_AUX_DDIB |
			TGL_DE_PORT_AUX_DDIC |
			XELPD_DE_PORT_AUX_DDID |
			XELPD_DE_PORT_AUX_DDIE |
			TGL_DE_PORT_AUX_USBC1 |
			TGL_DE_PORT_AUX_USBC2 |
			TGL_DE_PORT_AUX_USBC3 |
			TGL_DE_PORT_AUX_USBC4;
	else if (DISPLAY_VER(dev_priv) >= 12)
		return TGL_DE_PORT_AUX_DDIA |
			TGL_DE_PORT_AUX_DDIB |
			TGL_DE_PORT_AUX_DDIC |
			TGL_DE_PORT_AUX_USBC1 |
			TGL_DE_PORT_AUX_USBC2 |
			TGL_DE_PORT_AUX_USBC3 |
			TGL_DE_PORT_AUX_USBC4 |
			TGL_DE_PORT_AUX_USBC5 |
			TGL_DE_PORT_AUX_USBC6;

	/* display ver <= 11: build the mask up incrementally */
	mask = GEN8_AUX_CHANNEL_A;
	if (DISPLAY_VER(dev_priv) >= 9)
		mask |= GEN9_AUX_CHANNEL_B |
			GEN9_AUX_CHANNEL_C |
			GEN9_AUX_CHANNEL_D;

	if (DISPLAY_VER(dev_priv) == 11) {
		mask |= ICL_AUX_CHANNEL_F;
		mask |= ICL_AUX_CHANNEL_E;
	}

	return mask;
}
817 
818 static u32 gen8_de_pipe_fault_mask(struct drm_i915_private *dev_priv)
819 {
820 	if (DISPLAY_VER(dev_priv) >= 13 || HAS_D12_PLANE_MINIMIZATION(dev_priv))
821 		return RKL_DE_PIPE_IRQ_FAULT_ERRORS;
822 	else if (DISPLAY_VER(dev_priv) >= 11)
823 		return GEN11_DE_PIPE_IRQ_FAULT_ERRORS;
824 	else if (DISPLAY_VER(dev_priv) >= 9)
825 		return GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
826 	else
827 		return GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
828 }
829 
/*
 * Handle GEN8+ DE misc interrupts: GSE (opregion) and eDP PSR events.
 * Logs an error if none of the known sources accounted for the IIR.
 */
static void
gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
{
	bool found = false;

	if (iir & GEN8_DE_MISC_GSE) {
		intel_opregion_asle_intr(dev_priv);
		found = true;
	}

	if (iir & GEN8_DE_EDP_PSR) {
		struct intel_encoder *encoder;
		u32 psr_iir;
		i915_reg_t iir_reg;

		for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
			struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

			/* gen12+ has a PSR IIR per transcoder; older has a single EDP_PSR_IIR */
			if (DISPLAY_VER(dev_priv) >= 12)
				iir_reg = TRANS_PSR_IIR(intel_dp->psr.transcoder);
			else
				iir_reg = EDP_PSR_IIR;

			/* rmw with 0/0 reads the IIR and writes it back to ack it */
			psr_iir = intel_uncore_rmw(&dev_priv->uncore, iir_reg, 0, 0);

			if (psr_iir)
				found = true;

			intel_psr_irq_handler(intel_dp, psr_iir);

			/* prior GEN12 only have one EDP PSR */
			if (DISPLAY_VER(dev_priv) < 12)
				break;
		}
	}

	if (!found)
		drm_err(&dev_priv->drm, "Unexpected DE Misc interrupt\n");
}
869 
/*
 * Handle a DSI TE (tearing effect) interrupt on gen11+: resolve which
 * DSI transcoder/pipe the TE belongs to (accounting for dual-link),
 * verify the transcoder is in command mode, report a vblank on the
 * corresponding pipe, and ack the TE bit in the DSI IIR.
 */
static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv,
					   u32 te_trigger)
{
	enum pipe pipe = INVALID_PIPE;
	enum transcoder dsi_trans;
	enum port port;
	u32 val, tmp;

	/*
	 * Incase of dual link, TE comes from DSI_1
	 * this is to check if dual link is enabled
	 */
	val = intel_uncore_read(&dev_priv->uncore, TRANS_DDI_FUNC_CTL2(TRANSCODER_DSI_0));
	val &= PORT_SYNC_MODE_ENABLE;

	/*
	 * if dual link is enabled, then read DSI_0
	 * transcoder registers
	 */
	port = ((te_trigger & DSI1_TE && val) || (te_trigger & DSI0_TE)) ?
						  PORT_A : PORT_B;
	dsi_trans = (port == PORT_A) ? TRANSCODER_DSI_0 : TRANSCODER_DSI_1;

	/* Check if DSI configured in command mode */
	val = intel_uncore_read(&dev_priv->uncore, DSI_TRANS_FUNC_CONF(dsi_trans));
	val = val & OP_MODE_MASK;

	if (val != CMD_MODE_NO_GATE && val != CMD_MODE_TE_GATE) {
		drm_err(&dev_priv->drm, "DSI trancoder not configured in command mode\n");
		return;
	}

	/* Get PIPE for handling VBLANK event */
	val = intel_uncore_read(&dev_priv->uncore, TRANS_DDI_FUNC_CTL(dsi_trans));
	switch (val & TRANS_DDI_EDP_INPUT_MASK) {
	case TRANS_DDI_EDP_INPUT_A_ON:
		pipe = PIPE_A;
		break;
	case TRANS_DDI_EDP_INPUT_B_ONOFF:
		pipe = PIPE_B;
		break;
	case TRANS_DDI_EDP_INPUT_C_ONOFF:
		pipe = PIPE_C;
		break;
	default:
		drm_err(&dev_priv->drm, "Invalid PIPE\n");
		return;
	}

	intel_handle_vblank(dev_priv, pipe);

	/* clear TE in dsi IIR */
	port = (te_trigger & DSI1_TE) ? PORT_B : PORT_A;
	/* rmw with 0/0 reads the IIR and writes it back, acking the bit */
	tmp = intel_uncore_rmw(&dev_priv->uncore, DSI_INTR_IDENT_REG(port), 0, 0);
}
925 
926 static u32 gen8_de_pipe_flip_done_mask(struct drm_i915_private *i915)
927 {
928 	if (DISPLAY_VER(i915) >= 9)
929 		return GEN9_PIPE_PLANE1_FLIP_DONE;
930 	else
931 		return GEN8_PIPE_PRIMARY_FLIP_DONE;
932 }
933 
934 u32 gen8_de_pipe_underrun_mask(struct drm_i915_private *dev_priv)
935 {
936 	u32 mask = GEN8_PIPE_FIFO_UNDERRUN;
937 
938 	if (DISPLAY_VER(dev_priv) >= 13)
939 		mask |= XELPD_PIPE_SOFT_UNDERRUN |
940 			XELPD_PIPE_HARD_UNDERRUN;
941 
942 	return mask;
943 }
944 
/*
 * Read and acknowledge the PCH (SDEIIR) and, on MTP+, the chained PICA
 * interrupt status. Outputs are returned via @pch_iir and @pica_iir;
 * both are 0 when no interrupt was pending.
 */
static void gen8_read_and_ack_pch_irqs(struct drm_i915_private *i915, u32 *pch_iir, u32 *pica_iir)
{
	u32 pica_ier = 0;

	*pica_iir = 0;
	*pch_iir = intel_de_read(i915, SDEIIR);
	if (!*pch_iir)
		return;

	/**
	 * PICA IER must be disabled/re-enabled around clearing PICA IIR and
	 * SDEIIR, to avoid losing PICA IRQs and to ensure that such IRQs set
	 * their flags both in the PICA and SDE IIR.
	 */
	if (*pch_iir & SDE_PICAINTERRUPT) {
		drm_WARN_ON(&i915->drm, INTEL_PCH_TYPE(i915) < PCH_MTP);

		/* disable PICA IER (rmw clears all bits), saving the old value */
		pica_ier = intel_de_rmw(i915, PICAINTERRUPT_IER, ~0, 0);
		*pica_iir = intel_de_read(i915, PICAINTERRUPT_IIR);
		intel_de_write(i915, PICAINTERRUPT_IIR, *pica_iir);
	}

	intel_de_write(i915, SDEIIR, *pch_iir);

	/* restore PICA IER only if we disabled it above */
	if (pica_ier)
		intel_de_write(i915, PICAINTERRUPT_IER, pica_ier);
}
972 
/*
 * Display engine ("DE") interrupt handler for GEN8+.
 *
 * Inspects @master_ctl for each pending DE interrupt source (MISC, HPD on
 * display version 11+, PORT, the per-pipe sources and the PCH), reads and
 * acks the matching IIR register, and dispatches to the per-source
 * handlers.
 */
void gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
{
	u32 iir;
	enum pipe pipe;

	drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_DISPLAY(dev_priv));

	if (master_ctl & GEN8_DE_MISC_IRQ) {
		iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_MISC_IIR);
		if (iir) {
			/* Ack first, then handle the events we latched. */
			intel_uncore_write(&dev_priv->uncore, GEN8_DE_MISC_IIR, iir);
			gen8_de_misc_irq_handler(dev_priv, iir);
		} else {
			drm_err_ratelimited(&dev_priv->drm,
					    "The master control interrupt lied (DE MISC)!\n");
		}
	}

	/* A dedicated DE HPD IIR exists on display version 11+ only. */
	if (DISPLAY_VER(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) {
		iir = intel_uncore_read(&dev_priv->uncore, GEN11_DE_HPD_IIR);
		if (iir) {
			intel_uncore_write(&dev_priv->uncore, GEN11_DE_HPD_IIR, iir);
			gen11_hpd_irq_handler(dev_priv, iir);
		} else {
			drm_err_ratelimited(&dev_priv->drm,
					    "The master control interrupt lied, (DE HPD)!\n");
		}
	}

	if (master_ctl & GEN8_DE_PORT_IRQ) {
		iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PORT_IIR);
		if (iir) {
			/* Track whether any sub-handler claimed the IIR bits. */
			bool found = false;

			intel_uncore_write(&dev_priv->uncore, GEN8_DE_PORT_IIR, iir);

			if (iir & gen8_de_port_aux_mask(dev_priv)) {
				intel_dp_aux_irq_handler(dev_priv);
				found = true;
			}

			/* Hotplug bits live in DE PORT on BXT/GLK and BDW. */
			if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
				u32 hotplug_trigger = iir & BXT_DE_PORT_HOTPLUG_MASK;

				if (hotplug_trigger) {
					bxt_hpd_irq_handler(dev_priv, hotplug_trigger);
					found = true;
				}
			} else if (IS_BROADWELL(dev_priv)) {
				u32 hotplug_trigger = iir & BDW_DE_PORT_HOTPLUG_MASK;

				if (hotplug_trigger) {
					ilk_hpd_irq_handler(dev_priv, hotplug_trigger);
					found = true;
				}
			}

			/* GMBUS is a DE PORT source on BXT/GLK. */
			if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
			    (iir & BXT_DE_PORT_GMBUS)) {
				intel_gmbus_irq_handler(dev_priv);
				found = true;
			}

			/* DSI TE events on display version 11+. */
			if (DISPLAY_VER(dev_priv) >= 11) {
				u32 te_trigger = iir & (DSI0_TE | DSI1_TE);

				if (te_trigger) {
					gen11_dsi_te_interrupt_handler(dev_priv, te_trigger);
					found = true;
				}
			}

			if (!found)
				drm_err_ratelimited(&dev_priv->drm,
						    "Unexpected DE Port interrupt\n");
		} else {
			drm_err_ratelimited(&dev_priv->drm,
					    "The master control interrupt lied (DE PORT)!\n");
		}
	}

	for_each_pipe(dev_priv, pipe) {
		u32 fault_errors;

		if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
			continue;

		iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PIPE_IIR(pipe));
		if (!iir) {
			drm_err_ratelimited(&dev_priv->drm,
					    "The master control interrupt lied (DE PIPE)!\n");
			continue;
		}

		/* Ack, then dispatch each latched per-pipe event. */
		intel_uncore_write(&dev_priv->uncore, GEN8_DE_PIPE_IIR(pipe), iir);

		if (iir & GEN8_PIPE_VBLANK)
			intel_handle_vblank(dev_priv, pipe);

		if (iir & gen8_de_pipe_flip_done_mask(dev_priv))
			flip_done_handler(dev_priv, pipe);

		if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
			hsw_pipe_crc_irq_handler(dev_priv, pipe);

		if (iir & gen8_de_pipe_underrun_mask(dev_priv))
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		fault_errors = iir & gen8_de_pipe_fault_mask(dev_priv);
		if (fault_errors)
			drm_err_ratelimited(&dev_priv->drm,
					    "Fault errors on pipe %c: 0x%08x\n",
					    pipe_name(pipe),
					    fault_errors);
	}

	if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) &&
	    master_ctl & GEN8_DE_PCH_IRQ) {
		u32 pica_iir;

		/*
		 * FIXME(BDW): Assume for now that the new interrupt handling
		 * scheme also closed the SDE interrupt handling race we've seen
		 * on older pch-split platforms. But this needs testing.
		 */
		gen8_read_and_ack_pch_irqs(dev_priv, &iir, &pica_iir);
		if (iir) {
			if (pica_iir)
				xelpdp_pica_irq_handler(dev_priv, pica_iir);

			/* Route to the PCH handler matching the PCH generation. */
			if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
				icp_irq_handler(dev_priv, iir);
			else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
				spt_irq_handler(dev_priv, iir);
			else
				cpt_irq_handler(dev_priv, iir);
		} else {
			/*
			 * Like on previous PCH there seems to be something
			 * fishy going on with forwarding PCH interrupts.
			 */
			drm_dbg(&dev_priv->drm,
				"The master control interrupt lied (SDE)!\n");
		}
	}
}
1119 
1120 u32 gen11_gu_misc_irq_ack(struct drm_i915_private *i915, const u32 master_ctl)
1121 {
1122 	void __iomem * const regs = i915->uncore.regs;
1123 	u32 iir;
1124 
1125 	if (!(master_ctl & GEN11_GU_MISC_IRQ))
1126 		return 0;
1127 
1128 	iir = raw_reg_read(regs, GEN11_GU_MISC_IIR);
1129 	if (likely(iir))
1130 		raw_reg_write(regs, GEN11_GU_MISC_IIR, iir);
1131 
1132 	return iir;
1133 }
1134 
1135 void gen11_gu_misc_irq_handler(struct drm_i915_private *i915, const u32 iir)
1136 {
1137 	if (iir & GEN11_GU_MISC_GSE)
1138 		intel_opregion_asle_intr(i915);
1139 }
1140 
/*
 * Display interrupt entry point for GEN11+: latch GEN11_DISPLAY_INT_CTL,
 * disable it while gen8_de_irq_handler() runs, then re-enable it.
 */
void gen11_display_irq_handler(struct drm_i915_private *i915)
{
	void __iomem * const regs = i915->uncore.regs;
	const u32 disp_ctl = raw_reg_read(regs, GEN11_DISPLAY_INT_CTL);

	disable_rpm_wakeref_asserts(&i915->runtime_pm);
	/*
	 * GEN11_DISPLAY_INT_CTL has same format as GEN8_MASTER_IRQ
	 * for the display related bits.
	 */
	raw_reg_write(regs, GEN11_DISPLAY_INT_CTL, 0x0);
	gen8_de_irq_handler(i915, disp_ctl);
	raw_reg_write(regs, GEN11_DISPLAY_INT_CTL,
		      GEN11_DISPLAY_IRQ_ENABLE);

	enable_rpm_wakeref_asserts(&i915->runtime_pm);
}
1158 
1159 /* Called from drm generic code, passed 'crtc' which
1160  * we use as a pipe index
1161  */
1162 int i8xx_enable_vblank(struct drm_crtc *crtc)
1163 {
1164 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
1165 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
1166 	unsigned long irqflags;
1167 
1168 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1169 	i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
1170 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1171 
1172 	return 0;
1173 }
1174 
1175 int i915gm_enable_vblank(struct drm_crtc *crtc)
1176 {
1177 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
1178 
1179 	/*
1180 	 * Vblank interrupts fail to wake the device up from C2+.
1181 	 * Disabling render clock gating during C-states avoids
1182 	 * the problem. There is a small power cost so we do this
1183 	 * only when vblank interrupts are actually enabled.
1184 	 */
1185 	if (dev_priv->vblank_enabled++ == 0)
1186 		intel_uncore_write(&dev_priv->uncore, SCPD0, _MASKED_BIT_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
1187 
1188 	return i8xx_enable_vblank(crtc);
1189 }
1190 
1191 int i965_enable_vblank(struct drm_crtc *crtc)
1192 {
1193 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
1194 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
1195 	unsigned long irqflags;
1196 
1197 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1198 	i915_enable_pipestat(dev_priv, pipe,
1199 			     PIPE_START_VBLANK_INTERRUPT_STATUS);
1200 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1201 
1202 	return 0;
1203 }
1204 
1205 int ilk_enable_vblank(struct drm_crtc *crtc)
1206 {
1207 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
1208 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
1209 	unsigned long irqflags;
1210 	u32 bit = DISPLAY_VER(dev_priv) >= 7 ?
1211 		DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
1212 
1213 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1214 	ilk_enable_display_irq(dev_priv, bit);
1215 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1216 
1217 	/* Even though there is no DMC, frame counter can get stuck when
1218 	 * PSR is active as no frames are generated.
1219 	 */
1220 	if (HAS_PSR(dev_priv))
1221 		drm_crtc_vblank_restore(crtc);
1222 
1223 	return 0;
1224 }
1225 
1226 static bool gen11_dsi_configure_te(struct intel_crtc *intel_crtc,
1227 				   bool enable)
1228 {
1229 	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
1230 	enum port port;
1231 
1232 	if (!(intel_crtc->mode_flags &
1233 	    (I915_MODE_FLAG_DSI_USE_TE1 | I915_MODE_FLAG_DSI_USE_TE0)))
1234 		return false;
1235 
1236 	/* for dual link cases we consider TE from slave */
1237 	if (intel_crtc->mode_flags & I915_MODE_FLAG_DSI_USE_TE1)
1238 		port = PORT_B;
1239 	else
1240 		port = PORT_A;
1241 
1242 	intel_uncore_rmw(&dev_priv->uncore, DSI_INTR_MASK_REG(port), DSI_TE_EVENT,
1243 			 enable ? 0 : DSI_TE_EVENT);
1244 
1245 	intel_uncore_rmw(&dev_priv->uncore, DSI_INTR_IDENT_REG(port), 0, 0);
1246 
1247 	return true;
1248 }
1249 
1250 int bdw_enable_vblank(struct drm_crtc *_crtc)
1251 {
1252 	struct intel_crtc *crtc = to_intel_crtc(_crtc);
1253 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1254 	enum pipe pipe = crtc->pipe;
1255 	unsigned long irqflags;
1256 
1257 	if (gen11_dsi_configure_te(crtc, true))
1258 		return 0;
1259 
1260 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1261 	bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
1262 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1263 
1264 	/* Even if there is no DMC, frame counter can get stuck when
1265 	 * PSR is active as no frames are generated, so check only for PSR.
1266 	 */
1267 	if (HAS_PSR(dev_priv))
1268 		drm_crtc_vblank_restore(&crtc->base);
1269 
1270 	return 0;
1271 }
1272 
1273 /* Called from drm generic code, passed 'crtc' which
1274  * we use as a pipe index
1275  */
1276 void i8xx_disable_vblank(struct drm_crtc *crtc)
1277 {
1278 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
1279 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
1280 	unsigned long irqflags;
1281 
1282 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1283 	i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
1284 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1285 }
1286 
1287 void i915gm_disable_vblank(struct drm_crtc *crtc)
1288 {
1289 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
1290 
1291 	i8xx_disable_vblank(crtc);
1292 
1293 	if (--dev_priv->vblank_enabled == 0)
1294 		intel_uncore_write(&dev_priv->uncore, SCPD0, _MASKED_BIT_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
1295 }
1296 
1297 void i965_disable_vblank(struct drm_crtc *crtc)
1298 {
1299 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
1300 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
1301 	unsigned long irqflags;
1302 
1303 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1304 	i915_disable_pipestat(dev_priv, pipe,
1305 			      PIPE_START_VBLANK_INTERRUPT_STATUS);
1306 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1307 }
1308 
1309 void ilk_disable_vblank(struct drm_crtc *crtc)
1310 {
1311 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
1312 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
1313 	unsigned long irqflags;
1314 	u32 bit = DISPLAY_VER(dev_priv) >= 7 ?
1315 		DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
1316 
1317 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1318 	ilk_disable_display_irq(dev_priv, bit);
1319 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1320 }
1321 
1322 void bdw_disable_vblank(struct drm_crtc *_crtc)
1323 {
1324 	struct intel_crtc *crtc = to_intel_crtc(_crtc);
1325 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1326 	enum pipe pipe = crtc->pipe;
1327 	unsigned long irqflags;
1328 
1329 	if (gen11_dsi_configure_te(crtc, false))
1330 		return;
1331 
1332 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1333 	bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
1334 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1335 }
1336 
/*
 * Reset all VLV/CHV display interrupt state: clear DPINVGTT status,
 * disable hotplug detection, reset the pipestat registers and the VLV_
 * IMR/IIR/IER trio, leaving irq_mask with everything masked.
 */
void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	/* Clear the GTT invalidation status bits (CHV has a wider mask). */
	if (IS_CHERRYVIEW(dev_priv))
		intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
	else
		intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_VLV);

	i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
	/* Read-modify-write of PORT_HOTPLUG_STAT with no bit changes. */
	intel_uncore_rmw(uncore, PORT_HOTPLUG_STAT, 0, 0);

	i9xx_pipestat_irq_reset(dev_priv);

	GEN3_IRQ_RESET(uncore, VLV_);
	/* Everything masked until vlv_display_irq_postinstall(). */
	dev_priv->irq_mask = ~0u;
}
1354 
/*
 * Enable the VLV/CHV display interrupts: per-pipe CRC pipestats, GMBUS
 * via pipe A's pipestat, and the pipe event / LPE / port sources in the
 * VLV_ IMR/IER pair. Expects a freshly reset state (irq_mask == ~0u).
 */
void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	u32 pipestat_mask;
	u32 enable_mask;
	enum pipe pipe;

	pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS;

	/* GMBUS status is reported through pipe A's pipestat register. */
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	for_each_pipe(dev_priv, pipe)
		i915_enable_pipestat(dev_priv, pipe, pipestat_mask);

	enable_mask = I915_DISPLAY_PORT_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_LPE_PIPE_A_INTERRUPT |
		I915_LPE_PIPE_B_INTERRUPT;

	/* CHV has a third pipe. */
	if (IS_CHERRYVIEW(dev_priv))
		enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT |
			I915_LPE_PIPE_C_INTERRUPT;

	/* Catch a postinstall without a preceding vlv_display_irq_reset(). */
	drm_WARN_ON(&dev_priv->drm, dev_priv->irq_mask != ~0u);

	dev_priv->irq_mask = ~enable_mask;

	GEN3_IRQ_INIT(uncore, VLV_, dev_priv->irq_mask, enable_mask);
}
1385 
/* Reset all GEN8 display interrupt registers: PSR, per-pipe, port, misc. */
void gen8_display_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	enum pipe pipe;

	if (!HAS_DISPLAY(dev_priv))
		return;

	/* Mask and clear all EDP PSR interrupts. */
	intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
	intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);

	/* Only touch pipes whose power domain is currently enabled. */
	for_each_pipe(dev_priv, pipe)
		if (intel_display_power_is_enabled(dev_priv,
						   POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);

	GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
	GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);
}
1405 
/*
 * Reset all GEN11+ display interrupt registers: the display master
 * enable, PSR (per-transcoder on ver 12+), per-pipe, port, misc, HPD
 * (PICA on ver 14+) and, on ICP+ PCHs, the SDE set.
 */
void gen11_display_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	enum pipe pipe;
	u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
		BIT(TRANSCODER_C) | BIT(TRANSCODER_D);

	if (!HAS_DISPLAY(dev_priv))
		return;

	/* Disable the display interrupt master first. */
	intel_uncore_write(uncore, GEN11_DISPLAY_INT_CTL, 0);

	if (DISPLAY_VER(dev_priv) >= 12) {
		enum transcoder trans;

		/* PSR interrupts are per transcoder on display version 12+. */
		for_each_cpu_transcoder_masked(dev_priv, trans, trans_mask) {
			enum intel_display_power_domain domain;

			/* Skip transcoders whose power domain is down. */
			domain = POWER_DOMAIN_TRANSCODER(trans);
			if (!intel_display_power_is_enabled(dev_priv, domain))
				continue;

			intel_uncore_write(uncore, TRANS_PSR_IMR(trans), 0xffffffff);
			intel_uncore_write(uncore, TRANS_PSR_IIR(trans), 0xffffffff);
		}
	} else {
		intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
		intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
	}

	/* Only touch pipes whose power domain is currently enabled. */
	for_each_pipe(dev_priv, pipe)
		if (intel_display_power_is_enabled(dev_priv,
						   POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);

	GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
	GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);

	/* HPD lives in the PICA block on display version 14+. */
	if (DISPLAY_VER(dev_priv) >= 14)
		GEN3_IRQ_RESET(uncore, PICAINTERRUPT_);
	else
		GEN3_IRQ_RESET(uncore, GEN11_DE_HPD_);

	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
		GEN3_IRQ_RESET(uncore, SDE);
}
1452 
/*
 * Program the DE pipe interrupt registers for the pipes in @pipe_mask;
 * called after their power well has been enabled. Re-applies the cached
 * de_irq_mask[] and enables vblank, underrun and flip-done in the IER on
 * top of the unmasked bits. No-op if driver irqs are not enabled.
 */
void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
				     u8 pipe_mask)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 extra_ier = GEN8_PIPE_VBLANK |
		gen8_de_pipe_underrun_mask(dev_priv) |
		gen8_de_pipe_flip_done_mask(dev_priv);
	enum pipe pipe;

	spin_lock_irq(&dev_priv->irq_lock);

	if (!intel_irqs_enabled(dev_priv)) {
		spin_unlock_irq(&dev_priv->irq_lock);
		return;
	}

	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
		GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
				  dev_priv->de_irq_mask[pipe],
				  ~dev_priv->de_irq_mask[pipe] | extra_ier);

	spin_unlock_irq(&dev_priv->irq_lock);
}
1476 
/*
 * Reset the DE pipe interrupt registers for the pipes in @pipe_mask;
 * called before their power well is disabled. Afterwards synchronize
 * irqs so no handler is still processing those pipes' interrupts.
 * No-op if driver irqs are not enabled.
 */
void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
				     u8 pipe_mask)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	enum pipe pipe;

	spin_lock_irq(&dev_priv->irq_lock);

	if (!intel_irqs_enabled(dev_priv)) {
		spin_unlock_irq(&dev_priv->irq_lock);
		return;
	}

	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
		GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);

	spin_unlock_irq(&dev_priv->irq_lock);

	/* make sure we're done processing display irqs */
	intel_synchronize_irq(dev_priv);
}
1498 
1499 /*
1500  * SDEIER is also touched by the interrupt handler to work around missed PCH
1501  * interrupts. Hence we can't update it after the interrupt handler is enabled -
1502  * instead we unconditionally enable all PCH interrupt sources here, but then
1503  * only unmask them as needed with SDEIMR.
1504  *
1505  * Note that we currently do this after installing the interrupt handler,
1506  * but before we enable the master interrupt. That should be sufficient
1507  * to avoid races with the irq handler, assuming we have MSI. Shared legacy
1508  * interrupts could still race.
1509  */
1510 void ibx_irq_postinstall(struct drm_i915_private *dev_priv)
1511 {
1512 	struct intel_uncore *uncore = &dev_priv->uncore;
1513 	u32 mask;
1514 
1515 	if (HAS_PCH_NOP(dev_priv))
1516 		return;
1517 
1518 	if (HAS_PCH_IBX(dev_priv))
1519 		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
1520 	else if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
1521 		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
1522 	else
1523 		mask = SDE_GMBUS_CPT;
1524 
1525 	GEN3_IRQ_INIT(uncore, SDE, ~mask, 0xffffffff);
1526 }
1527 
1528 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
1529 {
1530 	lockdep_assert_held(&dev_priv->irq_lock);
1531 
1532 	if (dev_priv->display_irqs_enabled)
1533 		return;
1534 
1535 	dev_priv->display_irqs_enabled = true;
1536 
1537 	if (intel_irqs_enabled(dev_priv)) {
1538 		vlv_display_irq_reset(dev_priv);
1539 		vlv_display_irq_postinstall(dev_priv);
1540 	}
1541 }
1542 
/*
 * Mark VLV/CHV display interrupts as disabled and, if driver interrupts
 * are currently enabled, reset the display interrupt hardware.
 * Caller must hold irq_lock.
 */
void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
{
	lockdep_assert_held(&dev_priv->irq_lock);

	if (!dev_priv->display_irqs_enabled)
		return;

	dev_priv->display_irqs_enabled = false;

	if (intel_irqs_enabled(dev_priv))
		vlv_display_irq_reset(dev_priv);
}
1555 
/*
 * Program the GEN8+ display engine interrupt registers: per-pipe, port,
 * misc and (display versions 11-13) the dedicated HPD IMR/IER pairs.
 * Only pipes whose power domain is enabled are programmed.
 */
void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	u32 de_pipe_masked = gen8_de_pipe_fault_mask(dev_priv) |
		GEN8_PIPE_CDCLK_CRC_DONE;
	u32 de_pipe_enables;
	u32 de_port_masked = gen8_de_port_aux_mask(dev_priv);
	u32 de_port_enables;
	u32 de_misc_masked = GEN8_DE_EDP_PSR;
	u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
		BIT(TRANSCODER_C) | BIT(TRANSCODER_D);
	enum pipe pipe;

	if (!HAS_DISPLAY(dev_priv))
		return;

	/* GSE is a DE MISC source only up to display version 10. */
	if (DISPLAY_VER(dev_priv) <= 10)
		de_misc_masked |= GEN8_DE_MISC_GSE;

	/* GMBUS is a DE PORT source on BXT/GLK. */
	if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
		de_port_masked |= BXT_DE_PORT_GMBUS;

	/* Unmask DSI TE events if the VBT reports a DSI panel. */
	if (DISPLAY_VER(dev_priv) >= 11) {
		enum port port;

		if (intel_bios_is_dsi_present(dev_priv, &port))
			de_port_masked |= DSI0_TE | DSI1_TE;
	}

	/* Vblank/underrun/flip-done go into the IER but stay masked in IMR. */
	de_pipe_enables = de_pipe_masked |
		GEN8_PIPE_VBLANK |
		gen8_de_pipe_underrun_mask(dev_priv) |
		gen8_de_pipe_flip_done_mask(dev_priv);

	de_port_enables = de_port_masked;
	if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
		de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
	else if (IS_BROADWELL(dev_priv))
		de_port_enables |= BDW_DE_PORT_HOTPLUG_MASK;

	if (DISPLAY_VER(dev_priv) >= 12) {
		enum transcoder trans;

		/* PSR IIRs are per transcoder on ver 12+; they must be clear. */
		for_each_cpu_transcoder_masked(dev_priv, trans, trans_mask) {
			enum intel_display_power_domain domain;

			domain = POWER_DOMAIN_TRANSCODER(trans);
			if (!intel_display_power_is_enabled(dev_priv, domain))
				continue;

			gen3_assert_iir_is_zero(uncore, TRANS_PSR_IIR(trans));
		}
	} else {
		gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
	}

	for_each_pipe(dev_priv, pipe) {
		dev_priv->de_irq_mask[pipe] = ~de_pipe_masked;

		/* Skip pipes whose power domain is down. */
		if (intel_display_power_is_enabled(dev_priv,
						   POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
					  dev_priv->de_irq_mask[pipe],
					  de_pipe_enables);
	}

	GEN3_IRQ_INIT(uncore, GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
	GEN3_IRQ_INIT(uncore, GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);

	/* Dedicated DE HPD registers exist on display versions 11-13 only. */
	if (IS_DISPLAY_VER(dev_priv, 11, 13)) {
		u32 de_hpd_masked = 0;
		u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK |
				     GEN11_DE_TBT_HOTPLUG_MASK;

		GEN3_IRQ_INIT(uncore, GEN11_DE_HPD_, ~de_hpd_masked,
			      de_hpd_enables);
	}
}
1635 
1636 void mtp_irq_postinstall(struct drm_i915_private *i915)
1637 {
1638 	struct intel_uncore *uncore = &i915->uncore;
1639 	u32 sde_mask = SDE_GMBUS_ICP | SDE_PICAINTERRUPT;
1640 	u32 de_hpd_mask = XELPDP_AUX_TC_MASK;
1641 	u32 de_hpd_enables = de_hpd_mask | XELPDP_DP_ALT_HOTPLUG_MASK |
1642 			     XELPDP_TBT_HOTPLUG_MASK;
1643 
1644 	GEN3_IRQ_INIT(uncore, PICAINTERRUPT_, ~de_hpd_mask,
1645 		      de_hpd_enables);
1646 
1647 	GEN3_IRQ_INIT(uncore, SDE, ~sde_mask, 0xffffffff);
1648 }
1649 
1650 void icp_irq_postinstall(struct drm_i915_private *dev_priv)
1651 {
1652 	struct intel_uncore *uncore = &dev_priv->uncore;
1653 	u32 mask = SDE_GMBUS_ICP;
1654 
1655 	GEN3_IRQ_INIT(uncore, SDE, ~mask, 0xffffffff);
1656 }
1657 
/*
 * GEN11+ DE postinstall: program the GEN8 DE registers, then re-enable
 * the display interrupt master bit.
 */
void gen11_de_irq_postinstall(struct drm_i915_private *dev_priv)
{
	if (!HAS_DISPLAY(dev_priv))
		return;

	gen8_de_irq_postinstall(dev_priv);

	intel_uncore_write(&dev_priv->uncore, GEN11_DISPLAY_INT_CTL,
			   GEN11_DISPLAY_IRQ_ENABLE);
}
1668 
1669