/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/circ_buf.h>
#include <linux/slab.h>
#include <linux/sysrq.h>

#include <drm/drm_drv.h>

#include "display/icl_dsi_regs.h"
#include "display/intel_de.h"
#include "display/intel_display_trace.h"
#include "display/intel_display_types.h"
#include "display/intel_fifo_underrun.h"
#include "display/intel_hotplug.h"
#include "display/intel_lpe_audio.h"
#include "display/intel_psr.h"

#include "gt/intel_breadcrumbs.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_irq.h"
#include "gt/intel_gt_pm_irq.h"
#include "gt/intel_gt_regs.h"
#include "gt/intel_rps.h"

#include "i915_driver.h"
#include "i915_drv.h"
#include "i915_irq.h"
#include "intel_pm.h"

/**
 * DOC: interrupt handling
 *
 * These functions provide the basic support for enabling and disabling the
 * interrupt handling support. There's a lot more functionality in i915_irq.c
 * and related files, but that will be described in separate chapters.
 */
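
/*
 * A minimal mental model (sketch, not the literal call chain): at driver
 * load a chip-specific handler such as valleyview_irq_handler() below is
 * wired up roughly like
 *
 *	err = request_irq(pdev->irq, valleyview_irq_handler,
 *			  IRQF_SHARED, "i915", dev_priv);
 *
 * after the platform's irq reset/postinstall pair has run; teardown runs
 * the reset step again before free_irq(). (pdev is a hypothetical local
 * for the device's struct pci_dev.) The helpers in this file implement
 * those per-platform reset/postinstall steps and the handlers themselves.
 */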

/*
 * Interrupt statistics for PMU. Increments the counter only if the
 * interrupt originated from the GPU, so interrupts from a device which
 * shares the interrupt line are not counted.
 */
static inline void pmu_irq_stats(struct drm_i915_private *i915,
				 irqreturn_t res)
{
	if (unlikely(res != IRQ_HANDLED))
		return;

	/*
	 * A clever compiler translates that into INC. A not so clever one
	 * should at least prevent store tearing.
	 */
	WRITE_ONCE(i915->pmu.irq_count, i915->pmu.irq_count + 1);
}
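
/*
 * Usage sketch (mirrors the chip-specific handlers later in this file):
 * compute the irqreturn_t first, then feed it to pmu_irq_stats() so that
 * interrupts raised by another device on a shared line are not counted:
 *
 *	static irqreturn_t example_irq_handler(int irq, void *arg)
 *	{
 *		struct drm_i915_private *i915 = arg;
 *		irqreturn_t ret = IRQ_NONE;
 *
 *		...read and clear IIR, set ret = IRQ_HANDLED if ours...
 *
 *		pmu_irq_stats(i915, ret);
 *		return ret;
 *	}
 *
 * (example_irq_handler is a hypothetical name for illustration.)
 */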

typedef bool (*long_pulse_detect_func)(enum hpd_pin pin, u32 val);
typedef u32 (*hotplug_enables_func)(struct drm_i915_private *i915,
				    enum hpd_pin pin);

static const u32 hpd_ilk[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG,
};

static const u32 hpd_ivb[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
};

static const u32 hpd_bdw[HPD_NUM_PINS] = {
	[HPD_PORT_A] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_A),
};

static const u32 hpd_ibx[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG,
};

static const u32 hpd_cpt[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
};

static const u32 hpd_spt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
	[HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT,
};

static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN,
};

static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS,
};

static const u32 hpd_status_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS,
};

static const u32 hpd_bxt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_A),
	[HPD_PORT_B] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_B),
	[HPD_PORT_C] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_C),
};

static const u32 hpd_gen11[HPD_NUM_PINS] = {
	[HPD_PORT_TC1] = GEN11_TC_HOTPLUG(HPD_PORT_TC1) | GEN11_TBT_HOTPLUG(HPD_PORT_TC1),
	[HPD_PORT_TC2] = GEN11_TC_HOTPLUG(HPD_PORT_TC2) | GEN11_TBT_HOTPLUG(HPD_PORT_TC2),
	[HPD_PORT_TC3] = GEN11_TC_HOTPLUG(HPD_PORT_TC3) | GEN11_TBT_HOTPLUG(HPD_PORT_TC3),
	[HPD_PORT_TC4] = GEN11_TC_HOTPLUG(HPD_PORT_TC4) | GEN11_TBT_HOTPLUG(HPD_PORT_TC4),
	[HPD_PORT_TC5] = GEN11_TC_HOTPLUG(HPD_PORT_TC5) | GEN11_TBT_HOTPLUG(HPD_PORT_TC5),
	[HPD_PORT_TC6] = GEN11_TC_HOTPLUG(HPD_PORT_TC6) | GEN11_TBT_HOTPLUG(HPD_PORT_TC6),
};

static const u32 hpd_icp[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_A),
	[HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_B),
	[HPD_PORT_C] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_C),
	[HPD_PORT_TC1] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC1),
	[HPD_PORT_TC2] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC2),
	[HPD_PORT_TC3] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC3),
	[HPD_PORT_TC4] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC4),
	[HPD_PORT_TC5] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC5),
	[HPD_PORT_TC6] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC6),
};

static const u32 hpd_sde_dg1[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_A),
	[HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_B),
	[HPD_PORT_C] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_C),
	[HPD_PORT_D] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_D),
	[HPD_PORT_TC1] = SDE_TC_HOTPLUG_DG2(HPD_PORT_TC1),
};

static void intel_hpd_init_pins(struct drm_i915_private *dev_priv)
{
	struct intel_hotplug *hpd = &dev_priv->display.hotplug;

	if (HAS_GMCH(dev_priv)) {
		if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
		    IS_CHERRYVIEW(dev_priv))
			hpd->hpd = hpd_status_g4x;
		else
			hpd->hpd = hpd_status_i915;
		return;
	}

	if (DISPLAY_VER(dev_priv) >= 11)
		hpd->hpd = hpd_gen11;
	else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
		hpd->hpd = hpd_bxt;
	else if (DISPLAY_VER(dev_priv) >= 8)
		hpd->hpd = hpd_bdw;
	else if (DISPLAY_VER(dev_priv) >= 7)
		hpd->hpd = hpd_ivb;
	else
		hpd->hpd = hpd_ilk;

	if ((INTEL_PCH_TYPE(dev_priv) < PCH_DG1) &&
	    (!HAS_PCH_SPLIT(dev_priv) || HAS_PCH_NOP(dev_priv)))
		return;

	if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1)
		hpd->pch_hpd = hpd_sde_dg1;
	else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
		hpd->pch_hpd = hpd_icp;
	else if (HAS_PCH_CNP(dev_priv) || HAS_PCH_SPT(dev_priv))
		hpd->pch_hpd = hpd_spt;
	else if (HAS_PCH_LPT(dev_priv) || HAS_PCH_CPT(dev_priv))
		hpd->pch_hpd = hpd_cpt;
	else if (HAS_PCH_IBX(dev_priv))
		hpd->pch_hpd = hpd_ibx;
	else
		MISSING_CASE(INTEL_PCH_TYPE(dev_priv));
}

static void
intel_handle_vblank(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);

	drm_crtc_handle_vblank(&crtc->base);
}

void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr,
		    i915_reg_t iir, i915_reg_t ier)
{
	intel_uncore_write(uncore, imr, 0xffffffff);
	intel_uncore_posting_read(uncore, imr);

	intel_uncore_write(uncore, ier, 0);

	/* IIR can theoretically queue up two events. Be paranoid. */
	intel_uncore_write(uncore, iir, 0xffffffff);
	intel_uncore_posting_read(uncore, iir);
	intel_uncore_write(uncore, iir, 0xffffffff);
	intel_uncore_posting_read(uncore, iir);
}

static void gen2_irq_reset(struct intel_uncore *uncore)
{
	intel_uncore_write16(uncore, GEN2_IMR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IMR);

	intel_uncore_write16(uncore, GEN2_IER, 0);

	/* IIR can theoretically queue up two events. Be paranoid. */
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
}

/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
static void gen3_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg)
{
	u32 val = intel_uncore_read(uncore, reg);

	if (val == 0)
		return;

	drm_WARN(&uncore->i915->drm, 1,
		 "Interrupt register 0x%x is not zero: 0x%08x\n",
		 i915_mmio_reg_offset(reg), val);
	intel_uncore_write(uncore, reg, 0xffffffff);
	intel_uncore_posting_read(uncore, reg);
	intel_uncore_write(uncore, reg, 0xffffffff);
	intel_uncore_posting_read(uncore, reg);
}

static void gen2_assert_iir_is_zero(struct intel_uncore *uncore)
{
	u16 val = intel_uncore_read16(uncore, GEN2_IIR);

	if (val == 0)
		return;

	drm_WARN(&uncore->i915->drm, 1,
		 "Interrupt register 0x%x is not zero: 0x%08x\n",
		 i915_mmio_reg_offset(GEN2_IIR), val);
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
}

void gen3_irq_init(struct intel_uncore *uncore,
		   i915_reg_t imr, u32 imr_val,
		   i915_reg_t ier, u32 ier_val,
		   i915_reg_t iir)
{
	gen3_assert_iir_is_zero(uncore, iir);

	intel_uncore_write(uncore, ier, ier_val);
	intel_uncore_write(uncore, imr, imr_val);
	intel_uncore_posting_read(uncore, imr);
}

static void gen2_irq_init(struct intel_uncore *uncore,
			  u32 imr_val, u32 ier_val)
{
	gen2_assert_iir_is_zero(uncore);

	intel_uncore_write16(uncore, GEN2_IER, ier_val);
	intel_uncore_write16(uncore, GEN2_IMR, imr_val);
	intel_uncore_posting_read16(uncore, GEN2_IMR);
}

/* For display hotplug interrupt */
static inline void
i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
				     u32 mask,
				     u32 bits)
{
	lockdep_assert_held(&dev_priv->irq_lock);
	drm_WARN_ON(&dev_priv->drm, bits & ~mask);

	intel_uncore_rmw(&dev_priv->uncore, PORT_HOTPLUG_EN, mask, bits);
}

/**
 * i915_hotplug_interrupt_update - update hotplug interrupt enable
 * @dev_priv: driver private
 * @mask: bits to update
 * @bits: bits to enable
 * NOTE: the HPD enable bits are modified both inside and outside
 * of an interrupt context. To avoid that read-modify-write cycles
 * interfere, these bits are protected by a spinlock. Since this
 * function is usually not called from a context where the lock is
 * held already, this function acquires the lock itself. A non-locking
 * version is also available.
 */
void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
				   u32 mask,
				   u32 bits)
{
	spin_lock_irq(&dev_priv->irq_lock);
	i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
	spin_unlock_irq(&dev_priv->irq_lock);
}
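
/*
 * Example (sketch): enabling the CRT hotplug detect bit from process
 * context, where irq_lock is not yet held:
 *
 *	i915_hotplug_interrupt_update(dev_priv,
 *				      CRT_HOTPLUG_INT_EN,
 *				      CRT_HOTPLUG_INT_EN);
 *
 * Code that already holds dev_priv->irq_lock should call
 * i915_hotplug_interrupt_update_locked() directly instead.
 */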

/**
 * ilk_update_display_irq - update DEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_display_irq(struct drm_i915_private *dev_priv,
				   u32 interrupt_mask, u32 enabled_irq_mask)
{
	u32 new_val;

	lockdep_assert_held(&dev_priv->irq_lock);
	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

	new_val = dev_priv->irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->irq_mask &&
	    !drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv))) {
		dev_priv->irq_mask = new_val;
		intel_uncore_write(&dev_priv->uncore, DEIMR, dev_priv->irq_mask);
		intel_uncore_posting_read(&dev_priv->uncore, DEIMR);
	}
}

void ilk_enable_display_irq(struct drm_i915_private *i915, u32 bits)
{
	ilk_update_display_irq(i915, bits, bits);
}

void ilk_disable_display_irq(struct drm_i915_private *i915, u32 bits)
{
	ilk_update_display_irq(i915, bits, 0);
}
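
/*
 * Worked example for the IMR update above (hypothetical bit values):
 * with interrupt_mask = 0b1100 and enabled_irq_mask = 0b0100, only the
 * two bits selected by interrupt_mask are rewritten:
 *
 *	new_val = (old & ~0b1100) | (~0b0100 & 0b1100);
 *	        = (old & ~0b1100) | 0b1000;
 *
 * i.e. 0b0100 ends up unmasked (cleared in IMR) and 0b1000 masked (set
 * in IMR), because IMR uses "1 = masked" semantics, hence the inversion
 * of enabled_irq_mask. The same pattern repeats in the bdw/ibx helpers
 * below.
 */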

/**
 * bdw_update_port_irq - update DE port interrupt
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
				u32 interrupt_mask,
				u32 enabled_irq_mask)
{
	u32 new_val;
	u32 old_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
		return;

	old_val = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PORT_IMR);

	new_val = old_val;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != old_val) {
		intel_uncore_write(&dev_priv->uncore, GEN8_DE_PORT_IMR, new_val);
		intel_uncore_posting_read(&dev_priv->uncore, GEN8_DE_PORT_IMR);
	}
}

/**
 * bdw_update_pipe_irq - update DE pipe interrupt
 * @dev_priv: driver private
 * @pipe: pipe whose interrupt to update
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
				enum pipe pipe, u32 interrupt_mask,
				u32 enabled_irq_mask)
{
	u32 new_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->de_irq_mask[pipe];
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->de_irq_mask[pipe]) {
		dev_priv->de_irq_mask[pipe] = new_val;
		intel_uncore_write(&dev_priv->uncore, GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
		intel_uncore_posting_read(&dev_priv->uncore, GEN8_DE_PIPE_IMR(pipe));
	}
}

void bdw_enable_pipe_irq(struct drm_i915_private *i915,
			 enum pipe pipe, u32 bits)
{
	bdw_update_pipe_irq(i915, pipe, bits, bits);
}

void bdw_disable_pipe_irq(struct drm_i915_private *i915,
			  enum pipe pipe, u32 bits)
{
	bdw_update_pipe_irq(i915, pipe, bits, 0);
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
					 u32 interrupt_mask,
					 u32 enabled_irq_mask)
{
	u32 sdeimr = intel_uncore_read(&dev_priv->uncore, SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

	lockdep_assert_held(&dev_priv->irq_lock);

	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
		return;

	intel_uncore_write(&dev_priv->uncore, SDEIMR, sdeimr);
	intel_uncore_posting_read(&dev_priv->uncore, SDEIMR);
}

void ibx_enable_display_interrupt(struct drm_i915_private *i915, u32 bits)
{
	ibx_display_interrupt_update(i915, bits, bits);
}

void ibx_disable_display_interrupt(struct drm_i915_private *i915, u32 bits)
{
	ibx_display_interrupt_update(i915, bits, 0);
}

u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
			      enum pipe pipe)
{
	u32 status_mask = dev_priv->pipestat_irq_mask[pipe];
	u32 enable_mask = status_mask << 16;

	lockdep_assert_held(&dev_priv->irq_lock);

	if (DISPLAY_VER(dev_priv) < 5)
		goto out;

	/*
	 * On pipe A we don't support the PSR interrupt yet,
	 * on pipe B and C the same bit MBZ.
	 */
	if (drm_WARN_ON_ONCE(&dev_priv->drm,
			     status_mask & PIPE_A_PSR_STATUS_VLV))
		return 0;
	/*
	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
	 * A the same bit is for perf counters which we don't use either.
	 */
	if (drm_WARN_ON_ONCE(&dev_priv->drm,
			     status_mask & PIPE_B_PSR_STATUS_VLV))
		return 0;

	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

out:
	drm_WARN_ONCE(&dev_priv->drm,
		      enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask);

	return enable_mask;
}
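
/*
 * PIPESTAT layout sketch: the low 16 bits of the register are status
 * bits and the high 16 bits are the matching enable bits, so for most
 * events the enable bit is simply the status bit shifted left by 16.
 * E.g. (hypothetical bit, assuming a status bit at BIT(2)):
 *
 *	status_mask = BIT(2);
 *	enable_mask = status_mask << 16;
 *	intel_uncore_write(uncore, PIPESTAT(pipe),
 *			   enable_mask | status_mask);
 *
 * writes the enable bit and acks the status bit in one go. The VLV/CHV
 * sprite flip-done and PSR bits handled above are the exceptions where
 * enable and status bits don't line up 1:1.
 */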

void i915_enable_pipestat(struct drm_i915_private *dev_priv,
			  enum pipe pipe, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 enable_mask;

	drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: status_mask=0x%x\n",
		      pipe_name(pipe), status_mask);

	lockdep_assert_held(&dev_priv->irq_lock);
	drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));

	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == status_mask)
		return;

	dev_priv->pipestat_irq_mask[pipe] |= status_mask;
	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

	intel_uncore_write(&dev_priv->uncore, reg, enable_mask | status_mask);
	intel_uncore_posting_read(&dev_priv->uncore, reg);
}

void i915_disable_pipestat(struct drm_i915_private *dev_priv,
			   enum pipe pipe, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 enable_mask;

	drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: status_mask=0x%x\n",
		      pipe_name(pipe), status_mask);

	lockdep_assert_held(&dev_priv->irq_lock);
	drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));

	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == 0)
		return;

	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

	intel_uncore_write(&dev_priv->uncore, reg, enable_mask | status_mask);
	intel_uncore_posting_read(&dev_priv->uncore, reg);
}

static bool i915_has_asle(struct drm_i915_private *dev_priv)
{
	if (!dev_priv->display.opregion.asle)
		return false;

	return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
}

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 * @dev_priv: i915 device private
 */
static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
{
	if (!i915_has_asle(dev_priv))
		return;

	spin_lock_irq(&dev_priv->irq_lock);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
	if (DISPLAY_VER(dev_priv) >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_STATUS);

	spin_unlock_irq(&dev_priv->irq_lock);
}

/*
 * This timing diagram depicts the video signal in and
 * around the vertical blanking period.
 *
 * Assumptions about the fictitious mode used in this example:
 *  vblank_start >= 3
 *  vsync_start = vblank_start + 1
 *  vsync_end = vblank_start + 2
 *  vtotal = vblank_start + 3
 *
 *           start of vblank:
 *           latch double buffered registers
 *           increment frame counter (ctg+)
 *           generate start of vblank interrupt (gen4+)
 *           |
 *           |          frame start:
 *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
 *           |          may be shifted forward 1-3 extra lines via PIPECONF
 *           |          |
 *           |          |  start of vsync:
 *           |          |  generate vsync interrupt
 *           |          |  |
 * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
 *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
 * ----va---> <-----------------vb--------------------> <--------va-------------
 *       |          |       <----vs----->                     |
 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
 *       |          |                                         |
 *       last visible pixel                                   first visible pixel
 *                  |                                         increment frame counter (gen3/4)
 *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
 *
 * x  = horizontal active
 * _  = horizontal blanking
 * hs = horizontal sync
 * va = vertical active
 * vb = vertical blanking
 * vs = vertical sync
 * vbs = vblank_start (number)
 *
 * Summary:
 * - most events happen at the start of horizontal sync
 * - frame start happens at the start of horizontal blank, 1-4 lines
 *   (depending on PIPECONF settings) after the start of vblank
 * - gen3/4 pixel and frame counter are synchronized with the start
 *   of horizontal active on the first line of vertical active
 */

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
u32 i915_get_vblank_counter(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[drm_crtc_index(crtc)];
	const struct drm_display_mode *mode = &vblank->hwmode;
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	i915_reg_t high_frame, low_frame;
	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
	unsigned long irqflags;

	/*
	 * On i965gm TV output the frame counter only works up to
	 * the point when we enable the TV encoder. After that the
	 * frame counter ceases to work and reads zero. We need a
	 * vblank wait before enabling the TV encoder and so we
	 * have to enable vblank interrupts while the frame counter
	 * is still in a working state. However the core vblank code
	 * does not like us returning non-zero frame counter values
	 * when we've told it that we don't have a working frame
	 * counter. Thus we must stop non-zero values leaking out.
	 */
	if (!vblank->max_vblank_count)
		return 0;

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vbl_start = mode->crtc_vblank_start;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vbl_start = DIV_ROUND_UP(vbl_start, 2);

	/* Convert to pixel count */
	vbl_start *= htotal;

	/* Start of vblank event occurs at start of hsync */
	vbl_start -= htotal - hsync_start;

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = intel_de_read_fw(dev_priv, high_frame) & PIPE_FRAME_HIGH_MASK;
		low   = intel_de_read_fw(dev_priv, low_frame);
		high2 = intel_de_read_fw(dev_priv, high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * The frame counter increments at beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}
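
/*
 * Worked example for the cooked counter above (hypothetical timings):
 * with htotal = 100, hsync_start = 90 and vblank_start = 3, vbl_start
 * becomes 3 * 100 - (100 - 90) = 290 pixels. If the hardware then
 * reports frame counter 5 and pixel counter 300, we are already past
 * the start of vblank, so the cooked value is
 * (5 + (300 >= 290)) & 0xffffff = 6.
 */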

u32 g4x_get_vblank_counter(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[drm_crtc_index(crtc)];
	enum pipe pipe = to_intel_crtc(crtc)->pipe;

	if (!vblank->max_vblank_count)
		return 0;

	return intel_uncore_read(&dev_priv->uncore, PIPE_FRMCOUNT_G4X(pipe));
}

static u32 intel_crtc_scanlines_since_frame_timestamp(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct drm_vblank_crtc *vblank =
		&crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
	const struct drm_display_mode *mode = &vblank->hwmode;
	u32 htotal = mode->crtc_htotal;
	u32 clock = mode->crtc_clock;
	u32 scan_prev_time, scan_curr_time, scan_post_time;

	/*
	 * To avoid the race condition where we might cross into the
	 * next vblank just between the PIPE_FRMTMSTMP and TIMESTAMP_CTR
	 * reads, we make sure we read PIPE_FRMTMSTMP and TIMESTAMP_CTR
	 * during the same frame.
	 */
	do {
		/*
		 * This field provides read back of the display
		 * pipe frame time stamp. The time stamp value
		 * is sampled at every start of vertical blank.
		 */
		scan_prev_time = intel_de_read_fw(dev_priv,
						  PIPE_FRMTMSTMP(crtc->pipe));

		/*
		 * The TIMESTAMP_CTR register has the current
		 * time stamp value.
		 */
		scan_curr_time = intel_de_read_fw(dev_priv, IVB_TIMESTAMP_CTR);

		scan_post_time = intel_de_read_fw(dev_priv,
						  PIPE_FRMTMSTMP(crtc->pipe));
	} while (scan_post_time != scan_prev_time);

	return div_u64(mul_u32_u32(scan_curr_time - scan_prev_time,
				   clock), 1000 * htotal);
}
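
/*
 * Dimensional sketch of the formula above: crtc_clock is in kHz
 * (pixels per millisecond), so with the timestamp delta taken as
 * microseconds, delta * clock / 1000 yields pixels scanned, and
 * dividing by htotal converts that to whole scanlines. E.g. a delta
 * of 8000 at clock = 148500 kHz and htotal = 2200 gives
 * 8000 * 148500 / (1000 * 2200) = 540 scanlines (hypothetical
 * numbers; the tick-rate assumption is only implied by the 1000
 * factor).
 */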

/*
 * On certain encoders on certain platforms, the pipe scanline
 * register will not work to get the scanline, since the timings
 * are driven from the PORT or there are issues with scanline
 * register updates.
 * This function will use Framestamp and current
 * timestamp registers to calculate the scanline.
 */
static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc)
{
	struct drm_vblank_crtc *vblank =
		&crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
	const struct drm_display_mode *mode = &vblank->hwmode;
	u32 vblank_start = mode->crtc_vblank_start;
	u32 vtotal = mode->crtc_vtotal;
	u32 scanline;

	scanline = intel_crtc_scanlines_since_frame_timestamp(crtc);
	scanline = min(scanline, vtotal - 1);
	scanline = (scanline + vblank_start) % vtotal;

	return scanline;
}

/*
 * intel_de_read_fw(), only for fast reads of display block, no need for
 * forcewake etc.
 */
static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct drm_display_mode *mode;
	struct drm_vblank_crtc *vblank;
	enum pipe pipe = crtc->pipe;
	int position, vtotal;

	if (!crtc->active)
		return 0;

	vblank = &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
	mode = &vblank->hwmode;

	if (crtc->mode_flags & I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP)
		return __intel_get_crtc_scanline_from_timestamp(crtc);

	vtotal = mode->crtc_vtotal;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vtotal /= 2;

	position = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & PIPEDSL_LINE_MASK;

	/*
	 * On HSW, the DSL reg (0x70000) appears to return 0 if we
	 * read it just before the start of vblank.  So try it again
	 * so we don't accidentally end up spanning a vblank frame
	 * increment, causing the pipe_update_end() code to squawk at us.
	 *
	 * The nature of this problem means we can't simply check the ISR
	 * bit and return the vblank start value; nor can we use the scanline
	 * debug register in the transcoder as it appears to have the same
	 * problem.  We may need to extend this to include other platforms,
	 * but so far testing only shows the problem on HSW.
	 */
	if (HAS_DDI(dev_priv) && !position) {
		int i, temp;

		for (i = 0; i < 100; i++) {
			udelay(1);
			temp = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & PIPEDSL_LINE_MASK;
			if (temp != position) {
				position = temp;
				break;
			}
		}
	}

	/*
	 * See update_scanline_offset() for the details on the
	 * scanline_offset adjustment.
	 */
	return (position + crtc->scanline_offset) % vtotal;
}
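
/*
 * Worked example for the scanline_offset adjustment above (hypothetical
 * numbers): with vtotal = 1125 and scanline_offset = 1, a raw DSL value
 * of 1124 (the last line) wraps to (1124 + 1) % 1125 = 0, i.e. the
 * hardware counter reads one line "ahead" and the modulo brings it back
 * into the 0..vtotal-1 range the rest of the code expects.
 */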

static bool i915_get_crtc_scanoutpos(struct drm_crtc *_crtc,
				     bool in_vblank_irq,
				     int *vpos, int *hpos,
				     ktime_t *stime, ktime_t *etime,
				     const struct drm_display_mode *mode)
{
	struct drm_device *dev = _crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *crtc = to_intel_crtc(_crtc);
	enum pipe pipe = crtc->pipe;
	int position;
	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
	unsigned long irqflags;
	bool use_scanline_counter = DISPLAY_VER(dev_priv) >= 5 ||
		IS_G4X(dev_priv) || DISPLAY_VER(dev_priv) == 2 ||
		crtc->mode_flags & I915_MODE_FLAG_USE_SCANLINE_COUNTER;

	if (drm_WARN_ON(&dev_priv->drm, !mode->crtc_clock)) {
		drm_dbg(&dev_priv->drm,
			"trying to get scanoutpos for disabled "
			"pipe %c\n", pipe_name(pipe));
		return false;
	}

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (crtc->mode_flags & I915_MODE_FLAG_VRR) {
		int scanlines = intel_crtc_scanlines_since_frame_timestamp(crtc);

		position = __intel_get_crtc_scanline(crtc);

		/*
		 * Already exiting vblank? If so, shift our position
		 * so it looks like we're already approaching the full
		 * vblank end. This should make the generated timestamp
		 * more or less match when the active portion will start.
		 */
		if (position >= vbl_start && scanlines < position)
			position = min(crtc->vmax_vblank_start + scanlines, vtotal - 1);
	} else if (use_scanline_counter) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = __intel_get_crtc_scanline(crtc);
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (intel_de_read_fw(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;

		/*
		 * In interlaced modes, the pixel counter counts all pixels,
		 * so one field will have htotal more pixels. In order to keep
		 * the reported position from jumping backwards when the pixel
		 * counter is beyond the length of the shorter field, just
		 * clamp the position to the length of the shorter field. This
		 * matches how the scanline counter based position works since
		 * the scanline counter doesn't count the two half lines.
		 */
		if (position >= vtotal)
			position = vtotal - 1;

		/*
		 * Start of vblank interrupt is triggered at start of hsync,
		 * just prior to the first active line of vblank. However we
		 * consider lines to start at the leading edge of horizontal
		 * active. So, should we get here before we've crossed into
		 * the horizontal active of the first line in vblank, we would
		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
		 * always add htotal-hsync_start to the current pixel position.
		 */
		position = (position + htotal - hsync_start) % vtotal;
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;

	if (use_scanline_counter) {
		*vpos = position;
		*hpos = 0;
	} else {
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	return true;
}
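
/*
 * Worked example for the position normalization above (hypothetical
 * scanline-counter mode): with vbl_start = 1080, vbl_end = 1125 and
 * vtotal = 1125, a raw position of 1100 is inside vblank and becomes
 * 1100 - 1125 = -25, counting up towards 0 at vbl_end; a raw position
 * of 500 is in the active area and becomes 500 + (1125 - 1125) = 500,
 * counting up since vbl_end.
 */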

bool intel_crtc_get_vblank_timestamp(struct drm_crtc *crtc, int *max_error,
				     ktime_t *vblank_time, bool in_vblank_irq)
{
	return drm_crtc_vblank_helper_get_vblank_timestamp_internal(
		crtc, max_error, vblank_time, in_vblank_irq,
		i915_get_crtc_scanoutpos);
}

int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	unsigned long irqflags;
	int position;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	position = __intel_get_crtc_scanline(crtc);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	return position;
}

/**
 * ivb_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * the same row is more likely to go bad again.
 */
static void ivb_parity_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), l3_parity.error_work);
	struct intel_gt *gt = to_gt(dev_priv);
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	u32 misccpctl;
	u8 slice = 0;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->drm.struct_mutex);

	/* If we've screwed up tracking, just let the interrupt fire again */
	if (drm_WARN_ON(&dev_priv->drm, !dev_priv->l3_parity.which_slice))
		goto out;

	misccpctl = intel_uncore_rmw(&dev_priv->uncore, GEN7_MISCCPCTL,
				     GEN7_DOP_CLOCK_GATE_ENABLE, 0);
	intel_uncore_posting_read(&dev_priv->uncore, GEN7_MISCCPCTL);

	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		i915_reg_t reg;

		slice--;
		if (drm_WARN_ON_ONCE(&dev_priv->drm,
				     slice >= NUM_L3_SLICES(dev_priv)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1<<slice);

		reg = GEN7_L3CDERRST1(slice);

		error_status = intel_uncore_read(&dev_priv->uncore, reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		intel_uncore_write(&dev_priv->uncore, reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		intel_uncore_posting_read(&dev_priv->uncore, reg);

		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

		kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj,
				   KOBJ_CHANGE, parity_event);

		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			  slice, row, bank, subbank);

		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
	}

	intel_uncore_write(&dev_priv->uncore, GEN7_MISCCPCTL, misccpctl);

out:
	drm_WARN_ON(&dev_priv->drm, dev_priv->l3_parity.which_slice);
	spin_lock_irq(&gt->irq_lock);
	gen5_gt_enable_irq(gt, GT_PARITY_ERROR(dev_priv));
	spin_unlock_irq(&gt->irq_lock);

	mutex_unlock(&dev_priv->drm.struct_mutex);
}
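
/*
 * The resulting uevent environment looks like (sketch, hypothetical
 * numbers): the I915_L3_PARITY_UEVENT "=1" marker plus ROW=3 BANK=1
 * SUBBANK=0 SLICE=0, one KOBJ_CHANGE event per affected slice, which
 * userspace can use to remap the failing row.
 */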

static bool gen11_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_TC1:
	case HPD_PORT_TC2:
	case HPD_PORT_TC3:
	case HPD_PORT_TC4:
	case HPD_PORT_TC5:
	case HPD_PORT_TC6:
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(pin);
	default:
		return false;
	}
}

static bool bxt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_A:
		return val & PORTA_HOTPLUG_LONG_DETECT;
	case HPD_PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case HPD_PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool icp_ddi_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_A:
	case HPD_PORT_B:
	case HPD_PORT_C:
	case HPD_PORT_D:
		return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(pin);
	default:
		return false;
	}
}

static bool icp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_TC1:
	case HPD_PORT_TC2:
	case HPD_PORT_TC3:
	case HPD_PORT_TC4:
	case HPD_PORT_TC5:
	case HPD_PORT_TC6:
		return val & ICP_TC_HPD_LONG_DETECT(pin);
	default:
		return false;
	}
}

static bool spt_port_hotplug2_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_E:
		return val & PORTE_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool spt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_A:
		return val & PORTA_HOTPLUG_LONG_DETECT;
	case HPD_PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case HPD_PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	case HPD_PORT_D:
		return val & PORTD_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool ilk_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_A:
		return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool pch_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case HPD_PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	case HPD_PORT_D:
		return val & PORTD_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool i9xx_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_B:
		return val & PORTB_HOTPLUG_INT_LONG_PULSE;
	case HPD_PORT_C:
		return val & PORTC_HOTPLUG_INT_LONG_PULSE;
	case HPD_PORT_D:
		return val & PORTD_HOTPLUG_INT_LONG_PULSE;
	default:
		return false;
	}
}

/*
 * Get a bit mask of pins that have triggered, and which ones may be long.
 * This can be called multiple times with the same masks to accumulate
 * hotplug detection results from several registers.
 *
 * Note that the caller is expected to zero out the masks initially.
 */
static void intel_get_hpd_pins(struct drm_i915_private *dev_priv,
			       u32 *pin_mask, u32 *long_mask,
			       u32 hotplug_trigger, u32 dig_hotplug_reg,
			       const u32 hpd[HPD_NUM_PINS],
			       bool long_pulse_detect(enum hpd_pin pin, u32 val))
{
	enum hpd_pin pin;

	BUILD_BUG_ON(BITS_PER_TYPE(*pin_mask) < HPD_NUM_PINS);

	for_each_hpd_pin(pin) {
		if ((hpd[pin] & hotplug_trigger) == 0)
			continue;

		*pin_mask |= BIT(pin);

		if (long_pulse_detect(pin, dig_hotplug_reg))
			*long_mask |= BIT(pin);
	}

	drm_dbg(&dev_priv->drm,
		"hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x, long 0x%08x\n",
		hotplug_trigger, dig_hotplug_reg, *pin_mask, *long_mask);
}
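
/*
 * Accumulation sketch: a caller handling two hotplug registers zeroes
 * the masks once and lets both calls OR their results in:
 *
 *	u32 pin_mask = 0, long_mask = 0;
 *
 *	intel_get_hpd_pins(i915, &pin_mask, &long_mask, trigger_ddi,
 *			   dig_hotplug_ddi, i915->display.hotplug.pch_hpd,
 *			   icp_ddi_port_hotplug_long_detect);
 *	intel_get_hpd_pins(i915, &pin_mask, &long_mask, trigger_tc,
 *			   dig_hotplug_tc, i915->display.hotplug.pch_hpd,
 *			   icp_tc_port_hotplug_long_detect);
 *	intel_hpd_irq_handler(i915, pin_mask, long_mask);
 *
 * (trigger_* and dig_hotplug_* are hypothetical locals; this mirrors
 * how the ICP PCH hotplug handling elsewhere in the driver splits DDI
 * and Type-C triggers across two calls.)
 */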

static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv,
				  const u32 hpd[HPD_NUM_PINS])
{
	struct intel_encoder *encoder;
	u32 enabled_irqs = 0;

	for_each_intel_encoder(&dev_priv->drm, encoder)
		if (dev_priv->display.hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
			enabled_irqs |= hpd[encoder->hpd_pin];

	return enabled_irqs;
}

static u32 intel_hpd_hotplug_irqs(struct drm_i915_private *dev_priv,
				  const u32 hpd[HPD_NUM_PINS])
{
	struct intel_encoder *encoder;
	u32 hotplug_irqs = 0;

	for_each_intel_encoder(&dev_priv->drm, encoder)
		hotplug_irqs |= hpd[encoder->hpd_pin];

	return hotplug_irqs;
}

static u32 intel_hpd_hotplug_enables(struct drm_i915_private *i915,
				     hotplug_enables_func hotplug_enables)
{
	struct intel_encoder *encoder;
	u32 hotplug = 0;

	for_each_intel_encoder(&i915->drm, encoder)
		hotplug |= hotplug_enables(i915, encoder->hpd_pin);

	return hotplug;
}

static void gmbus_irq_handler(struct drm_i915_private *dev_priv)
{
	wake_up_all(&dev_priv->display.gmbus.wait_queue);
}

static void dp_aux_irq_handler(struct drm_i915_private *dev_priv)
{
	wake_up_all(&dev_priv->display.gmbus.wait_queue);
}

#if defined(CONFIG_DEBUG_FS)
static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
					 enum pipe pipe,
					 u32 crc0, u32 crc1,
					 u32 crc2, u32 crc3,
					 u32 crc4)
{
	struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);
	struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc;
	u32 crcs[5] = { crc0, crc1, crc2, crc3, crc4 };

	trace_intel_pipe_crc(crtc, crcs);

	spin_lock(&pipe_crc->lock);
	/*
	 * For some not yet identified reason, the first CRC is
	 * bonkers. So let's just wait for the next vblank and read
	 * out the buggy result.
	 *
	 * On GEN8+ sometimes the second CRC is bonkers as well, so
	 * don't trust that one either.
	 */
	if (pipe_crc->skipped <= 0 ||
	    (DISPLAY_VER(dev_priv) >= 8 && pipe_crc->skipped == 1)) {
		pipe_crc->skipped++;
		spin_unlock(&pipe_crc->lock);
		return;
	}
	spin_unlock(&pipe_crc->lock);

	drm_crtc_add_crc_entry(&crtc->base, true,
				drm_crtc_accurate_vblank_count(&crtc->base),
				crcs);
}
#else
static inline void
display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
			     enum pipe pipe,
			     u32 crc0, u32 crc1,
			     u32 crc2, u32 crc3,
			     u32 crc4) {}
#endif

static void flip_done_handler(struct drm_i915_private *i915,
			      enum pipe pipe)
{
	struct intel_crtc *crtc = intel_crtc_for_pipe(i915, pipe);
	struct drm_crtc_state *crtc_state = crtc->base.state;
	struct drm_pending_vblank_event *e = crtc_state->event;
	struct drm_device *dev = &i915->drm;
	unsigned long irqflags;

	spin_lock_irqsave(&dev->event_lock, irqflags);

	crtc_state->event = NULL;

	drm_crtc_send_vblank_event(&crtc->base, e);

	spin_unlock_irqrestore(&dev->event_lock, irqflags);
}

static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	display_pipe_crc_irq_handler(dev_priv, pipe,
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_1_IVB(pipe)),
				     0, 0, 0, 0);
}

static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	display_pipe_crc_irq_handler(dev_priv, pipe,
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_1_IVB(pipe)),
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_2_IVB(pipe)),
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_3_IVB(pipe)),
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_4_IVB(pipe)),
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_5_IVB(pipe)));
}

static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	u32 res1, res2;

	if (DISPLAY_VER(dev_priv) >= 3)
		res1 = intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RES1_I915(pipe));
	else
		res1 = 0;

	if (DISPLAY_VER(dev_priv) >= 5 || IS_G4X(dev_priv))
		res2 = intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RES2_G4X(pipe));
	else
		res2 = 0;

	display_pipe_crc_irq_handler(dev_priv, pipe,
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RED(pipe)),
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_GREEN(pipe)),
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_BLUE(pipe)),
				     res1, res2);
}

static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		intel_uncore_write(&dev_priv->uncore, PIPESTAT(pipe),
			   PIPESTAT_INT_STATUS_MASK |
			   PIPE_FIFO_UNDERRUN_STATUS);

		dev_priv->pipestat_irq_mask[pipe] = 0;
	}
}

static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
				  u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	enum pipe pipe;

	spin_lock(&dev_priv->irq_lock);

	if (!dev_priv->display_irqs_enabled) {
		spin_unlock(&dev_priv->irq_lock);
		return;
	}

	for_each_pipe(dev_priv, pipe) {
		i915_reg_t reg;
		u32 status_mask, enable_mask, iir_bit = 0;

		/*
		 * PIPESTAT bits get signalled even when the interrupt is
		 * disabled with the mask bits, and some of the status bits do
		 * not generate interrupts at all (like the underrun bit). Hence
		 * we need to be careful that we only handle what we want to
		 * handle.
		 */

		/* fifo underruns are filtered in the underrun handler. */
1450 		status_mask = PIPE_FIFO_UNDERRUN_STATUS;
1451 
1452 		switch (pipe) {
1453 		default:
1454 		case PIPE_A:
1455 			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
1456 			break;
1457 		case PIPE_B:
1458 			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
1459 			break;
1460 		case PIPE_C:
1461 			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
1462 			break;
1463 		}
1464 		if (iir & iir_bit)
1465 			status_mask |= dev_priv->pipestat_irq_mask[pipe];
1466 
1467 		if (!status_mask)
1468 			continue;
1469 
1470 		reg = PIPESTAT(pipe);
1471 		pipe_stats[pipe] = intel_uncore_read(&dev_priv->uncore, reg) & status_mask;
1472 		enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
1473 
1474 		/*
1475 		 * Clear the PIPE*STAT regs before the IIR
1476 		 *
1477 		 * Toggle the enable bits to make sure we get an
1478 		 * edge in the ISR pipe event bit if we don't clear
1479 		 * all the enabled status bits. Otherwise the edge
1480 		 * triggered IIR on i965/g4x wouldn't notice that
1481 		 * an interrupt is still pending.
1482 		 */
1483 		if (pipe_stats[pipe]) {
1484 			intel_uncore_write(&dev_priv->uncore, reg, pipe_stats[pipe]);
1485 			intel_uncore_write(&dev_priv->uncore, reg, enable_mask);
1486 		}
1487 	}
1488 	spin_unlock(&dev_priv->irq_lock);
1489 }
1490 
1491 static void i8xx_pipestat_irq_handler(struct drm_i915_private *dev_priv,
1492 				      u16 iir, u32 pipe_stats[I915_MAX_PIPES])
1493 {
1494 	enum pipe pipe;
1495 
1496 	for_each_pipe(dev_priv, pipe) {
1497 		if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
1498 			intel_handle_vblank(dev_priv, pipe);
1499 
1500 		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1501 			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1502 
1503 		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1504 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1505 	}
1506 }
1507 
1508 static void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv,
1509 				      u32 iir, u32 pipe_stats[I915_MAX_PIPES])
1510 {
1511 	bool blc_event = false;
1512 	enum pipe pipe;
1513 
1514 	for_each_pipe(dev_priv, pipe) {
1515 		if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
1516 			intel_handle_vblank(dev_priv, pipe);
1517 
1518 		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
1519 			blc_event = true;
1520 
1521 		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1522 			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1523 
1524 		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1525 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1526 	}
1527 
1528 	if (blc_event || (iir & I915_ASLE_INTERRUPT))
1529 		intel_opregion_asle_intr(dev_priv);
1530 }
1531 
1532 static void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv,
1533 				      u32 iir, u32 pipe_stats[I915_MAX_PIPES])
1534 {
1535 	bool blc_event = false;
1536 	enum pipe pipe;
1537 
1538 	for_each_pipe(dev_priv, pipe) {
1539 		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
1540 			intel_handle_vblank(dev_priv, pipe);
1541 
1542 		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
1543 			blc_event = true;
1544 
1545 		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1546 			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1547 
1548 		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1549 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1550 	}
1551 
1552 	if (blc_event || (iir & I915_ASLE_INTERRUPT))
1553 		intel_opregion_asle_intr(dev_priv);
1554 
1555 	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
1556 		gmbus_irq_handler(dev_priv);
1557 }
1558 
1559 static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
1560 					    u32 pipe_stats[I915_MAX_PIPES])
1561 {
1562 	enum pipe pipe;
1563 
1564 	for_each_pipe(dev_priv, pipe) {
1565 		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
1566 			intel_handle_vblank(dev_priv, pipe);
1567 
1568 		if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV)
1569 			flip_done_handler(dev_priv, pipe);
1570 
1571 		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1572 			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1573 
1574 		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1575 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1576 	}
1577 
1578 	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
1579 		gmbus_irq_handler(dev_priv);
1580 }
1581 
1582 static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
1583 {
1584 	u32 hotplug_status = 0, hotplug_status_mask;
1585 	int i;
1586 
1587 	if (IS_G4X(dev_priv) ||
1588 	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1589 		hotplug_status_mask = HOTPLUG_INT_STATUS_G4X |
1590 			DP_AUX_CHANNEL_MASK_INT_STATUS_G4X;
1591 	else
1592 		hotplug_status_mask = HOTPLUG_INT_STATUS_I915;
1593 
1594 	/*
1595 	 * We absolutely have to clear all the pending interrupt
1596 	 * bits in PORT_HOTPLUG_STAT. Otherwise the ISR port
1597 	 * interrupt bit won't have an edge, and the i965/g4x
1598 	 * edge triggered IIR will not notice that an interrupt
1599 	 * is still pending. We can't use PORT_HOTPLUG_EN to
1600 	 * guarantee the edge as the act of toggling the enable
1601 	 * bits can itself generate a new hotplug interrupt :(
1602 	 */
1603 	for (i = 0; i < 10; i++) {
1604 		u32 tmp = intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT) & hotplug_status_mask;
1605 
1606 		if (tmp == 0)
1607 			return hotplug_status;
1608 
1609 		hotplug_status |= tmp;
1610 		intel_uncore_write(&dev_priv->uncore, PORT_HOTPLUG_STAT, hotplug_status);
1611 	}
1612 
1613 	drm_WARN_ONCE(&dev_priv->drm, 1,
1614 		      "PORT_HOTPLUG_STAT did not clear (0x%08x)\n",
1615 		      intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT));
1616 
1617 	return hotplug_status;
1618 }
1619 
1620 static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv,
1621 				 u32 hotplug_status)
1622 {
1623 	u32 pin_mask = 0, long_mask = 0;
1624 	u32 hotplug_trigger;
1625 
1626 	if (IS_G4X(dev_priv) ||
1627 	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1628 		hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
1629 	else
1630 		hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
1631 
1632 	if (hotplug_trigger) {
1633 		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1634 				   hotplug_trigger, hotplug_trigger,
1635 				   dev_priv->display.hotplug.hpd,
1636 				   i9xx_port_hotplug_long_detect);
1637 
1638 		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1639 	}
1640 
1641 	if ((IS_G4X(dev_priv) ||
1642 	     IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
1643 	    hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
1644 		dp_aux_irq_handler(dev_priv);
1645 }
1646 
1647 static irqreturn_t valleyview_irq_handler(int irq, void *arg)
1648 {
1649 	struct drm_i915_private *dev_priv = arg;
1650 	irqreturn_t ret = IRQ_NONE;
1651 
1652 	if (!intel_irqs_enabled(dev_priv))
1653 		return IRQ_NONE;
1654 
1655 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
1656 	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1657 
1658 	do {
1659 		u32 iir, gt_iir, pm_iir;
1660 		u32 pipe_stats[I915_MAX_PIPES] = {};
1661 		u32 hotplug_status = 0;
1662 		u32 ier = 0;
1663 
1664 		gt_iir = intel_uncore_read(&dev_priv->uncore, GTIIR);
1665 		pm_iir = intel_uncore_read(&dev_priv->uncore, GEN6_PMIIR);
1666 		iir = intel_uncore_read(&dev_priv->uncore, VLV_IIR);
1667 
1668 		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
1669 			break;
1670 
1671 		ret = IRQ_HANDLED;
1672 
1673 		/*
1674 		 * Theory on interrupt generation, based on empirical evidence:
1675 		 *
1676 		 * x = ((VLV_IIR & VLV_IER) ||
1677 		 *      (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) &&
1678 		 *       (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE)));
1679 		 *
1680 		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
1681 		 * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to
1682 		 * guarantee the CPU interrupt will be raised again even if we
1683 		 * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR
1684 		 * bits this time around.
1685 		 */
1686 		intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, 0);
1687 		ier = intel_uncore_rmw(&dev_priv->uncore, VLV_IER, ~0, 0);
1688 
1689 		if (gt_iir)
1690 			intel_uncore_write(&dev_priv->uncore, GTIIR, gt_iir);
1691 		if (pm_iir)
1692 			intel_uncore_write(&dev_priv->uncore, GEN6_PMIIR, pm_iir);
1693 
1694 		if (iir & I915_DISPLAY_PORT_INTERRUPT)
1695 			hotplug_status = i9xx_hpd_irq_ack(dev_priv);
1696 
1697 		/* Call regardless, as some status bits might not be
1698 		 * signalled in iir */
1699 		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
1700 
1701 		if (iir & (I915_LPE_PIPE_A_INTERRUPT |
1702 			   I915_LPE_PIPE_B_INTERRUPT))
1703 			intel_lpe_audio_irq_handler(dev_priv);
1704 
1705 		/*
1706 		 * VLV_IIR is single buffered, and reflects the level
1707 		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
1708 		 */
1709 		if (iir)
1710 			intel_uncore_write(&dev_priv->uncore, VLV_IIR, iir);
1711 
1712 		intel_uncore_write(&dev_priv->uncore, VLV_IER, ier);
1713 		intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
1714 
1715 		if (gt_iir)
1716 			gen6_gt_irq_handler(to_gt(dev_priv), gt_iir);
1717 		if (pm_iir)
1718 			gen6_rps_irq_handler(&to_gt(dev_priv)->rps, pm_iir);
1719 
1720 		if (hotplug_status)
1721 			i9xx_hpd_irq_handler(dev_priv, hotplug_status);
1722 
1723 		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
1724 	} while (0);
1725 
1726 	pmu_irq_stats(dev_priv, ret);
1727 
1728 	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1729 
1730 	return ret;
1731 }
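
/*
 * Editor's note (illustrative): the handler above is one instance of
 * the edge re-arm pattern required by the "theory on interrupt
 * generation" comment, roughly:
 *
 *	write(VLV_MASTER_IER, 0);	kill the source of 'x'
 *	ier = rmw(VLV_IER, ~0, 0);	save IER, then clear it
 *	... ack IIR bits, run the sub-handlers ...
 *	write(VLV_IER, ier);		restore
 *	write(VLV_MASTER_IER, ENABLE);	0->1 edge re-fires if IIR != 0
 *
 * cherryview_irq_handler() below follows the same shape with
 * GEN8_MASTER_IRQ standing in for VLV_MASTER_IER.
 */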

1732 
1733 static irqreturn_t cherryview_irq_handler(int irq, void *arg)
1734 {
1735 	struct drm_i915_private *dev_priv = arg;
1736 	irqreturn_t ret = IRQ_NONE;
1737 
1738 	if (!intel_irqs_enabled(dev_priv))
1739 		return IRQ_NONE;
1740 
1741 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
1742 	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1743 
1744 	do {
1745 		u32 master_ctl, iir;
1746 		u32 pipe_stats[I915_MAX_PIPES] = {};
1747 		u32 hotplug_status = 0;
1748 		u32 ier = 0;
1749 
1750 		master_ctl = intel_uncore_read(&dev_priv->uncore, GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
1751 		iir = intel_uncore_read(&dev_priv->uncore, VLV_IIR);
1752 
1753 		if (master_ctl == 0 && iir == 0)
1754 			break;
1755 
1756 		ret = IRQ_HANDLED;
1757 
1758 		/*
1759 		 * Theory on interrupt generation, based on empirical evidence:
1760 		 *
1761 		 * x = ((VLV_IIR & VLV_IER) ||
1762 		 *      ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) &&
1763 		 *       (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL)));
1764 		 *
1765 		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
1766 		 * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to
1767 		 * guarantee the CPU interrupt will be raised again even if we
1768 		 * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL
1769 		 * bits this time around.
1770 		 */
1771 		intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, 0);
1772 		ier = intel_uncore_rmw(&dev_priv->uncore, VLV_IER, ~0, 0);
1773 
1774 		gen8_gt_irq_handler(to_gt(dev_priv), master_ctl);
1775 
1776 		if (iir & I915_DISPLAY_PORT_INTERRUPT)
1777 			hotplug_status = i9xx_hpd_irq_ack(dev_priv);
1778 
1779 		/* Call regardless, as some status bits might not be
1780 		 * signalled in iir */
1781 		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
1782 
1783 		if (iir & (I915_LPE_PIPE_A_INTERRUPT |
1784 			   I915_LPE_PIPE_B_INTERRUPT |
1785 			   I915_LPE_PIPE_C_INTERRUPT))
1786 			intel_lpe_audio_irq_handler(dev_priv);
1787 
1788 		/*
1789 		 * VLV_IIR is single buffered, and reflects the level
1790 		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
1791 		 */
1792 		if (iir)
1793 			intel_uncore_write(&dev_priv->uncore, VLV_IIR, iir);
1794 
1795 		intel_uncore_write(&dev_priv->uncore, VLV_IER, ier);
1796 		intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
1797 
1798 		if (hotplug_status)
1799 			i9xx_hpd_irq_handler(dev_priv, hotplug_status);
1800 
1801 		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
1802 	} while (0);
1803 
1804 	pmu_irq_stats(dev_priv, ret);
1805 
1806 	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1807 
1808 	return ret;
1809 }
1810 
1811 static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
1812 				u32 hotplug_trigger)
1813 {
1814 	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
1815 
1816 	/*
1817 	 * Somehow the PCH doesn't seem to really ack the interrupt to the CPU
1818 	 * unless we touch the hotplug register, even if hotplug_trigger is
1819 	 * zero. Not acking leads to "The master control interrupt lied (SDE)!"
1820 	 * errors.
1821 	 */
1822 	dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
1823 	if (!hotplug_trigger) {
1824 		u32 mask = PORTA_HOTPLUG_STATUS_MASK |
1825 			PORTD_HOTPLUG_STATUS_MASK |
1826 			PORTC_HOTPLUG_STATUS_MASK |
1827 			PORTB_HOTPLUG_STATUS_MASK;
1828 		dig_hotplug_reg &= ~mask;
1829 	}
1830 
1831 	intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, dig_hotplug_reg);
1832 	if (!hotplug_trigger)
1833 		return;
1834 
1835 	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1836 			   hotplug_trigger, dig_hotplug_reg,
1837 			   dev_priv->display.hotplug.pch_hpd,
1838 			   pch_port_hotplug_long_detect);
1839 
1840 	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1841 }
1842 
1843 static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
1844 {
1845 	enum pipe pipe;
1846 	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
1847 
1848 	ibx_hpd_irq_handler(dev_priv, hotplug_trigger);
1849 
1850 	if (pch_iir & SDE_AUDIO_POWER_MASK) {
1851 		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
1852 			       SDE_AUDIO_POWER_SHIFT);
1853 		drm_dbg(&dev_priv->drm, "PCH audio power change on port %c\n",
1854 			port_name(port));
1855 	}
1856 
1857 	if (pch_iir & SDE_AUX_MASK)
1858 		dp_aux_irq_handler(dev_priv);
1859 
1860 	if (pch_iir & SDE_GMBUS)
1861 		gmbus_irq_handler(dev_priv);
1862 
1863 	if (pch_iir & SDE_AUDIO_HDCP_MASK)
1864 		drm_dbg(&dev_priv->drm, "PCH HDCP audio interrupt\n");
1865 
1866 	if (pch_iir & SDE_AUDIO_TRANS_MASK)
1867 		drm_dbg(&dev_priv->drm, "PCH transcoder audio interrupt\n");
1868 
1869 	if (pch_iir & SDE_POISON)
1870 		drm_err(&dev_priv->drm, "PCH poison interrupt\n");
1871 
1872 	if (pch_iir & SDE_FDI_MASK) {
1873 		for_each_pipe(dev_priv, pipe)
1874 			drm_dbg(&dev_priv->drm, "  pipe %c FDI IIR: 0x%08x\n",
1875 				pipe_name(pipe),
1876 				intel_uncore_read(&dev_priv->uncore, FDI_RX_IIR(pipe)));
1877 	}
1878 
1879 	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
1880 		drm_dbg(&dev_priv->drm, "PCH transcoder CRC done interrupt\n");
1881 
1882 	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
1883 		drm_dbg(&dev_priv->drm,
1884 			"PCH transcoder CRC error interrupt\n");
1885 
1886 	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
1887 		intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A);
1888 
1889 	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
1890 		intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_B);
1891 }
1892 
1893 static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
1894 {
1895 	u32 err_int = intel_uncore_read(&dev_priv->uncore, GEN7_ERR_INT);
1896 	enum pipe pipe;
1897 
1898 	if (err_int & ERR_INT_POISON)
1899 		drm_err(&dev_priv->drm, "Poison interrupt\n");
1900 
1901 	for_each_pipe(dev_priv, pipe) {
1902 		if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
1903 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1904 
1905 		if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
1906 			if (IS_IVYBRIDGE(dev_priv))
1907 				ivb_pipe_crc_irq_handler(dev_priv, pipe);
1908 			else
1909 				hsw_pipe_crc_irq_handler(dev_priv, pipe);
1910 		}
1911 	}
1912 
1913 	intel_uncore_write(&dev_priv->uncore, GEN7_ERR_INT, err_int);
1914 }
1915 
1916 static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
1917 {
1918 	u32 serr_int = intel_uncore_read(&dev_priv->uncore, SERR_INT);
1919 	enum pipe pipe;
1920 
1921 	if (serr_int & SERR_INT_POISON)
1922 		drm_err(&dev_priv->drm, "PCH poison interrupt\n");
1923 
1924 	for_each_pipe(dev_priv, pipe)
1925 		if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe))
1926 			intel_pch_fifo_underrun_irq_handler(dev_priv, pipe);
1927 
1928 	intel_uncore_write(&dev_priv->uncore, SERR_INT, serr_int);
1929 }
1930 
1931 static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
1932 {
1933 	enum pipe pipe;
1934 	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
1935 
1936 	ibx_hpd_irq_handler(dev_priv, hotplug_trigger);
1937 
1938 	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
1939 		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
1940 			       SDE_AUDIO_POWER_SHIFT_CPT);
1941 		drm_dbg(&dev_priv->drm, "PCH audio power change on port %c\n",
1942 			port_name(port));
1943 	}
1944 
1945 	if (pch_iir & SDE_AUX_MASK_CPT)
1946 		dp_aux_irq_handler(dev_priv);
1947 
1948 	if (pch_iir & SDE_GMBUS_CPT)
1949 		gmbus_irq_handler(dev_priv);
1950 
1951 	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
1952 		drm_dbg(&dev_priv->drm, "Audio CP request interrupt\n");
1953 
1954 	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
1955 		drm_dbg(&dev_priv->drm, "Audio CP change interrupt\n");
1956 
1957 	if (pch_iir & SDE_FDI_MASK_CPT) {
1958 		for_each_pipe(dev_priv, pipe)
1959 			drm_dbg(&dev_priv->drm, "  pipe %c FDI IIR: 0x%08x\n",
1960 				pipe_name(pipe),
1961 				intel_uncore_read(&dev_priv->uncore, FDI_RX_IIR(pipe)));
1962 	}
1963 
1964 	if (pch_iir & SDE_ERROR_CPT)
1965 		cpt_serr_int_handler(dev_priv);
1966 }
1967 
1968 static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
1969 {
1970 	u32 ddi_hotplug_trigger = pch_iir & SDE_DDI_HOTPLUG_MASK_ICP;
1971 	u32 tc_hotplug_trigger = pch_iir & SDE_TC_HOTPLUG_MASK_ICP;
1972 	u32 pin_mask = 0, long_mask = 0;
1973 
1974 	if (ddi_hotplug_trigger) {
1975 		u32 dig_hotplug_reg;
1976 
1977 		dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, SHOTPLUG_CTL_DDI, 0, 0);
1978 
1979 		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1980 				   ddi_hotplug_trigger, dig_hotplug_reg,
1981 				   dev_priv->display.hotplug.pch_hpd,
1982 				   icp_ddi_port_hotplug_long_detect);
1983 	}
1984 
1985 	if (tc_hotplug_trigger) {
1986 		u32 dig_hotplug_reg;
1987 
1988 		dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, SHOTPLUG_CTL_TC, 0, 0);
1989 
1990 		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1991 				   tc_hotplug_trigger, dig_hotplug_reg,
1992 				   dev_priv->display.hotplug.pch_hpd,
1993 				   icp_tc_port_hotplug_long_detect);
1994 	}
1995 
1996 	if (pin_mask)
1997 		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1998 
1999 	if (pch_iir & SDE_GMBUS_ICP)
2000 		gmbus_irq_handler(dev_priv);
2001 }
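
/*
 * Editor's note on the intel_uncore_rmw(..., 0, 0) idiom above
 * (illustrative): with both the clear and set masks zero the helper
 * reduces to
 *
 *	old = read(reg);
 *	write(reg, (old & ~clear) | set);	== write(reg, old)
 *	return old;
 *
 * which returns the latched value while the write-back acks any
 * write-one-to-clear status bits held in the same register.
 */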
2002 
2003 static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
2004 {
2005 	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
2006 		~SDE_PORTE_HOTPLUG_SPT;
2007 	u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
2008 	u32 pin_mask = 0, long_mask = 0;
2009 
2010 	if (hotplug_trigger) {
2011 		u32 dig_hotplug_reg;
2012 
2013 		dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG, 0, 0);
2014 
2015 		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2016 				   hotplug_trigger, dig_hotplug_reg,
2017 				   dev_priv->display.hotplug.pch_hpd,
2018 				   spt_port_hotplug_long_detect);
2019 	}
2020 
2021 	if (hotplug2_trigger) {
2022 		u32 dig_hotplug_reg;
2023 
2024 		dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG2, 0, 0);
2025 
2026 		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2027 				   hotplug2_trigger, dig_hotplug_reg,
2028 				   dev_priv->display.hotplug.pch_hpd,
2029 				   spt_port_hotplug2_long_detect);
2030 	}
2031 
2032 	if (pin_mask)
2033 		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2034 
2035 	if (pch_iir & SDE_GMBUS_CPT)
2036 		gmbus_irq_handler(dev_priv);
2037 }
2038 
2039 static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv,
2040 				u32 hotplug_trigger)
2041 {
2042 	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2043 
2044 	dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL, 0, 0);
2045 
2046 	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2047 			   hotplug_trigger, dig_hotplug_reg,
2048 			   dev_priv->display.hotplug.hpd,
2049 			   ilk_port_hotplug_long_detect);
2050 
2051 	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2052 }
2053 
2054 static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
2055 				    u32 de_iir)
2056 {
2057 	enum pipe pipe;
2058 	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;
2059 
2060 	if (hotplug_trigger)
2061 		ilk_hpd_irq_handler(dev_priv, hotplug_trigger);
2062 
2063 	if (de_iir & DE_AUX_CHANNEL_A)
2064 		dp_aux_irq_handler(dev_priv);
2065 
2066 	if (de_iir & DE_GSE)
2067 		intel_opregion_asle_intr(dev_priv);
2068 
2069 	if (de_iir & DE_POISON)
2070 		drm_err(&dev_priv->drm, "Poison interrupt\n");
2071 
2072 	for_each_pipe(dev_priv, pipe) {
2073 		if (de_iir & DE_PIPE_VBLANK(pipe))
2074 			intel_handle_vblank(dev_priv, pipe);
2075 
2076 		if (de_iir & DE_PLANE_FLIP_DONE(pipe))
2077 			flip_done_handler(dev_priv, pipe);
2078 
2079 		if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
2080 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2081 
2082 		if (de_iir & DE_PIPE_CRC_DONE(pipe))
2083 			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
2084 	}
2085 
2086 	/* check event from PCH */
2087 	if (de_iir & DE_PCH_EVENT) {
2088 		u32 pch_iir = intel_uncore_read(&dev_priv->uncore, SDEIIR);
2089 
2090 		if (HAS_PCH_CPT(dev_priv))
2091 			cpt_irq_handler(dev_priv, pch_iir);
2092 		else
2093 			ibx_irq_handler(dev_priv, pch_iir);
2094 
2095 		/* should clear the PCH hotplug event before clearing the CPU irq */
2096 		intel_uncore_write(&dev_priv->uncore, SDEIIR, pch_iir);
2097 	}
2098 
2099 	if (DISPLAY_VER(dev_priv) == 5 && de_iir & DE_PCU_EVENT)
2100 		gen5_rps_irq_handler(&to_gt(dev_priv)->rps);
2101 }
2102 
2103 static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
2104 				    u32 de_iir)
2105 {
2106 	enum pipe pipe;
2107 	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;
2108 
2109 	if (hotplug_trigger)
2110 		ilk_hpd_irq_handler(dev_priv, hotplug_trigger);
2111 
2112 	if (de_iir & DE_ERR_INT_IVB)
2113 		ivb_err_int_handler(dev_priv);
2114 
2115 	if (de_iir & DE_AUX_CHANNEL_A_IVB)
2116 		dp_aux_irq_handler(dev_priv);
2117 
2118 	if (de_iir & DE_GSE_IVB)
2119 		intel_opregion_asle_intr(dev_priv);
2120 
2121 	for_each_pipe(dev_priv, pipe) {
2122 		if (de_iir & DE_PIPE_VBLANK_IVB(pipe))
2123 			intel_handle_vblank(dev_priv, pipe);
2124 
2125 		if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe))
2126 			flip_done_handler(dev_priv, pipe);
2127 	}
2128 
2129 	/* check event from PCH */
2130 	if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) {
2131 		u32 pch_iir = intel_uncore_read(&dev_priv->uncore, SDEIIR);
2132 
2133 		cpt_irq_handler(dev_priv, pch_iir);
2134 
2135 		/* clear the PCH hotplug event before clearing the CPU irq */
2136 		intel_uncore_write(&dev_priv->uncore, SDEIIR, pch_iir);
2137 	}
2138 }
2139 
2140 /*
2141  * To handle irqs with the minimum potential races with fresh interrupts, we:
2142  * 1 - Disable Master Interrupt Control.
2143  * 2 - Find the source(s) of the interrupt.
2144  * 3 - Clear the Interrupt Identity bits (IIR).
2145  * 4 - Process the interrupt(s) that had bits set in the IIRs.
2146  * 5 - Re-enable Master Interrupt Control.
2147  */
2148 static irqreturn_t ilk_irq_handler(int irq, void *arg)
2149 {
2150 	struct drm_i915_private *i915 = arg;
2151 	void __iomem * const regs = i915->uncore.regs;
2152 	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
2153 	irqreturn_t ret = IRQ_NONE;
2154 
2155 	if (unlikely(!intel_irqs_enabled(i915)))
2156 		return IRQ_NONE;
2157 
2158 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
2159 	disable_rpm_wakeref_asserts(&i915->runtime_pm);
2160 
2161 	/* disable master interrupt before clearing iir  */
2162 	de_ier = raw_reg_read(regs, DEIER);
2163 	raw_reg_write(regs, DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
2164 
2165 	/* Disable south interrupts. We'll only write to SDEIIR once, so further
2166 	 * interrupts will be stored on its back queue, and then we'll be
2167 	 * able to process them after we restore SDEIER (as soon as we restore
2168 	 * it, we'll get an interrupt if SDEIIR still has something to process
2169 	 * due to its back queue). */
2170 	if (!HAS_PCH_NOP(i915)) {
2171 		sde_ier = raw_reg_read(regs, SDEIER);
2172 		raw_reg_write(regs, SDEIER, 0);
2173 	}
2174 
2175 	/* Find, clear, then process each source of interrupt */
2176 
2177 	gt_iir = raw_reg_read(regs, GTIIR);
2178 	if (gt_iir) {
2179 		raw_reg_write(regs, GTIIR, gt_iir);
2180 		if (GRAPHICS_VER(i915) >= 6)
2181 			gen6_gt_irq_handler(to_gt(i915), gt_iir);
2182 		else
2183 			gen5_gt_irq_handler(to_gt(i915), gt_iir);
2184 		ret = IRQ_HANDLED;
2185 	}
2186 
2187 	de_iir = raw_reg_read(regs, DEIIR);
2188 	if (de_iir) {
2189 		raw_reg_write(regs, DEIIR, de_iir);
2190 		if (DISPLAY_VER(i915) >= 7)
2191 			ivb_display_irq_handler(i915, de_iir);
2192 		else
2193 			ilk_display_irq_handler(i915, de_iir);
2194 		ret = IRQ_HANDLED;
2195 	}
2196 
2197 	if (GRAPHICS_VER(i915) >= 6) {
2198 		u32 pm_iir = raw_reg_read(regs, GEN6_PMIIR);
2199 		if (pm_iir) {
2200 			raw_reg_write(regs, GEN6_PMIIR, pm_iir);
2201 			gen6_rps_irq_handler(&to_gt(i915)->rps, pm_iir);
2202 			ret = IRQ_HANDLED;
2203 		}
2204 	}
2205 
2206 	raw_reg_write(regs, DEIER, de_ier);
2207 	if (sde_ier)
2208 		raw_reg_write(regs, SDEIER, sde_ier);
2209 
2210 	pmu_irq_stats(i915, ret);
2211 
2212 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
2213 	enable_rpm_wakeref_asserts(&i915->runtime_pm);
2214 
2215 	return ret;
2216 }
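
/*
 * Editor's sketch (illustrative): the SDEIER dance in the handler
 * above serializes PCH interrupts behind a single SDEIIR write:
 *
 *	sde_ier = read(SDEIER);
 *	write(SDEIER, 0);		south stops forwarding
 *	... read SDEIIR once, handle, write it back to ack ...
 *	write(SDEIER, sde_ier);		anything queued meanwhile
 *					re-raises immediately
 *
 * This is what makes the single SDEIIR read/ack in the ibx/cpt paths
 * race-free against fresh PCH events.
 */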
2217 
2218 static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv,
2219 				u32 hotplug_trigger)
2220 {
2221 	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2222 
2223 	dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG, 0, 0);
2224 
2225 	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2226 			   hotplug_trigger, dig_hotplug_reg,
2227 			   dev_priv->display.hotplug.hpd,
2228 			   bxt_port_hotplug_long_detect);
2229 
2230 	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2231 }
2232 
2233 static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
2234 {
2235 	u32 pin_mask = 0, long_mask = 0;
2236 	u32 trigger_tc = iir & GEN11_DE_TC_HOTPLUG_MASK;
2237 	u32 trigger_tbt = iir & GEN11_DE_TBT_HOTPLUG_MASK;
2238 
2239 	if (trigger_tc) {
2240 		u32 dig_hotplug_reg;
2241 
2242 		dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL, 0, 0);
2243 
2244 		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2245 				   trigger_tc, dig_hotplug_reg,
2246 				   dev_priv->display.hotplug.hpd,
2247 				   gen11_port_hotplug_long_detect);
2248 	}
2249 
2250 	if (trigger_tbt) {
2251 		u32 dig_hotplug_reg;
2252 
2253 		dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL, 0, 0);
2254 
2255 		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2256 				   trigger_tbt, dig_hotplug_reg,
2257 				   dev_priv->display.hotplug.hpd,
2258 				   gen11_port_hotplug_long_detect);
2259 	}
2260 
2261 	if (pin_mask)
2262 		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2263 	else
2264 		drm_err(&dev_priv->drm,
2265 			"Unexpected DE HPD interrupt 0x%08x\n", iir);
2266 }
2267 
2268 static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
2269 {
2270 	u32 mask;
2271 
2272 	if (DISPLAY_VER(dev_priv) >= 13)
2273 		return TGL_DE_PORT_AUX_DDIA |
2274 			TGL_DE_PORT_AUX_DDIB |
2275 			TGL_DE_PORT_AUX_DDIC |
2276 			XELPD_DE_PORT_AUX_DDID |
2277 			XELPD_DE_PORT_AUX_DDIE |
2278 			TGL_DE_PORT_AUX_USBC1 |
2279 			TGL_DE_PORT_AUX_USBC2 |
2280 			TGL_DE_PORT_AUX_USBC3 |
2281 			TGL_DE_PORT_AUX_USBC4;
2282 	else if (DISPLAY_VER(dev_priv) >= 12)
2283 		return TGL_DE_PORT_AUX_DDIA |
2284 			TGL_DE_PORT_AUX_DDIB |
2285 			TGL_DE_PORT_AUX_DDIC |
2286 			TGL_DE_PORT_AUX_USBC1 |
2287 			TGL_DE_PORT_AUX_USBC2 |
2288 			TGL_DE_PORT_AUX_USBC3 |
2289 			TGL_DE_PORT_AUX_USBC4 |
2290 			TGL_DE_PORT_AUX_USBC5 |
2291 			TGL_DE_PORT_AUX_USBC6;
2292 
2293 
2294 	mask = GEN8_AUX_CHANNEL_A;
2295 	if (DISPLAY_VER(dev_priv) >= 9)
2296 		mask |= GEN9_AUX_CHANNEL_B |
2297 			GEN9_AUX_CHANNEL_C |
2298 			GEN9_AUX_CHANNEL_D;
2299 
2300 	if (DISPLAY_VER(dev_priv) == 11) {
2301 		mask |= ICL_AUX_CHANNEL_F;
2302 		mask |= ICL_AUX_CHANNEL_E;
2303 	}
2304 
2305 	return mask;
2306 }
2307 
2308 static u32 gen8_de_pipe_fault_mask(struct drm_i915_private *dev_priv)
2309 {
2310 	if (DISPLAY_VER(dev_priv) >= 13 || HAS_D12_PLANE_MINIMIZATION(dev_priv))
2311 		return RKL_DE_PIPE_IRQ_FAULT_ERRORS;
2312 	else if (DISPLAY_VER(dev_priv) >= 11)
2313 		return GEN11_DE_PIPE_IRQ_FAULT_ERRORS;
2314 	else if (DISPLAY_VER(dev_priv) >= 9)
2315 		return GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
2316 	else
2317 		return GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
2318 }
2319 
2320 static void
2321 gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
2322 {
2323 	bool found = false;
2324 
2325 	if (iir & GEN8_DE_MISC_GSE) {
2326 		intel_opregion_asle_intr(dev_priv);
2327 		found = true;
2328 	}
2329 
2330 	if (iir & GEN8_DE_EDP_PSR) {
2331 		struct intel_encoder *encoder;
2332 		u32 psr_iir;
2333 		i915_reg_t iir_reg;
2334 
2335 		for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
2336 			struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2337 
2338 			if (DISPLAY_VER(dev_priv) >= 12)
2339 				iir_reg = TRANS_PSR_IIR(intel_dp->psr.transcoder);
2340 			else
2341 				iir_reg = EDP_PSR_IIR;
2342 
2343 			psr_iir = intel_uncore_rmw(&dev_priv->uncore, iir_reg, 0, 0);
2344 
2345 			if (psr_iir)
2346 				found = true;
2347 
2348 			intel_psr_irq_handler(intel_dp, psr_iir);
2349 
2350 			/* prior to GEN12 there is only one EDP PSR */
2351 			if (DISPLAY_VER(dev_priv) < 12)
2352 				break;
2353 		}
2354 	}
2355 
2356 	if (!found)
2357 		drm_err(&dev_priv->drm, "Unexpected DE Misc interrupt\n");
2358 }
2359 
2360 static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv,
2361 					   u32 te_trigger)
2362 {
2363 	enum pipe pipe = INVALID_PIPE;
2364 	enum transcoder dsi_trans;
2365 	enum port port;
2366 	u32 val, tmp;
2367 
2368 	/*
2369 	 * In case of dual link, TE comes from DSI_1;
2370 	 * this checks whether dual link is enabled.
2371 	 */
2372 	val = intel_uncore_read(&dev_priv->uncore, TRANS_DDI_FUNC_CTL2(TRANSCODER_DSI_0));
2373 	val &= PORT_SYNC_MODE_ENABLE;
2374 
2375 	/*
2376 	 * If dual link is enabled, read the DSI_0
2377 	 * transcoder registers.
2378 	 */
2379 	port = ((te_trigger & DSI1_TE && val) || (te_trigger & DSI0_TE)) ?
2380 						  PORT_A : PORT_B;
2381 	dsi_trans = (port == PORT_A) ? TRANSCODER_DSI_0 : TRANSCODER_DSI_1;
2382 
2383 	/* Check if DSI configured in command mode */
2384 	val = intel_uncore_read(&dev_priv->uncore, DSI_TRANS_FUNC_CONF(dsi_trans));
2385 	val = val & OP_MODE_MASK;
2386 
2387 	if (val != CMD_MODE_NO_GATE && val != CMD_MODE_TE_GATE) {
2388 		drm_err(&dev_priv->drm, "DSI transcoder not configured in command mode\n");
2389 		return;
2390 	}
2391 
2392 	/* Get PIPE for handling VBLANK event */
2393 	val = intel_uncore_read(&dev_priv->uncore, TRANS_DDI_FUNC_CTL(dsi_trans));
2394 	switch (val & TRANS_DDI_EDP_INPUT_MASK) {
2395 	case TRANS_DDI_EDP_INPUT_A_ON:
2396 		pipe = PIPE_A;
2397 		break;
2398 	case TRANS_DDI_EDP_INPUT_B_ONOFF:
2399 		pipe = PIPE_B;
2400 		break;
2401 	case TRANS_DDI_EDP_INPUT_C_ONOFF:
2402 		pipe = PIPE_C;
2403 		break;
2404 	default:
2405 		drm_err(&dev_priv->drm, "Invalid PIPE\n");
2406 		return;
2407 	}
2408 
2409 	intel_handle_vblank(dev_priv, pipe);
2410 
2411 	/* clear TE in dsi IIR */
2412 	port = (te_trigger & DSI1_TE) ? PORT_B : PORT_A;
2413 	tmp = intel_uncore_rmw(&dev_priv->uncore, DSI_INTR_IDENT_REG(port), 0, 0);
2414 }
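
/*
 * Editor's note (illustrative): the port selection above, written as
 * a table:
 *
 *	TE trigger	dual link?	transcoder read
 *	DSI0_TE		either		TRANSCODER_DSI_0 (PORT_A)
 *	DSI1_TE		yes		TRANSCODER_DSI_0 (PORT_A)
 *	DSI1_TE		no		TRANSCODER_DSI_1 (PORT_B)
 *
 * The final ack, however, always goes to the IIR of the port that
 * actually triggered (DSI1_TE -> PORT_B).
 */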
2415 
2416 static u32 gen8_de_pipe_flip_done_mask(struct drm_i915_private *i915)
2417 {
2418 	if (DISPLAY_VER(i915) >= 9)
2419 		return GEN9_PIPE_PLANE1_FLIP_DONE;
2420 	else
2421 		return GEN8_PIPE_PRIMARY_FLIP_DONE;
2422 }
2423 
2424 u32 gen8_de_pipe_underrun_mask(struct drm_i915_private *dev_priv)
2425 {
2426 	u32 mask = GEN8_PIPE_FIFO_UNDERRUN;
2427 
2428 	if (DISPLAY_VER(dev_priv) >= 13)
2429 		mask |= XELPD_PIPE_SOFT_UNDERRUN |
2430 			XELPD_PIPE_HARD_UNDERRUN;
2431 
2432 	return mask;
2433 }
2434 
2435 static irqreturn_t
2436 gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
2437 {
2438 	irqreturn_t ret = IRQ_NONE;
2439 	u32 iir;
2440 	enum pipe pipe;
2441 
2442 	drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_DISPLAY(dev_priv));
2443 
2444 	if (master_ctl & GEN8_DE_MISC_IRQ) {
2445 		iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_MISC_IIR);
2446 		if (iir) {
2447 			intel_uncore_write(&dev_priv->uncore, GEN8_DE_MISC_IIR, iir);
2448 			ret = IRQ_HANDLED;
2449 			gen8_de_misc_irq_handler(dev_priv, iir);
2450 		} else {
2451 			drm_err(&dev_priv->drm,
2452 				"The master control interrupt lied (DE MISC)!\n");
2453 		}
2454 	}
2455 
2456 	if (DISPLAY_VER(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) {
2457 		iir = intel_uncore_read(&dev_priv->uncore, GEN11_DE_HPD_IIR);
2458 		if (iir) {
2459 			intel_uncore_write(&dev_priv->uncore, GEN11_DE_HPD_IIR, iir);
2460 			ret = IRQ_HANDLED;
2461 			gen11_hpd_irq_handler(dev_priv, iir);
2462 		} else {
2463 			drm_err(&dev_priv->drm,
2464 				"The master control interrupt lied (DE HPD)!\n");
2465 		}
2466 	}
2467 
2468 	if (master_ctl & GEN8_DE_PORT_IRQ) {
2469 		iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PORT_IIR);
2470 		if (iir) {
2471 			bool found = false;
2472 
2473 			intel_uncore_write(&dev_priv->uncore, GEN8_DE_PORT_IIR, iir);
2474 			ret = IRQ_HANDLED;
2475 
2476 			if (iir & gen8_de_port_aux_mask(dev_priv)) {
2477 				dp_aux_irq_handler(dev_priv);
2478 				found = true;
2479 			}
2480 
2481 			if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
2482 				u32 hotplug_trigger = iir & BXT_DE_PORT_HOTPLUG_MASK;
2483 
2484 				if (hotplug_trigger) {
2485 					bxt_hpd_irq_handler(dev_priv, hotplug_trigger);
2486 					found = true;
2487 				}
2488 			} else if (IS_BROADWELL(dev_priv)) {
2489 				u32 hotplug_trigger = iir & BDW_DE_PORT_HOTPLUG_MASK;
2490 
2491 				if (hotplug_trigger) {
2492 					ilk_hpd_irq_handler(dev_priv, hotplug_trigger);
2493 					found = true;
2494 				}
2495 			}
2496 
2497 			if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
2498 			    (iir & BXT_DE_PORT_GMBUS)) {
2499 				gmbus_irq_handler(dev_priv);
2500 				found = true;
2501 			}
2502 
2503 			if (DISPLAY_VER(dev_priv) >= 11) {
2504 				u32 te_trigger = iir & (DSI0_TE | DSI1_TE);
2505 
2506 				if (te_trigger) {
2507 					gen11_dsi_te_interrupt_handler(dev_priv, te_trigger);
2508 					found = true;
2509 				}
2510 			}
2511 
2512 			if (!found)
2513 				drm_err(&dev_priv->drm,
2514 					"Unexpected DE Port interrupt\n");
2515 		} else {
2516 			drm_err(&dev_priv->drm,
2517 				"The master control interrupt lied (DE PORT)!\n");
2518 		}
2519 	}
2520 
2521 	for_each_pipe(dev_priv, pipe) {
2522 		u32 fault_errors;
2523 
2524 		if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
2525 			continue;
2526 
2527 		iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PIPE_IIR(pipe));
2528 		if (!iir) {
2529 			drm_err(&dev_priv->drm,
2530 				"The master control interrupt lied (DE PIPE)!\n");
2531 			continue;
2532 		}
2533 
2534 		ret = IRQ_HANDLED;
2535 		intel_uncore_write(&dev_priv->uncore, GEN8_DE_PIPE_IIR(pipe), iir);
2536 
2537 		if (iir & GEN8_PIPE_VBLANK)
2538 			intel_handle_vblank(dev_priv, pipe);
2539 
2540 		if (iir & gen8_de_pipe_flip_done_mask(dev_priv))
2541 			flip_done_handler(dev_priv, pipe);
2542 
2543 		if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
2544 			hsw_pipe_crc_irq_handler(dev_priv, pipe);
2545 
2546 		if (iir & gen8_de_pipe_underrun_mask(dev_priv))
2547 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2548 
2549 		fault_errors = iir & gen8_de_pipe_fault_mask(dev_priv);
2550 		if (fault_errors)
2551 			drm_err(&dev_priv->drm,
2552 				"Fault errors on pipe %c: 0x%08x\n",
2553 				pipe_name(pipe),
2554 				fault_errors);
2555 	}
2556 
2557 	if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) &&
2558 	    master_ctl & GEN8_DE_PCH_IRQ) {
2559 		/*
2560 		 * FIXME(BDW): Assume for now that the new interrupt handling
2561 		 * scheme also closed the SDE interrupt handling race we've seen
2562 		 * on older pch-split platforms. But this needs testing.
2563 		 */
2564 		iir = intel_uncore_read(&dev_priv->uncore, SDEIIR);
2565 		if (iir) {
2566 			intel_uncore_write(&dev_priv->uncore, SDEIIR, iir);
2567 			ret = IRQ_HANDLED;
2568 
2569 			if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
2570 				icp_irq_handler(dev_priv, iir);
2571 			else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
2572 				spt_irq_handler(dev_priv, iir);
2573 			else
2574 				cpt_irq_handler(dev_priv, iir);
2575 		} else {
2576 			/*
2577 			 * Like on previous PCH there seems to be something
2578 			 * fishy going on with forwarding PCH interrupts.
2579 			 */
2580 			drm_dbg(&dev_priv->drm,
2581 				"The master control interrupt lied (SDE)!\n");
2582 		}
2583 	}
2584 
2585 	return ret;
2586 }
2587 
2588 static inline u32 gen8_master_intr_disable(void __iomem * const regs)
2589 {
2590 	raw_reg_write(regs, GEN8_MASTER_IRQ, 0);
2591 
2592 	/*
2593 	 * Now with master disabled, get a sample of level indications
2594 	 * for this interrupt. Indications will be cleared on related acks.
2595 	 * New indications can and will light up during processing,
2596 	 * and will generate new interrupt after enabling master.
2597 	 */
2598 	return raw_reg_read(regs, GEN8_MASTER_IRQ);
2599 }
2600 
2601 static inline void gen8_master_intr_enable(void __iomem * const regs)
2602 {
2603 	raw_reg_write(regs, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
2604 }
2605 
2606 static irqreturn_t gen8_irq_handler(int irq, void *arg)
2607 {
2608 	struct drm_i915_private *dev_priv = arg;
2609 	void __iomem * const regs = dev_priv->uncore.regs;
2610 	u32 master_ctl;
2611 
2612 	if (!intel_irqs_enabled(dev_priv))
2613 		return IRQ_NONE;
2614 
2615 	master_ctl = gen8_master_intr_disable(regs);
2616 	if (!master_ctl) {
2617 		gen8_master_intr_enable(regs);
2618 		return IRQ_NONE;
2619 	}
2620 
2621 	/* Find, queue (onto bottom-halves), then clear each source */
2622 	gen8_gt_irq_handler(to_gt(dev_priv), master_ctl);
2623 
2624 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
2625 	if (master_ctl & ~GEN8_GT_IRQS) {
2626 		disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
2627 		gen8_de_irq_handler(dev_priv, master_ctl);
2628 		enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
2629 	}
2630 
2631 	gen8_master_intr_enable(regs);
2632 
2633 	pmu_irq_stats(dev_priv, IRQ_HANDLED);
2634 
2635 	return IRQ_HANDLED;
2636 }
2637 
2638 static u32
2639 gen11_gu_misc_irq_ack(struct drm_i915_private *i915, const u32 master_ctl)
2640 {
2641 	void __iomem * const regs = i915->uncore.regs;
2642 	u32 iir;
2643 
2644 	if (!(master_ctl & GEN11_GU_MISC_IRQ))
2645 		return 0;
2646 
2647 	iir = raw_reg_read(regs, GEN11_GU_MISC_IIR);
2648 	if (likely(iir))
2649 		raw_reg_write(regs, GEN11_GU_MISC_IIR, iir);
2650 
2651 	return iir;
2652 }
2653 
2654 static void
2655 gen11_gu_misc_irq_handler(struct drm_i915_private *i915, const u32 iir)
2656 {
2657 	if (iir & GEN11_GU_MISC_GSE)
2658 		intel_opregion_asle_intr(i915);
2659 }
2660 
2661 static inline u32 gen11_master_intr_disable(void __iomem * const regs)
2662 {
2663 	raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0);
2664 
2665 	/*
2666 	 * Now with master disabled, get a sample of level indications
2667 	 * for this interrupt. Indications will be cleared on related acks.
2668 	 * New indications can and will light up during processing,
2669 	 * and will generate new interrupt after enabling master.
2670 	 */
2671 	return raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
2672 }
2673 
2674 static inline void gen11_master_intr_enable(void __iomem * const regs)
2675 {
2676 	raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ);
2677 }
2678 
2679 static void
2680 gen11_display_irq_handler(struct drm_i915_private *i915)
2681 {
2682 	void __iomem * const regs = i915->uncore.regs;
2683 	const u32 disp_ctl = raw_reg_read(regs, GEN11_DISPLAY_INT_CTL);
2684 
2685 	disable_rpm_wakeref_asserts(&i915->runtime_pm);
2686 	/*
2687 	 * GEN11_DISPLAY_INT_CTL has same format as GEN8_MASTER_IRQ
2688 	 * for the display related bits.
2689 	 */
2690 	raw_reg_write(regs, GEN11_DISPLAY_INT_CTL, 0x0);
2691 	gen8_de_irq_handler(i915, disp_ctl);
2692 	raw_reg_write(regs, GEN11_DISPLAY_INT_CTL,
2693 		      GEN11_DISPLAY_IRQ_ENABLE);
2694 
2695 	enable_rpm_wakeref_asserts(&i915->runtime_pm);
2696 }
2697 
2698 static irqreturn_t gen11_irq_handler(int irq, void *arg)
2699 {
2700 	struct drm_i915_private *i915 = arg;
2701 	void __iomem * const regs = i915->uncore.regs;
2702 	struct intel_gt *gt = to_gt(i915);
2703 	u32 master_ctl;
2704 	u32 gu_misc_iir;
2705 
2706 	if (!intel_irqs_enabled(i915))
2707 		return IRQ_NONE;
2708 
2709 	master_ctl = gen11_master_intr_disable(regs);
2710 	if (!master_ctl) {
2711 		gen11_master_intr_enable(regs);
2712 		return IRQ_NONE;
2713 	}
2714 
2715 	/* Find, queue (onto bottom-halves), then clear each source */
2716 	gen11_gt_irq_handler(gt, master_ctl);
2717 
2718 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
2719 	if (master_ctl & GEN11_DISPLAY_IRQ)
2720 		gen11_display_irq_handler(i915);
2721 
2722 	gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl);
2723 
2724 	gen11_master_intr_enable(regs);
2725 
2726 	gen11_gu_misc_irq_handler(i915, gu_misc_iir);
2727 
2728 	pmu_irq_stats(i915, IRQ_HANDLED);
2729 
2730 	return IRQ_HANDLED;
2731 }
2732 
2733 static inline u32 dg1_master_intr_disable(void __iomem * const regs)
2734 {
2735 	u32 val;
2736 
2737 	/* First disable interrupts */
2738 	raw_reg_write(regs, DG1_MSTR_TILE_INTR, 0);
2739 
2740 	/* Get the indication levels and ack the master unit */
2741 	val = raw_reg_read(regs, DG1_MSTR_TILE_INTR);
2742 	if (unlikely(!val))
2743 		return 0;
2744 
2745 	raw_reg_write(regs, DG1_MSTR_TILE_INTR, val);
2746 
2747 	return val;
2748 }
2749 
2750 static inline void dg1_master_intr_enable(void __iomem * const regs)
2751 {
2752 	raw_reg_write(regs, DG1_MSTR_TILE_INTR, DG1_MSTR_IRQ);
2753 }
2754 
2755 static irqreturn_t dg1_irq_handler(int irq, void *arg)
2756 {
2757 	struct drm_i915_private * const i915 = arg;
2758 	struct intel_gt *gt = to_gt(i915);
2759 	void __iomem * const regs = gt->uncore->regs;
2760 	u32 master_tile_ctl, master_ctl;
2761 	u32 gu_misc_iir;
2762 
2763 	if (!intel_irqs_enabled(i915))
2764 		return IRQ_NONE;
2765 
2766 	master_tile_ctl = dg1_master_intr_disable(regs);
2767 	if (!master_tile_ctl) {
2768 		dg1_master_intr_enable(regs);
2769 		return IRQ_NONE;
2770 	}
2771 
2772 	/* FIXME: we only support tile 0 for now. */
2773 	if (master_tile_ctl & DG1_MSTR_TILE(0)) {
2774 		master_ctl = raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
2775 		raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, master_ctl);
2776 	} else {
2777 		drm_err(&i915->drm, "Tile not supported: 0x%08x\n", master_tile_ctl);
2778 		dg1_master_intr_enable(regs);
2779 		return IRQ_NONE;
2780 	}
2781 
2782 	gen11_gt_irq_handler(gt, master_ctl);
2783 
2784 	if (master_ctl & GEN11_DISPLAY_IRQ)
2785 		gen11_display_irq_handler(i915);
2786 
2787 	gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl);
2788 
2789 	dg1_master_intr_enable(regs);
2790 
2791 	gen11_gu_misc_irq_handler(i915, gu_misc_iir);
2792 
2793 	pmu_irq_stats(i915, IRQ_HANDLED);
2794 
2795 	return IRQ_HANDLED;
2796 }
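
/*
 * Editor's sketch of how the tile-0 FIXME above might eventually be
 * lifted (hypothetical; tile_regs() and 'tiles' are made-up names):
 *
 *	unsigned long tiles = master_tile_ctl & DG1_MSTR_TILE_MASK;
 *	for_each_set_bit(tile, &tiles, MAX_TILES) {
 *		void __iomem *tregs = tile_regs(i915, tile);
 *		master_ctl = raw_reg_read(tregs, GEN11_GFX_MSTR_IRQ);
 *		raw_reg_write(tregs, GEN11_GFX_MSTR_IRQ, master_ctl);
 *		... dispatch per tile ...
 *	}
 */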
2797 
2798 /* Called from drm generic code, passed 'crtc' which
2799  * we use as a pipe index
2800  */
2801 int i8xx_enable_vblank(struct drm_crtc *crtc)
2802 {
2803 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2804 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
2805 	unsigned long irqflags;
2806 
2807 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2808 	i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
2809 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2810 
2811 	return 0;
2812 }
2813 
2814 int i915gm_enable_vblank(struct drm_crtc *crtc)
2815 {
2816 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2817 
2818 	/*
2819 	 * Vblank interrupts fail to wake the device up from C2+.
2820 	 * Disabling render clock gating during C-states avoids
2821 	 * the problem. There is a small power cost so we do this
2822 	 * only when vblank interrupts are actually enabled.
2823 	 */
2824 	if (dev_priv->vblank_enabled++ == 0)
2825 		intel_uncore_write(&dev_priv->uncore, SCPD0, _MASKED_BIT_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
2826 
2827 	return i8xx_enable_vblank(crtc);
2828 }
2829 
2830 int i965_enable_vblank(struct drm_crtc *crtc)
2831 {
2832 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2833 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
2834 	unsigned long irqflags;
2835 
2836 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2837 	i915_enable_pipestat(dev_priv, pipe,
2838 			     PIPE_START_VBLANK_INTERRUPT_STATUS);
2839 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2840 
2841 	return 0;
2842 }
2843 
2844 int ilk_enable_vblank(struct drm_crtc *crtc)
2845 {
2846 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2847 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
2848 	unsigned long irqflags;
2849 	u32 bit = DISPLAY_VER(dev_priv) >= 7 ?
2850 		DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
2851 
2852 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2853 	ilk_enable_display_irq(dev_priv, bit);
2854 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2855 
2856 	/* Even though there is no DMC, frame counter can get stuck when
2857 	 * PSR is active as no frames are generated.
2858 	 */
2859 	if (HAS_PSR(dev_priv))
2860 		drm_crtc_vblank_restore(crtc);
2861 
2862 	return 0;
2863 }
2864 
2865 static bool gen11_dsi_configure_te(struct intel_crtc *intel_crtc,
2866 				   bool enable)
2867 {
2868 	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
2869 	enum port port;
2870 
2871 	if (!(intel_crtc->mode_flags &
2872 	    (I915_MODE_FLAG_DSI_USE_TE1 | I915_MODE_FLAG_DSI_USE_TE0)))
2873 		return false;
2874 
2875 	/* for dual link cases we consider TE from slave */
2876 	if (intel_crtc->mode_flags & I915_MODE_FLAG_DSI_USE_TE1)
2877 		port = PORT_B;
2878 	else
2879 		port = PORT_A;
2880 
2881 	intel_uncore_rmw(&dev_priv->uncore, DSI_INTR_MASK_REG(port), DSI_TE_EVENT,
2882 			 enable ? 0 : DSI_TE_EVENT);
2883 
2884 	intel_uncore_rmw(&dev_priv->uncore, DSI_INTR_IDENT_REG(port), 0, 0);
2885 
2886 	return true;
2887 }
2888 
2889 int bdw_enable_vblank(struct drm_crtc *_crtc)
2890 {
2891 	struct intel_crtc *crtc = to_intel_crtc(_crtc);
2892 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2893 	enum pipe pipe = crtc->pipe;
2894 	unsigned long irqflags;
2895 
2896 	if (gen11_dsi_configure_te(crtc, true))
2897 		return 0;
2898 
2899 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2900 	bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
2901 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2902 
2903 	/* Even if there is no DMC, frame counter can get stuck when
2904 	 * PSR is active as no frames are generated, so check only for PSR.
2905 	 */
2906 	if (HAS_PSR(dev_priv))
2907 		drm_crtc_vblank_restore(&crtc->base);
2908 
2909 	return 0;
2910 }
2911 
2912 /* Called from drm generic code, passed 'crtc' which
2913  * we use as a pipe index
2914  */
2915 void i8xx_disable_vblank(struct drm_crtc *crtc)
2916 {
2917 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2918 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
2919 	unsigned long irqflags;
2920 
2921 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2922 	i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
2923 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2924 }
2925 
2926 void i915gm_disable_vblank(struct drm_crtc *crtc)
2927 {
2928 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2929 
2930 	i8xx_disable_vblank(crtc);
2931 
2932 	if (--dev_priv->vblank_enabled == 0)
2933 		intel_uncore_write(&dev_priv->uncore, SCPD0, _MASKED_BIT_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
2934 }
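
/*
 * Editor's note (illustrative): the i915gm pair above maintains the
 * invariant that render clock gating in C-states is disabled exactly
 * while at least one vblank interrupt is enabled:
 *
 *	enable:  if (dev_priv->vblank_enabled++ == 0)
 *			SCPD0 |= CSTATE_RENDER_CLOCK_GATE_DISABLE;
 *	disable: if (--dev_priv->vblank_enabled == 0)
 *			SCPD0 &= ~CSTATE_RENDER_CLOCK_GATE_DISABLE;
 *
 * (masked-bit writes in the real code; a plain RMW is shown here
 * for clarity.)
 */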
2935 
2936 void i965_disable_vblank(struct drm_crtc *crtc)
2937 {
2938 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2939 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
2940 	unsigned long irqflags;
2941 
2942 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2943 	i915_disable_pipestat(dev_priv, pipe,
2944 			      PIPE_START_VBLANK_INTERRUPT_STATUS);
2945 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2946 }
2947 
2948 void ilk_disable_vblank(struct drm_crtc *crtc)
2949 {
2950 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2951 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
2952 	unsigned long irqflags;
2953 	u32 bit = DISPLAY_VER(dev_priv) >= 7 ?
2954 		DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
2955 
2956 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2957 	ilk_disable_display_irq(dev_priv, bit);
2958 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2959 }
2960 
2961 void bdw_disable_vblank(struct drm_crtc *_crtc)
2962 {
2963 	struct intel_crtc *crtc = to_intel_crtc(_crtc);
2964 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2965 	enum pipe pipe = crtc->pipe;
2966 	unsigned long irqflags;
2967 
2968 	if (gen11_dsi_configure_te(crtc, false))
2969 		return;
2970 
2971 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2972 	bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
2973 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2974 }
2975 
2976 static void ibx_irq_reset(struct drm_i915_private *dev_priv)
2977 {
2978 	struct intel_uncore *uncore = &dev_priv->uncore;
2979 
2980 	if (HAS_PCH_NOP(dev_priv))
2981 		return;
2982 
2983 	GEN3_IRQ_RESET(uncore, SDE);
2984 
2985 	if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
2986 		intel_uncore_write(&dev_priv->uncore, SERR_INT, 0xffffffff);
2987 }
2988 
2989 static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
2990 {
2991 	struct intel_uncore *uncore = &dev_priv->uncore;
2992 
2993 	if (IS_CHERRYVIEW(dev_priv))
2994 		intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
2995 	else
2996 		intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_VLV);
2997 
2998 	i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
2999 	intel_uncore_rmw(uncore, PORT_HOTPLUG_STAT, 0, 0);
3000 
3001 	i9xx_pipestat_irq_reset(dev_priv);
3002 
3003 	GEN3_IRQ_RESET(uncore, VLV_);
3004 	dev_priv->irq_mask = ~0u;
3005 }
3006 
3007 static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
3008 {
3009 	struct intel_uncore *uncore = &dev_priv->uncore;
3010 
3011 	u32 pipestat_mask;
3012 	u32 enable_mask;
3013 	enum pipe pipe;
3014 
3015 	pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS;
3016 
3017 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3018 	for_each_pipe(dev_priv, pipe)
3019 		i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
3020 
3021 	enable_mask = I915_DISPLAY_PORT_INTERRUPT |
3022 		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3023 		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3024 		I915_LPE_PIPE_A_INTERRUPT |
3025 		I915_LPE_PIPE_B_INTERRUPT;
3026 
3027 	if (IS_CHERRYVIEW(dev_priv))
3028 		enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT |
3029 			I915_LPE_PIPE_C_INTERRUPT;
3030 
3031 	drm_WARN_ON(&dev_priv->drm, dev_priv->irq_mask != ~0u);
3032 
3033 	dev_priv->irq_mask = ~enable_mask;
3034 
3035 	GEN3_IRQ_INIT(uncore, VLV_, dev_priv->irq_mask, enable_mask);
3036 }
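
/*
 * Editor's note (illustrative): dev_priv->irq_mask is kept as the
 * exact complement of the IER enable mask, so the two VLV registers
 * always stay consistent:
 *
 *	unmask an event:	irq_mask &= ~bit;  IER |= bit;
 *	mask an event:		irq_mask |= bit;   IER &= ~bit;
 *
 * The ~0u sentinel checked by the drm_WARN_ON() above means
 * "display irqs currently reset, nothing enabled".
 */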
3037 
3038 /* drm_dma.h hooks
3039  */
3040 static void ilk_irq_reset(struct drm_i915_private *dev_priv)
3041 {
3042 	struct intel_uncore *uncore = &dev_priv->uncore;
3043 
3044 	GEN3_IRQ_RESET(uncore, DE);
3045 	dev_priv->irq_mask = ~0u;
3046 
3047 	if (GRAPHICS_VER(dev_priv) == 7)
3048 		intel_uncore_write(uncore, GEN7_ERR_INT, 0xffffffff);
3049 
3050 	if (IS_HASWELL(dev_priv)) {
3051 		intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
3052 		intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
3053 	}
3054 
3055 	gen5_gt_irq_reset(to_gt(dev_priv));
3056 
3057 	ibx_irq_reset(dev_priv);
3058 }
3059 
3060 static void valleyview_irq_reset(struct drm_i915_private *dev_priv)
3061 {
3062 	intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, 0);
3063 	intel_uncore_posting_read(&dev_priv->uncore, VLV_MASTER_IER);
3064 
3065 	gen5_gt_irq_reset(to_gt(dev_priv));
3066 
3067 	spin_lock_irq(&dev_priv->irq_lock);
3068 	if (dev_priv->display_irqs_enabled)
3069 		vlv_display_irq_reset(dev_priv);
3070 	spin_unlock_irq(&dev_priv->irq_lock);
3071 }
3072 
3073 static void gen8_display_irq_reset(struct drm_i915_private *dev_priv)
3074 {
3075 	struct intel_uncore *uncore = &dev_priv->uncore;
3076 	enum pipe pipe;
3077 
3078 	if (!HAS_DISPLAY(dev_priv))
3079 		return;
3080 
3081 	intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
3082 	intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
3083 
3084 	for_each_pipe(dev_priv, pipe)
3085 		if (intel_display_power_is_enabled(dev_priv,
3086 						   POWER_DOMAIN_PIPE(pipe)))
3087 			GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
3088 
3089 	GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
3090 	GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);
3091 }
3092 
3093 static void gen8_irq_reset(struct drm_i915_private *dev_priv)
3094 {
3095 	struct intel_uncore *uncore = &dev_priv->uncore;
3096 
3097 	gen8_master_intr_disable(uncore->regs);
3098 
3099 	gen8_gt_irq_reset(to_gt(dev_priv));
3100 	gen8_display_irq_reset(dev_priv);
3101 	GEN3_IRQ_RESET(uncore, GEN8_PCU_);
3102 
3103 	if (HAS_PCH_SPLIT(dev_priv))
3104 		ibx_irq_reset(dev_priv);
3105 
3106 }
3107 
3108 static void gen11_display_irq_reset(struct drm_i915_private *dev_priv)
3109 {
3110 	struct intel_uncore *uncore = &dev_priv->uncore;
3111 	enum pipe pipe;
3112 	u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
3113 		BIT(TRANSCODER_C) | BIT(TRANSCODER_D);
3114 
3115 	if (!HAS_DISPLAY(dev_priv))
3116 		return;
3117 
3118 	intel_uncore_write(uncore, GEN11_DISPLAY_INT_CTL, 0);
3119 
3120 	if (DISPLAY_VER(dev_priv) >= 12) {
3121 		enum transcoder trans;
3122 
3123 		for_each_cpu_transcoder_masked(dev_priv, trans, trans_mask) {
3124 			enum intel_display_power_domain domain;
3125 
3126 			domain = POWER_DOMAIN_TRANSCODER(trans);
3127 			if (!intel_display_power_is_enabled(dev_priv, domain))
3128 				continue;
3129 
3130 			intel_uncore_write(uncore, TRANS_PSR_IMR(trans), 0xffffffff);
3131 			intel_uncore_write(uncore, TRANS_PSR_IIR(trans), 0xffffffff);
3132 		}
3133 	} else {
3134 		intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
3135 		intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
3136 	}
3137 
3138 	for_each_pipe(dev_priv, pipe)
3139 		if (intel_display_power_is_enabled(dev_priv,
3140 						   POWER_DOMAIN_PIPE(pipe)))
3141 			GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
3142 
3143 	GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
3144 	GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);
3145 	GEN3_IRQ_RESET(uncore, GEN11_DE_HPD_);
3146 
3147 	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
3148 		GEN3_IRQ_RESET(uncore, SDE);
3149 }
3150 
3151 static void gen11_irq_reset(struct drm_i915_private *dev_priv)
3152 {
3153 	struct intel_gt *gt = to_gt(dev_priv);
3154 	struct intel_uncore *uncore = gt->uncore;
3155 
3156 	gen11_master_intr_disable(dev_priv->uncore.regs);
3157 
3158 	gen11_gt_irq_reset(gt);
3159 	gen11_display_irq_reset(dev_priv);
3160 
3161 	GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_);
3162 	GEN3_IRQ_RESET(uncore, GEN8_PCU_);
3163 }
3164 
3165 static void dg1_irq_reset(struct drm_i915_private *dev_priv)
3166 {
3167 	struct intel_gt *gt = to_gt(dev_priv);
3168 	struct intel_uncore *uncore = gt->uncore;
3169 
3170 	dg1_master_intr_disable(dev_priv->uncore.regs);
3171 
3172 	gen11_gt_irq_reset(gt);
3173 	gen11_display_irq_reset(dev_priv);
3174 
3175 	GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_);
3176 	GEN3_IRQ_RESET(uncore, GEN8_PCU_);
3177 }
3178 
3179 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
3180 				     u8 pipe_mask)
3181 {
3182 	struct intel_uncore *uncore = &dev_priv->uncore;
3183 	u32 extra_ier = GEN8_PIPE_VBLANK |
3184 		gen8_de_pipe_underrun_mask(dev_priv) |
3185 		gen8_de_pipe_flip_done_mask(dev_priv);
3186 	enum pipe pipe;
3187 
3188 	spin_lock_irq(&dev_priv->irq_lock);
3189 
3190 	if (!intel_irqs_enabled(dev_priv)) {
3191 		spin_unlock_irq(&dev_priv->irq_lock);
3192 		return;
3193 	}
3194 
3195 	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
3196 		GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
3197 				  dev_priv->de_irq_mask[pipe],
3198 				  ~dev_priv->de_irq_mask[pipe] | extra_ier);
3199 
3200 	spin_unlock_irq(&dev_priv->irq_lock);
3201 }
3202 
3203 void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
3204 				     u8 pipe_mask)
3205 {
3206 	struct intel_uncore *uncore = &dev_priv->uncore;
3207 	enum pipe pipe;
3208 
3209 	spin_lock_irq(&dev_priv->irq_lock);
3210 
3211 	if (!intel_irqs_enabled(dev_priv)) {
3212 		spin_unlock_irq(&dev_priv->irq_lock);
3213 		return;
3214 	}
3215 
3216 	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
3217 		GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
3218 
3219 	spin_unlock_irq(&dev_priv->irq_lock);
3220 
3221 	/* make sure we're done processing display irqs */
3222 	intel_synchronize_irq(dev_priv);
3223 }
3224 
3225 static void cherryview_irq_reset(struct drm_i915_private *dev_priv)
3226 {
3227 	struct intel_uncore *uncore = &dev_priv->uncore;
3228 
3229 	intel_uncore_write(uncore, GEN8_MASTER_IRQ, 0);
3230 	intel_uncore_posting_read(&dev_priv->uncore, GEN8_MASTER_IRQ);
3231 
3232 	gen8_gt_irq_reset(to_gt(dev_priv));
3233 
3234 	GEN3_IRQ_RESET(uncore, GEN8_PCU_);
3235 
3236 	spin_lock_irq(&dev_priv->irq_lock);
3237 	if (dev_priv->display_irqs_enabled)
3238 		vlv_display_irq_reset(dev_priv);
3239 	spin_unlock_irq(&dev_priv->irq_lock);
3240 }
3241 
3242 static u32 ibx_hotplug_enables(struct drm_i915_private *i915,
3243 			       enum hpd_pin pin)
3244 {
3245 	switch (pin) {
3246 	case HPD_PORT_A:
3247 		/*
3248 		 * When CPU and PCH are on the same package, port A
3249 		 * HPD must be enabled in both north and south.
3250 		 */
3251 		return HAS_PCH_LPT_LP(i915) ?
3252 			PORTA_HOTPLUG_ENABLE : 0;
3253 	case HPD_PORT_B:
3254 		return PORTB_HOTPLUG_ENABLE |
3255 			PORTB_PULSE_DURATION_2ms;
3256 	case HPD_PORT_C:
3257 		return PORTC_HOTPLUG_ENABLE |
3258 			PORTC_PULSE_DURATION_2ms;
3259 	case HPD_PORT_D:
3260 		return PORTD_HOTPLUG_ENABLE |
3261 			PORTD_PULSE_DURATION_2ms;
3262 	default:
3263 		return 0;
3264 	}
3265 }
3266 
3267 static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv)
3268 {
3269 	/*
3270 	 * Enable digital hotplug on the PCH, and configure the DP short pulse
3271 	 * duration to 2ms (which is the minimum in the Display Port spec).
3272 	 * The pulse duration bits are reserved on LPT+.
3273 	 */
3274 	intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG,
3275 			 PORTA_HOTPLUG_ENABLE |
3276 			 PORTB_HOTPLUG_ENABLE |
3277 			 PORTC_HOTPLUG_ENABLE |
3278 			 PORTD_HOTPLUG_ENABLE |
3279 			 PORTB_PULSE_DURATION_MASK |
3280 			 PORTC_PULSE_DURATION_MASK |
3281 			 PORTD_PULSE_DURATION_MASK,
3282 			 intel_hpd_hotplug_enables(dev_priv, ibx_hotplug_enables));
3283 }
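
/*
 * Editor's note (illustrative): intel_hpd_hotplug_enables() is
 * presumably the fold of the per-pin callback over every registered
 * encoder, i.e. roughly
 *
 *	u32 enables = 0;
 *	for_each_intel_encoder(&i915->drm, encoder)
 *		enables |= ibx_hotplug_enables(i915, encoder->hpd_pin);
 *
 * so only ports that actually have an encoder get their detection
 * logic and pulse duration programmed.
 */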
3284 
3285 static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
3286 {
3287 	u32 hotplug_irqs, enabled_irqs;
3288 
3289 	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
3290 	hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
3291 
3292 	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3293 
3294 	ibx_hpd_detection_setup(dev_priv);
3295 }
3296 
3297 static u32 icp_ddi_hotplug_enables(struct drm_i915_private *i915,
3298 				   enum hpd_pin pin)
3299 {
3300 	switch (pin) {
3301 	case HPD_PORT_A:
3302 	case HPD_PORT_B:
3303 	case HPD_PORT_C:
3304 	case HPD_PORT_D:
3305 		return SHOTPLUG_CTL_DDI_HPD_ENABLE(pin);
3306 	default:
3307 		return 0;
3308 	}
3309 }
3310 
3311 static u32 icp_tc_hotplug_enables(struct drm_i915_private *i915,
3312 				  enum hpd_pin pin)
3313 {
3314 	switch (pin) {
3315 	case HPD_PORT_TC1:
3316 	case HPD_PORT_TC2:
3317 	case HPD_PORT_TC3:
3318 	case HPD_PORT_TC4:
3319 	case HPD_PORT_TC5:
3320 	case HPD_PORT_TC6:
3321 		return ICP_TC_HPD_ENABLE(pin);
3322 	default:
3323 		return 0;
3324 	}
3325 }
3326 
3327 static void icp_ddi_hpd_detection_setup(struct drm_i915_private *dev_priv)
3328 {
3329 	intel_uncore_rmw(&dev_priv->uncore, SHOTPLUG_CTL_DDI,
3330 			 SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_A) |
3331 			 SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_B) |
3332 			 SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_C) |
3333 			 SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_D),
3334 			 intel_hpd_hotplug_enables(dev_priv, icp_ddi_hotplug_enables));
3335 }
3336 
3337 static void icp_tc_hpd_detection_setup(struct drm_i915_private *dev_priv)
3338 {
3339 	intel_uncore_rmw(&dev_priv->uncore, SHOTPLUG_CTL_TC,
3340 			 ICP_TC_HPD_ENABLE(HPD_PORT_TC1) |
3341 			 ICP_TC_HPD_ENABLE(HPD_PORT_TC2) |
3342 			 ICP_TC_HPD_ENABLE(HPD_PORT_TC3) |
3343 			 ICP_TC_HPD_ENABLE(HPD_PORT_TC4) |
3344 			 ICP_TC_HPD_ENABLE(HPD_PORT_TC5) |
3345 			 ICP_TC_HPD_ENABLE(HPD_PORT_TC6),
3346 			 intel_hpd_hotplug_enables(dev_priv, icp_tc_hotplug_enables));
3347 }
3348 
3349 static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv)
3350 {
3351 	u32 hotplug_irqs, enabled_irqs;
3352 
3353 	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
3354 	hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
3355 
3356 	if (INTEL_PCH_TYPE(dev_priv) <= PCH_TGP)
3357 		intel_uncore_write(&dev_priv->uncore, SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
3358 
3359 	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3360 
3361 	icp_ddi_hpd_detection_setup(dev_priv);
3362 	icp_tc_hpd_detection_setup(dev_priv);
3363 }
3364 
3365 static u32 gen11_hotplug_enables(struct drm_i915_private *i915,
3366 				 enum hpd_pin pin)
3367 {
3368 	switch (pin) {
3369 	case HPD_PORT_TC1:
3370 	case HPD_PORT_TC2:
3371 	case HPD_PORT_TC3:
3372 	case HPD_PORT_TC4:
3373 	case HPD_PORT_TC5:
3374 	case HPD_PORT_TC6:
3375 		return GEN11_HOTPLUG_CTL_ENABLE(pin);
3376 	default:
3377 		return 0;
3378 	}
3379 }
3380 
3381 static void dg1_hpd_invert(struct drm_i915_private *i915)
3382 {
3383 	u32 val = (INVERT_DDIA_HPD |
3384 		   INVERT_DDIB_HPD |
3385 		   INVERT_DDIC_HPD |
3386 		   INVERT_DDID_HPD);
3387 	intel_uncore_rmw(&i915->uncore, SOUTH_CHICKEN1, 0, val);
3388 }
3389 
3390 static void dg1_hpd_irq_setup(struct drm_i915_private *dev_priv)
3391 {
3392 	dg1_hpd_invert(dev_priv);
3393 	icp_hpd_irq_setup(dev_priv);
3394 }
3395 
3396 static void gen11_tc_hpd_detection_setup(struct drm_i915_private *dev_priv)
3397 {
3398 	intel_uncore_rmw(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL,
3399 			 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC1) |
3400 			 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC2) |
3401 			 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC3) |
3402 			 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC4) |
3403 			 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC5) |
3404 			 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC6),
3405 			 intel_hpd_hotplug_enables(dev_priv, gen11_hotplug_enables));
3406 }
3407 
3408 static void gen11_tbt_hpd_detection_setup(struct drm_i915_private *dev_priv)
3409 {
3410 	intel_uncore_rmw(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL,
3411 			 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC1) |
3412 			 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC2) |
3413 			 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC3) |
3414 			 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC4) |
3415 			 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC5) |
3416 			 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC6),
3417 			 intel_hpd_hotplug_enables(dev_priv, gen11_hotplug_enables));
3418 }
3419 
3420 static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
3421 {
3422 	u32 hotplug_irqs, enabled_irqs;
3423 
3424 	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.hpd);
3425 	hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.hpd);
3426 
3427 	intel_uncore_rmw(&dev_priv->uncore, GEN11_DE_HPD_IMR, hotplug_irqs,
3428 			 ~enabled_irqs & hotplug_irqs);
3429 	intel_uncore_posting_read(&dev_priv->uncore, GEN11_DE_HPD_IMR);
3430 
3431 	gen11_tc_hpd_detection_setup(dev_priv);
3432 	gen11_tbt_hpd_detection_setup(dev_priv);
3433 
3434 	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
3435 		icp_hpd_irq_setup(dev_priv);
3436 }
3437 
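/*
 * Worked example for the GEN11_DE_HPD_IMR update in gen11_hpd_irq_setup()
 * above: intel_uncore_rmw(uncore, reg, clear, set) clears the bits in
 * @clear, then sets the bits in @set. With hotplug_irqs covering TC1-TC6
 * and enabled_irqs = TC1 | TC2, the call
 *
 *	intel_uncore_rmw(uncore, GEN11_DE_HPD_IMR, hotplug_irqs,
 *			 ~enabled_irqs & hotplug_irqs);
 *
 * first drops the mask bits for all six pins and then re-masks TC3-TC6,
 * leaving exactly the pins with an encoder unmasked.
 */
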
3438 static u32 spt_hotplug_enables(struct drm_i915_private *i915,
3439 			       enum hpd_pin pin)
3440 {
3441 	switch (pin) {
3442 	case HPD_PORT_A:
3443 		return PORTA_HOTPLUG_ENABLE;
3444 	case HPD_PORT_B:
3445 		return PORTB_HOTPLUG_ENABLE;
3446 	case HPD_PORT_C:
3447 		return PORTC_HOTPLUG_ENABLE;
3448 	case HPD_PORT_D:
3449 		return PORTD_HOTPLUG_ENABLE;
3450 	default:
3451 		return 0;
3452 	}
3453 }
3454 
3455 static u32 spt_hotplug2_enables(struct drm_i915_private *i915,
3456 				enum hpd_pin pin)
3457 {
3458 	switch (pin) {
3459 	case HPD_PORT_E:
3460 		return PORTE_HOTPLUG_ENABLE;
3461 	default:
3462 		return 0;
3463 	}
3464 }
3465 
3466 static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv)
3467 {
3468 	/* Display WA #1179 WaHardHangonHotPlug: cnp */
3469 	if (HAS_PCH_CNP(dev_priv)) {
3470 		intel_uncore_rmw(&dev_priv->uncore, SOUTH_CHICKEN1, CHASSIS_CLK_REQ_DURATION_MASK,
3471 				 CHASSIS_CLK_REQ_DURATION(0xf));
3472 	}
3473 
3474 	/* Enable digital hotplug on the PCH */
3475 	intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG,
3476 			 PORTA_HOTPLUG_ENABLE |
3477 			 PORTB_HOTPLUG_ENABLE |
3478 			 PORTC_HOTPLUG_ENABLE |
3479 			 PORTD_HOTPLUG_ENABLE,
3480 			 intel_hpd_hotplug_enables(dev_priv, spt_hotplug_enables));
3481 
3482 	intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG2, PORTE_HOTPLUG_ENABLE,
3483 			 intel_hpd_hotplug_enables(dev_priv, spt_hotplug2_enables));
3484 }
3485 
3486 static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
3487 {
3488 	u32 hotplug_irqs, enabled_irqs;
3489 
3490 	if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
3491 		intel_uncore_write(&dev_priv->uncore, SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
3492 
3493 	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
3494 	hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
3495 
3496 	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3497 
3498 	spt_hpd_detection_setup(dev_priv);
3499 }
3500 
3501 static u32 ilk_hotplug_enables(struct drm_i915_private *i915,
3502 			       enum hpd_pin pin)
3503 {
3504 	switch (pin) {
3505 	case HPD_PORT_A:
3506 		return DIGITAL_PORTA_HOTPLUG_ENABLE |
3507 			DIGITAL_PORTA_PULSE_DURATION_2ms;
3508 	default:
3509 		return 0;
3510 	}
3511 }
3512 
3513 static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv)
3514 {
3515 	/*
3516 	 * Enable digital hotplug on the CPU, and configure the DP short pulse
3517 	 * duration to 2ms (which is the minimum in the DisplayPort spec).
3518 	 * The pulse duration bits are reserved on HSW+.
3519 	 */
3520 	intel_uncore_rmw(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL,
3521 			 DIGITAL_PORTA_HOTPLUG_ENABLE | DIGITAL_PORTA_PULSE_DURATION_MASK,
3522 			 intel_hpd_hotplug_enables(dev_priv, ilk_hotplug_enables));
3523 }
3524 
3525 static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
3526 {
3527 	u32 hotplug_irqs, enabled_irqs;
3528 
3529 	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.hpd);
3530 	hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.hpd);
3531 
3532 	if (DISPLAY_VER(dev_priv) >= 8)
3533 		bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3534 	else
3535 		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
3536 
3537 	ilk_hpd_detection_setup(dev_priv);
3538 
3539 	ibx_hpd_irq_setup(dev_priv);
3540 }
3541 
3542 static u32 bxt_hotplug_enables(struct drm_i915_private *i915,
3543 			       enum hpd_pin pin)
3544 {
3545 	u32 hotplug;
3546 
3547 	switch (pin) {
3548 	case HPD_PORT_A:
3549 		hotplug = PORTA_HOTPLUG_ENABLE;
3550 		if (intel_bios_is_port_hpd_inverted(i915, PORT_A))
3551 			hotplug |= BXT_DDIA_HPD_INVERT;
3552 		return hotplug;
3553 	case HPD_PORT_B:
3554 		hotplug = PORTB_HOTPLUG_ENABLE;
3555 		if (intel_bios_is_port_hpd_inverted(i915, PORT_B))
3556 			hotplug |= BXT_DDIB_HPD_INVERT;
3557 		return hotplug;
3558 	case HPD_PORT_C:
3559 		hotplug = PORTC_HOTPLUG_ENABLE;
3560 		if (intel_bios_is_port_hpd_inverted(i915, PORT_C))
3561 			hotplug |= BXT_DDIC_HPD_INVERT;
3562 		return hotplug;
3563 	default:
3564 		return 0;
3565 	}
3566 }
3567 
3568 static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv)
3569 {
3570 	intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG,
3571 			 PORTA_HOTPLUG_ENABLE |
3572 			 PORTB_HOTPLUG_ENABLE |
3573 			 PORTC_HOTPLUG_ENABLE |
3574 			 BXT_DDI_HPD_INVERT_MASK,
3575 			 intel_hpd_hotplug_enables(dev_priv, bxt_hotplug_enables));
3576 }
3577 
3578 static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
3579 {
3580 	u32 hotplug_irqs, enabled_irqs;
3581 
3582 	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.hpd);
3583 	hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.hpd);
3584 
3585 	bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3586 
3587 	bxt_hpd_detection_setup(dev_priv);
3588 }
3589 
3590 /*
3591  * SDEIER is also touched by the interrupt handler to work around missed PCH
3592  * interrupts. Hence we can't update it after the interrupt handler is enabled -
3593  * instead we unconditionally enable all PCH interrupt sources here, but then
3594  * only unmask them as needed with SDEIMR.
3595  *
3596  * Note that we currently do this after installing the interrupt handler,
3597  * but before we enable the master interrupt. That should be sufficient
3598  * to avoid races with the irq handler, assuming we have MSI. Shared legacy
3599  * interrupts could still race.
3600  */
3601 static void ibx_irq_postinstall(struct drm_i915_private *dev_priv)
3602 {
3603 	struct intel_uncore *uncore = &dev_priv->uncore;
3604 	u32 mask;
3605 
3606 	if (HAS_PCH_NOP(dev_priv))
3607 		return;
3608 
3609 	if (HAS_PCH_IBX(dev_priv))
3610 		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
3611 	else if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
3612 		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
3613 	else
3614 		mask = SDE_GMBUS_CPT;
3615 
3616 	GEN3_IRQ_INIT(uncore, SDE, ~mask, 0xffffffff);
3617 }
3618 
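/*
 * Roughly, the GEN3_IRQ_INIT(uncore, SDE, ~mask, 0xffffffff) call above
 * amounts to (a sketch of the effect, not the exact macro body):
 *
 *	SDEIER = 0xffffffff;	// enable every PCH interrupt source
 *	SDEIMR = ~mask;		// ...but mask everything except @mask
 *
 * which implements the "enable all in SDEIER, unmask selectively via
 * SDEIMR" scheme described in the comment above ibx_irq_postinstall().
 */
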
3619 static void ilk_irq_postinstall(struct drm_i915_private *dev_priv)
3620 {
3621 	struct intel_uncore *uncore = &dev_priv->uncore;
3622 	u32 display_mask, extra_mask;
3623 
3624 	if (GRAPHICS_VER(dev_priv) >= 7) {
3625 		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
3626 				DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB);
3627 		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
3628 			      DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
3629 			      DE_PLANE_FLIP_DONE_IVB(PLANE_C) |
3630 			      DE_PLANE_FLIP_DONE_IVB(PLANE_B) |
3631 			      DE_PLANE_FLIP_DONE_IVB(PLANE_A) |
3632 			      DE_DP_A_HOTPLUG_IVB);
3633 	} else {
3634 		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
3635 				DE_AUX_CHANNEL_A | DE_PIPEB_CRC_DONE |
3636 				DE_PIPEA_CRC_DONE | DE_POISON);
3637 		extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK |
3638 			      DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
3639 			      DE_PLANE_FLIP_DONE(PLANE_A) |
3640 			      DE_PLANE_FLIP_DONE(PLANE_B) |
3641 			      DE_DP_A_HOTPLUG);
3642 	}
3643 
3644 	if (IS_HASWELL(dev_priv)) {
3645 		gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
3646 		display_mask |= DE_EDP_PSR_INT_HSW;
3647 	}
3648 
3649 	if (IS_IRONLAKE_M(dev_priv))
3650 		extra_mask |= DE_PCU_EVENT;
3651 
3652 	dev_priv->irq_mask = ~display_mask;
3653 
3654 	ibx_irq_postinstall(dev_priv);
3655 
3656 	gen5_gt_irq_postinstall(to_gt(dev_priv));
3657 
3658 	GEN3_IRQ_INIT(uncore, DE, dev_priv->irq_mask,
3659 		      display_mask | extra_mask);
3660 }
3661 
3662 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
3663 {
3664 	lockdep_assert_held(&dev_priv->irq_lock);
3665 
3666 	if (dev_priv->display_irqs_enabled)
3667 		return;
3668 
3669 	dev_priv->display_irqs_enabled = true;
3670 
3671 	if (intel_irqs_enabled(dev_priv)) {
3672 		vlv_display_irq_reset(dev_priv);
3673 		vlv_display_irq_postinstall(dev_priv);
3674 	}
3675 }
3676 
3677 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
3678 {
3679 	lockdep_assert_held(&dev_priv->irq_lock);
3680 
3681 	if (!dev_priv->display_irqs_enabled)
3682 		return;
3683 
3684 	dev_priv->display_irqs_enabled = false;
3685 
3686 	if (intel_irqs_enabled(dev_priv))
3687 		vlv_display_irq_reset(dev_priv);
3688 }
3689 
3690 
3691 static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv)
3692 {
3693 	gen5_gt_irq_postinstall(to_gt(dev_priv));
3694 
3695 	spin_lock_irq(&dev_priv->irq_lock);
3696 	if (dev_priv->display_irqs_enabled)
3697 		vlv_display_irq_postinstall(dev_priv);
3698 	spin_unlock_irq(&dev_priv->irq_lock);
3699 
3700 	intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
3701 	intel_uncore_posting_read(&dev_priv->uncore, VLV_MASTER_IER);
3702 }
3703 
3704 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
3705 {
3706 	struct intel_uncore *uncore = &dev_priv->uncore;
3707 
3708 	u32 de_pipe_masked = gen8_de_pipe_fault_mask(dev_priv) |
3709 		GEN8_PIPE_CDCLK_CRC_DONE;
3710 	u32 de_pipe_enables;
3711 	u32 de_port_masked = gen8_de_port_aux_mask(dev_priv);
3712 	u32 de_port_enables;
3713 	u32 de_misc_masked = GEN8_DE_EDP_PSR;
3714 	u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
3715 		BIT(TRANSCODER_C) | BIT(TRANSCODER_D);
3716 	enum pipe pipe;
3717 
3718 	if (!HAS_DISPLAY(dev_priv))
3719 		return;
3720 
3721 	if (DISPLAY_VER(dev_priv) <= 10)
3722 		de_misc_masked |= GEN8_DE_MISC_GSE;
3723 
3724 	if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
3725 		de_port_masked |= BXT_DE_PORT_GMBUS;
3726 
3727 	if (DISPLAY_VER(dev_priv) >= 11) {
3728 		enum port port;
3729 
3730 		if (intel_bios_is_dsi_present(dev_priv, &port))
3731 			de_port_masked |= DSI0_TE | DSI1_TE;
3732 	}
3733 
3734 	de_pipe_enables = de_pipe_masked |
3735 		GEN8_PIPE_VBLANK |
3736 		gen8_de_pipe_underrun_mask(dev_priv) |
3737 		gen8_de_pipe_flip_done_mask(dev_priv);
3738 
3739 	de_port_enables = de_port_masked;
3740 	if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
3741 		de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
3742 	else if (IS_BROADWELL(dev_priv))
3743 		de_port_enables |= BDW_DE_PORT_HOTPLUG_MASK;
3744 
3745 	if (DISPLAY_VER(dev_priv) >= 12) {
3746 		enum transcoder trans;
3747 
3748 		for_each_cpu_transcoder_masked(dev_priv, trans, trans_mask) {
3749 			enum intel_display_power_domain domain;
3750 
3751 			domain = POWER_DOMAIN_TRANSCODER(trans);
3752 			if (!intel_display_power_is_enabled(dev_priv, domain))
3753 				continue;
3754 
3755 			gen3_assert_iir_is_zero(uncore, TRANS_PSR_IIR(trans));
3756 		}
3757 	} else {
3758 		gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
3759 	}
3760 
3761 	for_each_pipe(dev_priv, pipe) {
3762 		dev_priv->de_irq_mask[pipe] = ~de_pipe_masked;
3763 
3764 		if (intel_display_power_is_enabled(dev_priv,
3765 				POWER_DOMAIN_PIPE(pipe)))
3766 			GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
3767 					  dev_priv->de_irq_mask[pipe],
3768 					  de_pipe_enables);
3769 	}
3770 
3771 	GEN3_IRQ_INIT(uncore, GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
3772 	GEN3_IRQ_INIT(uncore, GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);
3773 
3774 	if (DISPLAY_VER(dev_priv) >= 11) {
3775 		u32 de_hpd_masked = 0;
3776 		u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK |
3777 				     GEN11_DE_TBT_HOTPLUG_MASK;
3778 
3779 		GEN3_IRQ_INIT(uncore, GEN11_DE_HPD_, ~de_hpd_masked,
3780 			      de_hpd_enables);
3781 	}
3782 }
3783 
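/*
 * Note on de_pipe_masked vs de_pipe_enables in gen8_de_irq_postinstall()
 * above: de_pipe_enables is a strict superset. The extra bits (vblank,
 * underrun, flip done) are enabled in IER up front but stay masked in IMR
 * (~de_pipe_masked), so they only fire once some other code unmasks them
 * on demand, e.g. the vblank enable path. The fault and CDCLK CRC bits in
 * de_pipe_masked are unmasked from the start.
 */
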
3784 static void icp_irq_postinstall(struct drm_i915_private *dev_priv)
3785 {
3786 	struct intel_uncore *uncore = &dev_priv->uncore;
3787 	u32 mask = SDE_GMBUS_ICP;
3788 
3789 	GEN3_IRQ_INIT(uncore, SDE, ~mask, 0xffffffff);
3790 }
3791 
3792 static void gen8_irq_postinstall(struct drm_i915_private *dev_priv)
3793 {
3794 	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
3795 		icp_irq_postinstall(dev_priv);
3796 	else if (HAS_PCH_SPLIT(dev_priv))
3797 		ibx_irq_postinstall(dev_priv);
3798 
3799 	gen8_gt_irq_postinstall(to_gt(dev_priv));
3800 	gen8_de_irq_postinstall(dev_priv);
3801 
3802 	gen8_master_intr_enable(dev_priv->uncore.regs);
3803 }
3804 
3805 static void gen11_de_irq_postinstall(struct drm_i915_private *dev_priv)
3806 {
3807 	if (!HAS_DISPLAY(dev_priv))
3808 		return;
3809 
3810 	gen8_de_irq_postinstall(dev_priv);
3811 
3812 	intel_uncore_write(&dev_priv->uncore, GEN11_DISPLAY_INT_CTL,
3813 			   GEN11_DISPLAY_IRQ_ENABLE);
3814 }
3815 
3816 static void gen11_irq_postinstall(struct drm_i915_private *dev_priv)
3817 {
3818 	struct intel_gt *gt = to_gt(dev_priv);
3819 	struct intel_uncore *uncore = gt->uncore;
3820 	u32 gu_misc_masked = GEN11_GU_MISC_GSE;
3821 
3822 	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
3823 		icp_irq_postinstall(dev_priv);
3824 
3825 	gen11_gt_irq_postinstall(gt);
3826 	gen11_de_irq_postinstall(dev_priv);
3827 
3828 	GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);
3829 
3830 	gen11_master_intr_enable(uncore->regs);
3831 	intel_uncore_posting_read(&dev_priv->uncore, GEN11_GFX_MSTR_IRQ);
3832 }
3833 
3834 static void dg1_irq_postinstall(struct drm_i915_private *dev_priv)
3835 {
3836 	struct intel_gt *gt = to_gt(dev_priv);
3837 	struct intel_uncore *uncore = gt->uncore;
3838 	u32 gu_misc_masked = GEN11_GU_MISC_GSE;
3839 
3840 	gen11_gt_irq_postinstall(gt);
3841 
3842 	GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);
3843 
3844 	if (HAS_DISPLAY(dev_priv)) {
3845 		icp_irq_postinstall(dev_priv);
3846 		gen8_de_irq_postinstall(dev_priv);
3847 		intel_uncore_write(&dev_priv->uncore, GEN11_DISPLAY_INT_CTL,
3848 				   GEN11_DISPLAY_IRQ_ENABLE);
3849 	}
3850 
3851 	dg1_master_intr_enable(uncore->regs);
3852 	intel_uncore_posting_read(uncore, DG1_MSTR_TILE_INTR);
3853 }
3854 
3855 static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv)
3856 {
3857 	gen8_gt_irq_postinstall(to_gt(dev_priv));
3858 
3859 	spin_lock_irq(&dev_priv->irq_lock);
3860 	if (dev_priv->display_irqs_enabled)
3861 		vlv_display_irq_postinstall(dev_priv);
3862 	spin_unlock_irq(&dev_priv->irq_lock);
3863 
3864 	intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
3865 	intel_uncore_posting_read(&dev_priv->uncore, GEN8_MASTER_IRQ);
3866 }
3867 
3868 static void i8xx_irq_reset(struct drm_i915_private *dev_priv)
3869 {
3870 	struct intel_uncore *uncore = &dev_priv->uncore;
3871 
3872 	i9xx_pipestat_irq_reset(dev_priv);
3873 
3874 	gen2_irq_reset(uncore);
3875 	dev_priv->irq_mask = ~0u;
3876 }
3877 
3878 static void i8xx_irq_postinstall(struct drm_i915_private *dev_priv)
3879 {
3880 	struct intel_uncore *uncore = &dev_priv->uncore;
3881 	u16 enable_mask;
3882 
3883 	intel_uncore_write16(uncore,
3884 			     EMR,
3885 			     ~(I915_ERROR_PAGE_TABLE |
3886 			       I915_ERROR_MEMORY_REFRESH));
3887 
3888 	/* Unmask the interrupts that we always want on. */
3889 	dev_priv->irq_mask =
3890 		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3891 		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3892 		  I915_MASTER_ERROR_INTERRUPT);
3893 
3894 	enable_mask =
3895 		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3896 		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3897 		I915_MASTER_ERROR_INTERRUPT |
3898 		I915_USER_INTERRUPT;
3899 
3900 	gen2_irq_init(uncore, dev_priv->irq_mask, enable_mask);
3901 
3902 	/* Interrupt setup is already guaranteed to be single-threaded; this is
3903 	 * just to make the assert_spin_locked check happy. */
3904 	spin_lock_irq(&dev_priv->irq_lock);
3905 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3906 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
3907 	spin_unlock_irq(&dev_priv->irq_lock);
3908 }
3909 
3910 static void i8xx_error_irq_ack(struct drm_i915_private *i915,
3911 			       u16 *eir, u16 *eir_stuck)
3912 {
3913 	struct intel_uncore *uncore = &i915->uncore;
3914 	u16 emr;
3915 
3916 	*eir = intel_uncore_read16(uncore, EIR);
3917 
3918 	if (*eir)
3919 		intel_uncore_write16(uncore, EIR, *eir);
3920 
3921 	*eir_stuck = intel_uncore_read16(uncore, EIR);
3922 	if (*eir_stuck == 0)
3923 		return;
3924 
3925 	/*
3926 	 * Toggle all EMR bits to make sure we get an edge
3927 	 * in the ISR master error bit if we don't clear
3928 	 * all the EIR bits. Otherwise the edge triggered
3929 	 * IIR on i965/g4x wouldn't notice that an interrupt
3930 	 * is still pending. Also some EIR bits can't be
3931 	 * cleared except by handling the underlying error
3932 	 * (or by a GPU reset) so we mask any bit that
3933 	 * remains set.
3934 	 */
3935 	emr = intel_uncore_read16(uncore, EMR);
3936 	intel_uncore_write16(uncore, EMR, 0xffff);
3937 	intel_uncore_write16(uncore, EMR, emr | *eir_stuck);
3938 }
3939 
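/*
 * Worked example for the EMR toggling above, with illustrative values:
 * suppose EIR still reads 0x0010 after the write-back because that error
 * condition is stuck. Writing EMR = 0xffff briefly masks everything, so
 * the ISR master error bit drops; restoring EMR = emr | 0x0010 then keeps
 * the stuck bit masked, and any *new* error bit can generate a fresh 0->1
 * edge that the edge-triggered IIR on i965/g4x will latch.
 */
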
3940 static void i8xx_error_irq_handler(struct drm_i915_private *dev_priv,
3941 				   u16 eir, u16 eir_stuck)
3942 {
3943 	drm_dbg(&dev_priv->drm, "Master Error: EIR 0x%04x\n", eir);
3944 
3945 	if (eir_stuck)
3946 		drm_dbg(&dev_priv->drm, "EIR stuck: 0x%04x, masked\n",
3947 			eir_stuck);
3948 }
3949 
3950 static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv,
3951 			       u32 *eir, u32 *eir_stuck)
3952 {
3953 	u32 emr;
3954 
3955 	*eir = intel_uncore_rmw(&dev_priv->uncore, EIR, 0, 0);
3956 
3957 	*eir_stuck = intel_uncore_read(&dev_priv->uncore, EIR);
3958 	if (*eir_stuck == 0)
3959 		return;
3960 
3961 	/*
3962 	 * Toggle all EMR bits to make sure we get an edge
3963 	 * in the ISR master error bit if we don't clear
3964 	 * all the EIR bits. Otherwise the edge triggered
3965 	 * IIR on i965/g4x wouldn't notice that an interrupt
3966 	 * is still pending. Also some EIR bits can't be
3967 	 * cleared except by handling the underlying error
3968 	 * (or by a GPU reset) so we mask any bit that
3969 	 * remains set.
3970 	 */
3971 	emr = intel_uncore_rmw(&dev_priv->uncore, EMR, ~0, 0xffffffff);
3972 	intel_uncore_write(&dev_priv->uncore, EMR, emr | *eir_stuck);
3973 }
3974 
3975 static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv,
3976 				   u32 eir, u32 eir_stuck)
3977 {
3978 	drm_dbg(&dev_priv->drm, "Master Error: EIR 0x%08x\n", eir);
3979 
3980 	if (eir_stuck)
3981 		drm_dbg(&dev_priv->drm, "EIR stuck: 0x%08x, masked\n",
3982 			eir_stuck);
3983 }
3984 
3985 static irqreturn_t i8xx_irq_handler(int irq, void *arg)
3986 {
3987 	struct drm_i915_private *dev_priv = arg;
3988 	irqreturn_t ret = IRQ_NONE;
3989 
3990 	if (!intel_irqs_enabled(dev_priv))
3991 		return IRQ_NONE;
3992 
3993 	/* IRQs are synced during runtime_suspend, so we don't require a wakeref */
3994 	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
3995 
3996 	do {
3997 		u32 pipe_stats[I915_MAX_PIPES] = {};
3998 		u16 eir = 0, eir_stuck = 0;
3999 		u16 iir;
4000 
4001 		iir = intel_uncore_read16(&dev_priv->uncore, GEN2_IIR);
4002 		if (iir == 0)
4003 			break;
4004 
4005 		ret = IRQ_HANDLED;
4006 
4007 		/* Call regardless, as some status bits might not be
4008 		 * signalled in iir */
4009 		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
4010 
4011 		if (iir & I915_MASTER_ERROR_INTERRUPT)
4012 			i8xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
4013 
4014 		intel_uncore_write16(&dev_priv->uncore, GEN2_IIR, iir);
4015 
4016 		if (iir & I915_USER_INTERRUPT)
4017 			intel_engine_cs_irq(to_gt(dev_priv)->engine[RCS0], iir);
4018 
4019 		if (iir & I915_MASTER_ERROR_INTERRUPT)
4020 			i8xx_error_irq_handler(dev_priv, eir, eir_stuck);
4021 
4022 		i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats);
4023 	} while (0);
4024 
4025 	pmu_irq_stats(dev_priv, ret);
4026 
4027 	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
4028 
4029 	return ret;
4030 }
4031 
4032 static void i915_irq_reset(struct drm_i915_private *dev_priv)
4033 {
4034 	struct intel_uncore *uncore = &dev_priv->uncore;
4035 
4036 	if (I915_HAS_HOTPLUG(dev_priv)) {
4037 		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4038 		intel_uncore_rmw(&dev_priv->uncore, PORT_HOTPLUG_STAT, 0, 0);
4039 	}
4040 
4041 	i9xx_pipestat_irq_reset(dev_priv);
4042 
4043 	GEN3_IRQ_RESET(uncore, GEN2_);
4044 	dev_priv->irq_mask = ~0u;
4045 }
4046 
4047 static void i915_irq_postinstall(struct drm_i915_private *dev_priv)
4048 {
4049 	struct intel_uncore *uncore = &dev_priv->uncore;
4050 	u32 enable_mask;
4051 
4052 	intel_uncore_write(uncore, EMR, ~(I915_ERROR_PAGE_TABLE |
4053 					  I915_ERROR_MEMORY_REFRESH));
4054 
4055 	/* Unmask the interrupts that we always want on. */
4056 	dev_priv->irq_mask =
4057 		~(I915_ASLE_INTERRUPT |
4058 		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4059 		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4060 		  I915_MASTER_ERROR_INTERRUPT);
4061 
4062 	enable_mask =
4063 		I915_ASLE_INTERRUPT |
4064 		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4065 		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4066 		I915_MASTER_ERROR_INTERRUPT |
4067 		I915_USER_INTERRUPT;
4068 
4069 	if (I915_HAS_HOTPLUG(dev_priv)) {
4070 		/* Enable in IER... */
4071 		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
4072 		/* and unmask in IMR */
4073 		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
4074 	}
4075 
4076 	GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);
4077 
4078 	/* Interrupt setup is already guaranteed to be single-threaded; this is
4079 	 * just to make the assert_spin_locked check happy. */
4080 	spin_lock_irq(&dev_priv->irq_lock);
4081 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4082 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4083 	spin_unlock_irq(&dev_priv->irq_lock);
4084 
4085 	i915_enable_asle_pipestat(dev_priv);
4086 }
4087 
4088 static irqreturn_t i915_irq_handler(int irq, void *arg)
4089 {
4090 	struct drm_i915_private *dev_priv = arg;
4091 	irqreturn_t ret = IRQ_NONE;
4092 
4093 	if (!intel_irqs_enabled(dev_priv))
4094 		return IRQ_NONE;
4095 
4096 	/* IRQs are synced during runtime_suspend, so we don't require a wakeref */
4097 	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
4098 
4099 	do {
4100 		u32 pipe_stats[I915_MAX_PIPES] = {};
4101 		u32 eir = 0, eir_stuck = 0;
4102 		u32 hotplug_status = 0;
4103 		u32 iir;
4104 
4105 		iir = intel_uncore_read(&dev_priv->uncore, GEN2_IIR);
4106 		if (iir == 0)
4107 			break;
4108 
4109 		ret = IRQ_HANDLED;
4110 
4111 		if (I915_HAS_HOTPLUG(dev_priv) &&
4112 		    iir & I915_DISPLAY_PORT_INTERRUPT)
4113 			hotplug_status = i9xx_hpd_irq_ack(dev_priv);
4114 
4115 		/* Call regardless, as some status bits might not be
4116 		 * signalled in iir */
4117 		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
4118 
4119 		if (iir & I915_MASTER_ERROR_INTERRUPT)
4120 			i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
4121 
4122 		intel_uncore_write(&dev_priv->uncore, GEN2_IIR, iir);
4123 
4124 		if (iir & I915_USER_INTERRUPT)
4125 			intel_engine_cs_irq(to_gt(dev_priv)->engine[RCS0], iir);
4126 
4127 		if (iir & I915_MASTER_ERROR_INTERRUPT)
4128 			i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
4129 
4130 		if (hotplug_status)
4131 			i9xx_hpd_irq_handler(dev_priv, hotplug_status);
4132 
4133 		i915_pipestat_irq_handler(dev_priv, iir, pipe_stats);
4134 	} while (0);
4135 
4136 	pmu_irq_stats(dev_priv, ret);
4137 
4138 	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
4139 
4140 	return ret;
4141 }
4142 
4143 static void i965_irq_reset(struct drm_i915_private *dev_priv)
4144 {
4145 	struct intel_uncore *uncore = &dev_priv->uncore;
4146 
4147 	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4148 	intel_uncore_rmw(uncore, PORT_HOTPLUG_STAT, 0, 0);
4149 
4150 	i9xx_pipestat_irq_reset(dev_priv);
4151 
4152 	GEN3_IRQ_RESET(uncore, GEN2_);
4153 	dev_priv->irq_mask = ~0u;
4154 }
4155 
4156 static void i965_irq_postinstall(struct drm_i915_private *dev_priv)
4157 {
4158 	struct intel_uncore *uncore = &dev_priv->uncore;
4159 	u32 enable_mask;
4160 	u32 error_mask;
4161 
4162 	/*
4163 	 * Enable some error detection, note the instruction error mask
4164 	 * bit is reserved, so we leave it masked.
4165 	 */
4166 	if (IS_G4X(dev_priv)) {
4167 		error_mask = ~(GM45_ERROR_PAGE_TABLE |
4168 			       GM45_ERROR_MEM_PRIV |
4169 			       GM45_ERROR_CP_PRIV |
4170 			       I915_ERROR_MEMORY_REFRESH);
4171 	} else {
4172 		error_mask = ~(I915_ERROR_PAGE_TABLE |
4173 			       I915_ERROR_MEMORY_REFRESH);
4174 	}
4175 	intel_uncore_write(uncore, EMR, error_mask);
4176 
4177 	/* Unmask the interrupts that we always want on. */
4178 	dev_priv->irq_mask =
4179 		~(I915_ASLE_INTERRUPT |
4180 		  I915_DISPLAY_PORT_INTERRUPT |
4181 		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4182 		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4183 		  I915_MASTER_ERROR_INTERRUPT);
4184 
4185 	enable_mask =
4186 		I915_ASLE_INTERRUPT |
4187 		I915_DISPLAY_PORT_INTERRUPT |
4188 		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4189 		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4190 		I915_MASTER_ERROR_INTERRUPT |
4191 		I915_USER_INTERRUPT;
4192 
4193 	if (IS_G4X(dev_priv))
4194 		enable_mask |= I915_BSD_USER_INTERRUPT;
4195 
4196 	GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);
4197 
4198 	/* Interrupt setup is already guaranteed to be single-threaded; this is
4199 	 * just to make the assert_spin_locked check happy. */
4200 	spin_lock_irq(&dev_priv->irq_lock);
4201 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
4202 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4203 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4204 	spin_unlock_irq(&dev_priv->irq_lock);
4205 
4206 	i915_enable_asle_pipestat(dev_priv);
4207 }
4208 
4209 static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
4210 {
4211 	u32 hotplug_en;
4212 
4213 	lockdep_assert_held(&dev_priv->irq_lock);
4214 
4215 	/* Note HDMI and DP share hotplug bits */
4216 	/* Enable bits are the same for all generations */
4217 	hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915);
4218 	/* Programming the CRT detection parameters tends to
4219 	 * generate a spurious hotplug event about three
4220 	 * seconds later. So just do it once.
4221 	 */
4222 	if (IS_G4X(dev_priv))
4223 		hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
4224 	hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
4225 
4226 	/* Ignore TV since it's buggy */
4227 	i915_hotplug_interrupt_update_locked(dev_priv,
4228 					     HOTPLUG_INT_EN_MASK |
4229 					     CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
4230 					     CRT_HOTPLUG_ACTIVATION_PERIOD_64,
4231 					     hotplug_en);
4232 }
4233 
4234 static irqreturn_t i965_irq_handler(int irq, void *arg)
4235 {
4236 	struct drm_i915_private *dev_priv = arg;
4237 	irqreturn_t ret = IRQ_NONE;
4238 
4239 	if (!intel_irqs_enabled(dev_priv))
4240 		return IRQ_NONE;
4241 
4242 	/* IRQs are synced during runtime_suspend, so we don't require a wakeref */
4243 	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
4244 
4245 	do {
4246 		u32 pipe_stats[I915_MAX_PIPES] = {};
4247 		u32 eir = 0, eir_stuck = 0;
4248 		u32 hotplug_status = 0;
4249 		u32 iir;
4250 
4251 		iir = intel_uncore_read(&dev_priv->uncore, GEN2_IIR);
4252 		if (iir == 0)
4253 			break;
4254 
4255 		ret = IRQ_HANDLED;
4256 
4257 		if (iir & I915_DISPLAY_PORT_INTERRUPT)
4258 			hotplug_status = i9xx_hpd_irq_ack(dev_priv);
4259 
4260 		/* Call regardless, as some status bits might not be
4261 		 * signalled in iir */
4262 		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
4263 
4264 		if (iir & I915_MASTER_ERROR_INTERRUPT)
4265 			i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
4266 
4267 		intel_uncore_write(&dev_priv->uncore, GEN2_IIR, iir);
4268 
4269 		if (iir & I915_USER_INTERRUPT)
4270 			intel_engine_cs_irq(to_gt(dev_priv)->engine[RCS0],
4271 					    iir);
4272 
4273 		if (iir & I915_BSD_USER_INTERRUPT)
4274 			intel_engine_cs_irq(to_gt(dev_priv)->engine[VCS0],
4275 					    iir >> 25);
4276 
4277 		if (iir & I915_MASTER_ERROR_INTERRUPT)
4278 			i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
4279 
4280 		if (hotplug_status)
4281 			i9xx_hpd_irq_handler(dev_priv, hotplug_status);
4282 
4283 		i965_pipestat_irq_handler(dev_priv, iir, pipe_stats);
4284 	} while (0);
4285 
4286 	pmu_irq_stats(dev_priv, ret);
4287 
4288 	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
4289 
4290 	return ret;
4291 }
4292 
4293 struct intel_hotplug_funcs {
4294 	void (*hpd_irq_setup)(struct drm_i915_private *i915);
4295 };
4296 
4297 #define HPD_FUNCS(platform)					 \
4298 static const struct intel_hotplug_funcs platform##_hpd_funcs = { \
4299 	.hpd_irq_setup = platform##_hpd_irq_setup,		 \
4300 }
4301 
4302 HPD_FUNCS(i915);
4303 HPD_FUNCS(dg1);
4304 HPD_FUNCS(gen11);
4305 HPD_FUNCS(bxt);
4306 HPD_FUNCS(icp);
4307 HPD_FUNCS(spt);
4308 HPD_FUNCS(ilk);
4309 #undef HPD_FUNCS
4310 
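/*
 * For reference, HPD_FUNCS(i915); above expands to:
 *
 *	static const struct intel_hotplug_funcs i915_hpd_funcs = {
 *		.hpd_irq_setup = i915_hpd_irq_setup,
 *	};
 *
 * i.e. one vtable per platform, which intel_irq_init() selects from below.
 */
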
4311 void intel_hpd_irq_setup(struct drm_i915_private *i915)
4312 {
4313 	if (i915->display_irqs_enabled && i915->display.funcs.hotplug)
4314 		i915->display.funcs.hotplug->hpd_irq_setup(i915);
4315 }
4316 
4317 /**
4318  * intel_irq_init - initializes irq support
4319  * @dev_priv: i915 device instance
4320  *
4321  * This function initializes all the irq support, including work items, timers
4322  * and all the vtables. It does not set up the interrupt itself, though.
4323  */
4324 void intel_irq_init(struct drm_i915_private *dev_priv)
4325 {
4326 	int i;
4327 
4328 	INIT_WORK(&dev_priv->l3_parity.error_work, ivb_parity_work);
4329 	for (i = 0; i < MAX_L3_SLICES; ++i)
4330 		dev_priv->l3_parity.remap_info[i] = NULL;
4331 
4332 	/* pre-gen11 the guc irq bits are in the upper 16 bits of the pm reg */
4333 	if (HAS_GT_UC(dev_priv) && GRAPHICS_VER(dev_priv) < 11)
4334 		to_gt(dev_priv)->pm_guc_events = GUC_INTR_GUC2HOST << 16;
4335 
4336 	if (!HAS_DISPLAY(dev_priv))
4337 		return;
4338 
4339 	intel_hpd_init_pins(dev_priv);
4340 
4341 	intel_hpd_init_early(dev_priv);
4342 
4343 	dev_priv->drm.vblank_disable_immediate = true;
4344 
4345 	/* Most platforms treat the display irq block as an always-on
4346 	 * power domain. vlv/chv can disable it at runtime and need
4347 	 * special care to avoid writing any of the display block registers
4348 	 * outside of the power domain. We defer setting up the display irqs
4349 	 * in this case to the runtime pm.
4350 	 */
4351 	dev_priv->display_irqs_enabled = true;
4352 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
4353 		dev_priv->display_irqs_enabled = false;
4354 
4355 	if (HAS_GMCH(dev_priv)) {
4356 		if (I915_HAS_HOTPLUG(dev_priv))
4357 			dev_priv->display.funcs.hotplug = &i915_hpd_funcs;
4358 	} else {
4359 		if (HAS_PCH_DG2(dev_priv))
4360 			dev_priv->display.funcs.hotplug = &icp_hpd_funcs;
4361 		else if (HAS_PCH_DG1(dev_priv))
4362 			dev_priv->display.funcs.hotplug = &dg1_hpd_funcs;
4363 		else if (DISPLAY_VER(dev_priv) >= 11)
4364 			dev_priv->display.funcs.hotplug = &gen11_hpd_funcs;
4365 		else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
4366 			dev_priv->display.funcs.hotplug = &bxt_hpd_funcs;
4367 		else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
4368 			dev_priv->display.funcs.hotplug = &icp_hpd_funcs;
4369 		else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
4370 			dev_priv->display.funcs.hotplug = &spt_hpd_funcs;
4371 		else
4372 			dev_priv->display.funcs.hotplug = &ilk_hpd_funcs;
4373 	}
4374 }
4375 
4376 /**
4377  * intel_irq_fini - deinitializes IRQ support
4378  * @i915: i915 device instance
4379  *
4380  * This function deinitializes all the IRQ support.
4381  */
4382 void intel_irq_fini(struct drm_i915_private *i915)
4383 {
4384 	int i;
4385 
4386 	for (i = 0; i < MAX_L3_SLICES; ++i)
4387 		kfree(i915->l3_parity.remap_info[i]);
4388 }
4389 
4390 static irq_handler_t intel_irq_handler(struct drm_i915_private *dev_priv)
4391 {
4392 	if (HAS_GMCH(dev_priv)) {
4393 		if (IS_CHERRYVIEW(dev_priv))
4394 			return cherryview_irq_handler;
4395 		else if (IS_VALLEYVIEW(dev_priv))
4396 			return valleyview_irq_handler;
4397 		else if (GRAPHICS_VER(dev_priv) == 4)
4398 			return i965_irq_handler;
4399 		else if (GRAPHICS_VER(dev_priv) == 3)
4400 			return i915_irq_handler;
4401 		else
4402 			return i8xx_irq_handler;
4403 	} else {
4404 		if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
4405 			return dg1_irq_handler;
4406 		else if (GRAPHICS_VER(dev_priv) >= 11)
4407 			return gen11_irq_handler;
4408 		else if (GRAPHICS_VER(dev_priv) >= 8)
4409 			return gen8_irq_handler;
4410 		else
4411 			return ilk_irq_handler;
4412 	}
4413 }
4414 
4415 static void intel_irq_reset(struct drm_i915_private *dev_priv)
4416 {
4417 	if (HAS_GMCH(dev_priv)) {
4418 		if (IS_CHERRYVIEW(dev_priv))
4419 			cherryview_irq_reset(dev_priv);
4420 		else if (IS_VALLEYVIEW(dev_priv))
4421 			valleyview_irq_reset(dev_priv);
4422 		else if (GRAPHICS_VER(dev_priv) == 4)
4423 			i965_irq_reset(dev_priv);
4424 		else if (GRAPHICS_VER(dev_priv) == 3)
4425 			i915_irq_reset(dev_priv);
4426 		else
4427 			i8xx_irq_reset(dev_priv);
4428 	} else {
4429 		if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
4430 			dg1_irq_reset(dev_priv);
4431 		else if (GRAPHICS_VER(dev_priv) >= 11)
4432 			gen11_irq_reset(dev_priv);
4433 		else if (GRAPHICS_VER(dev_priv) >= 8)
4434 			gen8_irq_reset(dev_priv);
4435 		else
4436 			ilk_irq_reset(dev_priv);
4437 	}
4438 }
4439 
4440 static void intel_irq_postinstall(struct drm_i915_private *dev_priv)
4441 {
4442 	if (HAS_GMCH(dev_priv)) {
4443 		if (IS_CHERRYVIEW(dev_priv))
4444 			cherryview_irq_postinstall(dev_priv);
4445 		else if (IS_VALLEYVIEW(dev_priv))
4446 			valleyview_irq_postinstall(dev_priv);
4447 		else if (GRAPHICS_VER(dev_priv) == 4)
4448 			i965_irq_postinstall(dev_priv);
4449 		else if (GRAPHICS_VER(dev_priv) == 3)
4450 			i915_irq_postinstall(dev_priv);
4451 		else
4452 			i8xx_irq_postinstall(dev_priv);
4453 	} else {
4454 		if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
4455 			dg1_irq_postinstall(dev_priv);
4456 		else if (GRAPHICS_VER(dev_priv) >= 11)
4457 			gen11_irq_postinstall(dev_priv);
4458 		else if (GRAPHICS_VER(dev_priv) >= 8)
4459 			gen8_irq_postinstall(dev_priv);
4460 		else
4461 			ilk_irq_postinstall(dev_priv);
4462 	}
4463 }
4464 
4465 /**
4466  * intel_irq_install - enables the hardware interrupt
4467  * @dev_priv: i915 device instance
4468  *
4469  * This function enables the hardware interrupt handling, but leaves hotplug
4470  * handling disabled. It is called after intel_irq_init().
4471  *
4472  * In the driver load and resume code we need working interrupts in a few places
4473  * but don't want to deal with the hassle of concurrent probe and hotplug
4474  * workers. Hence the split into this two-stage approach.
4475  */
4476 int intel_irq_install(struct drm_i915_private *dev_priv)
4477 {
4478 	int irq = to_pci_dev(dev_priv->drm.dev)->irq;
4479 	int ret;
4480 
4481 	/*
4482 	 * We enable some interrupt sources in our postinstall hooks, so mark
4483 	 * interrupts as enabled _before_ actually enabling them to avoid
4484 	 * special cases in our ordering checks.
4485 	 */
4486 	dev_priv->runtime_pm.irqs_enabled = true;
4487 
4488 	dev_priv->irq_enabled = true;
4489 
4490 	intel_irq_reset(dev_priv);
4491 
4492 	ret = request_irq(irq, intel_irq_handler(dev_priv),
4493 			  IRQF_SHARED, DRIVER_NAME, dev_priv);
4494 	if (ret < 0) {
4495 		dev_priv->irq_enabled = false;
4496 		return ret;
4497 	}
4498 
4499 	intel_irq_postinstall(dev_priv);
4500 
4501 	return ret;
4502 }
4503 
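/*
 * Typical driver-load ordering, as a sketch (the exact call sites live in
 * the probe/remove paths, e.g. i915_driver.c; the error label below is
 * illustrative):
 *
 *	intel_irq_init(i915);		// vtables and work items, no HW access
 *	ret = intel_irq_install(i915);	// request_irq() + postinstall hooks
 *	if (ret)
 *		goto err;
 *	...
 *	intel_irq_uninstall(i915);	// on remove, or on probe error unwind
 */
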
4504 /**
4505  * intel_irq_uninstall - finalizes all irq handling
4506  * @dev_priv: i915 device instance
4507  *
4508  * This stops interrupt and hotplug handling and unregisters and frees all
4509  * resources acquired in the init functions.
4510  */
4511 void intel_irq_uninstall(struct drm_i915_private *dev_priv)
4512 {
4513 	int irq = to_pci_dev(dev_priv->drm.dev)->irq;
4514 
4515 	/*
4516 	 * FIXME we can get called twice during driver probe
4517 	 * error handling as well as during driver remove due to
4518 	 * intel_modeset_driver_remove() calling us out of sequence.
4519 	 * Would be nice if it didn't do that...
4520 	 */
4521 	if (!dev_priv->irq_enabled)
4522 		return;
4523 
4524 	dev_priv->irq_enabled = false;
4525 
4526 	intel_irq_reset(dev_priv);
4527 
4528 	free_irq(irq, dev_priv);
4529 
4530 	intel_hpd_cancel_work(dev_priv);
4531 	dev_priv->runtime_pm.irqs_enabled = false;
4532 }
4533 
4534 /**
4535  * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
4536  * @dev_priv: i915 device instance
4537  *
4538  * This function is used to disable interrupts at runtime, both in the runtime
4539  * pm and the system suspend/resume code.
4540  */
4541 void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
4542 {
4543 	intel_irq_reset(dev_priv);
4544 	dev_priv->runtime_pm.irqs_enabled = false;
4545 	intel_synchronize_irq(dev_priv);
4546 }
4547 
4548 /**
4549  * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
4550  * @dev_priv: i915 device instance
4551  *
4552  * This function is used to enable interrupts at runtime, both in the runtime
4553  * pm and the system suspend/resume code.
4554  */
4555 void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
4556 {
4557 	dev_priv->runtime_pm.irqs_enabled = true;
4558 	intel_irq_reset(dev_priv);
4559 	intel_irq_postinstall(dev_priv);
4560 }
4561 
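/*
 * These two helpers are meant to be used as a pair around a power
 * transition, e.g. (sketch):
 *
 *	intel_runtime_pm_disable_interrupts(i915);	// suspend side
 *	...power down, later power back up...
 *	intel_runtime_pm_enable_interrupts(i915);	// resume side
 *
 * Note that enabling performs a full reset + postinstall rather than
 * restoring saved state, so no interrupt state needs to survive the
 * power cycle.
 */
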
4562 bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
4563 {
4564 	return dev_priv->runtime_pm.irqs_enabled;
4565 }
4566 
4567 void intel_synchronize_irq(struct drm_i915_private *i915)
4568 {
4569 	synchronize_irq(to_pci_dev(i915->drm.dev)->irq);
4570 }
4571 
4572 void intel_synchronize_hardirq(struct drm_i915_private *i915)
4573 {
4574 	synchronize_hardirq(to_pci_dev(i915->drm.dev)->irq);
4575 }
4576