xref: /openbmc/linux/drivers/gpu/drm/i915/i915_irq.c (revision 301306a9)
/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/circ_buf.h>
#include <linux/slab.h>
#include <linux/sysrq.h>

#include <drm/drm_drv.h>

#include "display/intel_de.h"
#include "display/intel_display_trace.h"
#include "display/intel_display_types.h"
#include "display/intel_fifo_underrun.h"
#include "display/intel_hotplug.h"
#include "display/intel_lpe_audio.h"
#include "display/intel_psr.h"

#include "gt/intel_breadcrumbs.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_irq.h"
#include "gt/intel_gt_pm_irq.h"
#include "gt/intel_gt_regs.h"
#include "gt/intel_rps.h"

#include "i915_drv.h"
#include "i915_irq.h"
#include "intel_pm.h"

/**
 * DOC: interrupt handling
 *
 * These functions provide the basic support for enabling and disabling the
 * interrupt handling support. There's a lot more functionality in i915_irq.c
 * and related files, but that will be described in separate chapters.
 */

/*
 * Interrupt statistics for the PMU. Increments the counter only if the
 * interrupt originated from the GPU, so that interrupts from a device
 * sharing the interrupt line are not accounted.
 */
static inline void pmu_irq_stats(struct drm_i915_private *i915,
				 irqreturn_t res)
{
	if (unlikely(res != IRQ_HANDLED))
		return;

	/*
	 * A clever compiler translates that into INC. A not so clever one
	 * should at least prevent store tearing.
	 */
	WRITE_ONCE(i915->pmu.irq_count, i915->pmu.irq_count + 1);
}
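
/*
 * The plain WRITE_ONCE() above is sufficient because the interrupt
 * handler is the only writer; the PMU read side is expected to pair
 * it with READ_ONCE() when sampling irq_count.
 */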

typedef bool (*long_pulse_detect_func)(enum hpd_pin pin, u32 val);
typedef u32 (*hotplug_enables_func)(struct drm_i915_private *i915,
				    enum hpd_pin pin);

static const u32 hpd_ilk[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG,
};

static const u32 hpd_ivb[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
};

static const u32 hpd_bdw[HPD_NUM_PINS] = {
	[HPD_PORT_A] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_A),
};

static const u32 hpd_ibx[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG,
};

static const u32 hpd_cpt[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
};

static const u32 hpd_spt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
	[HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT,
};

static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN,
};

static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS,
};

static const u32 hpd_status_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS,
};

static const u32 hpd_bxt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_A),
	[HPD_PORT_B] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_B),
	[HPD_PORT_C] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_C),
};

static const u32 hpd_gen11[HPD_NUM_PINS] = {
	[HPD_PORT_TC1] = GEN11_TC_HOTPLUG(HPD_PORT_TC1) | GEN11_TBT_HOTPLUG(HPD_PORT_TC1),
	[HPD_PORT_TC2] = GEN11_TC_HOTPLUG(HPD_PORT_TC2) | GEN11_TBT_HOTPLUG(HPD_PORT_TC2),
	[HPD_PORT_TC3] = GEN11_TC_HOTPLUG(HPD_PORT_TC3) | GEN11_TBT_HOTPLUG(HPD_PORT_TC3),
	[HPD_PORT_TC4] = GEN11_TC_HOTPLUG(HPD_PORT_TC4) | GEN11_TBT_HOTPLUG(HPD_PORT_TC4),
	[HPD_PORT_TC5] = GEN11_TC_HOTPLUG(HPD_PORT_TC5) | GEN11_TBT_HOTPLUG(HPD_PORT_TC5),
	[HPD_PORT_TC6] = GEN11_TC_HOTPLUG(HPD_PORT_TC6) | GEN11_TBT_HOTPLUG(HPD_PORT_TC6),
};

static const u32 hpd_icp[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_A),
	[HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_B),
	[HPD_PORT_C] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_C),
	[HPD_PORT_TC1] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC1),
	[HPD_PORT_TC2] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC2),
	[HPD_PORT_TC3] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC3),
	[HPD_PORT_TC4] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC4),
	[HPD_PORT_TC5] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC5),
	[HPD_PORT_TC6] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC6),
};

static const u32 hpd_sde_dg1[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_A),
	[HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_B),
	[HPD_PORT_C] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_C),
	[HPD_PORT_D] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_D),
};

static void intel_hpd_init_pins(struct drm_i915_private *dev_priv)
{
	struct i915_hotplug *hpd = &dev_priv->hotplug;

	if (HAS_GMCH(dev_priv)) {
		if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
		    IS_CHERRYVIEW(dev_priv))
			hpd->hpd = hpd_status_g4x;
		else
			hpd->hpd = hpd_status_i915;
		return;
	}

	if (DISPLAY_VER(dev_priv) >= 11)
		hpd->hpd = hpd_gen11;
	else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
		hpd->hpd = hpd_bxt;
	else if (DISPLAY_VER(dev_priv) >= 8)
		hpd->hpd = hpd_bdw;
	else if (DISPLAY_VER(dev_priv) >= 7)
		hpd->hpd = hpd_ivb;
	else
		hpd->hpd = hpd_ilk;

	if ((INTEL_PCH_TYPE(dev_priv) < PCH_DG1) &&
	    (!HAS_PCH_SPLIT(dev_priv) || HAS_PCH_NOP(dev_priv)))
		return;

	if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1)
		hpd->pch_hpd = hpd_sde_dg1;
	else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
		hpd->pch_hpd = hpd_icp;
	else if (HAS_PCH_CNP(dev_priv) || HAS_PCH_SPT(dev_priv))
		hpd->pch_hpd = hpd_spt;
	else if (HAS_PCH_LPT(dev_priv) || HAS_PCH_CPT(dev_priv))
		hpd->pch_hpd = hpd_cpt;
	else if (HAS_PCH_IBX(dev_priv))
		hpd->pch_hpd = hpd_ibx;
	else
		MISSING_CASE(INTEL_PCH_TYPE(dev_priv));
}

static void
intel_handle_vblank(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);

	drm_crtc_handle_vblank(&crtc->base);
}

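/*
 * A note on the IMR/IER/IIR triplets handled below: IER selects which
 * interrupt sources are enabled at all, IMR masks sources from actually
 * raising a CPU interrupt, and IIR latches the pending interrupts (and
 * can queue up two events per bit, hence the paranoid double clear on
 * reset). The posting reads flush each write before the next step.
 */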
void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr,
		    i915_reg_t iir, i915_reg_t ier)
{
	intel_uncore_write(uncore, imr, 0xffffffff);
	intel_uncore_posting_read(uncore, imr);

	intel_uncore_write(uncore, ier, 0);

	/* IIR can theoretically queue up two events. Be paranoid. */
	intel_uncore_write(uncore, iir, 0xffffffff);
	intel_uncore_posting_read(uncore, iir);
	intel_uncore_write(uncore, iir, 0xffffffff);
	intel_uncore_posting_read(uncore, iir);
}

void gen2_irq_reset(struct intel_uncore *uncore)
{
	intel_uncore_write16(uncore, GEN2_IMR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IMR);

	intel_uncore_write16(uncore, GEN2_IER, 0);

	/* IIR can theoretically queue up two events. Be paranoid. */
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
}

/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
static void gen3_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg)
{
	u32 val = intel_uncore_read(uncore, reg);

	if (val == 0)
		return;

	drm_WARN(&uncore->i915->drm, 1,
		 "Interrupt register 0x%x is not zero: 0x%08x\n",
		 i915_mmio_reg_offset(reg), val);
	intel_uncore_write(uncore, reg, 0xffffffff);
	intel_uncore_posting_read(uncore, reg);
	intel_uncore_write(uncore, reg, 0xffffffff);
	intel_uncore_posting_read(uncore, reg);
}

static void gen2_assert_iir_is_zero(struct intel_uncore *uncore)
{
	u16 val = intel_uncore_read16(uncore, GEN2_IIR);

	if (val == 0)
		return;

	drm_WARN(&uncore->i915->drm, 1,
		 "Interrupt register 0x%x is not zero: 0x%08x\n",
		 i915_mmio_reg_offset(GEN2_IIR), val);
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
}

void gen3_irq_init(struct intel_uncore *uncore,
		   i915_reg_t imr, u32 imr_val,
		   i915_reg_t ier, u32 ier_val,
		   i915_reg_t iir)
{
	gen3_assert_iir_is_zero(uncore, iir);

	intel_uncore_write(uncore, ier, ier_val);
	intel_uncore_write(uncore, imr, imr_val);
	intel_uncore_posting_read(uncore, imr);
}

void gen2_irq_init(struct intel_uncore *uncore,
		   u32 imr_val, u32 ier_val)
{
	gen2_assert_iir_is_zero(uncore);

	intel_uncore_write16(uncore, GEN2_IER, ier_val);
	intel_uncore_write16(uncore, GEN2_IMR, imr_val);
	intel_uncore_posting_read16(uncore, GEN2_IMR);
}

/* For display hotplug interrupt */
static inline void
i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
				     u32 mask,
				     u32 bits)
{
	u32 val;

	lockdep_assert_held(&dev_priv->irq_lock);
	drm_WARN_ON(&dev_priv->drm, bits & ~mask);

	val = intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_EN);
	val &= ~mask;
	val |= bits;
	intel_uncore_write(&dev_priv->uncore, PORT_HOTPLUG_EN, val);
}

/**
 * i915_hotplug_interrupt_update - update hotplug interrupt enable
 * @dev_priv: driver private
 * @mask: bits to update
 * @bits: bits to enable
 * NOTE: the HPD enable bits are modified both inside and outside
 * of an interrupt context. To prevent the read-modify-write cycles
 * from interfering, these bits are protected by a spinlock. Since this
 * function is usually not called from a context where the lock is
 * held already, this function acquires the lock itself. A non-locking
 * version is also available.
 */
void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
				   u32 mask,
				   u32 bits)
{
	spin_lock_irq(&dev_priv->irq_lock);
	i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
	spin_unlock_irq(&dev_priv->irq_lock);
}

/**
 * ilk_update_display_irq - update DEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_display_irq(struct drm_i915_private *dev_priv,
				   u32 interrupt_mask, u32 enabled_irq_mask)
{
	u32 new_val;

	lockdep_assert_held(&dev_priv->irq_lock);
	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

	new_val = dev_priv->irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->irq_mask &&
	    !drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv))) {
		dev_priv->irq_mask = new_val;
		intel_uncore_write(&dev_priv->uncore, DEIMR, dev_priv->irq_mask);
		intel_uncore_posting_read(&dev_priv->uncore, DEIMR);
	}
}

void ilk_enable_display_irq(struct drm_i915_private *i915, u32 bits)
{
	ilk_update_display_irq(i915, bits, bits);
}

void ilk_disable_display_irq(struct drm_i915_private *i915, u32 bits)
{
	ilk_update_display_irq(i915, bits, 0);
}
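
/*
 * Both helpers above require dev_priv->irq_lock to be held (see the
 * lockdep assert in ilk_update_display_irq()); a typical call site,
 * e.g. enabling a pipe's vblank interrupt, looks like:
 *
 *	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
 *	ilk_enable_display_irq(dev_priv, bit);
 *	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 */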

/**
 * bdw_update_port_irq - update DE port interrupt
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
				u32 interrupt_mask,
				u32 enabled_irq_mask)
{
	u32 new_val;
	u32 old_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
		return;

	old_val = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PORT_IMR);

	new_val = old_val;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != old_val) {
		intel_uncore_write(&dev_priv->uncore, GEN8_DE_PORT_IMR, new_val);
		intel_uncore_posting_read(&dev_priv->uncore, GEN8_DE_PORT_IMR);
	}
}

/**
 * bdw_update_pipe_irq - update DE pipe interrupt
 * @dev_priv: driver private
 * @pipe: pipe whose interrupt to update
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
				enum pipe pipe, u32 interrupt_mask,
				u32 enabled_irq_mask)
{
	u32 new_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->de_irq_mask[pipe];
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->de_irq_mask[pipe]) {
		dev_priv->de_irq_mask[pipe] = new_val;
		intel_uncore_write(&dev_priv->uncore, GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
		intel_uncore_posting_read(&dev_priv->uncore, GEN8_DE_PIPE_IMR(pipe));
	}
}

void bdw_enable_pipe_irq(struct drm_i915_private *i915,
			 enum pipe pipe, u32 bits)
{
	bdw_update_pipe_irq(i915, pipe, bits, bits);
}

void bdw_disable_pipe_irq(struct drm_i915_private *i915,
			  enum pipe pipe, u32 bits)
{
	bdw_update_pipe_irq(i915, pipe, bits, 0);
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
					 u32 interrupt_mask,
					 u32 enabled_irq_mask)
{
	u32 sdeimr = intel_uncore_read(&dev_priv->uncore, SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

	lockdep_assert_held(&dev_priv->irq_lock);

	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
		return;

	intel_uncore_write(&dev_priv->uncore, SDEIMR, sdeimr);
	intel_uncore_posting_read(&dev_priv->uncore, SDEIMR);
}

void ibx_enable_display_interrupt(struct drm_i915_private *i915, u32 bits)
{
	ibx_display_interrupt_update(i915, bits, bits);
}

void ibx_disable_display_interrupt(struct drm_i915_private *i915, u32 bits)
{
	ibx_display_interrupt_update(i915, bits, 0);
}

u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
			      enum pipe pipe)
{
	u32 status_mask = dev_priv->pipestat_irq_mask[pipe];
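	/*
	 * In PIPESTAT the enable bits occupy the high 16 bits, mirroring
	 * the status bits in the low 16 - hence the shift below.
	 */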
	u32 enable_mask = status_mask << 16;

	lockdep_assert_held(&dev_priv->irq_lock);

	if (DISPLAY_VER(dev_priv) < 5)
		goto out;

	/*
	 * On pipe A we don't support the PSR interrupt yet,
	 * on pipe B and C the same bit MBZ.
	 */
	if (drm_WARN_ON_ONCE(&dev_priv->drm,
			     status_mask & PIPE_A_PSR_STATUS_VLV))
		return 0;
	/*
	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
	 * A the same bit is for perf counters which we don't use either.
	 */
	if (drm_WARN_ON_ONCE(&dev_priv->drm,
			     status_mask & PIPE_B_PSR_STATUS_VLV))
		return 0;

	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

out:
	drm_WARN_ONCE(&dev_priv->drm,
		      enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask);

	return enable_mask;
}

void i915_enable_pipestat(struct drm_i915_private *dev_priv,
			  enum pipe pipe, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 enable_mask;

	drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: status_mask=0x%x\n",
		      pipe_name(pipe), status_mask);

	lockdep_assert_held(&dev_priv->irq_lock);
	drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));

	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == status_mask)
		return;

	dev_priv->pipestat_irq_mask[pipe] |= status_mask;
	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

	intel_uncore_write(&dev_priv->uncore, reg, enable_mask | status_mask);
	intel_uncore_posting_read(&dev_priv->uncore, reg);
}

void i915_disable_pipestat(struct drm_i915_private *dev_priv,
			   enum pipe pipe, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 enable_mask;

	drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: status_mask=0x%x\n",
		      pipe_name(pipe), status_mask);

	lockdep_assert_held(&dev_priv->irq_lock);
	drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));

	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == 0)
		return;

	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

	intel_uncore_write(&dev_priv->uncore, reg, enable_mask | status_mask);
	intel_uncore_posting_read(&dev_priv->uncore, reg);
}

static bool i915_has_asle(struct drm_i915_private *dev_priv)
{
	if (!dev_priv->opregion.asle)
		return false;

	return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
}

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 * @dev_priv: i915 device private
 */
static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
{
	if (!i915_has_asle(dev_priv))
		return;

	spin_lock_irq(&dev_priv->irq_lock);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
	if (DISPLAY_VER(dev_priv) >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_STATUS);

	spin_unlock_irq(&dev_priv->irq_lock);
}

/*
 * This timing diagram depicts the video signal in and
 * around the vertical blanking period.
 *
 * Assumptions about the fictitious mode used in this example:
 *  vblank_start >= 3
 *  vsync_start = vblank_start + 1
 *  vsync_end = vblank_start + 2
 *  vtotal = vblank_start + 3
 *
 *           start of vblank:
 *           latch double buffered registers
 *           increment frame counter (ctg+)
 *           generate start of vblank interrupt (gen4+)
 *           |
 *           |          frame start:
 *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
 *           |          may be shifted forward 1-3 extra lines via PIPECONF
 *           |          |
 *           |          |  start of vsync:
 *           |          |  generate vsync interrupt
 *           |          |  |
 * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
 *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
 * ----va---> <-----------------vb--------------------> <--------va-------------
 *       |          |       <----vs----->                     |
 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
 *       |          |                                         |
 *       last visible pixel                                   first visible pixel
 *                  |                                         increment frame counter (gen3/4)
 *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
 *
 * x  = horizontal active
 * _  = horizontal blanking
 * hs = horizontal sync
 * va = vertical active
 * vb = vertical blanking
 * vs = vertical sync
 * vbs = vblank_start (number)
 *
 * Summary:
 * - most events happen at the start of horizontal sync
 * - frame start happens at the start of horizontal blank, 1-4 lines
 *   (depending on PIPECONF settings) after the start of vblank
 * - gen3/4 pixel and frame counter are synchronized with the start
 *   of horizontal active on the first line of vertical active
 */

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
u32 i915_get_vblank_counter(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[drm_crtc_index(crtc)];
	const struct drm_display_mode *mode = &vblank->hwmode;
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	i915_reg_t high_frame, low_frame;
	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
	unsigned long irqflags;

	/*
	 * On i965gm TV output the frame counter only works up to
	 * the point when we enable the TV encoder. After that the
	 * frame counter ceases to work and reads zero. We need a
	 * vblank wait before enabling the TV encoder and so we
	 * have to enable vblank interrupts while the frame counter
	 * is still in a working state. However the core vblank code
	 * does not like us returning non-zero frame counter values
	 * when we've told it that we don't have a working frame
	 * counter. Thus we must stop non-zero values leaking out.
	 */
	if (!vblank->max_vblank_count)
		return 0;

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vbl_start = mode->crtc_vblank_start;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vbl_start = DIV_ROUND_UP(vbl_start, 2);

	/* Convert to pixel count */
	vbl_start *= htotal;

	/* Start of vblank event occurs at start of hsync */
	vbl_start -= htotal - hsync_start;

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = intel_de_read_fw(dev_priv, high_frame) & PIPE_FRAME_HIGH_MASK;
		low   = intel_de_read_fw(dev_priv, low_frame);
		high2 = intel_de_read_fw(dev_priv, high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * The frame counter increments at beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}

u32 g4x_get_vblank_counter(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[drm_crtc_index(crtc)];
	enum pipe pipe = to_intel_crtc(crtc)->pipe;

	if (!vblank->max_vblank_count)
		return 0;

	return intel_uncore_read(&dev_priv->uncore, PIPE_FRMCOUNT_G4X(pipe));
}

static u32 intel_crtc_scanlines_since_frame_timestamp(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct drm_vblank_crtc *vblank =
		&crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
	const struct drm_display_mode *mode = &vblank->hwmode;
	u32 htotal = mode->crtc_htotal;
	u32 clock = mode->crtc_clock;
	u32 scan_prev_time, scan_curr_time, scan_post_time;

	/*
	 * To avoid the race condition where we might cross into the
	 * next vblank just between the PIPE_FRMTMSTMP and TIMESTAMP_CTR
	 * reads, make sure we read PIPE_FRMTMSTMP and TIMESTAMP_CTR
	 * during the same frame.
	 */
	do {
		/*
		 * This field provides read back of the display
		 * pipe frame time stamp. The time stamp value
		 * is sampled at every start of vertical blank.
		 */
		scan_prev_time = intel_de_read_fw(dev_priv,
						  PIPE_FRMTMSTMP(crtc->pipe));

		/*
		 * The TIMESTAMP_CTR register has the current
		 * time stamp value.
		 */
		scan_curr_time = intel_de_read_fw(dev_priv, IVB_TIMESTAMP_CTR);

		scan_post_time = intel_de_read_fw(dev_priv,
						  PIPE_FRMTMSTMP(crtc->pipe));
	} while (scan_post_time != scan_prev_time);

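	/*
	 * Convert the elapsed time to scanlines: scale by the pixel clock
	 * (in kHz) and divide by the line length in pixels. The extra
	 * factor of 1000 works out if the timestamp counters tick once
	 * per microsecond, which appears to be the case.
	 */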
	return div_u64(mul_u32_u32(scan_curr_time - scan_prev_time,
				   clock), 1000 * htotal);
}

/*
 * On certain encoders on certain platforms, the pipe scanline register
 * will not work to get the scanline, either because the timings are
 * driven from the PORT or because of issues with scanline register
 * updates. This function uses the framestamp and current timestamp
 * registers to calculate the scanline instead.
 */
static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc)
{
	struct drm_vblank_crtc *vblank =
		&crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
	const struct drm_display_mode *mode = &vblank->hwmode;
	u32 vblank_start = mode->crtc_vblank_start;
	u32 vtotal = mode->crtc_vtotal;
	u32 scanline;

	scanline = intel_crtc_scanlines_since_frame_timestamp(crtc);
	scanline = min(scanline, vtotal - 1);
	scanline = (scanline + vblank_start) % vtotal;

	return scanline;
}

/*
 * intel_de_read_fw(), only for fast reads of display block, no need for
 * forcewake etc.
 */
static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct drm_display_mode *mode;
	struct drm_vblank_crtc *vblank;
	enum pipe pipe = crtc->pipe;
	int position, vtotal;

	if (!crtc->active)
		return 0;

	vblank = &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
	mode = &vblank->hwmode;

	if (crtc->mode_flags & I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP)
		return __intel_get_crtc_scanline_from_timestamp(crtc);

	vtotal = mode->crtc_vtotal;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vtotal /= 2;

	position = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & PIPEDSL_LINE_MASK;

	/*
	 * On HSW, the DSL reg (0x70000) appears to return 0 if we
	 * read it just before the start of vblank.  So try it again
	 * so we don't accidentally end up spanning a vblank frame
	 * increment, causing the pipe_update_end() code to squawk at us.
	 *
	 * The nature of this problem means we can't simply check the ISR
	 * bit and return the vblank start value; nor can we use the scanline
	 * debug register in the transcoder as it appears to have the same
	 * problem.  We may need to extend this to include other platforms,
	 * but so far testing only shows the problem on HSW.
	 */
	if (HAS_DDI(dev_priv) && !position) {
		int i, temp;

		for (i = 0; i < 100; i++) {
			udelay(1);
			temp = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & PIPEDSL_LINE_MASK;
			if (temp != position) {
				position = temp;
				break;
			}
		}
	}

	/*
	 * See update_scanline_offset() for the details on the
	 * scanline_offset adjustment.
	 */
	return (position + crtc->scanline_offset) % vtotal;
}

static bool i915_get_crtc_scanoutpos(struct drm_crtc *_crtc,
				     bool in_vblank_irq,
				     int *vpos, int *hpos,
				     ktime_t *stime, ktime_t *etime,
				     const struct drm_display_mode *mode)
{
	struct drm_device *dev = _crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *crtc = to_intel_crtc(_crtc);
	enum pipe pipe = crtc->pipe;
	int position;
	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
	unsigned long irqflags;
	bool use_scanline_counter = DISPLAY_VER(dev_priv) >= 5 ||
		IS_G4X(dev_priv) || DISPLAY_VER(dev_priv) == 2 ||
		crtc->mode_flags & I915_MODE_FLAG_USE_SCANLINE_COUNTER;

	if (drm_WARN_ON(&dev_priv->drm, !mode->crtc_clock)) {
		drm_dbg(&dev_priv->drm,
			"trying to get scanoutpos for disabled "
			"pipe %c\n", pipe_name(pipe));
		return false;
	}

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (crtc->mode_flags & I915_MODE_FLAG_VRR) {
		int scanlines = intel_crtc_scanlines_since_frame_timestamp(crtc);

		position = __intel_get_crtc_scanline(crtc);

		/*
		 * Already exiting vblank? If so, shift our position
		 * so it looks like we're already approaching the full
		 * vblank end. This should make the generated timestamp
		 * more or less match when the active portion will start.
		 */
		if (position >= vbl_start && scanlines < position)
			position = min(crtc->vmax_vblank_start + scanlines, vtotal - 1);
	} else if (use_scanline_counter) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = __intel_get_crtc_scanline(crtc);
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (intel_de_read_fw(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;

		/*
		 * In interlaced modes, the pixel counter counts all pixels,
		 * so one field will have htotal more pixels. To keep the
		 * reported position from jumping backwards when the pixel
		 * counter is beyond the length of the shorter field, just
		 * clamp the position to the length of the shorter field. This
		 * matches how the scanline counter based position works since
		 * the scanline counter doesn't count the two half lines.
		 */
		if (position >= vtotal)
			position = vtotal - 1;

		/*
		 * Start of vblank interrupt is triggered at start of hsync,
		 * just prior to the first active line of vblank. However we
		 * consider lines to start at the leading edge of horizontal
		 * active. So, should we get here before we've crossed into
		 * the horizontal active of the first line in vblank, we would
		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
		 * always add htotal-hsync_start to the current pixel position.
		 */
		position = (position + htotal - hsync_start) % vtotal;
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;

	if (use_scanline_counter) {
		*vpos = position;
		*hpos = 0;
	} else {
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	return true;
}

bool intel_crtc_get_vblank_timestamp(struct drm_crtc *crtc, int *max_error,
				     ktime_t *vblank_time, bool in_vblank_irq)
{
	return drm_crtc_vblank_helper_get_vblank_timestamp_internal(
		crtc, max_error, vblank_time, in_vblank_irq,
		i915_get_crtc_scanoutpos);
}

int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	unsigned long irqflags;
	int position;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	position = __intel_get_crtc_scanline(crtc);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	return position;
}

/**
 * ivb_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows, since statistically
 * the same row is more likely to go bad again.
 */
static void ivb_parity_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), l3_parity.error_work);
	struct intel_gt *gt = to_gt(dev_priv);
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	u32 misccpctl;
	u8 slice = 0;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->drm.struct_mutex);

	/* If we've screwed up tracking, just let the interrupt fire again */
	if (drm_WARN_ON(&dev_priv->drm, !dev_priv->l3_parity.which_slice))
		goto out;

	misccpctl = intel_uncore_read(&dev_priv->uncore, GEN7_MISCCPCTL);
	intel_uncore_write(&dev_priv->uncore, GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	intel_uncore_posting_read(&dev_priv->uncore, GEN7_MISCCPCTL);

	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		i915_reg_t reg;

		slice--;
		if (drm_WARN_ON_ONCE(&dev_priv->drm,
				     slice >= NUM_L3_SLICES(dev_priv)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1<<slice);

		reg = GEN7_L3CDERRST1(slice);

		error_status = intel_uncore_read(&dev_priv->uncore, reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		intel_uncore_write(&dev_priv->uncore, reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		intel_uncore_posting_read(&dev_priv->uncore, reg);

		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

		kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj,
				   KOBJ_CHANGE, parity_event);

		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			  slice, row, bank, subbank);

		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
	}

	intel_uncore_write(&dev_priv->uncore, GEN7_MISCCPCTL, misccpctl);

out:
	drm_WARN_ON(&dev_priv->drm, dev_priv->l3_parity.which_slice);
	spin_lock_irq(&gt->irq_lock);
	gen5_gt_enable_irq(gt, GT_PARITY_ERROR(dev_priv));
	spin_unlock_irq(&gt->irq_lock);

	mutex_unlock(&dev_priv->drm.struct_mutex);
}

static bool gen11_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_TC1:
	case HPD_PORT_TC2:
	case HPD_PORT_TC3:
	case HPD_PORT_TC4:
	case HPD_PORT_TC5:
	case HPD_PORT_TC6:
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(pin);
	default:
		return false;
	}
}

static bool bxt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_A:
		return val & PORTA_HOTPLUG_LONG_DETECT;
	case HPD_PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case HPD_PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool icp_ddi_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_A:
	case HPD_PORT_B:
	case HPD_PORT_C:
	case HPD_PORT_D:
		return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(pin);
	default:
		return false;
	}
}

static bool icp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_TC1:
	case HPD_PORT_TC2:
	case HPD_PORT_TC3:
	case HPD_PORT_TC4:
	case HPD_PORT_TC5:
	case HPD_PORT_TC6:
		return val & ICP_TC_HPD_LONG_DETECT(pin);
	default:
		return false;
	}
}

static bool spt_port_hotplug2_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_E:
		return val & PORTE_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool spt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_A:
		return val & PORTA_HOTPLUG_LONG_DETECT;
	case HPD_PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case HPD_PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	case HPD_PORT_D:
		return val & PORTD_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool ilk_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_A:
		return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool pch_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case HPD_PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	case HPD_PORT_D:
		return val & PORTD_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool i9xx_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_B:
		return val & PORTB_HOTPLUG_INT_LONG_PULSE;
	case HPD_PORT_C:
		return val & PORTC_HOTPLUG_INT_LONG_PULSE;
	case HPD_PORT_D:
		return val & PORTD_HOTPLUG_INT_LONG_PULSE;
	default:
		return false;
	}
}

/*
 * Get a bit mask of pins that have triggered, and which ones may be long.
 * This can be called multiple times with the same masks to accumulate
 * hotplug detection results from several registers.
 *
 * Note that the caller is expected to zero out the masks initially.
 */
static void intel_get_hpd_pins(struct drm_i915_private *dev_priv,
			       u32 *pin_mask, u32 *long_mask,
			       u32 hotplug_trigger, u32 dig_hotplug_reg,
			       const u32 hpd[HPD_NUM_PINS],
			       bool long_pulse_detect(enum hpd_pin pin, u32 val))
{
	enum hpd_pin pin;

	BUILD_BUG_ON(BITS_PER_TYPE(*pin_mask) < HPD_NUM_PINS);

	for_each_hpd_pin(pin) {
		if ((hpd[pin] & hotplug_trigger) == 0)
			continue;

		*pin_mask |= BIT(pin);

		if (long_pulse_detect(pin, dig_hotplug_reg))
			*long_mask |= BIT(pin);
	}

	drm_dbg(&dev_priv->drm,
		"hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x, long 0x%08x\n",
		hotplug_trigger, dig_hotplug_reg, *pin_mask, *long_mask);
}

static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv,
				  const u32 hpd[HPD_NUM_PINS])
{
	struct intel_encoder *encoder;
	u32 enabled_irqs = 0;

	for_each_intel_encoder(&dev_priv->drm, encoder)
		if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
			enabled_irqs |= hpd[encoder->hpd_pin];

	return enabled_irqs;
}

static u32 intel_hpd_hotplug_irqs(struct drm_i915_private *dev_priv,
				  const u32 hpd[HPD_NUM_PINS])
{
	struct intel_encoder *encoder;
	u32 hotplug_irqs = 0;

	for_each_intel_encoder(&dev_priv->drm, encoder)
		hotplug_irqs |= hpd[encoder->hpd_pin];

	return hotplug_irqs;
}

static u32 intel_hpd_hotplug_enables(struct drm_i915_private *i915,
				     hotplug_enables_func hotplug_enables)
{
	struct intel_encoder *encoder;
	u32 hotplug = 0;

	for_each_intel_encoder(&i915->drm, encoder)
		hotplug |= hotplug_enables(i915, encoder->hpd_pin);

	return hotplug;
}

static void gmbus_irq_handler(struct drm_i915_private *dev_priv)
{
	wake_up_all(&dev_priv->gmbus_wait_queue);
}

static void dp_aux_irq_handler(struct drm_i915_private *dev_priv)
{
	wake_up_all(&dev_priv->gmbus_wait_queue);
}
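
/*
 * dp_aux_irq_handler() deliberately wakes the same wait queue as
 * gmbus_irq_handler(): the DP AUX done waiters also sleep on
 * gmbus_wait_queue, so a single wake_up_all() serves both paths.
 */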

#if defined(CONFIG_DEBUG_FS)
static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
					 enum pipe pipe,
					 u32 crc0, u32 crc1,
					 u32 crc2, u32 crc3,
					 u32 crc4)
{
	struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);
	struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc;
	u32 crcs[5] = { crc0, crc1, crc2, crc3, crc4 };

	trace_intel_pipe_crc(crtc, crcs);

	spin_lock(&pipe_crc->lock);
	/*
	 * For some not yet identified reason, the first CRC is
	 * bonkers. So let's just wait for the next vblank and read
	 * out the buggy result.
	 *
	 * On GEN8+ sometimes the second CRC is bonkers as well, so
	 * don't trust that one either.
	 */
	if (pipe_crc->skipped <= 0 ||
	    (DISPLAY_VER(dev_priv) >= 8 && pipe_crc->skipped == 1)) {
		pipe_crc->skipped++;
		spin_unlock(&pipe_crc->lock);
		return;
	}
	spin_unlock(&pipe_crc->lock);

	drm_crtc_add_crc_entry(&crtc->base, true,
				drm_crtc_accurate_vblank_count(&crtc->base),
				crcs);
}
#else
static inline void
display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
			     enum pipe pipe,
			     u32 crc0, u32 crc1,
			     u32 crc2, u32 crc3,
			     u32 crc4) {}
#endif

static void flip_done_handler(struct drm_i915_private *i915,
			      enum pipe pipe)
{
	struct intel_crtc *crtc = intel_crtc_for_pipe(i915, pipe);
	struct drm_crtc_state *crtc_state = crtc->base.state;
	struct drm_pending_vblank_event *e = crtc_state->event;
	struct drm_device *dev = &i915->drm;
	unsigned long irqflags;

	spin_lock_irqsave(&dev->event_lock, irqflags);

	crtc_state->event = NULL;

	drm_crtc_send_vblank_event(&crtc->base, e);

	spin_unlock_irqrestore(&dev->event_lock, irqflags);
}

static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	display_pipe_crc_irq_handler(dev_priv, pipe,
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_1_IVB(pipe)),
				     0, 0, 0, 0);
}

static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	display_pipe_crc_irq_handler(dev_priv, pipe,
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_1_IVB(pipe)),
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_2_IVB(pipe)),
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_3_IVB(pipe)),
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_4_IVB(pipe)),
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_5_IVB(pipe)));
}

static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	u32 res1, res2;

	if (DISPLAY_VER(dev_priv) >= 3)
		res1 = intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RES1_I915(pipe));
	else
		res1 = 0;

	if (DISPLAY_VER(dev_priv) >= 5 || IS_G4X(dev_priv))
		res2 = intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RES2_G4X(pipe));
	else
		res2 = 0;

	display_pipe_crc_irq_handler(dev_priv, pipe,
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RED(pipe)),
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_GREEN(pipe)),
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_BLUE(pipe)),
				     res1, res2);
}

static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		intel_uncore_write(&dev_priv->uncore, PIPESTAT(pipe),
			   PIPESTAT_INT_STATUS_MASK |
			   PIPE_FIFO_UNDERRUN_STATUS);

		dev_priv->pipestat_irq_mask[pipe] = 0;
	}
}

static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
				  u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	enum pipe pipe;

	spin_lock(&dev_priv->irq_lock);

	if (!dev_priv->display_irqs_enabled) {
		spin_unlock(&dev_priv->irq_lock);
		return;
	}

	for_each_pipe(dev_priv, pipe) {
		i915_reg_t reg;
		u32 status_mask, enable_mask, iir_bit = 0;

		/*
		 * PIPESTAT bits get signalled even when the interrupt is
		 * disabled with the mask bits, and some of the status bits do
		 * not generate interrupts at all (like the underrun bit). Hence
		 * we need to be careful that we only handle what we want to
		 * handle.
		 */

		/* fifo underruns are filtered in the underrun handler. */
		status_mask = PIPE_FIFO_UNDERRUN_STATUS;

		switch (pipe) {
		default:
		case PIPE_A:
			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
			break;
		case PIPE_B:
			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
			break;
		case PIPE_C:
			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
			break;
		}
		if (iir & iir_bit)
			status_mask |= dev_priv->pipestat_irq_mask[pipe];

		if (!status_mask)
			continue;

		reg = PIPESTAT(pipe);
		pipe_stats[pipe] = intel_uncore_read(&dev_priv->uncore, reg) & status_mask;
		enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

		/*
		 * Clear the PIPE*STAT regs before the IIR
		 *
		 * Toggle the enable bits to make sure we get an
		 * edge in the ISR pipe event bit if we don't clear
		 * all the enabled status bits. Otherwise the edge
		 * triggered IIR on i965/g4x wouldn't notice that
		 * an interrupt is still pending.
		 */
		if (pipe_stats[pipe]) {
			intel_uncore_write(&dev_priv->uncore, reg, pipe_stats[pipe]);
			intel_uncore_write(&dev_priv->uncore, reg, enable_mask);
		}
	}
	spin_unlock(&dev_priv->irq_lock);
}

static void i8xx_pipestat_irq_handler(struct drm_i915_private *dev_priv,
				      u16 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
			intel_handle_vblank(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}
}

static void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv,
				      u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	bool blc_event = false;
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
			intel_handle_vblank(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
			blc_event = true;

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (blc_event || (iir & I915_ASLE_INTERRUPT))
		intel_opregion_asle_intr(dev_priv);
}

static void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv,
				      u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	bool blc_event = false;
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
			intel_handle_vblank(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
			blc_event = true;

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (blc_event || (iir & I915_ASLE_INTERRUPT))
		intel_opregion_asle_intr(dev_priv);

	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		gmbus_irq_handler(dev_priv);
}

static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
					    u32 pipe_stats[I915_MAX_PIPES])
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
			intel_handle_vblank(dev_priv, pipe);

		if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV)
			flip_done_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		gmbus_irq_handler(dev_priv);
}

static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
{
	u32 hotplug_status = 0, hotplug_status_mask;
	int i;

	if (IS_G4X(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		hotplug_status_mask = HOTPLUG_INT_STATUS_G4X |
			DP_AUX_CHANNEL_MASK_INT_STATUS_G4X;
	else
		hotplug_status_mask = HOTPLUG_INT_STATUS_I915;

	/*
	 * We absolutely have to clear all the pending interrupt
	 * bits in PORT_HOTPLUG_STAT. Otherwise the ISR port
	 * interrupt bit won't have an edge, and the i965/g4x
	 * edge triggered IIR will not notice that an interrupt
	 * is still pending. We can't use PORT_HOTPLUG_EN to
	 * guarantee the edge as the act of toggling the enable
	 * bits can itself generate a new hotplug interrupt :(
	 */
	for (i = 0; i < 10; i++) {
		u32 tmp = intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT) & hotplug_status_mask;

		if (tmp == 0)
			return hotplug_status;

		hotplug_status |= tmp;
		intel_uncore_write(&dev_priv->uncore, PORT_HOTPLUG_STAT, hotplug_status);
	}

	drm_WARN_ONCE(&dev_priv->drm, 1,
		      "PORT_HOTPLUG_STAT did not clear (0x%08x)\n",
		      intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT));

	return hotplug_status;
}

static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv,
				 u32 hotplug_status)
{
	u32 pin_mask = 0, long_mask = 0;
	u32 hotplug_trigger;

	if (IS_G4X(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
	else
		hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

	if (hotplug_trigger) {
		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
				   hotplug_trigger, hotplug_trigger,
				   dev_priv->hotplug.hpd,
				   i9xx_port_hotplug_long_detect);

		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
	}

	if ((IS_G4X(dev_priv) ||
	     IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
		dp_aux_irq_handler(dev_priv);
}

static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

1660 	do {
1661 		u32 iir, gt_iir, pm_iir;
1662 		u32 pipe_stats[I915_MAX_PIPES] = {};
1663 		u32 hotplug_status = 0;
1664 		u32 ier = 0;
1665 
1666 		gt_iir = intel_uncore_read(&dev_priv->uncore, GTIIR);
1667 		pm_iir = intel_uncore_read(&dev_priv->uncore, GEN6_PMIIR);
1668 		iir = intel_uncore_read(&dev_priv->uncore, VLV_IIR);
1669 
1670 		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
1671 			break;
1672 
1673 		ret = IRQ_HANDLED;
1674 
1675 		/*
1676 		 * Theory on interrupt generation, based on empirical evidence:
1677 		 *
1678 		 * x = ((VLV_IIR & VLV_IER) ||
1679 		 *      (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) &&
1680 		 *       (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE)));
1681 		 *
1682 		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
1683 		 * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to
1684 		 * guarantee the CPU interrupt will be raised again even if we
1685 		 * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR
1686 		 * bits this time around.
1687 		 */
1688 		intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, 0);
1689 		ier = intel_uncore_read(&dev_priv->uncore, VLV_IER);
1690 		intel_uncore_write(&dev_priv->uncore, VLV_IER, 0);
1691 
1692 		if (gt_iir)
1693 			intel_uncore_write(&dev_priv->uncore, GTIIR, gt_iir);
1694 		if (pm_iir)
1695 			intel_uncore_write(&dev_priv->uncore, GEN6_PMIIR, pm_iir);
1696 
1697 		if (iir & I915_DISPLAY_PORT_INTERRUPT)
1698 			hotplug_status = i9xx_hpd_irq_ack(dev_priv);
1699 
1700 		/* Call regardless, as some status bits might not be
1701 		 * signalled in iir */
1702 		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
1703 
1704 		if (iir & (I915_LPE_PIPE_A_INTERRUPT |
1705 			   I915_LPE_PIPE_B_INTERRUPT))
1706 			intel_lpe_audio_irq_handler(dev_priv);
1707 
1708 		/*
1709 		 * VLV_IIR is single buffered, and reflects the level
1710 		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
1711 		 */
1712 		if (iir)
1713 			intel_uncore_write(&dev_priv->uncore, VLV_IIR, iir);
1714 
1715 		intel_uncore_write(&dev_priv->uncore, VLV_IER, ier);
1716 		intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
1717 
1718 		if (gt_iir)
1719 			gen6_gt_irq_handler(to_gt(dev_priv), gt_iir);
1720 		if (pm_iir)
1721 			gen6_rps_irq_handler(&to_gt(dev_priv)->rps, pm_iir);
1722 
1723 		if (hotplug_status)
1724 			i9xx_hpd_irq_handler(dev_priv, hotplug_status);
1725 
1726 		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
1727 	} while (0);
1728 
1729 	pmu_irq_stats(dev_priv, ret);
1730 
1731 	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1732 
1733 	return ret;
1734 }
1735 
1736 static irqreturn_t cherryview_irq_handler(int irq, void *arg)
1737 {
1738 	struct drm_i915_private *dev_priv = arg;
1739 	irqreturn_t ret = IRQ_NONE;
1740 
1741 	if (!intel_irqs_enabled(dev_priv))
1742 		return IRQ_NONE;
1743 
1744 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
1745 	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1746 
1747 	do {
1748 		u32 master_ctl, iir;
1749 		u32 pipe_stats[I915_MAX_PIPES] = {};
1750 		u32 hotplug_status = 0;
1751 		u32 ier = 0;
1752 
1753 		master_ctl = intel_uncore_read(&dev_priv->uncore, GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
1754 		iir = intel_uncore_read(&dev_priv->uncore, VLV_IIR);
1755 
1756 		if (master_ctl == 0 && iir == 0)
1757 			break;
1758 
1759 		ret = IRQ_HANDLED;
1760 
1761 		/*
1762 		 * Theory on interrupt generation, based on empirical evidence:
1763 		 *
1764 		 * x = ((VLV_IIR & VLV_IER) ||
1765 		 *      ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) &&
1766 		 *       (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL)));
1767 		 *
1768 		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
1769 		 * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to
1770 		 * guarantee the CPU interrupt will be raised again even if we
1771 		 * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL
1772 		 * bits this time around.
1773 		 */
1774 		intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, 0);
1775 		ier = intel_uncore_read(&dev_priv->uncore, VLV_IER);
1776 		intel_uncore_write(&dev_priv->uncore, VLV_IER, 0);
1777 
1778 		gen8_gt_irq_handler(to_gt(dev_priv), master_ctl);
1779 
1780 		if (iir & I915_DISPLAY_PORT_INTERRUPT)
1781 			hotplug_status = i9xx_hpd_irq_ack(dev_priv);
1782 
1783 		/* Call regardless, as some status bits might not be
1784 		 * signalled in iir */
1785 		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
1786 
1787 		if (iir & (I915_LPE_PIPE_A_INTERRUPT |
1788 			   I915_LPE_PIPE_B_INTERRUPT |
1789 			   I915_LPE_PIPE_C_INTERRUPT))
1790 			intel_lpe_audio_irq_handler(dev_priv);
1791 
1792 		/*
1793 		 * VLV_IIR is single buffered, and reflects the level
1794 		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
1795 		 */
1796 		if (iir)
1797 			intel_uncore_write(&dev_priv->uncore, VLV_IIR, iir);
1798 
1799 		intel_uncore_write(&dev_priv->uncore, VLV_IER, ier);
1800 		intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
1801 
1802 		if (hotplug_status)
1803 			i9xx_hpd_irq_handler(dev_priv, hotplug_status);
1804 
1805 		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
1806 	} while (0);
1807 
1808 	pmu_irq_stats(dev_priv, ret);
1809 
1810 	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1811 
1812 	return ret;
1813 }
1814 
1815 static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
1816 				u32 hotplug_trigger)
1817 {
1818 	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
1819 
1820 	/*
1821 	 * Somehow the PCH doesn't seem to really ack the interrupt to the CPU
1822 	 * unless we touch the hotplug register, even if hotplug_trigger is
1823 	 * zero. Not acking leads to "The master control interrupt lied (SDE)!"
1824 	 * errors.
1825 	 */
1826 	dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
1827 	if (!hotplug_trigger) {
1828 		u32 mask = PORTA_HOTPLUG_STATUS_MASK |
1829 			PORTD_HOTPLUG_STATUS_MASK |
1830 			PORTC_HOTPLUG_STATUS_MASK |
1831 			PORTB_HOTPLUG_STATUS_MASK;
1832 		dig_hotplug_reg &= ~mask;
1833 	}
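	/*
	 * The status bits are write-1-to-clear: drop them from the dummy
	 * ack write so we don't discard events we haven't processed yet.
	 */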
1834 
1835 	intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, dig_hotplug_reg);
1836 	if (!hotplug_trigger)
1837 		return;
1838 
1839 	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1840 			   hotplug_trigger, dig_hotplug_reg,
1841 			   dev_priv->hotplug.pch_hpd,
1842 			   pch_port_hotplug_long_detect);
1843 
1844 	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1845 }
1846 
1847 static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
1848 {
1849 	enum pipe pipe;
1850 	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
1851 
1852 	ibx_hpd_irq_handler(dev_priv, hotplug_trigger);
1853 
1854 	if (pch_iir & SDE_AUDIO_POWER_MASK) {
1855 		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
1856 			       SDE_AUDIO_POWER_SHIFT);
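		/* ffs() is 1-based, so the lowest bit of the shifted field maps to port B */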
1857 		drm_dbg(&dev_priv->drm, "PCH audio power change on port %c\n",
1858 			port_name(port));
1859 	}
1860 
1861 	if (pch_iir & SDE_AUX_MASK)
1862 		dp_aux_irq_handler(dev_priv);
1863 
1864 	if (pch_iir & SDE_GMBUS)
1865 		gmbus_irq_handler(dev_priv);
1866 
1867 	if (pch_iir & SDE_AUDIO_HDCP_MASK)
1868 		drm_dbg(&dev_priv->drm, "PCH HDCP audio interrupt\n");
1869 
1870 	if (pch_iir & SDE_AUDIO_TRANS_MASK)
1871 		drm_dbg(&dev_priv->drm, "PCH transcoder audio interrupt\n");
1872 
1873 	if (pch_iir & SDE_POISON)
1874 		drm_err(&dev_priv->drm, "PCH poison interrupt\n");
1875 
1876 	if (pch_iir & SDE_FDI_MASK) {
1877 		for_each_pipe(dev_priv, pipe)
1878 			drm_dbg(&dev_priv->drm, "  pipe %c FDI IIR: 0x%08x\n",
1879 				pipe_name(pipe),
1880 				intel_uncore_read(&dev_priv->uncore, FDI_RX_IIR(pipe)));
1881 	}
1882 
1883 	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
1884 		drm_dbg(&dev_priv->drm, "PCH transcoder CRC done interrupt\n");
1885 
1886 	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
1887 		drm_dbg(&dev_priv->drm,
1888 			"PCH transcoder CRC error interrupt\n");
1889 
1890 	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
1891 		intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A);
1892 
1893 	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
1894 		intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_B);
1895 }
1896 
1897 static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
1898 {
1899 	u32 err_int = intel_uncore_read(&dev_priv->uncore, GEN7_ERR_INT);
1900 	enum pipe pipe;
1901 
1902 	if (err_int & ERR_INT_POISON)
1903 		drm_err(&dev_priv->drm, "Poison interrupt\n");
1904 
1905 	for_each_pipe(dev_priv, pipe) {
1906 		if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
1907 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1908 
1909 		if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
1910 			if (IS_IVYBRIDGE(dev_priv))
1911 				ivb_pipe_crc_irq_handler(dev_priv, pipe);
1912 			else
1913 				hsw_pipe_crc_irq_handler(dev_priv, pipe);
1914 		}
1915 	}
1916 
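	/* GEN7_ERR_INT is write-1-to-clear: ack the bits we just sampled */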
1917 	intel_uncore_write(&dev_priv->uncore, GEN7_ERR_INT, err_int);
1918 }
1919 
1920 static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
1921 {
1922 	u32 serr_int = intel_uncore_read(&dev_priv->uncore, SERR_INT);
1923 	enum pipe pipe;
1924 
1925 	if (serr_int & SERR_INT_POISON)
1926 		drm_err(&dev_priv->drm, "PCH poison interrupt\n");
1927 
1928 	for_each_pipe(dev_priv, pipe)
1929 		if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe))
1930 			intel_pch_fifo_underrun_irq_handler(dev_priv, pipe);
1931 
1932 	intel_uncore_write(&dev_priv->uncore, SERR_INT, serr_int);
1933 }
1934 
1935 static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
1936 {
1937 	enum pipe pipe;
1938 	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
1939 
1940 	ibx_hpd_irq_handler(dev_priv, hotplug_trigger);
1941 
1942 	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
1943 		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
1944 			       SDE_AUDIO_POWER_SHIFT_CPT);
1945 		drm_dbg(&dev_priv->drm, "PCH audio power change on port %c\n",
1946 			port_name(port));
1947 	}
1948 
1949 	if (pch_iir & SDE_AUX_MASK_CPT)
1950 		dp_aux_irq_handler(dev_priv);
1951 
1952 	if (pch_iir & SDE_GMBUS_CPT)
1953 		gmbus_irq_handler(dev_priv);
1954 
1955 	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
1956 		drm_dbg(&dev_priv->drm, "Audio CP request interrupt\n");
1957 
1958 	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
1959 		drm_dbg(&dev_priv->drm, "Audio CP change interrupt\n");
1960 
1961 	if (pch_iir & SDE_FDI_MASK_CPT) {
1962 		for_each_pipe(dev_priv, pipe)
1963 			drm_dbg(&dev_priv->drm, "  pipe %c FDI IIR: 0x%08x\n",
1964 				pipe_name(pipe),
1965 				intel_uncore_read(&dev_priv->uncore, FDI_RX_IIR(pipe)));
1966 	}
1967 
1968 	if (pch_iir & SDE_ERROR_CPT)
1969 		cpt_serr_int_handler(dev_priv);
1970 }
1971 
1972 static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
1973 {
1974 	u32 ddi_hotplug_trigger = pch_iir & SDE_DDI_HOTPLUG_MASK_ICP;
1975 	u32 tc_hotplug_trigger = pch_iir & SDE_TC_HOTPLUG_MASK_ICP;
1976 	u32 pin_mask = 0, long_mask = 0;
1977 
1978 	if (ddi_hotplug_trigger) {
1979 		u32 dig_hotplug_reg;
1980 
1981 		dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, SHOTPLUG_CTL_DDI);
1982 		intel_uncore_write(&dev_priv->uncore, SHOTPLUG_CTL_DDI, dig_hotplug_reg);
1983 
1984 		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1985 				   ddi_hotplug_trigger, dig_hotplug_reg,
1986 				   dev_priv->hotplug.pch_hpd,
1987 				   icp_ddi_port_hotplug_long_detect);
1988 	}
1989 
1990 	if (tc_hotplug_trigger) {
1991 		u32 dig_hotplug_reg;
1992 
1993 		dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, SHOTPLUG_CTL_TC);
1994 		intel_uncore_write(&dev_priv->uncore, SHOTPLUG_CTL_TC, dig_hotplug_reg);
1995 
1996 		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1997 				   tc_hotplug_trigger, dig_hotplug_reg,
1998 				   dev_priv->hotplug.pch_hpd,
1999 				   icp_tc_port_hotplug_long_detect);
2000 	}
2001 
2002 	if (pin_mask)
2003 		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2004 
2005 	if (pch_iir & SDE_GMBUS_ICP)
2006 		gmbus_irq_handler(dev_priv);
2007 }
2008 
2009 static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
2010 {
2011 	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
2012 		~SDE_PORTE_HOTPLUG_SPT;
2013 	u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
2014 	u32 pin_mask = 0, long_mask = 0;
2015 
2016 	if (hotplug_trigger) {
2017 		u32 dig_hotplug_reg;
2018 
2019 		dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
2020 		intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, dig_hotplug_reg);
2021 
2022 		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2023 				   hotplug_trigger, dig_hotplug_reg,
2024 				   dev_priv->hotplug.pch_hpd,
2025 				   spt_port_hotplug_long_detect);
2026 	}
2027 
2028 	if (hotplug2_trigger) {
2029 		u32 dig_hotplug_reg;
2030 
2031 		dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG2);
2032 		intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG2, dig_hotplug_reg);
2033 
2034 		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2035 				   hotplug2_trigger, dig_hotplug_reg,
2036 				   dev_priv->hotplug.pch_hpd,
2037 				   spt_port_hotplug2_long_detect);
2038 	}
2039 
2040 	if (pin_mask)
2041 		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2042 
2043 	if (pch_iir & SDE_GMBUS_CPT)
2044 		gmbus_irq_handler(dev_priv);
2045 }
2046 
2047 static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv,
2048 				u32 hotplug_trigger)
2049 {
2050 	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2051 
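	/* The write back acks (clears) the latched short/long pulse bits */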
2052 	dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL);
2053 	intel_uncore_write(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);
2054 
2055 	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2056 			   hotplug_trigger, dig_hotplug_reg,
2057 			   dev_priv->hotplug.hpd,
2058 			   ilk_port_hotplug_long_detect);
2059 
2060 	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2061 }
2062 
2063 static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
2064 				    u32 de_iir)
2065 {
2066 	enum pipe pipe;
2067 	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;
2068 
2069 	if (hotplug_trigger)
2070 		ilk_hpd_irq_handler(dev_priv, hotplug_trigger);
2071 
2072 	if (de_iir & DE_AUX_CHANNEL_A)
2073 		dp_aux_irq_handler(dev_priv);
2074 
2075 	if (de_iir & DE_GSE)
2076 		intel_opregion_asle_intr(dev_priv);
2077 
2078 	if (de_iir & DE_POISON)
2079 		drm_err(&dev_priv->drm, "Poison interrupt\n");
2080 
2081 	for_each_pipe(dev_priv, pipe) {
2082 		if (de_iir & DE_PIPE_VBLANK(pipe))
2083 			intel_handle_vblank(dev_priv, pipe);
2084 
2085 		if (de_iir & DE_PLANE_FLIP_DONE(pipe))
2086 			flip_done_handler(dev_priv, pipe);
2087 
2088 		if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
2089 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2090 
2091 		if (de_iir & DE_PIPE_CRC_DONE(pipe))
2092 			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
2093 	}
2094 
2095 	/* check event from PCH */
2096 	if (de_iir & DE_PCH_EVENT) {
2097 		u32 pch_iir = intel_uncore_read(&dev_priv->uncore, SDEIIR);
2098 
2099 		if (HAS_PCH_CPT(dev_priv))
2100 			cpt_irq_handler(dev_priv, pch_iir);
2101 		else
2102 			ibx_irq_handler(dev_priv, pch_iir);
2103 
2104 		/* should clear PCH hotplug event before clearing CPU irq */
2105 		intel_uncore_write(&dev_priv->uncore, SDEIIR, pch_iir);
2106 	}
2107 
2108 	if (DISPLAY_VER(dev_priv) == 5 && de_iir & DE_PCU_EVENT)
2109 		gen5_rps_irq_handler(&to_gt(dev_priv)->rps);
2110 }
2111 
2112 static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
2113 				    u32 de_iir)
2114 {
2115 	enum pipe pipe;
2116 	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;
2117 
2118 	if (hotplug_trigger)
2119 		ilk_hpd_irq_handler(dev_priv, hotplug_trigger);
2120 
2121 	if (de_iir & DE_ERR_INT_IVB)
2122 		ivb_err_int_handler(dev_priv);
2123 
2124 	if (de_iir & DE_AUX_CHANNEL_A_IVB)
2125 		dp_aux_irq_handler(dev_priv);
2126 
2127 	if (de_iir & DE_GSE_IVB)
2128 		intel_opregion_asle_intr(dev_priv);
2129 
2130 	for_each_pipe(dev_priv, pipe) {
2131 		if (de_iir & DE_PIPE_VBLANK_IVB(pipe))
2132 			intel_handle_vblank(dev_priv, pipe);
2133 
2134 		if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe))
2135 			flip_done_handler(dev_priv, pipe);
2136 	}
2137 
2138 	/* check event from PCH */
2139 	if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) {
2140 		u32 pch_iir = intel_uncore_read(&dev_priv->uncore, SDEIIR);
2141 
2142 		cpt_irq_handler(dev_priv, pch_iir);
2143 
2144 		/* clear PCH hotplug event before clearing CPU irq */
2145 		intel_uncore_write(&dev_priv->uncore, SDEIIR, pch_iir);
2146 	}
2147 }
2148 
2149 /*
2150  * To handle irqs with the minimum potential races with fresh interrupts, we:
2151  * 1 - Disable Master Interrupt Control.
2152  * 2 - Find the source(s) of the interrupt.
2153  * 3 - Clear the Interrupt Identity bits (IIR).
2154  * 4 - Process the interrupt(s) that had bits set in the IIRs.
2155  * 5 - Re-enable Master Interrupt Control.
2156  */
2157 static irqreturn_t ilk_irq_handler(int irq, void *arg)
2158 {
2159 	struct drm_i915_private *i915 = arg;
2160 	void __iomem * const regs = i915->uncore.regs;
2161 	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
2162 	irqreturn_t ret = IRQ_NONE;
2163 
2164 	if (unlikely(!intel_irqs_enabled(i915)))
2165 		return IRQ_NONE;
2166 
2167 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
2168 	disable_rpm_wakeref_asserts(&i915->runtime_pm);
2169 
2170 	/* disable master interrupt before clearing iir  */
2171 	de_ier = raw_reg_read(regs, DEIER);
2172 	raw_reg_write(regs, DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
2173 
2174 	/* Disable south interrupts. We'll only write to SDEIIR once, so further
2175 	 * interrupts will be stored on its back queue, and then we'll be
2176 	 * able to process them after we restore SDEIER (as soon as we restore
2177 	 * it, we'll get an interrupt if SDEIIR still has something to process
2178 	 * due to its back queue). */
2179 	if (!HAS_PCH_NOP(i915)) {
2180 		sde_ier = raw_reg_read(regs, SDEIER);
2181 		raw_reg_write(regs, SDEIER, 0);
2182 	}
2183 
2184 	/* Find, clear, then process each source of interrupt */
2185 
2186 	gt_iir = raw_reg_read(regs, GTIIR);
2187 	if (gt_iir) {
2188 		raw_reg_write(regs, GTIIR, gt_iir);
2189 		if (GRAPHICS_VER(i915) >= 6)
2190 			gen6_gt_irq_handler(to_gt(i915), gt_iir);
2191 		else
2192 			gen5_gt_irq_handler(to_gt(i915), gt_iir);
2193 		ret = IRQ_HANDLED;
2194 	}
2195 
2196 	de_iir = raw_reg_read(regs, DEIIR);
2197 	if (de_iir) {
2198 		raw_reg_write(regs, DEIIR, de_iir);
2199 		if (DISPLAY_VER(i915) >= 7)
2200 			ivb_display_irq_handler(i915, de_iir);
2201 		else
2202 			ilk_display_irq_handler(i915, de_iir);
2203 		ret = IRQ_HANDLED;
2204 	}
2205 
2206 	if (GRAPHICS_VER(i915) >= 6) {
2207 		u32 pm_iir = raw_reg_read(regs, GEN6_PMIIR);
2208 		if (pm_iir) {
2209 			raw_reg_write(regs, GEN6_PMIIR, pm_iir);
2210 			gen6_rps_irq_handler(&to_gt(i915)->rps, pm_iir);
2211 			ret = IRQ_HANDLED;
2212 		}
2213 	}
2214 
2215 	raw_reg_write(regs, DEIER, de_ier);
2216 	if (sde_ier)
2217 		raw_reg_write(regs, SDEIER, sde_ier);
2218 
2219 	pmu_irq_stats(i915, ret);
2220 
2221 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
2222 	enable_rpm_wakeref_asserts(&i915->runtime_pm);
2223 
2224 	return ret;
2225 }
2226 
2227 static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv,
2228 				u32 hotplug_trigger)
2229 {
2230 	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2231 
2232 	dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
2233 	intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, dig_hotplug_reg);
2234 
2235 	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2236 			   hotplug_trigger, dig_hotplug_reg,
2237 			   dev_priv->hotplug.hpd,
2238 			   bxt_port_hotplug_long_detect);
2239 
2240 	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2241 }
2242 
2243 static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
2244 {
2245 	u32 pin_mask = 0, long_mask = 0;
2246 	u32 trigger_tc = iir & GEN11_DE_TC_HOTPLUG_MASK;
2247 	u32 trigger_tbt = iir & GEN11_DE_TBT_HOTPLUG_MASK;
2248 
2249 	if (trigger_tc) {
2250 		u32 dig_hotplug_reg;
2251 
2252 		dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL);
2253 		intel_uncore_write(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL, dig_hotplug_reg);
2254 
2255 		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2256 				   trigger_tc, dig_hotplug_reg,
2257 				   dev_priv->hotplug.hpd,
2258 				   gen11_port_hotplug_long_detect);
2259 	}
2260 
2261 	if (trigger_tbt) {
2262 		u32 dig_hotplug_reg;
2263 
2264 		dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL);
2265 		intel_uncore_write(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL, dig_hotplug_reg);
2266 
2267 		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2268 				   trigger_tbt, dig_hotplug_reg,
2269 				   dev_priv->hotplug.hpd,
2270 				   gen11_port_hotplug_long_detect);
2271 	}
2272 
2273 	if (pin_mask)
2274 		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2275 	else
2276 		drm_err(&dev_priv->drm,
2277 			"Unexpected DE HPD interrupt 0x%08x\n", iir);
2278 }
2279 
2280 static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
2281 {
2282 	u32 mask;
2283 
2284 	if (DISPLAY_VER(dev_priv) >= 13)
2285 		return TGL_DE_PORT_AUX_DDIA |
2286 			TGL_DE_PORT_AUX_DDIB |
2287 			TGL_DE_PORT_AUX_DDIC |
2288 			XELPD_DE_PORT_AUX_DDID |
2289 			XELPD_DE_PORT_AUX_DDIE |
2290 			TGL_DE_PORT_AUX_USBC1 |
2291 			TGL_DE_PORT_AUX_USBC2 |
2292 			TGL_DE_PORT_AUX_USBC3 |
2293 			TGL_DE_PORT_AUX_USBC4;
2294 	else if (DISPLAY_VER(dev_priv) >= 12)
2295 		return TGL_DE_PORT_AUX_DDIA |
2296 			TGL_DE_PORT_AUX_DDIB |
2297 			TGL_DE_PORT_AUX_DDIC |
2298 			TGL_DE_PORT_AUX_USBC1 |
2299 			TGL_DE_PORT_AUX_USBC2 |
2300 			TGL_DE_PORT_AUX_USBC3 |
2301 			TGL_DE_PORT_AUX_USBC4 |
2302 			TGL_DE_PORT_AUX_USBC5 |
2303 			TGL_DE_PORT_AUX_USBC6;
2304 
2306 	mask = GEN8_AUX_CHANNEL_A;
2307 	if (DISPLAY_VER(dev_priv) >= 9)
2308 		mask |= GEN9_AUX_CHANNEL_B |
2309 			GEN9_AUX_CHANNEL_C |
2310 			GEN9_AUX_CHANNEL_D;
2311 
2312 	if (DISPLAY_VER(dev_priv) == 11) {
2313 		mask |= ICL_AUX_CHANNEL_F;
2314 		mask |= ICL_AUX_CHANNEL_E;
2315 	}
2316 
2317 	return mask;
2318 }
2319 
2320 static u32 gen8_de_pipe_fault_mask(struct drm_i915_private *dev_priv)
2321 {
2322 	if (DISPLAY_VER(dev_priv) >= 13 || HAS_D12_PLANE_MINIMIZATION(dev_priv))
2323 		return RKL_DE_PIPE_IRQ_FAULT_ERRORS;
2324 	else if (DISPLAY_VER(dev_priv) >= 11)
2325 		return GEN11_DE_PIPE_IRQ_FAULT_ERRORS;
2326 	else if (DISPLAY_VER(dev_priv) >= 9)
2327 		return GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
2328 	else
2329 		return GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
2330 }
2331 
2332 static void
2333 gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
2334 {
2335 	bool found = false;
2336 
2337 	if (iir & GEN8_DE_MISC_GSE) {
2338 		intel_opregion_asle_intr(dev_priv);
2339 		found = true;
2340 	}
2341 
2342 	if (iir & GEN8_DE_EDP_PSR) {
2343 		struct intel_encoder *encoder;
2344 		u32 psr_iir;
2345 		i915_reg_t iir_reg;
2346 
2347 		for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
2348 			struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2349 
2350 			if (DISPLAY_VER(dev_priv) >= 12)
2351 				iir_reg = TRANS_PSR_IIR(intel_dp->psr.transcoder);
2352 			else
2353 				iir_reg = EDP_PSR_IIR;
2354 
2355 			psr_iir = intel_uncore_read(&dev_priv->uncore, iir_reg);
2356 			intel_uncore_write(&dev_priv->uncore, iir_reg, psr_iir);
2357 
2358 			if (psr_iir)
2359 				found = true;
2360 
2361 			intel_psr_irq_handler(intel_dp, psr_iir);
2362 
2363 			/* prior to GEN12 there is only one EDP PSR */
2364 			if (DISPLAY_VER(dev_priv) < 12)
2365 				break;
2366 		}
2367 	}
2368 
2369 	if (!found)
2370 		drm_err(&dev_priv->drm, "Unexpected DE Misc interrupt\n");
2371 }
2372 
2373 static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv,
2374 					   u32 te_trigger)
2375 {
2376 	enum pipe pipe = INVALID_PIPE;
2377 	enum transcoder dsi_trans;
2378 	enum port port;
2379 	u32 val, tmp;
2380 
2381 	/*
2382 	 * In case of dual link, TE comes from DSI_1;
2383 	 * check here whether dual link is enabled.
2384 	 */
2385 	val = intel_uncore_read(&dev_priv->uncore, TRANS_DDI_FUNC_CTL2(TRANSCODER_DSI_0));
2386 	val &= PORT_SYNC_MODE_ENABLE;
2387 
2388 	/*
2389 	 * If dual link is enabled, read the DSI_0
2390 	 * transcoder registers instead.
2391 	 */
2392 	port = ((te_trigger & DSI1_TE && val) || (te_trigger & DSI0_TE)) ?
2393 						  PORT_A : PORT_B;
2394 	dsi_trans = (port == PORT_A) ? TRANSCODER_DSI_0 : TRANSCODER_DSI_1;
2395 
2396 	/* Check if DSI configured in command mode */
2397 	val = intel_uncore_read(&dev_priv->uncore, DSI_TRANS_FUNC_CONF(dsi_trans));
2398 	val = val & OP_MODE_MASK;
2399 
2400 	if (val != CMD_MODE_NO_GATE && val != CMD_MODE_TE_GATE) {
2401 		drm_err(&dev_priv->drm, "DSI transcoder not configured in command mode\n");
2402 		return;
2403 	}
2404 
2405 	/* Get PIPE for handling VBLANK event */
2406 	val = intel_uncore_read(&dev_priv->uncore, TRANS_DDI_FUNC_CTL(dsi_trans));
2407 	switch (val & TRANS_DDI_EDP_INPUT_MASK) {
2408 	case TRANS_DDI_EDP_INPUT_A_ON:
2409 		pipe = PIPE_A;
2410 		break;
2411 	case TRANS_DDI_EDP_INPUT_B_ONOFF:
2412 		pipe = PIPE_B;
2413 		break;
2414 	case TRANS_DDI_EDP_INPUT_C_ONOFF:
2415 		pipe = PIPE_C;
2416 		break;
2417 	default:
2418 		drm_err(&dev_priv->drm, "Invalid PIPE\n");
2419 		return;
2420 	}
2421 
2422 	intel_handle_vblank(dev_priv, pipe);
2423 
2424 	/* clear TE in dsi IIR */
2425 	port = (te_trigger & DSI1_TE) ? PORT_B : PORT_A;
2426 	tmp = intel_uncore_read(&dev_priv->uncore, DSI_INTR_IDENT_REG(port));
2427 	intel_uncore_write(&dev_priv->uncore, DSI_INTR_IDENT_REG(port), tmp);
2428 }
2429 
2430 static u32 gen8_de_pipe_flip_done_mask(struct drm_i915_private *i915)
2431 {
2432 	if (DISPLAY_VER(i915) >= 9)
2433 		return GEN9_PIPE_PLANE1_FLIP_DONE;
2434 	else
2435 		return GEN8_PIPE_PRIMARY_FLIP_DONE;
2436 }
2437 
2438 u32 gen8_de_pipe_underrun_mask(struct drm_i915_private *dev_priv)
2439 {
2440 	u32 mask = GEN8_PIPE_FIFO_UNDERRUN;
2441 
2442 	if (DISPLAY_VER(dev_priv) >= 13)
2443 		mask |= XELPD_PIPE_SOFT_UNDERRUN |
2444 			XELPD_PIPE_HARD_UNDERRUN;
2445 
2446 	return mask;
2447 }
2448 
2449 static irqreturn_t
2450 gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
2451 {
2452 	irqreturn_t ret = IRQ_NONE;
2453 	u32 iir;
2454 	enum pipe pipe;
2455 
2456 	drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_DISPLAY(dev_priv));
2457 
2458 	if (master_ctl & GEN8_DE_MISC_IRQ) {
2459 		iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_MISC_IIR);
2460 		if (iir) {
2461 			intel_uncore_write(&dev_priv->uncore, GEN8_DE_MISC_IIR, iir);
2462 			ret = IRQ_HANDLED;
2463 			gen8_de_misc_irq_handler(dev_priv, iir);
2464 		} else {
2465 			drm_err(&dev_priv->drm,
2466 				"The master control interrupt lied (DE MISC)!\n");
2467 		}
2468 	}
2469 
2470 	if (DISPLAY_VER(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) {
2471 		iir = intel_uncore_read(&dev_priv->uncore, GEN11_DE_HPD_IIR);
2472 		if (iir) {
2473 			intel_uncore_write(&dev_priv->uncore, GEN11_DE_HPD_IIR, iir);
2474 			ret = IRQ_HANDLED;
2475 			gen11_hpd_irq_handler(dev_priv, iir);
2476 		} else {
2477 			drm_err(&dev_priv->drm,
2478 				"The master control interrupt lied (DE HPD)!\n");
2479 		}
2480 	}
2481 
2482 	if (master_ctl & GEN8_DE_PORT_IRQ) {
2483 		iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PORT_IIR);
2484 		if (iir) {
2485 			bool found = false;
2486 
2487 			intel_uncore_write(&dev_priv->uncore, GEN8_DE_PORT_IIR, iir);
2488 			ret = IRQ_HANDLED;
2489 
2490 			if (iir & gen8_de_port_aux_mask(dev_priv)) {
2491 				dp_aux_irq_handler(dev_priv);
2492 				found = true;
2493 			}
2494 
2495 			if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
2496 				u32 hotplug_trigger = iir & BXT_DE_PORT_HOTPLUG_MASK;
2497 
2498 				if (hotplug_trigger) {
2499 					bxt_hpd_irq_handler(dev_priv, hotplug_trigger);
2500 					found = true;
2501 				}
2502 			} else if (IS_BROADWELL(dev_priv)) {
2503 				u32 hotplug_trigger = iir & BDW_DE_PORT_HOTPLUG_MASK;
2504 
2505 				if (hotplug_trigger) {
2506 					ilk_hpd_irq_handler(dev_priv, hotplug_trigger);
2507 					found = true;
2508 				}
2509 			}
2510 
2511 			if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
2512 			    (iir & BXT_DE_PORT_GMBUS)) {
2513 				gmbus_irq_handler(dev_priv);
2514 				found = true;
2515 			}
2516 
2517 			if (DISPLAY_VER(dev_priv) >= 11) {
2518 				u32 te_trigger = iir & (DSI0_TE | DSI1_TE);
2519 
2520 				if (te_trigger) {
2521 					gen11_dsi_te_interrupt_handler(dev_priv, te_trigger);
2522 					found = true;
2523 				}
2524 			}
2525 
2526 			if (!found)
2527 				drm_err(&dev_priv->drm,
2528 					"Unexpected DE Port interrupt\n");
2529 		} else {
2530 			drm_err(&dev_priv->drm,
2531 				"The master control interrupt lied (DE PORT)!\n");
2532 		}
2533 	}
2534 
2535 	for_each_pipe(dev_priv, pipe) {
2536 		u32 fault_errors;
2537 
2538 		if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
2539 			continue;
2540 
2541 		iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PIPE_IIR(pipe));
2542 		if (!iir) {
2543 			drm_err(&dev_priv->drm,
2544 				"The master control interrupt lied (DE PIPE)!\n");
2545 			continue;
2546 		}
2547 
2548 		ret = IRQ_HANDLED;
2549 		intel_uncore_write(&dev_priv->uncore, GEN8_DE_PIPE_IIR(pipe), iir);
2550 
2551 		if (iir & GEN8_PIPE_VBLANK)
2552 			intel_handle_vblank(dev_priv, pipe);
2553 
2554 		if (iir & gen8_de_pipe_flip_done_mask(dev_priv))
2555 			flip_done_handler(dev_priv, pipe);
2556 
2557 		if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
2558 			hsw_pipe_crc_irq_handler(dev_priv, pipe);
2559 
2560 		if (iir & gen8_de_pipe_underrun_mask(dev_priv))
2561 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2562 
2563 		fault_errors = iir & gen8_de_pipe_fault_mask(dev_priv);
2564 		if (fault_errors)
2565 			drm_err(&dev_priv->drm,
2566 				"Fault errors on pipe %c: 0x%08x\n",
2567 				pipe_name(pipe),
2568 				fault_errors);
2569 	}
2570 
2571 	if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) &&
2572 	    master_ctl & GEN8_DE_PCH_IRQ) {
2573 		/*
2574 		 * FIXME(BDW): Assume for now that the new interrupt handling
2575 		 * scheme also closed the SDE interrupt handling race we've seen
2576 		 * on older pch-split platforms. But this needs testing.
2577 		 */
2578 		iir = intel_uncore_read(&dev_priv->uncore, SDEIIR);
2579 		if (iir) {
2580 			intel_uncore_write(&dev_priv->uncore, SDEIIR, iir);
2581 			ret = IRQ_HANDLED;
2582 
2583 			if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
2584 				icp_irq_handler(dev_priv, iir);
2585 			else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
2586 				spt_irq_handler(dev_priv, iir);
2587 			else
2588 				cpt_irq_handler(dev_priv, iir);
2589 		} else {
2590 			/*
2591 			 * Like on previous PCH there seems to be something
2592 			 * fishy going on with forwarding PCH interrupts.
2593 			 */
2594 			drm_dbg(&dev_priv->drm,
2595 				"The master control interrupt lied (SDE)!\n");
2596 		}
2597 	}
2598 
2599 	return ret;
2600 }
2601 
2602 static inline u32 gen8_master_intr_disable(void __iomem * const regs)
2603 {
2604 	raw_reg_write(regs, GEN8_MASTER_IRQ, 0);
2605 
2606 	/*
2607 	 * Now with master disabled, get a sample of level indications
2608 	 * for this interrupt. Indications will be cleared on related acks.
2609 	 * New indications can and will light up during processing,
2610 	 * and will generate new interrupt after enabling master.
2611 	 */
2612 	return raw_reg_read(regs, GEN8_MASTER_IRQ);
2613 }
2614 
2615 static inline void gen8_master_intr_enable(void __iomem * const regs)
2616 {
2617 	raw_reg_write(regs, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
2618 }
2619 
2620 static irqreturn_t gen8_irq_handler(int irq, void *arg)
2621 {
2622 	struct drm_i915_private *dev_priv = arg;
2623 	void __iomem * const regs = dev_priv->uncore.regs;
2624 	u32 master_ctl;
2625 
2626 	if (!intel_irqs_enabled(dev_priv))
2627 		return IRQ_NONE;
2628 
2629 	master_ctl = gen8_master_intr_disable(regs);
2630 	if (!master_ctl) {
2631 		gen8_master_intr_enable(regs);
2632 		return IRQ_NONE;
2633 	}
2634 
2635 	/* Find, queue (onto bottom-halves), then clear each source */
2636 	gen8_gt_irq_handler(to_gt(dev_priv), master_ctl);
2637 
2638 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
2639 	if (master_ctl & ~GEN8_GT_IRQS) {
2640 		disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
2641 		gen8_de_irq_handler(dev_priv, master_ctl);
2642 		enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
2643 	}
2644 
2645 	gen8_master_intr_enable(regs);
2646 
2647 	pmu_irq_stats(dev_priv, IRQ_HANDLED);
2648 
2649 	return IRQ_HANDLED;
2650 }
2651 
2652 static u32
2653 gen11_gu_misc_irq_ack(struct intel_gt *gt, const u32 master_ctl)
2654 {
2655 	void __iomem * const regs = gt->uncore->regs;
2656 	u32 iir;
2657 
2658 	if (!(master_ctl & GEN11_GU_MISC_IRQ))
2659 		return 0;
2660 
2661 	iir = raw_reg_read(regs, GEN11_GU_MISC_IIR);
2662 	if (likely(iir))
2663 		raw_reg_write(regs, GEN11_GU_MISC_IIR, iir);
2664 
2665 	return iir;
2666 }
2667 
2668 static void
2669 gen11_gu_misc_irq_handler(struct intel_gt *gt, const u32 iir)
2670 {
2671 	if (iir & GEN11_GU_MISC_GSE)
2672 		intel_opregion_asle_intr(gt->i915);
2673 }
2674 
2675 static inline u32 gen11_master_intr_disable(void __iomem * const regs)
2676 {
2677 	raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0);
2678 
2679 	/*
2680 	 * Now with master disabled, get a sample of level indications
2681 	 * for this interrupt. Indications will be cleared on related acks.
2682 	 * New indications can and will light up during processing,
2683 	 * and will generate new interrupt after enabling master.
2684 	 */
2685 	return raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
2686 }
2687 
2688 static inline void gen11_master_intr_enable(void __iomem * const regs)
2689 {
2690 	raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ);
2691 }
2692 
2693 static void
2694 gen11_display_irq_handler(struct drm_i915_private *i915)
2695 {
2696 	void __iomem * const regs = i915->uncore.regs;
2697 	const u32 disp_ctl = raw_reg_read(regs, GEN11_DISPLAY_INT_CTL);
2698 
2699 	disable_rpm_wakeref_asserts(&i915->runtime_pm);
2700 	/*
2701 	 * GEN11_DISPLAY_INT_CTL has same format as GEN8_MASTER_IRQ
2702 	 * for the display related bits.
2703 	 */
2704 	raw_reg_write(regs, GEN11_DISPLAY_INT_CTL, 0x0);
2705 	gen8_de_irq_handler(i915, disp_ctl);
2706 	raw_reg_write(regs, GEN11_DISPLAY_INT_CTL,
2707 		      GEN11_DISPLAY_IRQ_ENABLE);
2708 
2709 	enable_rpm_wakeref_asserts(&i915->runtime_pm);
2710 }
2711 
2712 static irqreturn_t gen11_irq_handler(int irq, void *arg)
2713 {
2714 	struct drm_i915_private *i915 = arg;
2715 	void __iomem * const regs = i915->uncore.regs;
2716 	struct intel_gt *gt = to_gt(i915);
2717 	u32 master_ctl;
2718 	u32 gu_misc_iir;
2719 
2720 	if (!intel_irqs_enabled(i915))
2721 		return IRQ_NONE;
2722 
2723 	master_ctl = gen11_master_intr_disable(regs);
2724 	if (!master_ctl) {
2725 		gen11_master_intr_enable(regs);
2726 		return IRQ_NONE;
2727 	}
2728 
2729 	/* Find, queue (onto bottom-halves), then clear each source */
2730 	gen11_gt_irq_handler(gt, master_ctl);
2731 
2732 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
2733 	if (master_ctl & GEN11_DISPLAY_IRQ)
2734 		gen11_display_irq_handler(i915);
2735 
2736 	gu_misc_iir = gen11_gu_misc_irq_ack(gt, master_ctl);
2737 
2738 	gen11_master_intr_enable(regs);
2739 
2740 	gen11_gu_misc_irq_handler(gt, gu_misc_iir);
2741 
2742 	pmu_irq_stats(i915, IRQ_HANDLED);
2743 
2744 	return IRQ_HANDLED;
2745 }
2746 
2747 static inline u32 dg1_master_intr_disable(void __iomem * const regs)
2748 {
2749 	u32 val;
2750 
2751 	/* First disable interrupts */
2752 	raw_reg_write(regs, DG1_MSTR_TILE_INTR, 0);
2753 
2754 	/* Get the indication levels and ack the master unit */
2755 	val = raw_reg_read(regs, DG1_MSTR_TILE_INTR);
2756 	if (unlikely(!val))
2757 		return 0;
2758 
2759 	raw_reg_write(regs, DG1_MSTR_TILE_INTR, val);
2760 
2761 	return val;
2762 }
2763 
2764 static inline void dg1_master_intr_enable(void __iomem * const regs)
2765 {
2766 	raw_reg_write(regs, DG1_MSTR_TILE_INTR, DG1_MSTR_IRQ);
2767 }
2768 
2769 static irqreturn_t dg1_irq_handler(int irq, void *arg)
2770 {
2771 	struct drm_i915_private * const i915 = arg;
2772 	struct intel_gt *gt = to_gt(i915);
2773 	void __iomem * const regs = gt->uncore->regs;
2774 	u32 master_tile_ctl, master_ctl;
2775 	u32 gu_misc_iir;
2776 
2777 	if (!intel_irqs_enabled(i915))
2778 		return IRQ_NONE;
2779 
2780 	master_tile_ctl = dg1_master_intr_disable(regs);
2781 	if (!master_tile_ctl) {
2782 		dg1_master_intr_enable(regs);
2783 		return IRQ_NONE;
2784 	}
2785 
2786 	/* FIXME: we only support tile 0 for now. */
2787 	if (master_tile_ctl & DG1_MSTR_TILE(0)) {
2788 		master_ctl = raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
2789 		raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, master_ctl);
2790 	} else {
2791 		drm_err(&i915->drm, "Tile not supported: 0x%08x\n", master_tile_ctl);
2792 		dg1_master_intr_enable(regs);
2793 		return IRQ_NONE;
2794 	}
2795 
2796 	gen11_gt_irq_handler(gt, master_ctl);
2797 
2798 	if (master_ctl & GEN11_DISPLAY_IRQ)
2799 		gen11_display_irq_handler(i915);
2800 
2801 	gu_misc_iir = gen11_gu_misc_irq_ack(gt, master_ctl);
2802 
2803 	dg1_master_intr_enable(regs);
2804 
2805 	gen11_gu_misc_irq_handler(gt, gu_misc_iir);
2806 
2807 	pmu_irq_stats(i915, IRQ_HANDLED);
2808 
2809 	return IRQ_HANDLED;
2810 }
2811 
2812 /* Called from drm generic code, passed 'crtc' which
2813  * we use as a pipe index
2814  */
2815 int i8xx_enable_vblank(struct drm_crtc *crtc)
2816 {
2817 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2818 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
2819 	unsigned long irqflags;
2820 
2821 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2822 	i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
2823 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2824 
2825 	return 0;
2826 }
2827 
2828 int i915gm_enable_vblank(struct drm_crtc *crtc)
2829 {
2830 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2831 
2832 	/*
2833 	 * Vblank interrupts fail to wake the device up from C2+.
2834 	 * Disabling render clock gating during C-states avoids
2835 	 * the problem. There is a small power cost so we do this
2836 	 * only when vblank interrupts are actually enabled.
2837 	 */
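	/*
	 * SCPD0 is a masked register: _MASKED_BIT_ENABLE() sets both the
	 * bit and its write-enable bit in the upper 16 bits in one write.
	 */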
2838 	if (dev_priv->vblank_enabled++ == 0)
2839 		intel_uncore_write(&dev_priv->uncore, SCPD0, _MASKED_BIT_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
2840 
2841 	return i8xx_enable_vblank(crtc);
2842 }
2843 
2844 int i965_enable_vblank(struct drm_crtc *crtc)
2845 {
2846 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2847 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
2848 	unsigned long irqflags;
2849 
2850 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2851 	i915_enable_pipestat(dev_priv, pipe,
2852 			     PIPE_START_VBLANK_INTERRUPT_STATUS);
2853 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2854 
2855 	return 0;
2856 }
2857 
2858 int ilk_enable_vblank(struct drm_crtc *crtc)
2859 {
2860 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2861 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
2862 	unsigned long irqflags;
2863 	u32 bit = DISPLAY_VER(dev_priv) >= 7 ?
2864 		DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
2865 
2866 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2867 	ilk_enable_display_irq(dev_priv, bit);
2868 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2869 
2870 	/* Even though there is no DMC, frame counter can get stuck when
2871 	 * PSR is active as no frames are generated.
2872 	 */
2873 	if (HAS_PSR(dev_priv))
2874 		drm_crtc_vblank_restore(crtc);
2875 
2876 	return 0;
2877 }
2878 
2879 static bool gen11_dsi_configure_te(struct intel_crtc *intel_crtc,
2880 				   bool enable)
2881 {
2882 	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
2883 	enum port port;
2884 	u32 tmp;
2885 
2886 	if (!(intel_crtc->mode_flags &
2887 	    (I915_MODE_FLAG_DSI_USE_TE1 | I915_MODE_FLAG_DSI_USE_TE0)))
2888 		return false;
2889 
2890 	/* for dual link cases we consider TE from slave */
2891 	if (intel_crtc->mode_flags & I915_MODE_FLAG_DSI_USE_TE1)
2892 		port = PORT_B;
2893 	else
2894 		port = PORT_A;
2895 
2896 	tmp = intel_uncore_read(&dev_priv->uncore, DSI_INTR_MASK_REG(port));
2897 	if (enable)
2898 		tmp &= ~DSI_TE_EVENT;
2899 	else
2900 		tmp |= DSI_TE_EVENT;
2901 
2902 	intel_uncore_write(&dev_priv->uncore, DSI_INTR_MASK_REG(port), tmp);
2903 
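	/* Ack any stale TE event so it doesn't fire spuriously later */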
2904 	tmp = intel_uncore_read(&dev_priv->uncore, DSI_INTR_IDENT_REG(port));
2905 	intel_uncore_write(&dev_priv->uncore, DSI_INTR_IDENT_REG(port), tmp);
2906 
2907 	return true;
2908 }
2909 
2910 int bdw_enable_vblank(struct drm_crtc *_crtc)
2911 {
2912 	struct intel_crtc *crtc = to_intel_crtc(_crtc);
2913 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2914 	enum pipe pipe = crtc->pipe;
2915 	unsigned long irqflags;
2916 
2917 	if (gen11_dsi_configure_te(crtc, true))
2918 		return 0;
2919 
2920 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2921 	bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
2922 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2923 
2924 	/* Even if there is no DMC, frame counter can get stuck when
2925 	 * PSR is active as no frames are generated, so check only for PSR.
2926 	 */
2927 	if (HAS_PSR(dev_priv))
2928 		drm_crtc_vblank_restore(&crtc->base);
2929 
2930 	return 0;
2931 }
2932 
2933 /* Called from drm generic code, passed 'crtc' which
2934  * we use as a pipe index
2935  */
2936 void i8xx_disable_vblank(struct drm_crtc *crtc)
2937 {
2938 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2939 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
2940 	unsigned long irqflags;
2941 
2942 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2943 	i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
2944 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2945 }
2946 
2947 void i915gm_disable_vblank(struct drm_crtc *crtc)
2948 {
2949 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2950 
2951 	i8xx_disable_vblank(crtc);
2952 
2953 	if (--dev_priv->vblank_enabled == 0)
2954 		intel_uncore_write(&dev_priv->uncore, SCPD0, _MASKED_BIT_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
2955 }
2956 
2957 void i965_disable_vblank(struct drm_crtc *crtc)
2958 {
2959 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2960 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
2961 	unsigned long irqflags;
2962 
2963 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2964 	i915_disable_pipestat(dev_priv, pipe,
2965 			      PIPE_START_VBLANK_INTERRUPT_STATUS);
2966 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2967 }
2968 
2969 void ilk_disable_vblank(struct drm_crtc *crtc)
2970 {
2971 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2972 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
2973 	unsigned long irqflags;
2974 	u32 bit = DISPLAY_VER(dev_priv) >= 7 ?
2975 		DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
2976 
2977 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2978 	ilk_disable_display_irq(dev_priv, bit);
2979 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2980 }
2981 
2982 void bdw_disable_vblank(struct drm_crtc *_crtc)
2983 {
2984 	struct intel_crtc *crtc = to_intel_crtc(_crtc);
2985 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2986 	enum pipe pipe = crtc->pipe;
2987 	unsigned long irqflags;
2988 
2989 	if (gen11_dsi_configure_te(crtc, false))
2990 		return;
2991 
2992 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2993 	bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
2994 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2995 }
2996 
2997 static void ibx_irq_reset(struct drm_i915_private *dev_priv)
2998 {
2999 	struct intel_uncore *uncore = &dev_priv->uncore;
3000 
3001 	if (HAS_PCH_NOP(dev_priv))
3002 		return;
3003 
3004 	GEN3_IRQ_RESET(uncore, SDE);
3005 
3006 	if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
3007 		intel_uncore_write(&dev_priv->uncore, SERR_INT, 0xffffffff);
3008 }
3009 
3010 static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
3011 {
3012 	struct intel_uncore *uncore = &dev_priv->uncore;
3013 
3014 	if (IS_CHERRYVIEW(dev_priv))
3015 		intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
3016 	else
3017 		intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_VLV);
3018 
3019 	i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
3020 	intel_uncore_write(uncore, PORT_HOTPLUG_STAT, intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT));
3021 
3022 	i9xx_pipestat_irq_reset(dev_priv);
3023 
3024 	GEN3_IRQ_RESET(uncore, VLV_);
3025 	dev_priv->irq_mask = ~0u;
3026 }
3027 
3028 static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
3029 {
3030 	struct intel_uncore *uncore = &dev_priv->uncore;
3031 
3032 	u32 pipestat_mask;
3033 	u32 enable_mask;
3034 	enum pipe pipe;
3035 
3036 	pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS;
3037 
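	/* GMBUS interrupt status is reported via pipe A's PIPESTAT */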
3038 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3039 	for_each_pipe(dev_priv, pipe)
3040 		i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
3041 
3042 	enable_mask = I915_DISPLAY_PORT_INTERRUPT |
3043 		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3044 		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3045 		I915_LPE_PIPE_A_INTERRUPT |
3046 		I915_LPE_PIPE_B_INTERRUPT;
3047 
3048 	if (IS_CHERRYVIEW(dev_priv))
3049 		enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT |
3050 			I915_LPE_PIPE_C_INTERRUPT;
3051 
3052 	drm_WARN_ON(&dev_priv->drm, dev_priv->irq_mask != ~0u);
3053 
3054 	dev_priv->irq_mask = ~enable_mask;
3055 
3056 	GEN3_IRQ_INIT(uncore, VLV_, dev_priv->irq_mask, enable_mask);
3057 }
3058 
3059 /* drm_dma.h hooks */
3061 static void ilk_irq_reset(struct drm_i915_private *dev_priv)
3062 {
3063 	struct intel_uncore *uncore = &dev_priv->uncore;
3064 
3065 	GEN3_IRQ_RESET(uncore, DE);
3066 	dev_priv->irq_mask = ~0u;
3067 
3068 	if (GRAPHICS_VER(dev_priv) == 7)
3069 		intel_uncore_write(uncore, GEN7_ERR_INT, 0xffffffff);
3070 
3071 	if (IS_HASWELL(dev_priv)) {
3072 		intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
3073 		intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
3074 	}
3075 
3076 	gen5_gt_irq_reset(to_gt(dev_priv));
3077 
3078 	ibx_irq_reset(dev_priv);
3079 }
3080 
3081 static void valleyview_irq_reset(struct drm_i915_private *dev_priv)
3082 {
3083 	intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, 0);
3084 	intel_uncore_posting_read(&dev_priv->uncore, VLV_MASTER_IER);
3085 
3086 	gen5_gt_irq_reset(to_gt(dev_priv));
3087 
3088 	spin_lock_irq(&dev_priv->irq_lock);
3089 	if (dev_priv->display_irqs_enabled)
3090 		vlv_display_irq_reset(dev_priv);
3091 	spin_unlock_irq(&dev_priv->irq_lock);
3092 }
3093 
3094 static void gen8_display_irq_reset(struct drm_i915_private *dev_priv)
3095 {
3096 	struct intel_uncore *uncore = &dev_priv->uncore;
3097 	enum pipe pipe;
3098 
3099 	if (!HAS_DISPLAY(dev_priv))
3100 		return;
3101 
3102 	intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
3103 	intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
3104 
3105 	for_each_pipe(dev_priv, pipe)
3106 		if (intel_display_power_is_enabled(dev_priv,
3107 						   POWER_DOMAIN_PIPE(pipe)))
3108 			GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
3109 
3110 	GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
3111 	GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);
3112 }
3113 
3114 static void gen8_irq_reset(struct drm_i915_private *dev_priv)
3115 {
3116 	struct intel_uncore *uncore = &dev_priv->uncore;
3117 
3118 	gen8_master_intr_disable(dev_priv->uncore.regs);
3119 
3120 	gen8_gt_irq_reset(to_gt(dev_priv));
3121 	gen8_display_irq_reset(dev_priv);
3122 	GEN3_IRQ_RESET(uncore, GEN8_PCU_);
3123 
3124 	if (HAS_PCH_SPLIT(dev_priv))
3125 		ibx_irq_reset(dev_priv);
3127 }
3128 
3129 static void gen11_display_irq_reset(struct drm_i915_private *dev_priv)
3130 {
3131 	struct intel_uncore *uncore = &dev_priv->uncore;
3132 	enum pipe pipe;
3133 	u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
3134 		BIT(TRANSCODER_C) | BIT(TRANSCODER_D);
3135 
3136 	if (!HAS_DISPLAY(dev_priv))
3137 		return;
3138 
3139 	intel_uncore_write(uncore, GEN11_DISPLAY_INT_CTL, 0);
3140 
3141 	if (DISPLAY_VER(dev_priv) >= 12) {
3142 		enum transcoder trans;
3143 
3144 		for_each_cpu_transcoder_masked(dev_priv, trans, trans_mask) {
3145 			enum intel_display_power_domain domain;
3146 
3147 			domain = POWER_DOMAIN_TRANSCODER(trans);
3148 			if (!intel_display_power_is_enabled(dev_priv, domain))
3149 				continue;
3150 
3151 			intel_uncore_write(uncore, TRANS_PSR_IMR(trans), 0xffffffff);
3152 			intel_uncore_write(uncore, TRANS_PSR_IIR(trans), 0xffffffff);
3153 		}
3154 	} else {
3155 		intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
3156 		intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
3157 	}
3158 
3159 	for_each_pipe(dev_priv, pipe)
3160 		if (intel_display_power_is_enabled(dev_priv,
3161 						   POWER_DOMAIN_PIPE(pipe)))
3162 			GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
3163 
3164 	GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
3165 	GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);
3166 	GEN3_IRQ_RESET(uncore, GEN11_DE_HPD_);
3167 
3168 	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
3169 		GEN3_IRQ_RESET(uncore, SDE);
3170 }
3171 
3172 static void gen11_irq_reset(struct drm_i915_private *dev_priv)
3173 {
3174 	struct intel_gt *gt = to_gt(dev_priv);
3175 	struct intel_uncore *uncore = gt->uncore;
3176 
3177 	gen11_master_intr_disable(dev_priv->uncore.regs);
3178 
3179 	gen11_gt_irq_reset(gt);
3180 	gen11_display_irq_reset(dev_priv);
3181 
3182 	GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_);
3183 	GEN3_IRQ_RESET(uncore, GEN8_PCU_);
3184 }
3185 
3186 static void dg1_irq_reset(struct drm_i915_private *dev_priv)
3187 {
3188 	struct intel_gt *gt = to_gt(dev_priv);
3189 	struct intel_uncore *uncore = gt->uncore;
3190 
3191 	dg1_master_intr_disable(dev_priv->uncore.regs);
3192 
3193 	gen11_gt_irq_reset(gt);
3194 	gen11_display_irq_reset(dev_priv);
3195 
3196 	GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_);
3197 	GEN3_IRQ_RESET(uncore, GEN8_PCU_);
3198 }
3199 
3200 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
3201 				     u8 pipe_mask)
3202 {
3203 	struct intel_uncore *uncore = &dev_priv->uncore;
3204 	u32 extra_ier = GEN8_PIPE_VBLANK |
3205 		gen8_de_pipe_underrun_mask(dev_priv) |
3206 		gen8_de_pipe_flip_done_mask(dev_priv);
3207 	enum pipe pipe;
3208 
3209 	spin_lock_irq(&dev_priv->irq_lock);
3210 
3211 	if (!intel_irqs_enabled(dev_priv)) {
3212 		spin_unlock_irq(&dev_priv->irq_lock);
3213 		return;
3214 	}
3215 
3216 	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
3217 		GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
3218 				  dev_priv->de_irq_mask[pipe],
3219 				  ~dev_priv->de_irq_mask[pipe] | extra_ier);
3220 
3221 	spin_unlock_irq(&dev_priv->irq_lock);
3222 }
3223 
3224 void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
3225 				     u8 pipe_mask)
3226 {
3227 	struct intel_uncore *uncore = &dev_priv->uncore;
3228 	enum pipe pipe;
3229 
3230 	spin_lock_irq(&dev_priv->irq_lock);
3231 
3232 	if (!intel_irqs_enabled(dev_priv)) {
3233 		spin_unlock_irq(&dev_priv->irq_lock);
3234 		return;
3235 	}
3236 
3237 	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
3238 		GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
3239 
3240 	spin_unlock_irq(&dev_priv->irq_lock);
3241 
3242 	/* make sure we're done processing display irqs */
3243 	intel_synchronize_irq(dev_priv);
3244 }
3245 
3246 static void cherryview_irq_reset(struct drm_i915_private *dev_priv)
3247 {
3248 	struct intel_uncore *uncore = &dev_priv->uncore;
3249 
3250 	intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, 0);
3251 	intel_uncore_posting_read(&dev_priv->uncore, GEN8_MASTER_IRQ);
3252 
3253 	gen8_gt_irq_reset(to_gt(dev_priv));
3254 
3255 	GEN3_IRQ_RESET(uncore, GEN8_PCU_);
3256 
3257 	spin_lock_irq(&dev_priv->irq_lock);
3258 	if (dev_priv->display_irqs_enabled)
3259 		vlv_display_irq_reset(dev_priv);
3260 	spin_unlock_irq(&dev_priv->irq_lock);
3261 }
3262 
3263 static u32 ibx_hotplug_enables(struct drm_i915_private *i915,
3264 			       enum hpd_pin pin)
3265 {
3266 	switch (pin) {
3267 	case HPD_PORT_A:
3268 		/*
3269 		 * When CPU and PCH are on the same package, port A
3270 		 * HPD must be enabled in both north and south.
3271 		 */
3272 		return HAS_PCH_LPT_LP(i915) ?
3273 			PORTA_HOTPLUG_ENABLE : 0;
3274 	case HPD_PORT_B:
3275 		return PORTB_HOTPLUG_ENABLE |
3276 			PORTB_PULSE_DURATION_2ms;
3277 	case HPD_PORT_C:
3278 		return PORTC_HOTPLUG_ENABLE |
3279 			PORTC_PULSE_DURATION_2ms;
3280 	case HPD_PORT_D:
3281 		return PORTD_HOTPLUG_ENABLE |
3282 			PORTD_PULSE_DURATION_2ms;
3283 	default:
3284 		return 0;
3285 	}
3286 }
3287 
3288 static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv)
3289 {
3290 	u32 hotplug;
3291 
3292 	/*
3293 	 * Enable digital hotplug on the PCH, and configure the DP short pulse
3294 	 * duration to 2ms (which is the minimum in the Display Port spec).
3295 	 * The pulse duration bits are reserved on LPT+.
3296 	 */
3297 	hotplug = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
3298 	hotplug &= ~(PORTA_HOTPLUG_ENABLE |
3299 		     PORTB_HOTPLUG_ENABLE |
3300 		     PORTC_HOTPLUG_ENABLE |
3301 		     PORTD_HOTPLUG_ENABLE |
3302 		     PORTB_PULSE_DURATION_MASK |
3303 		     PORTC_PULSE_DURATION_MASK |
3304 		     PORTD_PULSE_DURATION_MASK);
3305 	hotplug |= intel_hpd_hotplug_enables(dev_priv, ibx_hotplug_enables);
3306 	intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, hotplug);
3307 }
3308 
3309 static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
3310 {
3311 	u32 hotplug_irqs, enabled_irqs;
3312 
3313 	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
3314 	hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
3315 
3316 	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3317 
3318 	ibx_hpd_detection_setup(dev_priv);
3319 }
3320 
3321 static u32 icp_ddi_hotplug_enables(struct drm_i915_private *i915,
3322 				   enum hpd_pin pin)
3323 {
3324 	switch (pin) {
3325 	case HPD_PORT_A:
3326 	case HPD_PORT_B:
3327 	case HPD_PORT_C:
3328 	case HPD_PORT_D:
3329 		return SHOTPLUG_CTL_DDI_HPD_ENABLE(pin);
3330 	default:
3331 		return 0;
3332 	}
3333 }
3334 
3335 static u32 icp_tc_hotplug_enables(struct drm_i915_private *i915,
3336 				  enum hpd_pin pin)
3337 {
3338 	switch (pin) {
3339 	case HPD_PORT_TC1:
3340 	case HPD_PORT_TC2:
3341 	case HPD_PORT_TC3:
3342 	case HPD_PORT_TC4:
3343 	case HPD_PORT_TC5:
3344 	case HPD_PORT_TC6:
3345 		return ICP_TC_HPD_ENABLE(pin);
3346 	default:
3347 		return 0;
3348 	}
3349 }
3350 
3351 static void icp_ddi_hpd_detection_setup(struct drm_i915_private *dev_priv)
3352 {
3353 	u32 hotplug;
3354 
3355 	hotplug = intel_uncore_read(&dev_priv->uncore, SHOTPLUG_CTL_DDI);
3356 	hotplug &= ~(SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_A) |
3357 		     SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_B) |
3358 		     SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_C) |
3359 		     SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_D));
3360 	hotplug |= intel_hpd_hotplug_enables(dev_priv, icp_ddi_hotplug_enables);
3361 	intel_uncore_write(&dev_priv->uncore, SHOTPLUG_CTL_DDI, hotplug);
3362 }
3363 
3364 static void icp_tc_hpd_detection_setup(struct drm_i915_private *dev_priv)
3365 {
3366 	u32 hotplug;
3367 
3368 	hotplug = intel_uncore_read(&dev_priv->uncore, SHOTPLUG_CTL_TC);
3369 	hotplug &= ~(ICP_TC_HPD_ENABLE(HPD_PORT_TC1) |
3370 		     ICP_TC_HPD_ENABLE(HPD_PORT_TC2) |
3371 		     ICP_TC_HPD_ENABLE(HPD_PORT_TC3) |
3372 		     ICP_TC_HPD_ENABLE(HPD_PORT_TC4) |
3373 		     ICP_TC_HPD_ENABLE(HPD_PORT_TC5) |
3374 		     ICP_TC_HPD_ENABLE(HPD_PORT_TC6));
3375 	hotplug |= intel_hpd_hotplug_enables(dev_priv, icp_tc_hotplug_enables);
3376 	intel_uncore_write(&dev_priv->uncore, SHOTPLUG_CTL_TC, hotplug);
3377 }
3378 
3379 static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv)
3380 {
3381 	u32 hotplug_irqs, enabled_irqs;
3382 
3383 	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
3384 	hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
3385 
3386 	if (INTEL_PCH_TYPE(dev_priv) <= PCH_TGP)
3387 		intel_uncore_write(&dev_priv->uncore, SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
3388 
3389 	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3390 
3391 	icp_ddi_hpd_detection_setup(dev_priv);
3392 	icp_tc_hpd_detection_setup(dev_priv);
3393 }
3394 
3395 static u32 gen11_hotplug_enables(struct drm_i915_private *i915,
3396 				 enum hpd_pin pin)
3397 {
3398 	switch (pin) {
3399 	case HPD_PORT_TC1:
3400 	case HPD_PORT_TC2:
3401 	case HPD_PORT_TC3:
3402 	case HPD_PORT_TC4:
3403 	case HPD_PORT_TC5:
3404 	case HPD_PORT_TC6:
3405 		return GEN11_HOTPLUG_CTL_ENABLE(pin);
3406 	default:
3407 		return 0;
3408 	}
3409 }
3410 
3411 static void dg1_hpd_irq_setup(struct drm_i915_private *dev_priv)
3412 {
3413 	u32 val;
3414 
3415 	val = intel_uncore_read(&dev_priv->uncore, SOUTH_CHICKEN1);
3416 	val |= (INVERT_DDIA_HPD |
3417 		INVERT_DDIB_HPD |
3418 		INVERT_DDIC_HPD |
3419 		INVERT_DDID_HPD);
3420 	intel_uncore_write(&dev_priv->uncore, SOUTH_CHICKEN1, val);
3421 
3422 	icp_hpd_irq_setup(dev_priv);
3423 }
3424 
3425 static void gen11_tc_hpd_detection_setup(struct drm_i915_private *dev_priv)
3426 {
3427 	u32 hotplug;
3428 
3429 	hotplug = intel_uncore_read(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL);
3430 	hotplug &= ~(GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC1) |
3431 		     GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC2) |
3432 		     GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC3) |
3433 		     GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC4) |
3434 		     GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC5) |
3435 		     GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC6));
3436 	hotplug |= intel_hpd_hotplug_enables(dev_priv, gen11_hotplug_enables);
3437 	intel_uncore_write(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL, hotplug);
3438 }
3439 
3440 static void gen11_tbt_hpd_detection_setup(struct drm_i915_private *dev_priv)
3441 {
3442 	u32 hotplug;
3443 
3444 	hotplug = intel_uncore_read(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL);
3445 	hotplug &= ~(GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC1) |
3446 		     GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC2) |
3447 		     GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC3) |
3448 		     GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC4) |
3449 		     GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC5) |
3450 		     GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC6));
3451 	hotplug |= intel_hpd_hotplug_enables(dev_priv, gen11_hotplug_enables);
3452 	intel_uncore_write(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL, hotplug);
3453 }
3454 
3455 static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
3456 {
3457 	u32 hotplug_irqs, enabled_irqs;
3458 	u32 val;
3459 
3460 	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd);
3461 	hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.hpd);
3462 
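	/* Unmask the enabled hotplug IRQs; keep every other HPD bit masked. */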
3463 	val = intel_uncore_read(&dev_priv->uncore, GEN11_DE_HPD_IMR);
3464 	val &= ~hotplug_irqs;
3465 	val |= ~enabled_irqs & hotplug_irqs;
3466 	intel_uncore_write(&dev_priv->uncore, GEN11_DE_HPD_IMR, val);
3467 	intel_uncore_posting_read(&dev_priv->uncore, GEN11_DE_HPD_IMR);
3468 
3469 	gen11_tc_hpd_detection_setup(dev_priv);
3470 	gen11_tbt_hpd_detection_setup(dev_priv);
3471 
3472 	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
3473 		icp_hpd_irq_setup(dev_priv);
3474 }
3475 
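/*
 * A standalone userspace sketch (not driver code) of the two-step IMR
 * update in gen11_hpd_irq_setup() above: all hotplug bits are first
 * unmasked, then the pins that are not enabled are masked again, while
 * bits outside hotplug_irqs are left untouched. The bit positions here
 * are made up; compile and run it separately to see the effect.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t hotplug_irqs = 0x0000003f;	/* six hypothetical TC pins */
	uint32_t enabled_irqs = 0x00000005;	/* only pins 0 and 2 in use */
	uint32_t val = 0xff00003f;		/* old IMR: all pins masked */

	val &= ~hotplug_irqs;			/* unmask every hotplug bit */
	val |= ~enabled_irqs & hotplug_irqs;	/* re-mask the unused pins */

	printf("new IMR: 0x%08x\n", val);	/* 0xff00003a: 0 and 2 unmasked */
	return 0;
}
#endif
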
3476 static u32 spt_hotplug_enables(struct drm_i915_private *i915,
3477 			       enum hpd_pin pin)
3478 {
3479 	switch (pin) {
3480 	case HPD_PORT_A:
3481 		return PORTA_HOTPLUG_ENABLE;
3482 	case HPD_PORT_B:
3483 		return PORTB_HOTPLUG_ENABLE;
3484 	case HPD_PORT_C:
3485 		return PORTC_HOTPLUG_ENABLE;
3486 	case HPD_PORT_D:
3487 		return PORTD_HOTPLUG_ENABLE;
3488 	default:
3489 		return 0;
3490 	}
3491 }
3492 
3493 static u32 spt_hotplug2_enables(struct drm_i915_private *i915,
3494 				enum hpd_pin pin)
3495 {
3496 	switch (pin) {
3497 	case HPD_PORT_E:
3498 		return PORTE_HOTPLUG_ENABLE;
3499 	default:
3500 		return 0;
3501 	}
3502 }
3503 
3504 static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv)
3505 {
3506 	u32 val, hotplug;
3507 
3508 	/* Display WA #1179 WaHardHangonHotPlug: cnp */
3509 	if (HAS_PCH_CNP(dev_priv)) {
3510 		val = intel_uncore_read(&dev_priv->uncore, SOUTH_CHICKEN1);
3511 		val &= ~CHASSIS_CLK_REQ_DURATION_MASK;
3512 		val |= CHASSIS_CLK_REQ_DURATION(0xf);
3513 		intel_uncore_write(&dev_priv->uncore, SOUTH_CHICKEN1, val);
3514 	}
3515 
3516 	/* Enable digital hotplug on the PCH */
3517 	hotplug = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
3518 	hotplug &= ~(PORTA_HOTPLUG_ENABLE |
3519 		     PORTB_HOTPLUG_ENABLE |
3520 		     PORTC_HOTPLUG_ENABLE |
3521 		     PORTD_HOTPLUG_ENABLE);
3522 	hotplug |= intel_hpd_hotplug_enables(dev_priv, spt_hotplug_enables);
3523 	intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, hotplug);
3524 
3525 	hotplug = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG2);
3526 	hotplug &= ~PORTE_HOTPLUG_ENABLE;
3527 	hotplug |= intel_hpd_hotplug_enables(dev_priv, spt_hotplug2_enables);
3528 	intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG2, hotplug);
3529 }
3530 
3531 static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
3532 {
3533 	u32 hotplug_irqs, enabled_irqs;
3534 
3535 	if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
3536 		intel_uncore_write(&dev_priv->uncore, SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
3537 
3538 	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
3539 	hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
3540 
3541 	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3542 
3543 	spt_hpd_detection_setup(dev_priv);
3544 }
3545 
3546 static u32 ilk_hotplug_enables(struct drm_i915_private *i915,
3547 			       enum hpd_pin pin)
3548 {
3549 	switch (pin) {
3550 	case HPD_PORT_A:
3551 		return DIGITAL_PORTA_HOTPLUG_ENABLE |
3552 			DIGITAL_PORTA_PULSE_DURATION_2ms;
3553 	default:
3554 		return 0;
3555 	}
3556 }
3557 
3558 static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv)
3559 {
3560 	u32 hotplug;
3561 
3562 	/*
3563 	 * Enable digital hotplug on the CPU, and configure the DP short pulse
3564 	 * duration to 2ms (which is the minimum in the DisplayPort spec).
3565 	 * The pulse duration bits are reserved on HSW+.
3566 	 */
3567 	hotplug = intel_uncore_read(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL);
3568 	hotplug &= ~(DIGITAL_PORTA_HOTPLUG_ENABLE |
3569 		     DIGITAL_PORTA_PULSE_DURATION_MASK);
3570 	hotplug |= intel_hpd_hotplug_enables(dev_priv, ilk_hotplug_enables);
3571 	intel_uncore_write(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
3572 }
3573 
3574 static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
3575 {
3576 	u32 hotplug_irqs, enabled_irqs;
3577 
3578 	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd);
3579 	hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.hpd);
3580 
3581 	if (DISPLAY_VER(dev_priv) >= 8)
3582 		bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3583 	else
3584 		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
3585 
3586 	ilk_hpd_detection_setup(dev_priv);
3587 
3588 	ibx_hpd_irq_setup(dev_priv);
3589 }
3590 
3591 static u32 bxt_hotplug_enables(struct drm_i915_private *i915,
3592 			       enum hpd_pin pin)
3593 {
3594 	u32 hotplug;
3595 
3596 	switch (pin) {
3597 	case HPD_PORT_A:
3598 		hotplug = PORTA_HOTPLUG_ENABLE;
3599 		if (intel_bios_is_port_hpd_inverted(i915, PORT_A))
3600 			hotplug |= BXT_DDIA_HPD_INVERT;
3601 		return hotplug;
3602 	case HPD_PORT_B:
3603 		hotplug = PORTB_HOTPLUG_ENABLE;
3604 		if (intel_bios_is_port_hpd_inverted(i915, PORT_B))
3605 			hotplug |= BXT_DDIB_HPD_INVERT;
3606 		return hotplug;
3607 	case HPD_PORT_C:
3608 		hotplug = PORTC_HOTPLUG_ENABLE;
3609 		if (intel_bios_is_port_hpd_inverted(i915, PORT_C))
3610 			hotplug |= BXT_DDIC_HPD_INVERT;
3611 		return hotplug;
3612 	default:
3613 		return 0;
3614 	}
3615 }
3616 
3617 static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv)
3618 {
3619 	u32 hotplug;
3620 
3621 	hotplug = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
3622 	hotplug &= ~(PORTA_HOTPLUG_ENABLE |
3623 		     PORTB_HOTPLUG_ENABLE |
3624 		     PORTC_HOTPLUG_ENABLE |
3625 		     BXT_DDIA_HPD_INVERT |
3626 		     BXT_DDIB_HPD_INVERT |
3627 		     BXT_DDIC_HPD_INVERT);
3628 	hotplug |= intel_hpd_hotplug_enables(dev_priv, bxt_hotplug_enables);
3629 	intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, hotplug);
3630 }
3631 
3632 static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
3633 {
3634 	u32 hotplug_irqs, enabled_irqs;
3635 
3636 	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd);
3637 	hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.hpd);
3638 
3639 	bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3640 
3641 	bxt_hpd_detection_setup(dev_priv);
3642 }
3643 
3644 /*
3645  * SDEIER is also touched by the interrupt handler to work around missed PCH
3646  * interrupts. Hence we can't update it after the interrupt handler is enabled -
3647  * instead we unconditionally enable all PCH interrupt sources here, but then
3648  * only unmask them as needed with SDEIMR.
3649  *
3650  * Note that we currently do this after installing the interrupt handler,
3651  * but before we enable the master interrupt. That should be sufficient
3652  * to avoid races with the irq handler, assuming we have MSI. Shared legacy
3653  * interrupts could still race.
3654  */
3655 static void ibx_irq_postinstall(struct drm_i915_private *dev_priv)
3656 {
3657 	struct intel_uncore *uncore = &dev_priv->uncore;
3658 	u32 mask;
3659 
3660 	if (HAS_PCH_NOP(dev_priv))
3661 		return;
3662 
3663 	if (HAS_PCH_IBX(dev_priv))
3664 		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
3665 	else if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
3666 		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
3667 	else
3668 		mask = SDE_GMBUS_CPT;
3669 
3670 	GEN3_IRQ_INIT(uncore, SDE, ~mask, 0xffffffff);
3671 }
3672 
3673 static void ilk_irq_postinstall(struct drm_i915_private *dev_priv)
3674 {
3675 	struct intel_uncore *uncore = &dev_priv->uncore;
3676 	u32 display_mask, extra_mask;
3677 
3678 	if (GRAPHICS_VER(dev_priv) >= 7) {
3679 		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
3680 				DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB);
3681 		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
3682 			      DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
3683 			      DE_PLANE_FLIP_DONE_IVB(PLANE_C) |
3684 			      DE_PLANE_FLIP_DONE_IVB(PLANE_B) |
3685 			      DE_PLANE_FLIP_DONE_IVB(PLANE_A) |
3686 			      DE_DP_A_HOTPLUG_IVB);
3687 	} else {
3688 		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
3689 				DE_AUX_CHANNEL_A | DE_PIPEB_CRC_DONE |
3690 				DE_PIPEA_CRC_DONE | DE_POISON);
3691 		extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK |
3692 			      DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
3693 			      DE_PLANE_FLIP_DONE(PLANE_A) |
3694 			      DE_PLANE_FLIP_DONE(PLANE_B) |
3695 			      DE_DP_A_HOTPLUG);
3696 	}
3697 
3698 	if (IS_HASWELL(dev_priv)) {
3699 		gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
3700 		display_mask |= DE_EDP_PSR_INT_HSW;
3701 	}
3702 
3703 	if (IS_IRONLAKE_M(dev_priv))
3704 		extra_mask |= DE_PCU_EVENT;
3705 
3706 	dev_priv->irq_mask = ~display_mask;
3707 
3708 	ibx_irq_postinstall(dev_priv);
3709 
3710 	gen5_gt_irq_postinstall(to_gt(dev_priv));
3711 
3712 	GEN3_IRQ_INIT(uncore, DE, dev_priv->irq_mask,
3713 		      display_mask | extra_mask);
3714 }
3715 
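/*
 * Note the two masks above: display_mask sources are enabled and unmasked
 * immediately (irq_mask = ~display_mask), while extra_mask sources are only
 * enabled in the IER and stay masked in the IMR until they are unmasked on
 * demand (vblank interrupts being the typical example).
 */
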
3716 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
3717 {
3718 	lockdep_assert_held(&dev_priv->irq_lock);
3719 
3720 	if (dev_priv->display_irqs_enabled)
3721 		return;
3722 
3723 	dev_priv->display_irqs_enabled = true;
3724 
3725 	if (intel_irqs_enabled(dev_priv)) {
3726 		vlv_display_irq_reset(dev_priv);
3727 		vlv_display_irq_postinstall(dev_priv);
3728 	}
3729 }
3730 
3731 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
3732 {
3733 	lockdep_assert_held(&dev_priv->irq_lock);
3734 
3735 	if (!dev_priv->display_irqs_enabled)
3736 		return;
3737 
3738 	dev_priv->display_irqs_enabled = false;
3739 
3740 	if (intel_irqs_enabled(dev_priv))
3741 		vlv_display_irq_reset(dev_priv);
3742 }
3743 
3745 static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv)
3746 {
3747 	gen5_gt_irq_postinstall(to_gt(dev_priv));
3748 
3749 	spin_lock_irq(&dev_priv->irq_lock);
3750 	if (dev_priv->display_irqs_enabled)
3751 		vlv_display_irq_postinstall(dev_priv);
3752 	spin_unlock_irq(&dev_priv->irq_lock);
3753 
3754 	intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
3755 	intel_uncore_posting_read(&dev_priv->uncore, VLV_MASTER_IER);
3756 }
3757 
3758 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
3759 {
3760 	struct intel_uncore *uncore = &dev_priv->uncore;
3761 
3762 	u32 de_pipe_masked = gen8_de_pipe_fault_mask(dev_priv) |
3763 		GEN8_PIPE_CDCLK_CRC_DONE;
3764 	u32 de_pipe_enables;
3765 	u32 de_port_masked = gen8_de_port_aux_mask(dev_priv);
3766 	u32 de_port_enables;
3767 	u32 de_misc_masked = GEN8_DE_EDP_PSR;
3768 	u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
3769 		BIT(TRANSCODER_C) | BIT(TRANSCODER_D);
3770 	enum pipe pipe;
3771 
3772 	if (!HAS_DISPLAY(dev_priv))
3773 		return;
3774 
3775 	if (DISPLAY_VER(dev_priv) <= 10)
3776 		de_misc_masked |= GEN8_DE_MISC_GSE;
3777 
3778 	if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
3779 		de_port_masked |= BXT_DE_PORT_GMBUS;
3780 
3781 	if (DISPLAY_VER(dev_priv) >= 11) {
3782 		enum port port;
3783 
3784 		if (intel_bios_is_dsi_present(dev_priv, &port))
3785 			de_port_masked |= DSI0_TE | DSI1_TE;
3786 	}
3787 
3788 	de_pipe_enables = de_pipe_masked |
3789 		GEN8_PIPE_VBLANK |
3790 		gen8_de_pipe_underrun_mask(dev_priv) |
3791 		gen8_de_pipe_flip_done_mask(dev_priv);
3792 
3793 	de_port_enables = de_port_masked;
3794 	if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
3795 		de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
3796 	else if (IS_BROADWELL(dev_priv))
3797 		de_port_enables |= BDW_DE_PORT_HOTPLUG_MASK;
3798 
3799 	if (DISPLAY_VER(dev_priv) >= 12) {
3800 		enum transcoder trans;
3801 
3802 		for_each_cpu_transcoder_masked(dev_priv, trans, trans_mask) {
3803 			enum intel_display_power_domain domain;
3804 
3805 			domain = POWER_DOMAIN_TRANSCODER(trans);
3806 			if (!intel_display_power_is_enabled(dev_priv, domain))
3807 				continue;
3808 
3809 			gen3_assert_iir_is_zero(uncore, TRANS_PSR_IIR(trans));
3810 		}
3811 	} else {
3812 		gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
3813 	}
3814 
3815 	for_each_pipe(dev_priv, pipe) {
3816 		dev_priv->de_irq_mask[pipe] = ~de_pipe_masked;
3817 
3818 		if (intel_display_power_is_enabled(dev_priv,
3819 				POWER_DOMAIN_PIPE(pipe)))
3820 			GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
3821 					  dev_priv->de_irq_mask[pipe],
3822 					  de_pipe_enables);
3823 	}
3824 
3825 	GEN3_IRQ_INIT(uncore, GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
3826 	GEN3_IRQ_INIT(uncore, GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);
3827 
3828 	if (DISPLAY_VER(dev_priv) >= 11) {
3829 		u32 de_hpd_masked = 0;
3830 		u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK |
3831 				     GEN11_DE_TBT_HOTPLUG_MASK;
3832 
3833 		GEN3_IRQ_INIT(uncore, GEN11_DE_HPD_, ~de_hpd_masked,
3834 			      de_hpd_enables);
3835 	}
3836 }
3837 
3838 static void icp_irq_postinstall(struct drm_i915_private *dev_priv)
3839 {
3840 	struct intel_uncore *uncore = &dev_priv->uncore;
3841 	u32 mask = SDE_GMBUS_ICP;
3842 
3843 	GEN3_IRQ_INIT(uncore, SDE, ~mask, 0xffffffff);
3844 }
3845 
3846 static void gen8_irq_postinstall(struct drm_i915_private *dev_priv)
3847 {
3848 	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
3849 		icp_irq_postinstall(dev_priv);
3850 	else if (HAS_PCH_SPLIT(dev_priv))
3851 		ibx_irq_postinstall(dev_priv);
3852 
3853 	gen8_gt_irq_postinstall(to_gt(dev_priv));
3854 	gen8_de_irq_postinstall(dev_priv);
3855 
3856 	gen8_master_intr_enable(dev_priv->uncore.regs);
3857 }
3858 
3859 static void gen11_de_irq_postinstall(struct drm_i915_private *dev_priv)
3860 {
3861 	if (!HAS_DISPLAY(dev_priv))
3862 		return;
3863 
3864 	gen8_de_irq_postinstall(dev_priv);
3865 
3866 	intel_uncore_write(&dev_priv->uncore, GEN11_DISPLAY_INT_CTL,
3867 			   GEN11_DISPLAY_IRQ_ENABLE);
3868 }
3869 
3870 static void gen11_irq_postinstall(struct drm_i915_private *dev_priv)
3871 {
3872 	struct intel_gt *gt = to_gt(dev_priv);
3873 	struct intel_uncore *uncore = gt->uncore;
3874 	u32 gu_misc_masked = GEN11_GU_MISC_GSE;
3875 
3876 	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
3877 		icp_irq_postinstall(dev_priv);
3878 
3879 	gen11_gt_irq_postinstall(gt);
3880 	gen11_de_irq_postinstall(dev_priv);
3881 
3882 	GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);
3883 
3884 	gen11_master_intr_enable(uncore->regs);
3885 	intel_uncore_posting_read(&dev_priv->uncore, GEN11_GFX_MSTR_IRQ);
3886 }
3887 
3888 static void dg1_irq_postinstall(struct drm_i915_private *dev_priv)
3889 {
3890 	struct intel_gt *gt = to_gt(dev_priv);
3891 	struct intel_uncore *uncore = gt->uncore;
3892 	u32 gu_misc_masked = GEN11_GU_MISC_GSE;
3893 
3894 	gen11_gt_irq_postinstall(gt);
3895 
3896 	GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);
3897 
3898 	if (HAS_DISPLAY(dev_priv)) {
3899 		icp_irq_postinstall(dev_priv);
3900 		gen8_de_irq_postinstall(dev_priv);
3901 		intel_uncore_write(&dev_priv->uncore, GEN11_DISPLAY_INT_CTL,
3902 				   GEN11_DISPLAY_IRQ_ENABLE);
3903 	}
3904 
3905 	dg1_master_intr_enable(uncore->regs);
3906 	intel_uncore_posting_read(uncore, DG1_MSTR_TILE_INTR);
3907 }
3908 
3909 static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv)
3910 {
3911 	gen8_gt_irq_postinstall(to_gt(dev_priv));
3912 
3913 	spin_lock_irq(&dev_priv->irq_lock);
3914 	if (dev_priv->display_irqs_enabled)
3915 		vlv_display_irq_postinstall(dev_priv);
3916 	spin_unlock_irq(&dev_priv->irq_lock);
3917 
3918 	intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
3919 	intel_uncore_posting_read(&dev_priv->uncore, GEN8_MASTER_IRQ);
3920 }
3921 
3922 static void i8xx_irq_reset(struct drm_i915_private *dev_priv)
3923 {
3924 	struct intel_uncore *uncore = &dev_priv->uncore;
3925 
3926 	i9xx_pipestat_irq_reset(dev_priv);
3927 
3928 	GEN2_IRQ_RESET(uncore);
3929 	dev_priv->irq_mask = ~0u;
3930 }
3931 
3932 static void i8xx_irq_postinstall(struct drm_i915_private *dev_priv)
3933 {
3934 	struct intel_uncore *uncore = &dev_priv->uncore;
3935 	u16 enable_mask;
3936 
3937 	intel_uncore_write16(uncore,
3938 			     EMR,
3939 			     ~(I915_ERROR_PAGE_TABLE |
3940 			       I915_ERROR_MEMORY_REFRESH));
3941 
3942 	/* Unmask the interrupts that we always want on. */
3943 	dev_priv->irq_mask =
3944 		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3945 		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3946 		  I915_MASTER_ERROR_INTERRUPT);
3947 
3948 	enable_mask =
3949 		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3950 		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3951 		I915_MASTER_ERROR_INTERRUPT |
3952 		I915_USER_INTERRUPT;
3953 
3954 	GEN2_IRQ_INIT(uncore, dev_priv->irq_mask, enable_mask);
3955 
3956 	/* Interrupt setup is already guaranteed to be single-threaded; this is
3957 	 * just to make the assert_spin_locked check happy. */
3958 	spin_lock_irq(&dev_priv->irq_lock);
3959 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3960 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
3961 	spin_unlock_irq(&dev_priv->irq_lock);
3962 }
3963 
3964 static void i8xx_error_irq_ack(struct drm_i915_private *i915,
3965 			       u16 *eir, u16 *eir_stuck)
3966 {
3967 	struct intel_uncore *uncore = &i915->uncore;
3968 	u16 emr;
3969 
3970 	*eir = intel_uncore_read16(uncore, EIR);
3971 
3972 	if (*eir)
3973 		intel_uncore_write16(uncore, EIR, *eir);
3974 
3975 	*eir_stuck = intel_uncore_read16(uncore, EIR);
3976 	if (*eir_stuck == 0)
3977 		return;
3978 
3979 	/*
3980 	 * Toggle all EMR bits to make sure we get an edge
3981 	 * in the ISR master error bit if we don't clear
3982 	 * all the EIR bits. Otherwise the edge triggered
3983 	 * IIR on i965/g4x wouldn't notice that an interrupt
3984 	 * is still pending. Also some EIR bits can't be
3985 	 * cleared except by handling the underlying error
3986 	 * (or by a GPU reset) so we mask any bit that
3987 	 * remains set.
3988 	 */
3989 	emr = intel_uncore_read16(uncore, EMR);
3990 	intel_uncore_write16(uncore, EMR, 0xffff);
3991 	intel_uncore_write16(uncore, EMR, emr | *eir_stuck);
3992 }
3993 
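/*
 * A standalone userspace sketch (not driver code) of the EMR toggle
 * described in the comment above. It assumes the ISR master-error bit
 * simply mirrors "any unmasked EIR bit set"; with that model, masking
 * everything and then unmasking again (with the stuck bits kept masked)
 * forces the level low so a future error generates a fresh edge.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

static int master_error_level(uint16_t eir, uint16_t emr)
{
	return (eir & ~emr) != 0;
}

int main(void)
{
	uint16_t emr = 0x0000;	/* nothing masked */
	uint16_t eir = 0x0005;	/* two errors latched */
	uint16_t eir_stuck;

	printf("level before ack:   %d\n", master_error_level(eir, emr));

	eir &= ~0x0001;		/* only bit 0 could be cleared by the ack */
	eir_stuck = eir;	/* bit 2 is stuck */

	emr = 0xffff;		/* toggle: mask everything, level drops */
	printf("level while masked: %d\n", master_error_level(eir, emr));

	emr = eir_stuck;	/* restore, keeping stuck bits masked */
	printf("level after toggle: %d\n", master_error_level(eir, emr));
	return 0;
}
#endif
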
3994 static void i8xx_error_irq_handler(struct drm_i915_private *dev_priv,
3995 				   u16 eir, u16 eir_stuck)
3996 {
3997 	drm_dbg(&dev_priv->drm, "Master Error: EIR 0x%04x\n", eir);
3998 
3999 	if (eir_stuck)
4000 		drm_dbg(&dev_priv->drm, "EIR stuck: 0x%04x, masked\n",
4001 			eir_stuck);
4002 }
4003 
4004 static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv,
4005 			       u32 *eir, u32 *eir_stuck)
4006 {
4007 	u32 emr;
4008 
4009 	*eir = intel_uncore_read(&dev_priv->uncore, EIR);
4010 
4011 	intel_uncore_write(&dev_priv->uncore, EIR, *eir);
4012 
4013 	*eir_stuck = intel_uncore_read(&dev_priv->uncore, EIR);
4014 	if (*eir_stuck == 0)
4015 		return;
4016 
4017 	/*
4018 	 * Toggle all EMR bits to make sure we get an edge
4019 	 * in the ISR master error bit if we don't clear
4020 	 * all the EIR bits. Otherwise the edge triggered
4021 	 * IIR on i965/g4x wouldn't notice that an interrupt
4022 	 * is still pending. Also some EIR bits can't be
4023 	 * cleared except by handling the underlying error
4024 	 * (or by a GPU reset) so we mask any bit that
4025 	 * remains set.
4026 	 */
4027 	emr = intel_uncore_read(&dev_priv->uncore, EMR);
4028 	intel_uncore_write(&dev_priv->uncore, EMR, 0xffffffff);
4029 	intel_uncore_write(&dev_priv->uncore, EMR, emr | *eir_stuck);
4030 }
4031 
4032 static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv,
4033 				   u32 eir, u32 eir_stuck)
4034 {
4035 	drm_dbg(&dev_priv->drm, "Master Error: EIR 0x%08x\n", eir);
4036 
4037 	if (eir_stuck)
4038 		drm_dbg(&dev_priv->drm, "EIR stuck: 0x%08x, masked\n",
4039 			eir_stuck);
4040 }
4041 
4042 static irqreturn_t i8xx_irq_handler(int irq, void *arg)
4043 {
4044 	struct drm_i915_private *dev_priv = arg;
4045 	irqreturn_t ret = IRQ_NONE;
4046 
4047 	if (!intel_irqs_enabled(dev_priv))
4048 		return IRQ_NONE;
4049 
4050 	/* IRQs are synced during runtime_suspend; we don't require a wakeref */
4051 	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
4052 
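	/* One pass only; the do { } while (0) just lets us bail out early via break. */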
4053 	do {
4054 		u32 pipe_stats[I915_MAX_PIPES] = {};
4055 		u16 eir = 0, eir_stuck = 0;
4056 		u16 iir;
4057 
4058 		iir = intel_uncore_read16(&dev_priv->uncore, GEN2_IIR);
4059 		if (iir == 0)
4060 			break;
4061 
4062 		ret = IRQ_HANDLED;
4063 
4064 		/* Call regardless, as some status bits might not be
4065 		 * signalled in iir */
4066 		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
4067 
4068 		if (iir & I915_MASTER_ERROR_INTERRUPT)
4069 			i8xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
4070 
4071 		intel_uncore_write16(&dev_priv->uncore, GEN2_IIR, iir);
4072 
4073 		if (iir & I915_USER_INTERRUPT)
4074 			intel_engine_cs_irq(to_gt(dev_priv)->engine[RCS0], iir);
4075 
4076 		if (iir & I915_MASTER_ERROR_INTERRUPT)
4077 			i8xx_error_irq_handler(dev_priv, eir, eir_stuck);
4078 
4079 		i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats);
4080 	} while (0);
4081 
4082 	pmu_irq_stats(dev_priv, ret);
4083 
4084 	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
4085 
4086 	return ret;
4087 }
4088 
4089 static void i915_irq_reset(struct drm_i915_private *dev_priv)
4090 {
4091 	struct intel_uncore *uncore = &dev_priv->uncore;
4092 
4093 	if (I915_HAS_HOTPLUG(dev_priv)) {
4094 		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4095 		intel_uncore_write(&dev_priv->uncore, PORT_HOTPLUG_STAT, intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT));
4096 	}
4097 
4098 	i9xx_pipestat_irq_reset(dev_priv);
4099 
4100 	GEN3_IRQ_RESET(uncore, GEN2_);
4101 	dev_priv->irq_mask = ~0u;
4102 }
4103 
4104 static void i915_irq_postinstall(struct drm_i915_private *dev_priv)
4105 {
4106 	struct intel_uncore *uncore = &dev_priv->uncore;
4107 	u32 enable_mask;
4108 
4109 	intel_uncore_write(&dev_priv->uncore, EMR, ~(I915_ERROR_PAGE_TABLE |
4110 			  I915_ERROR_MEMORY_REFRESH));
4111 
4112 	/* Unmask the interrupts that we always want on. */
4113 	dev_priv->irq_mask =
4114 		~(I915_ASLE_INTERRUPT |
4115 		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4116 		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4117 		  I915_MASTER_ERROR_INTERRUPT);
4118 
4119 	enable_mask =
4120 		I915_ASLE_INTERRUPT |
4121 		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4122 		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4123 		I915_MASTER_ERROR_INTERRUPT |
4124 		I915_USER_INTERRUPT;
4125 
4126 	if (I915_HAS_HOTPLUG(dev_priv)) {
4127 		/* Enable in IER... */
4128 		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
4129 		/* and unmask in IMR */
4130 		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
4131 	}
4132 
4133 	GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);
4134 
4135 	/* Interrupt setup is already guaranteed to be single-threaded; this is
4136 	 * just to make the assert_spin_locked check happy. */
4137 	spin_lock_irq(&dev_priv->irq_lock);
4138 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4139 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4140 	spin_unlock_irq(&dev_priv->irq_lock);
4141 
4142 	i915_enable_asle_pipestat(dev_priv);
4143 }
4144 
4145 static irqreturn_t i915_irq_handler(int irq, void *arg)
4146 {
4147 	struct drm_i915_private *dev_priv = arg;
4148 	irqreturn_t ret = IRQ_NONE;
4149 
4150 	if (!intel_irqs_enabled(dev_priv))
4151 		return IRQ_NONE;
4152 
4153 	/* IRQs are synced during runtime_suspend; we don't require a wakeref */
4154 	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
4155 
4156 	do {
4157 		u32 pipe_stats[I915_MAX_PIPES] = {};
4158 		u32 eir = 0, eir_stuck = 0;
4159 		u32 hotplug_status = 0;
4160 		u32 iir;
4161 
4162 		iir = intel_uncore_read(&dev_priv->uncore, GEN2_IIR);
4163 		if (iir == 0)
4164 			break;
4165 
4166 		ret = IRQ_HANDLED;
4167 
4168 		if (I915_HAS_HOTPLUG(dev_priv) &&
4169 		    iir & I915_DISPLAY_PORT_INTERRUPT)
4170 			hotplug_status = i9xx_hpd_irq_ack(dev_priv);
4171 
4172 		/* Call regardless, as some status bits might not be
4173 		 * signalled in iir */
4174 		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
4175 
4176 		if (iir & I915_MASTER_ERROR_INTERRUPT)
4177 			i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
4178 
4179 		intel_uncore_write(&dev_priv->uncore, GEN2_IIR, iir);
4180 
4181 		if (iir & I915_USER_INTERRUPT)
4182 			intel_engine_cs_irq(to_gt(dev_priv)->engine[RCS0], iir);
4183 
4184 		if (iir & I915_MASTER_ERROR_INTERRUPT)
4185 			i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
4186 
4187 		if (hotplug_status)
4188 			i9xx_hpd_irq_handler(dev_priv, hotplug_status);
4189 
4190 		i915_pipestat_irq_handler(dev_priv, iir, pipe_stats);
4191 	} while (0);
4192 
4193 	pmu_irq_stats(dev_priv, ret);
4194 
4195 	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
4196 
4197 	return ret;
4198 }
4199 
4200 static void i965_irq_reset(struct drm_i915_private *dev_priv)
4201 {
4202 	struct intel_uncore *uncore = &dev_priv->uncore;
4203 
4204 	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4205 	intel_uncore_write(&dev_priv->uncore, PORT_HOTPLUG_STAT, intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT));
4206 
4207 	i9xx_pipestat_irq_reset(dev_priv);
4208 
4209 	GEN3_IRQ_RESET(uncore, GEN2_);
4210 	dev_priv->irq_mask = ~0u;
4211 }
4212 
4213 static void i965_irq_postinstall(struct drm_i915_private *dev_priv)
4214 {
4215 	struct intel_uncore *uncore = &dev_priv->uncore;
4216 	u32 enable_mask;
4217 	u32 error_mask;
4218 
4219 	/*
4220 	 * Enable some error detection, note the instruction error mask
4221 	 * bit is reserved, so we leave it masked.
4222 	 */
4223 	if (IS_G4X(dev_priv)) {
4224 		error_mask = ~(GM45_ERROR_PAGE_TABLE |
4225 			       GM45_ERROR_MEM_PRIV |
4226 			       GM45_ERROR_CP_PRIV |
4227 			       I915_ERROR_MEMORY_REFRESH);
4228 	} else {
4229 		error_mask = ~(I915_ERROR_PAGE_TABLE |
4230 			       I915_ERROR_MEMORY_REFRESH);
4231 	}
4232 	intel_uncore_write(&dev_priv->uncore, EMR, error_mask);
4233 
4234 	/* Unmask the interrupts that we always want on. */
4235 	dev_priv->irq_mask =
4236 		~(I915_ASLE_INTERRUPT |
4237 		  I915_DISPLAY_PORT_INTERRUPT |
4238 		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4239 		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4240 		  I915_MASTER_ERROR_INTERRUPT);
4241 
4242 	enable_mask =
4243 		I915_ASLE_INTERRUPT |
4244 		I915_DISPLAY_PORT_INTERRUPT |
4245 		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4246 		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4247 		I915_MASTER_ERROR_INTERRUPT |
4248 		I915_USER_INTERRUPT;
4249 
4250 	if (IS_G4X(dev_priv))
4251 		enable_mask |= I915_BSD_USER_INTERRUPT;
4252 
4253 	GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);
4254 
4255 	/* Interrupt setup is already guaranteed to be single-threaded; this is
4256 	 * just to make the assert_spin_locked check happy. */
4257 	spin_lock_irq(&dev_priv->irq_lock);
4258 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
4259 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4260 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4261 	spin_unlock_irq(&dev_priv->irq_lock);
4262 
4263 	i915_enable_asle_pipestat(dev_priv);
4264 }
4265 
4266 static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
4267 {
4268 	u32 hotplug_en;
4269 
4270 	lockdep_assert_held(&dev_priv->irq_lock);
4271 
4272 	/* Note HDMI and DP share hotplug bits */
4273 	/* enable bits are the same for all generations */
4274 	hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915);
4275 	/* Programming the CRT detection parameters tends to
4276 	 * generate a spurious hotplug event about three
4277 	 * seconds later. So just do it once.
4278 	 */
4279 	if (IS_G4X(dev_priv))
4280 		hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
4281 	hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
4282 
4283 	/* Ignore TV since it's buggy */
4284 	i915_hotplug_interrupt_update_locked(dev_priv,
4285 					     HOTPLUG_INT_EN_MASK |
4286 					     CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
4287 					     CRT_HOTPLUG_ACTIVATION_PERIOD_64,
4288 					     hotplug_en);
4289 }
4290 
4291 static irqreturn_t i965_irq_handler(int irq, void *arg)
4292 {
4293 	struct drm_i915_private *dev_priv = arg;
4294 	irqreturn_t ret = IRQ_NONE;
4295 
4296 	if (!intel_irqs_enabled(dev_priv))
4297 		return IRQ_NONE;
4298 
4299 	/* IRQs are synced during runtime_suspend; we don't require a wakeref */
4300 	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
4301 
4302 	do {
4303 		u32 pipe_stats[I915_MAX_PIPES] = {};
4304 		u32 eir = 0, eir_stuck = 0;
4305 		u32 hotplug_status = 0;
4306 		u32 iir;
4307 
4308 		iir = intel_uncore_read(&dev_priv->uncore, GEN2_IIR);
4309 		if (iir == 0)
4310 			break;
4311 
4312 		ret = IRQ_HANDLED;
4313 
4314 		if (iir & I915_DISPLAY_PORT_INTERRUPT)
4315 			hotplug_status = i9xx_hpd_irq_ack(dev_priv);
4316 
4317 		/* Call regardless, as some status bits might not be
4318 		 * signalled in iir */
4319 		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
4320 
4321 		if (iir & I915_MASTER_ERROR_INTERRUPT)
4322 			i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
4323 
4324 		intel_uncore_write(&dev_priv->uncore, GEN2_IIR, iir);
4325 
4326 		if (iir & I915_USER_INTERRUPT)
4327 			intel_engine_cs_irq(to_gt(dev_priv)->engine[RCS0],
4328 					    iir);
4329 
4330 		if (iir & I915_BSD_USER_INTERRUPT)
4331 			intel_engine_cs_irq(to_gt(dev_priv)->engine[VCS0],
4332 					    iir >> 25);
4333 
4334 		if (iir & I915_MASTER_ERROR_INTERRUPT)
4335 			i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
4336 
4337 		if (hotplug_status)
4338 			i9xx_hpd_irq_handler(dev_priv, hotplug_status);
4339 
4340 		i965_pipestat_irq_handler(dev_priv, iir, pipe_stats);
4341 	} while (0);
4342 
4343 	pmu_irq_stats(dev_priv, ret);
4344 
4345 	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
4346 
4347 	return ret;
4348 }
4349 
4350 #define HPD_FUNCS(platform)					 \
4351 static const struct intel_hotplug_funcs platform##_hpd_funcs = { \
4352 	.hpd_irq_setup = platform##_hpd_irq_setup,		 \
4353 }
4354 
4355 HPD_FUNCS(i915);
4356 HPD_FUNCS(dg1);
4357 HPD_FUNCS(gen11);
4358 HPD_FUNCS(bxt);
4359 HPD_FUNCS(icp);
4360 HPD_FUNCS(spt);
4361 HPD_FUNCS(ilk);
4362 #undef HPD_FUNCS
4363 
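/*
 * For reference, HPD_FUNCS(ilk) above expands to:
 *
 *	static const struct intel_hotplug_funcs ilk_hpd_funcs = {
 *		.hpd_irq_setup = ilk_hpd_irq_setup,
 *	};
 */
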
4364 /**
4365  * intel_irq_init - initializes irq support
4366  * @dev_priv: i915 device instance
4367  *
4368  * This function initializes all the irq support including work items, timers
4369  * and all the vtables. It does not setup the interrupt itself though.
4370  */
4371 void intel_irq_init(struct drm_i915_private *dev_priv)
4372 {
4373 	struct drm_device *dev = &dev_priv->drm;
4374 	int i;
4375 
4376 	INIT_WORK(&dev_priv->l3_parity.error_work, ivb_parity_work);
4377 	for (i = 0; i < MAX_L3_SLICES; ++i)
4378 		dev_priv->l3_parity.remap_info[i] = NULL;
4379 
4380 	/* pre-gen11 the GuC irq bits are in the upper 16 bits of the pm reg */
4381 	if (HAS_GT_UC(dev_priv) && GRAPHICS_VER(dev_priv) < 11)
4382 		to_gt(dev_priv)->pm_guc_events = GUC_INTR_GUC2HOST << 16;
4383 
4384 	if (!HAS_DISPLAY(dev_priv))
4385 		return;
4386 
4387 	intel_hpd_init_pins(dev_priv);
4388 
4389 	intel_hpd_init_work(dev_priv);
4390 
4391 	dev->vblank_disable_immediate = true;
4392 
4393 	/* Most platforms treat the display irq block as an always-on
4394 	 * power domain. vlv/chv can disable it at runtime and need
4395 	 * special care to avoid writing any of the display block registers
4396 	 * outside of the power domain. We defer setting up the display irqs
4397 	 * in this case to the runtime pm.
4398 	 */
4399 	dev_priv->display_irqs_enabled = true;
4400 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
4401 		dev_priv->display_irqs_enabled = false;
4402 
4403 	dev_priv->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD;
4404 	/* If we have MST support, we want to avoid doing short HPD IRQ storm
4405 	 * detection, as short HPD storms will occur as a natural part of
4406 	 * sideband messaging with MST.
4407 	 * On older platforms however, IRQ storms can occur with both long and
4408 	 * short pulses, as seen on some G4x systems.
4409 	 */
4410 	dev_priv->hotplug.hpd_short_storm_enabled = !HAS_DP_MST(dev_priv);
4411 
4412 	if (HAS_GMCH(dev_priv)) {
4413 		if (I915_HAS_HOTPLUG(dev_priv))
4414 			dev_priv->hotplug_funcs = &i915_hpd_funcs;
4415 	} else {
4416 		if (HAS_PCH_DG1(dev_priv))
4417 			dev_priv->hotplug_funcs = &dg1_hpd_funcs;
4418 		else if (DISPLAY_VER(dev_priv) >= 11)
4419 			dev_priv->hotplug_funcs = &gen11_hpd_funcs;
4420 		else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
4421 			dev_priv->hotplug_funcs = &bxt_hpd_funcs;
4422 		else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
4423 			dev_priv->hotplug_funcs = &icp_hpd_funcs;
4424 		else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
4425 			dev_priv->hotplug_funcs = &spt_hpd_funcs;
4426 		else
4427 			dev_priv->hotplug_funcs = &ilk_hpd_funcs;
4428 	}
4429 }
4430 
4431 /**
4432  * intel_irq_fini - deinitializes IRQ support
4433  * @i915: i915 device instance
4434  *
4435  * This function deinitializes all the IRQ support.
4436  */
4437 void intel_irq_fini(struct drm_i915_private *i915)
4438 {
4439 	int i;
4440 
4441 	for (i = 0; i < MAX_L3_SLICES; ++i)
4442 		kfree(i915->l3_parity.remap_info[i]);
4443 }
4444 
4445 static irq_handler_t intel_irq_handler(struct drm_i915_private *dev_priv)
4446 {
4447 	if (HAS_GMCH(dev_priv)) {
4448 		if (IS_CHERRYVIEW(dev_priv))
4449 			return cherryview_irq_handler;
4450 		else if (IS_VALLEYVIEW(dev_priv))
4451 			return valleyview_irq_handler;
4452 		else if (GRAPHICS_VER(dev_priv) == 4)
4453 			return i965_irq_handler;
4454 		else if (GRAPHICS_VER(dev_priv) == 3)
4455 			return i915_irq_handler;
4456 		else
4457 			return i8xx_irq_handler;
4458 	} else {
4459 		if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
4460 			return dg1_irq_handler;
4461 		else if (GRAPHICS_VER(dev_priv) >= 11)
4462 			return gen11_irq_handler;
4463 		else if (GRAPHICS_VER(dev_priv) >= 8)
4464 			return gen8_irq_handler;
4465 		else
4466 			return ilk_irq_handler;
4467 	}
4468 }
4469 
4470 static void intel_irq_reset(struct drm_i915_private *dev_priv)
4471 {
4472 	if (HAS_GMCH(dev_priv)) {
4473 		if (IS_CHERRYVIEW(dev_priv))
4474 			cherryview_irq_reset(dev_priv);
4475 		else if (IS_VALLEYVIEW(dev_priv))
4476 			valleyview_irq_reset(dev_priv);
4477 		else if (GRAPHICS_VER(dev_priv) == 4)
4478 			i965_irq_reset(dev_priv);
4479 		else if (GRAPHICS_VER(dev_priv) == 3)
4480 			i915_irq_reset(dev_priv);
4481 		else
4482 			i8xx_irq_reset(dev_priv);
4483 	} else {
4484 		if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
4485 			dg1_irq_reset(dev_priv);
4486 		else if (GRAPHICS_VER(dev_priv) >= 11)
4487 			gen11_irq_reset(dev_priv);
4488 		else if (GRAPHICS_VER(dev_priv) >= 8)
4489 			gen8_irq_reset(dev_priv);
4490 		else
4491 			ilk_irq_reset(dev_priv);
4492 	}
4493 }
4494 
4495 static void intel_irq_postinstall(struct drm_i915_private *dev_priv)
4496 {
4497 	if (HAS_GMCH(dev_priv)) {
4498 		if (IS_CHERRYVIEW(dev_priv))
4499 			cherryview_irq_postinstall(dev_priv);
4500 		else if (IS_VALLEYVIEW(dev_priv))
4501 			valleyview_irq_postinstall(dev_priv);
4502 		else if (GRAPHICS_VER(dev_priv) == 4)
4503 			i965_irq_postinstall(dev_priv);
4504 		else if (GRAPHICS_VER(dev_priv) == 3)
4505 			i915_irq_postinstall(dev_priv);
4506 		else
4507 			i8xx_irq_postinstall(dev_priv);
4508 	} else {
4509 		if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
4510 			dg1_irq_postinstall(dev_priv);
4511 		else if (GRAPHICS_VER(dev_priv) >= 11)
4512 			gen11_irq_postinstall(dev_priv);
4513 		else if (GRAPHICS_VER(dev_priv) >= 8)
4514 			gen8_irq_postinstall(dev_priv);
4515 		else
4516 			ilk_irq_postinstall(dev_priv);
4517 	}
4518 }
4519 
4520 /**
4521  * intel_irq_install - enables the hardware interrupt
4522  * @dev_priv: i915 device instance
4523  *
4524  * This function enables the hardware interrupt handling, but leaves hotplug
4525  * handling disabled. It is called after intel_irq_init().
4526  *
4527  * In the driver load and resume code we need working interrupts in a few places
4528  * but don't want to deal with the hassle of concurrent probe and hotplug
4529  * workers. Hence the split into this two-stage approach.
4530  */
4531 int intel_irq_install(struct drm_i915_private *dev_priv)
4532 {
4533 	int irq = to_pci_dev(dev_priv->drm.dev)->irq;
4534 	int ret;
4535 
4536 	/*
4537 	 * We enable some interrupt sources in our postinstall hooks, so mark
4538 	 * interrupts as enabled _before_ actually enabling them to avoid
4539 	 * special cases in our ordering checks.
4540 	 */
4541 	dev_priv->runtime_pm.irqs_enabled = true;
4542 
4543 	dev_priv->irq_enabled = true;
4544 
4545 	intel_irq_reset(dev_priv);
4546 
4547 	ret = request_irq(irq, intel_irq_handler(dev_priv),
4548 			  IRQF_SHARED, DRIVER_NAME, dev_priv);
4549 	if (ret < 0) {
4550 		dev_priv->irq_enabled = false;
4551 		return ret;
4552 	}
4553 
4554 	intel_irq_postinstall(dev_priv);
4555 
4556 	return ret;
4557 }
4558 
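/*
 * Rough lifecycle implied by the comments in this file: intel_irq_init()
 * sets up work items and vtables, intel_irq_install() requests the
 * interrupt and runs the postinstall hooks, and intel_irq_uninstall()
 * below tears everything down again. Hotplug processing is enabled
 * separately, after install.
 */
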
4559 /**
4560  * intel_irq_uninstall - finalizes all irq handling
4561  * @dev_priv: i915 device instance
4562  *
4563  * This stops interrupt and hotplug handling, and unregisters and frees
4564  * all resources acquired in the init functions.
4565  */
4566 void intel_irq_uninstall(struct drm_i915_private *dev_priv)
4567 {
4568 	int irq = to_pci_dev(dev_priv->drm.dev)->irq;
4569 
4570 	/*
4571 	 * FIXME we can get called twice during driver probe
4572 	 * error handling as well as during driver remove due to
4573 	 * intel_modeset_driver_remove() calling us out of sequence.
4574 	 * Would be nice if it didn't do that...
4575 	 */
4576 	if (!dev_priv->irq_enabled)
4577 		return;
4578 
4579 	dev_priv->irq_enabled = false;
4580 
4581 	intel_irq_reset(dev_priv);
4582 
4583 	free_irq(irq, dev_priv);
4584 
4585 	intel_hpd_cancel_work(dev_priv);
4586 	dev_priv->runtime_pm.irqs_enabled = false;
4587 }
4588 
4589 /**
4590  * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
4591  * @dev_priv: i915 device instance
4592  *
4593  * This function is used to disable interrupts at runtime, both in the runtime
4594  * pm and the system suspend/resume code.
4595  */
4596 void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
4597 {
4598 	intel_irq_reset(dev_priv);
4599 	dev_priv->runtime_pm.irqs_enabled = false;
4600 	intel_synchronize_irq(dev_priv);
4601 }
4602 
4603 /**
4604  * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
4605  * @dev_priv: i915 device instance
4606  *
4607  * This function is used to enable interrupts at runtime, both in the runtime
4608  * pm and the system suspend/resume code.
4609  */
4610 void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
4611 {
4612 	dev_priv->runtime_pm.irqs_enabled = true;
4613 	intel_irq_reset(dev_priv);
4614 	intel_irq_postinstall(dev_priv);
4615 }
4616 
4617 bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
4618 {
4619 	return dev_priv->runtime_pm.irqs_enabled;
4620 }
4621 
4622 void intel_synchronize_irq(struct drm_i915_private *i915)
4623 {
4624 	synchronize_irq(to_pci_dev(i915->drm.dev)->irq);
4625 }
4626 
4627 void intel_synchronize_hardirq(struct drm_i915_private *i915)
4628 {
4629 	synchronize_hardirq(to_pci_dev(i915->drm.dev)->irq);
4630 }
4631