1 /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
2  */
3 /*
4  * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
5  * All Rights Reserved.
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a
8  * copy of this software and associated documentation files (the
9  * "Software"), to deal in the Software without restriction, including
10  * without limitation the rights to use, copy, modify, merge, publish,
11  * distribute, sub license, and/or sell copies of the Software, and to
12  * permit persons to whom the Software is furnished to do so, subject to
13  * the following conditions:
14  *
15  * The above copyright notice and this permission notice (including the
16  * next paragraph) shall be included in all copies or substantial portions
17  * of the Software.
18  *
19  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22  * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26  *
27  */
28 
29 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30 
31 #include <linux/slab.h>
32 #include <linux/sysrq.h>
33 
34 #include <drm/drm_drv.h>
35 
36 #include "display/icl_dsi_regs.h"
37 #include "display/intel_de.h"
38 #include "display/intel_display_trace.h"
39 #include "display/intel_display_types.h"
40 #include "display/intel_fifo_underrun.h"
41 #include "display/intel_hotplug.h"
42 #include "display/intel_lpe_audio.h"
43 #include "display/intel_psr.h"
44 
45 #include "gt/intel_breadcrumbs.h"
46 #include "gt/intel_gt.h"
47 #include "gt/intel_gt_irq.h"
48 #include "gt/intel_gt_pm_irq.h"
49 #include "gt/intel_gt_regs.h"
50 #include "gt/intel_rps.h"
51 
52 #include "i915_driver.h"
53 #include "i915_drv.h"
54 #include "i915_irq.h"
55 #include "intel_pm.h"
56 
57 /**
58  * DOC: interrupt handling
59  *
60  * These functions provide the basic support for enabling and disabling
61  * interrupt handling. There's a lot more functionality in i915_irq.c
62  * and related files, but that will be described in separate chapters.
63  */
64 
65 /*
66  * Interrupt statistics for PMU. Increments the counter only if the
67  * interrupt originated from the GPU, so that interrupts from a device
68  * sharing the interrupt line are not accounted.
69  */
70 static inline void pmu_irq_stats(struct drm_i915_private *i915,
71 				 irqreturn_t res)
72 {
73 	if (unlikely(res != IRQ_HANDLED))
74 		return;
75 
76 	/*
77 	 * A clever compiler translates that into INC. A not so clever one
78 	 * should at least prevent store tearing.
79 	 */
80 	WRITE_ONCE(i915->pmu.irq_count, i915->pmu.irq_count + 1);
81 }
82 
83 typedef bool (*long_pulse_detect_func)(enum hpd_pin pin, u32 val);
84 typedef u32 (*hotplug_enables_func)(struct drm_i915_private *i915,
85 				    enum hpd_pin pin);
86 
87 static const u32 hpd_ilk[HPD_NUM_PINS] = {
88 	[HPD_PORT_A] = DE_DP_A_HOTPLUG,
89 };
90 
91 static const u32 hpd_ivb[HPD_NUM_PINS] = {
92 	[HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
93 };
94 
95 static const u32 hpd_bdw[HPD_NUM_PINS] = {
96 	[HPD_PORT_A] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_A),
97 };
98 
99 static const u32 hpd_ibx[HPD_NUM_PINS] = {
100 	[HPD_CRT] = SDE_CRT_HOTPLUG,
101 	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
102 	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
103 	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
104 	[HPD_PORT_D] = SDE_PORTD_HOTPLUG,
105 };
106 
107 static const u32 hpd_cpt[HPD_NUM_PINS] = {
108 	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
109 	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
110 	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
111 	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
112 	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
113 };
114 
115 static const u32 hpd_spt[HPD_NUM_PINS] = {
116 	[HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
117 	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
118 	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
119 	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
120 	[HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT,
121 };
122 
123 static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
124 	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
125 	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
126 	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
127 	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
128 	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
129 	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN,
130 };
131 
132 static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
133 	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
134 	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
135 	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
136 	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
137 	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
138 	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS,
139 };
140 
141 static const u32 hpd_status_i915[HPD_NUM_PINS] = {
142 	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
143 	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
144 	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
145 	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
146 	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
147 	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS,
148 };
149 
150 static const u32 hpd_bxt[HPD_NUM_PINS] = {
151 	[HPD_PORT_A] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_A),
152 	[HPD_PORT_B] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_B),
153 	[HPD_PORT_C] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_C),
154 };
155 
156 static const u32 hpd_gen11[HPD_NUM_PINS] = {
157 	[HPD_PORT_TC1] = GEN11_TC_HOTPLUG(HPD_PORT_TC1) | GEN11_TBT_HOTPLUG(HPD_PORT_TC1),
158 	[HPD_PORT_TC2] = GEN11_TC_HOTPLUG(HPD_PORT_TC2) | GEN11_TBT_HOTPLUG(HPD_PORT_TC2),
159 	[HPD_PORT_TC3] = GEN11_TC_HOTPLUG(HPD_PORT_TC3) | GEN11_TBT_HOTPLUG(HPD_PORT_TC3),
160 	[HPD_PORT_TC4] = GEN11_TC_HOTPLUG(HPD_PORT_TC4) | GEN11_TBT_HOTPLUG(HPD_PORT_TC4),
161 	[HPD_PORT_TC5] = GEN11_TC_HOTPLUG(HPD_PORT_TC5) | GEN11_TBT_HOTPLUG(HPD_PORT_TC5),
162 	[HPD_PORT_TC6] = GEN11_TC_HOTPLUG(HPD_PORT_TC6) | GEN11_TBT_HOTPLUG(HPD_PORT_TC6),
163 };
164 
165 static const u32 hpd_icp[HPD_NUM_PINS] = {
166 	[HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_A),
167 	[HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_B),
168 	[HPD_PORT_C] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_C),
169 	[HPD_PORT_TC1] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC1),
170 	[HPD_PORT_TC2] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC2),
171 	[HPD_PORT_TC3] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC3),
172 	[HPD_PORT_TC4] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC4),
173 	[HPD_PORT_TC5] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC5),
174 	[HPD_PORT_TC6] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC6),
175 };
176 
177 static const u32 hpd_sde_dg1[HPD_NUM_PINS] = {
178 	[HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_A),
179 	[HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_B),
180 	[HPD_PORT_C] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_C),
181 	[HPD_PORT_D] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_D),
182 	[HPD_PORT_TC1] = SDE_TC_HOTPLUG_DG2(HPD_PORT_TC1),
183 };
184 
185 static void intel_hpd_init_pins(struct drm_i915_private *dev_priv)
186 {
187 	struct intel_hotplug *hpd = &dev_priv->display.hotplug;
188 
189 	if (HAS_GMCH(dev_priv)) {
190 		if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
191 		    IS_CHERRYVIEW(dev_priv))
192 			hpd->hpd = hpd_status_g4x;
193 		else
194 			hpd->hpd = hpd_status_i915;
195 		return;
196 	}
197 
198 	if (DISPLAY_VER(dev_priv) >= 11)
199 		hpd->hpd = hpd_gen11;
200 	else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
201 		hpd->hpd = hpd_bxt;
202 	else if (DISPLAY_VER(dev_priv) >= 8)
203 		hpd->hpd = hpd_bdw;
204 	else if (DISPLAY_VER(dev_priv) >= 7)
205 		hpd->hpd = hpd_ivb;
206 	else
207 		hpd->hpd = hpd_ilk;
208 
209 	if ((INTEL_PCH_TYPE(dev_priv) < PCH_DG1) &&
210 	    (!HAS_PCH_SPLIT(dev_priv) || HAS_PCH_NOP(dev_priv)))
211 		return;
212 
213 	if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1)
214 		hpd->pch_hpd = hpd_sde_dg1;
215 	else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
216 		hpd->pch_hpd = hpd_icp;
217 	else if (HAS_PCH_CNP(dev_priv) || HAS_PCH_SPT(dev_priv))
218 		hpd->pch_hpd = hpd_spt;
219 	else if (HAS_PCH_LPT(dev_priv) || HAS_PCH_CPT(dev_priv))
220 		hpd->pch_hpd = hpd_cpt;
221 	else if (HAS_PCH_IBX(dev_priv))
222 		hpd->pch_hpd = hpd_ibx;
223 	else
224 		MISSING_CASE(INTEL_PCH_TYPE(dev_priv));
225 }
226 
227 static void
228 intel_handle_vblank(struct drm_i915_private *dev_priv, enum pipe pipe)
229 {
230 	struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);
231 
232 	drm_crtc_handle_vblank(&crtc->base);
233 }
234 
235 void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr,
236 		    i915_reg_t iir, i915_reg_t ier)
237 {
238 	intel_uncore_write(uncore, imr, 0xffffffff);
239 	intel_uncore_posting_read(uncore, imr);
240 
241 	intel_uncore_write(uncore, ier, 0);
242 
243 	/* IIR can theoretically queue up two events. Be paranoid. */
244 	intel_uncore_write(uncore, iir, 0xffffffff);
245 	intel_uncore_posting_read(uncore, iir);
246 	intel_uncore_write(uncore, iir, 0xffffffff);
247 	intel_uncore_posting_read(uncore, iir);
248 }
249 
250 static void gen2_irq_reset(struct intel_uncore *uncore)
251 {
252 	intel_uncore_write16(uncore, GEN2_IMR, 0xffff);
253 	intel_uncore_posting_read16(uncore, GEN2_IMR);
254 
255 	intel_uncore_write16(uncore, GEN2_IER, 0);
256 
257 	/* IIR can theoretically queue up two events. Be paranoid. */
258 	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
259 	intel_uncore_posting_read16(uncore, GEN2_IIR);
260 	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
261 	intel_uncore_posting_read16(uncore, GEN2_IIR);
262 }
263 
264 /*
265  * We should clear IMR at preinstall/uninstall, and just check at postinstall.
266  */
267 static void gen3_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg)
268 {
269 	u32 val = intel_uncore_read(uncore, reg);
270 
271 	if (val == 0)
272 		return;
273 
274 	drm_WARN(&uncore->i915->drm, 1,
275 		 "Interrupt register 0x%x is not zero: 0x%08x\n",
276 		 i915_mmio_reg_offset(reg), val);
277 	intel_uncore_write(uncore, reg, 0xffffffff);
278 	intel_uncore_posting_read(uncore, reg);
279 	intel_uncore_write(uncore, reg, 0xffffffff);
280 	intel_uncore_posting_read(uncore, reg);
281 }
282 
283 static void gen2_assert_iir_is_zero(struct intel_uncore *uncore)
284 {
285 	u16 val = intel_uncore_read16(uncore, GEN2_IIR);
286 
287 	if (val == 0)
288 		return;
289 
290 	drm_WARN(&uncore->i915->drm, 1,
291 		 "Interrupt register 0x%x is not zero: 0x%08x\n",
292 		 i915_mmio_reg_offset(GEN2_IIR), val);
293 	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
294 	intel_uncore_posting_read16(uncore, GEN2_IIR);
295 	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
296 	intel_uncore_posting_read16(uncore, GEN2_IIR);
297 }
298 
299 void gen3_irq_init(struct intel_uncore *uncore,
300 		   i915_reg_t imr, u32 imr_val,
301 		   i915_reg_t ier, u32 ier_val,
302 		   i915_reg_t iir)
303 {
304 	gen3_assert_iir_is_zero(uncore, iir);
305 
306 	intel_uncore_write(uncore, ier, ier_val);
307 	intel_uncore_write(uncore, imr, imr_val);
308 	intel_uncore_posting_read(uncore, imr);
309 }
310 
311 static void gen2_irq_init(struct intel_uncore *uncore,
312 			  u32 imr_val, u32 ier_val)
313 {
314 	gen2_assert_iir_is_zero(uncore);
315 
316 	intel_uncore_write16(uncore, GEN2_IER, ier_val);
317 	intel_uncore_write16(uncore, GEN2_IMR, imr_val);
318 	intel_uncore_posting_read16(uncore, GEN2_IMR);
319 }
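
/*
 * Illustrative lifecycle sketch (a hypothetical call site, not code from
 * this file): an irq_reset handler masks and clears everything via
 * gen3_irq_reset(), and the matching irq_postinstall handler re-arms the
 * registers via gen3_irq_init(), which first asserts that IIR is still
 * zero:
 *
 *	gen3_irq_reset(uncore, DEIMR, DEIIR, DEIER);
 *	...
 *	gen3_irq_init(uncore, DEIMR, ~display_mask, DEIER, display_mask, DEIIR);
 *
 * where display_mask stands in for whatever DE interrupt bits the
 * platform actually wants enabled.
 */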
320 
321 /* For display hotplug interrupt */
322 static inline void
323 i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
324 				     u32 mask,
325 				     u32 bits)
326 {
327 	lockdep_assert_held(&dev_priv->irq_lock);
328 	drm_WARN_ON(&dev_priv->drm, bits & ~mask);
329 
330 	intel_uncore_rmw(&dev_priv->uncore, PORT_HOTPLUG_EN, mask, bits);
331 }
332 
333 /**
334  * i915_hotplug_interrupt_update - update hotplug interrupt enable
335  * @dev_priv: driver private
336  * @mask: bits to update
337  * @bits: bits to enable
338  * NOTE: the HPD enable bits are modified both inside and outside
339  * of an interrupt context. To prevent read-modify-write cycles from
340  * interfering, these bits are protected by a spinlock. Since this
341  * function is usually not called from a context where the lock is
342  * held already, this function acquires the lock itself. A non-locking
343  * version is also available.
344  */
345 void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
346 				   u32 mask,
347 				   u32 bits)
348 {
349 	spin_lock_irq(&dev_priv->irq_lock);
350 	i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
351 	spin_unlock_irq(&dev_priv->irq_lock);
352 }
353 
354 /**
355  * ilk_update_display_irq - update DEIMR
356  * @dev_priv: driver private
357  * @interrupt_mask: mask of interrupt bits to update
358  * @enabled_irq_mask: mask of interrupt bits to enable
359  */
360 static void ilk_update_display_irq(struct drm_i915_private *dev_priv,
361 				   u32 interrupt_mask, u32 enabled_irq_mask)
362 {
363 	u32 new_val;
364 
365 	lockdep_assert_held(&dev_priv->irq_lock);
366 	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);
367 
368 	new_val = dev_priv->irq_mask;
369 	new_val &= ~interrupt_mask;
370 	new_val |= (~enabled_irq_mask & interrupt_mask);
371 
372 	if (new_val != dev_priv->irq_mask &&
373 	    !drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv))) {
374 		dev_priv->irq_mask = new_val;
375 		intel_uncore_write(&dev_priv->uncore, DEIMR, dev_priv->irq_mask);
376 		intel_uncore_posting_read(&dev_priv->uncore, DEIMR);
377 	}
378 }
379 
380 void ilk_enable_display_irq(struct drm_i915_private *i915, u32 bits)
381 {
382 	ilk_update_display_irq(i915, bits, bits);
383 }
384 
385 void ilk_disable_display_irq(struct drm_i915_private *i915, u32 bits)
386 {
387 	ilk_update_display_irq(i915, bits, 0);
388 }
389 
390 /**
391  * bdw_update_port_irq - update DE port interrupt
392  * @dev_priv: driver private
393  * @interrupt_mask: mask of interrupt bits to update
394  * @enabled_irq_mask: mask of interrupt bits to enable
395  */
396 static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
397 				u32 interrupt_mask,
398 				u32 enabled_irq_mask)
399 {
400 	u32 new_val;
401 	u32 old_val;
402 
403 	lockdep_assert_held(&dev_priv->irq_lock);
404 
405 	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);
406 
407 	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
408 		return;
409 
410 	old_val = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PORT_IMR);
411 
412 	new_val = old_val;
413 	new_val &= ~interrupt_mask;
414 	new_val |= (~enabled_irq_mask & interrupt_mask);
415 
416 	if (new_val != old_val) {
417 		intel_uncore_write(&dev_priv->uncore, GEN8_DE_PORT_IMR, new_val);
418 		intel_uncore_posting_read(&dev_priv->uncore, GEN8_DE_PORT_IMR);
419 	}
420 }
421 
422 /**
423  * bdw_update_pipe_irq - update DE pipe interrupt
424  * @dev_priv: driver private
425  * @pipe: pipe whose interrupt to update
426  * @interrupt_mask: mask of interrupt bits to update
427  * @enabled_irq_mask: mask of interrupt bits to enable
428  */
429 static void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
430 				enum pipe pipe, u32 interrupt_mask,
431 				u32 enabled_irq_mask)
432 {
433 	u32 new_val;
434 
435 	lockdep_assert_held(&dev_priv->irq_lock);
436 
437 	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);
438 
439 	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
440 		return;
441 
442 	new_val = dev_priv->de_irq_mask[pipe];
443 	new_val &= ~interrupt_mask;
444 	new_val |= (~enabled_irq_mask & interrupt_mask);
445 
446 	if (new_val != dev_priv->de_irq_mask[pipe]) {
447 		dev_priv->de_irq_mask[pipe] = new_val;
448 		intel_uncore_write(&dev_priv->uncore, GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
449 		intel_uncore_posting_read(&dev_priv->uncore, GEN8_DE_PIPE_IMR(pipe));
450 	}
451 }
452 
453 void bdw_enable_pipe_irq(struct drm_i915_private *i915,
454 			 enum pipe pipe, u32 bits)
455 {
456 	bdw_update_pipe_irq(i915, pipe, bits, bits);
457 }
458 
459 void bdw_disable_pipe_irq(struct drm_i915_private *i915,
460 			  enum pipe pipe, u32 bits)
461 {
462 	bdw_update_pipe_irq(i915, pipe, bits, 0);
463 }
464 
465 /**
466  * ibx_display_interrupt_update - update SDEIMR
467  * @dev_priv: driver private
468  * @interrupt_mask: mask of interrupt bits to update
469  * @enabled_irq_mask: mask of interrupt bits to enable
470  */
471 static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
472 					 u32 interrupt_mask,
473 					 u32 enabled_irq_mask)
474 {
475 	u32 sdeimr = intel_uncore_read(&dev_priv->uncore, SDEIMR);
476 	sdeimr &= ~interrupt_mask;
477 	sdeimr |= (~enabled_irq_mask & interrupt_mask);
478 
479 	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);
480 
481 	lockdep_assert_held(&dev_priv->irq_lock);
482 
483 	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
484 		return;
485 
486 	intel_uncore_write(&dev_priv->uncore, SDEIMR, sdeimr);
487 	intel_uncore_posting_read(&dev_priv->uncore, SDEIMR);
488 }
489 
490 void ibx_enable_display_interrupt(struct drm_i915_private *i915, u32 bits)
491 {
492 	ibx_display_interrupt_update(i915, bits, bits);
493 }
494 
495 void ibx_disable_display_interrupt(struct drm_i915_private *i915, u32 bits)
496 {
497 	ibx_display_interrupt_update(i915, bits, 0);
498 }
499 
500 u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
501 			      enum pipe pipe)
502 {
503 	u32 status_mask = dev_priv->pipestat_irq_mask[pipe];
504 	u32 enable_mask = status_mask << 16;
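	/*
	 * Note: PIPESTAT keeps the status bits in the low half of the
	 * register and the matching enable bits 16 positions higher,
	 * hence the shift above; e.g. a status bit at position 1 is
	 * enabled by the bit at position 17.
	 */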
505 
506 	lockdep_assert_held(&dev_priv->irq_lock);
507 
508 	if (DISPLAY_VER(dev_priv) < 5)
509 		goto out;
510 
511 	/*
512 	 * On pipe A we don't support the PSR interrupt yet,
513 	 * on pipe B and C the same bit MBZ.
514 	 */
515 	if (drm_WARN_ON_ONCE(&dev_priv->drm,
516 			     status_mask & PIPE_A_PSR_STATUS_VLV))
517 		return 0;
518 	/*
519 	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
520 	 * A the same bit is for perf counters which we don't use either.
521 	 */
522 	if (drm_WARN_ON_ONCE(&dev_priv->drm,
523 			     status_mask & PIPE_B_PSR_STATUS_VLV))
524 		return 0;
525 
526 	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
527 			 SPRITE0_FLIP_DONE_INT_EN_VLV |
528 			 SPRITE1_FLIP_DONE_INT_EN_VLV);
529 	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
530 		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
531 	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
532 		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;
533 
534 out:
535 	drm_WARN_ONCE(&dev_priv->drm,
536 		      enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
537 		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
538 		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
539 		      pipe_name(pipe), enable_mask, status_mask);
540 
541 	return enable_mask;
542 }
543 
544 void i915_enable_pipestat(struct drm_i915_private *dev_priv,
545 			  enum pipe pipe, u32 status_mask)
546 {
547 	i915_reg_t reg = PIPESTAT(pipe);
548 	u32 enable_mask;
549 
550 	drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
551 		      "pipe %c: status_mask=0x%x\n",
552 		      pipe_name(pipe), status_mask);
553 
554 	lockdep_assert_held(&dev_priv->irq_lock);
555 	drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));
556 
557 	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == status_mask)
558 		return;
559 
560 	dev_priv->pipestat_irq_mask[pipe] |= status_mask;
561 	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
562 
563 	intel_uncore_write(&dev_priv->uncore, reg, enable_mask | status_mask);
564 	intel_uncore_posting_read(&dev_priv->uncore, reg);
565 }
566 
567 void i915_disable_pipestat(struct drm_i915_private *dev_priv,
568 			   enum pipe pipe, u32 status_mask)
569 {
570 	i915_reg_t reg = PIPESTAT(pipe);
571 	u32 enable_mask;
572 
573 	drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
574 		      "pipe %c: status_mask=0x%x\n",
575 		      pipe_name(pipe), status_mask);
576 
577 	lockdep_assert_held(&dev_priv->irq_lock);
578 	drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));
579 
580 	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == 0)
581 		return;
582 
583 	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
584 	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
585 
586 	intel_uncore_write(&dev_priv->uncore, reg, enable_mask | status_mask);
587 	intel_uncore_posting_read(&dev_priv->uncore, reg);
588 }
589 
590 static bool i915_has_asle(struct drm_i915_private *dev_priv)
591 {
592 	if (!dev_priv->display.opregion.asle)
593 		return false;
594 
595 	return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
596 }
597 
598 /**
599  * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
600  * @dev_priv: i915 device private
601  */
602 static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
603 {
604 	if (!i915_has_asle(dev_priv))
605 		return;
606 
607 	spin_lock_irq(&dev_priv->irq_lock);
608 
609 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
610 	if (DISPLAY_VER(dev_priv) >= 4)
611 		i915_enable_pipestat(dev_priv, PIPE_A,
612 				     PIPE_LEGACY_BLC_EVENT_STATUS);
613 
614 	spin_unlock_irq(&dev_priv->irq_lock);
615 }
616 
617 /*
618  * This timing diagram depicts the video signal in and
619  * around the vertical blanking period.
620  *
621  * Assumptions about the fictitious mode used in this example:
622  *  vblank_start >= 3
623  *  vsync_start = vblank_start + 1
624  *  vsync_end = vblank_start + 2
625  *  vtotal = vblank_start + 3
626  *
627  *           start of vblank:
628  *           latch double buffered registers
629  *           increment frame counter (ctg+)
630  *           generate start of vblank interrupt (gen4+)
631  *           |
632  *           |          frame start:
633  *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
634  *           |          may be shifted forward 1-3 extra lines via PIPECONF
635  *           |          |
636  *           |          |  start of vsync:
637  *           |          |  generate vsync interrupt
638  *           |          |  |
639  * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
640  *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
641  * ----va---> <-----------------vb--------------------> <--------va-------------
642  *       |          |       <----vs----->                     |
643  * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
644  * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
645  * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
646  *       |          |                                         |
647  *       last visible pixel                                   first visible pixel
648  *                  |                                         increment frame counter (gen3/4)
649  *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
650  *
651  * x  = horizontal active
652  * _  = horizontal blanking
653  * hs = horizontal sync
654  * va = vertical active
655  * vb = vertical blanking
656  * vs = vertical sync
657  * vbs = vblank_start (number)
658  *
659  * Summary:
660  * - most events happen at the start of horizontal sync
661  * - frame start happens at the start of horizontal blank, 1-4 lines
662  *   (depending on PIPECONF settings) after the start of vblank
663  * - gen3/4 pixel and frame counter are synchronized with the start
664  *   of horizontal active on the first line of vertical active
665  */
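
/*
 * Worked example for the gen3/4 "cooked" vblank counter below, using
 * illustrative 1080p-style timings (htotal = 2200, hsync_start = 2008,
 * vblank_start = 1080): the start of vblank expressed in pixels is
 *
 *	1080 * 2200 - (2200 - 2008) = 2375808
 *
 * Since the hardware frame counter only increments at the start of the
 * next active period, i915_get_vblank_counter() adds one to it whenever
 * the pixel counter has already reached that value, so the reported
 * count flips over at the start of vblank instead; the result is then
 * truncated to the counter's 24-bit range.
 */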
666 
667 /* Called from drm generic code, passed a 'crtc', which
668  * we use as a pipe index
669  */
670 u32 i915_get_vblank_counter(struct drm_crtc *crtc)
671 {
672 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
673 	struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[drm_crtc_index(crtc)];
674 	const struct drm_display_mode *mode = &vblank->hwmode;
675 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
676 	i915_reg_t high_frame, low_frame;
677 	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
678 	unsigned long irqflags;
679 
680 	/*
681 	 * On i965gm TV output the frame counter only works up to
682 	 * the point when we enable the TV encoder. After that the
683 	 * frame counter ceases to work and reads zero. We need a
684 	 * vblank wait before enabling the TV encoder and so we
685 	 * have to enable vblank interrupts while the frame counter
686 	 * is still in a working state. However the core vblank code
687 	 * does not like us returning non-zero frame counter values
688 	 * when we've told it that we don't have a working frame
689 	 * counter. Thus we must stop non-zero values leaking out.
690 	 */
691 	if (!vblank->max_vblank_count)
692 		return 0;
693 
694 	htotal = mode->crtc_htotal;
695 	hsync_start = mode->crtc_hsync_start;
696 	vbl_start = mode->crtc_vblank_start;
697 	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
698 		vbl_start = DIV_ROUND_UP(vbl_start, 2);
699 
700 	/* Convert to pixel count */
701 	vbl_start *= htotal;
702 
703 	/* Start of vblank event occurs at start of hsync */
704 	vbl_start -= htotal - hsync_start;
705 
706 	high_frame = PIPEFRAME(pipe);
707 	low_frame = PIPEFRAMEPIXEL(pipe);
708 
709 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
710 
711 	/*
712 	 * High & low register fields aren't synchronized, so make sure
713 	 * we get a low value that's stable across two reads of the high
714 	 * register.
715 	 */
716 	do {
717 		high1 = intel_de_read_fw(dev_priv, high_frame) & PIPE_FRAME_HIGH_MASK;
718 		low   = intel_de_read_fw(dev_priv, low_frame);
719 		high2 = intel_de_read_fw(dev_priv, high_frame) & PIPE_FRAME_HIGH_MASK;
720 	} while (high1 != high2);
721 
722 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
723 
724 	high1 >>= PIPE_FRAME_HIGH_SHIFT;
725 	pixel = low & PIPE_PIXEL_MASK;
726 	low >>= PIPE_FRAME_LOW_SHIFT;
727 
728 	/*
729 	 * The frame counter increments at beginning of active.
730 	 * Cook up a vblank counter by also checking the pixel
731 	 * counter against vblank start.
732 	 */
733 	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
734 }
735 
736 u32 g4x_get_vblank_counter(struct drm_crtc *crtc)
737 {
738 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
739 	struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[drm_crtc_index(crtc)];
740 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
741 
742 	if (!vblank->max_vblank_count)
743 		return 0;
744 
745 	return intel_uncore_read(&dev_priv->uncore, PIPE_FRMCOUNT_G4X(pipe));
746 }
747 
748 static u32 intel_crtc_scanlines_since_frame_timestamp(struct intel_crtc *crtc)
749 {
750 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
751 	struct drm_vblank_crtc *vblank =
752 		&crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
753 	const struct drm_display_mode *mode = &vblank->hwmode;
754 	u32 htotal = mode->crtc_htotal;
755 	u32 clock = mode->crtc_clock;
756 	u32 scan_prev_time, scan_curr_time, scan_post_time;
757 
758 	/*
759 	 * To avoid the race condition where we might cross into the
760 	 * next vblank just between the PIPE_FRMTMSTMP and TIMESTAMP_CTR
761 	 * reads, we make sure we read PIPE_FRMTMSTMP and TIMESTAMP_CTR
762 	 * during the same frame.
763 	 */
764 	do {
765 		/*
766 		 * This field provides read back of the display
767 		 * pipe frame time stamp. The time stamp value
768 		 * is sampled at every start of vertical blank.
769 		 */
770 		scan_prev_time = intel_de_read_fw(dev_priv,
771 						  PIPE_FRMTMSTMP(crtc->pipe));
772 
773 		/*
774 		 * The TIMESTAMP_CTR register has the current
775 		 * time stamp value.
776 		 */
777 		scan_curr_time = intel_de_read_fw(dev_priv, IVB_TIMESTAMP_CTR);
778 
779 		scan_post_time = intel_de_read_fw(dev_priv,
780 						  PIPE_FRMTMSTMP(crtc->pipe));
781 	} while (scan_post_time != scan_prev_time);
782 
783 	return div_u64(mul_u32_u32(scan_curr_time - scan_prev_time,
784 				   clock), 1000 * htotal);
785 }
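
/*
 * A worked example of the conversion above, assuming (as the arithmetic
 * implies) that the timestamp counters tick in microseconds: with a
 * 148500 kHz pixel clock and htotal = 2200, one 60 Hz frame worth of
 * ticks (16667) yields
 *
 *	16667 * 148500 / (1000 * 2200) ~= 1125
 *
 * scanlines, i.e. a full vtotal for a typical 1080p mode.
 */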
786 
787 /*
788  * On certain encoders on certain platforms, the pipe
789  * scanline register will not work to get the scanline,
790  * since the timings are driven from the PORT, or because
791  * of issues with scanline register updates.
792  * This function instead uses the framestamp and current
793  * timestamp registers to calculate the scanline.
794  */
795 static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc)
796 {
797 	struct drm_vblank_crtc *vblank =
798 		&crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
799 	const struct drm_display_mode *mode = &vblank->hwmode;
800 	u32 vblank_start = mode->crtc_vblank_start;
801 	u32 vtotal = mode->crtc_vtotal;
802 	u32 scanline;
803 
804 	scanline = intel_crtc_scanlines_since_frame_timestamp(crtc);
805 	scanline = min(scanline, vtotal - 1);
806 	scanline = (scanline + vblank_start) % vtotal;
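	/*
	 * Illustrative numbers: with vblank_start = 1080 and vtotal = 1125,
	 * a raw value of 50 scanlines since the last frame timestamp maps
	 * to (50 + 1080) % 1125 = scanline 5, i.e. a few lines into the
	 * active portion of the next frame.
	 */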
807 
808 	return scanline;
809 }
810 
811 /*
812  * Use intel_de_read_fw() only for fast reads of the display block; no
813  * need for forcewake etc.
814  */
815 static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
816 {
817 	struct drm_device *dev = crtc->base.dev;
818 	struct drm_i915_private *dev_priv = to_i915(dev);
819 	const struct drm_display_mode *mode;
820 	struct drm_vblank_crtc *vblank;
821 	enum pipe pipe = crtc->pipe;
822 	int position, vtotal;
823 
824 	if (!crtc->active)
825 		return 0;
826 
827 	vblank = &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
828 	mode = &vblank->hwmode;
829 
830 	if (crtc->mode_flags & I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP)
831 		return __intel_get_crtc_scanline_from_timestamp(crtc);
832 
833 	vtotal = mode->crtc_vtotal;
834 	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
835 		vtotal /= 2;
836 
837 	position = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & PIPEDSL_LINE_MASK;
838 
839 	/*
840 	 * On HSW, the DSL reg (0x70000) appears to return 0 if we
841 	 * read it just before the start of vblank.  So try it again
842 	 * so we don't accidentally end up spanning a vblank frame
843 	 * increment, causing the pipe_update_end() code to squawk at us.
844 	 *
845 	 * The nature of this problem means we can't simply check the ISR
846 	 * bit and return the vblank start value; nor can we use the scanline
847 	 * debug register in the transcoder as it appears to have the same
848 	 * problem.  We may need to extend this to include other platforms,
849 	 * but so far testing only shows the problem on HSW.
850 	 */
851 	if (HAS_DDI(dev_priv) && !position) {
852 		int i, temp;
853 
854 		for (i = 0; i < 100; i++) {
855 			udelay(1);
856 			temp = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & PIPEDSL_LINE_MASK;
857 			if (temp != position) {
858 				position = temp;
859 				break;
860 			}
861 		}
862 	}
863 
864 	/*
865 	 * See update_scanline_offset() for the details on the
866 	 * scanline_offset adjustment.
867 	 */
868 	return (position + crtc->scanline_offset) % vtotal;
869 }
870 
871 static bool i915_get_crtc_scanoutpos(struct drm_crtc *_crtc,
872 				     bool in_vblank_irq,
873 				     int *vpos, int *hpos,
874 				     ktime_t *stime, ktime_t *etime,
875 				     const struct drm_display_mode *mode)
876 {
877 	struct drm_device *dev = _crtc->dev;
878 	struct drm_i915_private *dev_priv = to_i915(dev);
879 	struct intel_crtc *crtc = to_intel_crtc(_crtc);
880 	enum pipe pipe = crtc->pipe;
881 	int position;
882 	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
883 	unsigned long irqflags;
884 	bool use_scanline_counter = DISPLAY_VER(dev_priv) >= 5 ||
885 		IS_G4X(dev_priv) || DISPLAY_VER(dev_priv) == 2 ||
886 		crtc->mode_flags & I915_MODE_FLAG_USE_SCANLINE_COUNTER;
887 
888 	if (drm_WARN_ON(&dev_priv->drm, !mode->crtc_clock)) {
889 		drm_dbg(&dev_priv->drm,
890 			"trying to get scanoutpos for disabled "
891 			"pipe %c\n", pipe_name(pipe));
892 		return false;
893 	}
894 
895 	htotal = mode->crtc_htotal;
896 	hsync_start = mode->crtc_hsync_start;
897 	vtotal = mode->crtc_vtotal;
898 	vbl_start = mode->crtc_vblank_start;
899 	vbl_end = mode->crtc_vblank_end;
900 
901 	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
902 		vbl_start = DIV_ROUND_UP(vbl_start, 2);
903 		vbl_end /= 2;
904 		vtotal /= 2;
905 	}
906 
907 	/*
908 	 * Lock uncore.lock, as we will do multiple timing critical raw
909 	 * register reads, potentially with preemption disabled, so the
910 	 * following code must not block on uncore.lock.
911 	 */
912 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
913 
914 	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
915 
916 	/* Get optional system timestamp before query. */
917 	if (stime)
918 		*stime = ktime_get();
919 
920 	if (crtc->mode_flags & I915_MODE_FLAG_VRR) {
921 		int scanlines = intel_crtc_scanlines_since_frame_timestamp(crtc);
922 
923 		position = __intel_get_crtc_scanline(crtc);
924 
925 		/*
926 		 * Already exiting vblank? If so, shift our position
927 		 * so it looks like we're already approaching the full
928 		 * vblank end. This should make the generated timestamp
929 		 * more or less match when the active portion will start.
930 		 */
931 		if (position >= vbl_start && scanlines < position)
932 			position = min(crtc->vmax_vblank_start + scanlines, vtotal - 1);
933 	} else if (use_scanline_counter) {
934 		/* No obvious pixelcount register. Only query vertical
935 		 * scanout position from Display scan line register.
936 		 */
937 		position = __intel_get_crtc_scanline(crtc);
938 	} else {
939 		/* Have access to pixelcount since start of frame.
940 		 * We can split this into vertical and horizontal
941 		 * scanout position.
942 		 */
943 		position = (intel_de_read_fw(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
944 
945 		/* convert to pixel counts */
946 		vbl_start *= htotal;
947 		vbl_end *= htotal;
948 		vtotal *= htotal;
949 
950 		/*
951 		 * In interlaced modes, the pixel counter counts all pixels,
952 		 * so one field will have htotal more pixels. In order to keep
953 		 * the reported position from jumping backwards when the pixel
954 		 * counter is beyond the length of the shorter field, just
955 		 * clamp the position to the length of the shorter field. This
956 		 * matches how the scanline counter based position works since
957 		 * the scanline counter doesn't count the two half lines.
958 		 */
959 		if (position >= vtotal)
960 			position = vtotal - 1;
961 
962 		/*
963 		 * Start of vblank interrupt is triggered at start of hsync,
964 		 * just prior to the first active line of vblank. However we
965 		 * consider lines to start at the leading edge of horizontal
966 		 * active. So, should we get here before we've crossed into
967 		 * the horizontal active of the first line in vblank, we would
968 		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
969 		 * always add htotal-hsync_start to the current pixel position.
970 		 */
971 		position = (position + htotal - hsync_start) % vtotal;
972 	}
973 
974 	/* Get optional system timestamp after query. */
975 	if (etime)
976 		*etime = ktime_get();
977 
978 	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
979 
980 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
981 
982 	/*
983 	 * While in vblank, position will be negative
984 	 * counting up towards 0 at vbl_end. And outside
985 	 * vblank, position will be positive counting
986 	 * up since vbl_end.
987 	 */
988 	if (position >= vbl_start)
989 		position -= vbl_end;
990 	else
991 		position += vtotal - vbl_end;
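
	/*
	 * Illustrative numbers, for a typical progressive mode where
	 * vbl_end == vtotal (say vbl_start = 1080, vbl_end = vtotal = 1125,
	 * in scanline units): scanline 1100 reports 1100 - 1125 = -25,
	 * i.e. still in vblank, while scanline 100 reports
	 * 100 + (1125 - 1125) = 100, i.e. into the active portion.
	 */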
992 
993 	if (use_scanline_counter) {
994 		*vpos = position;
995 		*hpos = 0;
996 	} else {
997 		*vpos = position / htotal;
998 		*hpos = position - (*vpos * htotal);
999 	}
1000 
1001 	return true;
1002 }
1003 
1004 bool intel_crtc_get_vblank_timestamp(struct drm_crtc *crtc, int *max_error,
1005 				     ktime_t *vblank_time, bool in_vblank_irq)
1006 {
1007 	return drm_crtc_vblank_helper_get_vblank_timestamp_internal(
1008 		crtc, max_error, vblank_time, in_vblank_irq,
1009 		i915_get_crtc_scanoutpos);
1010 }
1011 
1012 int intel_get_crtc_scanline(struct intel_crtc *crtc)
1013 {
1014 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1015 	unsigned long irqflags;
1016 	int position;
1017 
1018 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
1019 	position = __intel_get_crtc_scanline(crtc);
1020 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
1021 
1022 	return position;
1023 }
1024 
1025 /**
1026  * ivb_parity_work - Workqueue called when a parity error interrupt
1027  * occurred.
1028  * @work: workqueue struct
1029  *
1030  * Doesn't actually do anything except notify userspace. As a consequence of
1031  * this event, userspace should try to remap the bad rows since statistically
1032  * the same row is more likely to go bad again.
1033  */
1034 static void ivb_parity_work(struct work_struct *work)
1035 {
1036 	struct drm_i915_private *dev_priv =
1037 		container_of(work, typeof(*dev_priv), l3_parity.error_work);
1038 	struct intel_gt *gt = to_gt(dev_priv);
1039 	u32 error_status, row, bank, subbank;
1040 	char *parity_event[6];
1041 	u32 misccpctl;
1042 	u8 slice = 0;
1043 
1044 	/* We must turn off DOP level clock gating to access the L3 registers.
1045 	 * In order to prevent a get/put style interface, acquire struct mutex
1046 	 * any time we access those registers.
1047 	 */
1048 	mutex_lock(&dev_priv->drm.struct_mutex);
1049 
1050 	/* If we've screwed up tracking, just let the interrupt fire again */
1051 	if (drm_WARN_ON(&dev_priv->drm, !dev_priv->l3_parity.which_slice))
1052 		goto out;
1053 
1054 	misccpctl = intel_uncore_rmw(&dev_priv->uncore, GEN7_MISCCPCTL,
1055 				     GEN7_DOP_CLOCK_GATE_ENABLE, 0);
1056 	intel_uncore_posting_read(&dev_priv->uncore, GEN7_MISCCPCTL);
1057 
1058 	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
1059 		i915_reg_t reg;
1060 
1061 		slice--;
1062 		if (drm_WARN_ON_ONCE(&dev_priv->drm,
1063 				     slice >= NUM_L3_SLICES(dev_priv)))
1064 			break;
1065 
1066 		dev_priv->l3_parity.which_slice &= ~(1<<slice);
1067 
1068 		reg = GEN7_L3CDERRST1(slice);
1069 
1070 		error_status = intel_uncore_read(&dev_priv->uncore, reg);
1071 		row = GEN7_PARITY_ERROR_ROW(error_status);
1072 		bank = GEN7_PARITY_ERROR_BANK(error_status);
1073 		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
1074 
1075 		intel_uncore_write(&dev_priv->uncore, reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
1076 		intel_uncore_posting_read(&dev_priv->uncore, reg);
1077 
1078 		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
1079 		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
1080 		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
1081 		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
1082 		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
1083 		parity_event[5] = NULL;
1084 
1085 		kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj,
1086 				   KOBJ_CHANGE, parity_event);
1087 
1088 		drm_dbg(&dev_priv->drm,
1089 			"Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
1090 			slice, row, bank, subbank);
1091 
1092 		kfree(parity_event[4]);
1093 		kfree(parity_event[3]);
1094 		kfree(parity_event[2]);
1095 		kfree(parity_event[1]);
1096 	}
1097 
1098 	intel_uncore_write(&dev_priv->uncore, GEN7_MISCCPCTL, misccpctl);
1099 
1100 out:
1101 	drm_WARN_ON(&dev_priv->drm, dev_priv->l3_parity.which_slice);
1102 	spin_lock_irq(gt->irq_lock);
1103 	gen5_gt_enable_irq(gt, GT_PARITY_ERROR(dev_priv));
1104 	spin_unlock_irq(gt->irq_lock);
1105 
1106 	mutex_unlock(&dev_priv->drm.struct_mutex);
1107 }
1108 
1109 static bool gen11_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1110 {
1111 	switch (pin) {
1112 	case HPD_PORT_TC1:
1113 	case HPD_PORT_TC2:
1114 	case HPD_PORT_TC3:
1115 	case HPD_PORT_TC4:
1116 	case HPD_PORT_TC5:
1117 	case HPD_PORT_TC6:
1118 		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(pin);
1119 	default:
1120 		return false;
1121 	}
1122 }
1123 
1124 static bool bxt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1125 {
1126 	switch (pin) {
1127 	case HPD_PORT_A:
1128 		return val & PORTA_HOTPLUG_LONG_DETECT;
1129 	case HPD_PORT_B:
1130 		return val & PORTB_HOTPLUG_LONG_DETECT;
1131 	case HPD_PORT_C:
1132 		return val & PORTC_HOTPLUG_LONG_DETECT;
1133 	default:
1134 		return false;
1135 	}
1136 }
1137 
1138 static bool icp_ddi_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1139 {
1140 	switch (pin) {
1141 	case HPD_PORT_A:
1142 	case HPD_PORT_B:
1143 	case HPD_PORT_C:
1144 	case HPD_PORT_D:
1145 		return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(pin);
1146 	default:
1147 		return false;
1148 	}
1149 }
1150 
1151 static bool icp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1152 {
1153 	switch (pin) {
1154 	case HPD_PORT_TC1:
1155 	case HPD_PORT_TC2:
1156 	case HPD_PORT_TC3:
1157 	case HPD_PORT_TC4:
1158 	case HPD_PORT_TC5:
1159 	case HPD_PORT_TC6:
1160 		return val & ICP_TC_HPD_LONG_DETECT(pin);
1161 	default:
1162 		return false;
1163 	}
1164 }
1165 
1166 static bool spt_port_hotplug2_long_detect(enum hpd_pin pin, u32 val)
1167 {
1168 	switch (pin) {
1169 	case HPD_PORT_E:
1170 		return val & PORTE_HOTPLUG_LONG_DETECT;
1171 	default:
1172 		return false;
1173 	}
1174 }
1175 
1176 static bool spt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1177 {
1178 	switch (pin) {
1179 	case HPD_PORT_A:
1180 		return val & PORTA_HOTPLUG_LONG_DETECT;
1181 	case HPD_PORT_B:
1182 		return val & PORTB_HOTPLUG_LONG_DETECT;
1183 	case HPD_PORT_C:
1184 		return val & PORTC_HOTPLUG_LONG_DETECT;
1185 	case HPD_PORT_D:
1186 		return val & PORTD_HOTPLUG_LONG_DETECT;
1187 	default:
1188 		return false;
1189 	}
1190 }
1191 
1192 static bool ilk_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1193 {
1194 	switch (pin) {
1195 	case HPD_PORT_A:
1196 		return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
1197 	default:
1198 		return false;
1199 	}
1200 }
1201 
1202 static bool pch_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1203 {
1204 	switch (pin) {
1205 	case HPD_PORT_B:
1206 		return val & PORTB_HOTPLUG_LONG_DETECT;
1207 	case HPD_PORT_C:
1208 		return val & PORTC_HOTPLUG_LONG_DETECT;
1209 	case HPD_PORT_D:
1210 		return val & PORTD_HOTPLUG_LONG_DETECT;
1211 	default:
1212 		return false;
1213 	}
1214 }
1215 
1216 static bool i9xx_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1217 {
1218 	switch (pin) {
1219 	case HPD_PORT_B:
1220 		return val & PORTB_HOTPLUG_INT_LONG_PULSE;
1221 	case HPD_PORT_C:
1222 		return val & PORTC_HOTPLUG_INT_LONG_PULSE;
1223 	case HPD_PORT_D:
1224 		return val & PORTD_HOTPLUG_INT_LONG_PULSE;
1225 	default:
1226 		return false;
1227 	}
1228 }
1229 
1230 /*
1231  * Get a bit mask of pins that have triggered, and which ones may be long.
1232  * This can be called multiple times with the same masks to accumulate
1233  * hotplug detection results from several registers.
1234  *
1235  * Note that the caller is expected to zero out the masks initially.
1236  */
1237 static void intel_get_hpd_pins(struct drm_i915_private *dev_priv,
1238 			       u32 *pin_mask, u32 *long_mask,
1239 			       u32 hotplug_trigger, u32 dig_hotplug_reg,
1240 			       const u32 hpd[HPD_NUM_PINS],
1241 			       bool long_pulse_detect(enum hpd_pin pin, u32 val))
1242 {
1243 	enum hpd_pin pin;
1244 
1245 	BUILD_BUG_ON(BITS_PER_TYPE(*pin_mask) < HPD_NUM_PINS);
1246 
1247 	for_each_hpd_pin(pin) {
1248 		if ((hpd[pin] & hotplug_trigger) == 0)
1249 			continue;
1250 
1251 		*pin_mask |= BIT(pin);
1252 
1253 		if (long_pulse_detect(pin, dig_hotplug_reg))
1254 			*long_mask |= BIT(pin);
1255 	}
1256 
1257 	drm_dbg(&dev_priv->drm,
1258 		"hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x, long 0x%08x\n",
1259 		hotplug_trigger, dig_hotplug_reg, *pin_mask, *long_mask);
1260 
1261 }
1262 
1263 static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv,
1264 				  const u32 hpd[HPD_NUM_PINS])
1265 {
1266 	struct intel_encoder *encoder;
1267 	u32 enabled_irqs = 0;
1268 
1269 	for_each_intel_encoder(&dev_priv->drm, encoder)
1270 		if (dev_priv->display.hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
1271 			enabled_irqs |= hpd[encoder->hpd_pin];
1272 
1273 	return enabled_irqs;
1274 }
1275 
1276 static u32 intel_hpd_hotplug_irqs(struct drm_i915_private *dev_priv,
1277 				  const u32 hpd[HPD_NUM_PINS])
1278 {
1279 	struct intel_encoder *encoder;
1280 	u32 hotplug_irqs = 0;
1281 
1282 	for_each_intel_encoder(&dev_priv->drm, encoder)
1283 		hotplug_irqs |= hpd[encoder->hpd_pin];
1284 
1285 	return hotplug_irqs;
1286 }
1287 
1288 static u32 intel_hpd_hotplug_enables(struct drm_i915_private *i915,
1289 				     hotplug_enables_func hotplug_enables)
1290 {
1291 	struct intel_encoder *encoder;
1292 	u32 hotplug = 0;
1293 
1294 	for_each_intel_encoder(&i915->drm, encoder)
1295 		hotplug |= hotplug_enables(i915, encoder->hpd_pin);
1296 
1297 	return hotplug;
1298 }
1299 
1300 static void gmbus_irq_handler(struct drm_i915_private *dev_priv)
1301 {
1302 	wake_up_all(&dev_priv->display.gmbus.wait_queue);
1303 }
1304 
1305 static void dp_aux_irq_handler(struct drm_i915_private *dev_priv)
1306 {
1307 	wake_up_all(&dev_priv->display.gmbus.wait_queue);
1308 }
1309 
1310 #if defined(CONFIG_DEBUG_FS)
1311 static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1312 					 enum pipe pipe,
1313 					 u32 crc0, u32 crc1,
1314 					 u32 crc2, u32 crc3,
1315 					 u32 crc4)
1316 {
1317 	struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);
1318 	struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc;
1319 	u32 crcs[5] = { crc0, crc1, crc2, crc3, crc4 };
1320 
1321 	trace_intel_pipe_crc(crtc, crcs);
1322 
1323 	spin_lock(&pipe_crc->lock);
1324 	/*
1325 	 * For some not yet identified reason, the first CRC is
1326 	 * bonkers. So let's just wait for the next vblank and read
1327 	 * out the buggy result.
1328 	 *
1329 	 * On GEN8+ sometimes the second CRC is bonkers as well, so
1330 	 * don't trust that one either.
1331 	 */
1332 	if (pipe_crc->skipped <= 0 ||
1333 	    (DISPLAY_VER(dev_priv) >= 8 && pipe_crc->skipped == 1)) {
1334 		pipe_crc->skipped++;
1335 		spin_unlock(&pipe_crc->lock);
1336 		return;
1337 	}
1338 	spin_unlock(&pipe_crc->lock);
1339 
1340 	drm_crtc_add_crc_entry(&crtc->base, true,
1341 				drm_crtc_accurate_vblank_count(&crtc->base),
1342 				crcs);
1343 }
1344 #else
1345 static inline void
1346 display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1347 			     enum pipe pipe,
1348 			     u32 crc0, u32 crc1,
1349 			     u32 crc2, u32 crc3,
1350 			     u32 crc4) {}
1351 #endif
1352 
1353 static void flip_done_handler(struct drm_i915_private *i915,
1354 			      enum pipe pipe)
1355 {
1356 	struct intel_crtc *crtc = intel_crtc_for_pipe(i915, pipe);
1357 	struct drm_crtc_state *crtc_state = crtc->base.state;
1358 	struct drm_pending_vblank_event *e = crtc_state->event;
1359 	struct drm_device *dev = &i915->drm;
1360 	unsigned long irqflags;
1361 
1362 	spin_lock_irqsave(&dev->event_lock, irqflags);
1363 
1364 	crtc_state->event = NULL;
1365 
1366 	drm_crtc_send_vblank_event(&crtc->base, e);
1367 
1368 	spin_unlock_irqrestore(&dev->event_lock, irqflags);
1369 }
1370 
1371 static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1372 				     enum pipe pipe)
1373 {
1374 	display_pipe_crc_irq_handler(dev_priv, pipe,
1375 				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_1_IVB(pipe)),
1376 				     0, 0, 0, 0);
1377 }
1378 
1379 static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1380 				     enum pipe pipe)
1381 {
1382 	display_pipe_crc_irq_handler(dev_priv, pipe,
1383 				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_1_IVB(pipe)),
1384 				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_2_IVB(pipe)),
1385 				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_3_IVB(pipe)),
1386 				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_4_IVB(pipe)),
1387 				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_5_IVB(pipe)));
1388 }
1389 
1390 static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1391 				      enum pipe pipe)
1392 {
1393 	u32 res1, res2;
1394 
1395 	if (DISPLAY_VER(dev_priv) >= 3)
1396 		res1 = intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RES1_I915(pipe));
1397 	else
1398 		res1 = 0;
1399 
1400 	if (DISPLAY_VER(dev_priv) >= 5 || IS_G4X(dev_priv))
1401 		res2 = intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RES2_G4X(pipe));
1402 	else
1403 		res2 = 0;
1404 
1405 	display_pipe_crc_irq_handler(dev_priv, pipe,
1406 				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RED(pipe)),
1407 				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_GREEN(pipe)),
1408 				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_BLUE(pipe)),
1409 				     res1, res2);
1410 }
1411 
1412 static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
1413 {
1414 	enum pipe pipe;
1415 
1416 	for_each_pipe(dev_priv, pipe) {
1417 		intel_uncore_write(&dev_priv->uncore, PIPESTAT(pipe),
1418 			   PIPESTAT_INT_STATUS_MASK |
1419 			   PIPE_FIFO_UNDERRUN_STATUS);
1420 
1421 		dev_priv->pipestat_irq_mask[pipe] = 0;
1422 	}
1423 }
1424 
1425 static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
1426 				  u32 iir, u32 pipe_stats[I915_MAX_PIPES])
1427 {
1428 	enum pipe pipe;
1429 
1430 	spin_lock(&dev_priv->irq_lock);
1431 
1432 	if (!dev_priv->display_irqs_enabled) {
1433 		spin_unlock(&dev_priv->irq_lock);
1434 		return;
1435 	}
1436 
1437 	for_each_pipe(dev_priv, pipe) {
1438 		i915_reg_t reg;
1439 		u32 status_mask, enable_mask, iir_bit = 0;
1440 
1441 		/*
1442 		 * PIPESTAT bits get signalled even when the interrupt is
1443 		 * disabled with the mask bits, and some of the status bits do
1444 		 * not generate interrupts at all (like the underrun bit). Hence
1445 		 * we need to be careful that we only handle what we want to
1446 		 * handle.
1447 		 */
1448 
1449 		/* fifo underruns are filtered in the underrun handler. */
1450 		status_mask = PIPE_FIFO_UNDERRUN_STATUS;
1451 
1452 		switch (pipe) {
1453 		default:
1454 		case PIPE_A:
1455 			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
1456 			break;
1457 		case PIPE_B:
1458 			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
1459 			break;
1460 		case PIPE_C:
1461 			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
1462 			break;
1463 		}
1464 		if (iir & iir_bit)
1465 			status_mask |= dev_priv->pipestat_irq_mask[pipe];
1466 
1467 		if (!status_mask)
1468 			continue;
1469 
1470 		reg = PIPESTAT(pipe);
1471 		pipe_stats[pipe] = intel_uncore_read(&dev_priv->uncore, reg) & status_mask;
1472 		enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
1473 
1474 		/*
1475 		 * Clear the PIPE*STAT regs before the IIR
1476 		 *
1477 		 * Toggle the enable bits to make sure we get an
1478 		 * edge in the ISR pipe event bit if we don't clear
1479 		 * all the enabled status bits. Otherwise the edge
1480 		 * triggered IIR on i965/g4x wouldn't notice that
1481 		 * an interrupt is still pending.
1482 		 */
1483 		if (pipe_stats[pipe]) {
1484 			intel_uncore_write(&dev_priv->uncore, reg, pipe_stats[pipe]);
1485 			intel_uncore_write(&dev_priv->uncore, reg, enable_mask);
1486 		}
1487 	}
1488 	spin_unlock(&dev_priv->irq_lock);
1489 }
1490 
1491 static void i8xx_pipestat_irq_handler(struct drm_i915_private *dev_priv,
1492 				      u16 iir, u32 pipe_stats[I915_MAX_PIPES])
1493 {
1494 	enum pipe pipe;
1495 
1496 	for_each_pipe(dev_priv, pipe) {
1497 		if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
1498 			intel_handle_vblank(dev_priv, pipe);
1499 
1500 		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1501 			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1502 
1503 		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1504 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1505 	}
1506 }
1507 
1508 static void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv,
1509 				      u32 iir, u32 pipe_stats[I915_MAX_PIPES])
1510 {
1511 	bool blc_event = false;
1512 	enum pipe pipe;
1513 
1514 	for_each_pipe(dev_priv, pipe) {
1515 		if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
1516 			intel_handle_vblank(dev_priv, pipe);
1517 
1518 		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
1519 			blc_event = true;
1520 
1521 		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1522 			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1523 
1524 		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1525 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1526 	}
1527 
1528 	if (blc_event || (iir & I915_ASLE_INTERRUPT))
1529 		intel_opregion_asle_intr(dev_priv);
1530 }
1531 
1532 static void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv,
1533 				      u32 iir, u32 pipe_stats[I915_MAX_PIPES])
1534 {
1535 	bool blc_event = false;
1536 	enum pipe pipe;
1537 
1538 	for_each_pipe(dev_priv, pipe) {
1539 		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
1540 			intel_handle_vblank(dev_priv, pipe);
1541 
1542 		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
1543 			blc_event = true;
1544 
1545 		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1546 			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1547 
1548 		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1549 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1550 	}
1551 
1552 	if (blc_event || (iir & I915_ASLE_INTERRUPT))
1553 		intel_opregion_asle_intr(dev_priv);
1554 
1555 	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
1556 		gmbus_irq_handler(dev_priv);
1557 }
1558 
1559 static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
1560 					    u32 pipe_stats[I915_MAX_PIPES])
1561 {
1562 	enum pipe pipe;
1563 
1564 	for_each_pipe(dev_priv, pipe) {
1565 		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
1566 			intel_handle_vblank(dev_priv, pipe);
1567 
1568 		if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV)
1569 			flip_done_handler(dev_priv, pipe);
1570 
1571 		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1572 			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1573 
1574 		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1575 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1576 	}
1577 
1578 	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
1579 		gmbus_irq_handler(dev_priv);
1580 }
1581 
1582 static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
1583 {
1584 	u32 hotplug_status = 0, hotplug_status_mask;
1585 	int i;
1586 
1587 	if (IS_G4X(dev_priv) ||
1588 	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1589 		hotplug_status_mask = HOTPLUG_INT_STATUS_G4X |
1590 			DP_AUX_CHANNEL_MASK_INT_STATUS_G4X;
1591 	else
1592 		hotplug_status_mask = HOTPLUG_INT_STATUS_I915;
1593 
1594 	/*
1595 	 * We absolutely have to clear all the pending interrupt
1596 	 * bits in PORT_HOTPLUG_STAT. Otherwise the ISR port
1597 	 * interrupt bit won't have an edge, and the i965/g4x
1598 	 * edge triggered IIR will not notice that an interrupt
1599 	 * is still pending. We can't use PORT_HOTPLUG_EN to
1600 	 * guarantee the edge as the act of toggling the enable
1601 	 * bits can itself generate a new hotplug interrupt :(
1602 	 */
1603 	for (i = 0; i < 10; i++) {
1604 		u32 tmp = intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT) & hotplug_status_mask;
1605 
1606 		if (tmp == 0)
1607 			return hotplug_status;
1608 
1609 		hotplug_status |= tmp;
1610 		intel_uncore_write(&dev_priv->uncore, PORT_HOTPLUG_STAT, hotplug_status);
1611 	}
1612 
1613 	drm_WARN_ONCE(&dev_priv->drm, 1,
1614 		      "PORT_HOTPLUG_STAT did not clear (0x%08x)\n",
1615 		      intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT));
1616 
1617 	return hotplug_status;
1618 }
1619 
1620 static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv,
1621 				 u32 hotplug_status)
1622 {
1623 	u32 pin_mask = 0, long_mask = 0;
1624 	u32 hotplug_trigger;
1625 
1626 	if (IS_G4X(dev_priv) ||
1627 	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1628 		hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
1629 	else
1630 		hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
1631 
1632 	if (hotplug_trigger) {
1633 		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1634 				   hotplug_trigger, hotplug_trigger,
1635 				   dev_priv->display.hotplug.hpd,
1636 				   i9xx_port_hotplug_long_detect);
1637 
1638 		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1639 	}
1640 
1641 	if ((IS_G4X(dev_priv) ||
1642 	     IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
1643 	    hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
1644 		dp_aux_irq_handler(dev_priv);
1645 }
1646 
1647 static irqreturn_t valleyview_irq_handler(int irq, void *arg)
1648 {
1649 	struct drm_i915_private *dev_priv = arg;
1650 	irqreturn_t ret = IRQ_NONE;
1651 
1652 	if (!intel_irqs_enabled(dev_priv))
1653 		return IRQ_NONE;
1654 
1655 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
1656 	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1657 
1658 	do {
1659 		u32 iir, gt_iir, pm_iir;
1660 		u32 pipe_stats[I915_MAX_PIPES] = {};
1661 		u32 hotplug_status = 0;
1662 		u32 ier = 0;
1663 
1664 		gt_iir = intel_uncore_read(&dev_priv->uncore, GTIIR);
1665 		pm_iir = intel_uncore_read(&dev_priv->uncore, GEN6_PMIIR);
1666 		iir = intel_uncore_read(&dev_priv->uncore, VLV_IIR);
1667 
1668 		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
1669 			break;
1670 
1671 		ret = IRQ_HANDLED;
1672 
1673 		/*
1674 		 * Theory on interrupt generation, based on empirical evidence:
1675 		 *
1676 		 * x = ((VLV_IIR & VLV_IER) ||
1677 		 *      (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) &&
1678 		 *       (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE)));
1679 		 *
1680 		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
1681 		 * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to
1682 		 * guarantee the CPU interrupt will be raised again even if we
1683 		 * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR
1684 		 * bits this time around.
1685 		 */
1686 		intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, 0);
1687 		ier = intel_uncore_rmw(&dev_priv->uncore, VLV_IER, ~0, 0);
1688 
1689 		if (gt_iir)
1690 			intel_uncore_write(&dev_priv->uncore, GTIIR, gt_iir);
1691 		if (pm_iir)
1692 			intel_uncore_write(&dev_priv->uncore, GEN6_PMIIR, pm_iir);
1693 
1694 		if (iir & I915_DISPLAY_PORT_INTERRUPT)
1695 			hotplug_status = i9xx_hpd_irq_ack(dev_priv);
1696 
		/*
		 * Call regardless, as some status bits might not be
		 * signalled in iir.
		 */
1699 		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
1700 
1701 		if (iir & (I915_LPE_PIPE_A_INTERRUPT |
1702 			   I915_LPE_PIPE_B_INTERRUPT))
1703 			intel_lpe_audio_irq_handler(dev_priv);
1704 
1705 		/*
1706 		 * VLV_IIR is single buffered, and reflects the level
1707 		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
1708 		 */
1709 		if (iir)
1710 			intel_uncore_write(&dev_priv->uncore, VLV_IIR, iir);
1711 
1712 		intel_uncore_write(&dev_priv->uncore, VLV_IER, ier);
1713 		intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
1714 
1715 		if (gt_iir)
1716 			gen6_gt_irq_handler(to_gt(dev_priv), gt_iir);
1717 		if (pm_iir)
1718 			gen6_rps_irq_handler(&to_gt(dev_priv)->rps, pm_iir);
1719 
1720 		if (hotplug_status)
1721 			i9xx_hpd_irq_handler(dev_priv, hotplug_status);
1722 
1723 		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
1724 	} while (0);
1725 
1726 	pmu_irq_stats(dev_priv, ret);
1727 
1728 	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1729 
1730 	return ret;
1731 }
1732 
1733 static irqreturn_t cherryview_irq_handler(int irq, void *arg)
1734 {
1735 	struct drm_i915_private *dev_priv = arg;
1736 	irqreturn_t ret = IRQ_NONE;
1737 
1738 	if (!intel_irqs_enabled(dev_priv))
1739 		return IRQ_NONE;
1740 
1741 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
1742 	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1743 
1744 	do {
1745 		u32 master_ctl, iir;
1746 		u32 pipe_stats[I915_MAX_PIPES] = {};
1747 		u32 hotplug_status = 0;
1748 		u32 ier = 0;
1749 
1750 		master_ctl = intel_uncore_read(&dev_priv->uncore, GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
1751 		iir = intel_uncore_read(&dev_priv->uncore, VLV_IIR);
1752 
1753 		if (master_ctl == 0 && iir == 0)
1754 			break;
1755 
1756 		ret = IRQ_HANDLED;
1757 
1758 		/*
1759 		 * Theory on interrupt generation, based on empirical evidence:
1760 		 *
1761 		 * x = ((VLV_IIR & VLV_IER) ||
1762 		 *      ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) &&
1763 		 *       (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL)));
1764 		 *
1765 		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
1766 		 * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to
1767 		 * guarantee the CPU interrupt will be raised again even if we
1768 		 * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL
1769 		 * bits this time around.
1770 		 */
1771 		intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, 0);
1772 		ier = intel_uncore_rmw(&dev_priv->uncore, VLV_IER, ~0, 0);
1773 
1774 		gen8_gt_irq_handler(to_gt(dev_priv), master_ctl);
1775 
1776 		if (iir & I915_DISPLAY_PORT_INTERRUPT)
1777 			hotplug_status = i9xx_hpd_irq_ack(dev_priv);
1778 
		/*
		 * Call regardless, as some status bits might not be
		 * signalled in iir.
		 */
1781 		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
1782 
1783 		if (iir & (I915_LPE_PIPE_A_INTERRUPT |
1784 			   I915_LPE_PIPE_B_INTERRUPT |
1785 			   I915_LPE_PIPE_C_INTERRUPT))
1786 			intel_lpe_audio_irq_handler(dev_priv);
1787 
1788 		/*
1789 		 * VLV_IIR is single buffered, and reflects the level
1790 		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
1791 		 */
1792 		if (iir)
1793 			intel_uncore_write(&dev_priv->uncore, VLV_IIR, iir);
1794 
1795 		intel_uncore_write(&dev_priv->uncore, VLV_IER, ier);
1796 		intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
1797 
1798 		if (hotplug_status)
1799 			i9xx_hpd_irq_handler(dev_priv, hotplug_status);
1800 
1801 		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
1802 	} while (0);
1803 
1804 	pmu_irq_stats(dev_priv, ret);
1805 
1806 	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1807 
1808 	return ret;
1809 }
1810 
1811 static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
1812 				u32 hotplug_trigger)
1813 {
1814 	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
1815 
1816 	/*
1817 	 * Somehow the PCH doesn't seem to really ack the interrupt to the CPU
1818 	 * unless we touch the hotplug register, even if hotplug_trigger is
1819 	 * zero. Not acking leads to "The master control interrupt lied (SDE)!"
1820 	 * errors.
1821 	 */
1822 	dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
1823 	if (!hotplug_trigger) {
1824 		u32 mask = PORTA_HOTPLUG_STATUS_MASK |
1825 			PORTD_HOTPLUG_STATUS_MASK |
1826 			PORTC_HOTPLUG_STATUS_MASK |
1827 			PORTB_HOTPLUG_STATUS_MASK;
1828 		dig_hotplug_reg &= ~mask;
1829 	}
1830 
1831 	intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, dig_hotplug_reg);
1832 	if (!hotplug_trigger)
1833 		return;
1834 
1835 	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1836 			   hotplug_trigger, dig_hotplug_reg,
1837 			   dev_priv->display.hotplug.pch_hpd,
1838 			   pch_port_hotplug_long_detect);
1839 
1840 	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1841 }
1842 
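/*
 * Demultiplex the IBX south display engine interrupts: hotplug, AUX,
 * GMBUS, audio, FDI and the per-transcoder FIFO underrun sources.
 */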
1843 static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
1844 {
1845 	enum pipe pipe;
1846 	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
1847 
1848 	ibx_hpd_irq_handler(dev_priv, hotplug_trigger);
1849 
1850 	if (pch_iir & SDE_AUDIO_POWER_MASK) {
1851 		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
1852 			       SDE_AUDIO_POWER_SHIFT);
		drm_dbg(&dev_priv->drm, "PCH audio power change on port %c\n",
			port_name(port));
1855 	}
1856 
1857 	if (pch_iir & SDE_AUX_MASK)
1858 		dp_aux_irq_handler(dev_priv);
1859 
1860 	if (pch_iir & SDE_GMBUS)
1861 		gmbus_irq_handler(dev_priv);
1862 
1863 	if (pch_iir & SDE_AUDIO_HDCP_MASK)
1864 		drm_dbg(&dev_priv->drm, "PCH HDCP audio interrupt\n");
1865 
1866 	if (pch_iir & SDE_AUDIO_TRANS_MASK)
1867 		drm_dbg(&dev_priv->drm, "PCH transcoder audio interrupt\n");
1868 
1869 	if (pch_iir & SDE_POISON)
1870 		drm_err(&dev_priv->drm, "PCH poison interrupt\n");
1871 
1872 	if (pch_iir & SDE_FDI_MASK) {
1873 		for_each_pipe(dev_priv, pipe)
1874 			drm_dbg(&dev_priv->drm, "  pipe %c FDI IIR: 0x%08x\n",
1875 				pipe_name(pipe),
1876 				intel_uncore_read(&dev_priv->uncore, FDI_RX_IIR(pipe)));
1877 	}
1878 
1879 	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
1880 		drm_dbg(&dev_priv->drm, "PCH transcoder CRC done interrupt\n");
1881 
1882 	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
1883 		drm_dbg(&dev_priv->drm,
1884 			"PCH transcoder CRC error interrupt\n");
1885 
1886 	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
1887 		intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A);
1888 
1889 	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
1890 		intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_B);
1891 }
1892 
1893 static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
1894 {
1895 	u32 err_int = intel_uncore_read(&dev_priv->uncore, GEN7_ERR_INT);
1896 	enum pipe pipe;
1897 
1898 	if (err_int & ERR_INT_POISON)
1899 		drm_err(&dev_priv->drm, "Poison interrupt\n");
1900 
1901 	for_each_pipe(dev_priv, pipe) {
1902 		if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
1903 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1904 
1905 		if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
1906 			if (IS_IVYBRIDGE(dev_priv))
1907 				ivb_pipe_crc_irq_handler(dev_priv, pipe);
1908 			else
1909 				hsw_pipe_crc_irq_handler(dev_priv, pipe);
1910 		}
1911 	}
1912 
1913 	intel_uncore_write(&dev_priv->uncore, GEN7_ERR_INT, err_int);
1914 }
1915 
1916 static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
1917 {
1918 	u32 serr_int = intel_uncore_read(&dev_priv->uncore, SERR_INT);
1919 	enum pipe pipe;
1920 
1921 	if (serr_int & SERR_INT_POISON)
1922 		drm_err(&dev_priv->drm, "PCH poison interrupt\n");
1923 
1924 	for_each_pipe(dev_priv, pipe)
1925 		if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe))
1926 			intel_pch_fifo_underrun_irq_handler(dev_priv, pipe);
1927 
1928 	intel_uncore_write(&dev_priv->uncore, SERR_INT, serr_int);
1929 }
1930 
1931 static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
1932 {
1933 	enum pipe pipe;
1934 	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
1935 
1936 	ibx_hpd_irq_handler(dev_priv, hotplug_trigger);
1937 
1938 	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
1939 		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
1940 			       SDE_AUDIO_POWER_SHIFT_CPT);
1941 		drm_dbg(&dev_priv->drm, "PCH audio power change on port %c\n",
1942 			port_name(port));
1943 	}
1944 
1945 	if (pch_iir & SDE_AUX_MASK_CPT)
1946 		dp_aux_irq_handler(dev_priv);
1947 
1948 	if (pch_iir & SDE_GMBUS_CPT)
1949 		gmbus_irq_handler(dev_priv);
1950 
1951 	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
1952 		drm_dbg(&dev_priv->drm, "Audio CP request interrupt\n");
1953 
1954 	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
1955 		drm_dbg(&dev_priv->drm, "Audio CP change interrupt\n");
1956 
1957 	if (pch_iir & SDE_FDI_MASK_CPT) {
1958 		for_each_pipe(dev_priv, pipe)
1959 			drm_dbg(&dev_priv->drm, "  pipe %c FDI IIR: 0x%08x\n",
1960 				pipe_name(pipe),
1961 				intel_uncore_read(&dev_priv->uncore, FDI_RX_IIR(pipe)));
1962 	}
1963 
1964 	if (pch_iir & SDE_ERROR_CPT)
1965 		cpt_serr_int_handler(dev_priv);
1966 }
1967 
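/*
 * ICP+ south interrupt handler. DDI and Type-C hotplug have separate
 * trigger bits and SHOTPLUG_CTL registers; the rmw(..., 0, 0) reads each
 * register and writes the value back, clearing the sticky detect status
 * bits before the pins are decoded.
 */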
1968 static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
1969 {
1970 	u32 ddi_hotplug_trigger = pch_iir & SDE_DDI_HOTPLUG_MASK_ICP;
1971 	u32 tc_hotplug_trigger = pch_iir & SDE_TC_HOTPLUG_MASK_ICP;
1972 	u32 pin_mask = 0, long_mask = 0;
1973 
1974 	if (ddi_hotplug_trigger) {
1975 		u32 dig_hotplug_reg;
1976 
1977 		dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, SHOTPLUG_CTL_DDI, 0, 0);
1978 
1979 		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1980 				   ddi_hotplug_trigger, dig_hotplug_reg,
1981 				   dev_priv->display.hotplug.pch_hpd,
1982 				   icp_ddi_port_hotplug_long_detect);
1983 	}
1984 
1985 	if (tc_hotplug_trigger) {
1986 		u32 dig_hotplug_reg;
1987 
1988 		dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, SHOTPLUG_CTL_TC, 0, 0);
1989 
1990 		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1991 				   tc_hotplug_trigger, dig_hotplug_reg,
1992 				   dev_priv->display.hotplug.pch_hpd,
1993 				   icp_tc_port_hotplug_long_detect);
1994 	}
1995 
1996 	if (pin_mask)
1997 		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1998 
1999 	if (pch_iir & SDE_GMBUS_ICP)
2000 		gmbus_irq_handler(dev_priv);
2001 }
2002 
2003 static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
2004 {
2005 	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
2006 		~SDE_PORTE_HOTPLUG_SPT;
2007 	u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
2008 	u32 pin_mask = 0, long_mask = 0;
2009 
2010 	if (hotplug_trigger) {
2011 		u32 dig_hotplug_reg;
2012 
2013 		dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG, 0, 0);
2014 
2015 		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2016 				   hotplug_trigger, dig_hotplug_reg,
2017 				   dev_priv->display.hotplug.pch_hpd,
2018 				   spt_port_hotplug_long_detect);
2019 	}
2020 
2021 	if (hotplug2_trigger) {
2022 		u32 dig_hotplug_reg;
2023 
2024 		dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG2, 0, 0);
2025 
2026 		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2027 				   hotplug2_trigger, dig_hotplug_reg,
2028 				   dev_priv->display.hotplug.pch_hpd,
2029 				   spt_port_hotplug2_long_detect);
2030 	}
2031 
2032 	if (pin_mask)
2033 		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2034 
2035 	if (pch_iir & SDE_GMBUS_CPT)
2036 		gmbus_irq_handler(dev_priv);
2037 }
2038 
2039 static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv,
2040 				u32 hotplug_trigger)
2041 {
2042 	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2043 
2044 	dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL, 0, 0);
2045 
2046 	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2047 			   hotplug_trigger, dig_hotplug_reg,
2048 			   dev_priv->display.hotplug.hpd,
2049 			   ilk_port_hotplug_long_detect);
2050 
2051 	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2052 }
2053 
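/*
 * ILK/SNB display interrupt handler: decode the DE IIR bits for hotplug,
 * AUX, opregion (GSE), poison, per-pipe events, chained PCH (SDE)
 * interrupts, and the ILK-only PCU event used by RPS.
 */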
2054 static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
2055 				    u32 de_iir)
2056 {
2057 	enum pipe pipe;
2058 	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;
2059 
2060 	if (hotplug_trigger)
2061 		ilk_hpd_irq_handler(dev_priv, hotplug_trigger);
2062 
2063 	if (de_iir & DE_AUX_CHANNEL_A)
2064 		dp_aux_irq_handler(dev_priv);
2065 
2066 	if (de_iir & DE_GSE)
2067 		intel_opregion_asle_intr(dev_priv);
2068 
2069 	if (de_iir & DE_POISON)
2070 		drm_err(&dev_priv->drm, "Poison interrupt\n");
2071 
2072 	for_each_pipe(dev_priv, pipe) {
2073 		if (de_iir & DE_PIPE_VBLANK(pipe))
2074 			intel_handle_vblank(dev_priv, pipe);
2075 
2076 		if (de_iir & DE_PLANE_FLIP_DONE(pipe))
2077 			flip_done_handler(dev_priv, pipe);
2078 
2079 		if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
2080 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2081 
2082 		if (de_iir & DE_PIPE_CRC_DONE(pipe))
2083 			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
2084 	}
2085 
2086 	/* check event from PCH */
2087 	if (de_iir & DE_PCH_EVENT) {
2088 		u32 pch_iir = intel_uncore_read(&dev_priv->uncore, SDEIIR);
2089 
2090 		if (HAS_PCH_CPT(dev_priv))
2091 			cpt_irq_handler(dev_priv, pch_iir);
2092 		else
2093 			ibx_irq_handler(dev_priv, pch_iir);
2094 
		/* should clear PCH hotplug event before clearing CPU irq */
2096 		intel_uncore_write(&dev_priv->uncore, SDEIIR, pch_iir);
2097 	}
2098 
2099 	if (DISPLAY_VER(dev_priv) == 5 && de_iir & DE_PCU_EVENT)
2100 		gen5_rps_irq_handler(&to_gt(dev_priv)->rps);
2101 }
2102 
2103 static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
2104 				    u32 de_iir)
2105 {
2106 	enum pipe pipe;
2107 	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;
2108 
2109 	if (hotplug_trigger)
2110 		ilk_hpd_irq_handler(dev_priv, hotplug_trigger);
2111 
2112 	if (de_iir & DE_ERR_INT_IVB)
2113 		ivb_err_int_handler(dev_priv);
2114 
2115 	if (de_iir & DE_AUX_CHANNEL_A_IVB)
2116 		dp_aux_irq_handler(dev_priv);
2117 
2118 	if (de_iir & DE_GSE_IVB)
2119 		intel_opregion_asle_intr(dev_priv);
2120 
2121 	for_each_pipe(dev_priv, pipe) {
2122 		if (de_iir & DE_PIPE_VBLANK_IVB(pipe))
2123 			intel_handle_vblank(dev_priv, pipe);
2124 
2125 		if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe))
2126 			flip_done_handler(dev_priv, pipe);
2127 	}
2128 
2129 	/* check event from PCH */
2130 	if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) {
2131 		u32 pch_iir = intel_uncore_read(&dev_priv->uncore, SDEIIR);
2132 
2133 		cpt_irq_handler(dev_priv, pch_iir);
2134 
		/* clear PCH hotplug event before clearing CPU irq */
2136 		intel_uncore_write(&dev_priv->uncore, SDEIIR, pch_iir);
2137 	}
2138 }
2139 
2140 /*
2141  * To handle irqs with the minimum potential races with fresh interrupts, we:
2142  * 1 - Disable Master Interrupt Control.
2143  * 2 - Find the source(s) of the interrupt.
2144  * 3 - Clear the Interrupt Identity bits (IIR).
2145  * 4 - Process the interrupt(s) that had bits set in the IIRs.
2146  * 5 - Re-enable Master Interrupt Control.
2147  */
2148 static irqreturn_t ilk_irq_handler(int irq, void *arg)
2149 {
2150 	struct drm_i915_private *i915 = arg;
2151 	void __iomem * const regs = i915->uncore.regs;
2152 	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
2153 	irqreturn_t ret = IRQ_NONE;
2154 
2155 	if (unlikely(!intel_irqs_enabled(i915)))
2156 		return IRQ_NONE;
2157 
2158 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
2159 	disable_rpm_wakeref_asserts(&i915->runtime_pm);
2160 
2161 	/* disable master interrupt before clearing iir  */
2162 	de_ier = raw_reg_read(regs, DEIER);
2163 	raw_reg_write(regs, DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
2164 
	/*
	 * Disable south interrupts. We'll only write to SDEIIR once, so further
	 * interrupts will be stored on its back queue, and then we'll be
	 * able to process them after we restore SDEIER (as soon as we restore
	 * it, we'll get an interrupt if SDEIIR still has something to process
	 * due to its back queue).
	 */
2170 	if (!HAS_PCH_NOP(i915)) {
2171 		sde_ier = raw_reg_read(regs, SDEIER);
2172 		raw_reg_write(regs, SDEIER, 0);
2173 	}
2174 
2175 	/* Find, clear, then process each source of interrupt */
2176 
2177 	gt_iir = raw_reg_read(regs, GTIIR);
2178 	if (gt_iir) {
2179 		raw_reg_write(regs, GTIIR, gt_iir);
2180 		if (GRAPHICS_VER(i915) >= 6)
2181 			gen6_gt_irq_handler(to_gt(i915), gt_iir);
2182 		else
2183 			gen5_gt_irq_handler(to_gt(i915), gt_iir);
2184 		ret = IRQ_HANDLED;
2185 	}
2186 
2187 	de_iir = raw_reg_read(regs, DEIIR);
2188 	if (de_iir) {
2189 		raw_reg_write(regs, DEIIR, de_iir);
2190 		if (DISPLAY_VER(i915) >= 7)
2191 			ivb_display_irq_handler(i915, de_iir);
2192 		else
2193 			ilk_display_irq_handler(i915, de_iir);
2194 		ret = IRQ_HANDLED;
2195 	}
2196 
2197 	if (GRAPHICS_VER(i915) >= 6) {
2198 		u32 pm_iir = raw_reg_read(regs, GEN6_PMIIR);
2199 		if (pm_iir) {
2200 			raw_reg_write(regs, GEN6_PMIIR, pm_iir);
2201 			gen6_rps_irq_handler(&to_gt(i915)->rps, pm_iir);
2202 			ret = IRQ_HANDLED;
2203 		}
2204 	}
2205 
2206 	raw_reg_write(regs, DEIER, de_ier);
2207 	if (sde_ier)
2208 		raw_reg_write(regs, SDEIER, sde_ier);
2209 
2210 	pmu_irq_stats(i915, ret);
2211 
2212 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
2213 	enable_rpm_wakeref_asserts(&i915->runtime_pm);
2214 
2215 	return ret;
2216 }
2217 
2218 static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv,
2219 				u32 hotplug_trigger)
2220 {
2221 	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2222 
2223 	dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG, 0, 0);
2224 
2225 	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2226 			   hotplug_trigger, dig_hotplug_reg,
2227 			   dev_priv->display.hotplug.hpd,
2228 			   bxt_port_hotplug_long_detect);
2229 
2230 	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2231 }
2232 
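/*
 * Gen11+ DE HPD handler: Type-C and Thunderbolt hotplug use separate
 * trigger bits and HOTPLUG_CTL registers. Reading the CTL register and
 * writing the value back clears the sticky long/short detect bits before
 * the pins are decoded.
 */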
2233 static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
2234 {
2235 	u32 pin_mask = 0, long_mask = 0;
2236 	u32 trigger_tc = iir & GEN11_DE_TC_HOTPLUG_MASK;
2237 	u32 trigger_tbt = iir & GEN11_DE_TBT_HOTPLUG_MASK;
2238 
2239 	if (trigger_tc) {
2240 		u32 dig_hotplug_reg;
2241 
2242 		dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL, 0, 0);
2243 
2244 		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2245 				   trigger_tc, dig_hotplug_reg,
2246 				   dev_priv->display.hotplug.hpd,
2247 				   gen11_port_hotplug_long_detect);
2248 	}
2249 
2250 	if (trigger_tbt) {
2251 		u32 dig_hotplug_reg;
2252 
2253 		dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL, 0, 0);
2254 
2255 		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2256 				   trigger_tbt, dig_hotplug_reg,
2257 				   dev_priv->display.hotplug.hpd,
2258 				   gen11_port_hotplug_long_detect);
2259 	}
2260 
2261 	if (pin_mask)
2262 		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2263 	else
2264 		drm_err(&dev_priv->drm,
2265 			"Unexpected DE HPD interrupt 0x%08x\n", iir);
2266 }
2267 
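/*
 * Mask of DE port interrupt bits that signal DP AUX completion for the
 * current display version: DDI + USB-C AUX on TGL and later, AUX A-F on
 * earlier platforms.
 */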
2268 static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
2269 {
2270 	u32 mask;
2271 
2272 	if (DISPLAY_VER(dev_priv) >= 13)
2273 		return TGL_DE_PORT_AUX_DDIA |
2274 			TGL_DE_PORT_AUX_DDIB |
2275 			TGL_DE_PORT_AUX_DDIC |
2276 			XELPD_DE_PORT_AUX_DDID |
2277 			XELPD_DE_PORT_AUX_DDIE |
2278 			TGL_DE_PORT_AUX_USBC1 |
2279 			TGL_DE_PORT_AUX_USBC2 |
2280 			TGL_DE_PORT_AUX_USBC3 |
2281 			TGL_DE_PORT_AUX_USBC4;
2282 	else if (DISPLAY_VER(dev_priv) >= 12)
2283 		return TGL_DE_PORT_AUX_DDIA |
2284 			TGL_DE_PORT_AUX_DDIB |
2285 			TGL_DE_PORT_AUX_DDIC |
2286 			TGL_DE_PORT_AUX_USBC1 |
2287 			TGL_DE_PORT_AUX_USBC2 |
2288 			TGL_DE_PORT_AUX_USBC3 |
2289 			TGL_DE_PORT_AUX_USBC4 |
2290 			TGL_DE_PORT_AUX_USBC5 |
2291 			TGL_DE_PORT_AUX_USBC6;
2294 	mask = GEN8_AUX_CHANNEL_A;
2295 	if (DISPLAY_VER(dev_priv) >= 9)
2296 		mask |= GEN9_AUX_CHANNEL_B |
2297 			GEN9_AUX_CHANNEL_C |
2298 			GEN9_AUX_CHANNEL_D;
2299 
2300 	if (DISPLAY_VER(dev_priv) == 11) {
2301 		mask |= ICL_AUX_CHANNEL_F;
2302 		mask |= ICL_AUX_CHANNEL_E;
2303 	}
2304 
2305 	return mask;
2306 }
2307 
2308 static u32 gen8_de_pipe_fault_mask(struct drm_i915_private *dev_priv)
2309 {
2310 	if (DISPLAY_VER(dev_priv) >= 13 || HAS_D12_PLANE_MINIMIZATION(dev_priv))
2311 		return RKL_DE_PIPE_IRQ_FAULT_ERRORS;
2312 	else if (DISPLAY_VER(dev_priv) >= 11)
2313 		return GEN11_DE_PIPE_IRQ_FAULT_ERRORS;
2314 	else if (DISPLAY_VER(dev_priv) >= 9)
2315 		return GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
2316 	else
2317 		return GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
2318 }
2319 
2320 static void
2321 gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
2322 {
2323 	bool found = false;
2324 
2325 	if (iir & GEN8_DE_MISC_GSE) {
2326 		intel_opregion_asle_intr(dev_priv);
2327 		found = true;
2328 	}
2329 
2330 	if (iir & GEN8_DE_EDP_PSR) {
2331 		struct intel_encoder *encoder;
2332 		u32 psr_iir;
2333 		i915_reg_t iir_reg;
2334 
2335 		for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
2336 			struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2337 
2338 			if (DISPLAY_VER(dev_priv) >= 12)
2339 				iir_reg = TRANS_PSR_IIR(intel_dp->psr.transcoder);
2340 			else
2341 				iir_reg = EDP_PSR_IIR;
2342 
2343 			psr_iir = intel_uncore_rmw(&dev_priv->uncore, iir_reg, 0, 0);
2344 
2345 			if (psr_iir)
2346 				found = true;
2347 
2348 			intel_psr_irq_handler(intel_dp, psr_iir);
2349 
			/* prior to GEN12 there is only one EDP PSR */
2351 			if (DISPLAY_VER(dev_priv) < 12)
2352 				break;
2353 		}
2354 	}
2355 
2356 	if (!found)
2357 		drm_err(&dev_priv->drm, "Unexpected DE Misc interrupt\n");
2358 }
2359 
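/*
 * Handle a tearing-effect (TE) interrupt from a command mode DSI panel:
 * determine which DSI transcoder and pipe the TE belongs to, forward it
 * as a vblank event, and clear the TE bit in the DSI IIR.
 */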
2360 static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv,
2361 					   u32 te_trigger)
2362 {
2363 	enum pipe pipe = INVALID_PIPE;
2364 	enum transcoder dsi_trans;
2365 	enum port port;
2366 	u32 val, tmp;
2367 
	/*
	 * In case of dual link, TE comes from DSI_1;
	 * this is to check whether dual link is enabled.
	 */
2372 	val = intel_uncore_read(&dev_priv->uncore, TRANS_DDI_FUNC_CTL2(TRANSCODER_DSI_0));
2373 	val &= PORT_SYNC_MODE_ENABLE;
2374 
	/*
	 * If dual link is enabled, read the DSI_0
	 * transcoder registers.
	 */
2379 	port = ((te_trigger & DSI1_TE && val) || (te_trigger & DSI0_TE)) ?
2380 						  PORT_A : PORT_B;
2381 	dsi_trans = (port == PORT_A) ? TRANSCODER_DSI_0 : TRANSCODER_DSI_1;
2382 
	/* Check if DSI is configured in command mode */
2384 	val = intel_uncore_read(&dev_priv->uncore, DSI_TRANS_FUNC_CONF(dsi_trans));
2385 	val = val & OP_MODE_MASK;
2386 
2387 	if (val != CMD_MODE_NO_GATE && val != CMD_MODE_TE_GATE) {
		drm_err(&dev_priv->drm, "DSI transcoder not configured in command mode\n");
2389 		return;
2390 	}
2391 
2392 	/* Get PIPE for handling VBLANK event */
2393 	val = intel_uncore_read(&dev_priv->uncore, TRANS_DDI_FUNC_CTL(dsi_trans));
2394 	switch (val & TRANS_DDI_EDP_INPUT_MASK) {
2395 	case TRANS_DDI_EDP_INPUT_A_ON:
2396 		pipe = PIPE_A;
2397 		break;
2398 	case TRANS_DDI_EDP_INPUT_B_ONOFF:
2399 		pipe = PIPE_B;
2400 		break;
2401 	case TRANS_DDI_EDP_INPUT_C_ONOFF:
2402 		pipe = PIPE_C;
2403 		break;
2404 	default:
2405 		drm_err(&dev_priv->drm, "Invalid PIPE\n");
2406 		return;
2407 	}
2408 
2409 	intel_handle_vblank(dev_priv, pipe);
2410 
2411 	/* clear TE in dsi IIR */
2412 	port = (te_trigger & DSI1_TE) ? PORT_B : PORT_A;
2413 	tmp = intel_uncore_rmw(&dev_priv->uncore, DSI_INTR_IDENT_REG(port), 0, 0);
2414 }
2415 
2416 static u32 gen8_de_pipe_flip_done_mask(struct drm_i915_private *i915)
2417 {
2418 	if (DISPLAY_VER(i915) >= 9)
2419 		return GEN9_PIPE_PLANE1_FLIP_DONE;
2420 	else
2421 		return GEN8_PIPE_PRIMARY_FLIP_DONE;
2422 }
2423 
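/*
 * XELPD+ additionally distinguishes "soft" and "hard" pipe underruns;
 * include those bits so either flavour is treated as a FIFO underrun.
 */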
2424 u32 gen8_de_pipe_underrun_mask(struct drm_i915_private *dev_priv)
2425 {
2426 	u32 mask = GEN8_PIPE_FIFO_UNDERRUN;
2427 
2428 	if (DISPLAY_VER(dev_priv) >= 13)
2429 		mask |= XELPD_PIPE_SOFT_UNDERRUN |
2430 			XELPD_PIPE_HARD_UNDERRUN;
2431 
2432 	return mask;
2433 }
2434 
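/*
 * Main Gen8+ display engine interrupt handler. master_ctl indicates which
 * DE IIR banks (misc, HPD, port, per-pipe, PCH) are pending; each bank is
 * read, cleared and then processed. An IIR that reads zero despite its
 * master_ctl bit being set is reported as the master control "lying".
 */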
2435 static irqreturn_t
2436 gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
2437 {
2438 	irqreturn_t ret = IRQ_NONE;
2439 	u32 iir;
2440 	enum pipe pipe;
2441 
2442 	drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_DISPLAY(dev_priv));
2443 
2444 	if (master_ctl & GEN8_DE_MISC_IRQ) {
2445 		iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_MISC_IIR);
2446 		if (iir) {
2447 			intel_uncore_write(&dev_priv->uncore, GEN8_DE_MISC_IIR, iir);
2448 			ret = IRQ_HANDLED;
2449 			gen8_de_misc_irq_handler(dev_priv, iir);
2450 		} else {
2451 			drm_err(&dev_priv->drm,
2452 				"The master control interrupt lied (DE MISC)!\n");
2453 		}
2454 	}
2455 
2456 	if (DISPLAY_VER(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) {
2457 		iir = intel_uncore_read(&dev_priv->uncore, GEN11_DE_HPD_IIR);
2458 		if (iir) {
2459 			intel_uncore_write(&dev_priv->uncore, GEN11_DE_HPD_IIR, iir);
2460 			ret = IRQ_HANDLED;
2461 			gen11_hpd_irq_handler(dev_priv, iir);
2462 		} else {
2463 			drm_err(&dev_priv->drm,
				"The master control interrupt lied (DE HPD)!\n");
2465 		}
2466 	}
2467 
2468 	if (master_ctl & GEN8_DE_PORT_IRQ) {
2469 		iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PORT_IIR);
2470 		if (iir) {
2471 			bool found = false;
2472 
2473 			intel_uncore_write(&dev_priv->uncore, GEN8_DE_PORT_IIR, iir);
2474 			ret = IRQ_HANDLED;
2475 
2476 			if (iir & gen8_de_port_aux_mask(dev_priv)) {
2477 				dp_aux_irq_handler(dev_priv);
2478 				found = true;
2479 			}
2480 
2481 			if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
2482 				u32 hotplug_trigger = iir & BXT_DE_PORT_HOTPLUG_MASK;
2483 
2484 				if (hotplug_trigger) {
2485 					bxt_hpd_irq_handler(dev_priv, hotplug_trigger);
2486 					found = true;
2487 				}
2488 			} else if (IS_BROADWELL(dev_priv)) {
2489 				u32 hotplug_trigger = iir & BDW_DE_PORT_HOTPLUG_MASK;
2490 
2491 				if (hotplug_trigger) {
2492 					ilk_hpd_irq_handler(dev_priv, hotplug_trigger);
2493 					found = true;
2494 				}
2495 			}
2496 
2497 			if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
2498 			    (iir & BXT_DE_PORT_GMBUS)) {
2499 				gmbus_irq_handler(dev_priv);
2500 				found = true;
2501 			}
2502 
2503 			if (DISPLAY_VER(dev_priv) >= 11) {
2504 				u32 te_trigger = iir & (DSI0_TE | DSI1_TE);
2505 
2506 				if (te_trigger) {
2507 					gen11_dsi_te_interrupt_handler(dev_priv, te_trigger);
2508 					found = true;
2509 				}
2510 			}
2511 
2512 			if (!found)
2513 				drm_err(&dev_priv->drm,
2514 					"Unexpected DE Port interrupt\n");
		} else
2517 			drm_err(&dev_priv->drm,
2518 				"The master control interrupt lied (DE PORT)!\n");
2519 	}
2520 
2521 	for_each_pipe(dev_priv, pipe) {
2522 		u32 fault_errors;
2523 
2524 		if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
2525 			continue;
2526 
2527 		iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PIPE_IIR(pipe));
2528 		if (!iir) {
2529 			drm_err(&dev_priv->drm,
2530 				"The master control interrupt lied (DE PIPE)!\n");
2531 			continue;
2532 		}
2533 
2534 		ret = IRQ_HANDLED;
2535 		intel_uncore_write(&dev_priv->uncore, GEN8_DE_PIPE_IIR(pipe), iir);
2536 
2537 		if (iir & GEN8_PIPE_VBLANK)
2538 			intel_handle_vblank(dev_priv, pipe);
2539 
2540 		if (iir & gen8_de_pipe_flip_done_mask(dev_priv))
2541 			flip_done_handler(dev_priv, pipe);
2542 
2543 		if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
2544 			hsw_pipe_crc_irq_handler(dev_priv, pipe);
2545 
2546 		if (iir & gen8_de_pipe_underrun_mask(dev_priv))
2547 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2548 
2549 		fault_errors = iir & gen8_de_pipe_fault_mask(dev_priv);
2550 		if (fault_errors)
2551 			drm_err(&dev_priv->drm,
2552 				"Fault errors on pipe %c: 0x%08x\n",
2553 				pipe_name(pipe),
2554 				fault_errors);
2555 	}
2556 
2557 	if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) &&
2558 	    master_ctl & GEN8_DE_PCH_IRQ) {
2559 		/*
2560 		 * FIXME(BDW): Assume for now that the new interrupt handling
2561 		 * scheme also closed the SDE interrupt handling race we've seen
2562 		 * on older pch-split platforms. But this needs testing.
2563 		 */
2564 		iir = intel_uncore_read(&dev_priv->uncore, SDEIIR);
2565 		if (iir) {
2566 			intel_uncore_write(&dev_priv->uncore, SDEIIR, iir);
2567 			ret = IRQ_HANDLED;
2568 
2569 			if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
2570 				icp_irq_handler(dev_priv, iir);
2571 			else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
2572 				spt_irq_handler(dev_priv, iir);
2573 			else
2574 				cpt_irq_handler(dev_priv, iir);
2575 		} else {
2576 			/*
2577 			 * Like on previous PCH there seems to be something
2578 			 * fishy going on with forwarding PCH interrupts.
2579 			 */
2580 			drm_dbg(&dev_priv->drm,
2581 				"The master control interrupt lied (SDE)!\n");
2582 		}
2583 	}
2584 
2585 	return ret;
2586 }
2587 
2588 static inline u32 gen8_master_intr_disable(void __iomem * const regs)
2589 {
2590 	raw_reg_write(regs, GEN8_MASTER_IRQ, 0);
2591 
2592 	/*
2593 	 * Now with master disabled, get a sample of level indications
2594 	 * for this interrupt. Indications will be cleared on related acks.
2595 	 * New indications can and will light up during processing,
	 * and will generate a new interrupt after enabling the master.
2597 	 */
2598 	return raw_reg_read(regs, GEN8_MASTER_IRQ);
2599 }
2600 
2601 static inline void gen8_master_intr_enable(void __iomem * const regs)
2602 {
2603 	raw_reg_write(regs, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
2604 }
2605 
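/*
 * Top level Gen8 interrupt handler: disable the master interrupt, handle
 * GT sources, then display sources (the latter with rpm wakeref asserts
 * suppressed, since IRQs are synced during runtime suspend), and finally
 * re-enable the master interrupt.
 */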
2606 static irqreturn_t gen8_irq_handler(int irq, void *arg)
2607 {
2608 	struct drm_i915_private *dev_priv = arg;
2609 	void __iomem * const regs = dev_priv->uncore.regs;
2610 	u32 master_ctl;
2611 
2612 	if (!intel_irqs_enabled(dev_priv))
2613 		return IRQ_NONE;
2614 
2615 	master_ctl = gen8_master_intr_disable(regs);
2616 	if (!master_ctl) {
2617 		gen8_master_intr_enable(regs);
2618 		return IRQ_NONE;
2619 	}
2620 
2621 	/* Find, queue (onto bottom-halves), then clear each source */
2622 	gen8_gt_irq_handler(to_gt(dev_priv), master_ctl);
2623 
2624 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
2625 	if (master_ctl & ~GEN8_GT_IRQS) {
2626 		disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
2627 		gen8_de_irq_handler(dev_priv, master_ctl);
2628 		enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
2629 	}
2630 
2631 	gen8_master_intr_enable(regs);
2632 
2633 	pmu_irq_stats(dev_priv, IRQ_HANDLED);
2634 
2635 	return IRQ_HANDLED;
2636 }
2637 
2638 static u32
2639 gen11_gu_misc_irq_ack(struct drm_i915_private *i915, const u32 master_ctl)
2640 {
2641 	void __iomem * const regs = i915->uncore.regs;
2642 	u32 iir;
2643 
2644 	if (!(master_ctl & GEN11_GU_MISC_IRQ))
2645 		return 0;
2646 
2647 	iir = raw_reg_read(regs, GEN11_GU_MISC_IIR);
2648 	if (likely(iir))
2649 		raw_reg_write(regs, GEN11_GU_MISC_IIR, iir);
2650 
2651 	return iir;
2652 }
2653 
2654 static void
2655 gen11_gu_misc_irq_handler(struct drm_i915_private *i915, const u32 iir)
2656 {
2657 	if (iir & GEN11_GU_MISC_GSE)
2658 		intel_opregion_asle_intr(i915);
2659 }
2660 
2661 static inline u32 gen11_master_intr_disable(void __iomem * const regs)
2662 {
2663 	raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0);
2664 
2665 	/*
2666 	 * Now with master disabled, get a sample of level indications
2667 	 * for this interrupt. Indications will be cleared on related acks.
2668 	 * New indications can and will light up during processing,
	 * and will generate a new interrupt after enabling the master.
2670 	 */
2671 	return raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
2672 }
2673 
2674 static inline void gen11_master_intr_enable(void __iomem * const regs)
2675 {
2676 	raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ);
2677 }
2678 
2679 static void
2680 gen11_display_irq_handler(struct drm_i915_private *i915)
2681 {
2682 	void __iomem * const regs = i915->uncore.regs;
2683 	const u32 disp_ctl = raw_reg_read(regs, GEN11_DISPLAY_INT_CTL);
2684 
2685 	disable_rpm_wakeref_asserts(&i915->runtime_pm);
2686 	/*
2687 	 * GEN11_DISPLAY_INT_CTL has same format as GEN8_MASTER_IRQ
2688 	 * for the display related bits.
2689 	 */
2690 	raw_reg_write(regs, GEN11_DISPLAY_INT_CTL, 0x0);
2691 	gen8_de_irq_handler(i915, disp_ctl);
2692 	raw_reg_write(regs, GEN11_DISPLAY_INT_CTL,
2693 		      GEN11_DISPLAY_IRQ_ENABLE);
2694 
2695 	enable_rpm_wakeref_asserts(&i915->runtime_pm);
2696 }
2697 
2698 static irqreturn_t gen11_irq_handler(int irq, void *arg)
2699 {
2700 	struct drm_i915_private *i915 = arg;
2701 	void __iomem * const regs = i915->uncore.regs;
2702 	struct intel_gt *gt = to_gt(i915);
2703 	u32 master_ctl;
2704 	u32 gu_misc_iir;
2705 
2706 	if (!intel_irqs_enabled(i915))
2707 		return IRQ_NONE;
2708 
2709 	master_ctl = gen11_master_intr_disable(regs);
2710 	if (!master_ctl) {
2711 		gen11_master_intr_enable(regs);
2712 		return IRQ_NONE;
2713 	}
2714 
2715 	/* Find, queue (onto bottom-halves), then clear each source */
2716 	gen11_gt_irq_handler(gt, master_ctl);
2717 
2718 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
2719 	if (master_ctl & GEN11_DISPLAY_IRQ)
2720 		gen11_display_irq_handler(i915);
2721 
2722 	gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl);
2723 
2724 	gen11_master_intr_enable(regs);
2725 
2726 	gen11_gu_misc_irq_handler(i915, gu_misc_iir);
2727 
2728 	pmu_irq_stats(i915, IRQ_HANDLED);
2729 
2730 	return IRQ_HANDLED;
2731 }
2732 
2733 static inline u32 dg1_master_intr_disable(void __iomem * const regs)
2734 {
2735 	u32 val;
2736 
2737 	/* First disable interrupts */
2738 	raw_reg_write(regs, DG1_MSTR_TILE_INTR, 0);
2739 
2740 	/* Get the indication levels and ack the master unit */
2741 	val = raw_reg_read(regs, DG1_MSTR_TILE_INTR);
2742 	if (unlikely(!val))
2743 		return 0;
2744 
2745 	raw_reg_write(regs, DG1_MSTR_TILE_INTR, val);
2746 
2747 	return val;
2748 }
2749 
2750 static inline void dg1_master_intr_enable(void __iomem * const regs)
2751 {
2752 	raw_reg_write(regs, DG1_MSTR_TILE_INTR, DG1_MSTR_IRQ);
2753 }
2754 
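/*
 * DG1 adds a tile-level master register: DG1_MSTR_TILE_INTR reports which
 * tile raised the interrupt, and per-tile handling then follows the Gen11
 * scheme. Only tile 0 is handled here (see the FIXME below).
 */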
2755 static irqreturn_t dg1_irq_handler(int irq, void *arg)
2756 {
2757 	struct drm_i915_private * const i915 = arg;
2758 	struct intel_gt *gt = to_gt(i915);
2759 	void __iomem * const regs = gt->uncore->regs;
2760 	u32 master_tile_ctl, master_ctl;
2761 	u32 gu_misc_iir;
2762 
2763 	if (!intel_irqs_enabled(i915))
2764 		return IRQ_NONE;
2765 
2766 	master_tile_ctl = dg1_master_intr_disable(regs);
2767 	if (!master_tile_ctl) {
2768 		dg1_master_intr_enable(regs);
2769 		return IRQ_NONE;
2770 	}
2771 
2772 	/* FIXME: we only support tile 0 for now. */
2773 	if (master_tile_ctl & DG1_MSTR_TILE(0)) {
2774 		master_ctl = raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
2775 		raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, master_ctl);
2776 	} else {
2777 		drm_err(&i915->drm, "Tile not supported: 0x%08x\n",
2778 			master_tile_ctl);
2779 		dg1_master_intr_enable(regs);
2780 		return IRQ_NONE;
2781 	}
2782 
2783 	gen11_gt_irq_handler(gt, master_ctl);
2784 
2785 	if (master_ctl & GEN11_DISPLAY_IRQ)
2786 		gen11_display_irq_handler(i915);
2787 
2788 	gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl);
2789 
2790 	dg1_master_intr_enable(regs);
2791 
2792 	gen11_gu_misc_irq_handler(i915, gu_misc_iir);
2793 
2794 	pmu_irq_stats(i915, IRQ_HANDLED);
2795 
2796 	return IRQ_HANDLED;
2797 }
2798 
/*
 * Called from drm generic code, passed 'crtc' which
 * we use as a pipe index.
 */
2802 int i8xx_enable_vblank(struct drm_crtc *crtc)
2803 {
2804 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2805 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
2806 	unsigned long irqflags;
2807 
2808 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2809 	i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
2810 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2811 
2812 	return 0;
2813 }
2814 
2815 int i915gm_enable_vblank(struct drm_crtc *crtc)
2816 {
2817 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2818 
2819 	/*
2820 	 * Vblank interrupts fail to wake the device up from C2+.
2821 	 * Disabling render clock gating during C-states avoids
2822 	 * the problem. There is a small power cost so we do this
2823 	 * only when vblank interrupts are actually enabled.
2824 	 */
2825 	if (dev_priv->vblank_enabled++ == 0)
2826 		intel_uncore_write(&dev_priv->uncore, SCPD0, _MASKED_BIT_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
2827 
2828 	return i8xx_enable_vblank(crtc);
2829 }
2830 
2831 int i965_enable_vblank(struct drm_crtc *crtc)
2832 {
2833 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2834 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
2835 	unsigned long irqflags;
2836 
2837 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2838 	i915_enable_pipestat(dev_priv, pipe,
2839 			     PIPE_START_VBLANK_INTERRUPT_STATUS);
2840 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2841 
2842 	return 0;
2843 }
2844 
2845 int ilk_enable_vblank(struct drm_crtc *crtc)
2846 {
2847 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2848 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
2849 	unsigned long irqflags;
2850 	u32 bit = DISPLAY_VER(dev_priv) >= 7 ?
2851 		DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
2852 
2853 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2854 	ilk_enable_display_irq(dev_priv, bit);
2855 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2856 
	/*
	 * Even though there is no DMC, the frame counter can get stuck when
	 * PSR is active, as no frames are generated.
	 */
2860 	if (HAS_PSR(dev_priv))
2861 		drm_crtc_vblank_restore(crtc);
2862 
2863 	return 0;
2864 }
2865 
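/*
 * Enable/disable the DSI TE (tearing effect) event that serves as the
 * vblank source for command mode DSI panels. Returns true if the crtc is
 * driven by such a panel, in which case the caller skips the regular pipe
 * vblank interrupt.
 */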
2866 static bool gen11_dsi_configure_te(struct intel_crtc *intel_crtc,
2867 				   bool enable)
2868 {
2869 	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
2870 	enum port port;
2871 
2872 	if (!(intel_crtc->mode_flags &
2873 	    (I915_MODE_FLAG_DSI_USE_TE1 | I915_MODE_FLAG_DSI_USE_TE0)))
2874 		return false;
2875 
2876 	/* for dual link cases we consider TE from slave */
2877 	if (intel_crtc->mode_flags & I915_MODE_FLAG_DSI_USE_TE1)
2878 		port = PORT_B;
2879 	else
2880 		port = PORT_A;
2881 
2882 	intel_uncore_rmw(&dev_priv->uncore, DSI_INTR_MASK_REG(port), DSI_TE_EVENT,
2883 			 enable ? 0 : DSI_TE_EVENT);
2884 
2885 	intel_uncore_rmw(&dev_priv->uncore, DSI_INTR_IDENT_REG(port), 0, 0);
2886 
2887 	return true;
2888 }
2889 
2890 int bdw_enable_vblank(struct drm_crtc *_crtc)
2891 {
2892 	struct intel_crtc *crtc = to_intel_crtc(_crtc);
2893 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2894 	enum pipe pipe = crtc->pipe;
2895 	unsigned long irqflags;
2896 
2897 	if (gen11_dsi_configure_te(crtc, true))
2898 		return 0;
2899 
2900 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2901 	bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
2902 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2903 
	/*
	 * Even if there is no DMC, the frame counter can get stuck when
	 * PSR is active, as no frames are generated, so check only for PSR.
	 */
2907 	if (HAS_PSR(dev_priv))
2908 		drm_crtc_vblank_restore(&crtc->base);
2909 
2910 	return 0;
2911 }
2912 
/*
 * Called from drm generic code, passed 'crtc' which
 * we use as a pipe index.
 */
2916 void i8xx_disable_vblank(struct drm_crtc *crtc)
2917 {
2918 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2919 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
2920 	unsigned long irqflags;
2921 
2922 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2923 	i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
2924 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2925 }
2926 
2927 void i915gm_disable_vblank(struct drm_crtc *crtc)
2928 {
2929 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2930 
2931 	i8xx_disable_vblank(crtc);
2932 
2933 	if (--dev_priv->vblank_enabled == 0)
2934 		intel_uncore_write(&dev_priv->uncore, SCPD0, _MASKED_BIT_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
2935 }
2936 
2937 void i965_disable_vblank(struct drm_crtc *crtc)
2938 {
2939 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2940 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
2941 	unsigned long irqflags;
2942 
2943 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2944 	i915_disable_pipestat(dev_priv, pipe,
2945 			      PIPE_START_VBLANK_INTERRUPT_STATUS);
2946 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2947 }
2948 
2949 void ilk_disable_vblank(struct drm_crtc *crtc)
2950 {
2951 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2952 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
2953 	unsigned long irqflags;
2954 	u32 bit = DISPLAY_VER(dev_priv) >= 7 ?
2955 		DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
2956 
2957 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2958 	ilk_disable_display_irq(dev_priv, bit);
2959 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2960 }
2961 
2962 void bdw_disable_vblank(struct drm_crtc *_crtc)
2963 {
2964 	struct intel_crtc *crtc = to_intel_crtc(_crtc);
2965 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2966 	enum pipe pipe = crtc->pipe;
2967 	unsigned long irqflags;
2968 
2969 	if (gen11_dsi_configure_te(crtc, false))
2970 		return;
2971 
2972 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2973 	bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
2974 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2975 }
2976 
2977 static void ibx_irq_reset(struct drm_i915_private *dev_priv)
2978 {
2979 	struct intel_uncore *uncore = &dev_priv->uncore;
2980 
2981 	if (HAS_PCH_NOP(dev_priv))
2982 		return;
2983 
2984 	GEN3_IRQ_RESET(uncore, SDE);
2985 
2986 	if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
2987 		intel_uncore_write(&dev_priv->uncore, SERR_INT, 0xffffffff);
2988 }
2989 
2990 static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
2991 {
2992 	struct intel_uncore *uncore = &dev_priv->uncore;
2993 
2994 	if (IS_CHERRYVIEW(dev_priv))
2995 		intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
2996 	else
2997 		intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_VLV);
2998 
2999 	i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
3000 	intel_uncore_rmw(uncore, PORT_HOTPLUG_STAT, 0, 0);
3001 
3002 	i9xx_pipestat_irq_reset(dev_priv);
3003 
3004 	GEN3_IRQ_RESET(uncore, VLV_);
3005 	dev_priv->irq_mask = ~0u;
3006 }
3007 
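/*
 * Program the VLV/CHV display interrupt masks: GMBUS via pipe A's
 * PIPESTAT, CRC done on every pipe, plus the port, pipe event and LPE
 * audio interrupts (pipe/LPE C only on CHV).
 */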
3008 static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
3009 {
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 pipestat_mask;
3013 	u32 enable_mask;
3014 	enum pipe pipe;
3015 
3016 	pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS;
3017 
3018 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3019 	for_each_pipe(dev_priv, pipe)
3020 		i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
3021 
3022 	enable_mask = I915_DISPLAY_PORT_INTERRUPT |
3023 		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3024 		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3025 		I915_LPE_PIPE_A_INTERRUPT |
3026 		I915_LPE_PIPE_B_INTERRUPT;
3027 
3028 	if (IS_CHERRYVIEW(dev_priv))
3029 		enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT |
3030 			I915_LPE_PIPE_C_INTERRUPT;
3031 
3032 	drm_WARN_ON(&dev_priv->drm, dev_priv->irq_mask != ~0u);
3033 
3034 	dev_priv->irq_mask = ~enable_mask;
3035 
3036 	GEN3_IRQ_INIT(uncore, VLV_, dev_priv->irq_mask, enable_mask);
3037 }
3038 
/* drm_dma.h hooks */
3041 static void ilk_irq_reset(struct drm_i915_private *dev_priv)
3042 {
3043 	struct intel_uncore *uncore = &dev_priv->uncore;
3044 
3045 	GEN3_IRQ_RESET(uncore, DE);
3046 	dev_priv->irq_mask = ~0u;
3047 
3048 	if (GRAPHICS_VER(dev_priv) == 7)
3049 		intel_uncore_write(uncore, GEN7_ERR_INT, 0xffffffff);
3050 
3051 	if (IS_HASWELL(dev_priv)) {
3052 		intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
3053 		intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
3054 	}
3055 
3056 	gen5_gt_irq_reset(to_gt(dev_priv));
3057 
3058 	ibx_irq_reset(dev_priv);
3059 }
3060 
3061 static void valleyview_irq_reset(struct drm_i915_private *dev_priv)
3062 {
3063 	intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, 0);
3064 	intel_uncore_posting_read(&dev_priv->uncore, VLV_MASTER_IER);
3065 
3066 	gen5_gt_irq_reset(to_gt(dev_priv));
3067 
3068 	spin_lock_irq(&dev_priv->irq_lock);
3069 	if (dev_priv->display_irqs_enabled)
3070 		vlv_display_irq_reset(dev_priv);
3071 	spin_unlock_irq(&dev_priv->irq_lock);
3072 }
3073 
3074 static void gen8_display_irq_reset(struct drm_i915_private *dev_priv)
3075 {
3076 	struct intel_uncore *uncore = &dev_priv->uncore;
3077 	enum pipe pipe;
3078 
3079 	if (!HAS_DISPLAY(dev_priv))
3080 		return;
3081 
3082 	intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
3083 	intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
3084 
3085 	for_each_pipe(dev_priv, pipe)
3086 		if (intel_display_power_is_enabled(dev_priv,
3087 						   POWER_DOMAIN_PIPE(pipe)))
3088 			GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
3089 
3090 	GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
3091 	GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);
3092 }
3093 
3094 static void gen8_irq_reset(struct drm_i915_private *dev_priv)
3095 {
3096 	struct intel_uncore *uncore = &dev_priv->uncore;
3097 
3098 	gen8_master_intr_disable(uncore->regs);
3099 
3100 	gen8_gt_irq_reset(to_gt(dev_priv));
3101 	gen8_display_irq_reset(dev_priv);
3102 	GEN3_IRQ_RESET(uncore, GEN8_PCU_);
3103 
3104 	if (HAS_PCH_SPLIT(dev_priv))
3105 		ibx_irq_reset(dev_priv);
}
3108 
3109 static void gen11_display_irq_reset(struct drm_i915_private *dev_priv)
3110 {
3111 	struct intel_uncore *uncore = &dev_priv->uncore;
3112 	enum pipe pipe;
3113 	u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
3114 		BIT(TRANSCODER_C) | BIT(TRANSCODER_D);
3115 
3116 	if (!HAS_DISPLAY(dev_priv))
3117 		return;
3118 
3119 	intel_uncore_write(uncore, GEN11_DISPLAY_INT_CTL, 0);
3120 
3121 	if (DISPLAY_VER(dev_priv) >= 12) {
3122 		enum transcoder trans;
3123 
3124 		for_each_cpu_transcoder_masked(dev_priv, trans, trans_mask) {
3125 			enum intel_display_power_domain domain;
3126 
3127 			domain = POWER_DOMAIN_TRANSCODER(trans);
3128 			if (!intel_display_power_is_enabled(dev_priv, domain))
3129 				continue;
3130 
3131 			intel_uncore_write(uncore, TRANS_PSR_IMR(trans), 0xffffffff);
3132 			intel_uncore_write(uncore, TRANS_PSR_IIR(trans), 0xffffffff);
3133 		}
3134 	} else {
3135 		intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
3136 		intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
3137 	}
3138 
3139 	for_each_pipe(dev_priv, pipe)
3140 		if (intel_display_power_is_enabled(dev_priv,
3141 						   POWER_DOMAIN_PIPE(pipe)))
3142 			GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
3143 
3144 	GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
3145 	GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);
3146 	GEN3_IRQ_RESET(uncore, GEN11_DE_HPD_);
3147 
3148 	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
3149 		GEN3_IRQ_RESET(uncore, SDE);
3150 }
3151 
3152 static void gen11_irq_reset(struct drm_i915_private *dev_priv)
3153 {
3154 	struct intel_gt *gt = to_gt(dev_priv);
3155 	struct intel_uncore *uncore = gt->uncore;
3156 
3157 	gen11_master_intr_disable(dev_priv->uncore.regs);
3158 
3159 	gen11_gt_irq_reset(gt);
3160 	gen11_display_irq_reset(dev_priv);
3161 
3162 	GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_);
3163 	GEN3_IRQ_RESET(uncore, GEN8_PCU_);
3164 }
3165 
3166 static void dg1_irq_reset(struct drm_i915_private *dev_priv)
3167 {
3168 	struct intel_gt *gt = to_gt(dev_priv);
3169 	struct intel_uncore *uncore = gt->uncore;
3170 
3171 	dg1_master_intr_disable(dev_priv->uncore.regs);
3172 
3173 	gen11_gt_irq_reset(gt);
3174 	gen11_display_irq_reset(dev_priv);
3175 
3176 	GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_);
3177 	GEN3_IRQ_RESET(uncore, GEN8_PCU_);
3178 }
3179 
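/**
 * gen8_irq_power_well_post_enable - enable pipe interrupts after enabling a power well
 * @dev_priv: i915 device instance
 * @pipe_mask: mask of pipes whose interrupt registers must be re-initialized
 *
 * Re-program the DE_PIPE interrupt registers for the pipes in @pipe_mask,
 * restoring the saved per-pipe mask plus the always-needed vblank, underrun
 * and flip done bits, since the register contents are not retained while
 * the backing power well is off.
 */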
3180 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
3181 				     u8 pipe_mask)
3182 {
3183 	struct intel_uncore *uncore = &dev_priv->uncore;
3184 	u32 extra_ier = GEN8_PIPE_VBLANK |
3185 		gen8_de_pipe_underrun_mask(dev_priv) |
3186 		gen8_de_pipe_flip_done_mask(dev_priv);
3187 	enum pipe pipe;
3188 
3189 	spin_lock_irq(&dev_priv->irq_lock);
3190 
3191 	if (!intel_irqs_enabled(dev_priv)) {
3192 		spin_unlock_irq(&dev_priv->irq_lock);
3193 		return;
3194 	}
3195 
3196 	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
3197 		GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
3198 				  dev_priv->de_irq_mask[pipe],
3199 				  ~dev_priv->de_irq_mask[pipe] | extra_ier);
3200 
3201 	spin_unlock_irq(&dev_priv->irq_lock);
3202 }
3203 
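/**
 * gen8_irq_power_well_pre_disable - disable pipe interrupts before disabling a power well
 * @dev_priv: i915 device instance
 * @pipe_mask: mask of pipes whose interrupt registers must be reset
 *
 * Reset the DE_PIPE interrupt registers for the pipes in @pipe_mask and
 * synchronize against the interrupt handler so that no display IRQ for
 * those pipes is still in flight when the power well goes down.
 */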
3204 void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
3205 				     u8 pipe_mask)
3206 {
3207 	struct intel_uncore *uncore = &dev_priv->uncore;
3208 	enum pipe pipe;
3209 
3210 	spin_lock_irq(&dev_priv->irq_lock);
3211 
3212 	if (!intel_irqs_enabled(dev_priv)) {
3213 		spin_unlock_irq(&dev_priv->irq_lock);
3214 		return;
3215 	}
3216 
3217 	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
3218 		GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
3219 
3220 	spin_unlock_irq(&dev_priv->irq_lock);
3221 
3222 	/* make sure we're done processing display irqs */
3223 	intel_synchronize_irq(dev_priv);
3224 }
3225 
3226 static void cherryview_irq_reset(struct drm_i915_private *dev_priv)
3227 {
3228 	struct intel_uncore *uncore = &dev_priv->uncore;
3229 
3230 	intel_uncore_write(uncore, GEN8_MASTER_IRQ, 0);
3231 	intel_uncore_posting_read(&dev_priv->uncore, GEN8_MASTER_IRQ);
3232 
3233 	gen8_gt_irq_reset(to_gt(dev_priv));
3234 
3235 	GEN3_IRQ_RESET(uncore, GEN8_PCU_);
3236 
3237 	spin_lock_irq(&dev_priv->irq_lock);
3238 	if (dev_priv->display_irqs_enabled)
3239 		vlv_display_irq_reset(dev_priv);
3240 	spin_unlock_irq(&dev_priv->irq_lock);
3241 }
3242 
3243 static u32 ibx_hotplug_enables(struct drm_i915_private *i915,
3244 			       enum hpd_pin pin)
3245 {
3246 	switch (pin) {
3247 	case HPD_PORT_A:
3248 		/*
3249 		 * When CPU and PCH are on the same package, port A
3250 		 * HPD must be enabled in both north and south.
3251 		 */
3252 		return HAS_PCH_LPT_LP(i915) ?
3253 			PORTA_HOTPLUG_ENABLE : 0;
3254 	case HPD_PORT_B:
3255 		return PORTB_HOTPLUG_ENABLE |
3256 			PORTB_PULSE_DURATION_2ms;
3257 	case HPD_PORT_C:
3258 		return PORTC_HOTPLUG_ENABLE |
3259 			PORTC_PULSE_DURATION_2ms;
3260 	case HPD_PORT_D:
3261 		return PORTD_HOTPLUG_ENABLE |
3262 			PORTD_PULSE_DURATION_2ms;
3263 	default:
3264 		return 0;
3265 	}
3266 }
3267 
3268 static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv)
3269 {
3270 	/*
3271 	 * Enable digital hotplug on the PCH, and configure the DP short pulse
3272 	 * duration to 2ms (which is the minimum in the Display Port spec).
3273 	 * The pulse duration bits are reserved on LPT+.
3274 	 */
3275 	intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG,
3276 			 PORTA_HOTPLUG_ENABLE |
3277 			 PORTB_HOTPLUG_ENABLE |
3278 			 PORTC_HOTPLUG_ENABLE |
3279 			 PORTD_HOTPLUG_ENABLE |
3280 			 PORTB_PULSE_DURATION_MASK |
3281 			 PORTC_PULSE_DURATION_MASK |
3282 			 PORTD_PULSE_DURATION_MASK,
3283 			 intel_hpd_hotplug_enables(dev_priv, ibx_hotplug_enables));
3284 }
3285 
3286 static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
3287 {
3288 	u32 hotplug_irqs, enabled_irqs;
3289 
3290 	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
3291 	hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
3292 
3293 	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3294 
3295 	ibx_hpd_detection_setup(dev_priv);
3296 }
3297 
3298 static u32 icp_ddi_hotplug_enables(struct drm_i915_private *i915,
3299 				   enum hpd_pin pin)
3300 {
3301 	switch (pin) {
3302 	case HPD_PORT_A:
3303 	case HPD_PORT_B:
3304 	case HPD_PORT_C:
3305 	case HPD_PORT_D:
3306 		return SHOTPLUG_CTL_DDI_HPD_ENABLE(pin);
3307 	default:
3308 		return 0;
3309 	}
3310 }
3311 
3312 static u32 icp_tc_hotplug_enables(struct drm_i915_private *i915,
3313 				  enum hpd_pin pin)
3314 {
3315 	switch (pin) {
3316 	case HPD_PORT_TC1:
3317 	case HPD_PORT_TC2:
3318 	case HPD_PORT_TC3:
3319 	case HPD_PORT_TC4:
3320 	case HPD_PORT_TC5:
3321 	case HPD_PORT_TC6:
3322 		return ICP_TC_HPD_ENABLE(pin);
3323 	default:
3324 		return 0;
3325 	}
3326 }
3327 
3328 static void icp_ddi_hpd_detection_setup(struct drm_i915_private *dev_priv)
3329 {
3330 	intel_uncore_rmw(&dev_priv->uncore, SHOTPLUG_CTL_DDI,
3331 			 SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_A) |
3332 			 SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_B) |
3333 			 SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_C) |
3334 			 SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_D),
3335 			 intel_hpd_hotplug_enables(dev_priv, icp_ddi_hotplug_enables));
3336 }
3337 
3338 static void icp_tc_hpd_detection_setup(struct drm_i915_private *dev_priv)
3339 {
3340 	intel_uncore_rmw(&dev_priv->uncore, SHOTPLUG_CTL_TC,
3341 			 ICP_TC_HPD_ENABLE(HPD_PORT_TC1) |
3342 			 ICP_TC_HPD_ENABLE(HPD_PORT_TC2) |
3343 			 ICP_TC_HPD_ENABLE(HPD_PORT_TC3) |
3344 			 ICP_TC_HPD_ENABLE(HPD_PORT_TC4) |
3345 			 ICP_TC_HPD_ENABLE(HPD_PORT_TC5) |
3346 			 ICP_TC_HPD_ENABLE(HPD_PORT_TC6),
3347 			 intel_hpd_hotplug_enables(dev_priv, icp_tc_hotplug_enables));
3348 }
3349 
3350 static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv)
3351 {
3352 	u32 hotplug_irqs, enabled_irqs;
3353 
3354 	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
3355 	hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
3356 
3357 	if (INTEL_PCH_TYPE(dev_priv) <= PCH_TGP)
3358 		intel_uncore_write(&dev_priv->uncore, SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
3359 
3360 	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3361 
3362 	icp_ddi_hpd_detection_setup(dev_priv);
3363 	icp_tc_hpd_detection_setup(dev_priv);
3364 }
3365 
3366 static u32 gen11_hotplug_enables(struct drm_i915_private *i915,
3367 				 enum hpd_pin pin)
3368 {
3369 	switch (pin) {
3370 	case HPD_PORT_TC1:
3371 	case HPD_PORT_TC2:
3372 	case HPD_PORT_TC3:
3373 	case HPD_PORT_TC4:
3374 	case HPD_PORT_TC5:
3375 	case HPD_PORT_TC6:
3376 		return GEN11_HOTPLUG_CTL_ENABLE(pin);
3377 	default:
3378 		return 0;
3379 	}
3380 }
3381 
3382 static void dg1_hpd_invert(struct drm_i915_private *i915)
3383 {
3384 	u32 val = (INVERT_DDIA_HPD |
3385 		   INVERT_DDIB_HPD |
3386 		   INVERT_DDIC_HPD |
3387 		   INVERT_DDID_HPD);
3388 	intel_uncore_rmw(&i915->uncore, SOUTH_CHICKEN1, 0, val);
3389 }
3390 
3391 static void dg1_hpd_irq_setup(struct drm_i915_private *dev_priv)
3392 {
3393 	dg1_hpd_invert(dev_priv);
3394 	icp_hpd_irq_setup(dev_priv);
3395 }
3396 
3397 static void gen11_tc_hpd_detection_setup(struct drm_i915_private *dev_priv)
3398 {
3399 	intel_uncore_rmw(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL,
3400 			 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC1) |
3401 			 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC2) |
3402 			 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC3) |
3403 			 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC4) |
3404 			 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC5) |
3405 			 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC6),
3406 			 intel_hpd_hotplug_enables(dev_priv, gen11_hotplug_enables));
3407 }
3408 
3409 static void gen11_tbt_hpd_detection_setup(struct drm_i915_private *dev_priv)
3410 {
3411 	intel_uncore_rmw(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL,
3412 			 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC1) |
3413 			 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC2) |
3414 			 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC3) |
3415 			 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC4) |
3416 			 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC5) |
3417 			 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC6),
3418 			 intel_hpd_hotplug_enables(dev_priv, gen11_hotplug_enables));
3419 }
3420 
3421 static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
3422 {
3423 	u32 hotplug_irqs, enabled_irqs;
3424 
3425 	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.hpd);
3426 	hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.hpd);
3427 
3428 	intel_uncore_rmw(&dev_priv->uncore, GEN11_DE_HPD_IMR, hotplug_irqs,
3429 			 ~enabled_irqs & hotplug_irqs);
3430 	intel_uncore_posting_read(&dev_priv->uncore, GEN11_DE_HPD_IMR);
3431 
3432 	gen11_tc_hpd_detection_setup(dev_priv);
3433 	gen11_tbt_hpd_detection_setup(dev_priv);
3434 
3435 	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
3436 		icp_hpd_irq_setup(dev_priv);
3437 }
3438 
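/*
 * Worked example for the IMR update in gen11_hpd_irq_setup() above, with
 * made-up values: if hotplug_irqs = 0x3f and enabled_irqs = 0x03, the rmw
 * first clears all six hotplug bits from GEN11_DE_HPD_IMR and then sets
 * ~0x03 & 0x3f = 0x3c, i.e. the two enabled pins end up unmasked while
 * the other four stay masked.
 */
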
3439 static u32 spt_hotplug_enables(struct drm_i915_private *i915,
3440 			       enum hpd_pin pin)
3441 {
3442 	switch (pin) {
3443 	case HPD_PORT_A:
3444 		return PORTA_HOTPLUG_ENABLE;
3445 	case HPD_PORT_B:
3446 		return PORTB_HOTPLUG_ENABLE;
3447 	case HPD_PORT_C:
3448 		return PORTC_HOTPLUG_ENABLE;
3449 	case HPD_PORT_D:
3450 		return PORTD_HOTPLUG_ENABLE;
3451 	default:
3452 		return 0;
3453 	}
3454 }
3455 
3456 static u32 spt_hotplug2_enables(struct drm_i915_private *i915,
3457 				enum hpd_pin pin)
3458 {
3459 	switch (pin) {
3460 	case HPD_PORT_E:
3461 		return PORTE_HOTPLUG_ENABLE;
3462 	default:
3463 		return 0;
3464 	}
3465 }
3466 
3467 static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv)
3468 {
3469 	/* Display WA #1179 WaHardHangonHotPlug: cnp */
3470 	if (HAS_PCH_CNP(dev_priv)) {
3471 		intel_uncore_rmw(&dev_priv->uncore, SOUTH_CHICKEN1, CHASSIS_CLK_REQ_DURATION_MASK,
3472 				 CHASSIS_CLK_REQ_DURATION(0xf));
3473 	}
3474 
3475 	/* Enable digital hotplug on the PCH */
3476 	intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG,
3477 			 PORTA_HOTPLUG_ENABLE |
3478 			 PORTB_HOTPLUG_ENABLE |
3479 			 PORTC_HOTPLUG_ENABLE |
3480 			 PORTD_HOTPLUG_ENABLE,
3481 			 intel_hpd_hotplug_enables(dev_priv, spt_hotplug_enables));
3482 
3483 	intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG2, PORTE_HOTPLUG_ENABLE,
3484 			 intel_hpd_hotplug_enables(dev_priv, spt_hotplug2_enables));
3485 }
3486 
3487 static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
3488 {
3489 	u32 hotplug_irqs, enabled_irqs;
3490 
3491 	if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
3492 		intel_uncore_write(&dev_priv->uncore, SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
3493 
3494 	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
3495 	hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
3496 
3497 	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3498 
3499 	spt_hpd_detection_setup(dev_priv);
3500 }
3501 
3502 static u32 ilk_hotplug_enables(struct drm_i915_private *i915,
3503 			       enum hpd_pin pin)
3504 {
3505 	switch (pin) {
3506 	case HPD_PORT_A:
3507 		return DIGITAL_PORTA_HOTPLUG_ENABLE |
3508 			DIGITAL_PORTA_PULSE_DURATION_2ms;
3509 	default:
3510 		return 0;
3511 	}
3512 }
3513 
3514 static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv)
3515 {
3516 	/*
3517 	 * Enable digital hotplug on the CPU, and configure the DP short pulse
3518 	 * duration to 2ms (which is the minimum in the DisplayPort spec).
3519 	 * The pulse duration bits are reserved on HSW+.
3520 	 */
3521 	intel_uncore_rmw(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL,
3522 			 DIGITAL_PORTA_HOTPLUG_ENABLE | DIGITAL_PORTA_PULSE_DURATION_MASK,
3523 			 intel_hpd_hotplug_enables(dev_priv, ilk_hotplug_enables));
3524 }
3525 
3526 static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
3527 {
3528 	u32 hotplug_irqs, enabled_irqs;
3529 
3530 	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.hpd);
3531 	hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.hpd);
3532 
3533 	if (DISPLAY_VER(dev_priv) >= 8)
3534 		bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3535 	else
3536 		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
3537 
3538 	ilk_hpd_detection_setup(dev_priv);
3539 
3540 	ibx_hpd_irq_setup(dev_priv);
3541 }
3542 
3543 static u32 bxt_hotplug_enables(struct drm_i915_private *i915,
3544 			       enum hpd_pin pin)
3545 {
3546 	u32 hotplug;
3547 
3548 	switch (pin) {
3549 	case HPD_PORT_A:
3550 		hotplug = PORTA_HOTPLUG_ENABLE;
3551 		if (intel_bios_is_port_hpd_inverted(i915, PORT_A))
3552 			hotplug |= BXT_DDIA_HPD_INVERT;
3553 		return hotplug;
3554 	case HPD_PORT_B:
3555 		hotplug = PORTB_HOTPLUG_ENABLE;
3556 		if (intel_bios_is_port_hpd_inverted(i915, PORT_B))
3557 			hotplug |= BXT_DDIB_HPD_INVERT;
3558 		return hotplug;
3559 	case HPD_PORT_C:
3560 		hotplug = PORTC_HOTPLUG_ENABLE;
3561 		if (intel_bios_is_port_hpd_inverted(i915, PORT_C))
3562 			hotplug |= BXT_DDIC_HPD_INVERT;
3563 		return hotplug;
3564 	default:
3565 		return 0;
3566 	}
3567 }
3568 
3569 static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv)
3570 {
3571 	intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG,
3572 			 PORTA_HOTPLUG_ENABLE |
3573 			 PORTB_HOTPLUG_ENABLE |
3574 			 PORTC_HOTPLUG_ENABLE |
3575 			 BXT_DDI_HPD_INVERT_MASK,
3576 			 intel_hpd_hotplug_enables(dev_priv, bxt_hotplug_enables));
3577 }
3578 
3579 static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
3580 {
3581 	u32 hotplug_irqs, enabled_irqs;
3582 
3583 	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.hpd);
3584 	hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.hpd);
3585 
3586 	bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3587 
3588 	bxt_hpd_detection_setup(dev_priv);
3589 }
3590 
3591 /*
3592  * SDEIER is also touched by the interrupt handler to work around missed PCH
3593  * interrupts. Hence we can't update it after the interrupt handler is enabled -
3594  * instead we unconditionally enable all PCH interrupt sources here, but then
3595  * only unmask them as needed with SDEIMR.
3596  *
3597  * Note that we currently do this after installing the interrupt handler,
3598  * but before we enable the master interrupt. That should be sufficient
3599  * to avoid races with the irq handler, assuming we have MSI. Shared legacy
3600  * interrupts could still race.
3601  */
3602 static void ibx_irq_postinstall(struct drm_i915_private *dev_priv)
3603 {
3604 	struct intel_uncore *uncore = &dev_priv->uncore;
3605 	u32 mask;
3606 
3607 	if (HAS_PCH_NOP(dev_priv))
3608 		return;
3609 
3610 	if (HAS_PCH_IBX(dev_priv))
3611 		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
3612 	else if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
3613 		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
3614 	else
3615 		mask = SDE_GMBUS_CPT;
3616 
3617 	GEN3_IRQ_INIT(uncore, SDE, ~mask, 0xffffffff);
3618 }
3619 
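/*
 * For reference, a sketch of what GEN3_IRQ_INIT(uncore, SDE, ~mask,
 * 0xffffffff) above boils down to (per the gen3 helpers earlier in this
 * file, modulo the exact macro plumbing):
 *
 *	gen3_assert_iir_is_zero(uncore, SDEIIR);
 *	intel_uncore_write(uncore, SDEIER, 0xffffffff);
 *	intel_uncore_write(uncore, SDEIMR, ~mask);
 *	intel_uncore_posting_read(uncore, SDEIMR);
 *
 * i.e. every PCH source is enabled in SDEIER and only the bits in 'mask'
 * are unmasked via SDEIMR, exactly as described in the comment above
 * ibx_irq_postinstall().
 */
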
3620 static void ilk_irq_postinstall(struct drm_i915_private *dev_priv)
3621 {
3622 	struct intel_uncore *uncore = &dev_priv->uncore;
3623 	u32 display_mask, extra_mask;
3624 
3625 	if (GRAPHICS_VER(dev_priv) >= 7) {
3626 		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
3627 				DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB);
3628 		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
3629 			      DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
3630 			      DE_PLANE_FLIP_DONE_IVB(PLANE_C) |
3631 			      DE_PLANE_FLIP_DONE_IVB(PLANE_B) |
3632 			      DE_PLANE_FLIP_DONE_IVB(PLANE_A) |
3633 			      DE_DP_A_HOTPLUG_IVB);
3634 	} else {
3635 		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
3636 				DE_AUX_CHANNEL_A | DE_PIPEB_CRC_DONE |
3637 				DE_PIPEA_CRC_DONE | DE_POISON);
3638 		extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK |
3639 			      DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
3640 			      DE_PLANE_FLIP_DONE(PLANE_A) |
3641 			      DE_PLANE_FLIP_DONE(PLANE_B) |
3642 			      DE_DP_A_HOTPLUG);
3643 	}
3644 
3645 	if (IS_HASWELL(dev_priv)) {
3646 		gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
3647 		display_mask |= DE_EDP_PSR_INT_HSW;
3648 	}
3649 
3650 	if (IS_IRONLAKE_M(dev_priv))
3651 		extra_mask |= DE_PCU_EVENT;
3652 
3653 	dev_priv->irq_mask = ~display_mask;
3654 
3655 	ibx_irq_postinstall(dev_priv);
3656 
3657 	gen5_gt_irq_postinstall(to_gt(dev_priv));
3658 
3659 	GEN3_IRQ_INIT(uncore, DE, dev_priv->irq_mask,
3660 		      display_mask | extra_mask);
3661 }
3662 
3663 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
3664 {
3665 	lockdep_assert_held(&dev_priv->irq_lock);
3666 
3667 	if (dev_priv->display_irqs_enabled)
3668 		return;
3669 
3670 	dev_priv->display_irqs_enabled = true;
3671 
3672 	if (intel_irqs_enabled(dev_priv)) {
3673 		vlv_display_irq_reset(dev_priv);
3674 		vlv_display_irq_postinstall(dev_priv);
3675 	}
3676 }
3677 
3678 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
3679 {
3680 	lockdep_assert_held(&dev_priv->irq_lock);
3681 
3682 	if (!dev_priv->display_irqs_enabled)
3683 		return;
3684 
3685 	dev_priv->display_irqs_enabled = false;
3686 
3687 	if (intel_irqs_enabled(dev_priv))
3688 		vlv_display_irq_reset(dev_priv);
3689 }
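
/*
 * Hypothetical caller sketch: both helpers above expect irq_lock to be
 * held (see the lockdep asserts), so the display power well code would
 * use them roughly as
 *
 *	spin_lock_irq(&dev_priv->irq_lock);
 *	valleyview_enable_display_irqs(dev_priv);
 *	spin_unlock_irq(&dev_priv->irq_lock);
 */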
3690 
3692 static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv)
3693 {
3694 	gen5_gt_irq_postinstall(to_gt(dev_priv));
3695 
3696 	spin_lock_irq(&dev_priv->irq_lock);
3697 	if (dev_priv->display_irqs_enabled)
3698 		vlv_display_irq_postinstall(dev_priv);
3699 	spin_unlock_irq(&dev_priv->irq_lock);
3700 
3701 	intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
3702 	intel_uncore_posting_read(&dev_priv->uncore, VLV_MASTER_IER);
3703 }
3704 
3705 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
3706 {
3707 	struct intel_uncore *uncore = &dev_priv->uncore;
3708 
3709 	u32 de_pipe_masked = gen8_de_pipe_fault_mask(dev_priv) |
3710 		GEN8_PIPE_CDCLK_CRC_DONE;
3711 	u32 de_pipe_enables;
3712 	u32 de_port_masked = gen8_de_port_aux_mask(dev_priv);
3713 	u32 de_port_enables;
3714 	u32 de_misc_masked = GEN8_DE_EDP_PSR;
3715 	u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
3716 		BIT(TRANSCODER_C) | BIT(TRANSCODER_D);
3717 	enum pipe pipe;
3718 
3719 	if (!HAS_DISPLAY(dev_priv))
3720 		return;
3721 
3722 	if (DISPLAY_VER(dev_priv) <= 10)
3723 		de_misc_masked |= GEN8_DE_MISC_GSE;
3724 
3725 	if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
3726 		de_port_masked |= BXT_DE_PORT_GMBUS;
3727 
3728 	if (DISPLAY_VER(dev_priv) >= 11) {
3729 		enum port port;
3730 
3731 		if (intel_bios_is_dsi_present(dev_priv, &port))
3732 			de_port_masked |= DSI0_TE | DSI1_TE;
3733 	}
3734 
3735 	de_pipe_enables = de_pipe_masked |
3736 		GEN8_PIPE_VBLANK |
3737 		gen8_de_pipe_underrun_mask(dev_priv) |
3738 		gen8_de_pipe_flip_done_mask(dev_priv);
3739 
3740 	de_port_enables = de_port_masked;
3741 	if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
3742 		de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
3743 	else if (IS_BROADWELL(dev_priv))
3744 		de_port_enables |= BDW_DE_PORT_HOTPLUG_MASK;
3745 
3746 	if (DISPLAY_VER(dev_priv) >= 12) {
3747 		enum transcoder trans;
3748 
3749 		for_each_cpu_transcoder_masked(dev_priv, trans, trans_mask) {
3750 			enum intel_display_power_domain domain;
3751 
3752 			domain = POWER_DOMAIN_TRANSCODER(trans);
3753 			if (!intel_display_power_is_enabled(dev_priv, domain))
3754 				continue;
3755 
3756 			gen3_assert_iir_is_zero(uncore, TRANS_PSR_IIR(trans));
3757 		}
3758 	} else {
3759 		gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
3760 	}
3761 
3762 	for_each_pipe(dev_priv, pipe) {
3763 		dev_priv->de_irq_mask[pipe] = ~de_pipe_masked;
3764 
3765 		if (intel_display_power_is_enabled(dev_priv,
3766 				POWER_DOMAIN_PIPE(pipe)))
3767 			GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
3768 					  dev_priv->de_irq_mask[pipe],
3769 					  de_pipe_enables);
3770 	}
3771 
3772 	GEN3_IRQ_INIT(uncore, GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
3773 	GEN3_IRQ_INIT(uncore, GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);
3774 
3775 	if (DISPLAY_VER(dev_priv) >= 11) {
3776 		u32 de_hpd_masked = 0;
3777 		u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK |
3778 				     GEN11_DE_TBT_HOTPLUG_MASK;
3779 
3780 		GEN3_IRQ_INIT(uncore, GEN11_DE_HPD_, ~de_hpd_masked,
3781 			      de_hpd_enables);
3782 	}
3783 }
3784 
3785 static void icp_irq_postinstall(struct drm_i915_private *dev_priv)
3786 {
3787 	struct intel_uncore *uncore = &dev_priv->uncore;
3788 	u32 mask = SDE_GMBUS_ICP;
3789 
3790 	GEN3_IRQ_INIT(uncore, SDE, ~mask, 0xffffffff);
3791 }
3792 
3793 static void gen8_irq_postinstall(struct drm_i915_private *dev_priv)
3794 {
3795 	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
3796 		icp_irq_postinstall(dev_priv);
3797 	else if (HAS_PCH_SPLIT(dev_priv))
3798 		ibx_irq_postinstall(dev_priv);
3799 
3800 	gen8_gt_irq_postinstall(to_gt(dev_priv));
3801 	gen8_de_irq_postinstall(dev_priv);
3802 
3803 	gen8_master_intr_enable(dev_priv->uncore.regs);
3804 }
3805 
3806 static void gen11_de_irq_postinstall(struct drm_i915_private *dev_priv)
3807 {
3808 	if (!HAS_DISPLAY(dev_priv))
3809 		return;
3810 
3811 	gen8_de_irq_postinstall(dev_priv);
3812 
3813 	intel_uncore_write(&dev_priv->uncore, GEN11_DISPLAY_INT_CTL,
3814 			   GEN11_DISPLAY_IRQ_ENABLE);
3815 }
3816 
3817 static void gen11_irq_postinstall(struct drm_i915_private *dev_priv)
3818 {
3819 	struct intel_gt *gt = to_gt(dev_priv);
3820 	struct intel_uncore *uncore = gt->uncore;
3821 	u32 gu_misc_masked = GEN11_GU_MISC_GSE;
3822 
3823 	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
3824 		icp_irq_postinstall(dev_priv);
3825 
3826 	gen11_gt_irq_postinstall(gt);
3827 	gen11_de_irq_postinstall(dev_priv);
3828 
3829 	GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);
3830 
3831 	gen11_master_intr_enable(uncore->regs);
3832 	intel_uncore_posting_read(&dev_priv->uncore, GEN11_GFX_MSTR_IRQ);
3833 }
3834 
3835 static void dg1_irq_postinstall(struct drm_i915_private *dev_priv)
3836 {
3837 	struct intel_gt *gt = to_gt(dev_priv);
3838 	struct intel_uncore *uncore = gt->uncore;
3839 	u32 gu_misc_masked = GEN11_GU_MISC_GSE;
3840 
3841 	gen11_gt_irq_postinstall(gt);
3842 
3843 	GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);
3844 
3845 	if (HAS_DISPLAY(dev_priv)) {
3846 		icp_irq_postinstall(dev_priv);
3847 		gen8_de_irq_postinstall(dev_priv);
3848 		intel_uncore_write(&dev_priv->uncore, GEN11_DISPLAY_INT_CTL,
3849 				   GEN11_DISPLAY_IRQ_ENABLE);
3850 	}
3851 
3852 	dg1_master_intr_enable(uncore->regs);
3853 	intel_uncore_posting_read(uncore, DG1_MSTR_TILE_INTR);
3854 }
3855 
3856 static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv)
3857 {
3858 	gen8_gt_irq_postinstall(to_gt(dev_priv));
3859 
3860 	spin_lock_irq(&dev_priv->irq_lock);
3861 	if (dev_priv->display_irqs_enabled)
3862 		vlv_display_irq_postinstall(dev_priv);
3863 	spin_unlock_irq(&dev_priv->irq_lock);
3864 
3865 	intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
3866 	intel_uncore_posting_read(&dev_priv->uncore, GEN8_MASTER_IRQ);
3867 }
3868 
3869 static void i8xx_irq_reset(struct drm_i915_private *dev_priv)
3870 {
3871 	struct intel_uncore *uncore = &dev_priv->uncore;
3872 
3873 	i9xx_pipestat_irq_reset(dev_priv);
3874 
3875 	gen2_irq_reset(uncore);
3876 	dev_priv->irq_mask = ~0u;
3877 }
3878 
3879 static void i8xx_irq_postinstall(struct drm_i915_private *dev_priv)
3880 {
3881 	struct intel_uncore *uncore = &dev_priv->uncore;
3882 	u16 enable_mask;
3883 
3884 	intel_uncore_write16(uncore,
3885 			     EMR,
3886 			     ~(I915_ERROR_PAGE_TABLE |
3887 			       I915_ERROR_MEMORY_REFRESH));
3888 
3889 	/* Unmask the interrupts that we always want on. */
3890 	dev_priv->irq_mask =
3891 		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3892 		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3893 		  I915_MASTER_ERROR_INTERRUPT);
3894 
3895 	enable_mask =
3896 		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3897 		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3898 		I915_MASTER_ERROR_INTERRUPT |
3899 		I915_USER_INTERRUPT;
3900 
3901 	gen2_irq_init(uncore, dev_priv->irq_mask, enable_mask);
3902 
3903 	/* Interrupt setup is already guaranteed to be single-threaded; this is
3904 	 * just to make the assert_spin_locked check happy. */
3905 	spin_lock_irq(&dev_priv->irq_lock);
3906 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3907 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
3908 	spin_unlock_irq(&dev_priv->irq_lock);
3909 }
3910 
3911 static void i8xx_error_irq_ack(struct drm_i915_private *i915,
3912 			       u16 *eir, u16 *eir_stuck)
3913 {
3914 	struct intel_uncore *uncore = &i915->uncore;
3915 	u16 emr;
3916 
3917 	*eir = intel_uncore_read16(uncore, EIR);
3918 
3919 	if (*eir)
3920 		intel_uncore_write16(uncore, EIR, *eir);
3921 
3922 	*eir_stuck = intel_uncore_read16(uncore, EIR);
3923 	if (*eir_stuck == 0)
3924 		return;
3925 
3926 	/*
3927 	 * Toggle all EMR bits to make sure we get an edge
3928 	 * in the ISR master error bit if we don't clear
3929 	 * all the EIR bits. Otherwise the edge triggered
3930 	 * IIR on i965/g4x wouldn't notice that an interrupt
3931 	 * is still pending. Also some EIR bits can't be
3932 	 * cleared except by handling the underlying error
3933 	 * (or by a GPU reset) so we mask any bit that
3934 	 * remains set.
3935 	 */
3936 	emr = intel_uncore_read16(uncore, EMR);
3937 	intel_uncore_write16(uncore, EMR, 0xffff);
3938 	intel_uncore_write16(uncore, EMR, emr | *eir_stuck);
3939 }
3940 
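/*
 * Concrete example of the EMR toggle above, with a made-up value: if EIR
 * still reads 0x0004 after the ack (bit 2 stuck), EMR is briefly set to
 * 0xffff (masking everything, which per the comment forces a fresh edge
 * on the ISR master error bit) and then restored to the old mask with
 * bit 2 additionally set, so the stuck error no longer re-asserts.
 */
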
3941 static void i8xx_error_irq_handler(struct drm_i915_private *dev_priv,
3942 				   u16 eir, u16 eir_stuck)
3943 {
3944 	drm_dbg(&dev_priv->drm, "Master Error: EIR 0x%04x\n", eir);
3945 
3946 	if (eir_stuck)
3947 		drm_dbg(&dev_priv->drm, "EIR stuck: 0x%04x, masked\n",
3948 			eir_stuck);
3949 }
3950 
3951 static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv,
3952 			       u32 *eir, u32 *eir_stuck)
3953 {
3954 	u32 emr;
3955 
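	/*
	 * EIR is write-1-to-clear: the no-op rmw below (clear nothing,
	 * set nothing) writes the value it just read straight back,
	 * acking every error bit that was set.
	 */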
3956 	*eir = intel_uncore_rmw(&dev_priv->uncore, EIR, 0, 0);
3957 
3958 	*eir_stuck = intel_uncore_read(&dev_priv->uncore, EIR);
3959 	if (*eir_stuck == 0)
3960 		return;
3961 
3962 	/*
3963 	 * Toggle all EMR bits to make sure we get an edge
3964 	 * in the ISR master error bit if we don't clear
3965 	 * all the EIR bits. Otherwise the edge triggered
3966 	 * IIR on i965/g4x wouldn't notice that an interrupt
3967 	 * is still pending. Also some EIR bits can't be
3968 	 * cleared except by handling the underlying error
3969 	 * (or by a GPU reset) so we mask any bit that
3970 	 * remains set.
3971 	 */
3972 	emr = intel_uncore_rmw(&dev_priv->uncore, EMR, ~0, 0xffffffff);
3973 	intel_uncore_write(&dev_priv->uncore, EMR, emr | *eir_stuck);
3974 }
3975 
3976 static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv,
3977 				   u32 eir, u32 eir_stuck)
3978 {
3979 	drm_dbg(&dev_priv->drm, "Master Error: EIR 0x%08x\n", eir);
3980 
3981 	if (eir_stuck)
3982 		drm_dbg(&dev_priv->drm, "EIR stuck: 0x%08x, masked\n",
3983 			eir_stuck);
3984 }
3985 
3986 static irqreturn_t i8xx_irq_handler(int irq, void *arg)
3987 {
3988 	struct drm_i915_private *dev_priv = arg;
3989 	irqreturn_t ret = IRQ_NONE;
3990 
3991 	if (!intel_irqs_enabled(dev_priv))
3992 		return IRQ_NONE;
3993 
3994 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
3995 	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
3996 
3997 	do {
3998 		u32 pipe_stats[I915_MAX_PIPES] = {};
3999 		u16 eir = 0, eir_stuck = 0;
4000 		u16 iir;
4001 
4002 		iir = intel_uncore_read16(&dev_priv->uncore, GEN2_IIR);
4003 		if (iir == 0)
4004 			break;
4005 
4006 		ret = IRQ_HANDLED;
4007 
4008 		/* Call regardless, as some status bits might not be
4009 		 * signalled in iir */
4010 		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
4011 
4012 		if (iir & I915_MASTER_ERROR_INTERRUPT)
4013 			i8xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
4014 
4015 		intel_uncore_write16(&dev_priv->uncore, GEN2_IIR, iir);
4016 
4017 		if (iir & I915_USER_INTERRUPT)
4018 			intel_engine_cs_irq(to_gt(dev_priv)->engine[RCS0], iir);
4019 
4020 		if (iir & I915_MASTER_ERROR_INTERRUPT)
4021 			i8xx_error_irq_handler(dev_priv, eir, eir_stuck);
4022 
4023 		i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats);
4024 	} while (0);
4025 
4026 	pmu_irq_stats(dev_priv, ret);
4027 
4028 	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
4029 
4030 	return ret;
4031 }
4032 
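/*
 * Note the ordering inside the loop above: GEN2_IIR is acked (written
 * back) before the individual events are handled, so a new interrupt
 * edge arriving while this batch is processed re-asserts IIR instead of
 * being swallowed by the ack. The same pattern repeats in the i915 and
 * i965 handlers below.
 */
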
4033 static void i915_irq_reset(struct drm_i915_private *dev_priv)
4034 {
4035 	struct intel_uncore *uncore = &dev_priv->uncore;
4036 
4037 	if (I915_HAS_HOTPLUG(dev_priv)) {
4038 		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4039 		intel_uncore_rmw(&dev_priv->uncore, PORT_HOTPLUG_STAT, 0, 0);
4040 	}
4041 
4042 	i9xx_pipestat_irq_reset(dev_priv);
4043 
4044 	GEN3_IRQ_RESET(uncore, GEN2_);
4045 	dev_priv->irq_mask = ~0u;
4046 }
4047 
4048 static void i915_irq_postinstall(struct drm_i915_private *dev_priv)
4049 {
4050 	struct intel_uncore *uncore = &dev_priv->uncore;
4051 	u32 enable_mask;
4052 
4053 	intel_uncore_write(uncore, EMR, ~(I915_ERROR_PAGE_TABLE |
4054 					  I915_ERROR_MEMORY_REFRESH));
4055 
4056 	/* Unmask the interrupts that we always want on. */
4057 	dev_priv->irq_mask =
4058 		~(I915_ASLE_INTERRUPT |
4059 		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4060 		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4061 		  I915_MASTER_ERROR_INTERRUPT);
4062 
4063 	enable_mask =
4064 		I915_ASLE_INTERRUPT |
4065 		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4066 		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4067 		I915_MASTER_ERROR_INTERRUPT |
4068 		I915_USER_INTERRUPT;
4069 
4070 	if (I915_HAS_HOTPLUG(dev_priv)) {
4071 		/* Enable in IER... */
4072 		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
4073 		/* and unmask in IMR */
4074 		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
4075 	}
4076 
4077 	GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);
4078 
4079 	/* Interrupt setup is already guaranteed to be single-threaded; this is
4080 	 * just to make the assert_spin_locked check happy. */
4081 	spin_lock_irq(&dev_priv->irq_lock);
4082 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4083 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4084 	spin_unlock_irq(&dev_priv->irq_lock);
4085 
4086 	i915_enable_asle_pipestat(dev_priv);
4087 }
4088 
4089 static irqreturn_t i915_irq_handler(int irq, void *arg)
4090 {
4091 	struct drm_i915_private *dev_priv = arg;
4092 	irqreturn_t ret = IRQ_NONE;
4093 
4094 	if (!intel_irqs_enabled(dev_priv))
4095 		return IRQ_NONE;
4096 
4097 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
4098 	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
4099 
4100 	do {
4101 		u32 pipe_stats[I915_MAX_PIPES] = {};
4102 		u32 eir = 0, eir_stuck = 0;
4103 		u32 hotplug_status = 0;
4104 		u32 iir;
4105 
4106 		iir = intel_uncore_read(&dev_priv->uncore, GEN2_IIR);
4107 		if (iir == 0)
4108 			break;
4109 
4110 		ret = IRQ_HANDLED;
4111 
4112 		if (I915_HAS_HOTPLUG(dev_priv) &&
4113 		    iir & I915_DISPLAY_PORT_INTERRUPT)
4114 			hotplug_status = i9xx_hpd_irq_ack(dev_priv);
4115 
4116 		/* Call regardless, as some status bits might not be
4117 		 * signalled in iir */
4118 		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
4119 
4120 		if (iir & I915_MASTER_ERROR_INTERRUPT)
4121 			i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
4122 
4123 		intel_uncore_write(&dev_priv->uncore, GEN2_IIR, iir);
4124 
4125 		if (iir & I915_USER_INTERRUPT)
4126 			intel_engine_cs_irq(to_gt(dev_priv)->engine[RCS0], iir);
4127 
4128 		if (iir & I915_MASTER_ERROR_INTERRUPT)
4129 			i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
4130 
4131 		if (hotplug_status)
4132 			i9xx_hpd_irq_handler(dev_priv, hotplug_status);
4133 
4134 		i915_pipestat_irq_handler(dev_priv, iir, pipe_stats);
4135 	} while (0);
4136 
4137 	pmu_irq_stats(dev_priv, ret);
4138 
4139 	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
4140 
4141 	return ret;
4142 }
4143 
4144 static void i965_irq_reset(struct drm_i915_private *dev_priv)
4145 {
4146 	struct intel_uncore *uncore = &dev_priv->uncore;
4147 
4148 	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4149 	intel_uncore_rmw(uncore, PORT_HOTPLUG_STAT, 0, 0);
4150 
4151 	i9xx_pipestat_irq_reset(dev_priv);
4152 
4153 	GEN3_IRQ_RESET(uncore, GEN2_);
4154 	dev_priv->irq_mask = ~0u;
4155 }
4156 
4157 static void i965_irq_postinstall(struct drm_i915_private *dev_priv)
4158 {
4159 	struct intel_uncore *uncore = &dev_priv->uncore;
4160 	u32 enable_mask;
4161 	u32 error_mask;
4162 
4163 	/*
4164 	 * Enable some error detection; note that the instruction error mask
4165 	 * bit is reserved, so we leave it masked.
4166 	 */
4167 	if (IS_G4X(dev_priv)) {
4168 		error_mask = ~(GM45_ERROR_PAGE_TABLE |
4169 			       GM45_ERROR_MEM_PRIV |
4170 			       GM45_ERROR_CP_PRIV |
4171 			       I915_ERROR_MEMORY_REFRESH);
4172 	} else {
4173 		error_mask = ~(I915_ERROR_PAGE_TABLE |
4174 			       I915_ERROR_MEMORY_REFRESH);
4175 	}
4176 	intel_uncore_write(uncore, EMR, error_mask);
4177 
4178 	/* Unmask the interrupts that we always want on. */
4179 	dev_priv->irq_mask =
4180 		~(I915_ASLE_INTERRUPT |
4181 		  I915_DISPLAY_PORT_INTERRUPT |
4182 		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4183 		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4184 		  I915_MASTER_ERROR_INTERRUPT);
4185 
4186 	enable_mask =
4187 		I915_ASLE_INTERRUPT |
4188 		I915_DISPLAY_PORT_INTERRUPT |
4189 		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4190 		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4191 		I915_MASTER_ERROR_INTERRUPT |
4192 		I915_USER_INTERRUPT;
4193 
4194 	if (IS_G4X(dev_priv))
4195 		enable_mask |= I915_BSD_USER_INTERRUPT;
4196 
4197 	GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);
4198 
4199 	/* Interrupt setup is already guaranteed to be single-threaded; this is
4200 	 * just to make the assert_spin_locked check happy. */
4201 	spin_lock_irq(&dev_priv->irq_lock);
4202 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
4203 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4204 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4205 	spin_unlock_irq(&dev_priv->irq_lock);
4206 
4207 	i915_enable_asle_pipestat(dev_priv);
4208 }
4209 
4210 static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
4211 {
4212 	u32 hotplug_en;
4213 
4214 	lockdep_assert_held(&dev_priv->irq_lock);
4215 
4216 	/* Note HDMI and DP share hotplug bits */
4217 	/* enable bits are the same for all generations */
4218 	hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915);
4219 	/* Programming the CRT detection parameters tends to generate a
4220 	 * spurious hotplug event about three seconds later. So just do
4221 	 * it once.
4222 	 */
4223 	if (IS_G4X(dev_priv))
4224 		hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
4225 	hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
4226 
4227 	/* Ignore TV since it's buggy */
4228 	i915_hotplug_interrupt_update_locked(dev_priv,
4229 					     HOTPLUG_INT_EN_MASK |
4230 					     CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
4231 					     CRT_HOTPLUG_ACTIVATION_PERIOD_64,
4232 					     hotplug_en);
4233 }
4234 
4235 static irqreturn_t i965_irq_handler(int irq, void *arg)
4236 {
4237 	struct drm_i915_private *dev_priv = arg;
4238 	irqreturn_t ret = IRQ_NONE;
4239 
4240 	if (!intel_irqs_enabled(dev_priv))
4241 		return IRQ_NONE;
4242 
4243 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
4244 	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
4245 
4246 	do {
4247 		u32 pipe_stats[I915_MAX_PIPES] = {};
4248 		u32 eir = 0, eir_stuck = 0;
4249 		u32 hotplug_status = 0;
4250 		u32 iir;
4251 
4252 		iir = intel_uncore_read(&dev_priv->uncore, GEN2_IIR);
4253 		if (iir == 0)
4254 			break;
4255 
4256 		ret = IRQ_HANDLED;
4257 
4258 		if (iir & I915_DISPLAY_PORT_INTERRUPT)
4259 			hotplug_status = i9xx_hpd_irq_ack(dev_priv);
4260 
4261 		/* Call regardless, as some status bits might not be
4262 		 * signalled in iir */
4263 		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
4264 
4265 		if (iir & I915_MASTER_ERROR_INTERRUPT)
4266 			i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
4267 
4268 		intel_uncore_write(&dev_priv->uncore, GEN2_IIR, iir);
4269 
4270 		if (iir & I915_USER_INTERRUPT)
4271 			intel_engine_cs_irq(to_gt(dev_priv)->engine[RCS0],
4272 					    iir);
4273 
4274 		if (iir & I915_BSD_USER_INTERRUPT)
4275 			intel_engine_cs_irq(to_gt(dev_priv)->engine[VCS0],
4276 					    iir >> 25);
4277 
4278 		if (iir & I915_MASTER_ERROR_INTERRUPT)
4279 			i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
4280 
4281 		if (hotplug_status)
4282 			i9xx_hpd_irq_handler(dev_priv, hotplug_status);
4283 
4284 		i965_pipestat_irq_handler(dev_priv, iir, pipe_stats);
4285 	} while (0);
4286 
4287 	pmu_irq_stats(dev_priv, ret);
4288 
4289 	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
4290 
4291 	return ret;
4292 }
4293 
4294 struct intel_hotplug_funcs {
4295 	void (*hpd_irq_setup)(struct drm_i915_private *i915);
4296 };
4297 
4298 #define HPD_FUNCS(platform)					 \
4299 static const struct intel_hotplug_funcs platform##_hpd_funcs = { \
4300 	.hpd_irq_setup = platform##_hpd_irq_setup,		 \
4301 }
4302 
4303 HPD_FUNCS(i915);
4304 HPD_FUNCS(dg1);
4305 HPD_FUNCS(gen11);
4306 HPD_FUNCS(bxt);
4307 HPD_FUNCS(icp);
4308 HPD_FUNCS(spt);
4309 HPD_FUNCS(ilk);
4310 #undef HPD_FUNCS
4311 
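/*
 * For reference, HPD_FUNCS(i915) above expands to
 *
 *	static const struct intel_hotplug_funcs i915_hpd_funcs = {
 *		.hpd_irq_setup = i915_hpd_irq_setup,
 *	};
 *
 * and likewise for the other platforms.
 */
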
4312 void intel_hpd_irq_setup(struct drm_i915_private *i915)
4313 {
4314 	if (i915->display_irqs_enabled && i915->display.funcs.hotplug)
4315 		i915->display.funcs.hotplug->hpd_irq_setup(i915);
4316 }
4317 
4318 /**
4319  * intel_irq_init - initializes irq support
4320  * @dev_priv: i915 device instance
4321  *
4322  * This function initializes all the irq support including work items, timers
4323  * and all the vtables. It does not set up the interrupt itself, though.
4324  */
4325 void intel_irq_init(struct drm_i915_private *dev_priv)
4326 {
4327 	int i;
4328 
4329 	INIT_WORK(&dev_priv->l3_parity.error_work, ivb_parity_work);
4330 	for (i = 0; i < MAX_L3_SLICES; ++i)
4331 		dev_priv->l3_parity.remap_info[i] = NULL;
4332 
4333 	/* pre-gen11 the GuC irq bits are in the upper 16 bits of the PM reg */
4334 	if (HAS_GT_UC(dev_priv) && GRAPHICS_VER(dev_priv) < 11)
4335 		to_gt(dev_priv)->pm_guc_events = GUC_INTR_GUC2HOST << 16;
4336 
4337 	if (!HAS_DISPLAY(dev_priv))
4338 		return;
4339 
4340 	intel_hpd_init_pins(dev_priv);
4341 
4342 	intel_hpd_init_early(dev_priv);
4343 
4344 	dev_priv->drm.vblank_disable_immediate = true;
4345 
4346 	/* Most platforms treat the display irq block as an always-on
4347 	 * power domain. vlv/chv can disable it at runtime and need
4348 	 * special care to avoid writing any of the display block registers
4349 	 * outside of the power domain. On those platforms we therefore
4350 	 * defer setting up the display irqs to runtime pm.
4351 	 */
4352 	dev_priv->display_irqs_enabled = true;
4353 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
4354 		dev_priv->display_irqs_enabled = false;
4355 
4356 	if (HAS_GMCH(dev_priv)) {
4357 		if (I915_HAS_HOTPLUG(dev_priv))
4358 			dev_priv->display.funcs.hotplug = &i915_hpd_funcs;
4359 	} else {
4360 		if (HAS_PCH_DG2(dev_priv))
4361 			dev_priv->display.funcs.hotplug = &icp_hpd_funcs;
4362 		else if (HAS_PCH_DG1(dev_priv))
4363 			dev_priv->display.funcs.hotplug = &dg1_hpd_funcs;
4364 		else if (DISPLAY_VER(dev_priv) >= 11)
4365 			dev_priv->display.funcs.hotplug = &gen11_hpd_funcs;
4366 		else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
4367 			dev_priv->display.funcs.hotplug = &bxt_hpd_funcs;
4368 		else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
4369 			dev_priv->display.funcs.hotplug = &icp_hpd_funcs;
4370 		else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
4371 			dev_priv->display.funcs.hotplug = &spt_hpd_funcs;
4372 		else
4373 			dev_priv->display.funcs.hotplug = &ilk_hpd_funcs;
4374 	}
4375 }
4376 
4377 /**
4378  * intel_irq_fini - deinitializes IRQ support
4379  * @i915: i915 device instance
4380  *
4381  * This function deinitializes all the IRQ support.
4382  */
4383 void intel_irq_fini(struct drm_i915_private *i915)
4384 {
4385 	int i;
4386 
4387 	for (i = 0; i < MAX_L3_SLICES; ++i)
4388 		kfree(i915->l3_parity.remap_info[i]);
4389 }
4390 
4391 static irq_handler_t intel_irq_handler(struct drm_i915_private *dev_priv)
4392 {
4393 	if (HAS_GMCH(dev_priv)) {
4394 		if (IS_CHERRYVIEW(dev_priv))
4395 			return cherryview_irq_handler;
4396 		else if (IS_VALLEYVIEW(dev_priv))
4397 			return valleyview_irq_handler;
4398 		else if (GRAPHICS_VER(dev_priv) == 4)
4399 			return i965_irq_handler;
4400 		else if (GRAPHICS_VER(dev_priv) == 3)
4401 			return i915_irq_handler;
4402 		else
4403 			return i8xx_irq_handler;
4404 	} else {
4405 		if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
4406 			return dg1_irq_handler;
4407 		else if (GRAPHICS_VER(dev_priv) >= 11)
4408 			return gen11_irq_handler;
4409 		else if (GRAPHICS_VER(dev_priv) >= 8)
4410 			return gen8_irq_handler;
4411 		else
4412 			return ilk_irq_handler;
4413 	}
4414 }
4415 
4416 static void intel_irq_reset(struct drm_i915_private *dev_priv)
4417 {
4418 	if (HAS_GMCH(dev_priv)) {
4419 		if (IS_CHERRYVIEW(dev_priv))
4420 			cherryview_irq_reset(dev_priv);
4421 		else if (IS_VALLEYVIEW(dev_priv))
4422 			valleyview_irq_reset(dev_priv);
4423 		else if (GRAPHICS_VER(dev_priv) == 4)
4424 			i965_irq_reset(dev_priv);
4425 		else if (GRAPHICS_VER(dev_priv) == 3)
4426 			i915_irq_reset(dev_priv);
4427 		else
4428 			i8xx_irq_reset(dev_priv);
4429 	} else {
4430 		if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
4431 			dg1_irq_reset(dev_priv);
4432 		else if (GRAPHICS_VER(dev_priv) >= 11)
4433 			gen11_irq_reset(dev_priv);
4434 		else if (GRAPHICS_VER(dev_priv) >= 8)
4435 			gen8_irq_reset(dev_priv);
4436 		else
4437 			ilk_irq_reset(dev_priv);
4438 	}
4439 }
4440 
4441 static void intel_irq_postinstall(struct drm_i915_private *dev_priv)
4442 {
4443 	if (HAS_GMCH(dev_priv)) {
4444 		if (IS_CHERRYVIEW(dev_priv))
4445 			cherryview_irq_postinstall(dev_priv);
4446 		else if (IS_VALLEYVIEW(dev_priv))
4447 			valleyview_irq_postinstall(dev_priv);
4448 		else if (GRAPHICS_VER(dev_priv) == 4)
4449 			i965_irq_postinstall(dev_priv);
4450 		else if (GRAPHICS_VER(dev_priv) == 3)
4451 			i915_irq_postinstall(dev_priv);
4452 		else
4453 			i8xx_irq_postinstall(dev_priv);
4454 	} else {
4455 		if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
4456 			dg1_irq_postinstall(dev_priv);
4457 		else if (GRAPHICS_VER(dev_priv) >= 11)
4458 			gen11_irq_postinstall(dev_priv);
4459 		else if (GRAPHICS_VER(dev_priv) >= 8)
4460 			gen8_irq_postinstall(dev_priv);
4461 		else
4462 			ilk_irq_postinstall(dev_priv);
4463 	}
4464 }
4465 
4466 /**
4467  * intel_irq_install - enables the hardware interrupt
4468  * @dev_priv: i915 device instance
4469  *
4470  * This function enables the hardware interrupt handling, but leaves hotplug
4471  * handling disabled. It is called after intel_irq_init().
4472  *
4473  * In the driver load and resume code we need working interrupts in a few places
4474  * but don't want to deal with the hassle of concurrent probe and hotplug
4475  * workers. Hence the split into this two-stage approach.
4476  */
4477 int intel_irq_install(struct drm_i915_private *dev_priv)
4478 {
4479 	int irq = to_pci_dev(dev_priv->drm.dev)->irq;
4480 	int ret;
4481 
4482 	/*
4483 	 * We enable some interrupt sources in our postinstall hooks, so mark
4484 	 * interrupts as enabled _before_ actually enabling them to avoid
4485 	 * special cases in our ordering checks.
4486 	 */
4487 	dev_priv->runtime_pm.irqs_enabled = true;
4488 
4489 	dev_priv->irq_enabled = true;
4490 
4491 	intel_irq_reset(dev_priv);
4492 
4493 	ret = request_irq(irq, intel_irq_handler(dev_priv),
4494 			  IRQF_SHARED, DRIVER_NAME, dev_priv);
4495 	if (ret < 0) {
4496 		dev_priv->irq_enabled = false;
4497 		return ret;
4498 	}
4499 
4500 	intel_irq_postinstall(dev_priv);
4501 
4502 	return ret;
4503 }
4504 
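/*
 * Hypothetical probe-path sketch of the two-stage approach described
 * above (error handling elided):
 *
 *	intel_irq_init(i915);		// vtables and work items, no HW access
 *	ret = intel_irq_install(i915);	// reset, request_irq(), postinstall
 *	if (ret)
 *		goto err;
 *	...
 *	intel_irq_uninstall(i915);	// on driver remove
 */
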
4505 /**
4506  * intel_irq_uninstall - finalizes all irq handling
4507  * @dev_priv: i915 device instance
4508  *
4509  * This stops interrupt and hotplug handling, and unregisters and frees all
4510  * resources acquired in the init functions.
4511  */
4512 void intel_irq_uninstall(struct drm_i915_private *dev_priv)
4513 {
4514 	int irq = to_pci_dev(dev_priv->drm.dev)->irq;
4515 
4516 	/*
4517 	 * FIXME we can get called twice during driver probe
4518 	 * error handling as well as during driver remove due to
4519 	 * intel_modeset_driver_remove() calling us out of sequence.
4520 	 * Would be nice if it didn't do that...
4521 	 */
4522 	if (!dev_priv->irq_enabled)
4523 		return;
4524 
4525 	dev_priv->irq_enabled = false;
4526 
4527 	intel_irq_reset(dev_priv);
4528 
4529 	free_irq(irq, dev_priv);
4530 
4531 	intel_hpd_cancel_work(dev_priv);
4532 	dev_priv->runtime_pm.irqs_enabled = false;
4533 }
4534 
4535 /**
4536  * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
4537  * @dev_priv: i915 device instance
4538  *
4539  * This function is used to disable interrupts at runtime, both in the runtime
4540  * pm and the system suspend/resume code.
4541  */
4542 void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
4543 {
4544 	intel_irq_reset(dev_priv);
4545 	dev_priv->runtime_pm.irqs_enabled = false;
4546 	intel_synchronize_irq(dev_priv);
4547 }
4548 
4549 /**
4550  * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
4551  * @dev_priv: i915 device instance
4552  *
4553  * This function is used to enable interrupts at runtime, both in the runtime
4554  * pm and the system suspend/resume code.
4555  */
4556 void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
4557 {
4558 	dev_priv->runtime_pm.irqs_enabled = true;
4559 	intel_irq_reset(dev_priv);
4560 	intel_irq_postinstall(dev_priv);
4561 }
4562 
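/*
 * Illustrative suspend/resume pairing for the two helpers above
 * (hypothetical caller, based on their kerneldoc):
 *
 *	intel_runtime_pm_disable_interrupts(i915);	// suspend side
 *	...
 *	intel_runtime_pm_enable_interrupts(i915);	// resume side
 *
 * Note that the enable side re-runs the full reset + postinstall
 * sequence rather than restoring any saved interrupt state.
 */
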
4563 bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
4564 {
4565 	return dev_priv->runtime_pm.irqs_enabled;
4566 }
4567 
4568 void intel_synchronize_irq(struct drm_i915_private *i915)
4569 {
4570 	synchronize_irq(to_pci_dev(i915->drm.dev)->irq);
4571 }
4572 
4573 void intel_synchronize_hardirq(struct drm_i915_private *i915)
4574 {
4575 	synchronize_hardirq(to_pci_dev(i915->drm.dev)->irq);
4576 }
4577