xref: /openbmc/linux/drivers/gpu/drm/i915/i915_irq.c (revision ba61bb17)
1 /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
2  */
3 /*
4  * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
5  * All Rights Reserved.
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a
8  * copy of this software and associated documentation files (the
9  * "Software"), to deal in the Software without restriction, including
10  * without limitation the rights to use, copy, modify, merge, publish,
11  * distribute, sub license, and/or sell copies of the Software, and to
12  * permit persons to whom the Software is furnished to do so, subject to
13  * the following conditions:
14  *
15  * The above copyright notice and this permission notice (including the
16  * next paragraph) shall be included in all copies or substantial portions
17  * of the Software.
18  *
19  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22  * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26  *
27  */
28 
29 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30 
31 #include <linux/sysrq.h>
32 #include <linux/slab.h>
33 #include <linux/circ_buf.h>
34 #include <drm/drmP.h>
35 #include <drm/i915_drm.h>
36 #include "i915_drv.h"
37 #include "i915_trace.h"
38 #include "intel_drv.h"
39 
40 /**
41  * DOC: interrupt handling
42  *
43  * These functions provide the basic support for enabling and disabling
44  * interrupt handling. There's a lot more functionality in i915_irq.c
45  * and related files, but that will be described in separate chapters.
46  */
47 
48 static const u32 hpd_ilk[HPD_NUM_PINS] = {
49 	[HPD_PORT_A] = DE_DP_A_HOTPLUG,
50 };
51 
52 static const u32 hpd_ivb[HPD_NUM_PINS] = {
53 	[HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
54 };
55 
56 static const u32 hpd_bdw[HPD_NUM_PINS] = {
57 	[HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG,
58 };
59 
60 static const u32 hpd_ibx[HPD_NUM_PINS] = {
61 	[HPD_CRT] = SDE_CRT_HOTPLUG,
62 	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
63 	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
64 	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
65 	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
66 };
67 
68 static const u32 hpd_cpt[HPD_NUM_PINS] = {
69 	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
70 	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
71 	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
72 	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
73 	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
74 };
75 
76 static const u32 hpd_spt[HPD_NUM_PINS] = {
77 	[HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
78 	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
79 	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
80 	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
81 	[HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT
82 };
83 
84 static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
85 	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
86 	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
87 	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
88 	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
89 	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
90 	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
91 };
92 
93 static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
94 	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
95 	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
96 	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
97 	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
98 	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
99 	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
100 };
101 
102 static const u32 hpd_status_i915[HPD_NUM_PINS] = {
103 	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
104 	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
105 	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
106 	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
107 	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
108 	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
109 };
110 
111 /* BXT hpd list */
112 static const u32 hpd_bxt[HPD_NUM_PINS] = {
113 	[HPD_PORT_A] = BXT_DE_PORT_HP_DDIA,
114 	[HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
115 	[HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
116 };
117 
118 static const u32 hpd_gen11[HPD_NUM_PINS] = {
119 	[HPD_PORT_C] = GEN11_TC1_HOTPLUG | GEN11_TBT1_HOTPLUG,
120 	[HPD_PORT_D] = GEN11_TC2_HOTPLUG | GEN11_TBT2_HOTPLUG,
121 	[HPD_PORT_E] = GEN11_TC3_HOTPLUG | GEN11_TBT3_HOTPLUG,
122 	[HPD_PORT_F] = GEN11_TC4_HOTPLUG | GEN11_TBT4_HOTPLUG
123 };
124 
125 /* IIR can theoretically queue up two events. Be paranoid. */
126 #define GEN8_IRQ_RESET_NDX(type, which) do { \
127 	I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
128 	POSTING_READ(GEN8_##type##_IMR(which)); \
129 	I915_WRITE(GEN8_##type##_IER(which), 0); \
130 	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
131 	POSTING_READ(GEN8_##type##_IIR(which)); \
132 	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
133 	POSTING_READ(GEN8_##type##_IIR(which)); \
134 } while (0)
135 
136 #define GEN3_IRQ_RESET(type) do { \
137 	I915_WRITE(type##IMR, 0xffffffff); \
138 	POSTING_READ(type##IMR); \
139 	I915_WRITE(type##IER, 0); \
140 	I915_WRITE(type##IIR, 0xffffffff); \
141 	POSTING_READ(type##IIR); \
142 	I915_WRITE(type##IIR, 0xffffffff); \
143 	POSTING_READ(type##IIR); \
144 } while (0)
145 
146 #define GEN2_IRQ_RESET(type) do { \
147 	I915_WRITE16(type##IMR, 0xffff); \
148 	POSTING_READ16(type##IMR); \
149 	I915_WRITE16(type##IER, 0); \
150 	I915_WRITE16(type##IIR, 0xffff); \
151 	POSTING_READ16(type##IIR); \
152 	I915_WRITE16(type##IIR, 0xffff); \
153 	POSTING_READ16(type##IIR); \
154 } while (0)
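
/*
 * Illustrative expansion (compiled out, not part of the driver): for the
 * display-engine register block (DEIMR/DEIER/DEIIR), GEN3_IRQ_RESET(DE)
 * boils down to roughly the sequence below -- mask everything, disable all
 * enables, then clear IIR twice because it can queue up two events.
 */
#if 0
	I915_WRITE(DEIMR, 0xffffffff);
	POSTING_READ(DEIMR);
	I915_WRITE(DEIER, 0);
	I915_WRITE(DEIIR, 0xffffffff);
	POSTING_READ(DEIIR);
	I915_WRITE(DEIIR, 0xffffffff);
	POSTING_READ(DEIIR);
#endif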
155 
156 /*
157  * We should clear IMR at preinstall/uninstall, and just check at postinstall.
158  */
159 static void gen3_assert_iir_is_zero(struct drm_i915_private *dev_priv,
160 				    i915_reg_t reg)
161 {
162 	u32 val = I915_READ(reg);
163 
164 	if (val == 0)
165 		return;
166 
167 	WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
168 	     i915_mmio_reg_offset(reg), val);
169 	I915_WRITE(reg, 0xffffffff);
170 	POSTING_READ(reg);
171 	I915_WRITE(reg, 0xffffffff);
172 	POSTING_READ(reg);
173 }
174 
175 static void gen2_assert_iir_is_zero(struct drm_i915_private *dev_priv,
176 				    i915_reg_t reg)
177 {
178 	u16 val = I915_READ16(reg);
179 
180 	if (val == 0)
181 		return;
182 
183 	WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
184 	     i915_mmio_reg_offset(reg), val);
185 	I915_WRITE16(reg, 0xffff);
186 	POSTING_READ16(reg);
187 	I915_WRITE16(reg, 0xffff);
188 	POSTING_READ16(reg);
189 }
190 
191 #define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
192 	gen3_assert_iir_is_zero(dev_priv, GEN8_##type##_IIR(which)); \
193 	I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
194 	I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
195 	POSTING_READ(GEN8_##type##_IMR(which)); \
196 } while (0)
197 
198 #define GEN3_IRQ_INIT(type, imr_val, ier_val) do { \
199 	gen3_assert_iir_is_zero(dev_priv, type##IIR); \
200 	I915_WRITE(type##IER, (ier_val)); \
201 	I915_WRITE(type##IMR, (imr_val)); \
202 	POSTING_READ(type##IMR); \
203 } while (0)
204 
205 #define GEN2_IRQ_INIT(type, imr_val, ier_val) do { \
206 	gen2_assert_iir_is_zero(dev_priv, type##IIR); \
207 	I915_WRITE16(type##IER, (ier_val)); \
208 	I915_WRITE16(type##IMR, (imr_val)); \
209 	POSTING_READ16(type##IMR); \
210 } while (0)
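
/*
 * Illustrative pairing (compiled out, not part of the driver): per the
 * comment above, a register block is reset at preinstall/uninstall time and
 * initialized at postinstall time, where the assert catches any event that
 * snuck in between the two steps.  display_mask/extra_mask are placeholder
 * names here.
 */
#if 0
	/* irq_preinstall / irq_uninstall */
	GEN3_IRQ_RESET(DE);

	/* irq_postinstall: IIR asserted to be zero, then IER/IMR programmed */
	GEN3_IRQ_INIT(DE, ~display_mask, display_mask | extra_mask);
#endif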
211 
212 static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
213 static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
214 
215 /* For display hotplug interrupt */
216 static inline void
217 i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
218 				     uint32_t mask,
219 				     uint32_t bits)
220 {
221 	uint32_t val;
222 
223 	lockdep_assert_held(&dev_priv->irq_lock);
224 	WARN_ON(bits & ~mask);
225 
226 	val = I915_READ(PORT_HOTPLUG_EN);
227 	val &= ~mask;
228 	val |= bits;
229 	I915_WRITE(PORT_HOTPLUG_EN, val);
230 }
231 
232 /**
233  * i915_hotplug_interrupt_update - update hotplug interrupt enable
234  * @dev_priv: driver private
235  * @mask: bits to update
236  * @bits: bits to enable
237  * NOTE: the HPD enable bits are modified both inside and outside
238  * of an interrupt context. To prevent the read-modify-write cycles
239  * from interfering, these bits are protected by a spinlock. Since this
240  * function is usually not called from a context where the lock is
241  * held already, this function acquires the lock itself. A non-locking
242  * version is also available.
243  */
244 void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
245 				   uint32_t mask,
246 				   uint32_t bits)
247 {
248 	spin_lock_irq(&dev_priv->irq_lock);
249 	i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
250 	spin_unlock_irq(&dev_priv->irq_lock);
251 }
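
/*
 * Illustrative usage (compiled out): with the mask/bits convention above,
 * enabling a single HPD bit and clearing all of them would look roughly
 * like this (CRT_HOTPLUG_INT_EN is just an example bit):
 */
#if 0
	/* enable only the CRT hotplug interrupt */
	i915_hotplug_interrupt_update(dev_priv, CRT_HOTPLUG_INT_EN,
				      CRT_HOTPLUG_INT_EN);

	/* clear every HPD enable bit covered by the mask */
	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
#endif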
252 
253 static u32
254 gen11_gt_engine_identity(struct drm_i915_private * const i915,
255 			 const unsigned int bank, const unsigned int bit);
256 
257 bool gen11_reset_one_iir(struct drm_i915_private * const i915,
258 			 const unsigned int bank,
259 			 const unsigned int bit)
260 {
261 	void __iomem * const regs = i915->regs;
262 	u32 dw;
263 
264 	lockdep_assert_held(&i915->irq_lock);
265 
266 	dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
267 	if (dw & BIT(bit)) {
268 		/*
269 		 * According to the BSpec, DW_IIR bits cannot be cleared without
270 		 * first servicing the Selector & Shared IIR registers.
271 		 */
272 		gen11_gt_engine_identity(i915, bank, bit);
273 
274 		/*
275 		 * We locked GT INT DW by reading it. If we want to (try
276 		 * to) recover from this successfully, we need to clear
277 		 * our bit, otherwise we are locking the register for
278 		 * everybody.
279 		 */
280 		raw_reg_write(regs, GEN11_GT_INTR_DW(bank), BIT(bit));
281 
282 		return true;
283 	}
284 
285 	return false;
286 }
287 
288 /**
289  * ilk_update_display_irq - update DEIMR
290  * @dev_priv: driver private
291  * @interrupt_mask: mask of interrupt bits to update
292  * @enabled_irq_mask: mask of interrupt bits to enable
293  */
294 void ilk_update_display_irq(struct drm_i915_private *dev_priv,
295 			    uint32_t interrupt_mask,
296 			    uint32_t enabled_irq_mask)
297 {
298 	uint32_t new_val;
299 
300 	lockdep_assert_held(&dev_priv->irq_lock);
301 
302 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
303 
304 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
305 		return;
306 
307 	new_val = dev_priv->irq_mask;
308 	new_val &= ~interrupt_mask;
309 	new_val |= (~enabled_irq_mask & interrupt_mask);
310 
311 	if (new_val != dev_priv->irq_mask) {
312 		dev_priv->irq_mask = new_val;
313 		I915_WRITE(DEIMR, dev_priv->irq_mask);
314 		POSTING_READ(DEIMR);
315 	}
316 }
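
/*
 * Illustrative sketch (compiled out, not part of this file): callers
 * usually wrap the two-mask convention in trivial enable/disable helpers
 * along these lines.
 */
#if 0
static inline void ilk_enable_display_irq(struct drm_i915_private *dev_priv,
					  uint32_t bits)
{
	ilk_update_display_irq(dev_priv, bits, bits); /* unmask bits */
}

static inline void ilk_disable_display_irq(struct drm_i915_private *dev_priv,
					   uint32_t bits)
{
	ilk_update_display_irq(dev_priv, bits, 0); /* mask bits */
}
#endif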
317 
318 /**
319  * ilk_update_gt_irq - update GTIMR
320  * @dev_priv: driver private
321  * @interrupt_mask: mask of interrupt bits to update
322  * @enabled_irq_mask: mask of interrupt bits to enable
323  */
324 static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
325 			      uint32_t interrupt_mask,
326 			      uint32_t enabled_irq_mask)
327 {
328 	lockdep_assert_held(&dev_priv->irq_lock);
329 
330 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
331 
332 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
333 		return;
334 
335 	dev_priv->gt_irq_mask &= ~interrupt_mask;
336 	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
337 	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
338 }
339 
340 void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
341 {
342 	ilk_update_gt_irq(dev_priv, mask, mask);
343 	POSTING_READ_FW(GTIMR);
344 }
345 
346 void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
347 {
348 	ilk_update_gt_irq(dev_priv, mask, 0);
349 }
350 
351 static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv)
352 {
353 	WARN_ON_ONCE(INTEL_GEN(dev_priv) >= 11);
354 
355 	return INTEL_GEN(dev_priv) >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
356 }
357 
358 static i915_reg_t gen6_pm_imr(struct drm_i915_private *dev_priv)
359 {
360 	if (INTEL_GEN(dev_priv) >= 11)
361 		return GEN11_GPM_WGBOXPERF_INTR_MASK;
362 	else if (INTEL_GEN(dev_priv) >= 8)
363 		return GEN8_GT_IMR(2);
364 	else
365 		return GEN6_PMIMR;
366 }
367 
368 static i915_reg_t gen6_pm_ier(struct drm_i915_private *dev_priv)
369 {
370 	if (INTEL_GEN(dev_priv) >= 11)
371 		return GEN11_GPM_WGBOXPERF_INTR_ENABLE;
372 	else if (INTEL_GEN(dev_priv) >= 8)
373 		return GEN8_GT_IER(2);
374 	else
375 		return GEN6_PMIER;
376 }
377 
378 /**
379  * snb_update_pm_irq - update GEN6_PMIMR
380  * @dev_priv: driver private
381  * @interrupt_mask: mask of interrupt bits to update
382  * @enabled_irq_mask: mask of interrupt bits to enable
383  */
384 static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
385 			      uint32_t interrupt_mask,
386 			      uint32_t enabled_irq_mask)
387 {
388 	uint32_t new_val;
389 
390 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
391 
392 	lockdep_assert_held(&dev_priv->irq_lock);
393 
394 	new_val = dev_priv->pm_imr;
395 	new_val &= ~interrupt_mask;
396 	new_val |= (~enabled_irq_mask & interrupt_mask);
397 
398 	if (new_val != dev_priv->pm_imr) {
399 		dev_priv->pm_imr = new_val;
400 		I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_imr);
401 		POSTING_READ(gen6_pm_imr(dev_priv));
402 	}
403 }
404 
405 void gen6_unmask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
406 {
407 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
408 		return;
409 
410 	snb_update_pm_irq(dev_priv, mask, mask);
411 }
412 
413 static void __gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
414 {
415 	snb_update_pm_irq(dev_priv, mask, 0);
416 }
417 
418 void gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
419 {
420 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
421 		return;
422 
423 	__gen6_mask_pm_irq(dev_priv, mask);
424 }
425 
426 static void gen6_reset_pm_iir(struct drm_i915_private *dev_priv, u32 reset_mask)
427 {
428 	i915_reg_t reg = gen6_pm_iir(dev_priv);
429 
430 	lockdep_assert_held(&dev_priv->irq_lock);
431 
432 	I915_WRITE(reg, reset_mask);
433 	I915_WRITE(reg, reset_mask);
434 	POSTING_READ(reg);
435 }
436 
437 static void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, u32 enable_mask)
438 {
439 	lockdep_assert_held(&dev_priv->irq_lock);
440 
441 	dev_priv->pm_ier |= enable_mask;
442 	I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->pm_ier);
443 	gen6_unmask_pm_irq(dev_priv, enable_mask);
444 	/* unmask_pm_irq provides an implicit barrier (POSTING_READ) */
445 }
446 
447 static void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, u32 disable_mask)
448 {
449 	lockdep_assert_held(&dev_priv->irq_lock);
450 
451 	dev_priv->pm_ier &= ~disable_mask;
452 	__gen6_mask_pm_irq(dev_priv, disable_mask);
453 	I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->pm_ier);
454 	/* though a barrier is missing here, we don't really need one */
455 }
456 
457 void gen11_reset_rps_interrupts(struct drm_i915_private *dev_priv)
458 {
459 	spin_lock_irq(&dev_priv->irq_lock);
460 
461 	while (gen11_reset_one_iir(dev_priv, 0, GEN11_GTPM))
462 		;
463 
464 	dev_priv->gt_pm.rps.pm_iir = 0;
465 
466 	spin_unlock_irq(&dev_priv->irq_lock);
467 }
468 
469 void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
470 {
471 	spin_lock_irq(&dev_priv->irq_lock);
472 	gen6_reset_pm_iir(dev_priv, dev_priv->pm_rps_events);
473 	dev_priv->gt_pm.rps.pm_iir = 0;
474 	spin_unlock_irq(&dev_priv->irq_lock);
475 }
476 
477 void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
478 {
479 	struct intel_rps *rps = &dev_priv->gt_pm.rps;
480 
481 	if (READ_ONCE(rps->interrupts_enabled))
482 		return;
483 
484 	spin_lock_irq(&dev_priv->irq_lock);
485 	WARN_ON_ONCE(rps->pm_iir);
486 
487 	if (INTEL_GEN(dev_priv) >= 11)
488 		WARN_ON_ONCE(gen11_reset_one_iir(dev_priv, 0, GEN11_GTPM));
489 	else
490 		WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
491 
492 	rps->interrupts_enabled = true;
493 	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
494 
495 	spin_unlock_irq(&dev_priv->irq_lock);
496 }
497 
498 void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
499 {
500 	struct intel_rps *rps = &dev_priv->gt_pm.rps;
501 
502 	if (!READ_ONCE(rps->interrupts_enabled))
503 		return;
504 
505 	spin_lock_irq(&dev_priv->irq_lock);
506 	rps->interrupts_enabled = false;
507 
508 	I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0u));
509 
510 	gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
511 
512 	spin_unlock_irq(&dev_priv->irq_lock);
513 	synchronize_irq(dev_priv->drm.irq);
514 
515 	/* Now that we will not be generating any more work, flush any
516 	 * outstanding tasks. As we are called on the RPS idle path,
517 	 * we will reset the GPU to minimum frequencies, so the current
518 	 * state of the worker can be discarded.
519 	 */
520 	cancel_work_sync(&rps->work);
521 	if (INTEL_GEN(dev_priv) >= 11)
522 		gen11_reset_rps_interrupts(dev_priv);
523 	else
524 		gen6_reset_rps_interrupts(dev_priv);
525 }
526 
527 void gen9_reset_guc_interrupts(struct drm_i915_private *dev_priv)
528 {
529 	assert_rpm_wakelock_held(dev_priv);
530 
531 	spin_lock_irq(&dev_priv->irq_lock);
532 	gen6_reset_pm_iir(dev_priv, dev_priv->pm_guc_events);
533 	spin_unlock_irq(&dev_priv->irq_lock);
534 }
535 
536 void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv)
537 {
538 	assert_rpm_wakelock_held(dev_priv);
539 
540 	spin_lock_irq(&dev_priv->irq_lock);
541 	if (!dev_priv->guc.interrupts_enabled) {
542 		WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) &
543 				       dev_priv->pm_guc_events);
544 		dev_priv->guc.interrupts_enabled = true;
545 		gen6_enable_pm_irq(dev_priv, dev_priv->pm_guc_events);
546 	}
547 	spin_unlock_irq(&dev_priv->irq_lock);
548 }
549 
550 void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv)
551 {
552 	assert_rpm_wakelock_held(dev_priv);
553 
554 	spin_lock_irq(&dev_priv->irq_lock);
555 	dev_priv->guc.interrupts_enabled = false;
556 
557 	gen6_disable_pm_irq(dev_priv, dev_priv->pm_guc_events);
558 
559 	spin_unlock_irq(&dev_priv->irq_lock);
560 	synchronize_irq(dev_priv->drm.irq);
561 
562 	gen9_reset_guc_interrupts(dev_priv);
563 }
564 
565 /**
566  * bdw_update_port_irq - update DE port interrupt
567  * @dev_priv: driver private
568  * @interrupt_mask: mask of interrupt bits to update
569  * @enabled_irq_mask: mask of interrupt bits to enable
570  */
571 static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
572 				uint32_t interrupt_mask,
573 				uint32_t enabled_irq_mask)
574 {
575 	uint32_t new_val;
576 	uint32_t old_val;
577 
578 	lockdep_assert_held(&dev_priv->irq_lock);
579 
580 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
581 
582 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
583 		return;
584 
585 	old_val = I915_READ(GEN8_DE_PORT_IMR);
586 
587 	new_val = old_val;
588 	new_val &= ~interrupt_mask;
589 	new_val |= (~enabled_irq_mask & interrupt_mask);
590 
591 	if (new_val != old_val) {
592 		I915_WRITE(GEN8_DE_PORT_IMR, new_val);
593 		POSTING_READ(GEN8_DE_PORT_IMR);
594 	}
595 }
596 
597 /**
598  * bdw_update_pipe_irq - update DE pipe interrupt
599  * @dev_priv: driver private
600  * @pipe: pipe whose interrupt to update
601  * @interrupt_mask: mask of interrupt bits to update
602  * @enabled_irq_mask: mask of interrupt bits to enable
603  */
604 void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
605 			 enum pipe pipe,
606 			 uint32_t interrupt_mask,
607 			 uint32_t enabled_irq_mask)
608 {
609 	uint32_t new_val;
610 
611 	lockdep_assert_held(&dev_priv->irq_lock);
612 
613 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
614 
615 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
616 		return;
617 
618 	new_val = dev_priv->de_irq_mask[pipe];
619 	new_val &= ~interrupt_mask;
620 	new_val |= (~enabled_irq_mask & interrupt_mask);
621 
622 	if (new_val != dev_priv->de_irq_mask[pipe]) {
623 		dev_priv->de_irq_mask[pipe] = new_val;
624 		I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
625 		POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
626 	}
627 }
628 
629 /**
630  * ibx_display_interrupt_update - update SDEIMR
631  * @dev_priv: driver private
632  * @interrupt_mask: mask of interrupt bits to update
633  * @enabled_irq_mask: mask of interrupt bits to enable
634  */
635 void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
636 				  uint32_t interrupt_mask,
637 				  uint32_t enabled_irq_mask)
638 {
639 	uint32_t sdeimr = I915_READ(SDEIMR);
640 	sdeimr &= ~interrupt_mask;
641 	sdeimr |= (~enabled_irq_mask & interrupt_mask);
642 
643 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
644 
645 	lockdep_assert_held(&dev_priv->irq_lock);
646 
647 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
648 		return;
649 
650 	I915_WRITE(SDEIMR, sdeimr);
651 	POSTING_READ(SDEIMR);
652 }
653 
654 u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
655 			      enum pipe pipe)
656 {
657 	u32 status_mask = dev_priv->pipestat_irq_mask[pipe];
658 	u32 enable_mask = status_mask << 16;
659 
660 	lockdep_assert_held(&dev_priv->irq_lock);
661 
662 	if (INTEL_GEN(dev_priv) < 5)
663 		goto out;
664 
665 	/*
666 	 * On pipe A we don't support the PSR interrupt yet,
667 	 * on pipe B and C the same bit MBZ.
668 	 */
669 	if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
670 		return 0;
671 	/*
672 	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
673 	 * A the same bit is for perf counters which we don't use either.
674 	 */
675 	if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
676 		return 0;
677 
678 	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
679 			 SPRITE0_FLIP_DONE_INT_EN_VLV |
680 			 SPRITE1_FLIP_DONE_INT_EN_VLV);
681 	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
682 		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
683 	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
684 		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;
685 
686 out:
687 	WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
688 		  status_mask & ~PIPESTAT_INT_STATUS_MASK,
689 		  "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
690 		  pipe_name(pipe), enable_mask, status_mask);
691 
692 	return enable_mask;
693 }
694 
695 void i915_enable_pipestat(struct drm_i915_private *dev_priv,
696 			  enum pipe pipe, u32 status_mask)
697 {
698 	i915_reg_t reg = PIPESTAT(pipe);
699 	u32 enable_mask;
700 
701 	WARN_ONCE(status_mask & ~PIPESTAT_INT_STATUS_MASK,
702 		  "pipe %c: status_mask=0x%x\n",
703 		  pipe_name(pipe), status_mask);
704 
705 	lockdep_assert_held(&dev_priv->irq_lock);
706 	WARN_ON(!intel_irqs_enabled(dev_priv));
707 
708 	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == status_mask)
709 		return;
710 
711 	dev_priv->pipestat_irq_mask[pipe] |= status_mask;
712 	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
713 
714 	I915_WRITE(reg, enable_mask | status_mask);
715 	POSTING_READ(reg);
716 }
717 
718 void i915_disable_pipestat(struct drm_i915_private *dev_priv,
719 			   enum pipe pipe, u32 status_mask)
720 {
721 	i915_reg_t reg = PIPESTAT(pipe);
722 	u32 enable_mask;
723 
724 	WARN_ONCE(status_mask & ~PIPESTAT_INT_STATUS_MASK,
725 		  "pipe %c: status_mask=0x%x\n",
726 		  pipe_name(pipe), status_mask);
727 
728 	lockdep_assert_held(&dev_priv->irq_lock);
729 	WARN_ON(!intel_irqs_enabled(dev_priv));
730 
731 	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == 0)
732 		return;
733 
734 	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
735 	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
736 
737 	I915_WRITE(reg, enable_mask | status_mask);
738 	POSTING_READ(reg);
739 }
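
/*
 * Illustrative usage (compiled out): PIPESTAT status bits are toggled under
 * irq_lock, e.g. enabling the start-of-vblank status interrupt for a pipe
 * might look roughly like this (PIPE_START_VBLANK_INTERRUPT_STATUS used as
 * an example bit):
 */
#if 0
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, pipe,
			     PIPE_START_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);
#endif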
740 
741 /**
742  * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
743  * @dev_priv: i915 device private
744  */
745 static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
746 {
747 	if (!dev_priv->opregion.asle || !IS_MOBILE(dev_priv))
748 		return;
749 
750 	spin_lock_irq(&dev_priv->irq_lock);
751 
752 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
753 	if (INTEL_GEN(dev_priv) >= 4)
754 		i915_enable_pipestat(dev_priv, PIPE_A,
755 				     PIPE_LEGACY_BLC_EVENT_STATUS);
756 
757 	spin_unlock_irq(&dev_priv->irq_lock);
758 }
759 
760 /*
761  * This timing diagram depicts the video signal in and
762  * around the vertical blanking period.
763  *
764  * Assumptions about the fictitious mode used in this example:
765  *  vblank_start >= 3
766  *  vsync_start = vblank_start + 1
767  *  vsync_end = vblank_start + 2
768  *  vtotal = vblank_start + 3
769  *
770  *           start of vblank:
771  *           latch double buffered registers
772  *           increment frame counter (ctg+)
773  *           generate start of vblank interrupt (gen4+)
774  *           |
775  *           |          frame start:
776  *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
777  *           |          may be shifted forward 1-3 extra lines via PIPECONF
778  *           |          |
779  *           |          |  start of vsync:
780  *           |          |  generate vsync interrupt
781  *           |          |  |
782  * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
783  *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
784  * ----va---> <-----------------vb--------------------> <--------va-------------
785  *       |          |       <----vs----->                     |
786  * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
787  * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
788  * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
789  *       |          |                                         |
790  *       last visible pixel                                   first visible pixel
791  *                  |                                         increment frame counter (gen3/4)
792  *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
793  *
794  * x  = horizontal active
795  * _  = horizontal blanking
796  * hs = horizontal sync
797  * va = vertical active
798  * vb = vertical blanking
799  * vs = vertical sync
800  * vbs = vblank_start (number)
801  *
802  * Summary:
803  * - most events happen at the start of horizontal sync
804  * - frame start happens at the start of horizontal blank, 1-4 lines
805  *   (depending on PIPECONF settings) after the start of vblank
806  * - gen3/4 pixel and frame counter are synchronized with the start
807  *   of horizontal active on the first line of vertical active
808  */
809 
810 /* Called from drm generic code, passed a 'crtc', which
811  * we use as a pipe index
812  */
813 static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
814 {
815 	struct drm_i915_private *dev_priv = to_i915(dev);
816 	i915_reg_t high_frame, low_frame;
817 	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
818 	const struct drm_display_mode *mode = &dev->vblank[pipe].hwmode;
819 	unsigned long irqflags;
820 
821 	htotal = mode->crtc_htotal;
822 	hsync_start = mode->crtc_hsync_start;
823 	vbl_start = mode->crtc_vblank_start;
824 	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
825 		vbl_start = DIV_ROUND_UP(vbl_start, 2);
826 
827 	/* Convert to pixel count */
828 	vbl_start *= htotal;
829 
830 	/* Start of vblank event occurs at start of hsync */
831 	vbl_start -= htotal - hsync_start;
832 
833 	high_frame = PIPEFRAME(pipe);
834 	low_frame = PIPEFRAMEPIXEL(pipe);
835 
836 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
837 
838 	/*
839 	 * High & low register fields aren't synchronized, so make sure
840 	 * we get a low value that's stable across two reads of the high
841 	 * register.
842 	 */
843 	do {
844 		high1 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK;
845 		low   = I915_READ_FW(low_frame);
846 		high2 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK;
847 	} while (high1 != high2);
848 
849 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
850 
851 	high1 >>= PIPE_FRAME_HIGH_SHIFT;
852 	pixel = low & PIPE_PIXEL_MASK;
853 	low >>= PIPE_FRAME_LOW_SHIFT;
854 
855 	/*
856 	 * The frame counter increments at beginning of active.
857 	 * Cook up a vblank counter by also checking the pixel
858 	 * counter against vblank start.
859 	 */
860 	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
861 }
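
/*
 * Worked example (illustrative numbers only): with the converted vbl_start
 * at pixel 100000, a frame counter reading of high1 = 0x1 and low = 0x23,
 * and a pixel counter of 100500, the pixel counter has already passed
 * vblank start, so the cooked value is ((0x1 << 8) | 0x23) + 1 = 0x124;
 * earlier in the same frame (pixel < 100000) it would still read 0x123.
 */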
862 
863 static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
864 {
865 	struct drm_i915_private *dev_priv = to_i915(dev);
866 
867 	return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
868 }
869 
870 /*
871  * On certain encoders on certain platforms, the pipe
872  * scanline register will not work to get the scanline,
873  * either because the timings are driven from the PORT or
874  * because of issues with scanline register updates.
875  * This function instead uses the framestamp and current
876  * timestamp registers to calculate the scanline.
877  */
878 static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc)
879 {
880 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
881 	struct drm_vblank_crtc *vblank =
882 		&crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
883 	const struct drm_display_mode *mode = &vblank->hwmode;
884 	u32 vblank_start = mode->crtc_vblank_start;
885 	u32 vtotal = mode->crtc_vtotal;
886 	u32 htotal = mode->crtc_htotal;
887 	u32 clock = mode->crtc_clock;
888 	u32 scanline, scan_prev_time, scan_curr_time, scan_post_time;
889 
890 	/*
891 	 * To avoid the race condition where we might cross into the
892 	 * next vblank just between the PIPE_FRMTMSTMP and TIMESTAMP_CTR
893 	 * reads, we make sure we read PIPE_FRMTMSTMP and TIMESTAMP_CTR
894 	 * during the same frame.
895 	 */
896 	do {
897 		/*
898 		 * This field provides read back of the display
899 		 * pipe frame time stamp. The time stamp value
900 		 * is sampled at every start of vertical blank.
901 		 */
902 		scan_prev_time = I915_READ_FW(PIPE_FRMTMSTMP(crtc->pipe));
903 
904 		/*
905 		 * The TIMESTAMP_CTR register has the current
906 		 * time stamp value.
907 		 */
908 		scan_curr_time = I915_READ_FW(IVB_TIMESTAMP_CTR);
909 
910 		scan_post_time = I915_READ_FW(PIPE_FRMTMSTMP(crtc->pipe));
911 	} while (scan_post_time != scan_prev_time);
912 
913 	scanline = div_u64(mul_u32_u32(scan_curr_time - scan_prev_time,
914 					clock), 1000 * htotal);
915 	scanline = min(scanline, vtotal - 1);
916 	scanline = (scanline + vblank_start) % vtotal;
917 
918 	return scanline;
919 }
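
/*
 * Worked example (illustrative, assuming the timestamp delta is in usec and
 * mode->crtc_clock is in kHz as elsewhere in drm): with htotal = 2200,
 * clock = 148500 and a delta of 500 usec since the vblank timestamp, the
 * pipe has scanned 500 * 148500 / (1000 * 2200) ~= 33 lines into the frame;
 * adding vblank_start and wrapping by vtotal then converts that into the
 * scanline numbering used by the rest of the driver.
 */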
920 
921 /* I915_READ_FW, only for fast reads of display block, no need for forcewake etc. */
922 static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
923 {
924 	struct drm_device *dev = crtc->base.dev;
925 	struct drm_i915_private *dev_priv = to_i915(dev);
926 	const struct drm_display_mode *mode;
927 	struct drm_vblank_crtc *vblank;
928 	enum pipe pipe = crtc->pipe;
929 	int position, vtotal;
930 
931 	if (!crtc->active)
932 		return -1;
933 
934 	vblank = &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
935 	mode = &vblank->hwmode;
936 
937 	if (mode->private_flags & I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP)
938 		return __intel_get_crtc_scanline_from_timestamp(crtc);
939 
940 	vtotal = mode->crtc_vtotal;
941 	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
942 		vtotal /= 2;
943 
944 	if (IS_GEN2(dev_priv))
945 		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
946 	else
947 		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
948 
949 	/*
950 	 * On HSW, the DSL reg (0x70000) appears to return 0 if we
951 	 * read it just before the start of vblank.  So try it again
952 	 * so we don't accidentally end up spanning a vblank frame
953 	 * increment, causing the pipe_update_end() code to squawk at us.
954 	 *
955 	 * The nature of this problem means we can't simply check the ISR
956 	 * bit and return the vblank start value; nor can we use the scanline
957 	 * debug register in the transcoder as it appears to have the same
958 	 * problem.  We may need to extend this to include other platforms,
959 	 * but so far testing only shows the problem on HSW.
960 	 */
961 	if (HAS_DDI(dev_priv) && !position) {
962 		int i, temp;
963 
964 		for (i = 0; i < 100; i++) {
965 			udelay(1);
966 			temp = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
967 			if (temp != position) {
968 				position = temp;
969 				break;
970 			}
971 		}
972 	}
973 
974 	/*
975 	 * See update_scanline_offset() for the details on the
976 	 * scanline_offset adjustment.
977 	 */
978 	return (position + crtc->scanline_offset) % vtotal;
979 }
980 
981 static bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
982 				     bool in_vblank_irq, int *vpos, int *hpos,
983 				     ktime_t *stime, ktime_t *etime,
984 				     const struct drm_display_mode *mode)
985 {
986 	struct drm_i915_private *dev_priv = to_i915(dev);
987 	struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv,
988 								pipe);
989 	int position;
990 	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
991 	unsigned long irqflags;
992 
993 	if (WARN_ON(!mode->crtc_clock)) {
994 		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
995 				 "pipe %c\n", pipe_name(pipe));
996 		return false;
997 	}
998 
999 	htotal = mode->crtc_htotal;
1000 	hsync_start = mode->crtc_hsync_start;
1001 	vtotal = mode->crtc_vtotal;
1002 	vbl_start = mode->crtc_vblank_start;
1003 	vbl_end = mode->crtc_vblank_end;
1004 
1005 	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
1006 		vbl_start = DIV_ROUND_UP(vbl_start, 2);
1007 		vbl_end /= 2;
1008 		vtotal /= 2;
1009 	}
1010 
1011 	/*
1012 	 * Lock uncore.lock, as we will do multiple timing critical raw
1013 	 * register reads, potentially with preemption disabled, so the
1014 	 * following code must not block on uncore.lock.
1015 	 */
1016 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
1017 
1018 	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
1019 
1020 	/* Get optional system timestamp before query. */
1021 	if (stime)
1022 		*stime = ktime_get();
1023 
1024 	if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
1025 		/* No obvious pixelcount register. Only query vertical
1026 		 * scanout position from Display scan line register.
1027 		 */
1028 		position = __intel_get_crtc_scanline(intel_crtc);
1029 	} else {
1030 		/* Have access to pixelcount since start of frame.
1031 		 * We can split this into vertical and horizontal
1032 		 * scanout position.
1033 		 */
1034 		position = (I915_READ_FW(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
1035 
1036 		/* convert to pixel counts */
1037 		vbl_start *= htotal;
1038 		vbl_end *= htotal;
1039 		vtotal *= htotal;
1040 
1041 		/*
1042 		 * In interlaced modes, the pixel counter counts all pixels,
1043 		 * so one field will have htotal more pixels. To keep
1044 		 * the reported position from jumping backwards when the pixel
1045 		 * counter is beyond the length of the shorter field, just
1046 		 * clamp the position to the length of the shorter field. This
1047 		 * matches how the scanline counter based position works since
1048 		 * the scanline counter doesn't count the two half lines.
1049 		 */
1050 		if (position >= vtotal)
1051 			position = vtotal - 1;
1052 
1053 		/*
1054 		 * Start of vblank interrupt is triggered at start of hsync,
1055 		 * just prior to the first active line of vblank. However we
1056 		 * consider lines to start at the leading edge of horizontal
1057 		 * active. So, should we get here before we've crossed into
1058 		 * the horizontal active of the first line in vblank, we would
1059 		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
1060 		 * always add htotal-hsync_start to the current pixel position.
1061 		 */
1062 		position = (position + htotal - hsync_start) % vtotal;
1063 	}
1064 
1065 	/* Get optional system timestamp after query. */
1066 	if (etime)
1067 		*etime = ktime_get();
1068 
1069 	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
1070 
1071 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
1072 
1073 	/*
1074 	 * While in vblank, position will be negative
1075 	 * counting up towards 0 at vbl_end. And outside
1076 	 * vblank, position will be positive counting
1077 	 * up since vbl_end.
1078 	 */
1079 	if (position >= vbl_start)
1080 		position -= vbl_end;
1081 	else
1082 		position += vtotal - vbl_end;
1083 
1084 	if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
1085 		*vpos = position;
1086 		*hpos = 0;
1087 	} else {
1088 		*vpos = position / htotal;
1089 		*hpos = position - (*vpos * htotal);
1090 	}
1091 
1092 	return true;
1093 }
1094 
1095 int intel_get_crtc_scanline(struct intel_crtc *crtc)
1096 {
1097 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1098 	unsigned long irqflags;
1099 	int position;
1100 
1101 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
1102 	position = __intel_get_crtc_scanline(crtc);
1103 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
1104 
1105 	return position;
1106 }
1107 
1108 static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv)
1109 {
1110 	u32 busy_up, busy_down, max_avg, min_avg;
1111 	u8 new_delay;
1112 
1113 	spin_lock(&mchdev_lock);
1114 
1115 	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
1116 
1117 	new_delay = dev_priv->ips.cur_delay;
1118 
1119 	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
1120 	busy_up = I915_READ(RCPREVBSYTUPAVG);
1121 	busy_down = I915_READ(RCPREVBSYTDNAVG);
1122 	max_avg = I915_READ(RCBMAXAVG);
1123 	min_avg = I915_READ(RCBMINAVG);
1124 
1125 	/* Handle RCS change request from hw */
1126 	if (busy_up > max_avg) {
1127 		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
1128 			new_delay = dev_priv->ips.cur_delay - 1;
1129 		if (new_delay < dev_priv->ips.max_delay)
1130 			new_delay = dev_priv->ips.max_delay;
1131 	} else if (busy_down < min_avg) {
1132 		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
1133 			new_delay = dev_priv->ips.cur_delay + 1;
1134 		if (new_delay > dev_priv->ips.min_delay)
1135 			new_delay = dev_priv->ips.min_delay;
1136 	}
1137 
1138 	if (ironlake_set_drps(dev_priv, new_delay))
1139 		dev_priv->ips.cur_delay = new_delay;
1140 
1141 	spin_unlock(&mchdev_lock);
1144 }
1145 
1146 static void notify_ring(struct intel_engine_cs *engine)
1147 {
1148 	struct i915_request *rq = NULL;
1149 	struct intel_wait *wait;
1150 
1151 	if (!engine->breadcrumbs.irq_armed)
1152 		return;
1153 
1154 	atomic_inc(&engine->irq_count);
1155 	set_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);
1156 
1157 	spin_lock(&engine->breadcrumbs.irq_lock);
1158 	wait = engine->breadcrumbs.irq_wait;
1159 	if (wait) {
1160 		bool wakeup = engine->irq_seqno_barrier;
1161 
1162 		/* We use a callback from the dma-fence to submit
1163 		 * requests after waiting on our own requests. To
1164 		 * ensure minimum delay in queuing the next request to
1165 		 * hardware, signal the fence now rather than wait for
1166 		 * the signaler to be woken up. We still wake up the
1167 		 * waiter in order to handle the irq-seqno coherency
1168 		 * issues (we may receive the interrupt before the
1169 		 * seqno is written, see __i915_request_irq_complete())
1170 		 * and to handle coalescing of multiple seqno updates
1171 		 * and many waiters.
1172 		 */
1173 		if (i915_seqno_passed(intel_engine_get_seqno(engine),
1174 				      wait->seqno)) {
1175 			struct i915_request *waiter = wait->request;
1176 
1177 			wakeup = true;
1178 			if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
1179 				      &waiter->fence.flags) &&
1180 			    intel_wait_check_request(wait, waiter))
1181 				rq = i915_request_get(waiter);
1182 		}
1183 
1184 		if (wakeup)
1185 			wake_up_process(wait->tsk);
1186 	} else {
1187 		if (engine->breadcrumbs.irq_armed)
1188 			__intel_engine_disarm_breadcrumbs(engine);
1189 	}
1190 	spin_unlock(&engine->breadcrumbs.irq_lock);
1191 
1192 	if (rq) {
1193 		dma_fence_signal(&rq->fence);
1194 		GEM_BUG_ON(!i915_request_completed(rq));
1195 		i915_request_put(rq);
1196 	}
1197 
1198 	trace_intel_engine_notify(engine, wait);
1199 }
1200 
1201 static void vlv_c0_read(struct drm_i915_private *dev_priv,
1202 			struct intel_rps_ei *ei)
1203 {
1204 	ei->ktime = ktime_get_raw();
1205 	ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT);
1206 	ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
1207 }
1208 
1209 void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
1210 {
1211 	memset(&dev_priv->gt_pm.rps.ei, 0, sizeof(dev_priv->gt_pm.rps.ei));
1212 }
1213 
1214 static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
1215 {
1216 	struct intel_rps *rps = &dev_priv->gt_pm.rps;
1217 	const struct intel_rps_ei *prev = &rps->ei;
1218 	struct intel_rps_ei now;
1219 	u32 events = 0;
1220 
1221 	if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0)
1222 		return 0;
1223 
1224 	vlv_c0_read(dev_priv, &now);
1225 
1226 	if (prev->ktime) {
1227 		u64 time, c0;
1228 		u32 render, media;
1229 
1230 		time = ktime_us_delta(now.ktime, prev->ktime);
1231 
1232 		time *= dev_priv->czclk_freq;
1233 
1234 		/* Workload can be split between render + media,
1235 		 * e.g. SwapBuffers being blitted in X after being rendered in
1236 		 * mesa. To account for this we need to combine both engines
1237 		 * into our activity counter.
1238 		 */
1239 		render = now.render_c0 - prev->render_c0;
1240 		media = now.media_c0 - prev->media_c0;
1241 		c0 = max(render, media);
1242 		c0 *= 1000 * 100 << 8; /* to usecs and scale to threshold% */
1243 
1244 		if (c0 > time * rps->up_threshold)
1245 			events = GEN6_PM_RP_UP_THRESHOLD;
1246 		else if (c0 < time * rps->down_threshold)
1247 			events = GEN6_PM_RP_DOWN_THRESHOLD;
1248 	}
1249 
1250 	rps->ei = now;
1251 	return events;
1252 }
1253 
1254 static void gen6_pm_rps_work(struct work_struct *work)
1255 {
1256 	struct drm_i915_private *dev_priv =
1257 		container_of(work, struct drm_i915_private, gt_pm.rps.work);
1258 	struct intel_rps *rps = &dev_priv->gt_pm.rps;
1259 	bool client_boost = false;
1260 	int new_delay, adj, min, max;
1261 	u32 pm_iir = 0;
1262 
1263 	spin_lock_irq(&dev_priv->irq_lock);
1264 	if (rps->interrupts_enabled) {
1265 		pm_iir = fetch_and_zero(&rps->pm_iir);
1266 		client_boost = atomic_read(&rps->num_waiters);
1267 	}
1268 	spin_unlock_irq(&dev_priv->irq_lock);
1269 
1270 	/* Make sure we didn't queue anything we're not going to process. */
1271 	WARN_ON(pm_iir & ~dev_priv->pm_rps_events);
1272 	if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
1273 		goto out;
1274 
1275 	mutex_lock(&dev_priv->pcu_lock);
1276 
1277 	pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);
1278 
1279 	adj = rps->last_adj;
1280 	new_delay = rps->cur_freq;
1281 	min = rps->min_freq_softlimit;
1282 	max = rps->max_freq_softlimit;
1283 	if (client_boost)
1284 		max = rps->max_freq;
1285 	if (client_boost && new_delay < rps->boost_freq) {
1286 		new_delay = rps->boost_freq;
1287 		adj = 0;
1288 	} else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
1289 		if (adj > 0)
1290 			adj *= 2;
1291 		else /* CHV needs even encode values */
1292 			adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;
1293 
1294 		if (new_delay >= rps->max_freq_softlimit)
1295 			adj = 0;
1296 	} else if (client_boost) {
1297 		adj = 0;
1298 	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
1299 		if (rps->cur_freq > rps->efficient_freq)
1300 			new_delay = rps->efficient_freq;
1301 		else if (rps->cur_freq > rps->min_freq_softlimit)
1302 			new_delay = rps->min_freq_softlimit;
1303 		adj = 0;
1304 	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
1305 		if (adj < 0)
1306 			adj *= 2;
1307 		else /* CHV needs even encode values */
1308 			adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;
1309 
1310 		if (new_delay <= rps->min_freq_softlimit)
1311 			adj = 0;
1312 	} else { /* unknown event */
1313 		adj = 0;
1314 	}
1315 
1316 	rps->last_adj = adj;
1317 
1318 	/* sysfs frequency interfaces may have snuck in while servicing the
1319 	 * interrupt
1320 	 */
1321 	new_delay += adj;
1322 	new_delay = clamp_t(int, new_delay, min, max);
1323 
1324 	if (intel_set_rps(dev_priv, new_delay)) {
1325 		DRM_DEBUG_DRIVER("Failed to set new GPU frequency\n");
1326 		rps->last_adj = 0;
1327 	}
1328 
1329 	mutex_unlock(&dev_priv->pcu_lock);
1330 
1331 out:
1332 	/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
1333 	spin_lock_irq(&dev_priv->irq_lock);
1334 	if (rps->interrupts_enabled)
1335 		gen6_unmask_pm_irq(dev_priv, dev_priv->pm_rps_events);
1336 	spin_unlock_irq(&dev_priv->irq_lock);
1337 }
1338 
1339 
1340 /**
1341  * ivybridge_parity_work - Workqueue called when a parity error interrupt
1342  * occurred.
1343  * @work: workqueue struct
1344  *
1345  * Doesn't actually do anything except notify userspace. As a consequence of
1346  * this event, userspace should try to remap the bad rows since statistically
1347  * the same row is likely to go bad again.
1348  */
1349 static void ivybridge_parity_work(struct work_struct *work)
1350 {
1351 	struct drm_i915_private *dev_priv =
1352 		container_of(work, typeof(*dev_priv), l3_parity.error_work);
1353 	u32 error_status, row, bank, subbank;
1354 	char *parity_event[6];
1355 	uint32_t misccpctl;
1356 	uint8_t slice = 0;
1357 
1358 	/* We must turn off DOP level clock gating to access the L3 registers.
1359 	 * In order to prevent a get/put style interface, acquire struct mutex
1360 	 * any time we access those registers.
1361 	 */
1362 	mutex_lock(&dev_priv->drm.struct_mutex);
1363 
1364 	/* If we've screwed up tracking, just let the interrupt fire again */
1365 	if (WARN_ON(!dev_priv->l3_parity.which_slice))
1366 		goto out;
1367 
1368 	misccpctl = I915_READ(GEN7_MISCCPCTL);
1369 	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
1370 	POSTING_READ(GEN7_MISCCPCTL);
1371 
1372 	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
1373 		i915_reg_t reg;
1374 
1375 		slice--;
1376 		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv)))
1377 			break;
1378 
1379 		dev_priv->l3_parity.which_slice &= ~(1<<slice);
1380 
1381 		reg = GEN7_L3CDERRST1(slice);
1382 
1383 		error_status = I915_READ(reg);
1384 		row = GEN7_PARITY_ERROR_ROW(error_status);
1385 		bank = GEN7_PARITY_ERROR_BANK(error_status);
1386 		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
1387 
1388 		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
1389 		POSTING_READ(reg);
1390 
1391 		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
1392 		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
1393 		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
1394 		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
1395 		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
1396 		parity_event[5] = NULL;
1397 
1398 		kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj,
1399 				   KOBJ_CHANGE, parity_event);
1400 
1401 		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
1402 			  slice, row, bank, subbank);
1403 
1404 		kfree(parity_event[4]);
1405 		kfree(parity_event[3]);
1406 		kfree(parity_event[2]);
1407 		kfree(parity_event[1]);
1408 	}
1409 
1410 	I915_WRITE(GEN7_MISCCPCTL, misccpctl);
1411 
1412 out:
1413 	WARN_ON(dev_priv->l3_parity.which_slice);
1414 	spin_lock_irq(&dev_priv->irq_lock);
1415 	gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
1416 	spin_unlock_irq(&dev_priv->irq_lock);
1417 
1418 	mutex_unlock(&dev_priv->drm.struct_mutex);
1419 }
1420 
1421 static void ivybridge_parity_error_irq_handler(struct drm_i915_private *dev_priv,
1422 					       u32 iir)
1423 {
1424 	if (!HAS_L3_DPF(dev_priv))
1425 		return;
1426 
1427 	spin_lock(&dev_priv->irq_lock);
1428 	gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
1429 	spin_unlock(&dev_priv->irq_lock);
1430 
1431 	iir &= GT_PARITY_ERROR(dev_priv);
1432 	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
1433 		dev_priv->l3_parity.which_slice |= 1 << 1;
1434 
1435 	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
1436 		dev_priv->l3_parity.which_slice |= 1 << 0;
1437 
1438 	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
1439 }
1440 
1441 static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv,
1442 			       u32 gt_iir)
1443 {
1444 	if (gt_iir & GT_RENDER_USER_INTERRUPT)
1445 		notify_ring(dev_priv->engine[RCS]);
1446 	if (gt_iir & ILK_BSD_USER_INTERRUPT)
1447 		notify_ring(dev_priv->engine[VCS]);
1448 }
1449 
1450 static void snb_gt_irq_handler(struct drm_i915_private *dev_priv,
1451 			       u32 gt_iir)
1452 {
1453 	if (gt_iir & GT_RENDER_USER_INTERRUPT)
1454 		notify_ring(dev_priv->engine[RCS]);
1455 	if (gt_iir & GT_BSD_USER_INTERRUPT)
1456 		notify_ring(dev_priv->engine[VCS]);
1457 	if (gt_iir & GT_BLT_USER_INTERRUPT)
1458 		notify_ring(dev_priv->engine[BCS]);
1459 
1460 	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
1461 		      GT_BSD_CS_ERROR_INTERRUPT |
1462 		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
1463 		DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);
1464 
1465 	if (gt_iir & GT_PARITY_ERROR(dev_priv))
1466 		ivybridge_parity_error_irq_handler(dev_priv, gt_iir);
1467 }
1468 
1469 static void
1470 gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir)
1471 {
1472 	struct intel_engine_execlists * const execlists = &engine->execlists;
1473 	bool tasklet = false;
1474 
1475 	if (iir & GT_CONTEXT_SWITCH_INTERRUPT) {
1476 		if (READ_ONCE(engine->execlists.active))
1477 			tasklet = !test_and_set_bit(ENGINE_IRQ_EXECLIST,
1478 						    &engine->irq_posted);
1479 	}
1480 
1481 	if (iir & GT_RENDER_USER_INTERRUPT) {
1482 		notify_ring(engine);
1483 		tasklet |= USES_GUC_SUBMISSION(engine->i915);
1484 	}
1485 
1486 	if (tasklet)
1487 		tasklet_hi_schedule(&execlists->tasklet);
1488 }
1489 
1490 static void gen8_gt_irq_ack(struct drm_i915_private *i915,
1491 			    u32 master_ctl, u32 gt_iir[4])
1492 {
1493 	void __iomem * const regs = i915->regs;
1494 
1495 #define GEN8_GT_IRQS (GEN8_GT_RCS_IRQ | \
1496 		      GEN8_GT_BCS_IRQ | \
1497 		      GEN8_GT_VCS1_IRQ | \
1498 		      GEN8_GT_VCS2_IRQ | \
1499 		      GEN8_GT_VECS_IRQ | \
1500 		      GEN8_GT_PM_IRQ | \
1501 		      GEN8_GT_GUC_IRQ)
1502 
1503 	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
1504 		gt_iir[0] = raw_reg_read(regs, GEN8_GT_IIR(0));
1505 		if (likely(gt_iir[0]))
1506 			raw_reg_write(regs, GEN8_GT_IIR(0), gt_iir[0]);
1507 	}
1508 
1509 	if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
1510 		gt_iir[1] = raw_reg_read(regs, GEN8_GT_IIR(1));
1511 		if (likely(gt_iir[1]))
1512 			raw_reg_write(regs, GEN8_GT_IIR(1), gt_iir[1]);
1513 	}
1514 
1515 	if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
1516 		gt_iir[2] = raw_reg_read(regs, GEN8_GT_IIR(2));
1517 		if (likely(gt_iir[2] & (i915->pm_rps_events |
1518 					i915->pm_guc_events)))
1519 			raw_reg_write(regs, GEN8_GT_IIR(2),
1520 				      gt_iir[2] & (i915->pm_rps_events |
1521 						   i915->pm_guc_events));
1522 	}
1523 
1524 	if (master_ctl & GEN8_GT_VECS_IRQ) {
1525 		gt_iir[3] = raw_reg_read(regs, GEN8_GT_IIR(3));
1526 		if (likely(gt_iir[3]))
1527 			raw_reg_write(regs, GEN8_GT_IIR(3), gt_iir[3]);
1528 	}
1529 }
1530 
1531 static void gen8_gt_irq_handler(struct drm_i915_private *i915,
1532 				u32 master_ctl, u32 gt_iir[4])
1533 {
1534 	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
1535 		gen8_cs_irq_handler(i915->engine[RCS],
1536 				    gt_iir[0] >> GEN8_RCS_IRQ_SHIFT);
1537 		gen8_cs_irq_handler(i915->engine[BCS],
1538 				    gt_iir[0] >> GEN8_BCS_IRQ_SHIFT);
1539 	}
1540 
1541 	if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
1542 		gen8_cs_irq_handler(i915->engine[VCS],
1543 				    gt_iir[1] >> GEN8_VCS1_IRQ_SHIFT);
1544 		gen8_cs_irq_handler(i915->engine[VCS2],
1545 				    gt_iir[1] >> GEN8_VCS2_IRQ_SHIFT);
1546 	}
1547 
1548 	if (master_ctl & GEN8_GT_VECS_IRQ) {
1549 		gen8_cs_irq_handler(i915->engine[VECS],
1550 				    gt_iir[3] >> GEN8_VECS_IRQ_SHIFT);
1551 	}
1552 
1553 	if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
1554 		gen6_rps_irq_handler(i915, gt_iir[2]);
1555 		gen9_guc_irq_handler(i915, gt_iir[2]);
1556 	}
1557 }
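
/*
 * Illustrative sketch (compiled out, not part of this function): the
 * ack/handler split above lets the top-level interrupt handler read and
 * clear the GT IIRs while the master interrupt is disabled, and only
 * afterwards do the actual processing, roughly:
 */
#if 0
	u32 master_ctl, gt_iir[4] = {};

	master_ctl = raw_reg_read(regs, GEN8_MASTER_IRQ);
	raw_reg_write(regs, GEN8_MASTER_IRQ, 0);	/* disable master */

	gen8_gt_irq_ack(i915, master_ctl, gt_iir);	/* find & clear */

	raw_reg_write(regs, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);

	gen8_gt_irq_handler(i915, master_ctl, gt_iir);	/* process */
#endif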
1558 
1559 static bool gen11_port_hotplug_long_detect(enum port port, u32 val)
1560 {
1561 	switch (port) {
1562 	case PORT_C:
1563 		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC1);
1564 	case PORT_D:
1565 		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC2);
1566 	case PORT_E:
1567 		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC3);
1568 	case PORT_F:
1569 		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC4);
1570 	default:
1571 		return false;
1572 	}
1573 }
1574 
1575 static bool bxt_port_hotplug_long_detect(enum port port, u32 val)
1576 {
1577 	switch (port) {
1578 	case PORT_A:
1579 		return val & PORTA_HOTPLUG_LONG_DETECT;
1580 	case PORT_B:
1581 		return val & PORTB_HOTPLUG_LONG_DETECT;
1582 	case PORT_C:
1583 		return val & PORTC_HOTPLUG_LONG_DETECT;
1584 	default:
1585 		return false;
1586 	}
1587 }
1588 
1589 static bool spt_port_hotplug2_long_detect(enum port port, u32 val)
1590 {
1591 	switch (port) {
1592 	case PORT_E:
1593 		return val & PORTE_HOTPLUG_LONG_DETECT;
1594 	default:
1595 		return false;
1596 	}
1597 }
1598 
1599 static bool spt_port_hotplug_long_detect(enum port port, u32 val)
1600 {
1601 	switch (port) {
1602 	case PORT_A:
1603 		return val & PORTA_HOTPLUG_LONG_DETECT;
1604 	case PORT_B:
1605 		return val & PORTB_HOTPLUG_LONG_DETECT;
1606 	case PORT_C:
1607 		return val & PORTC_HOTPLUG_LONG_DETECT;
1608 	case PORT_D:
1609 		return val & PORTD_HOTPLUG_LONG_DETECT;
1610 	default:
1611 		return false;
1612 	}
1613 }
1614 
1615 static bool ilk_port_hotplug_long_detect(enum port port, u32 val)
1616 {
1617 	switch (port) {
1618 	case PORT_A:
1619 		return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
1620 	default:
1621 		return false;
1622 	}
1623 }
1624 
1625 static bool pch_port_hotplug_long_detect(enum port port, u32 val)
1626 {
1627 	switch (port) {
1628 	case PORT_B:
1629 		return val & PORTB_HOTPLUG_LONG_DETECT;
1630 	case PORT_C:
1631 		return val & PORTC_HOTPLUG_LONG_DETECT;
1632 	case PORT_D:
1633 		return val & PORTD_HOTPLUG_LONG_DETECT;
1634 	default:
1635 		return false;
1636 	}
1637 }
1638 
1639 static bool i9xx_port_hotplug_long_detect(enum port port, u32 val)
1640 {
1641 	switch (port) {
1642 	case PORT_B:
1643 		return val & PORTB_HOTPLUG_INT_LONG_PULSE;
1644 	case PORT_C:
1645 		return val & PORTC_HOTPLUG_INT_LONG_PULSE;
1646 	case PORT_D:
1647 		return val & PORTD_HOTPLUG_INT_LONG_PULSE;
1648 	default:
1649 		return false;
1650 	}
1651 }
1652 
1653 /*
1654  * Get a bit mask of pins that have triggered, and which ones may be long.
1655  * This can be called multiple times with the same masks to accumulate
1656  * hotplug detection results from several registers.
1657  *
1658  * Note that the caller is expected to zero out the masks initially.
1659  */
1660 static void intel_get_hpd_pins(struct drm_i915_private *dev_priv,
1661 			       u32 *pin_mask, u32 *long_mask,
1662 			       u32 hotplug_trigger, u32 dig_hotplug_reg,
1663 			       const u32 hpd[HPD_NUM_PINS],
1664 			       bool long_pulse_detect(enum port port, u32 val))
1665 {
1666 	enum port port;
1667 	int i;
1668 
1669 	for_each_hpd_pin(i) {
1670 		if ((hpd[i] & hotplug_trigger) == 0)
1671 			continue;
1672 
1673 		*pin_mask |= BIT(i);
1674 
1675 		port = intel_hpd_pin_to_port(dev_priv, i);
1676 		if (port == PORT_NONE)
1677 			continue;
1678 
1679 		if (long_pulse_detect(port, dig_hotplug_reg))
1680 			*long_mask |= BIT(i);
1681 	}
1682 
1683 	DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x\n",
1684 			 hotplug_trigger, dig_hotplug_reg, *pin_mask);
1685 
1686 }
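
/*
 * Illustrative usage (compiled out): a PCH hotplug handler accumulates pins
 * from one or more trigger/dig registers and then hands the result to the
 * hotplug core, roughly:
 */
#if 0
	u32 pin_mask = 0, long_mask = 0;

	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
			   hotplug_trigger, dig_hotplug_reg,
			   hpd_ibx, pch_port_hotplug_long_detect);

	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
#endif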
1687 
1688 static void gmbus_irq_handler(struct drm_i915_private *dev_priv)
1689 {
1690 	wake_up_all(&dev_priv->gmbus_wait_queue);
1691 }
1692 
1693 static void dp_aux_irq_handler(struct drm_i915_private *dev_priv)
1694 {
1695 	wake_up_all(&dev_priv->gmbus_wait_queue);
1696 }
1697 
1698 #if defined(CONFIG_DEBUG_FS)
1699 static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1700 					 enum pipe pipe,
1701 					 uint32_t crc0, uint32_t crc1,
1702 					 uint32_t crc2, uint32_t crc3,
1703 					 uint32_t crc4)
1704 {
1705 	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
1706 	struct intel_pipe_crc_entry *entry;
1707 	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
1708 	struct drm_driver *driver = dev_priv->drm.driver;
1709 	uint32_t crcs[5];
1710 	int head, tail;
1711 
1712 	spin_lock(&pipe_crc->lock);
1713 	if (pipe_crc->source && !crtc->base.crc.opened) {
1714 		if (!pipe_crc->entries) {
1715 			spin_unlock(&pipe_crc->lock);
1716 			DRM_DEBUG_KMS("spurious interrupt\n");
1717 			return;
1718 		}
1719 
1720 		head = pipe_crc->head;
1721 		tail = pipe_crc->tail;
1722 
1723 		if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
1724 			spin_unlock(&pipe_crc->lock);
1725 			DRM_ERROR("CRC buffer overflowing\n");
1726 			return;
1727 		}
1728 
1729 		entry = &pipe_crc->entries[head];
1730 
1731 		entry->frame = driver->get_vblank_counter(&dev_priv->drm, pipe);
1732 		entry->crc[0] = crc0;
1733 		entry->crc[1] = crc1;
1734 		entry->crc[2] = crc2;
1735 		entry->crc[3] = crc3;
1736 		entry->crc[4] = crc4;
1737 
1738 		head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
1739 		pipe_crc->head = head;
1740 
1741 		spin_unlock(&pipe_crc->lock);
1742 
1743 		wake_up_interruptible(&pipe_crc->wq);
1744 	} else {
1745 		/*
1746 		 * For some not yet identified reason, the first CRC is
1747 		 * bonkers. So let's just wait for the next vblank and read
1748 		 * out the buggy result.
1749 		 *
1750 		 * On GEN8+ sometimes the second CRC is bonkers as well, so
1751 		 * don't trust that one either.
1752 		 */
1753 		if (pipe_crc->skipped <= 0 ||
1754 		    (INTEL_GEN(dev_priv) >= 8 && pipe_crc->skipped == 1)) {
1755 			pipe_crc->skipped++;
1756 			spin_unlock(&pipe_crc->lock);
1757 			return;
1758 		}
1759 		spin_unlock(&pipe_crc->lock);
1760 		crcs[0] = crc0;
1761 		crcs[1] = crc1;
1762 		crcs[2] = crc2;
1763 		crcs[3] = crc3;
1764 		crcs[4] = crc4;
1765 		drm_crtc_add_crc_entry(&crtc->base, true,
1766 				       drm_crtc_accurate_vblank_count(&crtc->base),
1767 				       crcs);
1768 	}
1769 }
1770 #else
1771 static inline void
1772 display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1773 			     enum pipe pipe,
1774 			     uint32_t crc0, uint32_t crc1,
1775 			     uint32_t crc2, uint32_t crc3,
1776 			     uint32_t crc4) {}
1777 #endif
1778 
1779 
1780 static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1781 				     enum pipe pipe)
1782 {
1783 	display_pipe_crc_irq_handler(dev_priv, pipe,
1784 				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1785 				     0, 0, 0, 0);
1786 }
1787 
1788 static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1789 				     enum pipe pipe)
1790 {
1791 	display_pipe_crc_irq_handler(dev_priv, pipe,
1792 				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1793 				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
1794 				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
1795 				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
1796 				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
1797 }
1798 
1799 static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1800 				      enum pipe pipe)
1801 {
1802 	uint32_t res1, res2;
1803 
1804 	if (INTEL_GEN(dev_priv) >= 3)
1805 		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
1806 	else
1807 		res1 = 0;
1808 
1809 	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
1810 		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
1811 	else
1812 		res2 = 0;
1813 
1814 	display_pipe_crc_irq_handler(dev_priv, pipe,
1815 				     I915_READ(PIPE_CRC_RES_RED(pipe)),
1816 				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
1817 				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
1818 				     res1, res2);
1819 }
1820 
1821 /* The RPS events need forcewake, so we add them to a work queue and mask their
1822  * IMR bits until the work is done. Other interrupts can be processed without
1823  * the work queue. */
1824 static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
1825 {
1826 	struct intel_rps *rps = &dev_priv->gt_pm.rps;
1827 
1828 	if (pm_iir & dev_priv->pm_rps_events) {
1829 		spin_lock(&dev_priv->irq_lock);
1830 		gen6_mask_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
1831 		if (rps->interrupts_enabled) {
1832 			rps->pm_iir |= pm_iir & dev_priv->pm_rps_events;
1833 			schedule_work(&rps->work);
1834 		}
1835 		spin_unlock(&dev_priv->irq_lock);
1836 	}
1837 
1838 	if (INTEL_GEN(dev_priv) >= 8)
1839 		return;
1840 
1841 	if (HAS_VEBOX(dev_priv)) {
1842 		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
1843 			notify_ring(dev_priv->engine[VECS]);
1844 
1845 		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
1846 			DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
1847 	}
1848 }
1849 
1850 static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 gt_iir)
1851 {
1852 	if (gt_iir & GEN9_GUC_TO_HOST_INT_EVENT)
1853 		intel_guc_to_host_event_handler(&dev_priv->guc);
1854 }
1855 
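/*
 * Clear all latched PIPESTAT status bits on every pipe (including the
 * FIFO underrun status, which never raises an interrupt of its own) and
 * forget the software copy of each pipe's enabled status mask.
 */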
1856 static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
1857 {
1858 	enum pipe pipe;
1859 
1860 	for_each_pipe(dev_priv, pipe) {
1861 		I915_WRITE(PIPESTAT(pipe),
1862 			   PIPESTAT_INT_STATUS_MASK |
1863 			   PIPE_FIFO_UNDERRUN_STATUS);
1864 
1865 		dev_priv->pipestat_irq_mask[pipe] = 0;
1866 	}
1867 }
1868 
1869 static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
1870 				  u32 iir, u32 pipe_stats[I915_MAX_PIPES])
1871 {
1872 	int pipe;
1873 
1874 	spin_lock(&dev_priv->irq_lock);
1875 
1876 	if (!dev_priv->display_irqs_enabled) {
1877 		spin_unlock(&dev_priv->irq_lock);
1878 		return;
1879 	}
1880 
1881 	for_each_pipe(dev_priv, pipe) {
1882 		i915_reg_t reg;
1883 		u32 status_mask, enable_mask, iir_bit = 0;
1884 
1885 		/*
1886 		 * PIPESTAT bits get signalled even when the interrupt is
1887 		 * disabled with the mask bits, and some of the status bits do
1888 		 * not generate interrupts at all (like the underrun bit). Hence
1889 		 * we need to be careful that we only handle what we want to
1890 		 * handle.
1891 		 */
1892 
1893 		/* fifo underruns are filtered in the underrun handler. */
1894 		status_mask = PIPE_FIFO_UNDERRUN_STATUS;
1895 
1896 		switch (pipe) {
1897 		case PIPE_A:
1898 			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
1899 			break;
1900 		case PIPE_B:
1901 			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
1902 			break;
1903 		case PIPE_C:
1904 			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
1905 			break;
1906 		}
1907 		if (iir & iir_bit)
1908 			status_mask |= dev_priv->pipestat_irq_mask[pipe];
1909 
1910 		if (!status_mask)
1911 			continue;
1912 
1913 		reg = PIPESTAT(pipe);
1914 		pipe_stats[pipe] = I915_READ(reg) & status_mask;
1915 		enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
1916 
1917 		/*
1918 		 * Clear the PIPE*STAT regs before the IIR
1919 		 *
1920 		 * Toggle the enable bits to make sure we get an
1921 		 * edge in the ISR pipe event bit if we don't clear
1922 		 * all the enabled status bits. Otherwise the edge
1923 		 * triggered IIR on i965/g4x wouldn't notice that
1924 		 * an interrupt is still pending.
1925 		 */
1926 		if (pipe_stats[pipe]) {
1927 			I915_WRITE(reg, pipe_stats[pipe]);
1928 			I915_WRITE(reg, enable_mask);
1929 		}
1930 	}
1931 	spin_unlock(&dev_priv->irq_lock);
1932 }
1933 
1934 static void i8xx_pipestat_irq_handler(struct drm_i915_private *dev_priv,
1935 				      u16 iir, u32 pipe_stats[I915_MAX_PIPES])
1936 {
1937 	enum pipe pipe;
1938 
1939 	for_each_pipe(dev_priv, pipe) {
1940 		if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
1941 			drm_handle_vblank(&dev_priv->drm, pipe);
1942 
1943 		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1944 			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1945 
1946 		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1947 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1948 	}
1949 }
1950 
1951 static void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv,
1952 				      u32 iir, u32 pipe_stats[I915_MAX_PIPES])
1953 {
1954 	bool blc_event = false;
1955 	enum pipe pipe;
1956 
1957 	for_each_pipe(dev_priv, pipe) {
1958 		if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
1959 			drm_handle_vblank(&dev_priv->drm, pipe);
1960 
1961 		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
1962 			blc_event = true;
1963 
1964 		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1965 			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1966 
1967 		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1968 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1969 	}
1970 
1971 	if (blc_event || (iir & I915_ASLE_INTERRUPT))
1972 		intel_opregion_asle_intr(dev_priv);
1973 }
1974 
1975 static void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv,
1976 				      u32 iir, u32 pipe_stats[I915_MAX_PIPES])
1977 {
1978 	bool blc_event = false;
1979 	enum pipe pipe;
1980 
1981 	for_each_pipe(dev_priv, pipe) {
1982 		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
1983 			drm_handle_vblank(&dev_priv->drm, pipe);
1984 
1985 		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
1986 			blc_event = true;
1987 
1988 		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1989 			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1990 
1991 		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1992 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1993 	}
1994 
1995 	if (blc_event || (iir & I915_ASLE_INTERRUPT))
1996 		intel_opregion_asle_intr(dev_priv);
1997 
1998 	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
1999 		gmbus_irq_handler(dev_priv);
2000 }
2001 
2002 static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
2003 					    u32 pipe_stats[I915_MAX_PIPES])
2004 {
2005 	enum pipe pipe;
2006 
2007 	for_each_pipe(dev_priv, pipe) {
2008 		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
2009 			drm_handle_vblank(&dev_priv->drm, pipe);
2010 
2011 		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
2012 			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
2013 
2014 		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
2015 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2016 	}
2017 
2018 	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
2019 		gmbus_irq_handler(dev_priv);
2020 }
2021 
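/*
 * Read PORT_HOTPLUG_STAT and, if anything is latched, write the value
 * straight back to ack it so a new hotplug event can trigger again.
 * The raw status is returned for decoding later in the handler.
 */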
2022 static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
2023 {
2024 	u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
2025 
2026 	if (hotplug_status)
2027 		I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
2028 
2029 	return hotplug_status;
2030 }
2031 
2032 static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv,
2033 				 u32 hotplug_status)
2034 {
2035 	u32 pin_mask = 0, long_mask = 0;
2036 
2037 	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
2038 	    IS_CHERRYVIEW(dev_priv)) {
2039 		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
2040 
2041 		if (hotplug_trigger) {
2042 			intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2043 					   hotplug_trigger, hotplug_trigger,
2044 					   hpd_status_g4x,
2045 					   i9xx_port_hotplug_long_detect);
2046 
2047 			intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2048 		}
2049 
2050 		if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
2051 			dp_aux_irq_handler(dev_priv);
2052 	} else {
2053 		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
2054 
2055 		if (hotplug_trigger) {
2056 			intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2057 					   hotplug_trigger, hotplug_trigger,
2058 					   hpd_status_i915,
2059 					   i9xx_port_hotplug_long_detect);
2060 			intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2061 		}
2062 	}
2063 }
2064 
2065 static irqreturn_t valleyview_irq_handler(int irq, void *arg)
2066 {
2067 	struct drm_device *dev = arg;
2068 	struct drm_i915_private *dev_priv = to_i915(dev);
2069 	irqreturn_t ret = IRQ_NONE;
2070 
2071 	if (!intel_irqs_enabled(dev_priv))
2072 		return IRQ_NONE;
2073 
2074 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
2075 	disable_rpm_wakeref_asserts(dev_priv);
2076 
2077 	do {
2078 		u32 iir, gt_iir, pm_iir;
2079 		u32 pipe_stats[I915_MAX_PIPES] = {};
2080 		u32 hotplug_status = 0;
2081 		u32 ier = 0;
2082 
2083 		gt_iir = I915_READ(GTIIR);
2084 		pm_iir = I915_READ(GEN6_PMIIR);
2085 		iir = I915_READ(VLV_IIR);
2086 
2087 		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
2088 			break;
2089 
2090 		ret = IRQ_HANDLED;
2091 
2092 		/*
2093 		 * Theory on interrupt generation, based on empirical evidence:
2094 		 *
2095 		 * x = ((VLV_IIR & VLV_IER) ||
2096 		 *      (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) &&
2097 		 *       (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE)));
2098 		 *
2099 		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
2100 		 * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to
2101 		 * guarantee the CPU interrupt will be raised again even if we
2102 		 * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR
2103 		 * bits this time around.
2104 		 */
2105 		I915_WRITE(VLV_MASTER_IER, 0);
2106 		ier = I915_READ(VLV_IER);
2107 		I915_WRITE(VLV_IER, 0);
2108 
2109 		if (gt_iir)
2110 			I915_WRITE(GTIIR, gt_iir);
2111 		if (pm_iir)
2112 			I915_WRITE(GEN6_PMIIR, pm_iir);
2113 
2114 		if (iir & I915_DISPLAY_PORT_INTERRUPT)
2115 			hotplug_status = i9xx_hpd_irq_ack(dev_priv);
2116 
2117 		/* Call regardless, as some status bits might not be
2118 		 * signalled in iir */
2119 		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
2120 
2121 		if (iir & (I915_LPE_PIPE_A_INTERRUPT |
2122 			   I915_LPE_PIPE_B_INTERRUPT))
2123 			intel_lpe_audio_irq_handler(dev_priv);
2124 
2125 		/*
2126 		 * VLV_IIR is single buffered, and reflects the level
2127 		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
2128 		 */
2129 		if (iir)
2130 			I915_WRITE(VLV_IIR, iir);
2131 
2132 		I915_WRITE(VLV_IER, ier);
2133 		I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
2134 		POSTING_READ(VLV_MASTER_IER);
2135 
2136 		if (gt_iir)
2137 			snb_gt_irq_handler(dev_priv, gt_iir);
2138 		if (pm_iir)
2139 			gen6_rps_irq_handler(dev_priv, pm_iir);
2140 
2141 		if (hotplug_status)
2142 			i9xx_hpd_irq_handler(dev_priv, hotplug_status);
2143 
2144 		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
2145 	} while (0);
2146 
2147 	enable_rpm_wakeref_asserts(dev_priv);
2148 
2149 	return ret;
2150 }
2151 
2152 static irqreturn_t cherryview_irq_handler(int irq, void *arg)
2153 {
2154 	struct drm_device *dev = arg;
2155 	struct drm_i915_private *dev_priv = to_i915(dev);
2156 	irqreturn_t ret = IRQ_NONE;
2157 
2158 	if (!intel_irqs_enabled(dev_priv))
2159 		return IRQ_NONE;
2160 
2161 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
2162 	disable_rpm_wakeref_asserts(dev_priv);
2163 
2164 	do {
2165 		u32 master_ctl, iir;
2166 		u32 pipe_stats[I915_MAX_PIPES] = {};
2167 		u32 hotplug_status = 0;
2168 		u32 gt_iir[4];
2169 		u32 ier = 0;
2170 
2171 		master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
2172 		iir = I915_READ(VLV_IIR);
2173 
2174 		if (master_ctl == 0 && iir == 0)
2175 			break;
2176 
2177 		ret = IRQ_HANDLED;
2178 
2179 		/*
2180 		 * Theory on interrupt generation, based on empirical evidence:
2181 		 *
2182 		 * x = ((VLV_IIR & VLV_IER) ||
2183 		 *      ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) &&
2184 		 *       (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL)));
2185 		 *
2186 		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
2187 		 * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to
2188 		 * guarantee the CPU interrupt will be raised again even if we
2189 		 * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL
2190 		 * bits this time around.
2191 		 */
2192 		I915_WRITE(GEN8_MASTER_IRQ, 0);
2193 		ier = I915_READ(VLV_IER);
2194 		I915_WRITE(VLV_IER, 0);
2195 
2196 		gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);
2197 
2198 		if (iir & I915_DISPLAY_PORT_INTERRUPT)
2199 			hotplug_status = i9xx_hpd_irq_ack(dev_priv);
2200 
2201 		/* Call regardless, as some status bits might not be
2202 		 * signalled in iir */
2203 		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
2204 
2205 		if (iir & (I915_LPE_PIPE_A_INTERRUPT |
2206 			   I915_LPE_PIPE_B_INTERRUPT |
2207 			   I915_LPE_PIPE_C_INTERRUPT))
2208 			intel_lpe_audio_irq_handler(dev_priv);
2209 
2210 		/*
2211 		 * VLV_IIR is single buffered, and reflects the level
2212 		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
2213 		 */
2214 		if (iir)
2215 			I915_WRITE(VLV_IIR, iir);
2216 
2217 		I915_WRITE(VLV_IER, ier);
2218 		I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
2219 		POSTING_READ(GEN8_MASTER_IRQ);
2220 
2221 		gen8_gt_irq_handler(dev_priv, master_ctl, gt_iir);
2222 
2223 		if (hotplug_status)
2224 			i9xx_hpd_irq_handler(dev_priv, hotplug_status);
2225 
2226 		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
2227 	} while (0);
2228 
2229 	enable_rpm_wakeref_asserts(dev_priv);
2230 
2231 	return ret;
2232 }
2233 
2234 static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
2235 				u32 hotplug_trigger,
2236 				const u32 hpd[HPD_NUM_PINS])
2237 {
2238 	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2239 
2240 	/*
2241 	 * Somehow the PCH doesn't seem to really ack the interrupt to the CPU
2242 	 * unless we touch the hotplug register, even if hotplug_trigger is
2243 	 * zero. Not acking leads to "The master control interrupt lied (SDE)!"
2244 	 * errors.
2245 	 */
2246 	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2247 	if (!hotplug_trigger) {
2248 		u32 mask = PORTA_HOTPLUG_STATUS_MASK |
2249 			PORTD_HOTPLUG_STATUS_MASK |
2250 			PORTC_HOTPLUG_STATUS_MASK |
2251 			PORTB_HOTPLUG_STATUS_MASK;
2252 		dig_hotplug_reg &= ~mask;
2253 	}
2254 
2255 	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2256 	if (!hotplug_trigger)
2257 		return;
2258 
2259 	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger,
2260 			   dig_hotplug_reg, hpd,
2261 			   pch_port_hotplug_long_detect);
2262 
2263 	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2264 }
2265 
2266 static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
2267 {
2268 	int pipe;
2269 	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
2270 
2271 	ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ibx);
2272 
2273 	if (pch_iir & SDE_AUDIO_POWER_MASK) {
2274 		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
2275 			       SDE_AUDIO_POWER_SHIFT);
2276 		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
2277 				 port_name(port));
2278 	}
2279 
2280 	if (pch_iir & SDE_AUX_MASK)
2281 		dp_aux_irq_handler(dev_priv);
2282 
2283 	if (pch_iir & SDE_GMBUS)
2284 		gmbus_irq_handler(dev_priv);
2285 
2286 	if (pch_iir & SDE_AUDIO_HDCP_MASK)
2287 		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
2288 
2289 	if (pch_iir & SDE_AUDIO_TRANS_MASK)
2290 		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
2291 
2292 	if (pch_iir & SDE_POISON)
2293 		DRM_ERROR("PCH poison interrupt\n");
2294 
2295 	if (pch_iir & SDE_FDI_MASK)
2296 		for_each_pipe(dev_priv, pipe)
2297 			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
2298 					 pipe_name(pipe),
2299 					 I915_READ(FDI_RX_IIR(pipe)));
2300 
2301 	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
2302 		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
2303 
2304 	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
2305 		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
2306 
2307 	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
2308 		intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A);
2309 
2310 	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
2311 		intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_B);
2312 }
2313 
2314 static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
2315 {
2316 	u32 err_int = I915_READ(GEN7_ERR_INT);
2317 	enum pipe pipe;
2318 
2319 	if (err_int & ERR_INT_POISON)
2320 		DRM_ERROR("Poison interrupt\n");
2321 
2322 	for_each_pipe(dev_priv, pipe) {
2323 		if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
2324 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2325 
2326 		if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
2327 			if (IS_IVYBRIDGE(dev_priv))
2328 				ivb_pipe_crc_irq_handler(dev_priv, pipe);
2329 			else
2330 				hsw_pipe_crc_irq_handler(dev_priv, pipe);
2331 		}
2332 	}
2333 
2334 	I915_WRITE(GEN7_ERR_INT, err_int);
2335 }
2336 
2337 static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
2338 {
2339 	u32 serr_int = I915_READ(SERR_INT);
2340 	enum pipe pipe;
2341 
2342 	if (serr_int & SERR_INT_POISON)
2343 		DRM_ERROR("PCH poison interrupt\n");
2344 
2345 	for_each_pipe(dev_priv, pipe)
2346 		if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe))
2347 			intel_pch_fifo_underrun_irq_handler(dev_priv, pipe);
2348 
2349 	I915_WRITE(SERR_INT, serr_int);
2350 }
2351 
2352 static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
2353 {
2354 	int pipe;
2355 	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
2356 
2357 	ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_cpt);
2358 
2359 	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
2360 		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
2361 			       SDE_AUDIO_POWER_SHIFT_CPT);
2362 		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
2363 				 port_name(port));
2364 	}
2365 
2366 	if (pch_iir & SDE_AUX_MASK_CPT)
2367 		dp_aux_irq_handler(dev_priv);
2368 
2369 	if (pch_iir & SDE_GMBUS_CPT)
2370 		gmbus_irq_handler(dev_priv);
2371 
2372 	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
2373 		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
2374 
2375 	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
2376 		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
2377 
2378 	if (pch_iir & SDE_FDI_MASK_CPT)
2379 		for_each_pipe(dev_priv, pipe)
2380 			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
2381 					 pipe_name(pipe),
2382 					 I915_READ(FDI_RX_IIR(pipe)));
2383 
2384 	if (pch_iir & SDE_ERROR_CPT)
2385 		cpt_serr_int_handler(dev_priv);
2386 }
2387 
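/*
 * On SPT the hotplug status for ports A-D lives in PCH_PORT_HOTPLUG
 * while port E is reported separately in PCH_PORT_HOTPLUG2, so both
 * registers are decoded before the combined pin mask is passed on to
 * the hotplug core.
 */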
2388 static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
2389 {
2390 	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
2391 		~SDE_PORTE_HOTPLUG_SPT;
2392 	u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
2393 	u32 pin_mask = 0, long_mask = 0;
2394 
2395 	if (hotplug_trigger) {
2396 		u32 dig_hotplug_reg;
2397 
2398 		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2399 		I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2400 
2401 		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2402 				   hotplug_trigger, dig_hotplug_reg, hpd_spt,
2403 				   spt_port_hotplug_long_detect);
2404 	}
2405 
2406 	if (hotplug2_trigger) {
2407 		u32 dig_hotplug_reg;
2408 
2409 		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2);
2410 		I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg);
2411 
2412 		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2413 				   hotplug2_trigger, dig_hotplug_reg, hpd_spt,
2414 				   spt_port_hotplug2_long_detect);
2415 	}
2416 
2417 	if (pin_mask)
2418 		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2419 
2420 	if (pch_iir & SDE_GMBUS_CPT)
2421 		gmbus_irq_handler(dev_priv);
2422 }
2423 
2424 static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv,
2425 				u32 hotplug_trigger,
2426 				const u32 hpd[HPD_NUM_PINS])
2427 {
2428 	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2429 
2430 	dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
2431 	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);
2432 
2433 	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger,
2434 			   dig_hotplug_reg, hpd,
2435 			   ilk_port_hotplug_long_detect);
2436 
2437 	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2438 }
2439 
2440 static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
2441 				    u32 de_iir)
2442 {
2443 	enum pipe pipe;
2444 	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;
2445 
2446 	if (hotplug_trigger)
2447 		ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ilk);
2448 
2449 	if (de_iir & DE_AUX_CHANNEL_A)
2450 		dp_aux_irq_handler(dev_priv);
2451 
2452 	if (de_iir & DE_GSE)
2453 		intel_opregion_asle_intr(dev_priv);
2454 
2455 	if (de_iir & DE_POISON)
2456 		DRM_ERROR("Poison interrupt\n");
2457 
2458 	for_each_pipe(dev_priv, pipe) {
2459 		if (de_iir & DE_PIPE_VBLANK(pipe))
2460 			drm_handle_vblank(&dev_priv->drm, pipe);
2461 
2462 		if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
2463 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2464 
2465 		if (de_iir & DE_PIPE_CRC_DONE(pipe))
2466 			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
2467 	}
2468 
2469 	/* check event from PCH */
2470 	if (de_iir & DE_PCH_EVENT) {
2471 		u32 pch_iir = I915_READ(SDEIIR);
2472 
2473 		if (HAS_PCH_CPT(dev_priv))
2474 			cpt_irq_handler(dev_priv, pch_iir);
2475 		else
2476 			ibx_irq_handler(dev_priv, pch_iir);
2477 
2478 		/* should clear PCH hotplug event before clear CPU irq */
2479 		I915_WRITE(SDEIIR, pch_iir);
2480 	}
2481 
2482 	if (IS_GEN5(dev_priv) && de_iir & DE_PCU_EVENT)
2483 		ironlake_rps_change_irq_handler(dev_priv);
2484 }
2485 
2486 static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
2487 				    u32 de_iir)
2488 {
2489 	enum pipe pipe;
2490 	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;
2491 
2492 	if (hotplug_trigger)
2493 		ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ivb);
2494 
2495 	if (de_iir & DE_ERR_INT_IVB)
2496 		ivb_err_int_handler(dev_priv);
2497 
2498 	if (de_iir & DE_EDP_PSR_INT_HSW) {
2499 		u32 psr_iir = I915_READ(EDP_PSR_IIR);
2500 
2501 		intel_psr_irq_handler(dev_priv, psr_iir);
2502 		I915_WRITE(EDP_PSR_IIR, psr_iir);
2503 	}
2504 
2505 	if (de_iir & DE_AUX_CHANNEL_A_IVB)
2506 		dp_aux_irq_handler(dev_priv);
2507 
2508 	if (de_iir & DE_GSE_IVB)
2509 		intel_opregion_asle_intr(dev_priv);
2510 
2511 	for_each_pipe(dev_priv, pipe) {
2512 		if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)))
2513 			drm_handle_vblank(&dev_priv->drm, pipe);
2514 	}
2515 
2516 	/* check event from PCH */
2517 	if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) {
2518 		u32 pch_iir = I915_READ(SDEIIR);
2519 
2520 		cpt_irq_handler(dev_priv, pch_iir);
2521 
2522 		/* clear PCH hotplug event before clear CPU irq */
2523 		I915_WRITE(SDEIIR, pch_iir);
2524 	}
2525 }
2526 
2527 /*
2528  * To handle irqs with the minimum potential races with fresh interrupts, we:
2529  * 1 - Disable Master Interrupt Control.
2530  * 2 - Find the source(s) of the interrupt.
2531  * 3 - Clear the Interrupt Identity bits (IIR).
2532  * 4 - Process the interrupt(s) that had bits set in the IIRs.
2533  * 5 - Re-enable Master Interrupt Control.
2534  */
2535 static irqreturn_t ironlake_irq_handler(int irq, void *arg)
2536 {
2537 	struct drm_device *dev = arg;
2538 	struct drm_i915_private *dev_priv = to_i915(dev);
2539 	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
2540 	irqreturn_t ret = IRQ_NONE;
2541 
2542 	if (!intel_irqs_enabled(dev_priv))
2543 		return IRQ_NONE;
2544 
2545 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
2546 	disable_rpm_wakeref_asserts(dev_priv);
2547 
2548 	/* disable master interrupt before clearing iir  */
2549 	de_ier = I915_READ(DEIER);
2550 	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
2551 	POSTING_READ(DEIER);
2552 
2553 	/* Disable south interrupts. We'll only write to SDEIIR once, so further
2554 	 * interrupts will be stored on its back queue, and then we'll be
2555 	 * able to process them after we restore SDEIER (as soon as we restore
2556 	 * it, we'll get an interrupt if SDEIIR still has something to process
2557 	 * due to its back queue). */
2558 	if (!HAS_PCH_NOP(dev_priv)) {
2559 		sde_ier = I915_READ(SDEIER);
2560 		I915_WRITE(SDEIER, 0);
2561 		POSTING_READ(SDEIER);
2562 	}
2563 
2564 	/* Find, clear, then process each source of interrupt */
2565 
2566 	gt_iir = I915_READ(GTIIR);
2567 	if (gt_iir) {
2568 		I915_WRITE(GTIIR, gt_iir);
2569 		ret = IRQ_HANDLED;
2570 		if (INTEL_GEN(dev_priv) >= 6)
2571 			snb_gt_irq_handler(dev_priv, gt_iir);
2572 		else
2573 			ilk_gt_irq_handler(dev_priv, gt_iir);
2574 	}
2575 
2576 	de_iir = I915_READ(DEIIR);
2577 	if (de_iir) {
2578 		I915_WRITE(DEIIR, de_iir);
2579 		ret = IRQ_HANDLED;
2580 		if (INTEL_GEN(dev_priv) >= 7)
2581 			ivb_display_irq_handler(dev_priv, de_iir);
2582 		else
2583 			ilk_display_irq_handler(dev_priv, de_iir);
2584 	}
2585 
2586 	if (INTEL_GEN(dev_priv) >= 6) {
2587 		u32 pm_iir = I915_READ(GEN6_PMIIR);
2588 		if (pm_iir) {
2589 			I915_WRITE(GEN6_PMIIR, pm_iir);
2590 			ret = IRQ_HANDLED;
2591 			gen6_rps_irq_handler(dev_priv, pm_iir);
2592 		}
2593 	}
2594 
2595 	I915_WRITE(DEIER, de_ier);
2596 	POSTING_READ(DEIER);
2597 	if (!HAS_PCH_NOP(dev_priv)) {
2598 		I915_WRITE(SDEIER, sde_ier);
2599 		POSTING_READ(SDEIER);
2600 	}
2601 
2602 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
2603 	enable_rpm_wakeref_asserts(dev_priv);
2604 
2605 	return ret;
2606 }
2607 
2608 static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv,
2609 				u32 hotplug_trigger,
2610 				const u32 hpd[HPD_NUM_PINS])
2611 {
2612 	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2613 
2614 	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2615 	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2616 
2617 	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger,
2618 			   dig_hotplug_reg, hpd,
2619 			   bxt_port_hotplug_long_detect);
2620 
2621 	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2622 }
2623 
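/*
 * Gen11 splits digital port hotplug into Type-C and Thunderbolt groups,
 * each with its own hotplug control register. Decode whichever groups
 * triggered and forward the combined pin mask to the hotplug core.
 */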
2624 static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
2625 {
2626 	u32 pin_mask = 0, long_mask = 0;
2627 	u32 trigger_tc = iir & GEN11_DE_TC_HOTPLUG_MASK;
2628 	u32 trigger_tbt = iir & GEN11_DE_TBT_HOTPLUG_MASK;
2629 
2630 	if (trigger_tc) {
2631 		u32 dig_hotplug_reg;
2632 
2633 		dig_hotplug_reg = I915_READ(GEN11_TC_HOTPLUG_CTL);
2634 		I915_WRITE(GEN11_TC_HOTPLUG_CTL, dig_hotplug_reg);
2635 
2636 		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tc,
2637 				   dig_hotplug_reg, hpd_gen11,
2638 				   gen11_port_hotplug_long_detect);
2639 	}
2640 
2641 	if (trigger_tbt) {
2642 		u32 dig_hotplug_reg;
2643 
2644 		dig_hotplug_reg = I915_READ(GEN11_TBT_HOTPLUG_CTL);
2645 		I915_WRITE(GEN11_TBT_HOTPLUG_CTL, dig_hotplug_reg);
2646 
2647 		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tbt,
2648 				   dig_hotplug_reg, hpd_gen11,
2649 				   gen11_port_hotplug_long_detect);
2650 	}
2651 
2652 	if (pin_mask)
2653 		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2654 	else
2655 		DRM_ERROR("Unexpected DE HPD interrupt 0x%08x\n", iir);
2656 }
2657 
2658 static irqreturn_t
2659 gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
2660 {
2661 	irqreturn_t ret = IRQ_NONE;
2662 	u32 iir;
2663 	enum pipe pipe;
2664 
2665 	if (master_ctl & GEN8_DE_MISC_IRQ) {
2666 		iir = I915_READ(GEN8_DE_MISC_IIR);
2667 		if (iir) {
2668 			bool found = false;
2669 
2670 			I915_WRITE(GEN8_DE_MISC_IIR, iir);
2671 			ret = IRQ_HANDLED;
2672 
2673 			if (iir & GEN8_DE_MISC_GSE) {
2674 				intel_opregion_asle_intr(dev_priv);
2675 				found = true;
2676 			}
2677 
2678 			if (iir & GEN8_DE_EDP_PSR) {
2679 				u32 psr_iir = I915_READ(EDP_PSR_IIR);
2680 
2681 				intel_psr_irq_handler(dev_priv, psr_iir);
2682 				I915_WRITE(EDP_PSR_IIR, psr_iir);
2683 				found = true;
2684 			}
2685 
2686 			if (!found)
2687 				DRM_ERROR("Unexpected DE Misc interrupt\n");
2688 		}
2689 		else
2690 			DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
2691 	}
2692 
2693 	if (INTEL_GEN(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) {
2694 		iir = I915_READ(GEN11_DE_HPD_IIR);
2695 		if (iir) {
2696 			I915_WRITE(GEN11_DE_HPD_IIR, iir);
2697 			ret = IRQ_HANDLED;
2698 			gen11_hpd_irq_handler(dev_priv, iir);
2699 		} else {
2700 			DRM_ERROR("The master control interrupt lied, (DE HPD)!\n");
2701 		}
2702 	}
2703 
2704 	if (master_ctl & GEN8_DE_PORT_IRQ) {
2705 		iir = I915_READ(GEN8_DE_PORT_IIR);
2706 		if (iir) {
2707 			u32 tmp_mask;
2708 			bool found = false;
2709 
2710 			I915_WRITE(GEN8_DE_PORT_IIR, iir);
2711 			ret = IRQ_HANDLED;
2712 
2713 			tmp_mask = GEN8_AUX_CHANNEL_A;
2714 			if (INTEL_GEN(dev_priv) >= 9)
2715 				tmp_mask |= GEN9_AUX_CHANNEL_B |
2716 					    GEN9_AUX_CHANNEL_C |
2717 					    GEN9_AUX_CHANNEL_D;
2718 
2719 			if (INTEL_GEN(dev_priv) >= 11)
2720 				tmp_mask |= ICL_AUX_CHANNEL_E;
2721 
2722 			if (IS_CNL_WITH_PORT_F(dev_priv) ||
2723 			    INTEL_GEN(dev_priv) >= 11)
2724 				tmp_mask |= CNL_AUX_CHANNEL_F;
2725 
2726 			if (iir & tmp_mask) {
2727 				dp_aux_irq_handler(dev_priv);
2728 				found = true;
2729 			}
2730 
2731 			if (IS_GEN9_LP(dev_priv)) {
2732 				tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK;
2733 				if (tmp_mask) {
2734 					bxt_hpd_irq_handler(dev_priv, tmp_mask,
2735 							    hpd_bxt);
2736 					found = true;
2737 				}
2738 			} else if (IS_BROADWELL(dev_priv)) {
2739 				tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG;
2740 				if (tmp_mask) {
2741 					ilk_hpd_irq_handler(dev_priv,
2742 							    tmp_mask, hpd_bdw);
2743 					found = true;
2744 				}
2745 			}
2746 
2747 			if (IS_GEN9_LP(dev_priv) && (iir & BXT_DE_PORT_GMBUS)) {
2748 				gmbus_irq_handler(dev_priv);
2749 				found = true;
2750 			}
2751 
2752 			if (!found)
2753 				DRM_ERROR("Unexpected DE Port interrupt\n");
2754 		}
2755 		else
2756 			DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
2757 	}
2758 
2759 	for_each_pipe(dev_priv, pipe) {
2760 		u32 fault_errors;
2761 
2762 		if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
2763 			continue;
2764 
2765 		iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
2766 		if (!iir) {
2767 			DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
2768 			continue;
2769 		}
2770 
2771 		ret = IRQ_HANDLED;
2772 		I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir);
2773 
2774 		if (iir & GEN8_PIPE_VBLANK)
2775 			drm_handle_vblank(&dev_priv->drm, pipe);
2776 
2777 		if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
2778 			hsw_pipe_crc_irq_handler(dev_priv, pipe);
2779 
2780 		if (iir & GEN8_PIPE_FIFO_UNDERRUN)
2781 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2782 
2783 		fault_errors = iir;
2784 		if (INTEL_GEN(dev_priv) >= 9)
2785 			fault_errors &= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
2786 		else
2787 			fault_errors &= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
2788 
2789 		if (fault_errors)
2790 			DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
2791 				  pipe_name(pipe),
2792 				  fault_errors);
2793 	}
2794 
2795 	if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) &&
2796 	    master_ctl & GEN8_DE_PCH_IRQ) {
2797 		/*
2798 		 * FIXME(BDW): Assume for now that the new interrupt handling
2799 		 * scheme also closed the SDE interrupt handling race we've seen
2800 		 * on older pch-split platforms. But this needs testing.
2801 		 */
2802 		iir = I915_READ(SDEIIR);
2803 		if (iir) {
2804 			I915_WRITE(SDEIIR, iir);
2805 			ret = IRQ_HANDLED;
2806 
2807 			if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv) ||
2808 			    HAS_PCH_CNP(dev_priv))
2809 				spt_irq_handler(dev_priv, iir);
2810 			else
2811 				cpt_irq_handler(dev_priv, iir);
2812 		} else {
2813 			/*
2814 			 * Like on previous PCH there seems to be something
2815 			 * fishy going on with forwarding PCH interrupts.
2816 			 */
2817 			DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n");
2818 		}
2819 	}
2820 
2821 	return ret;
2822 }
2823 
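/*
 * Top-level handler for gen8+: disable the master interrupt, ack the GT
 * interrupts up front, dispatch the display interrupts (with the RPM
 * wakeref asserts suppressed) only when non-GT bits are pending, then
 * re-enable the master interrupt before processing the GT events.
 */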
2824 static irqreturn_t gen8_irq_handler(int irq, void *arg)
2825 {
2826 	struct drm_i915_private *dev_priv = to_i915(arg);
2827 	u32 master_ctl;
2828 	u32 gt_iir[4];
2829 
2830 	if (!intel_irqs_enabled(dev_priv))
2831 		return IRQ_NONE;
2832 
2833 	master_ctl = I915_READ_FW(GEN8_MASTER_IRQ);
2834 	master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
2835 	if (!master_ctl)
2836 		return IRQ_NONE;
2837 
2838 	I915_WRITE_FW(GEN8_MASTER_IRQ, 0);
2839 
2840 	/* Find, clear, then process each source of interrupt */
2841 	gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);
2842 
2843 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
2844 	if (master_ctl & ~GEN8_GT_IRQS) {
2845 		disable_rpm_wakeref_asserts(dev_priv);
2846 		gen8_de_irq_handler(dev_priv, master_ctl);
2847 		enable_rpm_wakeref_asserts(dev_priv);
2848 	}
2849 
2850 	I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
2851 
2852 	gen8_gt_irq_handler(dev_priv, master_ctl, gt_iir);
2853 
2854 	return IRQ_HANDLED;
2855 }
2856 
2857 struct wedge_me {
2858 	struct delayed_work work;
2859 	struct drm_i915_private *i915;
2860 	const char *name;
2861 };
2862 
2863 static void wedge_me(struct work_struct *work)
2864 {
2865 	struct wedge_me *w = container_of(work, typeof(*w), work.work);
2866 
2867 	dev_err(w->i915->drm.dev,
2868 		"%s timed out, cancelling all in-flight rendering.\n",
2869 		w->name);
2870 	i915_gem_set_wedged(w->i915);
2871 }
2872 
2873 static void __init_wedge(struct wedge_me *w,
2874 			 struct drm_i915_private *i915,
2875 			 long timeout,
2876 			 const char *name)
2877 {
2878 	w->i915 = i915;
2879 	w->name = name;
2880 
2881 	INIT_DELAYED_WORK_ONSTACK(&w->work, wedge_me);
2882 	schedule_delayed_work(&w->work, timeout);
2883 }
2884 
2885 static void __fini_wedge(struct wedge_me *w)
2886 {
2887 	cancel_delayed_work_sync(&w->work);
2888 	destroy_delayed_work_on_stack(&w->work);
2889 	w->i915 = NULL;
2890 }
2891 
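/*
 * Run the body of the following for-loop exactly once with a watchdog
 * armed: if the body has not completed within TIMEOUT, wedge_me() fires
 * from the workqueue and declares the GPU wedged.
 */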
2892 #define i915_wedge_on_timeout(W, DEV, TIMEOUT)				\
2893 	for (__init_wedge((W), (DEV), (TIMEOUT), __func__);		\
2894 	     (W)->i915;							\
2895 	     __fini_wedge((W)))
2896 
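/*
 * Select the interrupt bit of interest in the bank's IIR selector, spin
 * briefly until the identity register reports valid data, then ack the
 * identity by writing the valid bit back and return the raw identity
 * (engine class/instance plus interrupt bits) to the caller.
 */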
2897 static u32
2898 gen11_gt_engine_identity(struct drm_i915_private * const i915,
2899 			 const unsigned int bank, const unsigned int bit)
2900 {
2901 	void __iomem * const regs = i915->regs;
2902 	u32 timeout_ts;
2903 	u32 ident;
2904 
2905 	lockdep_assert_held(&i915->irq_lock);
2906 
2907 	raw_reg_write(regs, GEN11_IIR_REG_SELECTOR(bank), BIT(bit));
2908 
2909 	/*
2910 	 * NB: Specs do not specify how long to spin wait,
2911 	 * so we do ~100us as an educated guess.
2912 	 */
2913 	timeout_ts = (local_clock() >> 10) + 100;
2914 	do {
2915 		ident = raw_reg_read(regs, GEN11_INTR_IDENTITY_REG(bank));
2916 	} while (!(ident & GEN11_INTR_DATA_VALID) &&
2917 		 !time_after32(local_clock() >> 10, timeout_ts));
2918 
2919 	if (unlikely(!(ident & GEN11_INTR_DATA_VALID))) {
2920 		DRM_ERROR("INTR_IDENTITY_REG%u:%u 0x%08x not valid!\n",
2921 			  bank, bit, ident);
2922 		return 0;
2923 	}
2924 
2925 	raw_reg_write(regs, GEN11_INTR_IDENTITY_REG(bank),
2926 		      GEN11_INTR_DATA_VALID);
2927 
2928 	return ident;
2929 }
2930 
2931 static void
2932 gen11_other_irq_handler(struct drm_i915_private * const i915,
2933 			const u8 instance, const u16 iir)
2934 {
2935 	if (instance == OTHER_GTPM_INSTANCE)
2936 		return gen6_rps_irq_handler(i915, iir);
2937 
2938 	WARN_ONCE(1, "unhandled other interrupt instance=0x%x, iir=0x%x\n",
2939 		  instance, iir);
2940 }
2941 
2942 static void
2943 gen11_engine_irq_handler(struct drm_i915_private * const i915,
2944 			 const u8 class, const u8 instance, const u16 iir)
2945 {
2946 	struct intel_engine_cs *engine;
2947 
2948 	if (instance <= MAX_ENGINE_INSTANCE)
2949 		engine = i915->engine_class[class][instance];
2950 	else
2951 		engine = NULL;
2952 
2953 	if (likely(engine))
2954 		return gen8_cs_irq_handler(engine, iir);
2955 
2956 	WARN_ONCE(1, "unhandled engine interrupt class=0x%x, instance=0x%x\n",
2957 		  class, instance);
2958 }
2959 
2960 static void
2961 gen11_gt_identity_handler(struct drm_i915_private * const i915,
2962 			  const u32 identity)
2963 {
2964 	const u8 class = GEN11_INTR_ENGINE_CLASS(identity);
2965 	const u8 instance = GEN11_INTR_ENGINE_INSTANCE(identity);
2966 	const u16 intr = GEN11_INTR_ENGINE_INTR(identity);
2967 
2968 	if (unlikely(!intr))
2969 		return;
2970 
2971 	if (class <= COPY_ENGINE_CLASS)
2972 		return gen11_engine_irq_handler(i915, class, instance, intr);
2973 
2974 	if (class == OTHER_CLASS)
2975 		return gen11_other_irq_handler(i915, instance, intr);
2976 
2977 	WARN_ONCE(1, "unknown interrupt class=0x%x, instance=0x%x, intr=0x%x\n",
2978 		  class, instance, intr);
2979 }
2980 
2981 static void
2982 gen11_gt_bank_handler(struct drm_i915_private * const i915,
2983 		      const unsigned int bank)
2984 {
2985 	void __iomem * const regs = i915->regs;
2986 	unsigned long intr_dw;
2987 	unsigned int bit;
2988 
2989 	lockdep_assert_held(&i915->irq_lock);
2990 
2991 	intr_dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
2992 
2993 	if (unlikely(!intr_dw)) {
2994 		DRM_ERROR("GT_INTR_DW%u blank!\n", bank);
2995 		return;
2996 	}
2997 
2998 	for_each_set_bit(bit, &intr_dw, 32) {
2999 		const u32 ident = gen11_gt_engine_identity(i915,
3000 							   bank, bit);
3001 
3002 		gen11_gt_identity_handler(i915, ident);
3003 	}
3004 
3005 	/* Clear must be after shared has been served for engine */
3006 	raw_reg_write(regs, GEN11_GT_INTR_DW(bank), intr_dw);
3007 }
3008 
3009 static void
3010 gen11_gt_irq_handler(struct drm_i915_private * const i915,
3011 		     const u32 master_ctl)
3012 {
3013 	unsigned int bank;
3014 
3015 	spin_lock(&i915->irq_lock);
3016 
3017 	for (bank = 0; bank < 2; bank++) {
3018 		if (master_ctl & GEN11_GT_DW_IRQ(bank))
3019 			gen11_gt_bank_handler(i915, bank);
3020 	}
3021 
3022 	spin_unlock(&i915->irq_lock);
3023 }
3024 
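/*
 * GU_MISC handling is split in two: gen11_gu_misc_irq_ack() reads and
 * clears the IIR while the master interrupt is still disabled, and
 * gen11_gu_misc_irq_handler() processes the event after the master
 * interrupt has been re-enabled (see gen11_irq_handler()).
 */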
3025 static void
3026 gen11_gu_misc_irq_ack(struct drm_i915_private *dev_priv, const u32 master_ctl,
3027 		      u32 *iir)
3028 {
3029 	void __iomem * const regs = dev_priv->regs;
3030 
3031 	if (!(master_ctl & GEN11_GU_MISC_IRQ))
3032 		return;
3033 
3034 	*iir = raw_reg_read(regs, GEN11_GU_MISC_IIR);
3035 	if (likely(*iir))
3036 		raw_reg_write(regs, GEN11_GU_MISC_IIR, *iir);
3037 }
3038 
3039 static void
3040 gen11_gu_misc_irq_handler(struct drm_i915_private *dev_priv,
3041 			  const u32 master_ctl, const u32 iir)
3042 {
3043 	if (!(master_ctl & GEN11_GU_MISC_IRQ))
3044 		return;
3045 
3046 	if (unlikely(!iir)) {
3047 		DRM_ERROR("GU_MISC iir blank!\n");
3048 		return;
3049 	}
3050 
3051 	if (iir & GEN11_GU_MISC_GSE)
3052 		intel_opregion_asle_intr(dev_priv);
3053 	else
3054 		DRM_ERROR("Unexpected GU_MISC interrupt 0x%x\n", iir);
3055 }
3056 
3057 static irqreturn_t gen11_irq_handler(int irq, void *arg)
3058 {
3059 	struct drm_i915_private * const i915 = to_i915(arg);
3060 	void __iomem * const regs = i915->regs;
3061 	u32 master_ctl;
3062 	u32 gu_misc_iir;
3063 
3064 	if (!intel_irqs_enabled(i915))
3065 		return IRQ_NONE;
3066 
3067 	master_ctl = raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
3068 	master_ctl &= ~GEN11_MASTER_IRQ;
3069 	if (!master_ctl)
3070 		return IRQ_NONE;
3071 
3072 	/* Disable interrupts. */
3073 	raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0);
3074 
3075 	/* Find, clear, then process each source of interrupt. */
3076 	gen11_gt_irq_handler(i915, master_ctl);
3077 
3078 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
3079 	if (master_ctl & GEN11_DISPLAY_IRQ) {
3080 		const u32 disp_ctl = raw_reg_read(regs, GEN11_DISPLAY_INT_CTL);
3081 
3082 		disable_rpm_wakeref_asserts(i915);
3083 		/*
3084 		 * GEN11_DISPLAY_INT_CTL has same format as GEN8_MASTER_IRQ
3085 		 * for the display related bits.
3086 		 */
3087 		gen8_de_irq_handler(i915, disp_ctl);
3088 		enable_rpm_wakeref_asserts(i915);
3089 	}
3090 
3091 	gen11_gu_misc_irq_ack(i915, master_ctl, &gu_misc_iir);
3092 
3093 	/* Acknowledge and enable interrupts. */
3094 	raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ | master_ctl);
3095 
3096 	gen11_gu_misc_irq_handler(i915, master_ctl, gu_misc_iir);
3097 
3098 	return IRQ_HANDLED;
3099 }
3100 
3101 static void i915_reset_device(struct drm_i915_private *dev_priv,
3102 			      u32 engine_mask,
3103 			      const char *reason)
3104 {
3105 	struct i915_gpu_error *error = &dev_priv->gpu_error;
3106 	struct kobject *kobj = &dev_priv->drm.primary->kdev->kobj;
3107 	char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
3108 	char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
3109 	char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
3110 	struct wedge_me w;
3111 
3112 	kobject_uevent_env(kobj, KOBJ_CHANGE, error_event);
3113 
3114 	DRM_DEBUG_DRIVER("resetting chip\n");
3115 	kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event);
3116 
3117 	/* Use a watchdog to ensure that our reset completes */
3118 	i915_wedge_on_timeout(&w, dev_priv, 5*HZ) {
3119 		intel_prepare_reset(dev_priv);
3120 
3121 		error->reason = reason;
3122 		error->stalled_mask = engine_mask;
3123 
3124 		/* Signal that locked waiters should reset the GPU */
3125 		smp_mb__before_atomic();
3126 		set_bit(I915_RESET_HANDOFF, &error->flags);
3127 		wake_up_all(&error->wait_queue);
3128 
3129 		/* Wait for anyone holding the lock to wakeup, without
3130 		/* Wait for anyone holding the lock to wake up, without
3131 		 */
3132 		do {
3133 			if (mutex_trylock(&dev_priv->drm.struct_mutex)) {
3134 				i915_reset(dev_priv, engine_mask, reason);
3135 				mutex_unlock(&dev_priv->drm.struct_mutex);
3136 			}
3137 		} while (wait_on_bit_timeout(&error->flags,
3138 					     I915_RESET_HANDOFF,
3139 					     TASK_UNINTERRUPTIBLE,
3140 					     1));
3141 
3142 		error->stalled_mask = 0;
3143 		error->reason = NULL;
3144 
3145 		intel_finish_reset(dev_priv);
3146 	}
3147 
3148 	if (!test_bit(I915_WEDGED, &error->flags))
3149 		kobject_uevent_env(kobj, KOBJ_CHANGE, reset_done_event);
3150 }
3151 
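/*
 * Clear the latched fault registers (PGTBL_ER, IPEIR) and EIR. Any EIR
 * bits that remain set afterwards are treated as stuck: they are masked
 * via EMR and the already-latched command parser error interrupt is
 * cleared from IIR.
 */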
3152 static void i915_clear_error_registers(struct drm_i915_private *dev_priv)
3153 {
3154 	u32 eir;
3155 
3156 	if (!IS_GEN2(dev_priv))
3157 		I915_WRITE(PGTBL_ER, I915_READ(PGTBL_ER));
3158 
3159 	if (INTEL_GEN(dev_priv) < 4)
3160 		I915_WRITE(IPEIR, I915_READ(IPEIR));
3161 	else
3162 		I915_WRITE(IPEIR_I965, I915_READ(IPEIR_I965));
3163 
3164 	I915_WRITE(EIR, I915_READ(EIR));
3165 	eir = I915_READ(EIR);
3166 	if (eir) {
3167 		/*
3168 		 * some errors might have become stuck,
3169 		 * mask them.
3170 		 */
3171 		DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masking\n", eir);
3172 		I915_WRITE(EMR, I915_READ(EMR) | eir);
3173 		I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
3174 	}
3175 }
3176 
3177 /**
3178  * i915_handle_error - handle a gpu error
3179  * @dev_priv: i915 device private
3180  * @engine_mask: mask representing engines that are hung
3181  * @flags: control flags
3182  * @fmt: Error message format string
3183  *
3184  * Do some basic checking of register state at error time and
3185  * dump it to the syslog.  Also call i915_capture_error_state() to make
3186  * sure we get a record and make it available in debugfs.  Fire a uevent
3187  * so userspace knows something bad happened (should trigger collection
3188  * of a ring dump etc.).
3189  */
3190 void i915_handle_error(struct drm_i915_private *dev_priv,
3191 		       u32 engine_mask,
3192 		       unsigned long flags,
3193 		       const char *fmt, ...)
3194 {
3195 	struct intel_engine_cs *engine;
3196 	unsigned int tmp;
3197 	char error_msg[80];
3198 	char *msg = NULL;
3199 
3200 	if (fmt) {
3201 		va_list args;
3202 
3203 		va_start(args, fmt);
3204 		vscnprintf(error_msg, sizeof(error_msg), fmt, args);
3205 		va_end(args);
3206 
3207 		msg = error_msg;
3208 	}
3209 
3210 	/*
3211 	 * In most cases it's guaranteed that we get here with an RPM
3212 	 * reference held, for example because there is a pending GPU
3213 	 * request that won't finish until the reset is done. This
3214 	 * isn't the case at least when we get here by doing a
3215 	 * simulated reset via debugfs, so get an RPM reference.
3216 	 */
3217 	intel_runtime_pm_get(dev_priv);
3218 
3219 	engine_mask &= INTEL_INFO(dev_priv)->ring_mask;
3220 
3221 	if (flags & I915_ERROR_CAPTURE) {
3222 		i915_capture_error_state(dev_priv, engine_mask, msg);
3223 		i915_clear_error_registers(dev_priv);
3224 	}
3225 
3226 	/*
3227 	 * Try engine reset when available. We fall back to full reset if
3228 	 * single reset fails.
3229 	 */
3230 	if (intel_has_reset_engine(dev_priv)) {
3231 		for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
3232 			BUILD_BUG_ON(I915_RESET_MODESET >= I915_RESET_ENGINE);
3233 			if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
3234 					     &dev_priv->gpu_error.flags))
3235 				continue;
3236 
3237 			if (i915_reset_engine(engine, msg) == 0)
3238 				engine_mask &= ~intel_engine_flag(engine);
3239 
3240 			clear_bit(I915_RESET_ENGINE + engine->id,
3241 				  &dev_priv->gpu_error.flags);
3242 			wake_up_bit(&dev_priv->gpu_error.flags,
3243 				    I915_RESET_ENGINE + engine->id);
3244 		}
3245 	}
3246 
3247 	if (!engine_mask)
3248 		goto out;
3249 
3250 	/* Full reset needs the mutex, stop any other user trying to do so. */
3251 	if (test_and_set_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags)) {
3252 		wait_event(dev_priv->gpu_error.reset_queue,
3253 			   !test_bit(I915_RESET_BACKOFF,
3254 				     &dev_priv->gpu_error.flags));
3255 		goto out;
3256 	}
3257 
3258 	/* Prevent any other reset-engine attempt. */
3259 	for_each_engine(engine, dev_priv, tmp) {
3260 		while (test_and_set_bit(I915_RESET_ENGINE + engine->id,
3261 					&dev_priv->gpu_error.flags))
3262 			wait_on_bit(&dev_priv->gpu_error.flags,
3263 				    I915_RESET_ENGINE + engine->id,
3264 				    TASK_UNINTERRUPTIBLE);
3265 	}
3266 
3267 	i915_reset_device(dev_priv, engine_mask, msg);
3268 
3269 	for_each_engine(engine, dev_priv, tmp) {
3270 		clear_bit(I915_RESET_ENGINE + engine->id,
3271 			  &dev_priv->gpu_error.flags);
3272 	}
3273 
3274 	clear_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags);
3275 	wake_up_all(&dev_priv->gpu_error.reset_queue);
3276 
3277 out:
3278 	intel_runtime_pm_put(dev_priv);
3279 }
3280 
3281 /* Called from drm generic code, passed 'crtc' which
3282  * we use as a pipe index
3283  */
3284 static int i8xx_enable_vblank(struct drm_device *dev, unsigned int pipe)
3285 {
3286 	struct drm_i915_private *dev_priv = to_i915(dev);
3287 	unsigned long irqflags;
3288 
3289 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3290 	i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
3291 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3292 
3293 	return 0;
3294 }
3295 
3296 static int i965_enable_vblank(struct drm_device *dev, unsigned int pipe)
3297 {
3298 	struct drm_i915_private *dev_priv = to_i915(dev);
3299 	unsigned long irqflags;
3300 
3301 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3302 	i915_enable_pipestat(dev_priv, pipe,
3303 			     PIPE_START_VBLANK_INTERRUPT_STATUS);
3304 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3305 
3306 	return 0;
3307 }
3308 
3309 static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
3310 {
3311 	struct drm_i915_private *dev_priv = to_i915(dev);
3312 	unsigned long irqflags;
3313 	uint32_t bit = INTEL_GEN(dev_priv) >= 7 ?
3314 		DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
3315 
3316 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3317 	ilk_enable_display_irq(dev_priv, bit);
3318 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3319 
3320 	/* Even though there is no DMC, frame counter can get stuck when
3321 	 * PSR is active as no frames are generated.
3322 	 */
3323 	if (HAS_PSR(dev_priv))
3324 		drm_vblank_restore(dev, pipe);
3325 
3326 	return 0;
3327 }
3328 
3329 static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe)
3330 {
3331 	struct drm_i915_private *dev_priv = to_i915(dev);
3332 	unsigned long irqflags;
3333 
3334 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3335 	bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
3336 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3337 
3338 	/* Even if there is no DMC, frame counter can get stuck when
3339 	 * PSR is active as no frames are generated, so check only for PSR.
3340 	 */
3341 	if (HAS_PSR(dev_priv))
3342 		drm_vblank_restore(dev, pipe);
3343 
3344 	return 0;
3345 }
3346 
3347 /* Called from drm generic code, passed 'crtc' which
3348  * we use as a pipe index
3349  */
3350 static void i8xx_disable_vblank(struct drm_device *dev, unsigned int pipe)
3351 {
3352 	struct drm_i915_private *dev_priv = to_i915(dev);
3353 	unsigned long irqflags;
3354 
3355 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3356 	i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
3357 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3358 }
3359 
3360 static void i965_disable_vblank(struct drm_device *dev, unsigned int pipe)
3361 {
3362 	struct drm_i915_private *dev_priv = to_i915(dev);
3363 	unsigned long irqflags;
3364 
3365 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3366 	i915_disable_pipestat(dev_priv, pipe,
3367 			      PIPE_START_VBLANK_INTERRUPT_STATUS);
3368 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3369 }
3370 
3371 static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe)
3372 {
3373 	struct drm_i915_private *dev_priv = to_i915(dev);
3374 	unsigned long irqflags;
3375 	uint32_t bit = INTEL_GEN(dev_priv) >= 7 ?
3376 		DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
3377 
3378 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3379 	ilk_disable_display_irq(dev_priv, bit);
3380 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3381 }
3382 
3383 static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
3384 {
3385 	struct drm_i915_private *dev_priv = to_i915(dev);
3386 	unsigned long irqflags;
3387 
3388 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3389 	bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
3390 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3391 }
3392 
3393 static void ibx_irq_reset(struct drm_i915_private *dev_priv)
3394 {
3395 	if (HAS_PCH_NOP(dev_priv))
3396 		return;
3397 
3398 	GEN3_IRQ_RESET(SDE);
3399 
3400 	if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
3401 		I915_WRITE(SERR_INT, 0xffffffff);
3402 }
3403 
3404 /*
3405  * SDEIER is also touched by the interrupt handler to work around missed PCH
3406  * interrupts. Hence we can't update it after the interrupt handler is enabled -
3407  * instead we unconditionally enable all PCH interrupt sources here, but then
3408  * only unmask them as needed with SDEIMR.
3409  *
3410  * This function needs to be called before interrupts are enabled.
3411  */
3412 static void ibx_irq_pre_postinstall(struct drm_device *dev)
3413 {
3414 	struct drm_i915_private *dev_priv = to_i915(dev);
3415 
3416 	if (HAS_PCH_NOP(dev_priv))
3417 		return;
3418 
3419 	WARN_ON(I915_READ(SDEIER) != 0);
3420 	I915_WRITE(SDEIER, 0xffffffff);
3421 	POSTING_READ(SDEIER);
3422 }
3423 
3424 static void gen5_gt_irq_reset(struct drm_i915_private *dev_priv)
3425 {
3426 	GEN3_IRQ_RESET(GT);
3427 	if (INTEL_GEN(dev_priv) >= 6)
3428 		GEN3_IRQ_RESET(GEN6_PM);
3429 }
3430 
3431 static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
3432 {
3433 	if (IS_CHERRYVIEW(dev_priv))
3434 		I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
3435 	else
3436 		I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
3437 
3438 	i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
3439 	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3440 
3441 	i9xx_pipestat_irq_reset(dev_priv);
3442 
3443 	GEN3_IRQ_RESET(VLV_);
3444 	dev_priv->irq_mask = ~0u;
3445 }
3446 
3447 static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
3448 {
3449 	u32 pipestat_mask;
3450 	u32 enable_mask;
3451 	enum pipe pipe;
3452 
3453 	pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS;
3454 
3455 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3456 	for_each_pipe(dev_priv, pipe)
3457 		i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
3458 
3459 	enable_mask = I915_DISPLAY_PORT_INTERRUPT |
3460 		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3461 		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3462 		I915_LPE_PIPE_A_INTERRUPT |
3463 		I915_LPE_PIPE_B_INTERRUPT;
3464 
3465 	if (IS_CHERRYVIEW(dev_priv))
3466 		enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT |
3467 			I915_LPE_PIPE_C_INTERRUPT;
3468 
3469 	WARN_ON(dev_priv->irq_mask != ~0u);
3470 
3471 	dev_priv->irq_mask = ~enable_mask;
3472 
3473 	GEN3_IRQ_INIT(VLV_, dev_priv->irq_mask, enable_mask);
3474 }
3475 
3476 /* drm_dma.h hooks
3477  */
3478 static void ironlake_irq_reset(struct drm_device *dev)
3479 {
3480 	struct drm_i915_private *dev_priv = to_i915(dev);
3481 
3482 	if (IS_GEN5(dev_priv))
3483 		I915_WRITE(HWSTAM, 0xffffffff);
3484 
3485 	GEN3_IRQ_RESET(DE);
3486 	if (IS_GEN7(dev_priv))
3487 		I915_WRITE(GEN7_ERR_INT, 0xffffffff);
3488 
3489 	if (IS_HASWELL(dev_priv)) {
3490 		I915_WRITE(EDP_PSR_IMR, 0xffffffff);
3491 		I915_WRITE(EDP_PSR_IIR, 0xffffffff);
3492 	}
3493 
3494 	gen5_gt_irq_reset(dev_priv);
3495 
3496 	ibx_irq_reset(dev_priv);
3497 }
3498 
3499 static void valleyview_irq_reset(struct drm_device *dev)
3500 {
3501 	struct drm_i915_private *dev_priv = to_i915(dev);
3502 
3503 	I915_WRITE(VLV_MASTER_IER, 0);
3504 	POSTING_READ(VLV_MASTER_IER);
3505 
3506 	gen5_gt_irq_reset(dev_priv);
3507 
3508 	spin_lock_irq(&dev_priv->irq_lock);
3509 	if (dev_priv->display_irqs_enabled)
3510 		vlv_display_irq_reset(dev_priv);
3511 	spin_unlock_irq(&dev_priv->irq_lock);
3512 }
3513 
3514 static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
3515 {
3516 	GEN8_IRQ_RESET_NDX(GT, 0);
3517 	GEN8_IRQ_RESET_NDX(GT, 1);
3518 	GEN8_IRQ_RESET_NDX(GT, 2);
3519 	GEN8_IRQ_RESET_NDX(GT, 3);
3520 }
3521 
3522 static void gen8_irq_reset(struct drm_device *dev)
3523 {
3524 	struct drm_i915_private *dev_priv = to_i915(dev);
3525 	int pipe;
3526 
3527 	I915_WRITE(GEN8_MASTER_IRQ, 0);
3528 	POSTING_READ(GEN8_MASTER_IRQ);
3529 
3530 	gen8_gt_irq_reset(dev_priv);
3531 
3532 	I915_WRITE(EDP_PSR_IMR, 0xffffffff);
3533 	I915_WRITE(EDP_PSR_IIR, 0xffffffff);
3534 
3535 	for_each_pipe(dev_priv, pipe)
3536 		if (intel_display_power_is_enabled(dev_priv,
3537 						   POWER_DOMAIN_PIPE(pipe)))
3538 			GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
3539 
3540 	GEN3_IRQ_RESET(GEN8_DE_PORT_);
3541 	GEN3_IRQ_RESET(GEN8_DE_MISC_);
3542 	GEN3_IRQ_RESET(GEN8_PCU_);
3543 
3544 	if (HAS_PCH_SPLIT(dev_priv))
3545 		ibx_irq_reset(dev_priv);
3546 }
3547 
3548 static void gen11_gt_irq_reset(struct drm_i915_private *dev_priv)
3549 {
3550 	/* Disable RCS, BCS, VCS and VECS class engine interrupts. */
3551 	I915_WRITE(GEN11_RENDER_COPY_INTR_ENABLE, 0);
3552 	I915_WRITE(GEN11_VCS_VECS_INTR_ENABLE,	  0);
3553 
3554 	/* Mask all interrupts on the RCS, BCS, VCS and VECS engines. */
3555 	I915_WRITE(GEN11_RCS0_RSVD_INTR_MASK,	~0);
3556 	I915_WRITE(GEN11_BCS_RSVD_INTR_MASK,	~0);
3557 	I915_WRITE(GEN11_VCS0_VCS1_INTR_MASK,	~0);
3558 	I915_WRITE(GEN11_VCS2_VCS3_INTR_MASK,	~0);
3559 	I915_WRITE(GEN11_VECS0_VECS1_INTR_MASK,	~0);
3560 
3561 	I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
3562 	I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_MASK,  ~0);
3563 }
3564 
3565 static void gen11_irq_reset(struct drm_device *dev)
3566 {
3567 	struct drm_i915_private *dev_priv = to_i915(dev);
3568 	int pipe;
3569 
3570 	I915_WRITE(GEN11_GFX_MSTR_IRQ, 0);
3571 	POSTING_READ(GEN11_GFX_MSTR_IRQ);
3572 
3573 	gen11_gt_irq_reset(dev_priv);
3574 
3575 	I915_WRITE(GEN11_DISPLAY_INT_CTL, 0);
3576 
3577 	for_each_pipe(dev_priv, pipe)
3578 		if (intel_display_power_is_enabled(dev_priv,
3579 						   POWER_DOMAIN_PIPE(pipe)))
3580 			GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
3581 
3582 	GEN3_IRQ_RESET(GEN8_DE_PORT_);
3583 	GEN3_IRQ_RESET(GEN8_DE_MISC_);
3584 	GEN3_IRQ_RESET(GEN11_DE_HPD_);
3585 	GEN3_IRQ_RESET(GEN11_GU_MISC_);
3586 	GEN3_IRQ_RESET(GEN8_PCU_);
3587 }
3588 
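/*
 * Called after a display power well feeding the pipes in @pipe_mask has been
 * enabled: the DE pipe interrupt registers sit in that power well, so they
 * need to be (re)programmed here from the cached de_irq_mask[] values.
 */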
3589 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
3590 				     u8 pipe_mask)
3591 {
3592 	uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
3593 	enum pipe pipe;
3594 
3595 	spin_lock_irq(&dev_priv->irq_lock);
3596 
3597 	if (!intel_irqs_enabled(dev_priv)) {
3598 		spin_unlock_irq(&dev_priv->irq_lock);
3599 		return;
3600 	}
3601 
3602 	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
3603 		GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
3604 				  dev_priv->de_irq_mask[pipe],
3605 				  ~dev_priv->de_irq_mask[pipe] | extra_ier);
3606 
3607 	spin_unlock_irq(&dev_priv->irq_lock);
3608 }
3609 
3610 void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
3611 				     u8 pipe_mask)
3612 {
3613 	enum pipe pipe;
3614 
3615 	spin_lock_irq(&dev_priv->irq_lock);
3616 
3617 	if (!intel_irqs_enabled(dev_priv)) {
3618 		spin_unlock_irq(&dev_priv->irq_lock);
3619 		return;
3620 	}
3621 
3622 	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
3623 		GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
3624 
3625 	spin_unlock_irq(&dev_priv->irq_lock);
3626 
3627 	/* make sure we're done processing display irqs */
3628 	synchronize_irq(dev_priv->drm.irq);
3629 }
3630 
3631 static void cherryview_irq_reset(struct drm_device *dev)
3632 {
3633 	struct drm_i915_private *dev_priv = to_i915(dev);
3634 
3635 	I915_WRITE(GEN8_MASTER_IRQ, 0);
3636 	POSTING_READ(GEN8_MASTER_IRQ);
3637 
3638 	gen8_gt_irq_reset(dev_priv);
3639 
3640 	GEN3_IRQ_RESET(GEN8_PCU_);
3641 
3642 	spin_lock_irq(&dev_priv->irq_lock);
3643 	if (dev_priv->display_irqs_enabled)
3644 		vlv_display_irq_reset(dev_priv);
3645 	spin_unlock_irq(&dev_priv->irq_lock);
3646 }
3647 
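/*
 * Translate the encoders whose hotplug pin is currently marked HPD_ENABLED
 * into a bitmask of hotplug interrupt bits, using the platform specific
 * hpd[] lookup table supplied by the caller.
 */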
3648 static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv,
3649 				  const u32 hpd[HPD_NUM_PINS])
3650 {
3651 	struct intel_encoder *encoder;
3652 	u32 enabled_irqs = 0;
3653 
3654 	for_each_intel_encoder(&dev_priv->drm, encoder)
3655 		if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
3656 			enabled_irqs |= hpd[encoder->hpd_pin];
3657 
3658 	return enabled_irqs;
3659 }
3660 
3661 static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv)
3662 {
3663 	u32 hotplug;
3664 
3665 	/*
3666 	 * Enable digital hotplug on the PCH, and configure the DP short pulse
3667 	 * duration to 2ms (which is the minimum in the Display Port spec).
3668 	 * The pulse duration bits are reserved on LPT+.
3669 	 */
3670 	hotplug = I915_READ(PCH_PORT_HOTPLUG);
3671 	hotplug &= ~(PORTB_PULSE_DURATION_MASK |
3672 		     PORTC_PULSE_DURATION_MASK |
3673 		     PORTD_PULSE_DURATION_MASK);
3674 	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
3675 	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
3676 	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
3677 	/*
3678 	 * When CPU and PCH are on the same package, port A
3679 	 * HPD must be enabled in both north and south.
3680 	 */
3681 	if (HAS_PCH_LPT_LP(dev_priv))
3682 		hotplug |= PORTA_HOTPLUG_ENABLE;
3683 	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3684 }
3685 
3686 static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
3687 {
3688 	u32 hotplug_irqs, enabled_irqs;
3689 
3690 	if (HAS_PCH_IBX(dev_priv)) {
3691 		hotplug_irqs = SDE_HOTPLUG_MASK;
3692 		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ibx);
3693 	} else {
3694 		hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
3695 		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_cpt);
3696 	}
3697 
3698 	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3699 
3700 	ibx_hpd_detection_setup(dev_priv);
3701 }
3702 
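/*
 * Enable hotplug detection on all four Type-C ports, in both the regular TC
 * and the Thunderbolt hotplug control registers.
 */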
3703 static void gen11_hpd_detection_setup(struct drm_i915_private *dev_priv)
3704 {
3705 	u32 hotplug;
3706 
3707 	hotplug = I915_READ(GEN11_TC_HOTPLUG_CTL);
3708 	hotplug |= GEN11_HOTPLUG_CTL_ENABLE(PORT_TC1) |
3709 		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC2) |
3710 		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC3) |
3711 		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4);
3712 	I915_WRITE(GEN11_TC_HOTPLUG_CTL, hotplug);
3713 
3714 	hotplug = I915_READ(GEN11_TBT_HOTPLUG_CTL);
3715 	hotplug |= GEN11_HOTPLUG_CTL_ENABLE(PORT_TC1) |
3716 		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC2) |
3717 		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC3) |
3718 		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4);
3719 	I915_WRITE(GEN11_TBT_HOTPLUG_CTL, hotplug);
3720 }
3721 
3722 static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
3723 {
3724 	u32 hotplug_irqs, enabled_irqs;
3725 	u32 val;
3726 
3727 	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_gen11);
3728 	hotplug_irqs = GEN11_DE_TC_HOTPLUG_MASK | GEN11_DE_TBT_HOTPLUG_MASK;
3729 
3730 	val = I915_READ(GEN11_DE_HPD_IMR);
3731 	val &= ~hotplug_irqs;
3732 	I915_WRITE(GEN11_DE_HPD_IMR, val);
3733 	POSTING_READ(GEN11_DE_HPD_IMR);
3734 
3735 	gen11_hpd_detection_setup(dev_priv);
3736 }
3737 
3738 static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv)
3739 {
3740 	u32 val, hotplug;
3741 
3742 	/* Display WA #1179 WaHardHangonHotPlug: cnp */
3743 	if (HAS_PCH_CNP(dev_priv)) {
3744 		val = I915_READ(SOUTH_CHICKEN1);
3745 		val &= ~CHASSIS_CLK_REQ_DURATION_MASK;
3746 		val |= CHASSIS_CLK_REQ_DURATION(0xf);
3747 		I915_WRITE(SOUTH_CHICKEN1, val);
3748 	}
3749 
3750 	/* Enable digital hotplug on the PCH */
3751 	hotplug = I915_READ(PCH_PORT_HOTPLUG);
3752 	hotplug |= PORTA_HOTPLUG_ENABLE |
3753 		   PORTB_HOTPLUG_ENABLE |
3754 		   PORTC_HOTPLUG_ENABLE |
3755 		   PORTD_HOTPLUG_ENABLE;
3756 	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3757 
3758 	hotplug = I915_READ(PCH_PORT_HOTPLUG2);
3759 	hotplug |= PORTE_HOTPLUG_ENABLE;
3760 	I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
3761 }
3762 
3763 static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
3764 {
3765 	u32 hotplug_irqs, enabled_irqs;
3766 
3767 	hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
3768 	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_spt);
3769 
3770 	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3771 
3772 	spt_hpd_detection_setup(dev_priv);
3773 }
3774 
3775 static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv)
3776 {
3777 	u32 hotplug;
3778 
3779 	/*
3780 	 * Enable digital hotplug on the CPU, and configure the DP short pulse
3781 	 * duration to 2ms (which is the minimum in the Display Port spec)
3782 	 * The pulse duration bits are reserved on HSW+.
3783 	 */
3784 	hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
3785 	hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK;
3786 	hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE |
3787 		   DIGITAL_PORTA_PULSE_DURATION_2ms;
3788 	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
3789 }
3790 
3791 static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
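/*
 * Unmask the CPU (north) DP-A hotplug interrupt using whichever register
 * layout the platform has, then hand the PCH (south) hotplug pins over to
 * ibx_hpd_irq_setup().
 */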
3792 {
3793 	u32 hotplug_irqs, enabled_irqs;
3794 
3795 	if (INTEL_GEN(dev_priv) >= 8) {
3796 		hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG;
3797 		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bdw);
3798 
3799 		bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3800 	} else if (INTEL_GEN(dev_priv) >= 7) {
3801 		hotplug_irqs = DE_DP_A_HOTPLUG_IVB;
3802 		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ivb);
3803 
3804 		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
3805 	} else {
3806 		hotplug_irqs = DE_DP_A_HOTPLUG;
3807 		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ilk);
3808 
3809 		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
3810 	}
3811 
3812 	ilk_hpd_detection_setup(dev_priv);
3813 
3814 	ibx_hpd_irq_setup(dev_priv);
3815 }
3816 
3817 static void __bxt_hpd_detection_setup(struct drm_i915_private *dev_priv,
3818 				      u32 enabled_irqs)
3819 {
3820 	u32 hotplug;
3821 
3822 	hotplug = I915_READ(PCH_PORT_HOTPLUG);
3823 	hotplug |= PORTA_HOTPLUG_ENABLE |
3824 		   PORTB_HOTPLUG_ENABLE |
3825 		   PORTC_HOTPLUG_ENABLE;
3826 
3827 	DRM_DEBUG_KMS("Invert bit setting: hp_ctl:%x hp_port:%x\n",
3828 		      hotplug, enabled_irqs);
3829 	hotplug &= ~BXT_DDI_HPD_INVERT_MASK;
3830 
3831 	/*
3832 	 * For BXT the invert bit has to be set based on the AOB design used
3833 	 * for the HPD detection logic, so update it from the VBT fields.
3834 	 */
3835 	if ((enabled_irqs & BXT_DE_PORT_HP_DDIA) &&
3836 	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_A))
3837 		hotplug |= BXT_DDIA_HPD_INVERT;
3838 	if ((enabled_irqs & BXT_DE_PORT_HP_DDIB) &&
3839 	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_B))
3840 		hotplug |= BXT_DDIB_HPD_INVERT;
3841 	if ((enabled_irqs & BXT_DE_PORT_HP_DDIC) &&
3842 	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_C))
3843 		hotplug |= BXT_DDIC_HPD_INVERT;
3844 
3845 	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3846 }
3847 
3848 static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv)
3849 {
3850 	__bxt_hpd_detection_setup(dev_priv, BXT_DE_PORT_HOTPLUG_MASK);
3851 }
3852 
3853 static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
3854 {
3855 	u32 hotplug_irqs, enabled_irqs;
3856 
3857 	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bxt);
3858 	hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK;
3859 
3860 	bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3861 
3862 	__bxt_hpd_detection_setup(dev_priv, enabled_irqs);
3863 }
3864 
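/*
 * Unmask the always-wanted PCH (south display engine) interrupts: GMBUS, the
 * AUX channels and, on IBX, the poison notification. The hotplug bits
 * themselves are only unmasked later via the ->hpd_irq_setup() hooks.
 */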
3865 static void ibx_irq_postinstall(struct drm_device *dev)
3866 {
3867 	struct drm_i915_private *dev_priv = to_i915(dev);
3868 	u32 mask;
3869 
3870 	if (HAS_PCH_NOP(dev_priv))
3871 		return;
3872 
3873 	if (HAS_PCH_IBX(dev_priv))
3874 		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
3875 	else if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
3876 		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
3877 	else
3878 		mask = SDE_GMBUS_CPT;
3879 
3880 	gen3_assert_iir_is_zero(dev_priv, SDEIIR);
3881 	I915_WRITE(SDEIMR, ~mask);
3882 
3883 	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
3884 	    HAS_PCH_LPT(dev_priv))
3885 		ibx_hpd_detection_setup(dev_priv);
3886 	else
3887 		spt_hpd_detection_setup(dev_priv);
3888 }
3889 
3890 static void gen5_gt_irq_postinstall(struct drm_device *dev)
3891 {
3892 	struct drm_i915_private *dev_priv = to_i915(dev);
3893 	u32 pm_irqs, gt_irqs;
3894 
3895 	pm_irqs = gt_irqs = 0;
3896 
3897 	dev_priv->gt_irq_mask = ~0;
3898 	if (HAS_L3_DPF(dev_priv)) {
3899 		/* L3 parity interrupt is always unmasked. */
3900 		dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev_priv);
3901 		gt_irqs |= GT_PARITY_ERROR(dev_priv);
3902 	}
3903 
3904 	gt_irqs |= GT_RENDER_USER_INTERRUPT;
3905 	if (IS_GEN5(dev_priv)) {
3906 		gt_irqs |= ILK_BSD_USER_INTERRUPT;
3907 	} else {
3908 		gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
3909 	}
3910 
3911 	GEN3_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
3912 
3913 	if (INTEL_GEN(dev_priv) >= 6) {
3914 		/*
3915 		 * RPS interrupts will get enabled/disabled on demand when RPS
3916 		 * itself is enabled/disabled.
3917 		 */
3918 		if (HAS_VEBOX(dev_priv)) {
3919 			pm_irqs |= PM_VEBOX_USER_INTERRUPT;
3920 			dev_priv->pm_ier |= PM_VEBOX_USER_INTERRUPT;
3921 		}
3922 
3923 		dev_priv->pm_imr = 0xffffffff;
3924 		GEN3_IRQ_INIT(GEN6_PM, dev_priv->pm_imr, pm_irqs);
3925 	}
3926 }
3927 
3928 static int ironlake_irq_postinstall(struct drm_device *dev)
3929 {
3930 	struct drm_i915_private *dev_priv = to_i915(dev);
3931 	u32 display_mask, extra_mask;
3932 
3933 	if (INTEL_GEN(dev_priv) >= 7) {
3934 		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
3935 				DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB);
3936 		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
3937 			      DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
3938 			      DE_DP_A_HOTPLUG_IVB);
3939 	} else {
3940 		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
3941 				DE_AUX_CHANNEL_A | DE_PIPEB_CRC_DONE |
3942 				DE_PIPEA_CRC_DONE | DE_POISON);
3943 		extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
3944 			      DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
3945 			      DE_DP_A_HOTPLUG);
3946 	}
3947 
3948 	if (IS_HASWELL(dev_priv)) {
3949 		gen3_assert_iir_is_zero(dev_priv, EDP_PSR_IIR);
3950 		intel_psr_irq_control(dev_priv, dev_priv->psr.debug);
3951 		display_mask |= DE_EDP_PSR_INT_HSW;
3952 	}
3953 
3954 	dev_priv->irq_mask = ~display_mask;
3955 
3956 	ibx_irq_pre_postinstall(dev);
3957 
3958 	GEN3_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);
3959 
3960 	gen5_gt_irq_postinstall(dev);
3961 
3962 	ilk_hpd_detection_setup(dev_priv);
3963 
3964 	ibx_irq_postinstall(dev);
3965 
3966 	if (IS_IRONLAKE_M(dev_priv)) {
3967 		/* Enable PCU event interrupts
3968 		 *
3969 		 * spinlocking not required here for correctness since interrupt
3970 		 * setup is guaranteed to run in single-threaded context. But we
3971 		 * need it to make the assert_spin_locked happy. */
3972 		spin_lock_irq(&dev_priv->irq_lock);
3973 		ilk_enable_display_irq(dev_priv, DE_PCU_EVENT);
3974 		spin_unlock_irq(&dev_priv->irq_lock);
3975 	}
3976 
3977 	return 0;
3978 }
3979 
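/*
 * valleyview_{enable,disable}_display_irqs flip display_irqs_enabled and, if
 * driver interrupts are already live, reset/reprogram the VLV/CHV display
 * interrupt state accordingly. Both must be called with dev_priv->irq_lock
 * held (they are used around display power well transitions).
 */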
3980 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
3981 {
3982 	lockdep_assert_held(&dev_priv->irq_lock);
3983 
3984 	if (dev_priv->display_irqs_enabled)
3985 		return;
3986 
3987 	dev_priv->display_irqs_enabled = true;
3988 
3989 	if (intel_irqs_enabled(dev_priv)) {
3990 		vlv_display_irq_reset(dev_priv);
3991 		vlv_display_irq_postinstall(dev_priv);
3992 	}
3993 }
3994 
3995 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
3996 {
3997 	lockdep_assert_held(&dev_priv->irq_lock);
3998 
3999 	if (!dev_priv->display_irqs_enabled)
4000 		return;
4001 
4002 	dev_priv->display_irqs_enabled = false;
4003 
4004 	if (intel_irqs_enabled(dev_priv))
4005 		vlv_display_irq_reset(dev_priv);
4006 }
4007 
4008 
4009 static int valleyview_irq_postinstall(struct drm_device *dev)
4010 {
4011 	struct drm_i915_private *dev_priv = to_i915(dev);
4012 
4013 	gen5_gt_irq_postinstall(dev);
4014 
4015 	spin_lock_irq(&dev_priv->irq_lock);
4016 	if (dev_priv->display_irqs_enabled)
4017 		vlv_display_irq_postinstall(dev_priv);
4018 	spin_unlock_irq(&dev_priv->irq_lock);
4019 
4020 	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
4021 	POSTING_READ(VLV_MASTER_IER);
4022 
4023 	return 0;
4024 }
4025 
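/*
 * The four GT interrupt banks on gen8+ are laid out as: 0 = render/blitter
 * (RCS/BCS), 1 = video (VCS1/VCS2), 2 = PM/RPS and 3 = video enhancement
 * (VECS). Bank 2 is left fully masked here and managed by the RPS code.
 */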
4026 static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
4027 {
4028 	/* These are interrupts we'll toggle with the ring mask register */
4029 	uint32_t gt_interrupts[] = {
4030 		GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
4031 			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
4032 			GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
4033 			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
4034 		GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
4035 			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
4036 			GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
4037 			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
4038 		0,
4039 		GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
4040 			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
4041 		};
4042 
4043 	if (HAS_L3_DPF(dev_priv))
4044 		gt_interrupts[0] |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
4045 
4046 	dev_priv->pm_ier = 0x0;
4047 	dev_priv->pm_imr = ~dev_priv->pm_ier;
4048 	GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
4049 	GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
4050 	/*
4051 	 * RPS interrupts will get enabled/disabled on demand when RPS itself
4052 	 * is enabled/disabled. The same will be the case for GuC interrupts.
4053 	 */
4054 	GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_imr, dev_priv->pm_ier);
4055 	GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
4056 }
4057 
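/*
 * Build and program the display engine (DE) pipe, port, misc and (on gen11)
 * hotplug interrupt masks for the current platform. Pipes whose power well
 * is currently off are skipped here and picked up later by
 * gen8_irq_power_well_post_enable().
 */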
4058 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
4059 {
4060 	uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
4061 	uint32_t de_pipe_enables;
4062 	u32 de_port_masked = GEN8_AUX_CHANNEL_A;
4063 	u32 de_port_enables;
4064 	u32 de_misc_masked = GEN8_DE_EDP_PSR;
4065 	enum pipe pipe;
4066 
4067 	if (INTEL_GEN(dev_priv) <= 10)
4068 		de_misc_masked |= GEN8_DE_MISC_GSE;
4069 
4070 	if (INTEL_GEN(dev_priv) >= 9) {
4071 		de_pipe_masked |= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
4072 		de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
4073 				  GEN9_AUX_CHANNEL_D;
4074 		if (IS_GEN9_LP(dev_priv))
4075 			de_port_masked |= BXT_DE_PORT_GMBUS;
4076 	} else {
4077 		de_pipe_masked |= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
4078 	}
4079 
4080 	if (INTEL_GEN(dev_priv) >= 11)
4081 		de_port_masked |= ICL_AUX_CHANNEL_E;
4082 
4083 	if (IS_CNL_WITH_PORT_F(dev_priv) || INTEL_GEN(dev_priv) >= 11)
4084 		de_port_masked |= CNL_AUX_CHANNEL_F;
4085 
4086 	de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
4087 					   GEN8_PIPE_FIFO_UNDERRUN;
4088 
4089 	de_port_enables = de_port_masked;
4090 	if (IS_GEN9_LP(dev_priv))
4091 		de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
4092 	else if (IS_BROADWELL(dev_priv))
4093 		de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;
4094 
4095 	gen3_assert_iir_is_zero(dev_priv, EDP_PSR_IIR);
4096 	intel_psr_irq_control(dev_priv, dev_priv->psr.debug);
4097 
4098 	for_each_pipe(dev_priv, pipe) {
4099 		dev_priv->de_irq_mask[pipe] = ~de_pipe_masked;
4100 
4101 		if (intel_display_power_is_enabled(dev_priv,
4102 				POWER_DOMAIN_PIPE(pipe)))
4103 			GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
4104 					  dev_priv->de_irq_mask[pipe],
4105 					  de_pipe_enables);
4106 	}
4107 
4108 	GEN3_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
4109 	GEN3_IRQ_INIT(GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);
4110 
4111 	if (INTEL_GEN(dev_priv) >= 11) {
4112 		u32 de_hpd_masked = 0;
4113 		u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK |
4114 				     GEN11_DE_TBT_HOTPLUG_MASK;
4115 
4116 		GEN3_IRQ_INIT(GEN11_DE_HPD_, ~de_hpd_masked, de_hpd_enables);
4117 		gen11_hpd_detection_setup(dev_priv);
4118 	} else if (IS_GEN9_LP(dev_priv)) {
4119 		bxt_hpd_detection_setup(dev_priv);
4120 	} else if (IS_BROADWELL(dev_priv)) {
4121 		ilk_hpd_detection_setup(dev_priv);
4122 	}
4123 }
4124 
4125 static int gen8_irq_postinstall(struct drm_device *dev)
4126 {
4127 	struct drm_i915_private *dev_priv = to_i915(dev);
4128 
4129 	if (HAS_PCH_SPLIT(dev_priv))
4130 		ibx_irq_pre_postinstall(dev);
4131 
4132 	gen8_gt_irq_postinstall(dev_priv);
4133 	gen8_de_irq_postinstall(dev_priv);
4134 
4135 	if (HAS_PCH_SPLIT(dev_priv))
4136 		ibx_irq_postinstall(dev);
4137 
4138 	I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
4139 	POSTING_READ(GEN8_MASTER_IRQ);
4140 
4141 	return 0;
4142 }
4143 
4144 static void gen11_gt_irq_postinstall(struct drm_i915_private *dev_priv)
4145 {
4146 	const u32 irqs = GT_RENDER_USER_INTERRUPT | GT_CONTEXT_SWITCH_INTERRUPT;
4147 
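	/*
	 * Each gen11 engine-interrupt register packs two engines, one in the
	 * low and one in the high 16 bits; hence the "irqs << 16 | irqs"
	 * pattern below and the check that no bit strays above bit 15.
	 */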
4148 	BUILD_BUG_ON(irqs & 0xffff0000);
4149 
4150 	/* Enable RCS, BCS, VCS and VECS class interrupts. */
4151 	I915_WRITE(GEN11_RENDER_COPY_INTR_ENABLE, irqs << 16 | irqs);
4152 	I915_WRITE(GEN11_VCS_VECS_INTR_ENABLE,	  irqs << 16 | irqs);
4153 
4154 	/* Unmask irqs on RCS, BCS, VCS and VECS engines. */
4155 	I915_WRITE(GEN11_RCS0_RSVD_INTR_MASK,	~(irqs << 16));
4156 	I915_WRITE(GEN11_BCS_RSVD_INTR_MASK,	~(irqs << 16));
4157 	I915_WRITE(GEN11_VCS0_VCS1_INTR_MASK,	~(irqs | irqs << 16));
4158 	I915_WRITE(GEN11_VCS2_VCS3_INTR_MASK,	~(irqs | irqs << 16));
4159 	I915_WRITE(GEN11_VECS0_VECS1_INTR_MASK,	~(irqs | irqs << 16));
4160 
4161 	/*
4162 	 * RPS interrupts will get enabled/disabled on demand when RPS itself
4163 	 * is enabled/disabled.
4164 	 */
4165 	dev_priv->pm_ier = 0x0;
4166 	dev_priv->pm_imr = ~dev_priv->pm_ier;
4167 	I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
4168 	I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_MASK,  ~0);
4169 }
4170 
4171 static int gen11_irq_postinstall(struct drm_device *dev)
4172 {
4173 	struct drm_i915_private *dev_priv = to_i915(dev);
4174 	u32 gu_misc_masked = GEN11_GU_MISC_GSE;
4175 
4176 	gen11_gt_irq_postinstall(dev_priv);
4177 	gen8_de_irq_postinstall(dev_priv);
4178 
4179 	GEN3_IRQ_INIT(GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);
4180 
4181 	I915_WRITE(GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
4182 
4183 	I915_WRITE(GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ);
4184 	POSTING_READ(GEN11_GFX_MSTR_IRQ);
4185 
4186 	return 0;
4187 }
4188 
4189 static int cherryview_irq_postinstall(struct drm_device *dev)
4190 {
4191 	struct drm_i915_private *dev_priv = to_i915(dev);
4192 
4193 	gen8_gt_irq_postinstall(dev_priv);
4194 
4195 	spin_lock_irq(&dev_priv->irq_lock);
4196 	if (dev_priv->display_irqs_enabled)
4197 		vlv_display_irq_postinstall(dev_priv);
4198 	spin_unlock_irq(&dev_priv->irq_lock);
4199 
4200 	I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
4201 	POSTING_READ(GEN8_MASTER_IRQ);
4202 
4203 	return 0;
4204 }
4205 
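/*
 * Legacy (gen2-4) interrupt handling below: a single IIR/IMR/IER bank plus
 * per-pipe PIPESTAT registers, with gen2 using the 16 bit register variants.
 */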
4206 static void i8xx_irq_reset(struct drm_device *dev)
4207 {
4208 	struct drm_i915_private *dev_priv = to_i915(dev);
4209 
4210 	i9xx_pipestat_irq_reset(dev_priv);
4211 
4212 	I915_WRITE16(HWSTAM, 0xffff);
4213 
4214 	GEN2_IRQ_RESET();
4215 }
4216 
4217 static int i8xx_irq_postinstall(struct drm_device *dev)
4218 {
4219 	struct drm_i915_private *dev_priv = to_i915(dev);
4220 	u16 enable_mask;
4221 
4222 	I915_WRITE16(EMR, ~(I915_ERROR_PAGE_TABLE |
4223 			    I915_ERROR_MEMORY_REFRESH));
4224 
4225 	/* Unmask the interrupts that we always want on. */
4226 	dev_priv->irq_mask =
4227 		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4228 		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT);
4229 
4230 	enable_mask =
4231 		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4232 		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4233 		I915_USER_INTERRUPT;
4234 
4235 	GEN2_IRQ_INIT(, dev_priv->irq_mask, enable_mask);
4236 
4237 	/* Interrupt setup is already guaranteed to be single-threaded, this is
4238 	 * just to make the assert_spin_locked check happy. */
4239 	spin_lock_irq(&dev_priv->irq_lock);
4240 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4241 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4242 	spin_unlock_irq(&dev_priv->irq_lock);
4243 
4244 	return 0;
4245 }
4246 
4247 static irqreturn_t i8xx_irq_handler(int irq, void *arg)
4248 {
4249 	struct drm_device *dev = arg;
4250 	struct drm_i915_private *dev_priv = to_i915(dev);
4251 	irqreturn_t ret = IRQ_NONE;
4252 
4253 	if (!intel_irqs_enabled(dev_priv))
4254 		return IRQ_NONE;
4255 
4256 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
4257 	disable_rpm_wakeref_asserts(dev_priv);
4258 
4259 	do {
4260 		u32 pipe_stats[I915_MAX_PIPES] = {};
4261 		u16 iir;
4262 
4263 		iir = I915_READ16(IIR);
4264 		if (iir == 0)
4265 			break;
4266 
4267 		ret = IRQ_HANDLED;
4268 
4269 		/* Call regardless, as some status bits might not be
4270 		 * signalled in iir */
4271 		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
4272 
4273 		I915_WRITE16(IIR, iir);
4274 
4275 		if (iir & I915_USER_INTERRUPT)
4276 			notify_ring(dev_priv->engine[RCS]);
4277 
4278 		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
4279 			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
4280 
4281 		i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats);
4282 	} while (0);
4283 
4284 	enable_rpm_wakeref_asserts(dev_priv);
4285 
4286 	return ret;
4287 }
4288 
4289 static void i915_irq_reset(struct drm_device *dev)
4290 {
4291 	struct drm_i915_private *dev_priv = to_i915(dev);
4292 
4293 	if (I915_HAS_HOTPLUG(dev_priv)) {
4294 		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4295 		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4296 	}
4297 
4298 	i9xx_pipestat_irq_reset(dev_priv);
4299 
4300 	I915_WRITE(HWSTAM, 0xffffffff);
4301 
4302 	GEN3_IRQ_RESET();
4303 }
4304 
4305 static int i915_irq_postinstall(struct drm_device *dev)
4306 {
4307 	struct drm_i915_private *dev_priv = to_i915(dev);
4308 	u32 enable_mask;
4309 
4310 	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE |
4311 			  I915_ERROR_MEMORY_REFRESH));
4312 
4313 	/* Unmask the interrupts that we always want on. */
4314 	dev_priv->irq_mask =
4315 		~(I915_ASLE_INTERRUPT |
4316 		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4317 		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT);
4318 
4319 	enable_mask =
4320 		I915_ASLE_INTERRUPT |
4321 		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4322 		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4323 		I915_USER_INTERRUPT;
4324 
4325 	if (I915_HAS_HOTPLUG(dev_priv)) {
4326 		/* Enable in IER... */
4327 		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
4328 		/* and unmask in IMR */
4329 		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
4330 	}
4331 
4332 	GEN3_IRQ_INIT(, dev_priv->irq_mask, enable_mask);
4333 
4334 	/* Interrupt setup is already guaranteed to be single-threaded, this is
4335 	 * just to make the assert_spin_locked check happy. */
4336 	spin_lock_irq(&dev_priv->irq_lock);
4337 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4338 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4339 	spin_unlock_irq(&dev_priv->irq_lock);
4340 
4341 	i915_enable_asle_pipestat(dev_priv);
4342 
4343 	return 0;
4344 }
4345 
4346 static irqreturn_t i915_irq_handler(int irq, void *arg)
4347 {
4348 	struct drm_device *dev = arg;
4349 	struct drm_i915_private *dev_priv = to_i915(dev);
4350 	irqreturn_t ret = IRQ_NONE;
4351 
4352 	if (!intel_irqs_enabled(dev_priv))
4353 		return IRQ_NONE;
4354 
4355 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
4356 	disable_rpm_wakeref_asserts(dev_priv);
4357 
4358 	do {
4359 		u32 pipe_stats[I915_MAX_PIPES] = {};
4360 		u32 hotplug_status = 0;
4361 		u32 iir;
4362 
4363 		iir = I915_READ(IIR);
4364 		if (iir == 0)
4365 			break;
4366 
4367 		ret = IRQ_HANDLED;
4368 
4369 		if (I915_HAS_HOTPLUG(dev_priv) &&
4370 		    iir & I915_DISPLAY_PORT_INTERRUPT)
4371 			hotplug_status = i9xx_hpd_irq_ack(dev_priv);
4372 
4373 		/* Call regardless, as some status bits might not be
4374 		 * signalled in iir */
4375 		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
4376 
4377 		I915_WRITE(IIR, iir);
4378 
4379 		if (iir & I915_USER_INTERRUPT)
4380 			notify_ring(dev_priv->engine[RCS]);
4381 
4382 		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
4383 			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
4384 
4385 		if (hotplug_status)
4386 			i9xx_hpd_irq_handler(dev_priv, hotplug_status);
4387 
4388 		i915_pipestat_irq_handler(dev_priv, iir, pipe_stats);
4389 	} while (0);
4390 
4391 	enable_rpm_wakeref_asserts(dev_priv);
4392 
4393 	return ret;
4394 }
4395 
4396 static void i965_irq_reset(struct drm_device *dev)
4397 {
4398 	struct drm_i915_private *dev_priv = to_i915(dev);
4399 
4400 	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4401 	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4402 
4403 	i9xx_pipestat_irq_reset(dev_priv);
4404 
4405 	I915_WRITE(HWSTAM, 0xffffffff);
4406 
4407 	GEN3_IRQ_RESET();
4408 }
4409 
4410 static int i965_irq_postinstall(struct drm_device *dev)
4411 {
4412 	struct drm_i915_private *dev_priv = to_i915(dev);
4413 	u32 enable_mask;
4414 	u32 error_mask;
4415 
4416 	/*
4417 	 * Enable some error detection, note the instruction error mask
4418 	 * bit is reserved, so we leave it masked.
4419 	 */
4420 	if (IS_G4X(dev_priv)) {
4421 		error_mask = ~(GM45_ERROR_PAGE_TABLE |
4422 			       GM45_ERROR_MEM_PRIV |
4423 			       GM45_ERROR_CP_PRIV |
4424 			       I915_ERROR_MEMORY_REFRESH);
4425 	} else {
4426 		error_mask = ~(I915_ERROR_PAGE_TABLE |
4427 			       I915_ERROR_MEMORY_REFRESH);
4428 	}
4429 	I915_WRITE(EMR, error_mask);
4430 
4431 	/* Unmask the interrupts that we always want on. */
4432 	dev_priv->irq_mask =
4433 		~(I915_ASLE_INTERRUPT |
4434 		  I915_DISPLAY_PORT_INTERRUPT |
4435 		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4436 		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4437 		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
4438 
4439 	enable_mask =
4440 		I915_ASLE_INTERRUPT |
4441 		I915_DISPLAY_PORT_INTERRUPT |
4442 		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4443 		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4444 		I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
4445 		I915_USER_INTERRUPT;
4446 
4447 	if (IS_G4X(dev_priv))
4448 		enable_mask |= I915_BSD_USER_INTERRUPT;
4449 
4450 	GEN3_IRQ_INIT(, dev_priv->irq_mask, enable_mask);
4451 
4452 	/* Interrupt setup is already guaranteed to be single-threaded, this is
4453 	 * just to make the assert_spin_locked check happy. */
4454 	spin_lock_irq(&dev_priv->irq_lock);
4455 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
4456 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4457 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4458 	spin_unlock_irq(&dev_priv->irq_lock);
4459 
4460 	i915_enable_asle_pipestat(dev_priv);
4461 
4462 	return 0;
4463 }
4464 
4465 static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
4466 {
4467 	u32 hotplug_en;
4468 
4469 	lockdep_assert_held(&dev_priv->irq_lock);
4470 
4471 	/* Note HDMI and DP share hotplug bits */
4472 	/* enable bits are the same for all generations */
4473 	hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915);
4474 	/* Programming the CRT detection parameters tends to generate a
4475 	 * spurious hotplug event about three seconds later.
4476 	 * So just do it once.
4477 	 */
4478 	if (IS_G4X(dev_priv))
4479 		hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
4480 	hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
4481 
4482 	/* Ignore TV since it's buggy */
4483 	i915_hotplug_interrupt_update_locked(dev_priv,
4484 					     HOTPLUG_INT_EN_MASK |
4485 					     CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
4486 					     CRT_HOTPLUG_ACTIVATION_PERIOD_64,
4487 					     hotplug_en);
4488 }
4489 
4490 static irqreturn_t i965_irq_handler(int irq, void *arg)
4491 {
4492 	struct drm_device *dev = arg;
4493 	struct drm_i915_private *dev_priv = to_i915(dev);
4494 	irqreturn_t ret = IRQ_NONE;
4495 
4496 	if (!intel_irqs_enabled(dev_priv))
4497 		return IRQ_NONE;
4498 
4499 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
4500 	disable_rpm_wakeref_asserts(dev_priv);
4501 
4502 	do {
4503 		u32 pipe_stats[I915_MAX_PIPES] = {};
4504 		u32 hotplug_status = 0;
4505 		u32 iir;
4506 
4507 		iir = I915_READ(IIR);
4508 		if (iir == 0)
4509 			break;
4510 
4511 		ret = IRQ_HANDLED;
4512 
4513 		if (iir & I915_DISPLAY_PORT_INTERRUPT)
4514 			hotplug_status = i9xx_hpd_irq_ack(dev_priv);
4515 
4516 		/* Call regardless, as some status bits might not be
4517 		 * signalled in iir */
4518 		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
4519 
4520 		I915_WRITE(IIR, iir);
4521 
4522 		if (iir & I915_USER_INTERRUPT)
4523 			notify_ring(dev_priv->engine[RCS]);
4524 
4525 		if (iir & I915_BSD_USER_INTERRUPT)
4526 			notify_ring(dev_priv->engine[VCS]);
4527 
4528 		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
4529 			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
4530 
4531 		if (hotplug_status)
4532 			i9xx_hpd_irq_handler(dev_priv, hotplug_status);
4533 
4534 		i965_pipestat_irq_handler(dev_priv, iir, pipe_stats);
4535 	} while (0);
4536 
4537 	enable_rpm_wakeref_asserts(dev_priv);
4538 
4539 	return ret;
4540 }
4541 
4542 /**
4543  * intel_irq_init - initializes irq support
4544  * @dev_priv: i915 device instance
4545  *
4546  * This function initializes all the irq support including work items, timers
4547  * and all the vtables. It does not setup the interrupt itself though.
4548  */
4549 void intel_irq_init(struct drm_i915_private *dev_priv)
4550 {
4551 	struct drm_device *dev = &dev_priv->drm;
4552 	struct intel_rps *rps = &dev_priv->gt_pm.rps;
4553 	int i;
4554 
4555 	intel_hpd_init_work(dev_priv);
4556 
4557 	INIT_WORK(&rps->work, gen6_pm_rps_work);
4558 
4559 	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
4560 	for (i = 0; i < MAX_L3_SLICES; ++i)
4561 		dev_priv->l3_parity.remap_info[i] = NULL;
4562 
4563 	if (HAS_GUC_SCHED(dev_priv))
4564 		dev_priv->pm_guc_events = GEN9_GUC_TO_HOST_INT_EVENT;
4565 
4566 	/* Let's track the enabled rps events */
4567 	if (IS_VALLEYVIEW(dev_priv))
4568 		/* WaGsvRC0ResidencyMethod:vlv */
4569 		dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
4570 	else
4571 		dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
4572 
4573 	rps->pm_intrmsk_mbz = 0;
4574 
4575 	/*
4576 	 * SNB, IVB and HSW can hard hang (and VLV, CHV may as well) on a
4577 	 * looping batchbuffer if GEN6_PM_UP_EI_EXPIRED is masked.
4578 	 *
4579 	 * TODO: verify if this can be reproduced on VLV,CHV.
4580 	 */
4581 	if (INTEL_GEN(dev_priv) <= 7)
4582 		rps->pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED;
4583 
4584 	if (INTEL_GEN(dev_priv) >= 8)
4585 		rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
4586 
4587 	if (IS_GEN2(dev_priv)) {
4588 		/* Gen2 doesn't have a hardware frame counter */
4589 		dev->max_vblank_count = 0;
4590 	} else if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
4591 		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
4592 		dev->driver->get_vblank_counter = g4x_get_vblank_counter;
4593 	} else {
4594 		dev->driver->get_vblank_counter = i915_get_vblank_counter;
4595 		dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
4596 	}
4597 
4598 	/*
4599 	 * Opt out of the vblank disable timer on everything except gen2.
4600 	 * Gen2 doesn't have a hardware frame counter and so depends on
4601 	 * vblank interrupts to produce sane vblank sequence numbers.
4602 	 */
4603 	if (!IS_GEN2(dev_priv))
4604 		dev->vblank_disable_immediate = true;
4605 
4606 	/* Most platforms treat the display irq block as an always-on
4607 	 * power domain. vlv/chv can disable it at runtime and need
4608 	 * special care to avoid writing any of the display block registers
4609 	 * outside of the power domain. We defer setting up the display irqs
4610 	 * in this case to the runtime pm.
4611 	 */
4612 	dev_priv->display_irqs_enabled = true;
4613 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
4614 		dev_priv->display_irqs_enabled = false;
4615 
4616 	dev_priv->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD;
4617 
4618 	dev->driver->get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos;
4619 	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
4620 
4621 	if (IS_CHERRYVIEW(dev_priv)) {
4622 		dev->driver->irq_handler = cherryview_irq_handler;
4623 		dev->driver->irq_preinstall = cherryview_irq_reset;
4624 		dev->driver->irq_postinstall = cherryview_irq_postinstall;
4625 		dev->driver->irq_uninstall = cherryview_irq_reset;
4626 		dev->driver->enable_vblank = i965_enable_vblank;
4627 		dev->driver->disable_vblank = i965_disable_vblank;
4628 		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4629 	} else if (IS_VALLEYVIEW(dev_priv)) {
4630 		dev->driver->irq_handler = valleyview_irq_handler;
4631 		dev->driver->irq_preinstall = valleyview_irq_reset;
4632 		dev->driver->irq_postinstall = valleyview_irq_postinstall;
4633 		dev->driver->irq_uninstall = valleyview_irq_reset;
4634 		dev->driver->enable_vblank = i965_enable_vblank;
4635 		dev->driver->disable_vblank = i965_disable_vblank;
4636 		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4637 	} else if (INTEL_GEN(dev_priv) >= 11) {
4638 		dev->driver->irq_handler = gen11_irq_handler;
4639 		dev->driver->irq_preinstall = gen11_irq_reset;
4640 		dev->driver->irq_postinstall = gen11_irq_postinstall;
4641 		dev->driver->irq_uninstall = gen11_irq_reset;
4642 		dev->driver->enable_vblank = gen8_enable_vblank;
4643 		dev->driver->disable_vblank = gen8_disable_vblank;
4644 		dev_priv->display.hpd_irq_setup = gen11_hpd_irq_setup;
4645 	} else if (INTEL_GEN(dev_priv) >= 8) {
4646 		dev->driver->irq_handler = gen8_irq_handler;
4647 		dev->driver->irq_preinstall = gen8_irq_reset;
4648 		dev->driver->irq_postinstall = gen8_irq_postinstall;
4649 		dev->driver->irq_uninstall = gen8_irq_reset;
4650 		dev->driver->enable_vblank = gen8_enable_vblank;
4651 		dev->driver->disable_vblank = gen8_disable_vblank;
4652 		if (IS_GEN9_LP(dev_priv))
4653 			dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
4654 		else if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv) ||
4655 			 HAS_PCH_CNP(dev_priv))
4656 			dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
4657 		else
4658 			dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
4659 	} else if (HAS_PCH_SPLIT(dev_priv)) {
4660 		dev->driver->irq_handler = ironlake_irq_handler;
4661 		dev->driver->irq_preinstall = ironlake_irq_reset;
4662 		dev->driver->irq_postinstall = ironlake_irq_postinstall;
4663 		dev->driver->irq_uninstall = ironlake_irq_reset;
4664 		dev->driver->enable_vblank = ironlake_enable_vblank;
4665 		dev->driver->disable_vblank = ironlake_disable_vblank;
4666 		dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
4667 	} else {
4668 		if (IS_GEN2(dev_priv)) {
4669 			dev->driver->irq_preinstall = i8xx_irq_reset;
4670 			dev->driver->irq_postinstall = i8xx_irq_postinstall;
4671 			dev->driver->irq_handler = i8xx_irq_handler;
4672 			dev->driver->irq_uninstall = i8xx_irq_reset;
4673 			dev->driver->enable_vblank = i8xx_enable_vblank;
4674 			dev->driver->disable_vblank = i8xx_disable_vblank;
4675 		} else if (IS_GEN3(dev_priv)) {
4676 			dev->driver->irq_preinstall = i915_irq_reset;
4677 			dev->driver->irq_postinstall = i915_irq_postinstall;
4678 			dev->driver->irq_uninstall = i915_irq_reset;
4679 			dev->driver->irq_handler = i915_irq_handler;
4680 			dev->driver->enable_vblank = i8xx_enable_vblank;
4681 			dev->driver->disable_vblank = i8xx_disable_vblank;
4682 		} else {
4683 			dev->driver->irq_preinstall = i965_irq_reset;
4684 			dev->driver->irq_postinstall = i965_irq_postinstall;
4685 			dev->driver->irq_uninstall = i965_irq_reset;
4686 			dev->driver->irq_handler = i965_irq_handler;
4687 			dev->driver->enable_vblank = i965_enable_vblank;
4688 			dev->driver->disable_vblank = i965_disable_vblank;
4689 		}
4690 		if (I915_HAS_HOTPLUG(dev_priv))
4691 			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4692 	}
4693 }
4694 
4695 /**
4696  * intel_irq_fini - deinitializes IRQ support
4697  * @i915: i915 device instance
4698  *
4699  * This function deinitializes all the IRQ support.
4700  */
4701 void intel_irq_fini(struct drm_i915_private *i915)
4702 {
4703 	int i;
4704 
4705 	for (i = 0; i < MAX_L3_SLICES; ++i)
4706 		kfree(i915->l3_parity.remap_info[i]);
4707 }
4708 
4709 /**
4710  * intel_irq_install - enables the hardware interrupt
4711  * @dev_priv: i915 device instance
4712  *
4713  * This function enables the hardware interrupt handling, but leaves the hotplug
4714  * handling still disabled. It is called after intel_irq_init().
4715  *
4716  * In the driver load and resume code we need working interrupts in a few places
4717  * but don't want to deal with the hassle of concurrent probe and hotplug
4718  * workers. Hence the split into this two-stage approach.
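 *
 * Typically intel_irq_init() runs first and intel_irq_install() afterwards;
 * hotplug handling is then brought up separately (e.g. via intel_hpd_init()).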
4719  */
4720 int intel_irq_install(struct drm_i915_private *dev_priv)
4721 {
4722 	/*
4723 	 * We enable some interrupt sources in our postinstall hooks, so mark
4724 	 * interrupts as enabled _before_ actually enabling them to avoid
4725 	 * special cases in our ordering checks.
4726 	 */
4727 	dev_priv->runtime_pm.irqs_enabled = true;
4728 
4729 	return drm_irq_install(&dev_priv->drm, dev_priv->drm.pdev->irq);
4730 }
4731 
4732 /**
4733  * intel_irq_uninstall - finalizes all irq handling
4734  * @dev_priv: i915 device instance
4735  *
4736  * This stops interrupt and hotplug handling and unregisters and frees all
4737  * resources acquired in the init functions.
4738  */
4739 void intel_irq_uninstall(struct drm_i915_private *dev_priv)
4740 {
4741 	drm_irq_uninstall(&dev_priv->drm);
4742 	intel_hpd_cancel_work(dev_priv);
4743 	dev_priv->runtime_pm.irqs_enabled = false;
4744 }
4745 
4746 /**
4747  * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
4748  * @dev_priv: i915 device instance
4749  *
4750  * This function is used to disable interrupts at runtime, both in the runtime
4751  * pm and the system suspend/resume code.
4752  */
4753 void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
4754 {
4755 	dev_priv->drm.driver->irq_uninstall(&dev_priv->drm);
4756 	dev_priv->runtime_pm.irqs_enabled = false;
4757 	synchronize_irq(dev_priv->drm.irq);
4758 }
4759 
4760 /**
4761  * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
4762  * @dev_priv: i915 device instance
4763  *
4764  * This function is used to enable interrupts at runtime, both in the runtime
4765  * pm and the system suspend/resume code.
4766  */
4767 void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
4768 {
4769 	dev_priv->runtime_pm.irqs_enabled = true;
4770 	dev_priv->drm.driver->irq_preinstall(&dev_priv->drm);
4771 	dev_priv->drm.driver->irq_postinstall(&dev_priv->drm);
4772 }
4773