xref: /openbmc/linux/drivers/gpu/drm/i915/i915_irq.c (revision 4f6cce39)
1 /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
2  */
3 /*
4  * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
5  * All Rights Reserved.
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a
8  * copy of this software and associated documentation files (the
9  * "Software"), to deal in the Software without restriction, including
10  * without limitation the rights to use, copy, modify, merge, publish,
11  * distribute, sub license, and/or sell copies of the Software, and to
12  * permit persons to whom the Software is furnished to do so, subject to
13  * the following conditions:
14  *
15  * The above copyright notice and this permission notice (including the
16  * next paragraph) shall be included in all copies or substantial portions
17  * of the Software.
18  *
19  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22  * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26  *
27  */
28 
29 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30 
31 #include <linux/sysrq.h>
32 #include <linux/slab.h>
33 #include <linux/circ_buf.h>
34 #include <drm/drmP.h>
35 #include <drm/i915_drm.h>
36 #include "i915_drv.h"
37 #include "i915_trace.h"
38 #include "intel_drv.h"
39 
40 /**
41  * DOC: interrupt handling
42  *
43  * These functions provide the basic support for enabling and disabling the
44  * interrupt handling support. There's a lot more functionality in i915_irq.c
45  * and related files, but that will be described in separate chapters.
46  */
47 
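/*
 * A rough sketch of the register scheme the helpers in this file drive, per
 * interrupt source/domain: IER enables an interrupt source, IMR masks it (a
 * set bit blocks delivery), and IIR latches the sticky "identity" bits that
 * actually fired; IIR bits are cleared by writing 1s back.  The reset/init
 * macros and the *_update_*_irq() helpers below all follow this pattern.
 */
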
48 static const u32 hpd_ilk[HPD_NUM_PINS] = {
49 	[HPD_PORT_A] = DE_DP_A_HOTPLUG,
50 };
51 
52 static const u32 hpd_ivb[HPD_NUM_PINS] = {
53 	[HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
54 };
55 
56 static const u32 hpd_bdw[HPD_NUM_PINS] = {
57 	[HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG,
58 };
59 
60 static const u32 hpd_ibx[HPD_NUM_PINS] = {
61 	[HPD_CRT] = SDE_CRT_HOTPLUG,
62 	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
63 	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
64 	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
65 	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
66 };
67 
68 static const u32 hpd_cpt[HPD_NUM_PINS] = {
69 	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
70 	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
71 	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
72 	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
73 	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
74 };
75 
76 static const u32 hpd_spt[HPD_NUM_PINS] = {
77 	[HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
78 	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
79 	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
80 	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
81 	[HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT
82 };
83 
84 static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
85 	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
86 	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
87 	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
88 	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
89 	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
90 	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
91 };
92 
93 static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
94 	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
95 	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
96 	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
97 	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
98 	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
99 	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
100 };
101 
102 static const u32 hpd_status_i915[HPD_NUM_PINS] = {
103 	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
104 	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
105 	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
106 	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
107 	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
108 	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
109 };
110 
111 /* BXT hpd list */
112 static const u32 hpd_bxt[HPD_NUM_PINS] = {
113 	[HPD_PORT_A] = BXT_DE_PORT_HP_DDIA,
114 	[HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
115 	[HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
116 };
117 
118 /* IIR can theoretically queue up two events. Be paranoid. */
119 #define GEN8_IRQ_RESET_NDX(type, which) do { \
120 	I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
121 	POSTING_READ(GEN8_##type##_IMR(which)); \
122 	I915_WRITE(GEN8_##type##_IER(which), 0); \
123 	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
124 	POSTING_READ(GEN8_##type##_IIR(which)); \
125 	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
126 	POSTING_READ(GEN8_##type##_IIR(which)); \
127 } while (0)
128 
129 #define GEN5_IRQ_RESET(type) do { \
130 	I915_WRITE(type##IMR, 0xffffffff); \
131 	POSTING_READ(type##IMR); \
132 	I915_WRITE(type##IER, 0); \
133 	I915_WRITE(type##IIR, 0xffffffff); \
134 	POSTING_READ(type##IIR); \
135 	I915_WRITE(type##IIR, 0xffffffff); \
136 	POSTING_READ(type##IIR); \
137 } while (0)
138 
139 /*
140  * We should clear IMR at preinstall/uninstall, and just check at postinstall.
141  */
142 static void gen5_assert_iir_is_zero(struct drm_i915_private *dev_priv,
143 				    i915_reg_t reg)
144 {
145 	u32 val = I915_READ(reg);
146 
147 	if (val == 0)
148 		return;
149 
150 	WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
151 	     i915_mmio_reg_offset(reg), val);
152 	I915_WRITE(reg, 0xffffffff);
153 	POSTING_READ(reg);
154 	I915_WRITE(reg, 0xffffffff);
155 	POSTING_READ(reg);
156 }
157 
158 #define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
159 	gen5_assert_iir_is_zero(dev_priv, GEN8_##type##_IIR(which)); \
160 	I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
161 	I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
162 	POSTING_READ(GEN8_##type##_IMR(which)); \
163 } while (0)
164 
165 #define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
166 	gen5_assert_iir_is_zero(dev_priv, type##IIR); \
167 	I915_WRITE(type##IER, (ier_val)); \
168 	I915_WRITE(type##IMR, (imr_val)); \
169 	POSTING_READ(type##IMR); \
170 } while (0)
171 
172 static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
173 static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
174 
175 /* For display hotplug interrupt */
176 static inline void
177 i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
178 				     uint32_t mask,
179 				     uint32_t bits)
180 {
181 	uint32_t val;
182 
183 	assert_spin_locked(&dev_priv->irq_lock);
184 	WARN_ON(bits & ~mask);
185 
186 	val = I915_READ(PORT_HOTPLUG_EN);
187 	val &= ~mask;
188 	val |= bits;
189 	I915_WRITE(PORT_HOTPLUG_EN, val);
190 }
191 
192 /**
193  * i915_hotplug_interrupt_update - update hotplug interrupt enable
194  * @dev_priv: driver private
195  * @mask: bits to update
196  * @bits: bits to enable
197  * NOTE: the HPD enable bits are modified both inside and outside
198  * of an interrupt context. To prevent read-modify-write cycles from
199  * interfering, these bits are protected by a spinlock. Since this
200  * function is usually not called from a context where the lock is
201  * held already, this function acquires the lock itself. A non-locking
202  * version is also available.
203  */
204 void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
205 				   uint32_t mask,
206 				   uint32_t bits)
207 {
208 	spin_lock_irq(&dev_priv->irq_lock);
209 	i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
210 	spin_unlock_irq(&dev_priv->irq_lock);
211 }
212 
213 /**
214  * ilk_update_display_irq - update DEIMR
215  * @dev_priv: driver private
216  * @interrupt_mask: mask of interrupt bits to update
217  * @enabled_irq_mask: mask of interrupt bits to enable
218  */
219 void ilk_update_display_irq(struct drm_i915_private *dev_priv,
220 			    uint32_t interrupt_mask,
221 			    uint32_t enabled_irq_mask)
222 {
223 	uint32_t new_val;
224 
225 	assert_spin_locked(&dev_priv->irq_lock);
226 
227 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
228 
229 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
230 		return;
231 
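	/*
	 * IMR semantics: a set bit masks (disables) the interrupt.  Within
	 * @interrupt_mask, clear the bits being enabled and set the rest.
	 */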
232 	new_val = dev_priv->irq_mask;
233 	new_val &= ~interrupt_mask;
234 	new_val |= (~enabled_irq_mask & interrupt_mask);
235 
236 	if (new_val != dev_priv->irq_mask) {
237 		dev_priv->irq_mask = new_val;
238 		I915_WRITE(DEIMR, dev_priv->irq_mask);
239 		POSTING_READ(DEIMR);
240 	}
241 }
242 
243 /**
244  * ilk_update_gt_irq - update GTIMR
245  * @dev_priv: driver private
246  * @interrupt_mask: mask of interrupt bits to update
247  * @enabled_irq_mask: mask of interrupt bits to enable
248  */
249 static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
250 			      uint32_t interrupt_mask,
251 			      uint32_t enabled_irq_mask)
252 {
253 	assert_spin_locked(&dev_priv->irq_lock);
254 
255 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
256 
257 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
258 		return;
259 
260 	dev_priv->gt_irq_mask &= ~interrupt_mask;
261 	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
262 	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
263 }
264 
265 void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
266 {
267 	ilk_update_gt_irq(dev_priv, mask, mask);
268 	POSTING_READ_FW(GTIMR);
269 }
270 
271 void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
272 {
273 	ilk_update_gt_irq(dev_priv, mask, 0);
274 }
275 
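/*
 * On gen8+ the PM/RPS interrupt bits live in GT interrupt bank 2 instead of
 * the dedicated GEN6_PM* registers, so these helpers pick the right register
 * for the current generation.
 */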
276 static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv)
277 {
278 	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
279 }
280 
281 static i915_reg_t gen6_pm_imr(struct drm_i915_private *dev_priv)
282 {
283 	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR;
284 }
285 
286 static i915_reg_t gen6_pm_ier(struct drm_i915_private *dev_priv)
287 {
288 	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER;
289 }
290 
291 /**
292  * snb_update_pm_irq - update GEN6_PMIMR
293  * @dev_priv: driver private
294  * @interrupt_mask: mask of interrupt bits to update
295  * @enabled_irq_mask: mask of interrupt bits to enable
296  */
297 static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
298 			      uint32_t interrupt_mask,
299 			      uint32_t enabled_irq_mask)
300 {
301 	uint32_t new_val;
302 
303 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
304 
305 	assert_spin_locked(&dev_priv->irq_lock);
306 
307 	new_val = dev_priv->pm_imr;
308 	new_val &= ~interrupt_mask;
309 	new_val |= (~enabled_irq_mask & interrupt_mask);
310 
311 	if (new_val != dev_priv->pm_imr) {
312 		dev_priv->pm_imr = new_val;
313 		I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_imr);
314 		POSTING_READ(gen6_pm_imr(dev_priv));
315 	}
316 }
317 
318 void gen6_unmask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
319 {
320 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
321 		return;
322 
323 	snb_update_pm_irq(dev_priv, mask, mask);
324 }
325 
326 static void __gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
327 {
328 	snb_update_pm_irq(dev_priv, mask, 0);
329 }
330 
331 void gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
332 {
333 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
334 		return;
335 
336 	__gen6_mask_pm_irq(dev_priv, mask);
337 }
338 
339 void gen6_reset_pm_iir(struct drm_i915_private *dev_priv, u32 reset_mask)
340 {
341 	i915_reg_t reg = gen6_pm_iir(dev_priv);
342 
343 	assert_spin_locked(&dev_priv->irq_lock);
344 
345 	I915_WRITE(reg, reset_mask);
346 	I915_WRITE(reg, reset_mask);
347 	POSTING_READ(reg);
348 }
349 
350 void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, u32 enable_mask)
351 {
352 	assert_spin_locked(&dev_priv->irq_lock);
353 
354 	dev_priv->pm_ier |= enable_mask;
355 	I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->pm_ier);
356 	gen6_unmask_pm_irq(dev_priv, enable_mask);
357 	/* unmask_pm_irq provides an implicit barrier (POSTING_READ) */
358 }
359 
360 void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, u32 disable_mask)
361 {
362 	assert_spin_locked(&dev_priv->irq_lock);
363 
364 	dev_priv->pm_ier &= ~disable_mask;
365 	__gen6_mask_pm_irq(dev_priv, disable_mask);
366 	I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->pm_ier);
367 	/* a barrier is missing here, but we don't really need one */
368 }
369 
370 void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
371 {
372 	spin_lock_irq(&dev_priv->irq_lock);
373 	gen6_reset_pm_iir(dev_priv, dev_priv->pm_rps_events);
374 	dev_priv->rps.pm_iir = 0;
375 	spin_unlock_irq(&dev_priv->irq_lock);
376 }
377 
378 void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
379 {
380 	if (READ_ONCE(dev_priv->rps.interrupts_enabled))
381 		return;
382 
383 	spin_lock_irq(&dev_priv->irq_lock);
384 	WARN_ON_ONCE(dev_priv->rps.pm_iir);
385 	WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
386 	dev_priv->rps.interrupts_enabled = true;
387 	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
388 
389 	spin_unlock_irq(&dev_priv->irq_lock);
390 }
391 
392 u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask)
393 {
394 	return (mask & ~dev_priv->rps.pm_intr_keep);
395 }
396 
397 void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
398 {
399 	if (!READ_ONCE(dev_priv->rps.interrupts_enabled))
400 		return;
401 
402 	spin_lock_irq(&dev_priv->irq_lock);
403 	dev_priv->rps.interrupts_enabled = false;
404 
405 	I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0u));
406 
407 	gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
408 
409 	spin_unlock_irq(&dev_priv->irq_lock);
410 	synchronize_irq(dev_priv->drm.irq);
411 
412 	/* Now that we will not be generating any more work, flush any
413 	 * outstanding tasks. As we are called on the RPS idle path,
414 	 * we will reset the GPU to minimum frequencies, so the current
415 	 * state of the worker can be discarded.
416 	 */
417 	cancel_work_sync(&dev_priv->rps.work);
418 	gen6_reset_rps_interrupts(dev_priv);
419 }
420 
421 void gen9_reset_guc_interrupts(struct drm_i915_private *dev_priv)
422 {
423 	spin_lock_irq(&dev_priv->irq_lock);
424 	gen6_reset_pm_iir(dev_priv, dev_priv->pm_guc_events);
425 	spin_unlock_irq(&dev_priv->irq_lock);
426 }
427 
428 void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv)
429 {
430 	spin_lock_irq(&dev_priv->irq_lock);
431 	if (!dev_priv->guc.interrupts_enabled) {
432 		WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) &
433 				       dev_priv->pm_guc_events);
434 		dev_priv->guc.interrupts_enabled = true;
435 		gen6_enable_pm_irq(dev_priv, dev_priv->pm_guc_events);
436 	}
437 	spin_unlock_irq(&dev_priv->irq_lock);
438 }
439 
440 void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv)
441 {
442 	spin_lock_irq(&dev_priv->irq_lock);
443 	dev_priv->guc.interrupts_enabled = false;
444 
445 	gen6_disable_pm_irq(dev_priv, dev_priv->pm_guc_events);
446 
447 	spin_unlock_irq(&dev_priv->irq_lock);
448 	synchronize_irq(dev_priv->drm.irq);
449 
450 	gen9_reset_guc_interrupts(dev_priv);
451 }
452 
453 /**
454  * bdw_update_port_irq - update DE port interrupt
455  * @dev_priv: driver private
456  * @interrupt_mask: mask of interrupt bits to update
457  * @enabled_irq_mask: mask of interrupt bits to enable
458  */
459 static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
460 				uint32_t interrupt_mask,
461 				uint32_t enabled_irq_mask)
462 {
463 	uint32_t new_val;
464 	uint32_t old_val;
465 
466 	assert_spin_locked(&dev_priv->irq_lock);
467 
468 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
469 
470 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
471 		return;
472 
473 	old_val = I915_READ(GEN8_DE_PORT_IMR);
474 
475 	new_val = old_val;
476 	new_val &= ~interrupt_mask;
477 	new_val |= (~enabled_irq_mask & interrupt_mask);
478 
479 	if (new_val != old_val) {
480 		I915_WRITE(GEN8_DE_PORT_IMR, new_val);
481 		POSTING_READ(GEN8_DE_PORT_IMR);
482 	}
483 }
484 
485 /**
486  * bdw_update_pipe_irq - update DE pipe interrupt
487  * @dev_priv: driver private
488  * @pipe: pipe whose interrupt to update
489  * @interrupt_mask: mask of interrupt bits to update
490  * @enabled_irq_mask: mask of interrupt bits to enable
491  */
492 void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
493 			 enum pipe pipe,
494 			 uint32_t interrupt_mask,
495 			 uint32_t enabled_irq_mask)
496 {
497 	uint32_t new_val;
498 
499 	assert_spin_locked(&dev_priv->irq_lock);
500 
501 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
502 
503 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
504 		return;
505 
506 	new_val = dev_priv->de_irq_mask[pipe];
507 	new_val &= ~interrupt_mask;
508 	new_val |= (~enabled_irq_mask & interrupt_mask);
509 
510 	if (new_val != dev_priv->de_irq_mask[pipe]) {
511 		dev_priv->de_irq_mask[pipe] = new_val;
512 		I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
513 		POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
514 	}
515 }
516 
517 /**
518  * ibx_display_interrupt_update - update SDEIMR
519  * @dev_priv: driver private
520  * @interrupt_mask: mask of interrupt bits to update
521  * @enabled_irq_mask: mask of interrupt bits to enable
522  */
523 void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
524 				  uint32_t interrupt_mask,
525 				  uint32_t enabled_irq_mask)
526 {
527 	uint32_t sdeimr = I915_READ(SDEIMR);
528 	sdeimr &= ~interrupt_mask;
529 	sdeimr |= (~enabled_irq_mask & interrupt_mask);
530 
531 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
532 
533 	assert_spin_locked(&dev_priv->irq_lock);
534 
535 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
536 		return;
537 
538 	I915_WRITE(SDEIMR, sdeimr);
539 	POSTING_READ(SDEIMR);
540 }
541 
542 static void
543 __i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
544 		       u32 enable_mask, u32 status_mask)
545 {
546 	i915_reg_t reg = PIPESTAT(pipe);
547 	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
548 
549 	assert_spin_locked(&dev_priv->irq_lock);
550 	WARN_ON(!intel_irqs_enabled(dev_priv));
551 
552 	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
553 		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
554 		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
555 		      pipe_name(pipe), enable_mask, status_mask))
556 		return;
557 
558 	if ((pipestat & enable_mask) == enable_mask)
559 		return;
560 
561 	dev_priv->pipestat_irq_mask[pipe] |= status_mask;
562 
563 	/* Enable the interrupt, clear any pending status */
564 	pipestat |= enable_mask | status_mask;
565 	I915_WRITE(reg, pipestat);
566 	POSTING_READ(reg);
567 }
568 
569 static void
570 __i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
571 		        u32 enable_mask, u32 status_mask)
572 {
573 	i915_reg_t reg = PIPESTAT(pipe);
574 	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
575 
576 	assert_spin_locked(&dev_priv->irq_lock);
577 	WARN_ON(!intel_irqs_enabled(dev_priv));
578 
579 	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
580 		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
581 		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
582 		      pipe_name(pipe), enable_mask, status_mask))
583 		return;
584 
585 	if ((pipestat & enable_mask) == 0)
586 		return;
587 
588 	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
589 
590 	pipestat &= ~enable_mask;
591 	I915_WRITE(reg, pipestat);
592 	POSTING_READ(reg);
593 }
594 
595 static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
596 {
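	/*
	 * In the PIPESTAT registers the enable bits sit in the high 16 bits,
	 * mirroring the status bits in the low 16 bits, hence the shift.
	 * The sprite flip done and FIFO underrun bits are the exceptions
	 * handled below.
	 */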
597 	u32 enable_mask = status_mask << 16;
598 
599 	/*
600 	 * On pipe A we don't support the PSR interrupt yet,
601 	 * on pipe B and C the same bit MBZ.
602 	 */
603 	if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
604 		return 0;
605 	/*
606 	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
607 	 * A the same bit is for perf counters which we don't use either.
608 	 */
609 	if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
610 		return 0;
611 
612 	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
613 			 SPRITE0_FLIP_DONE_INT_EN_VLV |
614 			 SPRITE1_FLIP_DONE_INT_EN_VLV);
615 	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
616 		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
617 	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
618 		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;
619 
620 	return enable_mask;
621 }
622 
623 void
624 i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
625 		     u32 status_mask)
626 {
627 	u32 enable_mask;
628 
629 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
630 		enable_mask = vlv_get_pipestat_enable_mask(&dev_priv->drm,
631 							   status_mask);
632 	else
633 		enable_mask = status_mask << 16;
634 	__i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
635 }
636 
637 void
638 i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
639 		      u32 status_mask)
640 {
641 	u32 enable_mask;
642 
643 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
644 		enable_mask = vlv_get_pipestat_enable_mask(&dev_priv->drm,
645 							   status_mask);
646 	else
647 		enable_mask = status_mask << 16;
648 	__i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
649 }
650 
651 /**
652  * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
653  * @dev_priv: i915 device private
654  */
655 static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
656 {
657 	if (!dev_priv->opregion.asle || !IS_MOBILE(dev_priv))
658 		return;
659 
660 	spin_lock_irq(&dev_priv->irq_lock);
661 
662 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
663 	if (INTEL_GEN(dev_priv) >= 4)
664 		i915_enable_pipestat(dev_priv, PIPE_A,
665 				     PIPE_LEGACY_BLC_EVENT_STATUS);
666 
667 	spin_unlock_irq(&dev_priv->irq_lock);
668 }
669 
670 /*
671  * This timing diagram depicts the video signal in and
672  * around the vertical blanking period.
673  *
674  * Assumptions about the fictitious mode used in this example:
675  *  vblank_start >= 3
676  *  vsync_start = vblank_start + 1
677  *  vsync_end = vblank_start + 2
678  *  vtotal = vblank_start + 3
679  *
680  *           start of vblank:
681  *           latch double buffered registers
682  *           increment frame counter (ctg+)
683  *           generate start of vblank interrupt (gen4+)
684  *           |
685  *           |          frame start:
686  *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
687  *           |          may be shifted forward 1-3 extra lines via PIPECONF
688  *           |          |
689  *           |          |  start of vsync:
690  *           |          |  generate vsync interrupt
691  *           |          |  |
692  * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
693  *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
694  * ----va---> <-----------------vb--------------------> <--------va-------------
695  *       |          |       <----vs----->                     |
696  * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
697  * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
698  * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
699  *       |          |                                         |
700  *       last visible pixel                                   first visible pixel
701  *                  |                                         increment frame counter (gen3/4)
702  *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
703  *
704  * x  = horizontal active
705  * _  = horizontal blanking
706  * hs = horizontal sync
707  * va = vertical active
708  * vb = vertical blanking
709  * vs = vertical sync
710  * vbs = vblank_start (number)
711  *
712  * Summary:
713  * - most events happen at the start of horizontal sync
714  * - frame start happens at the start of horizontal blank, 1-4 lines
715  *   (depending on PIPECONF settings) after the start of vblank
716  * - gen3/4 pixel and frame counter are synchronized with the start
717  *   of horizontal active on the first line of vertical active
718  */
719 
720 /* Called from drm generic code, passed a 'crtc', which
721  * we use as a pipe index
722  */
723 static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
724 {
725 	struct drm_i915_private *dev_priv = to_i915(dev);
726 	i915_reg_t high_frame, low_frame;
727 	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
728 	struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv,
729 								pipe);
730 	const struct drm_display_mode *mode = &intel_crtc->base.hwmode;
731 
732 	htotal = mode->crtc_htotal;
733 	hsync_start = mode->crtc_hsync_start;
734 	vbl_start = mode->crtc_vblank_start;
735 	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
736 		vbl_start = DIV_ROUND_UP(vbl_start, 2);
737 
738 	/* Convert to pixel count */
739 	vbl_start *= htotal;
740 
741 	/* Start of vblank event occurs at start of hsync */
742 	vbl_start -= htotal - hsync_start;
743 
744 	high_frame = PIPEFRAME(pipe);
745 	low_frame = PIPEFRAMEPIXEL(pipe);
746 
747 	/*
748 	 * High & low register fields aren't synchronized, so make sure
749 	 * we get a low value that's stable across two reads of the high
750 	 * register.
751 	 */
752 	do {
753 		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
754 		low   = I915_READ(low_frame);
755 		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
756 	} while (high1 != high2);
757 
758 	high1 >>= PIPE_FRAME_HIGH_SHIFT;
759 	pixel = low & PIPE_PIXEL_MASK;
760 	low >>= PIPE_FRAME_LOW_SHIFT;
761 
762 	/*
763 	 * The frame counter increments at beginning of active.
764 	 * Cook up a vblank counter by also checking the pixel
765 	 * counter against vblank start.
766 	 */
767 	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
768 }
769 
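/*
 * g4x+ have a dedicated hardware frame counter that already increments at the
 * start of vblank (see the timing diagram above), so it can be read directly.
 */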
770 static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
771 {
772 	struct drm_i915_private *dev_priv = to_i915(dev);
773 
774 	return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
775 }
776 
777 /* I915_READ_FW, only for fast reads of display block, no need for forcewake etc. */
778 static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
779 {
780 	struct drm_device *dev = crtc->base.dev;
781 	struct drm_i915_private *dev_priv = to_i915(dev);
782 	const struct drm_display_mode *mode = &crtc->base.hwmode;
783 	enum pipe pipe = crtc->pipe;
784 	int position, vtotal;
785 
786 	vtotal = mode->crtc_vtotal;
787 	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
788 		vtotal /= 2;
789 
790 	if (IS_GEN2(dev_priv))
791 		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
792 	else
793 		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
794 
795 	/*
796 	 * On HSW, the DSL reg (0x70000) appears to return 0 if we
797 	 * read it just before the start of vblank.  So try it again
798 	 * so we don't accidentally end up spanning a vblank frame
799 		 * increment, causing the pipe_update_end() code to squawk at us.
800 	 *
801 	 * The nature of this problem means we can't simply check the ISR
802 	 * bit and return the vblank start value; nor can we use the scanline
803 	 * debug register in the transcoder as it appears to have the same
804 	 * problem.  We may need to extend this to include other platforms,
805 	 * but so far testing only shows the problem on HSW.
806 	 */
807 	if (HAS_DDI(dev_priv) && !position) {
808 		int i, temp;
809 
810 		for (i = 0; i < 100; i++) {
811 			udelay(1);
812 			temp = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) &
813 				DSL_LINEMASK_GEN3;
814 			if (temp != position) {
815 				position = temp;
816 				break;
817 			}
818 		}
819 	}
820 
821 	/*
822 	 * See update_scanline_offset() for the details on the
823 	 * scanline_offset adjustment.
824 	 */
825 	return (position + crtc->scanline_offset) % vtotal;
826 }
827 
828 static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
829 				    unsigned int flags, int *vpos, int *hpos,
830 				    ktime_t *stime, ktime_t *etime,
831 				    const struct drm_display_mode *mode)
832 {
833 	struct drm_i915_private *dev_priv = to_i915(dev);
834 	struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv,
835 								pipe);
836 	int position;
837 	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
838 	bool in_vbl = true;
839 	int ret = 0;
840 	unsigned long irqflags;
841 
842 	if (WARN_ON(!mode->crtc_clock)) {
843 		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
844 				 "pipe %c\n", pipe_name(pipe));
845 		return 0;
846 	}
847 
848 	htotal = mode->crtc_htotal;
849 	hsync_start = mode->crtc_hsync_start;
850 	vtotal = mode->crtc_vtotal;
851 	vbl_start = mode->crtc_vblank_start;
852 	vbl_end = mode->crtc_vblank_end;
853 
854 	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
855 		vbl_start = DIV_ROUND_UP(vbl_start, 2);
856 		vbl_end /= 2;
857 		vtotal /= 2;
858 	}
859 
860 	ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;
861 
862 	/*
863 	 * Lock uncore.lock, as we will do multiple timing critical raw
864 	 * register reads, potentially with preemption disabled, so the
865 	 * following code must not block on uncore.lock.
866 	 */
867 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
868 
869 	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
870 
871 	/* Get optional system timestamp before query. */
872 	if (stime)
873 		*stime = ktime_get();
874 
875 	if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
876 		/* No obvious pixelcount register. Only query vertical
877 		 * scanout position from Display scan line register.
878 		 */
879 		position = __intel_get_crtc_scanline(intel_crtc);
880 	} else {
881 		/* Have access to pixelcount since start of frame.
882 		 * We can split this into vertical and horizontal
883 		 * scanout position.
884 		 */
885 		position = (I915_READ_FW(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
886 
887 		/* convert to pixel counts */
888 		vbl_start *= htotal;
889 		vbl_end *= htotal;
890 		vtotal *= htotal;
891 
892 		/*
893 		 * In interlaced modes, the pixel counter counts all pixels,
894 		 * so one field will have htotal more pixels. In order to avoid
895 		 * the reported position from jumping backwards when the pixel
896 		 * counter is beyond the length of the shorter field, just
897 		 * clamp the position to the length of the shorter field. This
898 		 * matches how the scanline counter based position works since
899 		 * the scanline counter doesn't count the two half lines.
900 		 */
901 		if (position >= vtotal)
902 			position = vtotal - 1;
903 
904 		/*
905 		 * Start of vblank interrupt is triggered at start of hsync,
906 		 * just prior to the first active line of vblank. However we
907 		 * consider lines to start at the leading edge of horizontal
908 		 * active. So, should we get here before we've crossed into
909 		 * the horizontal active of the first line in vblank, we would
910 		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
911 		 * always add htotal-hsync_start to the current pixel position.
912 		 */
913 		position = (position + htotal - hsync_start) % vtotal;
914 	}
915 
916 	/* Get optional system timestamp after query. */
917 	if (etime)
918 		*etime = ktime_get();
919 
920 	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
921 
922 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
923 
924 	in_vbl = position >= vbl_start && position < vbl_end;
925 
926 	/*
927 	 * While in vblank, position will be negative
928 	 * counting up towards 0 at vbl_end. And outside
929 	 * vblank, position will be positive counting
930 	 * up since vbl_end.
931 	 */
932 	if (position >= vbl_start)
933 		position -= vbl_end;
934 	else
935 		position += vtotal - vbl_end;
936 
937 	if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
938 		*vpos = position;
939 		*hpos = 0;
940 	} else {
941 		*vpos = position / htotal;
942 		*hpos = position - (*vpos * htotal);
943 	}
944 
945 	/* In vblank? */
946 	if (in_vbl)
947 		ret |= DRM_SCANOUTPOS_IN_VBLANK;
948 
949 	return ret;
950 }
951 
952 int intel_get_crtc_scanline(struct intel_crtc *crtc)
953 {
954 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
955 	unsigned long irqflags;
956 	int position;
957 
958 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
959 	position = __intel_get_crtc_scanline(crtc);
960 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
961 
962 	return position;
963 }
964 
965 static int i915_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,
966 			      int *max_error,
967 			      struct timeval *vblank_time,
968 			      unsigned flags)
969 {
970 	struct drm_i915_private *dev_priv = to_i915(dev);
971 	struct intel_crtc *crtc;
972 
973 	if (pipe >= INTEL_INFO(dev_priv)->num_pipes) {
974 		DRM_ERROR("Invalid crtc %u\n", pipe);
975 		return -EINVAL;
976 	}
977 
978 	/* Get drm_crtc to timestamp: */
979 	crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
980 	if (crtc == NULL) {
981 		DRM_ERROR("Invalid crtc %u\n", pipe);
982 		return -EINVAL;
983 	}
984 
985 	if (!crtc->base.hwmode.crtc_clock) {
986 		DRM_DEBUG_KMS("crtc %u is disabled\n", pipe);
987 		return -EBUSY;
988 	}
989 
990 	/* Helper routine in DRM core does all the work: */
991 	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
992 						     vblank_time, flags,
993 						     &crtc->base.hwmode);
994 }
995 
996 static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv)
997 {
998 	u32 busy_up, busy_down, max_avg, min_avg;
999 	u8 new_delay;
1000 
1001 	spin_lock(&mchdev_lock);
1002 
1003 	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
1004 
1005 	new_delay = dev_priv->ips.cur_delay;
1006 
1007 	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
1008 	busy_up = I915_READ(RCPREVBSYTUPAVG);
1009 	busy_down = I915_READ(RCPREVBSYTDNAVG);
1010 	max_avg = I915_READ(RCBMAXAVG);
1011 	min_avg = I915_READ(RCBMINAVG);
1012 
1013 	/* Handle RCS change request from hw */
1014 	if (busy_up > max_avg) {
1015 		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
1016 			new_delay = dev_priv->ips.cur_delay - 1;
1017 		if (new_delay < dev_priv->ips.max_delay)
1018 			new_delay = dev_priv->ips.max_delay;
1019 	} else if (busy_down < min_avg) {
1020 		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
1021 			new_delay = dev_priv->ips.cur_delay + 1;
1022 		if (new_delay > dev_priv->ips.min_delay)
1023 			new_delay = dev_priv->ips.min_delay;
1024 	}
1025 
1026 	if (ironlake_set_drps(dev_priv, new_delay))
1027 		dev_priv->ips.cur_delay = new_delay;
1028 
1029 	spin_unlock(&mchdev_lock);
1032 }
1033 
1034 static void notify_ring(struct intel_engine_cs *engine)
1035 {
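	/*
	 * Record that the breadcrumb interrupt fired before waking anyone,
	 * so a waiter racing with us still sees irq_posted; the tracepoint
	 * fires only when a waiter was actually woken.
	 */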
1036 	smp_store_mb(engine->breadcrumbs.irq_posted, true);
1037 	if (intel_engine_wakeup(engine))
1038 		trace_i915_gem_request_notify(engine);
1039 }
1040 
1041 static void vlv_c0_read(struct drm_i915_private *dev_priv,
1042 			struct intel_rps_ei *ei)
1043 {
1044 	ei->cz_clock = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
1045 	ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT);
1046 	ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
1047 }
1048 
1049 static bool vlv_c0_above(struct drm_i915_private *dev_priv,
1050 			 const struct intel_rps_ei *old,
1051 			 const struct intel_rps_ei *now,
1052 			 int threshold)
1053 {
1054 	u64 time, c0;
1055 	unsigned int mul = 100;
1056 
1057 	if (old->cz_clock == 0)
1058 		return false;
1059 
1060 	if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
1061 		mul <<= 8;
1062 
1063 	time = now->cz_clock - old->cz_clock;
1064 	time *= threshold * dev_priv->czclk_freq;
1065 
1066 	/* Workload can be split between render + media, e.g. SwapBuffers
1067 	 * being blitted in X after being rendered in mesa. To account for
1068 	 * this we need to combine both engines into our activity counter.
1069 	 */
1070 	c0 = now->render_c0 - old->render_c0;
1071 	c0 += now->media_c0 - old->media_c0;
1072 	c0 *= mul * VLV_CZ_CLOCK_TO_MILLI_SEC;
1073 
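	/*
	 * Both sides are now in comparable units, so this effectively checks
	 * whether the combined render+media C0 residency exceeded @threshold
	 * percent of the elapsed evaluation interval.
	 */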
1074 	return c0 >= time;
1075 }
1076 
1077 void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
1078 {
1079 	vlv_c0_read(dev_priv, &dev_priv->rps.down_ei);
1080 	dev_priv->rps.up_ei = dev_priv->rps.down_ei;
1081 }
1082 
1083 static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
1084 {
1085 	struct intel_rps_ei now;
1086 	u32 events = 0;
1087 
1088 	if ((pm_iir & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED)) == 0)
1089 		return 0;
1090 
1091 	vlv_c0_read(dev_priv, &now);
1092 	if (now.cz_clock == 0)
1093 		return 0;
1094 
1095 	if (pm_iir & GEN6_PM_RP_DOWN_EI_EXPIRED) {
1096 		if (!vlv_c0_above(dev_priv,
1097 				  &dev_priv->rps.down_ei, &now,
1098 				  dev_priv->rps.down_threshold))
1099 			events |= GEN6_PM_RP_DOWN_THRESHOLD;
1100 		dev_priv->rps.down_ei = now;
1101 	}
1102 
1103 	if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
1104 		if (vlv_c0_above(dev_priv,
1105 				 &dev_priv->rps.up_ei, &now,
1106 				 dev_priv->rps.up_threshold))
1107 			events |= GEN6_PM_RP_UP_THRESHOLD;
1108 		dev_priv->rps.up_ei = now;
1109 	}
1110 
1111 	return events;
1112 }
1113 
1114 static bool any_waiters(struct drm_i915_private *dev_priv)
1115 {
1116 	struct intel_engine_cs *engine;
1117 	enum intel_engine_id id;
1118 
1119 	for_each_engine(engine, dev_priv, id)
1120 		if (intel_engine_has_waiter(engine))
1121 			return true;
1122 
1123 	return false;
1124 }
1125 
1126 static void gen6_pm_rps_work(struct work_struct *work)
1127 {
1128 	struct drm_i915_private *dev_priv =
1129 		container_of(work, struct drm_i915_private, rps.work);
1130 	bool client_boost;
1131 	int new_delay, adj, min, max;
1132 	u32 pm_iir;
1133 
1134 	spin_lock_irq(&dev_priv->irq_lock);
1135 	/* Speed up work cancellation while disabling the rps interrupts. */
1136 	if (!dev_priv->rps.interrupts_enabled) {
1137 		spin_unlock_irq(&dev_priv->irq_lock);
1138 		return;
1139 	}
1140 
1141 	pm_iir = dev_priv->rps.pm_iir;
1142 	dev_priv->rps.pm_iir = 0;
1143 	/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
1144 	gen6_unmask_pm_irq(dev_priv, dev_priv->pm_rps_events);
1145 	client_boost = dev_priv->rps.client_boost;
1146 	dev_priv->rps.client_boost = false;
1147 	spin_unlock_irq(&dev_priv->irq_lock);
1148 
1149 	/* Make sure we didn't queue anything we're not going to process. */
1150 	WARN_ON(pm_iir & ~dev_priv->pm_rps_events);
1151 
1152 	if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
1153 		return;
1154 
1155 	mutex_lock(&dev_priv->rps.hw_lock);
1156 
1157 	pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);
1158 
1159 	adj = dev_priv->rps.last_adj;
1160 	new_delay = dev_priv->rps.cur_freq;
1161 	min = dev_priv->rps.min_freq_softlimit;
1162 	max = dev_priv->rps.max_freq_softlimit;
1163 	if (client_boost || any_waiters(dev_priv))
1164 		max = dev_priv->rps.max_freq;
1165 	if (client_boost && new_delay < dev_priv->rps.boost_freq) {
1166 		new_delay = dev_priv->rps.boost_freq;
1167 		adj = 0;
1168 	} else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
1169 		if (adj > 0)
1170 			adj *= 2;
1171 		else /* CHV needs even encode values */
1172 			adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;
1173 
1174 		if (new_delay >= dev_priv->rps.max_freq_softlimit)
1175 			adj = 0;
1176 		/*
1177 		 * For better performance, jump directly
1178 		 * to RPe if we're below it.
1179 		 */
1180 		if (new_delay < dev_priv->rps.efficient_freq - adj) {
1181 			new_delay = dev_priv->rps.efficient_freq;
1182 			adj = 0;
1183 		}
1184 	} else if (client_boost || any_waiters(dev_priv)) {
1185 		adj = 0;
1186 	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
1187 		if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
1188 			new_delay = dev_priv->rps.efficient_freq;
1189 		else
1190 			new_delay = dev_priv->rps.min_freq_softlimit;
1191 		adj = 0;
1192 	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
1193 		if (adj < 0)
1194 			adj *= 2;
1195 		else /* CHV needs even encode values */
1196 			adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;
1197 
1198 		if (new_delay <= dev_priv->rps.min_freq_softlimit)
1199 			adj = 0;
1200 	} else { /* unknown event */
1201 		adj = 0;
1202 	}
1203 
1204 	dev_priv->rps.last_adj = adj;
1205 
1206 	/* sysfs frequency interfaces may have snuck in while servicing the
1207 	 * interrupt
1208 	 */
1209 	new_delay += adj;
1210 	new_delay = clamp_t(int, new_delay, min, max);
1211 
1212 	intel_set_rps(dev_priv, new_delay);
1213 
1214 	mutex_unlock(&dev_priv->rps.hw_lock);
1215 }
1216 
1217 
1218 /**
1219  * ivybridge_parity_work - Workqueue called when a parity error interrupt
1220  * occurred.
1221  * @work: workqueue struct
1222  *
1223  * Doesn't actually do anything except notify userspace. As a consequence of
1224  * this event, userspace should try to remap the bad rows, since statistically
1225  * the same row is more likely to go bad again.
1226  */
1227 static void ivybridge_parity_work(struct work_struct *work)
1228 {
1229 	struct drm_i915_private *dev_priv =
1230 		container_of(work, struct drm_i915_private, l3_parity.error_work);
1231 	u32 error_status, row, bank, subbank;
1232 	char *parity_event[6];
1233 	uint32_t misccpctl;
1234 	uint8_t slice = 0;
1235 
1236 	/* We must turn off DOP level clock gating to access the L3 registers.
1237 	 * In order to prevent a get/put style interface, acquire struct mutex
1238 	 * any time we access those registers.
1239 	 */
1240 	mutex_lock(&dev_priv->drm.struct_mutex);
1241 
1242 	/* If we've screwed up tracking, just let the interrupt fire again */
1243 	if (WARN_ON(!dev_priv->l3_parity.which_slice))
1244 		goto out;
1245 
1246 	misccpctl = I915_READ(GEN7_MISCCPCTL);
1247 	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
1248 	POSTING_READ(GEN7_MISCCPCTL);
1249 
1250 	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
1251 		i915_reg_t reg;
1252 
1253 		slice--;
1254 		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv)))
1255 			break;
1256 
1257 		dev_priv->l3_parity.which_slice &= ~(1<<slice);
1258 
1259 		reg = GEN7_L3CDERRST1(slice);
1260 
1261 		error_status = I915_READ(reg);
1262 		row = GEN7_PARITY_ERROR_ROW(error_status);
1263 		bank = GEN7_PARITY_ERROR_BANK(error_status);
1264 		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
1265 
1266 		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
1267 		POSTING_READ(reg);
1268 
1269 		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
1270 		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
1271 		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
1272 		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
1273 		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
1274 		parity_event[5] = NULL;
1275 
1276 		kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj,
1277 				   KOBJ_CHANGE, parity_event);
1278 
1279 		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
1280 			  slice, row, bank, subbank);
1281 
1282 		kfree(parity_event[4]);
1283 		kfree(parity_event[3]);
1284 		kfree(parity_event[2]);
1285 		kfree(parity_event[1]);
1286 	}
1287 
1288 	I915_WRITE(GEN7_MISCCPCTL, misccpctl);
1289 
1290 out:
1291 	WARN_ON(dev_priv->l3_parity.which_slice);
1292 	spin_lock_irq(&dev_priv->irq_lock);
1293 	gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
1294 	spin_unlock_irq(&dev_priv->irq_lock);
1295 
1296 	mutex_unlock(&dev_priv->drm.struct_mutex);
1297 }
1298 
1299 static void ivybridge_parity_error_irq_handler(struct drm_i915_private *dev_priv,
1300 					       u32 iir)
1301 {
1302 	if (!HAS_L3_DPF(dev_priv))
1303 		return;
1304 
1305 	spin_lock(&dev_priv->irq_lock);
1306 	gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
1307 	spin_unlock(&dev_priv->irq_lock);
1308 
1309 	iir &= GT_PARITY_ERROR(dev_priv);
1310 	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
1311 		dev_priv->l3_parity.which_slice |= 1 << 1;
1312 
1313 	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
1314 		dev_priv->l3_parity.which_slice |= 1 << 0;
1315 
1316 	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
1317 }
1318 
1319 static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv,
1320 			       u32 gt_iir)
1321 {
1322 	if (gt_iir & GT_RENDER_USER_INTERRUPT)
1323 		notify_ring(dev_priv->engine[RCS]);
1324 	if (gt_iir & ILK_BSD_USER_INTERRUPT)
1325 		notify_ring(dev_priv->engine[VCS]);
1326 }
1327 
1328 static void snb_gt_irq_handler(struct drm_i915_private *dev_priv,
1329 			       u32 gt_iir)
1330 {
1331 	if (gt_iir & GT_RENDER_USER_INTERRUPT)
1332 		notify_ring(dev_priv->engine[RCS]);
1333 	if (gt_iir & GT_BSD_USER_INTERRUPT)
1334 		notify_ring(dev_priv->engine[VCS]);
1335 	if (gt_iir & GT_BLT_USER_INTERRUPT)
1336 		notify_ring(dev_priv->engine[BCS]);
1337 
1338 	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
1339 		      GT_BSD_CS_ERROR_INTERRUPT |
1340 		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
1341 		DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);
1342 
1343 	if (gt_iir & GT_PARITY_ERROR(dev_priv))
1344 		ivybridge_parity_error_irq_handler(dev_priv, gt_iir);
1345 }
1346 
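/*
 * Each engine's interrupt bits occupy a per-engine bit position (test_shift)
 * within its GT IIR dword: a user interrupt wakes any waiters on that engine,
 * while a context switch event kicks the engine's irq tasklet.
 */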
1347 static __always_inline void
1348 gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir, int test_shift)
1349 {
1350 	if (iir & (GT_RENDER_USER_INTERRUPT << test_shift))
1351 		notify_ring(engine);
1352 	if (iir & (GT_CONTEXT_SWITCH_INTERRUPT << test_shift))
1353 		tasklet_schedule(&engine->irq_tasklet);
1354 }
1355 
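/*
 * Read and clear (ack) the GT IIR banks flagged in master_ctl while still in
 * hard irq context; the payloads are stashed in gt_iir[] and processed later
 * by gen8_gt_irq_handler().
 */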
1356 static irqreturn_t gen8_gt_irq_ack(struct drm_i915_private *dev_priv,
1357 				   u32 master_ctl,
1358 				   u32 gt_iir[4])
1359 {
1360 	irqreturn_t ret = IRQ_NONE;
1361 
1362 	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
1363 		gt_iir[0] = I915_READ_FW(GEN8_GT_IIR(0));
1364 		if (gt_iir[0]) {
1365 			I915_WRITE_FW(GEN8_GT_IIR(0), gt_iir[0]);
1366 			ret = IRQ_HANDLED;
1367 		} else
1368 			DRM_ERROR("The master control interrupt lied (GT0)!\n");
1369 	}
1370 
1371 	if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
1372 		gt_iir[1] = I915_READ_FW(GEN8_GT_IIR(1));
1373 		if (gt_iir[1]) {
1374 			I915_WRITE_FW(GEN8_GT_IIR(1), gt_iir[1]);
1375 			ret = IRQ_HANDLED;
1376 		} else
1377 			DRM_ERROR("The master control interrupt lied (GT1)!\n");
1378 	}
1379 
1380 	if (master_ctl & GEN8_GT_VECS_IRQ) {
1381 		gt_iir[3] = I915_READ_FW(GEN8_GT_IIR(3));
1382 		if (gt_iir[3]) {
1383 			I915_WRITE_FW(GEN8_GT_IIR(3), gt_iir[3]);
1384 			ret = IRQ_HANDLED;
1385 		} else
1386 			DRM_ERROR("The master control interrupt lied (GT3)!\n");
1387 	}
1388 
1389 	if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
1390 		gt_iir[2] = I915_READ_FW(GEN8_GT_IIR(2));
1391 		if (gt_iir[2] & (dev_priv->pm_rps_events |
1392 				 dev_priv->pm_guc_events)) {
1393 			I915_WRITE_FW(GEN8_GT_IIR(2),
1394 				      gt_iir[2] & (dev_priv->pm_rps_events |
1395 						   dev_priv->pm_guc_events));
1396 			ret = IRQ_HANDLED;
1397 		} else
1398 			DRM_ERROR("The master control interrupt lied (PM)!\n");
1399 	}
1400 
1401 	return ret;
1402 }
1403 
1404 static void gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
1405 				u32 gt_iir[4])
1406 {
1407 	if (gt_iir[0]) {
1408 		gen8_cs_irq_handler(dev_priv->engine[RCS],
1409 				    gt_iir[0], GEN8_RCS_IRQ_SHIFT);
1410 		gen8_cs_irq_handler(dev_priv->engine[BCS],
1411 				    gt_iir[0], GEN8_BCS_IRQ_SHIFT);
1412 	}
1413 
1414 	if (gt_iir[1]) {
1415 		gen8_cs_irq_handler(dev_priv->engine[VCS],
1416 				    gt_iir[1], GEN8_VCS1_IRQ_SHIFT);
1417 		gen8_cs_irq_handler(dev_priv->engine[VCS2],
1418 				    gt_iir[1], GEN8_VCS2_IRQ_SHIFT);
1419 	}
1420 
1421 	if (gt_iir[3])
1422 		gen8_cs_irq_handler(dev_priv->engine[VECS],
1423 				    gt_iir[3], GEN8_VECS_IRQ_SHIFT);
1424 
1425 	if (gt_iir[2] & dev_priv->pm_rps_events)
1426 		gen6_rps_irq_handler(dev_priv, gt_iir[2]);
1427 
1428 	if (gt_iir[2] & dev_priv->pm_guc_events)
1429 		gen9_guc_irq_handler(dev_priv, gt_iir[2]);
1430 }
1431 
1432 static bool bxt_port_hotplug_long_detect(enum port port, u32 val)
1433 {
1434 	switch (port) {
1435 	case PORT_A:
1436 		return val & PORTA_HOTPLUG_LONG_DETECT;
1437 	case PORT_B:
1438 		return val & PORTB_HOTPLUG_LONG_DETECT;
1439 	case PORT_C:
1440 		return val & PORTC_HOTPLUG_LONG_DETECT;
1441 	default:
1442 		return false;
1443 	}
1444 }
1445 
1446 static bool spt_port_hotplug2_long_detect(enum port port, u32 val)
1447 {
1448 	switch (port) {
1449 	case PORT_E:
1450 		return val & PORTE_HOTPLUG_LONG_DETECT;
1451 	default:
1452 		return false;
1453 	}
1454 }
1455 
1456 static bool spt_port_hotplug_long_detect(enum port port, u32 val)
1457 {
1458 	switch (port) {
1459 	case PORT_A:
1460 		return val & PORTA_HOTPLUG_LONG_DETECT;
1461 	case PORT_B:
1462 		return val & PORTB_HOTPLUG_LONG_DETECT;
1463 	case PORT_C:
1464 		return val & PORTC_HOTPLUG_LONG_DETECT;
1465 	case PORT_D:
1466 		return val & PORTD_HOTPLUG_LONG_DETECT;
1467 	default:
1468 		return false;
1469 	}
1470 }
1471 
1472 static bool ilk_port_hotplug_long_detect(enum port port, u32 val)
1473 {
1474 	switch (port) {
1475 	case PORT_A:
1476 		return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
1477 	default:
1478 		return false;
1479 	}
1480 }
1481 
1482 static bool pch_port_hotplug_long_detect(enum port port, u32 val)
1483 {
1484 	switch (port) {
1485 	case PORT_B:
1486 		return val & PORTB_HOTPLUG_LONG_DETECT;
1487 	case PORT_C:
1488 		return val & PORTC_HOTPLUG_LONG_DETECT;
1489 	case PORT_D:
1490 		return val & PORTD_HOTPLUG_LONG_DETECT;
1491 	default:
1492 		return false;
1493 	}
1494 }
1495 
1496 static bool i9xx_port_hotplug_long_detect(enum port port, u32 val)
1497 {
1498 	switch (port) {
1499 	case PORT_B:
1500 		return val & PORTB_HOTPLUG_INT_LONG_PULSE;
1501 	case PORT_C:
1502 		return val & PORTC_HOTPLUG_INT_LONG_PULSE;
1503 	case PORT_D:
1504 		return val & PORTD_HOTPLUG_INT_LONG_PULSE;
1505 	default:
1506 		return false;
1507 	}
1508 }
1509 
1510 /*
1511  * Get a bit mask of pins that have triggered, and which ones may be long.
1512  * This can be called multiple times with the same masks to accumulate
1513  * hotplug detection results from several registers.
1514  *
1515  * Note that the caller is expected to zero out the masks initially.
1516  */
1517 static void intel_get_hpd_pins(u32 *pin_mask, u32 *long_mask,
1518 			     u32 hotplug_trigger, u32 dig_hotplug_reg,
1519 			     const u32 hpd[HPD_NUM_PINS],
1520 			     bool long_pulse_detect(enum port port, u32 val))
1521 {
1522 	enum port port;
1523 	int i;
1524 
1525 	for_each_hpd_pin(i) {
1526 		if ((hpd[i] & hotplug_trigger) == 0)
1527 			continue;
1528 
1529 		*pin_mask |= BIT(i);
1530 
1531 		if (!intel_hpd_pin_to_port(i, &port))
1532 			continue;
1533 
1534 		if (long_pulse_detect(port, dig_hotplug_reg))
1535 			*long_mask |= BIT(i);
1536 	}
1537 
1538 	DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x\n",
1539 			 hotplug_trigger, dig_hotplug_reg, *pin_mask);
1540 
1541 }
1542 
1543 static void gmbus_irq_handler(struct drm_i915_private *dev_priv)
1544 {
1545 	wake_up_all(&dev_priv->gmbus_wait_queue);
1546 }
1547 
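/*
 * DP AUX completions share the GMBUS wait queue, hence the identical wakeup.
 */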
1548 static void dp_aux_irq_handler(struct drm_i915_private *dev_priv)
1549 {
1550 	wake_up_all(&dev_priv->gmbus_wait_queue);
1551 }
1552 
1553 #if defined(CONFIG_DEBUG_FS)
1554 static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1555 					 enum pipe pipe,
1556 					 uint32_t crc0, uint32_t crc1,
1557 					 uint32_t crc2, uint32_t crc3,
1558 					 uint32_t crc4)
1559 {
1560 	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
1561 	struct intel_pipe_crc_entry *entry;
1562 	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
1563 	struct drm_driver *driver = dev_priv->drm.driver;
1564 	uint32_t crcs[5];
1565 	int head, tail;
1566 
1567 	spin_lock(&pipe_crc->lock);
1568 	if (pipe_crc->source) {
1569 		if (!pipe_crc->entries) {
1570 			spin_unlock(&pipe_crc->lock);
1571 			DRM_DEBUG_KMS("spurious interrupt\n");
1572 			return;
1573 		}
1574 
1575 		head = pipe_crc->head;
1576 		tail = pipe_crc->tail;
1577 
1578 		if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
1579 			spin_unlock(&pipe_crc->lock);
1580 			DRM_ERROR("CRC buffer overflowing\n");
1581 			return;
1582 		}
1583 
1584 		entry = &pipe_crc->entries[head];
1585 
1586 		entry->frame = driver->get_vblank_counter(&dev_priv->drm, pipe);
1587 		entry->crc[0] = crc0;
1588 		entry->crc[1] = crc1;
1589 		entry->crc[2] = crc2;
1590 		entry->crc[3] = crc3;
1591 		entry->crc[4] = crc4;
1592 
1593 		head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
1594 		pipe_crc->head = head;
1595 
1596 		spin_unlock(&pipe_crc->lock);
1597 
1598 		wake_up_interruptible(&pipe_crc->wq);
1599 	} else {
1600 		/*
1601 		 * For some not yet identified reason, the first CRC is
1602 		 * bonkers. So let's just wait for the next vblank and read
1603 		 * out the buggy result.
1604 		 *
1605 		 * On CHV sometimes the second CRC is bonkers as well, so
1606 		 * don't trust that one either.
1607 		 */
1608 		if (pipe_crc->skipped == 0 ||
1609 		    (IS_CHERRYVIEW(dev_priv) && pipe_crc->skipped == 1)) {
1610 			pipe_crc->skipped++;
1611 			spin_unlock(&pipe_crc->lock);
1612 			return;
1613 		}
1614 		spin_unlock(&pipe_crc->lock);
1615 		crcs[0] = crc0;
1616 		crcs[1] = crc1;
1617 		crcs[2] = crc2;
1618 		crcs[3] = crc3;
1619 		crcs[4] = crc4;
1620 		drm_crtc_add_crc_entry(&crtc->base, true,
1621 				       drm_accurate_vblank_count(&crtc->base),
1622 				       crcs);
1623 	}
1624 }
1625 #else
1626 static inline void
1627 display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1628 			     enum pipe pipe,
1629 			     uint32_t crc0, uint32_t crc1,
1630 			     uint32_t crc2, uint32_t crc3,
1631 			     uint32_t crc4) {}
1632 #endif
1633 
1634 
1635 static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1636 				     enum pipe pipe)
1637 {
1638 	display_pipe_crc_irq_handler(dev_priv, pipe,
1639 				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1640 				     0, 0, 0, 0);
1641 }
1642 
1643 static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1644 				     enum pipe pipe)
1645 {
1646 	display_pipe_crc_irq_handler(dev_priv, pipe,
1647 				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1648 				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
1649 				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
1650 				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
1651 				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
1652 }
1653 
1654 static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1655 				      enum pipe pipe)
1656 {
1657 	uint32_t res1, res2;
1658 
1659 	if (INTEL_GEN(dev_priv) >= 3)
1660 		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
1661 	else
1662 		res1 = 0;
1663 
1664 	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
1665 		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
1666 	else
1667 		res2 = 0;
1668 
1669 	display_pipe_crc_irq_handler(dev_priv, pipe,
1670 				     I915_READ(PIPE_CRC_RES_RED(pipe)),
1671 				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
1672 				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
1673 				     res1, res2);
1674 }
1675 
1676 /* The RPS events need forcewake, so we add them to a work queue and mask their
1677  * IMR bits until the work is done. Other interrupts can be processed without
1678  * the work queue. */
1679 static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
1680 {
1681 	if (pm_iir & dev_priv->pm_rps_events) {
1682 		spin_lock(&dev_priv->irq_lock);
1683 		gen6_mask_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
1684 		if (dev_priv->rps.interrupts_enabled) {
1685 			dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
1686 			schedule_work(&dev_priv->rps.work);
1687 		}
1688 		spin_unlock(&dev_priv->irq_lock);
1689 	}
1690 
1691 	if (INTEL_INFO(dev_priv)->gen >= 8)
1692 		return;
1693 
1694 	if (HAS_VEBOX(dev_priv)) {
1695 		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
1696 			notify_ring(dev_priv->engine[VECS]);
1697 
1698 		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
1699 			DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
1700 	}
1701 }
1702 
1703 static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 gt_iir)
1704 {
1705 	if (gt_iir & GEN9_GUC_TO_HOST_INT_EVENT) {
1706 		/* Sample the log buffer flush related bits & clear them out
1707 		 * immediately from the message identity register to minimize the
1708 		 * probability of losing a flush interrupt when there are back
1709 		 * to back flush interrupts.
1710 		 * There can be a new flush interrupt, for different log buffer
1711 		 * type (like for ISR), whilst Host is handling one (for DPC).
1712 		 * Since same bit is used in message register for ISR & DPC, it
1713 		 * could happen that GuC sets the bit for 2nd interrupt but Host
1714 		 * clears out the bit on handling the 1st interrupt.
1715 		 */
1716 		u32 msg, flush;
1717 
1718 		msg = I915_READ(SOFT_SCRATCH(15));
1719 		flush = msg & (INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED |
1720 			       INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER);
1721 		if (flush) {
1722 			/* Clear the message bits that are handled */
1723 			I915_WRITE(SOFT_SCRATCH(15), msg & ~flush);
1724 
1725 			/* Handle flush interrupt in bottom half */
1726 			queue_work(dev_priv->guc.log.flush_wq,
1727 				   &dev_priv->guc.log.flush_work);
1728 
1729 			dev_priv->guc.log.flush_interrupt_count++;
1730 		} else {
1731 			/* Leaving unhandled event bits uncleared won't cause the
1732 			 * interrupt to be re-triggered.
1733 			 */
1734 		}
1735 	}
1736 }
1737 
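/*
 * Report the vblank to the DRM core and complete any pending MMIO page flip
 * for this pipe.
 */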
1738 static bool intel_pipe_handle_vblank(struct drm_i915_private *dev_priv,
1739 				     enum pipe pipe)
1740 {
1741 	bool ret;
1742 
1743 	ret = drm_handle_vblank(&dev_priv->drm, pipe);
1744 	if (ret)
1745 		intel_finish_page_flip_mmio(dev_priv, pipe);
1746 
1747 	return ret;
1748 }
1749 
1750 static void valleyview_pipestat_irq_ack(struct drm_i915_private *dev_priv,
1751 					u32 iir, u32 pipe_stats[I915_MAX_PIPES])
1752 {
1753 	int pipe;
1754 
1755 	spin_lock(&dev_priv->irq_lock);
1756 
1757 	if (!dev_priv->display_irqs_enabled) {
1758 		spin_unlock(&dev_priv->irq_lock);
1759 		return;
1760 	}
1761 
1762 	for_each_pipe(dev_priv, pipe) {
1763 		i915_reg_t reg;
1764 		u32 mask, iir_bit = 0;
1765 
1766 		/*
1767 		 * PIPESTAT bits get signalled even when the interrupt is
1768 		 * disabled with the mask bits, and some of the status bits do
1769 		 * not generate interrupts at all (like the underrun bit). Hence
1770 		 * we need to be careful that we only handle what we want to
1771 		 * handle.
1772 		 */
1773 
1774 		/* fifo underruns are filtered in the underrun handler. */
1775 		mask = PIPE_FIFO_UNDERRUN_STATUS;
1776 
1777 		switch (pipe) {
1778 		case PIPE_A:
1779 			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
1780 			break;
1781 		case PIPE_B:
1782 			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
1783 			break;
1784 		case PIPE_C:
1785 			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
1786 			break;
1787 		}
1788 		if (iir & iir_bit)
1789 			mask |= dev_priv->pipestat_irq_mask[pipe];
1790 
1791 		if (!mask)
1792 			continue;
1793 
1794 		reg = PIPESTAT(pipe);
1795 		mask |= PIPESTAT_INT_ENABLE_MASK;
1796 		pipe_stats[pipe] = I915_READ(reg) & mask;
1797 
1798 		/*
1799 		 * Clear the PIPE*STAT regs before the IIR
1800 		 */
1801 		if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS |
1802 					PIPESTAT_INT_STATUS_MASK))
1803 			I915_WRITE(reg, pipe_stats[pipe]);
1804 	}
1805 	spin_unlock(&dev_priv->irq_lock);
1806 }
1807 
1808 static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
1809 					    u32 pipe_stats[I915_MAX_PIPES])
1810 {
1811 	enum pipe pipe;
1812 
1813 	for_each_pipe(dev_priv, pipe) {
1814 		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
1815 		    intel_pipe_handle_vblank(dev_priv, pipe))
1816 			intel_check_page_flip(dev_priv, pipe);
1817 
1818 		if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV)
1819 			intel_finish_page_flip_cs(dev_priv, pipe);
1820 
1821 		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1822 			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1823 
1824 		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1825 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1826 	}
1827 
1828 	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
1829 		gmbus_irq_handler(dev_priv);
1830 }
1831 
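/*
 * Read the latched hotplug status and write it back to ack (clear) it, so
 * that new hotplug events can be latched while we process this one.
 */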
1832 static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
1833 {
1834 	u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
1835 
1836 	if (hotplug_status)
1837 		I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
1838 
1839 	return hotplug_status;
1840 }
1841 
1842 static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv,
1843 				 u32 hotplug_status)
1844 {
1845 	u32 pin_mask = 0, long_mask = 0;
1846 
1847 	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
1848 	    IS_CHERRYVIEW(dev_priv)) {
1849 		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
1850 
1851 		if (hotplug_trigger) {
1852 			intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
1853 					   hotplug_trigger, hpd_status_g4x,
1854 					   i9xx_port_hotplug_long_detect);
1855 
1856 			intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1857 		}
1858 
1859 		if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
1860 			dp_aux_irq_handler(dev_priv);
1861 	} else {
1862 		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
1863 
1864 		if (hotplug_trigger) {
1865 			intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
1866 					   hotplug_trigger, hpd_status_i915,
1867 					   i9xx_port_hotplug_long_detect);
1868 			intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1869 		}
1870 	}
1871 }
1872 
1873 static irqreturn_t valleyview_irq_handler(int irq, void *arg)
1874 {
1875 	struct drm_device *dev = arg;
1876 	struct drm_i915_private *dev_priv = to_i915(dev);
1877 	irqreturn_t ret = IRQ_NONE;
1878 
1879 	if (!intel_irqs_enabled(dev_priv))
1880 		return IRQ_NONE;
1881 
1882 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
1883 	disable_rpm_wakeref_asserts(dev_priv);
1884 
1885 	do {
1886 		u32 iir, gt_iir, pm_iir;
1887 		u32 pipe_stats[I915_MAX_PIPES] = {};
1888 		u32 hotplug_status = 0;
1889 		u32 ier = 0;
1890 
1891 		gt_iir = I915_READ(GTIIR);
1892 		pm_iir = I915_READ(GEN6_PMIIR);
1893 		iir = I915_READ(VLV_IIR);
1894 
1895 		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
1896 			break;
1897 
1898 		ret = IRQ_HANDLED;
1899 
1900 		/*
1901 		 * Theory on interrupt generation, based on empirical evidence:
1902 		 *
1903 		 * x = ((VLV_IIR & VLV_IER) ||
1904 		 *      (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) &&
1905 		 *       (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE)));
1906 		 *
1907 		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
1908 		 * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to
1909 		 * guarantee the CPU interrupt will be raised again even if we
1910 		 * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR
1911 		 * bits this time around.
1912 		 */
1913 		I915_WRITE(VLV_MASTER_IER, 0);
1914 		ier = I915_READ(VLV_IER);
1915 		I915_WRITE(VLV_IER, 0);
1916 
1917 		if (gt_iir)
1918 			I915_WRITE(GTIIR, gt_iir);
1919 		if (pm_iir)
1920 			I915_WRITE(GEN6_PMIIR, pm_iir);
1921 
1922 		if (iir & I915_DISPLAY_PORT_INTERRUPT)
1923 			hotplug_status = i9xx_hpd_irq_ack(dev_priv);
1924 
1925 		/* Call regardless, as some status bits might not be
1926 		 * signalled in iir */
1927 		valleyview_pipestat_irq_ack(dev_priv, iir, pipe_stats);
1928 
1929 		if (iir & (I915_LPE_PIPE_A_INTERRUPT |
1930 			   I915_LPE_PIPE_B_INTERRUPT))
1931 			intel_lpe_audio_irq_handler(dev_priv);
1932 
1933 		/*
1934 		 * VLV_IIR is single buffered, and reflects the level
1935 		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
1936 		 */
1937 		if (iir)
1938 			I915_WRITE(VLV_IIR, iir);
1939 
1940 		I915_WRITE(VLV_IER, ier);
1941 		I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
1942 		POSTING_READ(VLV_MASTER_IER);
1943 
1944 		if (gt_iir)
1945 			snb_gt_irq_handler(dev_priv, gt_iir);
1946 		if (pm_iir)
1947 			gen6_rps_irq_handler(dev_priv, pm_iir);
1948 
1949 		if (hotplug_status)
1950 			i9xx_hpd_irq_handler(dev_priv, hotplug_status);
1951 
1952 		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
1953 	} while (0);
1954 
1955 	enable_rpm_wakeref_asserts(dev_priv);
1956 
1957 	return ret;
1958 }
1959 
1960 static irqreturn_t cherryview_irq_handler(int irq, void *arg)
1961 {
1962 	struct drm_device *dev = arg;
1963 	struct drm_i915_private *dev_priv = to_i915(dev);
1964 	irqreturn_t ret = IRQ_NONE;
1965 
1966 	if (!intel_irqs_enabled(dev_priv))
1967 		return IRQ_NONE;
1968 
1969 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
1970 	disable_rpm_wakeref_asserts(dev_priv);
1971 
1972 	do {
1973 		u32 master_ctl, iir;
1974 		u32 gt_iir[4] = {};
1975 		u32 pipe_stats[I915_MAX_PIPES] = {};
1976 		u32 hotplug_status = 0;
1977 		u32 ier = 0;
1978 
1979 		master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
1980 		iir = I915_READ(VLV_IIR);
1981 
1982 		if (master_ctl == 0 && iir == 0)
1983 			break;
1984 
1985 		ret = IRQ_HANDLED;
1986 
1987 		/*
1988 		 * Theory on interrupt generation, based on empirical evidence:
1989 		 *
1990 		 * x = ((VLV_IIR & VLV_IER) ||
1991 		 *      ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) &&
1992 		 *       (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL)));
1993 		 *
1994 		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
1995 		 * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to
1996 		 * guarantee the CPU interrupt will be raised again even if we
1997 		 * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL
1998 		 * bits this time around.
1999 		 */
2000 		I915_WRITE(GEN8_MASTER_IRQ, 0);
2001 		ier = I915_READ(VLV_IER);
2002 		I915_WRITE(VLV_IER, 0);
2003 
2004 		gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);
2005 
2006 		if (iir & I915_DISPLAY_PORT_INTERRUPT)
2007 			hotplug_status = i9xx_hpd_irq_ack(dev_priv);
2008 
2009 		/* Call regardless, as some status bits might not be
2010 		 * signalled in iir */
2011 		valleyview_pipestat_irq_ack(dev_priv, iir, pipe_stats);
2012 
2013 		if (iir & (I915_LPE_PIPE_A_INTERRUPT |
2014 			   I915_LPE_PIPE_B_INTERRUPT |
2015 			   I915_LPE_PIPE_C_INTERRUPT))
2016 			intel_lpe_audio_irq_handler(dev_priv);
2017 
2018 		/*
2019 		 * VLV_IIR is single buffered, and reflects the level
2020 		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
2021 		 */
2022 		if (iir)
2023 			I915_WRITE(VLV_IIR, iir);
2024 
2025 		I915_WRITE(VLV_IER, ier);
2026 		I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
2027 		POSTING_READ(GEN8_MASTER_IRQ);
2028 
2029 		gen8_gt_irq_handler(dev_priv, gt_iir);
2030 
2031 		if (hotplug_status)
2032 			i9xx_hpd_irq_handler(dev_priv, hotplug_status);
2033 
2034 		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
2035 	} while (0);
2036 
2037 	enable_rpm_wakeref_asserts(dev_priv);
2038 
2039 	return ret;
2040 }
2041 
2042 static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
2043 				u32 hotplug_trigger,
2044 				const u32 hpd[HPD_NUM_PINS])
2045 {
2046 	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2047 
2048 	/*
2049 	 * Somehow the PCH doesn't seem to really ack the interrupt to the CPU
2050 	 * unless we touch the hotplug register, even if hotplug_trigger is
2051 	 * zero. Not acking leads to "The master control interrupt lied (SDE)!"
2052 	 * errors.
2053 	 */
2054 	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2055 	if (!hotplug_trigger) {
2056 		u32 mask = PORTA_HOTPLUG_STATUS_MASK |
2057 			PORTD_HOTPLUG_STATUS_MASK |
2058 			PORTC_HOTPLUG_STATUS_MASK |
2059 			PORTB_HOTPLUG_STATUS_MASK;
2060 		dig_hotplug_reg &= ~mask;
2061 	}
2062 
2063 	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2064 	if (!hotplug_trigger)
2065 		return;
2066 
2067 	intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
2068 			   dig_hotplug_reg, hpd,
2069 			   pch_port_hotplug_long_detect);
2070 
2071 	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2072 }
2073 
2074 static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
2075 {
2076 	int pipe;
2077 	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
2078 
2079 	ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ibx);
2080 
2081 	if (pch_iir & SDE_AUDIO_POWER_MASK) {
2082 		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
2083 			       SDE_AUDIO_POWER_SHIFT);
2084 		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
2085 				 port_name(port));
2086 	}
2087 
2088 	if (pch_iir & SDE_AUX_MASK)
2089 		dp_aux_irq_handler(dev_priv);
2090 
2091 	if (pch_iir & SDE_GMBUS)
2092 		gmbus_irq_handler(dev_priv);
2093 
2094 	if (pch_iir & SDE_AUDIO_HDCP_MASK)
2095 		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
2096 
2097 	if (pch_iir & SDE_AUDIO_TRANS_MASK)
2098 		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
2099 
2100 	if (pch_iir & SDE_POISON)
2101 		DRM_ERROR("PCH poison interrupt\n");
2102 
2103 	if (pch_iir & SDE_FDI_MASK)
2104 		for_each_pipe(dev_priv, pipe)
2105 			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
2106 					 pipe_name(pipe),
2107 					 I915_READ(FDI_RX_IIR(pipe)));
2108 
2109 	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
2110 		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
2111 
2112 	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
2113 		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
2114 
2115 	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
2116 		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);
2117 
2118 	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
2119 		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
2120 }
2121 
2122 static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
2123 {
2124 	u32 err_int = I915_READ(GEN7_ERR_INT);
2125 	enum pipe pipe;
2126 
2127 	if (err_int & ERR_INT_POISON)
2128 		DRM_ERROR("Poison interrupt\n");
2129 
2130 	for_each_pipe(dev_priv, pipe) {
2131 		if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
2132 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2133 
2134 		if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
2135 			if (IS_IVYBRIDGE(dev_priv))
2136 				ivb_pipe_crc_irq_handler(dev_priv, pipe);
2137 			else
2138 				hsw_pipe_crc_irq_handler(dev_priv, pipe);
2139 		}
2140 	}
2141 
2142 	I915_WRITE(GEN7_ERR_INT, err_int);
2143 }
2144 
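/*
 * South (PCH) error interrupts: report poison errors and per-transcoder FIFO
 * underruns, then write-clear SERR_INT.
 */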
2145 static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
2146 {
2147 	u32 serr_int = I915_READ(SERR_INT);
2148 
2149 	if (serr_int & SERR_INT_POISON)
2150 		DRM_ERROR("PCH poison interrupt\n");
2151 
2152 	if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
2153 		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);
2154 
2155 	if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
2156 		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
2157 
2158 	if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
2159 		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_C);
2160 
2161 	I915_WRITE(SERR_INT, serr_int);
2162 }
2163 
2164 static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
2165 {
2166 	int pipe;
2167 	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
2168 
2169 	ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_cpt);
2170 
2171 	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
2172 		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
2173 			       SDE_AUDIO_POWER_SHIFT_CPT);
2174 		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
2175 				 port_name(port));
2176 	}
2177 
2178 	if (pch_iir & SDE_AUX_MASK_CPT)
2179 		dp_aux_irq_handler(dev_priv);
2180 
2181 	if (pch_iir & SDE_GMBUS_CPT)
2182 		gmbus_irq_handler(dev_priv);
2183 
2184 	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
2185 		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
2186 
2187 	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
2188 		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
2189 
2190 	if (pch_iir & SDE_FDI_MASK_CPT)
2191 		for_each_pipe(dev_priv, pipe)
2192 			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
2193 					 pipe_name(pipe),
2194 					 I915_READ(FDI_RX_IIR(pipe)));
2195 
2196 	if (pch_iir & SDE_ERROR_CPT)
2197 		cpt_serr_int_handler(dev_priv);
2198 }
2199 
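/*
 * SPT/KBP south hotplug: ports A-D are reported via PCH_PORT_HOTPLUG while
 * port E has its own trigger bit and lives in PCH_PORT_HOTPLUG2.
 */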
2200 static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
2201 {
2202 	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
2203 		~SDE_PORTE_HOTPLUG_SPT;
2204 	u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
2205 	u32 pin_mask = 0, long_mask = 0;
2206 
2207 	if (hotplug_trigger) {
2208 		u32 dig_hotplug_reg;
2209 
2210 		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2211 		I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2212 
2213 		intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
2214 				   dig_hotplug_reg, hpd_spt,
2215 				   spt_port_hotplug_long_detect);
2216 	}
2217 
2218 	if (hotplug2_trigger) {
2219 		u32 dig_hotplug_reg;
2220 
2221 		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2);
2222 		I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg);
2223 
2224 		intel_get_hpd_pins(&pin_mask, &long_mask, hotplug2_trigger,
2225 				   dig_hotplug_reg, hpd_spt,
2226 				   spt_port_hotplug2_long_detect);
2227 	}
2228 
2229 	if (pin_mask)
2230 		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2231 
2232 	if (pch_iir & SDE_GMBUS_CPT)
2233 		gmbus_irq_handler(dev_priv);
2234 }
2235 
2236 static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv,
2237 				u32 hotplug_trigger,
2238 				const u32 hpd[HPD_NUM_PINS])
2239 {
2240 	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2241 
2242 	dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
2243 	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);
2244 
2245 	intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
2246 			   dig_hotplug_reg, hpd,
2247 			   ilk_port_hotplug_long_detect);
2248 
2249 	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2250 }
2251 
2252 static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
2253 				    u32 de_iir)
2254 {
2255 	enum pipe pipe;
2256 	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;
2257 
2258 	if (hotplug_trigger)
2259 		ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ilk);
2260 
2261 	if (de_iir & DE_AUX_CHANNEL_A)
2262 		dp_aux_irq_handler(dev_priv);
2263 
2264 	if (de_iir & DE_GSE)
2265 		intel_opregion_asle_intr(dev_priv);
2266 
2267 	if (de_iir & DE_POISON)
2268 		DRM_ERROR("Poison interrupt\n");
2269 
2270 	for_each_pipe(dev_priv, pipe) {
2271 		if (de_iir & DE_PIPE_VBLANK(pipe) &&
2272 		    intel_pipe_handle_vblank(dev_priv, pipe))
2273 			intel_check_page_flip(dev_priv, pipe);
2274 
2275 		if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
2276 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2277 
2278 		if (de_iir & DE_PIPE_CRC_DONE(pipe))
2279 			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
2280 
2281 		/* plane/pipes map 1:1 on ilk+ */
2282 		if (de_iir & DE_PLANE_FLIP_DONE(pipe))
2283 			intel_finish_page_flip_cs(dev_priv, pipe);
2284 	}
2285 
2286 	/* check event from PCH */
2287 	if (de_iir & DE_PCH_EVENT) {
2288 		u32 pch_iir = I915_READ(SDEIIR);
2289 
2290 		if (HAS_PCH_CPT(dev_priv))
2291 			cpt_irq_handler(dev_priv, pch_iir);
2292 		else
2293 			ibx_irq_handler(dev_priv, pch_iir);
2294 
2295 		/* should clear the PCH hotplug event before clearing the CPU irq */
2296 		I915_WRITE(SDEIIR, pch_iir);
2297 	}
2298 
2299 	if (IS_GEN5(dev_priv) && de_iir & DE_PCU_EVENT)
2300 		ironlake_rps_change_irq_handler(dev_priv);
2301 }
2302 
2303 static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
2304 				    u32 de_iir)
2305 {
2306 	enum pipe pipe;
2307 	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;
2308 
2309 	if (hotplug_trigger)
2310 		ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ivb);
2311 
2312 	if (de_iir & DE_ERR_INT_IVB)
2313 		ivb_err_int_handler(dev_priv);
2314 
2315 	if (de_iir & DE_AUX_CHANNEL_A_IVB)
2316 		dp_aux_irq_handler(dev_priv);
2317 
2318 	if (de_iir & DE_GSE_IVB)
2319 		intel_opregion_asle_intr(dev_priv);
2320 
2321 	for_each_pipe(dev_priv, pipe) {
2322 		if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) &&
2323 		    intel_pipe_handle_vblank(dev_priv, pipe))
2324 			intel_check_page_flip(dev_priv, pipe);
2325 
2326 		/* plane/pipes map 1:1 on ilk+ */
2327 		if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe))
2328 			intel_finish_page_flip_cs(dev_priv, pipe);
2329 	}
2330 
2331 	/* check event from PCH */
2332 	if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) {
2333 		u32 pch_iir = I915_READ(SDEIIR);
2334 
2335 		cpt_irq_handler(dev_priv, pch_iir);
2336 
2337 		/* clear the PCH hotplug event before clearing the CPU irq */
2338 		I915_WRITE(SDEIIR, pch_iir);
2339 	}
2340 }
2341 
2342 /*
2343  * To handle irqs with the minimum potential races with fresh interrupts, we:
2344  * 1 - Disable Master Interrupt Control.
2345  * 2 - Find the source(s) of the interrupt.
2346  * 3 - Clear the Interrupt Identity bits (IIR).
2347  * 4 - Process the interrupt(s) that had bits set in the IIRs.
2348  * 5 - Re-enable Master Interrupt Control.
2349  */
2350 static irqreturn_t ironlake_irq_handler(int irq, void *arg)
2351 {
2352 	struct drm_device *dev = arg;
2353 	struct drm_i915_private *dev_priv = to_i915(dev);
2354 	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
2355 	irqreturn_t ret = IRQ_NONE;
2356 
2357 	if (!intel_irqs_enabled(dev_priv))
2358 		return IRQ_NONE;
2359 
2360 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
2361 	disable_rpm_wakeref_asserts(dev_priv);
2362 
2363 	/* disable master interrupt before clearing iir  */
2364 	de_ier = I915_READ(DEIER);
2365 	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
2366 	POSTING_READ(DEIER);
2367 
2368 	/* Disable south interrupts. We'll only write to SDEIIR once, so further
2369 	 * interrupts will be stored on its back queue, and then we'll be
2370 	 * able to process them after we restore SDEIER (as soon as we restore
2371 	 * it, we'll get an interrupt if SDEIIR still has something to process
2372 	 * due to its back queue). */
2373 	if (!HAS_PCH_NOP(dev_priv)) {
2374 		sde_ier = I915_READ(SDEIER);
2375 		I915_WRITE(SDEIER, 0);
2376 		POSTING_READ(SDEIER);
2377 	}
2378 
2379 	/* Find, clear, then process each source of interrupt */
2380 
2381 	gt_iir = I915_READ(GTIIR);
2382 	if (gt_iir) {
2383 		I915_WRITE(GTIIR, gt_iir);
2384 		ret = IRQ_HANDLED;
2385 		if (INTEL_GEN(dev_priv) >= 6)
2386 			snb_gt_irq_handler(dev_priv, gt_iir);
2387 		else
2388 			ilk_gt_irq_handler(dev_priv, gt_iir);
2389 	}
2390 
2391 	de_iir = I915_READ(DEIIR);
2392 	if (de_iir) {
2393 		I915_WRITE(DEIIR, de_iir);
2394 		ret = IRQ_HANDLED;
2395 		if (INTEL_GEN(dev_priv) >= 7)
2396 			ivb_display_irq_handler(dev_priv, de_iir);
2397 		else
2398 			ilk_display_irq_handler(dev_priv, de_iir);
2399 	}
2400 
2401 	if (INTEL_GEN(dev_priv) >= 6) {
2402 		u32 pm_iir = I915_READ(GEN6_PMIIR);
2403 		if (pm_iir) {
2404 			I915_WRITE(GEN6_PMIIR, pm_iir);
2405 			ret = IRQ_HANDLED;
2406 			gen6_rps_irq_handler(dev_priv, pm_iir);
2407 		}
2408 	}
2409 
2410 	I915_WRITE(DEIER, de_ier);
2411 	POSTING_READ(DEIER);
2412 	if (!HAS_PCH_NOP(dev_priv)) {
2413 		I915_WRITE(SDEIER, sde_ier);
2414 		POSTING_READ(SDEIER);
2415 	}
2416 
2417 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
2418 	enable_rpm_wakeref_asserts(dev_priv);
2419 
2420 	return ret;
2421 }
2422 
2423 static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv,
2424 				u32 hotplug_trigger,
2425 				const u32 hpd[HPD_NUM_PINS])
2426 {
2427 	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2428 
2429 	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2430 	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2431 
2432 	intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
2433 			   dig_hotplug_reg, hpd,
2434 			   bxt_port_hotplug_long_detect);
2435 
2436 	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2437 }
2438 
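/*
 * Gen8+ display engine interrupts: DE misc, DE port (AUX, HPD, GMBUS), the
 * per-pipe sources and, on PCH-split parts, the south display engine. Each
 * IIR is acked before its bits are processed.
 */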
2439 static irqreturn_t
2440 gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
2441 {
2442 	irqreturn_t ret = IRQ_NONE;
2443 	u32 iir;
2444 	enum pipe pipe;
2445 
2446 	if (master_ctl & GEN8_DE_MISC_IRQ) {
2447 		iir = I915_READ(GEN8_DE_MISC_IIR);
2448 		if (iir) {
2449 			I915_WRITE(GEN8_DE_MISC_IIR, iir);
2450 			ret = IRQ_HANDLED;
2451 			if (iir & GEN8_DE_MISC_GSE)
2452 				intel_opregion_asle_intr(dev_priv);
2453 			else
2454 				DRM_ERROR("Unexpected DE Misc interrupt\n");
2455 		} else {
2456 			DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
2457 		}
2458 	}
2459 
2460 	if (master_ctl & GEN8_DE_PORT_IRQ) {
2461 		iir = I915_READ(GEN8_DE_PORT_IIR);
2462 		if (iir) {
2463 			u32 tmp_mask;
2464 			bool found = false;
2465 
2466 			I915_WRITE(GEN8_DE_PORT_IIR, iir);
2467 			ret = IRQ_HANDLED;
2468 
2469 			tmp_mask = GEN8_AUX_CHANNEL_A;
2470 			if (INTEL_INFO(dev_priv)->gen >= 9)
2471 				tmp_mask |= GEN9_AUX_CHANNEL_B |
2472 					    GEN9_AUX_CHANNEL_C |
2473 					    GEN9_AUX_CHANNEL_D;
2474 
2475 			if (iir & tmp_mask) {
2476 				dp_aux_irq_handler(dev_priv);
2477 				found = true;
2478 			}
2479 
2480 			if (IS_GEN9_LP(dev_priv)) {
2481 				tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK;
2482 				if (tmp_mask) {
2483 					bxt_hpd_irq_handler(dev_priv, tmp_mask,
2484 							    hpd_bxt);
2485 					found = true;
2486 				}
2487 			} else if (IS_BROADWELL(dev_priv)) {
2488 				tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG;
2489 				if (tmp_mask) {
2490 					ilk_hpd_irq_handler(dev_priv,
2491 							    tmp_mask, hpd_bdw);
2492 					found = true;
2493 				}
2494 			}
2495 
2496 			if (IS_GEN9_LP(dev_priv) && (iir & BXT_DE_PORT_GMBUS)) {
2497 				gmbus_irq_handler(dev_priv);
2498 				found = true;
2499 			}
2500 
2501 			if (!found)
2502 				DRM_ERROR("Unexpected DE Port interrupt\n");
2503 		} else {
2504 			DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
2505 		}
2506 	}
2507 
2508 	for_each_pipe(dev_priv, pipe) {
2509 		u32 flip_done, fault_errors;
2510 
2511 		if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
2512 			continue;
2513 
2514 		iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
2515 		if (!iir) {
2516 			DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
2517 			continue;
2518 		}
2519 
2520 		ret = IRQ_HANDLED;
2521 		I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir);
2522 
2523 		if (iir & GEN8_PIPE_VBLANK &&
2524 		    intel_pipe_handle_vblank(dev_priv, pipe))
2525 			intel_check_page_flip(dev_priv, pipe);
2526 
2527 		flip_done = iir;
2528 		if (INTEL_INFO(dev_priv)->gen >= 9)
2529 			flip_done &= GEN9_PIPE_PLANE1_FLIP_DONE;
2530 		else
2531 			flip_done &= GEN8_PIPE_PRIMARY_FLIP_DONE;
2532 
2533 		if (flip_done)
2534 			intel_finish_page_flip_cs(dev_priv, pipe);
2535 
2536 		if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
2537 			hsw_pipe_crc_irq_handler(dev_priv, pipe);
2538 
2539 		if (iir & GEN8_PIPE_FIFO_UNDERRUN)
2540 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2541 
2542 		fault_errors = iir;
2543 		if (INTEL_INFO(dev_priv)->gen >= 9)
2544 			fault_errors &= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
2545 		else
2546 			fault_errors &= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
2547 
2548 		if (fault_errors)
2549 			DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
2550 				  pipe_name(pipe),
2551 				  fault_errors);
2552 	}
2553 
2554 	if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) &&
2555 	    master_ctl & GEN8_DE_PCH_IRQ) {
2556 		/*
2557 		 * FIXME(BDW): Assume for now that the new interrupt handling
2558 		 * scheme also closed the SDE interrupt handling race we've seen
2559 		 * on older pch-split platforms. But this needs testing.
2560 		 */
2561 		iir = I915_READ(SDEIIR);
2562 		if (iir) {
2563 			I915_WRITE(SDEIIR, iir);
2564 			ret = IRQ_HANDLED;
2565 
2566 			if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv))
2567 				spt_irq_handler(dev_priv, iir);
2568 			else
2569 				cpt_irq_handler(dev_priv, iir);
2570 		} else {
2571 			/*
2572 			 * Like on previous PCH there seems to be something
2573 			 * fishy going on with forwarding PCH interrupts.
2574 			 */
2575 			DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n");
2576 		}
2577 	}
2578 
2579 	return ret;
2580 }
2581 
2582 static irqreturn_t gen8_irq_handler(int irq, void *arg)
2583 {
2584 	struct drm_device *dev = arg;
2585 	struct drm_i915_private *dev_priv = to_i915(dev);
2586 	u32 master_ctl;
2587 	u32 gt_iir[4] = {};
2588 	irqreturn_t ret;
2589 
2590 	if (!intel_irqs_enabled(dev_priv))
2591 		return IRQ_NONE;
2592 
2593 	master_ctl = I915_READ_FW(GEN8_MASTER_IRQ);
2594 	master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
2595 	if (!master_ctl)
2596 		return IRQ_NONE;
2597 
2598 	I915_WRITE_FW(GEN8_MASTER_IRQ, 0);
2599 
2600 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
2601 	disable_rpm_wakeref_asserts(dev_priv);
2602 
2603 	/* Find, clear, then process each source of interrupt */
2604 	ret = gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);
2605 	gen8_gt_irq_handler(dev_priv, gt_iir);
2606 	ret |= gen8_de_irq_handler(dev_priv, master_ctl);
2607 
2608 	I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
2609 	POSTING_READ_FW(GEN8_MASTER_IRQ);
2610 
2611 	enable_rpm_wakeref_asserts(dev_priv);
2612 
2613 	return ret;
2614 }
2615 
2616 static void i915_error_wake_up(struct drm_i915_private *dev_priv)
2617 {
2618 	/*
2619 	 * Notify all waiters for GPU completion events that reset state has
2620 	 * been changed, and that they need to restart their wait after
2621 	 * checking for potential errors (and bail out to drop locks if there is
2622 	 * a gpu reset pending so that i915_reset_and_wakeup can acquire them).
2623 	 */
2624 
2625 	/* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
2626 	wake_up_all(&dev_priv->gpu_error.wait_queue);
2627 
2628 	/* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
2629 	wake_up_all(&dev_priv->pending_flip_queue);
2630 }
2631 
2632 /**
2633  * i915_reset_and_wakeup - do process context error handling work
2634  * @dev_priv: i915 device private
2635  *
2636  * Fire an error uevent so userspace can see that a hang or error
2637  * was detected.
2638  */
2639 static void i915_reset_and_wakeup(struct drm_i915_private *dev_priv)
2640 {
2641 	struct kobject *kobj = &dev_priv->drm.primary->kdev->kobj;
2642 	char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
2643 	char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
2644 	char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
2645 
2646 	kobject_uevent_env(kobj, KOBJ_CHANGE, error_event);
2647 
2648 	DRM_DEBUG_DRIVER("resetting chip\n");
2649 	kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event);
2650 
2651 	/*
2652 	 * In most cases it's guaranteed that we get here with an RPM
2653 	 * reference held, for example because there is a pending GPU
2654 	 * request that won't finish until the reset is done. This
2655 	 * isn't the case at least when we get here by doing a
2656 	 * simulated reset via debugfs, so get an RPM reference.
2657 	 */
2658 	intel_runtime_pm_get(dev_priv);
2659 	intel_prepare_reset(dev_priv);
2660 
2661 	do {
2662 		/*
2663 		 * All state reset _must_ be completed before we update the
2664 		 * reset counter, for otherwise waiters might miss the reset
2665 		 * pending state and not properly drop locks, resulting in
2666 		 * deadlocks with the reset work.
2667 		 */
2668 		if (mutex_trylock(&dev_priv->drm.struct_mutex)) {
2669 			i915_reset(dev_priv);
2670 			mutex_unlock(&dev_priv->drm.struct_mutex);
2671 		}
2672 
2673 		/* We need to wait for anyone holding the lock to wakeup */
2674 	} while (wait_on_bit_timeout(&dev_priv->gpu_error.flags,
2675 				     I915_RESET_IN_PROGRESS,
2676 				     TASK_UNINTERRUPTIBLE,
2677 				     HZ));
2678 
2679 	intel_finish_reset(dev_priv);
2680 	intel_runtime_pm_put(dev_priv);
2681 
2682 	if (!test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
2683 		kobject_uevent_env(kobj,
2684 				   KOBJ_CHANGE, reset_done_event);
2685 
2686 	/*
2687 	 * Note: The wake_up also serves as a memory barrier so that
2688 	 * waiters see the updated value of the dev_priv->gpu_error.
2689 	 */
2690 	wake_up_all(&dev_priv->gpu_error.reset_queue);
2691 }
2692 
2693 static inline void
2694 i915_err_print_instdone(struct drm_i915_private *dev_priv,
2695 			struct intel_instdone *instdone)
2696 {
2697 	int slice;
2698 	int subslice;
2699 
2700 	pr_err("  INSTDONE: 0x%08x\n", instdone->instdone);
2701 
2702 	if (INTEL_GEN(dev_priv) <= 3)
2703 		return;
2704 
2705 	pr_err("  SC_INSTDONE: 0x%08x\n", instdone->slice_common);
2706 
2707 	if (INTEL_GEN(dev_priv) <= 6)
2708 		return;
2709 
2710 	for_each_instdone_slice_subslice(dev_priv, slice, subslice)
2711 		pr_err("  SAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
2712 		       slice, subslice, instdone->sampler[slice][subslice]);
2713 
2714 	for_each_instdone_slice_subslice(dev_priv, slice, subslice)
2715 		pr_err("  ROW_INSTDONE[%d][%d]: 0x%08x\n",
2716 		       slice, subslice, instdone->row[slice][subslice]);
2717 }
2718 
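/*
 * Write-clear the latched page table and instruction error registers, and
 * mask any EIR bits that remain stuck so they stop re-raising the error
 * interrupt.
 */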
2719 static void i915_clear_error_registers(struct drm_i915_private *dev_priv)
2720 {
2721 	u32 eir;
2722 
2723 	if (!IS_GEN2(dev_priv))
2724 		I915_WRITE(PGTBL_ER, I915_READ(PGTBL_ER));
2725 
2726 	if (INTEL_GEN(dev_priv) < 4)
2727 		I915_WRITE(IPEIR, I915_READ(IPEIR));
2728 	else
2729 		I915_WRITE(IPEIR_I965, I915_READ(IPEIR_I965));
2730 
2731 	I915_WRITE(EIR, I915_READ(EIR));
2732 	eir = I915_READ(EIR);
2733 	if (eir) {
2734 		/*
2735 		 * some errors might have become stuck,
2736 		 * mask them.
2737 		 */
2738 		DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masking\n", eir);
2739 		I915_WRITE(EMR, I915_READ(EMR) | eir);
2740 		I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2741 	}
2742 }
2743 
2744 /**
2745  * i915_handle_error - handle a gpu error
2746  * @dev_priv: i915 device private
2747  * @engine_mask: mask representing engines that are hung
2748  * @fmt: Error message format string
2749  *
2750  * Do some basic checking of register state at error time and
2751  * dump it to the syslog.  Also call i915_capture_error_state() to make
2752  * sure we get a record and make it available in debugfs.  Fire a uevent
2753  * so userspace knows something bad happened (should trigger collection
2754  * of a ring dump etc.).
2755  */
2756 void i915_handle_error(struct drm_i915_private *dev_priv,
2757 		       u32 engine_mask,
2758 		       const char *fmt, ...)
2759 {
2760 	va_list args;
2761 	char error_msg[80];
2762 
2763 	va_start(args, fmt);
2764 	vscnprintf(error_msg, sizeof(error_msg), fmt, args);
2765 	va_end(args);
2766 
2767 	i915_capture_error_state(dev_priv, engine_mask, error_msg);
2768 	i915_clear_error_registers(dev_priv);
2769 
2770 	if (!engine_mask)
2771 		return;
2772 
2773 	if (test_and_set_bit(I915_RESET_IN_PROGRESS,
2774 			     &dev_priv->gpu_error.flags))
2775 		return;
2776 
2777 	/*
2778 	 * Wakeup waiting processes so that the reset function
2779 	 * i915_reset_and_wakeup doesn't deadlock trying to grab
2780 	 * various locks. By bumping the reset counter first, the woken
2781 	 * processes will see a reset in progress and back off,
2782 	 * releasing their locks and then wait for the reset completion.
2783 	 * We must do this for _all_ gpu waiters that might hold locks
2784 	 * that the reset work needs to acquire.
2785 	 *
2786 	 * Note: The wake_up also provides a memory barrier to ensure that the
2787 	 * waiters see the updated value of the reset flags.
2788 	 */
2789 	i915_error_wake_up(dev_priv);
2790 
2791 	i915_reset_and_wakeup(dev_priv);
2792 }
2793 
2794 /* Called from drm generic code, passed 'crtc' which
2795  * we use as a pipe index
2796  */
2797 static int i8xx_enable_vblank(struct drm_device *dev, unsigned int pipe)
2798 {
2799 	struct drm_i915_private *dev_priv = to_i915(dev);
2800 	unsigned long irqflags;
2801 
2802 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2803 	i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
2804 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2805 
2806 	return 0;
2807 }
2808 
2809 static int i965_enable_vblank(struct drm_device *dev, unsigned int pipe)
2810 {
2811 	struct drm_i915_private *dev_priv = to_i915(dev);
2812 	unsigned long irqflags;
2813 
2814 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2815 	i915_enable_pipestat(dev_priv, pipe,
2816 			     PIPE_START_VBLANK_INTERRUPT_STATUS);
2817 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2818 
2819 	return 0;
2820 }
2821 
2822 static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
2823 {
2824 	struct drm_i915_private *dev_priv = to_i915(dev);
2825 	unsigned long irqflags;
2826 	uint32_t bit = INTEL_GEN(dev_priv) >= 7 ?
2827 		DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
2828 
2829 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2830 	ilk_enable_display_irq(dev_priv, bit);
2831 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2832 
2833 	return 0;
2834 }
2835 
2836 static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe)
2837 {
2838 	struct drm_i915_private *dev_priv = to_i915(dev);
2839 	unsigned long irqflags;
2840 
2841 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2842 	bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
2843 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2844 
2845 	return 0;
2846 }
2847 
2848 /* Called from drm generic code, passed 'crtc' which
2849  * we use as a pipe index
2850  */
2851 static void i8xx_disable_vblank(struct drm_device *dev, unsigned int pipe)
2852 {
2853 	struct drm_i915_private *dev_priv = to_i915(dev);
2854 	unsigned long irqflags;
2855 
2856 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2857 	i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
2858 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2859 }
2860 
2861 static void i965_disable_vblank(struct drm_device *dev, unsigned int pipe)
2862 {
2863 	struct drm_i915_private *dev_priv = to_i915(dev);
2864 	unsigned long irqflags;
2865 
2866 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2867 	i915_disable_pipestat(dev_priv, pipe,
2868 			      PIPE_START_VBLANK_INTERRUPT_STATUS);
2869 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2870 }
2871 
2872 static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe)
2873 {
2874 	struct drm_i915_private *dev_priv = to_i915(dev);
2875 	unsigned long irqflags;
2876 	uint32_t bit = INTEL_GEN(dev_priv) >= 7 ?
2877 		DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
2878 
2879 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2880 	ilk_disable_display_irq(dev_priv, bit);
2881 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2882 }
2883 
2884 static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
2885 {
2886 	struct drm_i915_private *dev_priv = to_i915(dev);
2887 	unsigned long irqflags;
2888 
2889 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2890 	bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
2891 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2892 }
2893 
2894 static void ibx_irq_reset(struct drm_i915_private *dev_priv)
2895 {
2896 	if (HAS_PCH_NOP(dev_priv))
2897 		return;
2898 
2899 	GEN5_IRQ_RESET(SDE);
2900 
2901 	if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
2902 		I915_WRITE(SERR_INT, 0xffffffff);
2903 }
2904 
2905 /*
2906  * SDEIER is also touched by the interrupt handler to work around missed PCH
2907  * interrupts. Hence we can't update it after the interrupt handler is enabled -
2908  * instead we unconditionally enable all PCH interrupt sources here, but then
2909  * only unmask them as needed with SDEIMR.
2910  *
2911  * This function needs to be called before interrupts are enabled.
2912  */
2913 static void ibx_irq_pre_postinstall(struct drm_device *dev)
2914 {
2915 	struct drm_i915_private *dev_priv = to_i915(dev);
2916 
2917 	if (HAS_PCH_NOP(dev_priv))
2918 		return;
2919 
2920 	WARN_ON(I915_READ(SDEIER) != 0);
2921 	I915_WRITE(SDEIER, 0xffffffff);
2922 	POSTING_READ(SDEIER);
2923 }
2924 
2925 static void gen5_gt_irq_reset(struct drm_i915_private *dev_priv)
2926 {
2927 	GEN5_IRQ_RESET(GT);
2928 	if (INTEL_GEN(dev_priv) >= 6)
2929 		GEN5_IRQ_RESET(GEN6_PM);
2930 }
2931 
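/*
 * Reset all VLV/CHV display interrupt state: DPINVGTT status, hotplug
 * interrupt enables and status, the per-pipe PIPESTAT registers and the
 * VLV_IMR/IER/IIR trio.
 */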
2932 static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
2933 {
2934 	enum pipe pipe;
2935 
2936 	if (IS_CHERRYVIEW(dev_priv))
2937 		I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
2938 	else
2939 		I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
2940 
2941 	i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
2942 	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2943 
2944 	for_each_pipe(dev_priv, pipe) {
2945 		I915_WRITE(PIPESTAT(pipe),
2946 			   PIPE_FIFO_UNDERRUN_STATUS |
2947 			   PIPESTAT_INT_STATUS_MASK);
2948 		dev_priv->pipestat_irq_mask[pipe] = 0;
2949 	}
2950 
2951 	GEN5_IRQ_RESET(VLV_);
2952 	dev_priv->irq_mask = ~0;
2953 }
2954 
2955 static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
2956 {
2957 	u32 pipestat_mask;
2958 	u32 enable_mask;
2959 	enum pipe pipe;
2960 	u32 val;
2961 
2962 	pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
2963 			PIPE_CRC_DONE_INTERRUPT_STATUS;
2964 
2965 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
2966 	for_each_pipe(dev_priv, pipe)
2967 		i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
2968 
2969 	enable_mask = I915_DISPLAY_PORT_INTERRUPT |
2970 		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2971 		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
2972 	if (IS_CHERRYVIEW(dev_priv))
2973 		enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
2974 
2975 	WARN_ON(dev_priv->irq_mask != ~0);
2976 
2977 	val = (I915_LPE_PIPE_A_INTERRUPT |
2978 		I915_LPE_PIPE_B_INTERRUPT |
2979 		I915_LPE_PIPE_C_INTERRUPT);
2980 
2981 	enable_mask |= val;
2982 
2983 	dev_priv->irq_mask = ~enable_mask;
2984 
2985 	GEN5_IRQ_INIT(VLV_, dev_priv->irq_mask, enable_mask);
2986 }
2987 
2988 /* drm_dma.h hooks
2989  */
2990 static void ironlake_irq_reset(struct drm_device *dev)
2991 {
2992 	struct drm_i915_private *dev_priv = to_i915(dev);
2993 
2994 	I915_WRITE(HWSTAM, 0xffffffff);
2995 
2996 	GEN5_IRQ_RESET(DE);
2997 	if (IS_GEN7(dev_priv))
2998 		I915_WRITE(GEN7_ERR_INT, 0xffffffff);
2999 
3000 	gen5_gt_irq_reset(dev_priv);
3001 
3002 	ibx_irq_reset(dev_priv);
3003 }
3004 
3005 static void valleyview_irq_preinstall(struct drm_device *dev)
3006 {
3007 	struct drm_i915_private *dev_priv = to_i915(dev);
3008 
3009 	I915_WRITE(VLV_MASTER_IER, 0);
3010 	POSTING_READ(VLV_MASTER_IER);
3011 
3012 	gen5_gt_irq_reset(dev_priv);
3013 
3014 	spin_lock_irq(&dev_priv->irq_lock);
3015 	if (dev_priv->display_irqs_enabled)
3016 		vlv_display_irq_reset(dev_priv);
3017 	spin_unlock_irq(&dev_priv->irq_lock);
3018 }
3019 
3020 static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
3021 {
3022 	GEN8_IRQ_RESET_NDX(GT, 0);
3023 	GEN8_IRQ_RESET_NDX(GT, 1);
3024 	GEN8_IRQ_RESET_NDX(GT, 2);
3025 	GEN8_IRQ_RESET_NDX(GT, 3);
3026 }
3027 
3028 static void gen8_irq_reset(struct drm_device *dev)
3029 {
3030 	struct drm_i915_private *dev_priv = to_i915(dev);
3031 	int pipe;
3032 
3033 	I915_WRITE(GEN8_MASTER_IRQ, 0);
3034 	POSTING_READ(GEN8_MASTER_IRQ);
3035 
3036 	gen8_gt_irq_reset(dev_priv);
3037 
3038 	for_each_pipe(dev_priv, pipe)
3039 		if (intel_display_power_is_enabled(dev_priv,
3040 						   POWER_DOMAIN_PIPE(pipe)))
3041 			GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
3042 
3043 	GEN5_IRQ_RESET(GEN8_DE_PORT_);
3044 	GEN5_IRQ_RESET(GEN8_DE_MISC_);
3045 	GEN5_IRQ_RESET(GEN8_PCU_);
3046 
3047 	if (HAS_PCH_SPLIT(dev_priv))
3048 		ibx_irq_reset(dev_priv);
3049 }
3050 
3051 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
3052 				     unsigned int pipe_mask)
3053 {
3054 	uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
3055 	enum pipe pipe;
3056 
3057 	spin_lock_irq(&dev_priv->irq_lock);
3058 	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
3059 		GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
3060 				  dev_priv->de_irq_mask[pipe],
3061 				  ~dev_priv->de_irq_mask[pipe] | extra_ier);
3062 	spin_unlock_irq(&dev_priv->irq_lock);
3063 }
3064 
3065 void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
3066 				     unsigned int pipe_mask)
3067 {
3068 	enum pipe pipe;
3069 
3070 	spin_lock_irq(&dev_priv->irq_lock);
3071 	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
3072 		GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
3073 	spin_unlock_irq(&dev_priv->irq_lock);
3074 
3075 	/* make sure we're done processing display irqs */
3076 	synchronize_irq(dev_priv->drm.irq);
3077 }
3078 
3079 static void cherryview_irq_preinstall(struct drm_device *dev)
3080 {
3081 	struct drm_i915_private *dev_priv = to_i915(dev);
3082 
3083 	I915_WRITE(GEN8_MASTER_IRQ, 0);
3084 	POSTING_READ(GEN8_MASTER_IRQ);
3085 
3086 	gen8_gt_irq_reset(dev_priv);
3087 
3088 	GEN5_IRQ_RESET(GEN8_PCU_);
3089 
3090 	spin_lock_irq(&dev_priv->irq_lock);
3091 	if (dev_priv->display_irqs_enabled)
3092 		vlv_display_irq_reset(dev_priv);
3093 	spin_unlock_irq(&dev_priv->irq_lock);
3094 }
3095 
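/*
 * Build the mask of hotplug trigger bits for every encoder whose HPD pin is
 * currently enabled.
 */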
3096 static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv,
3097 				  const u32 hpd[HPD_NUM_PINS])
3098 {
3099 	struct intel_encoder *encoder;
3100 	u32 enabled_irqs = 0;
3101 
3102 	for_each_intel_encoder(&dev_priv->drm, encoder)
3103 		if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
3104 			enabled_irqs |= hpd[encoder->hpd_pin];
3105 
3106 	return enabled_irqs;
3107 }
3108 
3109 static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
3110 {
3111 	u32 hotplug_irqs, hotplug, enabled_irqs;
3112 
3113 	if (HAS_PCH_IBX(dev_priv)) {
3114 		hotplug_irqs = SDE_HOTPLUG_MASK;
3115 		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ibx);
3116 	} else {
3117 		hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
3118 		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_cpt);
3119 	}
3120 
3121 	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3122 
3123 	/*
3124 	 * Enable digital hotplug on the PCH, and configure the DP short pulse
3125 	 * duration to 2ms (which is the minimum in the Display Port spec).
3126 	 * The pulse duration bits are reserved on LPT+.
3127 	 */
3128 	hotplug = I915_READ(PCH_PORT_HOTPLUG);
3129 	hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
3130 	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
3131 	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
3132 	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
3133 	/*
3134 	 * When CPU and PCH are on the same package, port A
3135 	 * HPD must be enabled in both north and south.
3136 	 */
3137 	if (HAS_PCH_LPT_LP(dev_priv))
3138 		hotplug |= PORTA_HOTPLUG_ENABLE;
3139 	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3140 }
3141 
3142 static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv)
3143 {
3144 	u32 hotplug;
3145 
3146 	/* Enable digital hotplug on the PCH */
3147 	hotplug = I915_READ(PCH_PORT_HOTPLUG);
3148 	hotplug |= PORTA_HOTPLUG_ENABLE |
3149 		   PORTB_HOTPLUG_ENABLE |
3150 		   PORTC_HOTPLUG_ENABLE |
3151 		   PORTD_HOTPLUG_ENABLE;
3152 	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3153 
3154 	hotplug = I915_READ(PCH_PORT_HOTPLUG2);
3155 	hotplug |= PORTE_HOTPLUG_ENABLE;
3156 	I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
3157 }
3158 
3159 static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
3160 {
3161 	u32 hotplug_irqs, enabled_irqs;
3162 
3163 	hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
3164 	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_spt);
3165 
3166 	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3167 
3168 	spt_hpd_detection_setup(dev_priv);
3169 }
3170 
3171 static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
3172 {
3173 	u32 hotplug_irqs, hotplug, enabled_irqs;
3174 
3175 	if (INTEL_GEN(dev_priv) >= 8) {
3176 		hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG;
3177 		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bdw);
3178 
3179 		bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3180 	} else if (INTEL_GEN(dev_priv) >= 7) {
3181 		hotplug_irqs = DE_DP_A_HOTPLUG_IVB;
3182 		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ivb);
3183 
3184 		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
3185 	} else {
3186 		hotplug_irqs = DE_DP_A_HOTPLUG;
3187 		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ilk);
3188 
3189 		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
3190 	}
3191 
3192 	/*
3193 	 * Enable digital hotplug on the CPU, and configure the DP short pulse
3194 	 * duration to 2ms (which is the minimum in the Display Port spec).
3195 	 * The pulse duration bits are reserved on HSW+.
3196 	 */
3197 	hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
3198 	hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK;
3199 	hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE | DIGITAL_PORTA_PULSE_DURATION_2ms;
3200 	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
3201 
3202 	ibx_hpd_irq_setup(dev_priv);
3203 }
3204 
3205 static void __bxt_hpd_detection_setup(struct drm_i915_private *dev_priv,
3206 				      u32 enabled_irqs)
3207 {
3208 	u32 hotplug;
3209 
3210 	hotplug = I915_READ(PCH_PORT_HOTPLUG);
3211 	hotplug |= PORTA_HOTPLUG_ENABLE |
3212 		   PORTB_HOTPLUG_ENABLE |
3213 		   PORTC_HOTPLUG_ENABLE;
3214 
3215 	DRM_DEBUG_KMS("Invert bit setting: hp_ctl:%x hp_port:%x\n",
3216 		      hotplug, enabled_irqs);
3217 	hotplug &= ~BXT_DDI_HPD_INVERT_MASK;
3218 
3219 	/*
3220 	 * For BXT the invert bit has to be set based on the AOB design
3221 	 * of the HPD detection logic; update it based on the VBT fields.
3222 	 */
3223 	if ((enabled_irqs & BXT_DE_PORT_HP_DDIA) &&
3224 	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_A))
3225 		hotplug |= BXT_DDIA_HPD_INVERT;
3226 	if ((enabled_irqs & BXT_DE_PORT_HP_DDIB) &&
3227 	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_B))
3228 		hotplug |= BXT_DDIB_HPD_INVERT;
3229 	if ((enabled_irqs & BXT_DE_PORT_HP_DDIC) &&
3230 	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_C))
3231 		hotplug |= BXT_DDIC_HPD_INVERT;
3232 
3233 	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3234 }
3235 
3236 static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv)
3237 {
3238 	__bxt_hpd_detection_setup(dev_priv, BXT_DE_PORT_HOTPLUG_MASK);
3239 }
3240 
3241 static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
3242 {
3243 	u32 hotplug_irqs, enabled_irqs;
3244 
3245 	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bxt);
3246 	hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK;
3247 
3248 	bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3249 
3250 	__bxt_hpd_detection_setup(dev_priv, enabled_irqs);
3251 }
3252 
3253 static void ibx_irq_postinstall(struct drm_device *dev)
3254 {
3255 	struct drm_i915_private *dev_priv = to_i915(dev);
3256 	u32 mask;
3257 
3258 	if (HAS_PCH_NOP(dev_priv))
3259 		return;
3260 
3261 	if (HAS_PCH_IBX(dev_priv))
3262 		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
3263 	else
3264 		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
3265 
3266 	gen5_assert_iir_is_zero(dev_priv, SDEIIR);
3267 	I915_WRITE(SDEIMR, ~mask);
3268 
3269 	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
3270 	    HAS_PCH_LPT(dev_priv))
3271 		; /* TODO: Enable HPD detection on older PCH platforms too */
3272 	else
3273 		spt_hpd_detection_setup(dev_priv);
3274 }
3275 
3276 static void gen5_gt_irq_postinstall(struct drm_device *dev)
3277 {
3278 	struct drm_i915_private *dev_priv = to_i915(dev);
3279 	u32 pm_irqs, gt_irqs;
3280 
3281 	pm_irqs = gt_irqs = 0;
3282 
3283 	dev_priv->gt_irq_mask = ~0;
3284 	if (HAS_L3_DPF(dev_priv)) {
3285 		/* L3 parity interrupt is always unmasked. */
3286 		dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev_priv);
3287 		gt_irqs |= GT_PARITY_ERROR(dev_priv);
3288 	}
3289 
3290 	gt_irqs |= GT_RENDER_USER_INTERRUPT;
3291 	if (IS_GEN5(dev_priv)) {
3292 		gt_irqs |= ILK_BSD_USER_INTERRUPT;
3293 	} else {
3294 		gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
3295 	}
3296 
3297 	GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
3298 
3299 	if (INTEL_GEN(dev_priv) >= 6) {
3300 		/*
3301 		 * RPS interrupts will get enabled/disabled on demand when RPS
3302 		 * itself is enabled/disabled.
3303 		 */
3304 		if (HAS_VEBOX(dev_priv)) {
3305 			pm_irqs |= PM_VEBOX_USER_INTERRUPT;
3306 			dev_priv->pm_ier |= PM_VEBOX_USER_INTERRUPT;
3307 		}
3308 
3309 		dev_priv->pm_imr = 0xffffffff;
3310 		GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_imr, pm_irqs);
3311 	}
3312 }
3313 
3314 static int ironlake_irq_postinstall(struct drm_device *dev)
3315 {
3316 	struct drm_i915_private *dev_priv = to_i915(dev);
3317 	u32 display_mask, extra_mask;
3318 
3319 	if (INTEL_GEN(dev_priv) >= 7) {
3320 		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
3321 				DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
3322 				DE_PLANEB_FLIP_DONE_IVB |
3323 				DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
3324 		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
3325 			      DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
3326 			      DE_DP_A_HOTPLUG_IVB);
3327 	} else {
3328 		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
3329 				DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
3330 				DE_AUX_CHANNEL_A |
3331 				DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
3332 				DE_POISON);
3333 		extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
3334 			      DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
3335 			      DE_DP_A_HOTPLUG);
3336 	}
3337 
3338 	dev_priv->irq_mask = ~display_mask;
3339 
3340 	I915_WRITE(HWSTAM, 0xeffe);
3341 
3342 	ibx_irq_pre_postinstall(dev);
3343 
3344 	GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);
3345 
3346 	gen5_gt_irq_postinstall(dev);
3347 
3348 	ibx_irq_postinstall(dev);
3349 
3350 	if (IS_IRONLAKE_M(dev_priv)) {
3351 		/* Enable PCU event interrupts
3352 		 *
3353 		 * spinlocking not required here for correctness since interrupt
3354 		 * setup is guaranteed to run in single-threaded context. But we
3355 		 * need it to make the assert_spin_locked happy. */
3356 		spin_lock_irq(&dev_priv->irq_lock);
3357 		ilk_enable_display_irq(dev_priv, DE_PCU_EVENT);
3358 		spin_unlock_irq(&dev_priv->irq_lock);
3359 	}
3360 
3361 	return 0;
3362 }
3363 
3364 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
3365 {
3366 	assert_spin_locked(&dev_priv->irq_lock);
3367 
3368 	if (dev_priv->display_irqs_enabled)
3369 		return;
3370 
3371 	dev_priv->display_irqs_enabled = true;
3372 
3373 	if (intel_irqs_enabled(dev_priv)) {
3374 		vlv_display_irq_reset(dev_priv);
3375 		vlv_display_irq_postinstall(dev_priv);
3376 	}
3377 }
3378 
3379 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
3380 {
3381 	assert_spin_locked(&dev_priv->irq_lock);
3382 
3383 	if (!dev_priv->display_irqs_enabled)
3384 		return;
3385 
3386 	dev_priv->display_irqs_enabled = false;
3387 
3388 	if (intel_irqs_enabled(dev_priv))
3389 		vlv_display_irq_reset(dev_priv);
3390 }
3391 
3392 
3393 static int valleyview_irq_postinstall(struct drm_device *dev)
3394 {
3395 	struct drm_i915_private *dev_priv = to_i915(dev);
3396 
3397 	gen5_gt_irq_postinstall(dev);
3398 
3399 	spin_lock_irq(&dev_priv->irq_lock);
3400 	if (dev_priv->display_irqs_enabled)
3401 		vlv_display_irq_postinstall(dev_priv);
3402 	spin_unlock_irq(&dev_priv->irq_lock);
3403 
3404 	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
3405 	POSTING_READ(VLV_MASTER_IER);
3406 
3407 	return 0;
3408 }
3409 
3410 static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
3411 {
3412 	/* These are interrupts we'll toggle with the ring mask register */
3413 	uint32_t gt_interrupts[] = {
3414 		GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
3415 			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
3416 			GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
3417 			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
3418 		GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3419 			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3420 			GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
3421 			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
3422 		0,
3423 		GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
3424 			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
3425 		};
3426 
3427 	if (HAS_L3_DPF(dev_priv))
3428 		gt_interrupts[0] |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
3429 
3430 	dev_priv->pm_ier = 0x0;
3431 	dev_priv->pm_imr = ~dev_priv->pm_ier;
3432 	GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
3433 	GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
3434 	/*
3435 	 * RPS interrupts will get enabled/disabled on demand when RPS itself
3436 	 * is enabled/disabled. The same will be the case for GuC interrupts.
3437 	 */
3438 	GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_imr, dev_priv->pm_ier);
3439 	GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
3440 }
3441 
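/*
 * Program the gen8+ display engine interrupt masks: per-pipe, DE port and
 * DE misc, enabling only the pipes whose power wells are currently on.
 */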
3442 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
3443 {
3444 	uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
3445 	uint32_t de_pipe_enables;
3446 	u32 de_port_masked = GEN8_AUX_CHANNEL_A;
3447 	u32 de_port_enables;
3448 	u32 de_misc_masked = GEN8_DE_MISC_GSE;
3449 	enum pipe pipe;
3450 
3451 	if (INTEL_INFO(dev_priv)->gen >= 9) {
3452 		de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE |
3453 				  GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
3454 		de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
3455 				  GEN9_AUX_CHANNEL_D;
3456 		if (IS_GEN9_LP(dev_priv))
3457 			de_port_masked |= BXT_DE_PORT_GMBUS;
3458 	} else {
3459 		de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE |
3460 				  GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
3461 	}
3462 
3463 	de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
3464 					   GEN8_PIPE_FIFO_UNDERRUN;
3465 
3466 	de_port_enables = de_port_masked;
3467 	if (IS_GEN9_LP(dev_priv))
3468 		de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
3469 	else if (IS_BROADWELL(dev_priv))
3470 		de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;
3471 
3472 	dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
3473 	dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
3474 	dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;
3475 
3476 	for_each_pipe(dev_priv, pipe)
3477 		if (intel_display_power_is_enabled(dev_priv,
3478 				POWER_DOMAIN_PIPE(pipe)))
3479 			GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
3480 					  dev_priv->de_irq_mask[pipe],
3481 					  de_pipe_enables);
3482 
3483 	GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
3484 	GEN5_IRQ_INIT(GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);
3485 
3486 	if (IS_GEN9_LP(dev_priv))
3487 		bxt_hpd_detection_setup(dev_priv);
3488 }
3489 
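/*
 * Top-level gen8+ postinstall. Ordering matters: on PCH split platforms
 * the PCH interrupts are pre-configured before the GT/DE blocks and
 * fully enabled afterwards, and the master interrupt is only unmasked
 * once everything below it is set up.
 */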
3490 static int gen8_irq_postinstall(struct drm_device *dev)
3491 {
3492 	struct drm_i915_private *dev_priv = to_i915(dev);
3493 
3494 	if (HAS_PCH_SPLIT(dev_priv))
3495 		ibx_irq_pre_postinstall(dev);
3496 
3497 	gen8_gt_irq_postinstall(dev_priv);
3498 	gen8_de_irq_postinstall(dev_priv);
3499 
3500 	if (HAS_PCH_SPLIT(dev_priv))
3501 		ibx_irq_postinstall(dev);
3502 
3503 	I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
3504 	POSTING_READ(GEN8_MASTER_IRQ);
3505 
3506 	return 0;
3507 }
3508 
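/*
 * CHV postinstall: gen8-style GT interrupt setup combined with the VLV
 * display interrupt setup, all gated behind the gen8 master interrupt
 * control bit.
 */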
3509 static int cherryview_irq_postinstall(struct drm_device *dev)
3510 {
3511 	struct drm_i915_private *dev_priv = to_i915(dev);
3512 
3513 	gen8_gt_irq_postinstall(dev_priv);
3514 
3515 	spin_lock_irq(&dev_priv->irq_lock);
3516 	if (dev_priv->display_irqs_enabled)
3517 		vlv_display_irq_postinstall(dev_priv);
3518 	spin_unlock_irq(&dev_priv->irq_lock);
3519 
3520 	I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
3521 	POSTING_READ(GEN8_MASTER_IRQ);
3522 
3523 	return 0;
3524 }
3525 
3526 static void gen8_irq_uninstall(struct drm_device *dev)
3527 {
3528 	struct drm_i915_private *dev_priv = to_i915(dev);
3529 
3530 	if (!dev_priv)
3531 		return;
3532 
3533 	gen8_irq_reset(dev);
3534 }
3535 
3536 static void valleyview_irq_uninstall(struct drm_device *dev)
3537 {
3538 	struct drm_i915_private *dev_priv = to_i915(dev);
3539 
3540 	if (!dev_priv)
3541 		return;
3542 
3543 	I915_WRITE(VLV_MASTER_IER, 0);
3544 	POSTING_READ(VLV_MASTER_IER);
3545 
3546 	gen5_gt_irq_reset(dev_priv);
3547 
3548 	I915_WRITE(HWSTAM, 0xffffffff);
3549 
3550 	spin_lock_irq(&dev_priv->irq_lock);
3551 	if (dev_priv->display_irqs_enabled)
3552 		vlv_display_irq_reset(dev_priv);
3553 	spin_unlock_irq(&dev_priv->irq_lock);
3554 }
3555 
3556 static void cherryview_irq_uninstall(struct drm_device *dev)
3557 {
3558 	struct drm_i915_private *dev_priv = to_i915(dev);
3559 
3560 	if (!dev_priv)
3561 		return;
3562 
3563 	I915_WRITE(GEN8_MASTER_IRQ, 0);
3564 	POSTING_READ(GEN8_MASTER_IRQ);
3565 
3566 	gen8_gt_irq_reset(dev_priv);
3567 
3568 	GEN5_IRQ_RESET(GEN8_PCU_);
3569 
3570 	spin_lock_irq(&dev_priv->irq_lock);
3571 	if (dev_priv->display_irqs_enabled)
3572 		vlv_display_irq_reset(dev_priv);
3573 	spin_unlock_irq(&dev_priv->irq_lock);
3574 }
3575 
3576 static void ironlake_irq_uninstall(struct drm_device *dev)
3577 {
3578 	struct drm_i915_private *dev_priv = to_i915(dev);
3579 
3580 	if (!dev_priv)
3581 		return;
3582 
3583 	ironlake_irq_reset(dev);
3584 }
3585 
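/*
 * The i8xx/i915/i965 variants below implement the legacy (pre-ILK)
 * interrupt scheme: a single IIR/IMR/IER/EMR register set plus the
 * per-pipe PIPESTAT registers, with gen2 using the 16-bit forms of
 * those registers.
 */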
3586 static void i8xx_irq_preinstall(struct drm_device * dev)
3587 {
3588 	struct drm_i915_private *dev_priv = to_i915(dev);
3589 	int pipe;
3590 
3591 	for_each_pipe(dev_priv, pipe)
3592 		I915_WRITE(PIPESTAT(pipe), 0);
3593 	I915_WRITE16(IMR, 0xffff);
3594 	I915_WRITE16(IER, 0x0);
3595 	POSTING_READ16(IER);
3596 }
3597 
3598 static int i8xx_irq_postinstall(struct drm_device *dev)
3599 {
3600 	struct drm_i915_private *dev_priv = to_i915(dev);
3601 
3602 	I915_WRITE16(EMR,
3603 		     ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
3604 
3605 	/* Unmask the interrupts that we always want on. */
3606 	dev_priv->irq_mask =
3607 		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3608 		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3609 		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3610 		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
3611 	I915_WRITE16(IMR, dev_priv->irq_mask);
3612 
3613 	I915_WRITE16(IER,
3614 		     I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3615 		     I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3616 		     I915_USER_INTERRUPT);
3617 	POSTING_READ16(IER);
3618 
3619 	/* Interrupt setup is already guaranteed to be single-threaded; this is
3620 	 * just to make the assert_spin_locked check happy. */
3621 	spin_lock_irq(&dev_priv->irq_lock);
3622 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3623 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
3624 	spin_unlock_irq(&dev_priv->irq_lock);
3625 
3626 	return 0;
3627 }
3628 
3629 /*
3630  * Returns true when a page flip has completed.
3631  */
3632 static bool i8xx_handle_vblank(struct drm_i915_private *dev_priv,
3633 			       int plane, int pipe, u32 iir)
3634 {
3635 	u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
3636 
3637 	if (!intel_pipe_handle_vblank(dev_priv, pipe))
3638 		return false;
3639 
3640 	if ((iir & flip_pending) == 0)
3641 		goto check_page_flip;
3642 
3643 	/* We detect FlipDone by looking for the change in PendingFlip from '1'
3644 	 * to '0' on the following vblank, i.e. IIR has the PendingFlip
3645 	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
3646 	 * the flip is completed (no longer pending). Since this doesn't raise
3647 	 * an interrupt per se, we watch for the change at vblank.
3648 	 */
3649 	if (I915_READ16(ISR) & flip_pending)
3650 		goto check_page_flip;
3651 
3652 	intel_finish_page_flip_cs(dev_priv, pipe);
3653 	return true;
3654 
3655 check_page_flip:
3656 	intel_check_page_flip(dev_priv, pipe);
3657 	return false;
3658 }
3659 
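/*
 * Gen2 interrupt handler. For each IIR iteration the PIPESTAT registers
 * are sampled and cleared under irq_lock before IIR itself is acked;
 * user interrupts, vblank/page-flip completion, CRC and FIFO underrun
 * events are then dispatched from the latched pipe status.
 */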
3660 static irqreturn_t i8xx_irq_handler(int irq, void *arg)
3661 {
3662 	struct drm_device *dev = arg;
3663 	struct drm_i915_private *dev_priv = to_i915(dev);
3664 	u16 iir, new_iir;
3665 	u32 pipe_stats[2];
3666 	int pipe;
3667 	u16 flip_mask =
3668 		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3669 		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
3670 	irqreturn_t ret;
3671 
3672 	if (!intel_irqs_enabled(dev_priv))
3673 		return IRQ_NONE;
3674 
3675 	/* IRQs are synced during runtime_suspend; we don't require a wakeref */
3676 	disable_rpm_wakeref_asserts(dev_priv);
3677 
3678 	ret = IRQ_NONE;
3679 	iir = I915_READ16(IIR);
3680 	if (iir == 0)
3681 		goto out;
3682 
3683 	while (iir & ~flip_mask) {
3684 		/* Can't rely on pipestat interrupt bit in iir as it might
3685 		 * have been cleared after the pipestat interrupt was received.
3686 		 * It doesn't set the bit in iir again, but it still produces
3687 		 * interrupts (for non-MSI).
3688 		 */
3689 		spin_lock(&dev_priv->irq_lock);
3690 		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
3691 			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
3692 
3693 		for_each_pipe(dev_priv, pipe) {
3694 			i915_reg_t reg = PIPESTAT(pipe);
3695 			pipe_stats[pipe] = I915_READ(reg);
3696 
3697 			/*
3698 			 * Clear the PIPE*STAT regs before the IIR
3699 			 */
3700 			if (pipe_stats[pipe] & 0x8000ffff)
3701 				I915_WRITE(reg, pipe_stats[pipe]);
3702 		}
3703 		spin_unlock(&dev_priv->irq_lock);
3704 
3705 		I915_WRITE16(IIR, iir & ~flip_mask);
3706 		new_iir = I915_READ16(IIR); /* Flush posted writes */
3707 
3708 		if (iir & I915_USER_INTERRUPT)
3709 			notify_ring(dev_priv->engine[RCS]);
3710 
3711 		for_each_pipe(dev_priv, pipe) {
3712 			int plane = pipe;
3713 			if (HAS_FBC(dev_priv))
3714 				plane = !plane;
3715 
3716 			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
3717 			    i8xx_handle_vblank(dev_priv, plane, pipe, iir))
3718 				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
3719 
3720 			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
3721 				i9xx_pipe_crc_irq_handler(dev_priv, pipe);
3722 
3723 			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
3724 				intel_cpu_fifo_underrun_irq_handler(dev_priv,
3725 								    pipe);
3726 		}
3727 
3728 		iir = new_iir;
3729 	}
3730 	ret = IRQ_HANDLED;
3731 
3732 out:
3733 	enable_rpm_wakeref_asserts(dev_priv);
3734 
3735 	return ret;
3736 }
3737 
3738 static void i8xx_irq_uninstall(struct drm_device * dev)
3739 {
3740 	struct drm_i915_private *dev_priv = to_i915(dev);
3741 	int pipe;
3742 
3743 	for_each_pipe(dev_priv, pipe) {
3744 		/* Clear enable bits; then clear status bits */
3745 		I915_WRITE(PIPESTAT(pipe), 0);
3746 		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
3747 	}
3748 	I915_WRITE16(IMR, 0xffff);
3749 	I915_WRITE16(IER, 0x0);
3750 	I915_WRITE16(IIR, I915_READ16(IIR));
3751 }
3752 
3753 static void i915_irq_preinstall(struct drm_device * dev)
3754 {
3755 	struct drm_i915_private *dev_priv = to_i915(dev);
3756 	int pipe;
3757 
3758 	if (I915_HAS_HOTPLUG(dev_priv)) {
3759 		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
3760 		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3761 	}
3762 
3763 	I915_WRITE16(HWSTAM, 0xeffe);
3764 	for_each_pipe(dev_priv, pipe)
3765 		I915_WRITE(PIPESTAT(pipe), 0);
3766 	I915_WRITE(IMR, 0xffffffff);
3767 	I915_WRITE(IER, 0x0);
3768 	POSTING_READ(IER);
3769 }
3770 
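/*
 * Gen3 postinstall: the same basic EMR/IMR/IER setup as gen2, with ASLE
 * and (where the platform has it) display port hotplug interrupts added
 * to the always-enabled set.
 */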
3771 static int i915_irq_postinstall(struct drm_device *dev)
3772 {
3773 	struct drm_i915_private *dev_priv = to_i915(dev);
3774 	u32 enable_mask;
3775 
3776 	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
3777 
3778 	/* Unmask the interrupts that we always want on. */
3779 	dev_priv->irq_mask =
3780 		~(I915_ASLE_INTERRUPT |
3781 		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3782 		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3783 		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3784 		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
3785 
3786 	enable_mask =
3787 		I915_ASLE_INTERRUPT |
3788 		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3789 		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3790 		I915_USER_INTERRUPT;
3791 
3792 	if (I915_HAS_HOTPLUG(dev_priv)) {
3793 		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
3794 		POSTING_READ(PORT_HOTPLUG_EN);
3795 
3796 		/* Enable in IER... */
3797 		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
3798 		/* and unmask in IMR */
3799 		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
3800 	}
3801 
3802 	I915_WRITE(IMR, dev_priv->irq_mask);
3803 	I915_WRITE(IER, enable_mask);
3804 	POSTING_READ(IER);
3805 
3806 	i915_enable_asle_pipestat(dev_priv);
3807 
3808 	/* Interrupt setup is already guaranteed to be single-threaded; this is
3809 	 * just to make the assert_spin_locked check happy. */
3810 	spin_lock_irq(&dev_priv->irq_lock);
3811 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3812 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
3813 	spin_unlock_irq(&dev_priv->irq_lock);
3814 
3815 	return 0;
3816 }
3817 
3818 /*
3819  * Returns true when a page flip has completed.
3820  */
3821 static bool i915_handle_vblank(struct drm_i915_private *dev_priv,
3822 			       int plane, int pipe, u32 iir)
3823 {
3824 	u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
3825 
3826 	if (!intel_pipe_handle_vblank(dev_priv, pipe))
3827 		return false;
3828 
3829 	if ((iir & flip_pending) == 0)
3830 		goto check_page_flip;
3831 
3832 	/* We detect FlipDone by looking for the change in PendingFlip from '1'
3833 	 * to '0' on the following vblank, i.e. IIR has the PendingFlip
3834 	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
3835 	 * the flip is completed (no longer pending). Since this doesn't raise
3836 	 * an interrupt per se, we watch for the change at vblank.
3837 	 */
3838 	if (I915_READ(ISR) & flip_pending)
3839 		goto check_page_flip;
3840 
3841 	intel_finish_page_flip_cs(dev_priv, pipe);
3842 	return true;
3843 
3844 check_page_flip:
3845 	intel_check_page_flip(dev_priv, pipe);
3846 	return false;
3847 }
3848 
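/*
 * Gen3 interrupt handler. Mirrors the gen2 loop above, but additionally
 * consumes hotplug port events before acking IIR and forwards legacy
 * backlight (ASLE) events to the opregion code.
 */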
3849 static irqreturn_t i915_irq_handler(int irq, void *arg)
3850 {
3851 	struct drm_device *dev = arg;
3852 	struct drm_i915_private *dev_priv = to_i915(dev);
3853 	u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
3854 	u32 flip_mask =
3855 		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3856 		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
3857 	int pipe, ret = IRQ_NONE;
3858 
3859 	if (!intel_irqs_enabled(dev_priv))
3860 		return IRQ_NONE;
3861 
3862 	/* IRQs are synced during runtime_suspend; we don't require a wakeref */
3863 	disable_rpm_wakeref_asserts(dev_priv);
3864 
3865 	iir = I915_READ(IIR);
3866 	do {
3867 		bool irq_received = (iir & ~flip_mask) != 0;
3868 		bool blc_event = false;
3869 
3870 		/* Can't rely on pipestat interrupt bit in iir as it might
3871 		 * have been cleared after the pipestat interrupt was received.
3872 		 * It doesn't set the bit in iir again, but it still produces
3873 		 * interrupts (for non-MSI).
3874 		 */
3875 		spin_lock(&dev_priv->irq_lock);
3876 		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
3877 			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
3878 
3879 		for_each_pipe(dev_priv, pipe) {
3880 			i915_reg_t reg = PIPESTAT(pipe);
3881 			pipe_stats[pipe] = I915_READ(reg);
3882 
3883 			/* Clear the PIPE*STAT regs before the IIR */
3884 			if (pipe_stats[pipe] & 0x8000ffff) {
3885 				I915_WRITE(reg, pipe_stats[pipe]);
3886 				irq_received = true;
3887 			}
3888 		}
3889 		spin_unlock(&dev_priv->irq_lock);
3890 
3891 		if (!irq_received)
3892 			break;
3893 
3894 		/* Consume port.  Then clear IIR or we'll miss events */
3895 		if (I915_HAS_HOTPLUG(dev_priv) &&
3896 		    iir & I915_DISPLAY_PORT_INTERRUPT) {
3897 			u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
3898 			if (hotplug_status)
3899 				i9xx_hpd_irq_handler(dev_priv, hotplug_status);
3900 		}
3901 
3902 		I915_WRITE(IIR, iir & ~flip_mask);
3903 		new_iir = I915_READ(IIR); /* Flush posted writes */
3904 
3905 		if (iir & I915_USER_INTERRUPT)
3906 			notify_ring(dev_priv->engine[RCS]);
3907 
3908 		for_each_pipe(dev_priv, pipe) {
3909 			int plane = pipe;
3910 			if (HAS_FBC(dev_priv))
3911 				plane = !plane;
3912 
3913 			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
3914 			    i915_handle_vblank(dev_priv, plane, pipe, iir))
3915 				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
3916 
3917 			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
3918 				blc_event = true;
3919 
3920 			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
3921 				i9xx_pipe_crc_irq_handler(dev_priv, pipe);
3922 
3923 			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
3924 				intel_cpu_fifo_underrun_irq_handler(dev_priv,
3925 								    pipe);
3926 		}
3927 
3928 		if (blc_event || (iir & I915_ASLE_INTERRUPT))
3929 			intel_opregion_asle_intr(dev_priv);
3930 
3931 		/* With MSI, interrupts are only generated when iir
3932 		 * transitions from zero to nonzero.  If another bit got
3933 		 * set while we were handling the existing iir bits, then
3934 		 * we would never get another interrupt.
3935 		 *
3936 		 * This is fine on non-MSI as well, as if we hit this path
3937 		 * we avoid exiting the interrupt handler only to generate
3938 		 * another one.
3939 		 *
3940 		 * Note that for MSI this could cause a stray interrupt report
3941 		 * if an interrupt landed in the time between writing IIR and
3942 		 * the posting read.  This should be rare enough to never
3943 		 * trigger the 99% of 100,000 interrupts test for disabling
3944 		 * stray interrupts.
3945 		 */
3946 		ret = IRQ_HANDLED;
3947 		iir = new_iir;
3948 	} while (iir & ~flip_mask);
3949 
3950 	enable_rpm_wakeref_asserts(dev_priv);
3951 
3952 	return ret;
3953 }
3954 
3955 static void i915_irq_uninstall(struct drm_device * dev)
3956 {
3957 	struct drm_i915_private *dev_priv = to_i915(dev);
3958 	int pipe;
3959 
3960 	if (I915_HAS_HOTPLUG(dev_priv)) {
3961 		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
3962 		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3963 	}
3964 
3965 	I915_WRITE16(HWSTAM, 0xffff);
3966 	for_each_pipe(dev_priv, pipe) {
3967 		/* Clear enable bits; then clear status bits */
3968 		I915_WRITE(PIPESTAT(pipe), 0);
3969 		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
3970 	}
3971 	I915_WRITE(IMR, 0xffffffff);
3972 	I915_WRITE(IER, 0x0);
3973 
3974 	I915_WRITE(IIR, I915_READ(IIR));
3975 }
3976 
3977 static void i965_irq_preinstall(struct drm_device * dev)
3978 {
3979 	struct drm_i915_private *dev_priv = to_i915(dev);
3980 	int pipe;
3981 
3982 	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
3983 	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3984 
3985 	I915_WRITE(HWSTAM, 0xeffe);
3986 	for_each_pipe(dev_priv, pipe)
3987 		I915_WRITE(PIPESTAT(pipe), 0);
3988 	I915_WRITE(IMR, 0xffffffff);
3989 	I915_WRITE(IER, 0x0);
3990 	POSTING_READ(IER);
3991 }
3992 
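/*
 * Gen4 postinstall: display port and render command parser error
 * interrupts join the always-enabled set, GMBUS events are reported via
 * the pipe A PIPESTAT, and G4x additionally gets the BSD ring user
 * interrupt.
 */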
3993 static int i965_irq_postinstall(struct drm_device *dev)
3994 {
3995 	struct drm_i915_private *dev_priv = to_i915(dev);
3996 	u32 enable_mask;
3997 	u32 error_mask;
3998 
3999 	/* Unmask the interrupts that we always want on. */
4000 	dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
4001 			       I915_DISPLAY_PORT_INTERRUPT |
4002 			       I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4003 			       I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4004 			       I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4005 			       I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
4006 			       I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
4007 
4008 	enable_mask = ~dev_priv->irq_mask;
4009 	enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4010 			 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
4011 	enable_mask |= I915_USER_INTERRUPT;
4012 
4013 	if (IS_G4X(dev_priv))
4014 		enable_mask |= I915_BSD_USER_INTERRUPT;
4015 
4016 	/* Interrupt setup is already guaranteed to be single-threaded; this is
4017 	 * just to make the assert_spin_locked check happy. */
4018 	spin_lock_irq(&dev_priv->irq_lock);
4019 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
4020 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4021 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4022 	spin_unlock_irq(&dev_priv->irq_lock);
4023 
4024 	/*
4025 	 * Enable some error detection, note the instruction error mask
4026 	 * bit is reserved, so we leave it masked.
4027 	 */
4028 	if (IS_G4X(dev_priv)) {
4029 		error_mask = ~(GM45_ERROR_PAGE_TABLE |
4030 			       GM45_ERROR_MEM_PRIV |
4031 			       GM45_ERROR_CP_PRIV |
4032 			       I915_ERROR_MEMORY_REFRESH);
4033 	} else {
4034 		error_mask = ~(I915_ERROR_PAGE_TABLE |
4035 			       I915_ERROR_MEMORY_REFRESH);
4036 	}
4037 	I915_WRITE(EMR, error_mask);
4038 
4039 	I915_WRITE(IMR, dev_priv->irq_mask);
4040 	I915_WRITE(IER, enable_mask);
4041 	POSTING_READ(IER);
4042 
4043 	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4044 	POSTING_READ(PORT_HOTPLUG_EN);
4045 
4046 	i915_enable_asle_pipestat(dev_priv);
4047 
4048 	return 0;
4049 }
4050 
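/*
 * Hotplug detection setup for the legacy PORT_HOTPLUG_EN register, used
 * by the pre-ILK platforms with hotplug support as well as VLV/CHV. Only
 * the hotplug enable, CRT voltage-compare and activation-period fields
 * are rewritten here.
 */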
4051 static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
4052 {
4053 	u32 hotplug_en;
4054 
4055 	assert_spin_locked(&dev_priv->irq_lock);
4056 
4057 	/* Note HDMI and DP share hotplug bits */
4058 	/* enable bits are the same for all generations */
4059 	hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915);
4060 	/* Programming the CRT detection parameters tends
4061 	   to generate a spurious hotplug event about three
4062 	   seconds later.  So just do it once.
4063 	*/
4064 	if (IS_G4X(dev_priv))
4065 		hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
4066 	hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
4067 
4068 	/* Ignore TV since it's buggy */
4069 	i915_hotplug_interrupt_update_locked(dev_priv,
4070 					     HOTPLUG_INT_EN_MASK |
4071 					     CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
4072 					     CRT_HOTPLUG_ACTIVATION_PERIOD_64,
4073 					     hotplug_en);
4074 }
4075 
4076 static irqreturn_t i965_irq_handler(int irq, void *arg)
4077 {
4078 	struct drm_device *dev = arg;
4079 	struct drm_i915_private *dev_priv = to_i915(dev);
4080 	u32 iir, new_iir;
4081 	u32 pipe_stats[I915_MAX_PIPES];
4082 	int ret = IRQ_NONE, pipe;
4083 	u32 flip_mask =
4084 		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4085 		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
4086 
4087 	if (!intel_irqs_enabled(dev_priv))
4088 		return IRQ_NONE;
4089 
4090 	/* IRQs are synced during runtime_suspend; we don't require a wakeref */
4091 	disable_rpm_wakeref_asserts(dev_priv);
4092 
4093 	iir = I915_READ(IIR);
4094 
4095 	for (;;) {
4096 		bool irq_received = (iir & ~flip_mask) != 0;
4097 		bool blc_event = false;
4098 
4099 		/* Can't rely on pipestat interrupt bit in iir as it might
4100 		 * have been cleared after the pipestat interrupt was received.
4101 		 * It doesn't set the bit in iir again, but it still produces
4102 		 * interrupts (for non-MSI).
4103 		 */
4104 		spin_lock(&dev_priv->irq_lock);
4105 		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
4106 			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
4107 
4108 		for_each_pipe(dev_priv, pipe) {
4109 			i915_reg_t reg = PIPESTAT(pipe);
4110 			pipe_stats[pipe] = I915_READ(reg);
4111 
4112 			/*
4113 			 * Clear the PIPE*STAT regs before the IIR
4114 			 */
4115 			if (pipe_stats[pipe] & 0x8000ffff) {
4116 				I915_WRITE(reg, pipe_stats[pipe]);
4117 				irq_received = true;
4118 			}
4119 		}
4120 		spin_unlock(&dev_priv->irq_lock);
4121 
4122 		if (!irq_received)
4123 			break;
4124 
4125 		ret = IRQ_HANDLED;
4126 
4127 		/* Consume port.  Then clear IIR or we'll miss events */
4128 		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
4129 			u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
4130 			if (hotplug_status)
4131 				i9xx_hpd_irq_handler(dev_priv, hotplug_status);
4132 		}
4133 
4134 		I915_WRITE(IIR, iir & ~flip_mask);
4135 		new_iir = I915_READ(IIR); /* Flush posted writes */
4136 
4137 		if (iir & I915_USER_INTERRUPT)
4138 			notify_ring(dev_priv->engine[RCS]);
4139 		if (iir & I915_BSD_USER_INTERRUPT)
4140 			notify_ring(dev_priv->engine[VCS]);
4141 
4142 		for_each_pipe(dev_priv, pipe) {
4143 			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
4144 			    i915_handle_vblank(dev_priv, pipe, pipe, iir))
4145 				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
4146 
4147 			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
4148 				blc_event = true;
4149 
4150 			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
4151 				i9xx_pipe_crc_irq_handler(dev_priv, pipe);
4152 
4153 			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
4154 				intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
4155 		}
4156 
4157 		if (blc_event || (iir & I915_ASLE_INTERRUPT))
4158 			intel_opregion_asle_intr(dev_priv);
4159 
4160 		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
4161 			gmbus_irq_handler(dev_priv);
4162 
4163 		/* With MSI, interrupts are only generated when iir
4164 		 * transitions from zero to nonzero.  If another bit got
4165 		 * set while we were handling the existing iir bits, then
4166 		 * we would never get another interrupt.
4167 		 *
4168 		 * This is fine on non-MSI as well, as if we hit this path
4169 		 * we avoid exiting the interrupt handler only to generate
4170 		 * another one.
4171 		 *
4172 		 * Note that for MSI this could cause a stray interrupt report
4173 		 * if an interrupt landed in the time between writing IIR and
4174 		 * the posting read.  This should be rare enough to never
4175 		 * trigger the 99% of 100,000 interrupts test for disabling
4176 		 * stray interrupts.
4177 		 */
4178 		iir = new_iir;
4179 	}
4180 
4181 	enable_rpm_wakeref_asserts(dev_priv);
4182 
4183 	return ret;
4184 }
4185 
4186 static void i965_irq_uninstall(struct drm_device * dev)
4187 {
4188 	struct drm_i915_private *dev_priv = to_i915(dev);
4189 	int pipe;
4190 
4191 	if (!dev_priv)
4192 		return;
4193 
4194 	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4195 	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4196 
4197 	I915_WRITE(HWSTAM, 0xffffffff);
4198 	for_each_pipe(dev_priv, pipe)
4199 		I915_WRITE(PIPESTAT(pipe), 0);
4200 	I915_WRITE(IMR, 0xffffffff);
4201 	I915_WRITE(IER, 0x0);
4202 
4203 	for_each_pipe(dev_priv, pipe)
4204 		I915_WRITE(PIPESTAT(pipe),
4205 			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
4206 	I915_WRITE(IIR, I915_READ(IIR));
4207 }
4208 
4209 /**
4210  * intel_irq_init - initializes irq support
4211  * @dev_priv: i915 device instance
4212  *
4213  * This function initializes all the irq support including work items, timers
4214  * and all the vtables. It does not setup the interrupt itself though.
4215  * and all the vtables. It does not set up the interrupt itself, though.
4216 void intel_irq_init(struct drm_i915_private *dev_priv)
4217 {
4218 	struct drm_device *dev = &dev_priv->drm;
4219 
4220 	intel_hpd_init_work(dev_priv);
4221 
4222 	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
4223 	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
4224 
4225 	if (HAS_GUC_SCHED(dev_priv))
4226 		dev_priv->pm_guc_events = GEN9_GUC_TO_HOST_INT_EVENT;
4227 
4228 	/* Let's track the enabled rps events */
4229 	if (IS_VALLEYVIEW(dev_priv))
4230 		/* WaGsvRC0ResidencyMethod:vlv */
4231 		dev_priv->pm_rps_events = GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED;
4232 	else
4233 		dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
4234 
4235 	dev_priv->rps.pm_intr_keep = 0;
4236 
4237 	/*
4238 	 * SNB and IVB can, while VLV and CHV may, hard hang on a looping
4239 	 * batchbuffer if GEN6_PM_UP_EI_EXPIRED is masked.
4240 	 *
4241 	 * TODO: verify if this can be reproduced on VLV,CHV.
4242 	 */
4243 	if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv))
4244 		dev_priv->rps.pm_intr_keep |= GEN6_PM_RP_UP_EI_EXPIRED;
4245 
4246 	if (INTEL_INFO(dev_priv)->gen >= 8)
4247 		dev_priv->rps.pm_intr_keep |= GEN8_PMINTR_REDIRECT_TO_GUC;
4248 
4249 	if (IS_GEN2(dev_priv)) {
4250 		/* Gen2 doesn't have a hardware frame counter */
4251 		dev->max_vblank_count = 0;
4252 		dev->driver->get_vblank_counter = drm_vblank_no_hw_counter;
4253 	} else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
4254 		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
4255 		dev->driver->get_vblank_counter = g4x_get_vblank_counter;
4256 	} else {
4257 		dev->driver->get_vblank_counter = i915_get_vblank_counter;
4258 		dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
4259 	}
4260 
4261 	/*
4262 	 * Opt out of the vblank disable timer on everything except gen2.
4263 	 * Gen2 doesn't have a hardware frame counter and so depends on
4264 	 * vblank interrupts to produce sane vblank sequence numbers.
4265 	 */
4266 	if (!IS_GEN2(dev_priv))
4267 		dev->vblank_disable_immediate = true;
4268 
4269 	dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
4270 	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
4271 
4272 	if (IS_CHERRYVIEW(dev_priv)) {
4273 		dev->driver->irq_handler = cherryview_irq_handler;
4274 		dev->driver->irq_preinstall = cherryview_irq_preinstall;
4275 		dev->driver->irq_postinstall = cherryview_irq_postinstall;
4276 		dev->driver->irq_uninstall = cherryview_irq_uninstall;
4277 		dev->driver->enable_vblank = i965_enable_vblank;
4278 		dev->driver->disable_vblank = i965_disable_vblank;
4279 		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4280 	} else if (IS_VALLEYVIEW(dev_priv)) {
4281 		dev->driver->irq_handler = valleyview_irq_handler;
4282 		dev->driver->irq_preinstall = valleyview_irq_preinstall;
4283 		dev->driver->irq_postinstall = valleyview_irq_postinstall;
4284 		dev->driver->irq_uninstall = valleyview_irq_uninstall;
4285 		dev->driver->enable_vblank = i965_enable_vblank;
4286 		dev->driver->disable_vblank = i965_disable_vblank;
4287 		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4288 	} else if (INTEL_INFO(dev_priv)->gen >= 8) {
4289 		dev->driver->irq_handler = gen8_irq_handler;
4290 		dev->driver->irq_preinstall = gen8_irq_reset;
4291 		dev->driver->irq_postinstall = gen8_irq_postinstall;
4292 		dev->driver->irq_uninstall = gen8_irq_uninstall;
4293 		dev->driver->enable_vblank = gen8_enable_vblank;
4294 		dev->driver->disable_vblank = gen8_disable_vblank;
4295 		if (IS_GEN9_LP(dev_priv))
4296 			dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
4297 		else if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv))
4298 			dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
4299 		else
4300 			dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
4301 	} else if (HAS_PCH_SPLIT(dev_priv)) {
4302 		dev->driver->irq_handler = ironlake_irq_handler;
4303 		dev->driver->irq_preinstall = ironlake_irq_reset;
4304 		dev->driver->irq_postinstall = ironlake_irq_postinstall;
4305 		dev->driver->irq_uninstall = ironlake_irq_uninstall;
4306 		dev->driver->enable_vblank = ironlake_enable_vblank;
4307 		dev->driver->disable_vblank = ironlake_disable_vblank;
4308 		dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
4309 	} else {
4310 		if (IS_GEN2(dev_priv)) {
4311 			dev->driver->irq_preinstall = i8xx_irq_preinstall;
4312 			dev->driver->irq_postinstall = i8xx_irq_postinstall;
4313 			dev->driver->irq_handler = i8xx_irq_handler;
4314 			dev->driver->irq_uninstall = i8xx_irq_uninstall;
4315 			dev->driver->enable_vblank = i8xx_enable_vblank;
4316 			dev->driver->disable_vblank = i8xx_disable_vblank;
4317 		} else if (IS_GEN3(dev_priv)) {
4318 			dev->driver->irq_preinstall = i915_irq_preinstall;
4319 			dev->driver->irq_postinstall = i915_irq_postinstall;
4320 			dev->driver->irq_uninstall = i915_irq_uninstall;
4321 			dev->driver->irq_handler = i915_irq_handler;
4322 			dev->driver->enable_vblank = i8xx_enable_vblank;
4323 			dev->driver->disable_vblank = i8xx_disable_vblank;
4324 		} else {
4325 			dev->driver->irq_preinstall = i965_irq_preinstall;
4326 			dev->driver->irq_postinstall = i965_irq_postinstall;
4327 			dev->driver->irq_uninstall = i965_irq_uninstall;
4328 			dev->driver->irq_handler = i965_irq_handler;
4329 			dev->driver->enable_vblank = i965_enable_vblank;
4330 			dev->driver->disable_vblank = i965_disable_vblank;
4331 		}
4332 		if (I915_HAS_HOTPLUG(dev_priv))
4333 			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4334 	}
4335 }
4336 
4337 /**
4338  * intel_irq_install - enables the hardware interrupt
4339  * @dev_priv: i915 device instance
4340  *
4341  * This function enables the hardware interrupt handling, but leaves the hotplug
4342  * handling still disabled. It is called after intel_irq_init().
4343  *
4344  * In the driver load and resume code we need working interrupts in a few places
4345  * but don't want to deal with the hassle of concurrent probe and hotplug
4346  * workers. Hence the split into this two-stage approach.
4347  */
4348 int intel_irq_install(struct drm_i915_private *dev_priv)
4349 {
4350 	/*
4351 	 * We enable some interrupt sources in our postinstall hooks, so mark
4352 	 * interrupts as enabled _before_ actually enabling them to avoid
4353 	 * special cases in our ordering checks.
4354 	 */
4355 	dev_priv->pm.irqs_enabled = true;
4356 
4357 	return drm_irq_install(&dev_priv->drm, dev_priv->drm.pdev->irq);
4358 }
4359 
4360 /**
4361  * intel_irq_uninstall - finalizes all irq handling
4362  * @dev_priv: i915 device instance
4363  *
4364  * This stops interrupt and hotplug handling and unregisters and frees all
4365  * resources acquired in the init functions.
4366  */
4367 void intel_irq_uninstall(struct drm_i915_private *dev_priv)
4368 {
4369 	drm_irq_uninstall(&dev_priv->drm);
4370 	intel_hpd_cancel_work(dev_priv);
4371 	dev_priv->pm.irqs_enabled = false;
4372 }
4373 
4374 /**
4375  * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
4376  * @dev_priv: i915 device instance
4377  *
4378  * This function is used to disable interrupts at runtime, both in the runtime
4379  * pm and the system suspend/resume code.
4380  */
4381 void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
4382 {
4383 	dev_priv->drm.driver->irq_uninstall(&dev_priv->drm);
4384 	dev_priv->pm.irqs_enabled = false;
4385 	synchronize_irq(dev_priv->drm.irq);
4386 }
4387 
4388 /**
4389  * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
4390  * @dev_priv: i915 device instance
4391  *
4392  * This function is used to enable interrupts at runtime, both in the runtime
4393  * pm and the system suspend/resume code.
4394  */
4395 void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
4396 {
4397 	dev_priv->pm.irqs_enabled = true;
4398 	dev_priv->drm.driver->irq_preinstall(&dev_priv->drm);
4399 	dev_priv->drm.driver->irq_postinstall(&dev_priv->drm);
4400 }
4401