xref: /openbmc/linux/drivers/gpu/drm/i915/i915_irq.c (revision c2baf4b7097cb66e7ee3c2fa0f585d386dab6300)
1c0e09200SDave Airlie /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
2c0e09200SDave Airlie  */
3c0e09200SDave Airlie /*
4c0e09200SDave Airlie  * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
5c0e09200SDave Airlie  * All Rights Reserved.
6c0e09200SDave Airlie  *
7c0e09200SDave Airlie  * Permission is hereby granted, free of charge, to any person obtaining a
8c0e09200SDave Airlie  * copy of this software and associated documentation files (the
9c0e09200SDave Airlie  * "Software"), to deal in the Software without restriction, including
10c0e09200SDave Airlie  * without limitation the rights to use, copy, modify, merge, publish,
11c0e09200SDave Airlie  * distribute, sub license, and/or sell copies of the Software, and to
12c0e09200SDave Airlie  * permit persons to whom the Software is furnished to do so, subject to
13c0e09200SDave Airlie  * the following conditions:
14c0e09200SDave Airlie  *
15c0e09200SDave Airlie  * The above copyright notice and this permission notice (including the
16c0e09200SDave Airlie  * next paragraph) shall be included in all copies or substantial portions
17c0e09200SDave Airlie  * of the Software.
18c0e09200SDave Airlie  *
19c0e09200SDave Airlie  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20c0e09200SDave Airlie  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21c0e09200SDave Airlie  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22c0e09200SDave Airlie  * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23c0e09200SDave Airlie  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24c0e09200SDave Airlie  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25c0e09200SDave Airlie  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26c0e09200SDave Airlie  *
27c0e09200SDave Airlie  */
28c0e09200SDave Airlie 
29a70491ccSJoe Perches #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30a70491ccSJoe Perches 
3163eeaf38SJesse Barnes #include <linux/sysrq.h>
325a0e3ad6STejun Heo #include <linux/slab.h>
33760285e7SDavid Howells #include <drm/drmP.h>
34760285e7SDavid Howells #include <drm/i915_drm.h>
35c0e09200SDave Airlie #include "i915_drv.h"
361c5d22f7SChris Wilson #include "i915_trace.h"
3779e53945SJesse Barnes #include "intel_drv.h"
38c0e09200SDave Airlie 
39e5868a31SEgbert Eich static const u32 hpd_ibx[] = {
40e5868a31SEgbert Eich 	[HPD_CRT] = SDE_CRT_HOTPLUG,
41e5868a31SEgbert Eich 	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
42e5868a31SEgbert Eich 	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
43e5868a31SEgbert Eich 	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
44e5868a31SEgbert Eich 	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
45e5868a31SEgbert Eich };
46e5868a31SEgbert Eich 
47e5868a31SEgbert Eich static const u32 hpd_cpt[] = {
48e5868a31SEgbert Eich 	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
4973c352a2SDaniel Vetter 	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
50e5868a31SEgbert Eich 	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
51e5868a31SEgbert Eich 	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
52e5868a31SEgbert Eich 	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
53e5868a31SEgbert Eich };
54e5868a31SEgbert Eich 
55e5868a31SEgbert Eich static const u32 hpd_mask_i915[] = {
56e5868a31SEgbert Eich 	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
57e5868a31SEgbert Eich 	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
58e5868a31SEgbert Eich 	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
59e5868a31SEgbert Eich 	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
60e5868a31SEgbert Eich 	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
61e5868a31SEgbert Eich 	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
62e5868a31SEgbert Eich };
63e5868a31SEgbert Eich 
64e5868a31SEgbert Eich static const u32 hpd_status_gen4[] = {
65e5868a31SEgbert Eich 	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
66e5868a31SEgbert Eich 	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
67e5868a31SEgbert Eich 	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
68e5868a31SEgbert Eich 	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
69e5868a31SEgbert Eich 	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
70e5868a31SEgbert Eich 	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
71e5868a31SEgbert Eich };
72e5868a31SEgbert Eich 
73e5868a31SEgbert Eich static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
74e5868a31SEgbert Eich 	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
75e5868a31SEgbert Eich 	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
76e5868a31SEgbert Eich 	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
77e5868a31SEgbert Eich 	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
78e5868a31SEgbert Eich 	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
79e5868a31SEgbert Eich 	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
80e5868a31SEgbert Eich };
81e5868a31SEgbert Eich 
82036a4a7dSZhenyu Wang /* For display hotplug interrupt */
83995b6762SChris Wilson static void
84f2b115e6SAdam Jackson ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
85036a4a7dSZhenyu Wang {
864bc9d430SDaniel Vetter 	assert_spin_locked(&dev_priv->irq_lock);
874bc9d430SDaniel Vetter 
88c67a470bSPaulo Zanoni 	if (dev_priv->pc8.irqs_disabled) {
89c67a470bSPaulo Zanoni 		WARN(1, "IRQs disabled\n");
90c67a470bSPaulo Zanoni 		dev_priv->pc8.regsave.deimr &= ~mask;
91c67a470bSPaulo Zanoni 		return;
92c67a470bSPaulo Zanoni 	}
93c67a470bSPaulo Zanoni 
941ec14ad3SChris Wilson 	if ((dev_priv->irq_mask & mask) != 0) {
951ec14ad3SChris Wilson 		dev_priv->irq_mask &= ~mask;
961ec14ad3SChris Wilson 		I915_WRITE(DEIMR, dev_priv->irq_mask);
973143a2bfSChris Wilson 		POSTING_READ(DEIMR);
98036a4a7dSZhenyu Wang 	}
99036a4a7dSZhenyu Wang }
100036a4a7dSZhenyu Wang 
1010ff9800aSPaulo Zanoni static void
102f2b115e6SAdam Jackson ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
103036a4a7dSZhenyu Wang {
1044bc9d430SDaniel Vetter 	assert_spin_locked(&dev_priv->irq_lock);
1054bc9d430SDaniel Vetter 
106c67a470bSPaulo Zanoni 	if (dev_priv->pc8.irqs_disabled) {
107c67a470bSPaulo Zanoni 		WARN(1, "IRQs disabled\n");
108c67a470bSPaulo Zanoni 		dev_priv->pc8.regsave.deimr |= mask;
109c67a470bSPaulo Zanoni 		return;
110c67a470bSPaulo Zanoni 	}
111c67a470bSPaulo Zanoni 
1121ec14ad3SChris Wilson 	if ((dev_priv->irq_mask & mask) != mask) {
1131ec14ad3SChris Wilson 		dev_priv->irq_mask |= mask;
1141ec14ad3SChris Wilson 		I915_WRITE(DEIMR, dev_priv->irq_mask);
1153143a2bfSChris Wilson 		POSTING_READ(DEIMR);
116036a4a7dSZhenyu Wang 	}
117036a4a7dSZhenyu Wang }
118036a4a7dSZhenyu Wang 
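/*
 * Illustrative sketch (not part of the original file) of the DEIMR
 * convention used by the two helpers above: a bit that is *set* in the
 * interrupt mask register masks that interrupt off, so enabling clears the
 * bit and disabling sets it.  Starting from everything masked:
 *
 *	dev_priv->irq_mask = 0xffffffff;
 *	ironlake_enable_display_irq(dev_priv, DE_PIPEA_VBLANK);
 *		// irq_mask &= ~DE_PIPEA_VBLANK  -> bit cleared, IRQ unmasked
 *	ironlake_disable_display_irq(dev_priv, DE_PIPEA_VBLANK);
 *		// irq_mask |= DE_PIPEA_VBLANK   -> bit set, IRQ masked again
 *
 * Both helpers must be called with dev_priv->irq_lock held, which is what
 * the assert_spin_locked() checks enforce.
 */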
11943eaea13SPaulo Zanoni /**
12043eaea13SPaulo Zanoni  * ilk_update_gt_irq - update GTIMR
12143eaea13SPaulo Zanoni  * @dev_priv: driver private
12243eaea13SPaulo Zanoni  * @interrupt_mask: mask of interrupt bits to update
12343eaea13SPaulo Zanoni  * @enabled_irq_mask: mask of interrupt bits to enable
12443eaea13SPaulo Zanoni  */
12543eaea13SPaulo Zanoni static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
12643eaea13SPaulo Zanoni 			      uint32_t interrupt_mask,
12743eaea13SPaulo Zanoni 			      uint32_t enabled_irq_mask)
12843eaea13SPaulo Zanoni {
12943eaea13SPaulo Zanoni 	assert_spin_locked(&dev_priv->irq_lock);
13043eaea13SPaulo Zanoni 
131c67a470bSPaulo Zanoni 	if (dev_priv->pc8.irqs_disabled) {
132c67a470bSPaulo Zanoni 		WARN(1, "IRQs disabled\n");
133c67a470bSPaulo Zanoni 		dev_priv->pc8.regsave.gtimr &= ~interrupt_mask;
134c67a470bSPaulo Zanoni 		dev_priv->pc8.regsave.gtimr |= (~enabled_irq_mask &
135c67a470bSPaulo Zanoni 						interrupt_mask);
136c67a470bSPaulo Zanoni 		return;
137c67a470bSPaulo Zanoni 	}
138c67a470bSPaulo Zanoni 
13943eaea13SPaulo Zanoni 	dev_priv->gt_irq_mask &= ~interrupt_mask;
14043eaea13SPaulo Zanoni 	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
14143eaea13SPaulo Zanoni 	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
14243eaea13SPaulo Zanoni 	POSTING_READ(GTIMR);
14343eaea13SPaulo Zanoni }
14443eaea13SPaulo Zanoni 
14543eaea13SPaulo Zanoni void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
14643eaea13SPaulo Zanoni {
14743eaea13SPaulo Zanoni 	ilk_update_gt_irq(dev_priv, mask, mask);
14843eaea13SPaulo Zanoni }
14943eaea13SPaulo Zanoni 
15043eaea13SPaulo Zanoni void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
15143eaea13SPaulo Zanoni {
15243eaea13SPaulo Zanoni 	ilk_update_gt_irq(dev_priv, mask, 0);
15343eaea13SPaulo Zanoni }
15443eaea13SPaulo Zanoni 
155edbfdb45SPaulo Zanoni /**
156edbfdb45SPaulo Zanoni  * snb_update_pm_irq - update GEN6_PMIMR
157edbfdb45SPaulo Zanoni  * @dev_priv: driver private
158edbfdb45SPaulo Zanoni  * @interrupt_mask: mask of interrupt bits to update
159edbfdb45SPaulo Zanoni  * @enabled_irq_mask: mask of interrupt bits to enable
160edbfdb45SPaulo Zanoni  */
161edbfdb45SPaulo Zanoni static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
162edbfdb45SPaulo Zanoni 			      uint32_t interrupt_mask,
163edbfdb45SPaulo Zanoni 			      uint32_t enabled_irq_mask)
164edbfdb45SPaulo Zanoni {
165605cd25bSPaulo Zanoni 	uint32_t new_val;
166edbfdb45SPaulo Zanoni 
167edbfdb45SPaulo Zanoni 	assert_spin_locked(&dev_priv->irq_lock);
168edbfdb45SPaulo Zanoni 
169c67a470bSPaulo Zanoni 	if (dev_priv->pc8.irqs_disabled) {
170c67a470bSPaulo Zanoni 		WARN(1, "IRQs disabled\n");
171c67a470bSPaulo Zanoni 		dev_priv->pc8.regsave.gen6_pmimr &= ~interrupt_mask;
172c67a470bSPaulo Zanoni 		dev_priv->pc8.regsave.gen6_pmimr |= (~enabled_irq_mask &
173c67a470bSPaulo Zanoni 						     interrupt_mask);
174c67a470bSPaulo Zanoni 		return;
175c67a470bSPaulo Zanoni 	}
176c67a470bSPaulo Zanoni 
177605cd25bSPaulo Zanoni 	new_val = dev_priv->pm_irq_mask;
178f52ecbcfSPaulo Zanoni 	new_val &= ~interrupt_mask;
179f52ecbcfSPaulo Zanoni 	new_val |= (~enabled_irq_mask & interrupt_mask);
180f52ecbcfSPaulo Zanoni 
181605cd25bSPaulo Zanoni 	if (new_val != dev_priv->pm_irq_mask) {
182605cd25bSPaulo Zanoni 		dev_priv->pm_irq_mask = new_val;
183605cd25bSPaulo Zanoni 		I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask);
184edbfdb45SPaulo Zanoni 		POSTING_READ(GEN6_PMIMR);
185edbfdb45SPaulo Zanoni 	}
186f52ecbcfSPaulo Zanoni }
187edbfdb45SPaulo Zanoni 
188edbfdb45SPaulo Zanoni void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
189edbfdb45SPaulo Zanoni {
190edbfdb45SPaulo Zanoni 	snb_update_pm_irq(dev_priv, mask, mask);
191edbfdb45SPaulo Zanoni }
192edbfdb45SPaulo Zanoni 
193edbfdb45SPaulo Zanoni void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
194edbfdb45SPaulo Zanoni {
195edbfdb45SPaulo Zanoni 	snb_update_pm_irq(dev_priv, mask, 0);
196edbfdb45SPaulo Zanoni }
197edbfdb45SPaulo Zanoni 
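/*
 * Worked example (illustrative only) of the two-argument update helpers
 * above: @interrupt_mask selects which bits may change, @enabled_irq_mask
 * selects which of those bits end up enabled (i.e. cleared in the IMR).
 *
 *	ilk_update_gt_irq(dev_priv, 0x0c, 0x04);
 *
 * first clears bits 0x0c from gt_irq_mask and then ORs back
 * ~0x04 & 0x0c = 0x08, so bit 0x04 is unmasked (enabled) while bit 0x08
 * stays masked.  ilk_enable_gt_irq()/snb_enable_pm_irq() are simply the
 * (mask, mask) case and ilk_disable_gt_irq()/snb_disable_pm_irq() the
 * (mask, 0) case of this pattern.
 */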
1988664281bSPaulo Zanoni static bool ivb_can_enable_err_int(struct drm_device *dev)
1998664281bSPaulo Zanoni {
2008664281bSPaulo Zanoni 	struct drm_i915_private *dev_priv = dev->dev_private;
2018664281bSPaulo Zanoni 	struct intel_crtc *crtc;
2028664281bSPaulo Zanoni 	enum pipe pipe;
2038664281bSPaulo Zanoni 
2044bc9d430SDaniel Vetter 	assert_spin_locked(&dev_priv->irq_lock);
2054bc9d430SDaniel Vetter 
2068664281bSPaulo Zanoni 	for_each_pipe(pipe) {
2078664281bSPaulo Zanoni 		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
2088664281bSPaulo Zanoni 
2098664281bSPaulo Zanoni 		if (crtc->cpu_fifo_underrun_disabled)
2108664281bSPaulo Zanoni 			return false;
2118664281bSPaulo Zanoni 	}
2128664281bSPaulo Zanoni 
2138664281bSPaulo Zanoni 	return true;
2148664281bSPaulo Zanoni }
2158664281bSPaulo Zanoni 
2168664281bSPaulo Zanoni static bool cpt_can_enable_serr_int(struct drm_device *dev)
2178664281bSPaulo Zanoni {
2188664281bSPaulo Zanoni 	struct drm_i915_private *dev_priv = dev->dev_private;
2198664281bSPaulo Zanoni 	enum pipe pipe;
2208664281bSPaulo Zanoni 	struct intel_crtc *crtc;
2218664281bSPaulo Zanoni 
222fee884edSDaniel Vetter 	assert_spin_locked(&dev_priv->irq_lock);
223fee884edSDaniel Vetter 
2248664281bSPaulo Zanoni 	for_each_pipe(pipe) {
2258664281bSPaulo Zanoni 		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
2268664281bSPaulo Zanoni 
2278664281bSPaulo Zanoni 		if (crtc->pch_fifo_underrun_disabled)
2288664281bSPaulo Zanoni 			return false;
2298664281bSPaulo Zanoni 	}
2308664281bSPaulo Zanoni 
2318664281bSPaulo Zanoni 	return true;
2328664281bSPaulo Zanoni }
2338664281bSPaulo Zanoni 
2348664281bSPaulo Zanoni static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
2358664281bSPaulo Zanoni 						 enum pipe pipe, bool enable)
2368664281bSPaulo Zanoni {
2378664281bSPaulo Zanoni 	struct drm_i915_private *dev_priv = dev->dev_private;
2388664281bSPaulo Zanoni 	uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN :
2398664281bSPaulo Zanoni 					  DE_PIPEB_FIFO_UNDERRUN;
2408664281bSPaulo Zanoni 
2418664281bSPaulo Zanoni 	if (enable)
2428664281bSPaulo Zanoni 		ironlake_enable_display_irq(dev_priv, bit);
2438664281bSPaulo Zanoni 	else
2448664281bSPaulo Zanoni 		ironlake_disable_display_irq(dev_priv, bit);
2458664281bSPaulo Zanoni }
2468664281bSPaulo Zanoni 
2478664281bSPaulo Zanoni static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
2487336df65SDaniel Vetter 						  enum pipe pipe, bool enable)
2498664281bSPaulo Zanoni {
2508664281bSPaulo Zanoni 	struct drm_i915_private *dev_priv = dev->dev_private;
2518664281bSPaulo Zanoni 	if (enable) {
2527336df65SDaniel Vetter 		I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));
2537336df65SDaniel Vetter 
2548664281bSPaulo Zanoni 		if (!ivb_can_enable_err_int(dev))
2558664281bSPaulo Zanoni 			return;
2568664281bSPaulo Zanoni 
2578664281bSPaulo Zanoni 		ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
2588664281bSPaulo Zanoni 	} else {
2597336df65SDaniel Vetter 		bool was_enabled = !(I915_READ(DEIMR) & DE_ERR_INT_IVB);
2607336df65SDaniel Vetter 
2617336df65SDaniel Vetter 		/* Change the state _after_ we've read out the current one. */
2628664281bSPaulo Zanoni 		ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
2637336df65SDaniel Vetter 
2647336df65SDaniel Vetter 		if (!was_enabled &&
2657336df65SDaniel Vetter 		    (I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe))) {
2667336df65SDaniel Vetter 			DRM_DEBUG_KMS("uncleared fifo underrun on pipe %c\n",
2677336df65SDaniel Vetter 				      pipe_name(pipe));
2687336df65SDaniel Vetter 		}
2698664281bSPaulo Zanoni 	}
2708664281bSPaulo Zanoni }
2718664281bSPaulo Zanoni 
272fee884edSDaniel Vetter /**
273fee884edSDaniel Vetter  * ibx_display_interrupt_update - update SDEIMR
274fee884edSDaniel Vetter  * @dev_priv: driver private
275fee884edSDaniel Vetter  * @interrupt_mask: mask of interrupt bits to update
276fee884edSDaniel Vetter  * @enabled_irq_mask: mask of interrupt bits to enable
277fee884edSDaniel Vetter  */
278fee884edSDaniel Vetter static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
279fee884edSDaniel Vetter 					 uint32_t interrupt_mask,
280fee884edSDaniel Vetter 					 uint32_t enabled_irq_mask)
281fee884edSDaniel Vetter {
282fee884edSDaniel Vetter 	uint32_t sdeimr = I915_READ(SDEIMR);
283fee884edSDaniel Vetter 	sdeimr &= ~interrupt_mask;
284fee884edSDaniel Vetter 	sdeimr |= (~enabled_irq_mask & interrupt_mask);
285fee884edSDaniel Vetter 
286fee884edSDaniel Vetter 	assert_spin_locked(&dev_priv->irq_lock);
287fee884edSDaniel Vetter 
288c67a470bSPaulo Zanoni 	if (dev_priv->pc8.irqs_disabled &&
289c67a470bSPaulo Zanoni 	    (interrupt_mask & SDE_HOTPLUG_MASK_CPT)) {
290c67a470bSPaulo Zanoni 		WARN(1, "IRQs disabled\n");
291c67a470bSPaulo Zanoni 		dev_priv->pc8.regsave.sdeimr &= ~interrupt_mask;
292c67a470bSPaulo Zanoni 		dev_priv->pc8.regsave.sdeimr |= (~enabled_irq_mask &
293c67a470bSPaulo Zanoni 						 interrupt_mask);
294c67a470bSPaulo Zanoni 		return;
295c67a470bSPaulo Zanoni 	}
296c67a470bSPaulo Zanoni 
297fee884edSDaniel Vetter 	I915_WRITE(SDEIMR, sdeimr);
298fee884edSDaniel Vetter 	POSTING_READ(SDEIMR);
299fee884edSDaniel Vetter }
300fee884edSDaniel Vetter #define ibx_enable_display_interrupt(dev_priv, bits) \
301fee884edSDaniel Vetter 	ibx_display_interrupt_update((dev_priv), (bits), (bits))
302fee884edSDaniel Vetter #define ibx_disable_display_interrupt(dev_priv, bits) \
303fee884edSDaniel Vetter 	ibx_display_interrupt_update((dev_priv), (bits), 0)
304fee884edSDaniel Vetter 
305de28075dSDaniel Vetter static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
306de28075dSDaniel Vetter 					    enum transcoder pch_transcoder,
3078664281bSPaulo Zanoni 					    bool enable)
3088664281bSPaulo Zanoni {
3098664281bSPaulo Zanoni 	struct drm_i915_private *dev_priv = dev->dev_private;
310de28075dSDaniel Vetter 	uint32_t bit = (pch_transcoder == TRANSCODER_A) ?
311de28075dSDaniel Vetter 		       SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;
3128664281bSPaulo Zanoni 
3138664281bSPaulo Zanoni 	if (enable)
314fee884edSDaniel Vetter 		ibx_enable_display_interrupt(dev_priv, bit);
3158664281bSPaulo Zanoni 	else
316fee884edSDaniel Vetter 		ibx_disable_display_interrupt(dev_priv, bit);
3178664281bSPaulo Zanoni }
3188664281bSPaulo Zanoni 
3198664281bSPaulo Zanoni static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
3208664281bSPaulo Zanoni 					    enum transcoder pch_transcoder,
3218664281bSPaulo Zanoni 					    bool enable)
3228664281bSPaulo Zanoni {
3238664281bSPaulo Zanoni 	struct drm_i915_private *dev_priv = dev->dev_private;
3248664281bSPaulo Zanoni 
3258664281bSPaulo Zanoni 	if (enable) {
3261dd246fbSDaniel Vetter 		I915_WRITE(SERR_INT,
3271dd246fbSDaniel Vetter 			   SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));
3281dd246fbSDaniel Vetter 
3298664281bSPaulo Zanoni 		if (!cpt_can_enable_serr_int(dev))
3308664281bSPaulo Zanoni 			return;
3318664281bSPaulo Zanoni 
332fee884edSDaniel Vetter 		ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT);
3338664281bSPaulo Zanoni 	} else {
3341dd246fbSDaniel Vetter 		uint32_t tmp = I915_READ(SERR_INT);
3351dd246fbSDaniel Vetter 		bool was_enabled = !(I915_READ(SDEIMR) & SDE_ERROR_CPT);
3361dd246fbSDaniel Vetter 
3371dd246fbSDaniel Vetter 		/* Change the state _after_ we've read out the current one. */
338fee884edSDaniel Vetter 		ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT);
3391dd246fbSDaniel Vetter 
3401dd246fbSDaniel Vetter 		if (!was_enabled &&
3411dd246fbSDaniel Vetter 		    (tmp & SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder))) {
3421dd246fbSDaniel Vetter 			DRM_DEBUG_KMS("uncleared pch fifo underrun on pch transcoder %c\n",
3431dd246fbSDaniel Vetter 				      transcoder_name(pch_transcoder));
3441dd246fbSDaniel Vetter 		}
3458664281bSPaulo Zanoni 	}
3468664281bSPaulo Zanoni }
3478664281bSPaulo Zanoni 
3488664281bSPaulo Zanoni /**
3498664281bSPaulo Zanoni  * intel_set_cpu_fifo_underrun_reporting - enable/disable FIFO underrun messages
3508664281bSPaulo Zanoni  * @dev: drm device
3518664281bSPaulo Zanoni  * @pipe: pipe
3528664281bSPaulo Zanoni  * @enable: true if we want to report FIFO underrun errors, false otherwise
3538664281bSPaulo Zanoni  *
3548664281bSPaulo Zanoni  * This function makes us disable or enable CPU fifo underruns for a specific
3558664281bSPaulo Zanoni  * This function disables or enables CPU FIFO underrun reporting for a
3568664281bSPaulo Zanoni  * specific pipe. Note that on some gens (e.g. IVB, HSW), disabling FIFO
3578664281bSPaulo Zanoni  * underrun reporting for one pipe may also disable the CPU error interrupts
3588664281bSPaulo Zanoni  * for the other pipes, because there is just one interrupt mask/enable
3598664281bSPaulo Zanoni  *
3608664281bSPaulo Zanoni  * Returns the previous state of underrun reporting.
3618664281bSPaulo Zanoni  */
3628664281bSPaulo Zanoni bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
3638664281bSPaulo Zanoni 					   enum pipe pipe, bool enable)
3648664281bSPaulo Zanoni {
3658664281bSPaulo Zanoni 	struct drm_i915_private *dev_priv = dev->dev_private;
3668664281bSPaulo Zanoni 	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
3678664281bSPaulo Zanoni 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3688664281bSPaulo Zanoni 	unsigned long flags;
3698664281bSPaulo Zanoni 	bool ret;
3708664281bSPaulo Zanoni 
3718664281bSPaulo Zanoni 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
3728664281bSPaulo Zanoni 
3738664281bSPaulo Zanoni 	ret = !intel_crtc->cpu_fifo_underrun_disabled;
3748664281bSPaulo Zanoni 
3758664281bSPaulo Zanoni 	if (enable == ret)
3768664281bSPaulo Zanoni 		goto done;
3778664281bSPaulo Zanoni 
3788664281bSPaulo Zanoni 	intel_crtc->cpu_fifo_underrun_disabled = !enable;
3798664281bSPaulo Zanoni 
3808664281bSPaulo Zanoni 	if (IS_GEN5(dev) || IS_GEN6(dev))
3818664281bSPaulo Zanoni 		ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
3828664281bSPaulo Zanoni 	else if (IS_GEN7(dev))
3837336df65SDaniel Vetter 		ivybridge_set_fifo_underrun_reporting(dev, pipe, enable);
3848664281bSPaulo Zanoni 
3858664281bSPaulo Zanoni done:
3868664281bSPaulo Zanoni 	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
3878664281bSPaulo Zanoni 	return ret;
3888664281bSPaulo Zanoni }
3898664281bSPaulo Zanoni 
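/*
 * Minimal usage sketch (not part of the original file; the function name is
 * hypothetical): because intel_set_cpu_fifo_underrun_reporting() returns the
 * previous reporting state, a caller can silence expected underruns around
 * an operation and then restore whatever state was there before.
 */
static void __maybe_unused example_silence_cpu_underruns(struct drm_device *dev,
							  enum pipe pipe)
{
	bool was_enabled;

	/* returns the previous state of underrun reporting */
	was_enabled = intel_set_cpu_fifo_underrun_reporting(dev, pipe, false);

	/* ... reconfigure the pipe while underruns are expected ... */

	intel_set_cpu_fifo_underrun_reporting(dev, pipe, was_enabled);
}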
3908664281bSPaulo Zanoni /**
3918664281bSPaulo Zanoni  * intel_set_pch_fifo_underrun_reporting - enable/disable FIFO underrun messages
3928664281bSPaulo Zanoni  * @dev: drm device
3938664281bSPaulo Zanoni  * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
3948664281bSPaulo Zanoni  * @enable: true if we want to report FIFO underrun errors, false otherwise
3958664281bSPaulo Zanoni  *
3968664281bSPaulo Zanoni  * This function disables or enables PCH FIFO underrun reporting for a
3978664281bSPaulo Zanoni  * specific PCH transcoder. Note that on some PCHs (e.g. CPT/PPT), disabling
3988664281bSPaulo Zanoni  * FIFO underrun reporting for one transcoder may also disable the PCH error
3998664281bSPaulo Zanoni  * interrupts for the other transcoders, because there is just one interrupt
4008664281bSPaulo Zanoni  * mask/enable bit for all the transcoders.
4018664281bSPaulo Zanoni  *
4028664281bSPaulo Zanoni  * Returns the previous state of underrun reporting.
4038664281bSPaulo Zanoni  */
4048664281bSPaulo Zanoni bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
4058664281bSPaulo Zanoni 					   enum transcoder pch_transcoder,
4068664281bSPaulo Zanoni 					   bool enable)
4078664281bSPaulo Zanoni {
4088664281bSPaulo Zanoni 	struct drm_i915_private *dev_priv = dev->dev_private;
409de28075dSDaniel Vetter 	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
410de28075dSDaniel Vetter 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4118664281bSPaulo Zanoni 	unsigned long flags;
4128664281bSPaulo Zanoni 	bool ret;
4138664281bSPaulo Zanoni 
414de28075dSDaniel Vetter 	/*
415de28075dSDaniel Vetter 	 * NOTE: Pre-LPT has a fixed cpu pipe -> pch transcoder mapping, but LPT
416de28075dSDaniel Vetter 	 * has only one pch transcoder A that all pipes can use. To avoid racy
417de28075dSDaniel Vetter 	 * pch transcoder -> pipe lookups from interrupt code, simply store the
418de28075dSDaniel Vetter 	 * underrun statistics in crtc A. Since we never expose this anywhere
419de28075dSDaniel Vetter 	 * nor use it outside of the fifo underrun code here, using the "wrong"
420de28075dSDaniel Vetter 	 * crtc on LPT won't cause issues.
421de28075dSDaniel Vetter 	 */
4228664281bSPaulo Zanoni 
4238664281bSPaulo Zanoni 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
4248664281bSPaulo Zanoni 
4258664281bSPaulo Zanoni 	ret = !intel_crtc->pch_fifo_underrun_disabled;
4268664281bSPaulo Zanoni 
4278664281bSPaulo Zanoni 	if (enable == ret)
4288664281bSPaulo Zanoni 		goto done;
4298664281bSPaulo Zanoni 
4308664281bSPaulo Zanoni 	intel_crtc->pch_fifo_underrun_disabled = !enable;
4318664281bSPaulo Zanoni 
4328664281bSPaulo Zanoni 	if (HAS_PCH_IBX(dev))
433de28075dSDaniel Vetter 		ibx_set_fifo_underrun_reporting(dev, pch_transcoder, enable);
4348664281bSPaulo Zanoni 	else
4358664281bSPaulo Zanoni 		cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable);
4368664281bSPaulo Zanoni 
4378664281bSPaulo Zanoni done:
4388664281bSPaulo Zanoni 	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
4398664281bSPaulo Zanoni 	return ret;
4408664281bSPaulo Zanoni }
4418664281bSPaulo Zanoni 
4428664281bSPaulo Zanoni 
4437c463586SKeith Packard void
4447c463586SKeith Packard i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
4457c463586SKeith Packard {
4469db4a9c7SJesse Barnes 	u32 reg = PIPESTAT(pipe);
44746c06a30SVille Syrjälä 	u32 pipestat = I915_READ(reg) & 0x7fff0000;
4487c463586SKeith Packard 
449b79480baSDaniel Vetter 	assert_spin_locked(&dev_priv->irq_lock);
450b79480baSDaniel Vetter 
45146c06a30SVille Syrjälä 	if ((pipestat & mask) == mask)
45246c06a30SVille Syrjälä 		return;
45346c06a30SVille Syrjälä 
4547c463586SKeith Packard 	/* Enable the interrupt, clear any pending status */
45546c06a30SVille Syrjälä 	pipestat |= mask | (mask >> 16);
45646c06a30SVille Syrjälä 	I915_WRITE(reg, pipestat);
4573143a2bfSChris Wilson 	POSTING_READ(reg);
4587c463586SKeith Packard }
4597c463586SKeith Packard 
4607c463586SKeith Packard void
4617c463586SKeith Packard i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
4627c463586SKeith Packard {
4639db4a9c7SJesse Barnes 	u32 reg = PIPESTAT(pipe);
46446c06a30SVille Syrjälä 	u32 pipestat = I915_READ(reg) & 0x7fff0000;
4657c463586SKeith Packard 
466b79480baSDaniel Vetter 	assert_spin_locked(&dev_priv->irq_lock);
467b79480baSDaniel Vetter 
46846c06a30SVille Syrjälä 	if ((pipestat & mask) == 0)
46946c06a30SVille Syrjälä 		return;
47046c06a30SVille Syrjälä 
47146c06a30SVille Syrjälä 	pipestat &= ~mask;
47246c06a30SVille Syrjälä 	I915_WRITE(reg, pipestat);
4733143a2bfSChris Wilson 	POSTING_READ(reg);
4747c463586SKeith Packard }
4757c463586SKeith Packard 
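/*
 * Illustrative note (an interpretation, not original text) on the PIPESTAT
 * layout assumed by the two helpers above: the high 16 bits of the register
 * are interrupt *enable* bits and the low 16 bits are the matching *status*
 * bits, each enable bit sitting 16 positions above its status bit.  That is
 * why the read is masked with 0x7fff0000 (keep the enables, never write back
 * stale status) and why enabling uses
 *
 *	pipestat |= mask | (mask >> 16);
 *
 * which sets the enable bit and writes 1 to the status bit to clear any
 * already-pending event.
 */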
476c0e09200SDave Airlie /**
477f49e38ddSJani Nikula  * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
47801c66889SZhao Yakui  */
479f49e38ddSJani Nikula static void i915_enable_asle_pipestat(struct drm_device *dev)
48001c66889SZhao Yakui {
4811ec14ad3SChris Wilson 	drm_i915_private_t *dev_priv = dev->dev_private;
4821ec14ad3SChris Wilson 	unsigned long irqflags;
4831ec14ad3SChris Wilson 
484f49e38ddSJani Nikula 	if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
485f49e38ddSJani Nikula 		return;
486f49e38ddSJani Nikula 
4871ec14ad3SChris Wilson 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
48801c66889SZhao Yakui 
489f898780bSJani Nikula 	i915_enable_pipestat(dev_priv, 1, PIPE_LEGACY_BLC_EVENT_ENABLE);
490a6c45cf0SChris Wilson 	if (INTEL_INFO(dev)->gen >= 4)
491f898780bSJani Nikula 		i915_enable_pipestat(dev_priv, 0, PIPE_LEGACY_BLC_EVENT_ENABLE);
4921ec14ad3SChris Wilson 
4931ec14ad3SChris Wilson 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
49401c66889SZhao Yakui }
49501c66889SZhao Yakui 
49601c66889SZhao Yakui /**
4970a3e67a4SJesse Barnes  * i915_pipe_enabled - check if a pipe is enabled
4980a3e67a4SJesse Barnes  * @dev: DRM device
4990a3e67a4SJesse Barnes  * @pipe: pipe to check
5000a3e67a4SJesse Barnes  *
5010a3e67a4SJesse Barnes  * Reading certain registers when the pipe is disabled can hang the chip.
5020a3e67a4SJesse Barnes  * Use this routine to make sure the PLL is running and the pipe is active
5030a3e67a4SJesse Barnes  * before reading such registers if unsure.
5040a3e67a4SJesse Barnes  */
5050a3e67a4SJesse Barnes static int
5060a3e67a4SJesse Barnes i915_pipe_enabled(struct drm_device *dev, int pipe)
5070a3e67a4SJesse Barnes {
5080a3e67a4SJesse Barnes 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
509702e7a56SPaulo Zanoni 
510a01025afSDaniel Vetter 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
511a01025afSDaniel Vetter 		/* Locking is horribly broken here, but whatever. */
512a01025afSDaniel Vetter 		struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
513a01025afSDaniel Vetter 		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
51471f8ba6bSPaulo Zanoni 
515a01025afSDaniel Vetter 		return intel_crtc->active;
516a01025afSDaniel Vetter 	} else {
517a01025afSDaniel Vetter 		return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
518a01025afSDaniel Vetter 	}
5190a3e67a4SJesse Barnes }
5200a3e67a4SJesse Barnes 
52142f52ef8SKeith Packard /* Called from drm generic code, passed a 'crtc', which
52242f52ef8SKeith Packard  * we use as a pipe index
52342f52ef8SKeith Packard  */
524f71d4af4SJesse Barnes static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
5250a3e67a4SJesse Barnes {
5260a3e67a4SJesse Barnes 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
5270a3e67a4SJesse Barnes 	unsigned long high_frame;
5280a3e67a4SJesse Barnes 	unsigned long low_frame;
529391f75e2SVille Syrjälä 	u32 high1, high2, low, pixel, vbl_start;
5300a3e67a4SJesse Barnes 
5310a3e67a4SJesse Barnes 	if (!i915_pipe_enabled(dev, pipe)) {
53244d98a61SZhao Yakui 		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
5339db4a9c7SJesse Barnes 				"pipe %c\n", pipe_name(pipe));
5340a3e67a4SJesse Barnes 		return 0;
5350a3e67a4SJesse Barnes 	}
5360a3e67a4SJesse Barnes 
537391f75e2SVille Syrjälä 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
538391f75e2SVille Syrjälä 		struct intel_crtc *intel_crtc =
539391f75e2SVille Syrjälä 			to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
540391f75e2SVille Syrjälä 		const struct drm_display_mode *mode =
541391f75e2SVille Syrjälä 			&intel_crtc->config.adjusted_mode;
542391f75e2SVille Syrjälä 
543391f75e2SVille Syrjälä 		vbl_start = mode->crtc_vblank_start * mode->crtc_htotal;
544391f75e2SVille Syrjälä 	} else {
545391f75e2SVille Syrjälä 		enum transcoder cpu_transcoder =
546391f75e2SVille Syrjälä 			intel_pipe_to_cpu_transcoder(dev_priv, pipe);
547391f75e2SVille Syrjälä 		u32 htotal;
548391f75e2SVille Syrjälä 
549391f75e2SVille Syrjälä 		htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1;
550391f75e2SVille Syrjälä 		vbl_start = (I915_READ(VBLANK(cpu_transcoder)) & 0x1fff) + 1;
551391f75e2SVille Syrjälä 
552391f75e2SVille Syrjälä 		vbl_start *= htotal;
553391f75e2SVille Syrjälä 	}
554391f75e2SVille Syrjälä 
5559db4a9c7SJesse Barnes 	high_frame = PIPEFRAME(pipe);
5569db4a9c7SJesse Barnes 	low_frame = PIPEFRAMEPIXEL(pipe);
5575eddb70bSChris Wilson 
5580a3e67a4SJesse Barnes 	/*
5590a3e67a4SJesse Barnes 	 * High & low register fields aren't synchronized, so make sure
5600a3e67a4SJesse Barnes 	 * we get a low value that's stable across two reads of the high
5610a3e67a4SJesse Barnes 	 * register.
5620a3e67a4SJesse Barnes 	 */
5630a3e67a4SJesse Barnes 	do {
5645eddb70bSChris Wilson 		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
565391f75e2SVille Syrjälä 		low   = I915_READ(low_frame);
5665eddb70bSChris Wilson 		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
5670a3e67a4SJesse Barnes 	} while (high1 != high2);
5680a3e67a4SJesse Barnes 
5695eddb70bSChris Wilson 	high1 >>= PIPE_FRAME_HIGH_SHIFT;
570391f75e2SVille Syrjälä 	pixel = low & PIPE_PIXEL_MASK;
5715eddb70bSChris Wilson 	low >>= PIPE_FRAME_LOW_SHIFT;
572391f75e2SVille Syrjälä 
573391f75e2SVille Syrjälä 	/*
574391f75e2SVille Syrjälä 	 * The frame counter increments at the beginning of the active period.
575391f75e2SVille Syrjälä 	 * Cook up a vblank counter by also checking the pixel
576391f75e2SVille Syrjälä 	 * counter against vblank start.
577391f75e2SVille Syrjälä 	 */
578391f75e2SVille Syrjälä 	return ((high1 << 8) | low) + (pixel >= vbl_start);
5790a3e67a4SJesse Barnes }
5800a3e67a4SJesse Barnes 
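/*
 * Worked example (illustrative numbers) for the cooked vblank count above.
 * The hardware frame counter only increments at the start of active, so the
 * pixel counter is compared against the start of vblank to make the reported
 * count flip over at vblank start instead:
 *
 *	crtc_htotal = 2200, crtc_vblank_start = 1080  =>  vbl_start = 2376000
 *	pixel = 2390000  =>  (pixel >= vbl_start) == 1, so one is added to the
 *	raw ((high1 << 8) | low) value; earlier in the frame the term is 0 and
 *	the raw counter is returned unchanged.
 */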
581f71d4af4SJesse Barnes static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
5829880b7a5SJesse Barnes {
5839880b7a5SJesse Barnes 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
5849db4a9c7SJesse Barnes 	int reg = PIPE_FRMCOUNT_GM45(pipe);
5859880b7a5SJesse Barnes 
5869880b7a5SJesse Barnes 	if (!i915_pipe_enabled(dev, pipe)) {
58744d98a61SZhao Yakui 		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
5889db4a9c7SJesse Barnes 				 "pipe %c\n", pipe_name(pipe));
5899880b7a5SJesse Barnes 		return 0;
5909880b7a5SJesse Barnes 	}
5919880b7a5SJesse Barnes 
5929880b7a5SJesse Barnes 	return I915_READ(reg);
5939880b7a5SJesse Barnes }
5949880b7a5SJesse Barnes 
595f71d4af4SJesse Barnes static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
5960af7e4dfSMario Kleiner 			     int *vpos, int *hpos)
5970af7e4dfSMario Kleiner {
598*c2baf4b7SVille Syrjälä 	struct drm_i915_private *dev_priv = dev->dev_private;
599*c2baf4b7SVille Syrjälä 	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
600*c2baf4b7SVille Syrjälä 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
601*c2baf4b7SVille Syrjälä 	const struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
602*c2baf4b7SVille Syrjälä 	u32 position;
6030af7e4dfSMario Kleiner 	int vbl_start, vbl_end, htotal, vtotal;
6040af7e4dfSMario Kleiner 	bool in_vbl = true;
6050af7e4dfSMario Kleiner 	int ret = 0;
6060af7e4dfSMario Kleiner 
607*c2baf4b7SVille Syrjälä 	if (!intel_crtc->active) {
6080af7e4dfSMario Kleiner 		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
6099db4a9c7SJesse Barnes 				 "pipe %c\n", pipe_name(pipe));
6100af7e4dfSMario Kleiner 		return 0;
6110af7e4dfSMario Kleiner 	}
6120af7e4dfSMario Kleiner 
613*c2baf4b7SVille Syrjälä 	htotal = mode->crtc_htotal;
614*c2baf4b7SVille Syrjälä 	vtotal = mode->crtc_vtotal;
615*c2baf4b7SVille Syrjälä 	vbl_start = mode->crtc_vblank_start;
616*c2baf4b7SVille Syrjälä 	vbl_end = mode->crtc_vblank_end;
6170af7e4dfSMario Kleiner 
618*c2baf4b7SVille Syrjälä 	ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;
619*c2baf4b7SVille Syrjälä 
620*c2baf4b7SVille Syrjälä 	if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
6210af7e4dfSMario Kleiner 		/* No obvious pixelcount register. Only query vertical
6220af7e4dfSMario Kleiner 		 * scanout position from Display scan line register.
6230af7e4dfSMario Kleiner 		 */
6240af7e4dfSMario Kleiner 		position = I915_READ(PIPEDSL(pipe));
6250af7e4dfSMario Kleiner 
6260af7e4dfSMario Kleiner 		/* Decode into vertical scanout position. Don't have
6270af7e4dfSMario Kleiner 		 * horizontal scanout position.
6280af7e4dfSMario Kleiner 		 */
6290af7e4dfSMario Kleiner 		*vpos = position & 0x1fff;
6300af7e4dfSMario Kleiner 		*hpos = 0;
6310af7e4dfSMario Kleiner 	} else {
6320af7e4dfSMario Kleiner 		/* Have access to pixelcount since start of frame.
6330af7e4dfSMario Kleiner 		 * We can split this into vertical and horizontal
6340af7e4dfSMario Kleiner 		 * scanout position.
6350af7e4dfSMario Kleiner 		 */
6360af7e4dfSMario Kleiner 		position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
6370af7e4dfSMario Kleiner 
6380af7e4dfSMario Kleiner 		*vpos = position / htotal;
6390af7e4dfSMario Kleiner 		*hpos = position - (*vpos * htotal);
6400af7e4dfSMario Kleiner 	}
6410af7e4dfSMario Kleiner 
642*c2baf4b7SVille Syrjälä 	in_vbl = *vpos >= vbl_start && *vpos < vbl_end;
6430af7e4dfSMario Kleiner 
6440af7e4dfSMario Kleiner 	/* Inside "upper part" of vblank area? Apply corrective offset: */
6450af7e4dfSMario Kleiner 	if (in_vbl && (*vpos >= vbl_start))
6460af7e4dfSMario Kleiner 		*vpos = *vpos - vtotal;
6470af7e4dfSMario Kleiner 
6480af7e4dfSMario Kleiner 	/* In vblank? */
6490af7e4dfSMario Kleiner 	if (in_vbl)
6500af7e4dfSMario Kleiner 		ret |= DRM_SCANOUTPOS_INVBL;
6510af7e4dfSMario Kleiner 
6520af7e4dfSMario Kleiner 	return ret;
6530af7e4dfSMario Kleiner }
6540af7e4dfSMario Kleiner 
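/*
 * Illustrative numbers (not from the original source) for the corrective
 * offset above: with crtc_vtotal = 806 and crtc_vblank_start = 768, a raw
 * scanline of 790 lies inside the vblank and is reported as
 * 790 - 806 = -16, i.e. 16 lines before the start of the next active frame.
 * Scanlines inside the active area are returned unmodified.
 */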
655f71d4af4SJesse Barnes static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
6560af7e4dfSMario Kleiner 			      int *max_error,
6570af7e4dfSMario Kleiner 			      struct timeval *vblank_time,
6580af7e4dfSMario Kleiner 			      unsigned flags)
6590af7e4dfSMario Kleiner {
6604041b853SChris Wilson 	struct drm_crtc *crtc;
6610af7e4dfSMario Kleiner 
6627eb552aeSBen Widawsky 	if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
6634041b853SChris Wilson 		DRM_ERROR("Invalid crtc %d\n", pipe);
6640af7e4dfSMario Kleiner 		return -EINVAL;
6650af7e4dfSMario Kleiner 	}
6660af7e4dfSMario Kleiner 
6670af7e4dfSMario Kleiner 	/* Get drm_crtc to timestamp: */
6684041b853SChris Wilson 	crtc = intel_get_crtc_for_pipe(dev, pipe);
6694041b853SChris Wilson 	if (crtc == NULL) {
6704041b853SChris Wilson 		DRM_ERROR("Invalid crtc %d\n", pipe);
6714041b853SChris Wilson 		return -EINVAL;
6724041b853SChris Wilson 	}
6734041b853SChris Wilson 
6744041b853SChris Wilson 	if (!crtc->enabled) {
6754041b853SChris Wilson 		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
6764041b853SChris Wilson 		return -EBUSY;
6774041b853SChris Wilson 	}
6780af7e4dfSMario Kleiner 
6790af7e4dfSMario Kleiner 	/* Helper routine in DRM core does all the work: */
6804041b853SChris Wilson 	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
6814041b853SChris Wilson 						     vblank_time, flags,
6824041b853SChris Wilson 						     crtc);
6830af7e4dfSMario Kleiner }
6840af7e4dfSMario Kleiner 
68567c347ffSJani Nikula static bool intel_hpd_irq_event(struct drm_device *dev,
68667c347ffSJani Nikula 				struct drm_connector *connector)
687321a1b30SEgbert Eich {
688321a1b30SEgbert Eich 	enum drm_connector_status old_status;
689321a1b30SEgbert Eich 
690321a1b30SEgbert Eich 	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
691321a1b30SEgbert Eich 	old_status = connector->status;
692321a1b30SEgbert Eich 
693321a1b30SEgbert Eich 	connector->status = connector->funcs->detect(connector, false);
69467c347ffSJani Nikula 	if (old_status == connector->status)
69567c347ffSJani Nikula 		return false;
69667c347ffSJani Nikula 
69767c347ffSJani Nikula 	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
698321a1b30SEgbert Eich 		      connector->base.id,
699321a1b30SEgbert Eich 		      drm_get_connector_name(connector),
70067c347ffSJani Nikula 		      drm_get_connector_status_name(old_status),
70167c347ffSJani Nikula 		      drm_get_connector_status_name(connector->status));
70267c347ffSJani Nikula 
70367c347ffSJani Nikula 	return true;
704321a1b30SEgbert Eich }
705321a1b30SEgbert Eich 
7065ca58282SJesse Barnes /*
7075ca58282SJesse Barnes  * Handle hotplug events outside the interrupt handler proper.
7085ca58282SJesse Barnes  */
709ac4c16c5SEgbert Eich #define I915_REENABLE_HOTPLUG_DELAY (2*60*1000)
710ac4c16c5SEgbert Eich 
7115ca58282SJesse Barnes static void i915_hotplug_work_func(struct work_struct *work)
7125ca58282SJesse Barnes {
7135ca58282SJesse Barnes 	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
7145ca58282SJesse Barnes 						    hotplug_work);
7155ca58282SJesse Barnes 	struct drm_device *dev = dev_priv->dev;
716c31c4ba3SKeith Packard 	struct drm_mode_config *mode_config = &dev->mode_config;
717cd569aedSEgbert Eich 	struct intel_connector *intel_connector;
718cd569aedSEgbert Eich 	struct intel_encoder *intel_encoder;
719cd569aedSEgbert Eich 	struct drm_connector *connector;
720cd569aedSEgbert Eich 	unsigned long irqflags;
721cd569aedSEgbert Eich 	bool hpd_disabled = false;
722321a1b30SEgbert Eich 	bool changed = false;
723142e2398SEgbert Eich 	u32 hpd_event_bits;
7245ca58282SJesse Barnes 
72552d7ecedSDaniel Vetter 	/* HPD irq before everything is fully set up. */
72652d7ecedSDaniel Vetter 	if (!dev_priv->enable_hotplug_processing)
72752d7ecedSDaniel Vetter 		return;
72852d7ecedSDaniel Vetter 
729a65e34c7SKeith Packard 	mutex_lock(&mode_config->mutex);
730e67189abSJesse Barnes 	DRM_DEBUG_KMS("running encoder hotplug functions\n");
731e67189abSJesse Barnes 
732cd569aedSEgbert Eich 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
733142e2398SEgbert Eich 
734142e2398SEgbert Eich 	hpd_event_bits = dev_priv->hpd_event_bits;
735142e2398SEgbert Eich 	dev_priv->hpd_event_bits = 0;
736cd569aedSEgbert Eich 	list_for_each_entry(connector, &mode_config->connector_list, head) {
737cd569aedSEgbert Eich 		intel_connector = to_intel_connector(connector);
738cd569aedSEgbert Eich 		intel_encoder = intel_connector->encoder;
739cd569aedSEgbert Eich 		if (intel_encoder->hpd_pin > HPD_NONE &&
740cd569aedSEgbert Eich 		    dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
741cd569aedSEgbert Eich 		    connector->polled == DRM_CONNECTOR_POLL_HPD) {
742cd569aedSEgbert Eich 			DRM_INFO("HPD interrupt storm detected on connector %s: "
743cd569aedSEgbert Eich 				 "switching from hotplug detection to polling\n",
744cd569aedSEgbert Eich 				drm_get_connector_name(connector));
745cd569aedSEgbert Eich 			dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
746cd569aedSEgbert Eich 			connector->polled = DRM_CONNECTOR_POLL_CONNECT
747cd569aedSEgbert Eich 				| DRM_CONNECTOR_POLL_DISCONNECT;
748cd569aedSEgbert Eich 			hpd_disabled = true;
749cd569aedSEgbert Eich 		}
750142e2398SEgbert Eich 		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
751142e2398SEgbert Eich 			DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
752142e2398SEgbert Eich 				      drm_get_connector_name(connector), intel_encoder->hpd_pin);
753142e2398SEgbert Eich 		}
754cd569aedSEgbert Eich 	}
755cd569aedSEgbert Eich 	 /* Disabling HPD on some connectors moves them over to polling, so
756cd569aedSEgbert Eich 	  * make sure polling is enabled even if it had been turned off
757cd569aedSEgbert Eich 	  * because there was previously nothing to poll. */
758ac4c16c5SEgbert Eich 	if (hpd_disabled) {
759cd569aedSEgbert Eich 		drm_kms_helper_poll_enable(dev);
760ac4c16c5SEgbert Eich 		mod_timer(&dev_priv->hotplug_reenable_timer,
761ac4c16c5SEgbert Eich 			  jiffies + msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
762ac4c16c5SEgbert Eich 	}
763cd569aedSEgbert Eich 
764cd569aedSEgbert Eich 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
765cd569aedSEgbert Eich 
766321a1b30SEgbert Eich 	list_for_each_entry(connector, &mode_config->connector_list, head) {
767321a1b30SEgbert Eich 		intel_connector = to_intel_connector(connector);
768321a1b30SEgbert Eich 		intel_encoder = intel_connector->encoder;
769321a1b30SEgbert Eich 		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
770cd569aedSEgbert Eich 			if (intel_encoder->hot_plug)
771cd569aedSEgbert Eich 				intel_encoder->hot_plug(intel_encoder);
772321a1b30SEgbert Eich 			if (intel_hpd_irq_event(dev, connector))
773321a1b30SEgbert Eich 				changed = true;
774321a1b30SEgbert Eich 		}
775321a1b30SEgbert Eich 	}
77640ee3381SKeith Packard 	mutex_unlock(&mode_config->mutex);
77740ee3381SKeith Packard 
778321a1b30SEgbert Eich 	if (changed)
779321a1b30SEgbert Eich 		drm_kms_helper_hotplug_event(dev);
7805ca58282SJesse Barnes }
7815ca58282SJesse Barnes 
782d0ecd7e2SDaniel Vetter static void ironlake_rps_change_irq_handler(struct drm_device *dev)
783f97108d1SJesse Barnes {
784f97108d1SJesse Barnes 	drm_i915_private_t *dev_priv = dev->dev_private;
785b5b72e89SMatthew Garrett 	u32 busy_up, busy_down, max_avg, min_avg;
7869270388eSDaniel Vetter 	u8 new_delay;
7879270388eSDaniel Vetter 
788d0ecd7e2SDaniel Vetter 	spin_lock(&mchdev_lock);
789f97108d1SJesse Barnes 
79073edd18fSDaniel Vetter 	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
79173edd18fSDaniel Vetter 
79220e4d407SDaniel Vetter 	new_delay = dev_priv->ips.cur_delay;
7939270388eSDaniel Vetter 
7947648fa99SJesse Barnes 	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
795b5b72e89SMatthew Garrett 	busy_up = I915_READ(RCPREVBSYTUPAVG);
796b5b72e89SMatthew Garrett 	busy_down = I915_READ(RCPREVBSYTDNAVG);
797f97108d1SJesse Barnes 	max_avg = I915_READ(RCBMAXAVG);
798f97108d1SJesse Barnes 	min_avg = I915_READ(RCBMINAVG);
799f97108d1SJesse Barnes 
800f97108d1SJesse Barnes 	/* Handle RCS change request from hw */
801b5b72e89SMatthew Garrett 	if (busy_up > max_avg) {
80220e4d407SDaniel Vetter 		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
80320e4d407SDaniel Vetter 			new_delay = dev_priv->ips.cur_delay - 1;
80420e4d407SDaniel Vetter 		if (new_delay < dev_priv->ips.max_delay)
80520e4d407SDaniel Vetter 			new_delay = dev_priv->ips.max_delay;
806b5b72e89SMatthew Garrett 	} else if (busy_down < min_avg) {
80720e4d407SDaniel Vetter 		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
80820e4d407SDaniel Vetter 			new_delay = dev_priv->ips.cur_delay + 1;
80920e4d407SDaniel Vetter 		if (new_delay > dev_priv->ips.min_delay)
81020e4d407SDaniel Vetter 			new_delay = dev_priv->ips.min_delay;
811f97108d1SJesse Barnes 	}
812f97108d1SJesse Barnes 
8137648fa99SJesse Barnes 	if (ironlake_set_drps(dev, new_delay))
81420e4d407SDaniel Vetter 		dev_priv->ips.cur_delay = new_delay;
815f97108d1SJesse Barnes 
816d0ecd7e2SDaniel Vetter 	spin_unlock(&mchdev_lock);
8179270388eSDaniel Vetter 
818f97108d1SJesse Barnes 	return;
819f97108d1SJesse Barnes }
820f97108d1SJesse Barnes 
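/*
 * Note on the comparisons above (an interpretation, not original text): on
 * Ironlake a *smaller* delay value corresponds to a higher GPU frequency, so
 * ips.max_delay is the numerically smallest permitted value.  That is why
 * the busy branch decrements cur_delay and then clamps it back up with
 * "if (new_delay < dev_priv->ips.max_delay)", and the idle branch does the
 * mirror image against min_delay.
 */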
821549f7365SChris Wilson static void notify_ring(struct drm_device *dev,
822549f7365SChris Wilson 			struct intel_ring_buffer *ring)
823549f7365SChris Wilson {
824475553deSChris Wilson 	if (ring->obj == NULL)
825475553deSChris Wilson 		return;
826475553deSChris Wilson 
827814e9b57SChris Wilson 	trace_i915_gem_request_complete(ring);
8289862e600SChris Wilson 
829549f7365SChris Wilson 	wake_up_all(&ring->irq_queue);
83010cd45b6SMika Kuoppala 	i915_queue_hangcheck(dev);
831549f7365SChris Wilson }
832549f7365SChris Wilson 
8334912d041SBen Widawsky static void gen6_pm_rps_work(struct work_struct *work)
8343b8d8d91SJesse Barnes {
8354912d041SBen Widawsky 	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
836c6a828d3SDaniel Vetter 						    rps.work);
837edbfdb45SPaulo Zanoni 	u32 pm_iir;
838dd75fdc8SChris Wilson 	int new_delay, adj;
8393b8d8d91SJesse Barnes 
84059cdb63dSDaniel Vetter 	spin_lock_irq(&dev_priv->irq_lock);
841c6a828d3SDaniel Vetter 	pm_iir = dev_priv->rps.pm_iir;
842c6a828d3SDaniel Vetter 	dev_priv->rps.pm_iir = 0;
8434848405cSBen Widawsky 	/* Make sure not to corrupt PMIMR state used by ringbuffer code */
844edbfdb45SPaulo Zanoni 	snb_enable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS);
84559cdb63dSDaniel Vetter 	spin_unlock_irq(&dev_priv->irq_lock);
8464912d041SBen Widawsky 
84760611c13SPaulo Zanoni 	/* Make sure we didn't queue anything we're not going to process. */
84860611c13SPaulo Zanoni 	WARN_ON(pm_iir & ~GEN6_PM_RPS_EVENTS);
84960611c13SPaulo Zanoni 
8504848405cSBen Widawsky 	if ((pm_iir & GEN6_PM_RPS_EVENTS) == 0)
8513b8d8d91SJesse Barnes 		return;
8523b8d8d91SJesse Barnes 
8534fc688ceSJesse Barnes 	mutex_lock(&dev_priv->rps.hw_lock);
8547b9e0ae6SChris Wilson 
855dd75fdc8SChris Wilson 	adj = dev_priv->rps.last_adj;
8567425034aSVille Syrjälä 	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
857dd75fdc8SChris Wilson 		if (adj > 0)
858dd75fdc8SChris Wilson 			adj *= 2;
859dd75fdc8SChris Wilson 		else
860dd75fdc8SChris Wilson 			adj = 1;
861dd75fdc8SChris Wilson 		new_delay = dev_priv->rps.cur_delay + adj;
8627425034aSVille Syrjälä 
8637425034aSVille Syrjälä 		/*
8647425034aSVille Syrjälä 		 * For better performance, jump directly
8657425034aSVille Syrjälä 		 * to RPe if we're below it.
8667425034aSVille Syrjälä 		 */
867dd75fdc8SChris Wilson 		if (new_delay < dev_priv->rps.rpe_delay)
8687425034aSVille Syrjälä 			new_delay = dev_priv->rps.rpe_delay;
869dd75fdc8SChris Wilson 	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
870dd75fdc8SChris Wilson 		if (dev_priv->rps.cur_delay > dev_priv->rps.rpe_delay)
871dd75fdc8SChris Wilson 			new_delay = dev_priv->rps.rpe_delay;
872dd75fdc8SChris Wilson 		else
873dd75fdc8SChris Wilson 			new_delay = dev_priv->rps.min_delay;
874dd75fdc8SChris Wilson 		adj = 0;
875dd75fdc8SChris Wilson 	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
876dd75fdc8SChris Wilson 		if (adj < 0)
877dd75fdc8SChris Wilson 			adj *= 2;
878dd75fdc8SChris Wilson 		else
879dd75fdc8SChris Wilson 			adj = -1;
880dd75fdc8SChris Wilson 		new_delay = dev_priv->rps.cur_delay + adj;
881dd75fdc8SChris Wilson 	} else { /* unknown event */
882dd75fdc8SChris Wilson 		new_delay = dev_priv->rps.cur_delay;
883dd75fdc8SChris Wilson 	}
8843b8d8d91SJesse Barnes 
88579249636SBen Widawsky 	/* sysfs frequency interfaces may have snuck in while servicing the
88679249636SBen Widawsky 	 * interrupt
88779249636SBen Widawsky 	 */
888dd75fdc8SChris Wilson 	if (new_delay < (int)dev_priv->rps.min_delay)
889dd75fdc8SChris Wilson 		new_delay = dev_priv->rps.min_delay;
890dd75fdc8SChris Wilson 	if (new_delay > (int)dev_priv->rps.max_delay)
891dd75fdc8SChris Wilson 		new_delay = dev_priv->rps.max_delay;
892dd75fdc8SChris Wilson 	dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_delay;
893dd75fdc8SChris Wilson 
8940a073b84SJesse Barnes 	if (IS_VALLEYVIEW(dev_priv->dev))
8950a073b84SJesse Barnes 		valleyview_set_rps(dev_priv->dev, new_delay);
8960a073b84SJesse Barnes 	else
8974912d041SBen Widawsky 		gen6_set_rps(dev_priv->dev, new_delay);
8983b8d8d91SJesse Barnes 
8994fc688ceSJesse Barnes 	mutex_unlock(&dev_priv->rps.hw_lock);
9003b8d8d91SJesse Barnes }
9013b8d8d91SJesse Barnes 
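/*
 * Worked example (illustrative only) of the adaptive step above: three
 * consecutive GEN6_PM_RP_UP_THRESHOLD interrupts, starting with last_adj = 0,
 * give steps of +1, +2 and +4 (adj doubles while the direction is sustained),
 * so cur_delay ramps e.g. 8 -> 9 -> 11 -> 15, subject to the rpe_delay jump
 * and the min/max_delay clamps at the end of the function.  A
 * GEN6_PM_RP_DOWN_TIMEOUT event instead drops straight back to rpe_delay (or
 * min_delay if already at or below it) and resets adj to 0.
 */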
902e3689190SBen Widawsky 
903e3689190SBen Widawsky /**
904e3689190SBen Widawsky  * ivybridge_parity_work - Workqueue called when a parity error interrupt
905e3689190SBen Widawsky  * occurred.
906e3689190SBen Widawsky  * @work: workqueue struct
907e3689190SBen Widawsky  *
908e3689190SBen Widawsky  * Doesn't actually do anything except notify userspace. As a consequence of
909e3689190SBen Widawsky  * this event, userspace should try to remap the bad rows, since statistically
910e3689190SBen Widawsky  * the same row is more likely to go bad again.
911e3689190SBen Widawsky  */
912e3689190SBen Widawsky static void ivybridge_parity_work(struct work_struct *work)
913e3689190SBen Widawsky {
914e3689190SBen Widawsky 	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
915a4da4fa4SDaniel Vetter 						    l3_parity.error_work);
916e3689190SBen Widawsky 	u32 error_status, row, bank, subbank;
91735a85ac6SBen Widawsky 	char *parity_event[6];
918e3689190SBen Widawsky 	uint32_t misccpctl;
919e3689190SBen Widawsky 	unsigned long flags;
92035a85ac6SBen Widawsky 	uint8_t slice = 0;
921e3689190SBen Widawsky 
922e3689190SBen Widawsky 	/* We must turn off DOP level clock gating to access the L3 registers.
923e3689190SBen Widawsky 	 * In order to prevent a get/put style interface, acquire struct mutex
924e3689190SBen Widawsky 	 * any time we access those registers.
925e3689190SBen Widawsky 	 */
926e3689190SBen Widawsky 	mutex_lock(&dev_priv->dev->struct_mutex);
927e3689190SBen Widawsky 
92835a85ac6SBen Widawsky 	/* If we've screwed up tracking, just let the interrupt fire again */
92935a85ac6SBen Widawsky 	if (WARN_ON(!dev_priv->l3_parity.which_slice))
93035a85ac6SBen Widawsky 		goto out;
93135a85ac6SBen Widawsky 
932e3689190SBen Widawsky 	misccpctl = I915_READ(GEN7_MISCCPCTL);
933e3689190SBen Widawsky 	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
934e3689190SBen Widawsky 	POSTING_READ(GEN7_MISCCPCTL);
935e3689190SBen Widawsky 
93635a85ac6SBen Widawsky 	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
93735a85ac6SBen Widawsky 		u32 reg;
93835a85ac6SBen Widawsky 
93935a85ac6SBen Widawsky 		slice--;
94035a85ac6SBen Widawsky 		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
94135a85ac6SBen Widawsky 			break;
94235a85ac6SBen Widawsky 
94335a85ac6SBen Widawsky 		dev_priv->l3_parity.which_slice &= ~(1<<slice);
94435a85ac6SBen Widawsky 
94535a85ac6SBen Widawsky 		reg = GEN7_L3CDERRST1 + (slice * 0x200);
94635a85ac6SBen Widawsky 
94735a85ac6SBen Widawsky 		error_status = I915_READ(reg);
948e3689190SBen Widawsky 		row = GEN7_PARITY_ERROR_ROW(error_status);
949e3689190SBen Widawsky 		bank = GEN7_PARITY_ERROR_BANK(error_status);
950e3689190SBen Widawsky 		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
951e3689190SBen Widawsky 
95235a85ac6SBen Widawsky 		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
95335a85ac6SBen Widawsky 		POSTING_READ(reg);
954e3689190SBen Widawsky 
955cce723edSBen Widawsky 		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
956e3689190SBen Widawsky 		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
957e3689190SBen Widawsky 		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
958e3689190SBen Widawsky 		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
95935a85ac6SBen Widawsky 		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
96035a85ac6SBen Widawsky 		parity_event[5] = NULL;
961e3689190SBen Widawsky 
962e3689190SBen Widawsky 		kobject_uevent_env(&dev_priv->dev->primary->kdev.kobj,
963e3689190SBen Widawsky 				   KOBJ_CHANGE, parity_event);
964e3689190SBen Widawsky 
96535a85ac6SBen Widawsky 		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
96635a85ac6SBen Widawsky 			  slice, row, bank, subbank);
967e3689190SBen Widawsky 
96835a85ac6SBen Widawsky 		kfree(parity_event[4]);
969e3689190SBen Widawsky 		kfree(parity_event[3]);
970e3689190SBen Widawsky 		kfree(parity_event[2]);
971e3689190SBen Widawsky 		kfree(parity_event[1]);
972e3689190SBen Widawsky 	}
973e3689190SBen Widawsky 
97435a85ac6SBen Widawsky 	I915_WRITE(GEN7_MISCCPCTL, misccpctl);
97535a85ac6SBen Widawsky 
97635a85ac6SBen Widawsky out:
97735a85ac6SBen Widawsky 	WARN_ON(dev_priv->l3_parity.which_slice);
97835a85ac6SBen Widawsky 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
97935a85ac6SBen Widawsky 	ilk_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
98035a85ac6SBen Widawsky 	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
98135a85ac6SBen Widawsky 
98235a85ac6SBen Widawsky 	mutex_unlock(&dev_priv->dev->struct_mutex);
98335a85ac6SBen Widawsky }
98435a85ac6SBen Widawsky 
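/*
 * Illustrative example (field values invented) of what the parity work above
 * hands to userspace: a KOBJ_CHANGE uevent on the drm device carrying the
 * I915_L3_PARITY_UEVENT key plus environment strings of the form
 *
 *	ROW=12 BANK=3 SUBBANK=1 SLICE=0
 *
 * which a userspace daemon can use to remap the failing L3 row.
 */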
98535a85ac6SBen Widawsky static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
986e3689190SBen Widawsky {
987e3689190SBen Widawsky 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
988e3689190SBen Widawsky 
989040d2baaSBen Widawsky 	if (!HAS_L3_DPF(dev))
990e3689190SBen Widawsky 		return;
991e3689190SBen Widawsky 
992d0ecd7e2SDaniel Vetter 	spin_lock(&dev_priv->irq_lock);
99335a85ac6SBen Widawsky 	ilk_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
994d0ecd7e2SDaniel Vetter 	spin_unlock(&dev_priv->irq_lock);
995e3689190SBen Widawsky 
99635a85ac6SBen Widawsky 	iir &= GT_PARITY_ERROR(dev);
99735a85ac6SBen Widawsky 	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
99835a85ac6SBen Widawsky 		dev_priv->l3_parity.which_slice |= 1 << 1;
99935a85ac6SBen Widawsky 
100035a85ac6SBen Widawsky 	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
100135a85ac6SBen Widawsky 		dev_priv->l3_parity.which_slice |= 1 << 0;
100235a85ac6SBen Widawsky 
1003a4da4fa4SDaniel Vetter 	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
1004e3689190SBen Widawsky }
1005e3689190SBen Widawsky 
1006f1af8fc1SPaulo Zanoni static void ilk_gt_irq_handler(struct drm_device *dev,
1007f1af8fc1SPaulo Zanoni 			       struct drm_i915_private *dev_priv,
1008f1af8fc1SPaulo Zanoni 			       u32 gt_iir)
1009f1af8fc1SPaulo Zanoni {
1010f1af8fc1SPaulo Zanoni 	if (gt_iir &
1011f1af8fc1SPaulo Zanoni 	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
1012f1af8fc1SPaulo Zanoni 		notify_ring(dev, &dev_priv->ring[RCS]);
1013f1af8fc1SPaulo Zanoni 	if (gt_iir & ILK_BSD_USER_INTERRUPT)
1014f1af8fc1SPaulo Zanoni 		notify_ring(dev, &dev_priv->ring[VCS]);
1015f1af8fc1SPaulo Zanoni }
1016f1af8fc1SPaulo Zanoni 
1017e7b4c6b1SDaniel Vetter static void snb_gt_irq_handler(struct drm_device *dev,
1018e7b4c6b1SDaniel Vetter 			       struct drm_i915_private *dev_priv,
1019e7b4c6b1SDaniel Vetter 			       u32 gt_iir)
1020e7b4c6b1SDaniel Vetter {
1021e7b4c6b1SDaniel Vetter 
1022cc609d5dSBen Widawsky 	if (gt_iir &
1023cc609d5dSBen Widawsky 	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
1024e7b4c6b1SDaniel Vetter 		notify_ring(dev, &dev_priv->ring[RCS]);
1025cc609d5dSBen Widawsky 	if (gt_iir & GT_BSD_USER_INTERRUPT)
1026e7b4c6b1SDaniel Vetter 		notify_ring(dev, &dev_priv->ring[VCS]);
1027cc609d5dSBen Widawsky 	if (gt_iir & GT_BLT_USER_INTERRUPT)
1028e7b4c6b1SDaniel Vetter 		notify_ring(dev, &dev_priv->ring[BCS]);
1029e7b4c6b1SDaniel Vetter 
1030cc609d5dSBen Widawsky 	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
1031cc609d5dSBen Widawsky 		      GT_BSD_CS_ERROR_INTERRUPT |
1032cc609d5dSBen Widawsky 		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) {
1033e7b4c6b1SDaniel Vetter 		DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
1034e7b4c6b1SDaniel Vetter 		i915_handle_error(dev, false);
1035e7b4c6b1SDaniel Vetter 	}
1036e3689190SBen Widawsky 
103735a85ac6SBen Widawsky 	if (gt_iir & GT_PARITY_ERROR(dev))
103835a85ac6SBen Widawsky 		ivybridge_parity_error_irq_handler(dev, gt_iir);
1039e7b4c6b1SDaniel Vetter }
1040e7b4c6b1SDaniel Vetter 
1041b543fb04SEgbert Eich #define HPD_STORM_DETECT_PERIOD 1000
1042b543fb04SEgbert Eich #define HPD_STORM_THRESHOLD 5
1043b543fb04SEgbert Eich 
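/*
 * Rough sketch (an interpretation of the handler below, not original text)
 * of how these constants are used: each hotplug interrupt on a pin bumps
 * hpd_cnt; if the previous interrupt on that pin was more than
 * HPD_STORM_DETECT_PERIOD (1000 ms) ago the count restarts at 0, and once
 * the count exceeds HPD_STORM_THRESHOLD (5) within one period the pin is
 * marked HPD_MARK_DISABLED and the connector is handed over to polling by
 * the hotplug work function above.
 */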
104410a504deSDaniel Vetter static inline void intel_hpd_irq_handler(struct drm_device *dev,
1045b543fb04SEgbert Eich 					 u32 hotplug_trigger,
1046b543fb04SEgbert Eich 					 const u32 *hpd)
1047b543fb04SEgbert Eich {
1048b543fb04SEgbert Eich 	drm_i915_private_t *dev_priv = dev->dev_private;
1049b543fb04SEgbert Eich 	int i;
105010a504deSDaniel Vetter 	bool storm_detected = false;
1051b543fb04SEgbert Eich 
105291d131d2SDaniel Vetter 	if (!hotplug_trigger)
105391d131d2SDaniel Vetter 		return;
105491d131d2SDaniel Vetter 
1055b5ea2d56SDaniel Vetter 	spin_lock(&dev_priv->irq_lock);
1056b543fb04SEgbert Eich 	for (i = 1; i < HPD_NUM_PINS; i++) {
1057821450c6SEgbert Eich 
1058b8f102e8SEgbert Eich 		WARN(((hpd[i] & hotplug_trigger) &&
1059b8f102e8SEgbert Eich 		      dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED),
1060b8f102e8SEgbert Eich 		     "Received HPD interrupt although disabled\n");
1061b8f102e8SEgbert Eich 
1062b543fb04SEgbert Eich 		if (!(hpd[i] & hotplug_trigger) ||
1063b543fb04SEgbert Eich 		    dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
1064b543fb04SEgbert Eich 			continue;
1065b543fb04SEgbert Eich 
1066bc5ead8cSJani Nikula 		dev_priv->hpd_event_bits |= (1 << i);
1067b543fb04SEgbert Eich 		if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
1068b543fb04SEgbert Eich 				   dev_priv->hpd_stats[i].hpd_last_jiffies
1069b543fb04SEgbert Eich 				   + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
1070b543fb04SEgbert Eich 			dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
1071b543fb04SEgbert Eich 			dev_priv->hpd_stats[i].hpd_cnt = 0;
1072b8f102e8SEgbert Eich 			DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", i);
1073b543fb04SEgbert Eich 		} else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
1074b543fb04SEgbert Eich 			dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
1075142e2398SEgbert Eich 			dev_priv->hpd_event_bits &= ~(1 << i);
1076b543fb04SEgbert Eich 			DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
107710a504deSDaniel Vetter 			storm_detected = true;
1078b543fb04SEgbert Eich 		} else {
1079b543fb04SEgbert Eich 			dev_priv->hpd_stats[i].hpd_cnt++;
1080b8f102e8SEgbert Eich 			DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", i,
1081b8f102e8SEgbert Eich 				      dev_priv->hpd_stats[i].hpd_cnt);
1082b543fb04SEgbert Eich 		}
1083b543fb04SEgbert Eich 	}
1084b543fb04SEgbert Eich 
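	/* Reprogram the hotplug enable registers so that pins marked as
	 * storming no longer generate interrupts. */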
108510a504deSDaniel Vetter 	if (storm_detected)
108610a504deSDaniel Vetter 		dev_priv->display.hpd_irq_setup(dev);
1087b5ea2d56SDaniel Vetter 	spin_unlock(&dev_priv->irq_lock);
10885876fa0dSDaniel Vetter 
1089645416f5SDaniel Vetter 	/*
1090645416f5SDaniel Vetter 	 * Our hotplug handler can grab modeset locks (by calling down into the
1091645416f5SDaniel Vetter 	 * fb helpers). Hence it must not be run on our own dev_priv->wq work
1092645416f5SDaniel Vetter 	 * queue, since otherwise the flush_work in the pageflip code will
1093645416f5SDaniel Vetter 	 * deadlock.
1094645416f5SDaniel Vetter 	 */
1095645416f5SDaniel Vetter 	schedule_work(&dev_priv->hotplug_work);
1096b543fb04SEgbert Eich }
1097b543fb04SEgbert Eich 
1098515ac2bbSDaniel Vetter static void gmbus_irq_handler(struct drm_device *dev)
1099515ac2bbSDaniel Vetter {
110028c70f16SDaniel Vetter 	struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;
110128c70f16SDaniel Vetter 
110228c70f16SDaniel Vetter 	wake_up_all(&dev_priv->gmbus_wait_queue);
1103515ac2bbSDaniel Vetter }
1104515ac2bbSDaniel Vetter 
1105ce99c256SDaniel Vetter static void dp_aux_irq_handler(struct drm_device *dev)
1106ce99c256SDaniel Vetter {
11079ee32feaSDaniel Vetter 	struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;
11089ee32feaSDaniel Vetter 
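	/* DP AUX completion waits share the GMBUS wait queue. */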
11099ee32feaSDaniel Vetter 	wake_up_all(&dev_priv->gmbus_wait_queue);
1110ce99c256SDaniel Vetter }
1111ce99c256SDaniel Vetter 
11121403c0d4SPaulo Zanoni /* The RPS events need forcewake, so we add them to a work queue and mask their
11131403c0d4SPaulo Zanoni  * IMR bits until the work is done. Other interrupts can be processed without
11141403c0d4SPaulo Zanoni  * the work queue. */
11151403c0d4SPaulo Zanoni static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
1116baf02a1fSBen Widawsky {
111741a05a3aSDaniel Vetter 	if (pm_iir & GEN6_PM_RPS_EVENTS) {
111859cdb63dSDaniel Vetter 		spin_lock(&dev_priv->irq_lock);
11194848405cSBen Widawsky 		dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_RPS_EVENTS;
11204d3b3d5fSPaulo Zanoni 		snb_disable_pm_irq(dev_priv, pm_iir & GEN6_PM_RPS_EVENTS);
112159cdb63dSDaniel Vetter 		spin_unlock(&dev_priv->irq_lock);
11222adbee62SDaniel Vetter 
11232adbee62SDaniel Vetter 		queue_work(dev_priv->wq, &dev_priv->rps.work);
112441a05a3aSDaniel Vetter 	}
1125baf02a1fSBen Widawsky 
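	/* VEBOX user and CS error interrupts are reported through the PM
	 * interrupt registers rather than GTIIR. */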
11261403c0d4SPaulo Zanoni 	if (HAS_VEBOX(dev_priv->dev)) {
112712638c57SBen Widawsky 		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
112812638c57SBen Widawsky 			notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);
112912638c57SBen Widawsky 
113012638c57SBen Widawsky 		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) {
113112638c57SBen Widawsky 			DRM_ERROR("VEBOX CS error interrupt 0x%08x\n", pm_iir);
113212638c57SBen Widawsky 			i915_handle_error(dev_priv->dev, false);
113312638c57SBen Widawsky 		}
113412638c57SBen Widawsky 	}
11351403c0d4SPaulo Zanoni }
1136baf02a1fSBen Widawsky 
1137ff1f525eSDaniel Vetter static irqreturn_t valleyview_irq_handler(int irq, void *arg)
11387e231dbeSJesse Barnes {
11397e231dbeSJesse Barnes 	struct drm_device *dev = (struct drm_device *) arg;
11407e231dbeSJesse Barnes 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
11417e231dbeSJesse Barnes 	u32 iir, gt_iir, pm_iir;
11427e231dbeSJesse Barnes 	irqreturn_t ret = IRQ_NONE;
11437e231dbeSJesse Barnes 	unsigned long irqflags;
11447e231dbeSJesse Barnes 	int pipe;
11457e231dbeSJesse Barnes 	u32 pipe_stats[I915_MAX_PIPES];
11467e231dbeSJesse Barnes 
11477e231dbeSJesse Barnes 	atomic_inc(&dev_priv->irq_received);
11487e231dbeSJesse Barnes 
11497e231dbeSJesse Barnes 	while (true) {
11507e231dbeSJesse Barnes 		iir = I915_READ(VLV_IIR);
11517e231dbeSJesse Barnes 		gt_iir = I915_READ(GTIIR);
11527e231dbeSJesse Barnes 		pm_iir = I915_READ(GEN6_PMIIR);
11537e231dbeSJesse Barnes 
11547e231dbeSJesse Barnes 		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
11557e231dbeSJesse Barnes 			goto out;
11567e231dbeSJesse Barnes 
11577e231dbeSJesse Barnes 		ret = IRQ_HANDLED;
11587e231dbeSJesse Barnes 
1159e7b4c6b1SDaniel Vetter 		snb_gt_irq_handler(dev, dev_priv, gt_iir);
11607e231dbeSJesse Barnes 
11617e231dbeSJesse Barnes 		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
11627e231dbeSJesse Barnes 		for_each_pipe(pipe) {
11637e231dbeSJesse Barnes 			int reg = PIPESTAT(pipe);
11647e231dbeSJesse Barnes 			pipe_stats[pipe] = I915_READ(reg);
11657e231dbeSJesse Barnes 
11667e231dbeSJesse Barnes 			/*
11677e231dbeSJesse Barnes 			 * Clear the PIPE*STAT regs before the IIR
11687e231dbeSJesse Barnes 			 */
11697e231dbeSJesse Barnes 			if (pipe_stats[pipe] & 0x8000ffff) {
11707e231dbeSJesse Barnes 				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
11717e231dbeSJesse Barnes 					DRM_DEBUG_DRIVER("pipe %c underrun\n",
11727e231dbeSJesse Barnes 							 pipe_name(pipe));
11737e231dbeSJesse Barnes 				I915_WRITE(reg, pipe_stats[pipe]);
11747e231dbeSJesse Barnes 			}
11757e231dbeSJesse Barnes 		}
11767e231dbeSJesse Barnes 		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
11777e231dbeSJesse Barnes 
117831acc7f5SJesse Barnes 		for_each_pipe(pipe) {
117931acc7f5SJesse Barnes 			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
118031acc7f5SJesse Barnes 				drm_handle_vblank(dev, pipe);
118131acc7f5SJesse Barnes 
118231acc7f5SJesse Barnes 			if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) {
118331acc7f5SJesse Barnes 				intel_prepare_page_flip(dev, pipe);
118431acc7f5SJesse Barnes 				intel_finish_page_flip(dev, pipe);
118531acc7f5SJesse Barnes 			}
118631acc7f5SJesse Barnes 		}
118731acc7f5SJesse Barnes 
11887e231dbeSJesse Barnes 		/* Consume port.  Then clear IIR or we'll miss events */
11897e231dbeSJesse Barnes 		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
11907e231dbeSJesse Barnes 			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
1191b543fb04SEgbert Eich 			u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
11927e231dbeSJesse Barnes 
11937e231dbeSJesse Barnes 			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
11947e231dbeSJesse Barnes 					 hotplug_status);
119591d131d2SDaniel Vetter 
119610a504deSDaniel Vetter 			intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);
119791d131d2SDaniel Vetter 
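			/* Clear the sticky hotplug status bits; the read back
			 * flushes the write before VLV_IIR is cleared below. */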
11987e231dbeSJesse Barnes 			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
11997e231dbeSJesse Barnes 			I915_READ(PORT_HOTPLUG_STAT);
12007e231dbeSJesse Barnes 		}
12017e231dbeSJesse Barnes 
1202515ac2bbSDaniel Vetter 		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
1203515ac2bbSDaniel Vetter 			gmbus_irq_handler(dev);
12047e231dbeSJesse Barnes 
120560611c13SPaulo Zanoni 		if (pm_iir)
1206d0ecd7e2SDaniel Vetter 			gen6_rps_irq_handler(dev_priv, pm_iir);
12077e231dbeSJesse Barnes 
12087e231dbeSJesse Barnes 		I915_WRITE(GTIIR, gt_iir);
12097e231dbeSJesse Barnes 		I915_WRITE(GEN6_PMIIR, pm_iir);
12107e231dbeSJesse Barnes 		I915_WRITE(VLV_IIR, iir);
12117e231dbeSJesse Barnes 	}
12127e231dbeSJesse Barnes 
12137e231dbeSJesse Barnes out:
12147e231dbeSJesse Barnes 	return ret;
12157e231dbeSJesse Barnes }
12167e231dbeSJesse Barnes 
121723e81d69SAdam Jackson static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
1218776ad806SJesse Barnes {
1219776ad806SJesse Barnes 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
12209db4a9c7SJesse Barnes 	int pipe;
1221b543fb04SEgbert Eich 	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
1222776ad806SJesse Barnes 
122310a504deSDaniel Vetter 	intel_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx);
122491d131d2SDaniel Vetter 
1225cfc33bf7SVille Syrjälä 	if (pch_iir & SDE_AUDIO_POWER_MASK) {
1226cfc33bf7SVille Syrjälä 		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
1227776ad806SJesse Barnes 			       SDE_AUDIO_POWER_SHIFT);
1228cfc33bf7SVille Syrjälä 		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
1229cfc33bf7SVille Syrjälä 				 port_name(port));
1230cfc33bf7SVille Syrjälä 	}
1231776ad806SJesse Barnes 
1232ce99c256SDaniel Vetter 	if (pch_iir & SDE_AUX_MASK)
1233ce99c256SDaniel Vetter 		dp_aux_irq_handler(dev);
1234ce99c256SDaniel Vetter 
1235776ad806SJesse Barnes 	if (pch_iir & SDE_GMBUS)
1236515ac2bbSDaniel Vetter 		gmbus_irq_handler(dev);
1237776ad806SJesse Barnes 
1238776ad806SJesse Barnes 	if (pch_iir & SDE_AUDIO_HDCP_MASK)
1239776ad806SJesse Barnes 		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
1240776ad806SJesse Barnes 
1241776ad806SJesse Barnes 	if (pch_iir & SDE_AUDIO_TRANS_MASK)
1242776ad806SJesse Barnes 		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
1243776ad806SJesse Barnes 
1244776ad806SJesse Barnes 	if (pch_iir & SDE_POISON)
1245776ad806SJesse Barnes 		DRM_ERROR("PCH poison interrupt\n");
1246776ad806SJesse Barnes 
12479db4a9c7SJesse Barnes 	if (pch_iir & SDE_FDI_MASK)
12489db4a9c7SJesse Barnes 		for_each_pipe(pipe)
12499db4a9c7SJesse Barnes 			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
12509db4a9c7SJesse Barnes 					 pipe_name(pipe),
12519db4a9c7SJesse Barnes 					 I915_READ(FDI_RX_IIR(pipe)));
1252776ad806SJesse Barnes 
1253776ad806SJesse Barnes 	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
1254776ad806SJesse Barnes 		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
1255776ad806SJesse Barnes 
1256776ad806SJesse Barnes 	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
1257776ad806SJesse Barnes 		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
1258776ad806SJesse Barnes 
1259776ad806SJesse Barnes 	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
12608664281bSPaulo Zanoni 		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
12618664281bSPaulo Zanoni 							  false))
12628664281bSPaulo Zanoni 			DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n");
12638664281bSPaulo Zanoni 
12648664281bSPaulo Zanoni 	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
12658664281bSPaulo Zanoni 		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
12668664281bSPaulo Zanoni 							  false))
12678664281bSPaulo Zanoni 			DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n");
12688664281bSPaulo Zanoni }
12698664281bSPaulo Zanoni 
12708664281bSPaulo Zanoni static void ivb_err_int_handler(struct drm_device *dev)
12718664281bSPaulo Zanoni {
12728664281bSPaulo Zanoni 	struct drm_i915_private *dev_priv = dev->dev_private;
12738664281bSPaulo Zanoni 	u32 err_int = I915_READ(GEN7_ERR_INT);
12748664281bSPaulo Zanoni 
1275de032bf4SPaulo Zanoni 	if (err_int & ERR_INT_POISON)
1276de032bf4SPaulo Zanoni 		DRM_ERROR("Poison interrupt\n");
1277de032bf4SPaulo Zanoni 
12788664281bSPaulo Zanoni 	if (err_int & ERR_INT_FIFO_UNDERRUN_A)
12798664281bSPaulo Zanoni 		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false))
12808664281bSPaulo Zanoni 			DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n");
12818664281bSPaulo Zanoni 
12828664281bSPaulo Zanoni 	if (err_int & ERR_INT_FIFO_UNDERRUN_B)
12838664281bSPaulo Zanoni 		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false))
12848664281bSPaulo Zanoni 			DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n");
12858664281bSPaulo Zanoni 
12868664281bSPaulo Zanoni 	if (err_int & ERR_INT_FIFO_UNDERRUN_C)
12878664281bSPaulo Zanoni 		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_C, false))
12888664281bSPaulo Zanoni 			DRM_DEBUG_DRIVER("Pipe C FIFO underrun\n");
12898664281bSPaulo Zanoni 
12908664281bSPaulo Zanoni 	I915_WRITE(GEN7_ERR_INT, err_int);
12918664281bSPaulo Zanoni }
12928664281bSPaulo Zanoni 
12938664281bSPaulo Zanoni static void cpt_serr_int_handler(struct drm_device *dev)
12948664281bSPaulo Zanoni {
12958664281bSPaulo Zanoni 	struct drm_i915_private *dev_priv = dev->dev_private;
12968664281bSPaulo Zanoni 	u32 serr_int = I915_READ(SERR_INT);
12978664281bSPaulo Zanoni 
1298de032bf4SPaulo Zanoni 	if (serr_int & SERR_INT_POISON)
1299de032bf4SPaulo Zanoni 		DRM_ERROR("PCH poison interrupt\n");
1300de032bf4SPaulo Zanoni 
13018664281bSPaulo Zanoni 	if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
13028664281bSPaulo Zanoni 		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
13038664281bSPaulo Zanoni 							  false))
13048664281bSPaulo Zanoni 			DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n");
13058664281bSPaulo Zanoni 
13068664281bSPaulo Zanoni 	if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
13078664281bSPaulo Zanoni 		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
13088664281bSPaulo Zanoni 							  false))
13098664281bSPaulo Zanoni 			DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n");
13108664281bSPaulo Zanoni 
13118664281bSPaulo Zanoni 	if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
13128664281bSPaulo Zanoni 		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_C,
13138664281bSPaulo Zanoni 							  false))
13148664281bSPaulo Zanoni 			DRM_DEBUG_DRIVER("PCH transcoder C FIFO underrun\n");
13158664281bSPaulo Zanoni 
13168664281bSPaulo Zanoni 	I915_WRITE(SERR_INT, serr_int);
1317776ad806SJesse Barnes }
1318776ad806SJesse Barnes 
131923e81d69SAdam Jackson static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
132023e81d69SAdam Jackson {
132123e81d69SAdam Jackson 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
132223e81d69SAdam Jackson 	int pipe;
1323b543fb04SEgbert Eich 	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
132423e81d69SAdam Jackson 
132510a504deSDaniel Vetter 	intel_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt);
132691d131d2SDaniel Vetter 
1327cfc33bf7SVille Syrjälä 	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
1328cfc33bf7SVille Syrjälä 		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
132923e81d69SAdam Jackson 			       SDE_AUDIO_POWER_SHIFT_CPT);
1330cfc33bf7SVille Syrjälä 		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
1331cfc33bf7SVille Syrjälä 				 port_name(port));
1332cfc33bf7SVille Syrjälä 	}
133323e81d69SAdam Jackson 
133423e81d69SAdam Jackson 	if (pch_iir & SDE_AUX_MASK_CPT)
1335ce99c256SDaniel Vetter 		dp_aux_irq_handler(dev);
133623e81d69SAdam Jackson 
133723e81d69SAdam Jackson 	if (pch_iir & SDE_GMBUS_CPT)
1338515ac2bbSDaniel Vetter 		gmbus_irq_handler(dev);
133923e81d69SAdam Jackson 
134023e81d69SAdam Jackson 	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
134123e81d69SAdam Jackson 		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
134223e81d69SAdam Jackson 
134323e81d69SAdam Jackson 	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
134423e81d69SAdam Jackson 		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
134523e81d69SAdam Jackson 
134623e81d69SAdam Jackson 	if (pch_iir & SDE_FDI_MASK_CPT)
134723e81d69SAdam Jackson 		for_each_pipe(pipe)
134823e81d69SAdam Jackson 			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
134923e81d69SAdam Jackson 					 pipe_name(pipe),
135023e81d69SAdam Jackson 					 I915_READ(FDI_RX_IIR(pipe)));
13518664281bSPaulo Zanoni 
13528664281bSPaulo Zanoni 	if (pch_iir & SDE_ERROR_CPT)
13538664281bSPaulo Zanoni 		cpt_serr_int_handler(dev);
135423e81d69SAdam Jackson }
135523e81d69SAdam Jackson 
1356c008bc6eSPaulo Zanoni static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
1357c008bc6eSPaulo Zanoni {
1358c008bc6eSPaulo Zanoni 	struct drm_i915_private *dev_priv = dev->dev_private;
1359c008bc6eSPaulo Zanoni 
1360c008bc6eSPaulo Zanoni 	if (de_iir & DE_AUX_CHANNEL_A)
1361c008bc6eSPaulo Zanoni 		dp_aux_irq_handler(dev);
1362c008bc6eSPaulo Zanoni 
1363c008bc6eSPaulo Zanoni 	if (de_iir & DE_GSE)
1364c008bc6eSPaulo Zanoni 		intel_opregion_asle_intr(dev);
1365c008bc6eSPaulo Zanoni 
1366c008bc6eSPaulo Zanoni 	if (de_iir & DE_PIPEA_VBLANK)
1367c008bc6eSPaulo Zanoni 		drm_handle_vblank(dev, 0);
1368c008bc6eSPaulo Zanoni 
1369c008bc6eSPaulo Zanoni 	if (de_iir & DE_PIPEB_VBLANK)
1370c008bc6eSPaulo Zanoni 		drm_handle_vblank(dev, 1);
1371c008bc6eSPaulo Zanoni 
1372c008bc6eSPaulo Zanoni 	if (de_iir & DE_POISON)
1373c008bc6eSPaulo Zanoni 		DRM_ERROR("Poison interrupt\n");
1374c008bc6eSPaulo Zanoni 
1375c008bc6eSPaulo Zanoni 	if (de_iir & DE_PIPEA_FIFO_UNDERRUN)
1376c008bc6eSPaulo Zanoni 		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false))
1377c008bc6eSPaulo Zanoni 			DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n");
1378c008bc6eSPaulo Zanoni 
1379c008bc6eSPaulo Zanoni 	if (de_iir & DE_PIPEB_FIFO_UNDERRUN)
1380c008bc6eSPaulo Zanoni 		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false))
1381c008bc6eSPaulo Zanoni 			DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n");
1382c008bc6eSPaulo Zanoni 
1383c008bc6eSPaulo Zanoni 	if (de_iir & DE_PLANEA_FLIP_DONE) {
1384c008bc6eSPaulo Zanoni 		intel_prepare_page_flip(dev, 0);
1385c008bc6eSPaulo Zanoni 		intel_finish_page_flip_plane(dev, 0);
1386c008bc6eSPaulo Zanoni 	}
1387c008bc6eSPaulo Zanoni 
1388c008bc6eSPaulo Zanoni 	if (de_iir & DE_PLANEB_FLIP_DONE) {
1389c008bc6eSPaulo Zanoni 		intel_prepare_page_flip(dev, 1);
1390c008bc6eSPaulo Zanoni 		intel_finish_page_flip_plane(dev, 1);
1391c008bc6eSPaulo Zanoni 	}
1392c008bc6eSPaulo Zanoni 
1393c008bc6eSPaulo Zanoni 	/* check event from PCH */
1394c008bc6eSPaulo Zanoni 	if (de_iir & DE_PCH_EVENT) {
1395c008bc6eSPaulo Zanoni 		u32 pch_iir = I915_READ(SDEIIR);
1396c008bc6eSPaulo Zanoni 
1397c008bc6eSPaulo Zanoni 		if (HAS_PCH_CPT(dev))
1398c008bc6eSPaulo Zanoni 			cpt_irq_handler(dev, pch_iir);
1399c008bc6eSPaulo Zanoni 		else
1400c008bc6eSPaulo Zanoni 			ibx_irq_handler(dev, pch_iir);
1401c008bc6eSPaulo Zanoni 
1402c008bc6eSPaulo Zanoni 		/* should clear PCH hotplug event before clearing CPU irq */
1403c008bc6eSPaulo Zanoni 		I915_WRITE(SDEIIR, pch_iir);
1404c008bc6eSPaulo Zanoni 	}
1405c008bc6eSPaulo Zanoni 
1406c008bc6eSPaulo Zanoni 	if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
1407c008bc6eSPaulo Zanoni 		ironlake_rps_change_irq_handler(dev);
1408c008bc6eSPaulo Zanoni }
1409c008bc6eSPaulo Zanoni 
14109719fb98SPaulo Zanoni static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
14119719fb98SPaulo Zanoni {
14129719fb98SPaulo Zanoni 	struct drm_i915_private *dev_priv = dev->dev_private;
14139719fb98SPaulo Zanoni 	int i;
14149719fb98SPaulo Zanoni 
14159719fb98SPaulo Zanoni 	if (de_iir & DE_ERR_INT_IVB)
14169719fb98SPaulo Zanoni 		ivb_err_int_handler(dev);
14179719fb98SPaulo Zanoni 
14189719fb98SPaulo Zanoni 	if (de_iir & DE_AUX_CHANNEL_A_IVB)
14199719fb98SPaulo Zanoni 		dp_aux_irq_handler(dev);
14209719fb98SPaulo Zanoni 
14219719fb98SPaulo Zanoni 	if (de_iir & DE_GSE_IVB)
14229719fb98SPaulo Zanoni 		intel_opregion_asle_intr(dev);
14239719fb98SPaulo Zanoni 
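	/* The per-pipe vblank and flip-done bits are spaced 5 bits apart in
	 * the IVB DE interrupt registers. */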
14249719fb98SPaulo Zanoni 	for (i = 0; i < 3; i++) {
14259719fb98SPaulo Zanoni 		if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
14269719fb98SPaulo Zanoni 			drm_handle_vblank(dev, i);
14279719fb98SPaulo Zanoni 		if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) {
14289719fb98SPaulo Zanoni 			intel_prepare_page_flip(dev, i);
14299719fb98SPaulo Zanoni 			intel_finish_page_flip_plane(dev, i);
14309719fb98SPaulo Zanoni 		}
14319719fb98SPaulo Zanoni 	}
14329719fb98SPaulo Zanoni 
14339719fb98SPaulo Zanoni 	/* check event from PCH */
14349719fb98SPaulo Zanoni 	if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
14359719fb98SPaulo Zanoni 		u32 pch_iir = I915_READ(SDEIIR);
14369719fb98SPaulo Zanoni 
14379719fb98SPaulo Zanoni 		cpt_irq_handler(dev, pch_iir);
14389719fb98SPaulo Zanoni 
14399719fb98SPaulo Zanoni 		/* clear PCH hotplug event before clearing CPU irq */
14409719fb98SPaulo Zanoni 		I915_WRITE(SDEIIR, pch_iir);
14419719fb98SPaulo Zanoni 	}
14429719fb98SPaulo Zanoni }
14439719fb98SPaulo Zanoni 
1444f1af8fc1SPaulo Zanoni static irqreturn_t ironlake_irq_handler(int irq, void *arg)
1445b1f14ad0SJesse Barnes {
1446b1f14ad0SJesse Barnes 	struct drm_device *dev = (struct drm_device *) arg;
1447b1f14ad0SJesse Barnes 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1448f1af8fc1SPaulo Zanoni 	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
14490e43406bSChris Wilson 	irqreturn_t ret = IRQ_NONE;
1450b1f14ad0SJesse Barnes 
1451b1f14ad0SJesse Barnes 	atomic_inc(&dev_priv->irq_received);
1452b1f14ad0SJesse Barnes 
14538664281bSPaulo Zanoni 	/* We get interrupts on unclaimed registers, so check for this before we
14548664281bSPaulo Zanoni 	 * do any I915_{READ,WRITE}. */
1455907b28c5SChris Wilson 	intel_uncore_check_errors(dev);
14568664281bSPaulo Zanoni 
1457b1f14ad0SJesse Barnes 	/* disable master interrupt before clearing iir  */
1458b1f14ad0SJesse Barnes 	de_ier = I915_READ(DEIER);
1459b1f14ad0SJesse Barnes 	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
146023a78516SPaulo Zanoni 	POSTING_READ(DEIER);
14610e43406bSChris Wilson 
146244498aeaSPaulo Zanoni 	/* Disable south interrupts. We'll only write to SDEIIR once, so further
146344498aeaSPaulo Zanoni 	 * interrupts will be stored on its back queue, and then we'll be
146444498aeaSPaulo Zanoni 	 * able to process them after we restore SDEIER (as soon as we restore
146544498aeaSPaulo Zanoni 	 * it, we'll get an interrupt if SDEIIR still has something to process
146644498aeaSPaulo Zanoni 	 * due to its back queue). */
1467ab5c608bSBen Widawsky 	if (!HAS_PCH_NOP(dev)) {
146844498aeaSPaulo Zanoni 		sde_ier = I915_READ(SDEIER);
146944498aeaSPaulo Zanoni 		I915_WRITE(SDEIER, 0);
147044498aeaSPaulo Zanoni 		POSTING_READ(SDEIER);
1471ab5c608bSBen Widawsky 	}
147244498aeaSPaulo Zanoni 
14730e43406bSChris Wilson 	gt_iir = I915_READ(GTIIR);
14740e43406bSChris Wilson 	if (gt_iir) {
1475d8fc8a47SPaulo Zanoni 		if (INTEL_INFO(dev)->gen >= 6)
14760e43406bSChris Wilson 			snb_gt_irq_handler(dev, dev_priv, gt_iir);
1477d8fc8a47SPaulo Zanoni 		else
1478d8fc8a47SPaulo Zanoni 			ilk_gt_irq_handler(dev, dev_priv, gt_iir);
14790e43406bSChris Wilson 		I915_WRITE(GTIIR, gt_iir);
14800e43406bSChris Wilson 		ret = IRQ_HANDLED;
14810e43406bSChris Wilson 	}
1482b1f14ad0SJesse Barnes 
1483b1f14ad0SJesse Barnes 	de_iir = I915_READ(DEIIR);
14840e43406bSChris Wilson 	if (de_iir) {
1485f1af8fc1SPaulo Zanoni 		if (INTEL_INFO(dev)->gen >= 7)
14869719fb98SPaulo Zanoni 			ivb_display_irq_handler(dev, de_iir);
1487f1af8fc1SPaulo Zanoni 		else
1488f1af8fc1SPaulo Zanoni 			ilk_display_irq_handler(dev, de_iir);
14890e43406bSChris Wilson 		I915_WRITE(DEIIR, de_iir);
14900e43406bSChris Wilson 		ret = IRQ_HANDLED;
14910e43406bSChris Wilson 	}
14920e43406bSChris Wilson 
1493f1af8fc1SPaulo Zanoni 	if (INTEL_INFO(dev)->gen >= 6) {
1494f1af8fc1SPaulo Zanoni 		u32 pm_iir = I915_READ(GEN6_PMIIR);
14950e43406bSChris Wilson 		if (pm_iir) {
1496d0ecd7e2SDaniel Vetter 			gen6_rps_irq_handler(dev_priv, pm_iir);
1497b1f14ad0SJesse Barnes 			I915_WRITE(GEN6_PMIIR, pm_iir);
14980e43406bSChris Wilson 			ret = IRQ_HANDLED;
14990e43406bSChris Wilson 		}
1500f1af8fc1SPaulo Zanoni 	}
1501b1f14ad0SJesse Barnes 
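	/* Re-enable the master interrupt and, where present, the south
	 * interrupts now that the pending IIR bits have been handled. */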
1502b1f14ad0SJesse Barnes 	I915_WRITE(DEIER, de_ier);
1503b1f14ad0SJesse Barnes 	POSTING_READ(DEIER);
1504ab5c608bSBen Widawsky 	if (!HAS_PCH_NOP(dev)) {
150544498aeaSPaulo Zanoni 		I915_WRITE(SDEIER, sde_ier);
150644498aeaSPaulo Zanoni 		POSTING_READ(SDEIER);
1507ab5c608bSBen Widawsky 	}
1508b1f14ad0SJesse Barnes 
1509b1f14ad0SJesse Barnes 	return ret;
1510b1f14ad0SJesse Barnes }
1511b1f14ad0SJesse Barnes 
151217e1df07SDaniel Vetter static void i915_error_wake_up(struct drm_i915_private *dev_priv,
151317e1df07SDaniel Vetter 			       bool reset_completed)
151417e1df07SDaniel Vetter {
151517e1df07SDaniel Vetter 	struct intel_ring_buffer *ring;
151617e1df07SDaniel Vetter 	int i;
151717e1df07SDaniel Vetter 
151817e1df07SDaniel Vetter 	/*
151917e1df07SDaniel Vetter 	 * Notify all waiters for GPU completion events that reset state has
152017e1df07SDaniel Vetter 	 * been changed, and that they need to restart their wait after
152117e1df07SDaniel Vetter 	 * checking for potential errors (and bail out to drop locks if there is
152217e1df07SDaniel Vetter 	 * a gpu reset pending so that i915_error_work_func can acquire them).
152317e1df07SDaniel Vetter 	 */
152417e1df07SDaniel Vetter 
152517e1df07SDaniel Vetter 	/* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
152617e1df07SDaniel Vetter 	for_each_ring(ring, dev_priv, i)
152717e1df07SDaniel Vetter 		wake_up_all(&ring->irq_queue);
152817e1df07SDaniel Vetter 
152917e1df07SDaniel Vetter 	/* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
153017e1df07SDaniel Vetter 	wake_up_all(&dev_priv->pending_flip_queue);
153117e1df07SDaniel Vetter 
153217e1df07SDaniel Vetter 	/*
153317e1df07SDaniel Vetter 	 * Signal tasks blocked in i915_gem_wait_for_error that the pending
153417e1df07SDaniel Vetter 	 * reset state is cleared.
153517e1df07SDaniel Vetter 	 */
153617e1df07SDaniel Vetter 	if (reset_completed)
153717e1df07SDaniel Vetter 		wake_up_all(&dev_priv->gpu_error.reset_queue);
153817e1df07SDaniel Vetter }
153917e1df07SDaniel Vetter 
15408a905236SJesse Barnes /**
15418a905236SJesse Barnes  * i915_error_work_func - do process context error handling work
15428a905236SJesse Barnes  * @work: work struct
15438a905236SJesse Barnes  *
15448a905236SJesse Barnes  * Fire an error uevent so userspace can see that a hang or error
15458a905236SJesse Barnes  * was detected.
15468a905236SJesse Barnes  */
15478a905236SJesse Barnes static void i915_error_work_func(struct work_struct *work)
15488a905236SJesse Barnes {
15491f83fee0SDaniel Vetter 	struct i915_gpu_error *error = container_of(work, struct i915_gpu_error,
15501f83fee0SDaniel Vetter 						    work);
15511f83fee0SDaniel Vetter 	drm_i915_private_t *dev_priv = container_of(error, drm_i915_private_t,
15521f83fee0SDaniel Vetter 						    gpu_error);
15538a905236SJesse Barnes 	struct drm_device *dev = dev_priv->dev;
1554cce723edSBen Widawsky 	char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
1555cce723edSBen Widawsky 	char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
1556cce723edSBen Widawsky 	char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
155717e1df07SDaniel Vetter 	int ret;
15588a905236SJesse Barnes 
1559f316a42cSBen Gamari 	kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);
15608a905236SJesse Barnes 
15617db0ba24SDaniel Vetter 	/*
15627db0ba24SDaniel Vetter 	 * Note that there's only one work item which does gpu resets, so we
15637db0ba24SDaniel Vetter 	 * need not worry about concurrent gpu resets potentially incrementing
15647db0ba24SDaniel Vetter 	 * error->reset_counter twice. We only need to take care of another
15657db0ba24SDaniel Vetter 	 * racing irq/hangcheck declaring the gpu dead for a second time. A
15667db0ba24SDaniel Vetter 	 * quick check for that is good enough: schedule_work ensures the
15677db0ba24SDaniel Vetter 	 * correct ordering between hang detection and this work item, and since
15687db0ba24SDaniel Vetter 	 * the reset in-progress bit is only ever set by code outside of this
15697db0ba24SDaniel Vetter 	 * work we don't need to worry about any other races.
15707db0ba24SDaniel Vetter 	 */
15717db0ba24SDaniel Vetter 	if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
157244d98a61SZhao Yakui 		DRM_DEBUG_DRIVER("resetting chip\n");
15737db0ba24SDaniel Vetter 		kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE,
15747db0ba24SDaniel Vetter 				   reset_event);
15751f83fee0SDaniel Vetter 
157617e1df07SDaniel Vetter 		/*
157717e1df07SDaniel Vetter 		 * All state reset _must_ be completed before we update the
157817e1df07SDaniel Vetter 		 * reset counter, for otherwise waiters might miss the reset
157917e1df07SDaniel Vetter 		 * pending state and not properly drop locks, resulting in
158017e1df07SDaniel Vetter 		 * deadlocks with the reset work.
158117e1df07SDaniel Vetter 		 */
1582f69061beSDaniel Vetter 		ret = i915_reset(dev);
1583f69061beSDaniel Vetter 
158417e1df07SDaniel Vetter 		intel_display_handle_reset(dev);
158517e1df07SDaniel Vetter 
1586f69061beSDaniel Vetter 		if (ret == 0) {
1587f69061beSDaniel Vetter 			/*
1588f69061beSDaniel Vetter 			 * After all the gem state is reset, increment the reset
1589f69061beSDaniel Vetter 			 * counter and wake up everyone waiting for the reset to
1590f69061beSDaniel Vetter 			 * complete.
1591f69061beSDaniel Vetter 			 *
1592f69061beSDaniel Vetter 			 * Since unlock operations are a one-sided barrier only,
1593f69061beSDaniel Vetter 			 * we need to insert a barrier here to order any seqno
1594f69061beSDaniel Vetter 			 * updates before
1595f69061beSDaniel Vetter 			 * the counter increment.
1596f69061beSDaniel Vetter 			 */
1597f69061beSDaniel Vetter 			smp_mb__before_atomic_inc();
1598f69061beSDaniel Vetter 			atomic_inc(&dev_priv->gpu_error.reset_counter);
1599f69061beSDaniel Vetter 
1600f69061beSDaniel Vetter 			kobject_uevent_env(&dev->primary->kdev.kobj,
1601f69061beSDaniel Vetter 					   KOBJ_CHANGE, reset_done_event);
16021f83fee0SDaniel Vetter 		} else {
16031f83fee0SDaniel Vetter 			atomic_set(&error->reset_counter, I915_WEDGED);
1604f316a42cSBen Gamari 		}
16051f83fee0SDaniel Vetter 
160617e1df07SDaniel Vetter 		/*
160717e1df07SDaniel Vetter 		 * Note: The wake_up also serves as a memory barrier so that
160817e1df07SDaniel Vetter 		 * waiters see the updated value of the reset counter atomic_t.
160917e1df07SDaniel Vetter 		 */
161017e1df07SDaniel Vetter 		i915_error_wake_up(dev_priv, true);
1611f316a42cSBen Gamari 	}
16128a905236SJesse Barnes }
16138a905236SJesse Barnes 
161435aed2e6SChris Wilson static void i915_report_and_clear_eir(struct drm_device *dev)
1615c0e09200SDave Airlie {
16168a905236SJesse Barnes 	struct drm_i915_private *dev_priv = dev->dev_private;
1617bd9854f9SBen Widawsky 	uint32_t instdone[I915_NUM_INSTDONE_REG];
161863eeaf38SJesse Barnes 	u32 eir = I915_READ(EIR);
1619050ee91fSBen Widawsky 	int pipe, i;
162063eeaf38SJesse Barnes 
162135aed2e6SChris Wilson 	if (!eir)
162235aed2e6SChris Wilson 		return;
162363eeaf38SJesse Barnes 
1624a70491ccSJoe Perches 	pr_err("render error detected, EIR: 0x%08x\n", eir);
16258a905236SJesse Barnes 
1626bd9854f9SBen Widawsky 	i915_get_extra_instdone(dev, instdone);
1627bd9854f9SBen Widawsky 
16288a905236SJesse Barnes 	if (IS_G4X(dev)) {
16298a905236SJesse Barnes 		if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
16308a905236SJesse Barnes 			u32 ipeir = I915_READ(IPEIR_I965);
16318a905236SJesse Barnes 
1632a70491ccSJoe Perches 			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
1633a70491ccSJoe Perches 			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
1634050ee91fSBen Widawsky 			for (i = 0; i < ARRAY_SIZE(instdone); i++)
1635050ee91fSBen Widawsky 				pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
1636a70491ccSJoe Perches 			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
1637a70491ccSJoe Perches 			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
16388a905236SJesse Barnes 			I915_WRITE(IPEIR_I965, ipeir);
16393143a2bfSChris Wilson 			POSTING_READ(IPEIR_I965);
16408a905236SJesse Barnes 		}
16418a905236SJesse Barnes 		if (eir & GM45_ERROR_PAGE_TABLE) {
16428a905236SJesse Barnes 			u32 pgtbl_err = I915_READ(PGTBL_ER);
1643a70491ccSJoe Perches 			pr_err("page table error\n");
1644a70491ccSJoe Perches 			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
16458a905236SJesse Barnes 			I915_WRITE(PGTBL_ER, pgtbl_err);
16463143a2bfSChris Wilson 			POSTING_READ(PGTBL_ER);
16478a905236SJesse Barnes 		}
16488a905236SJesse Barnes 	}
16498a905236SJesse Barnes 
1650a6c45cf0SChris Wilson 	if (!IS_GEN2(dev)) {
165163eeaf38SJesse Barnes 		if (eir & I915_ERROR_PAGE_TABLE) {
165263eeaf38SJesse Barnes 			u32 pgtbl_err = I915_READ(PGTBL_ER);
1653a70491ccSJoe Perches 			pr_err("page table error\n");
1654a70491ccSJoe Perches 			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
165563eeaf38SJesse Barnes 			I915_WRITE(PGTBL_ER, pgtbl_err);
16563143a2bfSChris Wilson 			POSTING_READ(PGTBL_ER);
165763eeaf38SJesse Barnes 		}
16588a905236SJesse Barnes 	}
16598a905236SJesse Barnes 
166063eeaf38SJesse Barnes 	if (eir & I915_ERROR_MEMORY_REFRESH) {
1661a70491ccSJoe Perches 		pr_err("memory refresh error:\n");
16629db4a9c7SJesse Barnes 		for_each_pipe(pipe)
1663a70491ccSJoe Perches 			pr_err("pipe %c stat: 0x%08x\n",
16649db4a9c7SJesse Barnes 			       pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
166563eeaf38SJesse Barnes 		/* pipestat has already been acked */
166663eeaf38SJesse Barnes 	}
166763eeaf38SJesse Barnes 	if (eir & I915_ERROR_INSTRUCTION) {
1668a70491ccSJoe Perches 		pr_err("instruction error\n");
1669a70491ccSJoe Perches 		pr_err("  INSTPM: 0x%08x\n", I915_READ(INSTPM));
1670050ee91fSBen Widawsky 		for (i = 0; i < ARRAY_SIZE(instdone); i++)
1671050ee91fSBen Widawsky 			pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
1672a6c45cf0SChris Wilson 		if (INTEL_INFO(dev)->gen < 4) {
167363eeaf38SJesse Barnes 			u32 ipeir = I915_READ(IPEIR);
167463eeaf38SJesse Barnes 
1675a70491ccSJoe Perches 			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR));
1676a70491ccSJoe Perches 			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR));
1677a70491ccSJoe Perches 			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD));
167863eeaf38SJesse Barnes 			I915_WRITE(IPEIR, ipeir);
16793143a2bfSChris Wilson 			POSTING_READ(IPEIR);
168063eeaf38SJesse Barnes 		} else {
168163eeaf38SJesse Barnes 			u32 ipeir = I915_READ(IPEIR_I965);
168263eeaf38SJesse Barnes 
1683a70491ccSJoe Perches 			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
1684a70491ccSJoe Perches 			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
1685a70491ccSJoe Perches 			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
1686a70491ccSJoe Perches 			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
168763eeaf38SJesse Barnes 			I915_WRITE(IPEIR_I965, ipeir);
16883143a2bfSChris Wilson 			POSTING_READ(IPEIR_I965);
168963eeaf38SJesse Barnes 		}
169063eeaf38SJesse Barnes 	}
169163eeaf38SJesse Barnes 
169263eeaf38SJesse Barnes 	I915_WRITE(EIR, eir);
16933143a2bfSChris Wilson 	POSTING_READ(EIR);
169463eeaf38SJesse Barnes 	eir = I915_READ(EIR);
169563eeaf38SJesse Barnes 	if (eir) {
169663eeaf38SJesse Barnes 		/*
169763eeaf38SJesse Barnes 		 * some errors might have become stuck,
169863eeaf38SJesse Barnes 		 * mask them.
169963eeaf38SJesse Barnes 		 */
170063eeaf38SJesse Barnes 		DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
170163eeaf38SJesse Barnes 		I915_WRITE(EMR, I915_READ(EMR) | eir);
170263eeaf38SJesse Barnes 		I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
170363eeaf38SJesse Barnes 	}
170435aed2e6SChris Wilson }
170535aed2e6SChris Wilson 
170635aed2e6SChris Wilson /**
170735aed2e6SChris Wilson  * i915_handle_error - handle an error interrupt
170835aed2e6SChris Wilson  * @dev: drm device
170935aed2e6SChris Wilson  *
171035aed2e6SChris Wilson  * Do some basic checking of register state at error interrupt time and
171135aed2e6SChris Wilson  * dump it to the syslog.  Also call i915_capture_error_state() to make
171235aed2e6SChris Wilson  * sure we get a record and make it available in debugfs.  Fire a uevent
171335aed2e6SChris Wilson  * so userspace knows something bad happened (should trigger collection
171435aed2e6SChris Wilson  * of a ring dump etc.).
171535aed2e6SChris Wilson  */
1716527f9e90SChris Wilson void i915_handle_error(struct drm_device *dev, bool wedged)
171735aed2e6SChris Wilson {
171835aed2e6SChris Wilson 	struct drm_i915_private *dev_priv = dev->dev_private;
171935aed2e6SChris Wilson 
172035aed2e6SChris Wilson 	i915_capture_error_state(dev);
172135aed2e6SChris Wilson 	i915_report_and_clear_eir(dev);
17228a905236SJesse Barnes 
1723ba1234d1SBen Gamari 	if (wedged) {
1724f69061beSDaniel Vetter 		atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
1725f69061beSDaniel Vetter 				&dev_priv->gpu_error.reset_counter);
1726ba1234d1SBen Gamari 
172711ed50ecSBen Gamari 		/*
172817e1df07SDaniel Vetter 		 * Wakeup waiting processes so that the reset work function
172917e1df07SDaniel Vetter 		 * i915_error_work_func doesn't deadlock trying to grab various
173017e1df07SDaniel Vetter 		 * locks. By bumping the reset counter first, the woken
173117e1df07SDaniel Vetter 		 * processes will see a reset in progress and back off,
173217e1df07SDaniel Vetter 		 * releasing their locks and then wait for the reset completion.
173317e1df07SDaniel Vetter 		 * We must do this for _all_ gpu waiters that might hold locks
173417e1df07SDaniel Vetter 		 * that the reset work needs to acquire.
173517e1df07SDaniel Vetter 		 *
173617e1df07SDaniel Vetter 		 * Note: The wake_up serves as the required memory barrier to
173717e1df07SDaniel Vetter 		 * ensure that the waiters see the updated value of the reset
173817e1df07SDaniel Vetter 		 * counter atomic_t.
173911ed50ecSBen Gamari 		 */
174017e1df07SDaniel Vetter 		i915_error_wake_up(dev_priv, false);
174111ed50ecSBen Gamari 	}
174211ed50ecSBen Gamari 
1743122f46baSDaniel Vetter 	/*
1744122f46baSDaniel Vetter 	 * Our reset work can grab modeset locks (since it needs to reset the
1745122f46baSDaniel Vetter 	 * state of outstanding pageflips). Hence it must not be run on our own
1746122f46baSDaniel Vetter 	 * dev_priv->wq work queue, since otherwise the flush_work in the pageflip
1747122f46baSDaniel Vetter 	 * code will deadlock.
1748122f46baSDaniel Vetter 	 */
1749122f46baSDaniel Vetter 	schedule_work(&dev_priv->gpu_error.work);
17508a905236SJesse Barnes }
17518a905236SJesse Barnes 
175221ad8330SVille Syrjälä static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe)
17534e5359cdSSimon Farnsworth {
17544e5359cdSSimon Farnsworth 	drm_i915_private_t *dev_priv = dev->dev_private;
17554e5359cdSSimon Farnsworth 	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
17564e5359cdSSimon Farnsworth 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
175705394f39SChris Wilson 	struct drm_i915_gem_object *obj;
17584e5359cdSSimon Farnsworth 	struct intel_unpin_work *work;
17594e5359cdSSimon Farnsworth 	unsigned long flags;
17604e5359cdSSimon Farnsworth 	bool stall_detected;
17614e5359cdSSimon Farnsworth 
17624e5359cdSSimon Farnsworth 	/* Ignore early vblank irqs */
17634e5359cdSSimon Farnsworth 	if (intel_crtc == NULL)
17644e5359cdSSimon Farnsworth 		return;
17654e5359cdSSimon Farnsworth 
17664e5359cdSSimon Farnsworth 	spin_lock_irqsave(&dev->event_lock, flags);
17674e5359cdSSimon Farnsworth 	work = intel_crtc->unpin_work;
17684e5359cdSSimon Farnsworth 
1769e7d841caSChris Wilson 	if (work == NULL ||
1770e7d841caSChris Wilson 	    atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE ||
1771e7d841caSChris Wilson 	    !work->enable_stall_check) {
17724e5359cdSSimon Farnsworth 		/* Either the pending flip IRQ arrived, or we're too early. Don't check */
17734e5359cdSSimon Farnsworth 		spin_unlock_irqrestore(&dev->event_lock, flags);
17744e5359cdSSimon Farnsworth 		return;
17754e5359cdSSimon Farnsworth 	}
17764e5359cdSSimon Farnsworth 
17774e5359cdSSimon Farnsworth 	/* Potential stall - if we see that the flip has happened, assume a missed interrupt */
177805394f39SChris Wilson 	obj = work->pending_flip_obj;
1779a6c45cf0SChris Wilson 	if (INTEL_INFO(dev)->gen >= 4) {
17809db4a9c7SJesse Barnes 		int dspsurf = DSPSURF(intel_crtc->plane);
1781446f2545SArmin Reese 		stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
1782f343c5f6SBen Widawsky 					i915_gem_obj_ggtt_offset(obj);
17834e5359cdSSimon Farnsworth 	} else {
17849db4a9c7SJesse Barnes 		int dspaddr = DSPADDR(intel_crtc->plane);
1785f343c5f6SBen Widawsky 		stall_detected = I915_READ(dspaddr) == (i915_gem_obj_ggtt_offset(obj) +
178601f2c773SVille Syrjälä 							crtc->y * crtc->fb->pitches[0] +
17874e5359cdSSimon Farnsworth 							crtc->x * crtc->fb->bits_per_pixel/8);
17884e5359cdSSimon Farnsworth 	}
17894e5359cdSSimon Farnsworth 
17904e5359cdSSimon Farnsworth 	spin_unlock_irqrestore(&dev->event_lock, flags);
17914e5359cdSSimon Farnsworth 
17924e5359cdSSimon Farnsworth 	if (stall_detected) {
17934e5359cdSSimon Farnsworth 		DRM_DEBUG_DRIVER("Pageflip stall detected\n");
17944e5359cdSSimon Farnsworth 		intel_prepare_page_flip(dev, intel_crtc->plane);
17954e5359cdSSimon Farnsworth 	}
17964e5359cdSSimon Farnsworth }
17974e5359cdSSimon Farnsworth 
179842f52ef8SKeith Packard /* Called from drm generic code, passed 'crtc' which
179942f52ef8SKeith Packard  * we use as a pipe index
180042f52ef8SKeith Packard  */
1801f71d4af4SJesse Barnes static int i915_enable_vblank(struct drm_device *dev, int pipe)
18020a3e67a4SJesse Barnes {
18030a3e67a4SJesse Barnes 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1804e9d21d7fSKeith Packard 	unsigned long irqflags;
180571e0ffa5SJesse Barnes 
18065eddb70bSChris Wilson 	if (!i915_pipe_enabled(dev, pipe))
180771e0ffa5SJesse Barnes 		return -EINVAL;
18080a3e67a4SJesse Barnes 
18091ec14ad3SChris Wilson 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1810f796cf8fSJesse Barnes 	if (INTEL_INFO(dev)->gen >= 4)
18117c463586SKeith Packard 		i915_enable_pipestat(dev_priv, pipe,
18127c463586SKeith Packard 				     PIPE_START_VBLANK_INTERRUPT_ENABLE);
18130a3e67a4SJesse Barnes 	else
18147c463586SKeith Packard 		i915_enable_pipestat(dev_priv, pipe,
18157c463586SKeith Packard 				     PIPE_VBLANK_INTERRUPT_ENABLE);
18168692d00eSChris Wilson 
18178692d00eSChris Wilson 	/* maintain vblank delivery even in deep C-states */
18188692d00eSChris Wilson 	if (dev_priv->info->gen == 3)
18196b26c86dSDaniel Vetter 		I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS));
18201ec14ad3SChris Wilson 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
18218692d00eSChris Wilson 
18220a3e67a4SJesse Barnes 	return 0;
18230a3e67a4SJesse Barnes }
18240a3e67a4SJesse Barnes 
1825f71d4af4SJesse Barnes static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
1826f796cf8fSJesse Barnes {
1827f796cf8fSJesse Barnes 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1828f796cf8fSJesse Barnes 	unsigned long irqflags;
1829b518421fSPaulo Zanoni 	uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
1830b518421fSPaulo Zanoni 						     DE_PIPE_VBLANK_ILK(pipe);
1831f796cf8fSJesse Barnes 
1832f796cf8fSJesse Barnes 	if (!i915_pipe_enabled(dev, pipe))
1833f796cf8fSJesse Barnes 		return -EINVAL;
1834f796cf8fSJesse Barnes 
1835f796cf8fSJesse Barnes 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1836b518421fSPaulo Zanoni 	ironlake_enable_display_irq(dev_priv, bit);
1837b1f14ad0SJesse Barnes 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1838b1f14ad0SJesse Barnes 
1839b1f14ad0SJesse Barnes 	return 0;
1840b1f14ad0SJesse Barnes }
1841b1f14ad0SJesse Barnes 
18427e231dbeSJesse Barnes static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
18437e231dbeSJesse Barnes {
18447e231dbeSJesse Barnes 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
18457e231dbeSJesse Barnes 	unsigned long irqflags;
184631acc7f5SJesse Barnes 	u32 imr;
18477e231dbeSJesse Barnes 
18487e231dbeSJesse Barnes 	if (!i915_pipe_enabled(dev, pipe))
18497e231dbeSJesse Barnes 		return -EINVAL;
18507e231dbeSJesse Barnes 
18517e231dbeSJesse Barnes 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
18527e231dbeSJesse Barnes 	imr = I915_READ(VLV_IMR);
185331acc7f5SJesse Barnes 	if (pipe == 0)
18547e231dbeSJesse Barnes 		imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
185531acc7f5SJesse Barnes 	else
18567e231dbeSJesse Barnes 		imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
18577e231dbeSJesse Barnes 	I915_WRITE(VLV_IMR, imr);
185831acc7f5SJesse Barnes 	i915_enable_pipestat(dev_priv, pipe,
185931acc7f5SJesse Barnes 			     PIPE_START_VBLANK_INTERRUPT_ENABLE);
18607e231dbeSJesse Barnes 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
18617e231dbeSJesse Barnes 
18627e231dbeSJesse Barnes 	return 0;
18637e231dbeSJesse Barnes }
18647e231dbeSJesse Barnes 
186542f52ef8SKeith Packard /* Called from drm generic code, passed 'crtc' which
186642f52ef8SKeith Packard  * we use as a pipe index
186742f52ef8SKeith Packard  */
1868f71d4af4SJesse Barnes static void i915_disable_vblank(struct drm_device *dev, int pipe)
18690a3e67a4SJesse Barnes {
18700a3e67a4SJesse Barnes 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1871e9d21d7fSKeith Packard 	unsigned long irqflags;
18720a3e67a4SJesse Barnes 
18731ec14ad3SChris Wilson 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
18748692d00eSChris Wilson 	if (dev_priv->info->gen == 3)
18756b26c86dSDaniel Vetter 		I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS));
18768692d00eSChris Wilson 
18777c463586SKeith Packard 	i915_disable_pipestat(dev_priv, pipe,
18787c463586SKeith Packard 			      PIPE_VBLANK_INTERRUPT_ENABLE |
18797c463586SKeith Packard 			      PIPE_START_VBLANK_INTERRUPT_ENABLE);
18801ec14ad3SChris Wilson 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
18810a3e67a4SJesse Barnes }
18820a3e67a4SJesse Barnes 
1883f71d4af4SJesse Barnes static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
1884f796cf8fSJesse Barnes {
1885f796cf8fSJesse Barnes 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1886f796cf8fSJesse Barnes 	unsigned long irqflags;
1887b518421fSPaulo Zanoni 	uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
1888b518421fSPaulo Zanoni 						     DE_PIPE_VBLANK_ILK(pipe);
1889f796cf8fSJesse Barnes 
1890f796cf8fSJesse Barnes 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1891b518421fSPaulo Zanoni 	ironlake_disable_display_irq(dev_priv, bit);
1892b1f14ad0SJesse Barnes 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1893b1f14ad0SJesse Barnes }
1894b1f14ad0SJesse Barnes 
18957e231dbeSJesse Barnes static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
18967e231dbeSJesse Barnes {
18977e231dbeSJesse Barnes 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
18987e231dbeSJesse Barnes 	unsigned long irqflags;
189931acc7f5SJesse Barnes 	u32 imr;
19007e231dbeSJesse Barnes 
19017e231dbeSJesse Barnes 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
190231acc7f5SJesse Barnes 	i915_disable_pipestat(dev_priv, pipe,
190331acc7f5SJesse Barnes 			      PIPE_START_VBLANK_INTERRUPT_ENABLE);
19047e231dbeSJesse Barnes 	imr = I915_READ(VLV_IMR);
190531acc7f5SJesse Barnes 	if (pipe == 0)
19067e231dbeSJesse Barnes 		imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
190731acc7f5SJesse Barnes 	else
19087e231dbeSJesse Barnes 		imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
19097e231dbeSJesse Barnes 	I915_WRITE(VLV_IMR, imr);
19107e231dbeSJesse Barnes 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
19117e231dbeSJesse Barnes }
19127e231dbeSJesse Barnes 
1913893eead0SChris Wilson static u32
1914893eead0SChris Wilson ring_last_seqno(struct intel_ring_buffer *ring)
1915852835f3SZou Nan hai {
1916893eead0SChris Wilson 	return list_entry(ring->request_list.prev,
1917893eead0SChris Wilson 			  struct drm_i915_gem_request, list)->seqno;
1918893eead0SChris Wilson }
1919893eead0SChris Wilson 
19209107e9d2SChris Wilson static bool
19219107e9d2SChris Wilson ring_idle(struct intel_ring_buffer *ring, u32 seqno)
1922893eead0SChris Wilson {
19239107e9d2SChris Wilson 	return (list_empty(&ring->request_list) ||
19249107e9d2SChris Wilson 		i915_seqno_passed(seqno, ring_last_seqno(ring)));
1925f65d9421SBen Gamari }
1926f65d9421SBen Gamari 
19276274f212SChris Wilson static struct intel_ring_buffer *
19286274f212SChris Wilson semaphore_waits_for(struct intel_ring_buffer *ring, u32 *seqno)
1929a24a11e6SChris Wilson {
1930a24a11e6SChris Wilson 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
19316274f212SChris Wilson 	u32 cmd, ipehr, acthd, acthd_min;
1932a24a11e6SChris Wilson 
1933a24a11e6SChris Wilson 	ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
1934a24a11e6SChris Wilson 	if ((ipehr & ~(0x3 << 16)) !=
1935a24a11e6SChris Wilson 	    (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | MI_SEMAPHORE_REGISTER))
19366274f212SChris Wilson 		return NULL;
1937a24a11e6SChris Wilson 
1938a24a11e6SChris Wilson 	/* ACTHD is likely pointing to the dword after the actual command,
1939a24a11e6SChris Wilson 	 * so scan backwards until we find the MBOX.
1940a24a11e6SChris Wilson 	 */
19416274f212SChris Wilson 	acthd = intel_ring_get_active_head(ring) & HEAD_ADDR;
1942a24a11e6SChris Wilson 	acthd_min = max((int)acthd - 3 * 4, 0);
1943a24a11e6SChris Wilson 	do {
1944a24a11e6SChris Wilson 		cmd = ioread32(ring->virtual_start + acthd);
1945a24a11e6SChris Wilson 		if (cmd == ipehr)
1946a24a11e6SChris Wilson 			break;
1947a24a11e6SChris Wilson 
1948a24a11e6SChris Wilson 		acthd -= 4;
1949a24a11e6SChris Wilson 		if (acthd < acthd_min)
19506274f212SChris Wilson 			return NULL;
1951a24a11e6SChris Wilson 	} while (1);
1952a24a11e6SChris Wilson 
19536274f212SChris Wilson 	*seqno = ioread32(ring->virtual_start+acthd+4)+1;
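	/*
	 * The dword after the MBOX command holds the semaphore compare value;
	 * the MBOX register select bits in IPEHR identify the signalling ring.
	 */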
19546274f212SChris Wilson 	return &dev_priv->ring[(ring->id + (((ipehr >> 17) & 1) + 1)) % 3];
1955a24a11e6SChris Wilson }
1956a24a11e6SChris Wilson 
19576274f212SChris Wilson static int semaphore_passed(struct intel_ring_buffer *ring)
19586274f212SChris Wilson {
19596274f212SChris Wilson 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
19606274f212SChris Wilson 	struct intel_ring_buffer *signaller;
19616274f212SChris Wilson 	u32 seqno, ctl;
19626274f212SChris Wilson 
19636274f212SChris Wilson 	ring->hangcheck.deadlock = true;
19646274f212SChris Wilson 
19656274f212SChris Wilson 	signaller = semaphore_waits_for(ring, &seqno);
19666274f212SChris Wilson 	if (signaller == NULL || signaller->hangcheck.deadlock)
19676274f212SChris Wilson 		return -1;
19686274f212SChris Wilson 
19696274f212SChris Wilson 	/* cursory check for an unkickable deadlock */
19706274f212SChris Wilson 	ctl = I915_READ_CTL(signaller);
19716274f212SChris Wilson 	if (ctl & RING_WAIT_SEMAPHORE && semaphore_passed(signaller) < 0)
19726274f212SChris Wilson 		return -1;
19736274f212SChris Wilson 
19746274f212SChris Wilson 	return i915_seqno_passed(signaller->get_seqno(signaller, false), seqno);
19756274f212SChris Wilson }
19766274f212SChris Wilson 
19776274f212SChris Wilson static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
19786274f212SChris Wilson {
19796274f212SChris Wilson 	struct intel_ring_buffer *ring;
19806274f212SChris Wilson 	int i;
19816274f212SChris Wilson 
19826274f212SChris Wilson 	for_each_ring(ring, dev_priv, i)
19836274f212SChris Wilson 		ring->hangcheck.deadlock = false;
19846274f212SChris Wilson }
19856274f212SChris Wilson 
1986ad8beaeaSMika Kuoppala static enum intel_ring_hangcheck_action
1987ad8beaeaSMika Kuoppala ring_stuck(struct intel_ring_buffer *ring, u32 acthd)
19881ec14ad3SChris Wilson {
19891ec14ad3SChris Wilson 	struct drm_device *dev = ring->dev;
19901ec14ad3SChris Wilson 	struct drm_i915_private *dev_priv = dev->dev_private;
19919107e9d2SChris Wilson 	u32 tmp;
19929107e9d2SChris Wilson 
19936274f212SChris Wilson 	if (ring->hangcheck.acthd != acthd)
1994f2f4d82fSJani Nikula 		return HANGCHECK_ACTIVE;
19956274f212SChris Wilson 
19969107e9d2SChris Wilson 	if (IS_GEN2(dev))
1997f2f4d82fSJani Nikula 		return HANGCHECK_HUNG;
19989107e9d2SChris Wilson 
19999107e9d2SChris Wilson 	/* Is the chip hanging on a WAIT_FOR_EVENT?
20009107e9d2SChris Wilson 	 * If so we can simply poke the RB_WAIT bit
20019107e9d2SChris Wilson 	 * and break the hang. This should work on
20029107e9d2SChris Wilson 	 * all but the second generation chipsets.
20039107e9d2SChris Wilson 	 */
20049107e9d2SChris Wilson 	tmp = I915_READ_CTL(ring);
20051ec14ad3SChris Wilson 	if (tmp & RING_WAIT) {
20061ec14ad3SChris Wilson 		DRM_ERROR("Kicking stuck wait on %s\n",
20071ec14ad3SChris Wilson 			  ring->name);
200809e14bf3SChris Wilson 		i915_handle_error(dev, false);
20091ec14ad3SChris Wilson 		I915_WRITE_CTL(ring, tmp);
2010f2f4d82fSJani Nikula 		return HANGCHECK_KICK;
20111ec14ad3SChris Wilson 	}
2012a24a11e6SChris Wilson 
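	/*
	 * semaphore_passed() returns -1 on a (potential) deadlock, 0 while the
	 * wait is still outstanding, and 1 when the stuck wait can be kicked.
	 */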
20136274f212SChris Wilson 	if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
20146274f212SChris Wilson 		switch (semaphore_passed(ring)) {
20156274f212SChris Wilson 		default:
2016f2f4d82fSJani Nikula 			return HANGCHECK_HUNG;
20176274f212SChris Wilson 		case 1:
2018a24a11e6SChris Wilson 			DRM_ERROR("Kicking stuck semaphore on %s\n",
2019a24a11e6SChris Wilson 				  ring->name);
202009e14bf3SChris Wilson 			i915_handle_error(dev, false);
2021a24a11e6SChris Wilson 			I915_WRITE_CTL(ring, tmp);
2022f2f4d82fSJani Nikula 			return HANGCHECK_KICK;
20236274f212SChris Wilson 		case 0:
2024f2f4d82fSJani Nikula 			return HANGCHECK_WAIT;
20256274f212SChris Wilson 		}
20269107e9d2SChris Wilson 	}
20279107e9d2SChris Wilson 
2028f2f4d82fSJani Nikula 	return HANGCHECK_HUNG;
2029a24a11e6SChris Wilson }
2030d1e61e7fSChris Wilson 
2031f65d9421SBen Gamari /**
2032f65d9421SBen Gamari  * This is called when the chip hasn't reported back with completed
203305407ff8SMika Kuoppala  * batchbuffers in a long time. We keep track of per-ring seqno progress and,
203405407ff8SMika Kuoppala  * if there is no progress, the hangcheck score for that ring is increased.
203505407ff8SMika Kuoppala  * Further, acthd is inspected to see if the ring is stuck. In the stuck case
203605407ff8SMika Kuoppala  * we kick the ring. If we see no progress on three subsequent calls
203705407ff8SMika Kuoppala  * we assume chip is wedged and try to fix it by resetting the chip.
2038f65d9421SBen Gamari  */
2039a658b5d2SDamien Lespiau static void i915_hangcheck_elapsed(unsigned long data)
2040f65d9421SBen Gamari {
2041f65d9421SBen Gamari 	struct drm_device *dev = (struct drm_device *)data;
2042f65d9421SBen Gamari 	drm_i915_private_t *dev_priv = dev->dev_private;
2043b4519513SChris Wilson 	struct intel_ring_buffer *ring;
2044b4519513SChris Wilson 	int i;
204505407ff8SMika Kuoppala 	int busy_count = 0, rings_hung = 0;
20469107e9d2SChris Wilson 	bool stuck[I915_NUM_RINGS] = { 0 };
20479107e9d2SChris Wilson #define BUSY 1
20489107e9d2SChris Wilson #define KICK 5
20499107e9d2SChris Wilson #define HUNG 20
20509107e9d2SChris Wilson #define FIRE 30
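/* Per-ring hangcheck score weights; a ring whose score exceeds FIRE is
 * reported as hung. */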
2051893eead0SChris Wilson 
20523e0dc6b0SBen Widawsky 	if (!i915_enable_hangcheck)
20533e0dc6b0SBen Widawsky 		return;
20543e0dc6b0SBen Widawsky 
2055b4519513SChris Wilson 	for_each_ring(ring, dev_priv, i) {
205605407ff8SMika Kuoppala 		u32 seqno, acthd;
20579107e9d2SChris Wilson 		bool busy = true;
2058b4519513SChris Wilson 
20596274f212SChris Wilson 		semaphore_clear_deadlocks(dev_priv);
20606274f212SChris Wilson 
206105407ff8SMika Kuoppala 		seqno = ring->get_seqno(ring, false);
206205407ff8SMika Kuoppala 		acthd = intel_ring_get_active_head(ring);
206305407ff8SMika Kuoppala 
206405407ff8SMika Kuoppala 		if (ring->hangcheck.seqno == seqno) {
20659107e9d2SChris Wilson 			if (ring_idle(ring, seqno)) {
2066da661464SMika Kuoppala 				ring->hangcheck.action = HANGCHECK_IDLE;
2067da661464SMika Kuoppala 
20689107e9d2SChris Wilson 				if (waitqueue_active(&ring->irq_queue)) {
20699107e9d2SChris Wilson 					/* Issue a wake-up to catch stuck h/w. */
2070094f9a54SChris Wilson 					if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) {
20719107e9d2SChris Wilson 						DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
20729107e9d2SChris Wilson 							  ring->name);
20739107e9d2SChris Wilson 						wake_up_all(&ring->irq_queue);
2074094f9a54SChris Wilson 					}
2075094f9a54SChris Wilson 					/* Safeguard against driver failure */
2076094f9a54SChris Wilson 					ring->hangcheck.score += BUSY;
20779107e9d2SChris Wilson 				} else
20789107e9d2SChris Wilson 					busy = false;
207905407ff8SMika Kuoppala 			} else {
20806274f212SChris Wilson 				/* We always increment the hangcheck score
20816274f212SChris Wilson 				 * if the ring is busy and still processing
20826274f212SChris Wilson 				 * the same request, so that no single request
20836274f212SChris Wilson 				 * can run indefinitely (such as a chain of
20846274f212SChris Wilson 				 * batches). The only time we do not increment
20856274f212SChris Wilson 				 * the hangcheck score on this ring is if this
20866274f212SChris Wilson 				 * ring is in a legitimate wait for another
20876274f212SChris Wilson 				 * ring. In that case the waiting ring is a
20886274f212SChris Wilson 				 * victim and we want to be sure we catch the
20896274f212SChris Wilson 				 * right culprit. Then every time we do kick
20906274f212SChris Wilson 				 * the ring, we add a small increment to the
20916274f212SChris Wilson 				 * score so that we can catch a batch that is
20926274f212SChris Wilson 				 * being repeatedly kicked and so responsible
20936274f212SChris Wilson 				 * for stalling the machine.
20949107e9d2SChris Wilson 				 */
2095ad8beaeaSMika Kuoppala 				ring->hangcheck.action = ring_stuck(ring,
2096ad8beaeaSMika Kuoppala 								    acthd);
2097ad8beaeaSMika Kuoppala 
2098ad8beaeaSMika Kuoppala 				switch (ring->hangcheck.action) {
2099da661464SMika Kuoppala 				case HANGCHECK_IDLE:
2100f2f4d82fSJani Nikula 				case HANGCHECK_WAIT:
21016274f212SChris Wilson 					break;
2102f2f4d82fSJani Nikula 				case HANGCHECK_ACTIVE:
2103ea04cb31SJani Nikula 					ring->hangcheck.score += BUSY;
21046274f212SChris Wilson 					break;
2105f2f4d82fSJani Nikula 				case HANGCHECK_KICK:
2106ea04cb31SJani Nikula 					ring->hangcheck.score += KICK;
21076274f212SChris Wilson 					break;
2108f2f4d82fSJani Nikula 				case HANGCHECK_HUNG:
2109ea04cb31SJani Nikula 					ring->hangcheck.score += HUNG;
21106274f212SChris Wilson 					stuck[i] = true;
21116274f212SChris Wilson 					break;
21126274f212SChris Wilson 				}
211305407ff8SMika Kuoppala 			}
21149107e9d2SChris Wilson 		} else {
2115da661464SMika Kuoppala 			ring->hangcheck.action = HANGCHECK_ACTIVE;
2116da661464SMika Kuoppala 
21179107e9d2SChris Wilson 			/* Gradually reduce the count so that we catch DoS
21189107e9d2SChris Wilson 			 * attempts across multiple batches.
21199107e9d2SChris Wilson 			 */
21209107e9d2SChris Wilson 			if (ring->hangcheck.score > 0)
21219107e9d2SChris Wilson 				ring->hangcheck.score--;
2122cbb465e7SChris Wilson 		}
2123f65d9421SBen Gamari 
212405407ff8SMika Kuoppala 		ring->hangcheck.seqno = seqno;
212505407ff8SMika Kuoppala 		ring->hangcheck.acthd = acthd;
21269107e9d2SChris Wilson 		busy_count += busy;
212705407ff8SMika Kuoppala 	}
212805407ff8SMika Kuoppala 
212905407ff8SMika Kuoppala 	for_each_ring(ring, dev_priv, i) {
21309107e9d2SChris Wilson 		if (ring->hangcheck.score > FIRE) {
2131b8d88d1dSDaniel Vetter 			DRM_INFO("%s on %s\n",
213205407ff8SMika Kuoppala 				 stuck[i] ? "stuck" : "no progress",
2133a43adf07SChris Wilson 				 ring->name);
2134a43adf07SChris Wilson 			rings_hung++;
213505407ff8SMika Kuoppala 		}
213605407ff8SMika Kuoppala 	}
213705407ff8SMika Kuoppala 
213805407ff8SMika Kuoppala 	if (rings_hung)
213905407ff8SMika Kuoppala 		return i915_handle_error(dev, true);
214005407ff8SMika Kuoppala 
214105407ff8SMika Kuoppala 	if (busy_count)
214205407ff8SMika Kuoppala 		/* Reset timer in case the chip hangs without another request
214305407ff8SMika Kuoppala 		 * being added */
214410cd45b6SMika Kuoppala 		i915_queue_hangcheck(dev);
214510cd45b6SMika Kuoppala }
214610cd45b6SMika Kuoppala 
214710cd45b6SMika Kuoppala void i915_queue_hangcheck(struct drm_device *dev)
214810cd45b6SMika Kuoppala {
214910cd45b6SMika Kuoppala 	struct drm_i915_private *dev_priv = dev->dev_private;
215010cd45b6SMika Kuoppala 	if (!i915_enable_hangcheck)
215110cd45b6SMika Kuoppala 		return;
215210cd45b6SMika Kuoppala 
215399584db3SDaniel Vetter 	mod_timer(&dev_priv->gpu_error.hangcheck_timer,
215410cd45b6SMika Kuoppala 		  round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
2155f65d9421SBen Gamari }
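
/*
 * Illustrative usage sketch (not part of this file): callers are expected to
 * re-arm the hangcheck whenever new work is submitted, so the timer only runs
 * while there is something to watch. The function below is hypothetical and
 * only shows the intent.
 */
#if 0
static void example_submit_and_watch(struct drm_device *dev)
{
	/* ... emit the new request to the ring ... */
	i915_queue_hangcheck(dev);	/* make sure the watchdog is armed */
}
#endif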
2156f65d9421SBen Gamari 
215791738a95SPaulo Zanoni static void ibx_irq_preinstall(struct drm_device *dev)
215891738a95SPaulo Zanoni {
215991738a95SPaulo Zanoni 	struct drm_i915_private *dev_priv = dev->dev_private;
216091738a95SPaulo Zanoni 
216191738a95SPaulo Zanoni 	if (HAS_PCH_NOP(dev))
216291738a95SPaulo Zanoni 		return;
216391738a95SPaulo Zanoni 
216491738a95SPaulo Zanoni 	/* south display irq */
216591738a95SPaulo Zanoni 	I915_WRITE(SDEIMR, 0xffffffff);
216691738a95SPaulo Zanoni 	/*
216791738a95SPaulo Zanoni 	 * SDEIER is also touched by the interrupt handler to work around missed
216891738a95SPaulo Zanoni 	 * PCH interrupts. Hence we can't update it after the interrupt handler
216991738a95SPaulo Zanoni 	 * is enabled - instead we unconditionally enable all PCH interrupt
217091738a95SPaulo Zanoni 	 * sources here, but then only unmask them as needed with SDEIMR.
217191738a95SPaulo Zanoni 	 */
217291738a95SPaulo Zanoni 	I915_WRITE(SDEIER, 0xffffffff);
217391738a95SPaulo Zanoni 	POSTING_READ(SDEIER);
217491738a95SPaulo Zanoni }
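
/*
 * Illustrative sketch (not built) of the IER/IMR split described above:
 * SDEIER is left fully enabled and individual PCH sources are later exposed
 * by clearing their bit in SDEIMR. The helper name below is hypothetical.
 */
#if 0
static void example_unmask_pch_source(struct drm_i915_private *dev_priv,
				      u32 bit)
{
	I915_WRITE(SDEIMR, I915_READ(SDEIMR) & ~bit);
	POSTING_READ(SDEIMR);
}
#endif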
217591738a95SPaulo Zanoni 
2176d18ea1b5SDaniel Vetter static void gen5_gt_irq_preinstall(struct drm_device *dev)
2177d18ea1b5SDaniel Vetter {
2178d18ea1b5SDaniel Vetter 	struct drm_i915_private *dev_priv = dev->dev_private;
2179d18ea1b5SDaniel Vetter 
2180d18ea1b5SDaniel Vetter 	/* and GT */
2181d18ea1b5SDaniel Vetter 	I915_WRITE(GTIMR, 0xffffffff);
2182d18ea1b5SDaniel Vetter 	I915_WRITE(GTIER, 0x0);
2183d18ea1b5SDaniel Vetter 	POSTING_READ(GTIER);
2184d18ea1b5SDaniel Vetter 
2185d18ea1b5SDaniel Vetter 	if (INTEL_INFO(dev)->gen >= 6) {
2186d18ea1b5SDaniel Vetter 		/* and PM */
2187d18ea1b5SDaniel Vetter 		I915_WRITE(GEN6_PMIMR, 0xffffffff);
2188d18ea1b5SDaniel Vetter 		I915_WRITE(GEN6_PMIER, 0x0);
2189d18ea1b5SDaniel Vetter 		POSTING_READ(GEN6_PMIER);
2190d18ea1b5SDaniel Vetter 	}
2191d18ea1b5SDaniel Vetter }
2192d18ea1b5SDaniel Vetter 
2193c0e09200SDave Airlie /* drm_dma.h hooks
2194c0e09200SDave Airlie */
2195f71d4af4SJesse Barnes static void ironlake_irq_preinstall(struct drm_device *dev)
2196036a4a7dSZhenyu Wang {
2197036a4a7dSZhenyu Wang 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2198036a4a7dSZhenyu Wang 
21994697995bSJesse Barnes 	atomic_set(&dev_priv->irq_received, 0);
22004697995bSJesse Barnes 
2201036a4a7dSZhenyu Wang 	I915_WRITE(HWSTAM, 0xeffe);
2202bdfcdb63SDaniel Vetter 
2203036a4a7dSZhenyu Wang 	I915_WRITE(DEIMR, 0xffffffff);
2204036a4a7dSZhenyu Wang 	I915_WRITE(DEIER, 0x0);
22053143a2bfSChris Wilson 	POSTING_READ(DEIER);
2206036a4a7dSZhenyu Wang 
2207d18ea1b5SDaniel Vetter 	gen5_gt_irq_preinstall(dev);
2208c650156aSZhenyu Wang 
220991738a95SPaulo Zanoni 	ibx_irq_preinstall(dev);
22107d99163dSBen Widawsky }
22117d99163dSBen Widawsky 
22127e231dbeSJesse Barnes static void valleyview_irq_preinstall(struct drm_device *dev)
22137e231dbeSJesse Barnes {
22147e231dbeSJesse Barnes 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
22157e231dbeSJesse Barnes 	int pipe;
22167e231dbeSJesse Barnes 
22177e231dbeSJesse Barnes 	atomic_set(&dev_priv->irq_received, 0);
22187e231dbeSJesse Barnes 
22197e231dbeSJesse Barnes 	/* VLV magic */
22207e231dbeSJesse Barnes 	I915_WRITE(VLV_IMR, 0);
22217e231dbeSJesse Barnes 	I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
22227e231dbeSJesse Barnes 	I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
22237e231dbeSJesse Barnes 	I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
22247e231dbeSJesse Barnes 
22257e231dbeSJesse Barnes 	/* and GT */
22267e231dbeSJesse Barnes 	I915_WRITE(GTIIR, I915_READ(GTIIR));
22277e231dbeSJesse Barnes 	I915_WRITE(GTIIR, I915_READ(GTIIR));
2228d18ea1b5SDaniel Vetter 
2229d18ea1b5SDaniel Vetter 	gen5_gt_irq_preinstall(dev);
22307e231dbeSJesse Barnes 
22317e231dbeSJesse Barnes 	I915_WRITE(DPINVGTT, 0xff);
22327e231dbeSJesse Barnes 
22337e231dbeSJesse Barnes 	I915_WRITE(PORT_HOTPLUG_EN, 0);
22347e231dbeSJesse Barnes 	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
22357e231dbeSJesse Barnes 	for_each_pipe(pipe)
22367e231dbeSJesse Barnes 		I915_WRITE(PIPESTAT(pipe), 0xffff);
22377e231dbeSJesse Barnes 	I915_WRITE(VLV_IIR, 0xffffffff);
22387e231dbeSJesse Barnes 	I915_WRITE(VLV_IMR, 0xffffffff);
22397e231dbeSJesse Barnes 	I915_WRITE(VLV_IER, 0x0);
22407e231dbeSJesse Barnes 	POSTING_READ(VLV_IER);
22417e231dbeSJesse Barnes }
22427e231dbeSJesse Barnes 
224382a28bcfSDaniel Vetter static void ibx_hpd_irq_setup(struct drm_device *dev)
224482a28bcfSDaniel Vetter {
224582a28bcfSDaniel Vetter 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
224682a28bcfSDaniel Vetter 	struct drm_mode_config *mode_config = &dev->mode_config;
224782a28bcfSDaniel Vetter 	struct intel_encoder *intel_encoder;
2248fee884edSDaniel Vetter 	u32 hotplug_irqs, hotplug, enabled_irqs = 0;
224982a28bcfSDaniel Vetter 
225082a28bcfSDaniel Vetter 	if (HAS_PCH_IBX(dev)) {
2251fee884edSDaniel Vetter 		hotplug_irqs = SDE_HOTPLUG_MASK;
225282a28bcfSDaniel Vetter 		list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
2253cd569aedSEgbert Eich 			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
2254fee884edSDaniel Vetter 				enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin];
225582a28bcfSDaniel Vetter 	} else {
2256fee884edSDaniel Vetter 		hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
225782a28bcfSDaniel Vetter 		list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
2258cd569aedSEgbert Eich 			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
2259fee884edSDaniel Vetter 				enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin];
226082a28bcfSDaniel Vetter 	}
226182a28bcfSDaniel Vetter 
2262fee884edSDaniel Vetter 	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
226382a28bcfSDaniel Vetter 
22647fe0b973SKeith Packard 	/*
22657fe0b973SKeith Packard 	 * Enable digital hotplug on the PCH, and configure the DP short pulse
22667fe0b973SKeith Packard 	 * duration to 2ms (which is the minimum in the Display Port spec)
22677fe0b973SKeith Packard 	 *
22687fe0b973SKeith Packard 	 * This register is the same on all known PCH chips.
22697fe0b973SKeith Packard 	 */
22707fe0b973SKeith Packard 	hotplug = I915_READ(PCH_PORT_HOTPLUG);
22717fe0b973SKeith Packard 	hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
22727fe0b973SKeith Packard 	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
22737fe0b973SKeith Packard 	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
22747fe0b973SKeith Packard 	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
22757fe0b973SKeith Packard 	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
22767fe0b973SKeith Packard }
22777fe0b973SKeith Packard 
2278d46da437SPaulo Zanoni static void ibx_irq_postinstall(struct drm_device *dev)
2279d46da437SPaulo Zanoni {
2280d46da437SPaulo Zanoni 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
228182a28bcfSDaniel Vetter 	u32 mask;
2282d46da437SPaulo Zanoni 
2283692a04cfSDaniel Vetter 	if (HAS_PCH_NOP(dev))
2284692a04cfSDaniel Vetter 		return;
2285692a04cfSDaniel Vetter 
22868664281bSPaulo Zanoni 	if (HAS_PCH_IBX(dev)) {
22878664281bSPaulo Zanoni 		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_TRANSB_FIFO_UNDER |
2288de032bf4SPaulo Zanoni 		       SDE_TRANSA_FIFO_UNDER | SDE_POISON;
22898664281bSPaulo Zanoni 	} else {
22908664281bSPaulo Zanoni 		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT | SDE_ERROR_CPT;
22918664281bSPaulo Zanoni 
22928664281bSPaulo Zanoni 		I915_WRITE(SERR_INT, I915_READ(SERR_INT));
22938664281bSPaulo Zanoni 	}
2294ab5c608bSBen Widawsky 
2295d46da437SPaulo Zanoni 	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
2296d46da437SPaulo Zanoni 	I915_WRITE(SDEIMR, ~mask);
2297d46da437SPaulo Zanoni }
2298d46da437SPaulo Zanoni 
22990a9a8c91SDaniel Vetter static void gen5_gt_irq_postinstall(struct drm_device *dev)
23000a9a8c91SDaniel Vetter {
23010a9a8c91SDaniel Vetter 	struct drm_i915_private *dev_priv = dev->dev_private;
23020a9a8c91SDaniel Vetter 	u32 pm_irqs, gt_irqs;
23030a9a8c91SDaniel Vetter 
23040a9a8c91SDaniel Vetter 	pm_irqs = gt_irqs = 0;
23050a9a8c91SDaniel Vetter 
23060a9a8c91SDaniel Vetter 	dev_priv->gt_irq_mask = ~0;
2307040d2baaSBen Widawsky 	if (HAS_L3_DPF(dev)) {
23080a9a8c91SDaniel Vetter 		/* L3 parity interrupt is always unmasked. */
230935a85ac6SBen Widawsky 		dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
231035a85ac6SBen Widawsky 		gt_irqs |= GT_PARITY_ERROR(dev);
23110a9a8c91SDaniel Vetter 	}
23120a9a8c91SDaniel Vetter 
23130a9a8c91SDaniel Vetter 	gt_irqs |= GT_RENDER_USER_INTERRUPT;
23140a9a8c91SDaniel Vetter 	if (IS_GEN5(dev)) {
23150a9a8c91SDaniel Vetter 		gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
23160a9a8c91SDaniel Vetter 			   ILK_BSD_USER_INTERRUPT;
23170a9a8c91SDaniel Vetter 	} else {
23180a9a8c91SDaniel Vetter 		gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
23190a9a8c91SDaniel Vetter 	}
23200a9a8c91SDaniel Vetter 
23210a9a8c91SDaniel Vetter 	I915_WRITE(GTIIR, I915_READ(GTIIR));
23220a9a8c91SDaniel Vetter 	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
23230a9a8c91SDaniel Vetter 	I915_WRITE(GTIER, gt_irqs);
23240a9a8c91SDaniel Vetter 	POSTING_READ(GTIER);
23250a9a8c91SDaniel Vetter 
23260a9a8c91SDaniel Vetter 	if (INTEL_INFO(dev)->gen >= 6) {
23270a9a8c91SDaniel Vetter 		pm_irqs |= GEN6_PM_RPS_EVENTS;
23280a9a8c91SDaniel Vetter 
23290a9a8c91SDaniel Vetter 		if (HAS_VEBOX(dev))
23300a9a8c91SDaniel Vetter 			pm_irqs |= PM_VEBOX_USER_INTERRUPT;
23310a9a8c91SDaniel Vetter 
2332605cd25bSPaulo Zanoni 		dev_priv->pm_irq_mask = 0xffffffff;
23330a9a8c91SDaniel Vetter 		I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
2334605cd25bSPaulo Zanoni 		I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask);
23350a9a8c91SDaniel Vetter 		I915_WRITE(GEN6_PMIER, pm_irqs);
23360a9a8c91SDaniel Vetter 		POSTING_READ(GEN6_PMIER);
23370a9a8c91SDaniel Vetter 	}
23380a9a8c91SDaniel Vetter }
23390a9a8c91SDaniel Vetter 
2340f71d4af4SJesse Barnes static int ironlake_irq_postinstall(struct drm_device *dev)
2341036a4a7dSZhenyu Wang {
23424bc9d430SDaniel Vetter 	unsigned long irqflags;
2343036a4a7dSZhenyu Wang 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
23448e76f8dcSPaulo Zanoni 	u32 display_mask, extra_mask;
23458e76f8dcSPaulo Zanoni 
23468e76f8dcSPaulo Zanoni 	if (INTEL_INFO(dev)->gen >= 7) {
23478e76f8dcSPaulo Zanoni 		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
23488e76f8dcSPaulo Zanoni 				DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
23498e76f8dcSPaulo Zanoni 				DE_PLANEB_FLIP_DONE_IVB |
23508e76f8dcSPaulo Zanoni 				DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB |
23518e76f8dcSPaulo Zanoni 				DE_ERR_INT_IVB);
23528e76f8dcSPaulo Zanoni 		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
23538e76f8dcSPaulo Zanoni 			      DE_PIPEA_VBLANK_IVB);
23548e76f8dcSPaulo Zanoni 
23558e76f8dcSPaulo Zanoni 		I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));
23568e76f8dcSPaulo Zanoni 	} else {
23578e76f8dcSPaulo Zanoni 		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
2358ce99c256SDaniel Vetter 				DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
23598664281bSPaulo Zanoni 				DE_AUX_CHANNEL_A | DE_PIPEB_FIFO_UNDERRUN |
23608e76f8dcSPaulo Zanoni 				DE_PIPEA_FIFO_UNDERRUN | DE_POISON);
23618e76f8dcSPaulo Zanoni 		extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT;
23628e76f8dcSPaulo Zanoni 	}
2363036a4a7dSZhenyu Wang 
23641ec14ad3SChris Wilson 	dev_priv->irq_mask = ~display_mask;
2365036a4a7dSZhenyu Wang 
2366036a4a7dSZhenyu Wang 	/* should always be able to generate irqs */
2367036a4a7dSZhenyu Wang 	I915_WRITE(DEIIR, I915_READ(DEIIR));
23681ec14ad3SChris Wilson 	I915_WRITE(DEIMR, dev_priv->irq_mask);
23698e76f8dcSPaulo Zanoni 	I915_WRITE(DEIER, display_mask | extra_mask);
23703143a2bfSChris Wilson 	POSTING_READ(DEIER);
2371036a4a7dSZhenyu Wang 
23720a9a8c91SDaniel Vetter 	gen5_gt_irq_postinstall(dev);
2373036a4a7dSZhenyu Wang 
2374d46da437SPaulo Zanoni 	ibx_irq_postinstall(dev);
23757fe0b973SKeith Packard 
2376f97108d1SJesse Barnes 	if (IS_IRONLAKE_M(dev)) {
23776005ce42SDaniel Vetter 		/* Enable PCU event interrupts
23786005ce42SDaniel Vetter 		 *
23796005ce42SDaniel Vetter 		 * spinlocking not required here for correctness since interrupt
23804bc9d430SDaniel Vetter 		 * setup is guaranteed to run in single-threaded context. But we
23814bc9d430SDaniel Vetter 		 * need it to make the assert_spin_locked happy. */
23824bc9d430SDaniel Vetter 		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2383f97108d1SJesse Barnes 		ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
23844bc9d430SDaniel Vetter 		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2385f97108d1SJesse Barnes 	}
2386f97108d1SJesse Barnes 
2387036a4a7dSZhenyu Wang 	return 0;
2388036a4a7dSZhenyu Wang }
2389036a4a7dSZhenyu Wang 
23907e231dbeSJesse Barnes static int valleyview_irq_postinstall(struct drm_device *dev)
23917e231dbeSJesse Barnes {
23927e231dbeSJesse Barnes 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
23937e231dbeSJesse Barnes 	u32 enable_mask;
239431acc7f5SJesse Barnes 	u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV;
2395b79480baSDaniel Vetter 	unsigned long irqflags;
23967e231dbeSJesse Barnes 
23977e231dbeSJesse Barnes 	enable_mask = I915_DISPLAY_PORT_INTERRUPT;
239831acc7f5SJesse Barnes 	enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
239931acc7f5SJesse Barnes 		I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
240031acc7f5SJesse Barnes 		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
24017e231dbeSJesse Barnes 		I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
24027e231dbeSJesse Barnes 
240331acc7f5SJesse Barnes 	/*
240431acc7f5SJesse Barnes 	 * Leave vblank interrupts masked initially.  enable/disable will
240531acc7f5SJesse Barnes 	 * toggle them based on usage.
240631acc7f5SJesse Barnes 	 */
240731acc7f5SJesse Barnes 	dev_priv->irq_mask = (~enable_mask) |
240831acc7f5SJesse Barnes 		I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
240931acc7f5SJesse Barnes 		I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
24107e231dbeSJesse Barnes 
241120afbda2SDaniel Vetter 	I915_WRITE(PORT_HOTPLUG_EN, 0);
241220afbda2SDaniel Vetter 	POSTING_READ(PORT_HOTPLUG_EN);
241320afbda2SDaniel Vetter 
24147e231dbeSJesse Barnes 	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
24157e231dbeSJesse Barnes 	I915_WRITE(VLV_IER, enable_mask);
24167e231dbeSJesse Barnes 	I915_WRITE(VLV_IIR, 0xffffffff);
24177e231dbeSJesse Barnes 	I915_WRITE(PIPESTAT(0), 0xffff);
24187e231dbeSJesse Barnes 	I915_WRITE(PIPESTAT(1), 0xffff);
24197e231dbeSJesse Barnes 	POSTING_READ(VLV_IER);
24207e231dbeSJesse Barnes 
2421b79480baSDaniel Vetter 	/* Interrupt setup is already guaranteed to be single-threaded; this is
2422b79480baSDaniel Vetter 	 * just to make the assert_spin_locked check happy. */
2423b79480baSDaniel Vetter 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
242431acc7f5SJesse Barnes 	i915_enable_pipestat(dev_priv, 0, pipestat_enable);
2425515ac2bbSDaniel Vetter 	i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
242631acc7f5SJesse Barnes 	i915_enable_pipestat(dev_priv, 1, pipestat_enable);
2427b79480baSDaniel Vetter 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
242831acc7f5SJesse Barnes 
24297e231dbeSJesse Barnes 	I915_WRITE(VLV_IIR, 0xffffffff);
24307e231dbeSJesse Barnes 	I915_WRITE(VLV_IIR, 0xffffffff);
24317e231dbeSJesse Barnes 
24320a9a8c91SDaniel Vetter 	gen5_gt_irq_postinstall(dev);
24337e231dbeSJesse Barnes 
24347e231dbeSJesse Barnes 	/* ack & enable invalid PTE error interrupts */
24357e231dbeSJesse Barnes #if 0 /* FIXME: add support to irq handler for checking these bits */
24367e231dbeSJesse Barnes 	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
24377e231dbeSJesse Barnes 	I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
24387e231dbeSJesse Barnes #endif
24397e231dbeSJesse Barnes 
24407e231dbeSJesse Barnes 	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
244120afbda2SDaniel Vetter 
244220afbda2SDaniel Vetter 	return 0;
244320afbda2SDaniel Vetter }
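
/*
 * Illustrative sketch (not built): the vblank enable path referred to in the
 * "Leave vblank interrupts masked initially" comment above simply clears the
 * corresponding bit in dev_priv->irq_mask and VLV_IMR under irq_lock; the
 * disable path sets it again. The function name is hypothetical.
 */
#if 0
static void example_vlv_unmask_vblank(struct drm_i915_private *dev_priv,
				      u32 vblank_bit)
{
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	dev_priv->irq_mask &= ~vblank_bit;
	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
#endif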
244420afbda2SDaniel Vetter 
24457e231dbeSJesse Barnes static void valleyview_irq_uninstall(struct drm_device *dev)
24467e231dbeSJesse Barnes {
24477e231dbeSJesse Barnes 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
24487e231dbeSJesse Barnes 	int pipe;
24497e231dbeSJesse Barnes 
24507e231dbeSJesse Barnes 	if (!dev_priv)
24517e231dbeSJesse Barnes 		return;
24527e231dbeSJesse Barnes 
2453ac4c16c5SEgbert Eich 	del_timer_sync(&dev_priv->hotplug_reenable_timer);
2454ac4c16c5SEgbert Eich 
24557e231dbeSJesse Barnes 	for_each_pipe(pipe)
24567e231dbeSJesse Barnes 		I915_WRITE(PIPESTAT(pipe), 0xffff);
24577e231dbeSJesse Barnes 
24587e231dbeSJesse Barnes 	I915_WRITE(HWSTAM, 0xffffffff);
24597e231dbeSJesse Barnes 	I915_WRITE(PORT_HOTPLUG_EN, 0);
24607e231dbeSJesse Barnes 	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
24617e231dbeSJesse Barnes 	for_each_pipe(pipe)
24627e231dbeSJesse Barnes 		I915_WRITE(PIPESTAT(pipe), 0xffff);
24637e231dbeSJesse Barnes 	I915_WRITE(VLV_IIR, 0xffffffff);
24647e231dbeSJesse Barnes 	I915_WRITE(VLV_IMR, 0xffffffff);
24657e231dbeSJesse Barnes 	I915_WRITE(VLV_IER, 0x0);
24667e231dbeSJesse Barnes 	POSTING_READ(VLV_IER);
24677e231dbeSJesse Barnes }
24687e231dbeSJesse Barnes 
2469f71d4af4SJesse Barnes static void ironlake_irq_uninstall(struct drm_device *dev)
2470036a4a7dSZhenyu Wang {
2471036a4a7dSZhenyu Wang 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
24724697995bSJesse Barnes 
24734697995bSJesse Barnes 	if (!dev_priv)
24744697995bSJesse Barnes 		return;
24754697995bSJesse Barnes 
2476ac4c16c5SEgbert Eich 	del_timer_sync(&dev_priv->hotplug_reenable_timer);
2477ac4c16c5SEgbert Eich 
2478036a4a7dSZhenyu Wang 	I915_WRITE(HWSTAM, 0xffffffff);
2479036a4a7dSZhenyu Wang 
2480036a4a7dSZhenyu Wang 	I915_WRITE(DEIMR, 0xffffffff);
2481036a4a7dSZhenyu Wang 	I915_WRITE(DEIER, 0x0);
2482036a4a7dSZhenyu Wang 	I915_WRITE(DEIIR, I915_READ(DEIIR));
24838664281bSPaulo Zanoni 	if (IS_GEN7(dev))
24848664281bSPaulo Zanoni 		I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));
2485036a4a7dSZhenyu Wang 
2486036a4a7dSZhenyu Wang 	I915_WRITE(GTIMR, 0xffffffff);
2487036a4a7dSZhenyu Wang 	I915_WRITE(GTIER, 0x0);
2488036a4a7dSZhenyu Wang 	I915_WRITE(GTIIR, I915_READ(GTIIR));
2489192aac1fSKeith Packard 
2490ab5c608bSBen Widawsky 	if (HAS_PCH_NOP(dev))
2491ab5c608bSBen Widawsky 		return;
2492ab5c608bSBen Widawsky 
2493192aac1fSKeith Packard 	I915_WRITE(SDEIMR, 0xffffffff);
2494192aac1fSKeith Packard 	I915_WRITE(SDEIER, 0x0);
2495192aac1fSKeith Packard 	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
24968664281bSPaulo Zanoni 	if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
24978664281bSPaulo Zanoni 		I915_WRITE(SERR_INT, I915_READ(SERR_INT));
2498036a4a7dSZhenyu Wang }
2499036a4a7dSZhenyu Wang 
2500c2798b19SChris Wilson static void i8xx_irq_preinstall(struct drm_device * dev)
2501c2798b19SChris Wilson {
2502c2798b19SChris Wilson 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2503c2798b19SChris Wilson 	int pipe;
2504c2798b19SChris Wilson 
2505c2798b19SChris Wilson 	atomic_set(&dev_priv->irq_received, 0);
2506c2798b19SChris Wilson 
2507c2798b19SChris Wilson 	for_each_pipe(pipe)
2508c2798b19SChris Wilson 		I915_WRITE(PIPESTAT(pipe), 0);
2509c2798b19SChris Wilson 	I915_WRITE16(IMR, 0xffff);
2510c2798b19SChris Wilson 	I915_WRITE16(IER, 0x0);
2511c2798b19SChris Wilson 	POSTING_READ16(IER);
2512c2798b19SChris Wilson }
2513c2798b19SChris Wilson 
2514c2798b19SChris Wilson static int i8xx_irq_postinstall(struct drm_device *dev)
2515c2798b19SChris Wilson {
2516c2798b19SChris Wilson 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2517c2798b19SChris Wilson 
2518c2798b19SChris Wilson 	I915_WRITE16(EMR,
2519c2798b19SChris Wilson 		     ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
2520c2798b19SChris Wilson 
2521c2798b19SChris Wilson 	/* Unmask the interrupts that we always want on. */
2522c2798b19SChris Wilson 	dev_priv->irq_mask =
2523c2798b19SChris Wilson 		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2524c2798b19SChris Wilson 		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2525c2798b19SChris Wilson 		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2526c2798b19SChris Wilson 		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
2527c2798b19SChris Wilson 		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2528c2798b19SChris Wilson 	I915_WRITE16(IMR, dev_priv->irq_mask);
2529c2798b19SChris Wilson 
2530c2798b19SChris Wilson 	I915_WRITE16(IER,
2531c2798b19SChris Wilson 		     I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2532c2798b19SChris Wilson 		     I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2533c2798b19SChris Wilson 		     I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
2534c2798b19SChris Wilson 		     I915_USER_INTERRUPT);
2535c2798b19SChris Wilson 	POSTING_READ16(IER);
2536c2798b19SChris Wilson 
2537c2798b19SChris Wilson 	return 0;
2538c2798b19SChris Wilson }
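
/*
 * A note on the IMR/IER pairing above (descriptive only): IER selects which
 * events may raise an interrupt at all, while IMR hides individual events
 * from IIR; keeping dev_priv->irq_mask as the complement of the always-wanted
 * set lets later code expose an extra event by clearing its bit in IMR.
 */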
2539c2798b19SChris Wilson 
254090a72f87SVille Syrjälä /*
254190a72f87SVille Syrjälä  * Returns true when a page flip has completed.
254290a72f87SVille Syrjälä  */
254390a72f87SVille Syrjälä static bool i8xx_handle_vblank(struct drm_device *dev,
254490a72f87SVille Syrjälä 			       int pipe, u16 iir)
254590a72f87SVille Syrjälä {
254690a72f87SVille Syrjälä 	drm_i915_private_t *dev_priv = dev->dev_private;
254790a72f87SVille Syrjälä 	u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(pipe);
254890a72f87SVille Syrjälä 
254990a72f87SVille Syrjälä 	if (!drm_handle_vblank(dev, pipe))
255090a72f87SVille Syrjälä 		return false;
255190a72f87SVille Syrjälä 
255290a72f87SVille Syrjälä 	if ((iir & flip_pending) == 0)
255390a72f87SVille Syrjälä 		return false;
255490a72f87SVille Syrjälä 
255590a72f87SVille Syrjälä 	intel_prepare_page_flip(dev, pipe);
255690a72f87SVille Syrjälä 
255790a72f87SVille Syrjälä 	/* We detect FlipDone by looking for the change in PendingFlip from '1'
255890a72f87SVille Syrjälä 	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
255990a72f87SVille Syrjälä 	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
256090a72f87SVille Syrjälä 	 * the flip is completed (no longer pending). Since this doesn't raise
256190a72f87SVille Syrjälä 	 * an interrupt per se, we watch for the change at vblank.
256290a72f87SVille Syrjälä 	 */
256390a72f87SVille Syrjälä 	if (I915_READ16(ISR) & flip_pending)
256490a72f87SVille Syrjälä 		return false;
256590a72f87SVille Syrjälä 
256690a72f87SVille Syrjälä 	intel_finish_page_flip(dev, pipe);
256790a72f87SVille Syrjälä 
256890a72f87SVille Syrjälä 	return true;
256990a72f87SVille Syrjälä }
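
/*
 * Usage note (descriptive only): the interrupt handler below calls this once
 * per pipe whenever a vblank status bit is seen and, once the flip has been
 * retired, drops that plane's flip-pending bit from its local mask so the
 * outer IIR loop stops waiting on it.
 */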
257090a72f87SVille Syrjälä 
2571ff1f525eSDaniel Vetter static irqreturn_t i8xx_irq_handler(int irq, void *arg)
2572c2798b19SChris Wilson {
2573c2798b19SChris Wilson 	struct drm_device *dev = (struct drm_device *) arg;
2574c2798b19SChris Wilson 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2575c2798b19SChris Wilson 	u16 iir, new_iir;
2576c2798b19SChris Wilson 	u32 pipe_stats[2];
2577c2798b19SChris Wilson 	unsigned long irqflags;
2578c2798b19SChris Wilson 	int pipe;
2579c2798b19SChris Wilson 	u16 flip_mask =
2580c2798b19SChris Wilson 		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2581c2798b19SChris Wilson 		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
2582c2798b19SChris Wilson 
2583c2798b19SChris Wilson 	atomic_inc(&dev_priv->irq_received);
2584c2798b19SChris Wilson 
2585c2798b19SChris Wilson 	iir = I915_READ16(IIR);
2586c2798b19SChris Wilson 	if (iir == 0)
2587c2798b19SChris Wilson 		return IRQ_NONE;
2588c2798b19SChris Wilson 
2589c2798b19SChris Wilson 	while (iir & ~flip_mask) {
2590c2798b19SChris Wilson 		/* Can't rely on pipestat interrupt bit in iir as it might
2591c2798b19SChris Wilson 		 * have been cleared after the pipestat interrupt was received.
2592c2798b19SChris Wilson 		 * It doesn't set the bit in iir again, but it still produces
2593c2798b19SChris Wilson 		 * interrupts (for non-MSI).
2594c2798b19SChris Wilson 		 */
2595c2798b19SChris Wilson 		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2596c2798b19SChris Wilson 		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
2597c2798b19SChris Wilson 			i915_handle_error(dev, false);
2598c2798b19SChris Wilson 
2599c2798b19SChris Wilson 		for_each_pipe(pipe) {
2600c2798b19SChris Wilson 			int reg = PIPESTAT(pipe);
2601c2798b19SChris Wilson 			pipe_stats[pipe] = I915_READ(reg);
2602c2798b19SChris Wilson 
2603c2798b19SChris Wilson 			/*
2604c2798b19SChris Wilson 			 * Clear the PIPE*STAT regs before the IIR
2605c2798b19SChris Wilson 			 */
2606c2798b19SChris Wilson 			if (pipe_stats[pipe] & 0x8000ffff) {
2607c2798b19SChris Wilson 				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
2608c2798b19SChris Wilson 					DRM_DEBUG_DRIVER("pipe %c underrun\n",
2609c2798b19SChris Wilson 							 pipe_name(pipe));
2610c2798b19SChris Wilson 				I915_WRITE(reg, pipe_stats[pipe]);
2611c2798b19SChris Wilson 			}
2612c2798b19SChris Wilson 		}
2613c2798b19SChris Wilson 		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2614c2798b19SChris Wilson 
2615c2798b19SChris Wilson 		I915_WRITE16(IIR, iir & ~flip_mask);
2616c2798b19SChris Wilson 		new_iir = I915_READ16(IIR); /* Flush posted writes */
2617c2798b19SChris Wilson 
2618d05c617eSDaniel Vetter 		i915_update_dri1_breadcrumb(dev);
2619c2798b19SChris Wilson 
2620c2798b19SChris Wilson 		if (iir & I915_USER_INTERRUPT)
2621c2798b19SChris Wilson 			notify_ring(dev, &dev_priv->ring[RCS]);
2622c2798b19SChris Wilson 
2623c2798b19SChris Wilson 		if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS &&
262490a72f87SVille Syrjälä 		    i8xx_handle_vblank(dev, 0, iir))
262590a72f87SVille Syrjälä 			flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(0);
2626c2798b19SChris Wilson 
2627c2798b19SChris Wilson 		if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS &&
262890a72f87SVille Syrjälä 		    i8xx_handle_vblank(dev, 1, iir))
262990a72f87SVille Syrjälä 			flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(1);
2630c2798b19SChris Wilson 
2631c2798b19SChris Wilson 		iir = new_iir;
2632c2798b19SChris Wilson 	}
2633c2798b19SChris Wilson 
2634c2798b19SChris Wilson 	return IRQ_HANDLED;
2635c2798b19SChris Wilson }
2636c2798b19SChris Wilson 
2637c2798b19SChris Wilson static void i8xx_irq_uninstall(struct drm_device * dev)
2638c2798b19SChris Wilson {
2639c2798b19SChris Wilson 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2640c2798b19SChris Wilson 	int pipe;
2641c2798b19SChris Wilson 
2642c2798b19SChris Wilson 	for_each_pipe(pipe) {
2643c2798b19SChris Wilson 		/* Clear enable bits; then clear status bits */
2644c2798b19SChris Wilson 		I915_WRITE(PIPESTAT(pipe), 0);
2645c2798b19SChris Wilson 		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
2646c2798b19SChris Wilson 	}
2647c2798b19SChris Wilson 	I915_WRITE16(IMR, 0xffff);
2648c2798b19SChris Wilson 	I915_WRITE16(IER, 0x0);
2649c2798b19SChris Wilson 	I915_WRITE16(IIR, I915_READ16(IIR));
2650c2798b19SChris Wilson }
2651c2798b19SChris Wilson 
2652a266c7d5SChris Wilson static void i915_irq_preinstall(struct drm_device * dev)
2653a266c7d5SChris Wilson {
2654a266c7d5SChris Wilson 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2655a266c7d5SChris Wilson 	int pipe;
2656a266c7d5SChris Wilson 
2657a266c7d5SChris Wilson 	atomic_set(&dev_priv->irq_received, 0);
2658a266c7d5SChris Wilson 
2659a266c7d5SChris Wilson 	if (I915_HAS_HOTPLUG(dev)) {
2660a266c7d5SChris Wilson 		I915_WRITE(PORT_HOTPLUG_EN, 0);
2661a266c7d5SChris Wilson 		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2662a266c7d5SChris Wilson 	}
2663a266c7d5SChris Wilson 
266400d98ebdSChris Wilson 	I915_WRITE16(HWSTAM, 0xeffe);
2665a266c7d5SChris Wilson 	for_each_pipe(pipe)
2666a266c7d5SChris Wilson 		I915_WRITE(PIPESTAT(pipe), 0);
2667a266c7d5SChris Wilson 	I915_WRITE(IMR, 0xffffffff);
2668a266c7d5SChris Wilson 	I915_WRITE(IER, 0x0);
2669a266c7d5SChris Wilson 	POSTING_READ(IER);
2670a266c7d5SChris Wilson }
2671a266c7d5SChris Wilson 
2672a266c7d5SChris Wilson static int i915_irq_postinstall(struct drm_device *dev)
2673a266c7d5SChris Wilson {
2674a266c7d5SChris Wilson 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
267538bde180SChris Wilson 	u32 enable_mask;
2676a266c7d5SChris Wilson 
267738bde180SChris Wilson 	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
267838bde180SChris Wilson 
267938bde180SChris Wilson 	/* Unmask the interrupts that we always want on. */
268038bde180SChris Wilson 	dev_priv->irq_mask =
268138bde180SChris Wilson 		~(I915_ASLE_INTERRUPT |
268238bde180SChris Wilson 		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
268338bde180SChris Wilson 		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
268438bde180SChris Wilson 		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
268538bde180SChris Wilson 		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
268638bde180SChris Wilson 		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
268738bde180SChris Wilson 
268838bde180SChris Wilson 	enable_mask =
268938bde180SChris Wilson 		I915_ASLE_INTERRUPT |
269038bde180SChris Wilson 		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
269138bde180SChris Wilson 		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
269238bde180SChris Wilson 		I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
269338bde180SChris Wilson 		I915_USER_INTERRUPT;
269438bde180SChris Wilson 
2695a266c7d5SChris Wilson 	if (I915_HAS_HOTPLUG(dev)) {
269620afbda2SDaniel Vetter 		I915_WRITE(PORT_HOTPLUG_EN, 0);
269720afbda2SDaniel Vetter 		POSTING_READ(PORT_HOTPLUG_EN);
269820afbda2SDaniel Vetter 
2699a266c7d5SChris Wilson 		/* Enable in IER... */
2700a266c7d5SChris Wilson 		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
2701a266c7d5SChris Wilson 		/* and unmask in IMR */
2702a266c7d5SChris Wilson 		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
2703a266c7d5SChris Wilson 	}
2704a266c7d5SChris Wilson 
2705a266c7d5SChris Wilson 	I915_WRITE(IMR, dev_priv->irq_mask);
2706a266c7d5SChris Wilson 	I915_WRITE(IER, enable_mask);
2707a266c7d5SChris Wilson 	POSTING_READ(IER);
2708a266c7d5SChris Wilson 
2709f49e38ddSJani Nikula 	i915_enable_asle_pipestat(dev);
271020afbda2SDaniel Vetter 
271120afbda2SDaniel Vetter 	return 0;
271220afbda2SDaniel Vetter }
271320afbda2SDaniel Vetter 
271490a72f87SVille Syrjälä /*
271590a72f87SVille Syrjälä  * Returns true when a page flip has completed.
271690a72f87SVille Syrjälä  */
271790a72f87SVille Syrjälä static bool i915_handle_vblank(struct drm_device *dev,
271890a72f87SVille Syrjälä 			       int plane, int pipe, u32 iir)
271990a72f87SVille Syrjälä {
272090a72f87SVille Syrjälä 	drm_i915_private_t *dev_priv = dev->dev_private;
272190a72f87SVille Syrjälä 	u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
272290a72f87SVille Syrjälä 
272390a72f87SVille Syrjälä 	if (!drm_handle_vblank(dev, pipe))
272490a72f87SVille Syrjälä 		return false;
272590a72f87SVille Syrjälä 
272690a72f87SVille Syrjälä 	if ((iir & flip_pending) == 0)
272790a72f87SVille Syrjälä 		return false;
272890a72f87SVille Syrjälä 
272990a72f87SVille Syrjälä 	intel_prepare_page_flip(dev, plane);
273090a72f87SVille Syrjälä 
273190a72f87SVille Syrjälä 	/* We detect FlipDone by looking for the change in PendingFlip from '1'
273290a72f87SVille Syrjälä 	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
273390a72f87SVille Syrjälä 	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
273490a72f87SVille Syrjälä 	 * the flip is completed (no longer pending). Since this doesn't raise
273590a72f87SVille Syrjälä 	 * an interrupt per se, we watch for the change at vblank.
273690a72f87SVille Syrjälä 	 */
273790a72f87SVille Syrjälä 	if (I915_READ(ISR) & flip_pending)
273890a72f87SVille Syrjälä 		return false;
273990a72f87SVille Syrjälä 
274090a72f87SVille Syrjälä 	intel_finish_page_flip(dev, pipe);
274190a72f87SVille Syrjälä 
274290a72f87SVille Syrjälä 	return true;
274390a72f87SVille Syrjälä }
274490a72f87SVille Syrjälä 
2745ff1f525eSDaniel Vetter static irqreturn_t i915_irq_handler(int irq, void *arg)
2746a266c7d5SChris Wilson {
2747a266c7d5SChris Wilson 	struct drm_device *dev = (struct drm_device *) arg;
2748a266c7d5SChris Wilson 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
27498291ee90SChris Wilson 	u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
2750a266c7d5SChris Wilson 	unsigned long irqflags;
275138bde180SChris Wilson 	u32 flip_mask =
275238bde180SChris Wilson 		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
275338bde180SChris Wilson 		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
275438bde180SChris Wilson 	int pipe, ret = IRQ_NONE;
2755a266c7d5SChris Wilson 
2756a266c7d5SChris Wilson 	atomic_inc(&dev_priv->irq_received);
2757a266c7d5SChris Wilson 
2758a266c7d5SChris Wilson 	iir = I915_READ(IIR);
275938bde180SChris Wilson 	do {
276038bde180SChris Wilson 		bool irq_received = (iir & ~flip_mask) != 0;
27618291ee90SChris Wilson 		bool blc_event = false;
2762a266c7d5SChris Wilson 
2763a266c7d5SChris Wilson 		/* Can't rely on pipestat interrupt bit in iir as it might
2764a266c7d5SChris Wilson 		 * have been cleared after the pipestat interrupt was received.
2765a266c7d5SChris Wilson 		 * It doesn't set the bit in iir again, but it still produces
2766a266c7d5SChris Wilson 		 * interrupts (for non-MSI).
2767a266c7d5SChris Wilson 		 */
2768a266c7d5SChris Wilson 		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2769a266c7d5SChris Wilson 		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
2770a266c7d5SChris Wilson 			i915_handle_error(dev, false);
2771a266c7d5SChris Wilson 
2772a266c7d5SChris Wilson 		for_each_pipe(pipe) {
2773a266c7d5SChris Wilson 			int reg = PIPESTAT(pipe);
2774a266c7d5SChris Wilson 			pipe_stats[pipe] = I915_READ(reg);
2775a266c7d5SChris Wilson 
277638bde180SChris Wilson 			/* Clear the PIPE*STAT regs before the IIR */
2777a266c7d5SChris Wilson 			if (pipe_stats[pipe] & 0x8000ffff) {
2778a266c7d5SChris Wilson 				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
2779a266c7d5SChris Wilson 					DRM_DEBUG_DRIVER("pipe %c underrun\n",
2780a266c7d5SChris Wilson 							 pipe_name(pipe));
2781a266c7d5SChris Wilson 				I915_WRITE(reg, pipe_stats[pipe]);
278238bde180SChris Wilson 				irq_received = true;
2783a266c7d5SChris Wilson 			}
2784a266c7d5SChris Wilson 		}
2785a266c7d5SChris Wilson 		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2786a266c7d5SChris Wilson 
2787a266c7d5SChris Wilson 		if (!irq_received)
2788a266c7d5SChris Wilson 			break;
2789a266c7d5SChris Wilson 
2790a266c7d5SChris Wilson 		/* Consume port.  Then clear IIR or we'll miss events */
2791a266c7d5SChris Wilson 		if ((I915_HAS_HOTPLUG(dev)) &&
2792a266c7d5SChris Wilson 		    (iir & I915_DISPLAY_PORT_INTERRUPT)) {
2793a266c7d5SChris Wilson 			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
2794b543fb04SEgbert Eich 			u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
2795a266c7d5SChris Wilson 
2796a266c7d5SChris Wilson 			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
2797a266c7d5SChris Wilson 				  hotplug_status);
279891d131d2SDaniel Vetter 
279910a504deSDaniel Vetter 			intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);
280091d131d2SDaniel Vetter 
2801a266c7d5SChris Wilson 			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
280238bde180SChris Wilson 			POSTING_READ(PORT_HOTPLUG_STAT);
2803a266c7d5SChris Wilson 		}
2804a266c7d5SChris Wilson 
280538bde180SChris Wilson 		I915_WRITE(IIR, iir & ~flip_mask);
2806a266c7d5SChris Wilson 		new_iir = I915_READ(IIR); /* Flush posted writes */
2807a266c7d5SChris Wilson 
2808a266c7d5SChris Wilson 		if (iir & I915_USER_INTERRUPT)
2809a266c7d5SChris Wilson 			notify_ring(dev, &dev_priv->ring[RCS]);
2810a266c7d5SChris Wilson 
2811a266c7d5SChris Wilson 		for_each_pipe(pipe) {
281238bde180SChris Wilson 			int plane = pipe;
281338bde180SChris Wilson 			if (IS_MOBILE(dev))
281438bde180SChris Wilson 				plane = !plane;
28155e2032d4SVille Syrjälä 
281690a72f87SVille Syrjälä 			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
281790a72f87SVille Syrjälä 			    i915_handle_vblank(dev, plane, pipe, iir))
281890a72f87SVille Syrjälä 				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
2819a266c7d5SChris Wilson 
2820a266c7d5SChris Wilson 			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
2821a266c7d5SChris Wilson 				blc_event = true;
2822a266c7d5SChris Wilson 		}
2823a266c7d5SChris Wilson 
2824a266c7d5SChris Wilson 		if (blc_event || (iir & I915_ASLE_INTERRUPT))
2825a266c7d5SChris Wilson 			intel_opregion_asle_intr(dev);
2826a266c7d5SChris Wilson 
2827a266c7d5SChris Wilson 		/* With MSI, interrupts are only generated when iir
2828a266c7d5SChris Wilson 		 * transitions from zero to nonzero.  If another bit got
2829a266c7d5SChris Wilson 		 * set while we were handling the existing iir bits, then
2830a266c7d5SChris Wilson 		 * we would never get another interrupt.
2831a266c7d5SChris Wilson 		 *
2832a266c7d5SChris Wilson 		 * This is fine on non-MSI as well, as if we hit this path
2833a266c7d5SChris Wilson 		 * we avoid exiting the interrupt handler only to generate
2834a266c7d5SChris Wilson 		 * another one.
2835a266c7d5SChris Wilson 		 *
2836a266c7d5SChris Wilson 		 * Note that for MSI this could cause a stray interrupt report
2837a266c7d5SChris Wilson 		 * if an interrupt landed in the time between writing IIR and
2838a266c7d5SChris Wilson 		 * the posting read.  This should be rare enough to never
2839a266c7d5SChris Wilson 		 * trigger the 99% of 100,000 interrupts test for disabling
2840a266c7d5SChris Wilson 		 * stray interrupts.
2841a266c7d5SChris Wilson 		 */
284238bde180SChris Wilson 		ret = IRQ_HANDLED;
2843a266c7d5SChris Wilson 		iir = new_iir;
284438bde180SChris Wilson 	} while (iir & ~flip_mask);
2845a266c7d5SChris Wilson 
2846d05c617eSDaniel Vetter 	i915_update_dri1_breadcrumb(dev);
28478291ee90SChris Wilson 
2848a266c7d5SChris Wilson 	return ret;
2849a266c7d5SChris Wilson }
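
/*
 * Illustrative sketch (not built) of the IIR re-read loop used above: acking
 * the bits we are about to handle and then re-reading IIR means any event
 * that arrived meanwhile still produces a fresh zero-to-nonzero transition,
 * which is what MSI needs. The function name is hypothetical.
 */
#if 0
static void example_msi_safe_irq_loop(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 iir = I915_READ(IIR);

	while (iir) {
		I915_WRITE(IIR, iir);	/* ack what we are about to handle */
		/* ... handle each bit set in iir ... */
		iir = I915_READ(IIR);	/* pick up anything that arrived since */
	}
}
#endif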
2850a266c7d5SChris Wilson 
2851a266c7d5SChris Wilson static void i915_irq_uninstall(struct drm_device * dev)
2852a266c7d5SChris Wilson {
2853a266c7d5SChris Wilson 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2854a266c7d5SChris Wilson 	int pipe;
2855a266c7d5SChris Wilson 
2856ac4c16c5SEgbert Eich 	del_timer_sync(&dev_priv->hotplug_reenable_timer);
2857ac4c16c5SEgbert Eich 
2858a266c7d5SChris Wilson 	if (I915_HAS_HOTPLUG(dev)) {
2859a266c7d5SChris Wilson 		I915_WRITE(PORT_HOTPLUG_EN, 0);
2860a266c7d5SChris Wilson 		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2861a266c7d5SChris Wilson 	}
2862a266c7d5SChris Wilson 
286300d98ebdSChris Wilson 	I915_WRITE16(HWSTAM, 0xffff);
286455b39755SChris Wilson 	for_each_pipe(pipe) {
286555b39755SChris Wilson 		/* Clear enable bits; then clear status bits */
2866a266c7d5SChris Wilson 		I915_WRITE(PIPESTAT(pipe), 0);
286755b39755SChris Wilson 		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
286855b39755SChris Wilson 	}
2869a266c7d5SChris Wilson 	I915_WRITE(IMR, 0xffffffff);
2870a266c7d5SChris Wilson 	I915_WRITE(IER, 0x0);
2871a266c7d5SChris Wilson 
2872a266c7d5SChris Wilson 	I915_WRITE(IIR, I915_READ(IIR));
2873a266c7d5SChris Wilson }
2874a266c7d5SChris Wilson 
2875a266c7d5SChris Wilson static void i965_irq_preinstall(struct drm_device * dev)
2876a266c7d5SChris Wilson {
2877a266c7d5SChris Wilson 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2878a266c7d5SChris Wilson 	int pipe;
2879a266c7d5SChris Wilson 
2880a266c7d5SChris Wilson 	atomic_set(&dev_priv->irq_received, 0);
2881a266c7d5SChris Wilson 
2882a266c7d5SChris Wilson 	I915_WRITE(PORT_HOTPLUG_EN, 0);
2883a266c7d5SChris Wilson 	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2884a266c7d5SChris Wilson 
2885a266c7d5SChris Wilson 	I915_WRITE(HWSTAM, 0xeffe);
2886a266c7d5SChris Wilson 	for_each_pipe(pipe)
2887a266c7d5SChris Wilson 		I915_WRITE(PIPESTAT(pipe), 0);
2888a266c7d5SChris Wilson 	I915_WRITE(IMR, 0xffffffff);
2889a266c7d5SChris Wilson 	I915_WRITE(IER, 0x0);
2890a266c7d5SChris Wilson 	POSTING_READ(IER);
2891a266c7d5SChris Wilson }
2892a266c7d5SChris Wilson 
2893a266c7d5SChris Wilson static int i965_irq_postinstall(struct drm_device *dev)
2894a266c7d5SChris Wilson {
2895a266c7d5SChris Wilson 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2896bbba0a97SChris Wilson 	u32 enable_mask;
2897a266c7d5SChris Wilson 	u32 error_mask;
2898b79480baSDaniel Vetter 	unsigned long irqflags;
2899a266c7d5SChris Wilson 
2900a266c7d5SChris Wilson 	/* Unmask the interrupts that we always want on. */
2901bbba0a97SChris Wilson 	dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
2902adca4730SChris Wilson 			       I915_DISPLAY_PORT_INTERRUPT |
2903bbba0a97SChris Wilson 			       I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2904bbba0a97SChris Wilson 			       I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2905bbba0a97SChris Wilson 			       I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2906bbba0a97SChris Wilson 			       I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
2907bbba0a97SChris Wilson 			       I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2908bbba0a97SChris Wilson 
2909bbba0a97SChris Wilson 	enable_mask = ~dev_priv->irq_mask;
291021ad8330SVille Syrjälä 	enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
291121ad8330SVille Syrjälä 			 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
2912bbba0a97SChris Wilson 	enable_mask |= I915_USER_INTERRUPT;
2913bbba0a97SChris Wilson 
2914bbba0a97SChris Wilson 	if (IS_G4X(dev))
2915bbba0a97SChris Wilson 		enable_mask |= I915_BSD_USER_INTERRUPT;
2916a266c7d5SChris Wilson 
2917b79480baSDaniel Vetter 	/* Interrupt setup is already guaranteed to be single-threaded; this is
2918b79480baSDaniel Vetter 	 * just to make the assert_spin_locked check happy. */
2919b79480baSDaniel Vetter 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2920515ac2bbSDaniel Vetter 	i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
2921b79480baSDaniel Vetter 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2922a266c7d5SChris Wilson 
2923a266c7d5SChris Wilson 	/*
2924a266c7d5SChris Wilson 	 * Enable some error detection; note that the instruction error mask
2925a266c7d5SChris Wilson 	 * bit is reserved, so we leave it masked.
2926a266c7d5SChris Wilson 	 */
2927a266c7d5SChris Wilson 	if (IS_G4X(dev)) {
2928a266c7d5SChris Wilson 		error_mask = ~(GM45_ERROR_PAGE_TABLE |
2929a266c7d5SChris Wilson 			       GM45_ERROR_MEM_PRIV |
2930a266c7d5SChris Wilson 			       GM45_ERROR_CP_PRIV |
2931a266c7d5SChris Wilson 			       I915_ERROR_MEMORY_REFRESH);
2932a266c7d5SChris Wilson 	} else {
2933a266c7d5SChris Wilson 		error_mask = ~(I915_ERROR_PAGE_TABLE |
2934a266c7d5SChris Wilson 			       I915_ERROR_MEMORY_REFRESH);
2935a266c7d5SChris Wilson 	}
2936a266c7d5SChris Wilson 	I915_WRITE(EMR, error_mask);
2937a266c7d5SChris Wilson 
2938a266c7d5SChris Wilson 	I915_WRITE(IMR, dev_priv->irq_mask);
2939a266c7d5SChris Wilson 	I915_WRITE(IER, enable_mask);
2940a266c7d5SChris Wilson 	POSTING_READ(IER);
2941a266c7d5SChris Wilson 
294220afbda2SDaniel Vetter 	I915_WRITE(PORT_HOTPLUG_EN, 0);
294320afbda2SDaniel Vetter 	POSTING_READ(PORT_HOTPLUG_EN);
294420afbda2SDaniel Vetter 
2945f49e38ddSJani Nikula 	i915_enable_asle_pipestat(dev);
294620afbda2SDaniel Vetter 
294720afbda2SDaniel Vetter 	return 0;
294820afbda2SDaniel Vetter }
294920afbda2SDaniel Vetter 
2950bac56d5bSEgbert Eich static void i915_hpd_irq_setup(struct drm_device *dev)
295120afbda2SDaniel Vetter {
295220afbda2SDaniel Vetter 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2953e5868a31SEgbert Eich 	struct drm_mode_config *mode_config = &dev->mode_config;
2954cd569aedSEgbert Eich 	struct intel_encoder *intel_encoder;
295520afbda2SDaniel Vetter 	u32 hotplug_en;
295620afbda2SDaniel Vetter 
2957b5ea2d56SDaniel Vetter 	assert_spin_locked(&dev_priv->irq_lock);
2958b5ea2d56SDaniel Vetter 
2959bac56d5bSEgbert Eich 	if (I915_HAS_HOTPLUG(dev)) {
2960bac56d5bSEgbert Eich 		hotplug_en = I915_READ(PORT_HOTPLUG_EN);
2961bac56d5bSEgbert Eich 		hotplug_en &= ~HOTPLUG_INT_EN_MASK;
2962adca4730SChris Wilson 		/* Note HDMI and DP share hotplug bits */
2963e5868a31SEgbert Eich 		/* enable bits are the same for all generations */
2964cd569aedSEgbert Eich 		list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
2965cd569aedSEgbert Eich 			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
2966cd569aedSEgbert Eich 				hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
2967a266c7d5SChris Wilson 		/* Programming the CRT detection parameters tends
2968a266c7d5SChris Wilson 		   to generate a spurious hotplug event about three
2969a266c7d5SChris Wilson 		   seconds later.  So just do it once.
2970a266c7d5SChris Wilson 		*/
2971a266c7d5SChris Wilson 		if (IS_G4X(dev))
2972a266c7d5SChris Wilson 			hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
297385fc95baSDaniel Vetter 		hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
2974a266c7d5SChris Wilson 		hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
2975a266c7d5SChris Wilson 
2976a266c7d5SChris Wilson 		/* Ignore TV since it's buggy */
2977a266c7d5SChris Wilson 		I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
2978a266c7d5SChris Wilson 	}
2979bac56d5bSEgbert Eich }
2980a266c7d5SChris Wilson 
2981ff1f525eSDaniel Vetter static irqreturn_t i965_irq_handler(int irq, void *arg)
2982a266c7d5SChris Wilson {
2983a266c7d5SChris Wilson 	struct drm_device *dev = (struct drm_device *) arg;
2984a266c7d5SChris Wilson 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2985a266c7d5SChris Wilson 	u32 iir, new_iir;
2986a266c7d5SChris Wilson 	u32 pipe_stats[I915_MAX_PIPES];
2987a266c7d5SChris Wilson 	unsigned long irqflags;
2988a266c7d5SChris Wilson 	int irq_received;
2989a266c7d5SChris Wilson 	int ret = IRQ_NONE, pipe;
299021ad8330SVille Syrjälä 	u32 flip_mask =
299121ad8330SVille Syrjälä 		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
299221ad8330SVille Syrjälä 		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
2993a266c7d5SChris Wilson 
2994a266c7d5SChris Wilson 	atomic_inc(&dev_priv->irq_received);
2995a266c7d5SChris Wilson 
2996a266c7d5SChris Wilson 	iir = I915_READ(IIR);
2997a266c7d5SChris Wilson 
2998a266c7d5SChris Wilson 	for (;;) {
29992c8ba29fSChris Wilson 		bool blc_event = false;
30002c8ba29fSChris Wilson 
300121ad8330SVille Syrjälä 		irq_received = (iir & ~flip_mask) != 0;
3002a266c7d5SChris Wilson 
3003a266c7d5SChris Wilson 		/* Can't rely on pipestat interrupt bit in iir as it might
3004a266c7d5SChris Wilson 		 * have been cleared after the pipestat interrupt was received.
3005a266c7d5SChris Wilson 		 * It doesn't set the bit in iir again, but it still produces
3006a266c7d5SChris Wilson 		 * interrupts (for non-MSI).
3007a266c7d5SChris Wilson 		 */
3008a266c7d5SChris Wilson 		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3009a266c7d5SChris Wilson 		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
3010a266c7d5SChris Wilson 			i915_handle_error(dev, false);
3011a266c7d5SChris Wilson 
3012a266c7d5SChris Wilson 		for_each_pipe(pipe) {
3013a266c7d5SChris Wilson 			int reg = PIPESTAT(pipe);
3014a266c7d5SChris Wilson 			pipe_stats[pipe] = I915_READ(reg);
3015a266c7d5SChris Wilson 
3016a266c7d5SChris Wilson 			/*
3017a266c7d5SChris Wilson 			 * Clear the PIPE*STAT regs before the IIR
3018a266c7d5SChris Wilson 			 */
3019a266c7d5SChris Wilson 			if (pipe_stats[pipe] & 0x8000ffff) {
3020a266c7d5SChris Wilson 				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
3021a266c7d5SChris Wilson 					DRM_DEBUG_DRIVER("pipe %c underrun\n",
3022a266c7d5SChris Wilson 							 pipe_name(pipe));
3023a266c7d5SChris Wilson 				I915_WRITE(reg, pipe_stats[pipe]);
3024a266c7d5SChris Wilson 				irq_received = 1;
3025a266c7d5SChris Wilson 			}
3026a266c7d5SChris Wilson 		}
3027a266c7d5SChris Wilson 		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3028a266c7d5SChris Wilson 
3029a266c7d5SChris Wilson 		if (!irq_received)
3030a266c7d5SChris Wilson 			break;
3031a266c7d5SChris Wilson 
3032a266c7d5SChris Wilson 		ret = IRQ_HANDLED;
3033a266c7d5SChris Wilson 
3034a266c7d5SChris Wilson 		/* Consume port.  Then clear IIR or we'll miss events */
3035adca4730SChris Wilson 		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
3036a266c7d5SChris Wilson 			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
3037b543fb04SEgbert Eich 			u32 hotplug_trigger = hotplug_status & (IS_G4X(dev) ?
3038b543fb04SEgbert Eich 								  HOTPLUG_INT_STATUS_G4X :
30394f7fd709SDaniel Vetter 								  HOTPLUG_INT_STATUS_I915);
3040a266c7d5SChris Wilson 
3041a266c7d5SChris Wilson 			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
3042a266c7d5SChris Wilson 				  hotplug_status);
304391d131d2SDaniel Vetter 
304410a504deSDaniel Vetter 			intel_hpd_irq_handler(dev, hotplug_trigger,
304510a504deSDaniel Vetter 					      IS_G4X(dev) ? hpd_status_gen4 : hpd_status_i915);
304691d131d2SDaniel Vetter 
3047a266c7d5SChris Wilson 			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
3048a266c7d5SChris Wilson 			I915_READ(PORT_HOTPLUG_STAT);
3049a266c7d5SChris Wilson 		}
3050a266c7d5SChris Wilson 
305121ad8330SVille Syrjälä 		I915_WRITE(IIR, iir & ~flip_mask);
3052a266c7d5SChris Wilson 		new_iir = I915_READ(IIR); /* Flush posted writes */
3053a266c7d5SChris Wilson 
3054a266c7d5SChris Wilson 		if (iir & I915_USER_INTERRUPT)
3055a266c7d5SChris Wilson 			notify_ring(dev, &dev_priv->ring[RCS]);
3056a266c7d5SChris Wilson 		if (iir & I915_BSD_USER_INTERRUPT)
3057a266c7d5SChris Wilson 			notify_ring(dev, &dev_priv->ring[VCS]);
3058a266c7d5SChris Wilson 
3059a266c7d5SChris Wilson 		for_each_pipe(pipe) {
30602c8ba29fSChris Wilson 			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
306190a72f87SVille Syrjälä 			    i915_handle_vblank(dev, pipe, pipe, iir))
306290a72f87SVille Syrjälä 				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
3063a266c7d5SChris Wilson 
3064a266c7d5SChris Wilson 			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
3065a266c7d5SChris Wilson 				blc_event = true;
3066a266c7d5SChris Wilson 		}
3067a266c7d5SChris Wilson 
3068a266c7d5SChris Wilson 
3069a266c7d5SChris Wilson 		if (blc_event || (iir & I915_ASLE_INTERRUPT))
3070a266c7d5SChris Wilson 			intel_opregion_asle_intr(dev);
3071a266c7d5SChris Wilson 
3072515ac2bbSDaniel Vetter 		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
3073515ac2bbSDaniel Vetter 			gmbus_irq_handler(dev);
3074515ac2bbSDaniel Vetter 
3075a266c7d5SChris Wilson 		/* With MSI, interrupts are only generated when iir
3076a266c7d5SChris Wilson 		 * transitions from zero to nonzero.  If another bit got
3077a266c7d5SChris Wilson 		 * set while we were handling the existing iir bits, then
3078a266c7d5SChris Wilson 		 * we would never get another interrupt.
3079a266c7d5SChris Wilson 		 *
3080a266c7d5SChris Wilson 		 * This is fine on non-MSI as well, as if we hit this path
3081a266c7d5SChris Wilson 		 * we avoid exiting the interrupt handler only to generate
3082a266c7d5SChris Wilson 		 * another one.
3083a266c7d5SChris Wilson 		 *
3084a266c7d5SChris Wilson 		 * Note that for MSI this could cause a stray interrupt report
3085a266c7d5SChris Wilson 		 * if an interrupt landed in the time between writing IIR and
3086a266c7d5SChris Wilson 		 * the posting read.  This should be rare enough to never
3087a266c7d5SChris Wilson 		 * trigger the 99% of 100,000 interrupts test for disabling
3088a266c7d5SChris Wilson 		 * stray interrupts.
3089a266c7d5SChris Wilson 		 */
3090a266c7d5SChris Wilson 		iir = new_iir;
3091a266c7d5SChris Wilson 	}
3092a266c7d5SChris Wilson 
3093d05c617eSDaniel Vetter 	i915_update_dri1_breadcrumb(dev);
30942c8ba29fSChris Wilson 
3095a266c7d5SChris Wilson 	return ret;
3096a266c7d5SChris Wilson }
3097a266c7d5SChris Wilson 
3098a266c7d5SChris Wilson static void i965_irq_uninstall(struct drm_device * dev)
3099a266c7d5SChris Wilson {
3100a266c7d5SChris Wilson 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3101a266c7d5SChris Wilson 	int pipe;
3102a266c7d5SChris Wilson 
3103a266c7d5SChris Wilson 	if (!dev_priv)
3104a266c7d5SChris Wilson 		return;
3105a266c7d5SChris Wilson 
3106ac4c16c5SEgbert Eich 	del_timer_sync(&dev_priv->hotplug_reenable_timer);
3107ac4c16c5SEgbert Eich 
3108a266c7d5SChris Wilson 	I915_WRITE(PORT_HOTPLUG_EN, 0);
3109a266c7d5SChris Wilson 	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3110a266c7d5SChris Wilson 
3111a266c7d5SChris Wilson 	I915_WRITE(HWSTAM, 0xffffffff);
3112a266c7d5SChris Wilson 	for_each_pipe(pipe)
3113a266c7d5SChris Wilson 		I915_WRITE(PIPESTAT(pipe), 0);
3114a266c7d5SChris Wilson 	I915_WRITE(IMR, 0xffffffff);
3115a266c7d5SChris Wilson 	I915_WRITE(IER, 0x0);
3116a266c7d5SChris Wilson 
3117a266c7d5SChris Wilson 	for_each_pipe(pipe)
3118a266c7d5SChris Wilson 		I915_WRITE(PIPESTAT(pipe),
3119a266c7d5SChris Wilson 			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
3120a266c7d5SChris Wilson 	I915_WRITE(IIR, I915_READ(IIR));
3121a266c7d5SChris Wilson }
3122a266c7d5SChris Wilson 
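/*
 * Timer callback used to recover from hotplug interrupt "storms": any pin
 * that the storm detection logic has marked HPD_DISABLED is switched back to
 * HPD_ENABLED, its connectors are moved from polling back to hotplug-driven
 * detection, and the hotplug hardware is reprogrammed via hpd_irq_setup().
 * Everything runs under irq_lock so the hpd_stats and register updates stay
 * consistent with the interrupt handlers.
 */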
3123ac4c16c5SEgbert Eich static void i915_reenable_hotplug_timer_func(unsigned long data)
3124ac4c16c5SEgbert Eich {
3125ac4c16c5SEgbert Eich 	drm_i915_private_t *dev_priv = (drm_i915_private_t *)data;
3126ac4c16c5SEgbert Eich 	struct drm_device *dev = dev_priv->dev;
3127ac4c16c5SEgbert Eich 	struct drm_mode_config *mode_config = &dev->mode_config;
3128ac4c16c5SEgbert Eich 	unsigned long irqflags;
3129ac4c16c5SEgbert Eich 	int i;
3130ac4c16c5SEgbert Eich 
3131ac4c16c5SEgbert Eich 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3132ac4c16c5SEgbert Eich 	for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
3133ac4c16c5SEgbert Eich 		struct drm_connector *connector;
3134ac4c16c5SEgbert Eich 
3135ac4c16c5SEgbert Eich 		if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED)
3136ac4c16c5SEgbert Eich 			continue;
3137ac4c16c5SEgbert Eich 
3138ac4c16c5SEgbert Eich 		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
3139ac4c16c5SEgbert Eich 
3140ac4c16c5SEgbert Eich 		list_for_each_entry(connector, &mode_config->connector_list, head) {
3141ac4c16c5SEgbert Eich 			struct intel_connector *intel_connector = to_intel_connector(connector);
3142ac4c16c5SEgbert Eich 
3143ac4c16c5SEgbert Eich 			if (intel_connector->encoder->hpd_pin == i) {
3144ac4c16c5SEgbert Eich 				if (connector->polled != intel_connector->polled)
3145ac4c16c5SEgbert Eich 					DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
3146ac4c16c5SEgbert Eich 							 drm_get_connector_name(connector));
3147ac4c16c5SEgbert Eich 				connector->polled = intel_connector->polled;
3148ac4c16c5SEgbert Eich 				if (!connector->polled)
3149ac4c16c5SEgbert Eich 					connector->polled = DRM_CONNECTOR_POLL_HPD;
3150ac4c16c5SEgbert Eich 			}
3151ac4c16c5SEgbert Eich 		}
3152ac4c16c5SEgbert Eich 	}
3153ac4c16c5SEgbert Eich 	if (dev_priv->display.hpd_irq_setup)
3154ac4c16c5SEgbert Eich 		dev_priv->display.hpd_irq_setup(dev);
3155ac4c16c5SEgbert Eich 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3156ac4c16c5SEgbert Eich }
3157ac4c16c5SEgbert Eich 
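/*
 * One-time interrupt setup: initialize the deferred-work items and timers
 * used by the interrupt handlers, then pick the vblank and IRQ driver hooks
 * that match the hardware generation. The interrupt itself is requested
 * later through drm_irq_install(), which in turn calls the preinstall and
 * postinstall hooks selected here.
 */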
3158f71d4af4SJesse Barnes void intel_irq_init(struct drm_device *dev)
3159f71d4af4SJesse Barnes {
31608b2e326dSChris Wilson 	struct drm_i915_private *dev_priv = dev->dev_private;
31618b2e326dSChris Wilson 
31628b2e326dSChris Wilson 	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
316399584db3SDaniel Vetter 	INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
3164c6a828d3SDaniel Vetter 	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
3165a4da4fa4SDaniel Vetter 	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
31668b2e326dSChris Wilson 
316799584db3SDaniel Vetter 	setup_timer(&dev_priv->gpu_error.hangcheck_timer,
316899584db3SDaniel Vetter 		    i915_hangcheck_elapsed,
316961bac78eSDaniel Vetter 		    (unsigned long) dev);
3170ac4c16c5SEgbert Eich 	setup_timer(&dev_priv->hotplug_reenable_timer, i915_reenable_hotplug_timer_func,
3171ac4c16c5SEgbert Eich 		    (unsigned long) dev_priv);
317261bac78eSDaniel Vetter 
317397a19a24STomas Janousek 	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
31749ee32feaSDaniel Vetter 
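	/* G4X and ILK+ expose a full 32 bit hardware frame counter; older
	 * parts only provide 24 bits, so the DRM core has to handle the
	 * counter wrapping around much sooner. */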
31757d4e146fSEugeni Dodonov 	if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
3176f71d4af4SJesse Barnes 		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
3177f71d4af4SJesse Barnes 		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
3178391f75e2SVille Syrjälä 	} else {
3179391f75e2SVille Syrjälä 		dev->driver->get_vblank_counter = i915_get_vblank_counter;
3180391f75e2SVille Syrjälä 		dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
3181f71d4af4SJesse Barnes 	}
3182f71d4af4SJesse Barnes 
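	/* High-resolution vblank timestamps and scanout position queries are
	 * only wired up for KMS; UMS keeps the DRM core defaults. */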
3183*c2baf4b7SVille Syrjälä 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
3184f71d4af4SJesse Barnes 		dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
3185f71d4af4SJesse Barnes 		dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
3186*c2baf4b7SVille Syrjälä 	}
3187f71d4af4SJesse Barnes 
31887e231dbeSJesse Barnes 	if (IS_VALLEYVIEW(dev)) {
31897e231dbeSJesse Barnes 		dev->driver->irq_handler = valleyview_irq_handler;
31907e231dbeSJesse Barnes 		dev->driver->irq_preinstall = valleyview_irq_preinstall;
31917e231dbeSJesse Barnes 		dev->driver->irq_postinstall = valleyview_irq_postinstall;
31927e231dbeSJesse Barnes 		dev->driver->irq_uninstall = valleyview_irq_uninstall;
31937e231dbeSJesse Barnes 		dev->driver->enable_vblank = valleyview_enable_vblank;
31947e231dbeSJesse Barnes 		dev->driver->disable_vblank = valleyview_disable_vblank;
3195fa00abe0SEgbert Eich 		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
3196f71d4af4SJesse Barnes 	} else if (HAS_PCH_SPLIT(dev)) {
3197f71d4af4SJesse Barnes 		dev->driver->irq_handler = ironlake_irq_handler;
3198f71d4af4SJesse Barnes 		dev->driver->irq_preinstall = ironlake_irq_preinstall;
3199f71d4af4SJesse Barnes 		dev->driver->irq_postinstall = ironlake_irq_postinstall;
3200f71d4af4SJesse Barnes 		dev->driver->irq_uninstall = ironlake_irq_uninstall;
3201f71d4af4SJesse Barnes 		dev->driver->enable_vblank = ironlake_enable_vblank;
3202f71d4af4SJesse Barnes 		dev->driver->disable_vblank = ironlake_disable_vblank;
320382a28bcfSDaniel Vetter 		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
3204f71d4af4SJesse Barnes 	} else {
3205c2798b19SChris Wilson 		if (INTEL_INFO(dev)->gen == 2) {
3206c2798b19SChris Wilson 			dev->driver->irq_preinstall = i8xx_irq_preinstall;
3207c2798b19SChris Wilson 			dev->driver->irq_postinstall = i8xx_irq_postinstall;
3208c2798b19SChris Wilson 			dev->driver->irq_handler = i8xx_irq_handler;
3209c2798b19SChris Wilson 			dev->driver->irq_uninstall = i8xx_irq_uninstall;
3210a266c7d5SChris Wilson 		} else if (INTEL_INFO(dev)->gen == 3) {
3211a266c7d5SChris Wilson 			dev->driver->irq_preinstall = i915_irq_preinstall;
3212a266c7d5SChris Wilson 			dev->driver->irq_postinstall = i915_irq_postinstall;
3213a266c7d5SChris Wilson 			dev->driver->irq_uninstall = i915_irq_uninstall;
3214a266c7d5SChris Wilson 			dev->driver->irq_handler = i915_irq_handler;
321520afbda2SDaniel Vetter 			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
3216c2798b19SChris Wilson 		} else {
3217a266c7d5SChris Wilson 			dev->driver->irq_preinstall = i965_irq_preinstall;
3218a266c7d5SChris Wilson 			dev->driver->irq_postinstall = i965_irq_postinstall;
3219a266c7d5SChris Wilson 			dev->driver->irq_uninstall = i965_irq_uninstall;
3220a266c7d5SChris Wilson 			dev->driver->irq_handler = i965_irq_handler;
3221bac56d5bSEgbert Eich 			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
3222c2798b19SChris Wilson 		}
3223f71d4af4SJesse Barnes 		dev->driver->enable_vblank = i915_enable_vblank;
3224f71d4af4SJesse Barnes 		dev->driver->disable_vblank = i915_disable_vblank;
3225f71d4af4SJesse Barnes 	}
3226f71d4af4SJesse Barnes }
322720afbda2SDaniel Vetter 
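/*
 * (Re)initialize hotplug detection: reset the per-pin storm statistics,
 * decide for every connector whether it is polled or interrupt driven, and
 * then program the hotplug hardware. This expects interrupts to already be
 * enabled (i.e. the postinstall hook selected in intel_irq_init() has run),
 * so that the hpd_irq_setup() register writes take effect.
 */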
322820afbda2SDaniel Vetter void intel_hpd_init(struct drm_device *dev)
322920afbda2SDaniel Vetter {
323020afbda2SDaniel Vetter 	struct drm_i915_private *dev_priv = dev->dev_private;
3231821450c6SEgbert Eich 	struct drm_mode_config *mode_config = &dev->mode_config;
3232821450c6SEgbert Eich 	struct drm_connector *connector;
3233b5ea2d56SDaniel Vetter 	unsigned long irqflags;
3234821450c6SEgbert Eich 	int i;
323520afbda2SDaniel Vetter 
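	/* HPD_NONE is pin 0, so start at 1: clear the storm statistics and
	 * mark every real pin as enabled again. */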
3236821450c6SEgbert Eich 	for (i = 1; i < HPD_NUM_PINS; i++) {
3237821450c6SEgbert Eich 		dev_priv->hpd_stats[i].hpd_cnt = 0;
3238821450c6SEgbert Eich 		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
3239821450c6SEgbert Eich 	}
3240821450c6SEgbert Eich 	list_for_each_entry(connector, &mode_config->connector_list, head) {
3241821450c6SEgbert Eich 		struct intel_connector *intel_connector = to_intel_connector(connector);
3242821450c6SEgbert Eich 		connector->polled = intel_connector->polled;
3243821450c6SEgbert Eich 		if (!connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
3244821450c6SEgbert Eich 			connector->polled = DRM_CONNECTOR_POLL_HPD;
3245821450c6SEgbert Eich 	}
3246b5ea2d56SDaniel Vetter 
3247b5ea2d56SDaniel Vetter 	/* Interrupt setup is already guaranteed to be single-threaded, this is
3248b5ea2d56SDaniel Vetter 	 * just to make the assert_spin_locked checks happy. */
3249b5ea2d56SDaniel Vetter 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
325020afbda2SDaniel Vetter 	if (dev_priv->display.hpd_irq_setup)
325120afbda2SDaniel Vetter 		dev_priv->display.hpd_irq_setup(dev);
3252b5ea2d56SDaniel Vetter 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
325320afbda2SDaniel Vetter }
3254c67a470bSPaulo Zanoni 
3255c67a470bSPaulo Zanoni /* Disable interrupts so we can allow Package C8+. */
3256c67a470bSPaulo Zanoni void hsw_pc8_disable_interrupts(struct drm_device *dev)
3257c67a470bSPaulo Zanoni {
3258c67a470bSPaulo Zanoni 	struct drm_i915_private *dev_priv = dev->dev_private;
3259c67a470bSPaulo Zanoni 	unsigned long irqflags;
3260c67a470bSPaulo Zanoni 
3261c67a470bSPaulo Zanoni 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3262c67a470bSPaulo Zanoni 
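	/* Save the current interrupt mask and enable registers so that
	 * hsw_pc8_restore_interrupts() can put them back verbatim. */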
3263c67a470bSPaulo Zanoni 	dev_priv->pc8.regsave.deimr = I915_READ(DEIMR);
3264c67a470bSPaulo Zanoni 	dev_priv->pc8.regsave.sdeimr = I915_READ(SDEIMR);
3265c67a470bSPaulo Zanoni 	dev_priv->pc8.regsave.gtimr = I915_READ(GTIMR);
3266c67a470bSPaulo Zanoni 	dev_priv->pc8.regsave.gtier = I915_READ(GTIER);
3267c67a470bSPaulo Zanoni 	dev_priv->pc8.regsave.gen6_pmimr = I915_READ(GEN6_PMIMR);
3268c67a470bSPaulo Zanoni 
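	/* Mask everything except the PCH event and SDE hotplug bits (the
	 * arguments are inverted masks of what stays enabled), and disable
	 * all GT and PM interrupts. */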
3269c67a470bSPaulo Zanoni 	ironlake_disable_display_irq(dev_priv, ~DE_PCH_EVENT_IVB);
3270c67a470bSPaulo Zanoni 	ibx_disable_display_interrupt(dev_priv, ~SDE_HOTPLUG_MASK_CPT);
3271c67a470bSPaulo Zanoni 	ilk_disable_gt_irq(dev_priv, 0xffffffff);
3272c67a470bSPaulo Zanoni 	snb_disable_pm_irq(dev_priv, 0xffffffff);
3273c67a470bSPaulo Zanoni 
3274c67a470bSPaulo Zanoni 	dev_priv->pc8.irqs_disabled = true;
3275c67a470bSPaulo Zanoni 
3276c67a470bSPaulo Zanoni 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3277c67a470bSPaulo Zanoni }
3278c67a470bSPaulo Zanoni 
3279c67a470bSPaulo Zanoni /* Restore interrupts so we can recover from Package C8+. */
3280c67a470bSPaulo Zanoni void hsw_pc8_restore_interrupts(struct drm_device *dev)
3281c67a470bSPaulo Zanoni {
3282c67a470bSPaulo Zanoni 	struct drm_i915_private *dev_priv = dev->dev_private;
3283c67a470bSPaulo Zanoni 	unsigned long irqflags;
3284c67a470bSPaulo Zanoni 	uint32_t val, expected;
3285c67a470bSPaulo Zanoni 
3286c67a470bSPaulo Zanoni 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3287c67a470bSPaulo Zanoni 
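	/* Nothing should have touched the interrupt masks while we were in
	 * PC8; warn if the registers no longer match what
	 * hsw_pc8_disable_interrupts() programmed. */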
3288c67a470bSPaulo Zanoni 	val = I915_READ(DEIMR);
3289c67a470bSPaulo Zanoni 	expected = ~DE_PCH_EVENT_IVB;
3290c67a470bSPaulo Zanoni 	WARN(val != expected, "DEIMR is 0x%08x, not 0x%08x\n", val, expected);
3291c67a470bSPaulo Zanoni 
3292c67a470bSPaulo Zanoni 	val = I915_READ(SDEIMR) & ~SDE_HOTPLUG_MASK_CPT;
3293c67a470bSPaulo Zanoni 	expected = ~SDE_HOTPLUG_MASK_CPT;
3294c67a470bSPaulo Zanoni 	WARN(val != expected, "SDEIMR non-HPD bits are 0x%08x, not 0x%08x\n",
3295c67a470bSPaulo Zanoni 	     val, expected);
3296c67a470bSPaulo Zanoni 
3297c67a470bSPaulo Zanoni 	val = I915_READ(GTIMR);
3298c67a470bSPaulo Zanoni 	expected = 0xffffffff;
3299c67a470bSPaulo Zanoni 	WARN(val != expected, "GTIMR is 0x%08x, not 0x%08x\n", val, expected);
3300c67a470bSPaulo Zanoni 
3301c67a470bSPaulo Zanoni 	val = I915_READ(GEN6_PMIMR);
3302c67a470bSPaulo Zanoni 	expected = 0xffffffff;
3303c67a470bSPaulo Zanoni 	WARN(val != expected, "GEN6_PMIMR is 0x%08x, not 0x%08x\n", val,
3304c67a470bSPaulo Zanoni 	     expected);
3305c67a470bSPaulo Zanoni 
3306c67a470bSPaulo Zanoni 	dev_priv->pc8.irqs_disabled = false;
3307c67a470bSPaulo Zanoni 
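	/* Re-enable exactly the bits that were live before entering PC8: the
	 * saved IMR values are inverted to turn them back into enable masks,
	 * and GTIER is restored as-is. */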
3308c67a470bSPaulo Zanoni 	ironlake_enable_display_irq(dev_priv, ~dev_priv->pc8.regsave.deimr);
3309c67a470bSPaulo Zanoni 	ibx_enable_display_interrupt(dev_priv,
3310c67a470bSPaulo Zanoni 				     ~dev_priv->pc8.regsave.sdeimr &
3311c67a470bSPaulo Zanoni 				     ~SDE_HOTPLUG_MASK_CPT);
3312c67a470bSPaulo Zanoni 	ilk_enable_gt_irq(dev_priv, ~dev_priv->pc8.regsave.gtimr);
3313c67a470bSPaulo Zanoni 	snb_enable_pm_irq(dev_priv, ~dev_priv->pc8.regsave.gen6_pmimr);
3314c67a470bSPaulo Zanoni 	I915_WRITE(GTIER, dev_priv->pc8.regsave.gtier);
3315c67a470bSPaulo Zanoni 
3316c67a470bSPaulo Zanoni 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3317c67a470bSPaulo Zanoni }
3318