/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

static const u32 hpd_ibx[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_mask_i915[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_gen4[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

/* For display hotplug interrupt */
static void
ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->pc8.irqs_disabled) {
		WARN(1, "IRQs disabled\n");
		dev_priv->pc8.regsave.deimr &= ~mask;
		return;
	}

	if ((dev_priv->irq_mask & mask) != 0) {
		dev_priv->irq_mask &= ~mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
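		/* Posting read so the IMR update reaches the hardware
		 * before we return. */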
		POSTING_READ(DEIMR);
	}
}

static void
ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->pc8.irqs_disabled) {
		WARN(1, "IRQs disabled\n");
		dev_priv->pc8.regsave.deimr |= mask;
		return;
	}

	if ((dev_priv->irq_mask & mask) != mask) {
		dev_priv->irq_mask |= mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->pc8.irqs_disabled) {
		WARN(1, "IRQs disabled\n");
		dev_priv->pc8.regsave.gtimr &= ~interrupt_mask;
		dev_priv->pc8.regsave.gtimr |= (~enabled_irq_mask &
						interrupt_mask);
		return;
	}

	dev_priv->gt_irq_mask &= ~interrupt_mask;
	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	POSTING_READ(GTIMR);
}

void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, mask);
}

void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, 0);
}

/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->pc8.irqs_disabled) {
		WARN(1, "IRQs disabled\n");
		dev_priv->pc8.regsave.gen6_pmimr &= ~interrupt_mask;
		dev_priv->pc8.regsave.gen6_pmimr |= (~enabled_irq_mask &
						     interrupt_mask);
		return;
	}

	new_val = dev_priv->pm_irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_irq_mask) {
		dev_priv->pm_irq_mask = new_val;
		I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask);
		POSTING_READ(GEN6_PMIMR);
	}
}

void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, mask);
}

void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, 0);
}

static bool ivb_can_enable_err_int(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc;
	enum pipe pipe;

	assert_spin_locked(&dev_priv->irq_lock);

	for_each_pipe(pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		if (crtc->cpu_fifo_underrun_disabled)
			return false;
	}

	return true;
}

static bool cpt_can_enable_serr_int(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;
	struct intel_crtc *crtc;

	assert_spin_locked(&dev_priv->irq_lock);

	for_each_pipe(pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		if (crtc->pch_fifo_underrun_disabled)
			return false;
	}

	return true;
}

static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
						 enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t bit = (pipe == PIPE_A) ?
		       DE_PIPEA_FIFO_UNDERRUN : DE_PIPEB_FIFO_UNDERRUN;

	if (enable)
		ironlake_enable_display_irq(dev_priv, bit);
	else
		ironlake_disable_display_irq(dev_priv, bit);
}

static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
						  enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	if (enable) {
		I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));

		if (!ivb_can_enable_err_int(dev))
			return;

		ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
	} else {
		bool was_enabled = !(I915_READ(DEIMR) & DE_ERR_INT_IVB);

		/* Change the state _after_ we've read out the current one. */
		ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);

		if (!was_enabled &&
		    (I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe))) {
			DRM_DEBUG_KMS("uncleared fifo underrun on pipe %c\n",
				      pipe_name(pipe));
		}
	}
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
					 uint32_t interrupt_mask,
					 uint32_t enabled_irq_mask)
{
	uint32_t sdeimr = I915_READ(SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->pc8.irqs_disabled &&
	    (interrupt_mask & SDE_HOTPLUG_MASK_CPT)) {
		WARN(1, "IRQs disabled\n");
		dev_priv->pc8.regsave.sdeimr &= ~interrupt_mask;
		dev_priv->pc8.regsave.sdeimr |= (~enabled_irq_mask &
						 interrupt_mask);
		return;
	}

	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}
#define ibx_enable_display_interrupt(dev_priv, bits) \
	ibx_display_interrupt_update((dev_priv), (bits), (bits))
#define ibx_disable_display_interrupt(dev_priv, bits) \
	ibx_display_interrupt_update((dev_priv), (bits), 0)

static void
ibx_set_fifo_underrun_reporting(struct drm_device *dev,
				enum transcoder pch_transcoder,
				bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t bit = (pch_transcoder == TRANSCODER_A) ?
		       SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;

	if (enable)
		ibx_enable_display_interrupt(dev_priv, bit);
	else
		ibx_disable_display_interrupt(dev_priv, bit);
}

static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
					    enum transcoder pch_transcoder,
					    bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (enable) {
		I915_WRITE(SERR_INT,
			   SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));

		if (!cpt_can_enable_serr_int(dev))
			return;

		ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT);
	} else {
		uint32_t tmp = I915_READ(SERR_INT);
		bool was_enabled = !(I915_READ(SDEIMR) & SDE_ERROR_CPT);

		/* Change the state _after_ we've read out the current one. */
		ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT);

		if (!was_enabled &&
		    (tmp & SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder))) {
			DRM_DEBUG_KMS("uncleared pch fifo underrun on pch transcoder %c\n",
				      transcoder_name(pch_transcoder));
		}
	}
}

/**
 * intel_set_cpu_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pipe: pipe
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable CPU fifo underruns for a specific
 * pipe. Notice that on some Gens (e.g. IVB, HSW), disabling FIFO underrun
 * reporting for one pipe may also disable all the other CPU error interrupts
 * for the other pipes, due to the fact that there's just one interrupt
 * mask/enable bit for all the pipes.
 *
 * Returns the previous state of underrun reporting.
 */
bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
					   enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);

	ret = !intel_crtc->cpu_fifo_underrun_disabled;

	if (enable == ret)
		goto done;

	intel_crtc->cpu_fifo_underrun_disabled = !enable;

	if (IS_GEN5(dev) || IS_GEN6(dev))
		ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
	else if (IS_GEN7(dev))
		ivybridge_set_fifo_underrun_reporting(dev, pipe, enable);

done:
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
	return ret;
}

/**
 * intel_set_pch_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable PCH fifo underruns for a specific
 * PCH transcoder. Notice that on some PCHs (e.g. CPT/PPT), disabling FIFO
 * underrun reporting for one transcoder may also disable all the other PCH
 * error interrupts for the other transcoders, due to the fact that there's
 * just one interrupt mask/enable bit for all the transcoders.
 *
 * Returns the previous state of underrun reporting.
 */
bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
					   enum transcoder pch_transcoder,
					   bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	unsigned long flags;
	bool ret;

	/*
	 * NOTE: Pre-LPT has a fixed cpu pipe -> pch transcoder mapping, but LPT
	 * has only one pch transcoder A that all pipes can use. To avoid racy
	 * pch transcoder -> pipe lookups from interrupt code simply store the
	 * underrun statistics in crtc A.
	 * Since we never expose this anywhere nor use it outside of the fifo
	 * underrun code here using the "wrong" crtc on LPT won't cause issues.
	 */

	spin_lock_irqsave(&dev_priv->irq_lock, flags);

	ret = !intel_crtc->pch_fifo_underrun_disabled;

	if (enable == ret)
		goto done;

	intel_crtc->pch_fifo_underrun_disabled = !enable;

	if (HAS_PCH_IBX(dev))
		ibx_set_fifo_underrun_reporting(dev, pch_transcoder, enable);
	else
		cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable);

done:
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
	return ret;
}


void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & 0x7fff0000;

	assert_spin_locked(&dev_priv->irq_lock);

	if ((pipestat & mask) == mask)
		return;

	/* Enable the interrupt, clear any pending status */
	pipestat |= mask | (mask >> 16);
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

void
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & 0x7fff0000;

	assert_spin_locked(&dev_priv->irq_lock);

	if ((pipestat & mask) == 0)
		return;

	pipestat &= ~mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 */
static void i915_enable_asle_pipestat(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long irqflags;

	if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	i915_enable_pipestat(dev_priv, 1, PIPE_LEGACY_BLC_EVENT_ENABLE);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, 0, PIPE_LEGACY_BLC_EVENT_ENABLE);

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		/* Locking is horribly broken here, but whatever. */
		struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

		return intel_crtc->active;
	} else {
		return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
	}
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low, pixel, vbl_start;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		struct intel_crtc *intel_crtc =
			to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
		const struct drm_display_mode *mode =
			&intel_crtc->config.adjusted_mode;

		vbl_start = mode->crtc_vblank_start * mode->crtc_htotal;
	} else {
		enum transcoder cpu_transcoder =
			intel_pipe_to_cpu_transcoder(dev_priv, pipe);
		u32 htotal;

		htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1;
		vbl_start = (I915_READ(VBLANK(cpu_transcoder)) & 0x1fff) + 1;

		vbl_start *= htotal;
	}

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low
	 * register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low   = I915_READ(low_frame);
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * The frame counter increments at beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */
	return ((high1 << 8) | low) + (pixel >= vbl_start);
}

static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int reg = PIPE_FRMCOUNT_GM45(pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	return I915_READ(reg);
}

static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
				    int *vpos, int *hpos)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	const struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
	int position;
	int vbl_start, vbl_end, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;

	if (!intel_crtc->active) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	htotal = mode->crtc_htotal;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = I915_READ(PIPEDSL(pipe)) & 0x1fff;
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;
	}

	in_vbl = position >= vbl_start && position < vbl_end;

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;

	if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		*vpos = position;
		*hpos = 0;
	} else {
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_INVBL;

	return ret;
}

static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
			      int *max_error,
			      struct timeval *vblank_time,
			      unsigned flags)
{
	struct drm_crtc *crtc;

	if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	if (!crtc->enabled) {
		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags,
						     crtc);
}

static bool intel_hpd_irq_event(struct drm_device *dev,
				struct drm_connector *connector)
{
	enum drm_connector_status old_status;

	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
	old_status = connector->status;

	connector->status = connector->funcs->detect(connector, false);
	if (old_status == connector->status)
		return false;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
		      connector->base.id,
		      drm_get_connector_name(connector),
		      drm_get_connector_status_name(old_status),
		      drm_get_connector_status_name(connector->status));

	return true;
}

/*
 * Handle hotplug events outside the interrupt handler proper.
 */
#define I915_REENABLE_HOTPLUG_DELAY (2*60*1000)

static void i915_hotplug_work_func(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    hotplug_work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_connector *intel_connector;
	struct intel_encoder *intel_encoder;
	struct drm_connector *connector;
	unsigned long irqflags;
	bool hpd_disabled = false;
	bool changed = false;
	u32 hpd_event_bits;

	/* HPD irq before everything is fully set up. */
	if (!dev_priv->enable_hotplug_processing)
		return;

	mutex_lock(&mode_config->mutex);
	DRM_DEBUG_KMS("running encoder hotplug functions\n");

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	hpd_event_bits = dev_priv->hpd_event_bits;
	dev_priv->hpd_event_bits = 0;
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		intel_encoder = intel_connector->encoder;
		if (intel_encoder->hpd_pin > HPD_NONE &&
		    dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
		    connector->polled == DRM_CONNECTOR_POLL_HPD) {
			DRM_INFO("HPD interrupt storm detected on connector %s: "
				 "switching from hotplug detection to polling\n",
				 drm_get_connector_name(connector));
			dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
			connector->polled = DRM_CONNECTOR_POLL_CONNECT
				| DRM_CONNECTOR_POLL_DISCONNECT;
			hpd_disabled = true;
		}
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
				      drm_get_connector_name(connector), intel_encoder->hpd_pin);
		}
	}
	/* if there were no outputs to poll, poll was disabled,
	 * therefore make sure it's enabled when disabling HPD on
	 * some connectors */
	if (hpd_disabled) {
		drm_kms_helper_poll_enable(dev);
		mod_timer(&dev_priv->hotplug_reenable_timer,
			  jiffies + msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
	}

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		intel_encoder = intel_connector->encoder;
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			if (intel_encoder->hot_plug)
				intel_encoder->hot_plug(intel_encoder);
			if (intel_hpd_irq_event(dev, connector))
				changed = true;
		}
	}
	mutex_unlock(&mode_config->mutex);

	if (changed)
		drm_kms_helper_hotplug_event(dev);
}

static void ironlake_rps_change_irq_handler(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;

	spin_lock(&mchdev_lock);

	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	spin_unlock(&mchdev_lock);

	return;
}

static void notify_ring(struct drm_device *dev,
			struct intel_ring_buffer *ring)
{
	if (ring->obj == NULL)
		return;

	trace_i915_gem_request_complete(ring);

	wake_up_all(&ring->irq_queue);
	i915_queue_hangcheck(dev);
}

static void gen6_pm_rps_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    rps.work);
	u32 pm_iir;
	int new_delay, adj;

	spin_lock_irq(&dev_priv->irq_lock);
	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	/* Make sure not to corrupt PMIMR state used by ringbuffer code */
	snb_enable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS);
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Make sure we didn't queue anything we're not going to process. */
	WARN_ON(pm_iir & ~GEN6_PM_RPS_EVENTS);

	if ((pm_iir & GEN6_PM_RPS_EVENTS) == 0)
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	adj = dev_priv->rps.last_adj;
	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else
			adj = 1;
		new_delay = dev_priv->rps.cur_delay + adj;

		/*
		 * For better performance, jump directly
		 * to RPe if we're below it.
		 */
		if (new_delay < dev_priv->rps.rpe_delay)
			new_delay = dev_priv->rps.rpe_delay;
	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
		if (dev_priv->rps.cur_delay > dev_priv->rps.rpe_delay)
			new_delay = dev_priv->rps.rpe_delay;
		else
			new_delay = dev_priv->rps.min_delay;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
		if (adj < 0)
			adj *= 2;
		else
			adj = -1;
		new_delay = dev_priv->rps.cur_delay + adj;
	} else { /* unknown event */
		new_delay = dev_priv->rps.cur_delay;
	}

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	if (new_delay < (int)dev_priv->rps.min_delay)
		new_delay = dev_priv->rps.min_delay;
	if (new_delay > (int)dev_priv->rps.max_delay)
		new_delay = dev_priv->rps.max_delay;
	dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_delay;

	if (IS_VALLEYVIEW(dev_priv->dev))
		valleyview_set_rps(dev_priv->dev, new_delay);
	else
		gen6_set_rps(dev_priv->dev, new_delay);

	mutex_unlock(&dev_priv->rps.hw_lock);
}


/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace.
 * As a consequence of this event, userspace should try to remap the bad
 * rows, since statistically the same row is likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	uint32_t misccpctl;
	unsigned long flags;
	uint8_t slice = 0;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->dev->struct_mutex);

	/* If we've screwed up tracking, just let the interrupt fire again */
	if (WARN_ON(!dev_priv->l3_parity.which_slice))
		goto out;

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		u32 reg;

		slice--;
		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1<<slice);

		reg = GEN7_L3CDERRST1 + (slice * 0x200);

		error_status = I915_READ(reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		POSTING_READ(reg);

		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

		kobject_uevent_env(&dev_priv->dev->primary->kdev.kobj,
				   KOBJ_CHANGE, parity_event);

		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			  slice, row, bank, subbank);

		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
	}

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

out:
	WARN_ON(dev_priv->l3_parity.which_slice);
	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	ilk_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	mutex_unlock(&dev_priv->dev->struct_mutex);
}

static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (!HAS_L3_DPF(dev))
		return;

	spin_lock(&dev_priv->irq_lock);
	ilk_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
	spin_unlock(&dev_priv->irq_lock);

	iir &= GT_PARITY_ERROR(dev);
	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
		dev_priv->l3_parity.which_slice |= 1 << 1;

	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
		dev_priv->l3_parity.which_slice |= 1 << 0;

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}

static void ilk_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & ILK_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
}

static void snb_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{

	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
	if (gt_iir & GT_BLT_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[BCS]);

	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
		      GT_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) {
		DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
		i915_handle_error(dev, false);
	}

	if (gt_iir & GT_PARITY_ERROR(dev))
		ivybridge_parity_error_irq_handler(dev, gt_iir);
}

#define HPD_STORM_DETECT_PERIOD 1000
#define HPD_STORM_THRESHOLD 5

static inline void intel_hpd_irq_handler(struct drm_device *dev,
					 u32 hotplug_trigger,
					 const u32 *hpd)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i;
	bool storm_detected = false;

	if (!hotplug_trigger)
		return;

	spin_lock(&dev_priv->irq_lock);
	for (i = 1; i < HPD_NUM_PINS; i++) {

		WARN(((hpd[i] & hotplug_trigger) &&
		      dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED),
		     "Received HPD interrupt although disabled\n");

		if (!(hpd[i] & hotplug_trigger) ||
		    dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
			continue;

		dev_priv->hpd_event_bits |= (1 << i);
		if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
				   dev_priv->hpd_stats[i].hpd_last_jiffies
				   + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
			dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
			dev_priv->hpd_stats[i].hpd_cnt = 0;
			DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", i);
		} else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
			dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
			dev_priv->hpd_event_bits &= ~(1 << i);
			DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
			storm_detected = true;
		} else {
			dev_priv->hpd_stats[i].hpd_cnt++;
			DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", i,
				      dev_priv->hpd_stats[i].hpd_cnt);
		}
	}

	if (storm_detected)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock(&dev_priv->irq_lock);

	/*
	 * Our hotplug handler can grab modeset locks (by calling down into the
	 * fb helpers). Hence it must not be run on our own dev-priv->wq work
	 * queue for otherwise the flush_work in the pageflip code will
	 * deadlock.
1105645416f5SDaniel Vetter */ 1106645416f5SDaniel Vetter schedule_work(&dev_priv->hotplug_work); 1107b543fb04SEgbert Eich } 1108b543fb04SEgbert Eich 1109515ac2bbSDaniel Vetter static void gmbus_irq_handler(struct drm_device *dev) 1110515ac2bbSDaniel Vetter { 111128c70f16SDaniel Vetter struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private; 111228c70f16SDaniel Vetter 111328c70f16SDaniel Vetter wake_up_all(&dev_priv->gmbus_wait_queue); 1114515ac2bbSDaniel Vetter } 1115515ac2bbSDaniel Vetter 1116ce99c256SDaniel Vetter static void dp_aux_irq_handler(struct drm_device *dev) 1117ce99c256SDaniel Vetter { 11189ee32feaSDaniel Vetter struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private; 11199ee32feaSDaniel Vetter 11209ee32feaSDaniel Vetter wake_up_all(&dev_priv->gmbus_wait_queue); 1121ce99c256SDaniel Vetter } 1122ce99c256SDaniel Vetter 11231403c0d4SPaulo Zanoni /* The RPS events need forcewake, so we add them to a work queue and mask their 11241403c0d4SPaulo Zanoni * IMR bits until the work is done. Other interrupts can be processed without 11251403c0d4SPaulo Zanoni * the work queue. */ 11261403c0d4SPaulo Zanoni static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir) 1127baf02a1fSBen Widawsky { 112841a05a3aSDaniel Vetter if (pm_iir & GEN6_PM_RPS_EVENTS) { 112959cdb63dSDaniel Vetter spin_lock(&dev_priv->irq_lock); 11304848405cSBen Widawsky dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_RPS_EVENTS; 11314d3b3d5fSPaulo Zanoni snb_disable_pm_irq(dev_priv, pm_iir & GEN6_PM_RPS_EVENTS); 113259cdb63dSDaniel Vetter spin_unlock(&dev_priv->irq_lock); 11332adbee62SDaniel Vetter 11342adbee62SDaniel Vetter queue_work(dev_priv->wq, &dev_priv->rps.work); 113541a05a3aSDaniel Vetter } 1136baf02a1fSBen Widawsky 11371403c0d4SPaulo Zanoni if (HAS_VEBOX(dev_priv->dev)) { 113812638c57SBen Widawsky if (pm_iir & PM_VEBOX_USER_INTERRUPT) 113912638c57SBen Widawsky notify_ring(dev_priv->dev, &dev_priv->ring[VECS]); 114012638c57SBen Widawsky 114112638c57SBen Widawsky if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) { 114212638c57SBen Widawsky DRM_ERROR("VEBOX CS error interrupt 0x%08x\n", pm_iir); 114312638c57SBen Widawsky i915_handle_error(dev_priv->dev, false); 114412638c57SBen Widawsky } 114512638c57SBen Widawsky } 11461403c0d4SPaulo Zanoni } 1147baf02a1fSBen Widawsky 1148ff1f525eSDaniel Vetter static irqreturn_t valleyview_irq_handler(int irq, void *arg) 11497e231dbeSJesse Barnes { 11507e231dbeSJesse Barnes struct drm_device *dev = (struct drm_device *) arg; 11517e231dbeSJesse Barnes drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 11527e231dbeSJesse Barnes u32 iir, gt_iir, pm_iir; 11537e231dbeSJesse Barnes irqreturn_t ret = IRQ_NONE; 11547e231dbeSJesse Barnes unsigned long irqflags; 11557e231dbeSJesse Barnes int pipe; 11567e231dbeSJesse Barnes u32 pipe_stats[I915_MAX_PIPES]; 11577e231dbeSJesse Barnes 11587e231dbeSJesse Barnes atomic_inc(&dev_priv->irq_received); 11597e231dbeSJesse Barnes 11607e231dbeSJesse Barnes while (true) { 11617e231dbeSJesse Barnes iir = I915_READ(VLV_IIR); 11627e231dbeSJesse Barnes gt_iir = I915_READ(GTIIR); 11637e231dbeSJesse Barnes pm_iir = I915_READ(GEN6_PMIIR); 11647e231dbeSJesse Barnes 11657e231dbeSJesse Barnes if (gt_iir == 0 && pm_iir == 0 && iir == 0) 11667e231dbeSJesse Barnes goto out; 11677e231dbeSJesse Barnes 11687e231dbeSJesse Barnes ret = IRQ_HANDLED; 11697e231dbeSJesse Barnes 1170e7b4c6b1SDaniel Vetter snb_gt_irq_handler(dev, dev_priv, gt_iir); 11717e231dbeSJesse Barnes 11727e231dbeSJesse Barnes 
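/*
 * Illustrative sketch of the "latch, mask, defer" pattern that
 * gen6_rps_irq_handler() above uses for events whose handling must happen
 * in process context: the pending bits are saved under irq_lock, masked in
 * the IMR so they cannot re-fire, and a work item (which unmasks them again
 * once it is done) is queued. With hypothetical foo_pending/foo_work/
 * FOO_EVENT_BITS placeholders instead of the driver's real names, the shape
 * is roughly:
 *
 *	spin_lock(&dev_priv->irq_lock);
 *	dev_priv->foo_pending |= iir & FOO_EVENT_BITS;
 *	snb_disable_pm_irq(dev_priv, iir & FOO_EVENT_BITS);
 *	spin_unlock(&dev_priv->irq_lock);
 *	queue_work(dev_priv->wq, &dev_priv->foo_work);
 */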
spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 11737e231dbeSJesse Barnes for_each_pipe(pipe) { 11747e231dbeSJesse Barnes int reg = PIPESTAT(pipe); 11757e231dbeSJesse Barnes pipe_stats[pipe] = I915_READ(reg); 11767e231dbeSJesse Barnes 11777e231dbeSJesse Barnes /* 11787e231dbeSJesse Barnes * Clear the PIPE*STAT regs before the IIR 11797e231dbeSJesse Barnes */ 11807e231dbeSJesse Barnes if (pipe_stats[pipe] & 0x8000ffff) { 11817e231dbeSJesse Barnes if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 11827e231dbeSJesse Barnes DRM_DEBUG_DRIVER("pipe %c underrun\n", 11837e231dbeSJesse Barnes pipe_name(pipe)); 11847e231dbeSJesse Barnes I915_WRITE(reg, pipe_stats[pipe]); 11857e231dbeSJesse Barnes } 11867e231dbeSJesse Barnes } 11877e231dbeSJesse Barnes spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 11887e231dbeSJesse Barnes 118931acc7f5SJesse Barnes for_each_pipe(pipe) { 119031acc7f5SJesse Barnes if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS) 119131acc7f5SJesse Barnes drm_handle_vblank(dev, pipe); 119231acc7f5SJesse Barnes 119331acc7f5SJesse Barnes if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) { 119431acc7f5SJesse Barnes intel_prepare_page_flip(dev, pipe); 119531acc7f5SJesse Barnes intel_finish_page_flip(dev, pipe); 119631acc7f5SJesse Barnes } 119731acc7f5SJesse Barnes } 119831acc7f5SJesse Barnes 11997e231dbeSJesse Barnes /* Consume port. Then clear IIR or we'll miss events */ 12007e231dbeSJesse Barnes if (iir & I915_DISPLAY_PORT_INTERRUPT) { 12017e231dbeSJesse Barnes u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); 1202b543fb04SEgbert Eich u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; 12037e231dbeSJesse Barnes 12047e231dbeSJesse Barnes DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", 12057e231dbeSJesse Barnes hotplug_status); 120691d131d2SDaniel Vetter 120710a504deSDaniel Vetter intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915); 120891d131d2SDaniel Vetter 12097e231dbeSJesse Barnes I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); 12107e231dbeSJesse Barnes I915_READ(PORT_HOTPLUG_STAT); 12117e231dbeSJesse Barnes } 12127e231dbeSJesse Barnes 1213515ac2bbSDaniel Vetter if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 1214515ac2bbSDaniel Vetter gmbus_irq_handler(dev); 12157e231dbeSJesse Barnes 121660611c13SPaulo Zanoni if (pm_iir) 1217d0ecd7e2SDaniel Vetter gen6_rps_irq_handler(dev_priv, pm_iir); 12187e231dbeSJesse Barnes 12197e231dbeSJesse Barnes I915_WRITE(GTIIR, gt_iir); 12207e231dbeSJesse Barnes I915_WRITE(GEN6_PMIIR, pm_iir); 12217e231dbeSJesse Barnes I915_WRITE(VLV_IIR, iir); 12227e231dbeSJesse Barnes } 12237e231dbeSJesse Barnes 12247e231dbeSJesse Barnes out: 12257e231dbeSJesse Barnes return ret; 12267e231dbeSJesse Barnes } 12277e231dbeSJesse Barnes 122823e81d69SAdam Jackson static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir) 1229776ad806SJesse Barnes { 1230776ad806SJesse Barnes drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 12319db4a9c7SJesse Barnes int pipe; 1232b543fb04SEgbert Eich u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK; 1233776ad806SJesse Barnes 123410a504deSDaniel Vetter intel_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx); 123591d131d2SDaniel Vetter 1236cfc33bf7SVille Syrjälä if (pch_iir & SDE_AUDIO_POWER_MASK) { 1237cfc33bf7SVille Syrjälä int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >> 1238776ad806SJesse Barnes SDE_AUDIO_POWER_SHIFT); 1239cfc33bf7SVille Syrjälä DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", 1240cfc33bf7SVille Syrjälä port_name(port)); 1241cfc33bf7SVille 
Syrjälä } 1242776ad806SJesse Barnes 1243ce99c256SDaniel Vetter if (pch_iir & SDE_AUX_MASK) 1244ce99c256SDaniel Vetter dp_aux_irq_handler(dev); 1245ce99c256SDaniel Vetter 1246776ad806SJesse Barnes if (pch_iir & SDE_GMBUS) 1247515ac2bbSDaniel Vetter gmbus_irq_handler(dev); 1248776ad806SJesse Barnes 1249776ad806SJesse Barnes if (pch_iir & SDE_AUDIO_HDCP_MASK) 1250776ad806SJesse Barnes DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n"); 1251776ad806SJesse Barnes 1252776ad806SJesse Barnes if (pch_iir & SDE_AUDIO_TRANS_MASK) 1253776ad806SJesse Barnes DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n"); 1254776ad806SJesse Barnes 1255776ad806SJesse Barnes if (pch_iir & SDE_POISON) 1256776ad806SJesse Barnes DRM_ERROR("PCH poison interrupt\n"); 1257776ad806SJesse Barnes 12589db4a9c7SJesse Barnes if (pch_iir & SDE_FDI_MASK) 12599db4a9c7SJesse Barnes for_each_pipe(pipe) 12609db4a9c7SJesse Barnes DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 12619db4a9c7SJesse Barnes pipe_name(pipe), 12629db4a9c7SJesse Barnes I915_READ(FDI_RX_IIR(pipe))); 1263776ad806SJesse Barnes 1264776ad806SJesse Barnes if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE)) 1265776ad806SJesse Barnes DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n"); 1266776ad806SJesse Barnes 1267776ad806SJesse Barnes if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR)) 1268776ad806SJesse Barnes DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n"); 1269776ad806SJesse Barnes 1270776ad806SJesse Barnes if (pch_iir & SDE_TRANSA_FIFO_UNDER) 12718664281bSPaulo Zanoni if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, 12728664281bSPaulo Zanoni false)) 12738664281bSPaulo Zanoni DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n"); 12748664281bSPaulo Zanoni 12758664281bSPaulo Zanoni if (pch_iir & SDE_TRANSB_FIFO_UNDER) 12768664281bSPaulo Zanoni if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B, 12778664281bSPaulo Zanoni false)) 12788664281bSPaulo Zanoni DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n"); 12798664281bSPaulo Zanoni } 12808664281bSPaulo Zanoni 12818664281bSPaulo Zanoni static void ivb_err_int_handler(struct drm_device *dev) 12828664281bSPaulo Zanoni { 12838664281bSPaulo Zanoni struct drm_i915_private *dev_priv = dev->dev_private; 12848664281bSPaulo Zanoni u32 err_int = I915_READ(GEN7_ERR_INT); 12858664281bSPaulo Zanoni 1286de032bf4SPaulo Zanoni if (err_int & ERR_INT_POISON) 1287de032bf4SPaulo Zanoni DRM_ERROR("Poison interrupt\n"); 1288de032bf4SPaulo Zanoni 12898664281bSPaulo Zanoni if (err_int & ERR_INT_FIFO_UNDERRUN_A) 12908664281bSPaulo Zanoni if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false)) 12918664281bSPaulo Zanoni DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n"); 12928664281bSPaulo Zanoni 12938664281bSPaulo Zanoni if (err_int & ERR_INT_FIFO_UNDERRUN_B) 12948664281bSPaulo Zanoni if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false)) 12958664281bSPaulo Zanoni DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n"); 12968664281bSPaulo Zanoni 12978664281bSPaulo Zanoni if (err_int & ERR_INT_FIFO_UNDERRUN_C) 12988664281bSPaulo Zanoni if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_C, false)) 12998664281bSPaulo Zanoni DRM_DEBUG_DRIVER("Pipe C FIFO underrun\n"); 13008664281bSPaulo Zanoni 13018664281bSPaulo Zanoni I915_WRITE(GEN7_ERR_INT, err_int); 13028664281bSPaulo Zanoni } 13038664281bSPaulo Zanoni 13048664281bSPaulo Zanoni static void cpt_serr_int_handler(struct drm_device *dev) 13058664281bSPaulo Zanoni { 13068664281bSPaulo Zanoni struct drm_i915_private *dev_priv = dev->dev_private; 
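/*
 * This handler follows the same shape as ivb_err_int_handler() above: read
 * the sticky error status register once, report each condition, then write
 * the value that was read back to the register, which acks only the bits
 * that were actually observed. Reduced to a sketch (FOO_ERR and FOO_POISON
 * are placeholder names, not real registers):
 *
 *	u32 status = I915_READ(FOO_ERR);
 *	if (status & FOO_POISON)
 *		DRM_ERROR("foo poison interrupt\n");
 *	I915_WRITE(FOO_ERR, status);
 */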
13078664281bSPaulo Zanoni u32 serr_int = I915_READ(SERR_INT); 13088664281bSPaulo Zanoni 1309de032bf4SPaulo Zanoni if (serr_int & SERR_INT_POISON) 1310de032bf4SPaulo Zanoni DRM_ERROR("PCH poison interrupt\n"); 1311de032bf4SPaulo Zanoni 13128664281bSPaulo Zanoni if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN) 13138664281bSPaulo Zanoni if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, 13148664281bSPaulo Zanoni false)) 13158664281bSPaulo Zanoni DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n"); 13168664281bSPaulo Zanoni 13178664281bSPaulo Zanoni if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN) 13188664281bSPaulo Zanoni if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B, 13198664281bSPaulo Zanoni false)) 13208664281bSPaulo Zanoni DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n"); 13218664281bSPaulo Zanoni 13228664281bSPaulo Zanoni if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN) 13238664281bSPaulo Zanoni if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_C, 13248664281bSPaulo Zanoni false)) 13258664281bSPaulo Zanoni DRM_DEBUG_DRIVER("PCH transcoder C FIFO underrun\n"); 13268664281bSPaulo Zanoni 13278664281bSPaulo Zanoni I915_WRITE(SERR_INT, serr_int); 1328776ad806SJesse Barnes } 1329776ad806SJesse Barnes 133023e81d69SAdam Jackson static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir) 133123e81d69SAdam Jackson { 133223e81d69SAdam Jackson drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 133323e81d69SAdam Jackson int pipe; 1334b543fb04SEgbert Eich u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; 133523e81d69SAdam Jackson 133610a504deSDaniel Vetter intel_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt); 133791d131d2SDaniel Vetter 1338cfc33bf7SVille Syrjälä if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) { 1339cfc33bf7SVille Syrjälä int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> 134023e81d69SAdam Jackson SDE_AUDIO_POWER_SHIFT_CPT); 1341cfc33bf7SVille Syrjälä DRM_DEBUG_DRIVER("PCH audio power change on port %c\n", 1342cfc33bf7SVille Syrjälä port_name(port)); 1343cfc33bf7SVille Syrjälä } 134423e81d69SAdam Jackson 134523e81d69SAdam Jackson if (pch_iir & SDE_AUX_MASK_CPT) 1346ce99c256SDaniel Vetter dp_aux_irq_handler(dev); 134723e81d69SAdam Jackson 134823e81d69SAdam Jackson if (pch_iir & SDE_GMBUS_CPT) 1349515ac2bbSDaniel Vetter gmbus_irq_handler(dev); 135023e81d69SAdam Jackson 135123e81d69SAdam Jackson if (pch_iir & SDE_AUDIO_CP_REQ_CPT) 135223e81d69SAdam Jackson DRM_DEBUG_DRIVER("Audio CP request interrupt\n"); 135323e81d69SAdam Jackson 135423e81d69SAdam Jackson if (pch_iir & SDE_AUDIO_CP_CHG_CPT) 135523e81d69SAdam Jackson DRM_DEBUG_DRIVER("Audio CP change interrupt\n"); 135623e81d69SAdam Jackson 135723e81d69SAdam Jackson if (pch_iir & SDE_FDI_MASK_CPT) 135823e81d69SAdam Jackson for_each_pipe(pipe) 135923e81d69SAdam Jackson DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 136023e81d69SAdam Jackson pipe_name(pipe), 136123e81d69SAdam Jackson I915_READ(FDI_RX_IIR(pipe))); 13628664281bSPaulo Zanoni 13638664281bSPaulo Zanoni if (pch_iir & SDE_ERROR_CPT) 13648664281bSPaulo Zanoni cpt_serr_int_handler(dev); 136523e81d69SAdam Jackson } 136623e81d69SAdam Jackson 1367c008bc6eSPaulo Zanoni static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir) 1368c008bc6eSPaulo Zanoni { 1369c008bc6eSPaulo Zanoni struct drm_i915_private *dev_priv = dev->dev_private; 1370c008bc6eSPaulo Zanoni 1371c008bc6eSPaulo Zanoni if (de_iir & DE_AUX_CHANNEL_A) 1372c008bc6eSPaulo Zanoni dp_aux_irq_handler(dev); 1373c008bc6eSPaulo Zanoni 1374c008bc6eSPaulo Zanoni if 
(de_iir & DE_GSE) 1375c008bc6eSPaulo Zanoni intel_opregion_asle_intr(dev); 1376c008bc6eSPaulo Zanoni 1377c008bc6eSPaulo Zanoni if (de_iir & DE_PIPEA_VBLANK) 1378c008bc6eSPaulo Zanoni drm_handle_vblank(dev, 0); 1379c008bc6eSPaulo Zanoni 1380c008bc6eSPaulo Zanoni if (de_iir & DE_PIPEB_VBLANK) 1381c008bc6eSPaulo Zanoni drm_handle_vblank(dev, 1); 1382c008bc6eSPaulo Zanoni 1383c008bc6eSPaulo Zanoni if (de_iir & DE_POISON) 1384c008bc6eSPaulo Zanoni DRM_ERROR("Poison interrupt\n"); 1385c008bc6eSPaulo Zanoni 1386c008bc6eSPaulo Zanoni if (de_iir & DE_PIPEA_FIFO_UNDERRUN) 1387c008bc6eSPaulo Zanoni if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false)) 1388c008bc6eSPaulo Zanoni DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n"); 1389c008bc6eSPaulo Zanoni 1390c008bc6eSPaulo Zanoni if (de_iir & DE_PIPEB_FIFO_UNDERRUN) 1391c008bc6eSPaulo Zanoni if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false)) 1392c008bc6eSPaulo Zanoni DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n"); 1393c008bc6eSPaulo Zanoni 1394c008bc6eSPaulo Zanoni if (de_iir & DE_PLANEA_FLIP_DONE) { 1395c008bc6eSPaulo Zanoni intel_prepare_page_flip(dev, 0); 1396c008bc6eSPaulo Zanoni intel_finish_page_flip_plane(dev, 0); 1397c008bc6eSPaulo Zanoni } 1398c008bc6eSPaulo Zanoni 1399c008bc6eSPaulo Zanoni if (de_iir & DE_PLANEB_FLIP_DONE) { 1400c008bc6eSPaulo Zanoni intel_prepare_page_flip(dev, 1); 1401c008bc6eSPaulo Zanoni intel_finish_page_flip_plane(dev, 1); 1402c008bc6eSPaulo Zanoni } 1403c008bc6eSPaulo Zanoni 1404c008bc6eSPaulo Zanoni /* check event from PCH */ 1405c008bc6eSPaulo Zanoni if (de_iir & DE_PCH_EVENT) { 1406c008bc6eSPaulo Zanoni u32 pch_iir = I915_READ(SDEIIR); 1407c008bc6eSPaulo Zanoni 1408c008bc6eSPaulo Zanoni if (HAS_PCH_CPT(dev)) 1409c008bc6eSPaulo Zanoni cpt_irq_handler(dev, pch_iir); 1410c008bc6eSPaulo Zanoni else 1411c008bc6eSPaulo Zanoni ibx_irq_handler(dev, pch_iir); 1412c008bc6eSPaulo Zanoni 1413c008bc6eSPaulo Zanoni /* should clear PCH hotplug event before clear CPU irq */ 1414c008bc6eSPaulo Zanoni I915_WRITE(SDEIIR, pch_iir); 1415c008bc6eSPaulo Zanoni } 1416c008bc6eSPaulo Zanoni 1417c008bc6eSPaulo Zanoni if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT) 1418c008bc6eSPaulo Zanoni ironlake_rps_change_irq_handler(dev); 1419c008bc6eSPaulo Zanoni } 1420c008bc6eSPaulo Zanoni 14219719fb98SPaulo Zanoni static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir) 14229719fb98SPaulo Zanoni { 14239719fb98SPaulo Zanoni struct drm_i915_private *dev_priv = dev->dev_private; 14249719fb98SPaulo Zanoni int i; 14259719fb98SPaulo Zanoni 14269719fb98SPaulo Zanoni if (de_iir & DE_ERR_INT_IVB) 14279719fb98SPaulo Zanoni ivb_err_int_handler(dev); 14289719fb98SPaulo Zanoni 14299719fb98SPaulo Zanoni if (de_iir & DE_AUX_CHANNEL_A_IVB) 14309719fb98SPaulo Zanoni dp_aux_irq_handler(dev); 14319719fb98SPaulo Zanoni 14329719fb98SPaulo Zanoni if (de_iir & DE_GSE_IVB) 14339719fb98SPaulo Zanoni intel_opregion_asle_intr(dev); 14349719fb98SPaulo Zanoni 14359719fb98SPaulo Zanoni for (i = 0; i < 3; i++) { 14369719fb98SPaulo Zanoni if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i))) 14379719fb98SPaulo Zanoni drm_handle_vblank(dev, i); 14389719fb98SPaulo Zanoni if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) { 14399719fb98SPaulo Zanoni intel_prepare_page_flip(dev, i); 14409719fb98SPaulo Zanoni intel_finish_page_flip_plane(dev, i); 14419719fb98SPaulo Zanoni } 14429719fb98SPaulo Zanoni } 14439719fb98SPaulo Zanoni 14449719fb98SPaulo Zanoni /* check event from PCH */ 14459719fb98SPaulo Zanoni if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) { 
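/*
 * As in ilk_display_irq_handler() above, the south (PCH) interrupts are
 * cascaded into the CPU interrupt through the DE_PCH_EVENT bit, so SDEIIR
 * is read and acked here, before the caller acks DEIIR; clearing them in
 * the opposite order could presumably lose a PCH interrupt that arrives in
 * between.
 */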
14469719fb98SPaulo Zanoni u32 pch_iir = I915_READ(SDEIIR); 14479719fb98SPaulo Zanoni 14489719fb98SPaulo Zanoni cpt_irq_handler(dev, pch_iir); 14499719fb98SPaulo Zanoni 14509719fb98SPaulo Zanoni /* clear PCH hotplug event before clear CPU irq */ 14519719fb98SPaulo Zanoni I915_WRITE(SDEIIR, pch_iir); 14529719fb98SPaulo Zanoni } 14539719fb98SPaulo Zanoni } 14549719fb98SPaulo Zanoni 1455f1af8fc1SPaulo Zanoni static irqreturn_t ironlake_irq_handler(int irq, void *arg) 1456b1f14ad0SJesse Barnes { 1457b1f14ad0SJesse Barnes struct drm_device *dev = (struct drm_device *) arg; 1458b1f14ad0SJesse Barnes drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1459f1af8fc1SPaulo Zanoni u32 de_iir, gt_iir, de_ier, sde_ier = 0; 14600e43406bSChris Wilson irqreturn_t ret = IRQ_NONE; 1461b1f14ad0SJesse Barnes 1462b1f14ad0SJesse Barnes atomic_inc(&dev_priv->irq_received); 1463b1f14ad0SJesse Barnes 14648664281bSPaulo Zanoni /* We get interrupts on unclaimed registers, so check for this before we 14658664281bSPaulo Zanoni * do any I915_{READ,WRITE}. */ 1466907b28c5SChris Wilson intel_uncore_check_errors(dev); 14678664281bSPaulo Zanoni 1468b1f14ad0SJesse Barnes /* disable master interrupt before clearing iir */ 1469b1f14ad0SJesse Barnes de_ier = I915_READ(DEIER); 1470b1f14ad0SJesse Barnes I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); 147123a78516SPaulo Zanoni POSTING_READ(DEIER); 14720e43406bSChris Wilson 147344498aeaSPaulo Zanoni /* Disable south interrupts. We'll only write to SDEIIR once, so further 147444498aeaSPaulo Zanoni * interrupts will be stored on its back queue, and then we'll be 147544498aeaSPaulo Zanoni * able to process them after we restore SDEIER (as soon as we restore 147644498aeaSPaulo Zanoni * it, we'll get an interrupt if SDEIIR still has something to process 147744498aeaSPaulo Zanoni * due to its back queue).
*/ 1478ab5c608bSBen Widawsky if (!HAS_PCH_NOP(dev)) { 147944498aeaSPaulo Zanoni sde_ier = I915_READ(SDEIER); 148044498aeaSPaulo Zanoni I915_WRITE(SDEIER, 0); 148144498aeaSPaulo Zanoni POSTING_READ(SDEIER); 1482ab5c608bSBen Widawsky } 148344498aeaSPaulo Zanoni 14840e43406bSChris Wilson gt_iir = I915_READ(GTIIR); 14850e43406bSChris Wilson if (gt_iir) { 1486d8fc8a47SPaulo Zanoni if (INTEL_INFO(dev)->gen >= 6) 14870e43406bSChris Wilson snb_gt_irq_handler(dev, dev_priv, gt_iir); 1488d8fc8a47SPaulo Zanoni else 1489d8fc8a47SPaulo Zanoni ilk_gt_irq_handler(dev, dev_priv, gt_iir); 14900e43406bSChris Wilson I915_WRITE(GTIIR, gt_iir); 14910e43406bSChris Wilson ret = IRQ_HANDLED; 14920e43406bSChris Wilson } 1493b1f14ad0SJesse Barnes 1494b1f14ad0SJesse Barnes de_iir = I915_READ(DEIIR); 14950e43406bSChris Wilson if (de_iir) { 1496f1af8fc1SPaulo Zanoni if (INTEL_INFO(dev)->gen >= 7) 14979719fb98SPaulo Zanoni ivb_display_irq_handler(dev, de_iir); 1498f1af8fc1SPaulo Zanoni else 1499f1af8fc1SPaulo Zanoni ilk_display_irq_handler(dev, de_iir); 15000e43406bSChris Wilson I915_WRITE(DEIIR, de_iir); 15010e43406bSChris Wilson ret = IRQ_HANDLED; 15020e43406bSChris Wilson } 15030e43406bSChris Wilson 1504f1af8fc1SPaulo Zanoni if (INTEL_INFO(dev)->gen >= 6) { 1505f1af8fc1SPaulo Zanoni u32 pm_iir = I915_READ(GEN6_PMIIR); 15060e43406bSChris Wilson if (pm_iir) { 1507d0ecd7e2SDaniel Vetter gen6_rps_irq_handler(dev_priv, pm_iir); 1508b1f14ad0SJesse Barnes I915_WRITE(GEN6_PMIIR, pm_iir); 15090e43406bSChris Wilson ret = IRQ_HANDLED; 15100e43406bSChris Wilson } 1511f1af8fc1SPaulo Zanoni } 1512b1f14ad0SJesse Barnes 1513b1f14ad0SJesse Barnes I915_WRITE(DEIER, de_ier); 1514b1f14ad0SJesse Barnes POSTING_READ(DEIER); 1515ab5c608bSBen Widawsky if (!HAS_PCH_NOP(dev)) { 151644498aeaSPaulo Zanoni I915_WRITE(SDEIER, sde_ier); 151744498aeaSPaulo Zanoni POSTING_READ(SDEIER); 1518ab5c608bSBen Widawsky } 1519b1f14ad0SJesse Barnes 1520b1f14ad0SJesse Barnes return ret; 1521b1f14ad0SJesse Barnes } 1522b1f14ad0SJesse Barnes 152317e1df07SDaniel Vetter static void i915_error_wake_up(struct drm_i915_private *dev_priv, 152417e1df07SDaniel Vetter bool reset_completed) 152517e1df07SDaniel Vetter { 152617e1df07SDaniel Vetter struct intel_ring_buffer *ring; 152717e1df07SDaniel Vetter int i; 152817e1df07SDaniel Vetter 152917e1df07SDaniel Vetter /* 153017e1df07SDaniel Vetter * Notify all waiters for GPU completion events that reset state has 153117e1df07SDaniel Vetter * been changed, and that they need to restart their wait after 153217e1df07SDaniel Vetter * checking for potential errors (and bail out to drop locks if there is 153317e1df07SDaniel Vetter * a gpu reset pending so that i915_error_work_func can acquire them). 153417e1df07SDaniel Vetter */ 153517e1df07SDaniel Vetter 153617e1df07SDaniel Vetter /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */ 153717e1df07SDaniel Vetter for_each_ring(ring, dev_priv, i) 153817e1df07SDaniel Vetter wake_up_all(&ring->irq_queue); 153917e1df07SDaniel Vetter 154017e1df07SDaniel Vetter /* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */ 154117e1df07SDaniel Vetter wake_up_all(&dev_priv->pending_flip_queue); 154217e1df07SDaniel Vetter 154317e1df07SDaniel Vetter /* 154417e1df07SDaniel Vetter * Signal tasks blocked in i915_gem_wait_for_error that the pending 154517e1df07SDaniel Vetter * reset state is cleared. 
154617e1df07SDaniel Vetter */ 154717e1df07SDaniel Vetter if (reset_completed) 154817e1df07SDaniel Vetter wake_up_all(&dev_priv->gpu_error.reset_queue); 154917e1df07SDaniel Vetter } 155017e1df07SDaniel Vetter 15518a905236SJesse Barnes /** 15528a905236SJesse Barnes * i915_error_work_func - do process context error handling work 15538a905236SJesse Barnes * @work: work struct 15548a905236SJesse Barnes * 15558a905236SJesse Barnes * Fire an error uevent so userspace can see that a hang or error 15568a905236SJesse Barnes * was detected. 15578a905236SJesse Barnes */ 15588a905236SJesse Barnes static void i915_error_work_func(struct work_struct *work) 15598a905236SJesse Barnes { 15601f83fee0SDaniel Vetter struct i915_gpu_error *error = container_of(work, struct i915_gpu_error, 15611f83fee0SDaniel Vetter work); 15621f83fee0SDaniel Vetter drm_i915_private_t *dev_priv = container_of(error, drm_i915_private_t, 15631f83fee0SDaniel Vetter gpu_error); 15648a905236SJesse Barnes struct drm_device *dev = dev_priv->dev; 1565cce723edSBen Widawsky char *error_event[] = { I915_ERROR_UEVENT "=1", NULL }; 1566cce723edSBen Widawsky char *reset_event[] = { I915_RESET_UEVENT "=1", NULL }; 1567cce723edSBen Widawsky char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL }; 156817e1df07SDaniel Vetter int ret; 15698a905236SJesse Barnes 1570f316a42cSBen Gamari kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event); 15718a905236SJesse Barnes 15727db0ba24SDaniel Vetter /* 15737db0ba24SDaniel Vetter * Note that there's only one work item which does gpu resets, so we 15747db0ba24SDaniel Vetter * need not worry about concurrent gpu resets potentially incrementing 15757db0ba24SDaniel Vetter * error->reset_counter twice. We only need to take care of another 15767db0ba24SDaniel Vetter * racing irq/hangcheck declaring the gpu dead for a second time. A 15777db0ba24SDaniel Vetter * quick check for that is good enough: schedule_work ensures the 15787db0ba24SDaniel Vetter * correct ordering between hang detection and this work item, and since 15797db0ba24SDaniel Vetter * the reset in-progress bit is only ever set by code outside of this 15807db0ba24SDaniel Vetter * work we don't need to worry about any other races. 15817db0ba24SDaniel Vetter */ 15827db0ba24SDaniel Vetter if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) { 158344d98a61SZhao Yakui DRM_DEBUG_DRIVER("resetting chip\n"); 15847db0ba24SDaniel Vetter kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, 15857db0ba24SDaniel Vetter reset_event); 15861f83fee0SDaniel Vetter 158717e1df07SDaniel Vetter /* 158817e1df07SDaniel Vetter * All state reset _must_ be completed before we update the 158917e1df07SDaniel Vetter * reset counter, for otherwise waiters might miss the reset 159017e1df07SDaniel Vetter * pending state and not properly drop locks, resulting in 159117e1df07SDaniel Vetter * deadlocks with the reset work. 159217e1df07SDaniel Vetter */ 1593f69061beSDaniel Vetter ret = i915_reset(dev); 1594f69061beSDaniel Vetter 159517e1df07SDaniel Vetter intel_display_handle_reset(dev); 159617e1df07SDaniel Vetter 1597f69061beSDaniel Vetter if (ret == 0) { 1598f69061beSDaniel Vetter /* 1599f69061beSDaniel Vetter * After all the gem state is reset, increment the reset 1600f69061beSDaniel Vetter * counter and wake up everyone waiting for the reset to 1601f69061beSDaniel Vetter * complete. 
1602f69061beSDaniel Vetter * 1603f69061beSDaniel Vetter * Since unlock operations are a one-sided barrier only, 1604f69061beSDaniel Vetter * we need to insert a barrier here to order any seqno 1605f69061beSDaniel Vetter * updates before 1606f69061beSDaniel Vetter * the counter increment. 1607f69061beSDaniel Vetter */ 1608f69061beSDaniel Vetter smp_mb__before_atomic_inc(); 1609f69061beSDaniel Vetter atomic_inc(&dev_priv->gpu_error.reset_counter); 1610f69061beSDaniel Vetter 1611f69061beSDaniel Vetter kobject_uevent_env(&dev->primary->kdev.kobj, 1612f69061beSDaniel Vetter KOBJ_CHANGE, reset_done_event); 16131f83fee0SDaniel Vetter } else { 16141f83fee0SDaniel Vetter atomic_set(&error->reset_counter, I915_WEDGED); 1615f316a42cSBen Gamari } 16161f83fee0SDaniel Vetter 161717e1df07SDaniel Vetter /* 161817e1df07SDaniel Vetter * Note: The wake_up also serves as a memory barrier so that 161917e1df07SDaniel Vetter * waiters see the update value of the reset counter atomic_t. 162017e1df07SDaniel Vetter */ 162117e1df07SDaniel Vetter i915_error_wake_up(dev_priv, true); 1622f316a42cSBen Gamari } 16238a905236SJesse Barnes } 16248a905236SJesse Barnes 162535aed2e6SChris Wilson static void i915_report_and_clear_eir(struct drm_device *dev) 1626c0e09200SDave Airlie { 16278a905236SJesse Barnes struct drm_i915_private *dev_priv = dev->dev_private; 1628bd9854f9SBen Widawsky uint32_t instdone[I915_NUM_INSTDONE_REG]; 162963eeaf38SJesse Barnes u32 eir = I915_READ(EIR); 1630050ee91fSBen Widawsky int pipe, i; 163163eeaf38SJesse Barnes 163235aed2e6SChris Wilson if (!eir) 163335aed2e6SChris Wilson return; 163463eeaf38SJesse Barnes 1635a70491ccSJoe Perches pr_err("render error detected, EIR: 0x%08x\n", eir); 16368a905236SJesse Barnes 1637bd9854f9SBen Widawsky i915_get_extra_instdone(dev, instdone); 1638bd9854f9SBen Widawsky 16398a905236SJesse Barnes if (IS_G4X(dev)) { 16408a905236SJesse Barnes if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) { 16418a905236SJesse Barnes u32 ipeir = I915_READ(IPEIR_I965); 16428a905236SJesse Barnes 1643a70491ccSJoe Perches pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); 1644a70491ccSJoe Perches pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); 1645050ee91fSBen Widawsky for (i = 0; i < ARRAY_SIZE(instdone); i++) 1646050ee91fSBen Widawsky pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); 1647a70491ccSJoe Perches pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); 1648a70491ccSJoe Perches pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); 16498a905236SJesse Barnes I915_WRITE(IPEIR_I965, ipeir); 16503143a2bfSChris Wilson POSTING_READ(IPEIR_I965); 16518a905236SJesse Barnes } 16528a905236SJesse Barnes if (eir & GM45_ERROR_PAGE_TABLE) { 16538a905236SJesse Barnes u32 pgtbl_err = I915_READ(PGTBL_ER); 1654a70491ccSJoe Perches pr_err("page table error\n"); 1655a70491ccSJoe Perches pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err); 16568a905236SJesse Barnes I915_WRITE(PGTBL_ER, pgtbl_err); 16573143a2bfSChris Wilson POSTING_READ(PGTBL_ER); 16588a905236SJesse Barnes } 16598a905236SJesse Barnes } 16608a905236SJesse Barnes 1661a6c45cf0SChris Wilson if (!IS_GEN2(dev)) { 166263eeaf38SJesse Barnes if (eir & I915_ERROR_PAGE_TABLE) { 166363eeaf38SJesse Barnes u32 pgtbl_err = I915_READ(PGTBL_ER); 1664a70491ccSJoe Perches pr_err("page table error\n"); 1665a70491ccSJoe Perches pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err); 166663eeaf38SJesse Barnes I915_WRITE(PGTBL_ER, pgtbl_err); 16673143a2bfSChris Wilson POSTING_READ(PGTBL_ER); 166863eeaf38SJesse Barnes } 16698a905236SJesse Barnes } 16708a905236SJesse Barnes 
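/*
 * The reset handshake in i915_error_work_func() above is a small counter
 * protocol: i915_handle_error() sets I915_RESET_IN_PROGRESS_FLAG in
 * gpu_error.reset_counter and wakes every waiter, the work function performs
 * the reset, and only then issues smp_mb__before_atomic_inc() + atomic_inc(),
 * so a waiter that rechecks the counter cannot observe the "reset completed"
 * value without also observing the post-reset state (seqno updates and so
 * on). A failed reset parks the counter at I915_WEDGED so waiters give up
 * instead of retrying forever.
 */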
167163eeaf38SJesse Barnes if (eir & I915_ERROR_MEMORY_REFRESH) { 1672a70491ccSJoe Perches pr_err("memory refresh error:\n"); 16739db4a9c7SJesse Barnes for_each_pipe(pipe) 1674a70491ccSJoe Perches pr_err("pipe %c stat: 0x%08x\n", 16759db4a9c7SJesse Barnes pipe_name(pipe), I915_READ(PIPESTAT(pipe))); 167663eeaf38SJesse Barnes /* pipestat has already been acked */ 167763eeaf38SJesse Barnes } 167863eeaf38SJesse Barnes if (eir & I915_ERROR_INSTRUCTION) { 1679a70491ccSJoe Perches pr_err("instruction error\n"); 1680a70491ccSJoe Perches pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM)); 1681050ee91fSBen Widawsky for (i = 0; i < ARRAY_SIZE(instdone); i++) 1682050ee91fSBen Widawsky pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); 1683a6c45cf0SChris Wilson if (INTEL_INFO(dev)->gen < 4) { 168463eeaf38SJesse Barnes u32 ipeir = I915_READ(IPEIR); 168563eeaf38SJesse Barnes 1686a70491ccSJoe Perches pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR)); 1687a70491ccSJoe Perches pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR)); 1688a70491ccSJoe Perches pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD)); 168963eeaf38SJesse Barnes I915_WRITE(IPEIR, ipeir); 16903143a2bfSChris Wilson POSTING_READ(IPEIR); 169163eeaf38SJesse Barnes } else { 169263eeaf38SJesse Barnes u32 ipeir = I915_READ(IPEIR_I965); 169363eeaf38SJesse Barnes 1694a70491ccSJoe Perches pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); 1695a70491ccSJoe Perches pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); 1696a70491ccSJoe Perches pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); 1697a70491ccSJoe Perches pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); 169863eeaf38SJesse Barnes I915_WRITE(IPEIR_I965, ipeir); 16993143a2bfSChris Wilson POSTING_READ(IPEIR_I965); 170063eeaf38SJesse Barnes } 170163eeaf38SJesse Barnes } 170263eeaf38SJesse Barnes 170363eeaf38SJesse Barnes I915_WRITE(EIR, eir); 17043143a2bfSChris Wilson POSTING_READ(EIR); 170563eeaf38SJesse Barnes eir = I915_READ(EIR); 170663eeaf38SJesse Barnes if (eir) { 170763eeaf38SJesse Barnes /* 170863eeaf38SJesse Barnes * some errors might have become stuck, 170963eeaf38SJesse Barnes * mask them. 171063eeaf38SJesse Barnes */ 171163eeaf38SJesse Barnes DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir); 171263eeaf38SJesse Barnes I915_WRITE(EMR, I915_READ(EMR) | eir); 171363eeaf38SJesse Barnes I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 171463eeaf38SJesse Barnes } 171535aed2e6SChris Wilson } 171635aed2e6SChris Wilson 171735aed2e6SChris Wilson /** 171835aed2e6SChris Wilson * i915_handle_error - handle an error interrupt 171935aed2e6SChris Wilson * @dev: drm device 172035aed2e6SChris Wilson * 172135aed2e6SChris Wilson * Do some basic checking of regsiter state at error interrupt time and 172235aed2e6SChris Wilson * dump it to the syslog. Also call i915_capture_error_state() to make 172335aed2e6SChris Wilson * sure we get a record and make it available in debugfs. Fire a uevent 172435aed2e6SChris Wilson * so userspace knows something bad happened (should trigger collection 172535aed2e6SChris Wilson * of a ring dump etc.). 
172635aed2e6SChris Wilson */ 1727527f9e90SChris Wilson void i915_handle_error(struct drm_device *dev, bool wedged) 172835aed2e6SChris Wilson { 172935aed2e6SChris Wilson struct drm_i915_private *dev_priv = dev->dev_private; 173035aed2e6SChris Wilson 173135aed2e6SChris Wilson i915_capture_error_state(dev); 173235aed2e6SChris Wilson i915_report_and_clear_eir(dev); 17338a905236SJesse Barnes 1734ba1234d1SBen Gamari if (wedged) { 1735f69061beSDaniel Vetter atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG, 1736f69061beSDaniel Vetter &dev_priv->gpu_error.reset_counter); 1737ba1234d1SBen Gamari 173811ed50ecSBen Gamari /* 173917e1df07SDaniel Vetter * Wakeup waiting processes so that the reset work function 174017e1df07SDaniel Vetter * i915_error_work_func doesn't deadlock trying to grab various 174117e1df07SDaniel Vetter * locks. By bumping the reset counter first, the woken 174217e1df07SDaniel Vetter * processes will see a reset in progress and back off, 174317e1df07SDaniel Vetter * releasing their locks and then wait for the reset completion. 174417e1df07SDaniel Vetter * We must do this for _all_ gpu waiters that might hold locks 174517e1df07SDaniel Vetter * that the reset work needs to acquire. 174617e1df07SDaniel Vetter * 174717e1df07SDaniel Vetter * Note: The wake_up serves as the required memory barrier to 174817e1df07SDaniel Vetter * ensure that the waiters see the updated value of the reset 174917e1df07SDaniel Vetter * counter atomic_t. 175011ed50ecSBen Gamari */ 175117e1df07SDaniel Vetter i915_error_wake_up(dev_priv, false); 175211ed50ecSBen Gamari } 175311ed50ecSBen Gamari 1754122f46baSDaniel Vetter /* 1755122f46baSDaniel Vetter * Our reset work can grab modeset locks (since it needs to reset the 1756122f46baSDaniel Vetter * state of outstanding pagelips). Hence it must not be run on our own 1757122f46baSDaniel Vetter * dev-priv->wq work queue for otherwise the flush_work in the pageflip 1758122f46baSDaniel Vetter * code will deadlock. 1759122f46baSDaniel Vetter */ 1760122f46baSDaniel Vetter schedule_work(&dev_priv->gpu_error.work); 17618a905236SJesse Barnes } 17628a905236SJesse Barnes 176321ad8330SVille Syrjälä static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe) 17644e5359cdSSimon Farnsworth { 17654e5359cdSSimon Farnsworth drm_i915_private_t *dev_priv = dev->dev_private; 17664e5359cdSSimon Farnsworth struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 17674e5359cdSSimon Farnsworth struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 176805394f39SChris Wilson struct drm_i915_gem_object *obj; 17694e5359cdSSimon Farnsworth struct intel_unpin_work *work; 17704e5359cdSSimon Farnsworth unsigned long flags; 17714e5359cdSSimon Farnsworth bool stall_detected; 17724e5359cdSSimon Farnsworth 17734e5359cdSSimon Farnsworth /* Ignore early vblank irqs */ 17744e5359cdSSimon Farnsworth if (intel_crtc == NULL) 17754e5359cdSSimon Farnsworth return; 17764e5359cdSSimon Farnsworth 17774e5359cdSSimon Farnsworth spin_lock_irqsave(&dev->event_lock, flags); 17784e5359cdSSimon Farnsworth work = intel_crtc->unpin_work; 17794e5359cdSSimon Farnsworth 1780e7d841caSChris Wilson if (work == NULL || 1781e7d841caSChris Wilson atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE || 1782e7d841caSChris Wilson !work->enable_stall_check) { 17834e5359cdSSimon Farnsworth /* Either the pending flip IRQ arrived, or we're too early. 
Don't check */ 17844e5359cdSSimon Farnsworth spin_unlock_irqrestore(&dev->event_lock, flags); 17854e5359cdSSimon Farnsworth return; 17864e5359cdSSimon Farnsworth } 17874e5359cdSSimon Farnsworth 17884e5359cdSSimon Farnsworth /* Potential stall - if we see that the flip has happened, assume a missed interrupt */ 178905394f39SChris Wilson obj = work->pending_flip_obj; 1790a6c45cf0SChris Wilson if (INTEL_INFO(dev)->gen >= 4) { 17919db4a9c7SJesse Barnes int dspsurf = DSPSURF(intel_crtc->plane); 1792446f2545SArmin Reese stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) == 1793f343c5f6SBen Widawsky i915_gem_obj_ggtt_offset(obj); 17944e5359cdSSimon Farnsworth } else { 17959db4a9c7SJesse Barnes int dspaddr = DSPADDR(intel_crtc->plane); 1796f343c5f6SBen Widawsky stall_detected = I915_READ(dspaddr) == (i915_gem_obj_ggtt_offset(obj) + 179701f2c773SVille Syrjälä crtc->y * crtc->fb->pitches[0] + 17984e5359cdSSimon Farnsworth crtc->x * crtc->fb->bits_per_pixel/8); 17994e5359cdSSimon Farnsworth } 18004e5359cdSSimon Farnsworth 18014e5359cdSSimon Farnsworth spin_unlock_irqrestore(&dev->event_lock, flags); 18024e5359cdSSimon Farnsworth 18034e5359cdSSimon Farnsworth if (stall_detected) { 18044e5359cdSSimon Farnsworth DRM_DEBUG_DRIVER("Pageflip stall detected\n"); 18054e5359cdSSimon Farnsworth intel_prepare_page_flip(dev, intel_crtc->plane); 18064e5359cdSSimon Farnsworth } 18074e5359cdSSimon Farnsworth } 18084e5359cdSSimon Farnsworth 180942f52ef8SKeith Packard /* Called from drm generic code, passed 'crtc' which 181042f52ef8SKeith Packard * we use as a pipe index 181142f52ef8SKeith Packard */ 1812f71d4af4SJesse Barnes static int i915_enable_vblank(struct drm_device *dev, int pipe) 18130a3e67a4SJesse Barnes { 18140a3e67a4SJesse Barnes drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1815e9d21d7fSKeith Packard unsigned long irqflags; 181671e0ffa5SJesse Barnes 18175eddb70bSChris Wilson if (!i915_pipe_enabled(dev, pipe)) 181871e0ffa5SJesse Barnes return -EINVAL; 18190a3e67a4SJesse Barnes 18201ec14ad3SChris Wilson spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1821f796cf8fSJesse Barnes if (INTEL_INFO(dev)->gen >= 4) 18227c463586SKeith Packard i915_enable_pipestat(dev_priv, pipe, 18237c463586SKeith Packard PIPE_START_VBLANK_INTERRUPT_ENABLE); 18240a3e67a4SJesse Barnes else 18257c463586SKeith Packard i915_enable_pipestat(dev_priv, pipe, 18267c463586SKeith Packard PIPE_VBLANK_INTERRUPT_ENABLE); 18278692d00eSChris Wilson 18288692d00eSChris Wilson /* maintain vblank delivery even in deep C-states */ 18298692d00eSChris Wilson if (dev_priv->info->gen == 3) 18306b26c86dSDaniel Vetter I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS)); 18311ec14ad3SChris Wilson spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 18328692d00eSChris Wilson 18330a3e67a4SJesse Barnes return 0; 18340a3e67a4SJesse Barnes } 18350a3e67a4SJesse Barnes 1836f71d4af4SJesse Barnes static int ironlake_enable_vblank(struct drm_device *dev, int pipe) 1837f796cf8fSJesse Barnes { 1838f796cf8fSJesse Barnes drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1839f796cf8fSJesse Barnes unsigned long irqflags; 1840b518421fSPaulo Zanoni uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? 
DE_PIPE_VBLANK_IVB(pipe) : 1841b518421fSPaulo Zanoni DE_PIPE_VBLANK_ILK(pipe); 1842f796cf8fSJesse Barnes 1843f796cf8fSJesse Barnes if (!i915_pipe_enabled(dev, pipe)) 1844f796cf8fSJesse Barnes return -EINVAL; 1845f796cf8fSJesse Barnes 1846f796cf8fSJesse Barnes spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1847b518421fSPaulo Zanoni ironlake_enable_display_irq(dev_priv, bit); 1848b1f14ad0SJesse Barnes spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1849b1f14ad0SJesse Barnes 1850b1f14ad0SJesse Barnes return 0; 1851b1f14ad0SJesse Barnes } 1852b1f14ad0SJesse Barnes 18537e231dbeSJesse Barnes static int valleyview_enable_vblank(struct drm_device *dev, int pipe) 18547e231dbeSJesse Barnes { 18557e231dbeSJesse Barnes drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 18567e231dbeSJesse Barnes unsigned long irqflags; 185731acc7f5SJesse Barnes u32 imr; 18587e231dbeSJesse Barnes 18597e231dbeSJesse Barnes if (!i915_pipe_enabled(dev, pipe)) 18607e231dbeSJesse Barnes return -EINVAL; 18617e231dbeSJesse Barnes 18627e231dbeSJesse Barnes spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 18637e231dbeSJesse Barnes imr = I915_READ(VLV_IMR); 186431acc7f5SJesse Barnes if (pipe == 0) 18657e231dbeSJesse Barnes imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT; 186631acc7f5SJesse Barnes else 18677e231dbeSJesse Barnes imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; 18687e231dbeSJesse Barnes I915_WRITE(VLV_IMR, imr); 186931acc7f5SJesse Barnes i915_enable_pipestat(dev_priv, pipe, 187031acc7f5SJesse Barnes PIPE_START_VBLANK_INTERRUPT_ENABLE); 18717e231dbeSJesse Barnes spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 18727e231dbeSJesse Barnes 18737e231dbeSJesse Barnes return 0; 18747e231dbeSJesse Barnes } 18757e231dbeSJesse Barnes 187642f52ef8SKeith Packard /* Called from drm generic code, passed 'crtc' which 187742f52ef8SKeith Packard * we use as a pipe index 187842f52ef8SKeith Packard */ 1879f71d4af4SJesse Barnes static void i915_disable_vblank(struct drm_device *dev, int pipe) 18800a3e67a4SJesse Barnes { 18810a3e67a4SJesse Barnes drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1882e9d21d7fSKeith Packard unsigned long irqflags; 18830a3e67a4SJesse Barnes 18841ec14ad3SChris Wilson spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 18858692d00eSChris Wilson if (dev_priv->info->gen == 3) 18866b26c86dSDaniel Vetter I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS)); 18878692d00eSChris Wilson 18887c463586SKeith Packard i915_disable_pipestat(dev_priv, pipe, 18897c463586SKeith Packard PIPE_VBLANK_INTERRUPT_ENABLE | 18907c463586SKeith Packard PIPE_START_VBLANK_INTERRUPT_ENABLE); 18911ec14ad3SChris Wilson spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 18920a3e67a4SJesse Barnes } 18930a3e67a4SJesse Barnes 1894f71d4af4SJesse Barnes static void ironlake_disable_vblank(struct drm_device *dev, int pipe) 1895f796cf8fSJesse Barnes { 1896f796cf8fSJesse Barnes drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1897f796cf8fSJesse Barnes unsigned long irqflags; 1898b518421fSPaulo Zanoni uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? 
DE_PIPE_VBLANK_IVB(pipe) : 1899b518421fSPaulo Zanoni DE_PIPE_VBLANK_ILK(pipe); 1900f796cf8fSJesse Barnes 1901f796cf8fSJesse Barnes spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1902b518421fSPaulo Zanoni ironlake_disable_display_irq(dev_priv, bit); 1903b1f14ad0SJesse Barnes spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1904b1f14ad0SJesse Barnes } 1905b1f14ad0SJesse Barnes 19067e231dbeSJesse Barnes static void valleyview_disable_vblank(struct drm_device *dev, int pipe) 19077e231dbeSJesse Barnes { 19087e231dbeSJesse Barnes drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 19097e231dbeSJesse Barnes unsigned long irqflags; 191031acc7f5SJesse Barnes u32 imr; 19117e231dbeSJesse Barnes 19127e231dbeSJesse Barnes spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 191331acc7f5SJesse Barnes i915_disable_pipestat(dev_priv, pipe, 191431acc7f5SJesse Barnes PIPE_START_VBLANK_INTERRUPT_ENABLE); 19157e231dbeSJesse Barnes imr = I915_READ(VLV_IMR); 191631acc7f5SJesse Barnes if (pipe == 0) 19177e231dbeSJesse Barnes imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT; 191831acc7f5SJesse Barnes else 19197e231dbeSJesse Barnes imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; 19207e231dbeSJesse Barnes I915_WRITE(VLV_IMR, imr); 19217e231dbeSJesse Barnes spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 19227e231dbeSJesse Barnes } 19237e231dbeSJesse Barnes 1924893eead0SChris Wilson static u32 1925893eead0SChris Wilson ring_last_seqno(struct intel_ring_buffer *ring) 1926852835f3SZou Nan hai { 1927893eead0SChris Wilson return list_entry(ring->request_list.prev, 1928893eead0SChris Wilson struct drm_i915_gem_request, list)->seqno; 1929893eead0SChris Wilson } 1930893eead0SChris Wilson 19319107e9d2SChris Wilson static bool 19329107e9d2SChris Wilson ring_idle(struct intel_ring_buffer *ring, u32 seqno) 1933893eead0SChris Wilson { 19349107e9d2SChris Wilson return (list_empty(&ring->request_list) || 19359107e9d2SChris Wilson i915_seqno_passed(seqno, ring_last_seqno(ring))); 1936f65d9421SBen Gamari } 1937f65d9421SBen Gamari 19386274f212SChris Wilson static struct intel_ring_buffer * 19396274f212SChris Wilson semaphore_waits_for(struct intel_ring_buffer *ring, u32 *seqno) 1940a24a11e6SChris Wilson { 1941a24a11e6SChris Wilson struct drm_i915_private *dev_priv = ring->dev->dev_private; 19426274f212SChris Wilson u32 cmd, ipehr, acthd, acthd_min; 1943a24a11e6SChris Wilson 1944a24a11e6SChris Wilson ipehr = I915_READ(RING_IPEHR(ring->mmio_base)); 1945a24a11e6SChris Wilson if ((ipehr & ~(0x3 << 16)) != 1946a24a11e6SChris Wilson (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | MI_SEMAPHORE_REGISTER)) 19476274f212SChris Wilson return NULL; 1948a24a11e6SChris Wilson 1949a24a11e6SChris Wilson /* ACTHD is likely pointing to the dword after the actual command, 1950a24a11e6SChris Wilson * so scan backwards until we find the MBOX. 
1951a24a11e6SChris Wilson */ 19526274f212SChris Wilson acthd = intel_ring_get_active_head(ring) & HEAD_ADDR; 1953a24a11e6SChris Wilson acthd_min = max((int)acthd - 3 * 4, 0); 1954a24a11e6SChris Wilson do { 1955a24a11e6SChris Wilson cmd = ioread32(ring->virtual_start + acthd); 1956a24a11e6SChris Wilson if (cmd == ipehr) 1957a24a11e6SChris Wilson break; 1958a24a11e6SChris Wilson 1959a24a11e6SChris Wilson acthd -= 4; 1960a24a11e6SChris Wilson if (acthd < acthd_min) 19616274f212SChris Wilson return NULL; 1962a24a11e6SChris Wilson } while (1); 1963a24a11e6SChris Wilson 19646274f212SChris Wilson *seqno = ioread32(ring->virtual_start+acthd+4)+1; 19656274f212SChris Wilson return &dev_priv->ring[(ring->id + (((ipehr >> 17) & 1) + 1)) % 3]; 1966a24a11e6SChris Wilson } 1967a24a11e6SChris Wilson 19686274f212SChris Wilson static int semaphore_passed(struct intel_ring_buffer *ring) 19696274f212SChris Wilson { 19706274f212SChris Wilson struct drm_i915_private *dev_priv = ring->dev->dev_private; 19716274f212SChris Wilson struct intel_ring_buffer *signaller; 19726274f212SChris Wilson u32 seqno, ctl; 19736274f212SChris Wilson 19746274f212SChris Wilson ring->hangcheck.deadlock = true; 19756274f212SChris Wilson 19766274f212SChris Wilson signaller = semaphore_waits_for(ring, &seqno); 19776274f212SChris Wilson if (signaller == NULL || signaller->hangcheck.deadlock) 19786274f212SChris Wilson return -1; 19796274f212SChris Wilson 19806274f212SChris Wilson /* cursory check for an unkickable deadlock */ 19816274f212SChris Wilson ctl = I915_READ_CTL(signaller); 19826274f212SChris Wilson if (ctl & RING_WAIT_SEMAPHORE && semaphore_passed(signaller) < 0) 19836274f212SChris Wilson return -1; 19846274f212SChris Wilson 19856274f212SChris Wilson return i915_seqno_passed(signaller->get_seqno(signaller, false), seqno); 19866274f212SChris Wilson } 19876274f212SChris Wilson 19886274f212SChris Wilson static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv) 19896274f212SChris Wilson { 19906274f212SChris Wilson struct intel_ring_buffer *ring; 19916274f212SChris Wilson int i; 19926274f212SChris Wilson 19936274f212SChris Wilson for_each_ring(ring, dev_priv, i) 19946274f212SChris Wilson ring->hangcheck.deadlock = false; 19956274f212SChris Wilson } 19966274f212SChris Wilson 1997ad8beaeaSMika Kuoppala static enum intel_ring_hangcheck_action 1998ad8beaeaSMika Kuoppala ring_stuck(struct intel_ring_buffer *ring, u32 acthd) 19991ec14ad3SChris Wilson { 20001ec14ad3SChris Wilson struct drm_device *dev = ring->dev; 20011ec14ad3SChris Wilson struct drm_i915_private *dev_priv = dev->dev_private; 20029107e9d2SChris Wilson u32 tmp; 20039107e9d2SChris Wilson 20046274f212SChris Wilson if (ring->hangcheck.acthd != acthd) 2005f2f4d82fSJani Nikula return HANGCHECK_ACTIVE; 20066274f212SChris Wilson 20079107e9d2SChris Wilson if (IS_GEN2(dev)) 2008f2f4d82fSJani Nikula return HANGCHECK_HUNG; 20099107e9d2SChris Wilson 20109107e9d2SChris Wilson /* Is the chip hanging on a WAIT_FOR_EVENT? 20119107e9d2SChris Wilson * If so we can simply poke the RB_WAIT bit 20129107e9d2SChris Wilson * and break the hang. This should work on 20139107e9d2SChris Wilson * all but the second generation chipsets. 
20149107e9d2SChris Wilson */ 20159107e9d2SChris Wilson tmp = I915_READ_CTL(ring); 20161ec14ad3SChris Wilson if (tmp & RING_WAIT) { 20171ec14ad3SChris Wilson DRM_ERROR("Kicking stuck wait on %s\n", 20181ec14ad3SChris Wilson ring->name); 201909e14bf3SChris Wilson i915_handle_error(dev, false); 20201ec14ad3SChris Wilson I915_WRITE_CTL(ring, tmp); 2021f2f4d82fSJani Nikula return HANGCHECK_KICK; 20221ec14ad3SChris Wilson } 2023a24a11e6SChris Wilson 20246274f212SChris Wilson if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) { 20256274f212SChris Wilson switch (semaphore_passed(ring)) { 20266274f212SChris Wilson default: 2027f2f4d82fSJani Nikula return HANGCHECK_HUNG; 20286274f212SChris Wilson case 1: 2029a24a11e6SChris Wilson DRM_ERROR("Kicking stuck semaphore on %s\n", 2030a24a11e6SChris Wilson ring->name); 203109e14bf3SChris Wilson i915_handle_error(dev, false); 2032a24a11e6SChris Wilson I915_WRITE_CTL(ring, tmp); 2033f2f4d82fSJani Nikula return HANGCHECK_KICK; 20346274f212SChris Wilson case 0: 2035f2f4d82fSJani Nikula return HANGCHECK_WAIT; 20366274f212SChris Wilson } 20379107e9d2SChris Wilson } 20389107e9d2SChris Wilson 2039f2f4d82fSJani Nikula return HANGCHECK_HUNG; 2040a24a11e6SChris Wilson } 2041d1e61e7fSChris Wilson 2042f65d9421SBen Gamari /** 2043f65d9421SBen Gamari * This is called when the chip hasn't reported back with completed 204405407ff8SMika Kuoppala * batchbuffers in a long time. We keep track per ring seqno progress and 204505407ff8SMika Kuoppala * if there are no progress, hangcheck score for that ring is increased. 204605407ff8SMika Kuoppala * Further, acthd is inspected to see if the ring is stuck. On stuck case 204705407ff8SMika Kuoppala * we kick the ring. If we see no progress on three subsequent calls 204805407ff8SMika Kuoppala * we assume chip is wedged and try to fix it by resetting the chip. 2049f65d9421SBen Gamari */ 2050a658b5d2SDamien Lespiau static void i915_hangcheck_elapsed(unsigned long data) 2051f65d9421SBen Gamari { 2052f65d9421SBen Gamari struct drm_device *dev = (struct drm_device *)data; 2053f65d9421SBen Gamari drm_i915_private_t *dev_priv = dev->dev_private; 2054b4519513SChris Wilson struct intel_ring_buffer *ring; 2055b4519513SChris Wilson int i; 205605407ff8SMika Kuoppala int busy_count = 0, rings_hung = 0; 20579107e9d2SChris Wilson bool stuck[I915_NUM_RINGS] = { 0 }; 20589107e9d2SChris Wilson #define BUSY 1 20599107e9d2SChris Wilson #define KICK 5 20609107e9d2SChris Wilson #define HUNG 20 20619107e9d2SChris Wilson #define FIRE 30 2062893eead0SChris Wilson 20633e0dc6b0SBen Widawsky if (!i915_enable_hangcheck) 20643e0dc6b0SBen Widawsky return; 20653e0dc6b0SBen Widawsky 2066b4519513SChris Wilson for_each_ring(ring, dev_priv, i) { 206705407ff8SMika Kuoppala u32 seqno, acthd; 20689107e9d2SChris Wilson bool busy = true; 2069b4519513SChris Wilson 20706274f212SChris Wilson semaphore_clear_deadlocks(dev_priv); 20716274f212SChris Wilson 207205407ff8SMika Kuoppala seqno = ring->get_seqno(ring, false); 207305407ff8SMika Kuoppala acthd = intel_ring_get_active_head(ring); 207405407ff8SMika Kuoppala 207505407ff8SMika Kuoppala if (ring->hangcheck.seqno == seqno) { 20769107e9d2SChris Wilson if (ring_idle(ring, seqno)) { 2077da661464SMika Kuoppala ring->hangcheck.action = HANGCHECK_IDLE; 2078da661464SMika Kuoppala 20799107e9d2SChris Wilson if (waitqueue_active(&ring->irq_queue)) { 20809107e9d2SChris Wilson /* Issue a wake-up to catch stuck h/w. 
*/ 2081094f9a54SChris Wilson if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) { 20829107e9d2SChris Wilson DRM_ERROR("Hangcheck timer elapsed... %s idle\n", 20839107e9d2SChris Wilson ring->name); 20849107e9d2SChris Wilson wake_up_all(&ring->irq_queue); 2085094f9a54SChris Wilson } 2086094f9a54SChris Wilson /* Safeguard against driver failure */ 2087094f9a54SChris Wilson ring->hangcheck.score += BUSY; 20889107e9d2SChris Wilson } else 20899107e9d2SChris Wilson busy = false; 209005407ff8SMika Kuoppala } else { 20916274f212SChris Wilson /* We always increment the hangcheck score 20926274f212SChris Wilson * if the ring is busy and still processing 20936274f212SChris Wilson * the same request, so that no single request 20946274f212SChris Wilson * can run indefinitely (such as a chain of 20956274f212SChris Wilson * batches). The only time we do not increment 20966274f212SChris Wilson * the hangcheck score on this ring, if this 20976274f212SChris Wilson * ring is in a legitimate wait for another 20986274f212SChris Wilson * ring. In that case the waiting ring is a 20996274f212SChris Wilson * victim and we want to be sure we catch the 21006274f212SChris Wilson * right culprit. Then every time we do kick 21016274f212SChris Wilson * the ring, add a small increment to the 21026274f212SChris Wilson * score so that we can catch a batch that is 21036274f212SChris Wilson * being repeatedly kicked and so responsible 21046274f212SChris Wilson * for stalling the machine. 21059107e9d2SChris Wilson */ 2106ad8beaeaSMika Kuoppala ring->hangcheck.action = ring_stuck(ring, 2107ad8beaeaSMika Kuoppala acthd); 2108ad8beaeaSMika Kuoppala 2109ad8beaeaSMika Kuoppala switch (ring->hangcheck.action) { 2110da661464SMika Kuoppala case HANGCHECK_IDLE: 2111f2f4d82fSJani Nikula case HANGCHECK_WAIT: 21126274f212SChris Wilson break; 2113f2f4d82fSJani Nikula case HANGCHECK_ACTIVE: 2114ea04cb31SJani Nikula ring->hangcheck.score += BUSY; 21156274f212SChris Wilson break; 2116f2f4d82fSJani Nikula case HANGCHECK_KICK: 2117ea04cb31SJani Nikula ring->hangcheck.score += KICK; 21186274f212SChris Wilson break; 2119f2f4d82fSJani Nikula case HANGCHECK_HUNG: 2120ea04cb31SJani Nikula ring->hangcheck.score += HUNG; 21216274f212SChris Wilson stuck[i] = true; 21226274f212SChris Wilson break; 21236274f212SChris Wilson } 212405407ff8SMika Kuoppala } 21259107e9d2SChris Wilson } else { 2126da661464SMika Kuoppala ring->hangcheck.action = HANGCHECK_ACTIVE; 2127da661464SMika Kuoppala 21289107e9d2SChris Wilson /* Gradually reduce the count so that we catch DoS 21299107e9d2SChris Wilson * attempts across multiple batches. 21309107e9d2SChris Wilson */ 21319107e9d2SChris Wilson if (ring->hangcheck.score > 0) 21329107e9d2SChris Wilson ring->hangcheck.score--; 2133cbb465e7SChris Wilson } 2134f65d9421SBen Gamari 213505407ff8SMika Kuoppala ring->hangcheck.seqno = seqno; 213605407ff8SMika Kuoppala ring->hangcheck.acthd = acthd; 21379107e9d2SChris Wilson busy_count += busy; 213805407ff8SMika Kuoppala } 213905407ff8SMika Kuoppala 214005407ff8SMika Kuoppala for_each_ring(ring, dev_priv, i) { 21419107e9d2SChris Wilson if (ring->hangcheck.score > FIRE) { 2142b8d88d1dSDaniel Vetter DRM_INFO("%s on %s\n", 214305407ff8SMika Kuoppala stuck[i] ? 
"stuck" : "no progress", 2144a43adf07SChris Wilson ring->name); 2145a43adf07SChris Wilson rings_hung++; 214605407ff8SMika Kuoppala } 214705407ff8SMika Kuoppala } 214805407ff8SMika Kuoppala 214905407ff8SMika Kuoppala if (rings_hung) 215005407ff8SMika Kuoppala return i915_handle_error(dev, true); 215105407ff8SMika Kuoppala 215205407ff8SMika Kuoppala if (busy_count) 215305407ff8SMika Kuoppala /* Reset timer case chip hangs without another request 215405407ff8SMika Kuoppala * being added */ 215510cd45b6SMika Kuoppala i915_queue_hangcheck(dev); 215610cd45b6SMika Kuoppala } 215710cd45b6SMika Kuoppala 215810cd45b6SMika Kuoppala void i915_queue_hangcheck(struct drm_device *dev) 215910cd45b6SMika Kuoppala { 216010cd45b6SMika Kuoppala struct drm_i915_private *dev_priv = dev->dev_private; 216110cd45b6SMika Kuoppala if (!i915_enable_hangcheck) 216210cd45b6SMika Kuoppala return; 216310cd45b6SMika Kuoppala 216499584db3SDaniel Vetter mod_timer(&dev_priv->gpu_error.hangcheck_timer, 216510cd45b6SMika Kuoppala round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES)); 2166f65d9421SBen Gamari } 2167f65d9421SBen Gamari 216891738a95SPaulo Zanoni static void ibx_irq_preinstall(struct drm_device *dev) 216991738a95SPaulo Zanoni { 217091738a95SPaulo Zanoni struct drm_i915_private *dev_priv = dev->dev_private; 217191738a95SPaulo Zanoni 217291738a95SPaulo Zanoni if (HAS_PCH_NOP(dev)) 217391738a95SPaulo Zanoni return; 217491738a95SPaulo Zanoni 217591738a95SPaulo Zanoni /* south display irq */ 217691738a95SPaulo Zanoni I915_WRITE(SDEIMR, 0xffffffff); 217791738a95SPaulo Zanoni /* 217891738a95SPaulo Zanoni * SDEIER is also touched by the interrupt handler to work around missed 217991738a95SPaulo Zanoni * PCH interrupts. Hence we can't update it after the interrupt handler 218091738a95SPaulo Zanoni * is enabled - instead we unconditionally enable all PCH interrupt 218191738a95SPaulo Zanoni * sources here, but then only unmask them as needed with SDEIMR. 
218291738a95SPaulo Zanoni */ 218391738a95SPaulo Zanoni I915_WRITE(SDEIER, 0xffffffff); 218491738a95SPaulo Zanoni POSTING_READ(SDEIER); 218591738a95SPaulo Zanoni } 218691738a95SPaulo Zanoni 2187d18ea1b5SDaniel Vetter static void gen5_gt_irq_preinstall(struct drm_device *dev) 2188d18ea1b5SDaniel Vetter { 2189d18ea1b5SDaniel Vetter struct drm_i915_private *dev_priv = dev->dev_private; 2190d18ea1b5SDaniel Vetter 2191d18ea1b5SDaniel Vetter /* and GT */ 2192d18ea1b5SDaniel Vetter I915_WRITE(GTIMR, 0xffffffff); 2193d18ea1b5SDaniel Vetter I915_WRITE(GTIER, 0x0); 2194d18ea1b5SDaniel Vetter POSTING_READ(GTIER); 2195d18ea1b5SDaniel Vetter 2196d18ea1b5SDaniel Vetter if (INTEL_INFO(dev)->gen >= 6) { 2197d18ea1b5SDaniel Vetter /* and PM */ 2198d18ea1b5SDaniel Vetter I915_WRITE(GEN6_PMIMR, 0xffffffff); 2199d18ea1b5SDaniel Vetter I915_WRITE(GEN6_PMIER, 0x0); 2200d18ea1b5SDaniel Vetter POSTING_READ(GEN6_PMIER); 2201d18ea1b5SDaniel Vetter } 2202d18ea1b5SDaniel Vetter } 2203d18ea1b5SDaniel Vetter 2204c0e09200SDave Airlie /* drm_dma.h hooks 2205c0e09200SDave Airlie */ 2206f71d4af4SJesse Barnes static void ironlake_irq_preinstall(struct drm_device *dev) 2207036a4a7dSZhenyu Wang { 2208036a4a7dSZhenyu Wang drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2209036a4a7dSZhenyu Wang 22104697995bSJesse Barnes atomic_set(&dev_priv->irq_received, 0); 22114697995bSJesse Barnes 2212036a4a7dSZhenyu Wang I915_WRITE(HWSTAM, 0xeffe); 2213bdfcdb63SDaniel Vetter 2214036a4a7dSZhenyu Wang I915_WRITE(DEIMR, 0xffffffff); 2215036a4a7dSZhenyu Wang I915_WRITE(DEIER, 0x0); 22163143a2bfSChris Wilson POSTING_READ(DEIER); 2217036a4a7dSZhenyu Wang 2218d18ea1b5SDaniel Vetter gen5_gt_irq_preinstall(dev); 2219c650156aSZhenyu Wang 222091738a95SPaulo Zanoni ibx_irq_preinstall(dev); 22217d99163dSBen Widawsky } 22227d99163dSBen Widawsky 22237e231dbeSJesse Barnes static void valleyview_irq_preinstall(struct drm_device *dev) 22247e231dbeSJesse Barnes { 22257e231dbeSJesse Barnes drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 22267e231dbeSJesse Barnes int pipe; 22277e231dbeSJesse Barnes 22287e231dbeSJesse Barnes atomic_set(&dev_priv->irq_received, 0); 22297e231dbeSJesse Barnes 22307e231dbeSJesse Barnes /* VLV magic */ 22317e231dbeSJesse Barnes I915_WRITE(VLV_IMR, 0); 22327e231dbeSJesse Barnes I915_WRITE(RING_IMR(RENDER_RING_BASE), 0); 22337e231dbeSJesse Barnes I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0); 22347e231dbeSJesse Barnes I915_WRITE(RING_IMR(BLT_RING_BASE), 0); 22357e231dbeSJesse Barnes 22367e231dbeSJesse Barnes /* and GT */ 22377e231dbeSJesse Barnes I915_WRITE(GTIIR, I915_READ(GTIIR)); 22387e231dbeSJesse Barnes I915_WRITE(GTIIR, I915_READ(GTIIR)); 2239d18ea1b5SDaniel Vetter 2240d18ea1b5SDaniel Vetter gen5_gt_irq_preinstall(dev); 22417e231dbeSJesse Barnes 22427e231dbeSJesse Barnes I915_WRITE(DPINVGTT, 0xff); 22437e231dbeSJesse Barnes 22447e231dbeSJesse Barnes I915_WRITE(PORT_HOTPLUG_EN, 0); 22457e231dbeSJesse Barnes I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 22467e231dbeSJesse Barnes for_each_pipe(pipe) 22477e231dbeSJesse Barnes I915_WRITE(PIPESTAT(pipe), 0xffff); 22487e231dbeSJesse Barnes I915_WRITE(VLV_IIR, 0xffffffff); 22497e231dbeSJesse Barnes I915_WRITE(VLV_IMR, 0xffffffff); 22507e231dbeSJesse Barnes I915_WRITE(VLV_IER, 0x0); 22517e231dbeSJesse Barnes POSTING_READ(VLV_IER); 22527e231dbeSJesse Barnes } 22537e231dbeSJesse Barnes 225482a28bcfSDaniel Vetter static void ibx_hpd_irq_setup(struct drm_device *dev) 225582a28bcfSDaniel Vetter { 225682a28bcfSDaniel Vetter 
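/*
 * In short, this helper walks the registered encoders and collects the
 * south-display hotplug bits for every pin currently marked HPD_ENABLED;
 * IBX and CPT/PPT lay those bits out differently, hence the separate
 * lookup tables used below.  The full trigger mask plus the enabled
 * subset is then handed to ibx_display_interrupt_update() before the
 * per-port pulse durations are programmed.
 */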
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 225782a28bcfSDaniel Vetter struct drm_mode_config *mode_config = &dev->mode_config; 225882a28bcfSDaniel Vetter struct intel_encoder *intel_encoder; 2259fee884edSDaniel Vetter u32 hotplug_irqs, hotplug, enabled_irqs = 0; 226082a28bcfSDaniel Vetter 226182a28bcfSDaniel Vetter if (HAS_PCH_IBX(dev)) { 2262fee884edSDaniel Vetter hotplug_irqs = SDE_HOTPLUG_MASK; 226382a28bcfSDaniel Vetter list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head) 2264cd569aedSEgbert Eich if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) 2265fee884edSDaniel Vetter enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin]; 226682a28bcfSDaniel Vetter } else { 2267fee884edSDaniel Vetter hotplug_irqs = SDE_HOTPLUG_MASK_CPT; 226882a28bcfSDaniel Vetter list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head) 2269cd569aedSEgbert Eich if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) 2270fee884edSDaniel Vetter enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin]; 227182a28bcfSDaniel Vetter } 227282a28bcfSDaniel Vetter 2273fee884edSDaniel Vetter ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 227482a28bcfSDaniel Vetter 22757fe0b973SKeith Packard /* 22767fe0b973SKeith Packard * Enable digital hotplug on the PCH, and configure the DP short pulse 22777fe0b973SKeith Packard * duration to 2ms (which is the minimum in the Display Port spec) 22787fe0b973SKeith Packard * 22797fe0b973SKeith Packard * This register is the same on all known PCH chips. 22807fe0b973SKeith Packard */ 22817fe0b973SKeith Packard hotplug = I915_READ(PCH_PORT_HOTPLUG); 22827fe0b973SKeith Packard hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK); 22837fe0b973SKeith Packard hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms; 22847fe0b973SKeith Packard hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms; 22857fe0b973SKeith Packard hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms; 22867fe0b973SKeith Packard I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 22877fe0b973SKeith Packard } 22887fe0b973SKeith Packard 2289d46da437SPaulo Zanoni static void ibx_irq_postinstall(struct drm_device *dev) 2290d46da437SPaulo Zanoni { 2291d46da437SPaulo Zanoni drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 229282a28bcfSDaniel Vetter u32 mask; 2293d46da437SPaulo Zanoni 2294692a04cfSDaniel Vetter if (HAS_PCH_NOP(dev)) 2295692a04cfSDaniel Vetter return; 2296692a04cfSDaniel Vetter 22978664281bSPaulo Zanoni if (HAS_PCH_IBX(dev)) { 22988664281bSPaulo Zanoni mask = SDE_GMBUS | SDE_AUX_MASK | SDE_TRANSB_FIFO_UNDER | 2299de032bf4SPaulo Zanoni SDE_TRANSA_FIFO_UNDER | SDE_POISON; 23008664281bSPaulo Zanoni } else { 23018664281bSPaulo Zanoni mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT | SDE_ERROR_CPT; 23028664281bSPaulo Zanoni 23038664281bSPaulo Zanoni I915_WRITE(SERR_INT, I915_READ(SERR_INT)); 23048664281bSPaulo Zanoni } 2305ab5c608bSBen Widawsky 2306d46da437SPaulo Zanoni I915_WRITE(SDEIIR, I915_READ(SDEIIR)); 2307d46da437SPaulo Zanoni I915_WRITE(SDEIMR, ~mask); 2308d46da437SPaulo Zanoni } 2309d46da437SPaulo Zanoni 23100a9a8c91SDaniel Vetter static void gen5_gt_irq_postinstall(struct drm_device *dev) 23110a9a8c91SDaniel Vetter { 23120a9a8c91SDaniel Vetter struct drm_i915_private *dev_priv = dev->dev_private; 23130a9a8c91SDaniel Vetter u32 pm_irqs, gt_irqs; 23140a9a8c91SDaniel Vetter 23150a9a8c91SDaniel Vetter pm_irqs = gt_irqs = 0; 23160a9a8c91SDaniel Vetter 
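/*
 * The GT programming below mirrors the display-side sequence: cache the
 * intended mask in dev_priv->gt_irq_mask, ack any stale bits by writing
 * GTIIR back to itself, then program GTIMR/GTIER and flush with a
 * posting read.  Gen6+ parts additionally route the PM/RPS sources
 * through GEN6_PMIIR/GEN6_PMIMR/GEN6_PMIER further down.
 */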
23170a9a8c91SDaniel Vetter dev_priv->gt_irq_mask = ~0; 2318040d2baaSBen Widawsky if (HAS_L3_DPF(dev)) { 23190a9a8c91SDaniel Vetter /* L3 parity interrupt is always unmasked. */ 232035a85ac6SBen Widawsky dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev); 232135a85ac6SBen Widawsky gt_irqs |= GT_PARITY_ERROR(dev); 23220a9a8c91SDaniel Vetter } 23230a9a8c91SDaniel Vetter 23240a9a8c91SDaniel Vetter gt_irqs |= GT_RENDER_USER_INTERRUPT; 23250a9a8c91SDaniel Vetter if (IS_GEN5(dev)) { 23260a9a8c91SDaniel Vetter gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT | 23270a9a8c91SDaniel Vetter ILK_BSD_USER_INTERRUPT; 23280a9a8c91SDaniel Vetter } else { 23290a9a8c91SDaniel Vetter gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT; 23300a9a8c91SDaniel Vetter } 23310a9a8c91SDaniel Vetter 23320a9a8c91SDaniel Vetter I915_WRITE(GTIIR, I915_READ(GTIIR)); 23330a9a8c91SDaniel Vetter I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 23340a9a8c91SDaniel Vetter I915_WRITE(GTIER, gt_irqs); 23350a9a8c91SDaniel Vetter POSTING_READ(GTIER); 23360a9a8c91SDaniel Vetter 23370a9a8c91SDaniel Vetter if (INTEL_INFO(dev)->gen >= 6) { 23380a9a8c91SDaniel Vetter pm_irqs |= GEN6_PM_RPS_EVENTS; 23390a9a8c91SDaniel Vetter 23400a9a8c91SDaniel Vetter if (HAS_VEBOX(dev)) 23410a9a8c91SDaniel Vetter pm_irqs |= PM_VEBOX_USER_INTERRUPT; 23420a9a8c91SDaniel Vetter 2343605cd25bSPaulo Zanoni dev_priv->pm_irq_mask = 0xffffffff; 23440a9a8c91SDaniel Vetter I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR)); 2345605cd25bSPaulo Zanoni I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask); 23460a9a8c91SDaniel Vetter I915_WRITE(GEN6_PMIER, pm_irqs); 23470a9a8c91SDaniel Vetter POSTING_READ(GEN6_PMIER); 23480a9a8c91SDaniel Vetter } 23490a9a8c91SDaniel Vetter } 23500a9a8c91SDaniel Vetter 2351f71d4af4SJesse Barnes static int ironlake_irq_postinstall(struct drm_device *dev) 2352036a4a7dSZhenyu Wang { 23534bc9d430SDaniel Vetter unsigned long irqflags; 2354036a4a7dSZhenyu Wang drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 23558e76f8dcSPaulo Zanoni u32 display_mask, extra_mask; 23568e76f8dcSPaulo Zanoni 23578e76f8dcSPaulo Zanoni if (INTEL_INFO(dev)->gen >= 7) { 23588e76f8dcSPaulo Zanoni display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | 23598e76f8dcSPaulo Zanoni DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB | 23608e76f8dcSPaulo Zanoni DE_PLANEB_FLIP_DONE_IVB | 23618e76f8dcSPaulo Zanoni DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB | 23628e76f8dcSPaulo Zanoni DE_ERR_INT_IVB); 23638e76f8dcSPaulo Zanoni extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB | 23648e76f8dcSPaulo Zanoni DE_PIPEA_VBLANK_IVB); 23658e76f8dcSPaulo Zanoni 23668e76f8dcSPaulo Zanoni I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT)); 23678e76f8dcSPaulo Zanoni } else { 23688e76f8dcSPaulo Zanoni display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | 2369ce99c256SDaniel Vetter DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE | 23708664281bSPaulo Zanoni DE_AUX_CHANNEL_A | DE_PIPEB_FIFO_UNDERRUN | 23718e76f8dcSPaulo Zanoni DE_PIPEA_FIFO_UNDERRUN | DE_POISON); 23728e76f8dcSPaulo Zanoni extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT; 23738e76f8dcSPaulo Zanoni } 2374036a4a7dSZhenyu Wang 23751ec14ad3SChris Wilson dev_priv->irq_mask = ~display_mask; 2376036a4a7dSZhenyu Wang 2377036a4a7dSZhenyu Wang /* should always can generate irq */ 2378036a4a7dSZhenyu Wang I915_WRITE(DEIIR, I915_READ(DEIIR)); 23791ec14ad3SChris Wilson I915_WRITE(DEIMR, dev_priv->irq_mask); 23808e76f8dcSPaulo Zanoni I915_WRITE(DEIER, display_mask | extra_mask); 23813143a2bfSChris Wilson POSTING_READ(DEIER); 
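/*
 * Display engine interrupts are now programmed: stale DEIIR bits were
 * acked by writing the register back to itself, DEIMR holds the inverse
 * of display_mask, and DEIER enables display_mask plus the extra_mask
 * bits (vblank and, on Ironlake, PCU events) that stay masked in DEIMR
 * until they are actually needed.  The GT/PM and PCH blocks are brought
 * up next.
 */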
2382036a4a7dSZhenyu Wang 23830a9a8c91SDaniel Vetter gen5_gt_irq_postinstall(dev); 2384036a4a7dSZhenyu Wang 2385d46da437SPaulo Zanoni ibx_irq_postinstall(dev); 23867fe0b973SKeith Packard 2387f97108d1SJesse Barnes if (IS_IRONLAKE_M(dev)) { 23886005ce42SDaniel Vetter /* Enable PCU event interrupts 23896005ce42SDaniel Vetter * 23906005ce42SDaniel Vetter * spinlocking not required here for correctness since interrupt 23914bc9d430SDaniel Vetter * setup is guaranteed to run in single-threaded context. But we 23924bc9d430SDaniel Vetter * need it to make the assert_spin_locked happy. */ 23934bc9d430SDaniel Vetter spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2394f97108d1SJesse Barnes ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT); 23954bc9d430SDaniel Vetter spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2396f97108d1SJesse Barnes } 2397f97108d1SJesse Barnes 2398036a4a7dSZhenyu Wang return 0; 2399036a4a7dSZhenyu Wang } 2400036a4a7dSZhenyu Wang 24017e231dbeSJesse Barnes static int valleyview_irq_postinstall(struct drm_device *dev) 24027e231dbeSJesse Barnes { 24037e231dbeSJesse Barnes drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 24047e231dbeSJesse Barnes u32 enable_mask; 240531acc7f5SJesse Barnes u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV; 2406b79480baSDaniel Vetter unsigned long irqflags; 24077e231dbeSJesse Barnes 24087e231dbeSJesse Barnes enable_mask = I915_DISPLAY_PORT_INTERRUPT; 240931acc7f5SJesse Barnes enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 241031acc7f5SJesse Barnes I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT | 241131acc7f5SJesse Barnes I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 24127e231dbeSJesse Barnes I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; 24137e231dbeSJesse Barnes 241431acc7f5SJesse Barnes /* 241531acc7f5SJesse Barnes *Leave vblank interrupts masked initially. enable/disable will 241631acc7f5SJesse Barnes * toggle them based on usage. 241731acc7f5SJesse Barnes */ 241831acc7f5SJesse Barnes dev_priv->irq_mask = (~enable_mask) | 241931acc7f5SJesse Barnes I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT | 242031acc7f5SJesse Barnes I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; 24217e231dbeSJesse Barnes 242220afbda2SDaniel Vetter I915_WRITE(PORT_HOTPLUG_EN, 0); 242320afbda2SDaniel Vetter POSTING_READ(PORT_HOTPLUG_EN); 242420afbda2SDaniel Vetter 24257e231dbeSJesse Barnes I915_WRITE(VLV_IMR, dev_priv->irq_mask); 24267e231dbeSJesse Barnes I915_WRITE(VLV_IER, enable_mask); 24277e231dbeSJesse Barnes I915_WRITE(VLV_IIR, 0xffffffff); 24287e231dbeSJesse Barnes I915_WRITE(PIPESTAT(0), 0xffff); 24297e231dbeSJesse Barnes I915_WRITE(PIPESTAT(1), 0xffff); 24307e231dbeSJesse Barnes POSTING_READ(VLV_IER); 24317e231dbeSJesse Barnes 2432b79480baSDaniel Vetter /* Interrupt setup is already guaranteed to be single-threaded, this is 2433b79480baSDaniel Vetter * just to make the assert_spin_locked check happy. 
*/ 2434b79480baSDaniel Vetter spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 243531acc7f5SJesse Barnes i915_enable_pipestat(dev_priv, 0, pipestat_enable); 2436515ac2bbSDaniel Vetter i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE); 243731acc7f5SJesse Barnes i915_enable_pipestat(dev_priv, 1, pipestat_enable); 2438b79480baSDaniel Vetter spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 243931acc7f5SJesse Barnes 24407e231dbeSJesse Barnes I915_WRITE(VLV_IIR, 0xffffffff); 24417e231dbeSJesse Barnes I915_WRITE(VLV_IIR, 0xffffffff); 24427e231dbeSJesse Barnes 24430a9a8c91SDaniel Vetter gen5_gt_irq_postinstall(dev); 24447e231dbeSJesse Barnes 24457e231dbeSJesse Barnes /* ack & enable invalid PTE error interrupts */ 24467e231dbeSJesse Barnes #if 0 /* FIXME: add support to irq handler for checking these bits */ 24477e231dbeSJesse Barnes I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK); 24487e231dbeSJesse Barnes I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK); 24497e231dbeSJesse Barnes #endif 24507e231dbeSJesse Barnes 24517e231dbeSJesse Barnes I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); 245220afbda2SDaniel Vetter 245320afbda2SDaniel Vetter return 0; 245420afbda2SDaniel Vetter } 245520afbda2SDaniel Vetter 24567e231dbeSJesse Barnes static void valleyview_irq_uninstall(struct drm_device *dev) 24577e231dbeSJesse Barnes { 24587e231dbeSJesse Barnes drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 24597e231dbeSJesse Barnes int pipe; 24607e231dbeSJesse Barnes 24617e231dbeSJesse Barnes if (!dev_priv) 24627e231dbeSJesse Barnes return; 24637e231dbeSJesse Barnes 2464ac4c16c5SEgbert Eich del_timer_sync(&dev_priv->hotplug_reenable_timer); 2465ac4c16c5SEgbert Eich 24667e231dbeSJesse Barnes for_each_pipe(pipe) 24677e231dbeSJesse Barnes I915_WRITE(PIPESTAT(pipe), 0xffff); 24687e231dbeSJesse Barnes 24697e231dbeSJesse Barnes I915_WRITE(HWSTAM, 0xffffffff); 24707e231dbeSJesse Barnes I915_WRITE(PORT_HOTPLUG_EN, 0); 24717e231dbeSJesse Barnes I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 24727e231dbeSJesse Barnes for_each_pipe(pipe) 24737e231dbeSJesse Barnes I915_WRITE(PIPESTAT(pipe), 0xffff); 24747e231dbeSJesse Barnes I915_WRITE(VLV_IIR, 0xffffffff); 24757e231dbeSJesse Barnes I915_WRITE(VLV_IMR, 0xffffffff); 24767e231dbeSJesse Barnes I915_WRITE(VLV_IER, 0x0); 24777e231dbeSJesse Barnes POSTING_READ(VLV_IER); 24787e231dbeSJesse Barnes } 24797e231dbeSJesse Barnes 2480f71d4af4SJesse Barnes static void ironlake_irq_uninstall(struct drm_device *dev) 2481036a4a7dSZhenyu Wang { 2482036a4a7dSZhenyu Wang drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 24834697995bSJesse Barnes 24844697995bSJesse Barnes if (!dev_priv) 24854697995bSJesse Barnes return; 24864697995bSJesse Barnes 2487ac4c16c5SEgbert Eich del_timer_sync(&dev_priv->hotplug_reenable_timer); 2488ac4c16c5SEgbert Eich 2489036a4a7dSZhenyu Wang I915_WRITE(HWSTAM, 0xffffffff); 2490036a4a7dSZhenyu Wang 2491036a4a7dSZhenyu Wang I915_WRITE(DEIMR, 0xffffffff); 2492036a4a7dSZhenyu Wang I915_WRITE(DEIER, 0x0); 2493036a4a7dSZhenyu Wang I915_WRITE(DEIIR, I915_READ(DEIIR)); 24948664281bSPaulo Zanoni if (IS_GEN7(dev)) 24958664281bSPaulo Zanoni I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT)); 2496036a4a7dSZhenyu Wang 2497036a4a7dSZhenyu Wang I915_WRITE(GTIMR, 0xffffffff); 2498036a4a7dSZhenyu Wang I915_WRITE(GTIER, 0x0); 2499036a4a7dSZhenyu Wang I915_WRITE(GTIIR, I915_READ(GTIIR)); 2500192aac1fSKeith Packard 2501ab5c608bSBen Widawsky if (HAS_PCH_NOP(dev)) 2502ab5c608bSBen Widawsky return; 2503ab5c608bSBen Widawsky 
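/*
 * Finally quiesce the south display engine: mask everything in SDEIMR,
 * drop SDEIER to zero and clear any latched status by writing SDEIIR
 * back to itself; CPT/LPT parts also get SERR_INT cleared the same way.
 * Parts with a disabled/NOP PCH skipped this via the early return above.
 */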
2504192aac1fSKeith Packard I915_WRITE(SDEIMR, 0xffffffff); 2505192aac1fSKeith Packard I915_WRITE(SDEIER, 0x0); 2506192aac1fSKeith Packard I915_WRITE(SDEIIR, I915_READ(SDEIIR)); 25078664281bSPaulo Zanoni if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev)) 25088664281bSPaulo Zanoni I915_WRITE(SERR_INT, I915_READ(SERR_INT)); 2509036a4a7dSZhenyu Wang } 2510036a4a7dSZhenyu Wang 2511c2798b19SChris Wilson static void i8xx_irq_preinstall(struct drm_device * dev) 2512c2798b19SChris Wilson { 2513c2798b19SChris Wilson drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2514c2798b19SChris Wilson int pipe; 2515c2798b19SChris Wilson 2516c2798b19SChris Wilson atomic_set(&dev_priv->irq_received, 0); 2517c2798b19SChris Wilson 2518c2798b19SChris Wilson for_each_pipe(pipe) 2519c2798b19SChris Wilson I915_WRITE(PIPESTAT(pipe), 0); 2520c2798b19SChris Wilson I915_WRITE16(IMR, 0xffff); 2521c2798b19SChris Wilson I915_WRITE16(IER, 0x0); 2522c2798b19SChris Wilson POSTING_READ16(IER); 2523c2798b19SChris Wilson } 2524c2798b19SChris Wilson 2525c2798b19SChris Wilson static int i8xx_irq_postinstall(struct drm_device *dev) 2526c2798b19SChris Wilson { 2527c2798b19SChris Wilson drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2528c2798b19SChris Wilson 2529c2798b19SChris Wilson I915_WRITE16(EMR, 2530c2798b19SChris Wilson ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); 2531c2798b19SChris Wilson 2532c2798b19SChris Wilson /* Unmask the interrupts that we always want on. */ 2533c2798b19SChris Wilson dev_priv->irq_mask = 2534c2798b19SChris Wilson ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 2535c2798b19SChris Wilson I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 2536c2798b19SChris Wilson I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 2537c2798b19SChris Wilson I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | 2538c2798b19SChris Wilson I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 2539c2798b19SChris Wilson I915_WRITE16(IMR, dev_priv->irq_mask); 2540c2798b19SChris Wilson 2541c2798b19SChris Wilson I915_WRITE16(IER, 2542c2798b19SChris Wilson I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 2543c2798b19SChris Wilson I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 2544c2798b19SChris Wilson I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT | 2545c2798b19SChris Wilson I915_USER_INTERRUPT); 2546c2798b19SChris Wilson POSTING_READ16(IER); 2547c2798b19SChris Wilson 2548c2798b19SChris Wilson return 0; 2549c2798b19SChris Wilson } 2550c2798b19SChris Wilson 255190a72f87SVille Syrjälä /* 255290a72f87SVille Syrjälä * Returns true when a page flip has completed. 255390a72f87SVille Syrjälä */ 255490a72f87SVille Syrjälä static bool i8xx_handle_vblank(struct drm_device *dev, 255590a72f87SVille Syrjälä int pipe, u16 iir) 255690a72f87SVille Syrjälä { 255790a72f87SVille Syrjälä drm_i915_private_t *dev_priv = dev->dev_private; 255890a72f87SVille Syrjälä u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(pipe); 255990a72f87SVille Syrjälä 256090a72f87SVille Syrjälä if (!drm_handle_vblank(dev, pipe)) 256190a72f87SVille Syrjälä return false; 256290a72f87SVille Syrjälä 256390a72f87SVille Syrjälä if ((iir & flip_pending) == 0) 256490a72f87SVille Syrjälä return false; 256590a72f87SVille Syrjälä 256690a72f87SVille Syrjälä intel_prepare_page_flip(dev, pipe); 256790a72f87SVille Syrjälä 256890a72f87SVille Syrjälä /* We detect FlipDone by looking for the change in PendingFlip from '1' 256990a72f87SVille Syrjälä * to '0' on the following vblank, i.e. 
IIR has the Pendingflip 257090a72f87SVille Syrjälä * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence 257190a72f87SVille Syrjälä * the flip is completed (no longer pending). Since this doesn't raise 257290a72f87SVille Syrjälä * an interrupt per se, we watch for the change at vblank. 257390a72f87SVille Syrjälä */ 257490a72f87SVille Syrjälä if (I915_READ16(ISR) & flip_pending) 257590a72f87SVille Syrjälä return false; 257690a72f87SVille Syrjälä 257790a72f87SVille Syrjälä intel_finish_page_flip(dev, pipe); 257890a72f87SVille Syrjälä 257990a72f87SVille Syrjälä return true; 258090a72f87SVille Syrjälä } 258190a72f87SVille Syrjälä 2582ff1f525eSDaniel Vetter static irqreturn_t i8xx_irq_handler(int irq, void *arg) 2583c2798b19SChris Wilson { 2584c2798b19SChris Wilson struct drm_device *dev = (struct drm_device *) arg; 2585c2798b19SChris Wilson drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2586c2798b19SChris Wilson u16 iir, new_iir; 2587c2798b19SChris Wilson u32 pipe_stats[2]; 2588c2798b19SChris Wilson unsigned long irqflags; 2589c2798b19SChris Wilson int pipe; 2590c2798b19SChris Wilson u16 flip_mask = 2591c2798b19SChris Wilson I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 2592c2798b19SChris Wilson I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 2593c2798b19SChris Wilson 2594c2798b19SChris Wilson atomic_inc(&dev_priv->irq_received); 2595c2798b19SChris Wilson 2596c2798b19SChris Wilson iir = I915_READ16(IIR); 2597c2798b19SChris Wilson if (iir == 0) 2598c2798b19SChris Wilson return IRQ_NONE; 2599c2798b19SChris Wilson 2600c2798b19SChris Wilson while (iir & ~flip_mask) { 2601c2798b19SChris Wilson /* Can't rely on pipestat interrupt bit in iir as it might 2602c2798b19SChris Wilson * have been cleared after the pipestat interrupt was received. 2603c2798b19SChris Wilson * It doesn't set the bit in iir again, but it still produces 2604c2798b19SChris Wilson * interrupts (for non-MSI). 
2605c2798b19SChris Wilson */ 2606c2798b19SChris Wilson spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2607c2798b19SChris Wilson if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 2608c2798b19SChris Wilson i915_handle_error(dev, false); 2609c2798b19SChris Wilson 2610c2798b19SChris Wilson for_each_pipe(pipe) { 2611c2798b19SChris Wilson int reg = PIPESTAT(pipe); 2612c2798b19SChris Wilson pipe_stats[pipe] = I915_READ(reg); 2613c2798b19SChris Wilson 2614c2798b19SChris Wilson /* 2615c2798b19SChris Wilson * Clear the PIPE*STAT regs before the IIR 2616c2798b19SChris Wilson */ 2617c2798b19SChris Wilson if (pipe_stats[pipe] & 0x8000ffff) { 2618c2798b19SChris Wilson if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 2619c2798b19SChris Wilson DRM_DEBUG_DRIVER("pipe %c underrun\n", 2620c2798b19SChris Wilson pipe_name(pipe)); 2621c2798b19SChris Wilson I915_WRITE(reg, pipe_stats[pipe]); 2622c2798b19SChris Wilson } 2623c2798b19SChris Wilson } 2624c2798b19SChris Wilson spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2625c2798b19SChris Wilson 2626c2798b19SChris Wilson I915_WRITE16(IIR, iir & ~flip_mask); 2627c2798b19SChris Wilson new_iir = I915_READ16(IIR); /* Flush posted writes */ 2628c2798b19SChris Wilson 2629d05c617eSDaniel Vetter i915_update_dri1_breadcrumb(dev); 2630c2798b19SChris Wilson 2631c2798b19SChris Wilson if (iir & I915_USER_INTERRUPT) 2632c2798b19SChris Wilson notify_ring(dev, &dev_priv->ring[RCS]); 2633c2798b19SChris Wilson 2634c2798b19SChris Wilson if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS && 263590a72f87SVille Syrjälä i8xx_handle_vblank(dev, 0, iir)) 263690a72f87SVille Syrjälä flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(0); 2637c2798b19SChris Wilson 2638c2798b19SChris Wilson if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS && 263990a72f87SVille Syrjälä i8xx_handle_vblank(dev, 1, iir)) 264090a72f87SVille Syrjälä flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(1); 2641c2798b19SChris Wilson 2642c2798b19SChris Wilson iir = new_iir; 2643c2798b19SChris Wilson } 2644c2798b19SChris Wilson 2645c2798b19SChris Wilson return IRQ_HANDLED; 2646c2798b19SChris Wilson } 2647c2798b19SChris Wilson 2648c2798b19SChris Wilson static void i8xx_irq_uninstall(struct drm_device * dev) 2649c2798b19SChris Wilson { 2650c2798b19SChris Wilson drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2651c2798b19SChris Wilson int pipe; 2652c2798b19SChris Wilson 2653c2798b19SChris Wilson for_each_pipe(pipe) { 2654c2798b19SChris Wilson /* Clear enable bits; then clear status bits */ 2655c2798b19SChris Wilson I915_WRITE(PIPESTAT(pipe), 0); 2656c2798b19SChris Wilson I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); 2657c2798b19SChris Wilson } 2658c2798b19SChris Wilson I915_WRITE16(IMR, 0xffff); 2659c2798b19SChris Wilson I915_WRITE16(IER, 0x0); 2660c2798b19SChris Wilson I915_WRITE16(IIR, I915_READ16(IIR)); 2661c2798b19SChris Wilson } 2662c2798b19SChris Wilson 2663a266c7d5SChris Wilson static void i915_irq_preinstall(struct drm_device * dev) 2664a266c7d5SChris Wilson { 2665a266c7d5SChris Wilson drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2666a266c7d5SChris Wilson int pipe; 2667a266c7d5SChris Wilson 2668a266c7d5SChris Wilson atomic_set(&dev_priv->irq_received, 0); 2669a266c7d5SChris Wilson 2670a266c7d5SChris Wilson if (I915_HAS_HOTPLUG(dev)) { 2671a266c7d5SChris Wilson I915_WRITE(PORT_HOTPLUG_EN, 0); 2672a266c7d5SChris Wilson I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 2673a266c7d5SChris Wilson } 2674a266c7d5SChris Wilson 267500d98ebdSChris Wilson 
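/*
 * Standard pre-install quiescing for the legacy gen3 path follows:
 * program HWSTAM, clear every PIPESTAT register, mask all sources in
 * IMR, disable delivery through IER and flush with a posting read so
 * the device is silent before the handler is installed.
 */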
I915_WRITE16(HWSTAM, 0xeffe); 2676a266c7d5SChris Wilson for_each_pipe(pipe) 2677a266c7d5SChris Wilson I915_WRITE(PIPESTAT(pipe), 0); 2678a266c7d5SChris Wilson I915_WRITE(IMR, 0xffffffff); 2679a266c7d5SChris Wilson I915_WRITE(IER, 0x0); 2680a266c7d5SChris Wilson POSTING_READ(IER); 2681a266c7d5SChris Wilson } 2682a266c7d5SChris Wilson 2683a266c7d5SChris Wilson static int i915_irq_postinstall(struct drm_device *dev) 2684a266c7d5SChris Wilson { 2685a266c7d5SChris Wilson drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 268638bde180SChris Wilson u32 enable_mask; 2687a266c7d5SChris Wilson 268838bde180SChris Wilson I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); 268938bde180SChris Wilson 269038bde180SChris Wilson /* Unmask the interrupts that we always want on. */ 269138bde180SChris Wilson dev_priv->irq_mask = 269238bde180SChris Wilson ~(I915_ASLE_INTERRUPT | 269338bde180SChris Wilson I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 269438bde180SChris Wilson I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 269538bde180SChris Wilson I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 269638bde180SChris Wilson I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | 269738bde180SChris Wilson I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 269838bde180SChris Wilson 269938bde180SChris Wilson enable_mask = 270038bde180SChris Wilson I915_ASLE_INTERRUPT | 270138bde180SChris Wilson I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 270238bde180SChris Wilson I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 270338bde180SChris Wilson I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT | 270438bde180SChris Wilson I915_USER_INTERRUPT; 270538bde180SChris Wilson 2706a266c7d5SChris Wilson if (I915_HAS_HOTPLUG(dev)) { 270720afbda2SDaniel Vetter I915_WRITE(PORT_HOTPLUG_EN, 0); 270820afbda2SDaniel Vetter POSTING_READ(PORT_HOTPLUG_EN); 270920afbda2SDaniel Vetter 2710a266c7d5SChris Wilson /* Enable in IER... */ 2711a266c7d5SChris Wilson enable_mask |= I915_DISPLAY_PORT_INTERRUPT; 2712a266c7d5SChris Wilson /* and unmask in IMR */ 2713a266c7d5SChris Wilson dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT; 2714a266c7d5SChris Wilson } 2715a266c7d5SChris Wilson 2716a266c7d5SChris Wilson I915_WRITE(IMR, dev_priv->irq_mask); 2717a266c7d5SChris Wilson I915_WRITE(IER, enable_mask); 2718a266c7d5SChris Wilson POSTING_READ(IER); 2719a266c7d5SChris Wilson 2720f49e38ddSJani Nikula i915_enable_asle_pipestat(dev); 272120afbda2SDaniel Vetter 272220afbda2SDaniel Vetter return 0; 272320afbda2SDaniel Vetter } 272420afbda2SDaniel Vetter 272590a72f87SVille Syrjälä /* 272690a72f87SVille Syrjälä * Returns true when a page flip has completed. 272790a72f87SVille Syrjälä */ 272890a72f87SVille Syrjälä static bool i915_handle_vblank(struct drm_device *dev, 272990a72f87SVille Syrjälä int plane, int pipe, u32 iir) 273090a72f87SVille Syrjälä { 273190a72f87SVille Syrjälä drm_i915_private_t *dev_priv = dev->dev_private; 273290a72f87SVille Syrjälä u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane); 273390a72f87SVille Syrjälä 273490a72f87SVille Syrjälä if (!drm_handle_vblank(dev, pipe)) 273590a72f87SVille Syrjälä return false; 273690a72f87SVille Syrjälä 273790a72f87SVille Syrjälä if ((iir & flip_pending) == 0) 273890a72f87SVille Syrjälä return false; 273990a72f87SVille Syrjälä 274090a72f87SVille Syrjälä intel_prepare_page_flip(dev, plane); 274190a72f87SVille Syrjälä 274290a72f87SVille Syrjälä /* We detect FlipDone by looking for the change in PendingFlip from '1' 274390a72f87SVille Syrjälä * to '0' on the following vblank, i.e. 
IIR has the Pendingflip 274490a72f87SVille Syrjälä * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence 274590a72f87SVille Syrjälä * the flip is completed (no longer pending). Since this doesn't raise 274690a72f87SVille Syrjälä * an interrupt per se, we watch for the change at vblank. 274790a72f87SVille Syrjälä */ 274890a72f87SVille Syrjälä if (I915_READ(ISR) & flip_pending) 274990a72f87SVille Syrjälä return false; 275090a72f87SVille Syrjälä 275190a72f87SVille Syrjälä intel_finish_page_flip(dev, pipe); 275290a72f87SVille Syrjälä 275390a72f87SVille Syrjälä return true; 275490a72f87SVille Syrjälä } 275590a72f87SVille Syrjälä 2756ff1f525eSDaniel Vetter static irqreturn_t i915_irq_handler(int irq, void *arg) 2757a266c7d5SChris Wilson { 2758a266c7d5SChris Wilson struct drm_device *dev = (struct drm_device *) arg; 2759a266c7d5SChris Wilson drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 27608291ee90SChris Wilson u32 iir, new_iir, pipe_stats[I915_MAX_PIPES]; 2761a266c7d5SChris Wilson unsigned long irqflags; 276238bde180SChris Wilson u32 flip_mask = 276338bde180SChris Wilson I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 276438bde180SChris Wilson I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 276538bde180SChris Wilson int pipe, ret = IRQ_NONE; 2766a266c7d5SChris Wilson 2767a266c7d5SChris Wilson atomic_inc(&dev_priv->irq_received); 2768a266c7d5SChris Wilson 2769a266c7d5SChris Wilson iir = I915_READ(IIR); 277038bde180SChris Wilson do { 277138bde180SChris Wilson bool irq_received = (iir & ~flip_mask) != 0; 27728291ee90SChris Wilson bool blc_event = false; 2773a266c7d5SChris Wilson 2774a266c7d5SChris Wilson /* Can't rely on pipestat interrupt bit in iir as it might 2775a266c7d5SChris Wilson * have been cleared after the pipestat interrupt was received. 2776a266c7d5SChris Wilson * It doesn't set the bit in iir again, but it still produces 2777a266c7d5SChris Wilson * interrupts (for non-MSI). 2778a266c7d5SChris Wilson */ 2779a266c7d5SChris Wilson spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2780a266c7d5SChris Wilson if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 2781a266c7d5SChris Wilson i915_handle_error(dev, false); 2782a266c7d5SChris Wilson 2783a266c7d5SChris Wilson for_each_pipe(pipe) { 2784a266c7d5SChris Wilson int reg = PIPESTAT(pipe); 2785a266c7d5SChris Wilson pipe_stats[pipe] = I915_READ(reg); 2786a266c7d5SChris Wilson 278738bde180SChris Wilson /* Clear the PIPE*STAT regs before the IIR */ 2788a266c7d5SChris Wilson if (pipe_stats[pipe] & 0x8000ffff) { 2789a266c7d5SChris Wilson if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 2790a266c7d5SChris Wilson DRM_DEBUG_DRIVER("pipe %c underrun\n", 2791a266c7d5SChris Wilson pipe_name(pipe)); 2792a266c7d5SChris Wilson I915_WRITE(reg, pipe_stats[pipe]); 279338bde180SChris Wilson irq_received = true; 2794a266c7d5SChris Wilson } 2795a266c7d5SChris Wilson } 2796a266c7d5SChris Wilson spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2797a266c7d5SChris Wilson 2798a266c7d5SChris Wilson if (!irq_received) 2799a266c7d5SChris Wilson break; 2800a266c7d5SChris Wilson 2801a266c7d5SChris Wilson /* Consume port. 
Then clear IIR or we'll miss events */ 2802a266c7d5SChris Wilson if ((I915_HAS_HOTPLUG(dev)) && 2803a266c7d5SChris Wilson (iir & I915_DISPLAY_PORT_INTERRUPT)) { 2804a266c7d5SChris Wilson u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); 2805b543fb04SEgbert Eich u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; 2806a266c7d5SChris Wilson 2807a266c7d5SChris Wilson DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", 2808a266c7d5SChris Wilson hotplug_status); 280991d131d2SDaniel Vetter 281010a504deSDaniel Vetter intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915); 281191d131d2SDaniel Vetter 2812a266c7d5SChris Wilson I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); 281338bde180SChris Wilson POSTING_READ(PORT_HOTPLUG_STAT); 2814a266c7d5SChris Wilson } 2815a266c7d5SChris Wilson 281638bde180SChris Wilson I915_WRITE(IIR, iir & ~flip_mask); 2817a266c7d5SChris Wilson new_iir = I915_READ(IIR); /* Flush posted writes */ 2818a266c7d5SChris Wilson 2819a266c7d5SChris Wilson if (iir & I915_USER_INTERRUPT) 2820a266c7d5SChris Wilson notify_ring(dev, &dev_priv->ring[RCS]); 2821a266c7d5SChris Wilson 2822a266c7d5SChris Wilson for_each_pipe(pipe) { 282338bde180SChris Wilson int plane = pipe; 282438bde180SChris Wilson if (IS_MOBILE(dev)) 282538bde180SChris Wilson plane = !plane; 28265e2032d4SVille Syrjälä 282790a72f87SVille Syrjälä if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS && 282890a72f87SVille Syrjälä i915_handle_vblank(dev, plane, pipe, iir)) 282990a72f87SVille Syrjälä flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane); 2830a266c7d5SChris Wilson 2831a266c7d5SChris Wilson if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 2832a266c7d5SChris Wilson blc_event = true; 2833a266c7d5SChris Wilson } 2834a266c7d5SChris Wilson 2835a266c7d5SChris Wilson if (blc_event || (iir & I915_ASLE_INTERRUPT)) 2836a266c7d5SChris Wilson intel_opregion_asle_intr(dev); 2837a266c7d5SChris Wilson 2838a266c7d5SChris Wilson /* With MSI, interrupts are only generated when iir 2839a266c7d5SChris Wilson * transitions from zero to nonzero. If another bit got 2840a266c7d5SChris Wilson * set while we were handling the existing iir bits, then 2841a266c7d5SChris Wilson * we would never get another interrupt. 2842a266c7d5SChris Wilson * 2843a266c7d5SChris Wilson * This is fine on non-MSI as well, as if we hit this path 2844a266c7d5SChris Wilson * we avoid exiting the interrupt handler only to generate 2845a266c7d5SChris Wilson * another one. 2846a266c7d5SChris Wilson * 2847a266c7d5SChris Wilson * Note that for MSI this could cause a stray interrupt report 2848a266c7d5SChris Wilson * if an interrupt landed in the time between writing IIR and 2849a266c7d5SChris Wilson * the posting read. This should be rare enough to never 2850a266c7d5SChris Wilson * trigger the 99% of 100,000 interrupts test for disabling 2851a266c7d5SChris Wilson * stray interrupts. 
2852a266c7d5SChris Wilson */ 285338bde180SChris Wilson ret = IRQ_HANDLED; 2854a266c7d5SChris Wilson iir = new_iir; 285538bde180SChris Wilson } while (iir & ~flip_mask); 2856a266c7d5SChris Wilson 2857d05c617eSDaniel Vetter i915_update_dri1_breadcrumb(dev); 28588291ee90SChris Wilson 2859a266c7d5SChris Wilson return ret; 2860a266c7d5SChris Wilson } 2861a266c7d5SChris Wilson 2862a266c7d5SChris Wilson static void i915_irq_uninstall(struct drm_device * dev) 2863a266c7d5SChris Wilson { 2864a266c7d5SChris Wilson drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2865a266c7d5SChris Wilson int pipe; 2866a266c7d5SChris Wilson 2867ac4c16c5SEgbert Eich del_timer_sync(&dev_priv->hotplug_reenable_timer); 2868ac4c16c5SEgbert Eich 2869a266c7d5SChris Wilson if (I915_HAS_HOTPLUG(dev)) { 2870a266c7d5SChris Wilson I915_WRITE(PORT_HOTPLUG_EN, 0); 2871a266c7d5SChris Wilson I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 2872a266c7d5SChris Wilson } 2873a266c7d5SChris Wilson 287400d98ebdSChris Wilson I915_WRITE16(HWSTAM, 0xffff); 287555b39755SChris Wilson for_each_pipe(pipe) { 287655b39755SChris Wilson /* Clear enable bits; then clear status bits */ 2877a266c7d5SChris Wilson I915_WRITE(PIPESTAT(pipe), 0); 287855b39755SChris Wilson I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); 287955b39755SChris Wilson } 2880a266c7d5SChris Wilson I915_WRITE(IMR, 0xffffffff); 2881a266c7d5SChris Wilson I915_WRITE(IER, 0x0); 2882a266c7d5SChris Wilson 2883a266c7d5SChris Wilson I915_WRITE(IIR, I915_READ(IIR)); 2884a266c7d5SChris Wilson } 2885a266c7d5SChris Wilson 2886a266c7d5SChris Wilson static void i965_irq_preinstall(struct drm_device * dev) 2887a266c7d5SChris Wilson { 2888a266c7d5SChris Wilson drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2889a266c7d5SChris Wilson int pipe; 2890a266c7d5SChris Wilson 2891a266c7d5SChris Wilson atomic_set(&dev_priv->irq_received, 0); 2892a266c7d5SChris Wilson 2893a266c7d5SChris Wilson I915_WRITE(PORT_HOTPLUG_EN, 0); 2894a266c7d5SChris Wilson I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 2895a266c7d5SChris Wilson 2896a266c7d5SChris Wilson I915_WRITE(HWSTAM, 0xeffe); 2897a266c7d5SChris Wilson for_each_pipe(pipe) 2898a266c7d5SChris Wilson I915_WRITE(PIPESTAT(pipe), 0); 2899a266c7d5SChris Wilson I915_WRITE(IMR, 0xffffffff); 2900a266c7d5SChris Wilson I915_WRITE(IER, 0x0); 2901a266c7d5SChris Wilson POSTING_READ(IER); 2902a266c7d5SChris Wilson } 2903a266c7d5SChris Wilson 2904a266c7d5SChris Wilson static int i965_irq_postinstall(struct drm_device *dev) 2905a266c7d5SChris Wilson { 2906a266c7d5SChris Wilson drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2907bbba0a97SChris Wilson u32 enable_mask; 2908a266c7d5SChris Wilson u32 error_mask; 2909b79480baSDaniel Vetter unsigned long irqflags; 2910a266c7d5SChris Wilson 2911a266c7d5SChris Wilson /* Unmask the interrupts that we always want on. 
*/ 2912bbba0a97SChris Wilson dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT | 2913adca4730SChris Wilson I915_DISPLAY_PORT_INTERRUPT | 2914bbba0a97SChris Wilson I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 2915bbba0a97SChris Wilson I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 2916bbba0a97SChris Wilson I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 2917bbba0a97SChris Wilson I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | 2918bbba0a97SChris Wilson I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 2919bbba0a97SChris Wilson 2920bbba0a97SChris Wilson enable_mask = ~dev_priv->irq_mask; 292121ad8330SVille Syrjälä enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 292221ad8330SVille Syrjälä I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT); 2923bbba0a97SChris Wilson enable_mask |= I915_USER_INTERRUPT; 2924bbba0a97SChris Wilson 2925bbba0a97SChris Wilson if (IS_G4X(dev)) 2926bbba0a97SChris Wilson enable_mask |= I915_BSD_USER_INTERRUPT; 2927a266c7d5SChris Wilson 2928b79480baSDaniel Vetter /* Interrupt setup is already guaranteed to be single-threaded, this is 2929b79480baSDaniel Vetter * just to make the assert_spin_locked check happy. */ 2930b79480baSDaniel Vetter spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2931515ac2bbSDaniel Vetter i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE); 2932b79480baSDaniel Vetter spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2933a266c7d5SChris Wilson 2934a266c7d5SChris Wilson /* 2935a266c7d5SChris Wilson * Enable some error detection, note the instruction error mask 2936a266c7d5SChris Wilson * bit is reserved, so we leave it masked. 2937a266c7d5SChris Wilson */ 2938a266c7d5SChris Wilson if (IS_G4X(dev)) { 2939a266c7d5SChris Wilson error_mask = ~(GM45_ERROR_PAGE_TABLE | 2940a266c7d5SChris Wilson GM45_ERROR_MEM_PRIV | 2941a266c7d5SChris Wilson GM45_ERROR_CP_PRIV | 2942a266c7d5SChris Wilson I915_ERROR_MEMORY_REFRESH); 2943a266c7d5SChris Wilson } else { 2944a266c7d5SChris Wilson error_mask = ~(I915_ERROR_PAGE_TABLE | 2945a266c7d5SChris Wilson I915_ERROR_MEMORY_REFRESH); 2946a266c7d5SChris Wilson } 2947a266c7d5SChris Wilson I915_WRITE(EMR, error_mask); 2948a266c7d5SChris Wilson 2949a266c7d5SChris Wilson I915_WRITE(IMR, dev_priv->irq_mask); 2950a266c7d5SChris Wilson I915_WRITE(IER, enable_mask); 2951a266c7d5SChris Wilson POSTING_READ(IER); 2952a266c7d5SChris Wilson 295320afbda2SDaniel Vetter I915_WRITE(PORT_HOTPLUG_EN, 0); 295420afbda2SDaniel Vetter POSTING_READ(PORT_HOTPLUG_EN); 295520afbda2SDaniel Vetter 2956f49e38ddSJani Nikula i915_enable_asle_pipestat(dev); 295720afbda2SDaniel Vetter 295820afbda2SDaniel Vetter return 0; 295920afbda2SDaniel Vetter } 296020afbda2SDaniel Vetter 2961bac56d5bSEgbert Eich static void i915_hpd_irq_setup(struct drm_device *dev) 296220afbda2SDaniel Vetter { 296320afbda2SDaniel Vetter drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2964e5868a31SEgbert Eich struct drm_mode_config *mode_config = &dev->mode_config; 2965cd569aedSEgbert Eich struct intel_encoder *intel_encoder; 296620afbda2SDaniel Vetter u32 hotplug_en; 296720afbda2SDaniel Vetter 2968b5ea2d56SDaniel Vetter assert_spin_locked(&dev_priv->irq_lock); 2969b5ea2d56SDaniel Vetter 2970bac56d5bSEgbert Eich if (I915_HAS_HOTPLUG(dev)) { 2971bac56d5bSEgbert Eich hotplug_en = I915_READ(PORT_HOTPLUG_EN); 2972bac56d5bSEgbert Eich hotplug_en &= ~HOTPLUG_INT_EN_MASK; 2973adca4730SChris Wilson /* Note HDMI and DP share hotplug bits */ 2974e5868a31SEgbert Eich /* enable bits are the same for all generations */ 2975cd569aedSEgbert Eich list_for_each_entry(intel_encoder, 
&mode_config->encoder_list, base.head) 2976cd569aedSEgbert Eich if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) 2977cd569aedSEgbert Eich hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin]; 2978a266c7d5SChris Wilson /* Programming the CRT detection parameters tends 2979a266c7d5SChris Wilson to generate a spurious hotplug event about three 2980a266c7d5SChris Wilson seconds later. So just do it once. 2981a266c7d5SChris Wilson */ 2982a266c7d5SChris Wilson if (IS_G4X(dev)) 2983a266c7d5SChris Wilson hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64; 298485fc95baSDaniel Vetter hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK; 2985a266c7d5SChris Wilson hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; 2986a266c7d5SChris Wilson 2987a266c7d5SChris Wilson /* Ignore TV since it's buggy */ 2988a266c7d5SChris Wilson I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); 2989a266c7d5SChris Wilson } 2990bac56d5bSEgbert Eich } 2991a266c7d5SChris Wilson 2992ff1f525eSDaniel Vetter static irqreturn_t i965_irq_handler(int irq, void *arg) 2993a266c7d5SChris Wilson { 2994a266c7d5SChris Wilson struct drm_device *dev = (struct drm_device *) arg; 2995a266c7d5SChris Wilson drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2996a266c7d5SChris Wilson u32 iir, new_iir; 2997a266c7d5SChris Wilson u32 pipe_stats[I915_MAX_PIPES]; 2998a266c7d5SChris Wilson unsigned long irqflags; 2999a266c7d5SChris Wilson int irq_received; 3000a266c7d5SChris Wilson int ret = IRQ_NONE, pipe; 300121ad8330SVille Syrjälä u32 flip_mask = 300221ad8330SVille Syrjälä I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 300321ad8330SVille Syrjälä I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 3004a266c7d5SChris Wilson 3005a266c7d5SChris Wilson atomic_inc(&dev_priv->irq_received); 3006a266c7d5SChris Wilson 3007a266c7d5SChris Wilson iir = I915_READ(IIR); 3008a266c7d5SChris Wilson 3009a266c7d5SChris Wilson for (;;) { 30102c8ba29fSChris Wilson bool blc_event = false; 30112c8ba29fSChris Wilson 301221ad8330SVille Syrjälä irq_received = (iir & ~flip_mask) != 0; 3013a266c7d5SChris Wilson 3014a266c7d5SChris Wilson /* Can't rely on pipestat interrupt bit in iir as it might 3015a266c7d5SChris Wilson * have been cleared after the pipestat interrupt was received. 3016a266c7d5SChris Wilson * It doesn't set the bit in iir again, but it still produces 3017a266c7d5SChris Wilson * interrupts (for non-MSI). 
3018a266c7d5SChris Wilson */ 3019a266c7d5SChris Wilson spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3020a266c7d5SChris Wilson if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 3021a266c7d5SChris Wilson i915_handle_error(dev, false); 3022a266c7d5SChris Wilson 3023a266c7d5SChris Wilson for_each_pipe(pipe) { 3024a266c7d5SChris Wilson int reg = PIPESTAT(pipe); 3025a266c7d5SChris Wilson pipe_stats[pipe] = I915_READ(reg); 3026a266c7d5SChris Wilson 3027a266c7d5SChris Wilson /* 3028a266c7d5SChris Wilson * Clear the PIPE*STAT regs before the IIR 3029a266c7d5SChris Wilson */ 3030a266c7d5SChris Wilson if (pipe_stats[pipe] & 0x8000ffff) { 3031a266c7d5SChris Wilson if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 3032a266c7d5SChris Wilson DRM_DEBUG_DRIVER("pipe %c underrun\n", 3033a266c7d5SChris Wilson pipe_name(pipe)); 3034a266c7d5SChris Wilson I915_WRITE(reg, pipe_stats[pipe]); 3035a266c7d5SChris Wilson irq_received = 1; 3036a266c7d5SChris Wilson } 3037a266c7d5SChris Wilson } 3038a266c7d5SChris Wilson spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3039a266c7d5SChris Wilson 3040a266c7d5SChris Wilson if (!irq_received) 3041a266c7d5SChris Wilson break; 3042a266c7d5SChris Wilson 3043a266c7d5SChris Wilson ret = IRQ_HANDLED; 3044a266c7d5SChris Wilson 3045a266c7d5SChris Wilson /* Consume port. Then clear IIR or we'll miss events */ 3046adca4730SChris Wilson if (iir & I915_DISPLAY_PORT_INTERRUPT) { 3047a266c7d5SChris Wilson u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); 3048b543fb04SEgbert Eich u32 hotplug_trigger = hotplug_status & (IS_G4X(dev) ? 3049b543fb04SEgbert Eich HOTPLUG_INT_STATUS_G4X : 30504f7fd709SDaniel Vetter HOTPLUG_INT_STATUS_I915); 3051a266c7d5SChris Wilson 3052a266c7d5SChris Wilson DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", 3053a266c7d5SChris Wilson hotplug_status); 305491d131d2SDaniel Vetter 305510a504deSDaniel Vetter intel_hpd_irq_handler(dev, hotplug_trigger, 305610a504deSDaniel Vetter IS_G4X(dev) ? 
hpd_status_gen4 : hpd_status_i915); 305791d131d2SDaniel Vetter 3058a266c7d5SChris Wilson I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); 3059a266c7d5SChris Wilson I915_READ(PORT_HOTPLUG_STAT); 3060a266c7d5SChris Wilson } 3061a266c7d5SChris Wilson 306221ad8330SVille Syrjälä I915_WRITE(IIR, iir & ~flip_mask); 3063a266c7d5SChris Wilson new_iir = I915_READ(IIR); /* Flush posted writes */ 3064a266c7d5SChris Wilson 3065a266c7d5SChris Wilson if (iir & I915_USER_INTERRUPT) 3066a266c7d5SChris Wilson notify_ring(dev, &dev_priv->ring[RCS]); 3067a266c7d5SChris Wilson if (iir & I915_BSD_USER_INTERRUPT) 3068a266c7d5SChris Wilson notify_ring(dev, &dev_priv->ring[VCS]); 3069a266c7d5SChris Wilson 3070a266c7d5SChris Wilson for_each_pipe(pipe) { 30712c8ba29fSChris Wilson if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS && 307290a72f87SVille Syrjälä i915_handle_vblank(dev, pipe, pipe, iir)) 307390a72f87SVille Syrjälä flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe); 3074a266c7d5SChris Wilson 3075a266c7d5SChris Wilson if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 3076a266c7d5SChris Wilson blc_event = true; 3077a266c7d5SChris Wilson } 3078a266c7d5SChris Wilson 3079a266c7d5SChris Wilson 3080a266c7d5SChris Wilson if (blc_event || (iir & I915_ASLE_INTERRUPT)) 3081a266c7d5SChris Wilson intel_opregion_asle_intr(dev); 3082a266c7d5SChris Wilson 3083515ac2bbSDaniel Vetter if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 3084515ac2bbSDaniel Vetter gmbus_irq_handler(dev); 3085515ac2bbSDaniel Vetter 3086a266c7d5SChris Wilson /* With MSI, interrupts are only generated when iir 3087a266c7d5SChris Wilson * transitions from zero to nonzero. If another bit got 3088a266c7d5SChris Wilson * set while we were handling the existing iir bits, then 3089a266c7d5SChris Wilson * we would never get another interrupt. 3090a266c7d5SChris Wilson * 3091a266c7d5SChris Wilson * This is fine on non-MSI as well, as if we hit this path 3092a266c7d5SChris Wilson * we avoid exiting the interrupt handler only to generate 3093a266c7d5SChris Wilson * another one. 3094a266c7d5SChris Wilson * 3095a266c7d5SChris Wilson * Note that for MSI this could cause a stray interrupt report 3096a266c7d5SChris Wilson * if an interrupt landed in the time between writing IIR and 3097a266c7d5SChris Wilson * the posting read. This should be rare enough to never 3098a266c7d5SChris Wilson * trigger the 99% of 100,000 interrupts test for disabling 3099a266c7d5SChris Wilson * stray interrupts. 
3100a266c7d5SChris Wilson */ 3101a266c7d5SChris Wilson iir = new_iir; 3102a266c7d5SChris Wilson } 3103a266c7d5SChris Wilson 3104d05c617eSDaniel Vetter i915_update_dri1_breadcrumb(dev); 31052c8ba29fSChris Wilson 3106a266c7d5SChris Wilson return ret; 3107a266c7d5SChris Wilson } 3108a266c7d5SChris Wilson 3109a266c7d5SChris Wilson static void i965_irq_uninstall(struct drm_device * dev) 3110a266c7d5SChris Wilson { 3111a266c7d5SChris Wilson drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3112a266c7d5SChris Wilson int pipe; 3113a266c7d5SChris Wilson 3114a266c7d5SChris Wilson if (!dev_priv) 3115a266c7d5SChris Wilson return; 3116a266c7d5SChris Wilson 3117ac4c16c5SEgbert Eich del_timer_sync(&dev_priv->hotplug_reenable_timer); 3118ac4c16c5SEgbert Eich 3119a266c7d5SChris Wilson I915_WRITE(PORT_HOTPLUG_EN, 0); 3120a266c7d5SChris Wilson I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3121a266c7d5SChris Wilson 3122a266c7d5SChris Wilson I915_WRITE(HWSTAM, 0xffffffff); 3123a266c7d5SChris Wilson for_each_pipe(pipe) 3124a266c7d5SChris Wilson I915_WRITE(PIPESTAT(pipe), 0); 3125a266c7d5SChris Wilson I915_WRITE(IMR, 0xffffffff); 3126a266c7d5SChris Wilson I915_WRITE(IER, 0x0); 3127a266c7d5SChris Wilson 3128a266c7d5SChris Wilson for_each_pipe(pipe) 3129a266c7d5SChris Wilson I915_WRITE(PIPESTAT(pipe), 3130a266c7d5SChris Wilson I915_READ(PIPESTAT(pipe)) & 0x8000ffff); 3131a266c7d5SChris Wilson I915_WRITE(IIR, I915_READ(IIR)); 3132a266c7d5SChris Wilson } 3133a266c7d5SChris Wilson 3134ac4c16c5SEgbert Eich static void i915_reenable_hotplug_timer_func(unsigned long data) 3135ac4c16c5SEgbert Eich { 3136ac4c16c5SEgbert Eich drm_i915_private_t *dev_priv = (drm_i915_private_t *)data; 3137ac4c16c5SEgbert Eich struct drm_device *dev = dev_priv->dev; 3138ac4c16c5SEgbert Eich struct drm_mode_config *mode_config = &dev->mode_config; 3139ac4c16c5SEgbert Eich unsigned long irqflags; 3140ac4c16c5SEgbert Eich int i; 3141ac4c16c5SEgbert Eich 3142ac4c16c5SEgbert Eich spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3143ac4c16c5SEgbert Eich for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) { 3144ac4c16c5SEgbert Eich struct drm_connector *connector; 3145ac4c16c5SEgbert Eich 3146ac4c16c5SEgbert Eich if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED) 3147ac4c16c5SEgbert Eich continue; 3148ac4c16c5SEgbert Eich 3149ac4c16c5SEgbert Eich dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED; 3150ac4c16c5SEgbert Eich 3151ac4c16c5SEgbert Eich list_for_each_entry(connector, &mode_config->connector_list, head) { 3152ac4c16c5SEgbert Eich struct intel_connector *intel_connector = to_intel_connector(connector); 3153ac4c16c5SEgbert Eich 3154ac4c16c5SEgbert Eich if (intel_connector->encoder->hpd_pin == i) { 3155ac4c16c5SEgbert Eich if (connector->polled != intel_connector->polled) 3156ac4c16c5SEgbert Eich DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n", 3157ac4c16c5SEgbert Eich drm_get_connector_name(connector)); 3158ac4c16c5SEgbert Eich connector->polled = intel_connector->polled; 3159ac4c16c5SEgbert Eich if (!connector->polled) 3160ac4c16c5SEgbert Eich connector->polled = DRM_CONNECTOR_POLL_HPD; 3161ac4c16c5SEgbert Eich } 3162ac4c16c5SEgbert Eich } 3163ac4c16c5SEgbert Eich } 3164ac4c16c5SEgbert Eich if (dev_priv->display.hpd_irq_setup) 3165ac4c16c5SEgbert Eich dev_priv->display.hpd_irq_setup(dev); 3166ac4c16c5SEgbert Eich spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3167ac4c16c5SEgbert Eich } 3168ac4c16c5SEgbert Eich 3169f71d4af4SJesse Barnes void intel_irq_init(struct drm_device *dev) 3170f71d4af4SJesse 
Barnes { 31718b2e326dSChris Wilson struct drm_i915_private *dev_priv = dev->dev_private; 31728b2e326dSChris Wilson 31738b2e326dSChris Wilson INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func); 317499584db3SDaniel Vetter INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func); 3175c6a828d3SDaniel Vetter INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work); 3176a4da4fa4SDaniel Vetter INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work); 31778b2e326dSChris Wilson 317899584db3SDaniel Vetter setup_timer(&dev_priv->gpu_error.hangcheck_timer, 317999584db3SDaniel Vetter i915_hangcheck_elapsed, 318061bac78eSDaniel Vetter (unsigned long) dev); 3181ac4c16c5SEgbert Eich setup_timer(&dev_priv->hotplug_reenable_timer, i915_reenable_hotplug_timer_func, 3182ac4c16c5SEgbert Eich (unsigned long) dev_priv); 318361bac78eSDaniel Vetter 318497a19a24STomas Janousek pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE); 31859ee32feaSDaniel Vetter 31867d4e146fSEugeni Dodonov if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) { 3187f71d4af4SJesse Barnes dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */ 3188f71d4af4SJesse Barnes dev->driver->get_vblank_counter = gm45_get_vblank_counter; 3189391f75e2SVille Syrjälä } else { 3190391f75e2SVille Syrjälä dev->driver->get_vblank_counter = i915_get_vblank_counter; 3191391f75e2SVille Syrjälä dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ 3192f71d4af4SJesse Barnes } 3193f71d4af4SJesse Barnes 3194c2baf4b7SVille Syrjälä if (drm_core_check_feature(dev, DRIVER_MODESET)) { 3195f71d4af4SJesse Barnes dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp; 3196f71d4af4SJesse Barnes dev->driver->get_scanout_position = i915_get_crtc_scanoutpos; 3197c2baf4b7SVille Syrjälä } 3198f71d4af4SJesse Barnes 31997e231dbeSJesse Barnes if (IS_VALLEYVIEW(dev)) { 32007e231dbeSJesse Barnes dev->driver->irq_handler = valleyview_irq_handler; 32017e231dbeSJesse Barnes dev->driver->irq_preinstall = valleyview_irq_preinstall; 32027e231dbeSJesse Barnes dev->driver->irq_postinstall = valleyview_irq_postinstall; 32037e231dbeSJesse Barnes dev->driver->irq_uninstall = valleyview_irq_uninstall; 32047e231dbeSJesse Barnes dev->driver->enable_vblank = valleyview_enable_vblank; 32057e231dbeSJesse Barnes dev->driver->disable_vblank = valleyview_disable_vblank; 3206fa00abe0SEgbert Eich dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 3207f71d4af4SJesse Barnes } else if (HAS_PCH_SPLIT(dev)) { 3208f71d4af4SJesse Barnes dev->driver->irq_handler = ironlake_irq_handler; 3209f71d4af4SJesse Barnes dev->driver->irq_preinstall = ironlake_irq_preinstall; 3210f71d4af4SJesse Barnes dev->driver->irq_postinstall = ironlake_irq_postinstall; 3211f71d4af4SJesse Barnes dev->driver->irq_uninstall = ironlake_irq_uninstall; 3212f71d4af4SJesse Barnes dev->driver->enable_vblank = ironlake_enable_vblank; 3213f71d4af4SJesse Barnes dev->driver->disable_vblank = ironlake_disable_vblank; 321482a28bcfSDaniel Vetter dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup; 3215f71d4af4SJesse Barnes } else { 3216c2798b19SChris Wilson if (INTEL_INFO(dev)->gen == 2) { 3217c2798b19SChris Wilson dev->driver->irq_preinstall = i8xx_irq_preinstall; 3218c2798b19SChris Wilson dev->driver->irq_postinstall = i8xx_irq_postinstall; 3219c2798b19SChris Wilson dev->driver->irq_handler = i8xx_irq_handler; 3220c2798b19SChris Wilson dev->driver->irq_uninstall = i8xx_irq_uninstall; 3221a266c7d5SChris Wilson } else if (INTEL_INFO(dev)->gen == 3) { 3222a266c7d5SChris Wilson 
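/* Gen3 keeps the legacy i915_* entry points; gen2 was wired to the
 * 16-bit i8xx_* variants above and gen4/g4x fall through to the i965_*
 * handlers below, all sharing the i915_enable/disable_vblank hooks. */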
dev->driver->irq_preinstall = i915_irq_preinstall; 3223a266c7d5SChris Wilson dev->driver->irq_postinstall = i915_irq_postinstall; 3224a266c7d5SChris Wilson dev->driver->irq_uninstall = i915_irq_uninstall; 3225a266c7d5SChris Wilson dev->driver->irq_handler = i915_irq_handler; 322620afbda2SDaniel Vetter dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 3227c2798b19SChris Wilson } else { 3228a266c7d5SChris Wilson dev->driver->irq_preinstall = i965_irq_preinstall; 3229a266c7d5SChris Wilson dev->driver->irq_postinstall = i965_irq_postinstall; 3230a266c7d5SChris Wilson dev->driver->irq_uninstall = i965_irq_uninstall; 3231a266c7d5SChris Wilson dev->driver->irq_handler = i965_irq_handler; 3232bac56d5bSEgbert Eich dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 3233c2798b19SChris Wilson } 3234f71d4af4SJesse Barnes dev->driver->enable_vblank = i915_enable_vblank; 3235f71d4af4SJesse Barnes dev->driver->disable_vblank = i915_disable_vblank; 3236f71d4af4SJesse Barnes } 3237f71d4af4SJesse Barnes } 323820afbda2SDaniel Vetter 323920afbda2SDaniel Vetter void intel_hpd_init(struct drm_device *dev) 324020afbda2SDaniel Vetter { 324120afbda2SDaniel Vetter struct drm_i915_private *dev_priv = dev->dev_private; 3242821450c6SEgbert Eich struct drm_mode_config *mode_config = &dev->mode_config; 3243821450c6SEgbert Eich struct drm_connector *connector; 3244b5ea2d56SDaniel Vetter unsigned long irqflags; 3245821450c6SEgbert Eich int i; 324620afbda2SDaniel Vetter 3247821450c6SEgbert Eich for (i = 1; i < HPD_NUM_PINS; i++) { 3248821450c6SEgbert Eich dev_priv->hpd_stats[i].hpd_cnt = 0; 3249821450c6SEgbert Eich dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED; 3250821450c6SEgbert Eich } 3251821450c6SEgbert Eich list_for_each_entry(connector, &mode_config->connector_list, head) { 3252821450c6SEgbert Eich struct intel_connector *intel_connector = to_intel_connector(connector); 3253821450c6SEgbert Eich connector->polled = intel_connector->polled; 3254821450c6SEgbert Eich if (!connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE) 3255821450c6SEgbert Eich connector->polled = DRM_CONNECTOR_POLL_HPD; 3256821450c6SEgbert Eich } 3257b5ea2d56SDaniel Vetter 3258b5ea2d56SDaniel Vetter /* Interrupt setup is already guaranteed to be single-threaded, this is 3259b5ea2d56SDaniel Vetter * just to make the assert_spin_locked checks happy. */ 3260b5ea2d56SDaniel Vetter spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 326120afbda2SDaniel Vetter if (dev_priv->display.hpd_irq_setup) 326220afbda2SDaniel Vetter dev_priv->display.hpd_irq_setup(dev); 3263b5ea2d56SDaniel Vetter spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 326420afbda2SDaniel Vetter } 3265c67a470bSPaulo Zanoni 3266c67a470bSPaulo Zanoni /* Disable interrupts so we can allow Package C8+. 
/* Disable interrupts so we can allow Package C8+. */
void hsw_pc8_disable_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	dev_priv->pc8.regsave.deimr = I915_READ(DEIMR);
	dev_priv->pc8.regsave.sdeimr = I915_READ(SDEIMR);
	dev_priv->pc8.regsave.gtimr = I915_READ(GTIMR);
	dev_priv->pc8.regsave.gtier = I915_READ(GTIER);
	dev_priv->pc8.regsave.gen6_pmimr = I915_READ(GEN6_PMIMR);

	ironlake_disable_display_irq(dev_priv, ~DE_PCH_EVENT_IVB);
	ibx_disable_display_interrupt(dev_priv, ~SDE_HOTPLUG_MASK_CPT);
	ilk_disable_gt_irq(dev_priv, 0xffffffff);
	snb_disable_pm_irq(dev_priv, 0xffffffff);

	dev_priv->pc8.irqs_disabled = true;

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

/* Restore interrupts so we can recover from Package C8+. */
void hsw_pc8_restore_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;
	uint32_t val, expected;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	val = I915_READ(DEIMR);
	expected = ~DE_PCH_EVENT_IVB;
	WARN(val != expected, "DEIMR is 0x%08x, not 0x%08x\n", val, expected);

	val = I915_READ(SDEIMR) & ~SDE_HOTPLUG_MASK_CPT;
	expected = ~SDE_HOTPLUG_MASK_CPT;
	WARN(val != expected, "SDEIMR non-HPD bits are 0x%08x, not 0x%08x\n",
	     val, expected);

	val = I915_READ(GTIMR);
	expected = 0xffffffff;
	WARN(val != expected, "GTIMR is 0x%08x, not 0x%08x\n", val, expected);

	val = I915_READ(GEN6_PMIMR);
	expected = 0xffffffff;
	WARN(val != expected, "GEN6_PMIMR is 0x%08x, not 0x%08x\n", val,
	     expected);

	dev_priv->pc8.irqs_disabled = false;

	ironlake_enable_display_irq(dev_priv, ~dev_priv->pc8.regsave.deimr);
	ibx_enable_display_interrupt(dev_priv,
				     ~dev_priv->pc8.regsave.sdeimr &
				     ~SDE_HOTPLUG_MASK_CPT);
	ilk_enable_gt_irq(dev_priv, ~dev_priv->pc8.regsave.gtimr);
	snb_enable_pm_irq(dev_priv, ~dev_priv->pc8.regsave.gen6_pmimr);
	I915_WRITE(GTIER, dev_priv->pc8.regsave.gtier);

	spin_unlock_irqrestore(&dev_priv->irq_lock,
			       irqflags);
}
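
/*
 * Illustrative sketch only (not part of the driver): roughly how the two
 * helpers above are meant to bracket a Package C8+ transition. The function
 * name below is hypothetical, and the real enable/disable sequence lives
 * elsewhere in the driver; this only shows the intended pairing.
 */
static void __maybe_unused example_pc8_transition(struct drm_device *dev)
{
	/*
	 * Save DEIMR/SDEIMR/GTIMR/GTIER/GEN6_PMIMR and mask everything
	 * except the PCH event/hotplug bits, which stay live so hotplug
	 * is still noticed while in PC8+.
	 */
	hsw_pc8_disable_interrupts(dev);

	/* ... hardware may now enter Package C8+ ... */

	/*
	 * Warn if anything re-enabled interrupts behind our back, then
	 * put the saved mask/enable registers back.
	 */
	hsw_pc8_restore_interrupts(dev);
}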