/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

static const u32 hpd_ibx[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_mask_i915[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_gen4[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

/* For display hotplug interrupt */
static void
ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->pc8.irqs_disabled) {
		WARN(1, "IRQs disabled\n");
		dev_priv->pc8.regsave.deimr &= ~mask;
		return;
	}

	if ((dev_priv->irq_mask & mask) != 0) {
		dev_priv->irq_mask &= ~mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

static void
ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->pc8.irqs_disabled) {
		WARN(1, "IRQs disabled\n");
		dev_priv->pc8.regsave.deimr |= mask;
		return;
	}

	if ((dev_priv->irq_mask & mask) != mask) {
		dev_priv->irq_mask |= mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->pc8.irqs_disabled) {
		WARN(1, "IRQs disabled\n");
		dev_priv->pc8.regsave.gtimr &= ~interrupt_mask;
		dev_priv->pc8.regsave.gtimr |= (~enabled_irq_mask &
						interrupt_mask);
		return;
	}

	dev_priv->gt_irq_mask &= ~interrupt_mask;
	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	POSTING_READ(GTIMR);
}

void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, mask);
}

void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, 0);
}

/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->pc8.irqs_disabled) {
		WARN(1, "IRQs disabled\n");
		dev_priv->pc8.regsave.gen6_pmimr &= ~interrupt_mask;
		dev_priv->pc8.regsave.gen6_pmimr |= (~enabled_irq_mask &
						     interrupt_mask);
		return;
	}

	new_val = dev_priv->pm_irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_irq_mask) {
		dev_priv->pm_irq_mask = new_val;
		I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask);
		POSTING_READ(GEN6_PMIMR);
	}
}

void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, mask);
}

void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, 0);
}

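/*
 * On IVB/HSW there is only one ERR_INT enable bit shared by all pipes, so
 * error interrupts can only be (re)enabled once no pipe has FIFO underrun
 * reporting disabled.
 */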
static bool ivb_can_enable_err_int(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc;
	enum pipe pipe;

	assert_spin_locked(&dev_priv->irq_lock);

	for_each_pipe(pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		if (crtc->cpu_fifo_underrun_disabled)
			return false;
	}

	return true;
}

static bool cpt_can_enable_serr_int(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;
	struct intel_crtc *crtc;

	assert_spin_locked(&dev_priv->irq_lock);

	for_each_pipe(pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		if (crtc->pch_fifo_underrun_disabled)
			return false;
	}

	return true;
}

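/*
 * ILK/SNB have dedicated DEIMR bits per pipe for FIFO underruns, so the
 * interrupt can simply be masked or unmasked for the requested pipe.
 */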
static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
						 enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN :
					  DE_PIPEB_FIFO_UNDERRUN;

	if (enable)
		ironlake_enable_display_irq(dev_priv, bit);
	else
		ironlake_disable_display_irq(dev_priv, bit);
}

static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
						  enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	if (enable) {
		I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));

		if (!ivb_can_enable_err_int(dev))
			return;

		ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
	} else {
		bool was_enabled = !(I915_READ(DEIMR) & DE_ERR_INT_IVB);

		/* Change the state _after_ we've read out the current one. */
		ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);

		if (!was_enabled &&
		    (I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe))) {
			DRM_DEBUG_KMS("uncleared fifo underrun on pipe %c\n",
				      pipe_name(pipe));
		}
	}
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
					 uint32_t interrupt_mask,
					 uint32_t enabled_irq_mask)
{
	uint32_t sdeimr = I915_READ(SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->pc8.irqs_disabled &&
	    (interrupt_mask & SDE_HOTPLUG_MASK_CPT)) {
		WARN(1, "IRQs disabled\n");
		dev_priv->pc8.regsave.sdeimr &= ~interrupt_mask;
		dev_priv->pc8.regsave.sdeimr |= (~enabled_irq_mask &
						 interrupt_mask);
		return;
	}

	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}
#define ibx_enable_display_interrupt(dev_priv, bits) \
	ibx_display_interrupt_update((dev_priv), (bits), (bits))
#define ibx_disable_display_interrupt(dev_priv, bits) \
	ibx_display_interrupt_update((dev_priv), (bits), 0)

static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
					    enum transcoder pch_transcoder,
					    bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t bit = (pch_transcoder == TRANSCODER_A) ?
		       SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;

	if (enable)
		ibx_enable_display_interrupt(dev_priv, bit);
	else
		ibx_disable_display_interrupt(dev_priv, bit);
}

static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
					    enum transcoder pch_transcoder,
					    bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (enable) {
		I915_WRITE(SERR_INT,
			   SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));

		if (!cpt_can_enable_serr_int(dev))
			return;

		ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT);
	} else {
		uint32_t tmp = I915_READ(SERR_INT);
		bool was_enabled = !(I915_READ(SDEIMR) & SDE_ERROR_CPT);

		/* Change the state _after_ we've read out the current one. */
		ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT);

		if (!was_enabled &&
		    (tmp & SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder))) {
			DRM_DEBUG_KMS("uncleared pch fifo underrun on pch transcoder %c\n",
				      transcoder_name(pch_transcoder));
		}
	}
}

/**
 * intel_set_cpu_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pipe: pipe
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable CPU fifo underruns for a specific
 * pipe. Notice that on some Gens (e.g. IVB, HSW), disabling FIFO underrun
 * reporting for one pipe may also disable all the other CPU error interrupts
 * for the other pipes, due to the fact that there's just one interrupt
 * mask/enable bit for all the pipes.
 *
 * Returns the previous state of underrun reporting.
 */
bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
					   enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);

	ret = !intel_crtc->cpu_fifo_underrun_disabled;

	if (enable == ret)
		goto done;

	intel_crtc->cpu_fifo_underrun_disabled = !enable;

	if (IS_GEN5(dev) || IS_GEN6(dev))
		ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
	else if (IS_GEN7(dev))
		ivybridge_set_fifo_underrun_reporting(dev, pipe, enable);

done:
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
	return ret;
}

/**
 * intel_set_pch_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable PCH fifo underruns for a specific
 * PCH transcoder. Notice that on some PCHs (e.g. CPT/PPT), disabling FIFO
 * underrun reporting for one transcoder may also disable all the other PCH
 * error interrupts for the other transcoders, due to the fact that there's
 * just one interrupt mask/enable bit for all the transcoders.
 *
 * Returns the previous state of underrun reporting.
 */
bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
					   enum transcoder pch_transcoder,
					   bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	unsigned long flags;
	bool ret;

	/*
	 * NOTE: Pre-LPT has a fixed cpu pipe -> pch transcoder mapping, but LPT
	 * has only one pch transcoder A that all pipes can use. To avoid racy
	 * pch transcoder -> pipe lookups from interrupt code simply store the
	 * underrun statistics in crtc A. Since we never expose this anywhere
	 * nor use it outside of the fifo underrun code here using the "wrong"
	 * crtc on LPT won't cause issues.
	 */

	spin_lock_irqsave(&dev_priv->irq_lock, flags);

	ret = !intel_crtc->pch_fifo_underrun_disabled;

	if (enable == ret)
		goto done;

	intel_crtc->pch_fifo_underrun_disabled = !enable;

	if (HAS_PCH_IBX(dev))
		ibx_set_fifo_underrun_reporting(dev, pch_transcoder, enable);
	else
		cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable);

done:
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
	return ret;
}


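/*
 * PIPESTAT registers keep the interrupt enable bits in the upper half and
 * the corresponding status bits in the lower half; writing the status bit
 * (mask >> 16) back also clears any event that is already pending.
 */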
void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & 0x7fff0000;

	assert_spin_locked(&dev_priv->irq_lock);

	if ((pipestat & mask) == mask)
		return;

	/* Enable the interrupt, clear any pending status */
	pipestat |= mask | (mask >> 16);
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

void
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & 0x7fff0000;

	assert_spin_locked(&dev_priv->irq_lock);

	if ((pipestat & mask) == 0)
		return;

	pipestat &= ~mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 */
static void i915_enable_asle_pipestat(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long irqflags;

	if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	i915_enable_pipestat(dev_priv, 1, PIPE_LEGACY_BLC_EVENT_ENABLE);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, 0, PIPE_LEGACY_BLC_EVENT_ENABLE);

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		/* Locking is horribly broken here, but whatever. */
		struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

		return intel_crtc->active;
	} else {
		return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
	}
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low = I915_READ(low_frame) & PIPE_FRAME_LOW_MASK;
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	low >>= PIPE_FRAME_LOW_SHIFT;
	return (high1 << 8) | low;
}

static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int reg = PIPE_FRMCOUNT_GM45(pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	return I915_READ(reg);
}

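/*
 * Sample the current scanout position for the given pipe: vertical (and,
 * pre-gen4, horizontal) position plus DRM_SCANOUTPOS_* flags, used below
 * for vblank timestamping.
 */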
static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
				    int *vpos, int *hpos)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 vbl = 0, position = 0;
	int vbl_start, vbl_end, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	/* Get vtotal. */
	vtotal = 1 + ((I915_READ(VTOTAL(cpu_transcoder)) >> 16) & 0x1fff);

	if (INTEL_INFO(dev)->gen >= 4) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = I915_READ(PIPEDSL(pipe));

		/* Decode into vertical scanout position. Don't have
		 * horizontal scanout position.
		 */
		*vpos = position & 0x1fff;
		*hpos = 0;
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		htotal = 1 + ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* Query vblank area. */
	vbl = I915_READ(VBLANK(cpu_transcoder));

	/* Test position against vblank region. */
	vbl_start = vbl & 0x1fff;
	vbl_end = (vbl >> 16) & 0x1fff;

	if ((*vpos < vbl_start) || (*vpos > vbl_end))
		in_vbl = false;

	/* Inside "upper part" of vblank area? Apply corrective offset: */
	if (in_vbl && (*vpos >= vbl_start))
		*vpos = *vpos - vtotal;

	/* Readouts valid? */
	if (vbl > 0)
		ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_INVBL;

	return ret;
}

static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
				     int *max_error,
				     struct timeval *vblank_time,
				     unsigned flags)
{
	struct drm_crtc *crtc;

	if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	if (!crtc->enabled) {
		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags,
						     crtc);
}

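/*
 * Re-run detection on a single connector and report whether its connection
 * status actually changed. Caller must hold mode_config.mutex.
 */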
static int intel_hpd_irq_event(struct drm_device *dev, struct drm_connector *connector)
{
	enum drm_connector_status old_status;

	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
	old_status = connector->status;

	connector->status = connector->funcs->detect(connector, false);
	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n",
		      connector->base.id,
		      drm_get_connector_name(connector),
		      old_status, connector->status);
	return (old_status != connector->status);
}

/*
 * Handle hotplug events outside the interrupt handler proper.
 */
#define I915_REENABLE_HOTPLUG_DELAY (2*60*1000)

static void i915_hotplug_work_func(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    hotplug_work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_connector *intel_connector;
	struct intel_encoder *intel_encoder;
	struct drm_connector *connector;
	unsigned long irqflags;
	bool hpd_disabled = false;
	bool changed = false;
	u32 hpd_event_bits;

	/* HPD irq before everything is fully set up. */
	if (!dev_priv->enable_hotplug_processing)
		return;

	mutex_lock(&mode_config->mutex);
	DRM_DEBUG_KMS("running encoder hotplug functions\n");

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	hpd_event_bits = dev_priv->hpd_event_bits;
	dev_priv->hpd_event_bits = 0;
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		intel_encoder = intel_connector->encoder;
		if (intel_encoder->hpd_pin > HPD_NONE &&
		    dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
		    connector->polled == DRM_CONNECTOR_POLL_HPD) {
			DRM_INFO("HPD interrupt storm detected on connector %s: "
				 "switching from hotplug detection to polling\n",
				 drm_get_connector_name(connector));
			dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
			connector->polled = DRM_CONNECTOR_POLL_CONNECT
				| DRM_CONNECTOR_POLL_DISCONNECT;
			hpd_disabled = true;
		}
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
				      drm_get_connector_name(connector), intel_encoder->hpd_pin);
		}
	}
	/* if there were no outputs to poll, poll was disabled,
	 * therefore make sure it's enabled when disabling HPD on
	 * some connectors */
	if (hpd_disabled) {
		drm_kms_helper_poll_enable(dev);
		mod_timer(&dev_priv->hotplug_reenable_timer,
			  jiffies + msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
	}

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		intel_encoder = intel_connector->encoder;
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			if (intel_encoder->hot_plug)
				intel_encoder->hot_plug(intel_encoder);
			if (intel_hpd_irq_event(dev, connector))
				changed = true;
		}
	}
	mutex_unlock(&mode_config->mutex);

	if (changed)
		drm_kms_helper_hotplug_event(dev);
}

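/*
 * ILK GPU turbo: the hardware signals when the GPU is busier or idler than
 * the configured thresholds; step the DRPS delay one notch in the indicated
 * direction, clamped to the min/max delays tracked in dev_priv->ips.
 */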
static void ironlake_rps_change_irq_handler(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;

	spin_lock(&mchdev_lock);

	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	spin_unlock(&mchdev_lock);

	return;
}

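/*
 * A ring reported a completed request: wake anybody waiting on the ring and
 * (re)arm the hangcheck timer.
 */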
static void notify_ring(struct drm_device *dev,
			struct intel_ring_buffer *ring)
{
	if (ring->obj == NULL)
		return;

	trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false));

	wake_up_all(&ring->irq_queue);
	i915_queue_hangcheck(dev);
}

static void gen6_pm_rps_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    rps.work);
	u32 pm_iir;
	u8 new_delay;

	spin_lock_irq(&dev_priv->irq_lock);
	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	/* Make sure not to corrupt PMIMR state used by ringbuffer code */
	snb_enable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS);
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Make sure we didn't queue anything we're not going to process. */
	WARN_ON(pm_iir & ~GEN6_PM_RPS_EVENTS);

	if ((pm_iir & GEN6_PM_RPS_EVENTS) == 0)
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		new_delay = dev_priv->rps.cur_delay + 1;

		/*
		 * For better performance, jump directly
		 * to RPe if we're below it.
		 */
		if (IS_VALLEYVIEW(dev_priv->dev) &&
		    dev_priv->rps.cur_delay < dev_priv->rps.rpe_delay)
			new_delay = dev_priv->rps.rpe_delay;
	} else
		new_delay = dev_priv->rps.cur_delay - 1;

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	if (new_delay >= dev_priv->rps.min_delay &&
	    new_delay <= dev_priv->rps.max_delay) {
		if (IS_VALLEYVIEW(dev_priv->dev))
			valleyview_set_rps(dev_priv->dev, new_delay);
		else
			gen6_set_rps(dev_priv->dev, new_delay);
	}

	if (IS_VALLEYVIEW(dev_priv->dev)) {
		/*
		 * On VLV, when we enter RC6 we may not be at the minimum
		 * voltage level, so arm a timer to check. It should only
		 * fire when there's activity or once after we've entered
		 * RC6, and then won't be re-armed until the next RPS interrupt.
		 */
		mod_delayed_work(dev_priv->wq, &dev_priv->rps.vlv_work,
				 msecs_to_jiffies(100));
	}

	mutex_unlock(&dev_priv->rps.hw_lock);
}


/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * it is likely the same row will go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[5];
	uint32_t misccpctl;
	unsigned long flags;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->dev->struct_mutex);

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	error_status = I915_READ(GEN7_L3CDERRST1);
	row = GEN7_PARITY_ERROR_ROW(error_status);
	bank = GEN7_PARITY_ERROR_BANK(error_status);
	subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

	I915_WRITE(GEN7_L3CDERRST1, GEN7_PARITY_ERROR_VALID |
				    GEN7_L3CDERRST1_ENABLE);
	POSTING_READ(GEN7_L3CDERRST1);

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	ilk_enable_gt_irq(dev_priv, GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	mutex_unlock(&dev_priv->dev->struct_mutex);

	parity_event[0] = I915_L3_PARITY_UEVENT "=1";
	parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
	parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
	parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
	parity_event[4] = NULL;

	kobject_uevent_env(&dev_priv->dev->primary->kdev.kobj,
			   KOBJ_CHANGE, parity_event);

	DRM_DEBUG("Parity error: Row = %d, Bank = %d, Sub bank = %d.\n",
		  row, bank, subbank);

	kfree(parity_event[3]);
	kfree(parity_event[2]);
	kfree(parity_event[1]);
}

static void ivybridge_parity_error_irq_handler(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (!HAS_L3_GPU_CACHE(dev))
		return;

	spin_lock(&dev_priv->irq_lock);
	ilk_disable_gt_irq(dev_priv, GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
	spin_unlock(&dev_priv->irq_lock);

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}

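/*
 * GT interrupt dispatch: route user interrupts to the matching ring, flag
 * command stream errors, and kick off L3 parity handling where applicable.
 */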
static void ilk_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & ILK_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
}

static void snb_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{

	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
	if (gt_iir & GT_BLT_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[BCS]);

	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
		      GT_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) {
		DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
		i915_handle_error(dev, false);
	}

	if (gt_iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
		ivybridge_parity_error_irq_handler(dev);
}

#define HPD_STORM_DETECT_PERIOD 1000
#define HPD_STORM_THRESHOLD 5

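/*
 * Shared hotplug irq handler with storm detection: count interrupts per HPD
 * pin within HPD_STORM_DETECT_PERIOD; a pin that exceeds HPD_STORM_THRESHOLD
 * is marked disabled so the work function can fall back to polling.
 */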
static inline void intel_hpd_irq_handler(struct drm_device *dev,
					 u32 hotplug_trigger,
					 const u32 *hpd)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i;
	bool storm_detected = false;

	if (!hotplug_trigger)
		return;

	spin_lock(&dev_priv->irq_lock);
	for (i = 1; i < HPD_NUM_PINS; i++) {

		WARN(((hpd[i] & hotplug_trigger) &&
		      dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED),
		     "Received HPD interrupt although disabled\n");

		if (!(hpd[i] & hotplug_trigger) ||
		    dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
			continue;

		dev_priv->hpd_event_bits |= (1 << i);
		if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
				   dev_priv->hpd_stats[i].hpd_last_jiffies
				   + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
			dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
			dev_priv->hpd_stats[i].hpd_cnt = 0;
			DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", i);
		} else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
			dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
			dev_priv->hpd_event_bits &= ~(1 << i);
			DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
			storm_detected = true;
		} else {
			dev_priv->hpd_stats[i].hpd_cnt++;
			DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", i,
				      dev_priv->hpd_stats[i].hpd_cnt);
		}
	}

	if (storm_detected)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock(&dev_priv->irq_lock);

	/*
	 * Our hotplug handler can grab modeset locks (by calling down into the
	 * fb helpers). Hence it must not be run on our own dev-priv->wq work
	 * queue for otherwise the flush_work in the pageflip code will
	 * deadlock.
	 */
	schedule_work(&dev_priv->hotplug_work);
}

static void gmbus_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

static void dp_aux_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

/* The RPS events need forcewake, so we add them to a work queue and mask their
 * IMR bits until the work is done. Other interrupts can be processed without
 * the work queue. */
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	if (pm_iir & GEN6_PM_RPS_EVENTS) {
		spin_lock(&dev_priv->irq_lock);
		dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_RPS_EVENTS;
		snb_disable_pm_irq(dev_priv, pm_iir & GEN6_PM_RPS_EVENTS);
		spin_unlock(&dev_priv->irq_lock);

		queue_work(dev_priv->wq, &dev_priv->rps.work);
	}

	if (HAS_VEBOX(dev_priv->dev)) {
		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
			notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);

		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) {
			DRM_ERROR("VEBOX CS error interrupt 0x%08x\n", pm_iir);
			i915_handle_error(dev_priv->dev, false);
		}
	}
}

static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, gt_iir, pm_iir;
	irqreturn_t ret = IRQ_NONE;
	unsigned long irqflags;
	int pipe;
	u32 pipe_stats[I915_MAX_PIPES];

	atomic_inc(&dev_priv->irq_received);

	while (true) {
		iir = I915_READ(VLV_IIR);
		gt_iir = I915_READ(GTIIR);
		pm_iir = I915_READ(GEN6_PMIIR);

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			goto out;

		ret = IRQ_HANDLED;

		snb_gt_irq_handler(dev, dev_priv, gt_iir);

		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		for_each_pipe(pipe) {
			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
				drm_handle_vblank(dev, pipe);

			if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) {
				intel_prepare_page_flip(dev, pipe);
				intel_finish_page_flip(dev, pipe);
			}
		}

		/* Consume port.  Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
			u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);

			intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			I915_READ(PORT_HOTPLUG_STAT);
		}

		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
			gmbus_irq_handler(dev);

		if (pm_iir)
			gen6_rps_irq_handler(dev_priv, pm_iir);

		I915_WRITE(GTIIR, gt_iir);
		I915_WRITE(GEN6_PMIIR, pm_iir);
		I915_WRITE(VLV_IIR, iir);
	}

out:
	return ret;
}

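/*
 * South display engine (PCH) interrupt handler for IBX: decodes hotplug,
 * DP AUX, GMBUS, audio, FDI, and transcoder FIFO underrun events.
 */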
Barnes intel_prepare_page_flip(dev, pipe); 112531acc7f5SJesse Barnes intel_finish_page_flip(dev, pipe); 112631acc7f5SJesse Barnes } 112731acc7f5SJesse Barnes } 112831acc7f5SJesse Barnes 11297e231dbeSJesse Barnes /* Consume port. Then clear IIR or we'll miss events */ 11307e231dbeSJesse Barnes if (iir & I915_DISPLAY_PORT_INTERRUPT) { 11317e231dbeSJesse Barnes u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); 1132b543fb04SEgbert Eich u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; 11337e231dbeSJesse Barnes 11347e231dbeSJesse Barnes DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", 11357e231dbeSJesse Barnes hotplug_status); 113691d131d2SDaniel Vetter 113710a504deSDaniel Vetter intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915); 113891d131d2SDaniel Vetter 11397e231dbeSJesse Barnes I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); 11407e231dbeSJesse Barnes I915_READ(PORT_HOTPLUG_STAT); 11417e231dbeSJesse Barnes } 11427e231dbeSJesse Barnes 1143515ac2bbSDaniel Vetter if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 1144515ac2bbSDaniel Vetter gmbus_irq_handler(dev); 11457e231dbeSJesse Barnes 114660611c13SPaulo Zanoni if (pm_iir) 1147d0ecd7e2SDaniel Vetter gen6_rps_irq_handler(dev_priv, pm_iir); 11487e231dbeSJesse Barnes 11497e231dbeSJesse Barnes I915_WRITE(GTIIR, gt_iir); 11507e231dbeSJesse Barnes I915_WRITE(GEN6_PMIIR, pm_iir); 11517e231dbeSJesse Barnes I915_WRITE(VLV_IIR, iir); 11527e231dbeSJesse Barnes } 11537e231dbeSJesse Barnes 11547e231dbeSJesse Barnes out: 11557e231dbeSJesse Barnes return ret; 11567e231dbeSJesse Barnes } 11577e231dbeSJesse Barnes 115823e81d69SAdam Jackson static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir) 1159776ad806SJesse Barnes { 1160776ad806SJesse Barnes drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 11619db4a9c7SJesse Barnes int pipe; 1162b543fb04SEgbert Eich u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK; 1163776ad806SJesse Barnes 116410a504deSDaniel Vetter intel_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx); 116591d131d2SDaniel Vetter 1166cfc33bf7SVille Syrjälä if (pch_iir & SDE_AUDIO_POWER_MASK) { 1167cfc33bf7SVille Syrjälä int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >> 1168776ad806SJesse Barnes SDE_AUDIO_POWER_SHIFT); 1169cfc33bf7SVille Syrjälä DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", 1170cfc33bf7SVille Syrjälä port_name(port)); 1171cfc33bf7SVille Syrjälä } 1172776ad806SJesse Barnes 1173ce99c256SDaniel Vetter if (pch_iir & SDE_AUX_MASK) 1174ce99c256SDaniel Vetter dp_aux_irq_handler(dev); 1175ce99c256SDaniel Vetter 1176776ad806SJesse Barnes if (pch_iir & SDE_GMBUS) 1177515ac2bbSDaniel Vetter gmbus_irq_handler(dev); 1178776ad806SJesse Barnes 1179776ad806SJesse Barnes if (pch_iir & SDE_AUDIO_HDCP_MASK) 1180776ad806SJesse Barnes DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n"); 1181776ad806SJesse Barnes 1182776ad806SJesse Barnes if (pch_iir & SDE_AUDIO_TRANS_MASK) 1183776ad806SJesse Barnes DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n"); 1184776ad806SJesse Barnes 1185776ad806SJesse Barnes if (pch_iir & SDE_POISON) 1186776ad806SJesse Barnes DRM_ERROR("PCH poison interrupt\n"); 1187776ad806SJesse Barnes 11889db4a9c7SJesse Barnes if (pch_iir & SDE_FDI_MASK) 11899db4a9c7SJesse Barnes for_each_pipe(pipe) 11909db4a9c7SJesse Barnes DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 11919db4a9c7SJesse Barnes pipe_name(pipe), 11929db4a9c7SJesse Barnes I915_READ(FDI_RX_IIR(pipe))); 1193776ad806SJesse Barnes 1194776ad806SJesse Barnes if (pch_iir & (SDE_TRANSB_CRC_DONE | 
SDE_TRANSA_CRC_DONE)) 1195776ad806SJesse Barnes DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n"); 1196776ad806SJesse Barnes 1197776ad806SJesse Barnes if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR)) 1198776ad806SJesse Barnes DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n"); 1199776ad806SJesse Barnes 1200776ad806SJesse Barnes if (pch_iir & SDE_TRANSA_FIFO_UNDER) 12018664281bSPaulo Zanoni if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, 12028664281bSPaulo Zanoni false)) 12038664281bSPaulo Zanoni DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n"); 12048664281bSPaulo Zanoni 12058664281bSPaulo Zanoni if (pch_iir & SDE_TRANSB_FIFO_UNDER) 12068664281bSPaulo Zanoni if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B, 12078664281bSPaulo Zanoni false)) 12088664281bSPaulo Zanoni DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n"); 12098664281bSPaulo Zanoni } 12108664281bSPaulo Zanoni 12118664281bSPaulo Zanoni static void ivb_err_int_handler(struct drm_device *dev) 12128664281bSPaulo Zanoni { 12138664281bSPaulo Zanoni struct drm_i915_private *dev_priv = dev->dev_private; 12148664281bSPaulo Zanoni u32 err_int = I915_READ(GEN7_ERR_INT); 12158664281bSPaulo Zanoni 1216de032bf4SPaulo Zanoni if (err_int & ERR_INT_POISON) 1217de032bf4SPaulo Zanoni DRM_ERROR("Poison interrupt\n"); 1218de032bf4SPaulo Zanoni 12198664281bSPaulo Zanoni if (err_int & ERR_INT_FIFO_UNDERRUN_A) 12208664281bSPaulo Zanoni if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false)) 12218664281bSPaulo Zanoni DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n"); 12228664281bSPaulo Zanoni 12238664281bSPaulo Zanoni if (err_int & ERR_INT_FIFO_UNDERRUN_B) 12248664281bSPaulo Zanoni if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false)) 12258664281bSPaulo Zanoni DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n"); 12268664281bSPaulo Zanoni 12278664281bSPaulo Zanoni if (err_int & ERR_INT_FIFO_UNDERRUN_C) 12288664281bSPaulo Zanoni if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_C, false)) 12298664281bSPaulo Zanoni DRM_DEBUG_DRIVER("Pipe C FIFO underrun\n"); 12308664281bSPaulo Zanoni 12318664281bSPaulo Zanoni I915_WRITE(GEN7_ERR_INT, err_int); 12328664281bSPaulo Zanoni } 12338664281bSPaulo Zanoni 12348664281bSPaulo Zanoni static void cpt_serr_int_handler(struct drm_device *dev) 12358664281bSPaulo Zanoni { 12368664281bSPaulo Zanoni struct drm_i915_private *dev_priv = dev->dev_private; 12378664281bSPaulo Zanoni u32 serr_int = I915_READ(SERR_INT); 12388664281bSPaulo Zanoni 1239de032bf4SPaulo Zanoni if (serr_int & SERR_INT_POISON) 1240de032bf4SPaulo Zanoni DRM_ERROR("PCH poison interrupt\n"); 1241de032bf4SPaulo Zanoni 12428664281bSPaulo Zanoni if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN) 12438664281bSPaulo Zanoni if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, 12448664281bSPaulo Zanoni false)) 12458664281bSPaulo Zanoni DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n"); 12468664281bSPaulo Zanoni 12478664281bSPaulo Zanoni if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN) 12488664281bSPaulo Zanoni if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B, 12498664281bSPaulo Zanoni false)) 12508664281bSPaulo Zanoni DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n"); 12518664281bSPaulo Zanoni 12528664281bSPaulo Zanoni if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN) 12538664281bSPaulo Zanoni if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_C, 12548664281bSPaulo Zanoni false)) 12558664281bSPaulo Zanoni DRM_DEBUG_DRIVER("PCH transcoder C FIFO underrun\n"); 12568664281bSPaulo Zanoni 
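/* As with GEN7_ERR_INT in ivb_err_int_handler() above, the logged bits are written back to SERR_INT below to acknowledge the south-display error conditions that were just reported. */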
12578664281bSPaulo Zanoni I915_WRITE(SERR_INT, serr_int); 1258776ad806SJesse Barnes } 1259776ad806SJesse Barnes 126023e81d69SAdam Jackson static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir) 126123e81d69SAdam Jackson { 126223e81d69SAdam Jackson drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 126323e81d69SAdam Jackson int pipe; 1264b543fb04SEgbert Eich u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; 126523e81d69SAdam Jackson 126610a504deSDaniel Vetter intel_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt); 126791d131d2SDaniel Vetter 1268cfc33bf7SVille Syrjälä if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) { 1269cfc33bf7SVille Syrjälä int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> 127023e81d69SAdam Jackson SDE_AUDIO_POWER_SHIFT_CPT); 1271cfc33bf7SVille Syrjälä DRM_DEBUG_DRIVER("PCH audio power change on port %c\n", 1272cfc33bf7SVille Syrjälä port_name(port)); 1273cfc33bf7SVille Syrjälä } 127423e81d69SAdam Jackson 127523e81d69SAdam Jackson if (pch_iir & SDE_AUX_MASK_CPT) 1276ce99c256SDaniel Vetter dp_aux_irq_handler(dev); 127723e81d69SAdam Jackson 127823e81d69SAdam Jackson if (pch_iir & SDE_GMBUS_CPT) 1279515ac2bbSDaniel Vetter gmbus_irq_handler(dev); 128023e81d69SAdam Jackson 128123e81d69SAdam Jackson if (pch_iir & SDE_AUDIO_CP_REQ_CPT) 128223e81d69SAdam Jackson DRM_DEBUG_DRIVER("Audio CP request interrupt\n"); 128323e81d69SAdam Jackson 128423e81d69SAdam Jackson if (pch_iir & SDE_AUDIO_CP_CHG_CPT) 128523e81d69SAdam Jackson DRM_DEBUG_DRIVER("Audio CP change interrupt\n"); 128623e81d69SAdam Jackson 128723e81d69SAdam Jackson if (pch_iir & SDE_FDI_MASK_CPT) 128823e81d69SAdam Jackson for_each_pipe(pipe) 128923e81d69SAdam Jackson DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 129023e81d69SAdam Jackson pipe_name(pipe), 129123e81d69SAdam Jackson I915_READ(FDI_RX_IIR(pipe))); 12928664281bSPaulo Zanoni 12938664281bSPaulo Zanoni if (pch_iir & SDE_ERROR_CPT) 12948664281bSPaulo Zanoni cpt_serr_int_handler(dev); 129523e81d69SAdam Jackson } 129623e81d69SAdam Jackson 1297c008bc6eSPaulo Zanoni static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir) 1298c008bc6eSPaulo Zanoni { 1299c008bc6eSPaulo Zanoni struct drm_i915_private *dev_priv = dev->dev_private; 1300c008bc6eSPaulo Zanoni 1301c008bc6eSPaulo Zanoni if (de_iir & DE_AUX_CHANNEL_A) 1302c008bc6eSPaulo Zanoni dp_aux_irq_handler(dev); 1303c008bc6eSPaulo Zanoni 1304c008bc6eSPaulo Zanoni if (de_iir & DE_GSE) 1305c008bc6eSPaulo Zanoni intel_opregion_asle_intr(dev); 1306c008bc6eSPaulo Zanoni 1307c008bc6eSPaulo Zanoni if (de_iir & DE_PIPEA_VBLANK) 1308c008bc6eSPaulo Zanoni drm_handle_vblank(dev, 0); 1309c008bc6eSPaulo Zanoni 1310c008bc6eSPaulo Zanoni if (de_iir & DE_PIPEB_VBLANK) 1311c008bc6eSPaulo Zanoni drm_handle_vblank(dev, 1); 1312c008bc6eSPaulo Zanoni 1313c008bc6eSPaulo Zanoni if (de_iir & DE_POISON) 1314c008bc6eSPaulo Zanoni DRM_ERROR("Poison interrupt\n"); 1315c008bc6eSPaulo Zanoni 1316c008bc6eSPaulo Zanoni if (de_iir & DE_PIPEA_FIFO_UNDERRUN) 1317c008bc6eSPaulo Zanoni if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false)) 1318c008bc6eSPaulo Zanoni DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n"); 1319c008bc6eSPaulo Zanoni 1320c008bc6eSPaulo Zanoni if (de_iir & DE_PIPEB_FIFO_UNDERRUN) 1321c008bc6eSPaulo Zanoni if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false)) 1322c008bc6eSPaulo Zanoni DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n"); 1323c008bc6eSPaulo Zanoni 1324c008bc6eSPaulo Zanoni if (de_iir & DE_PLANEA_FLIP_DONE) { 1325c008bc6eSPaulo Zanoni intel_prepare_page_flip(dev, 0); 
1326c008bc6eSPaulo Zanoni intel_finish_page_flip_plane(dev, 0); 1327c008bc6eSPaulo Zanoni } 1328c008bc6eSPaulo Zanoni 1329c008bc6eSPaulo Zanoni if (de_iir & DE_PLANEB_FLIP_DONE) { 1330c008bc6eSPaulo Zanoni intel_prepare_page_flip(dev, 1); 1331c008bc6eSPaulo Zanoni intel_finish_page_flip_plane(dev, 1); 1332c008bc6eSPaulo Zanoni } 1333c008bc6eSPaulo Zanoni 1334c008bc6eSPaulo Zanoni /* check event from PCH */ 1335c008bc6eSPaulo Zanoni if (de_iir & DE_PCH_EVENT) { 1336c008bc6eSPaulo Zanoni u32 pch_iir = I915_READ(SDEIIR); 1337c008bc6eSPaulo Zanoni 1338c008bc6eSPaulo Zanoni if (HAS_PCH_CPT(dev)) 1339c008bc6eSPaulo Zanoni cpt_irq_handler(dev, pch_iir); 1340c008bc6eSPaulo Zanoni else 1341c008bc6eSPaulo Zanoni ibx_irq_handler(dev, pch_iir); 1342c008bc6eSPaulo Zanoni 1343c008bc6eSPaulo Zanoni /* should clear PCH hotplug event before clear CPU irq */ 1344c008bc6eSPaulo Zanoni I915_WRITE(SDEIIR, pch_iir); 1345c008bc6eSPaulo Zanoni } 1346c008bc6eSPaulo Zanoni 1347c008bc6eSPaulo Zanoni if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT) 1348c008bc6eSPaulo Zanoni ironlake_rps_change_irq_handler(dev); 1349c008bc6eSPaulo Zanoni } 1350c008bc6eSPaulo Zanoni 13519719fb98SPaulo Zanoni static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir) 13529719fb98SPaulo Zanoni { 13539719fb98SPaulo Zanoni struct drm_i915_private *dev_priv = dev->dev_private; 13549719fb98SPaulo Zanoni int i; 13559719fb98SPaulo Zanoni 13569719fb98SPaulo Zanoni if (de_iir & DE_ERR_INT_IVB) 13579719fb98SPaulo Zanoni ivb_err_int_handler(dev); 13589719fb98SPaulo Zanoni 13599719fb98SPaulo Zanoni if (de_iir & DE_AUX_CHANNEL_A_IVB) 13609719fb98SPaulo Zanoni dp_aux_irq_handler(dev); 13619719fb98SPaulo Zanoni 13629719fb98SPaulo Zanoni if (de_iir & DE_GSE_IVB) 13639719fb98SPaulo Zanoni intel_opregion_asle_intr(dev); 13649719fb98SPaulo Zanoni 13659719fb98SPaulo Zanoni for (i = 0; i < 3; i++) { 13669719fb98SPaulo Zanoni if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i))) 13679719fb98SPaulo Zanoni drm_handle_vblank(dev, i); 13689719fb98SPaulo Zanoni if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) { 13699719fb98SPaulo Zanoni intel_prepare_page_flip(dev, i); 13709719fb98SPaulo Zanoni intel_finish_page_flip_plane(dev, i); 13719719fb98SPaulo Zanoni } 13729719fb98SPaulo Zanoni } 13739719fb98SPaulo Zanoni 13749719fb98SPaulo Zanoni /* check event from PCH */ 13759719fb98SPaulo Zanoni if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) { 13769719fb98SPaulo Zanoni u32 pch_iir = I915_READ(SDEIIR); 13779719fb98SPaulo Zanoni 13789719fb98SPaulo Zanoni cpt_irq_handler(dev, pch_iir); 13799719fb98SPaulo Zanoni 13809719fb98SPaulo Zanoni /* clear PCH hotplug event before clear CPU irq */ 13819719fb98SPaulo Zanoni I915_WRITE(SDEIIR, pch_iir); 13829719fb98SPaulo Zanoni } 13839719fb98SPaulo Zanoni } 13849719fb98SPaulo Zanoni 1385f1af8fc1SPaulo Zanoni static irqreturn_t ironlake_irq_handler(int irq, void *arg) 1386b1f14ad0SJesse Barnes { 1387b1f14ad0SJesse Barnes struct drm_device *dev = (struct drm_device *) arg; 1388b1f14ad0SJesse Barnes drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1389f1af8fc1SPaulo Zanoni u32 de_iir, gt_iir, de_ier, sde_ier = 0; 13900e43406bSChris Wilson irqreturn_t ret = IRQ_NONE; 1391333a8204SPaulo Zanoni bool err_int_reenable = false; 1392b1f14ad0SJesse Barnes 1393b1f14ad0SJesse Barnes atomic_inc(&dev_priv->irq_received); 1394b1f14ad0SJesse Barnes 13958664281bSPaulo Zanoni /* We get interrupts on unclaimed registers, so check for this before we 13968664281bSPaulo Zanoni * do any I915_{READ,WRITE}. 
*/ 1397907b28c5SChris Wilson intel_uncore_check_errors(dev); 13988664281bSPaulo Zanoni 1399b1f14ad0SJesse Barnes /* disable master interrupt before clearing iir */ 1400b1f14ad0SJesse Barnes de_ier = I915_READ(DEIER); 1401b1f14ad0SJesse Barnes I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); 140223a78516SPaulo Zanoni POSTING_READ(DEIER); 14030e43406bSChris Wilson 140444498aeaSPaulo Zanoni /* Disable south interrupts. We'll only write to SDEIIR once, so further 140544498aeaSPaulo Zanoni * interrupts will be stored on its back queue, and then we'll be 140644498aeaSPaulo Zanoni * able to process them after we restore SDEIER (as soon as we restore 140744498aeaSPaulo Zanoni * it, we'll get an interrupt if SDEIIR still has something to process 140844498aeaSPaulo Zanoni * due to its back queue). */ 1409ab5c608bSBen Widawsky if (!HAS_PCH_NOP(dev)) { 141044498aeaSPaulo Zanoni sde_ier = I915_READ(SDEIER); 141144498aeaSPaulo Zanoni I915_WRITE(SDEIER, 0); 141244498aeaSPaulo Zanoni POSTING_READ(SDEIER); 1413ab5c608bSBen Widawsky } 141444498aeaSPaulo Zanoni 14158664281bSPaulo Zanoni /* On Haswell, also mask ERR_INT because we don't want to risk 14168664281bSPaulo Zanoni * generating "unclaimed register" interrupts from inside the interrupt 14178664281bSPaulo Zanoni * handler. */ 14184bc9d430SDaniel Vetter if (IS_HASWELL(dev)) { 14194bc9d430SDaniel Vetter spin_lock(&dev_priv->irq_lock); 1420333a8204SPaulo Zanoni err_int_reenable = ~dev_priv->irq_mask & DE_ERR_INT_IVB; 1421333a8204SPaulo Zanoni if (err_int_reenable) 14228664281bSPaulo Zanoni ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB); 14234bc9d430SDaniel Vetter spin_unlock(&dev_priv->irq_lock); 14244bc9d430SDaniel Vetter } 14258664281bSPaulo Zanoni 14260e43406bSChris Wilson gt_iir = I915_READ(GTIIR); 14270e43406bSChris Wilson if (gt_iir) { 1428d8fc8a47SPaulo Zanoni if (INTEL_INFO(dev)->gen >= 6) 14290e43406bSChris Wilson snb_gt_irq_handler(dev, dev_priv, gt_iir); 1430d8fc8a47SPaulo Zanoni else 1431d8fc8a47SPaulo Zanoni ilk_gt_irq_handler(dev, dev_priv, gt_iir); 14320e43406bSChris Wilson I915_WRITE(GTIIR, gt_iir); 14330e43406bSChris Wilson ret = IRQ_HANDLED; 14340e43406bSChris Wilson } 1435b1f14ad0SJesse Barnes 1436b1f14ad0SJesse Barnes de_iir = I915_READ(DEIIR); 14370e43406bSChris Wilson if (de_iir) { 1438f1af8fc1SPaulo Zanoni if (INTEL_INFO(dev)->gen >= 7) 14399719fb98SPaulo Zanoni ivb_display_irq_handler(dev, de_iir); 1440f1af8fc1SPaulo Zanoni else 1441f1af8fc1SPaulo Zanoni ilk_display_irq_handler(dev, de_iir); 14420e43406bSChris Wilson I915_WRITE(DEIIR, de_iir); 14430e43406bSChris Wilson ret = IRQ_HANDLED; 14440e43406bSChris Wilson } 14450e43406bSChris Wilson 1446f1af8fc1SPaulo Zanoni if (INTEL_INFO(dev)->gen >= 6) { 1447f1af8fc1SPaulo Zanoni u32 pm_iir = I915_READ(GEN6_PMIIR); 14480e43406bSChris Wilson if (pm_iir) { 1449d0ecd7e2SDaniel Vetter gen6_rps_irq_handler(dev_priv, pm_iir); 1450b1f14ad0SJesse Barnes I915_WRITE(GEN6_PMIIR, pm_iir); 14510e43406bSChris Wilson ret = IRQ_HANDLED; 14520e43406bSChris Wilson } 1453f1af8fc1SPaulo Zanoni } 1454b1f14ad0SJesse Barnes 1455333a8204SPaulo Zanoni if (err_int_reenable) { 14564bc9d430SDaniel Vetter spin_lock(&dev_priv->irq_lock); 14574bc9d430SDaniel Vetter if (ivb_can_enable_err_int(dev)) 14588664281bSPaulo Zanoni ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB); 14594bc9d430SDaniel Vetter spin_unlock(&dev_priv->irq_lock); 14604bc9d430SDaniel Vetter } 14618664281bSPaulo Zanoni 1462b1f14ad0SJesse Barnes I915_WRITE(DEIER, de_ier); 1463b1f14ad0SJesse Barnes POSTING_READ(DEIER);
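/* With the identity registers serviced, the master display interrupt masked at the top of the handler has been re-enabled; the SDEIER restore that follows will immediately re-raise a PCH interrupt if SDEIIR latched anything in the meantime (see the back-queue comment above). */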
1464ab5c608bSBen Widawsky if (!HAS_PCH_NOP(dev)) { 146544498aeaSPaulo Zanoni I915_WRITE(SDEIER, sde_ier); 146644498aeaSPaulo Zanoni POSTING_READ(SDEIER); 1467ab5c608bSBen Widawsky } 1468b1f14ad0SJesse Barnes 1469b1f14ad0SJesse Barnes return ret; 1470b1f14ad0SJesse Barnes } 1471b1f14ad0SJesse Barnes 1472*17e1df07SDaniel Vetter static void i915_error_wake_up(struct drm_i915_private *dev_priv, 1473*17e1df07SDaniel Vetter bool reset_completed) 1474*17e1df07SDaniel Vetter { 1475*17e1df07SDaniel Vetter struct intel_ring_buffer *ring; 1476*17e1df07SDaniel Vetter int i; 1477*17e1df07SDaniel Vetter 1478*17e1df07SDaniel Vetter /* 1479*17e1df07SDaniel Vetter * Notify all waiters for GPU completion events that reset state has 1480*17e1df07SDaniel Vetter * been changed, and that they need to restart their wait after 1481*17e1df07SDaniel Vetter * checking for potential errors (and bail out to drop locks if there is 1482*17e1df07SDaniel Vetter * a gpu reset pending so that i915_error_work_func can acquire them). 1483*17e1df07SDaniel Vetter */ 1484*17e1df07SDaniel Vetter 1485*17e1df07SDaniel Vetter /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */ 1486*17e1df07SDaniel Vetter for_each_ring(ring, dev_priv, i) 1487*17e1df07SDaniel Vetter wake_up_all(&ring->irq_queue); 1488*17e1df07SDaniel Vetter 1489*17e1df07SDaniel Vetter /* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */ 1490*17e1df07SDaniel Vetter wake_up_all(&dev_priv->pending_flip_queue); 1491*17e1df07SDaniel Vetter 1492*17e1df07SDaniel Vetter /* 1493*17e1df07SDaniel Vetter * Signal tasks blocked in i915_gem_wait_for_error that the pending 1494*17e1df07SDaniel Vetter * reset state is cleared. 1495*17e1df07SDaniel Vetter */ 1496*17e1df07SDaniel Vetter if (reset_completed) 1497*17e1df07SDaniel Vetter wake_up_all(&dev_priv->gpu_error.reset_queue); 1498*17e1df07SDaniel Vetter } 1499*17e1df07SDaniel Vetter 15008a905236SJesse Barnes /** 15018a905236SJesse Barnes * i915_error_work_func - do process context error handling work 15028a905236SJesse Barnes * @work: work struct 15038a905236SJesse Barnes * 15048a905236SJesse Barnes * Fire an error uevent so userspace can see that a hang or error 15058a905236SJesse Barnes * was detected. 15068a905236SJesse Barnes */ 15078a905236SJesse Barnes static void i915_error_work_func(struct work_struct *work) 15088a905236SJesse Barnes { 15091f83fee0SDaniel Vetter struct i915_gpu_error *error = container_of(work, struct i915_gpu_error, 15101f83fee0SDaniel Vetter work); 15111f83fee0SDaniel Vetter drm_i915_private_t *dev_priv = container_of(error, drm_i915_private_t, 15121f83fee0SDaniel Vetter gpu_error); 15138a905236SJesse Barnes struct drm_device *dev = dev_priv->dev; 1514cce723edSBen Widawsky char *error_event[] = { I915_ERROR_UEVENT "=1", NULL }; 1515cce723edSBen Widawsky char *reset_event[] = { I915_RESET_UEVENT "=1", NULL }; 1516cce723edSBen Widawsky char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL }; 1517*17e1df07SDaniel Vetter int ret; 15188a905236SJesse Barnes 1519f316a42cSBen Gamari kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event); 15208a905236SJesse Barnes 15217db0ba24SDaniel Vetter /* 15227db0ba24SDaniel Vetter * Note that there's only one work item which does gpu resets, so we 15237db0ba24SDaniel Vetter * need not worry about concurrent gpu resets potentially incrementing 15247db0ba24SDaniel Vetter * error->reset_counter twice. 
We only need to take care of another 15257db0ba24SDaniel Vetter * racing irq/hangcheck declaring the gpu dead for a second time. A 15267db0ba24SDaniel Vetter * quick check for that is good enough: schedule_work ensures the 15277db0ba24SDaniel Vetter * correct ordering between hang detection and this work item, and since 15287db0ba24SDaniel Vetter * the reset in-progress bit is only ever set by code outside of this 15297db0ba24SDaniel Vetter * work we don't need to worry about any other races. 15307db0ba24SDaniel Vetter */ 15317db0ba24SDaniel Vetter if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) { 153244d98a61SZhao Yakui DRM_DEBUG_DRIVER("resetting chip\n"); 15337db0ba24SDaniel Vetter kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, 15347db0ba24SDaniel Vetter reset_event); 15351f83fee0SDaniel Vetter 1536*17e1df07SDaniel Vetter /* 1537*17e1df07SDaniel Vetter * All state reset _must_ be completed before we update the 1538*17e1df07SDaniel Vetter * reset counter, for otherwise waiters might miss the reset 1539*17e1df07SDaniel Vetter * pending state and not properly drop locks, resulting in 1540*17e1df07SDaniel Vetter * deadlocks with the reset work. 1541*17e1df07SDaniel Vetter */ 1542f69061beSDaniel Vetter ret = i915_reset(dev); 1543f69061beSDaniel Vetter 1544*17e1df07SDaniel Vetter intel_display_handle_reset(dev); 1545*17e1df07SDaniel Vetter 1546f69061beSDaniel Vetter if (ret == 0) { 1547f69061beSDaniel Vetter /* 1548f69061beSDaniel Vetter * After all the gem state is reset, increment the reset 1549f69061beSDaniel Vetter * counter and wake up everyone waiting for the reset to 1550f69061beSDaniel Vetter * complete. 1551f69061beSDaniel Vetter * 1552f69061beSDaniel Vetter * Since unlock operations are a one-sided barrier only, 1553f69061beSDaniel Vetter * we need to insert a barrier here to order any seqno 1554f69061beSDaniel Vetter * updates before 1555f69061beSDaniel Vetter * the counter increment. 1556f69061beSDaniel Vetter */ 1557f69061beSDaniel Vetter smp_mb__before_atomic_inc(); 1558f69061beSDaniel Vetter atomic_inc(&dev_priv->gpu_error.reset_counter); 1559f69061beSDaniel Vetter 1560f69061beSDaniel Vetter kobject_uevent_env(&dev->primary->kdev.kobj, 1561f69061beSDaniel Vetter KOBJ_CHANGE, reset_done_event); 15621f83fee0SDaniel Vetter } else { 15631f83fee0SDaniel Vetter atomic_set(&error->reset_counter, I915_WEDGED); 1564f316a42cSBen Gamari } 15651f83fee0SDaniel Vetter 1566*17e1df07SDaniel Vetter /* 1567*17e1df07SDaniel Vetter * Note: The wake_up also serves as a memory barrier so that 1568*17e1df07SDaniel Vetter * waiters see the update value of the reset counter atomic_t. 
1569*17e1df07SDaniel Vetter */ 1570*17e1df07SDaniel Vetter i915_error_wake_up(dev_priv, true); 1571f316a42cSBen Gamari } 15728a905236SJesse Barnes } 15738a905236SJesse Barnes 157435aed2e6SChris Wilson static void i915_report_and_clear_eir(struct drm_device *dev) 1575c0e09200SDave Airlie { 15768a905236SJesse Barnes struct drm_i915_private *dev_priv = dev->dev_private; 1577bd9854f9SBen Widawsky uint32_t instdone[I915_NUM_INSTDONE_REG]; 157863eeaf38SJesse Barnes u32 eir = I915_READ(EIR); 1579050ee91fSBen Widawsky int pipe, i; 158063eeaf38SJesse Barnes 158135aed2e6SChris Wilson if (!eir) 158235aed2e6SChris Wilson return; 158363eeaf38SJesse Barnes 1584a70491ccSJoe Perches pr_err("render error detected, EIR: 0x%08x\n", eir); 15858a905236SJesse Barnes 1586bd9854f9SBen Widawsky i915_get_extra_instdone(dev, instdone); 1587bd9854f9SBen Widawsky 15888a905236SJesse Barnes if (IS_G4X(dev)) { 15898a905236SJesse Barnes if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) { 15908a905236SJesse Barnes u32 ipeir = I915_READ(IPEIR_I965); 15918a905236SJesse Barnes 1592a70491ccSJoe Perches pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); 1593a70491ccSJoe Perches pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); 1594050ee91fSBen Widawsky for (i = 0; i < ARRAY_SIZE(instdone); i++) 1595050ee91fSBen Widawsky pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); 1596a70491ccSJoe Perches pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); 1597a70491ccSJoe Perches pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); 15988a905236SJesse Barnes I915_WRITE(IPEIR_I965, ipeir); 15993143a2bfSChris Wilson POSTING_READ(IPEIR_I965); 16008a905236SJesse Barnes } 16018a905236SJesse Barnes if (eir & GM45_ERROR_PAGE_TABLE) { 16028a905236SJesse Barnes u32 pgtbl_err = I915_READ(PGTBL_ER); 1603a70491ccSJoe Perches pr_err("page table error\n"); 1604a70491ccSJoe Perches pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err); 16058a905236SJesse Barnes I915_WRITE(PGTBL_ER, pgtbl_err); 16063143a2bfSChris Wilson POSTING_READ(PGTBL_ER); 16078a905236SJesse Barnes } 16088a905236SJesse Barnes } 16098a905236SJesse Barnes 1610a6c45cf0SChris Wilson if (!IS_GEN2(dev)) { 161163eeaf38SJesse Barnes if (eir & I915_ERROR_PAGE_TABLE) { 161263eeaf38SJesse Barnes u32 pgtbl_err = I915_READ(PGTBL_ER); 1613a70491ccSJoe Perches pr_err("page table error\n"); 1614a70491ccSJoe Perches pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err); 161563eeaf38SJesse Barnes I915_WRITE(PGTBL_ER, pgtbl_err); 16163143a2bfSChris Wilson POSTING_READ(PGTBL_ER); 161763eeaf38SJesse Barnes } 16188a905236SJesse Barnes } 16198a905236SJesse Barnes 162063eeaf38SJesse Barnes if (eir & I915_ERROR_MEMORY_REFRESH) { 1621a70491ccSJoe Perches pr_err("memory refresh error:\n"); 16229db4a9c7SJesse Barnes for_each_pipe(pipe) 1623a70491ccSJoe Perches pr_err("pipe %c stat: 0x%08x\n", 16249db4a9c7SJesse Barnes pipe_name(pipe), I915_READ(PIPESTAT(pipe))); 162563eeaf38SJesse Barnes /* pipestat has already been acked */ 162663eeaf38SJesse Barnes } 162763eeaf38SJesse Barnes if (eir & I915_ERROR_INSTRUCTION) { 1628a70491ccSJoe Perches pr_err("instruction error\n"); 1629a70491ccSJoe Perches pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM)); 1630050ee91fSBen Widawsky for (i = 0; i < ARRAY_SIZE(instdone); i++) 1631050ee91fSBen Widawsky pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); 1632a6c45cf0SChris Wilson if (INTEL_INFO(dev)->gen < 4) { 163363eeaf38SJesse Barnes u32 ipeir = I915_READ(IPEIR); 163463eeaf38SJesse Barnes 1635a70491ccSJoe Perches pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR)); 1636a70491ccSJoe Perches pr_err(" IPEHR: 
0x%08x\n", I915_READ(IPEHR)); 1637a70491ccSJoe Perches pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD)); 163863eeaf38SJesse Barnes I915_WRITE(IPEIR, ipeir); 16393143a2bfSChris Wilson POSTING_READ(IPEIR); 164063eeaf38SJesse Barnes } else { 164163eeaf38SJesse Barnes u32 ipeir = I915_READ(IPEIR_I965); 164263eeaf38SJesse Barnes 1643a70491ccSJoe Perches pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); 1644a70491ccSJoe Perches pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); 1645a70491ccSJoe Perches pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); 1646a70491ccSJoe Perches pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); 164763eeaf38SJesse Barnes I915_WRITE(IPEIR_I965, ipeir); 16483143a2bfSChris Wilson POSTING_READ(IPEIR_I965); 164963eeaf38SJesse Barnes } 165063eeaf38SJesse Barnes } 165163eeaf38SJesse Barnes 165263eeaf38SJesse Barnes I915_WRITE(EIR, eir); 16533143a2bfSChris Wilson POSTING_READ(EIR); 165463eeaf38SJesse Barnes eir = I915_READ(EIR); 165563eeaf38SJesse Barnes if (eir) { 165663eeaf38SJesse Barnes /* 165763eeaf38SJesse Barnes * some errors might have become stuck, 165863eeaf38SJesse Barnes * mask them. 165963eeaf38SJesse Barnes */ 166063eeaf38SJesse Barnes DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir); 166163eeaf38SJesse Barnes I915_WRITE(EMR, I915_READ(EMR) | eir); 166263eeaf38SJesse Barnes I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 166363eeaf38SJesse Barnes } 166435aed2e6SChris Wilson } 166535aed2e6SChris Wilson 166635aed2e6SChris Wilson /** 166735aed2e6SChris Wilson * i915_handle_error - handle an error interrupt 166835aed2e6SChris Wilson * @dev: drm device 166935aed2e6SChris Wilson * 167035aed2e6SChris Wilson * Do some basic checking of regsiter state at error interrupt time and 167135aed2e6SChris Wilson * dump it to the syslog. Also call i915_capture_error_state() to make 167235aed2e6SChris Wilson * sure we get a record and make it available in debugfs. Fire a uevent 167335aed2e6SChris Wilson * so userspace knows something bad happened (should trigger collection 167435aed2e6SChris Wilson * of a ring dump etc.). 167535aed2e6SChris Wilson */ 1676527f9e90SChris Wilson void i915_handle_error(struct drm_device *dev, bool wedged) 167735aed2e6SChris Wilson { 167835aed2e6SChris Wilson struct drm_i915_private *dev_priv = dev->dev_private; 167935aed2e6SChris Wilson 168035aed2e6SChris Wilson i915_capture_error_state(dev); 168135aed2e6SChris Wilson i915_report_and_clear_eir(dev); 16828a905236SJesse Barnes 1683ba1234d1SBen Gamari if (wedged) { 1684f69061beSDaniel Vetter atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG, 1685f69061beSDaniel Vetter &dev_priv->gpu_error.reset_counter); 1686ba1234d1SBen Gamari 168711ed50ecSBen Gamari /* 1688*17e1df07SDaniel Vetter * Wakeup waiting processes so that the reset work function 1689*17e1df07SDaniel Vetter * i915_error_work_func doesn't deadlock trying to grab various 1690*17e1df07SDaniel Vetter * locks. By bumping the reset counter first, the woken 1691*17e1df07SDaniel Vetter * processes will see a reset in progress and back off, 1692*17e1df07SDaniel Vetter * releasing their locks and then wait for the reset completion. 1693*17e1df07SDaniel Vetter * We must do this for _all_ gpu waiters that might hold locks 1694*17e1df07SDaniel Vetter * that the reset work needs to acquire. 1695*17e1df07SDaniel Vetter * 1696*17e1df07SDaniel Vetter * Note: The wake_up serves as the required memory barrier to 1697*17e1df07SDaniel Vetter * ensure that the waiters see the updated value of the reset 1698*17e1df07SDaniel Vetter * counter atomic_t. 
169911ed50ecSBen Gamari */ 1700*17e1df07SDaniel Vetter i915_error_wake_up(dev_priv, false); 170111ed50ecSBen Gamari } 170211ed50ecSBen Gamari 1703122f46baSDaniel Vetter /* 1704122f46baSDaniel Vetter * Our reset work can grab modeset locks (since it needs to reset the 1705122f46baSDaniel Vetter * state of outstanding pageflips). Hence it must not be run on our own 1706122f46baSDaniel Vetter * dev-priv->wq work queue for otherwise the flush_work in the pageflip 1707122f46baSDaniel Vetter * code will deadlock. 1708122f46baSDaniel Vetter */ 1709122f46baSDaniel Vetter schedule_work(&dev_priv->gpu_error.work); 17108a905236SJesse Barnes } 17118a905236SJesse Barnes 171221ad8330SVille Syrjälä static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe) 17134e5359cdSSimon Farnsworth { 17144e5359cdSSimon Farnsworth drm_i915_private_t *dev_priv = dev->dev_private; 17154e5359cdSSimon Farnsworth struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 17164e5359cdSSimon Farnsworth struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 171705394f39SChris Wilson struct drm_i915_gem_object *obj; 17184e5359cdSSimon Farnsworth struct intel_unpin_work *work; 17194e5359cdSSimon Farnsworth unsigned long flags; 17204e5359cdSSimon Farnsworth bool stall_detected; 17214e5359cdSSimon Farnsworth 17224e5359cdSSimon Farnsworth /* Ignore early vblank irqs */ 17234e5359cdSSimon Farnsworth if (intel_crtc == NULL) 17244e5359cdSSimon Farnsworth return; 17254e5359cdSSimon Farnsworth 17264e5359cdSSimon Farnsworth spin_lock_irqsave(&dev->event_lock, flags); 17274e5359cdSSimon Farnsworth work = intel_crtc->unpin_work; 17284e5359cdSSimon Farnsworth 1729e7d841caSChris Wilson if (work == NULL || 1730e7d841caSChris Wilson atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE || 1731e7d841caSChris Wilson !work->enable_stall_check) { 17324e5359cdSSimon Farnsworth /* Either the pending flip IRQ arrived, or we're too early.
Don't check */ 17334e5359cdSSimon Farnsworth spin_unlock_irqrestore(&dev->event_lock, flags); 17344e5359cdSSimon Farnsworth return; 17354e5359cdSSimon Farnsworth } 17364e5359cdSSimon Farnsworth 17374e5359cdSSimon Farnsworth /* Potential stall - if we see that the flip has happened, assume a missed interrupt */ 173805394f39SChris Wilson obj = work->pending_flip_obj; 1739a6c45cf0SChris Wilson if (INTEL_INFO(dev)->gen >= 4) { 17409db4a9c7SJesse Barnes int dspsurf = DSPSURF(intel_crtc->plane); 1741446f2545SArmin Reese stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) == 1742f343c5f6SBen Widawsky i915_gem_obj_ggtt_offset(obj); 17434e5359cdSSimon Farnsworth } else { 17449db4a9c7SJesse Barnes int dspaddr = DSPADDR(intel_crtc->plane); 1745f343c5f6SBen Widawsky stall_detected = I915_READ(dspaddr) == (i915_gem_obj_ggtt_offset(obj) + 174601f2c773SVille Syrjälä crtc->y * crtc->fb->pitches[0] + 17474e5359cdSSimon Farnsworth crtc->x * crtc->fb->bits_per_pixel/8); 17484e5359cdSSimon Farnsworth } 17494e5359cdSSimon Farnsworth 17504e5359cdSSimon Farnsworth spin_unlock_irqrestore(&dev->event_lock, flags); 17514e5359cdSSimon Farnsworth 17524e5359cdSSimon Farnsworth if (stall_detected) { 17534e5359cdSSimon Farnsworth DRM_DEBUG_DRIVER("Pageflip stall detected\n"); 17544e5359cdSSimon Farnsworth intel_prepare_page_flip(dev, intel_crtc->plane); 17554e5359cdSSimon Farnsworth } 17564e5359cdSSimon Farnsworth } 17574e5359cdSSimon Farnsworth 175842f52ef8SKeith Packard /* Called from drm generic code, passed 'crtc' which 175942f52ef8SKeith Packard * we use as a pipe index 176042f52ef8SKeith Packard */ 1761f71d4af4SJesse Barnes static int i915_enable_vblank(struct drm_device *dev, int pipe) 17620a3e67a4SJesse Barnes { 17630a3e67a4SJesse Barnes drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1764e9d21d7fSKeith Packard unsigned long irqflags; 176571e0ffa5SJesse Barnes 17665eddb70bSChris Wilson if (!i915_pipe_enabled(dev, pipe)) 176771e0ffa5SJesse Barnes return -EINVAL; 17680a3e67a4SJesse Barnes 17691ec14ad3SChris Wilson spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1770f796cf8fSJesse Barnes if (INTEL_INFO(dev)->gen >= 4) 17717c463586SKeith Packard i915_enable_pipestat(dev_priv, pipe, 17727c463586SKeith Packard PIPE_START_VBLANK_INTERRUPT_ENABLE); 17730a3e67a4SJesse Barnes else 17747c463586SKeith Packard i915_enable_pipestat(dev_priv, pipe, 17757c463586SKeith Packard PIPE_VBLANK_INTERRUPT_ENABLE); 17768692d00eSChris Wilson 17778692d00eSChris Wilson /* maintain vblank delivery even in deep C-states */ 17788692d00eSChris Wilson if (dev_priv->info->gen == 3) 17796b26c86dSDaniel Vetter I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS)); 17801ec14ad3SChris Wilson spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 17818692d00eSChris Wilson 17820a3e67a4SJesse Barnes return 0; 17830a3e67a4SJesse Barnes } 17840a3e67a4SJesse Barnes 1785f71d4af4SJesse Barnes static int ironlake_enable_vblank(struct drm_device *dev, int pipe) 1786f796cf8fSJesse Barnes { 1787f796cf8fSJesse Barnes drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1788f796cf8fSJesse Barnes unsigned long irqflags; 1789b518421fSPaulo Zanoni uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? 
DE_PIPE_VBLANK_IVB(pipe) : 1790b518421fSPaulo Zanoni DE_PIPE_VBLANK_ILK(pipe); 1791f796cf8fSJesse Barnes 1792f796cf8fSJesse Barnes if (!i915_pipe_enabled(dev, pipe)) 1793f796cf8fSJesse Barnes return -EINVAL; 1794f796cf8fSJesse Barnes 1795f796cf8fSJesse Barnes spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1796b518421fSPaulo Zanoni ironlake_enable_display_irq(dev_priv, bit); 1797b1f14ad0SJesse Barnes spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1798b1f14ad0SJesse Barnes 1799b1f14ad0SJesse Barnes return 0; 1800b1f14ad0SJesse Barnes } 1801b1f14ad0SJesse Barnes 18027e231dbeSJesse Barnes static int valleyview_enable_vblank(struct drm_device *dev, int pipe) 18037e231dbeSJesse Barnes { 18047e231dbeSJesse Barnes drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 18057e231dbeSJesse Barnes unsigned long irqflags; 180631acc7f5SJesse Barnes u32 imr; 18077e231dbeSJesse Barnes 18087e231dbeSJesse Barnes if (!i915_pipe_enabled(dev, pipe)) 18097e231dbeSJesse Barnes return -EINVAL; 18107e231dbeSJesse Barnes 18117e231dbeSJesse Barnes spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 18127e231dbeSJesse Barnes imr = I915_READ(VLV_IMR); 181331acc7f5SJesse Barnes if (pipe == 0) 18147e231dbeSJesse Barnes imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT; 181531acc7f5SJesse Barnes else 18167e231dbeSJesse Barnes imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; 18177e231dbeSJesse Barnes I915_WRITE(VLV_IMR, imr); 181831acc7f5SJesse Barnes i915_enable_pipestat(dev_priv, pipe, 181931acc7f5SJesse Barnes PIPE_START_VBLANK_INTERRUPT_ENABLE); 18207e231dbeSJesse Barnes spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 18217e231dbeSJesse Barnes 18227e231dbeSJesse Barnes return 0; 18237e231dbeSJesse Barnes } 18247e231dbeSJesse Barnes 182542f52ef8SKeith Packard /* Called from drm generic code, passed 'crtc' which 182642f52ef8SKeith Packard * we use as a pipe index 182742f52ef8SKeith Packard */ 1828f71d4af4SJesse Barnes static void i915_disable_vblank(struct drm_device *dev, int pipe) 18290a3e67a4SJesse Barnes { 18300a3e67a4SJesse Barnes drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1831e9d21d7fSKeith Packard unsigned long irqflags; 18320a3e67a4SJesse Barnes 18331ec14ad3SChris Wilson spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 18348692d00eSChris Wilson if (dev_priv->info->gen == 3) 18356b26c86dSDaniel Vetter I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS)); 18368692d00eSChris Wilson 18377c463586SKeith Packard i915_disable_pipestat(dev_priv, pipe, 18387c463586SKeith Packard PIPE_VBLANK_INTERRUPT_ENABLE | 18397c463586SKeith Packard PIPE_START_VBLANK_INTERRUPT_ENABLE); 18401ec14ad3SChris Wilson spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 18410a3e67a4SJesse Barnes } 18420a3e67a4SJesse Barnes 1843f71d4af4SJesse Barnes static void ironlake_disable_vblank(struct drm_device *dev, int pipe) 1844f796cf8fSJesse Barnes { 1845f796cf8fSJesse Barnes drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1846f796cf8fSJesse Barnes unsigned long irqflags; 1847b518421fSPaulo Zanoni uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? 
DE_PIPE_VBLANK_IVB(pipe) : 1848b518421fSPaulo Zanoni DE_PIPE_VBLANK_ILK(pipe); 1849f796cf8fSJesse Barnes 1850f796cf8fSJesse Barnes spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1851b518421fSPaulo Zanoni ironlake_disable_display_irq(dev_priv, bit); 1852b1f14ad0SJesse Barnes spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1853b1f14ad0SJesse Barnes } 1854b1f14ad0SJesse Barnes 18557e231dbeSJesse Barnes static void valleyview_disable_vblank(struct drm_device *dev, int pipe) 18567e231dbeSJesse Barnes { 18577e231dbeSJesse Barnes drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 18587e231dbeSJesse Barnes unsigned long irqflags; 185931acc7f5SJesse Barnes u32 imr; 18607e231dbeSJesse Barnes 18617e231dbeSJesse Barnes spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 186231acc7f5SJesse Barnes i915_disable_pipestat(dev_priv, pipe, 186331acc7f5SJesse Barnes PIPE_START_VBLANK_INTERRUPT_ENABLE); 18647e231dbeSJesse Barnes imr = I915_READ(VLV_IMR); 186531acc7f5SJesse Barnes if (pipe == 0) 18667e231dbeSJesse Barnes imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT; 186731acc7f5SJesse Barnes else 18687e231dbeSJesse Barnes imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; 18697e231dbeSJesse Barnes I915_WRITE(VLV_IMR, imr); 18707e231dbeSJesse Barnes spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 18717e231dbeSJesse Barnes } 18727e231dbeSJesse Barnes 1873893eead0SChris Wilson static u32 1874893eead0SChris Wilson ring_last_seqno(struct intel_ring_buffer *ring) 1875852835f3SZou Nan hai { 1876893eead0SChris Wilson return list_entry(ring->request_list.prev, 1877893eead0SChris Wilson struct drm_i915_gem_request, list)->seqno; 1878893eead0SChris Wilson } 1879893eead0SChris Wilson 18809107e9d2SChris Wilson static bool 18819107e9d2SChris Wilson ring_idle(struct intel_ring_buffer *ring, u32 seqno) 1882893eead0SChris Wilson { 18839107e9d2SChris Wilson return (list_empty(&ring->request_list) || 18849107e9d2SChris Wilson i915_seqno_passed(seqno, ring_last_seqno(ring))); 1885f65d9421SBen Gamari } 1886f65d9421SBen Gamari 18876274f212SChris Wilson static struct intel_ring_buffer * 18886274f212SChris Wilson semaphore_waits_for(struct intel_ring_buffer *ring, u32 *seqno) 1889a24a11e6SChris Wilson { 1890a24a11e6SChris Wilson struct drm_i915_private *dev_priv = ring->dev->dev_private; 18916274f212SChris Wilson u32 cmd, ipehr, acthd, acthd_min; 1892a24a11e6SChris Wilson 1893a24a11e6SChris Wilson ipehr = I915_READ(RING_IPEHR(ring->mmio_base)); 1894a24a11e6SChris Wilson if ((ipehr & ~(0x3 << 16)) != 1895a24a11e6SChris Wilson (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | MI_SEMAPHORE_REGISTER)) 18966274f212SChris Wilson return NULL; 1897a24a11e6SChris Wilson 1898a24a11e6SChris Wilson /* ACTHD is likely pointing to the dword after the actual command, 1899a24a11e6SChris Wilson * so scan backwards until we find the MBOX. 
1900a24a11e6SChris Wilson */ 19016274f212SChris Wilson acthd = intel_ring_get_active_head(ring) & HEAD_ADDR; 1902a24a11e6SChris Wilson acthd_min = max((int)acthd - 3 * 4, 0); 1903a24a11e6SChris Wilson do { 1904a24a11e6SChris Wilson cmd = ioread32(ring->virtual_start + acthd); 1905a24a11e6SChris Wilson if (cmd == ipehr) 1906a24a11e6SChris Wilson break; 1907a24a11e6SChris Wilson 1908a24a11e6SChris Wilson acthd -= 4; 1909a24a11e6SChris Wilson if (acthd < acthd_min) 19106274f212SChris Wilson return NULL; 1911a24a11e6SChris Wilson } while (1); 1912a24a11e6SChris Wilson 19136274f212SChris Wilson *seqno = ioread32(ring->virtual_start+acthd+4)+1; 19146274f212SChris Wilson return &dev_priv->ring[(ring->id + (((ipehr >> 17) & 1) + 1)) % 3]; 1915a24a11e6SChris Wilson } 1916a24a11e6SChris Wilson 19176274f212SChris Wilson static int semaphore_passed(struct intel_ring_buffer *ring) 19186274f212SChris Wilson { 19196274f212SChris Wilson struct drm_i915_private *dev_priv = ring->dev->dev_private; 19206274f212SChris Wilson struct intel_ring_buffer *signaller; 19216274f212SChris Wilson u32 seqno, ctl; 19226274f212SChris Wilson 19236274f212SChris Wilson ring->hangcheck.deadlock = true; 19246274f212SChris Wilson 19256274f212SChris Wilson signaller = semaphore_waits_for(ring, &seqno); 19266274f212SChris Wilson if (signaller == NULL || signaller->hangcheck.deadlock) 19276274f212SChris Wilson return -1; 19286274f212SChris Wilson 19296274f212SChris Wilson /* cursory check for an unkickable deadlock */ 19306274f212SChris Wilson ctl = I915_READ_CTL(signaller); 19316274f212SChris Wilson if (ctl & RING_WAIT_SEMAPHORE && semaphore_passed(signaller) < 0) 19326274f212SChris Wilson return -1; 19336274f212SChris Wilson 19346274f212SChris Wilson return i915_seqno_passed(signaller->get_seqno(signaller, false), seqno); 19356274f212SChris Wilson } 19366274f212SChris Wilson 19376274f212SChris Wilson static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv) 19386274f212SChris Wilson { 19396274f212SChris Wilson struct intel_ring_buffer *ring; 19406274f212SChris Wilson int i; 19416274f212SChris Wilson 19426274f212SChris Wilson for_each_ring(ring, dev_priv, i) 19436274f212SChris Wilson ring->hangcheck.deadlock = false; 19446274f212SChris Wilson } 19456274f212SChris Wilson 1946ad8beaeaSMika Kuoppala static enum intel_ring_hangcheck_action 1947ad8beaeaSMika Kuoppala ring_stuck(struct intel_ring_buffer *ring, u32 acthd) 19481ec14ad3SChris Wilson { 19491ec14ad3SChris Wilson struct drm_device *dev = ring->dev; 19501ec14ad3SChris Wilson struct drm_i915_private *dev_priv = dev->dev_private; 19519107e9d2SChris Wilson u32 tmp; 19529107e9d2SChris Wilson 19536274f212SChris Wilson if (ring->hangcheck.acthd != acthd) 1954f2f4d82fSJani Nikula return HANGCHECK_ACTIVE; 19556274f212SChris Wilson 19569107e9d2SChris Wilson if (IS_GEN2(dev)) 1957f2f4d82fSJani Nikula return HANGCHECK_HUNG; 19589107e9d2SChris Wilson 19599107e9d2SChris Wilson /* Is the chip hanging on a WAIT_FOR_EVENT? 19609107e9d2SChris Wilson * If so we can simply poke the RB_WAIT bit 19619107e9d2SChris Wilson * and break the hang. This should work on 19629107e9d2SChris Wilson * all but the second generation chipsets. 
19639107e9d2SChris Wilson */ 19649107e9d2SChris Wilson tmp = I915_READ_CTL(ring); 19651ec14ad3SChris Wilson if (tmp & RING_WAIT) { 19661ec14ad3SChris Wilson DRM_ERROR("Kicking stuck wait on %s\n", 19671ec14ad3SChris Wilson ring->name); 19681ec14ad3SChris Wilson I915_WRITE_CTL(ring, tmp); 1969f2f4d82fSJani Nikula return HANGCHECK_KICK; 19701ec14ad3SChris Wilson } 1971a24a11e6SChris Wilson 19726274f212SChris Wilson if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) { 19736274f212SChris Wilson switch (semaphore_passed(ring)) { 19746274f212SChris Wilson default: 1975f2f4d82fSJani Nikula return HANGCHECK_HUNG; 19766274f212SChris Wilson case 1: 1977a24a11e6SChris Wilson DRM_ERROR("Kicking stuck semaphore on %s\n", 1978a24a11e6SChris Wilson ring->name); 1979a24a11e6SChris Wilson I915_WRITE_CTL(ring, tmp); 1980f2f4d82fSJani Nikula return HANGCHECK_KICK; 19816274f212SChris Wilson case 0: 1982f2f4d82fSJani Nikula return HANGCHECK_WAIT; 19836274f212SChris Wilson } 19849107e9d2SChris Wilson } 19859107e9d2SChris Wilson 1986f2f4d82fSJani Nikula return HANGCHECK_HUNG; 1987a24a11e6SChris Wilson } 1988d1e61e7fSChris Wilson 1989f65d9421SBen Gamari /** 1990f65d9421SBen Gamari * This is called when the chip hasn't reported back with completed 199105407ff8SMika Kuoppala * batchbuffers in a long time. We keep track of per-ring seqno progress and 199205407ff8SMika Kuoppala * if there is no progress, the hangcheck score for that ring is increased. 199305407ff8SMika Kuoppala * Further, acthd is inspected to see if the ring is stuck. If the ring is 199405407ff8SMika Kuoppala * stuck we kick it. If we see no progress on three subsequent calls 199505407ff8SMika Kuoppala * we assume the chip is wedged and try to fix it by resetting the chip. 1996f65d9421SBen Gamari */ 1997a658b5d2SDamien Lespiau static void i915_hangcheck_elapsed(unsigned long data) 1998f65d9421SBen Gamari { 1999f65d9421SBen Gamari struct drm_device *dev = (struct drm_device *)data; 2000f65d9421SBen Gamari drm_i915_private_t *dev_priv = dev->dev_private; 2001b4519513SChris Wilson struct intel_ring_buffer *ring; 2002b4519513SChris Wilson int i; 200305407ff8SMika Kuoppala int busy_count = 0, rings_hung = 0; 20049107e9d2SChris Wilson bool stuck[I915_NUM_RINGS] = { 0 }; 20059107e9d2SChris Wilson #define BUSY 1 20069107e9d2SChris Wilson #define KICK 5 20079107e9d2SChris Wilson #define HUNG 20 20089107e9d2SChris Wilson #define FIRE 30 2009893eead0SChris Wilson 20103e0dc6b0SBen Widawsky if (!i915_enable_hangcheck) 20113e0dc6b0SBen Widawsky return; 20123e0dc6b0SBen Widawsky 2013b4519513SChris Wilson for_each_ring(ring, dev_priv, i) { 201405407ff8SMika Kuoppala u32 seqno, acthd; 20159107e9d2SChris Wilson bool busy = true; 2016b4519513SChris Wilson 20176274f212SChris Wilson semaphore_clear_deadlocks(dev_priv); 20186274f212SChris Wilson 201905407ff8SMika Kuoppala seqno = ring->get_seqno(ring, false); 202005407ff8SMika Kuoppala acthd = intel_ring_get_active_head(ring); 202105407ff8SMika Kuoppala 202205407ff8SMika Kuoppala if (ring->hangcheck.seqno == seqno) { 20239107e9d2SChris Wilson if (ring_idle(ring, seqno)) { 20249107e9d2SChris Wilson if (waitqueue_active(&ring->irq_queue)) { 20259107e9d2SChris Wilson /* Issue a wake-up to catch stuck h/w. */ 20269107e9d2SChris Wilson DRM_ERROR("Hangcheck timer elapsed...
%s idle\n", 20279107e9d2SChris Wilson ring->name); 20289107e9d2SChris Wilson wake_up_all(&ring->irq_queue); 20299107e9d2SChris Wilson ring->hangcheck.score += HUNG; 20309107e9d2SChris Wilson } else 20319107e9d2SChris Wilson busy = false; 203205407ff8SMika Kuoppala } else { 20336274f212SChris Wilson /* We always increment the hangcheck score 20346274f212SChris Wilson * if the ring is busy and still processing 20356274f212SChris Wilson * the same request, so that no single request 20366274f212SChris Wilson * can run indefinitely (such as a chain of 20376274f212SChris Wilson * batches). The only time we do not increment 20386274f212SChris Wilson * the hangcheck score on this ring, if this 20396274f212SChris Wilson * ring is in a legitimate wait for another 20406274f212SChris Wilson * ring. In that case the waiting ring is a 20416274f212SChris Wilson * victim and we want to be sure we catch the 20426274f212SChris Wilson * right culprit. Then every time we do kick 20436274f212SChris Wilson * the ring, add a small increment to the 20446274f212SChris Wilson * score so that we can catch a batch that is 20456274f212SChris Wilson * being repeatedly kicked and so responsible 20466274f212SChris Wilson * for stalling the machine. 20479107e9d2SChris Wilson */ 2048ad8beaeaSMika Kuoppala ring->hangcheck.action = ring_stuck(ring, 2049ad8beaeaSMika Kuoppala acthd); 2050ad8beaeaSMika Kuoppala 2051ad8beaeaSMika Kuoppala switch (ring->hangcheck.action) { 2052f2f4d82fSJani Nikula case HANGCHECK_WAIT: 20536274f212SChris Wilson break; 2054f2f4d82fSJani Nikula case HANGCHECK_ACTIVE: 2055ea04cb31SJani Nikula ring->hangcheck.score += BUSY; 20566274f212SChris Wilson break; 2057f2f4d82fSJani Nikula case HANGCHECK_KICK: 2058ea04cb31SJani Nikula ring->hangcheck.score += KICK; 20596274f212SChris Wilson break; 2060f2f4d82fSJani Nikula case HANGCHECK_HUNG: 2061ea04cb31SJani Nikula ring->hangcheck.score += HUNG; 20626274f212SChris Wilson stuck[i] = true; 20636274f212SChris Wilson break; 20646274f212SChris Wilson } 206505407ff8SMika Kuoppala } 20669107e9d2SChris Wilson } else { 20679107e9d2SChris Wilson /* Gradually reduce the count so that we catch DoS 20689107e9d2SChris Wilson * attempts across multiple batches. 20699107e9d2SChris Wilson */ 20709107e9d2SChris Wilson if (ring->hangcheck.score > 0) 20719107e9d2SChris Wilson ring->hangcheck.score--; 2072cbb465e7SChris Wilson } 2073f65d9421SBen Gamari 207405407ff8SMika Kuoppala ring->hangcheck.seqno = seqno; 207505407ff8SMika Kuoppala ring->hangcheck.acthd = acthd; 20769107e9d2SChris Wilson busy_count += busy; 207705407ff8SMika Kuoppala } 207805407ff8SMika Kuoppala 207905407ff8SMika Kuoppala for_each_ring(ring, dev_priv, i) { 20809107e9d2SChris Wilson if (ring->hangcheck.score > FIRE) { 2081b8d88d1dSDaniel Vetter DRM_INFO("%s on %s\n", 208205407ff8SMika Kuoppala stuck[i] ? 
"stuck" : "no progress", 2083a43adf07SChris Wilson ring->name); 2084a43adf07SChris Wilson rings_hung++; 208505407ff8SMika Kuoppala } 208605407ff8SMika Kuoppala } 208705407ff8SMika Kuoppala 208805407ff8SMika Kuoppala if (rings_hung) 208905407ff8SMika Kuoppala return i915_handle_error(dev, true); 209005407ff8SMika Kuoppala 209105407ff8SMika Kuoppala if (busy_count) 209205407ff8SMika Kuoppala /* Reset timer case chip hangs without another request 209305407ff8SMika Kuoppala * being added */ 209410cd45b6SMika Kuoppala i915_queue_hangcheck(dev); 209510cd45b6SMika Kuoppala } 209610cd45b6SMika Kuoppala 209710cd45b6SMika Kuoppala void i915_queue_hangcheck(struct drm_device *dev) 209810cd45b6SMika Kuoppala { 209910cd45b6SMika Kuoppala struct drm_i915_private *dev_priv = dev->dev_private; 210010cd45b6SMika Kuoppala if (!i915_enable_hangcheck) 210110cd45b6SMika Kuoppala return; 210210cd45b6SMika Kuoppala 210399584db3SDaniel Vetter mod_timer(&dev_priv->gpu_error.hangcheck_timer, 210410cd45b6SMika Kuoppala round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES)); 2105f65d9421SBen Gamari } 2106f65d9421SBen Gamari 210791738a95SPaulo Zanoni static void ibx_irq_preinstall(struct drm_device *dev) 210891738a95SPaulo Zanoni { 210991738a95SPaulo Zanoni struct drm_i915_private *dev_priv = dev->dev_private; 211091738a95SPaulo Zanoni 211191738a95SPaulo Zanoni if (HAS_PCH_NOP(dev)) 211291738a95SPaulo Zanoni return; 211391738a95SPaulo Zanoni 211491738a95SPaulo Zanoni /* south display irq */ 211591738a95SPaulo Zanoni I915_WRITE(SDEIMR, 0xffffffff); 211691738a95SPaulo Zanoni /* 211791738a95SPaulo Zanoni * SDEIER is also touched by the interrupt handler to work around missed 211891738a95SPaulo Zanoni * PCH interrupts. Hence we can't update it after the interrupt handler 211991738a95SPaulo Zanoni * is enabled - instead we unconditionally enable all PCH interrupt 212091738a95SPaulo Zanoni * sources here, but then only unmask them as needed with SDEIMR. 
212191738a95SPaulo Zanoni */ 212291738a95SPaulo Zanoni I915_WRITE(SDEIER, 0xffffffff); 212391738a95SPaulo Zanoni POSTING_READ(SDEIER); 212491738a95SPaulo Zanoni } 212591738a95SPaulo Zanoni 2126d18ea1b5SDaniel Vetter static void gen5_gt_irq_preinstall(struct drm_device *dev) 2127d18ea1b5SDaniel Vetter { 2128d18ea1b5SDaniel Vetter struct drm_i915_private *dev_priv = dev->dev_private; 2129d18ea1b5SDaniel Vetter 2130d18ea1b5SDaniel Vetter /* and GT */ 2131d18ea1b5SDaniel Vetter I915_WRITE(GTIMR, 0xffffffff); 2132d18ea1b5SDaniel Vetter I915_WRITE(GTIER, 0x0); 2133d18ea1b5SDaniel Vetter POSTING_READ(GTIER); 2134d18ea1b5SDaniel Vetter 2135d18ea1b5SDaniel Vetter if (INTEL_INFO(dev)->gen >= 6) { 2136d18ea1b5SDaniel Vetter /* and PM */ 2137d18ea1b5SDaniel Vetter I915_WRITE(GEN6_PMIMR, 0xffffffff); 2138d18ea1b5SDaniel Vetter I915_WRITE(GEN6_PMIER, 0x0); 2139d18ea1b5SDaniel Vetter POSTING_READ(GEN6_PMIER); 2140d18ea1b5SDaniel Vetter } 2141d18ea1b5SDaniel Vetter } 2142d18ea1b5SDaniel Vetter 2143c0e09200SDave Airlie /* drm_dma.h hooks 2144c0e09200SDave Airlie */ 2145f71d4af4SJesse Barnes static void ironlake_irq_preinstall(struct drm_device *dev) 2146036a4a7dSZhenyu Wang { 2147036a4a7dSZhenyu Wang drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2148036a4a7dSZhenyu Wang 21494697995bSJesse Barnes atomic_set(&dev_priv->irq_received, 0); 21504697995bSJesse Barnes 2151036a4a7dSZhenyu Wang I915_WRITE(HWSTAM, 0xeffe); 2152bdfcdb63SDaniel Vetter 2153036a4a7dSZhenyu Wang I915_WRITE(DEIMR, 0xffffffff); 2154036a4a7dSZhenyu Wang I915_WRITE(DEIER, 0x0); 21553143a2bfSChris Wilson POSTING_READ(DEIER); 2156036a4a7dSZhenyu Wang 2157d18ea1b5SDaniel Vetter gen5_gt_irq_preinstall(dev); 2158c650156aSZhenyu Wang 215991738a95SPaulo Zanoni ibx_irq_preinstall(dev); 21607d99163dSBen Widawsky } 21617d99163dSBen Widawsky 21627e231dbeSJesse Barnes static void valleyview_irq_preinstall(struct drm_device *dev) 21637e231dbeSJesse Barnes { 21647e231dbeSJesse Barnes drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 21657e231dbeSJesse Barnes int pipe; 21667e231dbeSJesse Barnes 21677e231dbeSJesse Barnes atomic_set(&dev_priv->irq_received, 0); 21687e231dbeSJesse Barnes 21697e231dbeSJesse Barnes /* VLV magic */ 21707e231dbeSJesse Barnes I915_WRITE(VLV_IMR, 0); 21717e231dbeSJesse Barnes I915_WRITE(RING_IMR(RENDER_RING_BASE), 0); 21727e231dbeSJesse Barnes I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0); 21737e231dbeSJesse Barnes I915_WRITE(RING_IMR(BLT_RING_BASE), 0); 21747e231dbeSJesse Barnes 21757e231dbeSJesse Barnes /* and GT */ 21767e231dbeSJesse Barnes I915_WRITE(GTIIR, I915_READ(GTIIR)); 21777e231dbeSJesse Barnes I915_WRITE(GTIIR, I915_READ(GTIIR)); 2178d18ea1b5SDaniel Vetter 2179d18ea1b5SDaniel Vetter gen5_gt_irq_preinstall(dev); 21807e231dbeSJesse Barnes 21817e231dbeSJesse Barnes I915_WRITE(DPINVGTT, 0xff); 21827e231dbeSJesse Barnes 21837e231dbeSJesse Barnes I915_WRITE(PORT_HOTPLUG_EN, 0); 21847e231dbeSJesse Barnes I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 21857e231dbeSJesse Barnes for_each_pipe(pipe) 21867e231dbeSJesse Barnes I915_WRITE(PIPESTAT(pipe), 0xffff); 21877e231dbeSJesse Barnes I915_WRITE(VLV_IIR, 0xffffffff); 21887e231dbeSJesse Barnes I915_WRITE(VLV_IMR, 0xffffffff); 21897e231dbeSJesse Barnes I915_WRITE(VLV_IER, 0x0); 21907e231dbeSJesse Barnes POSTING_READ(VLV_IER); 21917e231dbeSJesse Barnes } 21927e231dbeSJesse Barnes 219382a28bcfSDaniel Vetter static void ibx_hpd_irq_setup(struct drm_device *dev) 219482a28bcfSDaniel Vetter { 219582a28bcfSDaniel Vetter 
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 219682a28bcfSDaniel Vetter struct drm_mode_config *mode_config = &dev->mode_config; 219782a28bcfSDaniel Vetter struct intel_encoder *intel_encoder; 2198fee884edSDaniel Vetter u32 hotplug_irqs, hotplug, enabled_irqs = 0; 219982a28bcfSDaniel Vetter 220082a28bcfSDaniel Vetter if (HAS_PCH_IBX(dev)) { 2201fee884edSDaniel Vetter hotplug_irqs = SDE_HOTPLUG_MASK; 220282a28bcfSDaniel Vetter list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head) 2203cd569aedSEgbert Eich if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) 2204fee884edSDaniel Vetter enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin]; 220582a28bcfSDaniel Vetter } else { 2206fee884edSDaniel Vetter hotplug_irqs = SDE_HOTPLUG_MASK_CPT; 220782a28bcfSDaniel Vetter list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head) 2208cd569aedSEgbert Eich if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) 2209fee884edSDaniel Vetter enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin]; 221082a28bcfSDaniel Vetter } 221182a28bcfSDaniel Vetter 2212fee884edSDaniel Vetter ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 221382a28bcfSDaniel Vetter 22147fe0b973SKeith Packard /* 22157fe0b973SKeith Packard * Enable digital hotplug on the PCH, and configure the DP short pulse 22167fe0b973SKeith Packard * duration to 2ms (which is the minimum in the Display Port spec) 22177fe0b973SKeith Packard * 22187fe0b973SKeith Packard * This register is the same on all known PCH chips. 22197fe0b973SKeith Packard */ 22207fe0b973SKeith Packard hotplug = I915_READ(PCH_PORT_HOTPLUG); 22217fe0b973SKeith Packard hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK); 22227fe0b973SKeith Packard hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms; 22237fe0b973SKeith Packard hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms; 22247fe0b973SKeith Packard hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms; 22257fe0b973SKeith Packard I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 22267fe0b973SKeith Packard } 22277fe0b973SKeith Packard 2228d46da437SPaulo Zanoni static void ibx_irq_postinstall(struct drm_device *dev) 2229d46da437SPaulo Zanoni { 2230d46da437SPaulo Zanoni drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 223182a28bcfSDaniel Vetter u32 mask; 2232d46da437SPaulo Zanoni 2233692a04cfSDaniel Vetter if (HAS_PCH_NOP(dev)) 2234692a04cfSDaniel Vetter return; 2235692a04cfSDaniel Vetter 22368664281bSPaulo Zanoni if (HAS_PCH_IBX(dev)) { 22378664281bSPaulo Zanoni mask = SDE_GMBUS | SDE_AUX_MASK | SDE_TRANSB_FIFO_UNDER | 2238de032bf4SPaulo Zanoni SDE_TRANSA_FIFO_UNDER | SDE_POISON; 22398664281bSPaulo Zanoni } else { 22408664281bSPaulo Zanoni mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT | SDE_ERROR_CPT; 22418664281bSPaulo Zanoni 22428664281bSPaulo Zanoni I915_WRITE(SERR_INT, I915_READ(SERR_INT)); 22438664281bSPaulo Zanoni } 2244ab5c608bSBen Widawsky 2245d46da437SPaulo Zanoni I915_WRITE(SDEIIR, I915_READ(SDEIIR)); 2246d46da437SPaulo Zanoni I915_WRITE(SDEIMR, ~mask); 2247d46da437SPaulo Zanoni } 2248d46da437SPaulo Zanoni 22490a9a8c91SDaniel Vetter static void gen5_gt_irq_postinstall(struct drm_device *dev) 22500a9a8c91SDaniel Vetter { 22510a9a8c91SDaniel Vetter struct drm_i915_private *dev_priv = dev->dev_private; 22520a9a8c91SDaniel Vetter u32 pm_irqs, gt_irqs; 22530a9a8c91SDaniel Vetter 22540a9a8c91SDaniel Vetter pm_irqs = gt_irqs = 0; 22550a9a8c91SDaniel Vetter 
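/* Two interrupt banks are programmed below: GTIMR/GTIER for the ring (GT) interrupts, and on gen6+ the GEN6_PMIMR/GEN6_PMIER pair for the PM interrupts (RPS events, plus the VEBOX user interrupt where present). */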
22560a9a8c91SDaniel Vetter dev_priv->gt_irq_mask = ~0; 22570a9a8c91SDaniel Vetter if (HAS_L3_GPU_CACHE(dev)) { 22580a9a8c91SDaniel Vetter /* L3 parity interrupt is always unmasked. */ 22590a9a8c91SDaniel Vetter dev_priv->gt_irq_mask = ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT; 22600a9a8c91SDaniel Vetter gt_irqs |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT; 22610a9a8c91SDaniel Vetter } 22620a9a8c91SDaniel Vetter 22630a9a8c91SDaniel Vetter gt_irqs |= GT_RENDER_USER_INTERRUPT; 22640a9a8c91SDaniel Vetter if (IS_GEN5(dev)) { 22650a9a8c91SDaniel Vetter gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT | 22660a9a8c91SDaniel Vetter ILK_BSD_USER_INTERRUPT; 22670a9a8c91SDaniel Vetter } else { 22680a9a8c91SDaniel Vetter gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT; 22690a9a8c91SDaniel Vetter } 22700a9a8c91SDaniel Vetter 22710a9a8c91SDaniel Vetter I915_WRITE(GTIIR, I915_READ(GTIIR)); 22720a9a8c91SDaniel Vetter I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 22730a9a8c91SDaniel Vetter I915_WRITE(GTIER, gt_irqs); 22740a9a8c91SDaniel Vetter POSTING_READ(GTIER); 22750a9a8c91SDaniel Vetter 22760a9a8c91SDaniel Vetter if (INTEL_INFO(dev)->gen >= 6) { 22770a9a8c91SDaniel Vetter pm_irqs |= GEN6_PM_RPS_EVENTS; 22780a9a8c91SDaniel Vetter 22790a9a8c91SDaniel Vetter if (HAS_VEBOX(dev)) 22800a9a8c91SDaniel Vetter pm_irqs |= PM_VEBOX_USER_INTERRUPT; 22810a9a8c91SDaniel Vetter 2282605cd25bSPaulo Zanoni dev_priv->pm_irq_mask = 0xffffffff; 22830a9a8c91SDaniel Vetter I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR)); 2284605cd25bSPaulo Zanoni I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask); 22850a9a8c91SDaniel Vetter I915_WRITE(GEN6_PMIER, pm_irqs); 22860a9a8c91SDaniel Vetter POSTING_READ(GEN6_PMIER); 22870a9a8c91SDaniel Vetter } 22880a9a8c91SDaniel Vetter } 22890a9a8c91SDaniel Vetter 2290f71d4af4SJesse Barnes static int ironlake_irq_postinstall(struct drm_device *dev) 2291036a4a7dSZhenyu Wang { 22924bc9d430SDaniel Vetter unsigned long irqflags; 2293036a4a7dSZhenyu Wang drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 22948e76f8dcSPaulo Zanoni u32 display_mask, extra_mask; 22958e76f8dcSPaulo Zanoni 22968e76f8dcSPaulo Zanoni if (INTEL_INFO(dev)->gen >= 7) { 22978e76f8dcSPaulo Zanoni display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | 22988e76f8dcSPaulo Zanoni DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB | 22998e76f8dcSPaulo Zanoni DE_PLANEB_FLIP_DONE_IVB | 23008e76f8dcSPaulo Zanoni DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB | 23018e76f8dcSPaulo Zanoni DE_ERR_INT_IVB); 23028e76f8dcSPaulo Zanoni extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB | 23038e76f8dcSPaulo Zanoni DE_PIPEA_VBLANK_IVB); 23048e76f8dcSPaulo Zanoni 23058e76f8dcSPaulo Zanoni I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT)); 23068e76f8dcSPaulo Zanoni } else { 23078e76f8dcSPaulo Zanoni display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | 2308ce99c256SDaniel Vetter DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE | 23098664281bSPaulo Zanoni DE_AUX_CHANNEL_A | DE_PIPEB_FIFO_UNDERRUN | 23108e76f8dcSPaulo Zanoni DE_PIPEA_FIFO_UNDERRUN | DE_POISON); 23118e76f8dcSPaulo Zanoni extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT; 23128e76f8dcSPaulo Zanoni } 2313036a4a7dSZhenyu Wang 23141ec14ad3SChris Wilson dev_priv->irq_mask = ~display_mask; 2315036a4a7dSZhenyu Wang 2316036a4a7dSZhenyu Wang /* should always be able to generate an irq */ 2317036a4a7dSZhenyu Wang I915_WRITE(DEIIR, I915_READ(DEIIR)); 23181ec14ad3SChris Wilson I915_WRITE(DEIMR, dev_priv->irq_mask); 23198e76f8dcSPaulo Zanoni I915_WRITE(DEIER, display_mask | extra_mask);
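	/* posting read flushes the DEIMR/DEIER writes before GT and PCH setup */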
23203143a2bfSChris Wilson POSTING_READ(DEIER); 2321036a4a7dSZhenyu Wang 23220a9a8c91SDaniel Vetter gen5_gt_irq_postinstall(dev); 2323036a4a7dSZhenyu Wang 2324d46da437SPaulo Zanoni ibx_irq_postinstall(dev); 23257fe0b973SKeith Packard 2326f97108d1SJesse Barnes if (IS_IRONLAKE_M(dev)) { 23276005ce42SDaniel Vetter /* Enable PCU event interrupts 23286005ce42SDaniel Vetter * 23296005ce42SDaniel Vetter * spinlocking not required here for correctness since interrupt 23304bc9d430SDaniel Vetter * setup is guaranteed to run in single-threaded context. But we 23314bc9d430SDaniel Vetter * need it to make the assert_spin_locked happy. */ 23324bc9d430SDaniel Vetter spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2333f97108d1SJesse Barnes ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT); 23344bc9d430SDaniel Vetter spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2335f97108d1SJesse Barnes } 2336f97108d1SJesse Barnes 2337036a4a7dSZhenyu Wang return 0; 2338036a4a7dSZhenyu Wang } 2339036a4a7dSZhenyu Wang 23407e231dbeSJesse Barnes static int valleyview_irq_postinstall(struct drm_device *dev) 23417e231dbeSJesse Barnes { 23427e231dbeSJesse Barnes drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 23437e231dbeSJesse Barnes u32 enable_mask; 234431acc7f5SJesse Barnes u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV; 2345b79480baSDaniel Vetter unsigned long irqflags; 23467e231dbeSJesse Barnes 23477e231dbeSJesse Barnes enable_mask = I915_DISPLAY_PORT_INTERRUPT; 234831acc7f5SJesse Barnes enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 234931acc7f5SJesse Barnes I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT | 235031acc7f5SJesse Barnes I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 23517e231dbeSJesse Barnes I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; 23527e231dbeSJesse Barnes 235331acc7f5SJesse Barnes /* 235431acc7f5SJesse Barnes *Leave vblank interrupts masked initially. enable/disable will 235531acc7f5SJesse Barnes * toggle them based on usage. 235631acc7f5SJesse Barnes */ 235731acc7f5SJesse Barnes dev_priv->irq_mask = (~enable_mask) | 235831acc7f5SJesse Barnes I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT | 235931acc7f5SJesse Barnes I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; 23607e231dbeSJesse Barnes 236120afbda2SDaniel Vetter I915_WRITE(PORT_HOTPLUG_EN, 0); 236220afbda2SDaniel Vetter POSTING_READ(PORT_HOTPLUG_EN); 236320afbda2SDaniel Vetter 23647e231dbeSJesse Barnes I915_WRITE(VLV_IMR, dev_priv->irq_mask); 23657e231dbeSJesse Barnes I915_WRITE(VLV_IER, enable_mask); 23667e231dbeSJesse Barnes I915_WRITE(VLV_IIR, 0xffffffff); 23677e231dbeSJesse Barnes I915_WRITE(PIPESTAT(0), 0xffff); 23687e231dbeSJesse Barnes I915_WRITE(PIPESTAT(1), 0xffff); 23697e231dbeSJesse Barnes POSTING_READ(VLV_IER); 23707e231dbeSJesse Barnes 2371b79480baSDaniel Vetter /* Interrupt setup is already guaranteed to be single-threaded, this is 2372b79480baSDaniel Vetter * just to make the assert_spin_locked check happy. 
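	 * i915_enable_pipestat() asserts that irq_lock is held, so take it
	 * here even though nothing else can race with this code yet.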
*/ 2373b79480baSDaniel Vetter spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 237431acc7f5SJesse Barnes i915_enable_pipestat(dev_priv, 0, pipestat_enable); 2375515ac2bbSDaniel Vetter i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE); 237631acc7f5SJesse Barnes i915_enable_pipestat(dev_priv, 1, pipestat_enable); 2377b79480baSDaniel Vetter spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 237831acc7f5SJesse Barnes 23797e231dbeSJesse Barnes I915_WRITE(VLV_IIR, 0xffffffff); 23807e231dbeSJesse Barnes I915_WRITE(VLV_IIR, 0xffffffff); 23817e231dbeSJesse Barnes 23820a9a8c91SDaniel Vetter gen5_gt_irq_postinstall(dev); 23837e231dbeSJesse Barnes 23847e231dbeSJesse Barnes /* ack & enable invalid PTE error interrupts */ 23857e231dbeSJesse Barnes #if 0 /* FIXME: add support to irq handler for checking these bits */ 23867e231dbeSJesse Barnes I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK); 23877e231dbeSJesse Barnes I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK); 23887e231dbeSJesse Barnes #endif 23897e231dbeSJesse Barnes 23907e231dbeSJesse Barnes I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); 239120afbda2SDaniel Vetter 239220afbda2SDaniel Vetter return 0; 239320afbda2SDaniel Vetter } 239420afbda2SDaniel Vetter 23957e231dbeSJesse Barnes static void valleyview_irq_uninstall(struct drm_device *dev) 23967e231dbeSJesse Barnes { 23977e231dbeSJesse Barnes drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 23987e231dbeSJesse Barnes int pipe; 23997e231dbeSJesse Barnes 24007e231dbeSJesse Barnes if (!dev_priv) 24017e231dbeSJesse Barnes return; 24027e231dbeSJesse Barnes 2403ac4c16c5SEgbert Eich del_timer_sync(&dev_priv->hotplug_reenable_timer); 2404ac4c16c5SEgbert Eich 24057e231dbeSJesse Barnes for_each_pipe(pipe) 24067e231dbeSJesse Barnes I915_WRITE(PIPESTAT(pipe), 0xffff); 24077e231dbeSJesse Barnes 24087e231dbeSJesse Barnes I915_WRITE(HWSTAM, 0xffffffff); 24097e231dbeSJesse Barnes I915_WRITE(PORT_HOTPLUG_EN, 0); 24107e231dbeSJesse Barnes I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 24117e231dbeSJesse Barnes for_each_pipe(pipe) 24127e231dbeSJesse Barnes I915_WRITE(PIPESTAT(pipe), 0xffff); 24137e231dbeSJesse Barnes I915_WRITE(VLV_IIR, 0xffffffff); 24147e231dbeSJesse Barnes I915_WRITE(VLV_IMR, 0xffffffff); 24157e231dbeSJesse Barnes I915_WRITE(VLV_IER, 0x0); 24167e231dbeSJesse Barnes POSTING_READ(VLV_IER); 24177e231dbeSJesse Barnes } 24187e231dbeSJesse Barnes 2419f71d4af4SJesse Barnes static void ironlake_irq_uninstall(struct drm_device *dev) 2420036a4a7dSZhenyu Wang { 2421036a4a7dSZhenyu Wang drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 24224697995bSJesse Barnes 24234697995bSJesse Barnes if (!dev_priv) 24244697995bSJesse Barnes return; 24254697995bSJesse Barnes 2426ac4c16c5SEgbert Eich del_timer_sync(&dev_priv->hotplug_reenable_timer); 2427ac4c16c5SEgbert Eich 2428036a4a7dSZhenyu Wang I915_WRITE(HWSTAM, 0xffffffff); 2429036a4a7dSZhenyu Wang 2430036a4a7dSZhenyu Wang I915_WRITE(DEIMR, 0xffffffff); 2431036a4a7dSZhenyu Wang I915_WRITE(DEIER, 0x0); 2432036a4a7dSZhenyu Wang I915_WRITE(DEIIR, I915_READ(DEIIR)); 24338664281bSPaulo Zanoni if (IS_GEN7(dev)) 24348664281bSPaulo Zanoni I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT)); 2435036a4a7dSZhenyu Wang 2436036a4a7dSZhenyu Wang I915_WRITE(GTIMR, 0xffffffff); 2437036a4a7dSZhenyu Wang I915_WRITE(GTIER, 0x0); 2438036a4a7dSZhenyu Wang I915_WRITE(GTIIR, I915_READ(GTIIR)); 2439192aac1fSKeith Packard 2440ab5c608bSBen Widawsky if (HAS_PCH_NOP(dev)) 2441ab5c608bSBen Widawsky return; 2442ab5c608bSBen Widawsky 
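	/* Also mask, disable and ack the south (PCH) display interrupts. */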
2443192aac1fSKeith Packard I915_WRITE(SDEIMR, 0xffffffff); 2444192aac1fSKeith Packard I915_WRITE(SDEIER, 0x0); 2445192aac1fSKeith Packard I915_WRITE(SDEIIR, I915_READ(SDEIIR)); 24468664281bSPaulo Zanoni if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev)) 24478664281bSPaulo Zanoni I915_WRITE(SERR_INT, I915_READ(SERR_INT)); 2448036a4a7dSZhenyu Wang } 2449036a4a7dSZhenyu Wang 2450c2798b19SChris Wilson static void i8xx_irq_preinstall(struct drm_device * dev) 2451c2798b19SChris Wilson { 2452c2798b19SChris Wilson drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2453c2798b19SChris Wilson int pipe; 2454c2798b19SChris Wilson 2455c2798b19SChris Wilson atomic_set(&dev_priv->irq_received, 0); 2456c2798b19SChris Wilson 2457c2798b19SChris Wilson for_each_pipe(pipe) 2458c2798b19SChris Wilson I915_WRITE(PIPESTAT(pipe), 0); 2459c2798b19SChris Wilson I915_WRITE16(IMR, 0xffff); 2460c2798b19SChris Wilson I915_WRITE16(IER, 0x0); 2461c2798b19SChris Wilson POSTING_READ16(IER); 2462c2798b19SChris Wilson } 2463c2798b19SChris Wilson 2464c2798b19SChris Wilson static int i8xx_irq_postinstall(struct drm_device *dev) 2465c2798b19SChris Wilson { 2466c2798b19SChris Wilson drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2467c2798b19SChris Wilson 2468c2798b19SChris Wilson I915_WRITE16(EMR, 2469c2798b19SChris Wilson ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); 2470c2798b19SChris Wilson 2471c2798b19SChris Wilson /* Unmask the interrupts that we always want on. */ 2472c2798b19SChris Wilson dev_priv->irq_mask = 2473c2798b19SChris Wilson ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 2474c2798b19SChris Wilson I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 2475c2798b19SChris Wilson I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 2476c2798b19SChris Wilson I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | 2477c2798b19SChris Wilson I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 2478c2798b19SChris Wilson I915_WRITE16(IMR, dev_priv->irq_mask); 2479c2798b19SChris Wilson 2480c2798b19SChris Wilson I915_WRITE16(IER, 2481c2798b19SChris Wilson I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 2482c2798b19SChris Wilson I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 2483c2798b19SChris Wilson I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT | 2484c2798b19SChris Wilson I915_USER_INTERRUPT); 2485c2798b19SChris Wilson POSTING_READ16(IER); 2486c2798b19SChris Wilson 2487c2798b19SChris Wilson return 0; 2488c2798b19SChris Wilson } 2489c2798b19SChris Wilson 249090a72f87SVille Syrjälä /* 249190a72f87SVille Syrjälä * Returns true when a page flip has completed. 249290a72f87SVille Syrjälä */ 249390a72f87SVille Syrjälä static bool i8xx_handle_vblank(struct drm_device *dev, 249490a72f87SVille Syrjälä int pipe, u16 iir) 249590a72f87SVille Syrjälä { 249690a72f87SVille Syrjälä drm_i915_private_t *dev_priv = dev->dev_private; 249790a72f87SVille Syrjälä u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(pipe); 249890a72f87SVille Syrjälä 249990a72f87SVille Syrjälä if (!drm_handle_vblank(dev, pipe)) 250090a72f87SVille Syrjälä return false; 250190a72f87SVille Syrjälä 250290a72f87SVille Syrjälä if ((iir & flip_pending) == 0) 250390a72f87SVille Syrjälä return false; 250490a72f87SVille Syrjälä 250590a72f87SVille Syrjälä intel_prepare_page_flip(dev, pipe); 250690a72f87SVille Syrjälä 250790a72f87SVille Syrjälä /* We detect FlipDone by looking for the change in PendingFlip from '1' 250890a72f87SVille Syrjälä * to '0' on the following vblank, i.e. 
IIR has the Pendingflip 250990a72f87SVille Syrjälä * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence 251090a72f87SVille Syrjälä * the flip is completed (no longer pending). Since this doesn't raise 251190a72f87SVille Syrjälä * an interrupt per se, we watch for the change at vblank. 251290a72f87SVille Syrjälä */ 251390a72f87SVille Syrjälä if (I915_READ16(ISR) & flip_pending) 251490a72f87SVille Syrjälä return false; 251590a72f87SVille Syrjälä 251690a72f87SVille Syrjälä intel_finish_page_flip(dev, pipe); 251790a72f87SVille Syrjälä 251890a72f87SVille Syrjälä return true; 251990a72f87SVille Syrjälä } 252090a72f87SVille Syrjälä 2521ff1f525eSDaniel Vetter static irqreturn_t i8xx_irq_handler(int irq, void *arg) 2522c2798b19SChris Wilson { 2523c2798b19SChris Wilson struct drm_device *dev = (struct drm_device *) arg; 2524c2798b19SChris Wilson drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2525c2798b19SChris Wilson u16 iir, new_iir; 2526c2798b19SChris Wilson u32 pipe_stats[2]; 2527c2798b19SChris Wilson unsigned long irqflags; 2528c2798b19SChris Wilson int pipe; 2529c2798b19SChris Wilson u16 flip_mask = 2530c2798b19SChris Wilson I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 2531c2798b19SChris Wilson I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 2532c2798b19SChris Wilson 2533c2798b19SChris Wilson atomic_inc(&dev_priv->irq_received); 2534c2798b19SChris Wilson 2535c2798b19SChris Wilson iir = I915_READ16(IIR); 2536c2798b19SChris Wilson if (iir == 0) 2537c2798b19SChris Wilson return IRQ_NONE; 2538c2798b19SChris Wilson 2539c2798b19SChris Wilson while (iir & ~flip_mask) { 2540c2798b19SChris Wilson /* Can't rely on pipestat interrupt bit in iir as it might 2541c2798b19SChris Wilson * have been cleared after the pipestat interrupt was received. 2542c2798b19SChris Wilson * It doesn't set the bit in iir again, but it still produces 2543c2798b19SChris Wilson * interrupts (for non-MSI). 
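	 * So the PIPE*STAT registers are read and cleared under the irq
	 * lock below, before the handled IIR bits are written back.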
2544c2798b19SChris Wilson */ 2545c2798b19SChris Wilson spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2546c2798b19SChris Wilson if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 2547c2798b19SChris Wilson i915_handle_error(dev, false); 2548c2798b19SChris Wilson 2549c2798b19SChris Wilson for_each_pipe(pipe) { 2550c2798b19SChris Wilson int reg = PIPESTAT(pipe); 2551c2798b19SChris Wilson pipe_stats[pipe] = I915_READ(reg); 2552c2798b19SChris Wilson 2553c2798b19SChris Wilson /* 2554c2798b19SChris Wilson * Clear the PIPE*STAT regs before the IIR 2555c2798b19SChris Wilson */ 2556c2798b19SChris Wilson if (pipe_stats[pipe] & 0x8000ffff) { 2557c2798b19SChris Wilson if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 2558c2798b19SChris Wilson DRM_DEBUG_DRIVER("pipe %c underrun\n", 2559c2798b19SChris Wilson pipe_name(pipe)); 2560c2798b19SChris Wilson I915_WRITE(reg, pipe_stats[pipe]); 2561c2798b19SChris Wilson } 2562c2798b19SChris Wilson } 2563c2798b19SChris Wilson spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2564c2798b19SChris Wilson 2565c2798b19SChris Wilson I915_WRITE16(IIR, iir & ~flip_mask); 2566c2798b19SChris Wilson new_iir = I915_READ16(IIR); /* Flush posted writes */ 2567c2798b19SChris Wilson 2568d05c617eSDaniel Vetter i915_update_dri1_breadcrumb(dev); 2569c2798b19SChris Wilson 2570c2798b19SChris Wilson if (iir & I915_USER_INTERRUPT) 2571c2798b19SChris Wilson notify_ring(dev, &dev_priv->ring[RCS]); 2572c2798b19SChris Wilson 2573c2798b19SChris Wilson if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS && 257490a72f87SVille Syrjälä i8xx_handle_vblank(dev, 0, iir)) 257590a72f87SVille Syrjälä flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(0); 2576c2798b19SChris Wilson 2577c2798b19SChris Wilson if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS && 257890a72f87SVille Syrjälä i8xx_handle_vblank(dev, 1, iir)) 257990a72f87SVille Syrjälä flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(1); 2580c2798b19SChris Wilson 2581c2798b19SChris Wilson iir = new_iir; 2582c2798b19SChris Wilson } 2583c2798b19SChris Wilson 2584c2798b19SChris Wilson return IRQ_HANDLED; 2585c2798b19SChris Wilson } 2586c2798b19SChris Wilson 2587c2798b19SChris Wilson static void i8xx_irq_uninstall(struct drm_device * dev) 2588c2798b19SChris Wilson { 2589c2798b19SChris Wilson drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2590c2798b19SChris Wilson int pipe; 2591c2798b19SChris Wilson 2592c2798b19SChris Wilson for_each_pipe(pipe) { 2593c2798b19SChris Wilson /* Clear enable bits; then clear status bits */ 2594c2798b19SChris Wilson I915_WRITE(PIPESTAT(pipe), 0); 2595c2798b19SChris Wilson I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); 2596c2798b19SChris Wilson } 2597c2798b19SChris Wilson I915_WRITE16(IMR, 0xffff); 2598c2798b19SChris Wilson I915_WRITE16(IER, 0x0); 2599c2798b19SChris Wilson I915_WRITE16(IIR, I915_READ16(IIR)); 2600c2798b19SChris Wilson } 2601c2798b19SChris Wilson 2602a266c7d5SChris Wilson static void i915_irq_preinstall(struct drm_device * dev) 2603a266c7d5SChris Wilson { 2604a266c7d5SChris Wilson drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2605a266c7d5SChris Wilson int pipe; 2606a266c7d5SChris Wilson 2607a266c7d5SChris Wilson atomic_set(&dev_priv->irq_received, 0); 2608a266c7d5SChris Wilson 2609a266c7d5SChris Wilson if (I915_HAS_HOTPLUG(dev)) { 2610a266c7d5SChris Wilson I915_WRITE(PORT_HOTPLUG_EN, 0); 2611a266c7d5SChris Wilson I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 2612a266c7d5SChris Wilson } 2613a266c7d5SChris Wilson 261400d98ebdSChris Wilson 
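	/* mask status page updates and shut off all interrupt sources */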
I915_WRITE16(HWSTAM, 0xeffe); 2615a266c7d5SChris Wilson for_each_pipe(pipe) 2616a266c7d5SChris Wilson I915_WRITE(PIPESTAT(pipe), 0); 2617a266c7d5SChris Wilson I915_WRITE(IMR, 0xffffffff); 2618a266c7d5SChris Wilson I915_WRITE(IER, 0x0); 2619a266c7d5SChris Wilson POSTING_READ(IER); 2620a266c7d5SChris Wilson } 2621a266c7d5SChris Wilson 2622a266c7d5SChris Wilson static int i915_irq_postinstall(struct drm_device *dev) 2623a266c7d5SChris Wilson { 2624a266c7d5SChris Wilson drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 262538bde180SChris Wilson u32 enable_mask; 2626a266c7d5SChris Wilson 262738bde180SChris Wilson I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); 262838bde180SChris Wilson 262938bde180SChris Wilson /* Unmask the interrupts that we always want on. */ 263038bde180SChris Wilson dev_priv->irq_mask = 263138bde180SChris Wilson ~(I915_ASLE_INTERRUPT | 263238bde180SChris Wilson I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 263338bde180SChris Wilson I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 263438bde180SChris Wilson I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 263538bde180SChris Wilson I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | 263638bde180SChris Wilson I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 263738bde180SChris Wilson 263838bde180SChris Wilson enable_mask = 263938bde180SChris Wilson I915_ASLE_INTERRUPT | 264038bde180SChris Wilson I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 264138bde180SChris Wilson I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 264238bde180SChris Wilson I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT | 264338bde180SChris Wilson I915_USER_INTERRUPT; 264438bde180SChris Wilson 2645a266c7d5SChris Wilson if (I915_HAS_HOTPLUG(dev)) { 264620afbda2SDaniel Vetter I915_WRITE(PORT_HOTPLUG_EN, 0); 264720afbda2SDaniel Vetter POSTING_READ(PORT_HOTPLUG_EN); 264820afbda2SDaniel Vetter 2649a266c7d5SChris Wilson /* Enable in IER... */ 2650a266c7d5SChris Wilson enable_mask |= I915_DISPLAY_PORT_INTERRUPT; 2651a266c7d5SChris Wilson /* and unmask in IMR */ 2652a266c7d5SChris Wilson dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT; 2653a266c7d5SChris Wilson } 2654a266c7d5SChris Wilson 2655a266c7d5SChris Wilson I915_WRITE(IMR, dev_priv->irq_mask); 2656a266c7d5SChris Wilson I915_WRITE(IER, enable_mask); 2657a266c7d5SChris Wilson POSTING_READ(IER); 2658a266c7d5SChris Wilson 2659f49e38ddSJani Nikula i915_enable_asle_pipestat(dev); 266020afbda2SDaniel Vetter 266120afbda2SDaniel Vetter return 0; 266220afbda2SDaniel Vetter } 266320afbda2SDaniel Vetter 266490a72f87SVille Syrjälä /* 266590a72f87SVille Syrjälä * Returns true when a page flip has completed. 266690a72f87SVille Syrjälä */ 266790a72f87SVille Syrjälä static bool i915_handle_vblank(struct drm_device *dev, 266890a72f87SVille Syrjälä int plane, int pipe, u32 iir) 266990a72f87SVille Syrjälä { 267090a72f87SVille Syrjälä drm_i915_private_t *dev_priv = dev->dev_private; 267190a72f87SVille Syrjälä u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane); 267290a72f87SVille Syrjälä 267390a72f87SVille Syrjälä if (!drm_handle_vblank(dev, pipe)) 267490a72f87SVille Syrjälä return false; 267590a72f87SVille Syrjälä 267690a72f87SVille Syrjälä if ((iir & flip_pending) == 0) 267790a72f87SVille Syrjälä return false; 267890a72f87SVille Syrjälä 267990a72f87SVille Syrjälä intel_prepare_page_flip(dev, plane); 268090a72f87SVille Syrjälä 268190a72f87SVille Syrjälä /* We detect FlipDone by looking for the change in PendingFlip from '1' 268290a72f87SVille Syrjälä * to '0' on the following vblank, i.e. 
IIR has the Pendingflip 268390a72f87SVille Syrjälä * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence 268490a72f87SVille Syrjälä * the flip is completed (no longer pending). Since this doesn't raise 268590a72f87SVille Syrjälä * an interrupt per se, we watch for the change at vblank. 268690a72f87SVille Syrjälä */ 268790a72f87SVille Syrjälä if (I915_READ(ISR) & flip_pending) 268890a72f87SVille Syrjälä return false; 268990a72f87SVille Syrjälä 269090a72f87SVille Syrjälä intel_finish_page_flip(dev, pipe); 269190a72f87SVille Syrjälä 269290a72f87SVille Syrjälä return true; 269390a72f87SVille Syrjälä } 269490a72f87SVille Syrjälä 2695ff1f525eSDaniel Vetter static irqreturn_t i915_irq_handler(int irq, void *arg) 2696a266c7d5SChris Wilson { 2697a266c7d5SChris Wilson struct drm_device *dev = (struct drm_device *) arg; 2698a266c7d5SChris Wilson drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 26998291ee90SChris Wilson u32 iir, new_iir, pipe_stats[I915_MAX_PIPES]; 2700a266c7d5SChris Wilson unsigned long irqflags; 270138bde180SChris Wilson u32 flip_mask = 270238bde180SChris Wilson I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 270338bde180SChris Wilson I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 270438bde180SChris Wilson int pipe, ret = IRQ_NONE; 2705a266c7d5SChris Wilson 2706a266c7d5SChris Wilson atomic_inc(&dev_priv->irq_received); 2707a266c7d5SChris Wilson 2708a266c7d5SChris Wilson iir = I915_READ(IIR); 270938bde180SChris Wilson do { 271038bde180SChris Wilson bool irq_received = (iir & ~flip_mask) != 0; 27118291ee90SChris Wilson bool blc_event = false; 2712a266c7d5SChris Wilson 2713a266c7d5SChris Wilson /* Can't rely on pipestat interrupt bit in iir as it might 2714a266c7d5SChris Wilson * have been cleared after the pipestat interrupt was received. 2715a266c7d5SChris Wilson * It doesn't set the bit in iir again, but it still produces 2716a266c7d5SChris Wilson * interrupts (for non-MSI). 2717a266c7d5SChris Wilson */ 2718a266c7d5SChris Wilson spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2719a266c7d5SChris Wilson if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 2720a266c7d5SChris Wilson i915_handle_error(dev, false); 2721a266c7d5SChris Wilson 2722a266c7d5SChris Wilson for_each_pipe(pipe) { 2723a266c7d5SChris Wilson int reg = PIPESTAT(pipe); 2724a266c7d5SChris Wilson pipe_stats[pipe] = I915_READ(reg); 2725a266c7d5SChris Wilson 272638bde180SChris Wilson /* Clear the PIPE*STAT regs before the IIR */ 2727a266c7d5SChris Wilson if (pipe_stats[pipe] & 0x8000ffff) { 2728a266c7d5SChris Wilson if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 2729a266c7d5SChris Wilson DRM_DEBUG_DRIVER("pipe %c underrun\n", 2730a266c7d5SChris Wilson pipe_name(pipe)); 2731a266c7d5SChris Wilson I915_WRITE(reg, pipe_stats[pipe]); 273238bde180SChris Wilson irq_received = true; 2733a266c7d5SChris Wilson } 2734a266c7d5SChris Wilson } 2735a266c7d5SChris Wilson spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2736a266c7d5SChris Wilson 2737a266c7d5SChris Wilson if (!irq_received) 2738a266c7d5SChris Wilson break; 2739a266c7d5SChris Wilson 2740a266c7d5SChris Wilson /* Consume port. 
Then clear IIR or we'll miss events */ 2741a266c7d5SChris Wilson if ((I915_HAS_HOTPLUG(dev)) && 2742a266c7d5SChris Wilson (iir & I915_DISPLAY_PORT_INTERRUPT)) { 2743a266c7d5SChris Wilson u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); 2744b543fb04SEgbert Eich u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; 2745a266c7d5SChris Wilson 2746a266c7d5SChris Wilson DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", 2747a266c7d5SChris Wilson hotplug_status); 274891d131d2SDaniel Vetter 274910a504deSDaniel Vetter intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915); 275091d131d2SDaniel Vetter 2751a266c7d5SChris Wilson I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); 275238bde180SChris Wilson POSTING_READ(PORT_HOTPLUG_STAT); 2753a266c7d5SChris Wilson } 2754a266c7d5SChris Wilson 275538bde180SChris Wilson I915_WRITE(IIR, iir & ~flip_mask); 2756a266c7d5SChris Wilson new_iir = I915_READ(IIR); /* Flush posted writes */ 2757a266c7d5SChris Wilson 2758a266c7d5SChris Wilson if (iir & I915_USER_INTERRUPT) 2759a266c7d5SChris Wilson notify_ring(dev, &dev_priv->ring[RCS]); 2760a266c7d5SChris Wilson 2761a266c7d5SChris Wilson for_each_pipe(pipe) { 276238bde180SChris Wilson int plane = pipe; 276338bde180SChris Wilson if (IS_MOBILE(dev)) 276438bde180SChris Wilson plane = !plane; 27655e2032d4SVille Syrjälä 276690a72f87SVille Syrjälä if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS && 276790a72f87SVille Syrjälä i915_handle_vblank(dev, plane, pipe, iir)) 276890a72f87SVille Syrjälä flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane); 2769a266c7d5SChris Wilson 2770a266c7d5SChris Wilson if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 2771a266c7d5SChris Wilson blc_event = true; 2772a266c7d5SChris Wilson } 2773a266c7d5SChris Wilson 2774a266c7d5SChris Wilson if (blc_event || (iir & I915_ASLE_INTERRUPT)) 2775a266c7d5SChris Wilson intel_opregion_asle_intr(dev); 2776a266c7d5SChris Wilson 2777a266c7d5SChris Wilson /* With MSI, interrupts are only generated when iir 2778a266c7d5SChris Wilson * transitions from zero to nonzero. If another bit got 2779a266c7d5SChris Wilson * set while we were handling the existing iir bits, then 2780a266c7d5SChris Wilson * we would never get another interrupt. 2781a266c7d5SChris Wilson * 2782a266c7d5SChris Wilson * This is fine on non-MSI as well, as if we hit this path 2783a266c7d5SChris Wilson * we avoid exiting the interrupt handler only to generate 2784a266c7d5SChris Wilson * another one. 2785a266c7d5SChris Wilson * 2786a266c7d5SChris Wilson * Note that for MSI this could cause a stray interrupt report 2787a266c7d5SChris Wilson * if an interrupt landed in the time between writing IIR and 2788a266c7d5SChris Wilson * the posting read. This should be rare enough to never 2789a266c7d5SChris Wilson * trigger the 99% of 100,000 interrupts test for disabling 2790a266c7d5SChris Wilson * stray interrupts. 
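	 * (The 100,000-interrupt spurious check is performed by the core
	 * IRQ code's note_interrupt() accounting, not by this driver.)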
2791a266c7d5SChris Wilson */ 279238bde180SChris Wilson ret = IRQ_HANDLED; 2793a266c7d5SChris Wilson iir = new_iir; 279438bde180SChris Wilson } while (iir & ~flip_mask); 2795a266c7d5SChris Wilson 2796d05c617eSDaniel Vetter i915_update_dri1_breadcrumb(dev); 27978291ee90SChris Wilson 2798a266c7d5SChris Wilson return ret; 2799a266c7d5SChris Wilson } 2800a266c7d5SChris Wilson 2801a266c7d5SChris Wilson static void i915_irq_uninstall(struct drm_device * dev) 2802a266c7d5SChris Wilson { 2803a266c7d5SChris Wilson drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2804a266c7d5SChris Wilson int pipe; 2805a266c7d5SChris Wilson 2806ac4c16c5SEgbert Eich del_timer_sync(&dev_priv->hotplug_reenable_timer); 2807ac4c16c5SEgbert Eich 2808a266c7d5SChris Wilson if (I915_HAS_HOTPLUG(dev)) { 2809a266c7d5SChris Wilson I915_WRITE(PORT_HOTPLUG_EN, 0); 2810a266c7d5SChris Wilson I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 2811a266c7d5SChris Wilson } 2812a266c7d5SChris Wilson 281300d98ebdSChris Wilson I915_WRITE16(HWSTAM, 0xffff); 281455b39755SChris Wilson for_each_pipe(pipe) { 281555b39755SChris Wilson /* Clear enable bits; then clear status bits */ 2816a266c7d5SChris Wilson I915_WRITE(PIPESTAT(pipe), 0); 281755b39755SChris Wilson I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); 281855b39755SChris Wilson } 2819a266c7d5SChris Wilson I915_WRITE(IMR, 0xffffffff); 2820a266c7d5SChris Wilson I915_WRITE(IER, 0x0); 2821a266c7d5SChris Wilson 2822a266c7d5SChris Wilson I915_WRITE(IIR, I915_READ(IIR)); 2823a266c7d5SChris Wilson } 2824a266c7d5SChris Wilson 2825a266c7d5SChris Wilson static void i965_irq_preinstall(struct drm_device * dev) 2826a266c7d5SChris Wilson { 2827a266c7d5SChris Wilson drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2828a266c7d5SChris Wilson int pipe; 2829a266c7d5SChris Wilson 2830a266c7d5SChris Wilson atomic_set(&dev_priv->irq_received, 0); 2831a266c7d5SChris Wilson 2832a266c7d5SChris Wilson I915_WRITE(PORT_HOTPLUG_EN, 0); 2833a266c7d5SChris Wilson I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 2834a266c7d5SChris Wilson 2835a266c7d5SChris Wilson I915_WRITE(HWSTAM, 0xeffe); 2836a266c7d5SChris Wilson for_each_pipe(pipe) 2837a266c7d5SChris Wilson I915_WRITE(PIPESTAT(pipe), 0); 2838a266c7d5SChris Wilson I915_WRITE(IMR, 0xffffffff); 2839a266c7d5SChris Wilson I915_WRITE(IER, 0x0); 2840a266c7d5SChris Wilson POSTING_READ(IER); 2841a266c7d5SChris Wilson } 2842a266c7d5SChris Wilson 2843a266c7d5SChris Wilson static int i965_irq_postinstall(struct drm_device *dev) 2844a266c7d5SChris Wilson { 2845a266c7d5SChris Wilson drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2846bbba0a97SChris Wilson u32 enable_mask; 2847a266c7d5SChris Wilson u32 error_mask; 2848b79480baSDaniel Vetter unsigned long irqflags; 2849a266c7d5SChris Wilson 2850a266c7d5SChris Wilson /* Unmask the interrupts that we always want on. 
*/ 2851bbba0a97SChris Wilson dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT | 2852adca4730SChris Wilson I915_DISPLAY_PORT_INTERRUPT | 2853bbba0a97SChris Wilson I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 2854bbba0a97SChris Wilson I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 2855bbba0a97SChris Wilson I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 2856bbba0a97SChris Wilson I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | 2857bbba0a97SChris Wilson I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 2858bbba0a97SChris Wilson 2859bbba0a97SChris Wilson enable_mask = ~dev_priv->irq_mask; 286021ad8330SVille Syrjälä enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 286121ad8330SVille Syrjälä I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT); 2862bbba0a97SChris Wilson enable_mask |= I915_USER_INTERRUPT; 2863bbba0a97SChris Wilson 2864bbba0a97SChris Wilson if (IS_G4X(dev)) 2865bbba0a97SChris Wilson enable_mask |= I915_BSD_USER_INTERRUPT; 2866a266c7d5SChris Wilson 2867b79480baSDaniel Vetter /* Interrupt setup is already guaranteed to be single-threaded, this is 2868b79480baSDaniel Vetter * just to make the assert_spin_locked check happy. */ 2869b79480baSDaniel Vetter spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2870515ac2bbSDaniel Vetter i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE); 2871b79480baSDaniel Vetter spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2872a266c7d5SChris Wilson 2873a266c7d5SChris Wilson /* 2874a266c7d5SChris Wilson * Enable some error detection, note the instruction error mask 2875a266c7d5SChris Wilson * bit is reserved, so we leave it masked. 2876a266c7d5SChris Wilson */ 2877a266c7d5SChris Wilson if (IS_G4X(dev)) { 2878a266c7d5SChris Wilson error_mask = ~(GM45_ERROR_PAGE_TABLE | 2879a266c7d5SChris Wilson GM45_ERROR_MEM_PRIV | 2880a266c7d5SChris Wilson GM45_ERROR_CP_PRIV | 2881a266c7d5SChris Wilson I915_ERROR_MEMORY_REFRESH); 2882a266c7d5SChris Wilson } else { 2883a266c7d5SChris Wilson error_mask = ~(I915_ERROR_PAGE_TABLE | 2884a266c7d5SChris Wilson I915_ERROR_MEMORY_REFRESH); 2885a266c7d5SChris Wilson } 2886a266c7d5SChris Wilson I915_WRITE(EMR, error_mask); 2887a266c7d5SChris Wilson 2888a266c7d5SChris Wilson I915_WRITE(IMR, dev_priv->irq_mask); 2889a266c7d5SChris Wilson I915_WRITE(IER, enable_mask); 2890a266c7d5SChris Wilson POSTING_READ(IER); 2891a266c7d5SChris Wilson 289220afbda2SDaniel Vetter I915_WRITE(PORT_HOTPLUG_EN, 0); 289320afbda2SDaniel Vetter POSTING_READ(PORT_HOTPLUG_EN); 289420afbda2SDaniel Vetter 2895f49e38ddSJani Nikula i915_enable_asle_pipestat(dev); 289620afbda2SDaniel Vetter 289720afbda2SDaniel Vetter return 0; 289820afbda2SDaniel Vetter } 289920afbda2SDaniel Vetter 2900bac56d5bSEgbert Eich static void i915_hpd_irq_setup(struct drm_device *dev) 290120afbda2SDaniel Vetter { 290220afbda2SDaniel Vetter drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2903e5868a31SEgbert Eich struct drm_mode_config *mode_config = &dev->mode_config; 2904cd569aedSEgbert Eich struct intel_encoder *intel_encoder; 290520afbda2SDaniel Vetter u32 hotplug_en; 290620afbda2SDaniel Vetter 2907b5ea2d56SDaniel Vetter assert_spin_locked(&dev_priv->irq_lock); 2908b5ea2d56SDaniel Vetter 2909bac56d5bSEgbert Eich if (I915_HAS_HOTPLUG(dev)) { 2910bac56d5bSEgbert Eich hotplug_en = I915_READ(PORT_HOTPLUG_EN); 2911bac56d5bSEgbert Eich hotplug_en &= ~HOTPLUG_INT_EN_MASK; 2912adca4730SChris Wilson /* Note HDMI and DP share hotplug bits */ 2913e5868a31SEgbert Eich /* enable bits are the same for all generations */ 2914cd569aedSEgbert Eich list_for_each_entry(intel_encoder, 
&mode_config->encoder_list, base.head) 2915cd569aedSEgbert Eich if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) 2916cd569aedSEgbert Eich hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin]; 2917a266c7d5SChris Wilson /* Programming the CRT detection parameters tends 2918a266c7d5SChris Wilson to generate a spurious hotplug event about three 2919a266c7d5SChris Wilson seconds later. So just do it once. 2920a266c7d5SChris Wilson */ 2921a266c7d5SChris Wilson if (IS_G4X(dev)) 2922a266c7d5SChris Wilson hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64; 292385fc95baSDaniel Vetter hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK; 2924a266c7d5SChris Wilson hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; 2925a266c7d5SChris Wilson 2926a266c7d5SChris Wilson /* Ignore TV since it's buggy */ 2927a266c7d5SChris Wilson I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); 2928a266c7d5SChris Wilson } 2929bac56d5bSEgbert Eich } 2930a266c7d5SChris Wilson 2931ff1f525eSDaniel Vetter static irqreturn_t i965_irq_handler(int irq, void *arg) 2932a266c7d5SChris Wilson { 2933a266c7d5SChris Wilson struct drm_device *dev = (struct drm_device *) arg; 2934a266c7d5SChris Wilson drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2935a266c7d5SChris Wilson u32 iir, new_iir; 2936a266c7d5SChris Wilson u32 pipe_stats[I915_MAX_PIPES]; 2937a266c7d5SChris Wilson unsigned long irqflags; 2938a266c7d5SChris Wilson int irq_received; 2939a266c7d5SChris Wilson int ret = IRQ_NONE, pipe; 294021ad8330SVille Syrjälä u32 flip_mask = 294121ad8330SVille Syrjälä I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 294221ad8330SVille Syrjälä I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 2943a266c7d5SChris Wilson 2944a266c7d5SChris Wilson atomic_inc(&dev_priv->irq_received); 2945a266c7d5SChris Wilson 2946a266c7d5SChris Wilson iir = I915_READ(IIR); 2947a266c7d5SChris Wilson 2948a266c7d5SChris Wilson for (;;) { 29492c8ba29fSChris Wilson bool blc_event = false; 29502c8ba29fSChris Wilson 295121ad8330SVille Syrjälä irq_received = (iir & ~flip_mask) != 0; 2952a266c7d5SChris Wilson 2953a266c7d5SChris Wilson /* Can't rely on pipestat interrupt bit in iir as it might 2954a266c7d5SChris Wilson * have been cleared after the pipestat interrupt was received. 2955a266c7d5SChris Wilson * It doesn't set the bit in iir again, but it still produces 2956a266c7d5SChris Wilson * interrupts (for non-MSI). 
2957a266c7d5SChris Wilson */ 2958a266c7d5SChris Wilson spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2959a266c7d5SChris Wilson if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 2960a266c7d5SChris Wilson i915_handle_error(dev, false); 2961a266c7d5SChris Wilson 2962a266c7d5SChris Wilson for_each_pipe(pipe) { 2963a266c7d5SChris Wilson int reg = PIPESTAT(pipe); 2964a266c7d5SChris Wilson pipe_stats[pipe] = I915_READ(reg); 2965a266c7d5SChris Wilson 2966a266c7d5SChris Wilson /* 2967a266c7d5SChris Wilson * Clear the PIPE*STAT regs before the IIR 2968a266c7d5SChris Wilson */ 2969a266c7d5SChris Wilson if (pipe_stats[pipe] & 0x8000ffff) { 2970a266c7d5SChris Wilson if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 2971a266c7d5SChris Wilson DRM_DEBUG_DRIVER("pipe %c underrun\n", 2972a266c7d5SChris Wilson pipe_name(pipe)); 2973a266c7d5SChris Wilson I915_WRITE(reg, pipe_stats[pipe]); 2974a266c7d5SChris Wilson irq_received = 1; 2975a266c7d5SChris Wilson } 2976a266c7d5SChris Wilson } 2977a266c7d5SChris Wilson spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2978a266c7d5SChris Wilson 2979a266c7d5SChris Wilson if (!irq_received) 2980a266c7d5SChris Wilson break; 2981a266c7d5SChris Wilson 2982a266c7d5SChris Wilson ret = IRQ_HANDLED; 2983a266c7d5SChris Wilson 2984a266c7d5SChris Wilson /* Consume port. Then clear IIR or we'll miss events */ 2985adca4730SChris Wilson if (iir & I915_DISPLAY_PORT_INTERRUPT) { 2986a266c7d5SChris Wilson u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); 2987b543fb04SEgbert Eich u32 hotplug_trigger = hotplug_status & (IS_G4X(dev) ? 2988b543fb04SEgbert Eich HOTPLUG_INT_STATUS_G4X : 29894f7fd709SDaniel Vetter HOTPLUG_INT_STATUS_I915); 2990a266c7d5SChris Wilson 2991a266c7d5SChris Wilson DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", 2992a266c7d5SChris Wilson hotplug_status); 299391d131d2SDaniel Vetter 299410a504deSDaniel Vetter intel_hpd_irq_handler(dev, hotplug_trigger, 299510a504deSDaniel Vetter IS_G4X(dev) ? 
hpd_status_gen4 : hpd_status_i915); 299691d131d2SDaniel Vetter 2997a266c7d5SChris Wilson I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); 2998a266c7d5SChris Wilson I915_READ(PORT_HOTPLUG_STAT); 2999a266c7d5SChris Wilson } 3000a266c7d5SChris Wilson 300121ad8330SVille Syrjälä I915_WRITE(IIR, iir & ~flip_mask); 3002a266c7d5SChris Wilson new_iir = I915_READ(IIR); /* Flush posted writes */ 3003a266c7d5SChris Wilson 3004a266c7d5SChris Wilson if (iir & I915_USER_INTERRUPT) 3005a266c7d5SChris Wilson notify_ring(dev, &dev_priv->ring[RCS]); 3006a266c7d5SChris Wilson if (iir & I915_BSD_USER_INTERRUPT) 3007a266c7d5SChris Wilson notify_ring(dev, &dev_priv->ring[VCS]); 3008a266c7d5SChris Wilson 3009a266c7d5SChris Wilson for_each_pipe(pipe) { 30102c8ba29fSChris Wilson if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS && 301190a72f87SVille Syrjälä i915_handle_vblank(dev, pipe, pipe, iir)) 301290a72f87SVille Syrjälä flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe); 3013a266c7d5SChris Wilson 3014a266c7d5SChris Wilson if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 3015a266c7d5SChris Wilson blc_event = true; 3016a266c7d5SChris Wilson } 3017a266c7d5SChris Wilson 3018a266c7d5SChris Wilson 3019a266c7d5SChris Wilson if (blc_event || (iir & I915_ASLE_INTERRUPT)) 3020a266c7d5SChris Wilson intel_opregion_asle_intr(dev); 3021a266c7d5SChris Wilson 3022515ac2bbSDaniel Vetter if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 3023515ac2bbSDaniel Vetter gmbus_irq_handler(dev); 3024515ac2bbSDaniel Vetter 3025a266c7d5SChris Wilson /* With MSI, interrupts are only generated when iir 3026a266c7d5SChris Wilson * transitions from zero to nonzero. If another bit got 3027a266c7d5SChris Wilson * set while we were handling the existing iir bits, then 3028a266c7d5SChris Wilson * we would never get another interrupt. 3029a266c7d5SChris Wilson * 3030a266c7d5SChris Wilson * This is fine on non-MSI as well, as if we hit this path 3031a266c7d5SChris Wilson * we avoid exiting the interrupt handler only to generate 3032a266c7d5SChris Wilson * another one. 3033a266c7d5SChris Wilson * 3034a266c7d5SChris Wilson * Note that for MSI this could cause a stray interrupt report 3035a266c7d5SChris Wilson * if an interrupt landed in the time between writing IIR and 3036a266c7d5SChris Wilson * the posting read. This should be rare enough to never 3037a266c7d5SChris Wilson * trigger the 99% of 100,000 interrupts test for disabling 3038a266c7d5SChris Wilson * stray interrupts. 
3039a266c7d5SChris Wilson */ 3040a266c7d5SChris Wilson iir = new_iir; 3041a266c7d5SChris Wilson } 3042a266c7d5SChris Wilson 3043d05c617eSDaniel Vetter i915_update_dri1_breadcrumb(dev); 30442c8ba29fSChris Wilson 3045a266c7d5SChris Wilson return ret; 3046a266c7d5SChris Wilson } 3047a266c7d5SChris Wilson 3048a266c7d5SChris Wilson static void i965_irq_uninstall(struct drm_device * dev) 3049a266c7d5SChris Wilson { 3050a266c7d5SChris Wilson drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3051a266c7d5SChris Wilson int pipe; 3052a266c7d5SChris Wilson 3053a266c7d5SChris Wilson if (!dev_priv) 3054a266c7d5SChris Wilson return; 3055a266c7d5SChris Wilson 3056ac4c16c5SEgbert Eich del_timer_sync(&dev_priv->hotplug_reenable_timer); 3057ac4c16c5SEgbert Eich 3058a266c7d5SChris Wilson I915_WRITE(PORT_HOTPLUG_EN, 0); 3059a266c7d5SChris Wilson I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3060a266c7d5SChris Wilson 3061a266c7d5SChris Wilson I915_WRITE(HWSTAM, 0xffffffff); 3062a266c7d5SChris Wilson for_each_pipe(pipe) 3063a266c7d5SChris Wilson I915_WRITE(PIPESTAT(pipe), 0); 3064a266c7d5SChris Wilson I915_WRITE(IMR, 0xffffffff); 3065a266c7d5SChris Wilson I915_WRITE(IER, 0x0); 3066a266c7d5SChris Wilson 3067a266c7d5SChris Wilson for_each_pipe(pipe) 3068a266c7d5SChris Wilson I915_WRITE(PIPESTAT(pipe), 3069a266c7d5SChris Wilson I915_READ(PIPESTAT(pipe)) & 0x8000ffff); 3070a266c7d5SChris Wilson I915_WRITE(IIR, I915_READ(IIR)); 3071a266c7d5SChris Wilson } 3072a266c7d5SChris Wilson 3073ac4c16c5SEgbert Eich static void i915_reenable_hotplug_timer_func(unsigned long data) 3074ac4c16c5SEgbert Eich { 3075ac4c16c5SEgbert Eich drm_i915_private_t *dev_priv = (drm_i915_private_t *)data; 3076ac4c16c5SEgbert Eich struct drm_device *dev = dev_priv->dev; 3077ac4c16c5SEgbert Eich struct drm_mode_config *mode_config = &dev->mode_config; 3078ac4c16c5SEgbert Eich unsigned long irqflags; 3079ac4c16c5SEgbert Eich int i; 3080ac4c16c5SEgbert Eich 3081ac4c16c5SEgbert Eich spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3082ac4c16c5SEgbert Eich for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) { 3083ac4c16c5SEgbert Eich struct drm_connector *connector; 3084ac4c16c5SEgbert Eich 3085ac4c16c5SEgbert Eich if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED) 3086ac4c16c5SEgbert Eich continue; 3087ac4c16c5SEgbert Eich 3088ac4c16c5SEgbert Eich dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED; 3089ac4c16c5SEgbert Eich 3090ac4c16c5SEgbert Eich list_for_each_entry(connector, &mode_config->connector_list, head) { 3091ac4c16c5SEgbert Eich struct intel_connector *intel_connector = to_intel_connector(connector); 3092ac4c16c5SEgbert Eich 3093ac4c16c5SEgbert Eich if (intel_connector->encoder->hpd_pin == i) { 3094ac4c16c5SEgbert Eich if (connector->polled != intel_connector->polled) 3095ac4c16c5SEgbert Eich DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n", 3096ac4c16c5SEgbert Eich drm_get_connector_name(connector)); 3097ac4c16c5SEgbert Eich connector->polled = intel_connector->polled; 3098ac4c16c5SEgbert Eich if (!connector->polled) 3099ac4c16c5SEgbert Eich connector->polled = DRM_CONNECTOR_POLL_HPD; 3100ac4c16c5SEgbert Eich } 3101ac4c16c5SEgbert Eich } 3102ac4c16c5SEgbert Eich } 3103ac4c16c5SEgbert Eich if (dev_priv->display.hpd_irq_setup) 3104ac4c16c5SEgbert Eich dev_priv->display.hpd_irq_setup(dev); 3105ac4c16c5SEgbert Eich spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3106ac4c16c5SEgbert Eich } 3107ac4c16c5SEgbert Eich 3108f71d4af4SJesse Barnes void intel_irq_init(struct drm_device *dev) 3109f71d4af4SJesse 
Barnes { 31108b2e326dSChris Wilson struct drm_i915_private *dev_priv = dev->dev_private; 31118b2e326dSChris Wilson 31128b2e326dSChris Wilson INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func); 311399584db3SDaniel Vetter INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func); 3114c6a828d3SDaniel Vetter INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work); 3115a4da4fa4SDaniel Vetter INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work); 31168b2e326dSChris Wilson 311799584db3SDaniel Vetter setup_timer(&dev_priv->gpu_error.hangcheck_timer, 311899584db3SDaniel Vetter i915_hangcheck_elapsed, 311961bac78eSDaniel Vetter (unsigned long) dev); 3120ac4c16c5SEgbert Eich setup_timer(&dev_priv->hotplug_reenable_timer, i915_reenable_hotplug_timer_func, 3121ac4c16c5SEgbert Eich (unsigned long) dev_priv); 312261bac78eSDaniel Vetter 312397a19a24STomas Janousek pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE); 31249ee32feaSDaniel Vetter 3125f71d4af4SJesse Barnes dev->driver->get_vblank_counter = i915_get_vblank_counter; 3126f71d4af4SJesse Barnes dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ 31277d4e146fSEugeni Dodonov if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) { 3128f71d4af4SJesse Barnes dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */ 3129f71d4af4SJesse Barnes dev->driver->get_vblank_counter = gm45_get_vblank_counter; 3130f71d4af4SJesse Barnes } 3131f71d4af4SJesse Barnes 3132c3613de9SKeith Packard if (drm_core_check_feature(dev, DRIVER_MODESET)) 3133f71d4af4SJesse Barnes dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp; 3134c3613de9SKeith Packard else 3135c3613de9SKeith Packard dev->driver->get_vblank_timestamp = NULL; 3136f71d4af4SJesse Barnes dev->driver->get_scanout_position = i915_get_crtc_scanoutpos; 3137f71d4af4SJesse Barnes 31387e231dbeSJesse Barnes if (IS_VALLEYVIEW(dev)) { 31397e231dbeSJesse Barnes dev->driver->irq_handler = valleyview_irq_handler; 31407e231dbeSJesse Barnes dev->driver->irq_preinstall = valleyview_irq_preinstall; 31417e231dbeSJesse Barnes dev->driver->irq_postinstall = valleyview_irq_postinstall; 31427e231dbeSJesse Barnes dev->driver->irq_uninstall = valleyview_irq_uninstall; 31437e231dbeSJesse Barnes dev->driver->enable_vblank = valleyview_enable_vblank; 31447e231dbeSJesse Barnes dev->driver->disable_vblank = valleyview_disable_vblank; 3145fa00abe0SEgbert Eich dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 3146f71d4af4SJesse Barnes } else if (HAS_PCH_SPLIT(dev)) { 3147f71d4af4SJesse Barnes dev->driver->irq_handler = ironlake_irq_handler; 3148f71d4af4SJesse Barnes dev->driver->irq_preinstall = ironlake_irq_preinstall; 3149f71d4af4SJesse Barnes dev->driver->irq_postinstall = ironlake_irq_postinstall; 3150f71d4af4SJesse Barnes dev->driver->irq_uninstall = ironlake_irq_uninstall; 3151f71d4af4SJesse Barnes dev->driver->enable_vblank = ironlake_enable_vblank; 3152f71d4af4SJesse Barnes dev->driver->disable_vblank = ironlake_disable_vblank; 315382a28bcfSDaniel Vetter dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup; 3154f71d4af4SJesse Barnes } else { 3155c2798b19SChris Wilson if (INTEL_INFO(dev)->gen == 2) { 3156c2798b19SChris Wilson dev->driver->irq_preinstall = i8xx_irq_preinstall; 3157c2798b19SChris Wilson dev->driver->irq_postinstall = i8xx_irq_postinstall; 3158c2798b19SChris Wilson dev->driver->irq_handler = i8xx_irq_handler; 3159c2798b19SChris Wilson dev->driver->irq_uninstall = i8xx_irq_uninstall; 3160a266c7d5SChris Wilson } else if (INTEL_INFO(dev)->gen == 3) { 
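			/* gen3: single 32-bit IIR paths, hotplug where available */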
3161a266c7d5SChris Wilson dev->driver->irq_preinstall = i915_irq_preinstall; 3162a266c7d5SChris Wilson dev->driver->irq_postinstall = i915_irq_postinstall; 3163a266c7d5SChris Wilson dev->driver->irq_uninstall = i915_irq_uninstall; 3164a266c7d5SChris Wilson dev->driver->irq_handler = i915_irq_handler; 316520afbda2SDaniel Vetter dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 3166c2798b19SChris Wilson } else { 3167a266c7d5SChris Wilson dev->driver->irq_preinstall = i965_irq_preinstall; 3168a266c7d5SChris Wilson dev->driver->irq_postinstall = i965_irq_postinstall; 3169a266c7d5SChris Wilson dev->driver->irq_uninstall = i965_irq_uninstall; 3170a266c7d5SChris Wilson dev->driver->irq_handler = i965_irq_handler; 3171bac56d5bSEgbert Eich dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 3172c2798b19SChris Wilson } 3173f71d4af4SJesse Barnes dev->driver->enable_vblank = i915_enable_vblank; 3174f71d4af4SJesse Barnes dev->driver->disable_vblank = i915_disable_vblank; 3175f71d4af4SJesse Barnes } 3176f71d4af4SJesse Barnes } 317720afbda2SDaniel Vetter 317820afbda2SDaniel Vetter void intel_hpd_init(struct drm_device *dev) 317920afbda2SDaniel Vetter { 318020afbda2SDaniel Vetter struct drm_i915_private *dev_priv = dev->dev_private; 3181821450c6SEgbert Eich struct drm_mode_config *mode_config = &dev->mode_config; 3182821450c6SEgbert Eich struct drm_connector *connector; 3183b5ea2d56SDaniel Vetter unsigned long irqflags; 3184821450c6SEgbert Eich int i; 318520afbda2SDaniel Vetter 3186821450c6SEgbert Eich for (i = 1; i < HPD_NUM_PINS; i++) { 3187821450c6SEgbert Eich dev_priv->hpd_stats[i].hpd_cnt = 0; 3188821450c6SEgbert Eich dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED; 3189821450c6SEgbert Eich } 3190821450c6SEgbert Eich list_for_each_entry(connector, &mode_config->connector_list, head) { 3191821450c6SEgbert Eich struct intel_connector *intel_connector = to_intel_connector(connector); 3192821450c6SEgbert Eich connector->polled = intel_connector->polled; 3193821450c6SEgbert Eich if (!connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE) 3194821450c6SEgbert Eich connector->polled = DRM_CONNECTOR_POLL_HPD; 3195821450c6SEgbert Eich } 3196b5ea2d56SDaniel Vetter 3197b5ea2d56SDaniel Vetter /* Interrupt setup is already guaranteed to be single-threaded, this is 3198b5ea2d56SDaniel Vetter * just to make the assert_spin_locked checks happy. */ 3199b5ea2d56SDaniel Vetter spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 320020afbda2SDaniel Vetter if (dev_priv->display.hpd_irq_setup) 320120afbda2SDaniel Vetter dev_priv->display.hpd_irq_setup(dev); 3202b5ea2d56SDaniel Vetter spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 320320afbda2SDaniel Vetter } 3204c67a470bSPaulo Zanoni 3205c67a470bSPaulo Zanoni /* Disable interrupts so we can allow Package C8+. 
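 * The current DEIMR, SDEIMR, GTIMR, GTIER and GEN6_PMIMR values are
 * saved in dev_priv->pc8.regsave so hsw_pc8_restore_interrupts() can
 * put them back when leaving PC8.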
*/ 3206c67a470bSPaulo Zanoni void hsw_pc8_disable_interrupts(struct drm_device *dev) 3207c67a470bSPaulo Zanoni { 3208c67a470bSPaulo Zanoni struct drm_i915_private *dev_priv = dev->dev_private; 3209c67a470bSPaulo Zanoni unsigned long irqflags; 3210c67a470bSPaulo Zanoni 3211c67a470bSPaulo Zanoni spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3212c67a470bSPaulo Zanoni 3213c67a470bSPaulo Zanoni dev_priv->pc8.regsave.deimr = I915_READ(DEIMR); 3214c67a470bSPaulo Zanoni dev_priv->pc8.regsave.sdeimr = I915_READ(SDEIMR); 3215c67a470bSPaulo Zanoni dev_priv->pc8.regsave.gtimr = I915_READ(GTIMR); 3216c67a470bSPaulo Zanoni dev_priv->pc8.regsave.gtier = I915_READ(GTIER); 3217c67a470bSPaulo Zanoni dev_priv->pc8.regsave.gen6_pmimr = I915_READ(GEN6_PMIMR); 3218c67a470bSPaulo Zanoni 3219c67a470bSPaulo Zanoni ironlake_disable_display_irq(dev_priv, ~DE_PCH_EVENT_IVB); 3220c67a470bSPaulo Zanoni ibx_disable_display_interrupt(dev_priv, ~SDE_HOTPLUG_MASK_CPT); 3221c67a470bSPaulo Zanoni ilk_disable_gt_irq(dev_priv, 0xffffffff); 3222c67a470bSPaulo Zanoni snb_disable_pm_irq(dev_priv, 0xffffffff); 3223c67a470bSPaulo Zanoni 3224c67a470bSPaulo Zanoni dev_priv->pc8.irqs_disabled = true; 3225c67a470bSPaulo Zanoni 3226c67a470bSPaulo Zanoni spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3227c67a470bSPaulo Zanoni } 3228c67a470bSPaulo Zanoni 3229c67a470bSPaulo Zanoni /* Restore interrupts so we can recover from Package C8+. */ 3230c67a470bSPaulo Zanoni void hsw_pc8_restore_interrupts(struct drm_device *dev) 3231c67a470bSPaulo Zanoni { 3232c67a470bSPaulo Zanoni struct drm_i915_private *dev_priv = dev->dev_private; 3233c67a470bSPaulo Zanoni unsigned long irqflags; 3234c67a470bSPaulo Zanoni uint32_t val, expected; 3235c67a470bSPaulo Zanoni 3236c67a470bSPaulo Zanoni spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3237c67a470bSPaulo Zanoni 3238c67a470bSPaulo Zanoni val = I915_READ(DEIMR); 3239c67a470bSPaulo Zanoni expected = ~DE_PCH_EVENT_IVB; 3240c67a470bSPaulo Zanoni WARN(val != expected, "DEIMR is 0x%08x, not 0x%08x\n", val, expected); 3241c67a470bSPaulo Zanoni 3242c67a470bSPaulo Zanoni val = I915_READ(SDEIMR) & ~SDE_HOTPLUG_MASK_CPT; 3243c67a470bSPaulo Zanoni expected = ~SDE_HOTPLUG_MASK_CPT; 3244c67a470bSPaulo Zanoni WARN(val != expected, "SDEIMR non-HPD bits are 0x%08x, not 0x%08x\n", 3245c67a470bSPaulo Zanoni val, expected); 3246c67a470bSPaulo Zanoni 3247c67a470bSPaulo Zanoni val = I915_READ(GTIMR); 3248c67a470bSPaulo Zanoni expected = 0xffffffff; 3249c67a470bSPaulo Zanoni WARN(val != expected, "GTIMR is 0x%08x, not 0x%08x\n", val, expected); 3250c67a470bSPaulo Zanoni 3251c67a470bSPaulo Zanoni val = I915_READ(GEN6_PMIMR); 3252c67a470bSPaulo Zanoni expected = 0xffffffff; 3253c67a470bSPaulo Zanoni WARN(val != expected, "GEN6_PMIMR is 0x%08x, not 0x%08x\n", val, 3254c67a470bSPaulo Zanoni expected); 3255c67a470bSPaulo Zanoni 3256c67a470bSPaulo Zanoni dev_priv->pc8.irqs_disabled = false; 3257c67a470bSPaulo Zanoni 3258c67a470bSPaulo Zanoni ironlake_enable_display_irq(dev_priv, ~dev_priv->pc8.regsave.deimr); 3259c67a470bSPaulo Zanoni ibx_enable_display_interrupt(dev_priv, 3260c67a470bSPaulo Zanoni ~dev_priv->pc8.regsave.sdeimr & 3261c67a470bSPaulo Zanoni ~SDE_HOTPLUG_MASK_CPT); 3262c67a470bSPaulo Zanoni ilk_enable_gt_irq(dev_priv, ~dev_priv->pc8.regsave.gtimr); 3263c67a470bSPaulo Zanoni snb_enable_pm_irq(dev_priv, ~dev_priv->pc8.regsave.gen6_pmimr); 3264c67a470bSPaulo Zanoni I915_WRITE(GTIER, dev_priv->pc8.regsave.gtier); 3265c67a470bSPaulo Zanoni 3266c67a470bSPaulo Zanoni spin_unlock_irqrestore(&dev_priv->irq_lock, 
irqflags); 3267c67a470bSPaulo Zanoni }